text
stringlengths 29
850k
|
|---|
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
Ansible shared module for building modules that require an interactive
SSH Shell such as those for command line driven devices. This module
provides a native SSH transport using paramiko and builds a base Shell
class for creating shell driven modules.
In order to use this module, include it as part of a custom
module as shown below and create and subclass Shell.
** Note: The order of the import statements does matter. **
from ansible.module_utils.basic import *
from ansible.module_utils.ssh import *
This module provides the following common argument spec for creating
shell connections:
* host (str) - [Required] The IPv4 address or FQDN of the device
* port (int) - Overrides the default SSH port.
* username (str) - [Required] The username to use to authenticate
the SSH session.
* password (str) - [Required] The password to use to authenticate
the SSH session
* connect_timeout (int) - Specifies the connection timeout in seconds
"""
import re
import socket
from StringIO import StringIO
import paramiko
def shell_argument_spec(spec=None):
    """Build the common argument spec shared by Shell-based modules.

    :param spec: optional dict of extra argument definitions that are
        merged over (and may override) the defaults
    :return: dict suitable for passing to AnsibleModule(argument_spec=...)
    """
    base = {
        'host': {'required': True},
        'port': {'default': 22, 'type': 'int'},
        'username': {'required': True},
        'password': {'required': True},
        'connect_timeout': {'default': 10, 'type': 'int'},
    }
    if spec:
        base.update(spec)
    return base
class ShellError(Exception):
    """Raised when the remote shell reports an error or a command fails.

    Attributes:
        message: human-readable description of the failure
        command: the command that triggered the error, when known
    """

    def __init__(self, msg, command=None):
        # Initialise the base Exception with the message so str() works.
        Exception.__init__(self, msg)
        self.message = msg
        self.command = command
class Command(object):
    """Pairs a CLI command string with optional interactive prompt data."""

    def __init__(self, command, prompt=None, response=None):
        # The raw command text sent to the device.
        self.command = command
        # Optional prompt pattern(s) the device may emit mid-command.
        self.prompt = prompt
        # Answer(s) to send back when the matching prompt appears.
        self.response = response

    def __str__(self):
        # A Command renders as its bare command text.
        return self.command
class Ssh(object):
    """Thin wrapper around paramiko.SSHClient with open/close hooks.

    Subclasses override on_open()/on_close() to set up or tear down
    additional state (e.g. an interactive shell channel).
    """

    def __init__(self):
        self.client = None

    def open(self, host, port=22, username=None, password=None,
             timeout=10, key_filename=None):
        """Connect to *host*, store the client and return on_open()."""
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # Only fall back to agent / key-file auth when no password given.
        key_auth = password is None
        client.connect(host, port=port, username=username,
                       password=password, timeout=timeout,
                       allow_agent=key_auth, look_for_keys=key_auth,
                       key_filename=key_filename)
        self.client = client
        return self.on_open()

    def on_open(self):
        """Hook invoked after the connection is established."""
        pass

    def close(self):
        """Close the underlying client and return on_close()."""
        self.client.close()
        return self.on_close()

    def on_close(self):
        """Hook invoked after the connection is torn down."""
        pass
class Shell(Ssh):
    """Interactive SSH shell session built on the Ssh transport.

    Subclasses populate ``self.prompts`` (compiled regexes that match the
    device's ready prompt) and ``self.errors`` (compiled regexes that match
    device error output); both drive ``receive``/``read``.
    """

    def __init__(self):
        super(Shell, self).__init__()
        self.shell = None      # paramiko channel, created in on_open()
        self.prompts = list()  # regexes signalling "output complete"
        self.errors = list()   # regexes signalling a command error

    def on_open(self):
        # Invoked by Ssh.open(): start the interactive channel and drain
        # the login banner up to the first prompt.
        self.shell = self.client.invoke_shell()
        self.shell.settimeout(10)
        self.receive()

    def receive(self, cmd=None):
        """Read channel output until a prompt regex matches.

        Returns the sanitized output; raises ShellError when an error
        regex matches. socket.timeout propagates if the device goes quiet.
        """
        recv = StringIO()
        while True:
            recv.write(self.shell.recv(200))
            # Only re-inspect the most recent 200 bytes for prompt/error
            # patterns rather than rescanning the whole buffer each chunk.
            recv.seek(recv.tell() - 200)
            window = recv.read()
            if isinstance(cmd, Command):
                # Answer any interactive sub-prompts (confirmations etc.).
                self.handle_input(window, prompt=cmd.prompt,
                                  response=cmd.response)
            try:
                if self.read(window):
                    resp = recv.getvalue()
                    return self.sanitize(cmd, resp)
            except ShellError, exc:
                # Attach the offending command before re-raising.
                exc.command = cmd
                raise

    def send(self, command):
        """Send *command* (str or Command) and return its cleaned output."""
        try:
            cmd = '%s\r' % str(command)
            self.shell.sendall(cmd)
            return self.receive(command)
        except socket.timeout, exc:
            raise ShellError("timeout trying to send command", cmd)

    def handle_input(self, resp, prompt, response):
        # Scan the current window for configured sub-prompts and send the
        # paired responses.
        # NOTE(review): ``to_list`` is not defined in this file; it is
        # presumably provided by the wildcard import of
        # ansible.module_utils.basic described in the module docstring —
        # confirm it is in scope in the embedding module.
        if not prompt or not response:
            return

        prompt = to_list(prompt)
        response = to_list(response)

        for pr, ans in zip(prompt, response):
            match = pr.search(resp)
            if match:
                cmd = '%s\r' % ans
                self.shell.sendall(cmd)

    def sanitize(self, cmd, resp):
        # Strip the echoed command line and any prompt/error lines from
        # the captured output before returning it to the caller.
        cleaned = []
        for line in resp.splitlines():
            if line.startswith(str(cmd)) or self.read(line):
                continue
            cleaned.append(line)
        return "\n".join(cleaned)

    def read(self, response):
        """Return True when *response* matches a prompt regex; raise
        ShellError when it matches an error regex."""
        for regex in self.errors:
            if regex.search(response):
                raise ShellError('{}'.format(response))
        for regex in self.prompts:
            if regex.search(response):
                return True
|
The biggest toy sellers (Target, Walmart, JCPenney and Amazon) have announced their picks for the hottest toys of the year. Coming in at number one are Pomsies – animatronic, interactive pets that kids can take anywhere. Let’s hope supply chains are doing their thing by producing enough of these cuddly critters to meet demand so the world doesn’t have to deal with another Cabbage Patch Kid-like debacle.
Sales and operations planning (S&OP) has long been the backbone of businesses. Yet many companies are still reliant on decades-old processes that haven’t kept pace with digitization, big data and evolving technologies. These processes were never designed to deal with the complex challenges plaguing organizations today. It’s no wonder they’re wreaking havoc – putting profitability and performance at risk.
Traditional S&OP follows a monthly cycle. But for many, the reality is actually closer to six weeks. And a lot can happen in that timeframe. Wouldn’t it be amazing if you could update your plans in real-time? Well good news. You can.
Looking for more specifics on exactly what it takes to improve your S&OP?
Download the ultimate S&OP toolkit — it gives you all the information you’ll need to kick-start your processes and find success in the New Year and beyond.
Do you have any S&OP resources to share? Let us know!
|
from __future__ import print_function
from collections import defaultdict
from astropy.io import fits
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def telluric_systematics(file_list, header_kw='HUMIDITY', plot=True, ref_num=10):
    """
    Find the average change in the requested header keyword over all IGRINS telluric fits

    :param file_list: A list of IGRINS Corrected_* files
    :param header_kw: the keyword to search for in each fits extension header
    :param plot: If true, plot all of the values
    :param ref_num: Which extension to use as the reference
    :return: pandas DataFrame with the median Middle_Wavelength and
             scaled keyword value for each fits extension number
    """
    data = defaultdict(list)
    for fname in file_list:
        print(fname)
        # Use a context manager so the file handle is always released;
        # the original code never closed the HDU list (resource leak).
        with fits.open(fname) as hdulist:
            # Skip the primary HDU; extensions are numbered from 1.
            for i, hdu in enumerate(hdulist[1:]):
                data['fname'].append(fname)
                data['Ext_num'].append(i + 1)
                data['Middle_Wavelength'].append(np.median(hdu.data.field('wavelength')))
                data['value'].append(hdu.header[header_kw])

    # Convert the dict to a dataframe for easier access
    df = pd.DataFrame(data=data)

    # Scale by the median value of the keyword within the given filename
    # (to account for weather variations rather than data systematics)
    median_values = {}
    for fname in file_list:
        ref = df.loc[(df.fname == fname) & (df.Ext_num == ref_num), 'value']
        # .iloc[0] rather than float(Series): calling float() on a Series
        # is deprecated and breaks when the selection is not exactly one row.
        median_values[fname] = float(ref.iloc[0])
    print(median_values)

    def make_scaled(row):
        # Keyword value relative to this file's reference extension.
        return row['value'] / median_values[row['fname']]

    df['scaled_value'] = df.apply(make_scaled, axis=1)

    # Determine the median value for each fits extension
    median = df.groupby('Ext_num').median()[['Middle_Wavelength', 'scaled_value']]

    # Plot, if desired
    if plot:
        plt.scatter(df.Middle_Wavelength, df.scaled_value, color='red', alpha=0.1)
        plt.plot(median.Middle_Wavelength, median.scaled_value, color='green', lw=2)
        plt.xlabel('Wavelength (nm)')
        plt.ylabel('{} scale factor'.format(header_kw))
        plt.show()

    return median
|
It is often said to be “the greatest show on earth”, but these days the Olympic ideals of citius, altius, fortius (faster, higher, stronger) can just as easily be applied to the lucrative commercial ventures accompanying the world of sport.
As the events of London 2012 play out around the world, London Business School, in collaboration with Deloitte, is exploring the lessons shared between business and sport, bringing together a world-class line-up of speakers from the worlds of sport, business and academia at its Global Leadership Summit on 21 May 2012.
Speakers will explore how principles of teamwork, leadership and motivation in the sports world can be applied to business.
The sporting world will also be represented from the business brains behind such big brands as Juventus, Manchester United and Arsenal football teams, and cricket’s Indian Premier League.
Sir John Armitt, Chair of Olympic Delivery Authority, Beth Comstock, Senior Vice President & Chief Marketing Officer, GE, and Heather Hancock, Lead Partner, London 2012 & Managing Partner for Innovation & Brand, Deloitte UK, are among the top-level business thinkers debating the global impact of major international sporting events. They will cover infrastructure, sustainable legacy, the value of sports industry brands and the reasons behind corporate sponsorship of major sporting events.
Sean Fitzpatrick, Founder, Front Row Leadership and former New Zealand All Black rugby captain will be tackling a panel on strategies for leadership, looking at the parallels between business and sport alongside Niels de Vos, Chief Executive Officer at UK Athletics, and London Business School’s Professor Lynda Gratton.
Sir Andrew Likierman, Dean, London Business School, said: “The relationship between business and sport is not always straightforward. Without sponsorship, some sporting activity would not be possible. But there are criticisms about excessive commercialisation. So what is the right balance?
For the first time at the Global Leadership Summit, academics from London Business School will be joined by peers from some of the world’s leading business schools. Stepping up to the plate will be experts from US-based Wharton, NYU Stern and Fuqua School of Business, Duke University, and Spanish school IE.
|
from __future__ import division
class MetrixDB(object):
    """High-level facade over the metrix database.

    Each operation imports its parser/writer lazily so that the optional
    dependency is only loaded when the corresponding feature is used.
    """

    def __init__(self, overwrite=False):
        """Open (or recreate, when overwrite=True) the database handle."""
        from metrix_db.initialiser import Initialiser
        self.handle = Initialiser(overwrite=overwrite).handle

    def add_pdb_entry(self, pdb_id, filename):
        """Record a PDB entry parsed from *filename* under *pdb_id*."""
        from metrix_db.pdb_parser import PDBParser
        PDBParser(self.handle).add_entry(pdb_id, filename)

    def add_xia2_entry(self,
                       pdb_id,
                       xia2_txt_filename,
                       xia2_json_filename):
        """Record a xia2 processing entry for *pdb_id*."""
        from metrix_db.xia2_parser import XIA2Parser
        XIA2Parser(self.handle).add_entry(pdb_id, xia2_txt_filename, xia2_json_filename)

    def add_protein_entry(self, pdb_id, filename):
        """Record a protein entry parsed from *filename* under *pdb_id*."""
        from metrix_db.protein_parser import ProteinParser
        ProteinParser(self.handle).add_protein(pdb_id, filename)

    def write_csv(self, filename):
        """Dump the database contents to *filename* as CSV."""
        from metrix_db.csv_writer import CSVWriter
        CSVWriter(self.handle).write(filename)
|
The dry cargo chartering team operates from the company’s Monaco office and has considerable experience in both short and long term charters as well as the arrangement of contracts of affreightment. The team handles all dry cargo business on a worldwide basis and covers all vessel sizes ranging from Handysize up to Capesize.
The dry cargo team works closely with the newbuilding and sale & purchase teams and can facilitate employment for vessels to be ordered and/or purchased. Similarly, the company has also concluded several period charters in other sectors including tankers and PCTCs.
|
# -*- encoding: utf-8 -*-
#
# Copyright 2012 Martin Zimmermann <info@posativ.org>. All rights reserved.
# License: BSD Style, 2 clauses -- see LICENSE.
import math
import random
from collections import defaultdict
from acrylamid.compat import iteritems
from acrylamid.helpers import expand, safeslug, hash
from acrylamid.views.index import Index, Paginator
def fetch(entrylist):
    """Group entries by tag (case-insensitively) and expose each group
    under the tag spelling that occurs most often in the source entries.
    """
    grouped = defaultdict(list)
    counts = defaultdict(int)

    for entry in entrylist:
        for tag in entry.tags:
            grouped[tag.lower()].append(entry)
            counts[tag] += 1

    # Re-key every group by its most frequent original spelling
    # (ties broken by the lexicographically larger spelling).
    for lowered in list(grouped.keys()):
        best = max((counts[spelling], spelling) for spelling in counts
                   if spelling.lower() == lowered)[1]
        grouped[best] = grouped.pop(lowered)

    return grouped
class Tagcloud(object):
    """Tagcloud helper class similar (almost identical) to pelican's tagcloud helper object.

    Takes a bunch of tags and produces a logarithm-based partition and returns a iterable
    object yielding a Tag-object with two attributes: name and step where step is the
    calculated step size (== font size) and reaches from 0 to steps-1.

    :param tags: a dictionary of tags, e.g. {'name': [list of entries]}
    :param steps: maximum steps
    :param max_items: maximum items shown in tagcloud
    :param start: start index of steps resulting in start to steps+start-1 steps."""

    def __init__(self, tags, steps=4, max_items=100, start=0, shuffle=False):
        # NOTE(review): tags are sorted alphabetically by name before
        # truncating to max_items — confirm selecting alphabetically
        # (rather than by count) is the intended rule.
        lst = sorted([(k, len(v)) for k, v in iteritems(tags)],
                     key=lambda x: x[0])[:max_items]
        # stolen from pelican/generators.py:286
        max_count = max(lst, key=lambda k: k[1])[1] if lst else None
        # Logarithmic bucketing: tags with more entries get larger steps.
        self.lst = [(tag, count,
                     int(math.floor(steps - (steps - 1) * math.log(count)
                         / (math.log(max_count) or 1)))+start-1)
                    for tag, count in lst]
        if shuffle:
            random.shuffle(self.lst)
        self.tags = tags

    def __iter__(self):
        # Yield lightweight anonymous objects exposing name/step/count.
        for tag, count, step in self.lst:
            yield type('Tag', (), {'name': tag, 'step': step, 'count': count})

    def __hash__(self):
        # ``hash`` here is acrylamid.helpers.hash (varargs), not builtins.hash.
        return hash(*self.lst)

    def __getitem__(self, tag):
        # Accepts a Tag-like object exposing ``.name``; returns its entries.
        return self.tags[tag.name]
class Tag(Index):
    """Same behaviour like Index except ``route`` that defaults to */tag/:name/* and
    ``pagination`` that defaults to */tag/:name/:num/* where :name is the current
    tag identifier.

    To create a tag cloud head over to :doc:`conf.py`.
    """

    export = ['prev', 'curr', 'next', 'items_per_page', 'tag', 'entrylist']
    template = 'main.html'

    def populate_tags(self, request):
        # Build and cache the tag -> entries mapping used by generate().
        tags = fetch(request['entrylist'])
        self.tags = tags
        return tags

    def context(self, conf, env, request):
        """Register the ``tagify`` template filter and attach the tag
        cloud to the environment."""

        # Minimal hyperlink object handed to templates.
        class Link:

            def __init__(self, title, href):
                self.title = title
                self.href = href

        def tagify(tags):
            # Expand this view's path pattern with the slugified tag name;
            # accepts either a single tag or a list/tuple of tags.
            href = lambda t: expand(self.path, {'name': safeslug(t)})
            return [Link(t, href(t)) for t in tags] if isinstance(tags, (list, tuple)) \
                else Link(tags, href(tags))

        tags = self.populate_tags(request)

        env.engine.register('tagify', tagify)
        env.tag_cloud = Tagcloud(tags, conf['tag_cloud_steps'],
                                 conf['tag_cloud_max_items'],
                                 conf['tag_cloud_start_index'],
                                 conf['tag_cloud_shuffle'])
        return env

    def generate(self, conf, env, data):
        """Creates paged listing by tag."""
        for tag in self.tags:
            data['entrylist'] = [entry for entry in self.tags[tag]]
            for res in Paginator.generate(self, conf, env, data, tag=tag, name=safeslug(tag)):
                yield res
|
How a Home Insurance Helps a Home Owner Protect his Precious Investment In life, everything you spend money on can be treated as a form of investment as you strive for bigger things, but you’ll surely be hard-pressed to find many investments that can exceed a house when it comes to worth and price. Not only is a home expensive – it also provides an immense amount of benefits for an individual along with his family, as it renders fulfillment and a form of security for one’s family. However, no matter how tough and robust your home is, the possibility of it becoming part of the news the next morning due to some accident isn’t entirely remote, and there’s no doubt that you would not want any unfortunate events to befall your precious investment. There are many accidents that may threaten your home and even damage it to some extent – from fire, theft, severe explosions that may blow up your house entirely, too much snow, typhoons, floods and other disasters you may have in mind. Although a home insurance doesn’t literally erect a barrier of protection around your home, it does provide you a sense of security which, in a way, would allow you to protect your investment. There are many people out there in society who fail to purchase home insurance because of the mindset that it is something far too out of reach for an average individual’s bankroll. Truth be told, there are expensive plans out there, but if you really strive and push deeper into the market, there’s a high chance that you’ll find an affordable home insurance that would not make your knees go weak. You’re certainly not far off from success as long as you search, and soon you’ll likely find a suitable company for you.
If the time comes when your home is unfortunately struck with a disaster which damaged it or even destroyed it, you’ll certainly have your home insurance companies supporting you according to your agreements. Depending on the plan that you have purchased, the damages done by the event would be compensated through cash, which in a way, brings you back the foundation of your investment.
Through the aid of the home insurance agencies, you will also be able to reassure that your family is well-protected from the stresses and worries the catastrophic event may have brought them. After destructive events that ends you up with a damaged home, your family will surely be concerned about expenses and rising up from the ashes but with the help of the home insurance compensations, you’ll be able to come back up with minimal effort.
|
# -*- coding: utf-8 -*-
"""
Sahana Eden Volunteers Management
(Extends modules/eden/hrm.py)
@copyright: 2012-15 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3VolunteerModel",
"S3VolunteerAwardModel",
"S3VolunteerClusterModel",
"vol_service_record",
]
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# =============================================================================
class S3VolunteerModel(S3Model):
    """Extra per-volunteer details attached to hrm_human_resource."""

    names = ("vol_details",)

    def model(self):

        T = current.T
        UNKNOWN_OPT = current.messages.UNKNOWN_OPT

        # Coded availability options stored as integers in vol_details
        availability_opts = {1: T("No Restrictions"),
                             2: T("Weekends only"),
                             3: T("School Holidays only"),
                             }

        # ---------------------------------------------------------------------
        # Volunteer Details
        # - extra details for volunteers
        #
        tablename = "vol_details"
        self.define_table(tablename,
                          self.hrm_human_resource_id(ondelete = "CASCADE"),
                          Field("active", "boolean",
                                default = False,
                                label = T("Active"),
                                represent = self.vol_active_represent,
                                ),
                          Field("availability", "integer",
                                label = T("Availability"),
                                represent = lambda opt: \
                                            availability_opts.get(opt,
                                                                  UNKNOWN_OPT),
                                requires = IS_EMPTY_OR(
                                            IS_IN_SET(availability_opts)
                                           ),
                                ),
                          Field("card", "boolean",
                                default = False,
                                label = T("Card holder"),
                                represent = self.vol_active_represent,
                                # Enable in-template when-required
                                readable = False,
                                writable = False,
                                ),
                          *s3_meta_fields())

    # =========================================================================
    @staticmethod
    def vol_active_represent(opt):
        """ Represent the Active status of a Volunteer """

        if "report" in current.request.args:
            # We can't use a represent
            return opt

        # List view, so HTML represent is fine
        if opt:
            output = DIV(current.T("Yes"), _style="color:green")
        else:
            output = DIV(current.T("No"), _style="color:red")
        return output
# =============================================================================
class S3VolunteerAwardModel(S3Model):
    """Awards grantable to volunteers, plus the link table connecting
    individual volunteers (persons) to awards."""

    names = ("vol_award",
             "vol_volunteer_award",
             )

    def model(self):

        T = current.T
        db = current.db
        auth = current.auth
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table

        ADMIN = current.session.s3.system_roles.ADMIN
        is_admin = auth.s3_has_role(ADMIN)
        root_org = auth.root_org()
        # Admins see all awards; org users see their own org's plus
        # unscoped (organisation_id is None) awards.
        if is_admin:
            filter_opts = ()
        elif root_org:
            filter_opts = (root_org, None)
        else:
            filter_opts = (None,)

        # ---------------------------------------------------------------------
        # Volunteer Award
        #
        tablename = "vol_award"
        define_table(tablename,
                     Field("name",
                           label = T("Name")),
                     # Only included in order to be able to set
                     # realm_entity to filter appropriately
                     self.org_organisation_id(default = root_org,
                                              readable = is_admin,
                                              writable = is_admin,
                                              ),
                     s3_comments(label=T("Description"),
                                 comment=None),
                     *s3_meta_fields())

        crud_strings[tablename] = Storage(
            label_create = T("Create Award"),
            title_display = T("Award"),
            title_list = T("Award"),
            title_update = T("Edit Award"),
            title_upload = T("Import Awards"),
            label_list_button = T("List Awards"),
            label_delete_button = T("Delete Award"),
            msg_record_created = T("Award added"),
            msg_record_modified = T("Award updated"),
            msg_record_deleted = T("Award deleted"),
            msg_list_empty = T("No Awards found"))

        comment = S3AddResourceLink(c = "vol",
                                    f = "award",
                                    label = crud_strings[tablename].label_create,
                                    title = T("Award"),
                                    )

        # Reusable foreign key to vol_award, filtered by organisation
        represent = S3Represent(lookup=tablename)
        award_id = S3ReusableField("award_id", "reference %s" % tablename,
                                   label = T("Award"),
                                   requires = IS_EMPTY_OR(
                                                IS_ONE_OF(db,
                                                          "vol_award.id",
                                                          represent,
                                                          filterby="organisation_id",
                                                          filter_opts=filter_opts)),
                                   represent = represent,
                                   comment = comment
                                   )

        # ---------------------------------------------------------------------
        # Volunteers <> Awards link table
        #
        tablename = "vol_volunteer_award"
        define_table(tablename,
                     self.pr_person_id(empty=False),
                     award_id(),
                     s3_date(),
                     s3_comments(),
                     *s3_meta_fields())

        crud_strings[tablename] = Storage(
            label_create = T("Create Award"),
            title_display = T("Award"),
            title_list = T("Award"),
            title_update = T("Edit Award"),
            title_upload = T("Import Awards"),
            label_list_button = T("List Awards"),
            label_delete_button = T("Delete Award"),
            msg_record_created = T("Award added"),
            msg_record_modified = T("Award updated"),
            msg_record_deleted = T("Award deleted"),
            msg_list_empty = T("No Awards found"))

        self.configure(tablename,
                       context = {"person": "person_id"},
                       )

        # Pass names back to global scope (s3.*)
        return dict()
# =============================================================================
class S3VolunteerClusterModel(S3Model):
    """Volunteer cluster types, clusters, cluster positions, and the link
    table attaching a volunteer (human resource) to a cluster."""

    names = ("vol_cluster_type",
             "vol_cluster",
             "vol_cluster_position",
             "vol_volunteer_cluster",
             )

    def model(self):

        T = current.T
        db = current.db
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table

        # ---------------------------------------------------------------------
        # Volunteer Cluster Type
        tablename = "vol_cluster_type"
        define_table(tablename,
                     Field("name", length=255, unique=True,
                           label = T("Name")),
                     *s3_meta_fields())

        crud_strings[tablename] = Storage(
            label_create = T("Create Volunteer Cluster Type"),
            title_display = T("Volunteer Cluster Type"),
            title_list = T("Volunteer Cluster Type"),
            title_update = T("Edit Volunteer Cluster Type"),
            title_upload = T("Import Volunteer Cluster Types"),
            label_list_button = T("List Volunteer Cluster Types"),
            label_delete_button = T("Delete Volunteer Cluster Type"),
            msg_record_created = T("Volunteer Cluster Type added"),
            msg_record_modified = T("Volunteer Cluster Type updated"),
            msg_record_deleted = T("Volunteer Cluster Type deleted"),
            msg_list_empty = T("No Volunteer Cluster Types"))

        comment = S3AddResourceLink(c = "vol",
                                    f = "cluster_type",
                                    vars = dict(child = "vol_cluster_type_id",
                                                parent = "volunteer_cluster"),
                                    label = crud_strings[tablename].label_create,
                                    title = T("Volunteer Cluster Type"),
                                    )

        # Reusable foreign key to vol_cluster_type
        represent = S3Represent(lookup=tablename)
        vol_cluster_type_id = S3ReusableField("vol_cluster_type_id", "reference %s" % tablename,
                                              label = T("Volunteer Cluster Type"),
                                              requires = IS_EMPTY_OR(
                                                            IS_ONE_OF(db,
                                                                      "vol_cluster_type.id",
                                                                      represent)),
                                              represent = represent,
                                              comment = comment
                                              )

        # ---------------------------------------------------------------------
        # Volunteer Cluster
        tablename = "vol_cluster"
        define_table(tablename,
                     vol_cluster_type_id(),
                     Field("name", length=255, unique=True,
                           label = T("Name")),
                     *s3_meta_fields())

        crud_strings[tablename] = Storage(
            label_create = T("Create Volunteer Cluster"),
            title_display = T("Volunteer Cluster"),
            title_list = T("Volunteer Cluster"),
            title_update = T("Edit Volunteer Cluster"),
            title_upload = T("Import Volunteer Clusters"),
            label_list_button = T("List Volunteer Clusters"),
            label_delete_button = T("Delete Volunteer Cluster"),
            msg_record_created = T("Volunteer Cluster added"),
            msg_record_modified = T("Volunteer Cluster updated"),
            msg_record_deleted = T("Volunteer Cluster deleted"),
            msg_list_empty = T("No Volunteer Clusters"))

        comment = S3AddResourceLink(c = "vol",
                                    f = "cluster",
                                    vars = dict(child = "vol_cluster_id",
                                                parent = "volunteer_cluster"),
                                    label = crud_strings[tablename].label_create,
                                    title = T("Volunteer Cluster"),
                                    )

        # Reusable foreign key to vol_cluster
        represent = S3Represent(lookup=tablename)
        vol_cluster_id = S3ReusableField("vol_cluster_id", "reference %s" % tablename,
                                         label = T("Volunteer Cluster"),
                                         requires = IS_EMPTY_OR(
                                                        IS_ONE_OF(db,
                                                                  "vol_cluster.id",
                                                                  represent)),
                                         represent = represent,
                                         comment = comment
                                         )

        # ---------------------------------------------------------------------
        # Volunteer Group Position
        #
        tablename = "vol_cluster_position"
        define_table(tablename,
                     Field("name", length=255, unique=True,
                           label = T("Name")),
                     *s3_meta_fields())

        crud_strings[tablename] = Storage(
            label_create = T("Create Volunteer Cluster Position"),
            title_display = T("Volunteer Cluster Position"),
            title_list = T("Volunteer Cluster Position"),
            title_update = T("Edit Volunteer Cluster Position"),
            title_upload = T("Import Volunteer Cluster Positions"),
            label_list_button = T("List Volunteer Cluster Positions"),
            label_delete_button = T("Delete Volunteer Cluster Position"),
            msg_record_created = T("Volunteer Cluster Position added"),
            msg_record_modified = T("Volunteer Cluster Position updated"),
            msg_record_deleted = T("Volunteer Cluster Position deleted"),
            msg_list_empty = T("No Volunteer Cluster Positions"))

        comment = S3AddResourceLink(c = "vol",
                                    f = "cluster_position",
                                    vars = dict(child = "vol_cluster_position_id",
                                                parent = "volunteer_cluster"),
                                    label = crud_strings[tablename].label_create,
                                    title = T("Volunteer Cluster Position"),
                                    )

        # Reusable foreign key to vol_cluster_position
        represent = S3Represent(lookup=tablename)
        vol_cluster_position_id = S3ReusableField("vol_cluster_position_id", "reference %s" % tablename,
                                                  label = T("Volunteer Cluster Position"),
                                                  requires = IS_EMPTY_OR(
                                                                IS_ONE_OF(db,
                                                                          "vol_cluster_position.id",
                                                                          represent)),
                                                  represent = represent,
                                                  comment = comment
                                                  )

        # ---------------------------------------------------------------------
        # Volunteer Cluster Link Table
        # Client-side filter: selecting a cluster type narrows the cluster
        # dropdown via an S3 Ajax options lookup.
        cluster_type_filter = '''
$.filterOptionsS3({
 'trigger':'vol_cluster_type_id',
 'target':'vol_cluster_id',
 'lookupKey':'vol_cluster_type_id',
 'lookupPrefix':'vol',
 'lookupResource':'cluster',
})'''

        tablename = "vol_volunteer_cluster"
        define_table(tablename,
                     self.hrm_human_resource_id(ondelete = "CASCADE"),
                     vol_cluster_type_id(script = cluster_type_filter), # This field is ONLY here to provide a filter
                     vol_cluster_id(readable=False,
                                    writable=False),
                     vol_cluster_position_id(readable=False,
                                             writable=False),
                     *s3_meta_fields())

        # Pass names back to global scope (s3.*)
        return dict(vol_cluster_type_id = vol_cluster_type_id,
                    vol_cluster_id = vol_cluster_id,
                    )

    # =====================================================================
    @staticmethod
    def defaults():
        """
        Return safe defaults for model globals, this will be called instead
        of model() in case the model has been deactivated in
        deployment_settings.
        """

        return dict(
            vol_cluster_id = S3ReusableField("vol_cluster_id", "integer",
                                             readable=False,
                                             writable=False),
            )
# =============================================================================
def vol_service_record(r, **attr):
"""
Generate a Volunteer Service Record
"""
record = r.record
if record.type != 2:
# Only relevant to volunteers
return None
T = current.T
db = current.db
ptable = db.pr_person
person_id = record.person_id
person = db(ptable.id == person_id).select(ptable.pe_id,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
ptable.comments,
limitby=(0, 1),
).first()
vol_name = s3_fullname(person)
def callback(r):
# Header
s3db = current.s3db
otable = db.org_organisation
org_id = record.organisation_id
org = db(otable.id == org_id).select(otable.name,
otable.acronym,
otable.logo,
limitby=(0, 1),
).first()
org_name = org.name
logo = org.logo
if logo:
logo = s3db.org_organisation_logo(org)
elif current.deployment_settings.get_org_branches():
root_org = current.cache.ram(
# Common key with auth.root_org
"root_org_%s" % org_id,
lambda: s3db.org_root_organisation(org_id),
time_expire=120
)
logo = s3db.org_organisation_logo(root_org)
innerTable = TABLE(TR(TH(vol_name)),
TR(TD(org_name)))
person_details = TABLE(TR(TD(logo),
TD(innerTable)
))
pe_id = person.pe_id
# Photo
itable = s3db.pr_image
query = (itable.pe_id == pe_id) & \
(itable.profile == True)
image = db(query).select(itable.image,
limitby=(0, 1)).first()
if image:
image = image.image
size = (160, None)
image = s3db.pr_image_represent(image, size=size)
size = s3db.pr_image_size(image, size)
url = URL(c="default",
f="download",
args=image)
avatar = IMG(_src=url,
_width=size[0],
_height=size[1],
)
person_details[0].append(TD(avatar))
# Contact Details
contact_details = DIV()
# Addresses
addrtable = s3db.pr_address
ltable = db.gis_location
query = (addrtable.pe_id == pe_id) & \
(addrtable.location_id == ltable.id)
addresses = db(query).select(addrtable.type,
ltable.addr_street,
ltable.L3,
ltable.L2,
ltable.L1,
orderby = addrtable.type,
limitby=(0, 2))
address_list = []
for address in addresses:
_location = address["gis_location"]
address = TABLE(TR(TH(addrtable.type.represent(address["pr_address"].type))),
TR(_location.addr_street),
TR(_location.L3),
TR(_location.L2),
TR(_location.L1),
)
address_list.append(address)
# Contacts
ctable = s3db.pr_contact
contacts = db(ctable.pe_id == pe_id).select(ctable.contact_method,
ctable.value,
orderby = ctable.priority,
limitby=(0, 3))
contact_list = TABLE()
contact_represent = ctable.contact_method.represent
for contact in contacts:
contact_list.append(TH(contact_represent(contact.contact_method)))
contact_list.append(contact.value)
# Emergency Contact
#ectable = s3db.pr_contact_emergency
#emergency = db(ectable.pe_id == pe_id).select(ectable.name,
# ectable.relationship,
# ectable.phone,
# limitby=(0, 1)).first()
#if emergency:
# econtact = TABLE(TR(TH(T("Emergency Contact"))),
# TR(emergency.name),
# TR(emergency.relationship),
# TR(emergency.phone),
# )
#else:
# econtact = TABLE()
contact_row = TR()
if len(address_list) > 0:
contact_row.append(TD(address_list[0]))
if len(address_list) > 1:
contact_row.append(TD(address_list[1]))
contact_row.append(contact_list)
#contact_row.append(econtact)
# Identity
idtable = s3db.pr_identity
query = (idtable.person_id == person_id) & \
(idtable.deleted == False)
rows = db(query).select(idtable.type,
idtable.value,
idtable.valid_until)
id_row = TR()
for identity in rows:
id_row.append(TABLE(TR(TH(idtable.type.represent(identity.type))),
TR(identity.value),
TR(identity.valid_until),
))
# Comments:
comments = person.comments or ""
if comments:
comments = TABLE(TR(TH(T("Comments"))),
TR(comments))
# Training Courses
hours = {}
ttable = s3db.hrm_training
ctable = s3db.hrm_course
query = (ttable.deleted == False) & \
(ttable.person_id == person_id) & \
(ttable.course_id == ctable.id)
rows = db(query).select(ctable.name,
ttable.date,
ttable.hours,
orderby = ~ttable.date)
date_represent = ttable.date.represent
for row in rows:
_row = row["hrm_training"]
_date = _row.date
hours[_date.date()] = dict(course = row["hrm_course"].name,
date = date_represent(_date),
hours = _row.hours or "",
)
courses = TABLE(TR(TH(T("Date")),
TH(T("Training")),
TH(T("Hours"))))
_hours = {}
for key in sorted(hours.iterkeys()):
_hours[key] = hours[key]
total = 0
for hour in hours:
_hour = hours[hour]
__hours = _hour["hours"] or 0
courses.append(TR(_hour["date"],
_hour["course"],
str(__hours)
))
total += __hours
if total > 0:
courses.append(TR(TD(""), TD("Total"), TD("%d" % total)))
# Programme Hours
# - grouped by Programme/Role
programmes = OrderedDict()
hrstable = s3db.hrm_programme_hours
ptable = db.hrm_programme
jtable = db.hrm_job_title
query = (hrstable.deleted == False) & \
(hrstable.training == False) & \
(hrstable.person_id == person_id) & \
(hrstable.programme_id == ptable.id)
left = jtable.on(hrstable.job_title_id == jtable.id)
rows = db(query).select(hrstable.date,
hrstable.hours,
jtable.name,
ptable.name,
ptable.name_long,
left=left,
orderby = ~hrstable.date)
NONE = current.messages["NONE"]
for row in rows:
_row = row["hrm_programme_hours"]
_date = _row.date
hours = _row.hours or 0
role = row["hrm_job_title"]["name"] or NONE
prow = row["hrm_programme"]
if prow.name_long:
programme = prow.name_long
else:
programme = prow.name
if programme not in programmes:
programmes[programme] = OrderedDict()
p = programmes[programme]
if role in p:
p[role]["end_date"] = _date
p[role]["hours"] += hours
else:
p[role] = dict(start_date = _date,
end_date = _date,
hours = hours,
)
date_represent = hrstable.date.represent
programme = TABLE(TR(TH(T("Start Date")),
TH(T("End Date")),
TH(T("Work on Program")),
TH(T("Role")),
TH(T("Hours"))))
total = 0
for p in programmes:
_p = programmes[p]
for r in _p:
role = _p[r]
hours = role["hours"]
total += hours
programme.append(TR(date_represent(role["start_date"]),
date_represent(role["end_date"]),
p,
r,
str(hours)
))
if total > 0:
programme.append(TR("", "", "", TD("Total"), TD("%d" % total)))
# Space for the printed document to be signed
datestamp = S3DateTime.date_represent(current.request.now)
datestamp = "%s: %s" % (T("Date Printed"), datestamp)
manager = T("Branch Coordinator")
signature = TABLE(TR(TH(T("Signature"))),
TR(TD()),
TR(TD(manager)),
TR(TD(datestamp)))
output = DIV(TABLE(TR(TH(T("Volunteer Service Record")))),
person_details,
TABLE(contact_row),
TABLE(id_row),
TABLE(comments),
TABLE(courses),
TABLE(programme),
TABLE(signature),
)
return output
from s3.s3export import S3Exporter
exporter = S3Exporter().pdf
return exporter(r.resource,
request = r,
method = "list",
pdf_title = "%s - %s" % \
(vol_name, T("Volunteer Service Record")),
pdf_table_autogrow = "B",
pdf_callback = callback,
**attr
)
# END =========================================================================
|
For a healthier version, cook on a circle of Bake-O-Glide on the Simmering Plate without the need for any fat. Alternatively, fry in a little butter or bacon fat.
Mash the potatoes and place in a bowl. Grate the raw potato into this and add the flour and bicarbonate of soda, season. Add enough buttermilk to make a soft batter.
2, 3, 4 and 5 oven AGA: Place a circle of Bake-O-Glide onto the Simmering Plate and place spoonfuls of the mixture onto the surface. Cook one side until golden then turn over and cook the other side.
Alternatively fry in a little butter in a frying pan. Serve with grilled bacon for breakfast or fried eggs.
Rayburn cooking: Cook on a piece of Bake-O-Glide on the hotplate at an appropriate heat.
Conventional cooking: Cook in a frying pan on the hob.
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 14 17:15:01 2016
@author: dan
"""
import re, pulp
import pandas as pd
import matplotlib.pyplot as plt
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible
#from ..scripts.despine_axes import despine
def despine(ax, fontsize=15):
    """Remove the top/right spines of *ax* and resize its labels in place.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Axes to restyle (modified in place).
    fontsize : int, optional
        Point size applied to the tick labels and to the x/y axis labels.
    """
    ax.tick_params(right=0, top=0, direction='out', labelsize=fontsize)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    # Bug fix: the axis-label size previously ignored *fontsize* and was
    # hard-coded to 15; use the parameter so callers can actually change it.
    ax.set_xlabel(ax.get_xlabel(), size=fontsize)
    ax.set_ylabel(ax.get_ylabel(), size=fontsize)
#%%
# Load the COBRA<->Gerosa reaction-id mapping and the measured fluxes
# (mean and stdev, mmol/gCDW/h) from Gerosa et al. 2015.
# NOTE: pd.DataFrame.from_csv is the legacy (pandas < 0.21) reader.
rid_mapping = pd.DataFrame.from_csv("../source/rid_mapping_cobra_2_Gerosa.csv")
MFA = pd.DataFrame.from_csv('../source/mmol_gCDW_hr_[Gerosa et al 2015].csv',
                            index_col=1)
MFA_std = pd.DataFrame.from_csv('../source/mmol_gCDW_hr_stdev_[Gerosa et al 2015].csv',
                                index_col=1)
# Growth conditions: keep only those with a defined carbon-source media key,
# ordered by the measured growth rate.
conditions = pd.DataFrame.from_csv("../data/conditions.csv")
conditions = conditions[conditions.media_key>0]
conditions.sort_values('growth rate Gerosa [h-1]', inplace=True)
cs = conditions.index
#%%
# Map each measured Gerosa flux onto its COBRA reaction id(s).
# Rows whose id contains '+' or '-' combine several reactions; those are
# skipped here and handled as paired LP constraints further below.
measured_flux = pd.DataFrame(columns=cs, index=rid_mapping.index)
measured_flux_stdev = pd.DataFrame(columns=cs, index=rid_mapping.index)
for row in MFA.iterrows():
    if not re.findall("[+-]", row[0]):
        # a single Gerosa id may list several ';'-separated reactions
        for r in row[0].split(';'):
            cobra_reactions = rid_mapping[rid_mapping['gerosa_reaction_id']==r]
            for r_cobra in cobra_reactions.index:
                v = row[1]
                measured_flux.loc[r_cobra] = v
                measured_flux_stdev.loc[r_cobra] = MFA_std.loc[row[0]]
# Drop COBRA reactions that received no measurement.
measured_flux.dropna(inplace=True)
measured_flux_stdev.dropna(inplace=True)
#%%
# For every growth condition: project the measured fluxes onto the
# stoichiometrically consistent flux space of iJO1366 via an LP that
# minimises the sum of absolute residuals (MOMA-style), then plot
# measured vs. projected fluxes and save the projections.
model = create_cobra_model_from_sbml_file('../source/iJO1366.xml')
all_reactions = map(str, model.reactions)        # Python 2: map() returns a list here
all_metabolites = map(str, model.metabolites)
mmol_gCDW_h = pd.DataFrame(columns=cs, index=measured_flux.index)
for c in cs:
    cobra_c = conditions.loc[c, 'media_key']
    gr = conditions.loc[c, 'growth rate Gerosa [h-1]']
    flux_meas = measured_flux[c]
    flux_stderr = measured_flux_stdev[c]
    # load fresh copy of model
    model = create_cobra_model_from_sbml_file('../source/iJO1366.xml')
    # redefine sole carbon source uptake reaction in mmol/gr/h
    model.reactions.get_by_id('EX_glc_e').lower_bound = 0
    model.reactions.get_by_id('EX_' + cobra_c + '_e').lower_bound = -1000
    # set growth rate according to measurements (fixed by equal bounds)
    biomass = "Ec_biomass_iJO1366_WT_53p95M"
    growth_rate = model.reactions.get_by_id(biomass)
    growth_rate.upper_bound = gr
    growth_rate.lower_bound = gr
    # Collect the model's flux bounds into a dataframe keyed by reaction id.
    bounds_df = pd.DataFrame(index=all_reactions,columns=['lb','ub'])
    m = model.to_array_based_model()
    bounds_df.loc[all_reactions, 'lb'] = m.lower_bounds
    bounds_df.loc[all_reactions, 'ub'] = m.upper_bounds
    # initialize LP problem
    pulp_solver = pulp.CPLEX(msg=0)
    lp = pulp.LpProblem("MOMA", pulp.LpMinimize)
    v_pred = pulp.LpVariable.dicts('v_pred', all_reactions)
    v_meas = pulp.LpVariable.dicts('v_meas', all_reactions)
    v_resid = pulp.LpVariable.dicts('residual', all_reactions)
    # add flux bounds
    for i in all_reactions:
        lp += (v_pred[i] >= bounds_df.loc[i, 'lb']), 'lower_bound_%s' % i
        lp += (v_pred[i] <= bounds_df.loc[i, 'ub']), 'upper_bound_%s' % i
    # add constraint for each measured reaction i:
    # |v_meas[i] - flux_meas[i]| <= flux_stderr[i]
    # v_resid[i] >= |v_pred[i] - v_meas[i]|
    for i in flux_meas.index:
        lp += (v_meas[i] <= flux_meas[i] + flux_stderr[i]), 'measured_upper_%s' % i
        lp += (v_meas[i] >= flux_meas[i] - flux_stderr[i]), 'measured_lower_%s' % i
        lp += (v_pred[i] - v_resid[i] <= v_meas[i]), 'abs_diff_upper_%s' % i
        lp += (-v_pred[i] - v_resid[i] <= -v_meas[i]), 'abs_diff_lower_%s' % i
    # Some reactions in Gerosa et al. 2015 share constraints with other reactions
    # here we manually constrain their fluxes according to measuremnts.
    # Acetate exchange
    lp += (v_meas['ACt2rpp'] + v_meas['ACS'] <= MFA.loc['PTAr+ACS', c] + MFA_std.loc['PTAr+ACS', c])
    lp += (v_meas['ACt2rpp'] + v_meas['ACS'] >= MFA.loc['PTAr+ACS', c] - MFA_std.loc['PTAr+ACS', c])
    # PFK/FBP reversible reaction
    lp += (v_meas['PFK'] - v_meas['FBP'] <= MFA.loc['PFK-FBP', c] + MFA_std.loc['PFK-FBP', c])
    lp += (v_meas['PFK'] - v_meas['FBP'] >= MFA.loc['PFK-FBP', c] - MFA_std.loc['PFK-FBP', c])
    # MDH/MQO alternative
    lp += (v_meas['MDH'] + v_meas['MDH2'] <= MFA.loc['MDH+MQO', c] + MFA_std.loc['MDH+MQO', c])
    lp += (v_meas['MDH'] + v_meas['MDH2'] >= MFA.loc['MDH+MQO', c] - MFA_std.loc['MDH+MQO', c])
    # ME alternative
    lp += (v_meas['ME1'] + v_meas['ME2'] <= MFA.loc['ME1+ME2', c] + MFA_std.loc['ME1+ME2', c])
    lp += (v_meas['ME1'] + v_meas['ME2'] >= MFA.loc['ME1+ME2', c] - MFA_std.loc['ME1+ME2', c])
    # set the objective to minimize sum_i abs_diff[i]
    objective = pulp.lpSum(v_resid.values())
    lp.setObjective(objective)
    # add stoichiometric constraints for all internal metabolites: S_int * v = 0
    # (m.S is a sparse matrix; each row is one metabolite's mass balance)
    for i,j in enumerate(m.S):
        row = [l * v_pred[all_reactions[k]] for k,l in zip(j.rows[0],j.data[0])]
        lp += (pulp.lpSum(row) == 0), 'mass_balance_%s' % all_metabolites[i]
    lp.solve()
    # append fluxes to new dataframe
    MEAS_FLUX_L = 'measured fluxes from Gerosa et al.'
    MEAS_STDEV_L = 'standard deviation'
    PRED_FLUX_L = 'projected fluxes'
    RESID_L = 'residual'
    fluxes_df = pd.DataFrame(index=all_reactions)
    fluxes_df.loc[flux_meas.index, MEAS_FLUX_L] = flux_meas
    fluxes_df.loc[flux_meas.index, MEAS_STDEV_L] = flux_stderr
    fluxes_df.loc[all_reactions, PRED_FLUX_L] = \
        map(lambda i: pulp.value(v_pred[i]), all_reactions)
    fluxes_df.loc[measured_flux.index, RESID_L] = \
        map(lambda i: pulp.value(v_resid[i]), measured_flux.index)
    # store this condition's projected fluxes before normalisation
    mmol_gCDW_h[c] = fluxes_df.loc[measured_flux.index, PRED_FLUX_L]
    #%%
    # normalize all fluxes to the biomass flux (i.e. set it to 1)
    fluxes_df /= pulp.value(v_pred[biomass])
    # Scatter plot of measured vs projected fluxes for this condition;
    # reactions with a residual > 2 are labelled by name.
    fig = plt.figure(figsize=(6,6))
    ax = plt.axes()
    fluxes_df.plot(kind='scatter', x=MEAS_FLUX_L, y=PRED_FLUX_L,
                   xerr=MEAS_STDEV_L, ax=ax, linewidth=0, s=20,
                   color=(0.7,0.2,0.5))
    xlim, ylim = (ax.get_ylim(), ax.get_ylim())
    plt.axis('equal')
    plt.plot(xlim, ylim)
    plt.xlim(xlim)
    plt.ylim(ylim)
    despine(ax)
    ax.set_title(c, size=15)
    for i in flux_meas.index:
        xy = fluxes_df.loc[i, [MEAS_FLUX_L, PRED_FLUX_L]]
        if fluxes_df.loc[i, RESID_L] > 2:
            ax.annotate(i, xy,
                        fontsize=10, color='darkslategrey')
    fig.savefig('../res/flux_projections/flux_projection_on_%s.pdf' %c)
# Persist all per-condition projections.
mmol_gCDW_h.to_csv('../data/flux projections[mmol_gCDW_h].csv')
|
Stupid Builders? It’s a load of Bollards!
You will have probably seen the following image forwarded by email, with a caption about ‘stupid builders’ or ‘lack of forward planning’.
A number of forum discussions have placed the image in various cities but this is in fact in Manchester, UK at the junction of Quay Street and Lower Byrom Street, and the accusations of stupidity are unfounded as the following pictures show – the bollards are removable. The key can also be seen in the second photo.
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
This script can import a HiRISE DTM .IMG file.
"""
import bpy
from bpy.props import *
from struct import pack, unpack
import os
import queue, threading
class image_properties:
    """Record of image attributes shared across the hirise_dtm_importer run.

    Every attribute is exposed through a combined getter/setter: calling the
    method with a non-None argument stores that value, and the current value
    is always returned.
    """

    def __init__(self, name, dimensions, pixel_scale):
        self.name(name)
        self.dims(dimensions)
        self.processed_dims(dimensions)
        self.pixel_scale(pixel_scale)

    def _access(self, attr, value):
        """Shared getter/setter helper: store *value* (unless None), return current."""
        if value is not None:
            setattr(self, attr, value)
        return getattr(self, attr)

    def dims(self, dims=None):
        """Original (samples, lines) dimensions of the image."""
        return self._access('_dims', dims)

    def processed_dims(self, processed_dims=None):
        """Dimensions after binning/cropping transforms."""
        return self._access('_processed_dims', processed_dims)

    def name(self, name=None):
        """Display name of the image."""
        return self._access('_name', name)

    def pixel_scale(self, pixel_scale=None):
        """(x, y) ground size of one pixel."""
        return self._access('_pixel_scale', pixel_scale)
class hirise_dtm_importer(object):
    """ methods to understand/import a HiRISE DTM formatted as a PDS .IMG """

    def __init__(self, context, filepath):
        # Blender context the resulting mesh object is linked into.
        self.__context = context
        # Path of the PDS .IMG file to import.
        self.__filepath = filepath
        # Sentinel for "no data" pixels; replaced by the label's
        # MISSING_CONSTANT value during execute().
        self.__ignore_value = 0x00000000
        # Default binning mode (see bin_mode()).
        self.__bin_mode = 'BIN6'
        self.scale( 1.0 )
        # Either False (no crop) or [widthX, widthY, offX, offY].
        self.__cropXY = False

    def bin_mode(self, bin_mode=None):
        """Get/set the binning mode; execute() recognises 'BIN2', 'BIN6',
        'BIN6-FAST', 'BIN12', 'BIN12-FAST' (anything else means no binning)."""
        if bin_mode != None:
            self.__bin_mode = bin_mode
        return self.__bin_mode

    def scale(self, scale=None):
        """Get/set the scale factor applied to the mesh coordinates."""
        if scale is not None:
            self.__scale = scale
        return self.__scale

    def crop(self, widthX, widthY, offX, offY):
        """Request a crop window; stored as [widthX, widthY, offX, offY]."""
        self.__cropXY = [ widthX, widthY, offX, offY ]
        return self.__cropXY
    ############################################################################
    ## PDS Label Operations
    ############################################################################

    def parsePDSLabel(self, labelIter, currentObjectName=None, level = ""):
        """Parse PDS label lines into a nested list of (key, value) pairs.

        OBJECT blocks become (object_name, sub_structure) entries, parsed
        recursively until the matching END_OBJECT line; plain "KEY = VALUE"
        lines become (key, value) string pairs.
        """
        # Let's parse this thing... semi-recursively
        ## I started writing this caring about everything in the PDS standard but ...
        ## it's a mess and I only need a few things -- thar be hacks below
        ## Mostly I just don't care about continued data from previous lines
        label_structure = []

        # When are we done with this level?
        endStr = "END"
        if not currentObjectName is None:
            endStr = "END_OBJECT = %s" % currentObjectName
        line = ""

        while not line.rstrip() == endStr:
            line = next(labelIter)

            # Get rid of comments
            comment = line.find("/*")
            if comment > -1:
                line = line[:comment]

            # Take notice of objects
            if line[:8] == "OBJECT =":
                objName = line[8:].rstrip()
                label_structure.append(
                    (
                        objName.lstrip().rstrip(),
                        self.parsePDSLabel(labelIter, objName.lstrip().rstrip(), level + " ")
                    )
                )
            elif line.find("END_OBJECT =") > -1:
                # END_OBJECT of a nested object already consumed by recursion.
                pass
            elif len(line.rstrip().lstrip()) > 0:
                key_val = line.split(" = ", 2)
                if len(key_val) == 2:
                    label_structure.append( (key_val[0].rstrip().lstrip(), key_val[1].rstrip().lstrip()) )

        return label_structure

    # There has got to be a better way in python?
    def iterArr(self, label):
        """Yield the stored label lines one at a time (list -> iterator)."""
        for line in label:
            yield line

    def getPDSLabel(self, img):
        """Read the PDS label from the open binary file *img*.

        Returns (raw_label_lines, parsed_label_structure); reading stops
        after the line whose stripped content is "END".
        """
        # Just takes file and stores it into an array for later use
        label = []
        done = False;

        # Grab label into array of lines
        while not done:
            line = str(img.readline(), 'utf-8')
            if line.rstrip() == "END":
                done = True
            label.append(line)
        return (label, self.parsePDSLabel(self.iterArr(label)))
    def getLinesAndSamples(self, label):
        """ uses the parsed PDS Label to get the LINES and LINE_SAMPLES parameters
        from the first object named "IMAGE" -- is hackish
        """
        # NOTE(review): if LINES/LINE_SAMPLES are absent this raises
        # UnboundLocalError at the return -- assumed well-formed input.
        for obj in label:
            if obj[0] == "IMAGE":
                return self.getLinesAndSamples(obj[1])
            if obj[0] == "LINES":
                lines = int(obj[1])
            if obj[0] == "LINE_SAMPLES":
                line_samples = int(obj[1])
        return ( line_samples, lines )

    def getValidMinMax(self, label):
        """ uses the parsed PDS Label to get the VALID_MINIMUM and VALID_MAXIMUM parameters
        from the first object named "IMAGE" -- is hackish
        """
        for obj in label:
            if obj[0] == "IMAGE":
                return self.getValidMinMax(obj[1])
            if obj[0] == "VALID_MINIMUM":
                vmin = float(obj[1])
            if obj[0] == "VALID_MAXIMUM":
                vmax = float(obj[1])
        return vmin, vmax

    def getMissingConstant(self, label):
        """ uses the parsed PDS Label to get the MISSING_CONSTANT parameter
        from the first object named "IMAGE" -- is hackish
        """
        for obj in label:
            if obj[0] == "IMAGE":
                return self.getMissingConstant(obj[1])
            if obj[0] == "MISSING_CONSTANT":
                bit_string_repr = obj[1]

        # This is always the same for a HiRISE image, so we are just checking it
        # to be a little less insane here. If someone wants to support another
        # constant then go for it. Just make sure this one continues to work too
        # (returns None implicitly if the constant is not 16#FF7FFFFB#).
        pieces = bit_string_repr.split("#")
        if pieces[0] == "16" and pieces[1] == "FF7FFFFB":
            # decode the radix-16 bit pattern into the IEEE-754 float it names
            ignore_value = unpack("f", pack("I", 0xFF7FFFFB))[0]

            return ( ignore_value )
    ############################################################################
    ## Image operations
    ############################################################################

    def bin2(self, image_iter, bin2_method_type="SLOW"):
        """ this is an iterator that: Given an image iterator will yield binned lines """
        # NOTE: bin2_method_type is currently unused -- kept for interface
        # symmetry with bin6/bin12.
        ignore_value = self.__ignore_value

        img_props = next(image_iter)
        # dimensions shrink as we remove pixels
        processed_dims = img_props.processed_dims()
        processed_dims = ( processed_dims[0]//2, processed_dims[1]//2 )
        img_props.processed_dims( processed_dims )
        # each pixel is larger as binning gets larger
        pixel_scale = img_props.pixel_scale()
        pixel_scale = ( pixel_scale[0]*2, pixel_scale[1]*2 )
        img_props.pixel_scale( pixel_scale )
        yield img_props

        # Take two lists [a1, a2, a3], [b1, b2, b3] and combine them into one
        # list of [a1 + b1, a2+b2, ... ] as long as both values are not ignorable
        combine_fun = lambda a, b: a != ignore_value and b != ignore_value and (a + b)/2 or ignore_value

        line_count = 0
        ret_list = []
        for line in image_iter:
            if line_count == 1:
                line_count = 0
                # average this line with the previous one (vertical pair) ...
                tmp_list = list(map(combine_fun, line, last_line))
                # ... then average adjacent values (horizontal pair) to
                # complete the 2x2 bin
                while len(tmp_list) > 1:
                    ret_list.append( combine_fun( tmp_list[0], tmp_list[1] ) )
                    del tmp_list[0:2]
                yield ret_list
                ret_list = []
            else:
                last_line = line
                line_count += 1
    def bin6(self, image_iter, bin6_method_type="SLOW"):
        """ this is an iterator that: Given an image iterator will yield binned lines """

        img_props = next(image_iter)
        # dimensions shrink as we remove pixels
        processed_dims = img_props.processed_dims()
        processed_dims = ( processed_dims[0]//6, processed_dims[1]//6 )
        img_props.processed_dims( processed_dims )
        # each pixel is larger as binning gets larger
        pixel_scale = img_props.pixel_scale()
        pixel_scale = ( pixel_scale[0]*6, pixel_scale[1]*6 )
        img_props.pixel_scale( pixel_scale )
        yield img_props

        # "FAST" samples one pixel per 6x6 block; "SLOW" averages the block.
        if bin6_method_type == "FAST":
            bin6_method = self.bin6_real_fast
        else:
            bin6_method = self.bin6_real

        raw_data = []
        line_count = 0
        for line in image_iter:
            raw_data.append( line )
            line_count += 1
            if line_count == 6:
                yield bin6_method( raw_data )
                line_count = 0
                raw_data = []

    def bin6_real(self, raw_data):
        """ does a 6x6 sample of raw_data and returns a single line of data """
        # TODO: make this more efficient

        binned_data = []

        # Filter out those unwanted hugely negative values...
        IGNORE_VALUE = self.__ignore_value

        base = 0
        for i in range(0, len(raw_data[0])//6):

            # gather the 36 values of this 6x6 block, dropping ignored pixels
            ints = (raw_data[0][base:base+6] +
                    raw_data[1][base:base+6] +
                    raw_data[2][base:base+6] +
                    raw_data[3][base:base+6] +
                    raw_data[4][base:base+6] +
                    raw_data[5][base:base+6] )
            ints = [num for num in ints if num != IGNORE_VALUE]

            # If we have all pesky values, return a pesky value
            if not ints:
                binned_data.append( IGNORE_VALUE )
            else:
                binned_data.append( sum(ints, 0.0) / len(ints) )

            base += 6

        return binned_data

    def bin6_real_fast(self, raw_data):
        """ takes a single value from each 6x6 sample of raw_data and returns a single line of data """
        # TODO: make this more efficient

        binned_data = []

        base = 0
        for i in range(0, len(raw_data[0])//6):
            binned_data.append( raw_data[0][base] )
            base += 6

        return binned_data
    def bin12(self, image_iter, bin12_method_type="SLOW"):
        """ this is an iterator that: Given an image iterator will yield binned lines """

        img_props = next(image_iter)
        # dimensions shrink as we remove pixels
        processed_dims = img_props.processed_dims()
        processed_dims = ( processed_dims[0]//12, processed_dims[1]//12 )
        img_props.processed_dims( processed_dims )
        # each pixel is larger as binning gets larger
        pixel_scale = img_props.pixel_scale()
        pixel_scale = ( pixel_scale[0]*12, pixel_scale[1]*12 )
        img_props.pixel_scale( pixel_scale )
        yield img_props

        # "FAST" samples one pixel per 12x12 block; "SLOW" averages the block.
        if bin12_method_type == "FAST":
            bin12_method = self.bin12_real_fast
        else:
            bin12_method = self.bin12_real

        raw_data = []
        line_count = 0
        for line in image_iter:
            raw_data.append( line )
            line_count += 1
            if line_count == 12:
                yield bin12_method( raw_data )
                line_count = 0
                raw_data = []

    def bin12_real(self, raw_data):
        """ does a 12x12 sample of raw_data and returns a single line of data """

        binned_data = []

        # Filter out those unwanted hugely negative values...
        filter_fun = lambda a: self.__ignore_value.__ne__(a)

        base = 0
        for i in range(0, len(raw_data[0])//12):

            # gather the 144 values of this 12x12 block, dropping ignored pixels
            ints = list(filter( filter_fun, raw_data[0][base:base+12] +
                                raw_data[1][base:base+12] +
                                raw_data[2][base:base+12] +
                                raw_data[3][base:base+12] +
                                raw_data[4][base:base+12] +
                                raw_data[5][base:base+12] +
                                raw_data[6][base:base+12] +
                                raw_data[7][base:base+12] +
                                raw_data[8][base:base+12] +
                                raw_data[9][base:base+12] +
                                raw_data[10][base:base+12] +
                                raw_data[11][base:base+12] ))
            len_ints = len( ints )

            # If we have all pesky values, return a pesky value
            if len_ints == 0:
                binned_data.append( self.__ignore_value )
            else:
                binned_data.append( sum(ints) / len(ints) )

            base += 12
        return binned_data

    def bin12_real_fast(self, raw_data):
        """ takes a single value from each 12x12 sample of raw_data and returns a single line of data """
        return raw_data[0][11::12]
    def cropXY(self, image_iter, XSize=None, YSize=None, XOffset=0, YOffset=0):
        """ return a cropped portion of the image """

        img_props = next(image_iter)
        # dimensions shrink as we remove pixels
        processed_dims = img_props.processed_dims()

        # default to the full extent; fall back to no offset when the
        # requested window would overrun the image
        if XSize is None:
            XSize = processed_dims[0]
        if YSize is None:
            YSize = processed_dims[1]

        if XSize + XOffset > processed_dims[0]:
            XSize = processed_dims[0]
            XOffset = 0
        if YSize + YOffset > processed_dims[1]:
            YSize = processed_dims[1]
            YOffset = 0

        img_props.processed_dims( (XSize, YSize) )
        yield img_props

        # NOTE(review): the inclusive "<=" bound yields YSize+1 lines -- confirm
        # whether the extra line is intended.
        currentY = 0
        for line in image_iter:
            if currentY >= YOffset and currentY <= YOffset + YSize:
                yield line[XOffset:XOffset+XSize]
            # Not much point in reading the rest of the data...
            if currentY == YOffset + YSize:
                return
            currentY += 1
    def getImage(self, img, img_props):
        """ Assumes 32-bit pixels -- bins image """
        dims = img_props.dims()

        # setup to unpack more efficiently.
        x_len = dims[0]
        # little endian (PC_REAL)
        unpack_str = "<"
        # unpack_str = ">"
        unpack_bytes_str = "<"
        pack_bytes_str = "="
        # 32 bits/sample * samples/line = y_bytes (per line)
        x_bytes = 4*x_len
        for x in range(0, x_len):
            # 32-bit float is "f"; "I" reads the same 4 bytes as a raw integer
            unpack_str += "f"
            unpack_bytes_str += "I"
            pack_bytes_str += "I"

        # Each iterator yields this first ... it is for reference of the next iterator:
        yield img_props

        for y in range(0, dims[1]):
            # pixels is a byte array
            pixels = b''
            while len(pixels) < x_bytes:
                new_pixels = img.read( x_bytes - len(pixels) )
                pixels += new_pixels
                if len(new_pixels) == 0:
                    # EOF: force both branches below to stop yielding
                    x_bytes = -1
                    pixels = []
            if len(pixels) == x_bytes:
                if 0 == 1:
                    # disabled debug path: re-pack via native byte order
                    repacked_pixels = b''
                    for integer in unpack(unpack_bytes_str, pixels):
                        repacked_pixels += pack("=I", integer)
                    yield unpack( unpack_str, repacked_pixels )
                else:
                    yield unpack( unpack_str, pixels )
    def shiftToOrigin(self, image_iter, image_min_max):
        """ takes a generator and shifts the points by the valid minimum
        also removes points with value self.__ignore_value and replaces them with None
        """

        # use the passed in values ...
        valid_min = image_min_max[0]

        # pass on dimensions/pixel_scale since we don't modify them here
        yield next(image_iter)

        # closures rock!
        def normalize_fun(point):
            if point == self.__ignore_value:
                return None
            return point - valid_min

        for line in image_iter:
            yield list(map(normalize_fun, line))

    def scaleZ(self, image_iter, scale_factor):
        """ scales the mesh values by a factor """
        # pass on dimensions since we don't modify them here
        yield next(image_iter)

        # NOTE(review): the scale_factor argument is immediately overwritten
        # by self.scale() -- the parameter is effectively ignored (execute()
        # actually passes img_min_max_vals here).  Kept as-is for behavior.
        scale_factor = self.scale()

        def scale_fun(point):
            try:
                return point * scale_factor
            except:
                # None points (ignored pixels) stay None
                return None

        for line in image_iter:
            yield list(map(scale_fun, line))
    def genMesh(self, image_iter):
        """Returns a mesh object from an image iterator this has the
        value-added feature that a value of "None" is ignored
        """

        # Get the output image size given the above transforms
        img_props = next(image_iter)

        # Let's interpolate the binned DTM with blender -- yay meshes!
        coords = []
        faces = []
        face_count = 0
        coord = -1
        max_x = img_props.processed_dims()[0]
        max_y = img_props.processed_dims()[1]

        scale_x = self.scale() * img_props.pixel_scale()[0]
        scale_y = self.scale() * img_props.pixel_scale()[1]

        line_count = 0
        # seed the last line (or previous line) with a line
        last_line = next(image_iter)
        point_offset = 0
        previous_point_offset = 0

        # Let's add any initial points that are appropriate
        x = 0
        point_offset += len( last_line ) - last_line.count(None)
        for z in last_line:
            if z != None:
                coords.append( (x*scale_x, 0.0, z) )
                coord += 1
            x += 1

        # We want to ignore points with a value of "None" but we also need to create vertices
        # with an index that we can re-create on the next line. The solution is to remember
        # two offsets: the point offset and the previous point offset.
        # these offsets represent the point index that blender gets -- not the number of
        # points we have read from the image

        # if "x" represents points that are "None" valued then conceptually this is how we
        # think of point indices:
        #
        # previous line: offset0   x   x  +1  +2  +3
        # current line:  offset1   x  +1  +2  +3   x

        # once we can map points we can worry about making triangular or square faces to fill
        # the space between vertices so that blender is more efficient at managing the final
        # structure.

        # read each new line and generate coordinates+faces
        for dtm_line in image_iter:

            # Keep track of where we are in the image
            line_count += 1
            y_val = line_count*-scale_y

            # Just add all points blindly
            # TODO: turn this into a map
            x = 0
            for z in dtm_line:
                if z != None:
                    coords.append( (x*scale_x, y_val, z) )
                    coord += 1
                x += 1

            # Calculate faces
            for x in range(0, max_x - 1):
                # corners of this grid cell: top-right, top-left,
                # bottom-left, bottom-right
                vals = [
                    last_line[ x + 1 ],
                    last_line[ x ],
                    dtm_line[ x ],
                    dtm_line[ x + 1 ],
                    ]

                # Two or more values of "None" means we can ignore this block
                none_val = vals.count(None)

                # Common case: we can create a square face
                if none_val == 0:
                    faces.append( (
                        previous_point_offset,
                        previous_point_offset+1,
                        point_offset+1,
                        point_offset,
                        ) )
                    face_count += 1
                elif none_val == 1:
                    # special case: we can implement a triangular face
                    ## NB: blender 2.5 makes a triangular face when the last coord is 0
                    # TODO: implement a triangular face
                    pass

                if vals[1] != None:
                    previous_point_offset += 1
                if vals[2] != None:
                    point_offset += 1

            # Squeeze the last point offset increment out of the previous line
            if last_line[-1] != None:
                previous_point_offset += 1

            # Squeeze the last point out of the current line
            if dtm_line[-1] != None:
                point_offset += 1

            # remember what we just saw (and forget anything before that)
            last_line = dtm_line

        me = bpy.data.meshes.new(img_props.name()) # create a new mesh

        #from_pydata(self, vertices, edges, faces)
        #Make a mesh from a list of vertices/edges/faces
        #Until we have a nicer way to make geometry, use this.
        #:arg vertices:
        #   float triplets each representing (X, Y, Z)
        #   eg: [(0.0, 1.0, 0.5), ...].
        #:type vertices: iterable object
        #:arg edges:
        #   int pairs, each pair contains two indices to the
        #   *vertices* argument. eg: [(1, 2), ...]
        #:type edges: iterable object
        #:arg faces:
        #   iterator of faces, each faces contains three or more indices to
        #   the *vertices* argument. eg: [(5, 6, 8, 9), (1, 2, 3), ...]
        #:type faces: iterable object
        me.from_pydata(coords, [], faces)

        # me.vertices.add(len(coords)/3)
        # me.vertices.foreach_set("co", coords)

        # me.faces.add(len(faces)/4)
        # me.faces.foreach_set("vertices_raw", faces)

        me.update()

        bin_desc = self.bin_mode()
        if bin_desc == 'NONE':
            bin_desc = 'No Bin'

        ob=bpy.data.objects.new("DTM - %s" % bin_desc, me)

        return ob
################################################################################
# Yay, done with importer functions ... let's see the abstraction in action! #
################################################################################
def execute(self):
img = open(self.__filepath, 'rb')
(label, parsedLabel) = self.getPDSLabel(img)
image_dims = self.getLinesAndSamples(parsedLabel)
img_min_max_vals = self.getValidMinMax(parsedLabel)
self.__ignore_value = self.getMissingConstant(parsedLabel)
# MAGIC VALUE? -- need to formalize this to rid ourselves of bad points
img.seek(28)
# Crop off 4 lines
img.seek(4*image_dims[0])
# HiRISE images (and most others?) have 1m x 1m pixels
pixel_scale=(1, 1)
# The image we are importing
image_name = os.path.basename( self.__filepath )
# Set the properties of the image in a manageable object
img_props = image_properties( image_name, image_dims, pixel_scale )
# Get an iterator to iterate over lines
image_iter = self.getImage(img, img_props)
## Wrap the image_iter generator with other generators to modify the dtm on a
## line-by-line basis. This creates a stream of modifications instead of reading
## all of the data at once, processing all of the data (potentially several times)
## and then handing it off to blender
## TODO: find a way to alter projection based on transformations below
if self.__cropXY:
image_iter = self.cropXY(image_iter,
XSize=self.__cropXY[0],
YSize=self.__cropXY[1],
XOffset=self.__cropXY[2],
YOffset=self.__cropXY[3]
)
# Select an appropriate binning mode
## TODO: generalize the binning fn's
bin_mode = self.bin_mode()
bin_mode_funcs = {
'BIN2': self.bin2(image_iter),
'BIN6': self.bin6(image_iter),
'BIN6-FAST': self.bin6(image_iter, 'FAST'),
'BIN12': self.bin12(image_iter),
'BIN12-FAST': self.bin12(image_iter, 'FAST')
}
if bin_mode in bin_mode_funcs.keys():
image_iter = bin_mode_funcs[ bin_mode ]
image_iter = self.shiftToOrigin(image_iter, img_min_max_vals)
if self.scale != 1.0:
image_iter = self.scaleZ(image_iter, img_min_max_vals)
# Create a new mesh object and set data from the image iterator
ob_new = self.genMesh(image_iter)
if img:
img.close()
# Add mesh object to the current scene
scene = self.__context.scene
scene.objects.link(ob_new)
scene.update()
# deselect other objects
bpy.ops.object.select_all(action='DESELECT')
# scene.objects.active = ob_new
# Select the new mesh
ob_new.select = True
return ('FINISHED',)
def load(operator, context, filepath, scale, bin_mode, cropVars):
    """Operator entry point: configure a hirise_dtm_importer and run it.

    :arg operator: calling Blender operator (unused here)
    :arg context: Blender context the new mesh object is linked into
    :arg filepath: path of the PDS .IMG file to import
    :arg scale: scale factor applied to the mesh coordinates
    :arg bin_mode: binning mode string (see hirise_dtm_importer.bin_mode)
    :arg cropVars: falsy for no crop, else (widthX, widthY, offX, offY)
    """
    print("Bin Mode: %s" % bin_mode)
    print("Scale: %f" % scale)
    importer = hirise_dtm_importer(context,filepath)
    importer.bin_mode( bin_mode )
    importer.scale( scale )
    if cropVars:
        importer.crop( cropVars[0], cropVars[1], cropVars[2], cropVars[3] )
    importer.execute()

    print("Loading %s" % filepath)
    return {'FINISHED'}
|
News flash: wealthy people cheat. Can we get on to holding the ones actually employed by the Universities involved in this crap accountable? Because I think that’s a bit more important than this Access Hollywood celebrity scandal stuff, frankly.
|
"""Added practice column to answers
Revision ID: 3f3dd5a97fc7
Revises: 17b7bd2e218c
Create Date: 2016-08-19 12:55:40.174238
"""
# revision identifiers, used by Alembic.
revision = '3f3dd5a97fc7'
down_revision = '17b7bd2e218c'
from alembic import op
import sqlalchemy as sa
from compair.models import convention
def upgrade():
    """Add the non-nullable 'practice' flag to answers and backfill it.

    Any answer referenced by a comparison_example row is marked as a
    practice answer (True); all other rows keep the server default '0'.
    """
    with op.batch_alter_table('answer', naming_convention=convention) as batch_op:
        batch_op.add_column(sa.Column('practice', sa.Boolean(), default=False, server_default='0', nullable=False))
    op.create_index(op.f('ix_answer_practice'), 'answer', ['practice'], unique=False)

    connection = op.get_bind()

    # Lightweight table stubs for the data migration.  Consistency fix:
    # use sa.column() uniformly (the original mixed sa.Column and sa.column
    # inside sa.table()).
    comparison_example_table = sa.table('comparison_example',
        sa.column('answer1_id', sa.Integer),
        sa.column('answer2_id', sa.Integer),
    )

    answer_table = sa.table('answer',
        sa.column('id', sa.Integer),
        sa.column('practice', sa.Boolean)
    )

    # Collect the distinct answer ids referenced by comparison examples.
    answer_ids = set()
    for comparison_example in connection.execute(comparison_example_table.select()):
        answer_ids.add(comparison_example.answer1_id)
        answer_ids.add(comparison_example.answer2_id)
    answer_ids = list(answer_ids)

    if len(answer_ids) > 0:
        connection.execute(
            answer_table.update().where(
                answer_table.c.id.in_(answer_ids)
            ).values(
                practice=True
            )
        )
def downgrade():
    """Remove the ``practice`` column and its index added by upgrade()."""
    with op.batch_alter_table('answer', naming_convention=convention) as batch_op:
        # Drop the index before the column it covers.
        batch_op.drop_index('ix_answer_practice')
        batch_op.drop_column('practice')
|
Sneaker in soft white napa calf with perforation details all over and logo on side. Running design rubber sole in shades of blue. Made in Italy.
|
"""
Lowering implementation for object mode.
"""
from __future__ import print_function, division, absolute_import
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
from . import cgutils, generators, ir, types, utils
from .errors import ForbiddenConstruct
from .lowering import BaseLower
from .utils import builtins, intern
# Issue #475: locals() is unsupported as calling it naively would give
# out wrong results.
_unsupported_builtins = set([locals])
# Map operators to methods on the PythonAPI class
# NOTE(review): '/?' appears to denote classic (non-true) division as
# distinct from '/' (true division) -- confirm against the IR builder.
PYTHON_OPMAP = {
    '+': "number_add",
    '-': "number_subtract",
    '*': "number_multiply",
    '/?': "number_divide",
    '/': "number_truedivide",
    '//': "number_floordivide",
    '%': "number_remainder",
    '**': "number_power",
    '<<': "number_lshift",
    '>>': "number_rshift",
    '&': "number_and",
    '|': "number_or",
    '^': "number_xor",
}
class PyLower(BaseLower):
    """Object-mode lowering of Numba IR to LLVM.

    Every value is handled as a generic Python object and every operation
    is dispatched through the CPython C-API wrappers on ``self.pyapi``.
    """
    # Strategy class used to lower object-mode generators
    GeneratorLower = generators.PyGeneratorLower
    def init(self):
        # Strings to be frozen into the Environment object
        self._frozen_strings = set()
        # Names of variables currently holding an owned reference
        self._live_vars = set()
    def pre_lower(self):
        # Set up the Python C-API helper before any instruction is lowered.
        super(PyLower, self).pre_lower()
        self.init_pyapi()
    def post_lower(self):
        pass
    def pre_block(self, block):
        # Reset the live-variable set from the block's entry state.
        self.init_vars(block)
    def lower_inst(self, inst):
        """Lower a single IR instruction to C-API calls."""
        if isinstance(inst, ir.Assign):
            value = self.lower_assign(inst)
            self.storevar(value, inst.target.name)
        elif isinstance(inst, ir.SetItem):
            target = self.loadvar(inst.target.name)
            index = self.loadvar(inst.index.name)
            value = self.loadvar(inst.value.name)
            ok = self.pyapi.object_setitem(target, index, value)
            self.check_int_status(ok)
        elif isinstance(inst, ir.SetAttr):
            target = self.loadvar(inst.target.name)
            value = self.loadvar(inst.value.name)
            ok = self.pyapi.object_setattr(target,
                                           self._freeze_string(inst.attr),
                                           value)
            self.check_int_status(ok)
        elif isinstance(inst, ir.DelAttr):
            target = self.loadvar(inst.target.name)
            ok = self.pyapi.object_delattr(target,
                                           self._freeze_string(inst.attr))
            self.check_int_status(ok)
        elif isinstance(inst, ir.StoreMap):
            dct = self.loadvar(inst.dct.name)
            key = self.loadvar(inst.key.name)
            value = self.loadvar(inst.value.name)
            ok = self.pyapi.dict_setitem(dct, key, value)
            self.check_int_status(ok)
        elif isinstance(inst, ir.Return):
            retval = self.loadvar(inst.value.name)
            if self.generator_info:
                # StopIteration
                # We own a reference to the "return value", but we
                # don't return it.
                self.pyapi.decref(retval)
                self.genlower.return_from_generator(self)
                return
            # No need to incref() as the reference is already owned.
            self.call_conv.return_value(self.builder, retval)
        elif isinstance(inst, ir.Branch):
            cond = self.loadvar(inst.cond.name)
            if cond.type == Type.int(1):
                # Condition is already a native i1; use it directly.
                istrue = cond
            else:
                # Evaluate Python truthiness of the object.
                istrue = self.pyapi.object_istrue(cond)
            zero = lc.Constant.null(istrue.type)
            pred = self.builder.icmp(lc.ICMP_NE, istrue, zero)
            tr = self.blkmap[inst.truebr]
            fl = self.blkmap[inst.falsebr]
            self.builder.cbranch(pred, tr, fl)
        elif isinstance(inst, ir.Jump):
            target = self.blkmap[inst.target]
            self.builder.branch(target)
        elif isinstance(inst, ir.Del):
            self.delvar(inst.value)
        elif isinstance(inst, ir.Raise):
            if inst.exception is not None:
                exc = self.loadvar(inst.exception.name)
                # A reference will be stolen by raise_object() and another
                # by return_exception_raised().
                self.incref(exc)
            else:
                exc = None
            self.pyapi.raise_object(exc)
            self.return_exception_raised()
        else:
            raise NotImplementedError(type(inst), inst)
    def lower_assign(self, inst):
        """
        The returned object must have a new reference
        """
        value = inst.value
        if isinstance(value, (ir.Const, ir.FreeVar)):
            return self.lower_const(value.value)
        elif isinstance(value, ir.Var):
            val = self.loadvar(value.name)
            self.incref(val)
            return val
        elif isinstance(value, ir.Expr):
            return self.lower_expr(value)
        elif isinstance(value, ir.Global):
            return self.lower_global(value.name, value.value)
        elif isinstance(value, ir.Yield):
            return self.lower_yield(value)
        elif isinstance(value, ir.Arg):
            value = self.fnargs[value.index]
            self.incref(value)
            return value
        else:
            raise NotImplementedError(type(value), value)
    def lower_yield(self, inst):
        """Lower a yield: save live state, return the value, then resume."""
        yp = self.generator_info.yield_points[inst.index]
        assert yp.inst is inst
        self.genlower.init_generator_state(self)
        # Save live vars in state
        # We also need to save live vars that are del'ed afterwards.
        y = generators.LowerYield(self, yp, yp.live_vars | yp.weak_live_vars)
        y.lower_yield_suspend()
        # Yield to caller
        val = self.loadvar(inst.value.name)
        # Let caller own the reference
        self.pyapi.incref(val)
        self.call_conv.return_value(self.builder, val)
        # Resumption point
        y.lower_yield_resume()
        # None is returned by the yield expression
        return self.pyapi.make_none()
    def lower_binop(self, expr, op, inplace=False):
        """Lower a binary operation, dispatching on the operator string."""
        lhs = self.loadvar(expr.lhs.name)
        rhs = self.loadvar(expr.rhs.name)
        if op in PYTHON_OPMAP:
            fname = PYTHON_OPMAP[op]
            fn = getattr(self.pyapi, fname)
            res = fn(lhs, rhs, inplace=inplace)
        else:
            # Assumed to be rich comparison
            res = self.pyapi.object_richcompare(lhs, rhs, expr.fn)
        self.check_error(res)
        return res
    def lower_expr(self, expr):
        """Lower an IR expression; returns a new (owned) object reference."""
        if expr.op == 'binop':
            return self.lower_binop(expr, expr.fn, inplace=False)
        elif expr.op == 'inplace_binop':
            return self.lower_binop(expr, expr.immutable_fn, inplace=True)
        elif expr.op == 'unary':
            value = self.loadvar(expr.value.name)
            if expr.fn == '-':
                res = self.pyapi.number_negative(value)
            elif expr.fn == '+':
                res = self.pyapi.number_positive(value)
            elif expr.fn == 'not':
                res = self.pyapi.object_not(value)
                self.check_int_status(res)
                # Convert the C int result back into a Python bool object.
                longval = self.builder.zext(res, self.pyapi.long)
                res = self.pyapi.bool_from_long(longval)
            elif expr.fn == '~':
                res = self.pyapi.number_invert(value)
            else:
                raise NotImplementedError(expr)
            self.check_error(res)
            return res
        elif expr.op == 'call':
            argvals = [self.loadvar(a.name) for a in expr.args]
            fn = self.loadvar(expr.func.name)
            if not expr.kws:
                # No keyword
                ret = self.pyapi.call_function_objargs(fn, argvals)
            else:
                # Have Keywords
                keyvalues = [(k, self.loadvar(v.name)) for k, v in expr.kws]
                args = self.pyapi.tuple_pack(argvals)
                kws = self.pyapi.dict_pack(keyvalues)
                ret = self.pyapi.call(fn, args, kws)
                self.decref(kws)
                self.decref(args)
            self.check_error(ret)
            return ret
        elif expr.op == 'getattr':
            obj = self.loadvar(expr.value.name)
            res = self.pyapi.object_getattr(obj, self._freeze_string(expr.attr))
            self.check_error(res)
            return res
        elif expr.op == 'build_tuple':
            items = [self.loadvar(it.name) for it in expr.items]
            res = self.pyapi.tuple_pack(items)
            self.check_error(res)
            return res
        elif expr.op == 'build_list':
            items = [self.loadvar(it.name) for it in expr.items]
            res = self.pyapi.list_pack(items)
            self.check_error(res)
            return res
        elif expr.op == 'build_map':
            res = self.pyapi.dict_new(expr.size)
            self.check_error(res)
            for k, v in expr.items:
                key = self.loadvar(k.name)
                value = self.loadvar(v.name)
                ok = self.pyapi.dict_setitem(res, key, value)
                self.check_int_status(ok)
            return res
        elif expr.op == 'build_set':
            items = [self.loadvar(it.name) for it in expr.items]
            res = self.pyapi.set_new()
            self.check_error(res)
            for it in items:
                ok = self.pyapi.set_add(res, it)
                self.check_int_status(ok)
            return res
        elif expr.op == 'getiter':
            obj = self.loadvar(expr.value.name)
            res = self.pyapi.object_getiter(obj)
            self.check_error(res)
            return res
        elif expr.op == 'iternext':
            # Produce a (value, is_valid) pair for the iteration protocol.
            iterobj = self.loadvar(expr.value.name)
            item = self.pyapi.iter_next(iterobj)
            is_valid = cgutils.is_not_null(self.builder, item)
            pair = self.pyapi.tuple_new(2)
            with self.builder.if_else(is_valid) as (then, otherwise):
                with then:
                    self.pyapi.tuple_setitem(pair, 0, item)
                with otherwise:
                    self.check_occurred()
                    # Make the tuple valid by inserting None as dummy
                    # iteration "result" (it will be ignored).
                    self.pyapi.tuple_setitem(pair, 0, self.pyapi.make_none())
            self.pyapi.tuple_setitem(pair, 1, self.pyapi.bool_from_bool(is_valid))
            return pair
        elif expr.op == 'pair_first':
            pair = self.loadvar(expr.value.name)
            first = self.pyapi.tuple_getitem(pair, 0)
            # tuple_getitem returns a borrowed reference; take ownership.
            self.incref(first)
            return first
        elif expr.op == 'pair_second':
            pair = self.loadvar(expr.value.name)
            second = self.pyapi.tuple_getitem(pair, 1)
            self.incref(second)
            return second
        elif expr.op == 'exhaust_iter':
            iterobj = self.loadvar(expr.value.name)
            tup = self.pyapi.sequence_tuple(iterobj)
            self.check_error(tup)
            # Check tuple size is as expected
            tup_size = self.pyapi.tuple_size(tup)
            expected_size = self.context.get_constant(types.intp, expr.count)
            has_wrong_size = self.builder.icmp(lc.ICMP_NE,
                                               tup_size, expected_size)
            with cgutils.if_unlikely(self.builder, has_wrong_size):
                self.return_exception(ValueError)
            return tup
        elif expr.op == 'getitem':
            value = self.loadvar(expr.value.name)
            index = self.loadvar(expr.index.name)
            res = self.pyapi.object_getitem(value, index)
            self.check_error(res)
            return res
        elif expr.op == 'static_getitem':
            value = self.loadvar(expr.value.name)
            index = self.context.get_constant(types.intp, expr.index)
            indexobj = self.pyapi.long_from_ssize_t(index)
            self.check_error(indexobj)
            res = self.pyapi.object_getitem(value, indexobj)
            self.decref(indexobj)
            self.check_error(res)
            return res
        elif expr.op == 'getslice':
            target = self.loadvar(expr.target.name)
            start = self.loadvar(expr.start.name)
            stop = self.loadvar(expr.stop.name)
            # Build a slice object by calling the builtin slice(start, stop).
            slicefn = self.get_builtin_obj("slice")
            sliceobj = self.pyapi.call_function_objargs(slicefn, (start, stop))
            self.decref(slicefn)
            self.check_error(sliceobj)
            res = self.pyapi.object_getitem(target, sliceobj)
            self.check_error(res)
            return res
        elif expr.op == 'cast':
            # Object mode has a single type; a cast is identity.
            val = self.loadvar(expr.value.name)
            self.incref(val)
            return val
        else:
            raise NotImplementedError(expr)
    def lower_const(self, const):
        """Return an owned reference to *const*, read from the Environment."""
        # All constants are frozen inside the environment
        index = self.env_manager.add_const(const)
        ret = self.env_manager.read_const(index)
        self.check_error(ret)
        self.incref(ret)
        return ret
    def lower_global(self, name, value):
        """
        1) Check global scope dictionary.
        2) Check __builtins__.
            2a) is it a dictionary (for non __main__ module)
            2b) is it a module (for __main__ module)
        """
        moddict = self.get_module_dict()
        obj = self.pyapi.dict_getitem(moddict, self._freeze_string(name))
        self.incref(obj)  # obj is borrowed
        try:
            if value in _unsupported_builtins:
                raise ForbiddenConstruct("builtins %s() is not supported"
                                         % name, loc=self.loc)
        except TypeError:
            # `value` is unhashable, ignore
            pass
        if hasattr(builtins, name):
            # Fall back to the builtins lookup only when the module-level
            # lookup came back NULL.
            obj_is_null = self.is_null(obj)
            bbelse = self.builder.basic_block
            with self.builder.if_then(obj_is_null):
                mod = self.pyapi.dict_getitem(moddict,
                                              self._freeze_string("__builtins__"))
                builtin = self.builtin_lookup(mod, name)
            bbif = self.builder.basic_block
            retval = self.builder.phi(self.pyapi.pyobj)
            retval.add_incoming(obj, bbelse)
            retval.add_incoming(builtin, bbif)
        else:
            retval = obj
            with cgutils.if_unlikely(self.builder, self.is_null(retval)):
                self.pyapi.raise_missing_global_error(name)
                self.return_exception_raised()
        return retval
    # -------------------------------------------------------------------------
    def get_module_dict(self):
        """Return the globals dict of the lowered function's module."""
        return self.env_body.globals
    def get_builtin_obj(self, name):
        """Look up *name* among the builtins; returns an owned reference."""
        # XXX The builtins dict could be bound into the environment
        moddict = self.get_module_dict()
        mod = self.pyapi.dict_getitem(moddict,
                                      self._freeze_string("__builtins__"))
        return self.builtin_lookup(mod, name)
    def builtin_lookup(self, mod, name):
        """
        Args
        ----
        mod:
            The __builtins__ dictionary or module, as looked up in
            a module's globals.
        name: str
            The object to lookup
        """
        fromdict = self.pyapi.dict_getitem(mod, self._freeze_string(name))
        self.incref(fromdict)       # fromdict is borrowed
        bbifdict = self.builder.basic_block
        with cgutils.if_unlikely(self.builder, self.is_null(fromdict)):
            # This happen if we are using the __main__ module
            frommod = self.pyapi.object_getattr(mod, self._freeze_string(name))
            with cgutils.if_unlikely(self.builder, self.is_null(frommod)):
                self.pyapi.raise_missing_global_error(name)
                self.return_exception_raised()
            bbifmod = self.builder.basic_block
        builtin = self.builder.phi(self.pyapi.pyobj)
        builtin.add_incoming(fromdict, bbifdict)
        builtin.add_incoming(frommod, bbifmod)
        return builtin
    def check_occurred(self):
        """
        Return if an exception occurred.
        """
        err_occurred = cgutils.is_not_null(self.builder,
                                           self.pyapi.err_occurred())
        with cgutils.if_unlikely(self.builder, err_occurred):
            self.return_exception_raised()
    def check_error(self, obj):
        """
        Return if *obj* is NULL.
        """
        with cgutils.if_unlikely(self.builder, self.is_null(obj)):
            self.return_exception_raised()
        return obj
    def check_int_status(self, num, ok_value=0):
        """
        Raise an exception if *num* is smaller than *ok_value*.
        """
        ok = lc.Constant.int(num.type, ok_value)
        pred = self.builder.icmp(lc.ICMP_SLT, num, ok)
        with cgutils.if_unlikely(self.builder, pred):
            self.return_exception_raised()
    def is_null(self, obj):
        # NULL test on a C-API result pointer.
        return cgutils.is_null(self.builder, obj)
    def return_exception_raised(self):
        """
        Return with the currently raised exception.
        """
        self.cleanup_vars()
        self.call_conv.return_exc(self.builder)
    def init_vars(self, block):
        """
        Initialize live variables for *block*.
        """
        self._live_vars = set(self.interp.get_block_entry_vars(block))
    def _getvar(self, name, ltype=None):
        # Lazily allocate a stack slot for *name* on first access.
        if name not in self.varmap:
            self.varmap[name] = self.alloca(name, ltype=ltype)
        return self.varmap[name]
    def loadvar(self, name):
        """
        Load the llvm value of the variable named *name*.
        """
        # If this raises then the live variables analysis is wrong
        assert name in self._live_vars, name
        ptr = self.varmap[name]
        val = self.builder.load(ptr)
        with cgutils.if_unlikely(self.builder, self.is_null(val)):
            self.pyapi.raise_missing_name_error(name)
            self.return_exception_raised()
        return val
    def delvar(self, name):
        """
        Delete the variable slot with the given name. This will decref
        the corresponding Python object.
        """
        # If this raises then the live variables analysis is wrong
        self._live_vars.remove(name)
        ptr = self._getvar(name)  # initializes `name` if not already
        self.decref(self.builder.load(ptr))
        # This is a safety guard against double decref's, but really
        # the IR should be correct and have only one Del per variable
        # and code path.
        self.builder.store(cgutils.get_null_value(ptr.type.pointee), ptr)
    def storevar(self, value, name, clobber=False):
        """
        Stores a llvm value and allocate stack slot if necessary.
        The llvm value can be of arbitrary type.
        """
        is_redefine = name in self._live_vars and not clobber
        ptr = self._getvar(name, ltype=value.type)
        if is_redefine:
            old = self.builder.load(ptr)
        else:
            self._live_vars.add(name)
        assert value.type == ptr.type.pointee, (str(value.type),
                                                str(ptr.type.pointee))
        self.builder.store(value, ptr)
        # Safe to call decref even on non python object
        if is_redefine:
            self.decref(old)
    def cleanup_vars(self):
        """
        Cleanup live variables.
        """
        for name in self._live_vars:
            ptr = self._getvar(name)
            self.decref(self.builder.load(ptr))
    def alloca(self, name, ltype=None):
        """
        Allocate a stack slot and initialize it to NULL.
        The default is to allocate a pyobject pointer.
        Use ``ltype`` to override.
        """
        if ltype is None:
            ltype = self.context.get_value_type(types.pyobject)
        with self.builder.goto_block(self.entry_block):
            ptr = self.builder.alloca(ltype, name=name)
            self.builder.store(cgutils.get_null_value(ltype), ptr)
        return ptr
    def incref(self, value):
        # Take a new reference on *value*.
        self.pyapi.incref(value)
    def decref(self, value):
        """
        This is allow to be called on non pyobject pointer, in which case
        no code is inserted.
        """
        lpyobj = self.context.get_value_type(types.pyobject)
        if value.type.kind == lc.TYPE_POINTER:
            if value.type != lpyobj:
                # Not a Python object pointer: nothing to release.
                pass
            else:
                self.pyapi.decref(value)
    def _freeze_string(self, string):
        """
        Freeze a Python string object into the code.
        """
        return self.lower_const(string)
|
These measures evaluate the long-term impact of policy and programmatic changes related to health information sharing, including more efficient, higher quality care and lower costs. Because measures evaluating long-term outcomes are dependent on many factors, health information exchange accounts for only a portion of the change. The measures are listed in order of quality and cost impact, beginning with the most impactful.
|
# -*- coding: UTF-8 -*-
import markdown
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils.choices import Choices
from symcon import querysets
from symcon.common.util.markdown import MarkDownToHtml
class Repository(models.Model):
    """A GitHub repository, identified by owner user name and repo name."""
    user = models.CharField(max_length=100, verbose_name=_('User'))
    name = models.CharField(max_length=100, verbose_name=_('Name'))
    last_update = models.DateTimeField(null=True, blank=True, verbose_name=_('Last update'))
    def get_url(self):
        """Return the repository's GitHub URL."""
        return '{owner_url}/{name}'.format(owner_url=self.get_owner_url(), name=self.name)
    def get_issue_url(self):
        """Return the URL of the repository's issue tracker."""
        return '{repo}/issues'.format(repo=self.get_url())
    def get_owner_url(self):
        """Return the GitHub URL of the repository owner."""
        return 'https://github.com/{user}'.format(user=self.user)
    class Meta:
        verbose_name = _('Repository')
        verbose_name_plural = _('Repositories')
        unique_together = ('user', 'name')
class Branch(models.Model):
    """A branch of a Repository; at most one per repo is the default."""
    repository = models.ForeignKey(to='Repository', verbose_name=_('Repository'))
    name = models.CharField(max_length=200, verbose_name=_('Branch'))
    last_update = models.DateTimeField(null=True, blank=True, verbose_name=_('Last update'))
    default = models.BooleanField(default=False, verbose_name=_('Default'))
    def get_raw_url(self):
        """Return the base URL for raw file access on this branch."""
        return self.repository.get_url() + '/raw/' + self.name
    class Meta:
        verbose_name = _('Branch')
        verbose_name_plural = _('Branches')
        unique_together = ('repository', 'name')
class Library(models.Model):
    """A Symcon library (UUID) hosted in a Repository."""
    objects = querysets.LibraryQuerySet.as_manager()
    repository = models.ForeignKey(to='Repository', verbose_name=_('Repository'))
    uuid = models.UUIDField(verbose_name=_('Identifier'))
    def get_default_librarybranch(self):
        """Return the LibraryBranch on the default branch, or None."""
        for librarybranch in self.librarybranch_set.all():
            if librarybranch.branch.default:
                return librarybranch
        return None
    class Meta:
        verbose_name = _('Library')
        verbose_name_plural = _('Libraries')
        unique_together = ('repository', 'uuid')
class LibraryBranch(models.Model):
    """Metadata of a Library as found on one specific Branch."""
    library = models.ForeignKey(to='Library', verbose_name=_('Library'))
    branch = models.ForeignKey(to='Branch', verbose_name=_('Branch'))
    name = models.CharField(max_length=200, blank=True, verbose_name=_('Name'))
    title = models.TextField(blank=True, verbose_name=_('Title'))
    description = models.TextField(blank=True, verbose_name=_('Description'))
    req_ips_version = models.CharField(max_length=200, blank=True,
                                       verbose_name=_('Minimum Symcon version'))
    author = models.CharField(max_length=200, blank=True, verbose_name=_('Author'))
    url = models.URLField(blank=True, verbose_name=_('URL'))
    version = models.CharField(max_length=50, blank=True, verbose_name=_('Version'))
    build = models.IntegerField(null=True, blank=True, verbose_name=_('Build'))
    date = models.IntegerField(null=True, blank=True, verbose_name=_('Date'))
    readme_markdown = models.TextField(blank=True, verbose_name=_('Readme MarkDown'))
    # Rendered HTML; regenerated from readme_markdown on every save().
    readme_html = models.TextField(blank=True, verbose_name=_('Readme HTML'))
    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        """Re-render the readme HTML before persisting."""
        self.convert_readme()
        super().save(force_insert, force_update, using, update_fields)
    def convert_readme(self):
        """Convert readme_markdown to HTML, resolving links via the branch."""
        self.readme_html = MarkDownToHtml(
            text=self.readme_markdown, branch=self.branch).transform()
    def get_req_ips_version(self):
        """Return the required Symcon version, falling back to the branch name."""
        if self.req_ips_version:
            return self.req_ips_version
        return self.branch.name
    class Meta:
        verbose_name = _('Library branch')
        verbose_name_plural = _('Library branches')
        unique_together = ('library', 'branch')
        ordering = ('-branch__default', 'name')
class LibraryBranchTag(models.Model):
    """A tag attached to a LibraryBranch; unique per branch."""
    librarybranch = models.ForeignKey(to='LibraryBranch', verbose_name=_('Library branch'))
    name = models.CharField(max_length=200, verbose_name=_('Name'))
    class Meta:
        verbose_name = _('Library branch tag')
        # Fixed typo in the admin-facing label (was 'Library branche tags').
        verbose_name_plural = _('Library branch tags')
        unique_together = ('librarybranch', 'name')
        ordering = ('librarybranch', 'name')
class Module(models.Model):
    """A Symcon module (UUID) belonging to a LibraryBranch."""
    # Module categories as defined by Symcon.
    TYPE_CHOICES = Choices(
        (0, 'core', _('Core')),
        (1, 'io', _('I/O')),
        (2, 'splitter', _('Splitter')),
        (3, 'device', _('Device')),
        (4, 'configurator', _('Configurator')),
    )
    librarybranch = models.ForeignKey(to='LibraryBranch', verbose_name=_('Library branch'))
    uuid = models.UUIDField(verbose_name=_('Identifier'))
    name = models.CharField(max_length=200, blank=True, verbose_name=_('Name'))
    title = models.TextField(blank=True, verbose_name=_('Title'))
    description = models.TextField(blank=True, verbose_name=_('Description'))
    type = models.IntegerField(choices=TYPE_CHOICES, null=True, blank=True, verbose_name=_('Type'))
    vendor = models.CharField(max_length=200, blank=True, verbose_name=_('Vendor'))
    prefix = models.CharField(max_length=200, blank=True, verbose_name=_('Prefix'))
    readme_markdown = models.TextField(blank=True, verbose_name=_('Readme MarkDown'))
    # Rendered HTML; regenerated from readme_markdown on every save().
    readme_html = models.TextField(blank=True, verbose_name=_('Readme HTML'))
    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        """Re-render the readme HTML before persisting."""
        self.convert_readme()
        super().save(force_insert, force_update, using, update_fields)
    def convert_readme(self):
        """Convert readme_markdown to HTML, resolving links via the branch."""
        self.readme_html = MarkDownToHtml(
            text=self.readme_markdown, branch=self.librarybranch.branch).transform()
    class Meta:
        verbose_name = _('Module')
        verbose_name_plural = _('Modules')
        unique_together = ('librarybranch', 'uuid')
class ModuleAlias(models.Model):
    """An alternative name for a Module; soft-deleted via `deleted`."""
    module = models.ForeignKey(to='Module', verbose_name=_('Module'))
    name = models.CharField(max_length=200, verbose_name=_('Name'))
    deleted = models.BooleanField(default=False, verbose_name=_('Marked for deletion'))
    class Meta:
        verbose_name = _('Module alias')
        verbose_name_plural = _('Module aliases')
        unique_together = ('module', 'name')
class ModuleParentRequirement(models.Model):
    """UUID of a parent a Module requires; soft-deleted via `deleted`."""
    module = models.ForeignKey(to='Module', verbose_name=_('Module'))
    uuid = models.UUIDField(verbose_name=_('Identifier'))
    deleted = models.BooleanField(default=False, verbose_name=_('Marked for deletion'))
    class Meta:
        verbose_name = _('Module parent requirement')
        verbose_name_plural = _('Module parent requirements')
        unique_together = ('module', 'uuid')
class ModuleChildRequirement(models.Model):
    """UUID of a child a Module requires; soft-deleted via `deleted`."""
    module = models.ForeignKey(to='Module', verbose_name=_('Module'))
    uuid = models.UUIDField(verbose_name=_('Identifier'))
    deleted = models.BooleanField(default=False, verbose_name=_('Marked for deletion'))
    class Meta:
        verbose_name = _('Module child requirement')
        verbose_name_plural = _('Module child requirements')
        unique_together = ('module', 'uuid')
class ModuleImplementedRequirement(models.Model):
    """UUID of an interface a Module implements; soft-deleted via `deleted`."""
    module = models.ForeignKey(to='Module', verbose_name=_('Module'))
    uuid = models.UUIDField(verbose_name=_('Identifier'))
    deleted = models.BooleanField(default=False, verbose_name=_('Marked for deletion'))
    class Meta:
        verbose_name = _('Module implemented requirement')
        verbose_name_plural = _('Module implemented requirements')
        unique_together = ('module', 'uuid')
|
The Free Text element is a read-only container which contains a single section of text. For example, you may choose to use it for an introductory paragraph at the beginning of your form.
Free Text can be formatted, and you can insert wildcards that will automatically be replaced with values from other fields on your form. This form element also supports the use of Bindings.
Field Name This is the name used to reference the field and is also the name of the column heading in the submission bin.
Type Choose from many heading types, including Basic Header, Custom Graphic, Free Text (covered here in this topic), Line/Separator, Dynamic, and SSL Seal. Each of these is also available by right-clicking the root form node and choosing Insert New Form Element > Heading/Graphic Text.
Dependency Create a dependency to hide or show this element based on a rule. The field becomes "dependent" on other values entered on the form. Learn more in Dependencies.
Hidden Default is false. Set this property to true if you want this element to be hidden.
Hide Mode This property determines how the element is hidden and if it will collapse within the space it would otherwise occupy.
Binding Click to open the Binding Editor screen. Create a conditional binding to calculate a total or concatenate two strings together. Learn more in Bindings.
Font-family Click the button to choose the heading font.
Font-color Click the button to choose a heading font color.
Font-size Click the button to choose a heading font size.
Font-weight Click the button to choose a heading font weight.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011, 2012 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""A SOAP server for giving Postmaster's what they want of information from
Cerebrum.
Note that the logger is twisted's own logger and not Cerebrum's. Since twisted
works in parallell the logger should not be blocked. Due to this, the format of
the logs is not equal to the rest of Cerebrum. This might be something to work
on later.
"""
from __future__ import unicode_literals
import sys
import getopt
from twisted.python import log
from rpclib.model.primitive import Unicode
from rpclib.model.complex import Array
from rpclib.decorator import rpc
from cisconf import postmaster as cisconf
from Cerebrum.Utils import dyn_import
from Cerebrum import Errors
from Cerebrum.modules.cis import SoapListener
class PostmasterServer(SoapListener.BasicSoapServer):
    """The SOAP commands available for the clients.
    TODO: is the following correct anymore? Note that an instance of this class
    is created for each incoming call.
    """
    # Headers: no need for headers for e.g. session IDs in this web service.
    # The class where the Cerebrum-specific functionality is done. This is
    # instantiated per call, to avoid thread conflicts.
    cere_class = None
    # The hook for the site object
    site = None
    @rpc(Array(Unicode), Array(Unicode), Array(Unicode),
         _returns=Array(Unicode))
    def get_addresses_by_affiliation(ctx, status=None, skos=None, source=None):
        """Get primary e-mail addresses for persons that match given
        criteria."""
        # rpclib passes the method context as the first argument in place
        # of an instance, hence `ctx` instead of `self`.
        if not source and not status:
            raise Errors.CerebrumRPCException('Input needed')
        return ctx.udc['postmaster'].get_addresses_by_affiliation(
            status=status, skos=skos, source=source)
# Events for the project:
def event_method_call(ctx):
    """Event for incoming calls."""
    # A fresh Cerebrum command object per call, stored in the user-defined
    # context, so no state is shared between concurrent calls.
    ctx.udc['postmaster'] = ctx.service_class.cere_class()
PostmasterServer.event_manager.add_listener('method_call', event_method_call)
def event_exit(ctx):
    """Event for cleaning after a call, i.e. close up db connections. Since
    twisted runs all calls in a pool of threads, we can not trust __del__.
    """
    # TODO: is this necessary any more, as we now are storing it in the method
    # context? Are these deleted after each call? Check it out!
    if 'postmaster' in ctx.udc:
        ctx.udc['postmaster'].close()
# Run the cleanup both on normal return and on exception.
PostmasterServer.event_manager.add_listener('method_return_object', event_exit)
PostmasterServer.event_manager.add_listener('method_exception_object',
                                            event_exit)
def usage(exitcode=0):
    """Print the command-line help text and exit with *exitcode*.

    Bug fix: the help text contains a ``%s`` placeholder for the program
    name, but the original never applied the format argument, so the
    literal ``%s`` was printed.  The placeholder is now interpolated with
    ``sys.argv[0]``.  ``print(...)`` with a single argument is valid in
    both Python 2 and 3.
    """
    print("""Usage: %s --port PORT --instance INSTANCE --logfile FILE
Fire up the Postmaster's webservice.
  --port          What port to run the server. Default: cisconf.PORT.
  --interface     What interface the server should listen to (default: 0.0.0.0)
                  Default: cisconf.INTERFACE.
  --logfile       Where to log. Default: cisconf.LOG_FILE.
  --fingerprints  A comma separated list of certificate fingerprints. If this
                  is set, client certificates that doesn't generate fingerprints
                  which are in this list gets blocked from the service.
                  Default: cisconf.FINGERPRINTS.
  --instance      The Cerebrum instance which should be used. E.g:
                  Cerebrum.modules.no.uio.PostmasterCommands/Commands
                  Default: cisconf.CEREBRUM_CLASS.
  --unencrypted   Don't use https
  --help          Show this and quit
""" % sys.argv[0])
    sys.exit(exitcode)
if __name__ == '__main__':
    # Parse the command line; cisconf settings act as defaults and may be
    # overridden by explicit options below.
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'h',
                                   ['port=', 'unencrypted', 'logfile=',
                                    'help', 'fingerprints=', 'instance=',
                                    'interface='])
    except getopt.GetoptError as e:
        print e
        usage(1)
    use_encryption = True
    port = getattr(cisconf, 'PORT', 0)
    logfilename = getattr(cisconf, 'LOG_FILE', None)
    instance = getattr(cisconf, 'CEREBRUM_CLASS', None)
    interface = getattr(cisconf, 'INTERFACE', None)
    log_prefix = getattr(cisconf, 'LOG_PREFIX', None)
    log_formatters = getattr(cisconf, 'LOG_FORMATTERS', None)
    for opt, val in opts:
        if opt in ('--logfile',):
            logfilename = val
        elif opt in ('--port',):
            port = int(val)
        elif opt in ('--unencrypted',):
            use_encryption = False
        elif opt in ('--instance',):
            instance = val
        elif opt in ('--interface',):
            interface = val
        elif opt in ('-h', '--help'):
            usage()
        else:
            print "Unknown argument: %s" % opt
            usage(1)
    # These three have no usable fallback; refuse to start without them.
    if not port or not logfilename or not instance:
        print "Missing arguments or cisconf variables"
        usage(1)
    # Get the cerebrum class and give it to the server
    module, classname = instance.split('/', 1)
    mod = dyn_import(module)
    cls = getattr(mod, classname)
    PostmasterServer.cere_class = cls
    log.msg("DEBUG: Cerebrum class used: %s" % instance)
    private_key_file = None
    certificate_file = None
    client_ca = None
    fingerprints = None
    if interface:
        SoapListener.TwistedSoapStarter.interface = interface
    if use_encryption:
        # TLS mode: server key/certificate plus optional client
        # certificate fingerprint whitelisting.
        private_key_file = cisconf.SERVER_PRIVATE_KEY_FILE
        certificate_file = cisconf.SERVER_CERTIFICATE_FILE
        client_ca = cisconf.CERTIFICATE_AUTHORITIES
        fingerprints = getattr(cisconf, 'FINGERPRINTS', None)
        server = SoapListener.TLSTwistedSoapStarter(
            port=int(port),
            applications=PostmasterServer,
            private_key_file=private_key_file,
            certificate_file=certificate_file,
            client_ca=client_ca,
            client_fingerprints=fingerprints,
            logfile=logfilename,
            log_prefix=log_prefix,
            log_formatters=log_formatters)
    else:
        server = SoapListener.TwistedSoapStarter(
            port=int(port),
            applications=PostmasterServer,
            logfile=logfilename,
            log_prefix=log_prefix,
            log_formatters=log_formatters)
    # to make it global and reachable (wrong, I know)
    PostmasterServer.site = server.site
    # If sessions' behaviour should be changed (e.g. timeout):
    # server.site.sessionFactory = BasicSession
    # Fire up the server:
    server.run()
|
The Gazoduq Project involves the construction of a 750 km underground gas transmission line. The proposed line is needed to transport Western Canadian natural gas to Énergie Saguenay, a future natural gas liquefaction facility in Quebec. This is a major industrial project for both the Quebec and Canadian economies that will generate significant economic benefits over the short, medium, and long-term.
The Gazoduq Project will help reduce global GHG emissions as the Énergie Saguenay facility will export LNG that is anticipated to replace coal and fuel oil in Europe, Asia and potentially other global markets.
The Project will contribute to strengthening Quebec’s and Canada’s position as leaders in the global fight against climate change.
Why the need for a new natural gas transmission line?
The natural gas line will supply natural gas and will enable the construction and operation of a natural gas liquefaction facility in Saguenay that will use renewable hydro-electricity power. This facility will set a new benchmark for environmental performance in the LNG industry and will generate 80% fewer GHG emissions compared to similar plants.
Gazoduq will also have the ability to support natural gas needs along the route through local distributors.
The new natural gas transmission line will be connected to the main natural gas transportation system in northeastern Ontario and will only transport natural gas.
There are thousands of kilometres of natural gas lines in Quebec and Ontario.
In fact, in many towns and cities across the country, gas transmission and distribution lines are located underground, unnoticed, and people have been living safely for decades alongside this infrastructure.
|
from __future__ import print_function, absolute_import
import re
import numpy as np
from .support import TestCase, override_config, captured_stdout
import numba
from numba import unittest_support as unittest
from numba import jit, njit, types, ir, compiler
from numba.ir_utils import guard, find_callname, find_const, get_definition
from numba.targets.registry import CPUDispatcher
from numba.inline_closurecall import inline_closure_call
from .test_parfors import skip_unsupported
@jit((types.int32,), nopython=True)
def inner(a):
return a + 1
@jit((types.int32,), nopython=True)
def more(a):
return inner(inner(a))
def outer_simple(a):
return inner(a) * 2
def outer_multiple(a):
return inner(a) * more(a)
@njit
def __dummy__():
return
class InlineTestPipeline(numba.compiler.BasePipeline):
"""compiler pipeline for testing inlining after optimization
"""
def define_pipelines(self, pm):
name = 'inline_test'
pm.create_pipeline(name)
self.add_preprocessing_stage(pm)
self.add_with_handling_stage(pm)
self.add_pre_typing_stage(pm)
self.add_typing_stage(pm)
pm.add_stage(self.stage_pre_parfor_pass, "Preprocessing for parfors")
if not self.flags.no_rewrites:
pm.add_stage(self.stage_nopython_rewrites, "nopython rewrites")
if self.flags.auto_parallel.enabled:
pm.add_stage(self.stage_parfor_pass, "convert to parfors")
pm.add_stage(self.stage_inline_test_pass, "inline test")
pm.add_stage(self.stage_ir_legalization,
"ensure IR is legal prior to lowering")
self.add_lowering_stage(pm)
self.add_cleanup_stage(pm)
pm.add_stage(self.stage_preserve_final_ir, "preserve IR")
def stage_preserve_final_ir(self):
self.metadata['final_func_ir'] = self.func_ir.copy()
def stage_inline_test_pass(self):
# assuming the function has one block with one call inside
assert len(self.func_ir.blocks) == 1
block = list(self.func_ir.blocks.values())[0]
for i, stmt in enumerate(block.body):
if guard(find_callname,self.func_ir, stmt.value) is not None:
inline_closure_call(self.func_ir, {}, block, i, lambda: None,
self.typingctx, (), self.typemap, self.calltypes)
break
class TestInlining(TestCase):
"""
Check that jitted inner functions are inlined into outer functions,
in nopython mode.
Note that not all inner functions are guaranteed to be inlined.
We just trust LLVM's inlining heuristics.
"""
def make_pattern(self, fullname):
"""
Make regexpr to match mangled name
"""
parts = fullname.split('.')
return r'_ZN?' + r''.join([r'\d+{}'.format(p) for p in parts])
def assert_has_pattern(self, fullname, text):
pat = self.make_pattern(fullname)
self.assertIsNotNone(re.search(pat, text),
msg='expected {}'.format(pat))
def assert_not_has_pattern(self, fullname, text):
pat = self.make_pattern(fullname)
self.assertIsNone(re.search(pat, text),
msg='unexpected {}'.format(pat))
def test_inner_function(self):
with override_config('DUMP_ASSEMBLY', True):
with captured_stdout() as out:
cfunc = jit((types.int32,), nopython=True)(outer_simple)
self.assertPreciseEqual(cfunc(1), 4)
# Check the inner function was elided from the output (which also
# guarantees it was inlined into the outer function).
asm = out.getvalue()
prefix = __name__
self.assert_has_pattern('%s.outer_simple' % prefix, asm)
self.assert_not_has_pattern('%s.inner' % prefix, asm)
def test_multiple_inner_functions(self):
# Same with multiple inner functions, and multiple calls to
# the same inner function (inner()). This checks that linking in
# the same library/module twice doesn't produce linker errors.
with override_config('DUMP_ASSEMBLY', True):
with captured_stdout() as out:
cfunc = jit((types.int32,), nopython=True)(outer_multiple)
self.assertPreciseEqual(cfunc(1), 6)
asm = out.getvalue()
prefix = __name__
self.assert_has_pattern('%s.outer_multiple' % prefix, asm)
self.assert_not_has_pattern('%s.more' % prefix, asm)
self.assert_not_has_pattern('%s.inner' % prefix, asm)
@skip_unsupported
def test_inline_call_after_parfor(self):
# replace the call to make sure inlining doesn't cause label conflict
# with parfor body
def test_impl(A):
__dummy__()
return A.sum()
j_func = njit(parallel=True, pipeline_class=InlineTestPipeline)(
test_impl)
A = np.arange(10)
self.assertEqual(test_impl(A), j_func(A))
@skip_unsupported
def test_inline_update_target_def(self):
def test_impl(a):
if a == 1:
b = 2
else:
b = 3
return b
func_ir = compiler.run_frontend(test_impl)
blocks = list(func_ir.blocks.values())
for block in blocks:
for i, stmt in enumerate(block.body):
# match b = 2 and replace with lambda: 2
if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Var)
and guard(find_const, func_ir, stmt.value) == 2):
# replace expr with a dummy call
func_ir._definitions[stmt.target.name].remove(stmt.value)
stmt.value = ir.Expr.call(ir.Var(block.scope, "myvar", loc=stmt.loc), (), (), stmt.loc)
func_ir._definitions[stmt.target.name].append(stmt.value)
#func = g.py_func#
inline_closure_call(func_ir, {}, block, i, lambda: 2)
break
self.assertEqual(len(func_ir._definitions['b']), 2)
@skip_unsupported
def test_inline_var_dict_ret(self):
# make sure inline_closure_call returns the variable replacement dict
# and it contains the original variable name used in locals
@numba.njit(locals={'b': numba.float64})
def g(a):
b = a + 1
return b
def test_impl():
return g(1)
func_ir = compiler.run_frontend(test_impl)
blocks = list(func_ir.blocks.values())
for block in blocks:
for i, stmt in enumerate(block.body):
if (isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == 'call'):
func_def = guard(get_definition, func_ir, stmt.value.func)
if (isinstance(func_def, (ir.Global, ir.FreeVar))
and isinstance(func_def.value, CPUDispatcher)):
py_func = func_def.value.py_func
_, var_map = inline_closure_call(
func_ir, py_func.__globals__, block, i, py_func)
break
self.assertTrue('b' in var_map)
@skip_unsupported
def test_inline_call_branch_pruning(self):
# branch pruning pass should run properly in inlining to enable
# functions with type checks
@njit
def foo(A=None):
if A is None:
return 2
else:
return A
def test_impl(A=None):
return foo(A)
class InlineTestPipelinePrune(InlineTestPipeline):
def stage_inline_test_pass(self):
# assuming the function has one block with one call inside
assert len(self.func_ir.blocks) == 1
block = list(self.func_ir.blocks.values())[0]
for i, stmt in enumerate(block.body):
if (guard(find_callname, self.func_ir, stmt.value)
is not None):
inline_closure_call(self.func_ir, {}, block, i,
foo.py_func, self.typingctx,
(self.typemap[stmt.value.args[0].name],),
self.typemap, self.calltypes)
break
# make sure inline_closure_call runs in full pipeline
j_func = njit(pipeline_class=InlineTestPipelinePrune)(test_impl)
A = 3
self.assertEqual(test_impl(A), j_func(A))
self.assertEqual(test_impl(), j_func())
# make sure IR doesn't have branches
fir = j_func.overloads[(types.Omitted(None),)].metadata['final_func_ir']
fir.blocks = numba.ir_utils.simplify_CFG(fir.blocks)
self.assertEqual(len(fir.blocks), 1)
if __name__ == '__main__':
unittest.main()
|
Remi Drolet (bib #331) won first place in the Juvenile Boys’ 10-km Mass Start Classic on Saturday.
After a strong start at the Haywood 2016 Ski Nationals, Black Jack skiers continued to make Rossland proud.
After a strong start at the cross country ski nationals, Black Jack skiers continued to make Rossland proud leading into the final day of competition on Saturday.
Rossland’s own Remi Drolet ended the Haywood 2016 Ski Nationals with two gold and two silver medals, Julien Locke placed third in the Men’s 1.2-km sprint final, and Chiaki Yamamota earned a silver medal in the 5-km classic and captured a bronze medal in the 20-km classic on Saturday.
Remi Drolet, age 15, earned two silver medals in Juvenile Boys’ Sprints on Thursday, and on Saturday finished first in the Juvenile Boys’ 10-km Mass Start Classic, making the podium for every race he entered.
Drolet also placed first in the Juvenile Boys’ 5 km Interval Start Classic on Sunday, March 20.
His four medals were enough for Drolet to capture the overall National Juvenile Boys cross-country title. He also won the prestigious Sofie Manarin Award that goes to the top male and female juvenile skier (under 15 years) at the Canadian Cross-country Ski championship.
This was Drolet’s second year at Nationals, but he said he didn’t too as well last year because he was sick during the competition. He’s eager to build on this year’s success at next’s competition.
With his bronze-medal podium finish, Locke wrapped up the Haywood Buff Sprint Series championship title and Athlete of the Year award for his great results in the sprint races during the season and at the U23 World Ski championship in Romania last month.
Locke’s bronze medal firmly planted him among Canada’s top skiers, as the Nelson native finished behind Canadian National ski team members Alex Harvey who won gold and Lenny Valjas who claimed silver in the event.
Locke jumped out to a good start on the 1,200 metre course. With two laps and an exciting downhill corner, he pushed Canada’s best to the very end and was just edged out of a silver by Valjas at the finish.
Locke’s time of 2:15.43 put him less than a second behind Valjas, 2:14.48, and two seconds back of Harvey who clocked a gold-medal time of 2:13.39. Locke also finished ahead of Team Canada skiers Jesse Cockney who finished in 2:16.08 and Graeme Killick, 2:18.61.
The National ski championship was a positive ending to an already impressive season for Black Jack skiers and bodes well for the future. Results like Locke’s should garner him consideration on Canada’s ski team, but according to Wood, the former coach of the national team, that will be up to a cryptic selection process.
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
import tensorflow as tf
import utility_function as uf
# import my_seq2seq as mseq
import os
import data_queue
import nt
import time
import con_lstm as clstm
RESTORE = False
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('num_threads',2,'''the number of threads for enqueue''')
tf.app.flags.DEFINE_string('train_log_dir','auto_logs',
'''directory wherer to write event logs''')
tf.app.flags.DEFINE_integer('max_training_iter', 100000,
'''the max number of training iteration''')
tf.app.flags.DEFINE_float('init_learning_rate',0.01,
'''initial learning rate''')
tf.app.flags.DEFINE_string('model_dir', 'auto_model_logs',
'''directory where to save the model''')
# INPUT_DIM = 64 * 64
INPUT_H = 64
INPUT_W = 64
INPUT_C = 1
LABEL_C = 1
CELL_C = 4
KSIZE = 5
# LABEL_DIM = INPUT_DIM
# CELL_DIM = 1024
# CELL_LAYER = 1
BATCH_SIZE = 5
# UNROLLING_NUM = 10
UNROLLING_NUM = 10
def train():
input_data_queue = data_queue.DATA_QUEUE()
# image_name = tf.constant("lily.jpg", tf.string)
# image = uf.read_image(image_name, INPUT_H, INPUT_W)
# image_list = list()
# for _ in range(BATCH_SIZE):
# image_e = tf.expand_dims(image, 0)
# image_list.append(image_e)
# batch_image = tf.concat(0, image_list)
# batch_image = batching(image, FLAGS.batch_size)
clstm_cell = clstm.con_lstm_cell(BATCH_SIZE, INPUT_H, INPUT_W, INPUT_C, KSIZE, CELL_C)
# single_cell = tf.nn.rnn_cell.BasicLSTMCell(CELL_DIM)
# multi_cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * CELL_LAYER)
inputs_ph = list()
decodes1_ph = list()
decodes2_ph = list()
for _ in range(UNROLLING_NUM):
# inputs_ph.append(tf.placeholder(tf.float32,[BATCH_SIZE, INPUT_DIM], name = "input_ph"))
# decodes1_ph.append(tf.placeholder(tf.float32,[BATCH_SIZE, INPUT_DIM], name = "decodes1_ph"))
inputs_ph.append(tf.placeholder(tf.float32,[BATCH_SIZE, INPUT_H,
INPUT_W, INPUT_C], name = "input_ph"))
decodes1_ph.append(tf.placeholder(tf.float32,[BATCH_SIZE, INPUT_H,
INPUT_W, INPUT_C], name = "decodes1_ph"))
decodes2_ph.append(tf.placeholder(tf.float32,[BATCH_SIZE, INPUT_H,
INPUT_W, INPUT_C], name = "decodes2_ph"))
# cell_initial_state = multi_cell.zero_state(BATCH_SIZE, tf.float32)
cell_initial_state = clstm_cell.get_zero_state(BATCH_SIZE, INPUT_H, INPUT_W, CELL_C, tf.float32)
# decoder_inputs_dict = dict()
# decoder_inputs_dict['reconstruction'] = decodes1_ph
# decoder_inputs_dict['prediction'] = decodes2_ph
# num_decoder_symbols_dict = dict()
# num_decoder_symbols_dict["reconstruction"] = 0
# num_decoder_symbols_dict["prediction"] = 1
# feed_previous_ph = tf.placeholder(tf.bool)
# loop_function = lambda x,y:x
def loop_function(inp, i, weights, biases):
""" loop function for decode """
output = nt._conv2d(inp, weights, biases, [1,1,1,1])
return output
# with tf.device('/gpu:%d' % 1):
_, state = clstm.clstm_encode(clstm_cell, inputs_ph, cell_initial_state)
outputs1, _ = clstm.clstm_decode([inputs_ph[-1]], state, clstm_cell, UNROLLING_NUM,
loop_function, "decoder1")
outputs2, _ = clstm.clstm_decode([inputs_ph[-1]], state, clstm_cell, UNROLLING_NUM,
loop_function, "decoder2")
# print(outputs)
con_cat_out = tf.concat(0, outputs1 + outputs2)
infer = nt.inference3(con_cat_out, KSIZE, CELL_C, LABEL_C)
con_cat_decodes = tf.concat(0, decodes1_ph + decodes2_ph)
loss = nt.loss1(infer, con_cat_decodes)
saver = tf.train.Saver()
global_step = tf.Variable(0, name = 'global_step', trainable = False)
train_op = nt.training1(loss, FLAGS.init_learning_rate, global_step = global_step)
config_proto = uf.define_graph_config(0.2)
sess = tf.Session(config = config_proto)
init_op = tf.initialize_all_variables()
sess.run(init_op)
if RESTORE:
ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)
print(ckpt.all_model_checkpoint_paths[-1])
if ckpt and ckpt.all_model_checkpoint_paths[-1]:
saver.restore(sess, ckpt.all_model_checkpoint_paths[-1])
else:
print('no check point')
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord = coord, sess = sess)
for i in xrange(FLAGS.max_training_iter):
feed_data = dict()
for j in xrange(UNROLLING_NUM):
input_v = input_data_queue.get_next_batch_train(BATCH_SIZE, False, 4)
feed_data[inputs_ph[j]] = input_v[j]
feed_data[decodes1_ph[j]] = input_v[UNROLLING_NUM - j - 1]
# batch_image_v = sess.run(batch_image)
# feed_data[inputs_ph[j]] = batch_image_v
feed_data[decodes2_ph[j]] = input_v[UNROLLING_NUM + j]
# feed_data[feed_previous_ph] = True
_, loss_v = sess.run([train_op, loss], feed_dict = feed_data)
if i % 100 == 0:
# input_v = input_data_queue.get_next_batch_test(BATCH_SIZE, False, 4)
for j in range(UNROLLING_NUM):
feed_data[inputs_ph[j]] = input_v[j]
feed_data[decodes1_ph[j]] = input_v[UNROLLING_NUM - j - 1]
feed_data[decodes2_ph[j]] = input_v[UNROLLING_NUM + j]
# feed_data[inputs_ph[j]] = batch_image_v
# feed_data[decodes1_ph[j]] = batch_image_v
# feed_data[feed_previous_ph] = True
test_loss_v, infer_v = sess.run([loss, infer], feed_dict = feed_data)
# dis_image = np.concatenate((batch_image_v[0], infer_v[0]), axis = 0)
dis_image = np.concatenate((input_v[0,-1], infer_v[0,-1]), axis = 0)
uf.display_image(dis_image)
disp = "i:%d, train loss:%f, test loss:%f"%(i, loss_v, test_loss_v)
print(disp)
if i != 0 and i % 5000 == 0:
curr_time = time.strftime("%Y%m%d_%H%M")
model_name = FLAGS.model_dir + '/' + curr_time + '_iter_' + str(i) + '_model.ckpt'
saver.save(sess,model_name)
def main(argv = None):
if not os.path.exists(FLAGS.model_dir):
os.makedirs(FLAGS.model_dir)
if not os.path.exists(FLAGS.train_log_dir):
os.makedirs(FLAGS.train_log_dir)
train()
if __name__ == '__main__':
tf.app.run()
|
It’s a NEW year! Every year people look at the past year and see a few flaws or things that they could have done better and this year they don’t want to make the same mistake. This always results in making a new years resolution of some sort.
Have you made yours yet? Well if you have selected one of these reasons below this year, you are not alone! These are a few of the most popular resolutions every year... Drink Less Alcohol, Eat Healthy Food, Get a Better Education, Get a Better Job, Get Fit, Lose Weight, Manage Debt, Manage Stress, Quit Smoking, Reduce, Reuse, Recycle, Save Money, Take a Trip, or Volunteer to Help Others.
Make it something you really want. Don’t make a resolution because it’s something someone else wants you to do or something you “should” want. Select something that fits into your own lifestyle and values.
Quality not Quantity. If you are going to select more then one resolution, keep it at or under three. Make only a few resolutions that you intend to keep. It ensures that you are focusing on the goals that you truly want.
Automation. Use automation to help assist you with completing your goals by setting alarms or reminders. For an example, if you are trying to pay down debt, the calculate the amount out of each check and then have it automatically deposited or paid toward that debt. It takes all the effort out of remembering or writing a check.
Make a plan. Create a series of smaller goals in order to complete and accomplish the larger goal. It helps to have a plan of action.
Keep all of these recommendations in mind when choosing a New Year’s resolution that way, at this time next year you are able to check them off your list!
If your resolution has to do with feeling better, getting fit, losing weight, managing stress, or anything else to improving your life then Pool World has a variety product to assist you with your resolution! Come into one of our four stores to find the special product for your resolution! To find one of our stores, please visit our Location and Hours page.
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Logictech MouseMan serial protocol.
http://www.softnco.demon.co.uk/SerialMouse.txt
"""
from twisted.internet import protocol
class MouseMan(protocol.Protocol):
"""
Parser for Logitech MouseMan serial mouse protocol (compatible
with Microsoft Serial Mouse).
"""
state = 'initial'
leftbutton=None
rightbutton=None
middlebutton=None
leftold=None
rightold=None
middleold=None
horiz=None
vert=None
horizold=None
vertold=None
def down_left(self):
pass
def up_left(self):
pass
def down_middle(self):
pass
def up_middle(self):
pass
def down_right(self):
pass
def up_right(self):
pass
def move(self, x, y):
pass
horiz=None
vert=None
def state_initial(self, byte):
if byte & 1<<6:
self.word1=byte
self.leftbutton = byte & 1<<5
self.rightbutton = byte & 1<<4
return 'horiz'
else:
return 'initial'
def state_horiz(self, byte):
if byte & 1<<6:
return self.state_initial(byte)
else:
x=(self.word1 & 0x03)<<6 | (byte & 0x3f)
if x>=128:
x=-256+x
self.horiz = x
return 'vert'
def state_vert(self, byte):
if byte & 1<<6:
# short packet
return self.state_initial(byte)
else:
x = (self.word1 & 0x0c)<<4 | (byte & 0x3f)
if x>=128:
x=-256+x
self.vert = x
self.snapshot()
return 'maybemiddle'
def state_maybemiddle(self, byte):
if byte & 1<<6:
self.snapshot()
return self.state_initial(byte)
else:
self.middlebutton=byte & 1<<5
self.snapshot()
return 'initial'
def snapshot(self):
if self.leftbutton and not self.leftold:
self.down_left()
self.leftold=1
if not self.leftbutton and self.leftold:
self.up_left()
self.leftold=0
if self.middlebutton and not self.middleold:
self.down_middle()
self.middleold=1
if not self.middlebutton and self.middleold:
self.up_middle()
self.middleold=0
if self.rightbutton and not self.rightold:
self.down_right()
self.rightold=1
if not self.rightbutton and self.rightold:
self.up_right()
self.rightold=0
if self.horiz or self.vert:
self.move(self.horiz, self.vert)
def dataReceived(self, data):
for c in data:
byte = ord(c)
self.state = getattr(self, 'state_'+self.state)(byte)
|
Explore the McNay with your little one during a stroller tour and story time. Free for Members; $10 for nonmember adults. Strollers and baby carriers strongly encouraged.
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Challenge.description'
db.add_column('core_challenge', 'description', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Challenge.description'
db.delete_column('core_challenge', 'description')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.challenge': {
'Meta': {'object_name': 'Challenge'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {})
}
}
complete_apps = ['core']
|
To solve difficulties with searchability together with assessment of information, it’s critical to understand something about the content. Just in case the info isn’t right, or even low-quality, the manager could possibly earn a decision that has a negative effect on the organization. Use the hyperlink down below to see the academic catalog which may supply all that info. Become the information supply, and allow the details be yours. Detailed information could be gotten from your nearest embassy or consulate of the nation you would like to visit. Incomplete details can result in bad decisions, due to the fact if a choice is created with only a number of the information chances are it’s not going to be the right choice. You’ll be supplied access to a difference for each edited article that will demonstrate the changes which were built.
Need not concerned if english you can’t locate a individuality remedy straightaway, though. The main element issue is to choose therapies for your existing emotions. In the same way, in regards to selecting Bach cures you should ignore any body symptoms.
On the BBB’s web-site, you may file a complaint against a neighborhood business, view national complaint statistics, and find out the complaint history and BETTER BUSINESS BUREAU standing of a particular company. There are not any decrease situation f’s. It can be hard to provide a positive business case for business intelligence initiatives, and frequently the assignments have to be prioritized through tactical initiatives.
When gathering the requirements from the company users, typically the regional IT department must also be consulted as a way to establish to which degree it’s possible to fulfill the business’s needs dependent on the particular available data. Fantastic supervisors must be in a position to separate the proper information from the sounds. Furthermore, editors may email you with clarification questions about the info in your articles. Only a few the articles in your consideration is going to be edited at exactly the same time, but you can have more compared to 1 article selected eventually. Your article is going to be locked while it’s being edited. An article that’s normally part of a helpful article might itself be a practical document, for instance, an ornamental steering wheel cover on a car. Make sure you remember that you’re absolutely free to edit a report to your liking once easy methods to unlocked.
Our life is basically full of helpful lessons that we ought to study. If you’ve done similar operate before, the contact information of earlier employer who will supply reference is beneficial. It is essential that personnel who take part in typically the project have a vision including a notion of the advantages and drawbacks of implementing a BI program.
Whether the answer offers expertise depends upon the informed individual. When you fully grasp the reaction to that question, you’ll be in a better place to establish what exactly data to collect and the way to transform it into information you ought to make decisions. If you concentrate on researching them, you will appear to be more interesting and dynamic. If you would like expressing that there’s a huge quantity, you might say there are several of them. If you wish to express that there’s a massive number, you would say there’s a lot. The needs and advantages of typically the implementation are occasionally driven by simply competition and the need to obtain an advantage on the market. If you need assistance with a physical problem you should discuss an experienced medical advisor along with taking Bach remedies.
User help can be incorporated in lots of ways, by way of example by developing a site. Even greater, helpdesk support may be used. Delivering user support is essential to keep the BI system plus resolve user issues.
|
import re
import threading
import time
import unittest
from selenium import webdriver
from app import create_app, db
from app.models import User
class SeleniumTestCase(unittest.TestCase):
client = None
@classmethod
def setUpClass(cls):
# start Firefox
try:
cls.client = webdriver.Firefox()
except:
pass
# skip these tests if the browser could not be started
if cls.client:
# create the application
cls.app = create_app('testing')
cls.app_context = cls.app.app_context()
cls.app_context.push()
# suppress logging to keep unittest output clean
import logging
logger = logging.getLogger('werkzeug')
logger.setLevel("ERROR")
# create the database and populate with some fake data
db.create_all()
User.generate_fake(10)
# add admin user
admin = User(
email='test@example.com',
username='test',
password='test',
confirmed=True)
db.session.add(admin)
db.session.commit()
# start the Flask server in a thread
threading.Thread(target=cls.app.run).start()
# give the server a second to ensure it is up
time.sleep(1)
@classmethod
def tearDownClass(cls):
if cls.client:
# stop the flask server and the browser
cls.client.get('http://localhost:5000/shutdown')
cls.client.close()
# destroy database
db.drop_all()
db.session.remove()
# remove application context
cls.app_context.pop()
def setUp(self):
if not self.client:
self.skipTest('Web browser not available')
def tearDown(self):
pass
def test_home_page(self):
# navigate to home page
self.client.get('http://localhost:5000')
self.assertTrue(re.search(
'BrainDump',
self.client.page_source))
# navigate to login page
self.client.find_element_by_link_text('Log In').click()
self.assertTrue('<h1>Login</h1>' in self.client.page_source)
# login
self.client.find_element_by_name('email').\
send_keys('test@xample.com')
self.client.find_element_by_name(
'password').send_keys('test')
self.client.find_element_by_nameI('submit').click()
self.assertTrue('Log Out' in self.client.page_source)
|
Sanforized Fleetwood Mattress Protector. Made of 100% Cotton Rustproof Zippers. Natural Color. Extra Stitched. Better Housekeeping Guaranteed Product. Available in Cot Size, Twin Size, Full Size, Queen Size and King Size. This Cover will fit up to an 8 In. thick mattress. Other sizes are available.
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import random
from oslo_config import cfg
from osprofiler import profiler
from rally import osclients
from rally.task import scenario
configure = functools.partial(scenario.configure, platform="openstack")
CONF = cfg.CONF
class OpenStackScenario(scenario.Scenario):
    """Base class for all OpenStack scenarios.

    Resolves admin/user OpenStack clients from the benchmark context (or
    accepts pre-built ones) and optionally starts an OSProfiler trace.
    """

    def __init__(self, context=None, admin_clients=None, clients=None):
        super(OpenStackScenario, self).__init__(context)
        if context:
            api_info = {}
            # Honour per-service API version overrides from the task config.
            task_cfg = context.get("config", {})
            if "api_versions" in task_cfg:
                for service, spec in task_cfg["api_versions"].items():
                    api_info[service] = {
                        "version": spec.get("version"),
                        "service_type": spec.get("service_type")}
            if admin_clients is None and "admin" in context:
                self._admin_clients = osclients.Clients(
                    context["admin"]["credential"], api_info)
            if clients is None:
                # Only a user pool present: pick one user for this iteration.
                if "users" in context and "user" not in context:
                    self._choose_user(context)
                if "user" in context:
                    self._clients = osclients.Clients(
                        context["user"]["credential"], api_info)
        # Explicitly supplied clients always win over context-derived ones.
        if admin_clients:
            self._admin_clients = admin_clients
        if clients:
            self._clients = clients
        self._init_profiler(context)

    def _choose_user(self, context):
        """Choose one user from users context

        We are choosing on each iteration one user
        """
        if context["user_choice_method"] == "random":
            chosen_user = random.choice(context["users"])
            chosen_tenant = context["tenants"][chosen_user["tenant_id"]]
        else:
            # Second and last case - 'round_robin': walk tenants first,
            # then the users within each tenant.
            n_tenants = len(context["tenants"])
            # NOTE: iterations are 1-based; shift to 0-based for the
            # modular walk below.
            it = context["iteration"] - 1
            tenant_key = sorted(context["tenants"].keys())[int(it % n_tenants)]
            chosen_tenant = context["tenants"][tenant_key]
            members = chosen_tenant["users"]
            chosen_user = members[int((it / n_tenants) % len(members))]
        context["user"], context["tenant"] = chosen_user, chosen_tenant

    def clients(self, client_type, version=None):
        """Returns a python openstack client of the requested type.

        Only one non-admin user is used per every run of scenario.

        :param client_type: Client type ("nova"/"glance" etc.)
        :param version: client version ("1"/"2" etc.)
        :returns: Standard python OpenStack client instance
        """
        factory = getattr(self._clients, client_type)
        if version is None:
            return factory()
        return factory(version)

    def admin_clients(self, client_type, version=None):
        """Returns a python admin openstack client of the requested type.

        :param client_type: Client type ("nova"/"glance" etc.)
        :param version: client version ("1"/"2" etc.)
        :returns: Python openstack client object
        """
        factory = getattr(self._admin_clients, client_type)
        if version is None:
            return factory()
        return factory(version)

    def _init_profiler(self, context):
        """Inits the profiler when enabled and an HMAC key is available."""
        if not CONF.benchmark.enable_profiler:
            return
        if context is None:
            return
        hmac_key = None
        # The user credential's key (checked second) takes precedence
        # over the admin credential's key.
        for owner in ("admin", "user"):
            if context.get(owner):
                key = context[owner]["credential"].profiler_hmac_key
                if key is not None:
                    hmac_key = key
        if hmac_key is None:
            return
        profiler.init(hmac_key)
        trace_id = profiler.get().get_base_id()
        self.add_output(complete={
            "title": "OSProfiler Trace-ID",
            "chart_plugin": "TextArea",
            "data": [trace_id]})
|
Based on Lifetime's recent track record with biopics, "Whitney" didn't inspire much confidence, especially with "Directed by Angela Bassett" seemingly being all the network could muster to promote it. But what emerges is a surprisingly compelling if decidedly constricted take on the singer's life, focusing squarely on her relationship with Bobby Brown, and ending well before her untimely death at age 48. Mostly, the whole exercise benefits from the radiance of Yaya DaCosta (seen in "Lee Daniels' The Butler") in the title role, who brings a star quality to the thin material worthy of the artist she portrays.
"Time to be Whitney Houston," the pop diva, then 26, says as she exits a limo to attend an awards show as the movie begins, where she meets Bobby (Arlen Escarpeta). Before you know it, they're engaging in sexual acts that tiptoe up to the boundaries of basic cable, and he's talking about a long-term commitment.
Her family, however, responds to the news of their engagement with a decided chill, put off in part by the children Brown has already fathered with other women. And the duo's love for each other is further tested by her overwhelming stardom, to the point where a former fling dismissively refers to Brown as "Mr. Houston," and Houston's record mogul Clive Davis (an unrecognizable Mark Rolston) invites Bobby in to talk - not to sign him to the label, as Bobby hopes, but rather to enlist Brown to help prod his wife to get back to work.
Publication information: Article title: 'Whitney' Hits High Notes. Contributors: Lowry, Brian - Author. Magazine title: Variety. Volume: 326. Issue: 14 Publication date: January 14, 2015. Page number: 81. © Penske Business Media. Provided by ProQuest LLC. All Rights Reserved.
|
import numpy as np
# following the idea of halfedge data structure on David Gu's lecture
# https://www3.cs.stonybrook.edu/~gu/lectures/lecture_8_halfedge_data_structure.pdf
# and adapt it to a numpy friendly representatives
# * vertexes: all position of each vertex
# * faces: all position of each centroid of faces
# * edges: all position of each centroid of edges
# * halfedges: all vectors of each halfedge
# * vertexes2vertexes
class Manifold:
    """Half-edge-style mesh representation backed by numpy arrays.

    Built from a VTK mesh (pyvista-like: exposes n_points, n_cells,
    points, cells). Stores vertex positions, face centroids, edge
    midpoints and half-edge vectors, plus (lazily filled) adjacency
    tables between those element classes.
    """

    def __init__(self, vtk_mesh=None):
        # Always bind `mesh`, even when no mesh is supplied, so that
        # __getstate__ can unconditionally delete it (previously an
        # empty Manifold raised KeyError on pickling).
        self.mesh = vtk_mesh  # a VTK mesh structure (or None)
        if vtk_mesh is not None:
            self.n_vertexes = vtk_mesh.n_points
            self.n_faces = vtk_mesh.n_cells
            # Copy so we never alias the VTK-owned buffers.
            cells = np.array(self.mesh.cells).copy()
            self.vertexes = np.array(self.mesh.points).copy()
            self.faces, cells_begin, cells_end = make_dual(self.n_faces, self.vertexes, cells)
            self.edges, self.halfedges = make_edges(self.n_faces, self.vertexes, cells, cells_begin, cells_end)
            self.n_edges = self.edges.shape[0]
            self.n_halfedges = self.halfedges.shape[0]
            # Adjacency tables are computed on demand elsewhere; start empty.
            self.adjacency_vertexes = None
            self.adjacency_faces = None
            self.adjacency_edges = None
            self.adjacency_halfedges = None
            self.adjacency_vertexes2faces = None
            self.adjacency_vertexes2edges = None
            self.adjacency_vertexes2halfedges = None
            self.adjacency_faces2vertexes = None
            self.adjacency_faces2edges = None
            self.adjacency_faces2halfedges = None
            self.adjacency_edges2vertexes = None
            self.adjacency_edges2faces = None
            self.adjacency_edges2halfedges = None
            self.adjacency_halfedges2vertexes = None
            self.adjacency_halfedges2faces = None
            self.adjacency_halfedges2edges = None
            self.orientation = None

    def __getstate__(self):
        # The VTK mesh handle is not picklable; drop it from the state.
        state = self.__dict__.copy()
        del state["mesh"]
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Restored instances have no live VTK handle.
        self.mesh = None
def make_dual(n_faces, points, faces):
    """Compute face centroids from a flat VTK-style connectivity array.

    ``faces`` is the flat layout ``[sz, v0, ..., v_{sz-1}, sz, ...]``.

    :param n_faces: number of cells encoded in ``faces``
    :param points: (n_points, 3) vertex coordinates
    :param faces: flat int connectivity array as described above
    :returns: (centroids, faces_begin, faces_end) where faces_begin[i] /
        faces_end[i] are the first and last (inclusive) indices into
        ``faces`` of face i's vertex ids.
    """
    ix, cur = 0, 0
    centroid = []
    faces_begin = []
    faces_end = []
    while ix < n_faces:
        sz = faces[cur]
        assert sz > 2  # a polygon needs at least 3 vertices
        # Vertex ids occupy faces[cur+1 .. cur+sz] inclusive; the original
        # slice stopped at cur+sz and silently dropped the last vertex.
        ps = points[faces[cur + 1:cur + sz + 1]]
        # ps is (sz, 3): one row per face vertex (axis 0, not axis 1).
        assert ps.shape[0] == sz
        centroid.append(np.mean(ps, axis=0))
        faces_begin.append(cur + 1)
        faces_end.append(cur + sz)
        cur = cur + sz + 1
        ix += 1
    # We must have consumed the connectivity array exactly.
    assert cur == faces.shape[0]
    return np.array(centroid), np.array(faces_begin), np.array(faces_end)
def make_edges(n_faces, vertexes, cells, cells_begin, cells_end):
    """Compute edge midpoints and half-edge vectors for every face.

    Each face contributes one edge per vertex (the polygon boundary),
    and each edge yields two opposite half-edge vectors.

    :param n_faces: number of faces
    :param vertexes: (n_points, 3) vertex coordinates
    :param cells: flat VTK-style connectivity array
    :param cells_begin: first index of each face's vertex ids in ``cells``
    :param cells_end: last (inclusive) index of each face's vertex ids
    :returns: (edges, halfedges) with shapes (total, 3) and (2*total, 3)
    """
    total = 0
    for ix in range(n_faces):
        # cells_end is inclusive, hence the +1.
        total += cells_end[ix] - cells_begin[ix] + 1
    cur = 0
    # BUG FIX: np.array([total, 3]) built a 2-element vector, not a buffer;
    # allocate real (total, 3) arrays instead.
    edges = np.zeros((total, 3), dtype=np.float64)
    halfedges = np.zeros((2 * total, 3), dtype=np.float64)
    for ix in range(n_faces):
        begin, end = cells_begin[ix], cells_end[ix]
        sz = end - begin + 1
        # end is inclusive, so slice through end + 1 (the original dropped
        # the last vertex and then over-ran the short array in the loop).
        pxs = vertexes[cells[begin:end + 1]]
        for p in range(sz):
            # p - 1 wraps to the last vertex for p == 0, closing the polygon.
            src, tgt = pxs[p - 1], pxs[p]
            edges[cur] = (src + tgt) / 2
            halfedges[2 * cur] = tgt - src
            halfedges[2 * cur + 1] = src - tgt
            cur += 1
    return edges, halfedges
|
I have a custom query for users which works fine. But need to use ajax for the pagination.
Can you please show me your site?
Sorry. It is in local environment.
I don't think you got what I need. I need Ajax pagination. The pagination already works.
I have no problem with the pagination. it works fine. Please read the post.
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.components.Component import Component
class RecordLocator(Component):
    """Encode and decode record locators for transaction ids.

    A locator is the (transaction id + yymmdd date) number rendered in a
    custom base whose alphabet omits easily-confused characters.
    """

    class Inventory(Component.Inventory):
        import pyre.inventory

        # NOTE: the default alphabet deliberately skips 0/O and 1/I/L.
        alphabet = pyre.inventory.str('alphabet', default="23456789ABCDEFGHIJKLMNPQRSTUVWXYZ")

    def __init__(self):
        Component.__init__(self, "locator", facility="recordLocator")
        # Populated by _init() once the inventory is available.
        self._alphabet = None
        self._base = None
        self._hashtable = None

    def _init(self):
        Component._init(self)
        self._alphabet = list(self.inventory.alphabet)
        self._base = len(self._alphabet)
        self._hashtable = self._hash(self._alphabet)

    def encode(self, transactionId, date=None):
        """Build a locator from a transaction id and a yymmdd date string.

        When no date is given, today's local date is used.
        """
        if date is None:
            import time
            date = time.strftime("%y%m%d", time.localtime())
        bcd = int(str(transactionId) + date)
        return self._encode(bcd)

    def decode(self, locator):
        """Recover the (transaction-id digits, trailing 4 digits) pair."""
        tid = 0
        # Least-significant symbol first: walk the locator right-to-left.
        for power, symbol in enumerate(reversed(locator.upper())):
            tid += self._hashtable[symbol] * self._base ** power
        digits = str(tid)
        return digits[:-4], digits[-4:]

    def _encode(self, bcd):
        """Render a non-negative integer in the configured alphabet."""
        symbols = []
        while True:
            bcd, remainder = divmod(bcd, self._base)
            symbols.append(self._alphabet[remainder])
            if bcd == 0:
                break
        return "".join(reversed(symbols))

    def _hash(self, alphabet):
        """Map each alphabet symbol to its numeric value."""
        return {symbol: value for value, symbol in enumerate(alphabet)}
# version — CVS $Id$ keyword expansion identifying this revision
__id__ = "$Id: RecordLocator.py,v 1.3 2005/04/28 03:37:16 pyre Exp $"
# End of file
|
Home Doctor What’s later on for Physician Who?
What’s later on for Physician Who?
Comments Off on What’s later on for Physician Who?
What’s later on of Physician Who? This can be a question that plaques Physician Who fans each year. Once we come nearer to the fiftieth anniversary from the series it’s possible to only fathom what is available for the favorite Time Lord.
However the way forward for the Physician looks very promising. Ratings are in a record high. Merchandise adorns the shelves of stores for those manners of products from toys to clothing as well as decoration. Yes, you can purchase Physician Who bed coverings.
One small shadow looms over all this recognition: the Physician's remaining regenerations. The Physician, like all Time Lords, can only regenerate twelve times, equaling 13 lives before his final death. The reasons behind this vary from source to source, but the end result is the same: a Time Lord can only regenerate twelve times.
The present Physician, Matt Cruz, may be the eleventh incarnation from the televised Time Lord. Although he’s no aim of departing the series, fans will not help but question what can happen then. The Physician has only two more regenerations left. This implies he has only two more lives.
Now most Doctors existed for some time however, many are extremely short resided such as the sixth, eighth and ninth Doctors. Some fans worry the Physician might be breezing through his remaining lives. However producer and script author Craig Letts had this to state regarding what can happen once the Physician reaches the finish of his rope – “He’d simply try to keep on.” And that’s what the Physician does, he finds a means.
Now poor the series’ plot lines, what’s later on for Physician Who’s rather up in mid-air. We all know of the character named River Song who’s another time traveler, she’s apparently romantically attached to the Physician which provides light towards the theory the traveling Time Lord settles lower eventually.
The 2nd real question is regarding ‘the fall from the eleventh’ in the fight of Trenzalor. The Physician is destined to behave completely horrible, something so terrible that it’s to become known by everybody throughout time. Whatever this act might or might not be remains seen which is rumored that it’s the focus from the approaching seventh number of the tv show. However, regardless of what is coming for that Physician, the Physician will invariably overcome and win in the finish during the day!
Next article Which Actor Is the greatest Physician Who?
What Is the Difference Between Cytomel and Synthroid?
What are the Types of Invisible Braces Available?
Ultrasound is a Boon to Medical Science – Helps to Treat Better!
Which Actor Is the greatest Physician Who?
|
from datetime import datetime
def nextponies():
    """Return a human-readable countdown to the next scheduled episode.

    Air times are hard-coded as (datetime, series, episode, title) tuples.
    Returns an "OutOfPoniesException" message once every air date has passed.
    """
    times = [
        (datetime(2011, 11, 12, 15),2,6,"The Cutie Pox"),
        (datetime(2011, 11, 19, 15),2,7,"May the Best Pet Win!"),
        (datetime(2011, 11, 26, 15),2,8,"The Mysterious mare do Well"),
        (datetime(2011, 12, 3, 15),2,9,"Sweet and Elite"),
        (datetime(2011, 12, 10, 15),2,10,"Secret of My Excess"),
        (datetime(2011, 12, 17, 15),2,11,"Hearth's Warming Eve"),
        (datetime(2012, 1, 7, 15),2,12,"Family Appreciation Day"),
        (datetime(2012, 1, 14, 15),2,13,"Baby Cakes"),
        (datetime(2012, 1, 21, 15),2,14,"The last Roundup"),
        (datetime(2012, 1, 28, 15),2,15,"The Super Speedy Cider Squeezy 6000"),
        (datetime(2012, 2, 4, 15),2,16,"Read It and Weep"),
        (datetime(2012, 11, 10, 15),3,1,"The Crystal Empire, Part 1"),
        (datetime(2012, 11, 10, 16),3,2,"The Crystal Empire, Part 2"),
        (datetime(2012, 11, 17, 15),3,3,"Too Many Pinkie Pies"),
        (datetime(2012, 11, 24, 15),3,4,"One Bad Apple"),
        (datetime(2012, 12, 3, 15),3,5,"Magic Duel"),
        (datetime(2012, 12, 10, 15),3,6,"Sleepless in Ponyville"),
        (datetime(2012, 12, 17, 15),3,7,"Wonderbolt Academy"),
        (datetime(2012, 12, 24, 15),3,8,"Apple Family Reunion")
    ]
    # BUG FIX: snapshot the clock once so every delta is measured against
    # the same instant (the lazy map() evaluated datetime.now() per element).
    now = datetime.now()
    upcoming = sorted((airtime - now, series, episode, title)
                      for airtime, series, episode, title in times)
    for delta, series, episode, title in upcoming:
        # A non-negative day count means the air time is now or in the future.
        if delta.days >= 0:
            return "%s until Series %d episode %d - %s!" % (
                str(delta).split(".")[0], series, episode, title)
    return "OutOfPoniesException: no ponies found in the future."
def ponies(tbot, user, channel, msg):
    """IRC command handler: announce the next scheduled episode in `channel`.

    `user` and `msg` are unused — presumably required by the bot's
    dispatch signature (TODO confirm against the bot framework).
    """
    tbot.msg(channel,nextponies())
# Regex the bot matches against incoming messages to dispatch this handler.
ponies.rule = "^!ponies$"
|
(adjective) Causing resentment, animosity, or bitter envy.
EXAMPLE: His hard-working employees never forgave their arrogant boss for his many invidious remarks about how much harder he worked than anyone else in his company.
EXAMPLE: Executives must do their research carefully because they don't want their decisions to be off base.
|
# -*- coding: utf-8 -*-
"""
Plot PTMap|RMSMap|Response|DepthSlice from Inversion Model Results
Created on Tue Oct 04 13:13:29 2016
@author: Alison.Kirkby@ga.gov.au
@author: Fei.zhang@ga.gov.au
Test Run:
python examples/scripts/visualize_modem_models.py ./examples/data/ModEM_files/VicSynthetic07
"""
import os
import sys
#from mtpy.imaging.modem_phase_tensor_maps import PlotPTMaps
from mtpy.modeling.modem.phase_tensor_maps import PlotPTMaps
from mtpy.imaging.plot_depth_slice import PlotDepthSlice
#from legacy.plot_response import PlotResponse
#from legacy.plot_rms_map import PlotRMSMaps
from mtpy.modeling.modem.plot_response import PlotResponse
from mtpy.modeling.modem import PlotRMSMaps
# original test case:
# datfn='ModEM_Data_noise10inv.dat' # what is this noiseinv.dat?
# NLCG_datfn='Modular_MPI_NLCG_019.dat'
# resfn='Modular_MPI_NLCG_019.res'
# rhofn='Modular_MPI_NLCG_019.rho'
# FZ: below works fine
# datfn='Isa_run3_NLCG_049.dat' #'ModEM_Data_noise10inv.dat'
# NLCG_datfn='Isa_run3_NLCG_049.dat'
# resfn='Isa_run3_NLCG_049.res'
# rhofn='Isa_run3_NLCG_049.rho'
# rename/copy the final MODEM results to these file names:
datfn='NLCG.dat'  # observed data file, passed as data_fn (was 'ModEM_Data_noise10inv.dat')
NLCG_datfn='NLCG.dat'  # inversion response file, passed as resp_fn
resfn='NLCG.res'  # residuals file, passed as residual_fn
rhofn='NLCG.rho'  # resistivity model file, passed as model_fn
def plot_model(data_dir, plot_type='PTMap', depth_index=20, periodin=0):
    """
    Plot one view of a ModEM inversion result.

    :param data_dir: directory where modem's NLCG.dat .rho .res files are located
    :param plot_type: one of PTMap|RMSMap|Response|DepthSlice
    :param depth_index: index of the depth slice to plot (DepthSlice only)
    :param periodin: period index to plot (PTMap only)
    :return: None
    """
    wd = data_dir

    # plot phase tensor map with residuals
    if plot_type == 'PTMap':
        ptmObj = PlotPTMaps(data_fn=os.path.join(wd, datfn),
                            resp_fn=os.path.join(wd, NLCG_datfn),
                            ellipse_size=30)
        outfn = os.path.join(wd, 'ptmaps.png')
        ptmObj.plot(period=periodin, save2file=outfn)

    # plot map of RMS values
    if plot_type == 'RMSMap':
        resfile = os.path.join(wd, resfn)
        # NOTE(review): the object is constructed but no plot/save call
        # follows; presumably prmsObj.plot_loop(fig_format="png") should be
        # invoked to plot all periods — confirm against the PlotRMSMaps API.
        prmsObj = PlotRMSMaps(
            residual_fn=resfile,
            xminorticks=50000,
            yminorticks=50000)

    # plot responses at a station
    if plot_type == 'Response':
        # FZ: need to refactor plot_type= list of station names
        pltObj = PlotResponse(data_fn=os.path.join(wd, datfn),
                              plot_type=['16-L03S01', 'VIC001'])
        pltObj.plot()

    # plot depth slice
    if plot_type == 'DepthSlice':
        modrho = os.path.join(wd, rhofn)
        pltObj = PlotDepthSlice(
            model_fn=modrho,
            save_plots='y',
            depth_index=depth_index)
        pltObj.plot(ind=depth_index)
    return
#########################################################################
# plot_type=[ PTMap RMSMap Response DepthSlice ]
# How2Run:
# python examples/cmdline/visualize_modem_models.py ./examples/data/ModEM_files/VicSynthetic07 PTMap pindex
#
# python examples/cmdline/visualize_modem_models.py ./examples/data/ModEM_files/VicSynthetic07
# ---------------------------------------
if __name__ == '__main__':
    PLOT_TYPES = ['PTMap', 'RMSMap', 'Response', 'DepthSlice']
    if len(sys.argv) < 2:
        # No data directory supplied: print usage and exit instead of
        # crashing on sys.argv[1] as the previous flow did.
        print("USAGE example:")
        print(
            "python %s examples/data/ModEM_files/VicSynthetic07 [PTMap|RMSMap|Response|DepthSlice]" %
            (sys.argv[0]))
        sys.exit(1)
    elif len(sys.argv) == 2:
        # Only the data directory given: render every plot type in turn.
        for plot_type in PLOT_TYPES:
            plot_model(sys.argv[1], plot_type=plot_type)
    elif len(sys.argv) == 3:
        data_dir = sys.argv[1]
        plot_type = sys.argv[2]
        if plot_type not in PLOT_TYPES:
            # Previously the warning was printed but the plot still ran.
            print("Input Parameter plot type must be in:", PLOT_TYPES)
            sys.exit(1)
        plot_model(data_dir, plot_type=plot_type)
    else:
        data_dir = sys.argv[1]
        plot_type = sys.argv[2]
        period_index = int(sys.argv[3])
        if plot_type not in PLOT_TYPES:
            print("Input Parameter plot type must be in:", PLOT_TYPES)
            sys.exit(1)
        plot_model(data_dir, plot_type=plot_type, periodin=period_index)
|
Interior Wall Graphics are a great way to give your facility a fresh new look. Whether you want to install wall graphics in your lobby to show customers what your business is all about or put up graphics of your mascot in your school’s gym, we can help you the whole way. Our interior wall graphics can be easily removed from the surface it is applied to, so that you can update your look frequently and with ease. Call us today to get a quote on your graphics and to schedule a site check to determine the best interior graphics for you!
|
#!/usr/bin/env python
"""
Numerical integration with autowrap
-----------------------------------
This example demonstrates how you can use the autowrap module in SymPy
to create fast, numerical integration routines callable from python. See
in the code for detailed explanations of the various steps. An
autowrapped sympy expression can be significantly faster than what you
would get by applying a sequence of the ufuncs shipped with numpy. [0]
We will find the coefficients needed to approximate a quantum mechanical
Hydrogen wave function in terms of harmonic oscillator solutions. For
the sake of demonstration, this will be done by setting up a simple
numerical integration scheme as a SymPy expression, and obtain a binary
implementation with autowrap.
You need to have numpy installed to run this example, as well as a
working fortran compiler. If you have pylab installed, you will be
rewarded with a nice plot in the end.
[0]:
http://ojensen.wordpress.com/2010/08/10/fast-ufunc-ish-hydrogen-solutions/
----
"""
import sys
from sympy.external import import_module
np = import_module('numpy')
if not np:
sys.exit("Cannot import numpy. Exiting.")
pylab = import_module('pylab', warn_not_installed=True)
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.autowrap import autowrap, ufuncify
from sympy import Idx, IndexedBase, Lambda, pprint, Symbol, oo, Integral,\
Function
from sympy.physics.sho import R_nl
from sympy.physics.hydrogen import R_nl as hydro_nl
# ***************************************************************************
# calculation parameters to play with
# ***************************************************************************
# NOTE: larger values improve accuracy but increase compile/run time.
basis_dimension = 5  # Size of h.o. basis (n < basis_dimension)
omega2 = 0.1  # in atomic units: twice the oscillator frequency
orbital_momentum_l = 1  # the quantum number `l` for angular momentum
hydrogen_n = 2  # the nodal quantum number for the Hydrogen wave
rmax = 20  # cut off in the radial direction
gridsize = 200  # number of points in the grid
# ***************************************************************************
def main():
    """Approximate a hydrogen radial wave function in an h.o. basis.

    Compiles ufuncs for the basis states, builds autowrapped integrators
    for the overlap integrals, computes the expansion coefficients and
    (optionally) plots the reconstruction against the exact solution.
    """
    print(__doc__)
    # arrays are represented with IndexedBase, indices with Idx
    m = Symbol('m', integer=True)
    i = Idx('i', m)
    A = IndexedBase('A')
    B = IndexedBase('B')  # NOTE(review): B is unused below
    x = Symbol('x')
    print("Compiling ufuncs for radial harmonic oscillator solutions")
    # setup a basis of ho-solutions (for l=0)
    basis_ho = {}
    for n in range(basis_dimension):
        # Setup the radial ho solution for this n
        expr = R_nl(n, orbital_momentum_l, omega2, x)
        # Reduce the number of operations in the expression by eval to float
        expr = expr.evalf(15)
        print("The h.o. wave function with l = %i and n = %i is" % (
            orbital_momentum_l, n))
        pprint(expr)
        # implement, compile and wrap it as a ufunc
        basis_ho[n] = ufuncify(x, expr)
    # now let's see if we can express a hydrogen radial wave in terms of
    # the ho basis. Here's the solution we will approximate:
    H_ufunc = ufuncify(x, hydro_nl(hydrogen_n, orbital_momentum_l, 1, x))
    # The transformation to a different basis can be written like this,
    #
    # psi(r) = sum_i c(i) phi_i(r)
    #
    # where psi(r) is the hydrogen solution, phi_i(r) are the H.O. solutions
    # and c(i) are scalar coefficients.
    #
    # So in order to express a hydrogen solution in terms of the H.O. basis, we
    # need to determine the coefficients c(i).  In position space, it means
    # that we need to evaluate an integral:
    #
    # psi(r) = sum_i Integral(R**2*conj(phi(R))*psi(R), (R, 0, oo)) phi_i(r)
    #
    # To calculate the integral with autowrap, we notice that it contains an
    # element-wise sum over all vectors.  Using the Indexed class, it is
    # possible to generate autowrapped functions that perform summations in
    # the low-level code.  (In fact, summations are very easy to create, and as
    # we will see it is often necessary to take extra steps in order to avoid
    # them.)
    # we need one integration ufunc for each wave function in the h.o. basis
    binary_integrator = {}
    for n in range(basis_dimension):
        #
        # setup basis wave functions
        #
        # To get inline expressions in the low level code, we attach the
        # wave function expressions to a regular SymPy function using the
        # implemented_function utility.  This is an extra step needed to avoid
        # erronous summations in the wave function expressions.
        #
        # Such function objects carry around the expression they represent,
        # but the expression is not exposed unless explicit measures are taken.
        # The benefit is that the routines that searches for repeated indices
        # in order to make contractions will not search through the wave
        # function expression.
        psi_ho = implemented_function('psi_ho',
            Lambda(x, R_nl(n, orbital_momentum_l, omega2, x)))
        # We represent the hydrogen function by an array which will be an input
        # argument to the binary routine.  This will let the integrators find
        # h.o. basis coefficients for any wave function we throw at them.
        psi = IndexedBase('psi')
        #
        # setup expression for the integration
        #
        step = Symbol('step')  # use symbolic stepsize for flexibility
        # let i represent an index of the grid array, and let A represent the
        # grid array.  Then we can approximate the integral by a sum over the
        # following expression (simplified rectangular rule, ignoring end point
        # corrections):
        expr = A[i]**2*psi_ho(A[i])*psi[i]*step
        if n == 0:
            print("Setting up binary integrators for the integral:")
            pprint(Integral(x**2*psi_ho(x)*Function('psi')(x), (x, 0, oo)))
        # But it needs to be an operation on indexed objects, so that the code
        # generators will recognize it correctly as an array.
        # expr = expr.subs(x, A[i])
        # Autowrap it.  For functions that take more than one argument, it is
        # a good idea to use the 'args' keyword so that you know the signature
        # of the wrapped function.  (The dimension m will be an optional
        # argument, but it must be present in the args list.)
        binary_integrator[n] = autowrap(expr, args=[A.label, psi.label, step, m])
        # Lets see how it converges with the grid dimension
        print("Checking convergence of integrator for n = %i" % n)
        for g in range(3, 8):
            grid, step = np.linspace(0, rmax, 2**g, retstep=True)
            print("grid dimension %5i, integral = %e" % (2**g,
                  binary_integrator[n](grid, H_ufunc(grid), step)))
    print("A binary integrator has been set up for each basis state")
    print("We will now use them to reconstruct a hydrogen solution.")
    # Note: We didn't need to specify grid or use gridsize before now
    grid, stepsize = np.linspace(0, rmax, gridsize, retstep=True)
    print("Calculating coefficients with gridsize = %i and stepsize %f" % (
        len(grid), stepsize))
    coeffs = {}
    for n in range(basis_dimension):
        coeffs[n] = binary_integrator[n](grid, H_ufunc(grid), stepsize)
        print("c(%i) = %e" % (n, coeffs[n]))
    print("Constructing the approximate hydrogen wave")
    hydro_approx = 0
    all_steps = {}
    # Accumulate partial sums so each basis state's contribution can be shown.
    for n in range(basis_dimension):
        hydro_approx += basis_ho[n](grid)*coeffs[n]
        all_steps[n] = hydro_approx.copy()
        if pylab:
            line = pylab.plot(grid, all_steps[n], ':', label='max n = %i' % n)
    # check error numerically
    diff = np.max(np.abs(hydro_approx - H_ufunc(grid)))
    print("Error estimate: the element with largest deviation misses by %f" % diff)
    if diff > 0.01:
        print("This is much, try to increase the basis size or adjust omega")
    else:
        print("Ah, that's a pretty good approximation!")
    # Check visually
    if pylab:
        print("Here's a plot showing the contribution for each n")
        line[0].set_linestyle('-')
        pylab.plot(grid, H_ufunc(grid), 'r-', label='exact')
        pylab.legend()
        pylab.show()
    print("""Note:
    These binary integrators were specialized to find coefficients for a
    harmonic oscillator basis, but they can process any wave function as long
    as it is available as a vector and defined on a grid with equidistant
    points. That is, on any grid you get from numpy.linspace.
    To make the integrators even more flexible, you can setup the harmonic
    oscillator solutions with symbolic parameters omega and l. Then the
    autowrapped binary routine will take these scalar variables as arguments,
    so that the integrators can find coefficients for *any* isotropic harmonic
    oscillator basis.
    """)
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
Entrepreneur. Ethics. Leadership. Alumni @University of Hawaii, @EastWestCenter, @GVSU.
I am an educator, entrepreneur, and business consultant. I am also an area expert on China having formally been trained in Chinese culture and language. I have recently been involved in researching, lecturing, and consulting on the intersection of ethics, business, and technology.
I have taught and worked in unique and stimulating environments, including a university in Shanghai, China, the East-West Center’s Asia Pacific Leadership Program in Honolulu, Hawaii, and as a Visiting Assistant Professor at Grand Valley State University.
I have served on non-profit boards in Honolulu, HI and San Francisco, CA and ethics committees in Michigan and California.
I am an entrepreneur that has developed business plans, facilitated the raising of capital for new ventures, and handled day-to-day management of small organizations. Most recently, I have helped to launch MotleyGreen, a social media and gamification platform focused on sustainability.
I speak Mandarin Chinese and have lived and worked extensively in China.
I have consulted with large and small business organizations in South Africa, China, and the United States on the future of business, the role of China's economic and social development, generational theory, leadership and ethics, and social media technology.
After working in the Asia Pacific Leadership Program for one year as a Leadership Seminar Assistant, I decided to return to school and spend time studying once again with Dr. Roger T. Ames. My goal was to become a better writer and develop advanced critical thinking skills that would prepare me for further academic work and consulting. Study throughout the duration of this degree included in-depth analysis of Chinese philosophy and strategic culture, the impact of industrial and organic food delivery systems on the natural environment, global environmental issues, Islamic culture and history, and advanced symbolic logic.
At GVSU I started in the Finance program and found the curriculum to be a bit less rigorous than I anticipated. After taking Ethics and the Philosophy of Religion, I was inspired to take a more comprehensive approach to my education and develop critical thinking skills through a philosophy degree, while at the same time completing a minor in Finance. Study throughout the duration of this degree included general undergraduate curriculum in comparative philosophy, social and political philosophy, East Asian religions, and business with a particular focus on finance. Extensive field study took place through the Community Working Classics Program in the underprivileged and economically challenged communities of Western Michigan with a particular focus on the Heartside community of Grand Rapids, Michigan and the Muskegon Correctional Facility in Muskegon, Michigan.
After spending a year in China following my undergraduate study at GVSU, I was enamored with the possibilities that China promised (both intellectually and in business). In addition, my comparative studies as an undergraduate helped me to see the value in studying culture broadly. To exhibit my commitment to China, and to open career possibilities in the Asia Pacific region, I chose to head to the University of Hawaii to study with China scholar Dr. Roger T. Ames. Study throughout the duration of this degree included in-depth analysis of Chinese popular culture, China and its modern economy, Chinese strategic culture, Chinese philosophy and religion, East Asian politics, the Asian built environment, and advanced Mandarin training. Extensive field study took place in China, particularly Beijing and the Eastern provinces of the P.R.C.
Eric is a highly compelling and effective educator, speaker, and consultant. His management skills, which are many, include development of competent leaders and navigating generational conflicts.
Overview of employing social media in the context of generational diversity.
|
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
getfullargspec() - same, with support for Python-3000 features
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
signature() - get a Signature object for the callable
"""
# This module is in the public domain. No warranties.
__author__ = ('Ka-Ping Yee <ping@lfw.org>',
'Yury Selivanov <yselivanov@sprymix.com>')
import imp
import importlib.machinery
import itertools
import linecache
import os
import re
import sys
import tokenize
import types
import warnings
import functools
import builtins
from operator import attrgetter
from collections import namedtuple, OrderedDict
# Create constants for the compiler flags in Include/code.h
# We try to get them from dis to avoid duplication, but fall
# back to hardcoding so the dependency is optional
try:
    from dis import COMPILER_FLAG_NAMES as _flag_names
except ImportError:
    # dis unavailable: hardcode the flag values from Include/code.h.
    CO_OPTIMIZED, CO_NEWLOCALS = 0x1, 0x2
    CO_VARARGS, CO_VARKEYWORDS = 0x4, 0x8
    CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
else:
    # Re-export dis's {value: name} mapping as CO_<NAME> module globals.
    mod_dict = globals()
    for k, v in _flag_names.items():
        mod_dict["CO_" + v] = k
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object) -> bool:
    """Return true if the object is a module.

    Module objects provide these attributes:
        __cached__      pathname to byte compiled file
        __doc__         documentation string
        __file__        filename (missing for built-in modules)"""
    return isinstance(object, types.ModuleType)
def isclass(object):
    """Check whether *object* is a class.

    Classes expose attributes such as __doc__ (documentation string)
    and __module__ (name of the module that defined the class)."""
    # Every class (old-style classes no longer exist in Py3) is a type.
    return isinstance(object, type)
def ismethod(object):
    """Check whether *object* is a bound instance method.

    Method objects expose __doc__, __name__, __func__ (the underlying
    function) and __self__ (the instance the method is bound to)."""
    # Bound methods of user-defined classes are types.MethodType;
    # built-in methods (e.g. [].append) deliberately do not qualify.
    return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
    """Check whether *object* is a method descriptor, excluding anything
    for which ismethod(), isclass() or isfunction() is true.

    True of e.g. int.__add__.  Such an object has a __get__ attribute
    but no __set__ attribute; beyond that the attribute set varies,
    though __name__ is usually sensible and __doc__ often is.  Objects
    passing the stronger tests are excluded because those tests promise
    more (e.g. a __func__ attribute for ismethod())."""
    # Mutual exclusion with the richer classifications.
    if isclass(object):
        return False
    if ismethod(object) or isfunction(object):
        return False
    kind = type(object)
    # Non-data descriptor: readable but not writable via the protocol.
    return hasattr(kind, "__get__") and not hasattr(kind, "__set__")
def isdatadescriptor(object):
    """Check whether *object* is a data descriptor.

    Data descriptors have both a __get__ and a __set__ attribute.
    Examples are properties (defined in Python) and getsets and members
    (defined in C).  They typically also carry __name__ and __doc__,
    but that is not guaranteed.  Objects for which isclass(), ismethod()
    or isfunction() is true are excluded."""
    # Mutual exclusion with the richer classifications.
    if isclass(object):
        return False
    if ismethod(object) or isfunction(object):
        return False
    kind = type(object)
    # Data descriptor: both halves of the descriptor protocol present.
    return hasattr(kind, "__set__") and hasattr(kind, "__get__")
if hasattr(types, 'MemberDescriptorType'):
    # CPython and compatible implementations expose the concrete type.
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in
        extension modules (the descriptors behind __slots__, for
        example)."""
        return isinstance(object, types.MemberDescriptorType)
else:
    # Implementations without the type cannot produce such descriptors.
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in
        extension modules; this implementation defines none, so the
        answer is always False."""
        return False
if hasattr(types, 'GetSetDescriptorType'):
    # CPython and compatible implementations expose the concrete type.
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in
        extension modules (e.g. most frame and code object attributes)."""
        return isinstance(object, types.GetSetDescriptorType)
else:
    # Implementations without the type cannot produce such descriptors.
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in
        extension modules; this implementation defines none, so the
        answer is always False."""
        return False
def isfunction(object):
    """Check whether *object* is a user-defined (pure Python) function.

    Function objects expose, among others: __doc__, __name__, __code__
    (compiled bytecode), __defaults__, __globals__, __annotations__ and
    __kwdefaults__.  Built-in functions such as len() do not qualify."""
    return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
    """Check whether *object* is a user-defined generator function.

    Generator function objects provide the same attributes as ordinary
    functions; see help(isfunction) for the listing."""
    if not (isfunction(object) or ismethod(object)):
        return False
    # The compiler marks generator bodies with CO_GENERATOR.
    return bool(object.__code__.co_flags & CO_GENERATOR)
def isgenerator(object):
    """Check whether *object* is a generator (a generator-iterator).

    Generators expose: __iter__ (iteration support), close (raise
    GeneratorExit inside the generator), gi_code (code object),
    gi_frame (frame object, possibly None after exhaustion),
    gi_running (1 while executing, 0 otherwise), send (resume and
    "send" a value that becomes the current yield expression's result)
    and throw (raise an exception inside the generator)."""
    return isinstance(object, types.GeneratorType)
def istraceback(object):
    """Check whether *object* is a traceback object.

    Tracebacks expose: tb_frame (frame at this level), tb_lasti (index
    of the last attempted bytecode instruction), tb_lineno (current
    source line number) and tb_next (the next inner traceback)."""
    return isinstance(object, types.TracebackType)
def isframe(object):
    """Check whether *object* is a frame object.

    Frames expose: f_back (caller's frame), f_builtins, f_code (code
    object being executed), f_globals, f_lasti (index of the last
    attempted instruction), f_lineno (current source line), f_locals
    and f_trace (per-frame tracing function or None)."""
    return isinstance(object, types.FrameType)
def iscode(object):
    """Check whether *object* is a code object.

    Code objects expose, among others: co_argcount, co_code (raw
    bytecode), co_consts, co_filename, co_firstlineno, co_flags
    (1=optimized | 2=newlocals | 4=*arg | 8=**arg), co_lnotab (line
    number table), co_name, co_names, co_nlocals, co_stacksize and
    co_varnames (argument and local variable names)."""
    return isinstance(object, types.CodeType)
def isbuiltin(object):
    """Check whether *object* is a built-in function or method.

    Built-ins expose __doc__, __name__ (the original name) and
    __self__ (the instance a method is bound to, or None)."""
    return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
    """Return true if the object is any kind of function or method."""
    # Union of all callable-classification predicates defined above.
    for check in (isbuiltin, isfunction, ismethod, ismethoddescriptor):
        if check(object):
            return True
    return False
def isabstract(object):
    """Return true if the object is an abstract base class (ABC)."""
    if not isinstance(object, type):
        return False
    # The interpreter sets this type flag while the class still has
    # unimplemented abstract methods.
    return bool(object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.
    Optionally, only return members that satisfy a given predicate."""
    # For classes, consult the raw __dict__ along the MRO first: some
    # descriptors dislike having their __get__ invoked (see bug #1785).
    if isclass(object):
        search_order = (object,) + getmro(object)
    else:
        search_order = ()
    results = []
    for key in dir(object):
        for base in search_order:
            if key in base.__dict__:
                value = base.__dict__[key]
                break
        else:
            # Fall back to attribute access; skip names that vanish.
            try:
                value = getattr(object, key)
            except AttributeError:
                continue
        if not predicate or predicate(value):
            results.append((key, value))
    results.sort()
    return results
# Result record for classify_class_attrs(): name, classification string,
# class that defined the attribute, and the raw object from that class's
# __dict__.
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.

    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:

        0. The name (a string).

        1. The kind of attribute this is, one of these strings:
               'class method'    created via classmethod()
               'static method'   created via staticmethod()
               'property'        created via property()
               'method'          any other flavor of method
               'data'            not a method

        2. The class which defined this attribute (a class).

        3. The object as obtained directly from the defining class's
           __dict__, not via getattr.  This is especially important for
           data attributes:  C.data is just a data object, but
           C.__dict__['data'] may be a data descriptor with additional
           info, like a __doc__ string.
    """
    mro = getmro(cls)
    names = dir(cls)
    result = []
    for name in names:
        # Get the object associated with the name, and where it was defined.
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr. Static and class methods are dramatic examples.
        # Furthermore, some objects may raise an Exception when fetched with
        # getattr(). This is the case with some descriptors (bug #1785).
        # Thus, we only use getattr() as a last resort.
        homecls = None
        # Walk cls first, then its MRO, so the most derived definition wins.
        for base in (cls,) + mro:
            if name in base.__dict__:
                obj = base.__dict__[name]
                homecls = base
                break
        else:
            # Not found in any __dict__ (e.g. synthesized by a metaclass);
            # fetch via getattr and trust __objclass__ for the origin.
            obj = getattr(cls, name)
            homecls = getattr(obj, "__objclass__", homecls)
        # Classify the object.
        if isinstance(obj, staticmethod):
            kind = "static method"
        elif isinstance(obj, classmethod):
            kind = "class method"
        elif isinstance(obj, property):
            kind = "property"
        elif ismethoddescriptor(obj):
            kind = "method"
        elif isdatadescriptor(obj):
            kind = "data"
        else:
            # Ambiguous raw object: re-fetch through getattr and classify
            # the bound form instead (functions become methods here).
            obj_via_getattr = getattr(cls, name)
            if (isfunction(obj_via_getattr) or
                ismethoddescriptor(obj_via_getattr)):
                kind = "method"
            else:
                kind = "data"
                obj = obj_via_getattr
        result.append(Attribute(name, kind, homecls, obj))
    return result
# ----------------------------------------------------------- class helpers
def getmro(cls):
    "Return tuple of base classes (including cls) in method resolution order."
    # The interpreter computes and caches the C3 linearization for us.
    return cls.__mro__
# -------------------------------------------------- source code extraction
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    expanded = line.expandtabs()
    # Leading-whitespace width = total length minus length after lstrip.
    return len(expanded) - len(expanded.lstrip())
def getdoc(object):
    """Get the documentation string for an object.

    All tabs are expanded to spaces.  To clean up docstrings that are
    indented to line up with blocks of code, any whitespace that can be
    uniformly removed from the second line onwards is removed."""
    # Missing or non-string __doc__ (e.g. a property) yields None.
    doc = getattr(object, '__doc__', None)
    if not isinstance(doc, str):
        return None
    return cleandoc(doc)
def cleandoc(doc):
    """Clean up indentation from docstrings.

    Tabs are expanded, any whitespace that can be uniformly removed from
    the second line onwards is stripped, and blank lines at either end
    are dropped.  Returns None if *doc* cannot be processed."""
    try:
        lines = doc.expandtabs().split('\n')
    except UnicodeError:
        return None
    # Smallest indentation over all non-blank lines after the first.
    margin = sys.maxsize
    for row in lines[1:]:
        body = row.lstrip()
        if body:
            margin = min(margin, len(row) - len(body))
    # The first line never contributes to the margin; strip it outright.
    if lines:
        lines[0] = lines[0].lstrip()
    if margin < sys.maxsize:
        lines[1:] = [row[margin:] for row in lines[1:]]
    # Drop trailing, then leading, blank lines.
    while lines and not lines[-1]:
        del lines[-1]
    while lines and not lines[0]:
        del lines[0]
    return '\n'.join(lines)
def getfile(object):
    """Work out which source or compiled file an object was defined in."""
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in module'.format(object))
    if isclass(object):
        # Classes don't record a file; fall back to the module they
        # claim to live in.  NOTE: object is rebound to that module (or
        # None if the lookup fails), so the error below formats the
        # module, not the original class.
        object = sys.modules.get(object.__module__)
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in class'.format(object))
    # Progressively narrow method -> function -> code, and
    # traceback -> frame -> code; code objects carry co_filename.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        return object.co_filename
    raise TypeError('{!r} is not a module, class, method, '
                    'function, traceback, frame, or code object'.format(object))
# Result record for getmoduleinfo(): module name, matched filename suffix,
# open mode and imp module-type constant.
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file."""
    # Deprecated: relies on the `imp` module (itself deprecated and
    # removed in Python 3.12) — prefer getmodulename()/importlib.
    warnings.warn('inspect.getmoduleinfo() is deprecated', DeprecationWarning,
                  2)
    filename = os.path.basename(path)
    # Sort by negative length so longer suffixes are tried first, in
    # case one suffix is a tail of another.
    suffixes = [(-len(suffix), suffix, mode, mtype)
                    for suffix, mode, mtype in imp.get_suffixes()]
    suffixes.sort() # try longest suffixes first, in case they overlap
    for neglen, suffix, mode, mtype in suffixes:
        if filename[neglen:] == suffix:
            return ModuleInfo(filename[:neglen], suffix, mode, mtype)
    # Implicitly returns None when no registered suffix matches.
def getmodulename(path):
    """Return the module name for a given file, or None."""
    base = os.path.basename(path)
    # Try longer suffixes first so overlapping suffixes (e.g. '.py' vs
    # a longer compiled suffix) resolve to the longest match.
    for suffix in sorted(importlib.machinery.all_suffixes(),
                         key=len, reverse=True):
        if base.endswith(suffix):
            return base[:-len(suffix)]
    return None
def getsourcefile(object):
    """Return the filename that can be used to locate an object's source.
    Return None if no way can be identified to get the source.
    """
    filename = getfile(object)
    bytecode_suffixes = tuple(
        importlib.machinery.DEBUG_BYTECODE_SUFFIXES +
        importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES)
    if filename.endswith(bytecode_suffixes):
        # Map a byte-compiled file back to the corresponding source name.
        filename = (os.path.splitext(filename)[0] +
                    importlib.machinery.SOURCE_SUFFIXES[0])
    elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)):
        # Extension modules have no Python source at all.
        return None
    if os.path.exists(filename):
        return filename
    # Accept a non-existent filename if the module has a PEP 302 loader
    # that may be able to supply the source...
    if hasattr(getmodule(object, filename), '__loader__'):
        return filename
    # ...or if the linecache already holds the file's lines.
    if filename in linecache.cache:
        return filename
def getabsfile(object, _filename=None):
    """Return an absolute path to the source or compiled file for an object.

    The idea is for each object to have a unique origin, so this routine
    normalizes the result as much as possible."""
    name = _filename
    if name is None:
        # Prefer the source file; fall back to whatever getfile() finds.
        name = getsourcefile(object) or getfile(object)
    return os.path.normcase(os.path.abspath(name))
# Cache: absolute (and realpath'd) file name -> module __name__.
modulesbyfile = {}
# Cache: module name -> the __file__ it was last mapped under, used to
# skip modules that have already been indexed in modulesbyfile.
_filesbymodname = {}
def getmodule(object, _filename=None):
    """Return the module an object was defined in, or None if not found.

    *_filename* is an internal optimization hint: a file name already
    known for the object, used to consult the cache before computing
    the absolute path."""
    if ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = getabsfile(object, _filename)
    except TypeError:
        # getfile()/getabsfile() cannot handle this object type.
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again
    # Copy sys.modules in order to cope with changes while iterating
    for modname, module in list(sys.modules.items()):
        if ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins
    builtin = sys.modules['builtins']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
    # Implicitly returns None when every lookup strategy fails.
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved."""
    file = getfile(object)
    sourcefile = getsourcefile(object)
    # Allow pseudo-files like '<stdin>'/'<string>' through to linecache,
    # which may still hold their lines.
    if not sourcefile and file[:1] + file[-1:] != '<>':
        raise IOError('source code not available')
    file = sourcefile if sourcefile else file
    module = getmodule(object, file)
    if module:
        # Passing the module globals lets linecache consult the loader.
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')
    if ismodule(object):
        return lines, 0
    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')
    # Narrow everything else down to a code object.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        lnum = object.co_firstlineno - 1
        # Scan backwards for the 'def', a lambda, or a decorator line,
        # since co_firstlineno points at the first body-relevant line.
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise IOError('could not find code object')
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.

    Returns None when source can't be found.
    """
    try:
        lines, lnum = findsource(object)
    except (IOError, TypeError):
        return None
    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        # Skip over a shebang line and any blank/empty-comment lines.
        if lines and lines[0][:2] == '#!': start = 1
        while start < len(lines) and lines[start].strip() in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(lines[end].expandtabs())
                end = end + 1
            return ''.join(comments)
    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and lines[end].lstrip()[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [lines[end].expandtabs().lstrip()]
            if end > 0:
                # Walk upwards collecting contiguous same-indent comments,
                # prepending so the result stays in file order.
                end = end - 1
                comment = lines[end].expandtabs().lstrip()
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = lines[end].expandtabs().lstrip()
            # Trim bare '#' lines from both ends of the block.
            while comments and comments[0].strip() == '#':
                comments[:1] = []
            while comments and comments[-1].strip() == '#':
                comments[-1:] = []
            return ''.join(comments)
    # Implicitly returns None when no comment block is found.
# Control-flow exception used by BlockFinder/getblock to stop tokenizing.
class EndOfBlock(Exception): pass
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        self.indent = 0        # current INDENT/DEDENT nesting depth
        self.islambda = False  # True once a 'lambda' keyword starts the block
        self.started = False   # True once def/class/lambda has been seen
        self.passline = False  # skip tokens until the next NEWLINE
        self.last = 1          # last physical line known to be in the block
    def tokeneater(self, type, token, srowcol, erowcol, line):
        """Consume one token (tokenize 5-tuple); raise EndOfBlock when the
        block that starts at the top of the token stream has ended."""
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda: # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            # not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock
def getblock(lines):
    """Extract the block of code at the top of the given list of lines."""
    finder = BlockFinder()
    try:
        # Feed tokens until the finder signals the block has ended.
        for tok in tokenize.generate_tokens(iter(lines).__next__):
            finder.tokeneater(*tok)
    except (EndOfBlock, IndentationError):
        # EndOfBlock is the normal terminator; IndentationError means the
        # remainder of the file is not tokenizable past the block.
        pass
    return lines[:finder.last]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of the lines
    corresponding to the object and the line number indicates where in the
    original source file the first line of code was found.  An IOError is
    raised if the source code cannot be retrieved."""
    lines, lnum = findsource(object)
    # Modules get the whole file; anything else gets just its block,
    # with the line number converted to 1-based.
    if ismodule(object):
        return lines, 0
    return getblock(lines[lnum:]), lnum + 1
def getsource(object):
    """Return the text of the source code for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a single string.  An
    IOError is raised if the source code cannot be retrieved."""
    sourcelines, _ = getsourcelines(object)
    return ''.join(sourcelines)
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper function for getclasstree().

    Emits a (class, bases) entry for each class (sorted by module and
    name), nesting the sub-list for a class's children right after it."""
    classes.sort(key=attrgetter('__module__', '__name__'))
    tree = []
    for klass in classes:
        tree.append((klass, klass.__bases__))
        if klass in children:
            tree.append(walktree(children[klass], children, klass))
    return tree
def getclasstree(classes, unique=False):
    """Arrange the given list of classes into a hierarchy of nested lists.

    Where a nested list appears, it contains classes derived from the class
    whose entry immediately precedes the list.  Each entry is a 2-tuple
    containing a class and a tuple of its base classes.  If the 'unique'
    argument is true, exactly one entry appears in the returned structure
    for each class in the given list.  Otherwise, classes using multiple
    inheritance and their descendants will appear multiple times."""
    children = {}
    roots = []
    for klass in classes:
        bases = klass.__bases__
        if not bases:
            # No bases at all: a root of the tree.
            if klass not in roots:
                roots.append(klass)
            continue
        for base in bases:
            kids = children.setdefault(base, [])
            if klass not in kids:
                kids.append(klass)
            # In unique mode, record only the first in-list parent.
            if unique and base in classes:
                break
    # Parents referenced but not listed themselves also become roots.
    for base in children:
        if base not in classes:
            roots.append(base)
    return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
# Result record for getargs().
Arguments = namedtuple('Arguments', ['args', 'varargs', 'varkw'])
def getargs(co):
    """Get information about the arguments accepted by a code object.

    Three things are returned: (args, varargs, varkw), where
    'args' is the list of argument names (keyword-only names appended
    at the end) and 'varargs'/'varkw' are the names of the * and **
    arguments or None."""
    positional, star, kwonly, starstar = _getfullargs(co)
    return Arguments(positional + kwonly, star, starstar)
def _getfullargs(co):
    """Get information about the arguments accepted by a code object.

    Four things are returned: (args, varargs, kwonlyargs, varkw), where
    'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
    and 'varkw' are the names of the * and ** arguments or None.

    Raises TypeError if *co* is not a code object."""
    if not iscode(co):
        raise TypeError('{!r} is not a code object'.format(co))
    nargs = co.co_argcount
    names = co.co_varnames
    nkwargs = co.co_kwonlyargcount
    # co_varnames lists positional argument names first, then the
    # keyword-only argument names, then *args/**kwargs (if present),
    # then ordinary locals.
    args = list(names[:nargs])
    kwonlyargs = list(names[nargs:nargs+nkwargs])
    # (removed: dead local 'step = 0' that was never used)
    nargs += nkwargs
    varargs = None
    if co.co_flags & CO_VARARGS:
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        # The **kwargs slot follows *args when both are present.
        varkw = co.co_varnames[nargs]
    return args, varargs, kwonlyargs, varkw
# Result record for getargspec().
ArgSpec = namedtuple('ArgSpec', ['args', 'varargs', 'keywords', 'defaults'])
def getargspec(func):
    """Get the names and default values of a function's arguments.

    A tuple of four things is returned: (args, varargs, varkw, defaults).
    'args' is a list of the argument names (including keyword-only names).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.

    Use the getfullargspec() API for Python-3000 code, as annotations
    and keyword arguments are supported.  getargspec() will raise ValueError
    if the func has either annotations or keyword arguments."""
    (args, varargs, varkw, defaults,
     kwonlyargs, kwonlydefaults, annotations) = getfullargspec(func)
    # This legacy API cannot represent kw-only args or annotations.
    if kwonlyargs or annotations:
        raise ValueError("Function has keyword-only arguments or annotations"
                         ", use getfullargspec() API which can support them")
    return ArgSpec(args, varargs, varkw, defaults)
# Result record for getfullargspec().
FullArgSpec = namedtuple('FullArgSpec',
    ['args', 'varargs', 'varkw', 'defaults',
     'kwonlyargs', 'kwonlydefaults', 'annotations'])
def getfullargspec(func):
    """Get the names and default values of a function's arguments.

    A tuple of seven things is returned:
    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
    'args' is a list of the argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.
    'kwonlyargs' is a list of keyword-only argument names.
    'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
    'annotations' is a dictionary mapping argument names to annotations.
    The first four items in the tuple correspond to getargspec()."""
    target = func
    if ismethod(target):
        # Inspect the function object a bound method wraps.
        target = target.__func__
    if not isfunction(target):
        raise TypeError('{!r} is not a Python function'.format(target))
    args, varargs, kwonlyargs, varkw = _getfullargs(target.__code__)
    return FullArgSpec(args, varargs, varkw, target.__defaults__,
                       kwonlyargs, target.__kwdefaults__,
                       target.__annotations__)
# Result record for getargvalues().
ArgInfo = namedtuple('ArgInfo', ['args', 'varargs', 'keywords', 'locals'])
def getargvalues(frame):
    """Get information about arguments passed into a particular frame.

    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names, 'varargs' and 'varkw' are the
    names of the * and ** arguments or None, and 'locals' is the locals
    dictionary of the given frame."""
    names, star_name, dstar_name = getargs(frame.f_code)
    return ArgInfo(names, star_name, dstar_name, frame.f_locals)
def formatannotation(annotation, base_module=None):
    """Render an annotation for display.

    Classes are shown by name, qualified with their module unless they
    live in builtins or in *base_module*; anything else is repr()'d."""
    if not isinstance(annotation, type):
        return repr(annotation)
    module = annotation.__module__
    if module in ('builtins', base_module):
        return annotation.__name__
    return module + '.' + annotation.__name__
def formatannotationrelativeto(object):
    """Return a one-argument annotation formatter whose class names are
    resolved relative to *object*'s module."""
    base = getattr(object, '__module__', None)
    def _relative(annotation):
        return formatannotation(annotation, base)
    return _relative
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  kwonlyargs=(), kwonlydefaults={}, annotations={},
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  formatreturns=lambda text: ' -> ' + text,
                  formatannotation=formatannotation):
    """Format an argument spec from the values returned by getargspec
    or getfullargspec.

    The first seven arguments are (args, varargs, varkw, defaults,
    kwonlyargs, kwonlydefaults, annotations).  The other five arguments
    are the corresponding optional formatting functions that are called to
    turn names and values into strings.  The last argument is an optional
    function to format the sequence of arguments.

    NOTE: the mutable {} defaults are safe here only because they are
    never mutated; they are kept for interface compatibility."""
    def formatargandannotation(arg):
        # Render "name" or "name: annotation".
        result = formatarg(arg)
        if arg in annotations:
            result += ': ' + formatannotation(annotations[arg])
        return result
    specs = []
    if defaults:
        # Defaults align with the *last* len(defaults) positional args.
        firstdefault = len(args) - len(defaults)
    for i, arg in enumerate(args):
        spec = formatargandannotation(arg)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(formatargandannotation(varargs)))
    else:
        # A bare '*' separator is required before keyword-only args
        # when there is no *args parameter.
        if kwonlyargs:
            specs.append('*')
    if kwonlyargs:
        for kwonlyarg in kwonlyargs:
            spec = formatargandannotation(kwonlyarg)
            if kwonlydefaults and kwonlyarg in kwonlydefaults:
                spec += formatvalue(kwonlydefaults[kwonlyarg])
            specs.append(spec)
    if varkw is not None:
        specs.append(formatvarkw(formatargandannotation(varkw)))
    result = '(' + ', '.join(specs) + ')'
    if 'return' in annotations:
        # The return annotation is stored under the 'return' key.
        result += formatreturns(formatannotation(annotations['return']))
    return result
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value)):
    """Format an argument spec from the 4 values returned by getargvalues.

    The first four arguments are (args, varargs, varkw, locals).  The
    next four arguments are the corresponding optional formatting
    functions that are called to turn names and values into strings."""
    def render(name):
        # "name=value", with the value looked up in the frame locals.
        return formatarg(name) + formatvalue(locals[name])
    specs = [render(name) for name in args]
    if varargs:
        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + ', '.join(specs) + ')'
def _missing_arguments(f_name, argnames, pos, values):
names = [repr(name) for name in argnames if name not in values]
missing = len(names)
if missing == 1:
s = names[0]
elif missing == 2:
s = "{} and {}".format(*names)
else:
tail = ", {} and {}".format(names[-2:])
del names[-2:]
s = ", ".join(names) + tail
raise TypeError("%s() missing %i required %s argument%s: %s" %
(f_name, missing,
"positional" if pos else "keyword-only",
"" if missing == 1 else "s", s))
def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
atleast = len(args) - defcount
kwonly_given = len([arg for arg in kwonly if arg in values])
if varargs:
plural = atleast != 1
sig = "at least %d" % (atleast,)
elif defcount:
plural = True
sig = "from %d to %d" % (atleast, len(args))
else:
plural = len(args) != 1
sig = str(len(args))
kwonly_sig = ""
if kwonly_given:
msg = " positional argument%s (and %d keyword-only argument%s)"
kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
"s" if kwonly_given != 1 else ""))
raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
(f_name, sig, "s" if plural else "", given, kwonly_sig,
"was" if given == 1 and not kwonly_given else "were"))
def getcallargs(func, *positional, **named):
    """Get the mapping of arguments to values.

    A dict is returned, with keys the function argument names (including the
    names of the * and ** arguments, if any), and values the respective bound
    values from 'positional' and 'named'.  Raises TypeError in the same
    situations the interpreter would when actually calling *func*."""
    spec = getfullargspec(func)
    args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
    f_name = func.__name__
    arg2value = {}
    if ismethod(func) and func.__self__ is not None:
        # implicit 'self' (or 'cls' for classmethods) argument
        positional = (func.__self__,) + positional
    num_pos = len(positional)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0
    # Bind the leading positional values to parameter names.
    n = min(num_pos, num_args)
    for i in range(n):
        arg2value[args[i]] = positional[i]
    if varargs:
        arg2value[varargs] = tuple(positional[n:])
    possible_kwargs = set(args + kwonlyargs)
    if varkw:
        arg2value[varkw] = {}
    for kw, value in named.items():
        if kw not in possible_kwargs:
            if not varkw:
                raise TypeError("%s() got an unexpected keyword argument %r" %
                                (f_name, kw))
            arg2value[varkw][kw] = value
            continue
        if kw in arg2value:
            raise TypeError("%s() got multiple values for argument %r" %
                            (f_name, kw))
        arg2value[kw] = value
    if num_pos > num_args and not varargs:
        _too_many(f_name, args, kwonlyargs, varargs, num_defaults,
                  num_pos, arg2value)
    if num_pos < num_args:
        # Required positional args must all be bound; the rest take defaults.
        req = args[:num_args - num_defaults]
        for arg in req:
            if arg not in arg2value:
                _missing_arguments(f_name, req, True, arg2value)
        for i, arg in enumerate(args[num_args - num_defaults:]):
            if arg not in arg2value:
                arg2value[arg] = defaults[i]
    missing = 0
    for kwarg in kwonlyargs:
        if kwarg not in arg2value:
            # BUG FIX: kwonlydefaults is None when no keyword-only argument
            # has a default; guard before the membership test, otherwise
            # 'kwarg in None' raises TypeError instead of reporting the
            # missing argument properly.
            if kwonlydefaults and kwarg in kwonlydefaults:
                arg2value[kwarg] = kwonlydefaults[kwarg]
            else:
                missing += 1
    if missing:
        _missing_arguments(f_name, kwonlyargs, False, arg2value)
    return arg2value
# Result record for getclosurevars().
ClosureVars = namedtuple('ClosureVars',
                         ['nonlocals', 'globals', 'builtins', 'unbound'])
def getclosurevars(func):
    """
    Get the mapping of free variables to their current values.

    Returns a named tuple of dicts mapping the current nonlocal, global
    and builtin references as seen by the body of the function. A final
    set of unbound names that could not be resolved is also provided.
    """
    if ismethod(func):
        func = func.__func__
    if not isfunction(func):
        raise TypeError("'{!r}' is not a Python function".format(func))
    code = func.__code__
    # Nonlocals: co_freevars names paired with the closure cell contents.
    closure = func.__closure__
    if closure is None:
        nonlocal_vars = {}
    else:
        nonlocal_vars = dict(zip(code.co_freevars,
                                 (cell.cell_contents for cell in closure)))
    # Globals and builtins are resolved from __globals__/__builtins__.
    global_ns = func.__globals__
    builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
    if ismodule(builtin_ns):
        builtin_ns = builtin_ns.__dict__
    global_vars = {}
    builtin_vars = {}
    unbound_names = set()
    for name in code.co_names:
        if name in ("None", "True", "False"):
            # Because these used to be builtins instead of keywords, they
            # may still show up as name references. We ignore them.
            continue
        try:
            global_vars[name] = global_ns[name]
        except KeyError:
            try:
                builtin_vars[name] = builtin_ns[name]
            except KeyError:
                unbound_names.add(name)
    return ClosureVars(nonlocal_vars, global_vars,
                       builtin_vars, unbound_names)
# -------------------------------------------------- stack frame extraction
# Result record for getframeinfo(): 'code_context' is the list of source
# lines around the current line (or None when unavailable/unrequested) and
# 'index' is the current line's position within that list (or None).
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.
    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line.

    Raises TypeError if 'frame' is neither a frame nor a traceback object.
    """
    if istraceback(frame):
        # For tracebacks, report the line executing in that traceback entry,
        # then continue working with the underlying frame object.
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('{!r} is not a frame or traceback object'.format(frame))
    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the context window on the current line (0-based start).
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except IOError:
            # Source is unavailable (e.g. compiled or interactive code).
            lines = index = None
        else:
            # Clamp the window so it stays within the file's bounds.
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Get the line number from a frame object, allowing for optimization."""
    # f_lineno is a descriptor that derives the current source line from
    # the bytecode position on every access.
    current_line = frame.f_lineno
    return current_line
def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.
    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = frame
    # Walk up the call chain via f_back until the outermost frame.
    while current is not None:
        records.append((current,) + getframeinfo(current, context))
        current = current.f_back
    return records
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.
    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = tb
    # Walk down the traceback chain via tb_next to the innermost frame.
    while current is not None:
        records.append((current.tb_frame,) + getframeinfo(current, context))
        current = current.tb_next
    return records
def currentframe():
    """Return the frame of the caller or None if this is not possible."""
    # sys._getframe is a CPython implementation detail; other interpreters
    # may not provide it, in which case we signal that with None.
    if hasattr(sys, "_getframe"):
        return sys._getframe(1)
    return None
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    caller = sys._getframe(1)
    return getouterframes(caller, context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    exc_traceback = sys.exc_info()[2]
    return getinnerframes(exc_traceback, context)
# ------------------------------------------------ static version of getattr
_sentinel = object()
def _static_getmro(klass):
return type.__dict__['__mro__'].__get__(klass)
def _check_instance(obj, attr):
    """Look up attr in obj's instance __dict__ without invoking descriptors.

    Returns _sentinel when the attribute is not present.
    """
    try:
        instance_dict = object.__getattribute__(obj, "__dict__")
    except AttributeError:
        # Slotted instances (and many builtins) carry no __dict__ at all.
        instance_dict = {}
    # Unbound dict.get keeps the lookup static even if __dict__ were a
    # dict subclass with an overridden get().
    return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
    """Search klass's static MRO for attr, returning _sentinel if absent.

    Entries whose metaclass shadows '__dict__' are skipped, because their
    class dict cannot be read reliably.
    """
    for entry in _static_getmro(klass):
        if _shadowed_dict(type(entry)) is not _sentinel:
            continue
        try:
            return entry.__dict__[attr]
        except KeyError:
            pass
    return _sentinel
def _is_type(obj):
    """Return True if obj is a class, judged without dynamic attribute hooks."""
    # Only type instances have the '__mro__' slot; non-classes make the
    # descriptor __get__ raise TypeError.
    try:
        _static_getmro(obj)
        return True
    except TypeError:
        return False
def _shadowed_dict(klass):
    """Return the object shadowing '__dict__' in klass's MRO, if any.

    Walks the static MRO looking for a '__dict__' entry. If the first one
    found is anything other than the genuine slot descriptor for that class,
    it is a shadowing value and is returned; otherwise _sentinel is returned,
    meaning instance __dict__ access for this class is trustworthy.
    """
    # The real '__dict__' accessor for types, fetched statically.
    dict_attr = type.__dict__["__dict__"]
    for entry in _static_getmro(klass):
        try:
            class_dict = dict_attr.__get__(entry)["__dict__"]
        except KeyError:
            pass
        else:
            # A genuine (non-shadowing) '__dict__' is a getset descriptor
            # named '__dict__' whose owner is this very class; anything
            # else means user code has shadowed it.
            if not (type(class_dict) is types.GetSetDescriptorType and
                   class_dict.__name__ == "__dict__" and
                   class_dict.__objclass__ is entry):
                return class_dict
    return _sentinel
def getattr_static(obj, attr, default=_sentinel):
    """Retrieve attributes without triggering dynamic lookup via the
       descriptor protocol,  __getattr__ or __getattribute__.

       Note: this function may not be able to retrieve all attributes
       that getattr can fetch (like dynamically created attributes)
       and may find attributes that getattr can't (like descriptors
       that raise AttributeError). It can also return descriptor objects
       instead of instance members in some cases. See the
       documentation for details.

       Raises AttributeError if the attribute cannot be found and no
       'default' was supplied.
    """
    instance_result = _sentinel
    if not _is_type(obj):
        klass = type(obj)
        # Only trust the instance __dict__ if the class does not shadow
        # '__dict__' (or shadows it with the slot member descriptor).
        dict_attr = _shadowed_dict(klass)
        if (dict_attr is _sentinel or
            type(dict_attr) is types.MemberDescriptorType):
            instance_result = _check_instance(obj, attr)
    else:
        klass = obj
    klass_result = _check_class(klass, attr)
    if instance_result is not _sentinel and klass_result is not _sentinel:
        # A data descriptor (defines both __get__ and __set__) on the class
        # takes precedence over the instance attribute, mirroring normal
        # attribute lookup rules.
        if (_check_class(type(klass_result), '__get__') is not _sentinel and
            _check_class(type(klass_result), '__set__') is not _sentinel):
            return klass_result
    if instance_result is not _sentinel:
        return instance_result
    if klass_result is not _sentinel:
        return klass_result
    if obj is klass:
        # for types we check the metaclass too
        for entry in _static_getmro(type(klass)):
            if _shadowed_dict(type(entry)) is _sentinel:
                try:
                    return entry.__dict__[attr]
                except KeyError:
                    pass
    if default is not _sentinel:
        return default
    raise AttributeError(attr)
# ------------------------------------------------ generator introspection
# Public state names returned by getgeneratorstate().
GEN_CREATED = 'GEN_CREATED'
GEN_RUNNING = 'GEN_RUNNING'
GEN_SUSPENDED = 'GEN_SUSPENDED'
GEN_CLOSED = 'GEN_CLOSED'
def getgeneratorstate(generator):
    """Get current state of a generator-iterator.
    Possible states are:
      GEN_CREATED: Waiting to start execution.
      GEN_RUNNING: Currently being executed by the interpreter.
      GEN_SUSPENDED: Currently suspended at a yield expression.
      GEN_CLOSED: Execution has completed.
    """
    # Check the running flag first: a running generator also has a frame.
    if generator.gi_running:
        return GEN_RUNNING
    frame = generator.gi_frame
    if frame is None:
        # The frame is released once the generator finishes or is closed.
        return GEN_CLOSED
    # f_lasti stays -1 until the first instruction has been executed.
    return GEN_CREATED if frame.f_lasti == -1 else GEN_SUSPENDED
def getgeneratorlocals(generator):
    """
    Get the mapping of generator local variables to their current values.

    A dict is returned, with the keys the local variable names and values
    the bound values. Once the generator has finished and its frame has
    been released, an empty dict is returned.
    """
    if not isgenerator(generator):
        raise TypeError("'{!r}' is not a Python generator".format(generator))
    frame = getattr(generator, "gi_frame", None)
    if frame is None:
        return {}
    return generator.gi_frame.f_locals
###############################################################################
### Function Signature Object (PEP 362)
###############################################################################
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
types.BuiltinFunctionType)
def _get_user_defined_method(cls, method_name):
try:
meth = getattr(cls, method_name)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
# Once '__signature__' will be added to 'C'-level
# callables, this check won't be necessary
return meth
def signature(obj):
    '''Get a signature object for the passed callable.

    Resolution order: bound methods recurse on the underlying function
    (dropping 'self'/'cls'); an explicit __signature__ wins; decorated
    callables follow __wrapped__; plain functions are introspected;
    functools.partial objects get the wrapped signature with pre-bound
    arguments folded in; classes use __call__/__new__/__init__; other
    callables use their type's __call__. Raises TypeError for
    non-callables and ValueError for unsupported callables.
    '''
    if not callable(obj):
        raise TypeError('{!r} is not a callable object'.format(obj))
    if isinstance(obj, types.MethodType):
        # In this case we skip the first parameter of the underlying
        # function (usually `self` or `cls`).
        sig = signature(obj.__func__)
        return sig.replace(parameters=tuple(sig.parameters.values())[1:])
    try:
        sig = obj.__signature__
    except AttributeError:
        pass
    else:
        if sig is not None:
            return sig
    try:
        # Was this function wrapped by a decorator?
        wrapped = obj.__wrapped__
    except AttributeError:
        pass
    else:
        return signature(wrapped)
    if isinstance(obj, types.FunctionType):
        return Signature.from_function(obj)
    if isinstance(obj, functools.partial):
        # Start from the wrapped callable's signature and fold in the
        # partial's pre-bound positional and keyword arguments.
        sig = signature(obj.func)
        new_params = OrderedDict(sig.parameters.items())
        partial_args = obj.args or ()
        partial_keywords = obj.keywords or {}
        try:
            ba = sig.bind_partial(*partial_args, **partial_keywords)
        except TypeError as ex:
            msg = 'partial object {!r} has incorrect arguments'.format(obj)
            raise ValueError(msg) from ex
        for arg_name, arg_value in ba.arguments.items():
            param = new_params[arg_name]
            if arg_name in partial_keywords:
                # We set a new default value, because the following code
                # is correct:
                #
                #   >>> def foo(a): print(a)
                #   >>> print(partial(partial(foo, a=10), a=20)())
                #   20
                #   >>> print(partial(partial(foo, a=10), a=20)(a=30))
                #   30
                #
                # So, with 'partial' objects, passing a keyword argument is
                # like setting a new default value for the corresponding
                # parameter
                #
                # We also mark this parameter with '_partial_kwarg'
                # flag.  Later, in '_bind', the 'default' value of this
                # parameter will be added to 'kwargs', to simulate
                # the 'functools.partial' real call.
                new_params[arg_name] = param.replace(default=arg_value,
                                                     _partial_kwarg=True)
            elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
                not param._partial_kwarg):
                # Positionally pre-bound parameters disappear from the
                # resulting signature entirely.
                new_params.pop(arg_name)
        return sig.replace(parameters=new_params.values())
    sig = None
    if isinstance(obj, type):
        # obj is a class or a metaclass
        # First, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = _get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = signature(call)
        else:
            # Now we check if the 'obj' class has a '__new__' method
            new = _get_user_defined_method(obj, '__new__')
            if new is not None:
                sig = signature(new)
            else:
                # Finally, we should have at least __init__ implemented
                init = _get_user_defined_method(obj, '__init__')
                if init is not None:
                    sig = signature(init)
    elif not isinstance(obj, _NonUserDefinedCallables):
        # An object with __call__
        # We also check that the 'obj' is not an instance of
        # _WrapperDescriptor or _MethodWrapper to avoid
        # infinite recursion (and even potential segfault)
        call = _get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = signature(call)
    if sig is not None:
        # For classes and objects we skip the first parameter of their
        # __call__, __new__, or __init__ methods
        return sig.replace(parameters=tuple(sig.parameters.values())[1:])
    if isinstance(obj, types.BuiltinFunctionType):
        # Raise a nicer error message for builtins
        msg = 'no signature found for builtin function {!r}'.format(obj)
        raise ValueError(msg)
    raise ValueError('callable {!r} is not supported by signature'.format(obj))
# Sentinel meaning "argument not supplied" in the replace() methods below,
# so that None and _empty remain valid user-supplied values.
class _void:
    '''A private marker - used in Parameter & Signature'''
# Sentinel for "no default value / no annotation specified"; exposed publicly
# as Parameter.empty and Signature.empty.
class _empty:
    pass
class _ParameterKind(int):
def __new__(self, *args, name):
obj = int.__new__(self, *args)
obj._name = name
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {!r}>'.format(self._name)
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter:
    '''Represents a parameter in a function signature.
    Has the following public attributes:
    * name : str
        The name of the parameter as a string.
    * default : object
        The default value for the parameter if specified.  If the
        parameter has no default value, this attribute is not set.
    * annotation
        The annotation for the parameter if specified.  If the
        parameter has no annotation, this attribute is not set.
    * kind : str
        Describes how argument values are bound to the parameter.
        Possible values: `Parameter.POSITIONAL_ONLY`,
        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
    '''
    # __slots__ keeps Parameter instances small; _partial_kwarg marks
    # parameters whose default was injected by a functools.partial keyword
    # (see signature() and Signature._bind).
    __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')
    POSITIONAL_ONLY         = _POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD   = _POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL          = _VAR_POSITIONAL
    KEYWORD_ONLY            = _KEYWORD_ONLY
    VAR_KEYWORD             = _VAR_KEYWORD
    empty = _empty
    def __init__(self, name, kind, *, default=_empty, annotation=_empty,
                 _partial_kwarg=False):
        # Validate kind first, then the kind/default combination, then name.
        if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
                        _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
            raise ValueError("invalid value for 'Parameter.kind' attribute")
        self._kind = kind
        if default is not _empty:
            # *args / **kwargs can never carry default values.
            if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
                msg = '{} parameters cannot have default values'.format(kind)
                raise ValueError(msg)
        self._default = default
        self._annotation = annotation
        if name is None:
            # Nameless parameters are only tolerated for positional-only
            # slots (e.g. some builtins); rendered as '<n>' by __str__.
            if kind != _POSITIONAL_ONLY:
                raise ValueError("None is not a valid name for a "
                                 "non-positional-only parameter")
            self._name = name
        else:
            name = str(name)
            if kind != _POSITIONAL_ONLY and not name.isidentifier():
                msg = '{!r} is not a valid parameter name'.format(name)
                raise ValueError(msg)
            self._name = name
        self._partial_kwarg = _partial_kwarg
    @property
    def name(self):
        return self._name
    @property
    def default(self):
        return self._default
    @property
    def annotation(self):
        return self._annotation
    @property
    def kind(self):
        return self._kind
    def replace(self, *, name=_void, kind=_void, annotation=_void,
                default=_void, _partial_kwarg=_void):
        '''Creates a customized copy of the Parameter.

        Arguments left at the _void sentinel keep their current values;
        validation is re-run by the constructor.
        '''
        if name is _void:
            name = self._name
        if kind is _void:
            kind = self._kind
        if annotation is _void:
            annotation = self._annotation
        if default is _void:
            default = self._default
        if _partial_kwarg is _void:
            _partial_kwarg = self._partial_kwarg
        return type(self)(name, kind, default=default, annotation=annotation,
                          _partial_kwarg=_partial_kwarg)
    def __str__(self):
        kind = self.kind
        formatted = self._name
        if kind == _POSITIONAL_ONLY:
            # Positional-only parameters render in angle brackets and may
            # be nameless.
            if formatted is None:
                formatted = ''
            formatted = '<{}>'.format(formatted)
        # Add annotation and default value
        if self._annotation is not _empty:
            formatted = '{}:{}'.format(formatted,
                                       formatannotation(self._annotation))
        if self._default is not _empty:
            formatted = '{}={}'.format(formatted, repr(self._default))
        if kind == _VAR_POSITIONAL:
            formatted = '*' + formatted
        elif kind == _VAR_KEYWORD:
            formatted = '**' + formatted
        return formatted
    def __repr__(self):
        return '<{} at {:#x} {!r}>'.format(self.__class__.__name__,
                                           id(self), self.name)
    def __eq__(self, other):
        # NOTE: _partial_kwarg is deliberately excluded from equality.
        return (issubclass(other.__class__, Parameter) and
                self._name == other._name and
                self._kind == other._kind and
                self._default == other._default and
                self._annotation == other._annotation)
    def __ne__(self, other):
        return not self.__eq__(other)
class BoundArguments:
    '''Result of `Signature.bind` call.  Holds the mapping of arguments
    to the function's parameters.
    Has the following public attributes:
    * arguments : OrderedDict
        An ordered mutable mapping of parameters' names to arguments' values.
        Does not contain arguments' default values.
    * signature : Signature
        The Signature object that created this instance.
    * args : tuple
        Tuple of positional arguments values.
    * kwargs : dict
        Dict of keyword arguments values.
    '''
    def __init__(self, signature, arguments):
        self.arguments = arguments
        self._signature = signature
    @property
    def signature(self):
        return self._signature
    @property
    def args(self):
        # Collect values in parameter order until we hit the first
        # parameter that must be passed by keyword; everything from that
        # point on belongs in 'kwargs' instead.
        args = []
        for param_name, param in self._signature.parameters.items():
            if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                                                param._partial_kwarg):
                # Keyword arguments mapped by 'functools.partial'
                # (Parameter._partial_kwarg is True) are mapped
                # in 'BoundArguments.kwargs', along with VAR_KEYWORD &
                # KEYWORD_ONLY
                break
            try:
                arg = self.arguments[param_name]
            except KeyError:
                # We're done here. Other arguments
                # will be mapped in 'BoundArguments.kwargs'
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # *args
                    args.extend(arg)
                else:
                    # plain argument
                    args.append(arg)
        return tuple(args)
    @property
    def kwargs(self):
        # Mirror of 'args': skip parameters until the first one that args
        # could not cover, then emit the rest as keyword arguments.
        kwargs = {}
        kwargs_started = False
        for param_name, param in self._signature.parameters.items():
            if not kwargs_started:
                if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                                                param._partial_kwarg):
                    kwargs_started = True
                else:
                    if param_name not in self.arguments:
                        kwargs_started = True
                        continue
            if not kwargs_started:
                continue
            try:
                arg = self.arguments[param_name]
            except KeyError:
                pass
            else:
                if param.kind == _VAR_KEYWORD:
                    # **kwargs
                    kwargs.update(arg)
                else:
                    # plain keyword argument
                    kwargs[param_name] = arg
        return kwargs
    def __eq__(self, other):
        return (issubclass(other.__class__, BoundArguments) and
                self.signature == other.signature and
                self.arguments == other.arguments)
    def __ne__(self, other):
        return not self.__eq__(other)
class Signature:
    '''A Signature object represents the overall signature of a function.
    It stores a Parameter object for each parameter accepted by the
    function, as well as information specific to the function itself.
    A Signature object has the following public attributes and methods:
    * parameters : OrderedDict
        An ordered mapping of parameters' names to the corresponding
        Parameter objects (keyword-only arguments are in the same order
        as listed in `code.co_varnames`).
    * return_annotation : object
        The annotation for the return type of the function if specified.
        If the function has no annotation for its return type, this
        attribute is not set.
    * bind(*args, **kwargs) -> BoundArguments
        Creates a mapping from positional and keyword arguments to
        parameters.
    * bind_partial(*args, **kwargs) -> BoundArguments
        Creates a partial mapping from positional and keyword arguments
        to parameters (simulating 'functools.partial' behavior.)
    '''
    __slots__ = ('_return_annotation', '_parameters')
    # Subclasses may swap in their own Parameter/BoundArguments types.
    _parameter_cls = Parameter
    _bound_arguments_cls = BoundArguments
    empty = _empty
    def __init__(self, parameters=None, *, return_annotation=_empty,
                 __validate_parameters__=True):
        '''Constructs Signature from the given list of Parameter
        objects and 'return_annotation'.  All arguments are optional.

        With validation enabled, parameters must appear in non-decreasing
        kind order and carry unique names; nameless positional-only
        parameters are renamed to their index.
        '''
        if parameters is None:
            params = OrderedDict()
        else:
            if __validate_parameters__:
                params = OrderedDict()
                top_kind = _POSITIONAL_ONLY
                for idx, param in enumerate(parameters):
                    kind = param.kind
                    if kind < top_kind:
                        # Kinds must be monotonically non-decreasing
                        # (positional-only, then pos-or-kw, *args,
                        # keyword-only, **kwargs).
                        msg = 'wrong parameter order: {} before {}'
                        msg = msg.format(top_kind, param.kind)
                        raise ValueError(msg)
                    else:
                        top_kind = kind
                    name = param.name
                    if name is None:
                        # Give nameless positional-only parameters a
                        # synthetic name based on their position.
                        name = str(idx)
                        param = param.replace(name=name)
                    if name in params:
                        msg = 'duplicate parameter name: {!r}'.format(name)
                        raise ValueError(msg)
                    params[name] = param
            else:
                params = OrderedDict(((param.name, param)
                                                for param in parameters))
        # Expose parameters through a read-only mapping proxy.
        self._parameters = types.MappingProxyType(params)
        self._return_annotation = return_annotation
    @classmethod
    def from_function(cls, func):
        '''Constructs Signature for the given python function

        Reads the function's code object, __defaults__, __kwdefaults__
        and __annotations__ directly; validation is skipped because the
        interpreter guarantees a well-formed parameter list.
        '''
        if not isinstance(func, types.FunctionType):
            raise TypeError('{!r} is not a Python function'.format(func))
        Parameter = cls._parameter_cls
        # Parameter information.
        func_code = func.__code__
        pos_count = func_code.co_argcount
        arg_names = func_code.co_varnames
        positional = tuple(arg_names[:pos_count])
        keyword_only_count = func_code.co_kwonlyargcount
        keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
        annotations = func.__annotations__
        defaults = func.__defaults__
        kwdefaults = func.__kwdefaults__
        if defaults:
            pos_default_count = len(defaults)
        else:
            pos_default_count = 0
        parameters = []
        # Non-keyword-only parameters w/o defaults.
        non_default_count = pos_count - pos_default_count
        for name in positional[:non_default_count]:
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD))
        # ... w/ defaults.
        for offset, name in enumerate(positional[non_default_count:]):
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD,
                                        default=defaults[offset]))
        # *args  (CO_VARARGS flag)
        if func_code.co_flags & 0x04:
            name = arg_names[pos_count + keyword_only_count]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_POSITIONAL))
        # Keyword-only parameters.
        for name in keyword_only:
            default = _empty
            if kwdefaults is not None:
                default = kwdefaults.get(name, _empty)
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_KEYWORD_ONLY,
                                        default=default))
        # **kwargs  (CO_VARKEYWORDS flag; its slot follows *args if present)
        if func_code.co_flags & 0x08:
            index = pos_count + keyword_only_count
            if func_code.co_flags & 0x04:
                index += 1
            name = arg_names[index]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_KEYWORD))
        return cls(parameters,
                   return_annotation=annotations.get('return', _empty),
                   __validate_parameters__=False)
    @property
    def parameters(self):
        return self._parameters
    @property
    def return_annotation(self):
        return self._return_annotation
    def replace(self, *, parameters=_void, return_annotation=_void):
        '''Creates a customized copy of the Signature.
        Pass 'parameters' and/or 'return_annotation' arguments
        to override them in the new copy.
        '''
        if parameters is _void:
            parameters = self.parameters.values()
        if return_annotation is _void:
            return_annotation = self._return_annotation
        return type(self)(parameters,
                          return_annotation=return_annotation)
    def __eq__(self, other):
        # Two signatures are equal when their return annotations match and
        # their parameters match: keyword-only parameters by name, all
        # others by both name and position.
        if (not issubclass(type(other), Signature) or
                    self.return_annotation != other.return_annotation or
                    len(self.parameters) != len(other.parameters)):
            return False
        other_positions = {param: idx
                           for idx, param in enumerate(other.parameters.keys())}
        for idx, (param_name, param) in enumerate(self.parameters.items()):
            if param.kind == _KEYWORD_ONLY:
                try:
                    other_param = other.parameters[param_name]
                except KeyError:
                    return False
                else:
                    if param != other_param:
                        return False
            else:
                try:
                    other_idx = other_positions[param_name]
                except KeyError:
                    return False
                else:
                    if (idx != other_idx or
                                    param != other.parameters[param_name]):
                        return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def _bind(self, args, kwargs, *, partial=False):
        '''Private method.  Don't use directly.

        Implements the argument-binding algorithm shared by bind() and
        bind_partial(): first consume positional arguments, then match the
        remaining parameters against 'kwargs'. With partial=True, missing
        required arguments are tolerated.
        '''
        arguments = OrderedDict()
        parameters = iter(self.parameters.values())
        parameters_ex = ()
        arg_vals = iter(args)
        if partial:
            # Support for binding arguments to 'functools.partial' objects.
            # See 'functools.partial' case in 'signature()' implementation
            # for details.
            for param_name, param in self.parameters.items():
                if (param._partial_kwarg and param_name not in kwargs):
                    # Simulating 'functools.partial' behavior
                    kwargs[param_name] = param.default
        while True:
            # Let's iterate through the positional arguments and corresponding
            # parameters
            try:
                arg_val = next(arg_vals)
            except StopIteration:
                # No more positional arguments
                try:
                    param = next(parameters)
                except StopIteration:
                    # No more parameters. That's it. Just need to check that
                    # we have no `kwargs` after this while loop
                    break
                else:
                    if param.kind == _VAR_POSITIONAL:
                        # That's OK, just empty *args.  Let's start parsing
                        # kwargs
                        break
                    elif param.name in kwargs:
                        if param.kind == _POSITIONAL_ONLY:
                            msg = '{arg!r} parameter is positional only, ' \
                                  'but was passed as a keyword'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
                        # This parameter will be consumed from kwargs in the
                        # keyword phase; remember it so it isn't skipped.
                        parameters_ex = (param,)
                        break
                    elif (param.kind == _VAR_KEYWORD or
                                                param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter.  So, lets start parsing `kwargs`, starting
                        # with the current parameter
                        parameters_ex = (param,)
                        break
                    else:
                        if partial:
                            parameters_ex = (param,)
                            break
                        else:
                            msg = '{arg!r} parameter lacking default value'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
            else:
                # We have a positional argument to process
                try:
                    param = next(parameters)
                except StopIteration:
                    raise TypeError('too many positional arguments') from None
                else:
                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                        # Looks like we have no parameter for this positional
                        # argument
                        raise TypeError('too many positional arguments')
                    if param.kind == _VAR_POSITIONAL:
                        # We have an '*args'-like argument, let's fill it with
                        # all positional arguments we have left and move on to
                        # the next phase
                        values = [arg_val]
                        values.extend(arg_vals)
                        arguments[param.name] = tuple(values)
                        break
                    if param.name in kwargs:
                        raise TypeError('multiple values for argument '
                                        '{arg!r}'.format(arg=param.name))
                    arguments[param.name] = arg_val
        # Now, we iterate through the remaining parameters to process
        # keyword arguments
        kwargs_param = None
        for param in itertools.chain(parameters_ex, parameters):
            if param.kind == _POSITIONAL_ONLY:
                # This should never happen in case of a properly built
                # Signature object (but let's have this check here
                # to ensure correct behaviour just in case)
                raise TypeError('{arg!r} parameter is positional only, '
                                'but was passed as a keyword'. \
                                format(arg=param.name))
            if param.kind == _VAR_KEYWORD:
                # Memorize that we have a '**kwargs'-like parameter
                kwargs_param = param
                continue
            param_name = param.name
            try:
                arg_val = kwargs.pop(param_name)
            except KeyError:
                # We have no value for this parameter.  It's fine though,
                # if it has a default value, or it is an '*args'-like
                # parameter, left alone by the processing of positional
                # arguments.
                if (not partial and param.kind != _VAR_POSITIONAL and
                                                param.default is _empty):
                    raise TypeError('{arg!r} parameter lacking default value'. \
                                    format(arg=param_name)) from None
            else:
                arguments[param_name] = arg_val
        if kwargs:
            if kwargs_param is not None:
                # Process our '**kwargs'-like parameter
                arguments[kwargs_param.name] = kwargs
            else:
                raise TypeError('too many keyword arguments')
        return self._bound_arguments_cls(self, arguments)
    def bind(__bind_self, *args, **kwargs):
        '''Get a BoundArguments object, that maps the passed `args`
        and `kwargs` to the function's signature.  Raises `TypeError`
        if the passed arguments can not be bound.
        '''
        # Note: the unusual self name keeps 'self' usable as a keyword
        # argument of the bound function.
        return __bind_self._bind(args, kwargs)
    def bind_partial(__bind_self, *args, **kwargs):
        '''Get a BoundArguments object, that partially maps the
        passed `args` and `kwargs` to the function's signature.
        Raises `TypeError` if the passed arguments can not be bound.
        '''
        return __bind_self._bind(args, kwargs, partial=True)
    def __str__(self):
        result = []
        render_kw_only_separator = True
        for idx, param in enumerate(self.parameters.values()):
            formatted = str(param)
            kind = param.kind
            if kind == _VAR_POSITIONAL:
                # OK, we have an '*args'-like parameter, so we won't need
                # a '*' to separate keyword-only arguments
                render_kw_only_separator = False
            elif kind == _KEYWORD_ONLY and render_kw_only_separator:
                # We have a keyword-only parameter to render and we haven't
                # rendered an '*args'-like parameter before, so add a '*'
                # separator to the parameters list ("foo(arg1, *, arg2)" case)
                result.append('*')
                # This condition should be only triggered once, so
                # reset the flag
                render_kw_only_separator = False
            result.append(formatted)
        rendered = '({})'.format(', '.join(result))
        if self.return_annotation is not _empty:
            anno = formatannotation(self.return_annotation)
            rendered += ' -> {}'.format(anno)
        return rendered
|
With the recent news of Malaysian e-sports athletes creating history by winning US$1.4 million (RM5.6 million) in a competitive game of DOTA2, society is slowly being exposed to the idea that our local gamers are honing their talents for something many believed was simply a waste of time.
For our local gamers though, we still don’t have much of a community to broadcast and livestream gameplays to share the experiences we have when playing. So this trio of gamers had the idea to create a platform where gamers not only in Malaysia, but all over the world could join and interact with one another over their mutual passion.
One of the founders, Michelle Tan, had always dabbled in the gaming industry through tournament shoutcasting, which is essentially providing live commentary to an audience during an e-Sports match. She also handled her own streaming on the website Twitch. While doing this, she noticed the absence of a hub where gamers could gather to potentially be discovered by the masses and even by sponsors seeking gaming talent.
Michelle then sat with her co-founder Ariff and discussed this over lunch one day, in hopes of persuading him to jump on board as her tech co-founder. They then added the final member to their trio, Nicholas, who happened to be Michelle’s League of Legends amateur team coach, due to his experience as a community manager.
The team then brainstormed together and came up with Fundeavour, a platform where gamers everywhere can get a headstart in the fields of e-sports, YouTube gaming and livestreaming.
“We wanted to come up with a name that was catchy and reflected what we wanted to achieve. In the end, we decided on Fundeavour, a portmanteau of ‘fun’ and ‘endeavour’. This best represents our vision of becoming a hub for gamers everywhere to connect, have fun, and strive to make a name for themselves,” said Fundeavour to Vulcan Post.
The idea started back in October 2014 but the opportunity to bring it to reality only arose under ‘CodeAr.my’s Project Brainchild’ pre-accelerator program, which was an initiative supported by Khazanah Nasional Berhad that encourages entrepreneurship and innovation within the Khazanah group of companies and the public.
Fundeavour went through Batch One of the program and in 2016, decided to go live and has been functioning ever since.
For those who wish to be recognised as professional gamers, the path ahead is not an easy one. So Fundeavour aims to be the bridge that connects gamers with their potential audience and sponsors who can help them realise their dream careers.
Users who register on the site are able to redeem gaming merchandise based on points they receive when completing ‘quests’ available on the site.
“Our quests are a little unique as they are not the average ‘battle’ quests. We wanted to devise a fun way to encourage gamers to follow their passions, and the paths help you work out what you need to go further. We’re definitely exploring new ways for our quest system to continue to motivate our gamers to do this,” said Fundeavour.
Currently Fundeavour has over 3,000 gamers signed up from over 22 countries worldwide. Out of that number, their users have already completed over 10,000 quests, and counting. There have also been 20 rewards redeemed since the launch of the Rewards portion in April this year.
When it comes to their rewards, Fundeavour wants to ensure that they’re consistently stocking up and refreshing the rewards based on the agreements with their partnered gaming brands.
“The biggest rewards we’ve offered have been headsets and keyboards, notably the current Adventure with Armaggeddon where we’re offering their latest line of mobile gaming headsets—the Molotov 5! We also offer opportunities for gamers to feel what it’s like to be sponsored, including opportunities to be picked to review top-of-the-line equipment for some of our brands,” said Fundeavour.
With approximately 5 years' worth of experience dabbling in the gaming industry between the 3 founders, from shoutcasting and professional gaming to game development, Fundeavour believes that being gamers themselves qualifies them to understand what goes on within the community and what gamers truly seek.
“I worked as a part-time shoutcaster for Garena League of Legends in 2013, created YouTube videos and streamed on the side; Ariff “Ravenost” Wambeck worked on game development and design in his spare time, while Nicholas “Nih” Ngo was formerly a competitive player for League of Legends and Call of Duty 4: Modern Warfare. So it’s safe to say we have a good insight on what the gaming community is like in Malaysia,” said Michelle.
When asked if Fundeavour had succeeded in helping any gamers get more recognition, the team cited Malaysian YouTubers safwangba and CallMeMok, who both reported an increase in subscribers since joining Fundeavour, the latter doubling his count from 100+ to 200+. CallMeMok believes 70-80 of those subscribers are attributable to Fundeavour's assistance.
“Besides YouTubers, we also currently help promote our top gamers via a partnership with Gamer Malaya, where the top three gamers on our leaderboard every month gets an exclusive interview written about them and published on Gamer Malaya.” said Fundeavour.
Talking about the future of gaming in Malaysia, Fundeavour believes it to be vast and that there is still much unexplored potential out there just waiting to be discovered. Seeing as YouTube gamers have grown in popularity and are among some of the most subscribed channels on the site, it's clear how influential gaming is, and Fundeavour hopes the gaming industry will no longer be limited to just gamers becoming greater gamers.
The team hopes for Fundeavour to be the hub gamers go to if they want to kickstart their gaming passions in e-sports, YouTube and streaming.
Whoa. Malaysians Were Behind Some Of The Iconic Titles In The Gaming Industry?!
|
#!/usr/bin/env python
"""
parallel_gripper_controller.py - controls a gripper built of two servos
Copyright (c) 2011 Vanadium Labs LLC. All right reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Vanadium Labs LLC nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import roslib; roslib.load_manifest('arbotix_controllers')
import rospy
import thread
from std_msgs.msg import Float64
from math import asin
class ParallelGripperController:
    """A simple controller that operates two opposing servos to
    open/close to a particular size opening.

    Subscribes to ``~command`` (std_msgs/Float64, desired opening width
    at the computation point, in meters) and publishes the corresponding
    servo angles on ``l_gripper_joint/command`` and
    ``r_gripper_joint/command``.
    """

    def __init__(self):
        rospy.init_node("parallel_gripper_controller")
        # trapezoid model: base width connecting each gripper's rotation point
        # + length of gripper fingers to computation point
        # = compute angles based on a desired width at comp. point
        self.pad_width = rospy.get_param("~pad_width", 0.01)
        self.finger_length = rospy.get_param("~finger_length", 0.02)
        self.min_opening = rospy.get_param("~min", 0.0)
        self.max_opening = rospy.get_param("~max", 2*self.finger_length)
        # per-servo calibration: zero-angle offset and direction of rotation
        self.center_l = rospy.get_param("~center_left", 0.0)
        self.center_r = rospy.get_param("~center_right", 0.0)
        self.invert_l = rospy.get_param("~invert_left", False)
        self.invert_r = rospy.get_param("~invert_right", False)
        # publishers
        self.l_pub = rospy.Publisher("l_gripper_joint/command", Float64)
        self.r_pub = rospy.Publisher("r_gripper_joint/command", Float64)
        # subscribe to command and then spin
        rospy.Subscriber("~command", Float64, self.commandCb)
        rospy.spin()

    def commandCb(self, msg):
        """Take an input command of width to open gripper."""
        # check limits
        if msg.data > self.max_opening or msg.data < self.min_opening:
            rospy.logerr("Command exceeds limits.")
            return
        # compute angles.  Clamp the asin argument to [-1, 1] so that a
        # mis-configured ~min/~max (outside the geometrically reachable
        # range of the trapezoid) saturates the angle instead of raising
        # ValueError inside the subscriber callback.
        ratio = (msg.data - self.pad_width) / (2 * self.finger_length)
        angle = asin(max(-1.0, min(1.0, ratio)))
        if self.invert_l:
            l = -angle + self.center_l
        else:
            l = angle + self.center_l
        if self.invert_r:
            r = angle + self.center_r
        else:
            r = -angle + self.center_r
        # publish msgs
        lmsg = Float64(l)
        rmsg = Float64(r)
        self.l_pub.publish(lmsg)
        self.r_pub.publish(rmsg)
if __name__=="__main__":
    try:
        ParallelGripperController()
    except rospy.ROSInterruptException:
        # node was shut down (e.g. Ctrl-C) while spinning; exit quietly
        rospy.loginfo("Hasta la Vista...")
|
Items where Author is "Abul Hashim, Mohd"
Abul Hashim, Mohd (1993) Applicability of Modified Kobatakes Equation to Artificial and Biological Membranes. (Masters thesis). Aligarh Muslim University.
This list was generated on Sat Apr 20 04:23:41 2019 IST.
|
# Let's try rendering the outline from
# http://en.literateprograms.org/Sieve_of_Eratosthenes_(Haskell)#Putting_it_together
# But I had to peek at their code for merge_all().
# (Could we make diff/merge shorter using Kragen's post on merging?)
# (Or how about defining diff in terms of merge and complement?)
def diff(xs, ys):
    """Yield the elements of sorted stream xs that are absent from
    sorted stream ys (set difference of two increasing streams).

    Both arguments are assumed to be infinite, strictly increasing
    iterators, as in the sieve below.
    """
    a = next(xs)
    b = next(ys)
    while True:
        if a < b:
            yield a
            a = next(xs)
        elif a == b:
            # present in both: skip, advance the left stream
            a = next(xs)
        else:
            b = next(ys)
def merge(xs, ys):
    """Merge two sorted (infinite) streams into one sorted stream,
    emitting an element present in both streams only once."""
    a = next(xs)
    b = next(ys)
    while True:
        if a < b:
            yield a
            a = next(xs)
        elif b < a:
            yield b
            b = next(ys)
        else:
            # equal heads: emit once, advance both
            yield a
            a = next(xs)
            b = next(ys)
from itertools import count
from streams import LazyList
def gen_primes():
    """Generate the primes as an infinite stream (lazy-stream Sieve of
    Eratosthenes).  Relies on the module-level `primes` LazyList being
    bound to this generator's output: primes.tail() re-reads the
    memoized primes from 3 on, keeping the recursion lazy and finite.
    """
    yield 2; yield 3; yield 5
    # odd multiples p*p, p*p+2p, ... of each prime p >= 3
    multiples = merge_all(count(p*p, 2*p) for p in primes.tail())
    # candidate odds from 7 up, minus all composite multiples
    for p in diff(count(7, 2), multiples): yield p
def merge_all(iters):
    "Merge a stream of sorted streams, given map(next, iters) would be strictly increasing."
    # Pull one stream and emit its head before recursing, so the
    # recursion only deepens as far as the output is actually consumed.
    first = next(iters)
    yield next(first)
    yield from merge(first, merge_all(iters))
# Tie the knot: `primes` memoizes gen_primes()'s output so that
# gen_primes() can consume its own earlier elements via primes.tail().
primes = LazyList(gen_primes())
# Print primes forever (runs until interrupted).
for p in primes: print(p)
|
Now that Narendra Modi has become the Prime Minister of India, it’s high time for some honest introspection by the Left and liberals of all hues on our collective failure to offer even a semblance of resistance to the corporate class and its media’s poll-time mythmaking around him. Fellow commentators like Nirmalangshu Mukherjee, Shuddhabrata Sengupta and Mukul Dube have rightly pointed out that Modi has come to power with just 31 per cent of polled votes (21 per cent of the electorate, 14 per cent of the population). But this is hardly a solace for what has happened.
I am yet to come across any insight into his success in enticing overwhelming sections of the urban and semi-urban youth and the burgeoning middle class by virtually promising them an El Dorado in the name of development. In caste paradigms too, the huge transfer of OBC and Dalit as well as tribal votes to the Modified BJP, apart from his accomplishment in consolidating the upper-caste Hindu votes in the Hindi heartland States and elsewhere, also demands rigorous scrutiny. No doubt, Modi has harvested the hope generated by harping on the ‘aspirations’ of young India, their digital-era dreams and increasingly global networking.
The outreach of Modi’s mammoth propaganda machine to the increasing army of Internet and mobile users as well as interactive social media has been unmatched. He has linked his corporate-backed growth agendas of massive and rapid urbanisation, industrialisation and digitalisation to job generation for the educated and semi-educated youth who comprise more than 60 per cent of our total population. These wannabe youth and their middle class parents have become Modi’s natural constituency.
Gone are the decades of the sixties and seventies when the anti-establishment angst of the educated middle class youth had rocked India (and the world) that became germane to the Naxalite movement and later, the Nav Nirman movement. The economic and political scenarios have changed drastically since then. One fallout is the great disconnect between the aspirations today’s middle class youth and the resistances against neo-liberal plunders by tribals, workers, farmers, marginal communities and the struggle for survival of the poorest of the poor. The scourges of unemployment and job insecurities are still there while corruption and inflation have registered a phenomenal upswing. But the economy is now compara-tively more able to absorb a good number of youth, mainly in the market-dominated service sector and unorganised sectors, while alluring a tiny section of the educated to the globally-connected IT sector and high-tech white-colour jobs.
In the meantime, criminalisation of politics has been extended to the corporate use of the lumpenised youth, who work as musclemen for land sharks, recovery agents for banks and sundry dirty jobs for tycoons that defuses their anger against the power elites and turns them against the Aam Aadmi. The general food crisis and other aspects of sustenance blues are no more the fact of daily life for the upwardly mobile section of the middle class while its stakes in the present polity have increased substantially. Hatred against the political class is greater than before but its cutting edge has been lost, both because of the absence of ideological motivation for fundamental changes, widespread cynicism against idealism and the socio-economic cushion still available.
Modi has also played the pied piper to the fast emerging ‘new middle class’ in big cities and numerous small towns, which is not poor per se but yet to secure a firm foothold in the regular middle class milieu as the BJP manifesto had articulated. After having cultivated the same social strata in Gujarat for long, Modi appears to have managed to expand his support base among these people in other parts of the country. It has fetched a double dividend for him since many of these aspiring people belong to the OBCs. The anti-Muslim Gujarat pogrom in 2002 had revealed the Sangh Parivar’s penetration among the OBCs, Dalits and tribals and its success in saffronising a good section of them. The tribal fronts of the Sangh Parivar, including the Vanavasi Kalyan Ashram, have been active among the tribals across the country fomenting hatred against the Christian tribals and Muslims.
According to the National Election Study, conducted by the CSDS-Lokniti team, the BJP has increased its OBC vote-share by 12 points in the last five years, 22 per cent in 2009 to 34 per cent in 2014. Further, it has polled 42 per cent of the lower OBC votes as against 30 per cent of the upper OBCs. Since OBCs count for around 41 per cent of the national population, a thorough study into their voting patterns in the latest general election and the reasons behind their preferences for the BJP would shed light on Modi’s success in social engineering. The same study also revealed that the BJP gained a 14 per cent hike in the overall Hindu support, from 22 per cent in 2009 to 36 per cent in 2014 while its upper-caste vote share has shot up by 18 per cent, from 29 per cent in 2009 to 47 per cent this time. Another interesting finding is that the BJP’s share of Muslim votes has gone up to eight per cent at the national level while the Congress and its allies’ share of Muslim votes remained at 38 per cent since 2009.
In this backdrop, not only the corrupt and discredited Congress but also the confused and compromised Left as well as dispirited liberals, both those who are close to the Congress and its critics together have failed to address Modi’s development constituencies. The task to demystify Modi’s growth fundamentalism and its corollary trickle-down theory remained unattended despite worldwide empirical evidences against the fairy-tale of free market and its automatic distribution of income and wealth. This has contributed in extending and consolidating support for him in addition to his success in communal polarisation in some key States and partially in Bengal too.
Now that Modi is in saddle, how do the Left—both parliamentary and non-parliamentary (that is revolutionary)—plan to win back the middle class youth who have apparently converted to the Modi-mantra? How to reconnect to their different layers as well as bridge the gap between their aspirations and that of the basic masses? How to negotiate with the pro-development popular constituency of Modi, who is now being hailed as the harbinger of Reaganomics or Thatcherite politics in India? How to connect the urbanised youth, at least make them sensitive to various people’s movements against the grabbing of jal-jangal-zameen, evictions and dispossessions for private industries, mining, dams, nuclear or hydel power stations, highways or highrises and malls—in short, the neo-liberal development agenda? How to make them concerned about the attacks on labour rights, civil rights and livelihood rights of millions?
Should we dump the middle class youth and their parents as self-serving betrayers to the disadvantaged majority or wait for another great depression, another bubble burst a la American sub-prime crisis or accumulation of anger a la Greece and Brazil? Or, the Left, both parliamentary and radical, would examine their hypocritical, barren and unimaginative res-ponses to neoliberalism and ponder over the new strategies and tactics for the broadest possible social-political mobilisation against it?
Is it not the ripe time to make distinct strategies to fight for ground-level demands for development in rural and semi-urban areas— schools-colleges, hospitals, better roads and transport, water and power supply, telecomm-unication facilities, in short, bijli-pani-sadak issues while opposing corporate-touted elitist develop-ment? Of course, there are areas of convergences and divergences between the interests of the middle class and basic masses, which we need to deal deftly. The government-corporate nexus often succeeds in selling their agendas to the middle class and even to the poor by divisive propaganda and politics. They turn the rest of the population hostile to movements against land acquisition, evictions and inundation for dams and power plants by accusing the project-affected communities as the obstacles to development for greater societal interests.
Many movements, from the anti-nuclear plant movement in Kudankulam to the anti-eviction agitations in Kolkata have suffered isolation and hostility because of this dubious politics. Should it not trouble the anti-neoliberal forces that we need to crack the siege and reach out to the opponent’s constituency though counter-cam-paigns and inclusive demands? The old rhetoric of uniting the middle class and poor won’t suffice. Some new paradigms and imaginative practices must be evolved for forging an alliance of the 99 per cent against the ruling oligarchy.
So it is time to reorient the organised labour, farmer as well as student movements in tune with the changed times. We need to refresh the old Left agendas and gel with the new Left concerns that include environment, genders, consumers as well as indigenous and marginal communities, both rural and urban. While Modi has taken a leaf out of the World Bank’s book by promising ‘minimum government and maxi-mum governance’ to a corruption-ravaged society, the Left’s rhetoric against privatisation and downsizing of the government hardly evokes support except among affected workers and employees. Experiences since the anti-ESMA campaign in the eighties have revealed that the huge gulf between the workers-employees in organised sectors and workers in the unor-ganised sectors, in addition to consumers has helped the governments to suppress the rights of the first section.
The struggle against the LPG (Liberalisation-Privatisation-Globalisation) regime can gain a moral and social ground if the Left take up the anti-corruption, public accountability and service-related issues. But in contrast to the neoliberal anti-state sector campaign, the Left’s point of departure should be the radical democratic ideals of popular control or institutionalised vigilance/supervision on all resources under the government and private sector and bureaucracy through participatory decision-making at all levels by the common stake-holders, including workers, farmers and other project-affected communities, as well as consumers. The right to recall elected represen-tatives at all levels and the notion of public accountability should cover not only the politicians in power and Babudom but also the corporates since they enjoy public funding including bank loans and government concessions.
The traditional Left, led by the CPM, will consider the abovementioned ideas utopian or immature since they have been the votary of a centralised state and top-down development in the name of socialism and have never really supported the ideas of bottom-up radical democracy or socialist democracy. The CPM-led first Left Front Government’s initiative for panchayati raj in Bengal was hailed as proof of its commitment to decentralisation of power. But in the next three decades, the PRIs have been turned into an extension of the ruling party apparatus leaving little room for genuine rural democracy through mass participation.
With the neoliberal economy being rapidly introduced in the nineties, the CPM, after initial hiccups, has pursued a policy of ‘engagement with the market forces in the era of globalisation’ in Bengal that finally led to Singur and Nandigram. At the same time, it mouthed its anti-neoliberal rhetoric at the Centre while following a different trajectory in Kerala. This doublespeak has cost it dearly both in Bengal and beyond. The Marxist notion of a progressive bourgeoisie, that was relevant in another epoch, became handy for the Bengal CPM leadership and their backers at the party centre to court domestic and foreign capital while branding the farmers as reactionary.
Nevertheless, Buddhadeb Bhattacharjee’s claims to generate jobs for the youth failed to convince the target group while alienating the farmers. In the process, the LF lost power to Mamata Banerjee who has succeeded in fishing in troubled waters. Despite tall talk of learning from the Latin American Bolivarian experiences, the CPM could not provide any alternative to the Gujarat model since its leadership is obsessed with the Chinese model, which is not essentially different from Modi’s roadmap.
Amid the overarching chorus on courting big capital the Marxists have hemmed and hawed around claiming a niche but ended in playing second fiddle to the Congress and BJP’s develop-mental discourses, losing its residual ideological appeal. Bhattacharjee welcomed American capital but opposed the Indo-US nuke deal and strategic ties. In fact, the CPM and its allies had not only lost the opportunity to popularise anti-US sentiments in the wake of the murderous Gas disaster in the Union Carbide unit in Bhopal way back in 1984 but also exposed the hollowness of its anti-imperialist rhetoric after it invited Dow Chemicals, which now owns Carbide, and other American behemoths to invest in Bhattacharjee’s dream project of a chemical hub at the fag of the LF rule.
It’s another matter that old anti-US slogans do not appeal to the youth anymore for several reasons including the fall of the Soviet Union and the apparent success of Chinese capitalism in a one-party state. Also, the old-era imperialist economy had neither outsourced jobs that would have created certain catchments in the colony for its rule nor pumped in funds in the capital market to give it an illusive buoyancy like today’s global neoliberal regime. But neither did we benefit from any in-depth understanding of the new global economy nor did we learn about any imaginative counter-strategies from AKG Bhavan or Alimuddin Street.
It’s an irony of history that the self-proclaimed crusaders against Capital had betrayed every effort to run closed or sick industries by workers themselves—Kamani Tube to Kanoria Jute Mill. Bengal’s CPM-led LF Government did not mind hapless workers from thousands of closed and sick mills to run from pillar to post, be beggars or commit suicide and leave their families ruined. The party-government facilitated illegal transfer of prime factory land to real-estate promoters but opposed efforts for workers’ self-manage-ment or cooperatives calling it impractical and reformist under a capitalist state.
The party of the permanently postponed revolution in its effort to keep the Capital in good humour kept a tight leash on the trade unions, allowed fly-by-night operators who masqueraded as redeemers of sunset industries to rob workers of their dues and pocket the sale proceeds of prime industrial land. The workers’ independent initiatives were nipped in the bud for long. In their effort to adjust to the neo-liberal economy and stick to power, Buddhadeb Bhattacharjee and Co. moved Rightward by imitating Modi to draw investment. But they could not outmatch Modi in placating the predatory moneybags. Consequently, they lost their rural base to Mamata who played more Left than them. In the general election this time, Modi played the Hindi-Hindu card so well in Bengal that lotus has bloomed in Asansol and made some other industrial towns with large Hindi-speaking population quite fertile for the Sangh Parivar.
Even its continued hemorrhage has failed to wake up the CPM mandarins. Not only have they failed to counter the neoliberal myth but also lost the morale to do so. They have refused to examine the fundamentals of their develop-mental discourse, except muttering some cautions against forcible land takeover. They have not bothered to reorient the party and its mass organisations and develop relations with various people’s movements in order to form-ulate united class and mass politics against neoliberal onslaughts. Instead, they have been hostile to grassroots movements and resorted to state repression to suppress when they were in power.
After Gujarat 2002, anti-Hindutva campaig-ners saw the Bengal CPM playing a fence-sitter fearing a Hindu backlash in the polls. The party’s secular credentials took a further drubbing after Bhattacharjee’s ‘warm’ relation-ship with L.K. Advani and his echoing of the BJP’s original ironman’s charge that madrasas in border areas had become the hub of anti-national activities. The results this time have proved that Mamata continues to consolidate minority votes in her favour since 2011, while Modi ate into the CPM’s anti-Mamata and anti-Modi constituencies. In Kerala, the CPM managed to better its performance against the Congress-led UDF because overall polarisation was not that acute. So the BJP drew nil there unlike its impressive inroads in Bengal.
Expelled CPM veteran Abdur Rezzak Mollah has pointed out that not only Muslims but also a large section of the Hindu OBCs have deserted the CPM. Failing to expand in the Hindi heartland despite harbouring such a wish since 1974, the party willy-nilly tried to adjust itself to the surge of identity politics by forming an internal committee to accommodate minority concerns. It also spoke of understanding Ambedkar and post-Mandal dynamics. But the exercise did not go beyond tailing Mayawati and Mulayam, also Laloo and Paswan earlier who together stand for the travesty of Ambedkar, Lohia and JP’s insights in the Indian social-political reality.
In Bengal, State-level OBC Commission findings in post-Mandal years were suppressed, and a half-hearted effort for reservations for Muslim OBCs was initiated after the Sachar Committee reportedly divulged the poor state of Muslims in Bengal too. It was too little, too late. Neither the principles of class struggle nor social justice informed CPM practices but a partisan clientelism based on political allegiance and control. Thrown out of power, it has lost its distributive leverages now.
Despite the fact that their hackneyed Third Front politics has failed again, the CPM apparatchiks are only busy in Stalinist purges to subdue internal dissenters who are alarmed by the extent of the Left rout and want the leadership to be accountable to ordinary party workers and supporters. The big brother of the Indian Left and its ideological cousins have been pathologically immune from intellectual churning in social sciences and vibes from the ground. The winds of change hardly make way through their armour of democratic centralism, the disciplinary device to gag dissenters and demand for openness. Unless shaken to the roots by the party cadres, the CPM leadership is not likely to leave their beaten path in the near future.
The non-CPM Left, particularly of Naxalite varieties, are not an inspiring lot, both ideologically and organisationally. They have not bothered to undertake any sustained efforts for understanding the new economy and its impact on its core constituencies as well as its more multitudinous victims. No worthwhile ideo-political churning, debates and dialogues, based on experiences of localised, isolated and sporadic anti-neoliberal resistances, have taken place in order to evolve some cohesive alternative development paradigms and positive united actions.
For example, most of them did not question the basic premises of the statist socio-economic progress that stipulates accumulation of capital for heavy industrialisation and the pros and cons of the economy of scale since the Bolshevik and Chinese models. The problematic of such accumulation without any colony to exploit had plagued Lenin, Mao and Castro. Questions are now being raised increasingly regarding the trajectory of the Eurocentric idea of history and civilisation, progress and development and its impact on Marxist traditions. But rethinking Marxism in the light of old and new socialist experiences has hardly occurred to any CPI-ML groups except some fringe elements.
Other aspects of developmentalism too have been ignored at the theoretical level in the excitement for the short-lived and street-smart oppositional rhetoric. All of them have virtually followed the CPM-style politics and organi-sational practices in varied degrees that turned them into competitive closed circles with their mass fronts remaining mere tutelages of tiny outfits. Their endless ideological nitpicking often masks their aspirations to grow at the cost of the other fellow-travellers, a la the CPM. While amoeba-like perpetual self-division has become a crucial condition for their existence, sometimes one or other group calls for anti-neoliberal unity but only at its terms. No wonder that these caricatures of Marxist overlords could not build a facade of united struggle, electoral or otherwise, before Modi’s march to power.
The poll results show that the CPI-ML (Liberation) continues to lose its ground on both accounts along with the CPI and CPM in Bihar, its stronghold earlier. Some fellow-travellers fondly remember the party’s efforts to gather a broadbased platform of democratic and patriotic forces in the eighties, in the form of the Indian People’s Front, before the neoliberal economy took its wings. But we have not heard of any rekindling of such effort to coordinate heterogeneous grassroots, class and mass movements sans remote control by the party. Other smaller CPI-ML groups and offshoots, which are confined to a few pockets of activities across the country, contested a few seats but could not yield better in the latest electoral battle, primarily because of their inability to forge alliances with likeminded forces.
Some others advocated NOTA and thundered against all parliamentary parties ignoring the provincial and regional configurations against the BJP and Congress. NOTA is a significant recognition of the voters’ right to reject all candidates and their disenchantment with the political parties. Nevertheless, judging by the 1.1 per cent NOTA votes out of the total votes polled this time and the public responses to NOTA campaigns, the Indian voters’ disillusion-ment with bourgeois democracy is mere wishful thinking of the self-styled vanguards.
The largest Naxalite party, the CPI (Maoist), which follows a strategic poll boycott line, has done almost nothing to stop the Hindutva forces in the red zones. Sumanta Banerjee has rightly pointed this out in his recent open letter to Ganapati (frontierweekly.com, posted on May 9). Instead, the party’s lower rank-and-file has become prone to make deals with mining and land mafias in the region. The BJP has bagged 10 out of 11 seats in Chhattisgarh, including in Maoist-dominated Bastar, this time. In Jharkhand too, where the Maoists have a strong presence, the BJP swept the poll by taking 12 out of 14 seats.
The party’s armed activities have definitely played some role in arresting the government-corporate plan for extending the mining areas, particularly in Chhattisgarh. The idea of open mass movements, supported by armed militias wherever needed, is not new in the Naxalite movement. But the Maoist party’s militarism-obsessed politics, reckless violence, hostility to non-armed grassroots initiatives in addition to competitive state-Maoist bloodbath and terror have stymied the potentials for a genuine and strong mass movement of the tribal people affected by neo-liberal plunders. If there was such a movement, it could have impacted both the BJP and Congress prospects in the polls in the region and percolated a message outside.
As if what has been at stake is an Indian revolution or, at least, the possibility of a Left Government at the Centre. As if victory of the extreme Rightwing or corporate-religious fascist nexus has no immediate bearing on the survival and resistance of the classes and communities, which are facing the worst neo-liberal onslaught. As if Maoists can run parallel to the Modified polity by being blissfully oblivious to the electoral outcome. As if Maoists will escape the anxieties and contingencies of its fallout simply because of its poll boycott line. As if we are at the threshold of an Indian February or November 1917, Giri alluded to Lenin’s moment of clarity that had dawned on the latter in a radically different situation almost a century back.
“Hence we cannot simply fight to get a Congress or the Left or seculars back in government. Let us also not harangue with the big media and big corporates and complain about their pro-Modi stance. Let us accept that the present democratic order is inherently skewed against any possibility of real social transformation—or else one only indulges in an untruth,” Giri commented asking us not to be perturbed over Modi’s victory. His call for reinventing the idea of India from the Left perspective is welcome. But Marxism cannot be further removed from the Indian version of historical reductionism, which reduces ups and downs in bourgeois politics into distracting mayas.
Our enlightened radicals in their wisdom may have stayed away from such traps. But the traps have been inescapable for guerrillas too. Despite their official poll boycott, in the past Maoists have played second fiddle to the electoral politics of Chandrababu, YSR, Laloo and Mamata and lost ground miserably to them. Will the latest poll outcome trigger any fundamental rethinking in the Maoist rank-and-file? Better to keep one’s fingers crossed.
Soni Sori, the woman teacher who had suffered gruesome torture in police and jail custody for years as a suspected Maoist conduit, contested at Bastar as an AAP candidate. But she polled almost half of the Maoist-inspired NOTA votes, which was also higher than the CPI share. Sori as well as Medha Patkar, S.P. Udaykumar of Kudankulam and a few other leaders of various people’s movements lost their sheen in the eyes of the radicals after they had contested the polls as AAP nominees. The high priests of ideological purity of the CPM and Naxalite varieties had shrugged off the AAP as dangerous reformists ready to waylay revolutionaries with its agenda for clean capitalism in lieu of crony capitalism. So Sori-Patkar-Udaykumar’s defeat did not matter for the champions of class struggle or protracted people’s war.
The AAP’s anti-corruption crusade and practices towards participatory democracy had initially succeeded in addressing the middle class and youth concerns while attracting some sections of the urban poor too. But euphoria over its impressive electoral debut in Delhi dazzled the leadership so much that they mistook the National Capital Region for India. Arvind Kejriwal’s government resigned only after 49 days in order to spread its wings across the country. Instead of consolidating their imme-diate support base, Kejriwal and his comrades aped the Congress’ High Command culture and Modi’s personality cult.
After the party has fallen flat on its face, despite its success in bagging four seats in Punjab, they now admit that the resignation was a mistake. It has resulted in the party’s defeat in all the seven seats in Delhi that have gone to the BJP’s kitty. It is hardly a solace that the AAP vote-share has gone up slightly in the State. Team Kejriwal’s effort to resurrect the government with the help from the Congress has landed them in a new quandary. It will take a long time to regain the faith that the people of Delhi and the rest of the land had reposed in them.
Some commentators like Aditya Nigam have pointed out that ‘the big change is that the long winter of deadening consensus on neoliberalism has been broken’ with the ‘struggles against land acquisition’. Like many others outside the cocooned Left, the advent of the AAP also raised his spirit, which he said had broken the ‘political deadlock’.
My initial appreciation of the AAP phenome-non notwithstanding, experience cautions against putting all our eggs in one basket. Despite the continued intra-Left fights and disunity among liberals, radical democrats, including the AAP and neo-Left of the red and green hues, one can only hope against all hope that they would go for some honest introspection and take efforts for broadest possible mass mobilisation against aggressive neoliberalism under Modi. This alone can save them and salvage the setback.
The author is a Kolkata-based journalist.
|
# Character encoding routines
# Copyright 2010-2015 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, unicode_literals
import cgi
import codecs
import collections
import re
try:
import chardet
except ImportError:
chardet = None
lazy_chardet_encoding = None
else:
def lazy_chardet_encoding(data):
chardet_encoding = chardet.detect(data)['encoding']
if not chardet_encoding:
chardet_encoding = ''
if isinstance(chardet_encoding, bytes_):
chardet_encoding = chardet_encoding.encode('ascii', 'ignore')
return chardet_encoding
from .exceptions import (
CharacterEncodingOverride, CharacterEncodingUnknown, NonXMLContentType,
)
# Type aliases that work on both Python 2 (str/unicode) and 3 (bytes/str).
bytes_ = type(b'')
unicode_ = type('')

# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = b'\x4C\x6F\xA7\x94'
UTF16BE_MARKER = b'\x00\x3C\x00\x3F'
UTF16LE_MARKER = b'\x3C\x00\x3F\x00'
UTF32BE_MARKER = b'\x00\x00\x00\x3C'
UTF32LE_MARKER = b'\x3C\x00\x00\x00'

# Must be bytes: it is compared against slices of the (bytes) document to
# disambiguate UTF-16 BOMs from UTF-32 BOMs.  As a str it never compared
# equal to bytes on Python 3, silently defeating the check.
ZERO_BYTES = b'\x00\x00'

# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
# Raw string avoids the invalid '\?' escape warning; applied to decoded text.
RE_XML_DECLARATION = re.compile(r'^<\?xml[^>]*?>')

# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
# Bytes pattern: applied to the still-undecoded document prefix.
RE_XML_PI_ENCODING = re.compile(br'^<\?.*encoding=[\'"](.*?)[\'"].*\?>')
def convert_to_utf8(http_headers, data, result):
    '''Detect and convert the character encoding to UTF-8.

    http_headers is a dictionary
    data is a raw string (not Unicode)

    Returns the document re-encoded as UTF-8 bytes (with a rewritten XML
    declaration) when a working encoding was found, otherwise the original
    data.  Side effects: sets result['encoding'], and result['bozo'] /
    result['bozo_exception'] when anything suspicious was encountered.'''

    # This is so much trickier than it sounds, it's not even funny.
    # According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    # is application/xml, application/*+xml,
    # application/xml-external-parsed-entity, or application/xml-dtd,
    # the encoding given in the charset parameter of the HTTP Content-Type
    # takes precedence over the encoding given in the XML prefix within the
    # document, and defaults to 'utf-8' if neither are specified.  But, if
    # the HTTP Content-Type is text/xml, text/*+xml, or
    # text/xml-external-parsed-entity, the encoding given in the XML prefix
    # within the document is ALWAYS IGNORED and only the encoding given in
    # the charset parameter of the HTTP Content-Type header should be
    # respected, and it defaults to 'us-ascii' if not specified.

    # Furthermore, discussion on the atom-syntax mailing list with the
    # author of RFC 3023 leads me to the conclusion that any document
    # served with a Content-Type of text/* and no charset parameter
    # must be treated as us-ascii.  (We now do this.)  And also that it
    # must always be flagged as non-well-formed.  (We now do this too.)

    # If Content-Type is unspecified (input was local file or non-HTTP source)
    # or unrecognized (server just got it totally wrong), then go by the
    # encoding given in the XML prefix of the document and default to
    # 'iso-8859-1' as per the HTTP specification (RFC 2616).

    # Then, assuming we didn't find a character encoding in the HTTP headers
    # (and the HTTP Content-type allowed us to look in the body), we need
    # to sniff the first few bytes of the XML data and try to determine
    # whether the encoding is ASCII-compatible.  Section F of the XML
    # specification shows the way here:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info

    # If the sniffed encoding is not ASCII-compatible, we need to make it
    # ASCII compatible so that we can sniff further into the XML declaration
    # to find the encoding attribute, which will tell us the true encoding.

    # Of course, none of this guarantees that we will be able to parse the
    # feed in the declared character encoding (assuming it was declared
    # correctly, which many are not).  iconv_codec can help a lot;
    # you should definitely install it if you can.
    # http://cjkpython.i18n.org/

    bom_encoding = ''
    xml_encoding = ''
    rfc3023_encoding = ''

    # Look at the first few bytes of the document to guess what
    # its encoding may be. We only need to decode enough of the
    # document that we can use an ASCII-compatible regular
    # expression to search for an XML encoding declaration.
    # The heuristic follows the XML specification, section F:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # Check for BOMs first.
    if data[:4] == codecs.BOM_UTF32_BE:
        bom_encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == codecs.BOM_UTF32_LE:
        bom_encoding = 'utf-32le'
        data = data[4:]
    elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
        bom_encoding = 'utf-16be'
        data = data[2:]
    elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
        bom_encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == codecs.BOM_UTF8:
        bom_encoding = 'utf-8'
        data = data[3:]
    # Check for the characters '<?xm' in several encodings.
    elif data[:4] == EBCDIC_MARKER:
        bom_encoding = 'cp037'
    elif data[:4] == UTF16BE_MARKER:
        bom_encoding = 'utf-16be'
    elif data[:4] == UTF16LE_MARKER:
        bom_encoding = 'utf-16le'
    elif data[:4] == UTF32BE_MARKER:
        bom_encoding = 'utf-32be'
    elif data[:4] == UTF32LE_MARKER:
        bom_encoding = 'utf-32le'

    tempdata = data
    try:
        if bom_encoding:
            tempdata = data.decode(bom_encoding).encode('utf-8')
    except (UnicodeDecodeError, LookupError):
        # feedparser recognizes UTF-32 encodings that aren't
        # available in Python 2.4 and 2.5, so it's possible to
        # encounter a LookupError during decoding.
        xml_encoding_match = None
    else:
        xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)

    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
        # Normalize the xml_encoding if necessary.
        if bom_encoding and (xml_encoding in (
            'u16', 'utf-16', 'utf16', 'utf_16',
            'u32', 'utf-32', 'utf32', 'utf_32',
            'iso-10646-ucs-2', 'iso-10646-ucs-4',
            'csucs4', 'csunicode', 'ucs-2', 'ucs-4'
        )):
            xml_encoding = bom_encoding

    # Find the HTTP Content-Type and, hopefully, a character
    # encoding provided by the server. The Content-Type is used
    # to choose the "correct" encoding among the BOM encoding,
    # XML declaration encoding, and HTTP encoding, following the
    # heuristic defined in RFC 3023.
    # NOTE(review): the cgi module is deprecated since Python 3.11 and
    # removed in 3.13; parse_header would need a replacement there.
    http_content_type = http_headers.get('content-type') or ''
    http_content_type, params = cgi.parse_header(http_content_type)
    http_encoding = params.get('charset', '').replace("'", "")
    if isinstance(http_encoding, bytes_):
        http_encoding = http_encoding.decode('utf-8', 'ignore')

    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd',
                                 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and
        http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        rfc3023_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/') and
          http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        rfc3023_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        rfc3023_encoding = http_encoding or 'us-ascii'
    elif http_headers and 'content-type' not in http_headers:
        rfc3023_encoding = xml_encoding or 'iso-8859-1'
    else:
        rfc3023_encoding = xml_encoding or 'utf-8'

    # gb18030 is a superset of gb2312, so always replace gb2312
    # with gb18030 for greater compatibility.
    if rfc3023_encoding.lower() == 'gb2312':
        rfc3023_encoding = 'gb18030'
    if xml_encoding.lower() == 'gb2312':
        xml_encoding = 'gb18030'

    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    error = None

    if http_headers and (not acceptable_content_type):
        if 'content-type' in http_headers:
            msg = '%s is not an XML media type' % http_headers['content-type']
        else:
            msg = 'no Content-type specified'
        error = NonXMLContentType(msg)

    # determine character encoding
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
                              lazy_chardet_encoding, 'utf-8', 'windows-1252', 'iso-8859-2'):
        # lazy_chardet_encoding is a function (or None when chardet is not
        # installed); call it only now so chardet runs just when needed.
        # callable() replaces isinstance(x, collections.Callable): the
        # collections alias for collections.abc.Callable was removed in
        # Python 3.10.
        if callable(proposed_encoding):
            proposed_encoding = proposed_encoding(data)
        if not proposed_encoding:
            continue
        if proposed_encoding in tried_encodings:
            continue
        tried_encodings.append(proposed_encoding)
        try:
            data = data.decode(proposed_encoding)
        except (UnicodeDecodeError, LookupError):
            pass
        else:
            known_encoding = 1
            # Update the encoding in the opening XML processing instruction.
            new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
            if RE_XML_DECLARATION.search(data):
                data = RE_XML_DECLARATION.sub(new_declaration, data)
            else:
                data = new_declaration + '\n' + data
            data = data.encode('utf-8')
            break

    # if still no luck, give up
    if not known_encoding:
        error = CharacterEncodingUnknown(
            'document encoding unknown, I tried ' +
            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
            (rfc3023_encoding, xml_encoding))
        rfc3023_encoding = ''
    elif proposed_encoding != rfc3023_encoding:
        error = CharacterEncodingOverride(
            'document declared as %s, but parsed as %s' %
            (rfc3023_encoding, proposed_encoding))
        rfc3023_encoding = proposed_encoding

    result['encoding'] = rfc3023_encoding
    if error:
        result['bozo'] = True
        result['bozo_exception'] = error
    return data
|
After a hard day at work, give your body the Spa treatment it deserves with our fantastic deals on pamper products.
Treatments There are 11 products.
The Cellulite Cream by Revuele activates the metabolic process in the skin, improving microcirculation, contributing to the decrease of fat deposits and skin lifting. A velvety and soft cream, the cellulite cream actively fights cellulite and the formation of fat, giving the skin elasticity and eliminating the effect of “orange peel”.
The Correcting Gel by Revuele promotes the breakdown of fat and has a tonic effect on blood vessels which stimulate blood circulation. When used with a balanced diet and physical activity the body's weight is reduced and the skin becomes tighter, protecting it from sagging and the appearance of stretch marks.
The Revuele Firming Gel stimulates cell renewal making the skin more toned and visually adjusting the contours of the body.
Care+ Antifungal Nail Lacquer is medically proven to effectively treat mild fungal nail infections, kill the nail fungus and prevent spreading. This carton contains 1 bottle of 3ml nail lacquer, 10 spatulas, 30 cleaning swabs, and 30 nail files. Apply once weekly.
|
# pylint: disable=too-many-public-methods
"""CLI Tests for the errata management feature"""
# For ease of use hc refers to host-collection throughout this document
from robottelo.decorators import stubbed
from robottelo.test import CLITestCase
class ErrataTestCase(CLITestCase):
    """CLI Tests for the errata management feature"""
    # All tests below are @stubbed() placeholders: the docstrings are parsed
    # by the test tooling (@Test/@Feature/@Setup/@Steps/@Assert/@Status tags),
    # so their text is metadata and is kept verbatim.

    @stubbed()
    def test_hc_errata_install_1(self):
        """@Test: Using hc-id and org id to install an erratum in a hc
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. host-collection erratum install --errata <errata> --id <id>
        --organization-id <orgid>
        @Assert: Erratum is installed.
        @Status: Manual
        """

    @stubbed()
    def test_hc_errata_install_2(self):
        """@Test: Using hc-id and org name to install an erratum in a hc
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. host-collection erratum install --errata <errata> --id <id>
        --organization <org name>
        @Assert: Erratum is installed.
        @Status: Manual
        """

    @stubbed()
    def test_hc_errata_install_3(self):
        """@Test: Use hc-id and org label to install an erratum in a hc
        @Feature: Errata
        @Setup: errata synced on satellite server.
        @Steps:
        1. host-collection erratum install --errata <errata> --id <id>
        --organization-label <org label>
        @Assert: Errata is installed.
        @Status: Manual
        """

    @stubbed()
    def test_hc_errata_install_4(self):
        """@Test: Use hc-name and org id to install an erratum in a hc
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. host-collection erratum install --errata <errata> --name <name>
        --organization-id <orgid>
        @Assert: Erratum is installed.
        @Status: Manual
        """

    @stubbed()
    def test_hc_errata_install_5(self):
        """@Test: Use hc name and org name to install an erratum in a hc
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. host-collection erratum install --errata <errata> --name <name>
        --organization <org name>
        @Assert: Erratum is installed.
        @Status: Manual
        """

    @stubbed()
    def test_hc_errata_install_6(self):
        """@Test: Use hc-name and org label to install an erratum in a hc
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. host-collection erratum install --errata <errata> --name <name>
        --organization-label <org label>
        @Assert: Erratum is installed.
        @Status: Manual
        """

    @stubbed()
    def test_hc_errata_install_7(self):
        """@Test: Attempt to install an erratum in a hc using hc-id and not
        specifying the erratum info
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. host-collection erratum install --id <id> --organization-id <orgid>
        @Assert: Error message thrown.
        @Status: Manual
        """

    @stubbed()
    def test_hc_errata_install_8(self):
        """@Test: Attempt to install an erratum in a hc using hc-name and not
        specifying the erratum info
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. host-collection erratum install --name <name> --organization-id
        <orgid>
        @Assert: Error message thrown.
        @Status: Manual
        """

    @stubbed()
    def test_hc_errata_install_9(self):
        """@Test: Attempt to install an erratum in a hc by not specifying hc
        info
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. host-collection erratum install --errata <errata> --organization-id
        <orgid>
        @Assert: Error message thrown.
        @Status: Manual
        """

    @stubbed()
    def test_hc_errata_install_10(self):
        """@Test: Attempt to install an erratum in a hc using hc-id and not
        specifying org info
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. host-collection erratum install --errata <errata> --id <id>
        @Assert: Error message thrown.
        @Status: Manual
        """

    @stubbed()
    def test_hc_errata_install_11(self):
        """@Test: Attempt to install an erratum in a hc without specifying hc
        info
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. host-collection erratum install --errata <errata> --name <name>
        @Assert: Error message thrown.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_sort_1(self):
        """@Test: Sort errata by Issued date
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --order 'issued ASC'
        2. erratum list --order 'issued DESC'
        @Assert: Errata is sorted by Issued date.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_sort_2(self):
        """@Test: Filter errata by org id and sort by updated date
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --organization-id=<orgid> --order 'updated ASC'
        2. erratum list --organization-id=<orgid> --order 'updated DESC'
        @Assert: Errata is filtered by org id and sorted by updated date.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_sort_3(self):
        """@Test: Filter errata by org name and sort by updated date
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --organization=<org name> --order 'updated ASC'
        2. erratum list --organization=<org name> --order 'updated DESC'
        @Assert: Errata is filtered by org name and sorted by updated date.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_sort_4(self):
        """@Test: Filter errata by org label and sort by updated date
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --organization-label=<org_label> --order 'updated ASC'
        2. erratum list --organization-label=<org_label> --order 'updated DESC'
        @Assert: Errata is filtered by org label and sorted by updated date.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_sort_5(self):
        """@Test: Filter errata by org id and sort by issued date
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --organization-id=<org_id> --order 'issued ASC'
        2. erratum list --organization-id=<org_id> --order 'issued DESC'
        @Assert: Errata is filtered by org id and sorted by issued date.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_sort_6(self):
        """@Test: Filter errata by org name and sort by issued date
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --organization=<org_name> --order 'issued ASC'
        2. erratum list --organization=<org_name> --order 'issued DESC'
        @Assert: Errata is filtered by org name and sorted by issued date.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_sort_7(self):
        """@Test: Filter errata by org label and sort by issued date
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --organization-label=<org_label> --order 'issued ASC'
        2. erratum list --organization-label=<org_label> --order 'issued DESC'
        @Assert: Errata is filtered by org label and sorted by issued date.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_1(self):
        """@Test: Filter errata by product id
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --product-id=<productid>
        @Assert: Errata is filtered by product id.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_2(self):
        """@Test: Filter errata by product name
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --product=<productname>
        @Assert: Errata is filtered by product name.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_3(self):
        """@Test: Filter errata by product id and Org id
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --product-id=<product_id> --organization-id=<org_id>
        @Assert: Errata is filtered by product id and Org id.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_4(self):
        """@Test: Filter errata by product id and Org name
        @Feature: Errata
        @Setup: errata synced on satellite server.
        @Steps:
        1. erratum list --product-id=<product_id> --organization=<org_name>
        @Assert: Errata is filtered by product id and Org name.
        @Status: Manual
        """

    # NOTE(review): test_errata_list_5 overlaps test_errata_list_1/3/4 (its
    # steps 1-3 repeat their scenarios); consider consolidating when the
    # stubs are implemented.
    @stubbed()
    def test_errata_list_5(self):
        """@Test: Filter errata by product id
        @Feature: Errata
        @Setup: errata synced on satellite server.
        @Steps:
        1. erratum list --product-id=<productid>
        2. erratum list --product-id=<product_id> --organization-id=<org_id>
        3. erratum list --product-id=<product_id> --organization=<org_name>
        4. erratum list --product-id=<product_id>
        --organization-label=<org_label>
        @Assert: Errata is filtered by product id.
        @Status: Manual
        """

    # NOTE(review): test_errata_list_6 duplicates test_errata_list_2 exactly.
    @stubbed()
    def test_errata_list_6(self):
        """@Test: Filter errata by product name
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --product=<productname>
        @Assert: Errata is filtered by product name.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_7(self):
        """@Test: Filter errata by product name and Org id
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --product=<product_name> --organization-id=<org_id>
        @Assert: Errata is filtered by product name and Org id.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_8(self):
        """@Test: Filter errata by product name and Org name
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --product=<product_name> --organization=<org_name>
        @Assert: Errata is filtered by product name and Org name.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_9(self):
        """@Test: Filter errata by product name and Org label
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --product=<product_name>
        --organization-label=<org_label>
        @Assert: Errata is filtered by product name and Org label.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_10(self):
        """@Test: Filter errata by Org id
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --organization-id=<orgid>
        @Assert: Errata is filtered by Org id.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_11(self):
        """@Test: Filter errata by Org name
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --organization=<org name>
        @Assert: Errata is filtered by Org name.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_12(self):
        """@Test: Filter errata by Org label
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --organization-label=<org_label>
        @Assert: Errata is filtered by Org label.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_13(self):
        """@Test: Filter errata by CVE
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. erratum list --cve <cve_id>
        @Assert: Errata is filtered by CVE.
        @Status: Manual
        """

    @stubbed()
    def test_errata_list_permission_1(self):
        """@Test: Show errata only if the User has permissions to view them
        @Feature: Errata
        @Setup:
        1. Create two products with one repo each. Sync them.
        2. Make sure that they both have errata.
        3. Create a user with view access on one product and not on the other.
        @Steps:
        1. erratum list --organization-id=<orgid>
        @Assert: Check that the new user is able to see errata for one
        product only.
        @Status: Manual
        """

    @stubbed()
    def test_errata_systems_list_1(self):
        """@Test: View a list of affected content hosts for an erratum
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. content-host list --erratum-id=<erratum_id>
        --organization-id=<org_id>
        @Assert: List of affected content hosts for an erratum is displayed.
        @Status: Manual
        """

    @stubbed()
    def test_errata_systems_list_2(self):
        """@Test: View a list of affected content hosts for an erratum filtered
        with restrict flags
        @Feature: Errata
        @Setup: Errata synced on satellite server.
        @Steps:
        1. content-host list --erratum-id=<erratum_id>
        --organization-id=<org_id> --erratum-restrict-available=1
        2. content-host list --erratum-id=<erratum_id>
        --organization-id=<org_id> --erratum-restrict-unavailable=1
        3. content-host list --erratum-id=<erratum_id>
        --organization-id=<org_id> --erratum-restrict-available=0
        4. content-host list --erratum-id=<erratum_id>
        --organization-id=<org_id> --erratum-restrict-unavailable=0
        @Assert: List of affected content hosts for an erratum is displayed
        filtered with corresponding restrict flags.
        @Status: Manual
        """

    @stubbed()
    def test_errata_content_host_1(self):
        """@Test: Available errata count displayed while viewing a list of
        Content hosts
        @Feature: Errata
        @Setup:
        1. Errata synced on satellite server.
        2. Some content hosts present.
        @Steps:
        1. hammer content-host list --organization-id=<orgid>
        @Assert: The available errata count is retrieved.
        @Status: Manual
        """
|
In C# how to convert Formula to Text in Excel 2010?
Copying Formulas in MS Excel Copying formulas is one of the most common tasks that you do in a typical spreadsheet that relies primarily on formulas. When a formula uses cell references rather than constant values, Excel makes it easy to copy an original formula to every place that requires a similar formula.
Get to know Excel 2010: Create formulas Quick Reference Card Spreadsheets, cell addresses, and formulas Spreadsheets are made up of columns, rows, and cells.
Complex Formula in Excel 2010. In a simple formula we work with one mathematical operation, such as 10 + 20. But when we want to perform more than one mathematical operation on the data, we use a complex formula, such as 10 + 20 - 30.
Using a Formula in a Text Box in Excel 2010 The steps in this article will show you how to link a text box to a cell in Microsoft Excel 2010. You cannot directly enter a formula into a text box, but you can link a cell with a formula to a text box, so that the result of the formula displays inside the text box.
|
import json
import re
# ANSI escape sequences for each supported text attribute.
# 'start' turns the attribute on; 'end' restores the terminal default
# for that attribute (e.g. [22m = normal intensity, [39m = default color).
TEXT_CODES = {'bold': {'start': '\x1b[1m',
                       'end': '\x1b[22m'},
              'cyan': {'start': '\x1b[36m',
                       'end': '\x1b[39m'},
              'blue': {'start': '\x1b[34m',
                       'end': '\x1b[39m'},
              'red': {'start': '\x1b[31m',
                      'end': '\x1b[39m'},
              'magenta': {'start': '\x1b[35m',
                          'end': '\x1b[39m'},
              'green': {'start': '\x1b[32m',
                        'end': '\x1b[39m'},
              'yellow': {'start': '\x1b[33m',
                         'end': '\x1b[39m'},
              'underline': {'start': '\x1b[4m',
                            'end': '\x1b[24m'}}
class TextCodesStripper:
    """Removes the ANSI escape codes defined in TEXT_CODES from strings."""

    # Build one alternation pattern matching any start or end code.
    # Uses .values() instead of .iteritems(), which only exists on
    # Python 2 and breaks the module on Python 3.
    keys = [re.escape(v['start']) for v in TEXT_CODES.values()]
    keys += [re.escape(v['end']) for v in TEXT_CODES.values()]
    pattern = re.compile("|".join(keys))

    @staticmethod
    def strip (s):
        """Return *s* with all known ANSI color/style codes removed."""
        return re.sub(TextCodesStripper.pattern, '', s)
def format_num (size, suffix = "", compact = True, opts = ()):
    """Format a number for human-readable display.

    size    - the number to format (strings yield "N/A")
    suffix  - unit suffix appended after the scale prefix (e.g. "bps")
    compact - when True, scale by powers of 1000 and attach K/M/G/T/P
    opts    - tuple of text attributes (e.g. ('bold', 'red')) or a single
              attribute name; applied via format_text()
    """
    txt = "NaN"

    # Strings cannot be scaled/formatted as numbers.
    # isinstance() replaces the non-idiomatic `type(size) == str` check.
    if isinstance(size, str):
        return "N/A"

    u = ''
    if compact:
        for unit in ['','K','M','G','T','P']:
            if abs(size) < 1000.0:
                u = unit
                break
            size /= 1000.0

    if isinstance(size, float):
        txt = "%3.2f" % (size)
    else:
        txt = "{:,}".format(size)

    if u or suffix:
        txt += " {:}{:}".format(u, suffix)

    if isinstance(opts, tuple):
        return format_text(txt, *opts)
    else:
        return format_text(txt, (opts))
def format_time (t_sec):
    """Render a duration given in seconds as a human-readable string.

    Negative durations are reported as "infinite".  Sub-second values are
    scaled to ms/usec/ns; longer values to sec/minutes/hours/days.
    """
    if t_sec < 0:
        return "infinite"

    if t_sec < 1:
        # Sub-second: keep multiplying by 1000 until the value reaches 1
        # in some unit, then report it in that unit.
        scaled = t_sec
        for small_unit in ['ms', 'usec', 'ns']:
            scaled *= 1000.0
            if scaled >= 1.0:
                return '{:,.2f} [{:}]'.format(scaled, small_unit)
        return "NaN"

    # One second or more: step up through the larger units.
    if t_sec < 60.0:
        return '{:,.2f} [{:}]'.format(t_sec, 'sec')

    t_sec /= 60.0
    if t_sec < 60.0:
        return '{:,.2f} [{:}]'.format(t_sec, 'minutes')

    t_sec /= 60.0
    if t_sec < 24.0:
        return '{:,.2f} [{:}]'.format(t_sec, 'hours')

    return '{:,.2f} [{:}]'.format(t_sec / 24.0, 'days')
def format_percentage (size):
    """Format *size* as a percentage string with two decimal places."""
    return "{:0.2f} %".format(size)
def bold(text):
    """Wrap *text* in ANSI bold start/end codes."""
    return text_attribute(text, 'bold')
def cyan(text):
    """Wrap *text* in ANSI cyan start/end codes."""
    return text_attribute(text, 'cyan')
def blue(text):
    """Wrap *text* in ANSI blue start/end codes."""
    return text_attribute(text, 'blue')
def red(text):
    """Wrap *text* in ANSI red start/end codes."""
    return text_attribute(text, 'red')
def magenta(text):
    """Wrap *text* in ANSI magenta start/end codes."""
    return text_attribute(text, 'magenta')
def green(text):
    """Wrap *text* in ANSI green start/end codes."""
    return text_attribute(text, 'green')
def yellow(text):
    """Wrap *text* in ANSI yellow start/end codes."""
    return text_attribute(text, 'yellow')
def underline(text):
    """Wrap *text* in ANSI underline start/end codes."""
    return text_attribute(text, 'underline')
def text_attribute(text, attribute):
    """Wrap *text* in the ANSI start/end codes for *attribute*.

    attribute must be a key of TEXT_CODES (e.g. 'bold', 'red').
    Raises TypeError when text is not a string.  (TypeError is more
    precise than the generic Exception raised previously, and is still
    caught by any caller catching Exception.)
    """
    if isinstance(text, str):
        return "{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'],
                                           txt=text,
                                           stop=TEXT_CODES[attribute]['end'])
    elif isinstance(text, unicode):
        # Python 2 unicode input gets a unicode result; this branch is
        # unreachable on Python 3 (str is handled above).
        return u"{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'],
                                           txt=text,
                                           stop=TEXT_CODES[attribute]['end'])
    else:
        raise TypeError("not a string")
# Maps attribute names to their wrapper functions; used by format_text()
# to apply a sequence of named attributes to a string.
FUNC_DICT = {'blue': blue,
             'bold': bold,
             'green': green,
             'yellow': yellow,
             'cyan': cyan,
             'magenta': magenta,
             'underline': underline,
             'red': red}
def format_text(text, *args):
    """Apply each named attribute in *args* (e.g. 'bold', 'red') to *text*.

    Unknown attribute names are silently ignored.
    """
    styled = text
    for attr_name in args:
        wrapper = FUNC_DICT.get(attr_name)
        if wrapper is not None:
            styled = wrapper(styled)
    return styled
def format_threshold (value, red_zone, green_zone):
    """Render *value*, colored red or green when it lies inside the
    corresponding inclusive (low, high) zone, uncolored otherwise.
    """
    rendered = "{0}".format(value)
    if red_zone[0] <= value <= red_zone[1]:
        return format_text(rendered, 'red')
    if green_zone[0] <= value <= green_zone[1]:
        return format_text(rendered, 'green')
    return rendered
# pretty print for JSON
def pretty_json (json_str, use_colors = True):
    """Re-serialize a JSON string with indentation and optional ANSI colors.

    json_str   - a valid JSON document as a string
    use_colors - when True, colorize numbers and strings

    Raises ValueError (via json.loads) if json_str is not valid JSON.
    """
    pretty_str = json.dumps(json.loads(json_str), indent = 4, separators=(',', ': '), sort_keys = True)

    if not use_colors:
        return pretty_str

    try:
        # int numbers
        pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*[^.])',r'\1{0}'.format(blue(r'\2')), pretty_str)

        # float
        pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*\.[0-9]+)',r'\1{0}'.format(magenta(r'\2')), pretty_str)

        # # strings
        #
        pretty_str = re.sub(r'([ ]*:[ ]+)("[^"]*")',r'\1{0}'.format(red(r'\2')), pretty_str)

        pretty_str = re.sub(r"('[^']*')", r'{0}\1{1}'.format(TEXT_CODES['magenta']['start'],
                                                             TEXT_CODES['red']['start']), pretty_str)
    except Exception:
        # Colorizing is best-effort: fall back to the uncolored output.
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        pass

    return pretty_str
if __name__ == "__main__":
    # This module is import-only; no standalone behavior.
    pass
|
It might be a bit late but it’s still worth taking a look at.
The monthly calendar shot takes in Tulketh Mill for September.
Once a symbol of Preston's industrial might, it has been refurbished as a business centre. It dominates Ashton and the surrounding areas.
|
import os
import textwrap
import random
import pygame
from pygame.locals import *
from const import *
FRAMERATE = 30            # target frames per second
CANVAS_SIZE = (600, 600)  # window size in pixels (width, height)

# Color palette as (R, G, B) tuples.
TEXT = (124, 164, 128)
BORDER = (64, 80, 116)
# Magenta -- presumably used as the sprite-sheet color key; confirm
# against the image-loading/blit code.
TRANSPARENT = (255, 0, 255)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 220, 0)
BLUE = (0, 0, 220)
PURPLE = (220, 0, 220)
# parts definition for source image rect
# Each value is an (x, y, width, height) rect into the sprite sheet.
# NOTE(review): all of the document-style entries ('tax returns' through
# 'book of tomas') deliberately share the same source rect (677,0,75,100) --
# they appear to reuse one generic "document" image.
PARTS_RECT = {
    'human torso': (16,35,39,104),
    'human head': (20,0,32,37),
    'human right arm': (0,47,18,81),
    'human left arm': (53,47,18,81),
    'human right leg': (12,130,20,69),
    'human left leg': (39,130,20,69),
    'alien torso': (92,35,39,102),
    'alien head': (96,0,32,37),
    'alien right arm': (76,47,18,81),
    'alien left arm': (129,47,18,81),
    'alien right leg': (88,130,20,69),
    'alien left leg': (115,130,20,69),
    'trex torso': (242,51,92,117),
    'trex head': (174,123,56,72),
    'trex tail': (160,0,131,46),
    'trex legs': (168,53,66,63),
    'cyclops torso': (371,60,43,65),
    'cyclops skull': (379,0,48,59),
    'cyclops right arm': (334,43,35,74),
    'cyclops left arm': (416,61,35,48),
    'cyclops right leg': (358,127,26,63),
    'cyclops left leg': (402,127,31,61),
    'ptreodactyl torso': (543,58,50,87),
    'ptreodactyl skull': (543,0,72,58),
    'ptreodactyl right wing': (453,19,90,147),
    'ptreodactyl left wing': (615,0,54,150),
    'tax returns': (677,0,75,100),
    'shopping list': (677,0,75,100),
    'todo list': (677,0,75,100),
    'ludum dare comments': (677,0,75,100),
    'bank accounts': (677,0,75,100),
    'website passwords': (677,0,75,100),
    'IP address scamlist': (677,0,75,100),
    'codex page I': (677,0,75,100),
    'codex page II': (677,0,75,100),
    'codex page III': (677,0,75,100),
    'codex page IV': (677,0,75,100),
    'codex page V': (677,0,75,100),
    'codex page VI': (677,0,75,100),
    'biblical references': (677,0,75,100),
    'book of psalms': (677,0,75,100),
    'book of tomas': (677,0,75,100),
    }

# Source rects (x, y, w, h) for the remaining game images.
UFO_RECT = (6,6,88,88)
FIGHTER_RECT = (113,12,74,75)
LIGHTFIGHTER_RECT = (313,12,74,75)
MISSILE_RECT = (194,12,4,28)
RADAR_RECT = (210,10,80,80)
RADAR_HOSTILE_RECT = (245,3,4,4)
RADAR_GOAL_RECT = (250,3,4,4)
GREEN_ZONE_RECT = (400, 19, 100,52)
class DraggableSprite(pygame.sprite.Sprite):
    """A named sprite holding an image and a rect.

    Presumably moved with the mouse elsewhere in the game (the name
    suggests drag-and-drop) -- confirm against the event-handling code.
    """

    def __init__(self, name, image, rect):
        pygame.sprite.Sprite.__init__(self)
        self.name = name
        self.image = image
        self.rect = rect
class AFOSprite(pygame.sprite.Sprite):
    """
    Player controller Air Force One sprite.
    """
    def __init__(self, image):
        pygame.sprite.Sprite.__init__(self)
        self.name = 'AFO'
        # Pristine copy of the image; subclasses re-derive self.image
        # from it (see UFOSprite.update's rotation).
        self.original_image = image
        self.image = image
        self.rect = image.get_rect()
        # Top-most y coordinate the craft is allowed to fly at.  Starts
        # at 0; presumably set by the owning scene before play -- TODO
        # confirm against the caller.
        self.fly_region = 0
        self.speed = [0, 0]      # current velocity [dx, dy], clamped to +/-10
        self.autopilot = True    # True while auto-flying into the play area
        self.health = 10

    def _accelerate(self, x, y):
        # Add (x, y) to the velocity, clamping each axis to [-10, 10].
        self.speed = [self.speed[0] + x, self.speed[1] + y]
        if self.speed[0] < -10:
            self.speed[0] = -10
        if self.speed[1] < -10:
            self.speed[1] = -10
        if self.speed[0] > 10:
            self.speed[0] = 10
        if self.speed[1] > 10:
            self.speed[1] = 10

    def _clamp(self):
        # Keep the sprite inside the canvas margins and below fly_region.
        if self.rect.left < 10:
            self.rect.left = 10
        if self.rect.top < 10:
            self.rect.top = 10
        if self.rect.right > CANVAS_SIZE[0] - 10:
            self.rect.right = CANVAS_SIZE[0] - 10
        if self.rect.top > self.fly_region:
            self.rect.top = self.fly_region

    def update(self):
        """
        Player controller craft.
        """
        # auto move the UFO forward until we are in the top half of the screen
        if self.rect.top > self.fly_region:
            self.rect.top -= 6
            if self.rect.top < self.fly_region:
                self.autopilot = False
        pressed = pygame.key.get_pressed()
        lose_acceleration = True
        # Player input only counts once autopilot hands over and while alive.
        if not self.autopilot and self.health > 0:
            if pressed[K_LEFT] or pressed[K_a]:
                self._accelerate(-1, 0)
                lose_acceleration = False
            if pressed[K_RIGHT] or pressed[K_d]:
                self._accelerate(1, 0)
                lose_acceleration = False
            if pressed[K_UP] or pressed[K_w]:
                self._accelerate(0, -1)
                lose_acceleration = False
            if pressed[K_DOWN] or pressed[K_s]:
                self._accelerate(0, 1)
                lose_acceleration = False
            if pressed[K_F10]:
                # F10 zeroes health -- looks like a debug/cheat key; confirm.
                self.health = 0
        self._clamp()
        self.rect.left += self.speed[0]
        self.rect.top += self.speed[1]
        # Drag: decay each speed axis one step toward zero when no
        # direction key was held this frame.
        if lose_acceleration:
            if self.speed[0] > 0:
                self.speed[0] -= 1
            elif self.speed[0] < 0:
                self.speed[0] += 1
            if self.speed[1] > 0:
                self.speed[1] -= 1
            elif self.speed[1] < 0:
                self.speed[1] += 1

    def take_damage(self):
        # Lose 1-3 health points, never dropping below zero.
        self.health -= random.randint(1, 3)
        if self.health < 0:
            self.health = 0
class UFOSprite(AFOSprite):
    """A player craft that spins continuously while it flies."""

    ROTATION_STEP = 10  # degrees added per frame

    def __init__(self, image):
        super(UFOSprite, self).__init__(image)
        self.angle = 0

    def update(self):
        """Run the base movement logic, then advance the spin animation."""
        super(UFOSprite, self).update()
        self.angle = (self.angle + self.ROTATION_STEP) % 360
        # rotate from the pristine image to avoid cumulative distortion,
        # re-centering the new bounding rect on the old position
        center = self.rect.center
        self.image = pygame.transform.rotate(self.original_image, self.angle)
        self.rect = self.image.get_rect(center=center)
class FighterJetSprite(pygame.sprite.Sprite):
    """
    Enemy jet that chases the player craft and fires missiles at it.

    The jet flies itself onto the screen (autopilot), then tracks the
    target horizontally, firing whenever it is roughly lined up and the
    weapon has reloaded. Once the target's health hits zero it drifts
    off the bottom-right of the screen (exitpilot).
    """
    def __init__(self, image, target):
        """
        :param image: pygame surface used to draw the jet.
        :param target: chased sprite; must expose `rect` and `health`.
        """
        pygame.sprite.Sprite.__init__(self)
        self.name = 'Fighter Jet'
        self.image = image
        self.rect = image.get_rect()
        self.target = target
        # frames remaining until the jet may fire again
        self.reload_time = 0
        # vertical drift direction: -1 up, 0 none, 1 down
        self.movement = 0
        # the jet patrols the lower third of the screen
        self.fly_region = CANVAS_SIZE[1] / 1.5
        # horizontal tracking divisor: higher means a lazier jet.
        # Fixed to int arguments: random.randint() rejects float bounds on
        # modern Python (removed in 3.12); the drawn values are unchanged.
        self.movement_speed = random.randint(10, 30)
        self.autopilot = True
        self.exitpilot = False
        self._firing = False
    def _clamp(self):
        # Keep the jet inside its patrol band near the bottom of the screen.
        if self.rect.left < 10:
            self.rect.left = 10
        if self.rect.top > CANVAS_SIZE[1] - 100:
            self.rect.top = CANVAS_SIZE[1] - 100
        if self.rect.right > CANVAS_SIZE[0] - 10:
            self.rect.right = CANVAS_SIZE[0] - 10
        if self.rect.top < self.fly_region:
            self.rect.top = self.fly_region
    def update(self):
        """Advance one frame: fly in, chase/fire, or fly out."""
        if self.autopilot:
            # rise from below the screen until fully visible
            self.rect.top -= 4
            if self.rect.bottom < CANVAS_SIZE[1] - 100:
                self.autopilot = False
        elif self.exitpilot:
            # drift down and to the right until off screen
            if self.rect.top < CANVAS_SIZE[1]:
                self.rect.left += 2
                self.rect.top += 2
        else:
            # move inline with target and fire when ready and able.
            diff = self.target.rect.left - self.rect.left
            if abs(diff) > self.movement_speed:
                self.rect.left += diff / self.movement_speed
            if self.reload_time > 0:
                self.reload_time -= 1
            elif abs(diff) < 100:
                print('Fire!')
                self._firing = True
                self.reload_time = 45
            # occasionally flip the vertical drift to look lively
            if random.randint(1, 100) < 5:
                self.movement = -1
            elif random.randint(1, 100) < 5:
                self.movement = 1
            elif random.randint(1, 100) < 5:
                self.movement = 0
            self.rect.top += self.movement * 4
            self._clamp()
            self.exitpilot = self.target.health == 0
    @property
    def is_firing(self):
        """True exactly once per shot; reading the flag clears it."""
        if self._firing:
            self._firing = False
            return True
        else:
            return False
class MissileSprite(pygame.sprite.Sprite):
    """A projectile that climbs the screen until it leaves the top edge."""

    SPEED = 10  # upward pixels per frame

    def __init__(self, image):
        super(MissileSprite, self).__init__()
        self.name = 'Missile'
        self.image = image
        self.rect = image.get_rect()
        # the view removes this sprite once destroy becomes True
        self.destroy = False

    def update(self):
        """Move upward; flag for removal once fully above the screen."""
        self.rect.top -= self.SPEED
        if self.rect.bottom < 0:
            self.destroy = True
class ExplosionSprite(pygame.sprite.Sprite):
    """
    Animated explosion cut from a sprite sheet.

    Each update() shows the next of 10 frames; after the last frame the
    destroy flag is raised so the view can remove the sprite.
    """
    small_size = (57, 57)
    large_size = (89, 89)
    # top-left corners of the 10 small frames on the sheet
    small_rects = (
        (1,185),(61,185),(121,185),(181,185),(241,185),
        (1,245),(61,245),(121,245),(181,245),(241,245),
    )
    # top-left corners of the 10 large frames on the sheet.
    # Fixed: the y offsets were written as the leading-zero literal `01`,
    # which is a SyntaxError on Python 3; plain `1` is the same value.
    large_rects = (
        (1,1),(93,1),(185,1),(277,1),(369,1),
        (1,93),(93,93),(185,93),(277,93),(369,93),
    )
    def __init__(self, sprites, is_small=True):
        """
        :param sprites: explosion sprite sheet surface.
        :param is_small: True for the 57px frames, False for the 89px ones.
        """
        pygame.sprite.Sprite.__init__(self)
        self.sprites = sprites
        self.animation_index = 0
        self.destroy = False
        self.image = None
        self.is_small = is_small
        self._set_sprite()
        self.rect = self.image.get_rect()
    def _set_sprite(self):
        # Cut the current frame out of the sheet.
        if self.is_small:
            self.image = self.sprites.subsurface(self.small_rects[self.animation_index], self.small_size)
        else:
            self.image = self.sprites.subsurface(self.large_rects[self.animation_index], self.large_size)
    def update(self):
        """Show the current frame, advance, and flag removal after frame 10."""
        self._set_sprite()
        self.animation_index += 1
        self.destroy = self.animation_index >= 10
class View(object):
    """
    Pygame view for the game.

    Renders the model state onto a fixed-size canvas, scales the canvas
    onto the real window, and owns the sprites, fonts, dialogs and music.
    Mouse/keyboard handlers feed player input back into the model.
    """
    def __init__(self, pixel_width, pixel_height, model):
        """Create the window, load all assets and subscribe to model events."""
        # we may observe the model
        self.model = model
        # listen for model events
        model.register_listener(self.model_event)
        # calculate each block size, and set our viewport size.
        self.screen_size = (pixel_width, pixel_height)
        # init pygame
        pygame.init()
        pygame.display.set_caption('Conspiracy-101')
        self.screen = pygame.display.set_mode(self.screen_size)
        self.clock = pygame.time.Clock()
        # draw game sprites to a surface of a fixed size
        # which we can rescale when blitting to the screen
        self.canvas = pygame.Surface(CANVAS_SIZE).convert()
        self.canvas.set_colorkey(TRANSPARENT)
        # calculate the scale size by the canvas/screen height ratio.
        # since canvas is square the width+height always equal
        # but we calculate anyway to be good citizens.
        self.scale_ratio = self.screen_size[1] / float(CANVAS_SIZE[1])
        print('scale ratio is %s' % (self.scale_ratio,))
        self.scale_size = (
            int(CANVAS_SIZE[0] * self.scale_ratio), self.screen_size[1])
        self.scale_center = ((self.screen_size[0] - self.scale_size[0]) / 2,
                            (self.screen_size[1] - self.scale_size[1]) / 2)
        print('scale center is %s' % (self.scale_center,))
        # background image storage
        self.background = self.canvas.copy()
        self.load_background()
        self.scrolling_background_yoffset = 0
        # scenario description
        self.brief_offset = 0
        self.brief_sprite = None
        self.results_sprite = None
        self.tactical_info_sprite = None
        # sprite sheets
        self.parts_sprite_sheet = pygame.image.load(os.path.join('..', 'data', 'parts.png')).convert()
        self.parts_sprite_sheet.set_colorkey(TRANSPARENT)
        self.player_craft_sheet = pygame.image.load(os.path.join('..', 'data', 'ufo-sprites.png')).convert()
        self.player_craft_sheet.set_colorkey(TRANSPARENT)
        self.explosion_sprite_sheet = pygame.image.load(os.path.join('..', 'data', 'explosion3.png')).convert()
        self.explosion_sprite_sheet.set_colorkey(TRANSPARENT)
        # sprite storage
        self.dragging_sprite = None
        self.drag_offset = None
        self.sprites = []
        # font storage
        self.font = pygame.font.Font(os.path.join('..', 'data', 'emulogic.ttf'), 12)
        self.smallfont = pygame.font.Font(os.path.join('..', 'data', 'emulogic.ttf'), 10)
        # confirm dialog
        self.confirm_image = pygame.image.load(os.path.join('..', 'data', 'confirm-dialog.png')).convert()
        self.confirm_action = None
        # agent images
        self.agent_image = pygame.image.load(os.path.join('..', 'data', 'agent.png')).convert()
        # player objects
        self.player_craft = None
        # delay exit state (None until a countdown is started)
        self.exit_counter = None
        # music
        pygame.mixer.music.load(os.path.join('..', 'data', 'kbmonkey-mission_control.xm'))
        pygame.mixer.music.play(-1)
    def load_background(self):
        """
        Load a background depending on the game state.
        """
        if self.model.state == STATE_MENU:
            self.background = pygame.image.load(os.path.join('..', 'data', 'menu-screen.png')).convert()
        if self.model.state == STATE_BUILD:
            self.background = pygame.image.load(os.path.join('..', 'data', 'build-screen.png')).convert()
        if self.model.state in (STATE_UFO, STATE_FLIGHT):
            self.background = pygame.image.load(os.path.join('..', 'data', 'ufo-screen.png')).convert()
        if self.model.state == STATE_RESULTS:
            self.background = pygame.image.load(os.path.join('..', 'data', 'results-screen.png')).convert()
        if self.model.state == STATE_END:
            self.background = pygame.image.load(os.path.join('..', 'data', 'end-screen.png')).convert()
    def load_build_sprites(self):
        """
        Load sprites depending on the game state.
        """
        self.sprites = []
        parts = self.model.builder.get_level_parts()
        print('level %s parts are %s' % (self.model.level, parts))
        for part in parts:
            rect = pygame.Rect(PARTS_RECT.get(part, None))
            if rect:
                image = self.parts_sprite_sheet.subsurface(rect)
                # unused parts scatter in the upper band, used parts lower
                rect.center = (random.randint(30, 570), random.randint(230, 370))
                if self.model.builder.part_used(part):
                    rect.center = (random.randint(30, 570), random.randint(430, 570))
                sprite = DraggableSprite(part, image, rect)
                self.sprites.append(sprite)
            else:
                print('warning: part "%s" has no image rect definition' % (part,))
    def load_player_craft_sprites(self):
        """Create the player craft sprite that matches the current state."""
        self.sprites = []
        # player craft
        player = None
        # start off at the bottom center of the screen
        if self.model.state == STATE_UFO:
            player = UFOSprite(self.player_craft_sheet.subsurface(UFO_RECT))
        elif self.model.state == STATE_FLIGHT:
            player = AFOSprite(self.player_craft_sheet.subsurface(LIGHTFIGHTER_RECT))
        if player:
            player.fly_region = CANVAS_SIZE[1] / 2
            player.rect.center = (CANVAS_SIZE[0] / 2, CANVAS_SIZE[1])
            self.sprites.append(player)
            self.player_craft = player
    def add_fighter_jet(self):
        """
        Add a fighter jet to the play field.
        """
        if self.player_craft:
            jet = FighterJetSprite(
                self.player_craft_sheet.subsurface(FIGHTER_RECT),
                self.player_craft)
            # spawn just below the visible screen at a random x
            jet.rect.top = CANVAS_SIZE[1]
            jet.rect.left = random.randint(100, 400)
            self.sprites.append(jet)
    def fire_jet_missile(self, jet):
        """
        Fire a missile from a jet.
        """
        missile = MissileSprite(
            self.player_craft_sheet.subsurface(MISSILE_RECT))
        missile.rect.center = jet.rect.center
        # launch from the left, center or right hardpoint at random
        missile.rect.left += (26 * random.randint(-1, 1))
        self.sprites.append(missile)
    def create_explosion(self, target, is_small=True):
        """
        Create an explosion near target (a sprite object).
        """
        explosion = ExplosionSprite(self.explosion_sprite_sheet, is_small)
        explosion.rect.center = target.rect.center
        self.sprites.append(explosion)
    def draw_hover_part_name(self):
        """
        Draw the name of the part under the cursor onto the canvas.
        """
        if not self.confirm_action:
            xy = self.translated_mousepos
            for sprite in self.sprites:
                if sprite.rect.collidepoint(xy):
                    part_name = self.font.render(
                        sprite.name, False, BLACK, TRANSPARENT)
                    part_name.set_colorkey(TRANSPARENT)
                    if part_name:
                        self.canvas.blit(part_name, (13, 370))
                    return
    def draw_body_accuracy(self):
        """
        Placeholder for drawing the build accuracy; currently disabled.
        """
        pass
        #part_name = self.font.render(
            #'accuracy: %s %%' % (self.model.builder.accuracy, ),
            #False, BLACK, TRANSPARENT)
        #part_name.set_colorkey(TRANSPARENT)
        #if part_name:
            #self.canvas.blit(part_name, (13, 420))
    def blit(self):
        """
        Draw the model state to our game canvas, and finally blit it
        to the screen after we rescale it.
        """
        # sprites collected here are removed after the draw pass
        garbage_sprites = []
        self.canvas.blit(self.background, (0, 0))
        # count down towards leaving the tactical state.
        # NOTE(review): exit_counter may still be None here; `None > 0` is
        # False on Python 2 but raises TypeError on Python 3 -- confirm
        # the intended interpreter before porting.
        if self.model.state != STATE_MENU and self.exit_counter > 0:
            self.exit_counter -= 1
        if self.model.state == STATE_BUILD:
            # dragging a sprite
            if self.dragging_sprite:
                self.dragging_sprite.rect.center = (
                    self.translated_mousepos[0] - self.drag_offset[0],
                    self.translated_mousepos[1] - self.drag_offset[1],
                    )
            # briefing words
            if self.brief_sprite:
                self.canvas.blit(self.brief_sprite.image,
                    (14, 22),
                    self.brief_sprite.rect.move(0, self.brief_offset))
            # draw sprites
            for sprite in self.sprites:
                sprite.update()
                self.canvas.blit(sprite.image, sprite.rect)
            self.draw_hover_part_name()
            self.draw_body_accuracy()
        elif self.model.state in (STATE_UFO, STATE_FLIGHT):
            # two copies of the background scroll to fake forward motion
            bh = self.background.get_height()
            self.scrolling_background_yoffset += 15
            if self.scrolling_background_yoffset > bh:
                self.scrolling_background_yoffset = 0
            self.canvas.blit(self.background, (0, self.scrolling_background_yoffset))
            self.canvas.blit(self.background, (0, self.scrolling_background_yoffset - bh))
            # radar
            self.draw_tactical_radar()
            # health bar
            self.draw_ufo_healthbar()
            # help words
            self.draw_ufo_help()
            # exit: countdown expired, score the mission and move on
            if self.exit_counter == 0:
                self.model.mission_success = self.model.ufotactical.distance_from_goal < 10
                self.model.set_state(STATE_RESULTS)
            # draw sprites
            for sprite in self.sprites:
                sprite.update()
                self.canvas.blit(sprite.image, sprite.rect)
                if isinstance(sprite, FighterJetSprite):
                    if sprite.is_firing:
                        self.fire_jet_missile(sprite)
                elif isinstance(sprite, MissileSprite):
                    if self.player_craft.health > 0:
                        if self.player_craft.rect.colliderect(sprite.rect):
                            # TODO hit sound and explosion
                            garbage_sprites.append(sprite)
                            self.player_craft.take_damage()
                            # small blast on a hit, large blast on the kill
                            if self.player_craft.health > 0:
                                self.create_explosion(sprite, is_small=True)
                            else:
                                self.create_explosion(sprite, is_small=False)
                elif isinstance(sprite, AFOSprite):
                    # craft destroyed: start the exit countdown and drop it
                    if self.player_craft.health == 0 and not self.exit_counter:
                        self.exit_counter = 100
                        garbage_sprites.append(sprite)
                elif isinstance(sprite, ExplosionSprite):
                    if sprite.destroy:
                        garbage_sprites.append(sprite)
        elif self.model.state == STATE_RESULTS:
            # report!
            if self.results_sprite:
                self.canvas.blit(self.results_sprite, (111, 100))
        if self.model.state != STATE_MENU:
            # garbage
            for g in garbage_sprites:
                if g in self.sprites:
                    self.sprites.remove(g)
            # confirm
            if self.confirm_action:
                csize = self.canvas.get_size()
                size = pygame.Rect((0, 0), self.confirm_image.get_size())
                size.center = (csize[0] / 2, csize[1] / 2)
                self.canvas.blit(self.confirm_image, size)
        # rescale
        if self.scale_ratio > 1.0:
            self.screen.blit(
                pygame.transform.scale(self.canvas, self.scale_size),
                self.scale_center)
        else:
            self.screen.blit(self.canvas, (0, 0))
        # flip and tick
        pygame.display.flip()
        self.clock.tick(FRAMERATE)
    def draw_tactical_radar(self):
        """Draw the radar widget with hostile and goal distance markers."""
        # base image
        self.canvas.blit(
            self.player_craft_sheet.subsurface(RADAR_RECT),
            (10, 10))
        # enemy fighters
        incoming_jets = self.model.ufotactical.jet_distances
        for enemy in incoming_jets:
            # draw a dot for its distance.
            epos = (
                50,
                45 + ((enemy / 500.0) * 40)
                )
            self.canvas.blit(
                self.player_craft_sheet.subsurface(RADAR_HOSTILE_RECT),
                epos)
        # dot for goal distance
        epos = (
            50,
            50 - ((self.model.ufotactical.distance_from_goal / 2000.0) * 40)
            )
        self.canvas.blit(
            self.player_craft_sheet.subsurface(RADAR_GOAL_RECT),
            epos)
        # green zone
        if self.model.ufotactical.distance_from_goal == 0:
            self.canvas.blit(self.player_craft_sheet.subsurface(GREEN_ZONE_RECT), (10, 120))
    def draw_ufo_help(self):
        """Show the agent portrait and helper text early in a tactical level."""
        if self.model.state in (STATE_UFO, STATE_FLIGHT):
            # for the first few ticks
            if self.model.ufotactical.clock < 250: #250
                # show some helpful words of wisdom
                if self.tactical_info_sprite:
                    self.canvas.blit(self.tactical_info_sprite, (220, 40))
                    # draw the agent picture
                    self.canvas.blit(self.agent_image, (10, 10))
    def draw_ufo_healthbar(self):
        """Draw a red/green health bar for the player craft.

        NOTE(review): assumes self.player_craft is set -- only call while a
        tactical state is active.
        """
        hp = self.player_craft.health * 8 + 1
        fullrect = pygame.Rect(10, 100, 80, 10)
        rect = pygame.Rect(10, 100, hp, 10)
        pygame.draw.rect(self.canvas, RED, fullrect, 0)
        pygame.draw.rect(self.canvas, GREEN, rect, 0)
        pygame.draw.rect(self.canvas, BLACK, fullrect, 2)
    def print_wrapped_text(self, sentence, maxlength, font, color):
        """
        Creates an image with the given words wrapped.
        """
        lines = []
        paragraphs = sentence.split('\n')
        for p in paragraphs:
            lines.extend(textwrap.wrap(p, maxlength))
            # blank line between paragraphs
            lines.append(' ')
        surfii = []
        max_width = 0
        total_height = 0
        # render each line and track the combined surface size
        for line in lines:
            surfii.append(font.render(line, False, color, TRANSPARENT))
            print_size = surfii[-1].get_size()
            if print_size[0] > max_width:
                max_width = print_size[0]
            total_height += print_size[1]
        combined = pygame.Surface((max_width, total_height))
        combined.fill(TRANSPARENT)
        print_position = 0
        for print_surface in surfii:
            combined.blit(print_surface, (0, print_position))
            print_position += print_surface.get_height()
        combined.set_colorkey(TRANSPARENT)
        return combined
    def draw_briefing_words(self):
        """
        Redraw the briefing wording.
        """
        if self.model.state == STATE_BUILD:
            BRIEF_TEXT_HEIGHT = 150
            brief_text = LEVEL_SCENARIOS[self.model.level]
            # prefix a remark about how the previous mission went
            if self.model.level > 1:
                if self.model.mission_success:
                    brief_text = 'My commendations on your last ' \
                        'mission, what a success!\n' + brief_text
                else:
                    brief_text = 'Failure like your last mission will ' \
                        'not be tolerated. Let us hope your next ' \
                        'mission goes better...\n' + brief_text
            sprite = pygame.sprite.Sprite()
            image = self.print_wrapped_text(
                brief_text,
                30,
                self.font,
                TEXT
                )
            sprite.image = image
            sprite.rect = pygame.Rect((0, 0), (image.get_width(), BRIEF_TEXT_HEIGHT))
            self.brief_sprite = sprite
        elif self.model.state in (STATE_UFO, STATE_FLIGHT):
            # per-level agent advice (levels 4 and 7+ show nothing)
            if self.model.level == 1:
                words = 'Avoid gunfire until you reach the target zone. ' \
                        'Once in the zone force the craft down by engaging ' \
                        'enemy fire. Use the arrows or wsad keys. Good luck Agent!'
            elif self.model.level == 2:
                words = 'Again, only get shot down when inside the ' \
                        'Green Zone. Use the arrows or wsad keys. Good luck Agent!'
            elif self.model.level == 3:
                words = 'You know the drill by now, Agent. ' \
                        'Keep it tidy and see you at debriefing! '
            elif self.model.level == 5:
                words = 'Look sharp, Agent. Reports indicate more ' \
                        'resistance, incoming!'
            elif self.model.level == 6:
                words = 'Something has come up, I am going in hiding ' \
                        'and so should you! Finish the mission and disappear!'
            else:
                self.tactical_info_sprite = None
                return
            if words:
                helpful_words = self.print_wrapped_text(
                    words, 30, self.font, TEXT )
                # draw the text over a solid border-colored backing
                self.tactical_info_sprite = helpful_words.copy()
                self.tactical_info_sprite.fill(BORDER)
                self.tactical_info_sprite.blit(helpful_words, (0,0))
        elif self.model.state == STATE_RESULTS:
            self.results_sprite = self.print_wrapped_text(
                self.model.results, 35, self.smallfont, BLACK)
    def scroll_brief(self, offset):
        """Scroll the briefing text by offset pixels, clamped to its extent."""
        if self.model.state == STATE_BUILD:
            self.brief_offset += offset
            max_size = self.brief_sprite.rect.height * 6
            if self.brief_offset > max_size:
                self.brief_offset = max_size
            if self.brief_offset < 0:
                self.brief_offset = 0
    def model_event(self, event_name, data):
        """React to model notifications: state changes, level ups, spawns."""
        print('view event "%s" => %s' % (event_name, data))
        if event_name == 'levelup':
            self.player_craft = None
        elif event_name == 'state':
            self.load_background()
            if self.model.is_new_level:
                self.brief_offset = 0
                self.draw_briefing_words()
                self.exit_counter = None
                self.load_build_sprites()
            if self.model.state in (STATE_UFO, STATE_FLIGHT) and not self.player_craft:
                self.draw_briefing_words()
                self.load_player_craft_sprites()
            if self.model.state == STATE_RESULTS:
                self.draw_briefing_words()
        elif event_name == 'deploy fighter jet':
            self.add_fighter_jet()
    @property
    def translated_mousepos(self):
        """
        Get the mouse position as translated to the screen size ratio.
        """
        xy = pygame.mouse.get_pos()
        scaled_xoffset = (self.scale_center[0] / self.scale_ratio)
        scaled_yoffset = (self.scale_center[1] / self.scale_ratio)
        xy = (
            xy[0] / self.scale_ratio - scaled_xoffset,
            xy[1] / self.scale_ratio - scaled_yoffset)
        return xy
    def mouseDown(self):
        """Handle clicks: confirm dialog, drag start, or the plant button."""
        if self.model.state == STATE_MENU:
            return
        self.dragging_sprite = None
        xy = self.translated_mousepos
        # affirmative and negatory buttons
        if self.confirm_action:
            affirm = pygame.Rect(204, 287, 191, 25)
            if affirm.collidepoint(xy):
                if self.confirm_action == 'plant':
                    if self.model.level == len(TACTICAL_TYPE):
                        print('Warning: There are no tactical missions for level %s' % self.model.level)
                    else:
                        self.model.set_state(TACTICAL_TYPE[self.model.level])
                self.confirm_action = None
            negate = pygame.Rect(204, 337, 191, 25)
            if negate.collidepoint(xy):
                self.confirm_action = None
            return
        if self.model.state == STATE_BUILD:
            # sprite click
            for sprite in self.sprites:
                if sprite.rect.collidepoint(xy):
                    self.dragging_sprite = sprite
                    # place dragging sprite on top
                    self.sprites.remove(self.dragging_sprite)
                    self.sprites.append(self.dragging_sprite)
                    self.drag_offset = (
                        xy[0] - sprite.rect.center[0],
                        xy[1] - sprite.rect.center[1],
                        )
                    return
            # plant button click
            button = pygame.Rect(390, 165, 198, 29)
            if button.collidepoint(xy):
                self.confirm_action = 'plant'
    def mouseUp(self):
        """Drop a dragged part: above y=400 removes it, below adds it."""
        if self.dragging_sprite:
            part = self.dragging_sprite.name
            x,y = self.dragging_sprite.rect.center
            self.dragging_sprite = None
            if y < 400:
                self.model.builder.remove_part(part)
            else:
                self.model.builder.add_part(part)
    def mouseWheelUp(self):
        # scroll the briefing text up
        self.scroll_brief(-16)
    def mouseWheelDown(self):
        # scroll the briefing text down
        self.scroll_brief(16)
    def keyDown(self, key):
        """Arrow keys also scroll the briefing text."""
        if key == K_DOWN:
            self.scroll_brief(16)
        if key == K_UP:
            self.scroll_brief(-16)
|
Mix ingredients (except butter). Place baking paper tightly over the top, refrigerate for 2 days. Stir & divide mixture into four 850ml pudding basins. Press in firmly. Cut 20cm circles of baking paper, brush with melted butter, press firmly onto puddings to seal. Cut 30cm circles of muslin, dampen & lay over puddings. Stretch muslin, tie under rim with string. Preheat oven to 150°C. Place puddings into a deep pan, pour boiling water to halfway up the sides of the puddings. Cover pan with foil, tear 2 small holes in the foil (not over a pudding). Steam for 8 hrs, check water level every 2 hrs, top up with boiling water as needed. Cool at room temp for 1 day. Take off the muslins & wash them. Carefully lift up only the edges of the paper and drizzle each pudding with 40ml of brandy. Press paper firmly to make airtight. Stretch the washed muslins back over the puddings & retie string. Brush with melted butter. To serve, place the covered pudding back in the same steam bath for 2½ hrs. Remove cloth & paper seals, unmold puddings upside down on a plate. Decorate with fresh berries and pour over Paul’s Double Thick Vanilla Custard.
|
#Created to read salvatore table, read 3FHL data and etrapolated (Using Biteau prescription) and simulate with CTOOLS
# Made for Rio 2017.
# author David Sanchez david.sanchez@lapp.in2p3.fr
# Gate Florian
# Piel Quentin
# ------ Imports --------------- #
import numpy,math,pyfits,os,sys
from Plot.PlotLibrary import *
from Catalog.ReadFermiCatalog import *
from environ import FERMI_CATALOG_DIR,INST_DIR
from Plot.PlotLibrary import *
from ebltable.tau_from_model import OptDepth as OD
from os.path import join
import Script.Common_Functions as CF
import ctoolsAnalysis.xml_generator as xml
from ctoolsAnalysis.config import get_config,get_default_config
from ctoolsAnalysis.SimulateSource import CTA_ctools_sim
from submit import call
# ------------------------------ #
def GetInfoFromTable(fitstable, indice):
    '''Read the Salvatore table and return the source stored at position indice.

    Parameters
    ---------
    fitstable : pyfits object : table to be browsed
    indice : place of the source in the table

    Returns
    -------
    tuple : (sourcename, ra, dec, z, hemisphere, observation_type)
    '''
    row = fitstable[1].data[indice]
    sourcename = row[0]
    ra, dec = row[1], row[2]
    redshift = row[4]
    # a missing redshift is stored as NaN; treat it as zero
    if math.isnan(redshift):
        redshift = 0
    hemisphere = row[6]
    observation_type = row[8]
    # expand the table's one-letter hemisphere codes
    expansions = {'S': 'South', 'N': 'North'}
    hemisphere = expansions.get(hemisphere, hemisphere)
    return sourcename, ra, dec, redshift, hemisphere, observation_type
def cutoff(energy, z):
    '''Exponential cut-off following the JB prescription, with the
    cut-off energy 3 TeV scaled down by (1 + z).

    Parameters
    ---------
    energy : in TeV.
    z : redshift of the source.
    '''
    cutoff_energy = 3. / (1 + z)
    return numpy.exp(-energy / cutoff_energy)
def ComputeExtrapolateSpectrum(sourcename,z,eblmodel = "dominguez",alpha = -1,out="."):
    '''Extrapolate a 3FHL power-law spectrum to CTA energies.

    The catalog power law is evaluated on [50 GeV, 100 TeV], multiplied by
    the JB exponential cut-off and corrected for EBL absorption.

    :param sourcename: 3FHL source name to look up in the Fermi catalog.
    :param z: source redshift.
    :param eblmodel: EBL model name understood by ebltable.
    :param alpha: sign/scale applied to the EBL optical depth exponent.
    :param out: output folder for the generated file-function.
    :return: (written file path, energy in MeV where tau == 2), or None
        when the catalog lookup fails.
        NOTE(review): callers that unpack two values will raise on the
        None path -- confirm intended behaviour.
    '''
    try :
        Cat = FermiCatalogReader.fromName(sourcename,FK5,FERMI_CATALOG_DIR,"dnde","MeV") #read 3FHL
    except Exception:
        # narrowed from a bare except so Ctrl-C / SystemExit still propagate
        print('cannot read 3FHL for some reason, returning')
        return
    emin = 5e4 #Mev
    emax = 100e6
    params = Cat.ReadPL("3FHL")
    print(params)
    spec = Spectrum(params,Model="PowerLaw",Emin=emin,
                            Emax=emax,Representation="dnde",escale="MeV",
                            Npt=1000)
    energy,phi = spec.GetModel()
    # Cat.MakeSpectrum("3FHL",emin,emax)
    # _,_,energy,phi = Cat.Plot("3FHL")
    # intrinsic exponential cut-off, evaluated in TeV
    SpectreWithCutoff = cutoff(energy/1e6,z)
    #Correct for EBL using Dominguez model
    tau = OD.readmodel(model = eblmodel)
    TauEBL = tau.opt_depth(z,energy/1e6)
    Etau2 = numpy.interp([2.],TauEBL,energy/1e6)*1e6 # Input in TeV -> Get MeV at the end
    EBL_corrected_phi = phi*numpy.exp(alpha * TauEBL)
    phi_extrapolated = EBL_corrected_phi*SpectreWithCutoff
    # phi_extrapolated = EBL_corrected_phi
    outfile = out+"/"+sourcename.replace(" ","")+"_File.txt"
    # tiny floor keeps downstream log-scale tools away from exact zeros
    CF.MakeFileFunction(energy,phi_extrapolated+1e-300,outfile)
    return outfile, Etau2
if __name__=="__main__":
    # Usage: python <script> <row-index-in-table>
    TableInfo = pyfits.open(INST_DIR+'/data/table_20161213.fits')
    outfolder = join(os.getcwd(), "out/Dominguez3TeVCutOff")
    # outfolder = join(os.getcwd(), "out/DominguezNoCutOff")
    #default work and out path.
    work = join(os.getcwd(), "work")
    os.system("mkdir -p "+outfolder)
    # the source to simulate is picked by its row index, given on the CLI
    i = int(sys.argv[1])
    sourcename,ra,dec,z,hemisphere,_ = GetInfoFromTable(TableInfo,i)
    print 'work on source ',sourcename,' at a redsift of z=',z
    Filefunction, Etau2 = ComputeExtrapolateSpectrum(sourcename,z,eblmodel = "dominguez",alpha = -1,out=outfolder)
    ########### Create XML
    lib,doc = xml.CreateLib()
    # point source whose spectrum is read back from the file-function on disk
    spec = xml.addFileFunction(lib, sourcename, type = "PointSource",filefun=Filefunction,flux_free=1, flux_value=1., flux_scale=1.,flux_max=100000000.0, flux_min=0.0)
    spatial = xml.AddPointLike(doc,ra,dec)
    spec.appendChild(spatial)
    lib.appendChild(spec)
    bkg = xml.addCTAIrfBackground(lib)
    lib.appendChild(bkg)
    open(Filefunction.replace("_File.txt",'.xml'), 'w').write(doc.toprettyxml('  '))
    #######################
    simutime = 100 #Hours
    irfTime = CF.IrfChoice(simutime)
    # setup : Time, Energy and IRFS.
    tmin = 0
    tmax = int(simutime*3600)
    # two runs: a fixed 50 GeV threshold and the tau=2 absorption energy
    emin_table =[0.05,Etau2*1e-6] #TeV
    emax = 100 #TeV
    irf = "South_z20_"+str(irfTime.replace(".0",""))+"h"
    caldb = "prod3b"
    config = CF.MakeconfigFromDefault(outfolder,work,sourcename,ra,dec)
    # config.write(open("simu_"+sourcename.replace(" ","")+"_"+str(simutime)+"h"+".conf", 'w'))
    for emin in emin_table:
        print 'simu'
        #creation of the simulation object
        # simu = CTA_ctools_sim.fromConfig(config)
        # simu.SetTimeRange(tmin,tmax)
        # simu.SetIRFs(caldb,irf)
        # simu.SetEnergyRange(float(emin),emax)
        # write one config file per energy threshold, then submit a job
        config["file"]["inmodel"] = Filefunction.replace("_File.txt",'.xml')
        config["time"]["tmin"] = tmin
        config["time"]["tmax"] = tmax
        config["irfs"]["irf"] = irf
        config["irfs"]["caldb"] = caldb
        config["energy"]["emin"] = float(emin)
        config["energy"]["emax"] = emax
        config_file = Filefunction.replace("_File.txt","_"+str(int(emin*100.)/100.)+"TeV"+".conf")
        config.write(open(config_file, 'w'))
        print "save configuration file ",config_file
        # run the simulation
        cmd = "python "+join(os.getcwd(), "Simulate_Ctools.py")+" "+config_file
        call(cmd,config_file.replace(".conf",".sh"),config_file.replace(".conf",".log"))
        # os.system(cmd)
|
This is a Limited Term Position ending 7/10/20.
The Jonathan M. Tisch College of Civic Life prepares Tufts students to become active citizens and community leaders. As the only university-wide college of its kind, its mission is to engage Tufts students in meaningful community building, public service experiences, and transformational learning. It conducts groundbreaking research on young people’s civic and political participation and forges innovative community partnerships. Tisch College is a national leader in civic education, whose model and research are setting the standard for higher education’s role in civic engagement.
Tufts University’s Jonathan M. Tisch College of Civic Life seeks to hire a graduating member of the Tufts University class of 2019 to serve in an exciting outreach role focused on increasing the civic and political engagement of students across the University. This is a one-year, full-time, benefits-eligible position beginning July 10, 2019. Tufts University class of 2019 graduates are strongly encouraged to apply.
|
# Texture Stitcher for x801
# Usage: python3 tools/autostitch.py assets/textures/terrain/blocks asset-src/textures/terrain/gimpfiles/blocknames.tti assets/textures/terrain/blocks.tti asset-temp/textures/terrain/gimpfiles
import argparse
import fparser
import pathlib
import re
import readtable
from PIL import Image
# Command line: output image prefix, optional name table ("*" = identity),
# output table path, and the directory containing the input tiles.
parser = argparse.ArgumentParser(description='Stitch textures for Experiment801.')
parser.add_argument('destinationImage', metavar='di', type=str, nargs=1,
	help='the destination path for the image')
parser.add_argument('sourceTable', metavar='sd', type=str, nargs=1,
	help='the source path for the table')
parser.add_argument('destinationTable', metavar='dd', type=str, nargs=1,
	help='the destination path for the table')
parser.add_argument('images', metavar='images', type=str, nargs=1,
	help='the path with the appropriate images')
args = parser.parse_args()
# edge length of one tile in pixels
tsize = 32
# Reasonable requirement for max texture size
# according to http://feedback.wildfiregames.com/report/opengl/feature/GL_MAX_TEXTURE_SIZE
asize = 4096
# tiles per atlas row (and per column)
tdim = asize // tsize
# next free slot on the current atlas page
cumul = 0
# current atlas page number
pageno = 0
# total slots per page
capat = tdim * tdim
# current atlas page, fully transparent until tiles are pasted in
image = Image.new(
    "RGBA",
    (asize, asize),
    (0, 0, 0, 0)
)
st = args.sourceTable[0]
# name -> id
nametrans = lambda x: x
# "*" keeps file names as ids; otherwise map them through the name table
if st != "*":
    nametable = readtable.read(st)
    nametrans = lambda x: nametable.get(x, None)
# id -> absolute slot index of the texture's first tile
table = {}
def save():
    """Write the current atlas page to its numbered output file."""
    out_path = "{}.{}.png".format(args.destinationImage[0], pageno)
    image.save(out_path)
# Stitch every input tile into the atlas, paging when a page fills up.
for fn in pathlib.Path(args.images[0]).glob("*.png"):
    # Add file entry
    shortname = fn.name
    shortname = shortname[0:shortname.rfind('.')]
    myid = nametrans(shortname)
    if myid is None:
        fparser.error("Name not found: " + shortname)
    # record the texture's absolute first slot (page * slots-per-page + slot)
    table[myid] = cumul + capat * pageno
    # Try to open image
    newImage = Image.open(str(fn))
    if newImage.height > tsize:
        fparser.error("Image is too tall: %d > %d", newImage.height, tsize)
    # Write image
    # NOTE(review): nSlots is derived from the height, which the check above
    # caps at tsize, so nSlots is always 0 or 1 here; the width (for wide
    # multi-slot strips) was probably intended -- confirm.
    nSlots = (newImage.height + tsize - 1) // tsize;
    progress = 0
    while progress < nSlots:
        if cumul >= capat:
            # No more room.
            # Save the current image and start a new page
            save()
            image = Image.new(
                "RGBA",
                (asize, asize),
                (0, 0, 0, 0)
            )
            pageno += 1
            cumul -= capat
        x = cumul % tdim
        y = cumul // tdim
        pasteAmt = min(nSlots, tdim - x)
        # NOTE(review): the crop offsets use x (the destination column)
        # rather than progress (the source offset); looks wrong for images
        # spanning multiple slots -- confirm intent.
        region = newImage.crop(
            (x * tsize, 0, (x + pasteAmt) * tsize, tsize)
        )
        # NOTE(review): `region` is computed but never used; the whole
        # source image is pasted instead -- likely meant
        # image.paste(region, ...). Confirm before changing behaviour.
        image.paste(newImage, (x * tsize, y * tsize))
        cumul += pasteAmt
        progress += pasteAmt
# flush the final, possibly partially-filled page
save()
# write the id -> slot-index table, one "id index" pair per line
fh = open(args.destinationTable[0], "w")
for (name, index) in table.items():
    fh.write(str(name) + " " + str(index) + "\n")
fh.close()
|
SURUHANJAYA PERKHIDMATAN PELAJARAN MALAYSIA, WILAYAH PERSEKUTUAN PUTRAJAYA, W.P. PUTRAJAYA. SEKTOR: AKTIVITI FOTOKOPI, PENYEDIAAN DOKUMEN DAN LAIN-LAIN AKTIVITI SOKONGAN PEJABAT KHUSUS T.T.T.L.
0 Response to "JAWATAN KOSONG SURUHANJAYA PERKHIDMATAN PELAJARAN MALAYSIA"
|
__author__ = 'espin'
#######################################################################
# Dependencies
#######################################################################
import sys
import collections
import numpy as np
import pandas
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from statsmodels.formula.api import ols
from libs import utils
from libs.profiling import Profiling
import time
import gc
from scipy import stats
from scipy.stats.mstats import zscore
#######################################################################
# MRQAP
#######################################################################
# Column label statsmodels/patsy assigns to the constant term of an OLS fit.
INTERCEPT = 'Intercept'
class MRQAP():
#####################################################################################
# Constructor and Init
#####################################################################################
    def __init__(self, Y=None, X=None, npermutations=-1, diagonal=False, directed=False, logfile=None, memory=None, standarized=False):
        '''
        Initialization of variables
        :param Y: dict with one entry, name -> numpy array dependent matrix
        :param X: dictionary of numpy array independed variables
        :param npermutations: int number of permutations
        :param diagonal: boolean, False to delete diagonal from the OLS model
        :param directed: boolean, True when the network is directed
        :param logfile: path of the log file (None logs to stdout)
        :param memory: Profiling instance for memory tracking (created if None)
        :param standarized: boolean, True to z-standardize columns before fitting
        :return:
        '''
        self.X = X # independent variables: dictionary of numpy.array
        self.Y = Y # dependent variable: dictionary numpy.array
        # NOTE: dict.values()[0] is Python 2 style -- Y holds a single entry
        self.n = Y.values()[0].shape[0] # number of nodes
        self.npermutations = npermutations # number of permutations
        self.diagonal = diagonal # False then diagonal is removed
        self.directed = directed # directed True, undirected False
        self.data = None # Pandas DataFrame
        self.model = None # OLS Model y ~ x1 + x2 + x3 (original)
        self.v = collections.OrderedDict() # vectorized matrices, flatten variables with no diagonal
        self.betas = collections.OrderedDict() # betas distribution
        self.tvalues = collections.OrderedDict() # t-test values
        self.logfile = logfile # logfile path name
        self.standarized = standarized
        self.memory = memory if memory is not None else Profiling() # to track memory usage
def init(self):
'''
Generating the original OLS model. Y and Xs are flattened.
Also, the betas and tvalues dictionaries are initialized (key:independent variables, value:[])
:return:
'''
self.v[self.Y.keys()[0]] = self._getFlatten(self.Y.values()[0])
self._initCoefficients(INTERCEPT)
for k,x in self.X.items():
if k == self.Y.keys()[0]:
utils.printf('ERROR: Idependent variable cannot be named \'[}\''.format(self.Y.keys()[0]), self.logfile)
sys.exit(0)
self.v[k] = self._getFlatten(x)
self._initCoefficients(k)
self.data = pandas.DataFrame(self.v)
self.model = self._fit(self.v.keys(), self.data)
del(self.X)
    def profiling(self, key):
        # Record a memory-usage snapshot labelled with `key`.
        self.memory.check_memory(key)
#####################################################################################
# Core QAP methods
#####################################################################################
def mrqap(self):
'''
MultipleRegression Quadratic Assignment Procedure
:return:
'''
directed = 'd' if self.directed else 'i'
key = self.npermutations if self.memory.perm else self.n
self.profiling('init-{}-{}'.format(directed, key))
self.init()
self.profiling('shuffle-{}-{}'.format(directed, key))
self._shuffle()
self.profiling('end-{}-{}'.format(directed, key))
    def _shuffle(self):
        '''
        Shuffling rows and columns npermutations times.
        beta coefficients and tvalues are stored.
        :return:
        '''
        for p in range(self.npermutations):
            # fresh copy each round so permutations never accumulate
            self.Ymod = self.Y.values()[0].copy()
            self._rmperm()
            model = self._newfit()
            self._update_betas(model._results.params)
            self._update_tvalues(model.tvalues)
            # drop the permuted matrix and force a collection to bound
            # peak memory across many permutations
            self.Ymod = None
            gc.collect()
def _newfit(self):
'''
Generates a new OLS fit model
:return:
'''
newv = collections.OrderedDict()
newv[self.Y.keys()[0]] = self._getFlatten(self.Ymod)
for k,x in self.v.items():
if k != self.Y.keys()[0]:
newv[k] = x
newdata = pandas.DataFrame(newv)
newfit = self._fit(newv.keys(), newdata)
del(newdata)
del(newv)
return newfit
#####################################################################################
# Handlers
#####################################################################################
def _fit(self, keys, data):
'''
Fitting OLS model
v a dictionary with all variables.
:return:
'''
if self.standarized:
data = data.apply(lambda x: (x - np.mean(x)) / (np.std(x)), axis=0) #axis: 0 to each column, 1 to each row
formula = '{} ~ {}'.format(self.Y.keys()[0], ' + '.join([k for k in keys if k != self.Y.keys()[0]]))
return ols(formula, data).fit()
    def _initCoefficients(self, key):
        # start an empty permutation distribution for this coefficient
        self.betas[key] = []
        self.tvalues[key] = []
def _rmperm(self, duplicates=True):
shuffle = np.random.permutation(self.Ymod.shape[0])
np.take(self.Ymod,shuffle,axis=0,out=self.Ymod)
np.take(self.Ymod,shuffle,axis=1,out=self.Ymod)
del(shuffle)
def _update_betas(self, betas):
for idx,k in enumerate(self.betas.keys()):
self.betas[k].append(round(betas[idx],6))
def _update_tvalues(self, tvalues):
for k in self.tvalues.keys():
self.tvalues[k].append(round(tvalues[k],6))
    def _getFlatten(self, original):
        '''
        Flattens a matrix into a 1-d vector, removing the diagonal entries
        when self.diagonal is False (delegates to _deleteDiagonalFlatten).
        '''
        return self._deleteDiagonalFlatten(original)
def _deleteDiagonalFlatten(self, original):
tmp = original.flatten()
if not self.diagonal:
tmp = np.delete(tmp, [i*(original.shape[0]+1)for i in range(original.shape[0])])
return tmp
def _zeroDiagonalFlatten(self, original):
tmp = original.copy()
if not self.diagonal:
np.fill_diagonal(tmp,0)
f = tmp.flatten()
del(tmp)
return f
#####################################################################################
# Prints
#####################################################################################
    def summary(self):
        '''
        Prints the OLS original summary, the beta-coefficient and t-value
        permutation summaries, and the final t-test table.
        :return:
        '''
        self._summary_ols()
        self._summary_betas()
        self._summary_tvalues()
        self._ttest()
    def _summary_ols(self):
        '''
        Prints the summary of the original (unpermuted) OLS model, followed
        by the number of permutations that were run.
        :return:
        '''
        utils.printf('', self.logfile)
        utils.printf('=== Summary OLS (original) ===\n{}'.format(self.model.summary()), self.logfile)
        utils.printf('', self.logfile)
        utils.printf('# of Permutations: {}'.format(self.npermutations), self.logfile)
def _summary_betas(self):
'''
Summary of beta coefficients
:return:
'''
utils.printf('', self.logfile)
utils.printf('=== Summary beta coefficients ===', self.logfile)
utils.printf('{:20s}{:>10s}{:>10s}{:>10s}{:>10s}{:>12s}{:>12s}{:>12s}{:>12s}{:>12s}'.format('INDEPENDENT VAR.','MIN','MEDIAN','MEAN','MAX','STD. DEV.','B.COEFF.','As Large', 'As Small', 'P-VALUE'), self.logfile)
for k,v in self.betas.items():
beta = self.model.params[k]
pstats = self.model.pvalues[k]
aslarge = sum([1 for c in v if c >= beta]) / float(len(v))
assmall = sum([1 for c in v if c <= beta]) / float(len(v))
utils.printf('{:20s}{:10f}{:10f}{:10f}{:10f}{:12f}{:12f}{:12f}{:12f}{:12f}'.format(k,min(v),sorted(v)[len(v)/2],sum(v)/len(v),max(v),round(np.std(v),6),beta,aslarge,assmall,round(float(pstats),2)), self.logfile)
def _summary_tvalues(self):
'''
Summary t-values
:return:
'''
utils.printf('', self.logfile)
utils.printf('=== Summary T-Values ===', self.logfile)
utils.printf('{:20s}{:>10s}{:>10s}{:>10s}{:>10s}{:>12s}{:>12s}{:>12s}{:>12s}'.format('INDEPENDENT VAR.','MIN','MEDIAN','MEAN','MAX','STD. DEV.','T-TEST','As Large', 'As Small'), self.logfile)
for k,v in self.tvalues.items():
tstats = self.model.tvalues[k]
aslarge = sum([1 for c in v if c >= tstats]) / float(len(v))
assmall = sum([1 for c in v if c <= tstats]) / float(len(v))
utils.printf('{:20s}{:10f}{:10f}{:10f}{:10f}{:12f}{:12f}{:12f}{:12f}'.format(k,min(v),sorted(v)[len(v)/2],sum(v)/len(v),max(v),round(np.std(v),6),round(float(tstats),2),aslarge,assmall), self.logfile)
def _ttest(self):
utils.printf('')
utils.printf('========== T-TEST ==========')
utils.printf('{:25s} {:25s} {:25s} {:25s}'.format('IND. VAR.','COEF.','T-STAT','P-VALUE'))
ts = {}
lines = {}
for k,vlist in self.betas.items():
t = stats.ttest_1samp(vlist,self.model.params[k])
ts[k] = abs(round(float(t[0]),6))
lines[k] = '{:20s} {:25f} {:25f} {:25f}'.format(k,self.model.params[k],round(float(t[0]),6),round(float(t[1]),6))
ts = utils.sortDictByValue(ts,True)
for t in ts:
utils.printf(lines[t[0]])
#####################################################################################
# Plots
#####################################################################################
def plot(self,coef='betas',fn=None):
'''
Plots frequency of pearson's correlation values
:param coef: string \in {betas, tvalues}
:return:
'''
ncols = 3
m = len(self.betas.keys())
ranges = range(ncols, m, ncols)
i = np.searchsorted(ranges, m, 'left')
nrows = len(ranges)
if i == nrows:
ranges.append((i+1)*ncols)
nrows += 1
fig = plt.figure(figsize=(8,3*i))
for idx,k in enumerate(self.betas.keys()):
plt.subplot(nrows,ncols,idx+1)
if coef == 'betas':
plt.hist(self.betas[k])
elif coef == 'tvalues':
plt.hist(self.tvalues[k])
plt.xlabel('regression coefficients', fontsize=8)
plt.ylabel('frequency', fontsize=8)
plt.title(k)
plt.grid(True)
for ax in fig.get_axes():
ax.tick_params(axis='x', labelsize=5)
ax.tick_params(axis='y', labelsize=5)
plt.tight_layout()
plt.savefig(fn)
plt.close()
|
The Molly Skirt is a fun and stylish skirt for all ages!
With an oversized modern bow and ribbon tucked up in a bubble skirt. The skirt has fun surprises like in seam pockets and a paper bag effect for the waistband. Skirt can be worn higher on the waist for a fun and full style.
Pattern is complete with size guide and cutting guide.
Step by step pictures walk you through the pattern.
No need for a serger as all of the seams are neatly placed on the inside of the skirt and the finishing is clean and professional.
Pattern is rated Easy, without any zippers or fasteners all that is required is sewing a straight line and a gather.
If you come across any questions I am always willing to help via email.
Pattern comes as an instant download via email and is sent to the email address linked to PayPal. If you would like it sent to a different email address, please just send me a note to let me know.
|
import cPickle
import os
import re
from openpyxl import load_workbook
import xlsxwriter
__author__ = 'mwelland'
class VCFComparison:
def __init__(self, run_number, variant_dict, vcf_dir):
self.vcf_file = os.path.join(vcf_dir, '{}_anno.vcf.hg19_multianno.vcf'.format(run_number))
self.run_number = run_number
with open(variant_dict, 'r') as handle:
self.variant_dict = cPickle.load(handle)
self.genes = self.variant_dict.keys()
self.vcf = {}
self.results = {}
self.tempvcf = os.path.join(vcf_dir, 'tempout.vcf')
self.matches = 0
self.variants = 0
self.unmatched_predictions = 0
self.excel_dir = 'Excels'
self.mystery_genes = set()
self.transcripts = 0
self.perfect_genes = {'total': 0,
'list': set()}
self.total_observed = 0
def run(self):
self.squish_vcf()
self.open_vcf()
for gene in self.genes:
self.results[gene] = {'found': set(),
'not_found': {'in_vcf': set(),
'in_fq': set()}}
self.check_gene(gene)
os.remove(self.tempvcf)
self.add_missing_variants()
# Print all the output stuff to an excel document
for gene in self.results:
perfect = 0
for section in self.results[gene]['not_found']:
perfect += len(self.results[gene]['not_found'][section])
if perfect == 0:
self.perfect_genes['total'] += 1
self.perfect_genes['list'].add(gene)
self.write_excel()
if self.matches != self.variants:
print 'Total variants counted: {}'.format(self.variants)
print 'Total matches: {}'.format(self.matches)
print 'Predicted and not found:{}'.format(self.unmatched_predictions)
print 'Perfect genes: {}'.format(self.perfect_genes)
else:
print 'All variants found'
def write_excel(self):
# This method will take the results from the process and output to an excel
# There will be a summary page to condense the main details of the comparison
# Each gene will have a further page to describe results in detail
excel_out_name = os.path.join(self.excel_dir, 'run_{}_results.xlsx'.format(self.run_number))
workbook = xlsxwriter.Workbook(excel_out_name)
format_bold = workbook.add_format({'bold': True})
format_matched = workbook.add_format({'bg_color': '#ADFF2F'})
format_missing_db = workbook.add_format({'bg_color': '#F4A460'})
format_missing_excel = workbook.add_format({'bg_color': '#F08080'})
format_hyperlink = workbook.add_format({'font_color': '#0000FF'})
worksheet = workbook.add_worksheet('Summary')
worksheet.set_column(0, 0, 20)
worksheet.set_column(2, 2, 17)
row = 0
col = 0
worksheet.write(row, col, 'Summary Page', format_bold); row =+ 2
worksheet.write(row, 0, 'Genes featured:', format_bold)
worksheet.write(row, 1, '{}'.format(len(self.genes)), format_bold)
row += 1
worksheet.write(row, 0, 'Transcripts featured:', format_bold)
worksheet.write(row, 1, '{}'.format(self.transcripts), format_bold)
row += 1
worksheet.write(row, 0, 'Variants Expected:', format_bold)
worksheet.write(row, 1, '{}'.format(self.variants), format_bold)
row += 1
worksheet.write(row, 0, 'Variants in VCF:', format_bold)
worksheet.write(row, 1, '{}'.format(self.total_observed), format_bold)
row += 1
worksheet.write(row, 0, 'Variants Matched:', format_bold)
worksheet.write(row, 1, '{}'.format(self.matches), format_bold)
row += 1
worksheet.write(row, 0, 'Dodgy Gene names :', format_bold)
worksheet.write(row, 1, '{}'.format(', '.join(self.mystery_genes)), format_bold)
row += 1
worksheet.write(row, 0, 'Perfect genes ({}):'.format(self.perfect_genes['total']), format_bold)
if self.perfect_genes['total'] != 0:
col = 1
for gene in self.perfect_genes['list']:
worksheet.write(row, col, gene, format_bold)
if col == 1:
col += 1
else:
row += 1
col = 1
row += 2
worksheet.write(row, 0, 'Mismatches by Gene:', format_missing_excel)
row += 1
worksheet.write(row, 0, 'Gene', format_bold)
worksheet.write(row, 1, 'FastQ Predictions', format_bold)
worksheet.write(row, 2, 'VCF Results', format_bold)
highest_row = row + 1
for gene in self.results:
worksheet.write(highest_row, 0, """=HYPERLINK("#{0}!A1", "{0}")""".format(gene), format_hyperlink)
fq_row = highest_row
vcf_row = highest_row
if self.results[gene]['not_found']['in_fq']:
for result in self.results[gene]['not_found']['in_fq']:
worksheet.write(fq_row, 1, result); fq_row += 1
fq_row += 1
if self.results[gene]['not_found']['in_vcf']:
for result in self.results[gene]['not_found']['in_vcf']:
worksheet.write(vcf_row, 2, result); vcf_row += 1
vcf_row += 1
if vcf_row > fq_row:
highest_row = vcf_row
else:
highest_row = fq_row
worksheet.set_column(1, 1, 45)
worksheet.set_column(2, 2, 100)
for gene in self.results:
matches = len(self.results[gene]['found'])
mismatches = 0
for section in self.results[gene]['not_found']:
mismatches += len(self.results[gene]['not_found'][section])
total = mismatches + matches
worksheet = workbook.add_worksheet(gene)
worksheet.write(0, 1, """=HYPERLINK("#Summary!A1", "Link To Summary")""", format_hyperlink)
row = 0
col = 0
worksheet.write(row, col, gene, format_bold); row =+ 2
worksheet.write(row, col, 'Total Variants:', format_bold); col += 1
worksheet.write(row, col, '{}'.format(total), format_bold); row += 1; col -= 1
worksheet.write(row, col, 'Matched:', format_bold); col += 1
worksheet.write(row, col, '{}'.format(matches), format_bold); row += 1; col -= 1
worksheet.write(row, col, 'Not Matched:', format_bold); col += 1
worksheet.write(row, col, '{}'.format(mismatches), format_bold); row += 1
row += 2
worksheet.write(row, col, 'Variants Matched:', format_matched)
row += 1
for variant in self.results[gene]['found']:
worksheet.write(row, col, variant, format_matched)
row += 1
row += 2
if self.results[gene]['not_found']['in_vcf'] or self.results[gene]['not_found']['in_fq']:
worksheet.write(row, col, 'Unmatched Variants:', format_missing_excel)
row += 1
if self.results[gene]['not_found']['in_fq']:
worksheet.write(row, col, 'Predicted:', format_missing_excel); row += 1
for variant in self.results[gene]['not_found']['in_fq']:
worksheet.write(row, col, variant, format_missing_excel); row += 1
row += 2
else:
worksheet.write(row, col, 'No Predicted Variants:', format_missing_db); row += 2
if self.results[gene]['not_found']['in_vcf']:
worksheet.write(row, col, 'Unexpected:', format_missing_excel); row += 1
for variant in self.results[gene]['not_found']['in_vcf']:
worksheet.write(row, col, variant, format_missing_excel); row += 1
row += 2
else:
worksheet.write(row, col, 'No Unexpected Variants:', format_missing_db); row += 2
else:
worksheet.write(row, col, 'No Unmatched Variants:', format_missing_db)
worksheet.set_column(0, 0, 15)
worksheet.set_column(1, 1, 50)
workbook.close()
def add_missing_variants(self):
for gene in self.vcf:
for row in self.vcf[gene]:
# Search for specific string in complete row
# RegEx required as columns are not always in order
# GeneDetail.refGene=NM_002506:c.-6897A>G
active_match = 'Unknown Variant'
matched = False
if 'GeneDetail.refGene=.;' in row:
if 'AAChange.refGene=.;' not in row:
a = re.search('AAChange.refGene=.*?:(?P<HGVS>NM_.*?);', row)
b = re.search('AAChange.refGene=(?P<HGVS>NM.*?);', row)
if a:
active_match = a
matched = True
elif b:
active_match = b
matched = True
else:
a = re.search('GeneDetail.refGene=.*?:(?P<HGVS>NM_.*?);', row)
b = re.search('GeneDetail.refGene=(?P<HGVS>NM_.*?);', row)
if a:
active_match = a
matched = True
elif b:
active_match = b
matched = True
if matched:
filtered_list = self.filter_matches(active_match.group('HGVS'), gene)
self.results[gene]['not_found']['in_vcf'].add(', '.join(filtered_list))
else:
if gene in self.results:
self.results[gene]['not_found']['in_vcf'].add('Variant unknown')
else:
self.mystery_genes.add(gene)
def filter_matches(self, string, gene):
output_list = []
for element in string.split(';'):
nm_number = element.split(':')
if nm_number in self.variant_dict[gene].keys():
output_list.append(element)
if not output_list:
output_list.append(string.split(';')[0])
return output_list
def squish_vcf(self):
"""
This mini method just writes out only the non-header information from the original vcf into a new file
The file is written to a new output to make sure that it can be read again if required
The output is written in CSV format so that the csv.DictWriter method can be used
"""
with open(self.vcf_file, 'rU') as input_vcf:
with open(self.tempvcf, 'wb') as output_vcf:
for line in input_vcf:
if line[0] == '#':
pass
else:
output_vcf.write(line)
def open_vcf(self):
"""
Add all contents from the VCF into a dictionary object which can be sorted through by gene
Use regex to capture the gene name, create a dictionary index which is the gene name (if not already an index)
Add the row to a list in the dictionary
Might be best to treat the whole 'INFO' block as a single string, as different variants are annotated in
different columns, depending on whether they are 5'UTR, 3'UTR or exonic...
Ready to begin matching against pickled contents
"""
with open(self.tempvcf) as csvfile:
for row in csvfile:
self.total_observed += 1
search_string = row.split('\t')[7]
match = re.search(';Gene\.refGene=(?P<gene_name>,?.*?);', search_string)
if match:
gene = match.group('gene_name')
if gene in self.vcf:
self.vcf[gene].append(search_string)
else:
self.vcf[gene] = [search_string]
else:
print "couldn't match the variant in %s" % row
def check_gene(self, gene):
rows_to_delete = []
gene_vcf = self.vcf[gene]
rows = range(len(gene_vcf))
for transcript in self.variant_dict[gene]:
self.transcripts += 1
variants = self.variant_dict[gene][transcript]
exons = variants.keys()
for exon in exons:
self.variants += 1
hgvs = variants[exon]['hgvs']
found = False
# If the variant is 3' UTR or 5' UTR, e.g. c.-69A>C:
if hgvs[2] == '-' or hgvs[2] == '*':
# Use the exact sequence predicted to write the gene
variant = '{0}:{1}:{2}'.format(gene, transcript, hgvs)
for row in rows:
match = re.search('({0}:)?{1}:{2}'.format(gene, transcript, hgvs), gene_vcf[row])
if match:
rows_to_delete.append(row)
found = True
self.matches += 1
self.results[gene]['found'].add(variant)
break
else:
# Use the exact sequence predicted to write the gene
variant = '{0}:{1}:exon{2}:{3}'.format(gene, transcript, exon, hgvs)
for row in rows:
match = re.search('({0}:)?{1}:.*?:{2}'.format(gene, transcript, hgvs), gene_vcf[row])
if match:
rows_to_delete.append(row)
found = True
self.matches += 1
self.results[gene]['found'].add(variant)
break
"""
This section will allow matches to be made which are less specific.
This may not be useful if exon numbers are required, but exon numbers
may change between systems more easily than variant nomenclature.
Matching only on nomenclature should be fine for this project.
"""
if not found:
for row in rows:
if hgvs in gene_vcf[row]:
rows_to_delete.append(row)
found = True
self.matches += 1
self.results[gene]['found'].add(variant)
break
if not found:
self.results[gene]['not_found']['in_fq'].add(variant)
self.unmatched_predictions += 1
# Delete any rows which have been matched against
# This is done in reverse, high indexes first
# From low to high would mean the list shrinks and high indexes are invalid
for row in sorted(rows_to_delete, reverse=True):
del gene_vcf[row]
self.vcf[gene] = gene_vcf
|
Zain Patel is working as a Data Scientist Intern at Satavia; he is currently a third-year mathematics undergraduate at the University of Cambridge. He is avidly interested in solving puzzles and has attended various hackathons across the world. In his spare time, he enjoys kayaking and swimming.
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
Aggregate.py
---------------------
Date : February 2017
Copyright : (C) 2017 by Arnaud Morvan
Email : arnaud dot morvan at camptocamp dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Arnaud Morvan'
__date__ = 'February 2017'
__copyright__ = '(C) 2017, Arnaud Morvan'
from qgis.core import (
QgsDistanceArea,
QgsExpression,
QgsExpressionContextUtils,
QgsFeature,
QgsFeatureSink,
QgsField,
QgsFields,
QgsGeometry,
QgsProcessing,
QgsProcessingParameterDefinition,
QgsProcessingParameterExpression,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFeatureSource,
QgsProcessingException,
QgsProcessingUtils,
QgsWkbTypes,
)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class Aggregate(QgisAlgorithm):
    """
    Processing algorithm that groups features of a vector layer by an
    expression and computes per-group aggregate attributes plus a combined
    (unioned) geometry into a new output layer.
    """

    INPUT = 'INPUT'
    GROUP_BY = 'GROUP_BY'
    AGGREGATES = 'AGGREGATES'
    DISSOLVE = 'DISSOLVE'
    OUTPUT = 'OUTPUT'

    def group(self):
        return self.tr('Vector geometry')

    def groupId(self):
        return 'vectorgeometry'

    def name(self):
        return 'aggregate'

    def displayName(self):
        return self.tr('Aggregate')

    def initAlgorithm(self, config=None):
        self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
                                                              self.tr('Input layer'),
                                                              types=[QgsProcessing.TypeVector]))
        self.addParameter(QgsProcessingParameterExpression(self.GROUP_BY,
                                                           self.tr('Group by expression (NULL to group all features)'),
                                                           defaultValue='NULL',
                                                           optional=False,
                                                           parentLayerParameterName=self.INPUT))

        # Custom parameter type: a list of dicts, one per output field, each
        # needing at least 'input', 'aggregate', 'name' and 'type' keys.
        class ParameterAggregates(QgsProcessingParameterDefinition):

            def __init__(self, name, description, parentLayerParameterName='INPUT'):
                super().__init__(name, description)
                self._parentLayerParameter = parentLayerParameterName

            def clone(self):
                copy = ParameterAggregates(self.name(), self.description(), self._parentLayerParameter)
                return copy

            def type(self):
                return 'aggregates'

            def checkValueIsAcceptable(self, value, context=None):
                if not isinstance(value, list):
                    return False
                for field_def in value:
                    if not isinstance(field_def, dict):
                        return False
                    if not field_def.get('input', False):
                        return False
                    if not field_def.get('aggregate', False):
                        return False
                    if not field_def.get('name', False):
                        return False
                    if not field_def.get('type', False):
                        return False
                return True

            def valueAsPythonString(self, value, context):
                return str(value)

            def asScriptCode(self):
                raise NotImplementedError()

            @classmethod
            def fromScriptCode(cls, name, description, isOptional, definition):
                raise NotImplementedError()

            def parentLayerParameter(self):
                return self._parentLayerParameter

        self.addParameter(ParameterAggregates(self.AGGREGATES,
                                              description=self.tr('Aggregates')))
        self.parameterDefinition(self.AGGREGATES).setMetadata({
            'widget_wrapper': 'processing.algs.qgis.ui.AggregatesPanel.AggregatesWidgetWrapper'
        })
        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT,
                                                            self.tr('Aggregated')))

    def parameterAsAggregates(self, parameters, name, context):
        # the custom parameter is passed through unchanged
        return parameters[name]

    def prepareAlgorithm(self, parameters, context, feedback):
        """
        Resolves parameters and pre-builds the group-by, geometry-collect and
        per-field aggregate expressions used in processAlgorithm.
        """
        source = self.parameterAsSource(parameters, self.INPUT, context)
        if source is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
        group_by = self.parameterAsExpression(parameters, self.GROUP_BY, context)
        aggregates = self.parameterAsAggregates(parameters, self.AGGREGATES, context)

        da = QgsDistanceArea()
        da.setSourceCrs(source.sourceCrs(), context.transformContext())
        da.setEllipsoid(context.project().ellipsoid())

        self.source = source
        self.group_by = group_by
        self.group_by_expr = self.createExpression(group_by, da, context)
        self.geometry_expr = self.createExpression('collect($geometry, {})'.format(group_by), da, context)

        self.fields = QgsFields()
        self.fields_expr = []
        for field_def in aggregates:
            self.fields.append(QgsField(name=field_def['name'],
                                        type=field_def['type'],
                                        typeName="",
                                        len=field_def['length'],
                                        prec=field_def['precision']))
            aggregate = field_def['aggregate']
            if aggregate == 'first_value':
                # no aggregate function needed: the first feature's value wins
                expression = field_def['input']
            elif aggregate == 'concatenate' or aggregate == 'concatenate_unique':
                # concatenate takes an extra 'use distinct' flag and a delimiter
                expression = ('{}({}, {}, {}, \'{}\')'
                              .format(field_def['aggregate'],
                                      field_def['input'],
                                      group_by,
                                      'TRUE',
                                      field_def['delimiter']))
            else:
                expression = '{}({}, {})'.format(field_def['aggregate'],
                                                 field_def['input'],
                                                 group_by)
            expr = self.createExpression(expression, da, context)
            self.fields_expr.append(expr)
        return True

    def processAlgorithm(self, parameters, context, feedback):
        """
        Pass 1 (0-50%): bucket features into per-group memory layers keyed by
        the group-by value. Pass 2 (50-100%): evaluate the geometry and field
        expressions per group and write one output feature each.
        """
        expr_context = self.createExpressionContext(parameters, context, self.source)
        self.group_by_expr.prepare(expr_context)

        # Group features in memory layers
        source = self.source
        count = self.source.featureCount()
        if count:
            progress_step = 50.0 / count
        current = 0
        groups = {}
        keys = []  # We need deterministic order for the tests
        feature = QgsFeature()
        for feature in self.source.getFeatures():
            expr_context.setFeature(feature)
            group_by_value = self.evaluateExpression(self.group_by_expr, expr_context)

            # Get an hashable key for the dict
            key = group_by_value
            if isinstance(key, list):
                key = tuple(key)

            group = groups.get(key, None)
            if group is None:
                sink, id = QgsProcessingUtils.createFeatureSink(
                    'memory:',
                    context,
                    source.fields(),
                    source.wkbType(),
                    source.sourceCrs())
                layer = QgsProcessingUtils.mapLayerFromString(id, context)
                group = {
                    'sink': sink,
                    'layer': layer,
                    'feature': feature  # representative feature for pass 2
                }
                groups[key] = group
                keys.append(key)
            group['sink'].addFeature(feature, QgsFeatureSink.FastInsert)
            current += 1
            feedback.setProgress(int(current * progress_step))
            if feedback.isCanceled():
                return

        (sink, dest_id) = self.parameterAsSink(parameters,
                                               self.OUTPUT,
                                               context,
                                               self.fields,
                                               QgsWkbTypes.multiType(source.wkbType()),
                                               source.sourceCrs())
        if sink is None:
            raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))

        # Calculate aggregates on memory layers
        if len(keys):
            progress_step = 50.0 / len(keys)
        for current, key in enumerate(keys):
            group = groups[key]
            expr_context = self.createExpressionContext(parameters, context)
            expr_context.appendScope(QgsExpressionContextUtils.layerScope(group['layer']))
            expr_context.setFeature(group['feature'])

            geometry = self.evaluateExpression(self.geometry_expr, expr_context)
            if geometry is not None and not geometry.isEmpty():
                geometry = QgsGeometry.unaryUnion(geometry.asGeometryCollection())
                if geometry.isEmpty():
                    # report the current group's key: the previous code used
                    # group_by_value, which was stale from the last feature
                    # of the bucketing loop above
                    raise QgsProcessingException(
                        'Impossible to combine geometries for {} = {}'
                        .format(self.group_by, key))

            attrs = []
            for fields_expr in self.fields_expr:
                attrs.append(self.evaluateExpression(fields_expr, expr_context))

            # Write output feature
            outFeat = QgsFeature()
            if geometry is not None:
                outFeat.setGeometry(geometry)
            outFeat.setAttributes(attrs)
            sink.addFeature(outFeat, QgsFeatureSink.FastInsert)

            feedback.setProgress(50 + int(current * progress_step))
            if feedback.isCanceled():
                return

        return {self.OUTPUT: dest_id}

    def createExpression(self, text, da, context):
        """Parses an expression, wiring in the distance/area calculator; raises on parse errors."""
        expr = QgsExpression(text)
        expr.setGeomCalculator(da)
        expr.setDistanceUnits(context.project().distanceUnits())
        expr.setAreaUnits(context.project().areaUnits())
        if expr.hasParserError():
            raise QgsProcessingException(
                self.tr(u'Parser error in expression "{}": {}')
                .format(text, expr.parserErrorString()))
        return expr

    def evaluateExpression(self, expr, context):
        """Evaluates an expression against the given context; raises on evaluation errors."""
        value = expr.evaluate(context)
        if expr.hasEvalError():
            raise QgsProcessingException(
                self.tr(u'Evaluation error in expression "{}": {}')
                .format(expr.expression(), expr.evalErrorString()))
        return value
|
Want to Add Value and Desirability to Your Home?
- Remodel your Kitchen. If you want to increase the value of your home, add modern appliances, clean or reface cabinets, consider adding a solid surface countertop and new fixtures.
- Remodel your Bathrooms. Buyers today are looking for modern conveniences. Do you need an updated vanity, countertop and fixtures? If you have wallpaper, it’s a MUST for you to remove and replace with a fresh coat of neutral colored paint.
- Replace flooring where applicable. Hardwood and ceramic tile are very popular among today’s buyers. Carpet is passe!
- Remove “unnecessary belonging” (i.e. clutter). If you’re selling your home, you can get a head start with packing by boxing up all your knick-knacks, photographs, etc. Clutter makes rooms feel small and dirty. Open up your floor space and rearrange the furniture to take advantage of the floor plan.
For more ideas, please contact me at Michelle@HarrisburgHouses.net.
Are you looking for a home in Harrisburg? You’re in luck. In addition to the current tax credit and low interest rates, there’s a new home ownership incentive program that’s available.
Please contact me at Michelle@HarrisburgHouses.net for more information.
Even though the contract negotiations are over, you still can’t relax because the dreaded home inspection is right around the corner. Below is a list of some of the most common defects that home inspectors look for during their detailed inspection.
- Improper or insufficient electrical wiring. An inspector will be looking for potential fire hazards. Wires need to be housed properly in a box, not hanging loose. If you need electrical work done, make sure you hire a certified electrician who obtains the proper permits (when applicable). This is an area where you don’t want to cut corners to save a few dollars because in the end it may cost you thousands!
- Plumbing problems such as leaky faucets, corroding pipes, improperly installed hot water heaters, and loose toilets. Something as small as a leak can lead to mold, mildew and even structural damage when left unattended.
- Roof deterioration. Home owners should routinely check for loose, missing or damaged shingles. If you tend to the small repairs on a regular basis, you can stave off costly repairs (or even a full roof replacement). Eventually, you’ll have to succumb to a roof replacement but until then, take care of what you currently have.
- Overall neglect. The details speak volumes as to what type of home owner you are. If a potential buyer sees peeling paint, decayed caulking around windows and doors, broken fixtures, gutters overflowing, etc., it may lead them to the conclusion that the overall “health” of the home is poor. However, if you take care of the visual details and disclose service maintenance records, chances are the buyer is going to go into the home inspection feeling confident about their decision and may overlook the small stuff should the inspector find anything.
Depending on the situation, a seller may want to have a pre-listing home inspection. The benefit is that there won’t be any surprises once a willing, able buyer is identified. However, sellers must keep in mind that what you know about your property you MUST disclose. Therefore, if the inspector indicates that the current electrical wiring is insufficient for the current size of the home due to the family room addition, the seller has two options. 1) Upgrade the electrical service to accommodate the square footage of the home and document this upgrade in the seller’s disclosure, or 2) Do nothing and disclose the home inspector’s observation in the seller’s disclosure. Obviously, it’s recommended for a seller to fix the problem so it’s no longer a potential issue.
For more information, please contact me directly at Michelle@HarrisburgHouses.net.
Federal tax incentives are better than ever now for home improvements that conserve energy. The Stimulus package offers some great tax advantages for work you have done to save energy (and money). The Federal Weatherization Assistance Program allows for up to $6,500 to be spent on energy-saving improvements. Tax credits up to $1,500 are available for energy-saving improvements that are Energy Star qualified, including insulation, windows and doors, roofing, HVAC and solar systems.
|
#!/usr/bin/python
#####################################
class SequenceUnit:
    """
    A pattern unit with a prespecified sequence and, possibly, an allowed number of
    mismatches (M), insertions (I), and deletions (D).
    """
    def __init__(self, seq, M, I, D):
        self.sequence = seq
        self.M = M  # allowed mismatches
        self.I = I  # allowed insertions
        self.D = D  # allowed deletions
        self.match_set = MatchSet()

    def matches(self, seq, pos):
        """
        Collects all matches of self.sequence in seq starting at pos into
        self.match_set and returns True when at least one was found.
        """
        self.match_set.reset(pos)
        loose_match(seq, pos, self.sequence, self.M, self.I, self.D, 0, self.match_set)
        # removed leftover debug print of end_positions: it spammed stdout on
        # every match attempt and no sibling unit (RangeUnit/ReferenceUnit) prints
        return len(self.match_set.end_positions) != 0
class RangeUnit:
    """
    A pattern unit matching any character sequence whose length falls within
    [min_len, max_len].
    """
    def __init__(self, min_len, max_len):
        self.min_len = min_len
        self.max_len = max_len
        self.match_set = MatchSet()

    def matches(self, seq, pos):
        """Collects all range matches starting at pos; True when any exists."""
        self.match_set.reset(pos)
        range_match(seq, pos, self.min_len, self.max_len, self.match_set)
        return bool(self.match_set.end_positions)
class ReferenceUnit:
    """
    A pattern unit that extracts previous matches from another "referenced"
    pattern unit and attempts to match the sequence of these matches,
    optionally reversed and/or complemented, allowing for a number of
    mismatches, insertions and deletions.
    """
    def __init__(self, ref_unit, M, I, D, reverse, complement):
        self.ref_unit = ref_unit
        self.M = M  # allowed mismatches
        self.I = I  # allowed insertions
        self.D = D  # allowed deletions
        self.reverse = reverse
        self.complement = complement
        self.match_set = MatchSet()

    def matches(self, seq, pos):
        """Replays the referenced unit's matches at pos; True when any fits."""
        self.match_set.reset(pos)
        reference_match(seq, pos, self.ref_unit.match_set, self.M, self.I,
                        self.D, self.reverse, self.complement, self.match_set)
        return bool(self.match_set.end_positions)
class CompositeUnit:
    """
    A pattern that is composed of other pattern units that must match
    consecutively (each starting where some match of the previous one ended)
    for the composite to pass.
    """
    def __init__(self, units):
        self.units = units
        self.match_set = MatchSet()

    def rest_matches(self, seq, positions, pu_idx):
        """Recursively matches units[pu_idx:] starting from any of positions."""
        if pu_idx >= len(self.units):
            return True
        unit = self.units[pu_idx]
        for start in positions:
            if not unit.matches(seq, start):
                continue
            if self.rest_matches(seq, unit.match_set.end_positions, pu_idx + 1):
                return True
        return False

    def matches(self, seq, pos):
        """True when every unit matches in sequence starting at pos."""
        return self.rest_matches(seq, [pos], 0)
#####################################
class MatchSet:
    """All matches sharing one common start position.

    ``pos`` is the start index and ``end_positions`` the set of exclusive
    end indices: e.g. pos=2 with end-position 5 in "ATCCAG" denotes the
    sub-sequence "CCA".
    """
    def __init__(self, pos=-1):
        self.pos = pos
        self.end_positions = set()
    def __str__(self):
        return "Match[" + str(self.pos) + "," + str(self.end_positions) + "]"
    def reset(self, new_pos):
        """Move the start to ``new_pos`` and drop all recorded ends."""
        self.pos = new_pos
        self.end_positions.clear()
#####################################
# Translation table mapping each nucleotide to its complement
# (RNA's U maps to A, like T).
try:
    from string import maketrans  # Python 2
except ImportError:
    # Bug fix for forward compatibility: in Python 3 ``string.maketrans``
    # was removed; the equivalent factory is ``str.maketrans``.
    maketrans = str.maketrans
complement_table = maketrans("ATUCG", "TAAGC")
def nucl_complement(S):
    """Return the REVERSE complement of S.

    NOTE(review): despite the name, the trailing ``[::-1]`` reverses the
    complemented string, so this returns the reverse complement --
    confirm callers rely on that (reference_match applies its ``reverse``
    flag separately).
    """
    return S.translate(complement_table)[::-1]
def nucl_reverse(S):
    """Return S with its characters in reverse order."""
    return "".join(reversed(S))
def reference_match(S, i, match_set, M, I, D, reverse, complement, ret=None):
    """
    Take the string of a previous match and match it to S, allowing for a
    number of mismatches, insertions, and deletions.
    S --- The string in which to search for matches
    i --- The position in S from which to search
    match_set --- The matches of the referenced pattern-unit
    M --- The allowed number of mismatches
    I --- The allowed number of insertions
    D --- The allowed number of deletions
    reverse --- Reverse each referenced match before matching
    complement --- Complement each referenced match before matching
    ret --- An accumulating set of matches (a fresh MatchSet if omitted)
    Returns ret when at least one match was found, else None.
    """
    # Bug fix: the default used to be a single shared module-level instance
    # (``ret=MatchSet()``), so end-positions leaked between unrelated calls.
    # Allocate a fresh accumulator per call instead.
    if ret is None:
        ret = MatchSet()
    if match_set is None:
        return None
    ret.pos = i
    for ep in match_set.end_positions:
        P = S[match_set.pos:ep]  # text of one previously captured match
        if reverse:
            P = nucl_reverse(P)
        if complement:
            P = nucl_complement(P)
        loose_match(S, i, P, M, I, D, 0, ret)
    if ret.end_positions:
        return ret
    return None
def range_match(S, i, minLen, maxLen, ret=None):
    """
    Match a range to a string given the minimum and maximum length of the range.
    S --- The string in which to search for matches
    i --- The position in S from which to search
    minLen --- The minimum length of the range
    maxLen --- The maximum length of the range
    ret --- An accumulating set of matches (a fresh MatchSet if omitted)
    Returns ret when at least one end-position exists, else None.
    """
    # Bug fix: the default used to be one shared ``MatchSet(0)`` instance,
    # so results from earlier calls leaked into later ones.
    if ret is None:
        ret = MatchSet(0)
    ret.pos = i
    # NOTE(review): the clamp to len(S) excludes len(S) itself as an
    # end-position, so a range ending exactly at the end of S is rejected,
    # while loose_match does allow end-position len(S) -- confirm intent.
    ret.end_positions.update(range(i + minLen, min(i + maxLen + 1, len(S))))
    if ret.end_positions:
        return ret
    return None
def loose_match(S, i, P, M, I, D, j=0, ret=None):
    """
    Match a pattern to a string given an allowed number of mismatches,
    insertions and deletions.
    S --- The string in which to search for matches
    i --- The position in S from which to search
    P --- The pattern string
    M --- The allowed number of mismatches
    I --- The allowed number of insertions
    D --- The allowed number of deletions
    j --- The position in P from which to search
    ret --- An accumulating set of matches (a fresh MatchSet if omitted)
    Returns ret when at least one end-position was found, else None.
    """
    # Bug fix: the default used to be one shared ``MatchSet(None)`` whose
    # ``pos`` stuck to the first call and whose end_positions accumulated
    # across unrelated calls. Allocate per top-level call; the recursive
    # calls below always pass ``ret`` explicitly.
    if ret is None:
        ret = MatchSet(None)
    if ret.pos is None:
        ret.pos = i
    if j == len(P):
        # Whole pattern consumed: i is a valid (exclusive) end-position.
        ret.end_positions.add(i)
        return ret
    if i == len(S):
        return None  # ran off the subject string with pattern left over
    if I > 0:
        loose_match(S, i + 1, P, M, I - 1, D, j, ret)      # insertion in S
    if D > 0:
        loose_match(S, i, P, M, I, D - 1, j + 1, ret)      # deletion from P
    if S[i] == P[j]:
        loose_match(S, i + 1, P, M, I, D, j + 1, ret)      # exact character
    if M > 0:
        loose_match(S, i + 1, P, M - 1, I, D, j + 1, ret)  # mismatch
    if not ret.end_positions:
        return None
    return ret
def scan_for_matches(P, S):
    """Slide pattern unit ``P`` over every start position of ``S`` and
    report (on stdout) each position where the full pattern matches."""
    for pos in range(0, len(S)):
        if P.matches(S, pos):
            # Python 2 print statement (this module targets Python 2).
            print "Full pattern match at position", pos
#p1 = SequenceUnit("ATA",1,0,1)
#p2 = SequenceUnit("AAA",0,0,0)
#P = CompositeUnit([ p1,p2 ])
#S = "AATAAAGAA"
#p1 = SequenceUnit("ATA",0,0,0)
#p2 = RangeUnit(2,5)
#p3 = ReferenceUnit(p1,1,0,0, False, False)
#P = CompositeUnit([ p1,p2,p3 ]) # Corresponds to the SFM pattern "p1=AAG[1,0,0] 2..5 p1"
#S = "AATAAAAGAA"
#p1 = RangeUnit(4,10)
#p2 = RangeUnit(4,4)
#p3 = ReferenceUnit(p1,0,0,0, True, True)
#P = CompositeUnit([p1,p2,p3]) # "p1=4...10 4...4 ~p1"
# scan_for_matches(P, S)
|
This is a placeholder page for Gabriel Montemayor, which means this person is not currently on this site. We do suggest using the tools below to find Gabriel Montemayor.
You are visiting the placeholder page for Gabriel Montemayor. This page is here because someone used our placeholder utility to look for Gabriel Montemayor. We created this page automatically in hopes Gabriel Montemayor would find it. If you are not Gabriel Montemayor, but are an alumnus or alumna of Sunnyside High School in Fresno, CA, register on this site for free now.
|
from unittest import TestCase, mock
import os
import numpy
from subprocesses.dimensionality_reduction import _get_colors, plot_results
class TestColorMapping(TestCase):
    """Tests for ``_get_colors``: metadata-driven colouring versus the
    fallback colouring derived from per-row sums."""

    def setUp(self):
        # Expected per-row sums of self.arr (the fallback colour values).
        self.manhattan = [6, 11, -1, 21, 14]
        # NOTE(review): the last row has only two entries, so numpy builds a
        # ragged object array here; recent numpy versions require
        # dtype=object for this and may raise otherwise -- confirm.
        self.arr = numpy.asarray([[1, 2, 3], [2, 5, 4], [3, 4, -8], [4, 11, 6], [5, 9]])
        self.metadata = {"test_tag": {"__filterable": True,
                                      "v1":
                                          {"points": [0, 2, 3],
                                           "color": "#ffffff"},
                                      "v2":
                                          {"points": [1, 4],
                                           "color": "#ff0000"},
                                      }}

    def test_color_by_manhattan(self):
        colors = _get_colors(self.arr)
        self.assertSequenceEqual(colors, self.manhattan)

    def test_color_by_metadata(self):
        colors = _get_colors(self.arr, self.metadata, "test_tag")
        white = (1.0, 1.0, 1.0)
        red = (1.0, 0.0, 0.0)
        # Points 0/2/3 belong to v1 (white), points 1/4 to v2 (red).
        self.assertSequenceEqual(colors, [white, red, white, white, red])

    def test_color_missing_metadata(self):
        # No metadata at all: falls back to the row-sum colouring.
        colors = _get_colors(self.arr, None, "test_tag")
        self.assertSequenceEqual(colors, self.manhattan)

    def test_color_missing_colorby(self):
        # Metadata present but no tag selected: same fallback.
        colors = _get_colors(self.arr, self.metadata)
        self.assertSequenceEqual(colors, self.manhattan)
class TestPlotOutput(TestCase):
    """Verify that ``plot_results`` drives matplotlib with the expected
    arguments and writes the figure to the working directory."""

    def setUp(self):
        self.arr = numpy.asarray([[1, 2, 3], [2, 5, 4], [3, 4, -8], [4, 11, 6], [5, 9]])

    @mock.patch("subprocesses.dimensionality_reduction.plt")
    def test_plot_results(self, mock_plot):
        plot_results(self.arr)
        # A fixed-size 16x16 figure is created.
        mock_plot.figure.assert_called_with(figsize=(16, 16))
        positional, keyword = mock_plot.scatter.call_args
        # First two columns become the x/y coordinates.
        self.assertSequenceEqual(positional[0], [1, 2, 3, 4, 5])
        self.assertSequenceEqual(positional[1], [2, 5, 4, 11, 9])
        # Colour channel defaults to the per-row sums; point size is 20.
        self.assertSequenceEqual(keyword["c"], [6, 11, -1, 21, 14])
        self.assertEqual(keyword["s"], 20)
        mock_plot.tight_layout.assert_called()
        mock_plot.savefig.assert_called_with(os.path.join(os.getcwd(), "prints.png"))
        mock_plot.close.assert_called()
|
Short answer explanations: brief descriptions of physical phenomena, experiments or calculations.
Formal lab reports: short reports (5 to 10 pages) describing specific experiments or derivations.
Research theses: long reports (over 10 pages) describing more involved research projects carried out over months/years.
Journal articles: formal presentations of the results of scientific experiments and derivations, usually peer-reviewed.
PHYS-151, 152 and 153 Tutorials: In these three courses that comprise the initial major’s sequence, students participate in weekly 50 minute tutorials. In these, students are presented with conceptual problems which they discuss in groups and write out short answer explanations. There is also normally a pretest worksheet and follow-up homework with a similar format.
PHYS-151, 152 and 153 Labs: Each of these courses also contains a lab component, in which students carry out experiments and/or computer simulations. In the first two courses, the results are written up in a handout, in which they need to describe methods, present data and discuss results. In PHYS-153, this work is done in a lab book.
PHYS-154: Methods of Experimental Physics (required for all majors): All students in this class may be required to write a small number (2-4) of formal lab reports of 5 to 10 pages each. The reports are graded on a rubric (attached) based on the five points noted above, with feedback from the instructor helping students develop good scientific writing skills.
PHYS-301 to -304:Independent Research in Physics, or PHYS-311 to 314: Independent Research in Biological Physics (required for B.S. majors) Students in this course write a thesis based on their independent research. These are 10-20 pages for one semester and at least 20 pages for two semester projects. In addition to their research work, students enrolled in this course also attend periodic classes to strengthen writing, presentation and other professional skills. During these additional classes, drafts of the theses are reviewed by peers according to the scientific writing rubric (attached) developed for this course. The final drafts are graded by both their research mentor and one second reader from among the physics faculty. In addition, the research sometimes becomes part of a manuscript submitted to a peer-reviewed scientific journal, and the students are active participants in the preparation.
|
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os
from pip._internal.cli import cmdoptions
from pip._internal.cli.cmdoptions import make_target_python
from pip._internal.cli.req_command import RequirementCommand
from pip._internal.req import RequirementSet
from pip._internal.req.req_tracker import get_requirement_tracker
from pip._internal.utils.misc import ensure_dir, normalize_path, write_output
from pip._internal.utils.temp_dir import TempDirectory
logger = logging.getLogger(__name__)
class DownloadCommand(RequirementCommand):
    """
    Download packages from:
    - PyPI (and other indexes) using requirement specifiers.
    - VCS project urls.
    - Local project directories.
    - Local or remote source archives.

    pip also supports downloading from "requirements files", which provide
    an easy way to specify a whole environment to be downloaded.
    """

    usage = """
      %prog [options] <requirement specifier> [package-index-options] ...
      %prog [options] -r <requirements file> [package-index-options] ...
      %prog [options] <vcs project url> ...
      %prog [options] <local project path> ...
      %prog [options] <archive url/path> ..."""

    def __init__(self, *args, **kw):
        super(DownloadCommand, self).__init__(*args, **kw)

        cmd_opts = self.cmd_opts

        # Requirement-selection and build options shared with `pip install`.
        # Registration order is the order shown in `pip download --help`.
        cmd_opts.add_option(cmdoptions.constraints())
        cmd_opts.add_option(cmdoptions.requirements())
        cmd_opts.add_option(cmdoptions.build_dir())
        cmd_opts.add_option(cmdoptions.no_deps())
        cmd_opts.add_option(cmdoptions.global_options())
        cmd_opts.add_option(cmdoptions.no_binary())
        cmd_opts.add_option(cmdoptions.only_binary())
        cmd_opts.add_option(cmdoptions.prefer_binary())
        cmd_opts.add_option(cmdoptions.src())
        cmd_opts.add_option(cmdoptions.pre())
        cmd_opts.add_option(cmdoptions.no_clean())
        cmd_opts.add_option(cmdoptions.require_hashes())
        cmd_opts.add_option(cmdoptions.progress_bar())
        cmd_opts.add_option(cmdoptions.no_build_isolation())
        cmd_opts.add_option(cmdoptions.use_pep517())
        cmd_opts.add_option(cmdoptions.no_use_pep517())

        # Where to place the downloaded distributions (default: cwd).
        cmd_opts.add_option(
            '-d', '--dest', '--destination-dir', '--destination-directory',
            dest='download_dir',
            metavar='dir',
            default=os.curdir,
            help=("Download packages into <dir>."),
        )

        # --platform/--python-version/--implementation/--abi options for
        # downloading wheels targeting a different interpreter/platform.
        cmdoptions.add_target_python_options(cmd_opts)

        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )

        # Index options are inserted first so they precede the command
        # options in --help output.
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    def run(self, options, args):
        # Downloads never consider what is already installed.
        options.ignore_installed = True
        # editable doesn't really make sense for `pip download`, but the bowels
        # of the RequirementSet code require that property.
        options.editables = []

        # Validate that target-python restrictions are used with compatible
        # options (e.g. binary-only).
        cmdoptions.check_dist_restriction(options)

        options.download_dir = normalize_path(options.download_dir)
        ensure_dir(options.download_dir)

        session = self.get_default_session(options)

        target_python = make_target_python(options)
        finder = self._build_package_finder(
            options=options,
            session=session,
            target_python=target_python,
        )
        # Keep the build dir alive when the user supplied one explicitly or
        # asked for --no-clean; otherwise it is a temp dir deleted on exit.
        build_delete = (not (options.no_clean or options.build_dir))

        with get_requirement_tracker() as req_tracker, TempDirectory(
            options.build_dir, delete=build_delete, kind="download"
        ) as directory:

            requirement_set = RequirementSet()
            self.populate_requirement_set(
                requirement_set,
                args,
                options,
                finder,
                session,
                None
            )

            preparer = self.make_requirement_preparer(
                temp_build_dir=directory,
                options=options,
                req_tracker=req_tracker,
                session=session,
                finder=finder,
                download_dir=options.download_dir,
                use_user_site=False,
            )

            resolver = self.make_resolver(
                preparer=preparer,
                finder=finder,
                options=options,
                py_version_info=options.python_version,
            )

            self.trace_basic_info(finder)

            # Resolution downloads each requirement as a side effect.
            resolver.resolve(requirement_set)

            downloaded = ' '.join([
                req.name for req in requirement_set.successfully_downloaded
            ])
            if downloaded:
                write_output('Successfully downloaded %s', downloaded)

            # Clean up
            if not options.no_clean:
                requirement_set.cleanup_files()

        return requirement_set
|
2 Chronicles 13:1 Now in the eighteenth year of king Jeroboam began Abijah to reign over Judah. 2 Chronicles 13:2 He reigned three years in Jerusalem. His mother's name also [was] Michaiah the daughter of Uriel of Gibeah. And there was war between Abijah and Jeroboam. 2 Chronicles 13:3 And Abijah set the battle in array with an army of valiant men of war, [even] four hundred thousand chosen men: Jeroboam also set the battle in array against him with eight hundred thousand chosen men, [being] mighty men of valour.
2 Chronicles 13:4 And Abijah stood up upon mount Zemaraim, which [is] in mount Ephraim, and said, Hear me, thou Jeroboam, and all Israel; 2 Chronicles 13:5 Ought ye not to know that the LORD God of Israel gave the kingdom over Israel to David for ever, [even] to him and to his sons by a covenant of salt? 2 Chronicles 13:6 Yet Jeroboam the son of Nebat, the servant of Solomon the son of David, is risen up, and hath rebelled against his lord. 2 Chronicles 13:7 And there are gathered unto him vain men, the children of Belial, and have strengthened themselves against Rehoboam the son of Solomon, when Rehoboam was young and tenderhearted, and could not withstand them. 2 Chronicles 13:8 And now ye think to withstand the kingdom of the LORD in the hand of the sons of David; and ye [be] a great multitude, and [there are] with you golden calves, which Jeroboam made you for gods. 2 Chronicles 13:9 Have ye not cast out the priests of the LORD, the sons of Aaron, and the Levites, and have made you priests after the manner of the nations of [other] lands? so that whosoever cometh to consecrate himself with a young bullock and seven rams, [the same] may be a priest of [them that are] no gods. 2 Chronicles 13:10 But as for us, the LORD [is] our God, and we have not forsaken him; and the priests, which minister unto the LORD, [are] the sons of Aaron, and the Levites [wait] upon [their] business: 2 Chronicles 13:11 And they burn unto the LORD every morning and every evening burnt sacrifices and sweet incense: the shewbread also [set they in order] upon the pure table; and the candlestick of gold with the lamps thereof, to burn every evening: for we keep the charge of the LORD our God; but ye have forsaken him. 2 Chronicles 13:12 And, behold, God himself [is] with us for [our] captain, and his priests with sounding trumpets to cry alarm against you. O children of Israel, fight ye not against the LORD God of your fathers; for ye shall not prosper.
2 Chronicles 13:13 But Jeroboam caused an ambushment to come about behind them: so they were before Judah, and the ambushment [was] behind them. 2 Chronicles 13:14 And when Judah looked back, behold, the battle [was] before and behind: and they cried unto the LORD, and the priests sounded with the trumpets. 2 Chronicles 13:15 Then the men of Judah gave a shout: and as the men of Judah shouted, it came to pass, that God smote Jeroboam and all Israel before Abijah and Judah. 2 Chronicles 13:16 And the children of Israel fled before Judah: and God delivered them into their hand. 2 Chronicles 13:17 And Abijah and his people slew them with a great slaughter: so there fell down slain of Israel five hundred thousand chosen men. 2 Chronicles 13:18 Thus the children of Israel were brought under at that time, and the children of Judah prevailed, because they relied upon the LORD God of their fathers. 2 Chronicles 13:19 And Abijah pursued after Jeroboam, and took cities from him, Bethel with the towns thereof, and Jeshanah with the towns thereof, and Ephrain with the towns thereof. 2 Chronicles 13:20 Neither did Jeroboam recover strength again in the days of Abijah: and the LORD struck him, and he died.
2 Chronicles 13:21 But Abijah waxed mighty, and married fourteen wives, and begat twenty and two sons, and sixteen daughters. 2 Chronicles 13:22 And the rest of the acts of Abijah, and his ways, and his sayings, [are] written in the story of the prophet Iddo.
|
"""
sentry.manager
~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import datetime
import hashlib
import logging
import time
import warnings
import uuid
from celery.signals import task_postrun
from django.conf import settings
from django.contrib.auth.models import UserManager
from django.core.signals import request_finished
from django.db import models, transaction, IntegrityError
from django.db.models import Sum
from django.utils import timezone
from django.utils.datastructures import SortedDict
from raven.utils.encoding import to_string
from sentry import app
from sentry.constants import (
STATUS_RESOLVED, STATUS_UNRESOLVED, MINUTE_NORMALIZATION,
LOG_LEVELS, DEFAULT_LOGGER_NAME, MAX_CULPRIT_LENGTH)
from sentry.db.models import BaseManager
from sentry.processors.base import send_group_processors
from sentry.signals import regression_signal
from sentry.tasks.index import index_event
from sentry.utils.cache import cache, memoize
from sentry.utils.dates import get_sql_date_trunc, normalize_datetime
from sentry.utils.db import get_db_engine, has_charts, attach_foreignkey
from sentry.utils.safe import safe_execute, trim, trim_dict, trim_frames
from sentry.utils.strings import strip
logger = logging.getLogger('sentry.errors')
UNSAVED = dict()
MAX_TAG_LENGTH = 200
def get_checksum_from_event(event):
    """Derive the grouping checksum for ``event``.

    The first interface that yields a composite hash wins; otherwise the
    checksum falls back to an MD5 of the event message.
    """
    interfaces = event.interfaces
    for interface in interfaces.itervalues():
        result = interface.get_composite_hash(interfaces=event.interfaces)
        if not result:
            continue
        digest = hashlib.md5()
        for part in result:
            digest.update(to_string(part))
        return digest.hexdigest()
    return hashlib.md5(to_string(event.message)).hexdigest()
class ScoreClause(object):
    """Lazy SQL expression that recomputes a group's sort score in the
    database on supported backends (postgres/mysql); other backends fall
    back to the Python-computed score."""
    def __init__(self, group):
        self.group = group
    def prepare_database_save(self, unused):
        # Django hook: hand the clause itself to the SQL compiler.
        return self
    def prepare(self, evaluator, query, allow_joins):
        return
    def evaluate(self, node, qn, connection):
        alias = getattr(connection, 'alias', 'default')
        backend = get_db_engine(alias)
        if backend.startswith('postgresql'):
            score_sql = 'log(times_seen) * 600 + last_seen::abstime::int'
        elif backend.startswith('mysql'):
            score_sql = 'log(times_seen) * 600 + unix_timestamp(last_seen)'
        else:
            # XXX: if we cant do it atomically let's do it the best we can
            score_sql = self.group.get_score()
        return (score_sql, [])
def count_limit(count):
    """Return the sampling rate for a group seen ``count`` times, taken from
    the first SENTRY_SAMPLE_RATES threshold the count fits under."""
    # TODO: could we do something like num_to_store = max(math.sqrt(100*count)+59, 200) ?
    # ~ 150 * ((log(n) - 1.5) ^ 2 - 0.25)
    for threshold, sample_rate in settings.SENTRY_SAMPLE_RATES:
        if count <= threshold:
            return sample_rate
    return settings.SENTRY_MAX_SAMPLE_RATE
def time_limit(silence):  # ~ 3600 per hour
    """Return the sampling rate for a group quiet for ``silence`` seconds,
    taken from the first matching SENTRY_SAMPLE_TIMES threshold."""
    for threshold, sample_rate in settings.SENTRY_SAMPLE_TIMES:
        if silence >= threshold:
            return sample_rate
    return settings.SENTRY_MAX_SAMPLE_TIME
class UserManager(BaseManager, UserManager):
    # Intentionally shadows django.contrib.auth.models.UserManager (imported
    # above): combines Django's user-manager behaviour with Sentry's
    # BaseManager. No additional behaviour is needed.
    pass
class ChartMixin(object):
    """Helpers that aggregate related *CountByMinute rows into time-bucketed
    (epoch-millis, times_seen) chart series."""

    def get_chart_data_for_group(self, instances, max_days=90, key=None):
        # Aggregate chart data over several instances at once. Returns a
        # list when ``key`` is None, otherwise a dict keyed by ``key``.
        if not instances:
            if key is None:
                return []
            return {}

        # Route the query to the same database the instances came from.
        if hasattr(instances[0], '_state'):
            db = instances[0]._state.db or 'default'
        else:
            db = 'default'

        field = self.model.groupcountbyminute_set.related
        column = field.field.name
        queryset = field.model.objects.filter(**{
            '%s__in' % column: instances,
        })

        return self._get_chart_data(queryset, max_days, db, key=key)

    def get_chart_data(self, instance, max_days=90, key=None):
        # Chart data for a single instance; same return shape as above.
        if hasattr(instance, '_state'):
            db = instance._state.db or 'default'
        else:
            db = 'default'

        queryset = instance.groupcountbyminute_set

        return self._get_chart_data(queryset, max_days, db, key=key)

    def _get_chart_data(self, queryset, max_days=90, db='default', key=None):
        # Bucket times_seen sums into day/hour/minute intervals and return
        # (epoch-millis, count) pairs, zero-filled for empty buckets.
        if not has_charts(db):
            if key is None:
                return []
            return {}

        today = timezone.now().replace(microsecond=0, second=0)

        # the last interval is not accurate, so we exclude it
        # TODO: it'd be ideal to normalize the last datapoint so that we can include it
        # and not have ~inaccurate data for up to MINUTE_NORMALIZATION
        today -= datetime.timedelta(minutes=MINUTE_NORMALIZATION)

        # Pick the bucket granularity from the requested span.
        if max_days >= 30:
            g_type = 'date'
            d_type = 'days'
            points = max_days
            modifier = 1
            today = today.replace(hour=0)
        elif max_days >= 1:
            g_type = 'hour'
            d_type = 'hours'
            points = max_days * 24
            modifier = 1
            today = today.replace(minute=0)
        else:
            g_type = 'minute'
            d_type = 'minutes'
            modifier = MINUTE_NORMALIZATION
            points = max_days * 24 * (60 / modifier)

        min_date = today - datetime.timedelta(days=max_days)

        # Backend-specific SQL expression that truncates `date` to a bucket.
        method = get_sql_date_trunc('date', db, grouper=g_type)

        chart_qs = queryset.filter(
            date__gte=min_date,
        ).extra(
            select={'grouper': method},
        )
        if key:
            chart_qs = chart_qs.values('grouper', key)
        else:
            chart_qs = chart_qs.values('grouper')

        chart_qs = chart_qs.annotate(
            num=Sum('times_seen'),
        )
        if key:
            chart_qs = chart_qs.values_list(key, 'grouper', 'num').order_by(key, 'grouper')
        else:
            chart_qs = chart_qs.values_list('grouper', 'num').order_by('grouper')

        # Normalize both shapes to {key_value: {bucket: count}}.
        if key is None:
            rows = {None: dict(chart_qs)}
        else:
            rows = {}
            for item, grouper, num in chart_qs:
                if item not in rows:
                    rows[item] = {}
                rows[item][grouper] = num

        results = {}
        for item, tsdata in rows.iteritems():
            results[item] = []
            # Walk backwards from ``today`` so buckets with no rows still
            # get an explicit zero datapoint.
            for point in xrange(points, -1, -1):
                dt = today - datetime.timedelta(**{d_type: point * modifier})
                results[item].append((int(time.mktime((dt).timetuple())) * 1000, tsdata.get(dt, 0)))

        if key is None:
            return results[None]
        return results
class GroupManager(BaseManager, ChartMixin):
use_for_related_fields = True
def normalize_event_data(self, data):
    """Coerce a raw event payload into canonical form (in place).

    Fills defaults (level, logger, timestamp, event_id, ...), normalizes
    ``tags`` to a list of 2-tuples, trims oversized values, and expands the
    legacy Stacktrace interface into the Exception interface.
    Returns the same ``data`` dict.
    """
    # TODO(dcramer): store http.env.REMOTE_ADDR as user.ip
    # First we pull out our top-level (non-data attr) kwargs
    if not data.get('level') or data['level'] not in LOG_LEVELS:
        data['level'] = logging.ERROR
    if not data.get('logger'):
        data['logger'] = DEFAULT_LOGGER_NAME
    else:
        data['logger'] = trim(data['logger'], 64)
    timestamp = data.get('timestamp')
    if not timestamp:
        timestamp = timezone.now()
    # We must convert date to local time so Django doesn't mess it up
    # based on TIME_ZONE
    if settings.TIME_ZONE:
        if not timezone.is_aware(timestamp):
            timestamp = timestamp.replace(tzinfo=timezone.utc)
    elif timezone.is_aware(timestamp):
        timestamp = timestamp.replace(tzinfo=None)
    data['timestamp'] = timestamp
    if not data.get('event_id'):
        data['event_id'] = uuid.uuid4().hex
    data.setdefault('message', None)
    data.setdefault('culprit', None)
    data.setdefault('time_spent', None)
    data.setdefault('server_name', None)
    data.setdefault('site', None)
    data.setdefault('checksum', None)
    data.setdefault('platform', None)
    data.setdefault('extra', {})
    tags = data.get('tags')
    if not tags:
        tags = []
    # full support for dict syntax
    elif isinstance(tags, dict):
        tags = tags.items()
    # prevent [tag, tag, tag] (invalid) syntax
    elif not all(len(t) == 2 for t in tags):
        tags = []
    else:
        tags = list(tags)
    data['tags'] = tags
    data['message'] = strip(data['message'])
    data['culprit'] = strip(data['culprit'])
    if not isinstance(data['extra'], dict):
        # throw it away
        data['extra'] = {}
    trim_dict(
        data['extra'], max_size=settings.SENTRY_MAX_EXTRA_VARIABLE_SIZE)
    if 'sentry.interfaces.Exception' in data:
        # Wrap a bare exception payload in the multi-value envelope.
        if 'values' not in data['sentry.interfaces.Exception']:
            data['sentry.interfaces.Exception'] = {
                'values': [data['sentry.interfaces.Exception']]
            }
        # convert stacktrace + exception into expanded exception
        if 'sentry.interfaces.Stacktrace' in data:
            data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = data.pop('sentry.interfaces.Stacktrace')
        for exc_data in data['sentry.interfaces.Exception']['values']:
            for key in ('type', 'module', 'value'):
                value = exc_data.get(key)
                if value:
                    exc_data[key] = trim(value)
            if exc_data.get('stacktrace'):
                trim_frames(exc_data['stacktrace'])
                for frame in exc_data['stacktrace']['frames']:
                    stack_vars = frame.get('vars', {})
                    trim_dict(stack_vars)
    if 'sentry.interfaces.Stacktrace' in data:
        trim_frames(data['sentry.interfaces.Stacktrace'])
        for frame in data['sentry.interfaces.Stacktrace']['frames']:
            stack_vars = frame.get('vars', {})
            trim_dict(stack_vars)
    if 'sentry.interfaces.Message' in data:
        msg_data = data['sentry.interfaces.Message']
        # Bug fix: the trimmed value was previously computed and discarded
        # (bare ``trim(msg_data['message'], 1024)``); store it back so
        # oversized messages are actually truncated.
        msg_data['message'] = trim(msg_data['message'], 1024)
        if msg_data.get('params'):
            msg_data['params'] = trim(msg_data['params'])
    if 'sentry.interfaces.Http' in data:
        http_data = data['sentry.interfaces.Http']
        for key in ('cookies', 'querystring', 'headers', 'env', 'url'):
            value = http_data.get(key)
            if not value:
                continue
            if type(value) == dict:
                trim_dict(value)
            else:
                http_data[key] = trim(value)
        value = http_data.get('data')
        if value:
            http_data['data'] = trim(value, 2048)
        # default the culprit to the url
        if not data['culprit']:
            data['culprit'] = trim(strip(http_data.get('url')), MAX_CULPRIT_LENGTH)
    return data
def from_kwargs(self, project, **kwargs):
    """Normalize raw event kwargs and persist them for ``project``."""
    normalized = self.normalize_event_data(kwargs)
    return self.save_data(project, normalized)
def save_data(self, project, data, raw=False):
    """Persist a normalized event payload.

    Creates or updates the aggregate Group, saves the Event unless it is
    sampled, records an EventMapping, and kicks off post-processing.
    Returns the saved Event, or None when group creation failed.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    from sentry.plugins import plugins
    from sentry.models import Event, Project, EventMapping

    with transaction.commit_on_success():
        project = Project.objects.get_from_cache(id=project)

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        culprit = data.pop('culprit')
        level = data.pop('level')
        time_spent = data.pop('time_spent')
        logger_name = data.pop('logger')
        server_name = data.pop('server_name')
        site = data.pop('site')
        date = data.pop('timestamp')
        checksum = data.pop('checksum')
        platform = data.pop('platform')

        if 'sentry.interfaces.Exception' in data:
            if 'values' not in data['sentry.interfaces.Exception']:
                data['sentry.interfaces.Exception'] = {'values': [data['sentry.interfaces.Exception']]}

            # convert stacktrace + exception into expanded exception
            if 'sentry.interfaces.Stacktrace' in data:
                data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

        kwargs = {
            'level': level,
            'message': message,
            'platform': platform,
            'culprit': culprit or '',
            'logger': logger_name,
        }

        event = Event(
            project=project,
            event_id=event_id,
            data=data,
            server_name=server_name,
            site=site,
            time_spent=time_spent,
            datetime=date,
            **kwargs
        )

        # Calculate the checksum from the first highest scoring interface
        if not checksum:
            checksum = get_checksum_from_event(event)

        event.checksum = checksum

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        tags = data['tags']
        tags.append(('level', LOG_LEVELS[level]))
        # Bug fix: this previously tested the module-level ``logger`` object
        # (always truthy) instead of the event's logger name.
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))

        # Let project plugins contribute additional tags.
        for plugin in plugins.for_project(project):
            added_tags = safe_execute(plugin.get_tags, event)
            if added_tags:
                tags.extend(added_tags)

        try:
            group, is_new, is_sample = self._create_group(
                event=event,
                tags=data['tags'],
                **group_kwargs
            )
        except Exception as exc:
            # TODO: should we mail admins when there are failures?
            try:
                logger.exception(u'Unable to process log entry: %s', exc)
            except Exception as exc:
                # Bug fix: ``warnings.warn`` takes a warning *category* as
                # its second argument, so the exception must be interpolated
                # into the message rather than passed separately.
                warnings.warn(u'Unable to process log entry: %s' % (exc,))
            return

        using = group._state.db

        event.group = group

        # save the event unless its been sampled
        if not is_sample:
            sid = transaction.savepoint(using=using)
            try:
                event.save()
            except IntegrityError:
                # Duplicate event_id: keep the existing row.
                transaction.savepoint_rollback(sid, using=using)
                return event
            transaction.savepoint_commit(sid, using=using)

        sid = transaction.savepoint(using=using)
        try:
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)
        transaction.commit_unless_managed(using=using)

    if not raw:
        send_group_processors(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample
        )

    # TODO: move this to the queue
    if is_new and not raw:
        regression_signal.send_robust(sender=self.model, instance=group)

    if getattr(settings, 'SENTRY_INDEX_SEARCH', settings.SENTRY_USE_SEARCH):
        index_event.delay(event)

    return event
def should_sample(self, group, event):
    """Decide whether this event should be sampled (not stored in full).

    Sampling only applies when SENTRY_SAMPLE_DATA is on and both the
    count-based and silence-based modulo checks come up zero.
    """
    if not settings.SENTRY_SAMPLE_DATA:
        return False
    quiet_period = event.datetime - group.last_seen
    silence = quiet_period.days * 86400 + quiet_period.seconds
    if group.times_seen % count_limit(group.times_seen):
        return False
    if group.times_seen % time_limit(silence):
        return False
    return True
def _create_group(self, event, tags=None, **kwargs):
    """Create or update the aggregate Group for ``event``.

    Returns ``(group, is_new, is_sample)``: ``is_new`` marks a brand-new or
    regressed group; ``is_sample`` means the event itself should not be
    stored in full.
    """
    from sentry.models import ProjectCountByMinute, GroupCountByMinute

    date = event.datetime
    time_spent = event.time_spent
    project = event.project

    group, is_new = self.get_or_create(
        project=project,
        checksum=event.checksum,
        defaults=kwargs
    )
    if is_new:
        transaction.commit_unless_managed(using=group._state.db)

    update_kwargs = {
        'times_seen': 1,
    }
    if time_spent:
        update_kwargs.update({
            'time_spent_total': time_spent,
            'time_spent_count': 1,
        })

    if not is_new:
        extra = {
            'last_seen': max(event.datetime, group.last_seen),
            'score': ScoreClause(group),
        }
        if event.message and event.message != group.message:
            extra['message'] = event.message
        if group.level != event.level:
            extra['level'] = event.level

        if group.status == STATUS_RESOLVED or group.is_over_resolve_age():
            # Making things atomic
            is_new = bool(self.filter(
                id=group.id,
                status=STATUS_RESOLVED,
            ).exclude(
                active_at__gte=date,
            ).update(active_at=date, status=STATUS_UNRESOLVED))

            transaction.commit_unless_managed(using=group._state.db)

            group.active_at = date
            group.status = STATUS_UNRESOLVED

        group.last_seen = extra['last_seen']

        app.buffer.incr(self.model, update_kwargs, {
            'id': group.id,
        }, extra)
    else:
        # TODO: this update should actually happen as part of create
        group.update(score=ScoreClause(group))

        # We need to commit because the queue can run too fast and hit
        # an issue with the group not existing before the buffers run
        transaction.commit_unless_managed(using=group._state.db)

    # Determine if we've sampled enough data to store this event
    if is_new:
        is_sample = False
    elif not self.should_sample(group, event):
        is_sample = False
    else:
        is_sample = True

    # Rounded down to the nearest interval
    normalized_datetime = normalize_datetime(date)

    app.buffer.incr(GroupCountByMinute, update_kwargs, {
        'group': group,
        'project': project,
        'date': normalized_datetime,
    })

    app.buffer.incr(ProjectCountByMinute, update_kwargs, {
        'project': project,
        'date': normalized_datetime,
    })

    try:
        # Robustness fix: ``tags`` defaults to None, which previously made
        # add_tags raise (and log) a TypeError; treat a missing tag list as
        # a no-op instead.
        self.add_tags(group, tags or ())
    except Exception as e:
        # Bug fix: replaced Python 2-only ``except Exception, e`` syntax
        # with the ``as`` form used elsewhere in this module.
        logger.exception('Unable to record tags: %s' % (e,))

    return group, is_new, is_sample
def add_tags(self, group, tags):
    """Record tag occurrences for ``group`` via the write-back buffer.

    ``tags`` is an iterable of (key, value) or (key, value, data) tuples;
    empty or over-long values are skipped outright.
    """
    from sentry.models import TagValue, GroupTag

    project = group.project
    date = group.last_seen

    for tag_item in tags:
        if len(tag_item) == 2:
            (key, value), data = tag_item, None
        else:
            key, value, data = tag_item

        if not value:
            continue

        value = unicode(value)
        if len(value) > MAX_TAG_LENGTH:
            # Oversized values are dropped rather than truncated.
            continue

        # Project-wide tag-value counter.
        app.buffer.incr(TagValue, {'times_seen': 1}, {
            'project': project,
            'key': key,
            'value': value,
        }, {
            'last_seen': date,
            'data': data,
        })

        # Per-group tag-value counter.
        app.buffer.incr(GroupTag, {'times_seen': 1}, {
            'group': group,
            'project': project,
            'key': key,
            'value': value,
        }, {
            'last_seen': date,
        })
def get_by_natural_key(self, project, logger, culprit, checksum):
    """Look up a group by its natural key (project, logger, view, checksum)."""
    return self.get(
        project=project,
        logger=logger,
        view=culprit,
        checksum=checksum,
    )
@memoize
def model_fields_clause(self):
    """Comma-separated, quoted column list of sentry_groupedmessage fields,
    memoized since the model's fields never change at runtime."""
    columns = ['sentry_groupedmessage."%s"' % (f.column,)
               for f in self.model._meta.fields]
    return ', '.join(columns)
def get_accelerated(self, project_ids, queryset=None, minutes=15):
    """Return groups ordered by how sharply their event rate is accelerating.

    The sort value compares events-per-minute in the most recent ``minutes``
    window against the average rate over the preceding windows; only groups
    whose current rate exceeds their historical rate are included. Returns a
    lazily-sliced RawQuerySet (LIMIT/OFFSET applied on slicing).
    """
    if not project_ids:
        return self.none()
    if queryset is None:
        queryset = self.filter(
            project__in=project_ids,
            status=STATUS_UNRESOLVED,
        )
    else:
        # Work on a copy and drop select_related, since the ORM query is only
        # mined for its WHERE clause below.
        queryset = queryset._clone()
        queryset.query.select_related = False

    normalization = float(MINUTE_NORMALIZATION)

    assert minutes >= normalization

    # Number of historical windows used to compute the baseline rate.
    intervals = 8

    engine = get_db_engine(queryset.db)
    # We technically only support mysql and postgresql, since there seems to be no standard
    # way to get the epoch from a datetime/interval
    if engine.startswith('mysql'):
        minute_clause = "interval %s minute"
        epoch_clause = "unix_timestamp(utc_timestamp()) - unix_timestamp(mcbm.date)"
        now_clause = 'utc_timestamp()'
    else:
        minute_clause = "interval '%s minutes'"
        epoch_clause = "extract(epoch from now()) - extract(epoch from mcbm.date)"
        now_clause = 'now()'

    # Compile the ORM queryset and splice its WHERE clause into the raw SQL.
    sql, params = queryset.query.get_compiler(queryset.db).as_sql()
    before_select, after_select = str(sql).split('SELECT ', 1)
    after_where = after_select.split(' WHERE ', 1)[1]

    # Ensure we remove any ordering clause
    after_where = after_where.split(' ORDER BY ')[0]

    query = """
SELECT ((mcbm.times_seen + 1) / ((%(epoch_clause)s) / 60)) / (COALESCE(z.rate, 0) + 1) as sort_value,
%(fields)s
FROM sentry_groupedmessage
INNER JOIN sentry_messagecountbyminute as mcbm
ON (sentry_groupedmessage.id = mcbm.group_id)
LEFT JOIN (SELECT a.group_id, (SUM(a.times_seen)) / COUNT(a.times_seen) / %(norm)f as rate
FROM sentry_messagecountbyminute as a
WHERE a.date >= %(now)s - %(max_time)s
AND a.date < %(now)s - %(min_time)s
AND a.project_id IN (%(project_ids)s)
GROUP BY a.group_id) as z
ON z.group_id = mcbm.group_id
WHERE mcbm.date >= %(now)s - %(min_time)s
AND mcbm.date < %(now)s - %(offset_time)s
AND mcbm.times_seen > 0
AND ((mcbm.times_seen + 1) / ((%(epoch_clause)s) / 60)) > (COALESCE(z.rate, 0) + 1)
AND %(after_where)s
GROUP BY z.rate, mcbm.times_seen, mcbm.date, %(fields)s
ORDER BY sort_value DESC
""" % dict(
        fields=self.model_fields_clause,
        after_where=after_where,
        offset_time=minute_clause % (1,),
        min_time=minute_clause % (minutes + 1,),
        max_time=minute_clause % (minutes * intervals + 1,),
        norm=normalization,
        epoch_clause=epoch_clause,
        now=now_clause,
        project_ids=', '.join((str(int(x)) for x in project_ids)),
    )

    return RawQuerySet(self, query, params)
class RawQuerySet(object):
    """Lazily-sliced wrapper around a raw SQL string.

    Slicing (``rqs[a:b]``) appends a LIMIT/OFFSET clause and executes the
    query through the wrapped queryset's ``raw()``.
    """

    def __init__(self, queryset, query, params):
        self.queryset = queryset
        self.query = query
        self.params = params

    def __getitem__(self, k):
        # ``k`` is expected to be a slice with a concrete ``stop``.
        start = k.start or 0
        limit_clause = ' LIMIT %d OFFSET %d' % (k.stop - start, start)
        return self.queryset.raw(self.query + limit_clause, self.params)
class ProjectManager(BaseManager, ChartMixin):
    def get_for_user(self, user=None, access=None, hidden=False, team=None,
                     superuser=True):
        """
        Returns a list (sorted by name) of all projects a user has some
        level of access to.
        """
        from sentry.models import Team

        if not (user and user.is_authenticated()):
            return []

        # TODO: the result of this function should be cached
        is_authenticated = (user and user.is_authenticated())

        base_qs = self
        if not hidden:
            # NOTE(review): status == 0 presumably means "visible" — confirm
            # against the Project model's status choices.
            base_qs = base_qs.filter(status=0)
        if team:
            base_qs = base_qs.filter(team=team)

        if team and user.is_superuser and superuser:
            # Superusers see every (filtered) project on the requested team.
            projects = set(base_qs)
        else:
            projects_qs = base_qs
            if not settings.SENTRY_PUBLIC:
                # If the user is authenticated, include their memberships
                teams = Team.objects.get_for_user(
                    user, access, access_groups=False).values()
                if not teams:
                    projects_qs = self.none()
                if team and team not in teams:
                    projects_qs = self.none()
                elif not team:
                    projects_qs = projects_qs.filter(team__in=teams)

            projects = set(projects_qs)

            if is_authenticated:
                # Also include projects reachable through access groups.
                projects |= set(base_qs.filter(accessgroup__members=user))

        # Pre-populate project.team to avoid N+1 queries in callers.
        attach_foreignkey(projects, self.model.team)

        return sorted(projects, key=lambda x: x.name.lower())
class MetaManager(BaseManager):
    """Manager for a global key/value option store.

    Values are cached per-process in ``self.__metadata``; the cache is
    cleared at the end of each Celery task / request via the connected
    signals.
    """

    # Sentinel distinguishing "no default supplied" from an explicit None.
    NOTSET = object()

    def __init__(self, *args, **kwargs):
        super(MetaManager, self).__init__(*args, **kwargs)
        task_postrun.connect(self.clear_cache)
        request_finished.connect(self.clear_cache)
        self.__metadata = {}

    def __getstate__(self):
        d = self.__dict__.copy()
        # we cant serialize weakrefs
        d.pop('_MetaManager__metadata', None)
        return d

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.__metadata = {}

    def get_value(self, key, default=NOTSET):
        """Return the stored value for ``key``.

        Raises KeyError when the key is absent and no default was given.
        """
        result = self.get_all_values()
        if default is self.NOTSET:
            return result[key]
        return result.get(key, default)

    def unset_value(self, key):
        """Delete ``key`` from the database and the local cache."""
        self.filter(key=key).delete()
        self.__metadata.pop(key, None)

    def set_value(self, key, value):
        """Persist ``key`` -> ``value``, updating the row only if it changed.

        BUG FIX: removed a stray debug ``print key, value`` statement that
        wrote to stdout on every call (and was Python 2-only syntax).
        """
        inst, _ = self.get_or_create(
            key=key,
            defaults={
                'value': value,
            }
        )
        if inst.value != value:
            inst.update(value=value)
        self.__metadata[key] = value

    def get_all_values(self):
        """Return (and lazily load) the full key/value mapping."""
        if not hasattr(self, '_MetaManager__metadata'):
            self.__metadata = dict(self.values_list('key', 'value'))
        return self.__metadata

    def clear_cache(self, **kwargs):
        self.__metadata = {}
class InstanceMetaManager(BaseManager):
    # Sentinel distinguishing "no default supplied" from an explicit None.
    NOTSET = object()

    def __init__(self, field_name, *args, **kwargs):
        # field_name: name of the FK field on the option model pointing at
        # the owning instance (used to build filter kwargs below).
        super(InstanceMetaManager, self).__init__(*args, **kwargs)
        self.field_name = field_name
        # Drop the per-process cache at the end of each task/request.
        task_postrun.connect(self.clear_cache)
        request_finished.connect(self.clear_cache)
        self.__metadata = {}

    def __getstate__(self):
        d = self.__dict__.copy()
        # we cant serialize weakrefs
        d.pop('_InstanceMetaManager__metadata', None)
        return d

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.__metadata = {}

    def _make_key(self, instance):
        # Accepts either a model instance or a raw primary key.
        if isinstance(instance, models.Model):
            instance_id = instance.pk
        else:
            instance_id = instance
        return '%s:%s' % (self.model._meta.db_table, instance_id)

    def get_value_bulk(self, instances, key):
        # One query for one key across many instances; bypasses both the
        # local and the shared cache.
        return dict(self.filter(**{
            '%s__in' % self.field_name: instances,
            'key': key,
        }).values_list(self.field_name, 'value'))

    def get_value(self, instance, key, default=NOTSET):
        # Raises KeyError when the key is absent and no default was given.
        result = self.get_all_values(instance)
        if default is self.NOTSET:
            return result[key]
        return result.get(key, default)

    def unset_value(self, instance, key):
        # NOTE(review): reads ``instance.pk`` directly, so unlike
        # get_all_values this assumes a model instance, not a raw id —
        # confirm callers never pass ids here.
        self.filter(**{self.field_name: instance, 'key': key}).delete()
        if instance.pk not in self.__metadata:
            # Not cached locally; just invalidate the shared cache entry.
            cache.delete(self._make_key(instance))
            return
        self.__metadata[instance.pk].pop(key, None)
        cache.set(self._make_key(instance), self.__metadata[instance.pk])

    def set_value(self, instance, key, value):
        inst, created = self.get_or_create(**{
            self.field_name: instance,
            'key': key,
            'defaults': {
                'value': value,
            }
        })
        # Only touch the row when the stored value actually changed.
        if not created and inst.value != value:
            inst.update(value=value)
        if instance.pk not in self.__metadata:
            # Not loaded locally; invalidate the shared cache rather than
            # writing a partial mapping.
            cache.delete(self._make_key(instance))
            return
        self.__metadata[instance.pk][key] = value
        cache.set(self._make_key(instance), self.__metadata[instance.pk])

    def get_all_values(self, instance):
        # Full key -> value mapping for the instance; loaded from the shared
        # cache, falling back to the database, on first access per process.
        if isinstance(instance, models.Model):
            instance_id = instance.pk
        else:
            instance_id = instance
        if instance_id not in self.__metadata:
            cache_key = self._make_key(instance)
            result = cache.get(cache_key)
            if result is None:
                result = dict(
                    (i.key, i.value) for i in
                    self.filter(**{
                        self.field_name: instance_id,
                    })
                )
                cache.set(cache_key, result)
            self.__metadata[instance_id] = result
        return self.__metadata.get(instance_id, {})

    def clear_cache(self, **kwargs):
        self.__metadata = {}
class UserOptionManager(BaseManager):
    """Manager for per-user key/value options, optionally scoped to a project.

    A per-process cache keyed by ``(user_id, project_id)`` is kept in
    ``self.__metadata`` and cleared after each task/request.
    """

    # Sentinel distinguishing "no default supplied" from an explicit None.
    NOTSET = object()

    def __init__(self, *args, **kwargs):
        super(UserOptionManager, self).__init__(*args, **kwargs)
        task_postrun.connect(self.clear_cache)
        request_finished.connect(self.clear_cache)
        self.__metadata = {}

    def __getstate__(self):
        d = self.__dict__.copy()
        # we cant serialize weakrefs
        d.pop('_UserOptionManager__metadata', None)
        return d

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.__metadata = {}

    def get_value(self, user, project, key, default=NOTSET):
        """Return the option value.

        Raises KeyError when the key is absent and no default was given.
        """
        result = self.get_all_values(user, project)
        if default is self.NOTSET:
            return result[key]
        return result.get(key, default)

    def unset_value(self, user, project, key):
        """Delete the option row and evict it from the local cache.

        BUG FIX: this previously guarded the eviction with
        ``hasattr(self, '_metadata')``, which is always False because the
        attribute is name-mangled to ``_UserOptionManager__metadata`` — so
        the method returned early and a deleted option stayed visible via
        ``get_value`` for the rest of the request.
        """
        self.filter(user=user, project=project, key=key).delete()
        if project:
            metakey = (user.pk, project.pk)
        else:
            metakey = (user.pk, None)
        if metakey not in self.__metadata:
            return
        self.__metadata[metakey].pop(key, None)

    def set_value(self, user, project, key, value):
        """Persist the option, updating the row only when the value changed."""
        inst, created = self.get_or_create(
            user=user,
            project=project,
            key=key,
            defaults={
                'value': value,
            },
        )
        if not created and inst.value != value:
            inst.update(value=value)
        if project:
            metakey = (user.pk, project.pk)
        else:
            metakey = (user.pk, None)
        if metakey not in self.__metadata:
            # Cache not loaded for this scope; it is filled on the next read.
            return
        self.__metadata[metakey][key] = value

    def get_all_values(self, user, project):
        """Return (and lazily load) the key/value mapping for (user, project)."""
        if project:
            metakey = (user.pk, project.pk)
        else:
            metakey = (user.pk, None)
        if metakey not in self.__metadata:
            result = dict(
                (i.key, i.value) for i in
                self.filter(
                    user=user,
                    project=project,
                )
            )
            self.__metadata[metakey] = result
        return self.__metadata.get(metakey, {})

    def clear_cache(self, **kwargs):
        self.__metadata = {}
class TagKeyManager(BaseManager):
    """Manager providing a short-lived (60s) cache of a project's tag keys."""

    def _get_cache_key(self, project_id):
        return 'filterkey:all:%s' % project_id

    def all_keys(self, project):
        # TODO: cache invalidation via post_save/post_delete signals much like BaseManager
        cache_key = self._get_cache_key(project.id)
        keys = cache.get(cache_key)
        if keys is None:
            keys = list(self.filter(project=project).values_list('key', flat=True))
            cache.set(cache_key, keys, 60)
        return keys
class TeamManager(BaseManager):
    def get_for_user(self, user, access=None, access_groups=True, with_projects=False):
        """
        Returns a SortedDict (slug -> Team) of all teams a user has some
        level of access to. With ``with_projects=True``, values become
        ``(team, project_list)`` tuples instead.

        NOTE(review): the original docstring claimed each Team carries a
        ``membership`` attribute, but nothing in this method sets one —
        confirm against callers before relying on it.
        """
        from sentry.models import TeamMember, AccessGroup, Project

        results = SortedDict()

        if not user.is_authenticated():
            return results

        if settings.SENTRY_PUBLIC and access is None:
            # Public install: every team is visible to every user.
            for team in sorted(self.iterator(), key=lambda x: x.name.lower()):
                results[team.slug] = team
        else:
            all_teams = set()

            # Direct memberships, optionally restricted by access level.
            qs = TeamMember.objects.filter(
                user=user,
            ).select_related('team')
            if access is not None:
                qs = qs.filter(type__lte=access)

            for tm in qs:
                all_teams.add(tm.team)

            if access_groups:
                # Teams reachable through access-group membership.
                qs = AccessGroup.objects.filter(
                    members=user,
                ).select_related('team')
                if access is not None:
                    qs = qs.filter(type__lte=access)

                for group in qs:
                    all_teams.add(group.team)

            for team in sorted(all_teams, key=lambda x: x.name.lower()):
                results[team.slug] = team

        if with_projects:
            # these kinds of queries make people sad :(
            # Replace each entry with (team, projects-the-user-can-see).
            new_results = SortedDict()
            for team in results.itervalues():
                project_list = list(Project.objects.get_for_user(
                    user, team=team))
                new_results[team.slug] = (team, project_list)
            results = new_results

        return results
|
Since it first started in 1990 in Dartmouth, Nova Scotia, Hurricane Lift Truck has also had a presence in Saint John, New Brunswick. But, COO Jamie Thomson and his business partners knew it was time to look at setting up a bricks and mortar shop at 875 Bayside Drive in Saint John.
“Our employees worked from home until the opening of our new branch office. The physical branch will allow us to provide a better level of service to our customer base. Additional parts stock, the ability to work on large repair jobs, an operator training classroom and a rental fleet are a few of the additional services the physical branch will offer,” says Thomson.
The business specializes in sales, leases, rentals, parts and service of all brands of lift trucks that use a variety of fuel sources, including diesel and LPG (propane), as well as providing operator training. Hurricane has a broad range of lift truck rentals including sit-down electrics, narrow-aisle reach trucks, and lift trucks with a lift capacity of 15,000 lb or more.
Thomson says Hurricane Lift Truck works with Hyundai, Linde, Blue Giant and Load Lifter. On top of Dartmouth and Saint John, it also has a location in St. John’s, Newfoundland. Another thing that sets it apart from others in the industry is the ability to offer critical operator training.
“Courses are designed for both the new operators and the experienced operators who require a refresher course. Operation of any type of material handling equipment requires formal training and qualification. On average, a lift truck weighs approximately twice as much as its rated lift capacity. So, a 5000-lb capacity lift truck will actually weigh 9000 lb with no load on the forks. This has the potential to cause serious injury or death to the operator and/or pedestrian,” says Thomson.
There are plans to begin courses in Saint John in 2019. The courses last a full day and include classroom and hands-on training with lift trucks as well as a final written test.
Hurricane’s New Brunswick location currently has five employees, but there is hope that, as demand increases, more technicians can be hired onto the team.
“A physical branch will give us a visual presence, which will create a greater awareness of our business in the city. With the awareness and the branch, our staff will find it easier to identify our business and its offerings with current or prospective customers,” adds Thomson.
|
"""
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
import numpy as np
import sys
import struct
from dateutil.relativedelta import relativedelta
from pandas.types.common import (is_categorical_dtype, is_datetime64_dtype,
_ensure_object)
from pandas.core.base import StringMixin
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.series import Series
import datetime
from pandas import compat, to_timedelta, to_datetime, isnull, DatetimeIndex
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip, BytesIO
from pandas.util.decorators import Appender
import pandas as pd
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas.lib import max_len_string_array, infer_dtype
from pandas.tslib import NaT, Timestamp
# Error message raised when a .dta file reports an unsupported format version.
_version_error = ("Version of given Stata file is not 104, 105, 108, "
                  "111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
                  "115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)")

# Shared docstring fragments, interpolated into the public docstrings below
# so that read_stata, StataReader and the legacy read methods stay in sync.
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables"""

_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. None defaults to iso-8859-1."""

_statafile_processing_params2 = """\
index : identifier of index column
identifier of column that should be used as index of the DataFrame
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nans.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64)
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""

_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines"""

_iterator_params = """\
iterator : boolean, default False
Return StataReader object"""

# Public docstring for read_stata(), attached via the @Appender decorator.
_read_stata_doc = """Read Stata file into DataFrame
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
Examples
--------
Read a Stata dta file:
>>> df = pandas.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pandas.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
""" % (_statafile_processing_params1, _encoding_params,
       _statafile_processing_params2, _chunksize_params,
       _iterator_params)

# Docstring for the legacy StataReader.data() method.
_data_method_doc = """Reads observations from Stata file, converting them into a dataframe
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)

# Docstring for StataReader.read().
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)

# Class docstring for StataReader.
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
""" % (_statafile_processing_params1, _statafile_processing_params2,
       _encoding_params, _chunksize_params)
# Docstring is attached via @Appender(_read_stata_doc); a literal docstring
# here would be appended to it, so documentation stays in comments.
@Appender(_read_stata_doc)
def read_stata(filepath_or_buffer, convert_dates=True,
               convert_categoricals=True, encoding=None, index=None,
               convert_missing=False, preserve_dtypes=True, columns=None,
               order_categoricals=True, chunksize=None, iterator=False):
    reader = StataReader(filepath_or_buffer,
                         convert_dates=convert_dates,
                         convert_categoricals=convert_categoricals,
                         index=index, convert_missing=convert_missing,
                         preserve_dtypes=preserve_dtypes,
                         columns=columns,
                         order_categoricals=order_categoricals,
                         chunksize=chunksize, encoding=encoding)

    if iterator or chunksize:
        # Hand the open reader to the caller, who is responsible for
        # iterating and closing it.
        return reader

    try:
        data = reader.read()
    finally:
        # BUG FIX: previously the reader (and its file handle) leaked when
        # read() raised; always close on the non-iterator path.
        reader.close()
    return data
# SIF (Stata Internal Format) date format codes handled by the converters below.
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]

# All Stata elapsed-date formats (except %ty) count from 01jan1960.
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
    """
    Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime

    Parameters
    ----------
    dates : Series
        The Stata Internal Format date to convert to datetime according to fmt
    fmt : str
        The format to convert to. Can be, tc, td, tw, tm, tq, th, ty

    Returns
    -------
    converted : Series
        The converted dates

    Examples
    --------
    >>> import pandas as pd
    >>> dates = pd.Series([52])
    >>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
    0   1961-01-01
    dtype: datetime64[ns]

    Notes
    -----
    datetime/c - tc
        milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
    datetime/C - tC - NOT IMPLEMENTED
        milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
    date - td
        days since 01jan1960 (01jan1960 = 0)
    weekly date - tw
        weeks since 1960w1
        This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
        The datetime value is the start of the week in terms of days in the
        year, not ISO calendar weeks.
    monthly date - tm
        months since 1960m1
    quarterly date - tq
        quarters since 1960q1
    half-yearly date - th
        half-years since 1960h1 yearly
    date - ty
        years since 0000

    If you don't have pandas with datetime support, then you can't do
    milliseconds accurately.
    """
    # Limits of the pandas datetime64[ns] range, used to decide between the
    # fast vectorized conversions and the slow datetime.datetime fallback.
    MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
    MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
    MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
    MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
    MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000

    def convert_year_month_safe(year, month):
        """
        Convert year and month to datetimes, using pandas vectorized versions
        when the date range falls within the range supported by pandas.
        Otherwise it falls back to a slower but more robust method using
        datetime.
        """
        if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
            return to_datetime(100 * year + month, format='%Y%m')
        else:
            index = getattr(year, 'index', None)
            return Series(
                [datetime.datetime(y, m, 1) for y, m in zip(year, month)],
                index=index)

    def convert_year_days_safe(year, days):
        """
        Converts year (e.g. 1999) and days since the start of the year to a
        datetime or datetime64 Series
        """
        if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
            return (to_datetime(year, format='%Y') +
                    to_timedelta(days, unit='d'))
        else:
            index = getattr(year, 'index', None)
            value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
                     for y, d in zip(year, days)]
            return Series(value, index=index)

    def convert_delta_safe(base, deltas, unit):
        """
        Convert base dates and deltas to datetimes, using pandas vectorized
        versions if the deltas satisfy restrictions required to be expressed
        as dates in pandas.
        """
        index = getattr(deltas, 'index', None)
        if unit == 'd':
            if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
                values = [base + relativedelta(days=int(d)) for d in deltas]
                return Series(values, index=index)
        elif unit == 'ms':
            if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
                values = [base + relativedelta(microseconds=(int(d) * 1000))
                          for d in deltas]
                return Series(values, index=index)
        else:
            raise ValueError('format not understood')
        base = to_datetime(base)
        deltas = to_timedelta(deltas, unit=unit)
        return base + deltas

    # TODO: If/when pandas supports more than datetime64[ns], this should be
    # improved to use correct range, e.g. datetime[Y] for yearly
    bad_locs = np.isnan(dates)
    has_bad_values = False
    if bad_locs.any():
        has_bad_values = True
        data_col = Series(dates)
        # NOTE(review): Series() appears to wrap the same underlying buffer,
        # so this write replaces the NaNs in ``dates`` itself before the int
        # cast — confirm; the positions are restored to NaT at the end.
        data_col[bad_locs] = 1.0  # Replace with NaT
    dates = dates.astype(np.int64)

    if fmt in ["%tc", "tc"]:  # Delta ms relative to base
        base = stata_epoch
        ms = dates
        conv_dates = convert_delta_safe(base, ms, 'ms')
    elif fmt in ["%tC", "tC"]:
        from warnings import warn
        warn("Encountered %tC format. Leaving in Stata Internal Format.")
        # BUG FIX: ``np.object`` (an alias removed from NumPy) replaced with
        # the equivalent builtin ``object`` dtype.
        conv_dates = Series(dates, dtype=object)
        if has_bad_values:
            conv_dates[bad_locs] = pd.NaT
        return conv_dates
    elif fmt in ["%td", "td", "%d", "d"]:  # Delta days relative to base
        base = stata_epoch
        days = dates
        conv_dates = convert_delta_safe(base, days, 'd')
    elif fmt in ["%tw", "tw"]:  # does not count leap days - 7 days is a week
        year = stata_epoch.year + dates // 52
        days = (dates % 52) * 7
        conv_dates = convert_year_days_safe(year, days)
    elif fmt in ["%tm", "tm"]:  # Delta months relative to base
        year = stata_epoch.year + dates // 12
        month = (dates % 12) + 1
        conv_dates = convert_year_month_safe(year, month)
    elif fmt in ["%tq", "tq"]:  # Delta quarters relative to base
        year = stata_epoch.year + dates // 4
        month = (dates % 4) * 3 + 1
        conv_dates = convert_year_month_safe(year, month)
    elif fmt in ["%th", "th"]:  # Delta half-years relative to base
        year = stata_epoch.year + dates // 2
        month = (dates % 2) * 6 + 1
        conv_dates = convert_year_month_safe(year, month)
    elif fmt in ["%ty", "ty"]:  # Years -- not delta
        year = dates
        month = np.ones_like(dates)
        conv_dates = convert_year_month_safe(year, month)
    else:
        raise ValueError("Date fmt %s not understood" % fmt)

    if has_bad_values:  # Restore NaT for bad values
        conv_dates[bad_locs] = NaT

    return conv_dates
def _datetime_to_stata_elapsed_vec(dates, fmt):
    """
    Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime

    Parameters
    ----------
    dates : Series
        Series or array containing datetime.datetime or datetime64[ns] to
        convert to the Stata Internal Format given by fmt
    fmt : str
        The format to convert to. Can be, tc, td, tw, tm, tq, th, ty

    Returns
    -------
    converted : Series
        Elapsed dates as float64; positions that were null on input carry
        Stata's generic double missing-value code.
    """
    index = dates.index
    NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
    US_PER_DAY = NS_PER_DAY / 1000

    def parse_dates_safe(dates, delta=False, year=False, days=False):
        # Extract the requested delta/year/month/days components, vectorized
        # for datetime64 input and via np.vectorize for object datetimes.
        d = {}
        if is_datetime64_dtype(dates.values):
            if delta:
                delta = dates - stata_epoch
                d['delta'] = delta.values.astype(
                    np.int64) // 1000  # microseconds
            if days or year:
                dates = DatetimeIndex(dates)
                d['year'], d['month'] = dates.year, dates.month
            if days:
                days = (dates.astype(np.int64) -
                        to_datetime(d['year'], format='%Y').astype(np.int64))
                d['days'] = days // NS_PER_DAY

        elif infer_dtype(dates) == 'datetime':
            if delta:
                delta = dates.values - stata_epoch
                f = lambda x: \
                    US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
                v = np.vectorize(f)
                d['delta'] = v(delta)
            if year:
                year_month = dates.apply(lambda x: 100 * x.year + x.month)
                d['year'] = year_month.values // 100
                d['month'] = (year_month.values - d['year'] * 100)
            if days:
                f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
                v = np.vectorize(f)
                d['days'] = v(dates)
        else:
            raise ValueError('Columns containing dates must contain either '
                             'datetime64, datetime.datetime or null values.')

        return DataFrame(d, index=index)

    bad_loc = isnull(dates)
    index = dates.index
    if bad_loc.any():
        # Temporarily replace nulls with the epoch; they are overwritten with
        # the missing-value code at the end.
        dates = Series(dates)
        if is_datetime64_dtype(dates):
            dates[bad_loc] = to_datetime(stata_epoch)
        else:
            dates[bad_loc] = stata_epoch

    if fmt in ["%tc", "tc"]:
        d = parse_dates_safe(dates, delta=True)
        conv_dates = d.delta / 1000
    elif fmt in ["%tC", "tC"]:
        from warnings import warn
        warn("Stata Internal Format tC not supported.")
        conv_dates = dates
    elif fmt in ["%td", "td"]:
        d = parse_dates_safe(dates, delta=True)
        conv_dates = d.delta // US_PER_DAY
    elif fmt in ["%tw", "tw"]:
        d = parse_dates_safe(dates, year=True, days=True)
        conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
    elif fmt in ["%tm", "tm"]:
        d = parse_dates_safe(dates, year=True)
        conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
    elif fmt in ["%tq", "tq"]:
        d = parse_dates_safe(dates, year=True)
        conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
    elif fmt in ["%th", "th"]:
        d = parse_dates_safe(dates, year=True)
        # BUG FIX: ``np.int`` (an alias removed from NumPy) replaced with the
        # builtin ``int``; the cast result is unchanged.
        conv_dates = 2 * (d.year - stata_epoch.year) + \
            (d.month > 6).astype(int)
    elif fmt in ["%ty", "ty"]:
        d = parse_dates_safe(dates, year=True)
        conv_dates = d.year
    else:
        raise ValueError("Format %s is not a known Stata date format" % fmt)

    conv_dates = Series(conv_dates, dtype=np.float64)
    # Stata's '.' (generic double missing value) as an IEEE-754 float64.
    missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
    conv_dates[bad_loc] = missing_value

    return Series(conv_dates, index=index)
# Message for strings exceeding Stata's 244-character fixed-width limit.
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '%s' does not satisfy this restriction.
"""


class PossiblePrecisionLoss(Warning):
    """Warning category raised when a lossy numeric conversion is performed."""
    pass


precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""


class ValueLabelTypeMismatch(Warning):
    """Warning category raised when non-string categories are coerced to strings."""
    pass


value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""


class InvalidColumnName(Warning):
    """Warning category raised when column names are renamed for Stata compliance."""
    pass


invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
int64 data is not usable in Stata, and so it is downcast to int32 whenever
the value are in the int32 range, and sidecast to float64 when larger than
this range. If the int64 values are outside of the range of those
perfectly representable as float64 values, a warning is raised.
bool columns are cast to int8. uint colums are converted to int of the
same size if there is no loss in precision, other wise are upcast to a
larger type. uint64 is currently not supported since it is concerted to
object in a DataFrame.
"""
ws = ''
# original, if small, if large
conversion_data = ((np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64))
float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0]
float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ('uint64', 'float64')
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if (data[col].max() <= 2147483620 and
data[col].min() >= -2147483647):
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ('int64', 'float64')
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
msg = 'Column {0} has a maximum value of infinity which is ' \
'outside the range supported by Stata.'
raise ValueError(msg.format(col))
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
msg = 'Column {0} has a maximum value ({1}) outside the ' \
'range supported by Stata ({1})'
raise ValueError(msg.format(col, value, float64_max))
if ws:
import warnings
warnings.warn(ws, PossiblePrecisionLoss)
return data
class StataValueLabel(object):
    """
    Parse a categorical column and prepare formatted output

    Parameters
    -----------
    catarray : Series
        Categorical Series whose categories become the value labels

    Attributes
    ----------
    labname : string
        Name of the value label (taken from the column name)
    value_labels : list of tuples
        (code, label) pairs, sorted by code

    Methods
    -------
    generate_value_label
    """

    def __init__(self, catarray):
        self.labname = catarray.name

        categories = catarray.cat.categories
        # Codes are the positional index of each category.
        self.value_labels = list(zip(np.arange(len(categories)), categories))
        self.value_labels.sort(key=lambda x: x[0])
        self.text_len = np.int32(0)
        self.off = []
        self.val = []
        self.txt = []
        self.n = 0

        # Compute lengths and setup lists of offsets and labels
        for vl in self.value_labels:
            category = vl[1]
            if not isinstance(category, string_types):
                # Non-string categories are coerced, with a warning, since
                # Stata value labels must be strings.
                category = str(category)
                import warnings
                warnings.warn(value_label_mismatch_doc.format(catarray.name),
                              ValueLabelTypeMismatch)

            self.off.append(self.text_len)
            self.text_len += len(category) + 1  # +1 for the padding
            self.val.append(vl[0])
            self.txt.append(category)
            self.n += 1

        if self.text_len > 32000:
            raise ValueError('Stata value labels for a single variable must '
                             'have a combined length less than 32,000 '
                             'characters.')

        # Ensure int32
        self.off = np.array(self.off, dtype=np.int32)
        self.val = np.array(self.val, dtype=np.int32)

        # Total length
        self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len

    def _encode(self, s):
        """
        Python 3 compatability shim
        """
        if compat.PY3:
            return s.encode(self._encoding)
        else:
            return s

    def generate_value_label(self, byteorder, encoding):
        """
        Serialize this value label in Stata's on-disk format.

        Parameters
        ----------
        byteorder : str
            Byte order of the output
        encoding : str
            File encoding

        Returns
        -------
        value_label : bytes
            Bytes containing the formatted value label
        """
        self._encoding = encoding
        bio = BytesIO()
        null_string = '\x00'
        null_byte = b'\x00'

        # len
        bio.write(struct.pack(byteorder + 'i', self.len))

        # labname
        labname = self._encode(_pad_bytes(self.labname[:32], 33))
        bio.write(labname)

        # padding - 3 bytes
        for i in range(3):
            bio.write(struct.pack('c', null_byte))

        # value_label_table
        # n - int32
        bio.write(struct.pack(byteorder + 'i', self.n))

        # textlen - int32
        bio.write(struct.pack(byteorder + 'i', self.text_len))

        # off - int32 array (n elements)
        for offset in self.off:
            bio.write(struct.pack(byteorder + 'i', offset))

        # val - int32 array (n elements)
        for value in self.val:
            bio.write(struct.pack(byteorder + 'i', value))

        # txt - Text labels, null terminated
        for text in self.txt:
            bio.write(self._encode(text + null_string))

        bio.seek(0)
        return bio.read()
class StataMissingValue(StringMixin):
    """
    An observation's missing value.

    Parameters
    -----------
    value : int8, int16, int32, float32 or float64
        The Stata missing value code

    Attributes
    ----------
    string : string
        String representation of the Stata missing value
    value : int8, int16, int32, float32 or float64
        The original encoded missing value

    Notes
    -----
    More information: <http://www.stata.com/help.cgi?missing>
    Integer missing values make the code '.', '.a', ..., '.z' to the ranges
    101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
    2147483647 (for int32). Missing values for floating point data types are
    more complex but the pattern is simple to discern from the following table.
    np.float32 missing values (float in Stata)
    0000007f    .
    0008007f    .a
    0010007f    .b
    ...
    00c0007f    .x
    00c8007f    .y
    00d0007f    .z
    np.float64 missing values (double in Stata)
    000000000000e07f    .
    000000000001e07f    .a
    000000000002e07f    .b
    ...
    000000000018e07f    .x
    000000000019e07f    .y
    00000000001ae07f    .z
    """
    # Construct a dictionary of missing values
    MISSING_VALUES = {}
    bases = (101, 32741, 2147483621)
    for b in bases:
        # Conversion to long to avoid hash issues on 32 bit platforms #8968
        MISSING_VALUES[compat.long(b)] = '.'
        for i in range(1, 27):
            MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i)
    # Walk the 27 float32 missing codes by stepping the raw bit pattern
    # (see the table in the class docstring).
    float32_base = b'\x00\x00\x00\x7f'
    increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
    for i in range(27):
        value = struct.unpack('<f', float32_base)[0]
        MISSING_VALUES[value] = '.'
        if i > 0:
            MISSING_VALUES[value] += chr(96 + i)
        int_value = struct.unpack('<i', struct.pack('<f', value))[
            0] + increment
        float32_base = struct.pack('<i', int_value)
    # Same walk for float64.
    # NOTE(review): the 'q' format below is native-endian while the float
    # packing is explicit little-endian — presumably only correct on
    # little-endian hosts; confirm on big-endian platforms.
    float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
    increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
    for i in range(27):
        value = struct.unpack('<d', float64_base)[0]
        MISSING_VALUES[value] = '.'
        if i > 0:
            MISSING_VALUES[value] += chr(96 + i)
        int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment
        float64_base = struct.pack('q', int_value)
    BASE_MISSING_VALUES = {'int8': 101,
                           'int16': 32741,
                           'int32': 2147483621,
                           'float32': struct.unpack('<f', float32_base)[0],
                           'float64': struct.unpack('<d', float64_base)[0]}

    def __init__(self, value):
        # Keep the original encoded value; look up its display string.
        self._value = value
        # Conversion to long to avoid hash issues on 32 bit platforms #8968
        value = compat.long(value) if value < 2147483648 else float(value)
        self._str = self.MISSING_VALUES[value]

    string = property(lambda self: self._str,
                      doc="The Stata representation of the missing value: "
                          "'.', '.a'..'.z'")
    value = property(lambda self: self._value,
                     doc='The binary representation of the missing value.')

    def __unicode__(self):
        return self.string

    def __repr__(self):
        # not perfect :-/
        return "%s(%s)" % (self.__class__, self)

    def __eq__(self, other):
        # Equal iff same class, same display string and same encoded value.
        return (isinstance(other, self.__class__) and
                self.string == other.string and self.value == other.value)

    @classmethod
    def get_base_missing_value(cls, dtype):
        # Map a numpy dtype to the generic '.' missing value for that type.
        if dtype == np.int8:
            value = cls.BASE_MISSING_VALUES['int8']
        elif dtype == np.int16:
            value = cls.BASE_MISSING_VALUES['int16']
        elif dtype == np.int32:
            value = cls.BASE_MISSING_VALUES['int32']
        elif dtype == np.float32:
            value = cls.BASE_MISSING_VALUES['float32']
        elif dtype == np.float64:
            value = cls.BASE_MISSING_VALUES['float64']
        else:
            raise ValueError('Unsupported dtype')
        return value
class StataParser(object):
    """Shared constants and type-mapping tables for dta readers/writers."""
    _default_encoding = 'iso-8859-1'

    def __init__(self, encoding):
        self._encoding = encoding
        # Stata type codes for the old (pre-117) binary format:
        # type code.
        # --------------------
        # str1 1 = 0x01
        # str2 2 = 0x02
        # ...
        # str244 244 = 0xf4
        # byte 251 = 0xfb (sic)
        # int 252 = 0xfc
        # long 253 = 0xfd
        # float 254 = 0xfe
        # double 255 = 0xff
        # --------------------
        # NOTE: the byte type seems to be reserved for categorical variables
        # with a label, but the underlying variable is -127 to 100
        # we're going to drop the label and cast to int
        self.DTYPE_MAP = \
            dict(
                lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) +
                [
                    (251, np.int8),
                    (252, np.int16),
                    (253, np.int32),
                    (254, np.float32),
                    (255, np.float64)
                ]
            )
        # Type codes used by the XML-like 117/118 formats.
        self.DTYPE_MAP_XML = \
            dict(
                [
                    (32768, np.uint8),  # Keys to GSO
                    (65526, np.float64),
                    (65527, np.float32),
                    (65528, np.int32),
                    (65529, np.int16),
                    (65530, np.int8)
                ]
            )
        self.TYPE_MAP = lrange(251) + list('bhlfd')
        self.TYPE_MAP_XML = \
            dict(
                [
                    # Not really a Q, unclear how to handle byteswap
                    (32768, 'Q'),
                    (65526, 'd'),
                    (65527, 'f'),
                    (65528, 'l'),
                    (65529, 'h'),
                    (65530, 'b')
                ]
            )
        # NOTE: technically, some of these are wrong. there are more numbers
        # that can be represented. it's the 27 ABOVE and BELOW the max listed
        # numeric data type in [U] 12.2.2 of the 11.2 manual
        float32_min = b'\xff\xff\xff\xfe'
        float32_max = b'\xff\xff\xff\x7e'
        float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff'
        float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f'
        self.VALID_RANGE = {
            'b': (-127, 100),
            'h': (-32767, 32740),
            'l': (-2147483647, 2147483620),
            'f': (np.float32(struct.unpack('<f', float32_min)[0]),
                  np.float32(struct.unpack('<f', float32_max)[0])),
            'd': (np.float64(struct.unpack('<d', float64_min)[0]),
                  np.float64(struct.unpack('<d', float64_max)[0]))
        }
        # Single-character codes used in very old (<= 108) formats.
        self.OLD_TYPE_MAPPING = {
            98: 251,   # byte
            105: 252,  # int
            108: 253,  # long
            102: 254   # float
            # don't know old code for double
        }
        # These missing values are the generic '.' in Stata, and are used
        # to replace nans
        self.MISSING_VALUES = {
            'b': 101,
            'h': 32741,
            'l': 2147483621,
            'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
            'd': np.float64(
                struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
        }
        # struct format char -> numpy dtype string
        self.NUMPY_TYPE_MAP = {
            'b': 'i1',
            'h': 'i2',
            'l': 'i4',
            'f': 'f4',
            'd': 'f8',
            'Q': 'u8'
        }
        # Reserved words cannot be used as variable names
        self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
                               'byte', 'case', 'catch', 'class', 'colvector',
                               'complex', 'const', 'continue', 'default',
                               'delegate', 'delete', 'do', 'double', 'else',
                               'eltypedef', 'end', 'enum', 'explicit',
                               'export', 'external', 'float', 'for', 'friend',
                               'function', 'global', 'goto', 'if', 'inline',
                               'int', 'local', 'long', 'NULL', 'pragma',
                               'protected', 'quad', 'rowvector', 'short',
                               'typedef', 'typename', 'virtual')
class StataReader(StataParser, BaseIterator):
    __doc__ = _stata_reader_doc

    def __init__(self, path_or_buf, convert_dates=True,
                 convert_categoricals=True, index=None,
                 convert_missing=False, preserve_dtypes=True,
                 columns=None, order_categoricals=True,
                 encoding='iso-8859-1', chunksize=None):
        super(StataReader, self).__init__(encoding)
        self.col_sizes = ()
        # Arguments to the reader (can be temporarily overridden in
        # calls to read).
        self._convert_dates = convert_dates
        self._convert_categoricals = convert_categoricals
        self._index = index
        self._convert_missing = convert_missing
        self._preserve_dtypes = preserve_dtypes
        self._columns = columns
        self._order_categoricals = order_categoricals
        self._encoding = encoding
        self._chunksize = chunksize
        # State variables for the file
        self._has_string_data = False
        self._missing_values = False
        self._can_read_value_labels = False
        self._column_selector_set = False
        self._value_labels_read = False
        self._data_read = False
        self._dtype = None
        self._lines_read = 0
        self._native_byteorder = _set_endianness(sys.byteorder)
        if isinstance(path_or_buf, str):
            # May resolve URLs/paths to a local buffer.
            path_or_buf, encoding, _ = get_filepath_or_buffer(
                path_or_buf, encoding=self._default_encoding
            )
        if isinstance(path_or_buf, (str, compat.text_type, bytes)):
            self.path_or_buf = open(path_or_buf, 'rb')
        else:
            # Copy to BytesIO, and ensure no encoding
            contents = path_or_buf.read()
            try:
                contents = contents.encode(self._default_encoding)
            except:  # noqa: E722 best-effort: bytes input raises and is kept
                pass
            self.path_or_buf = BytesIO(contents)
        self._read_header()
def __enter__(self):
""" enter context manager """
return self
def __exit__(self, exc_type, exc_value, traceback):
""" exit context manager """
self.close()
def close(self):
""" close the handle if its open """
try:
self.path_or_buf.close()
except IOError:
pass
    def _read_header(self):
        # Peek at the first byte: the XML-like 117/118 formats start with
        # '<stata_dta>', older binary formats start with the version number.
        first_char = self.path_or_buf.read(1)
        if struct.unpack('c', first_char)[0] == b'<':
            self._read_new_header(first_char)
        else:
            self._read_old_header(first_char)
        # int entries in typlist are fixed-width string columns.
        self.has_string_data = len([x for x in self.typlist
                                    if type(x) is int]) > 0
        # calculate size of a data record
        self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
        # remove format details from %td
        self.fmtlist = ["%td" if x.startswith("%td") else x
                        for x in self.fmtlist]
def _read_new_header(self, first_char):
# The first part of the header is common to 117 and 118.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118]:
raise ValueError(_version_error)
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == "MSF" and '>' or '<'
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self.data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
self._seek_varnames = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_sortlist = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_formats = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
self._seek_value_label_names = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
self.seek_strls = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
self.seek_value_labels = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-118.
def _get_dtypes(self, seek_vartypes):
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
for i in range(self.nvar)]
def f(typ):
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata types [{0}]".
format(typ))
typlist = [f(x) for x in raw_typlist]
def f(typ):
if typ <= 2045:
return str(typ)
try:
return self.DTYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata dtype [{0}]"
.format(typ))
dtyplist = [f(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self):
if self.format_version == 117:
b = 33
elif self.format_version == 118:
b = 129
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self):
if self.format_version == 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the label list
def _get_lbllist(self):
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
def _get_variable_labels(self):
if self.format_version == 118:
vlblist = [self._decode(self.path_or_buf.read(321))
for i in range(self.nvar)]
elif self.format_version > 105:
vlblist = [self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)]
else:
vlblist = [self._null_terminate(self.path_or_buf.read(32))
for i in range(self.nvar)]
return vlblist
def _get_nobs(self):
if self.format_version == 118:
return struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
def _get_data_label(self):
if self.format_version == 118:
strlen = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._null_terminate(self.path_or_buf.read(81))
else:
return self._null_terminate(self.path_or_buf.read(32))
def _get_time_stamp(self):
if self.format_version == 118:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._null_terminate(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self):
if self.format_version == 117:
self.path_or_buf.read(8) # <variable_lables>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version == 118:
return struct.unpack(self.byteorder + 'q',
self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char):
self.format_version = struct.unpack('b', first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error)
self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[
0] == 0x1 and '>' or '<'
self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self.data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1))
for i in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # py2 string, py3 bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(str(x) for x in typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(str(x) for x in typlist)))
if self.format_version > 108:
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
else:
self.varlist = [self._null_terminate(self.path_or_buf.read(9))
for i in range(self.nvar)]
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(self.byteorder + 'b',
self.path_or_buf.read(1))[0]
if self.format_version > 108:
data_len = struct.unpack(self.byteorder + 'i',
self.path_or_buf.read(4))[0]
else:
data_len = struct.unpack(self.byteorder + 'h',
self.path_or_buf.read(2))[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _calcsize(self, fmt):
return (type(fmt) is int and fmt or
struct.calcsize(self.byteorder + fmt))
def _decode(self, s):
s = s.partition(b"\0")[0]
return s.decode('utf-8')
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
return s.decode(self._encoding or self._default_encoding)
else:
null_byte = "\0"
try:
return s.lstrip(null_byte)[:s.index(null_byte)]
except:
return s
    def _read_value_labels(self):
        # Populate self.value_label_dict ({label name -> {value -> text}})
        # from the value-label table at the end of the file.  Idempotent.
        if self.format_version <= 108:
            # Value labels are not supported in version 108 and earlier.
            return
        if self._value_labels_read:
            # Don't read twice
            return
        if self.format_version >= 117:
            self.path_or_buf.seek(self.seek_value_labels)
        else:
            # Older formats: labels start right after the data matrix.
            offset = self.nobs * self._dtype.itemsize
            self.path_or_buf.seek(self.data_location + offset)
        self._value_labels_read = True
        self.value_label_dict = dict()
        while True:
            if self.format_version >= 117:
                if self.path_or_buf.read(5) == b'</val':  # <lbl>
                    break  # end of value label table
            slength = self.path_or_buf.read(4)
            if not slength:
                break  # end of value label table (format < 117)
            if self.format_version <= 117:
                labname = self._null_terminate(self.path_or_buf.read(33))
            else:
                labname = self._decode(self.path_or_buf.read(129))
            self.path_or_buf.read(3)  # padding
            # n entries, then a txtlen-byte text block indexed by offsets.
            n = struct.unpack(self.byteorder + 'I',
                              self.path_or_buf.read(4))[0]
            txtlen = struct.unpack(self.byteorder + 'I',
                                   self.path_or_buf.read(4))[0]
            off = np.frombuffer(self.path_or_buf.read(4 * n),
                                dtype=self.byteorder + "i4",
                                count=n)
            val = np.frombuffer(self.path_or_buf.read(4 * n),
                                dtype=self.byteorder + "i4",
                                count=n)
            # Sort by offset so each label ends where the next one starts.
            ii = np.argsort(off)
            off = off[ii]
            val = val[ii]
            txt = self.path_or_buf.read(txtlen)
            self.value_label_dict[labname] = dict()
            for i in range(n):
                end = off[i + 1] if i < n - 1 else txtlen
                if self.format_version <= 117:
                    self.value_label_dict[labname][val[i]] = (
                        self._null_terminate(txt[off[i]:end]))
                else:
                    self.value_label_dict[labname][val[i]] = (
                        self._decode(txt[off[i]:end]))
            if self.format_version >= 117:
                self.path_or_buf.read(6)  # </lbl>
        self._value_labels_read = True
    def _read_strls(self):
        # Read the strL (long string) table into self.GSO, keyed by the
        # packed (v, o) identifier that appears in the data matrix.
        self.path_or_buf.seek(self.seek_strls)
        self.GSO = {0: ''}
        while True:
            if self.path_or_buf.read(3) != b'GSO':
                break
            if self.format_version == 117:
                v_o = struct.unpack(self.byteorder + 'Q',
                                    self.path_or_buf.read(8))[0]
            else:
                # Format 118 stores v (4 bytes) and o (8 bytes) separately;
                # repack them into the same 8-byte key used in the data.
                buf = self.path_or_buf.read(12)
                # Only tested on little endian file on little endian machine.
                if self.byteorder == '<':
                    buf = buf[0:2] + buf[4:10]
                else:
                    buf = buf[0:2] + buf[6:]
                v_o = struct.unpack('Q', buf)[0]
            typ = struct.unpack('B', self.path_or_buf.read(1))[0]
            length = struct.unpack(self.byteorder + 'I',
                                   self.path_or_buf.read(4))[0]
            va = self.path_or_buf.read(length)
            # typ 130 is a null-terminated string; typ 129 is raw binary.
            if typ == 130:
                encoding = 'utf-8'
                if self.format_version == 117:
                    encoding = self._encoding or self._default_encoding
                va = va[0:-1].decode(encoding)
            self.GSO[v_o] = va
    # legacy
    @Appender('DEPRECATED: ' + _data_method_doc)
    def data(self, **kwargs):
        # Deprecated one-shot reader kept for backwards compatibility;
        # warns, guards against repeated use, then delegates to read().
        import warnings
        warnings.warn("'data' is deprecated, use 'read' instead")
        if self._data_read:
            raise Exception("Data has already been read.")
        self._data_read = True
        return self.read(None, **kwargs)
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def get_chunk(self, size=None):
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
    @Appender(_read_method_doc)
    def read(self, nrows=None, convert_dates=None,
             convert_categoricals=None, index=None,
             convert_missing=None, preserve_dtypes=None,
             columns=None, order_categoricals=None):
        # Handle empty file or chunk. If reading incrementally raise
        # StopIteration. If reading the whole thing return an empty
        # data frame.
        if (self.nobs == 0) and (nrows is None):
            self._can_read_value_labels = True
            self._data_read = True
            self.close()
            return DataFrame(columns=self.varlist)
        # Handle options: fall back to the defaults captured in __init__
        # for any argument not overridden in this call.
        if convert_dates is None:
            convert_dates = self._convert_dates
        if convert_categoricals is None:
            convert_categoricals = self._convert_categoricals
        if convert_missing is None:
            convert_missing = self._convert_missing
        if preserve_dtypes is None:
            preserve_dtypes = self._preserve_dtypes
        if columns is None:
            columns = self._columns
        if order_categoricals is None:
            order_categoricals = self._order_categoricals
        if nrows is None:
            nrows = self.nobs
        if (self.format_version >= 117) and (self._dtype is None):
            self._can_read_value_labels = True
            self._read_strls()
        # Setup the dtype.
        if self._dtype is None:
            dtype = []  # Convert struct data types to numpy data type
            for i, typ in enumerate(self.typlist):
                if typ in self.NUMPY_TYPE_MAP:
                    dtype.append(('s' + str(i), self.byteorder +
                                  self.NUMPY_TYPE_MAP[typ]))
                else:
                    # int type codes are fixed-width string columns.
                    dtype.append(('s' + str(i), 'S' + str(typ)))
            dtype = np.dtype(dtype)
            self._dtype = dtype
        # Read data
        dtype = self._dtype
        max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
        read_len = nrows * dtype.itemsize
        read_len = min(read_len, max_read_len)
        if read_len <= 0:
            # Iterator has finished, should never be here unless
            # we are reading the file incrementally
            if convert_categoricals:
                self._read_value_labels()
            self.close()
            raise StopIteration
        offset = self._lines_read * dtype.itemsize
        self.path_or_buf.seek(self.data_location + offset)
        read_lines = min(nrows, self.nobs - self._lines_read)
        data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
                             count=read_lines)
        self._lines_read += read_lines
        if self._lines_read == self.nobs:
            self._can_read_value_labels = True
            self._data_read = True
        # if necessary, swap the byte order to native here
        if self.byteorder != self._native_byteorder:
            data = data.byteswap().newbyteorder()
        if convert_categoricals:
            self._read_value_labels()
        if len(data) == 0:
            data = DataFrame(columns=self.varlist, index=index)
        else:
            data = DataFrame.from_records(data, index=index)
            data.columns = self.varlist
        # If index is not specified, use actual row number rather than
        # restarting at 0 for each chunk.
        if index is None:
            ix = np.arange(self._lines_read - read_lines, self._lines_read)
            data = data.set_index(ix)
        if columns is not None:
            try:
                data = self._do_select_columns(data, columns)
            except ValueError:
                self.close()
                raise
        # Decode strings
        for col, typ in zip(data, self.typlist):
            if type(typ) is int:
                data[col] = data[col].apply(
                    self._null_terminate, convert_dtype=True)
        data = self._insert_strls(data)
        cols_ = np.where(self.dtyplist)[0]
        # Convert columns (if needed) to match input type
        index = data.index
        requires_type_conversion = False
        data_formatted = []
        for i in cols_:
            if self.dtyplist[i] is not None:
                col = data.columns[i]
                dtype = data[col].dtype
                if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
                    requires_type_conversion = True
                    data_formatted.append(
                        (col, Series(data[col], index, self.dtyplist[i])))
                else:
                    data_formatted.append((col, data[col]))
        if requires_type_conversion:
            data = DataFrame.from_items(data_formatted)
        del data_formatted
        self._do_convert_missing(data, convert_missing)
        if convert_dates:
            # Columns whose display format marks them as Stata dates.
            cols = np.where(lmap(lambda x: x in _date_formats,
                                 self.fmtlist))[0]
            for i in cols:
                col = data.columns[i]
                try:
                    data[col] = _stata_elapsed_date_to_datetime_vec(
                        data[col],
                        self.fmtlist[i])
                except ValueError:
                    self.close()
                    raise
        if convert_categoricals and self.format_version > 108:
            data = self._do_convert_categoricals(data,
                                                 self.value_label_dict,
                                                 self.lbllist,
                                                 order_categoricals)
        if not preserve_dtypes:
            # Upcast everything to float64/int64 for a uniform result.
            retyped_data = []
            convert = False
            for col in data:
                dtype = data[col].dtype
                if dtype in (np.float16, np.float32):
                    dtype = np.float64
                    convert = True
                elif dtype in (np.int8, np.int16, np.int32):
                    dtype = np.int64
                    convert = True
                retyped_data.append((col, data[col].astype(dtype)))
            if convert:
                data = DataFrame.from_items(retyped_data)
        return data
    def _do_convert_missing(self, data, convert_missing):
        # Check for missing values, and replace if found.  Mutates ``data``
        # in place.  With convert_missing=True the Stata notation is kept
        # via StataMissingValue objects; otherwise missing codes become NaN.
        for i, colname in enumerate(data):
            fmt = self.typlist[i]
            if fmt not in self.VALID_RANGE:
                continue
            nmin, nmax = self.VALID_RANGE[fmt]
            series = data[colname]
            # Values outside the valid range encode missing codes.
            missing = np.logical_or(series < nmin, series > nmax)
            if not missing.any():
                continue
            if convert_missing:  # Replacement follows Stata notation
                missing_loc = np.argwhere(missing)
                umissing, umissing_loc = np.unique(series[missing],
                                                   return_inverse=True)
                replacement = Series(series, dtype=np.object)
                for j, um in enumerate(umissing):
                    missing_value = StataMissingValue(um)
                    loc = missing_loc[umissing_loc == j]
                    replacement.iloc[loc] = missing_value
            else:  # All replacements are identical
                dtype = series.dtype
                if dtype not in (np.float32, np.float64):
                    # Integer columns must become float to hold NaN.
                    dtype = np.float64
                replacement = Series(series, dtype=dtype)
                replacement[missing] = np.nan
            data[colname] = replacement
def _insert_strls(self, data):
if not hasattr(self, 'GSO') or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != 'Q':
continue
data.iloc[:, i] = [self.GSO[k] for k in data.iloc[:, i]]
return data
    def _do_select_columns(self, data, columns):
        # Trim ``data`` to the requested columns and (once per reader)
        # keep the per-column metadata lists in sync with the selection.
        if not self._column_selector_set:
            column_set = set(columns)
            if len(column_set) != len(columns):
                raise ValueError('columns contains duplicate entries')
            unmatched = column_set.difference(data.columns)
            if unmatched:
                raise ValueError('The following columns were not found in the '
                                 'Stata data set: ' +
                                 ', '.join(list(unmatched)))
            # Copy information for retained columns for later processing
            dtyplist = []
            typlist = []
            fmtlist = []
            lbllist = []
            for col in columns:
                i = data.columns.get_loc(col)
                dtyplist.append(self.dtyplist[i])
                typlist.append(self.typlist[i])
                fmtlist.append(self.fmtlist[i])
                lbllist.append(self.lbllist[i])
            self.dtyplist = dtyplist
            self.typlist = typlist
            self.fmtlist = fmtlist
            self.lbllist = lbllist
            self._column_selector_set = True
        return data[columns]
def _do_convert_categoricals(self, data, value_label_dict, lbllist,
order_categoricals):
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(compat.iterkeys(value_label_dict))
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
cat_data = Categorical(data[col], ordered=order_categoricals)
categories = []
for category in cat_data.categories:
if category in value_label_dict[label]:
categories.append(value_label_dict[label][category])
else:
categories.append(category) # Partially labeled
try:
cat_data.categories = categories
except ValueError:
vc = Series(categories).value_counts()
repeats = list(vc.index[vc > 1])
repeats = '\n' + '-' * 80 + '\n'.join(repeats)
msg = 'Value labels for column {0} are not unique. The ' \
'repeated labels are:\n{1}'.format(col, repeats)
raise ValueError(msg)
# TODO: is the next line needed above in the data(...) method?
cat_data = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_items(cat_converted_data)
return data
    def data_label(self):
        """Returns data label of Stata file"""
        # NOTE(review): _read_header assigns the instance attribute
        # ``self.data_label``, which shadows this method, so on a reader
        # that has parsed a header this method is effectively unreachable
        # and the attribute is returned directly — confirm before relying
        # on calling this as a method.
        return self.data_label
def variable_labels(self):
"""Returns variable labels as a dict, associating each variable name
with corresponding label
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self):
"""Returns a dict, associating each variable name a dict, associating
each value its corresponding label
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
# if 'b' not in fname.mode:
return fname
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _pad_bytes(name, length):
"""
Takes a char string and pads it with null bytes until it's length chars
"""
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise NotImplementedError("Format %s not implemented" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a "
"column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
def _dtype_to_stata_type(dtype, column):
    """
    Converts dtype types to stata types. Returns the byte of the given ordinal.
    See TYPE_MAP and comments for an explanation. This is also explained in
    the dta spec.
    1 - 244 are strings of this length
                         Pandas    Stata
    251 - chr(251) - for int8      byte
    252 - chr(252) - for int16     int
    253 - chr(253) - for int32     long
    254 - chr(254) - for float32   float
    255 - chr(255) - for double    double
    If there are dates to convert, then dtype will already have the correct
    type inserted.
    """
    # TODO: expand to handle datetime to integer conversion
    # NOTE(review): np.string_ was removed in NumPy 2.0 — confirm the
    # supported NumPy versions still provide it.
    if dtype.type == np.string_:
        return chr(dtype.itemsize)
    elif dtype.type == np.object_:  # try to coerce it to the biggest string
        # not memory efficient, what else could we
        # do?
        itemsize = max_len_string_array(_ensure_object(column.values))
        return chr(max(itemsize, 1))
    elif dtype == np.float64:
        return chr(255)
    elif dtype == np.float32:
        return chr(254)
    elif dtype == np.int32:
        return chr(253)
    elif dtype == np.int16:
        return chr(252)
    elif dtype == np.int8:
        return chr(251)
    else:  # pragma : no cover
        raise NotImplementedError("Data type %s not supported." % dtype)
def _dtype_to_default_stata_fmt(dtype, column):
    """
    Maps numpy dtype to stata's default format for this type. Not terribly
    important since users can change this in Stata. Semantics are
    object  -> "%DDs" where DD is the length of the string. If not a string,
                raise ValueError
    float64 -> "%10.0g"
    float32 -> "%9.0g"
    int64   -> "%9.0g"
    int32   -> "%12.0g"
    int16   -> "%8.0g"
    int8    -> "%8.0g"

    NOTE(review): the docstring lists int64 but there is no int64 branch
    below — presumably int64 columns are downcast before reaching here;
    confirm against the writer's dtype-casting step.
    """
    # TODO: Refactor to combine type with format
    # TODO: expand this to handle a default datetime format?
    if dtype.type == np.object_:
        inferred_dtype = infer_dtype(column.dropna())
        if not (inferred_dtype in ('string', 'unicode') or
                len(column) == 0):
            raise ValueError('Writing general object arrays is not supported')
        itemsize = max_len_string_array(_ensure_object(column.values))
        if itemsize > 244:
            raise ValueError(excessive_string_length_error % column.name)
        return "%" + str(max(itemsize, 1)) + "s"
    elif dtype == np.float64:
        return "%10.0g"
    elif dtype == np.float32:
        return "%9.0g"
    elif dtype == np.int32:
        return "%12.0g"
    elif dtype == np.int8 or dtype == np.int16:
        return "%8.0g"
    else:  # pragma : no cover
        raise NotImplementedError("Data type %s not supported." % dtype)
class StataWriter(StataParser):
    """
    A class for writing Stata binary dta files (format 114).

    Parameters
    ----------
    fname : str or buffer
        String path of file-like object
    data : DataFrame
        Input to save
    convert_dates : dict
        Dictionary mapping columns containing datetime types to stata internal
        format to use when writing the dates. Options are 'tc', 'td', 'tm',
        'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
        Datetime columns that do not have a conversion type specified will be
        converted to 'tc'. Raises NotImplementedError if a datetime column has
        timezone information
    write_index : bool
        Write the index to Stata dataset.
    encoding : str
        Default is latin-1. Unicode is not supported
    byteorder : str
        Can be ">", "<", "little", or "big". default is `sys.byteorder`
    time_stamp : datetime
        A datetime to use as file creation date. Default is the current time
    data_label : str
        A label for the data set. Must be 80 characters or smaller.
    variable_labels : dict
        Dictionary containing columns as keys and variable labels as values.
        Each label must be 80 characters or smaller.

        .. versionadded:: 0.19.0

    Returns
    -------
    writer : StataWriter instance
        The StataWriter instance has a write_file method, which will
        write the file to the given `fname`.

    Raises
    ------
    NotImplementedError
        * If datetimes contain timezone information
    ValueError
        * Columns listed in convert_dates are neither datetime64[ns]
          nor datetime.datetime
        * Column dtype is not representable in Stata
        * Column listed in convert_dates is not in DataFrame
        * Categorical label contains more than 32,000 characters

    Examples
    --------
    >>> import pandas as pd
    >>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
    >>> writer = StataWriter('./data_file.dta', data)
    >>> writer.write_file()

    Or with dates

    >>> from datetime import datetime
    >>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
    >>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
    >>> writer.write_file()
    """

    def __init__(self, fname, data, convert_dates=None, write_index=True,
                 encoding="latin-1", byteorder=None, time_stamp=None,
                 data_label=None, variable_labels=None):
        super(StataWriter, self).__init__(encoding)
        self._convert_dates = {} if convert_dates is None else convert_dates
        self._write_index = write_index
        self._time_stamp = time_stamp
        self._data_label = data_label
        self._variable_labels = variable_labels
        # attach nobs, nvars, data, varlist, typlist
        self._prepare_pandas(data)
        if byteorder is None:
            byteorder = sys.byteorder
        self._byteorder = _set_endianness(byteorder)
        self._fname = fname
        # Maps Stata dta type-code bytes back to the numpy integer dtypes
        # they encode (253=int32, 252=int16, 251=int8).
        self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}

    def _write(self, to_write):
        """
        Helper to call encode before writing to file for Python 3 compat.
        """
        if compat.PY3:
            self._file.write(to_write.encode(self._encoding or
                                             self._default_encoding))
        else:
            self._file.write(to_write)

    def _prepare_categoricals(self, data):
        """Check for categorical columns, retain categorical information for
        Stata file and convert categorical data to int"""
        is_cat = [is_categorical_dtype(data[col]) for col in data]
        self._is_col_cat = is_cat
        self._value_labels = []
        if not any(is_cat):
            return data
        get_base_missing_value = StataMissingValue.get_base_missing_value
        index = data.index
        data_formatted = []
        for col, col_is_cat in zip(data, is_cat):
            if col_is_cat:
                self._value_labels.append(StataValueLabel(data[col]))
                dtype = data[col].cat.codes.dtype
                if dtype == np.int64:
                    raise ValueError('It is not possible to export '
                                     'int64-based categorical data to Stata.')
                values = data[col].cat.codes.values.copy()
                # Upcast if needed so that correct missing values can be set
                if values.max() >= get_base_missing_value(dtype):
                    if dtype == np.int8:
                        dtype = np.int16
                    elif dtype == np.int16:
                        dtype = np.int32
                    else:
                        dtype = np.float64
                    values = np.array(values, dtype=dtype)
                # Replace missing values with Stata missing value for type
                values[values == -1] = get_base_missing_value(dtype)
                # NOTE(review): this branch appends a 3-tuple (col, values,
                # index) while the non-categorical branch appends a 2-tuple —
                # confirm DataFrame.from_items accepts both shapes here.
                data_formatted.append((col, values, index))
            else:
                data_formatted.append((col, data[col]))
        return DataFrame.from_items(data_formatted)

    def _replace_nans(self, data):
        """Checks floating point data columns for nans, and replaces these with
        the generic Stata for missing value (.)"""
        for c in data:
            dtype = data[c].dtype
            if dtype in (np.float32, np.float64):
                if dtype == np.float32:
                    replacement = self.MISSING_VALUES['f']
                else:
                    replacement = self.MISSING_VALUES['d']
                data[c] = data[c].fillna(replacement)
        return data

    def _check_column_names(self, data):
        """
        Checks column names to ensure that they are valid Stata column names.
        This includes checks for:
            * Non-string names
            * Stata keywords
            * Variables that start with numbers
            * Variables with names that are too long

        When an illegal variable name is detected, it is converted, and if
        dates are exported, the variable name is propagated to the date
        conversion dictionary
        """
        converted_names = []
        columns = list(data.columns)
        original_columns = columns[:]
        duplicate_var_id = 0
        for j, name in enumerate(columns):
            orig_name = name
            if not isinstance(name, string_types):
                name = text_type(name)
            # Replace every character outside [A-Za-z0-9_] with '_'
            for c in name:
                if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \
                        (c < '0' or c > '9') and c != '_':
                    name = name.replace(c, '_')
            # Variable name must not be a reserved word
            if name in self.RESERVED_WORDS:
                name = '_' + name
            # Variable name may not start with a number
            if name[0] >= '0' and name[0] <= '9':
                name = '_' + name
            # Stata limits variable names to 32 characters
            name = name[:min(len(name), 32)]
            if not name == orig_name:
                # check for duplicates
                while columns.count(name) > 0:
                    # prepend ascending number to avoid duplicates
                    name = '_' + str(duplicate_var_id) + name
                    name = name[:min(len(name), 32)]
                    duplicate_var_id += 1
                # need to possibly encode the orig name if its unicode
                try:
                    orig_name = orig_name.encode('utf-8')
                except:
                    # NOTE(review): bare except silently keeps orig_name
                    # unchanged when it cannot be utf-8 encoded.
                    pass
                converted_names.append(
                    '{0} -> {1}'.format(orig_name, name))
                columns[j] = name
        data.columns = columns
        # Check date conversion, and fix key if needed
        if self._convert_dates:
            for c, o in zip(columns, original_columns):
                if c != o:
                    self._convert_dates[c] = self._convert_dates[o]
                    del self._convert_dates[o]
        if converted_names:
            import warnings
            ws = invalid_name_doc.format('\n    '.join(converted_names))
            warnings.warn(ws, InvalidColumnName)
        return data

    def _prepare_pandas(self, data):
        """Run the full column-preparation pipeline on a copy of `data` and
        cache the per-column metadata used by the writer (nobs, nvar, data,
        varlist, typlist, fmtlist)."""
        # NOTE: we might need a different API / class for pandas objects so
        # we can set different semantics - handle this with a PR to pandas.io
        data = data.copy()
        if self._write_index:
            data = data.reset_index()
        # Ensure column names are strings
        data = self._check_column_names(data)
        # Check columns for compatibility with stata, upcast if necessary
        # Raise if outside the supported range
        data = _cast_to_stata_types(data)
        # Replace NaNs with Stata missing values
        data = self._replace_nans(data)
        # Convert categoricals to int data, and strip labels
        data = self._prepare_categoricals(data)
        self.nobs, self.nvar = data.shape
        self.data = data
        self.varlist = data.columns.tolist()
        dtypes = data.dtypes
        # Ensure all date columns are converted; unspecified ones default to 'tc'
        for col in data:
            if col in self._convert_dates:
                continue
            if is_datetime64_dtype(data[col]):
                self._convert_dates[col] = 'tc'
        self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates,
                                                         self.varlist)
        for key in self._convert_dates:
            new_type = _convert_datetime_to_stata_type(
                self._convert_dates[key]
            )
            dtypes[key] = np.dtype(new_type)
        self.typlist = []
        self.fmtlist = []
        for col, dtype in dtypes.iteritems():
            self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
            self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
        # set the given format for the datetime cols
        if self._convert_dates is not None:
            for key in self._convert_dates:
                self.fmtlist[key] = self._convert_dates[key]

    def write_file(self):
        """Write the prepared data to `self._fname` as a binary dta file,
        closing the file even if a write step raises."""
        self._file = _open_file_binary_write(
            self._fname, self._encoding or self._default_encoding
        )
        try:
            self._write_header(time_stamp=self._time_stamp,
                               data_label=self._data_label)
            self._write_descriptors()
            self._write_variable_labels()
            # write 5 zeros for expansion fields
            self._write(_pad_bytes("", 5))
            self._prepare_data()
            self._write_data()
            self._write_value_labels()
        finally:
            self._file.close()

    def _write_value_labels(self):
        """Write one value-label table per labeled (categorical) column."""
        for vl in self._value_labels:
            self._file.write(vl.generate_value_label(self._byteorder,
                                                     self._encoding))

    def _write_header(self, data_label=None, time_stamp=None):
        """Write the dta header: format version (114), byte order, filetype,
        variable/observation counts, data label and time stamp."""
        byteorder = self._byteorder
        # ds_format - just use 114
        self._file.write(struct.pack("b", 114))
        # byteorder
        self._write(byteorder == ">" and "\x01" or "\x02")
        # filetype
        self._write("\x01")
        # unused
        self._write("\x00")
        # number of vars, 2 bytes
        self._file.write(struct.pack(byteorder + "h", self.nvar)[:2])
        # number of obs, 4 bytes
        self._file.write(struct.pack(byteorder + "i", self.nobs)[:4])
        # data label 81 bytes, char, null terminated
        if data_label is None:
            self._file.write(self._null_terminate(_pad_bytes("", 80)))
        else:
            self._file.write(
                self._null_terminate(_pad_bytes(data_label[:80], 80))
            )
        # time stamp, 18 bytes, char, null terminated
        # format dd Mon yyyy hh:mm
        if time_stamp is None:
            time_stamp = datetime.datetime.now()
        elif not isinstance(time_stamp, datetime.datetime):
            raise ValueError("time_stamp should be datetime type")
        self._file.write(
            self._null_terminate(time_stamp.strftime("%d %b %Y %H:%M"))
        )

    def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
                           fmtlist=None, lbllist=None):
        """Write the variable descriptor blocks (typlist, varlist, srtlist,
        fmtlist, lbllist). The parameters are unused; the cached instance
        attributes are what actually get written."""
        nvar = self.nvar
        # typlist, length nvar, format byte array
        for typ in self.typlist:
            self._write(typ)
        # varlist names are checked by _check_column_names
        # varlist, requires null terminated
        for name in self.varlist:
            name = self._null_terminate(name, True)
            name = _pad_bytes(name[:32], 33)
            self._write(name)
        # srtlist, 2*(nvar+1), int array, encoded by byteorder
        srtlist = _pad_bytes("", 2 * (nvar + 1))
        self._write(srtlist)
        # fmtlist, 49*nvar, char array
        for fmt in self.fmtlist:
            self._write(_pad_bytes(fmt, 49))
        # lbllist, 33*nvar, char array
        for i in range(nvar):
            # Use variable name when categorical
            if self._is_col_cat[i]:
                name = self.varlist[i]
                name = self._null_terminate(name, True)
                name = _pad_bytes(name[:32], 33)
                self._write(name)
            else:  # Default is empty label
                self._write(_pad_bytes("", 33))

    def _write_variable_labels(self):
        """Write one 81-byte label per variable, validating length and
        Latin-1 encodability of user-supplied labels."""
        # Missing labels are 80 blank characters plus null termination
        blank = _pad_bytes('', 81)
        if self._variable_labels is None:
            for i in range(self.nvar):
                self._write(blank)
            return
        for col in self.data:
            if col in self._variable_labels:
                label = self._variable_labels[col]
                if len(label) > 80:
                    raise ValueError('Variable labels must be 80 characters '
                                     'or fewer')
                is_latin1 = all(ord(c) < 256 for c in label)
                if not is_latin1:
                    raise ValueError('Variable labels must contain only '
                                     'characters that can be encoded in '
                                     'Latin-1')
                self._write(_pad_bytes(label, 81))
            else:
                self._write(blank)

    def _prepare_data(self):
        """Convert date columns to Stata elapsed values, pad/encode string
        columns, and repack everything into a structured record array ready
        for tofile()."""
        data = self.data
        typlist = self.typlist
        convert_dates = self._convert_dates
        # 1. Convert dates
        if self._convert_dates is not None:
            for i, col in enumerate(data):
                if i in convert_dates:
                    data[col] = _datetime_to_stata_elapsed_vec(data[col],
                                                               self.fmtlist[i])
        # 2. Convert bad string data to '' and pad to correct length
        dtype = []
        data_cols = []
        has_strings = False
        for i, col in enumerate(data):
            typ = ord(typlist[i])
            # Type codes <= 244 denote fixed-width string columns of that width
            if typ <= 244:
                has_strings = True
                data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
                stype = 'S%d' % typ
                dtype.append(('c' + str(i), stype))
                string = data[col].str.encode(self._encoding)
                data_cols.append(string.values.astype(stype))
            else:
                dtype.append(('c' + str(i), data[col].dtype))
                data_cols.append(data[col].values)
        dtype = np.dtype(dtype)
        if has_strings:
            self.data = np.fromiter(zip(*data_cols), dtype=dtype)
        else:
            self.data = data.to_records(index=False)

    def _write_data(self):
        """Dump the prepared record array directly to the open file."""
        data = self.data
        data.tofile(self._file)

    def _null_terminate(self, s, as_string=False):
        """Append a null byte to `s`; under Python 3 the result is encoded to
        bytes unless `as_string` is True."""
        null_byte = '\x00'
        if compat.PY3 and not as_string:
            s += null_byte
            return s.encode(self._encoding)
        else:
            s += null_byte
            return s
|
Adorable; if necessary, it can be used as decoration for a Children's Room, Hallway, Bedroom, Bathroom, Living Room, Outdoor area, etc.
1. Press the "Button Switch" at the first time: soft white light is on. Pat the lamp at the first time, automatic color-changing colorful lights is on; pat 2nd, light off; pat 3rd, soft white light is on.
2. Press the "Button Switch" at the second time: soft white light is on, pat the lamp 1st-9th, the light color will change sequentially: Red-Blue-Green-Yellow-Purple-Cyan-White-Light off-Soft white.
3. Press the "Button Switch" a third time: the light remains off.
|
#!/usr/bin/env python
"""
Copyright 2013 OpERA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
sys.path.insert(0, path)
from gnuradio import gr
from gnuradio import blocks
from gnuradio.eng_option import eng_option
from optparse import OptionParser
from struct import *
from threading import Thread
import time
import random
import numpy as np
from abc import ABCMeta, abstractmethod
#Project imports:
from OpERAFlow import OpERAFlow
from device import *
from sensing import EnergyDecision
from sensing import EnergySSArch, EnergyCalculator
from packet import PacketGMSKRx, PacketOFDMRx, PacketGMSKTx, PacketOFDMTx, SimpleTx
from utils import Channel, Logger, ChannelModeler
# Try to import easygui.
#try:
# import easygui
# easygui_import = True
#
#except ImportError:
# easygui is unavailable (its import above is commented out), so the
# console menu implementation is always used.
easygui_import = False
# Constants used in the program:
# Valid ranges for frequency (Hz) and gain multiplier:
MIN_FREQ = 100e6
MAX_FREQ = 2.2e9
MIN_GAIN = 0
MAX_GAIN = 30
# Option labels:
STR_FREQ = "Frequency"
STR_GAIN = "Gain multiplier"
# Questions shown to the user:
QUESTION_SET_FREQ = "Enter a frequency value. Should be in range"
QUESTION_SET_GAIN = "Enter the gain multiplier. Should be in range"
# Menu entries (operations):
NEW_FREQ = "Set a new frequency"
GAIN_MULTIPLIER = "Set a new gain multiplier"
QUIT = "Quit"
# Integers representing the operations:
OPT_SET_FREQ = 1
OPT_SET_GAIN = 2
OPT_QUIT = 3
# Others:
MIN_OPT = 1
MAX_OPT = 3
YES = 1
NO = 0
# Sentinels used by printing_energy to detect the ENTER key press.
ENTER = "enter"
RAW_ENTER = ""
def clear_screen():
    """
    Clear the terminal screen, choosing the command that matches the host OS.
    """
    if os.name in ('ce', 'nt', 'dos'):
        # Windows family command prompt.
        os.system('cls')
    elif 'posix' in os.name:
        # POSIX (Linux/Unix) terminal.
        os.system('clear')
class OpERAUtils(object):
    """
    Class with useful methods from OpERA: device construction and the
    interactive energy-printing loop.
    """

    def __init__(self):
        """
        CTOR
        """
        pass

    @staticmethod
    def device_definition():
        """
        Definition of the devices used in the program.
        @return Tuple (topblock, radio device) with the energy-sensing
                architecture attached.
        """
        tb = OpERAFlow(name='US')
        uhd_source = UHDSource()
        uhd_source.samp_rate = 195512
        energy = EnergySSArch(fft_size=512, mavg_size=5, algorithm=EnergyDecision(th=0))
        radio = RadioDevice(name="radio")
        radio.add_arch(source=uhd_source, arch=energy, sink=blocks.probe_signal_f(), uhd_device=uhd_source, name='ss')
        tb.add_radio(radio, "radio")
        return tb, radio

    @staticmethod
    def add_print_list():
        """
        Adds the energy decision value to the Logger's print list.
        """
        print "\n******************************************************************\n"
        print "\nPrinting the energy\n"
        Logger.add_to_print_list("energy_decision", "energy")
        print "\n******************************************************************\n"

    @staticmethod
    def printing_energy():
        """
        Prints the energy until the ENTER key is pressed.
        """
        clear_screen()
        key = None
        time.sleep(0.1)
        Logger._enable = True
        # Press enter to exit (stop the printing).
        # NOTE: identity test against the module constant ENTER works because
        # key is assigned that exact object below.
        while key is not ENTER:
            OpERAUtils.add_print_list()
            key = raw_input()
            # If "enter" key was pressed (empty input), exit the loop:
            if RAW_ENTER in key:
                key = ENTER
        Logger._enable = False
        Logger.remove_from_print_list("energy_decision", "energy")
class AbstractMenu(object):
    """
    Abstract base class for the menus.

    Subclasses supply the concrete I/O primitives (_show_error_msg,
    _choose_option, _get_value); this class implements the shared
    read-and-validate loop.
    """
    __metaclass__ = ABCMeta

    def __init__(self):
        """
        CTOR
        """
        pass

    def get_value_in_range(self, min_value, max_value, question, option):
        """
        Reads a value (from the user) and checks if it is in range
        (ie, min_value <= value <= max_value).
        @param min_value Minimum value of the range (float number)
        @param max_value Maximum value of the range (float number)
        @param question Question asked (string type)
        @param option Label of the value being set, STR_FREQ or STR_GAIN (string type)
        @return float_value The value read, cast to float (False if the user cancelled)
        @return no_value Indicates if the value returned is valid (no_value = False) or if the user has
                cancelled the operation (no_value = True).
        """
        # Check if the chosen option is "Set Frequency" or "Set Gain Multiplier", in order
        # to use the most appropriate string formatting.
        # BUG FIX: compare strings with == instead of 'is'; identity tests on
        # strings only work by accident (interning) in CPython.
        if option == STR_GAIN:
            mens = "%s (%.2f, %.2f)." % (question, min_value, max_value)
        elif option == STR_FREQ:
            mens = "%s (%.2e, %.2e)." % (question, min_value, max_value)
        float_value = False
        value_ok = False
        no_value = False
        while not value_ok and not no_value:
            value = self._get_value(mens)
            # If it is a valid input.
            if value is not None:
                try:
                    float_value = float(value)
                    # If the value is a float number but it's not in range, show an error message.
                    if float_value < min_value or float_value > max_value:
                        if option == STR_GAIN:
                            range_error = "%s should be in range (%.2f, %.2f)." % (option, min_value, max_value)
                        elif option == STR_FREQ:
                            range_error = "%s should be in range (%.2e, %.2e)." % (option, min_value, max_value)
                        self._show_error_msg(range_error)
                    # If the value is a float number and it's in range, the input is valid. Exit the loop.
                    else:
                        value_ok = True
                # If the input is not a float number, show an error message.
                except ValueError:
                    type_error = "%s should be a float number." % (option)
                    self._show_error_msg(type_error)
            # If the user has cancelled the operation.
            else:
                # Check if the user wants to quit.
                choices = ["Yes", "No"]
                msg = "Quit the %s setter? " % (option.lower())
                reply = self._choose_option(msg, choices)
                # BUG FIX: == instead of 'is' (string identity comparison).
                # NOTE(review): both concrete _choose_option implementations
                # appear to return ints (YES/NO), not "Yes" -- confirm the
                # intended protocol; with ints this should compare against YES.
                if reply == "Yes":
                    no_value = True
        # Returns the value (cast to float) and a boolean that indicates if the value
        # returned is valid or not (in case of a cancelled operation).
        return float_value, no_value

    @abstractmethod
    def _show_error_msg(self, msg):
        """
        Shows an error message with the appropriate GUI.
        @param msg Error message.
        """
        pass

    @abstractmethod
    def _choose_option(self, msg, choices):
        """
        Let the user choose an option and return the integer that represents it.
        @param msg Instruction message
        @param choices List of choices
        """
        pass

    @abstractmethod
    def _get_value(self, msg):
        """
        Returns the read value.
        @param msg The message to instruct the user.
        """
        pass
class Menu(AbstractMenu):
    """
    Class that manages the GUIs: selects the concrete implementation
    (Easygui or console) and forwards every call to it.
    """

    def __init__(self):
        """
        CTOR
        """
        AbstractMenu.__init__(self)
        # If the easygui import was successful, use it as the GUI;
        # otherwise fall back to the console menu.
        if easygui_import:
            self._menu = EasyguiMenu()
        else:
            self._menu = ConsoleMenu()

    def get_value_in_range(self, min_value, max_value, question, option):
        """
        Forward to the concrete menu's validated input loop.
        @param min_value Minimum value of the range (float number)
        @param max_value Maximum value of the range (float number)
        @param question Question asked (string type)
        @param option Option label (string type)
        """
        return self._menu.get_value_in_range(min_value, max_value, question, option)

    def main_menu(self):
        """
        Shows the main menu.
        """
        # NOTE(review): neither EasyguiMenu nor ConsoleMenu defines
        # _main_menu, so this raises AttributeError -- confirm whether
        # _show_menu was intended.
        self._menu._main_menu()

    def _show_error_msg(self, msg):
        """
        Shows the message.
        @param msg The message to show.
        """
        self._menu._show_error_msg(msg)

    def _choose_option(self, msg, choices):
        """
        Let the user choose an option from a list of them.
        @param msg Instruction message
        @param choices A list of choices.
        """
        return self._menu._choose_option(msg, choices)

    def _get_value(self, msg):
        """
        Gets an input from the user.
        @param msg Instruction message.
        @return The value read by the concrete menu.
        """
        # BUG FIX: the original dropped the delegated result, so this method
        # always returned None and every prompt looked like a cancellation.
        return self._menu._get_value(msg)

    def _show_menu(self, str_list):
        """
        Shows a menu with options and lets the user choose one of them.
        @param str_list A list with the options of the menu (strings).
        """
        return self._menu._show_menu(str_list)
class EasyguiMenu(AbstractMenu):
    """
    Class for the menu (shown with easygui).
    """

    def __init__(self):
        """
        CTOR
        """
        AbstractMenu.__init__(self)

    def _show_error_msg(self, msg):
        """
        Easygui implementation of showing a message.
        @param msg Message to show.
        """
        easygui.msgbox(msg)

    def _choose_option(self, msg, choices):
        """
        Easygui implementation of letting the user choose an option.
        @param msg Instruction message.
        @param choices A list of choices.
        @return YES or NO (implicitly None if the dialog was closed).
        """
        reply = easygui.buttonbox(msg, choices=choices)
        # BUG FIX: compare strings with == instead of 'is'; easygui returns
        # its own string object, so an identity test can fail even when the
        # text matches.
        if reply == "Yes":
            return YES
        elif reply == "No":
            return NO

    def _get_value(self, msg):
        """
        Easygui implementation of letting the user enter a value.
        @param msg Instruction message
        @return The entered string, or None if the user cancelled.
        """
        value = easygui.enterbox(msg)
        return value

    def _show_menu(self, str_list):
        """
        Easygui implementation of showing a menu and allowing the user to choose
        one of its options.
        @param str_list A list with the menu options.
        @return The integer code of the chosen operation.
        """
        choices = str_list
        msg = "Choose one option: "
        reply = easygui.buttonbox(msg, choices=choices)
        # BUG FIX: use == instead of 'is' for the string comparison, and
        # treat a closed dialog (reply is None) as "Quit" instead of
        # raising UnboundLocalError.
        if reply == NEW_FREQ:
            int_reply = 1
        elif reply == GAIN_MULTIPLIER:
            int_reply = 2
        else:
            int_reply = 3
        return int_reply
class ConsoleMenu(AbstractMenu):
"""
Class for the menu (shown in console)
"""
def __init__(self):
"""
CTOR
"""
AbstractMenu.__init__(self)
def _show_error_msg(self, msg):
"""
Console implementation of showing a message
@param msg Message to show.
"""
print msg
def _choose_option(self, msg, choices):
"""
Console implementation of letting the user choose an option.
@param msg Instruction message
@param choices A list of choices.
"""
reply_ok = False
while reply_ok is False:
print msg
for num, opt in enumerate(choices):
print "%i: %s" % (num, opt)
reply = raw_input("\nChoose one option: ")
try:
int_reply = int(reply)
if int_reply is 0:
reply_ok = True
return int_reply
elif int_reply is 1:
reply_ok = True
return int_reply
else:
print "\nReply should be 0 (Yes) or 1 (No)."
except ValueError:
print "\nReply should be an integer."
def _get_value(self, msg):
"""
Console implementation of letting the user enter a value.
@param msg Instruction message.
"""
str_value = raw_input("\n" + msg)
return str_value
def _show_menu(self, str_list):
"""
Console implementation of showing a menu and letting the user choose
one of its options.
@param str_list A list with the menu options.
"""
print "*****************************************************************\n"
for num, opt in enumerate(str_list):
print "%i. %s" % (num, opt)
print "*****************************************************************\n\n"
input_ok = False
while input_ok is False:
choice = raw_input("Choose one option: ")
if choice.isdigit() is True:
int_choice = int(choice)
if int_choice < MIN_OPT or int_choice > MAX_OPT:
print "\n\nChosen operation is invalid.\n"
else:
input_ok = True
else:
print "\n\nEnter a number that corresponds to a valid operation.\n"
return int_choice
def main(tb, radio):
"""
Main function
@param tb The topblock.
@param radio The radio device.
"""
# instance of Menu class
menu = Menu()
tb.start()
radio.set_center_freq(100e6)
continue_loop = True
no_freq = False
while continue_loop is True:
reply = menu._show_menu([NEW_FREQ, GAIN_MULTIPLIER, QUIT])
# Operation is quit.
if reply is OPT_QUIT:
choices = ["Yes", "No"]
msg = "Are you sure?"
reply_2 = menu._choose_option(msg, choices=choices)
# If the answer is YES, quit the program. Else, continues in the loop.
if reply_2 is YES:
tb.stop()
continue_loop = False
print "\n******************************************"
print "\tQuitting the program."
print "******************************************\n"
os._exit(1)
# Operation is "set a new frequency".
elif reply is OPT_SET_FREQ:
# gets the frequency
freq, no_freq = menu.get_value_in_range(MIN_FREQ, MAX_FREQ, QUESTION_SET_FREQ, STR_FREQ)
if no_freq is False:
radio.set_center_freq(freq)
# prints the energy
OpERAUtils.printing_energy()
# Operation is "set the gain multiplier".
elif reply is OPT_SET_GAIN:
# gets the gain
gain, no_gain = menu.get_value_in_range(MIN_GAIN, MAX_GAIN, QUESTION_SET_GAIN, STR_GAIN)
if no_gain is False:
radio.set_gain(gain)
OpERAUtils.printing_energy()
if __name__ == "__main__":
    # Build the flowgraph and radio device, then run the interactive loop.
    tb, radio = OpERAUtils.device_definition()
    try:
        main(tb, radio)
    except KeyboardInterrupt:
        # Ctrl-C: stop the flowgraph and dump the collected log data.
        tb.stop()
        Logger.dump('./dump/', '', 0)
|
[n] a change in the units or form of an expression: "conversion from Fahrenheit to Centigrade"
[n] a change of religion; "his conversion to the Catholic faith"
CONVERSION is a 10 letter word that starts with C.
\Con*ver"sion\, n. [L. conversio: cf. F. conversion.
Artificial conversion of water into ice. --Bacon.
The conversion of the aliment into fat. --Arbuthnot.
transformation of the outward life.
In prison under judgments imminent. --Milton.
Definition: Dreaming about a conversion indicates your hopes that some problem or situation can be changed. This dream also represents your mobility and adaptability to situations in life.
Definition: the use of a sphere of exchange for a transaction with which it is not generally associated.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 breakwall
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import sys
import threading
import os
if __name__ == '__main__':
    import inspect
    # When run as a script, chdir to this file's directory so relative
    # config paths resolve correctly.
    os.chdir(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))))
import server_pool
import db_transfer
from shadowsocks import shell
from configloader import load_config, get_config
class MainThread(threading.Thread):
    """Worker thread that drives a transfer object's database polling loop."""

    def __init__(self, obj):
        super(MainThread, self).__init__()
        self.obj = obj

    def run(self):
        """Enter the transfer object's blocking database loop."""
        self.obj.thread_db(self.obj)

    def stop(self):
        """Signal the transfer object to leave its database loop."""
        self.obj.thread_db_stop()
def main():
    """Select the transfer backend from the configuration and run its
    database loop in a worker thread until interrupted."""
    shell.check_python()
    # Dead `if False:` branch from the original removed: pick the transfer
    # implementation that matches the configured API interface.
    if get_config().API_INTERFACE == 'mudbjson':
        thread = MainThread(db_transfer.MuJsonTransfer)
    elif get_config().API_INTERFACE == 'sspanelv2':
        thread = MainThread(db_transfer.DbTransfer)
    else:
        thread = MainThread(db_transfer.Dbv3Transfer)
    thread.start()
    try:
        # Keep the main thread alive while the worker runs.
        while thread.is_alive():
            time.sleep(10)
    except (KeyboardInterrupt, IOError, OSError):
        import traceback
        traceback.print_exc()
        thread.stop()
if __name__ == '__main__':
    # Script entry point.
    main()
|
In the world of personality types there are many models that use many different names to categorise people, although in truth most of these models seem to be based on just a few theories, namely those of Jung and/or Myers-Briggs. AusIDentities is no different and incorporates the theories of both of these systems, we simply chose to use Australian Animals to represent the personality types that we identify and work with instead of letters, colours or more abstract names.
So when people question the use of these particular animals, as they sometimes do, we tell them it is because the characteristics of these four in particular most accurately reflect the kinds of qualities that we would like to highlight as being part of the innate personality types they represent. Of the four animals though, it is the Wombat that most often comes under scrutiny. “Why the Wombat?” people ask, “aren’t they just slow and cumbersome?” or some similar comparison.
To understand why we chose the Wombat then, it might serve people to understand a little more about this often-maligned little Australian native marsupial. Firstly, they are not slow. Although they usually do take their time they can easily outrun a person, and are capable of speeds up to 40km/hr over a short distance. This quality relates to personality by virtue of the fact that, generally speaking, anyone in a rush to complete tasks is more likely to make mistakes, something the Wombat personality type dislike immensely. For the Wombats, slow and steady usually wins the race.
Wombats pride themselves on their accuracy and attention to detail, therefore rushing, working under extreme pressure or leaving things until the last minute will usually be counter-productive to the type that wants to do things right. If you are looking at ways to improve your relationship with the Wombat(s) in your life, then make sure you do things right the first time, have a plan, leave time for contingencies, and definitely do not take short cuts! This also applies to students of this type, who simply want to be shown the correct way to complete tasks, and also dislike chaos of any kind.
The Wombat in nature is an intelligent, hard-working little creature, that likes to return to their burrow after foraging for food. They are also shy creatures, avoiding contact with people and generally keeping to themselves. These qualities are typical of the Wombat personality type, who dislike too much fuss, especially made in public where a simple token of your gratitude or esteem would suffice instead. The home environment is also hugely important for them, and the phrase ‘A man’s (and a women’s) home is their castle’ may well have been written with this type in mind.
As for the ‘hard-working’ quality, which they undoubtedly share with their animal namesake, this is an attribute that, while very admirable, can sometimes get them into strife. Part of the personal growth for the Wombat is learning when to ask for help and to understand when the deadline is unrealistic; when, in order to do a proper job, more time needs to be taken or more personnel involved. Instead, these diligent people will often drive themselves to the point of burn-out or breakdown in order to meet the deadline. It seems that just when they need it most, common sense is abandoned in favour of an uncompromising approach to tasks. They will get it done on time if it kills them! The same approach by students of this type will often see them staying up late just to get their homework or assignments done.
Absolutely the rest of the world can learn to work with the Wombats, putting work, duty and responsibility first, and by not changing your mind or altering the plan at the last minute. We help them by being consistent, and putting accuracy and consistency above short cuts and often ill thought out new approaches to tasks. However, it is not just up to the rest of the world to accommodate the Wombat Personality type, and to have true collaboration we must also ask the Wombats to accommodate us.
Self-improvement for all Wombats often means recognising the difference between needing to have something done and wanting to have something done, and understanding their way is not always the right way, even if it is the preferred way. Innovation means sometimes discarding what was in favour of what could be. That is not to say Wombats need to abandon consistency and routine, especially when circumstances require those qualities. It simply means that by taking a more flexible approach to work, life and learning, the Wombats can eventually come to embrace the subtle art of being at peace within chaos. After all, the only constant in life is actually change!
|
from twisted.internet.task import LoopingCall
from twisted.plugin import IPlugin
from heufybot.moduleinterface import IBotModule
from heufybot.modules.commandinterface import BotCommand
from heufybot.utils import isNumber, networkName
from heufybot.utils.timeutils import now, strftimeWithTimezone, timeDeltaString
from zope.interface import implements
from datetime import datetime, timedelta
import re
class EventCommand(BotCommand):
    """Bot command module that keeps a per-network events database.

    Users can add, query and remove dated events, and channels/users can
    subscribe to announcements that fire when an event's date passes.
    Events and subscriptions are persisted through ``self.bot.storage``.
    """
    implements(IPlugin, IBotModule)

    name = "Event"

    def triggers(self):
        """Return the command words this module responds to."""
        return ["event", "events", "timetill", "timesince", "dateof", "revent", "subevent", "unsubevent"]

    def load(self):
        """Initialize help texts, persisted state and the announcement loop."""
        self.help = "Commands: event <yyyy-MM-dd> (<HH:mm>) <event>, events (<days>), timetill <event>, timesince " \
                    "<event>, dateof <event>, revent <event>, subevent, unsubevent | Add, request or remove an event " \
                    "or subscribe to them."
        self.commandHelp = {
            "event": "event <yyyy-MM-dd> (<HH:mm>) <event> | Add an event to the events database.",
            "events": "events <days> | Request all events that occur within the given number of days. The default is "
                      "a week. The maximum is a year.",
            "timetill": "timetill <event> | Request the amount of time until a specified event occurs.",
            "timesince": "timesince <event> | Request the amount of time since a specified event occurred.",
            "dateof": "dateof <event> | Request the date of a specified event.",
            "revent": "revent <event> | Remove a specified event that was added by you from the events database.",
            "subevent": "subevent | Subscribe to event announcements. PM to subscribe to them in PM. Requires admin "
                        "permission to subscribe channels.",
            # Fixed key: this was misspelled "unbsubevent", which made the
            # help lookup for the "unsubevent" command fail.
            "unsubevent": "unsubevent | Unsubscribe to event announcements. PM to unsubscribe from them in PM. "
                          "Requires admin permission to unsubscribe channels."
        }
        if "events" not in self.bot.storage:
            self.bot.storage["events"] = {}
        self.events = self.bot.storage["events"]
        if "event-subs" not in self.bot.storage:
            self.bot.storage["event-subs"] = {}
        self.subscriptions = self.bot.storage["event-subs"]
        self.announcementLoopCall = LoopingCall(self.checkEvents)
        self.announcementLoopCall.start(300, now=True)  # Announce events every 5 minutes

    def checkPermissions(self, server, source, user, command):
        """Allow channel (un)subscription only for channel ops or bot admins.

        All other commands are available to everyone.
        """
        if command in ["subevent", "unsubevent"] and source[0] in self.bot.servers[server].supportHelper.chanTypes:
            channel = self.bot.servers[server].channels[source]
            if channel.userIsChanOp(user):
                return True
            return not self.bot.moduleHandler.runActionUntilFalse("checkadminpermission", server, source, user,
                                                                  "event-subscribe")
        return True

    def execute(self, server, source, command, params, data):
        """Dispatch the triggered command to the matching event action."""
        if networkName(self.bot, server) not in self.events:
            self.events[networkName(self.bot, server)] = []
        if command == "event":
            if len(params) == 0:
                self.replyPRIVMSG(server, source, "Add what event?")
                return
            # Try the date+time format first, then fall back to date-only.
            try:
                date = datetime.strptime(" ".join(params[0:2]), "%Y-%m-%d %H:%M")
                eventOffset = 2
                if len(params) < 3:
                    self.replyPRIVMSG(server, source, "Add what event?")
                    return
            except ValueError:
                try:
                    date = datetime.strptime(params[0], "%Y-%m-%d")
                    eventOffset = 1
                    if len(params) < 2:
                        self.replyPRIVMSG(server, source, "Add what event?")
                        return
                except ValueError:
                    e = "The date format you specified is invalid. The format is yyyy-MM-dd or yyyy-MM-dd HH:mm."
                    self.replyPRIVMSG(server, source, e)
                    return
            event = {
                "event": " ".join(params[eventOffset:]),
                "date": date,
                "user": data["user"].nick,
                # Past events are marked fired so the announcement loop
                # does not announce them retroactively.
                "fired": date < now()
            }
            self.events[networkName(self.bot, server)].append(event)
            self.bot.storage["events"] = self.events
            m = "Event {!r} on date {} was added to the events database!".format(event["event"],
                                                                                strftimeWithTimezone(date))
            self.replyPRIVMSG(server, source, m)
        elif command == "timetill":
            if len(params) == 0:
                self.replyPRIVMSG(server, source, "You didn't specify an event")
                return
            # Search only future events, nearest first.
            events = [x for x in self.events[networkName(self.bot, server)] if x["date"] > now()]
            events.sort(key=lambda item: item["date"])
            for event in events:
                if re.search(" ".join(params), event["event"], re.IGNORECASE):
                    m = "{}'s event {!r} will occur in {}.".format(event["user"], event["event"], timeDeltaString(
                        event["date"], now()))
                    self.replyPRIVMSG(server, source, m)
                    break
            else:
                m = "No events matching {!r} were found in the events database.".format(" ".join(params))
                self.replyPRIVMSG(server, source, m)
        elif command == "timesince":
            if len(params) == 0:
                self.replyPRIVMSG(server, source, "You didn't specify an event")
                return
            # Search only past events, most recent first.
            events = [x for x in self.events[networkName(self.bot, server)] if x["date"] < now()]
            events.sort(key=lambda item: item["date"], reverse=True)
            for event in events:
                if re.search(" ".join(params), event["event"], re.IGNORECASE):
                    m = "{}'s event {!r} occurred {} ago.".format(event["user"], event["event"], timeDeltaString(
                        now(), event["date"]))
                    self.replyPRIVMSG(server, source, m)
                    break
            else:
                m = "No events matching {!r} were found in the events database.".format(" ".join(params))
                self.replyPRIVMSG(server, source, m)
        elif command == "dateof":
            if len(params) == 0:
                self.replyPRIVMSG(server, source, "You didn't specify an event")
                return
            # Prefer the next upcoming match; fall back to past events.
            events = [x for x in self.events[networkName(self.bot, server)] if x["date"] > now()]
            events.sort(key=lambda item: item["date"])
            for event in events:
                if re.search(" ".join(params), event["event"], re.IGNORECASE):
                    m = "{}'s event {!r} will occur on {}.".format(event["user"], event["event"],
                                                                   strftimeWithTimezone(event["date"]))
                    self.replyPRIVMSG(server, source, m)
                    break
            else:
                events = [x for x in self.events[networkName(self.bot, server)] if x["date"] < now()]
                events.sort(key=lambda item: item["date"], reverse=True)
                for event in events:
                    if re.search(" ".join(params), event["event"], re.IGNORECASE):
                        m = "{}'s event {!r} occurred on {}.".format(event["user"], event["event"],
                                                                     strftimeWithTimezone(event["date"]))
                        self.replyPRIVMSG(server, source, m)
                        break
                else:
                    m = "No events matching {!r} were found in the events database.".format(" ".join(params))
                    self.replyPRIVMSG(server, source, m)
        elif command == "events":
            if len(params) == 0 or not isNumber(params[0]):
                days = 7
            else:
                days = int(params[0]) if int(params[0]) < 365 else 365
            events = [x for x in self.events[networkName(self.bot, server)] if x["date"] > now() and x[
                "date"] <= now() + timedelta(days)]
            dayString = "" if days == 1 else "s"
            if len(events) > 0:
                events.sort(key=lambda item: item["date"])
                eventNames = [x["event"] for x in events]
                m = "Events occurring in the next {} day{}: {}.".format(days, dayString, ", ".join(eventNames))
            else:
                m = "No events are occurring in the next {} day{}.".format(days, dayString)
            self.replyPRIVMSG(server, source, m)
        elif command == "revent":
            if len(params) == 0:
                self.replyPRIVMSG(server, source, "You didn't specify an event")
                return
            # NOTE(review): future events can be removed by anyone, while past
            # events are restricted to their owner — confirm this is intended.
            events = [x for x in self.events[networkName(self.bot, server)] if x["date"] > now()]
            events.sort(key=lambda item: item["date"])
            for event in events:
                if re.search(" ".join(params), event["event"], re.IGNORECASE):
                    self.events[networkName(self.bot, server)].remove(event)
                    self.bot.storage["events"] = self.events
                    m = "{}'s event {!r} with date {} has been removed from the events database.".format(
                        event["user"], event["event"], strftimeWithTimezone(event["date"]))
                    self.replyPRIVMSG(server, source, m)
                    break
            else:
                events = [x for x in self.events[networkName(self.bot, server)] if x["date"] < now() and x[
                    "user"].lower() == data["user"].nick.lower()]
                events.sort(key=lambda item: item["date"], reverse=True)
                for event in events:
                    if re.search(" ".join(params), event["event"], re.IGNORECASE):
                        self.events[networkName(self.bot, server)].remove(event)
                        self.bot.storage["events"] = self.events
                        m = "{}'s event {!r} with date {} has been removed from the events database.".format(
                            event["user"], event["event"], strftimeWithTimezone(event["date"]))
                        self.replyPRIVMSG(server, source, m)
                        break
                else:
                    m = "No events matching {!r} by you were found in the events database.".format(" ".join(params))
                    self.replyPRIVMSG(server, source, m)
        elif command == "subevent" or command == "unsubevent":
            if networkName(self.bot, server) not in self.subscriptions:
                self.subscriptions[networkName(self.bot, server)] = []
            src = source if "channel" in data else data["user"].nick
            subAction = command == "subevent"
            self._handleSubscription(server, src, subAction)

    def checkEvents(self):
        """Announce events whose date has just passed to subscribed sources."""
        for network in self.subscriptions:
            if network not in self.events:
                continue
            try:
                server = [x for x in self.bot.servers.itervalues() if x.supportHelper.network == network][0].name
            except IndexError:  # We're not currently connected to this network
                continue
            sources = [x for x in self.subscriptions[network] if x in self.bot.servers[server].channels or x in
                       self.bot.servers[server].users]
            if len(sources) == 0:
                continue  # Only fire events if there's a channel or user to fire them at
            events = []
            for event in self.events[network]:
                if event["date"] < now() and not event["fired"]:
                    events.append(event)
                    # The dict is shared with the stored list, so marking it
                    # here updates the database entry as well.
                    event["fired"] = True
            if len(events) == 0:
                continue
            self.bot.storage["events"] = self.events
            for source in sources:
                for event in events:
                    m = "{}'s event {!r} is happening right now!".format(event["user"], event["event"])
                    self.replyPRIVMSG(server, source, m)

    def _handleSubscription(self, server, source, subAction):
        """Add (subAction=True) or remove ``source`` from the subscription list
        for this server's network and report the result.
        """
        if subAction:
            if source not in self.subscriptions[networkName(self.bot, server)]:
                self.subscriptions[networkName(self.bot, server)].append(source)
                self.bot.storage["event-subs"] = self.subscriptions
                m = "{} is now subscribed to event announcements.".format(source)
            else:
                m = "{} is already subscribed to event announcements!".format(source)
        else:
            if source in self.subscriptions[networkName(self.bot, server)]:
                self.subscriptions[networkName(self.bot, server)].remove(source)
                self.bot.storage["event-subs"] = self.subscriptions
                m = "{} is now unsubscribed from event announcements.".format(source)
            else:
                m = "{} is not subscribed to event announcements!".format(source)
        self.replyPRIVMSG(server, source, m)


eventCommand = EventCommand()
|
The future cyber-age networked infrastructures starting from objects, furniture, appliances, homes, buildings and extending to campuses, cars, roads and cities are extremely likely to be reliant on sensors embedded in the infrastructure including intelligent mobile devices. This will give rise to a new dimension of connectivity and services and is also termed as the Internet of Things (IoT). Such technologies will act as a catalyst to the evolution of a new generation of services that will have a great impact on the social and technological eco-system. It can be envisaged that these next generation systems and services will encompass several domains such as e-Governance, Health Care, Transportation, Waste Management, Food Supply Chains, Energy & Utilities, Insurance etc. New technologies and applications built on top of them must be developed in order to fulfil this vision of Intelligent Infrastructure.
The First International Workshop on Building "Intelligence" for Intelligent Infrastructure using Internet-of-Things (BI4T) will be co-hosted with ICDCN 2013. It intends to provide an inter-disciplinary platform for researchers to share their ideas and results and discuss the issues pertinent to the signal and data processing required on the collected sensor data to make the infrastructure "intelligent".
Papers should be a maximum of 6 pages, including title, abstract, figures and references, in ACM conference proceedings format (http://www.acm.org/sigs/publications/proceedings-templates#aL1), not published elsewhere and not currently under review by another conference or journal. Submissions deviating from these guidelines will not be reviewed. Papers should be submitted via the EasyChair submission management system (https://www.easychair.org/conferences/?conf=bi4t2013). Workshop proceedings will be published in the ACM Digital Library, and selected papers will be considered for publication in the CSI Journal of Computing.
|
#!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2017 DESY, Jan Kotanski <jkotan@mail.desy.de>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file NXSDataWriterTest.py
# unittests for NXSDataWriter
#
import unittest
import os
import sys
import json
import numpy
import PyTango
try:
from ProxyHelper import ProxyHelper
except Exception:
from .ProxyHelper import ProxyHelper
import struct
from nxstools import h5cppwriter as H5CppWriter
try:
import ServerSetUp
except Exception:
from . import ServerSetUp
# True when running on a 64-bit machine (pointer size is 8 bytes)
IS64BIT = (struct.calcsize("P") == 8)
# test fixture
class NXSDataWriterH5CppTest(unittest.TestCase):
# server counter
serverCounter = 0
# constructor
# \param methodName name of the test method
    def __init__(self, methodName):
        """Create the test case and its Tango server fixture.

        :param methodName: name of the test method to run
        """
        unittest.TestCase.__init__(self, methodName)
        # Each test-case instance gets a uniquely named server device.
        NXSDataWriterH5CppTest.serverCounter += 1
        sins = self.__class__.__name__ + \
            "%s" % NXSDataWriterH5CppTest.serverCounter
        self._sv = ServerSetUp.ServerSetUp("testp09/testtdw/" + sins, sins)
        # Expected device status string for each Tango device state.
        self.__status = {
            PyTango.DevState.OFF: "Not Initialized",
            PyTango.DevState.ON: "Ready",
            PyTango.DevState.OPEN: "File Open",
            PyTango.DevState.EXTRACT: "Entry Open",
            PyTango.DevState.RUNNING: "Writing ...",
            PyTango.DevState.FAULT: "Error",
        }
        # Scan XML with parameterized entry suffix and file name
        # (substitutions: entry suffix, file name, entry suffix).
        self._scanXmlb = """
<definition>
  <group type="NXentry" name="entry%s">
    <group type="NXinstrument" name="instrument">
      <attribute name ="short_name"> scan instrument </attribute>
      <group type="NXdetector" name="detector">
        <field units="m" type="NX_FLOAT" name="counter1">
          <strategy mode="STEP"/>
          <datasource type="CLIENT">
            <record name="exp_c01"/>
          </datasource>
        </field>
        <field units="" type="NX_FLOAT" name="mca">
          <dimensions rank="1">
            <dim value="2048" index="1"/>
          </dimensions>
          <strategy mode="STEP"/>
          <datasource type="CLIENT">
            <record name="p09/mca/exp.02"/>
          </datasource>
        </field>
      </group>
    </group>
    <group type="NXdata" name="data">
      <link target="/NXentry/NXinstrument/NXdetector/mca" name="data">
        <doc>
          Link to mca in /NXentry/NXinstrument/NXdetector
        </doc>
      </link>
      <link target="%s://entry%s/instrument/detector/counter1" name="cnt1">
        <doc>
          Link to counter1 in /NXentry/NXinstrument/NXdetector
        </doc>
      </link>
    </group>
  </group>
</definition>
"""
        # Scan XML with a fixed entry1; the mca link target is
        # parameterized with the file name.
        self._scanXml = """
<definition>
  <group type="NXentry" name="entry1">
    <group type="NXinstrument" name="instrument">
      <attribute name ="short_name"> scan instrument </attribute>
      <group type="NXdetector" name="detector">
        <field units="m" type="NX_FLOAT" name="counter1">
          <strategy mode="STEP"/>
          <datasource type="CLIENT">
            <record name="exp_c01"/>
          </datasource>
        </field>
        <field units="" type="NX_FLOAT" name="mca">
          <dimensions rank="1">
            <dim value="2048" index="1"/>
          </dimensions>
          <strategy mode="STEP"/>
          <datasource type="CLIENT">
            <record name="p09/mca/exp.02"/>
          </datasource>
        </field>
      </group>
    </group>
    <group type="NXdata" name="data">
      <link target="%s://entry1/instrument/detector/mca" name="data">
        <doc>
          Link to mca in /NXentry/NXinstrument/NXdetector
        </doc>
      </link>
      <link target="/NXentry/NXinstrument/NXdetector/counter1" name="counter1">
        <doc>
          Link to counter1 in /NXentry/NXinstrument/NXdetector
        </doc>
      </link>
    </group>
  </group>
</definition>
"""
        # Scan XML variant using only path-based (non-parameterized) links.
        self._scanXml1 = """
<definition>
  <group type="NXentry" name="entry1">
    <group type="NXinstrument" name="instrument">
      <attribute name ="short_name"> scan instrument </attribute>
      <group type="NXdetector" name="detector">
        <field units="m" type="NX_FLOAT" name="counter1">
          <strategy mode="STEP"/>
          <datasource type="CLIENT">
            <record name="exp_c01"/>
          </datasource>
        </field>
        <field units="" type="NX_FLOAT" name="mca">
          <dimensions rank="1">
            <dim value="2048" index="1"/>
          </dimensions>
          <strategy mode="STEP"/>
          <datasource type="CLIENT">
            <record name="p09/mca/exp.02"/>
          </datasource>
        </field>
      </group>
    </group>
    <group type="NXdata" name="data">
      <link target="/NXentry/NXinstrument/NXdetector/mca" name="data">
        <doc>
          Link to mca in /NXentry/NXinstrument/NXdetector
        </doc>
      </link>
      <link target="/entry1/instrument/detector/counter1" name="cnt1">
        <doc>
          Link to counter1 in /NXentry/NXinstrument/NXdetector
        </doc>
      </link>
    </group>
  </group>
</definition>
"""
        # Scan XML with a 2D NX_INT64 image field instead of the mca.
        self._scanXml3 = """
<definition>
  <group type="NXentry" name="entry1">
    <group type="NXinstrument" name="instrument">
      <attribute name ="short_name"> scan instrument </attribute>
      <group type="NXdetector" name="detector">
        <field units="m" type="NX_FLOAT" name="counter1">
          <strategy mode="STEP"/>
          <datasource type="CLIENT">
            <record name="exp_c01"/>
          </datasource>
        </field>
        <field units="" type="NX_INT64" name="image">
          <dimensions rank="2">
            <dim value="100" index="1"/>
            <dim value="200" index="2"/>
          </dimensions>
          <strategy mode="STEP"/>
          <datasource type="CLIENT">
            <record name="image"/>
          </datasource>
        </field>
      </group>
    </group>
    <group type="NXdata" name="data">
      <link target="/NXentry/NXinstrument/NXdetector/image" name="data">
        <doc>
          Link to mca in /NXentry/NXinstrument/NXdetector
        </doc>
      </link>
      <link target="%s://entry1/instrument/detector/counter1" name="cnt1">
        <doc>
          Link to counter1 in /NXentry/NXinstrument/NXdetector
        </doc>
      </link>
    </group>
  </group>
</definition>
"""
        # Client data records fed to the writer during the scan tests.
        self._counter = [0.1, 0.2]
        self._mca1 = [e * 0.1 for e in range(2048)]
        self._mca2 = [(float(e) / (100. + e)) for e in range(2048)]
        self._image1 = [[(i + j) for i in range(100)] for j in range(200)]
        self._image2 = [[(i - j) for i in range(100)] for j in range(200)]
        self._image1a = [[(i + j) for i in range(200)] for j in range(100)]
        self._image2a = [[(i - j) for i in range(200)] for j in range(100)]
        # Native integer/float type names on this machine.
        self._bint = "int64" if IS64BIT else "int32"
        self._buint = "uint64" if IS64BIT else "uint32"
        self._bfloat = "float64" if IS64BIT else "float32"
# test starter
# \brief Common set up of Tango Server
    def setUp(self):
        """Start the Tango server fixture before each test."""
        self._sv.setUp()
# test closer
# \brief Common tear down oif Tango Server
    def tearDown(self):
        """Stop the Tango server fixture after each test."""
        self._sv.tearDown()
def setProp(self, rc, name, value):
db = PyTango.Database()
name = "" + name[0].upper() + name[1:]
db.put_device_property(
self._sv.new_device_info_writer.name,
{name: value})
rc.Init()
# Exception tester
# \param exception expected exception
# \param method called method
# \param args list with method arguments
# \param kwargs dictionary with method arguments
def myAssertRaise(self, exception, method, *args, **kwargs):
try:
error = False
method(*args, **kwargs)
except Exception:
error = True
self.assertEqual(error, True)
# openFile test
# \brief It tests validation of opening and closing H5 files.
    def test_openFile(self):
        """Open and close an H5 file through the server and validate the
        produced file's root attributes.
        """
        fun = sys._getframe().f_code.co_name
        print("Run: %s.%s() " % (self.__class__.__name__, fun))
        try:
            fname = '%s/test.h5' % os.getcwd()
            dp = PyTango.DeviceProxy(self._sv.device)
            self.assertTrue(ProxyHelper.wait(dp, 10000))
            # print 'attributes', dp.attribute_list_query()
            self.assertEqual(dp.state(), PyTango.DevState.ON)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            self.setProp(dp, "writer", "h5cpp")
            dp.FileName = fname
            dp.OpenFile()
            # OpenFile moves the device into the OPEN state with empty settings.
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            self.assertEqual(dp.XMLSettings, "")
            self.assertEqual(dp.JSONRecord, "{}")
            dp.CloseFile()
            self.assertEqual(dp.state(), PyTango.DevState.ON)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # check the created file
            f = H5CppWriter.open_file(fname, readonly=True)
            # self.assertEqual(f.name, fname)
            f = f.root()
            # self.assertEqual(f.path, fname)
            # print("\nFile attributes:")
            cnt = 0
            for at in f.attributes:
                cnt += 1
                # print(at.name),"=",at[...]
            self.assertEqual(cnt, len(f.attributes))
            # The writer stamps 5 root attributes on a fresh file
            # (presumably NX_class, file_name and time/version metadata
            # — confirm against the writer implementation).
            self.assertEqual(5, len(f.attributes))
            # print ""
            self.assertEqual(f.attributes["file_name"][...], fname)
            self.assertTrue(f.attributes["NX_class"][...], "NXroot")
            self.assertEqual(f.size, 1)
            cnt = 0
            for ch in f:
                cnt += 1
            self.assertEqual(cnt, f.size)
            f.close()
        finally:
            if os.path.isfile(fname):
                os.remove(fname)
# openFile test
# \brief It tests validation of opening and closing H5 files.
def test_openFileDir(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
directory = '#nexdatas_test_directory#'
dirCreated = False
dirExists = False
if not os.path.exists(directory):
try:
os.makedirs(directory)
dirCreated = True
dirExists = True
except Exception:
pass
else:
dirExists = True
if dirExists:
fname = '%s/%s/%s%s.h5' % (
os.getcwd(), directory, self.__class__.__name__, fun)
else:
fname = '%s/%s%s.h5' % (os.getcwd(), self.__class__.__name__, fun)
if dirCreated:
fname = '%s/%s/%s%s.h5' % (
os.getcwd(), directory, self.__class__.__name__, fun)
else:
fname = '%s/%s%s.h5' % (os.getcwd(), self.__class__.__name__, fun)
try:
fname = '%s/test.h5' % os.getcwd()
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
dp.OpenFile()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.XMLSettings, "")
self.assertEqual(dp.JSONRecord, "{}")
dp.CloseFile()
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
# self.assertEqual(f.name, fname)
# self.assertEqual(f.path, fname)
# print("\nFile attributes:")
cnt = 0
for at in f.attributes:
cnt += 1
# print(at.name),"=",at[...]
self.assertEqual(cnt, len(f.attributes))
self.assertEqual(5, len(f.attributes))
# print ""
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 1)
cnt = 0
for ch in f:
cnt += 1
self.assertEqual(cnt, f.size)
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
if dirCreated:
os.removedirs(directory)
# openEntry test
# \brief It tests validation of opening and closing entry in H5 files.
    def test_openEntry(self):
        """Open and close an NXentry defined by XML settings and verify the
        entry group and the nexus_logs bookkeeping group in the file.
        """
        fun = sys._getframe().f_code.co_name
        print("Run: %s.%s() " % (self.__class__.__name__, fun))
        fname = '%s/test2.h5' % os.getcwd()
        xml = '<definition> <group type="NXentry" name="entry"/></definition>'
        try:
            dp = PyTango.DeviceProxy(self._sv.device)
            self.assertTrue(ProxyHelper.wait(dp, 10000))
            # print 'attributes', dp.attribute_list_query()
            self.setProp(dp, "writer", "h5cpp")
            dp.FileName = fname
            self.assertEqual(dp.state(), PyTango.DevState.ON)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.OpenFile()
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.XMLSettings = xml
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.OpenEntry()
            # Opening an entry moves the device into the EXTRACT state.
            self.assertEqual(dp.status(), self.__status[dp.state()])
            self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
            dp.CloseEntry()
            self.assertEqual(dp.status(), self.__status[dp.state()])
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            dp.CloseFile()
            self.assertEqual(dp.status(), self.__status[dp.state()])
            self.assertEqual(dp.state(), PyTango.DevState.ON)
            # check the created file
            f = H5CppWriter.open_file(fname, readonly=True)
            f = f.root()
            # self.assertEqual(f.path, fname)
            cnt = 0
            for at in f.attributes:
                cnt += 1
            self.assertEqual(cnt, len(f.attributes))
            self.assertEqual(f.attributes["file_name"][...], fname)
            self.assertTrue(f.attributes["NX_class"][...], "NXroot")
            # Two root children are expected: "entry" and "nexus_logs".
            self.assertEqual(f.size, 2)
            cnt = 0
            for ch in f:
                self.assertTrue(ch.is_valid)
                cnt += 1
                if ch.name == "entry":
                    self.assertEqual(ch.name, "entry")
                    self.assertEqual(len(ch.attributes), 1)
                    for at in ch.attributes:
                        self.assertTrue(at.is_valid)
                        self.assertTrue(hasattr(at.shape, "__iter__"))
                        self.assertEqual(len(at.shape), 0)
                        self.assertEqual(at.shape, ())
                        self.assertEqual(at.dtype, "string")
                        # self.assertEqual(at.dtype,"string")
                        self.assertEqual(at.name, "NX_class")
                        self.assertEqual(at[...], "NXentry")
                else:
                    # The writer records the XML settings and the Python
                    # version under nexus_logs/configuration.
                    self.assertEqual(ch.name, "nexus_logs")
                    ch2 = ch.open("configuration")
                    for c in ch2:
                        if c.name == "nexus__entry__1_xml":
                            self.assertEqual(
                                c.read(),
                                '<definition> '
                                '<group type="NXentry" name="entry"/>'
                                '</definition>')
                            print(c.read())
                        else:
                            self.assertEqual(c.name, "python_version")
                            self.assertEqual(c.read(), sys.version)
                    self.assertEqual(len(ch.attributes), 1)
                    for at in ch.attributes:
                        self.assertTrue(at.is_valid)
                        self.assertTrue(hasattr(at.shape, "__iter__"))
                        self.assertEqual(len(at.shape), 0)
                        self.assertEqual(at.shape, ())
                        self.assertEqual(at.dtype, "string")
                        # self.assertEqual(at.dtype,"string")
                        self.assertEqual(at.name, "NX_class")
                        self.assertEqual(at[...], "NXcollection")
            self.assertEqual(cnt, f.size)
            f.close()
        finally:
            if os.path.isfile(fname):
                os.remove(fname)
# openEntryWithSAXParseException test
# \brief It tests validation of opening and closing entry
# with SAXParseException
    def test_openEntryWithSAXParseException(self):
        """Feed invalid XML to the server and check it raises a Tango
        DevFailed error, stays usable, and then accepts valid XML.
        """
        fun = sys._getframe().f_code.co_name
        print("Run: %s.%s() " % (self.__class__.__name__, fun))
        fname = '%s/test2.h5' % os.getcwd()
        wrongXml = """Ala ma kota."""
        xml = """<definition/>"""
        try:
            dp = PyTango.DeviceProxy(self._sv.device)
            self.assertTrue(ProxyHelper.wait(dp, 10000))
            # print 'attributes', dp.attribute_list_query()
            self.setProp(dp, "writer", "h5cpp")
            dp.FileName = fname
            self.assertEqual(dp.state(), PyTango.DevState.ON)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.OpenFile()
            # Tri-state flag: None = no exception, True = expected
            # PyTango.DevFailed, False = some other (wrong) exception.
            try:
                error = None
                dp.XMLSettings = wrongXml
            except PyTango.DevFailed:
                error = True
            except Exception:
                error = False
            self.assertEqual(error, True)
            self.assertTrue(error is not None)
            # The device stays OPEN and usable after the rejected settings.
            self.assertEqual(dp.status(), self.__status[dp.state()])
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            # dp.CloseFile()
            # dp.OpenFile()
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.XMLSettings = xml
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.OpenEntry()
            self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.CloseEntry()
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.CloseFile()
            self.assertEqual(dp.state(), PyTango.DevState.ON)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # check the created file
            f = H5CppWriter.open_file(fname, readonly=True)
            f = f.root()
            # self.assertEqual(f.path, fname)
            cnt = 0
            for at in f.attributes:
                cnt += 1
            self.assertEqual(cnt, len(f.attributes))
            self.assertEqual(f.attributes["file_name"][...], fname)
            self.assertTrue(f.attributes["NX_class"][...], "NXroot")
            self.assertEqual(f.size, 1)
            cnt = 0
            for ch in f:
                cnt += 1
            self.assertEqual(cnt, f.size)
            f.close()
        finally:
            if os.path.isfile(fname):
                os.remove(fname)
# scanRecord test
# \brief It tests recording of simple h5 file
def test_scanRecord_twoentries(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/scantest2.h5' % os.getcwd()
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXmlb % ("001", fname, "001")
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[0]) +
', "p09/mca/exp.02":' +
str(self._mca1) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXmlb % ("002", fname, "002")
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[0]) +
', "p09/mca/exp.02":' +
str(self._mca1) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
from nxstools import filewriter as FileWriter
FileWriter.writer = H5CppWriter
f = FileWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 3)
ent = ["001", "002"]
for et in ent:
en = f.open("entry%s" % et)
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry%s" % et)
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
value = cnt.read()
# value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("cnt1")
self.assertTrue(cnt.is_valid)
# ???
# self.assertEqual(cnt.name,"cnt1")
# self.assertEqual(cnt.name,"counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# scanRecord test
# \brief It tests recording of simple h5 file
def test_scanRecord(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/scantest2.h5' % os.getcwd()
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[0]) +
', "p09/mca/exp.02":' +
str(self._mca1) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
# self.assertEqual(f.path, fname)
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
# ???????
# ! PNI self.assertEqual(mca.name, "mca")
# ????
self.assertEqual(mca.name, "data")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# scanRecord test
# \brief It tests recording of simple h5 file
def test_scanRecord_skipacq(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/scantest2.h5' % os.getcwd()
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[0]) +
', "p09/mca/exp.02":' +
str(self._mca1) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.myAssertRaise(Exception, dp.openEntry)
self.assertEqual(dp.state(), PyTango.DevState.FAULT)
dp.OpenFile()
dp.skipacquisition = True
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.skipacquisition = True
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.skipacquisition = True
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
# self.assertEqual(f.path, fname)
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
# ???????
# ! PNI self.assertEqual(mca.name, "mca")
# ????
self.assertEqual(mca.name, "data")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# scanRecord test
# \brief It tests recording of simple h5 file
def test_scanRecord_canfail(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/scantest2.h5' % os.getcwd()
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml % fname
dp.Canfail = True
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.OpenEntry()
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{}')
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{}')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
value = cnt.read()
# value = cnt[:]
for i in range(len(value)):
self.assertEqual(
value[i], numpy.finfo(getattr(numpy, 'float64')).max)
self.assertEqual(len(cnt.attributes), 6)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["nexdatas_canfail"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail")
self.assertEqual(at[...], "FAILED")
at = cnt.attributes["nexdatas_canfail_error"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail_error")
self.assertEqual(
at[...],
"('Data for /entry1:NXentry/instrument:NXinstrument"
"/detector:NXdetector/counter1 not found. DATASOURCE: CLIENT"
" record exp_c01', 'Data without value')\n('Data for "
"/entry1:NXentry/instrument:NXinstrument/detector:NXdetector"
"/counter1 not found. DATASOURCE: CLIENT record exp_c01',"
" 'Data without value')")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(numpy.finfo(getattr(numpy, 'float64')).max,
value[0][i])
for i in range(len(value[0])):
self.assertEqual(numpy.finfo(getattr(numpy, 'float64')).max,
value[1][i])
self.assertEqual(len(mca.attributes), 6)
at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["nexdatas_canfail"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail")
self.assertEqual(at[...], "FAILED")
at = mca.attributes["nexdatas_canfail_error"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail_error")
self.assertEqual(
at[...],
"('Data for /entry1:NXentry/instrument:NXinstrument/"
"detector:NXdetector/mca not found. DATASOURCE: CLIENT "
"record p09/mca/exp.02', 'Data without value')\n('Data for "
"/entry1:NXentry/instrument:NXinstrument/detector:NXdetector"
"/mca not found. DATASOURCE: CLIENT record p09/mca/exp.02', "
"'Data without value')")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
# cnt = dt.open("cnt1")
cnt = dt.open("counter1")
self.assertTrue(cnt.is_valid)
# ???
# self.assertEqual(cnt.name,"cnt1")
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(numpy.finfo(getattr(numpy, 'float64')).max,
value[i])
self.assertEqual(len(cnt.attributes), 6)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["nexdatas_canfail"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail")
self.assertEqual(at[...], "FAILED")
at = cnt.attributes["nexdatas_canfail_error"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail_error")
self.assertEqual(
at[...],
"('Data for /entry1:NXentry/instrument:NXinstrument/"
"detector:NXdetector/counter1 not found. DATASOURCE: CLIENT "
"record exp_c01', 'Data without value')\n('Data for "
"/entry1:NXentry/instrument:NXinstrument/detector:NXdetector"
"/counter1 not found. DATASOURCE: CLIENT record exp_c01', "
"'Data without value')")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
# self.assertEqual(mca.name,"mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(
numpy.finfo(getattr(numpy, 'float64')).max,
value[0][i])
for i in range(len(value[0])):
self.assertEqual(
numpy.finfo(getattr(numpy, 'float64')).max,
value[1][i])
self.assertEqual(len(mca.attributes), 6)
at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_canfail"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail")
self.assertEqual(at[...], "FAILED")
at = mca.attributes["nexdatas_canfail_error"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail_error")
self.assertEqual(
at[...],
"('Data for /entry1:NXentry/instrument:NXinstrument/detector"
":NXdetector/mca not found. DATASOURCE: CLIENT record "
"p09/mca/exp.02', 'Data without value')\n('Data for "
"/entry1:NXentry/instrument:NXinstrument/detector:NXdetector"
"/mca not found. DATASOURCE: CLIENT record p09/mca/exp.02', "
"'Data without value')")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
    # scanRecord test with the canfail mode disabled
    # \brief It tests that recording fails when client data are missing
def test_scanRecord_canfail_false(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/scantest2.h5' % os.getcwd()
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Canfail = False
dp.XMLSettings = self._scanXml % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.canfail, False)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.myAssertRaise(Exception, dp.Record, '{}')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.canfail, False)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.FAULT)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.myAssertRaise(Exception, dp.Record, '{}')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.canfail, False)
self.assertEqual(dp.state(), PyTango.DevState.FAULT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.state(), PyTango.DevState.FAULT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.FAULT)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
# self.assertEqual(cnt.shape, (1,))
self.assertEqual(cnt.dtype, "float64")
# self.assertEqual(cnt.size, 1)
cnt.read()
# value = cnt[:]
# for i in range(len(value)):
# self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
# self.assertEqual(mca.shape, (2,2048))
self.assertEqual(mca.dtype, "float64")
# self.assertEqual(mca.size, 4096)
mca.read()
# for i in range(len(value[0])):
# self.assertEqual(self._mca1[i], value[0][i])
# for i in range(len(value[0])):
# self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
# cnt = dt.open("cnt1")
cnt = dt.open("counter1")
self.assertTrue(cnt.is_valid)
# ???
# self.assertEqual(cnt.name,"cnt1")
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
# self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
# self.assertEqual(cnt.size, 2)
# print(cnt.read())
cnt[:]
# for i in range(len(value)):
# self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
# self.assertEqual(mca.name,"mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
# self.assertEqual(mca.shape, (2,2048))
self.assertEqual(mca.dtype, "float64")
# self.assertEqual(mca.size, 4096)
mca.read()
# for i in range(len(value[0])):
# self.assertEqual(self._mca1[i], value[0][i])
# for i in range(len(value[0])):
# self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
    # scanRecord test with growing fields
    # \brief It tests recording of an h5 file with fields grown over steps
def test_scanRecordGrow2(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/scantest2.h5' % os.getcwd()
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenEntry()
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
cntg = [self._counter[0], self._counter[1]]
mcag = [self._mca1, self._mca2]
rec = {"data": {"exp_c01": cntg, "p09/mca/exp.02": mcag}}
dp.Record(json.dumps(rec))
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
cntg = [self._counter[1], self._counter[0]]
mcag = [self._mca2, self._mca1]
rec = {"data": {"exp_c01": cntg, "p09/mca/exp.02": mcag}}
dp.Record(json.dumps(rec))
dp.CloseEntry()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") # bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (4,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 4)
value = cnt.read()
# value = cnt[:]
for i in range(len(value)):
self.assertEqual(
self._counter[i if i < 2 else 3 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (4, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 8192)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[3][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[2][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("counter1")
self.assertTrue(cnt.is_valid)
# ???
# self.assertEqual(cnt.name,"cnt1")
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (4,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 4)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(
self._counter[i if i < 2 else 3 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (4, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 8192)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[3][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[2][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# scanRecord test
# \brief It tests recording of simple h5 file
def test_scanRecord_split(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
tfname = '%s/%s%s.h5' % (os.getcwd(), self.__class__.__name__, fun)
fname = None
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = tfname
dp.stepsPerFile = 2
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml1
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 1)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[0]) +
', "p09/mca/exp.02":' +
str(self._mca1) + ' } }')
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 1)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 2)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 2)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 3)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 3)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[0]) +
', "p09/mca/exp.02":' +
str(self._mca1) + ' } }')
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 4)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 1)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
fname = '%s/%s%s_00001.h5' % (
os.getcwd(), self.__class__.__name__, fun)
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
value = cnt.read()
# value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("cnt1")
self.assertTrue(cnt.is_valid)
# ???
self.assertEqual(cnt.name, "cnt1")
# self.assertEqual(cnt.name,"counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
# check the created file
fname = '%s/%s%s_00002.h5' % (
os.getcwd(), self.__class__.__name__, fun)
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
value = cnt.read()
# value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[1], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("cnt1")
self.assertTrue(cnt.is_valid)
# ???
self.assertEqual(cnt.name, "cnt1")
# self.assertEqual(cnt.name,"counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[1], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
# check the created file
fname = '%s/%s%s_00003.h5' % (
os.getcwd(), self.__class__.__name__, fun)
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
value = cnt.read()
# value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[1 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("cnt1")
self.assertTrue(cnt.is_valid)
# ???
self.assertEqual(cnt.name, "cnt1")
# self.assertEqual(cnt.name,"counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[1 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
finally:
for i in range(1, 4):
fname = '%s/%s%s_%05d.h5' % (
os.getcwd(), self.__class__.__name__, fun, i)
if os.path.isfile(fname):
os.remove(fname)
if os.path.isfile(tfname):
os.remove(tfname)
    # scanRecord test
    # \brief It tests recording of simple h5 file
    def test_scanRecordGrow3(self):
        """Record two growable steps (scalar counter + 2D image) through the
        Tango data writer with the h5cpp backend and verify the produced
        file: group tree, field shapes/dtypes, stored values and NeXus
        attributes. The file is removed in the ``finally`` block.
        """
        fun = sys._getframe().f_code.co_name
        print("Run: %s.%s() " % (self.__class__.__name__, fun))
        fname = "scantestgrow.h5"
        try:
            # attach to the running writer device and wait until it responds
            dp = PyTango.DeviceProxy(self._sv.device)
            self.assertTrue(ProxyHelper.wait(dp, 10000))
            # print 'attributes', dp.attribute_list_query()
            # self.setProp(dp, "DefaultCanFail", False)
            dp.FileName = fname
            self.setProp(dp, "writer", "h5cpp")
            dp.FileName = fname
            self.assertEqual(dp.state(), PyTango.DevState.ON)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # ON -> OPEN -> EXTRACT: open file, load XML settings, open entry
            dp.OpenFile()
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.XMLSettings = self._scanXml3 % fname
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.OpenEntry()
            self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # first record: two counter values and two 2D images at once
            cntg = [self._counter[0], self._counter[1]]
            imageg = [self._image1a, self._image2a]
            rec = {"data": {"exp_c01": cntg, "image": imageg}}
            dp.Record(json.dumps(rec))
            self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # second record: same data in reversed order
            cntg = [self._counter[1], self._counter[0]]
            imageg = [self._image2a, self._image1a]
            rec = {"data": {"exp_c01": cntg, "image": imageg}}
            dp.Record(json.dumps(rec))
            dp.CloseEntry()
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.CloseFile()
            self.assertEqual(dp.state(), PyTango.DevState.ON)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # check the created file
            f = H5CppWriter.open_file(fname, readonly=True)
            f = f.root()
            self.assertEqual(5, len(f.attributes))
            self.assertEqual(f.attributes["file_name"][...], fname)
            # NOTE(review): assertTrue with two args treats "NXroot" as the
            # failure message, not a comparison — likely meant assertEqual
            self.assertTrue(f.attributes["NX_class"][...], "NXroot")
            self.assertEqual(f.size, 2)
            en = f.open("entry1")
            self.assertTrue(en.is_valid)
            self.assertEqual(en.name, "entry1")
            self.assertEqual(len(en.attributes), 1)
            self.assertEqual(en.size, 2)
            at = en.attributes["NX_class"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "NX_class")
            self.assertEqual(at[...], "NXentry")
            # ins = f.open("entry1/instrument:NXinstrument") #bad exception
            # ins = f.open("entry1/instrument")
            ins = en.open("instrument")
            self.assertTrue(ins.is_valid)
            self.assertEqual(ins.name, "instrument")
            self.assertEqual(len(ins.attributes), 2)
            self.assertEqual(ins.size, 1)
            at = ins.attributes["NX_class"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "NX_class")
            self.assertEqual(at[...], "NXinstrument")
            at = ins.attributes["short_name"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "short_name")
            self.assertEqual(at[...], "scan instrument")
            det = ins.open("detector")
            self.assertTrue(det.is_valid)
            self.assertEqual(det.name, "detector")
            self.assertEqual(len(det.attributes), 1)
            self.assertEqual(det.size, 2)
            at = det.attributes["NX_class"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "NX_class")
            self.assertEqual(at[...], "NXdetector")
            # cnt = det.open("counter") # bad exception
            cnt = det.open("counter1")
            self.assertTrue(cnt.is_valid)
            self.assertEqual(cnt.name, "counter1")
            self.assertTrue(hasattr(cnt.shape, "__iter__"))
            self.assertEqual(len(cnt.shape), 1)
            self.assertEqual(cnt.shape, (4,))
            self.assertEqual(cnt.dtype, "float64")
            self.assertEqual(cnt.size, 4)
            value = cnt.read()
            # value = cnt[:]
            # index mapping folds the two reversed records: 0,1,1,0
            for i in range(len(value)):
                self.assertEqual(
                    self._counter[i if i < 2 else 3 - i], value[i])
            self.assertEqual(len(cnt.attributes), 4)
            at = cnt.attributes["nexdatas_strategy"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "nexdatas_strategy")
            self.assertEqual(at[...], "STEP")
            at = cnt.attributes["type"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "type")
            self.assertEqual(at[...], "NX_FLOAT")
            at = cnt.attributes["units"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "units")
            self.assertEqual(at[...], "m")
            at = cnt.attributes["nexdatas_source"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            mca = det.open("image")
            self.assertTrue(mca.is_valid)
            self.assertEqual(mca.name, "image")
            # NOTE(review): checks cnt.shape although this section verifies
            # mca — possibly a copy-paste slip; verify intended target
            self.assertTrue(hasattr(cnt.shape, "__iter__"))
            self.assertEqual(len(mca.shape), 3)
            self.assertEqual(mca.shape, (4, 100, 200))
            self.assertEqual(mca.dtype, "int64")
            self.assertEqual(mca.size, 80000)
            value = mca.read()
            # image frames follow the record order: 1a, 2a, 2a, 1a
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image1a[i][j], value[0][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image2a[i][j], value[1][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image2a[i][j], value[2][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image1a[i][j], value[3][i][j])
            self.assertEqual(len(mca.attributes), 4)
            # NOTE(review): reads cnt.attributes right after counting
            # mca.attributes — possibly meant mca; verify
            at = cnt.attributes["nexdatas_strategy"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "nexdatas_strategy")
            self.assertEqual(at[...], "STEP")
            at = mca.attributes["type"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "type")
            self.assertEqual(at[...], "NX_INT64")
            at = mca.attributes["units"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "units")
            self.assertEqual(at[...], "")
            at = mca.attributes["nexdatas_source"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            # NXdata group holds links to the same counter and image data
            dt = en.open("data")
            self.assertTrue(dt.is_valid)
            self.assertEqual(dt.name, "data")
            self.assertEqual(len(dt.attributes), 1)
            self.assertEqual(dt.size, 2)
            at = dt.attributes["NX_class"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "NX_class")
            self.assertEqual(at[...], "NXdata")
            cnt = dt.open("cnt1")
            self.assertTrue(cnt.is_valid)
            self.assertEqual(cnt.name, "cnt1")
            # self.assertEqual(cnt.name,"counter1")
            self.assertTrue(hasattr(cnt.shape, "__iter__"))
            self.assertEqual(len(cnt.shape), 1)
            self.assertEqual(cnt.shape, (4,))
            self.assertEqual(cnt.dtype, "float64")
            self.assertEqual(cnt.size, 4)
            # print(cnt.read())
            value = cnt[:]
            for i in range(len(value)):
                self.assertEqual(
                    self._counter[i if i < 2 else 3 - i], value[i])
            self.assertEqual(len(cnt.attributes), 4)
            at = cnt.attributes["nexdatas_strategy"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "nexdatas_strategy")
            self.assertEqual(at[...], "STEP")
            at = cnt.attributes["type"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "type")
            self.assertEqual(at[...], "NX_FLOAT")
            at = cnt.attributes["units"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "units")
            self.assertEqual(at[...], "m")
            at = cnt.attributes["nexdatas_source"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            mca = dt.open("data")
            self.assertTrue(mca.is_valid)
            self.assertEqual(mca.name, "data")
            # NOTE(review): cnt.shape checked in an mca section — verify
            self.assertTrue(hasattr(cnt.shape, "__iter__"))
            self.assertEqual(len(mca.shape), 3)
            self.assertEqual(mca.shape, (4, 100, 200))
            self.assertEqual(mca.dtype, "int64")
            self.assertEqual(mca.size, 80000)
            value = mca.read()
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image1a[i][j], value[0][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image2a[i][j], value[1][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image2a[i][j], value[2][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image1a[i][j], value[3][i][j])
            self.assertEqual(len(mca.attributes), 4)
            # NOTE(review): cnt.attributes read in an mca section — verify
            at = cnt.attributes["nexdatas_strategy"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "nexdatas_strategy")
            self.assertEqual(at[...], "STEP")
            at = mca.attributes["type"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "type")
            self.assertEqual(at[...], "NX_INT64")
            at = mca.attributes["units"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "units")
            self.assertEqual(at[...], "")
            at = mca.attributes["nexdatas_source"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            f.close()
        finally:
            # always clean up the scan file
            if os.path.isfile(fname):
                os.remove(fname)
    # scanRecord test
    # \brief It tests recording of simple h5 file
    def test_scanRecordGrow3_false(self):
        """Same scenario as ``test_scanRecordGrow3`` but with the device
        property ``DefaultCanFail`` explicitly set to ``False``; the
        recorded content and file layout are expected to be unchanged.
        """
        fun = sys._getframe().f_code.co_name
        print("Run: %s.%s() " % (self.__class__.__name__, fun))
        fname = "scantestgrow.h5"
        try:
            # attach to the running writer device and wait until it responds
            dp = PyTango.DeviceProxy(self._sv.device)
            self.assertTrue(ProxyHelper.wait(dp, 10000))
            # print 'attributes', dp.attribute_list_query()
            # the only difference from test_scanRecordGrow3:
            self.setProp(dp, "DefaultCanFail", False)
            dp.FileName = fname
            self.setProp(dp, "writer", "h5cpp")
            dp.FileName = fname
            self.assertEqual(dp.state(), PyTango.DevState.ON)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # ON -> OPEN -> EXTRACT: open file, load XML settings, open entry
            dp.OpenFile()
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.XMLSettings = self._scanXml3 % fname
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.OpenEntry()
            self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # first record: two counter values and two 2D images at once
            cntg = [self._counter[0], self._counter[1]]
            imageg = [self._image1a, self._image2a]
            rec = {"data": {"exp_c01": cntg, "image": imageg}}
            dp.Record(json.dumps(rec))
            self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # second record: same data in reversed order
            cntg = [self._counter[1], self._counter[0]]
            imageg = [self._image2a, self._image1a]
            rec = {"data": {"exp_c01": cntg, "image": imageg}}
            dp.Record(json.dumps(rec))
            dp.CloseEntry()
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.CloseFile()
            self.assertEqual(dp.state(), PyTango.DevState.ON)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # check the created file
            f = H5CppWriter.open_file(fname, readonly=True)
            f = f.root()
            self.assertEqual(5, len(f.attributes))
            self.assertEqual(f.attributes["file_name"][...], fname)
            # NOTE(review): assertTrue with two args treats "NXroot" as the
            # failure message, not a comparison — likely meant assertEqual
            self.assertTrue(f.attributes["NX_class"][...], "NXroot")
            self.assertEqual(f.size, 2)
            en = f.open("entry1")
            self.assertTrue(en.is_valid)
            self.assertEqual(en.name, "entry1")
            self.assertEqual(len(en.attributes), 1)
            self.assertEqual(en.size, 2)
            at = en.attributes["NX_class"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "NX_class")
            self.assertEqual(at[...], "NXentry")
            # ins = f.open("entry1/instrument:NXinstrument") #bad exception
            # ins = f.open("entry1/instrument")
            ins = en.open("instrument")
            self.assertTrue(ins.is_valid)
            self.assertEqual(ins.name, "instrument")
            self.assertEqual(len(ins.attributes), 2)
            self.assertEqual(ins.size, 1)
            at = ins.attributes["NX_class"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "NX_class")
            self.assertEqual(at[...], "NXinstrument")
            at = ins.attributes["short_name"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "short_name")
            self.assertEqual(at[...], "scan instrument")
            det = ins.open("detector")
            self.assertTrue(det.is_valid)
            self.assertEqual(det.name, "detector")
            self.assertEqual(len(det.attributes), 1)
            self.assertEqual(det.size, 2)
            at = det.attributes["NX_class"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "NX_class")
            self.assertEqual(at[...], "NXdetector")
            # cnt = det.open("counter") # bad exception
            cnt = det.open("counter1")
            self.assertTrue(cnt.is_valid)
            self.assertEqual(cnt.name, "counter1")
            self.assertTrue(hasattr(cnt.shape, "__iter__"))
            self.assertEqual(len(cnt.shape), 1)
            self.assertEqual(cnt.shape, (4,))
            self.assertEqual(cnt.dtype, "float64")
            self.assertEqual(cnt.size, 4)
            value = cnt.read()
            # value = cnt[:]
            # index mapping folds the two reversed records: 0,1,1,0
            for i in range(len(value)):
                self.assertEqual(
                    self._counter[i if i < 2 else 3 - i], value[i])
            self.assertEqual(len(cnt.attributes), 4)
            at = cnt.attributes["nexdatas_strategy"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "nexdatas_strategy")
            self.assertEqual(at[...], "STEP")
            at = cnt.attributes["type"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "type")
            self.assertEqual(at[...], "NX_FLOAT")
            at = cnt.attributes["units"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "units")
            self.assertEqual(at[...], "m")
            at = cnt.attributes["nexdatas_source"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            mca = det.open("image")
            self.assertTrue(mca.is_valid)
            self.assertEqual(mca.name, "image")
            # NOTE(review): checks cnt.shape although this section verifies
            # mca — possibly a copy-paste slip; verify intended target
            self.assertTrue(hasattr(cnt.shape, "__iter__"))
            self.assertEqual(len(mca.shape), 3)
            self.assertEqual(mca.shape, (4, 100, 200))
            self.assertEqual(mca.dtype, "int64")
            self.assertEqual(mca.size, 80000)
            value = mca.read()
            # image frames follow the record order: 1a, 2a, 2a, 1a
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image1a[i][j], value[0][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image2a[i][j], value[1][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image2a[i][j], value[2][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image1a[i][j], value[3][i][j])
            self.assertEqual(len(mca.attributes), 4)
            # NOTE(review): reads cnt.attributes right after counting
            # mca.attributes — possibly meant mca; verify
            at = cnt.attributes["nexdatas_strategy"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "nexdatas_strategy")
            self.assertEqual(at[...], "STEP")
            at = mca.attributes["type"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "type")
            self.assertEqual(at[...], "NX_INT64")
            at = mca.attributes["units"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "units")
            self.assertEqual(at[...], "")
            at = mca.attributes["nexdatas_source"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            # NXdata group holds links to the same counter and image data
            dt = en.open("data")
            self.assertTrue(dt.is_valid)
            self.assertEqual(dt.name, "data")
            self.assertEqual(len(dt.attributes), 1)
            self.assertEqual(dt.size, 2)
            at = dt.attributes["NX_class"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "NX_class")
            self.assertEqual(at[...], "NXdata")
            cnt = dt.open("cnt1")
            self.assertTrue(cnt.is_valid)
            self.assertEqual(cnt.name, "cnt1")
            # self.assertEqual(cnt.name,"counter1")
            self.assertTrue(hasattr(cnt.shape, "__iter__"))
            self.assertEqual(len(cnt.shape), 1)
            self.assertEqual(cnt.shape, (4,))
            self.assertEqual(cnt.dtype, "float64")
            self.assertEqual(cnt.size, 4)
            # print(cnt.read())
            value = cnt[:]
            for i in range(len(value)):
                self.assertEqual(
                    self._counter[i if i < 2 else 3 - i], value[i])
            self.assertEqual(len(cnt.attributes), 4)
            at = cnt.attributes["nexdatas_strategy"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "nexdatas_strategy")
            self.assertEqual(at[...], "STEP")
            at = cnt.attributes["type"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "type")
            self.assertEqual(at[...], "NX_FLOAT")
            at = cnt.attributes["units"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "units")
            self.assertEqual(at[...], "m")
            at = cnt.attributes["nexdatas_source"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            mca = dt.open("data")
            self.assertTrue(mca.is_valid)
            self.assertEqual(mca.name, "data")
            # NOTE(review): cnt.shape checked in an mca section — verify
            self.assertTrue(hasattr(cnt.shape, "__iter__"))
            self.assertEqual(len(mca.shape), 3)
            self.assertEqual(mca.shape, (4, 100, 200))
            self.assertEqual(mca.dtype, "int64")
            self.assertEqual(mca.size, 80000)
            value = mca.read()
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image1a[i][j], value[0][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image2a[i][j], value[1][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image2a[i][j], value[2][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(self._image1a[i][j], value[3][i][j])
            self.assertEqual(len(mca.attributes), 4)
            # NOTE(review): cnt.attributes read in an mca section — verify
            at = cnt.attributes["nexdatas_strategy"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "nexdatas_strategy")
            self.assertEqual(at[...], "STEP")
            at = mca.attributes["type"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "type")
            self.assertEqual(at[...], "NX_INT64")
            at = mca.attributes["units"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "units")
            self.assertEqual(at[...], "")
            at = mca.attributes["nexdatas_source"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            f.close()
        finally:
            # always clean up the scan file
            if os.path.isfile(fname):
                os.remove(fname)
    # scanRecord test
    # \brief It tests recording of simple h5 file
    def test_scanRecordGrow4(self):
        """Like ``test_scanRecordGrow3`` but records the larger
        ``_image1``/``_image2`` frames (200x200). Verifies the extreme
        int64 fill values (0 and 2**63-1) written into the 4-frame image
        field and the extra attributes (6) created on it.
        """
        fun = sys._getframe().f_code.co_name
        print("Run: %s.%s() " % (self.__class__.__name__, fun))
        fname = "scantestgrow.h5"
        try:
            # attach to the running writer device and wait until it responds
            dp = PyTango.DeviceProxy(self._sv.device)
            self.assertTrue(ProxyHelper.wait(dp, 10000))
            # print 'attributes', dp.attribute_list_query()
            dp.FileName = fname
            self.setProp(dp, "writer", "h5cpp")
            dp.FileName = fname
            self.assertEqual(dp.state(), PyTango.DevState.ON)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # ON -> OPEN -> EXTRACT: open file, load XML settings, open entry
            dp.OpenFile()
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.XMLSettings = self._scanXml3 % fname
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.OpenEntry()
            self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # first record: two counter values and two 2D images at once
            cntg = [self._counter[0], self._counter[1]]
            imageg = [self._image1, self._image2]
            rec = {"data": {"exp_c01": cntg, "image": imageg}}
            dp.Record(json.dumps(rec))
            self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # second record: same data in reversed order
            cntg = [self._counter[1], self._counter[0]]
            imageg = [self._image2, self._image1]
            rec = {"data": {"exp_c01": cntg, "image": imageg}}
            dp.Record(json.dumps(rec))
            dp.CloseEntry()
            self.assertEqual(dp.state(), PyTango.DevState.OPEN)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            dp.CloseFile()
            self.assertEqual(dp.state(), PyTango.DevState.ON)
            self.assertEqual(dp.status(), self.__status[dp.state()])
            # check the created file
            f = H5CppWriter.open_file(fname, readonly=True)
            f = f.root()
            self.assertEqual(5, len(f.attributes))
            self.assertEqual(f.attributes["file_name"][...], fname)
            # NOTE(review): assertTrue with two args treats "NXroot" as the
            # failure message, not a comparison — likely meant assertEqual
            self.assertTrue(f.attributes["NX_class"][...], "NXroot")
            self.assertEqual(f.size, 2)
            en = f.open("entry1")
            self.assertTrue(en.is_valid)
            self.assertEqual(en.name, "entry1")
            self.assertEqual(len(en.attributes), 1)
            self.assertEqual(en.size, 2)
            at = en.attributes["NX_class"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "NX_class")
            self.assertEqual(at[...], "NXentry")
            # ins = f.open("entry1/instrument:NXinstrument") #bad exception
            # ins = f.open("entry1/instrument")
            ins = en.open("instrument")
            self.assertTrue(ins.is_valid)
            self.assertEqual(ins.name, "instrument")
            self.assertEqual(len(ins.attributes), 2)
            self.assertEqual(ins.size, 1)
            at = ins.attributes["NX_class"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "NX_class")
            self.assertEqual(at[...], "NXinstrument")
            at = ins.attributes["short_name"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "short_name")
            self.assertEqual(at[...], "scan instrument")
            det = ins.open("detector")
            self.assertTrue(det.is_valid)
            self.assertEqual(det.name, "detector")
            self.assertEqual(len(det.attributes), 1)
            self.assertEqual(det.size, 2)
            at = det.attributes["NX_class"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "NX_class")
            self.assertEqual(at[...], "NXdetector")
            # cnt = det.open("counter") # bad exception
            cnt = det.open("counter1")
            self.assertTrue(cnt.is_valid)
            self.assertEqual(cnt.name, "counter1")
            self.assertTrue(hasattr(cnt.shape, "__iter__"))
            self.assertEqual(len(cnt.shape), 1)
            self.assertEqual(cnt.shape, (4,))
            self.assertEqual(cnt.dtype, "float64")
            self.assertEqual(cnt.size, 4)
            value = cnt.read()
            # value = cnt[:]
            # index mapping folds the two reversed records: 0,1,1,0
            for i in range(len(value)):
                self.assertEqual(
                    self._counter[i if i < 2 else 3 - i], value[i])
            self.assertEqual(len(cnt.attributes), 4)
            at = cnt.attributes["nexdatas_strategy"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "nexdatas_strategy")
            self.assertEqual(at[...], "STEP")
            at = cnt.attributes["type"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "type")
            self.assertEqual(at[...], "NX_FLOAT")
            at = cnt.attributes["units"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "units")
            self.assertEqual(at[...], "m")
            at = cnt.attributes["nexdatas_source"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            mca = det.open("image")
            self.assertTrue(mca.is_valid)
            self.assertEqual(mca.name, "image")
            # NOTE(review): checks cnt.shape although this section verifies
            # mca — possibly a copy-paste slip; verify intended target
            self.assertTrue(hasattr(cnt.shape, "__iter__"))
            self.assertEqual(len(mca.shape), 3)
            self.assertEqual(mca.shape, (4, 200, 200))
            self.assertEqual(mca.dtype, "int64")
            self.assertEqual(mca.size, 160000)
            value = mca.read()
            # frames alternate between all-zero and all-int64-max
            # (9223372036854775807 == 2**63 - 1)
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(0, value[0][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(9223372036854775807, value[1][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(0, value[2][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(9223372036854775807, value[3][i][j])
            self.assertEqual(len(mca.attributes), 6)
            # NOTE(review): reads cnt.attributes right after counting
            # mca.attributes — possibly meant mca; verify
            at = cnt.attributes["nexdatas_strategy"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "nexdatas_strategy")
            self.assertEqual(at[...], "STEP")
            at = mca.attributes["type"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "type")
            self.assertEqual(at[...], "NX_INT64")
            at = mca.attributes["units"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "units")
            self.assertEqual(at[...], "")
            at = mca.attributes["nexdatas_source"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            # NXdata group holds links to the same counter and image data
            dt = en.open("data")
            self.assertTrue(dt.is_valid)
            self.assertEqual(dt.name, "data")
            self.assertEqual(len(dt.attributes), 1)
            self.assertEqual(dt.size, 2)
            at = dt.attributes["NX_class"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "NX_class")
            self.assertEqual(at[...], "NXdata")
            cnt = dt.open("cnt1")
            self.assertTrue(cnt.is_valid)
            self.assertEqual(cnt.name, "cnt1")
            # self.assertEqual(cnt.name,"counter1")
            self.assertTrue(hasattr(cnt.shape, "__iter__"))
            self.assertEqual(len(cnt.shape), 1)
            self.assertEqual(cnt.shape, (4,))
            self.assertEqual(cnt.dtype, "float64")
            self.assertEqual(cnt.size, 4)
            # print(cnt.read())
            value = cnt[:]
            for i in range(len(value)):
                self.assertEqual(
                    self._counter[i if i < 2 else 3 - i], value[i])
            self.assertEqual(len(cnt.attributes), 4)
            at = cnt.attributes["nexdatas_strategy"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "nexdatas_strategy")
            self.assertEqual(at[...], "STEP")
            at = cnt.attributes["type"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "type")
            self.assertEqual(at[...], "NX_FLOAT")
            at = cnt.attributes["units"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "units")
            self.assertEqual(at[...], "m")
            at = cnt.attributes["nexdatas_source"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            mca = dt.open("data")
            self.assertTrue(mca.is_valid)
            self.assertEqual(mca.name, "data")
            # NOTE(review): cnt.shape checked in an mca section — verify
            self.assertTrue(hasattr(cnt.shape, "__iter__"))
            self.assertEqual(len(mca.shape), 3)
            self.assertEqual(mca.shape, (4, 200, 200))
            self.assertEqual(mca.dtype, "int64")
            self.assertEqual(mca.size, 160000)
            value = mca.read()
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(0, value[0][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(9223372036854775807, value[1][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(0, value[2][i][j])
            for i in range(len(value[0])):
                for j in range(len(value[0][0])):
                    self.assertEqual(9223372036854775807, value[3][i][j])
            self.assertEqual(len(mca.attributes), 6)
            # NOTE(review): cnt.attributes read in an mca section — verify
            at = cnt.attributes["nexdatas_strategy"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "nexdatas_strategy")
            self.assertEqual(at[...], "STEP")
            at = mca.attributes["type"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "type")
            self.assertEqual(at[...], "NX_INT64")
            at = mca.attributes["units"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            self.assertEqual(at.name, "units")
            self.assertEqual(at[...], "")
            at = mca.attributes["nexdatas_source"]
            self.assertTrue(at.is_valid)
            self.assertTrue(hasattr(at.shape, "__iter__"))
            self.assertEqual(len(at.shape), 0)
            self.assertEqual(at.shape, ())
            self.assertEqual(at.dtype, "string")
            f.close()
        finally:
            # always clean up the scan file
            if os.path.isfile(fname):
                os.remove(fname)
# allow running this test module directly from the command line
if __name__ == '__main__':
    unittest.main()
|
Compensation for Mississippi car accident victims | Wood, Carlton & Hudson, P.C.
On behalf of Wood, Carlton & Hudson, P.C. posted in Car Accidents on Friday, April 17, 2015.
Spring in Mississippi brings warmer weather and longer days. Mississippi residents, eager to soak up the spring sunshine, tend to hit the road and explore their neighborhoods a little more. Sometimes that exploration includes a cocktail or two shared with friends. When residents fail to use a designated driver, car accidents can occur.
Drunk drivers can cause extensive injury to car accident victims. From a broken bone to brain damage, these injuries can have both short- and long-term consequences that affect an accident victim's ability to earn a living or care for their family. Even if the accident victim has health insurance, the costs may surpass the limits of the policy, leaving the injured to pay the remainder of the bill out of pocket. In addition, the individual's quality of life may be dramatically altered by the injuries.
In such situations, seeking compensation from a negligent party can help alleviate the sometimes-extreme costs of accidents. If damages are recovered, expenses such as medical bills can be more easily dealt with, hopefully helping pave the way to a smoother recovery for all involved.
For victims of car accidents in both Mississippi and Tennessee, there are limits on when a claim can be filed. A delay could mean a lost opportunity to receive compensation from a drunk driver or other negligent driver. For more information on possibly receiving compensation after a car accident, please visit our car accident page.
FLAG: After edits, this blog is too short.
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Administrator
#
# Created: 08/10/2011
# Copyright: (c) Administrator 2011
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
class Gol02:
    """A single Game of Life cell: tracks liveness and a neighbour count."""

    def __init__(self):
        # Number of live neighbours, set via addNeigbors/appendNeigbors.
        self.numberOfNeighbors = 0
        # Whether this cell is currently alive.
        self.isAlive = False
        # Neighbouring cells registered via appendNeigbors.
        self.board = []

    def evolve(self):
        """Advance one generation (not yet implemented)."""
        return

    def setAlive(self):
        """Mark this cell as alive."""
        self.isAlive = True

    def isLiving(self):
        """Return True if the cell lives in the next generation under
        Conway's rules: exactly 3 neighbours (birth/survival), or alive
        with exactly 2 neighbours (survival)."""
        result = (self.numberOfNeighbors == 3) or \
                 (self.isAlive and self.numberOfNeighbors == 2)
        return result

    def addNeigbors(self, numberOfNeighbors):
        """Set the neighbour count directly. (Misspelled name kept for
        backward compatibility with existing callers.)"""
        self.numberOfNeighbors = numberOfNeighbors
        return

    def appendNeigbors(self, neighbor):
        """Register a neighbouring cell and increment the neighbour count.

        Bug fix: the original signature omitted ``self``, so any call of
        ``cell.appendNeigbors(n)`` raised TypeError (and the body's
        references to ``self`` would have raised NameError).
        """
        self.board.append(neighbor)
        self.numberOfNeighbors += 1
# Script entry point: no standalone command-line behaviour is implemented yet.
if __name__ == '__main__':
    pass
|
STEEL CHISEL light, regular, bold and outline is a 4-font system that can be layered in different ways to create infinite title effects, commonly used in poster and 3D logo design. Steel Chisel’s layer combinations give you complete control in producing styles like outline, 3D, and beveled. It can be used alone or in layers, and allows you to adjust leading and kerning. Each font contains the same metrics, so when your title is set, copy and paste-in-place to create layers of different weights/styles to build out your desired effect. Steel Chisel works great in any graphics application that allows you to utilize layers or 3D effects.
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates/drops the ``stats_repository`` table."""

    def forwards(self, orm):
        """Apply the migration: create the Repository table."""
        # Adding model 'Repository'
        db.create_table('stats_repository', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50, db_index=True)),
            ('repourl', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
            ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('stats', ['Repository'])

    def backwards(self, orm):
        """Reverse the migration: drop the Repository table."""
        # Deleting model 'Repository'
        db.delete_table('stats_repository')

    # Frozen ORM state that South uses to reconstruct the models as they
    # existed at the time this migration was generated.
    models = {
        'stats.repository': {
            'Meta': {'ordering': "['name']", 'object_name': 'Repository'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'repourl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['stats']
|
including increased card activation and revenue per card.
offerings and increase 1:1 marketing content.
and cross-promotional content to your mailers.
|
"""Private module full of compatibility hacks.
Primarily this is for downstream redistributions of requests that unvendor
urllib3 without providing a shim.
.. warning::
This module is private. If you use it, and something breaks, you were
warned
"""
# Mapping/MutableMapping moved to ``collections.abc`` in Python 3.3 and were
# removed from the ``collections`` top level in Python 3.10, so try the
# modern location first and fall back for old interpreters.
try:
    from collections.abc import Mapping, MutableMapping
except ImportError:
    from collections import Mapping, MutableMapping
import sys

import requests

# Prefer the copy of urllib3 vendored inside requests; fall back to a
# standalone urllib3 when a redistributor has unvendored it.
try:
    from requests.packages.urllib3 import fields
    from requests.packages.urllib3 import filepost
    from requests.packages.urllib3 import poolmanager
except ImportError:
    from urllib3 import fields
    from urllib3 import filepost
    from urllib3 import poolmanager

try:
    from requests.packages.urllib3.connection import HTTPConnection
    from requests.packages.urllib3 import connection
except ImportError:
    try:
        from urllib3.connection import HTTPConnection
        from urllib3 import connection
    except ImportError:
        # No usable urllib3 found at all; callers must handle the None markers.
        HTTPConnection = None
        connection = None

# urllib3's timeout helper only exists in the copies bundled with
# requests >= 2.3.0.
if requests.__build__ < 0x020300:
    timeout = None
else:
    try:
        from requests.packages.urllib3.util import timeout
    except ImportError:
        from urllib3.util import timeout

# The Google App Engine contrib module appeared with requests 2.10.0.
if requests.__build__ < 0x021000:
    gaecontrib = None
else:
    try:
        from requests.packages.urllib3.contrib import appengine as gaecontrib
    except ImportError:
        from urllib3.contrib import appengine as gaecontrib

PY3 = sys.version_info > (3, 0)

# Queue/urllib modules were renamed and reorganised between Python 2 and 3.
if PY3:
    import queue
    from urllib.parse import urlencode, urljoin
else:
    import Queue as queue
    from urllib import urlencode
    from urlparse import urljoin

# ``basestring`` disappeared in Python 3; emulate it with a (str, bytes) tuple.
try:
    basestring = basestring
except NameError:
    basestring = (str, bytes)
class HTTPHeaderDict(MutableMapping):
    """
    :param headers:
        An iterable of field-value pairs. Must not contain multiple field names
        when compared case-insensitively.
    :param kwargs:
        Additional field-value pairs to pass in to ``dict.update``.
    A ``dict`` like container for storing HTTP Headers.
    Field names are stored and compared case-insensitively in compliance with
    RFC 7230. Iteration provides the first case-sensitive key seen for each
    case-insensitive pair.
    Using ``__setitem__`` syntax overwrites fields that compare equal
    case-insensitively in order to maintain ``dict``'s api. For fields that
    compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
    in a loop.
    If multiple fields that are equal case-insensitively are passed to the
    constructor or ``.update``, the behavior is undefined and some will be
    lost.
    >>> headers = HTTPHeaderDict()
    >>> headers.add('Set-Cookie', 'foo=bar')
    >>> headers.add('set-cookie', 'baz=quxx')
    >>> headers['content-length'] = '7'
    >>> headers['SET-cookie']
    'foo=bar, baz=quxx'
    >>> headers['Content-Length']
    '7'
    """

    def __init__(self, headers=None, **kwargs):
        super(HTTPHeaderDict, self).__init__()
        # _container maps lowercased key -> (original_key, value) tuple, or
        # [original_key, value1, value2, ...] list once duplicates are added.
        self._container = {}
        if headers is not None:
            if isinstance(headers, HTTPHeaderDict):
                self._copy_from(headers)
            else:
                self.extend(headers)
        if kwargs:
            self.extend(kwargs)

    def __setitem__(self, key, val):
        # Overwrites any existing entry whose key compares equal
        # case-insensitively; the original casing of ``key`` is preserved.
        self._container[key.lower()] = (key, val)
        return self._container[key.lower()]

    def __getitem__(self, key):
        # val is (key, v1[, v2, ...]); join all stored values with ', '.
        val = self._container[key.lower()]
        return ', '.join(val[1:])

    def __delitem__(self, key):
        del self._container[key.lower()]

    def __contains__(self, key):
        return key.lower() in self._container

    def __eq__(self, other):
        # Compare case-insensitively against any mapping-like object by
        # normalising both sides into lowercased merged dicts.
        if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
            return False
        if not isinstance(other, type(self)):
            other = type(self)(other)
        return (dict((k.lower(), v) for k, v in self.itermerged()) ==
                dict((k.lower(), v) for k, v in other.itermerged()))

    def __ne__(self, other):
        return not self.__eq__(other)

    if not PY3:  # Python 2
        iterkeys = MutableMapping.iterkeys
        itervalues = MutableMapping.itervalues

    # Private sentinel used by pop() to distinguish "no default supplied".
    __marker = object()

    def __len__(self):
        return len(self._container)

    def __iter__(self):
        # Only provide the originally cased names
        for vals in self._container.values():
            yield vals[0]

    def pop(self, key, default=__marker):
        """D.pop(k[,d]) -> v, remove specified key and return its value.
        If key is not found, d is returned if given, otherwise KeyError is
        raised.
        """
        # Using the MutableMapping function directly fails due to the private
        # marker.
        # Using ordinary dict.pop would expose the internal structures.
        # So let's reinvent the wheel.
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value

    def discard(self, key):
        # Like __delitem__, but silent when the key is absent.
        try:
            del self[key]
        except KeyError:
            pass

    def add(self, key, val):
        """Adds a (name, value) pair, doesn't overwrite the value if it already
        exists.
        >>> headers = HTTPHeaderDict(foo='bar')
        >>> headers.add('Foo', 'baz')
        >>> headers['foo']
        'bar, baz'
        """
        key_lower = key.lower()
        new_vals = key, val
        # Keep the common case aka no item present as fast as possible
        vals = self._container.setdefault(key_lower, new_vals)
        if new_vals is not vals:
            # new_vals was not inserted, as there was a previous one
            if isinstance(vals, list):
                # If already several items got inserted, we have a list
                vals.append(val)
            else:
                # vals should be a tuple then, i.e. only one item so far
                # Need to convert the tuple to list for further extension
                self._container[key_lower] = [vals[0], vals[1], val]

    def extend(self, *args, **kwargs):
        """Generic import function for any type of header-like object.
        Adapted version of MutableMapping.update in order to insert items
        with self.add instead of self.__setitem__
        """
        if len(args) > 1:
            raise TypeError("extend() takes at most 1 positional "
                            "arguments ({} given)".format(len(args)))
        other = args[0] if len(args) >= 1 else ()
        # Dispatch on the capabilities of ``other``: HTTPHeaderDict keeps
        # duplicates, Mapping/keys()-objects iterate keys, anything else is
        # assumed to be an iterable of (key, value) pairs.
        if isinstance(other, HTTPHeaderDict):
            for key, val in other.iteritems():
                self.add(key, val)
        elif isinstance(other, Mapping):
            for key in other:
                self.add(key, other[key])
        elif hasattr(other, "keys"):
            for key in other.keys():
                self.add(key, other[key])
        else:
            for key, value in other:
                self.add(key, value)
        for key, value in kwargs.items():
            self.add(key, value)

    def getlist(self, key):
        """Returns a list of all the values for the named field. Returns an
        empty list if the key doesn't exist."""
        try:
            vals = self._container[key.lower()]
        except KeyError:
            return []
        else:
            if isinstance(vals, tuple):
                # Single value stored as (key, value).
                return [vals[1]]
            else:
                # Multiple values stored as [key, v1, v2, ...].
                return vals[1:]

    # Backwards compatibility for httplib
    getheaders = getlist
    getallmatchingheaders = getlist
    iget = getlist

    def __repr__(self):
        return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))

    def _copy_from(self, other):
        # Copy another HTTPHeaderDict's internal storage, normalising every
        # entry into the [key, v1, ...] list form.
        for key in other:
            val = other.getlist(key)
            if isinstance(val, list):
                # Don't need to convert tuples
                val = list(val)
            self._container[key.lower()] = [key] + val

    def copy(self):
        clone = type(self)()
        clone._copy_from(self)
        return clone

    def iteritems(self):
        """Iterate over all header lines, including duplicate ones."""
        for key in self:
            vals = self._container[key.lower()]
            for val in vals[1:]:
                yield vals[0], val

    def itermerged(self):
        """Iterate over all headers, merging duplicate ones together."""
        for key in self:
            val = self._container[key.lower()]
            yield val[0], ', '.join(val[1:])

    def items(self):
        return list(self.iteritems())

    @classmethod
    def from_httplib(cls, message):  # Python 2
        """Read headers from a Python 2 httplib message object."""
        # python2.7 does not expose a proper API for exporting multiheaders
        # efficiently. This function re-reads raw lines from the message
        # object and extracts the multiheaders properly.
        headers = []
        for line in message.headers:
            if line.startswith((' ', '\t')):
                # Continuation line: fold it into the previous header value.
                key, value = headers[-1]
                headers[-1] = (key, value + '\r\n' + line.rstrip())
                continue
            key, value = line.split(':', 1)
            headers.append((key, value.strip()))
        return cls(headers)
# Names re-exported as the public API of this compatibility module.
__all__ = (
    'basestring',
    'connection',
    'fields',
    'filepost',
    'poolmanager',
    'timeout',
    'HTTPHeaderDict',
    'queue',
    'urlencode',
    'gaecontrib',
    'urljoin',
)
|
I should clarify that I do not by this designation mean anyone and everyone who fights, nor everyone who trains to engage in combat. I have in mind the soldier who fights to defend home and family. I’m aware that there are others, and we will consider the knight, the assassin, perhaps the barbarian, perhaps others, as distinct kinds of fighting men who represent something else. This is the simple man who fights because someone has to do it.
I have never been a fighter. I have no military record, and the closest I got to police work (apart from reading about it in law school) was as an unarmed security guard. I’ve never much admired people who fight, because in grade school many of them seemed to think that they could prove their values as human beings by assaulting me. (They then added insult to injury by complaining about the bites, scratches, and kick bruises delivered upon their persons in my defense. Apparently they were universally of the opinion that you were bound by their rules even if you refused to play the game.) Thus I don’t have a strong connection to the warrior as a concept. Yet the first time, after years of running games, that I was given the opportunity to play a character in a fantasy game, I chose not a priest nor a wizard but a fighter, a warrior type. Thus despite my distaste for people in real life who exemplify some of the vices of this character and my unfamiliarity with any of the reality of the life of soldiers, there is something here that does appeal to me.
I also note that God considered this important. In Judges 3:1f we read, “Now these are the nations which the Lord left, to test Israel by them (that is, all who had not experienced any of the wars of Canaan; only in order that the generations of the sons of Israel might be taught war, those who had not experienced it formerly).” So apparently regardless of what I think about fighting, God thought it was very important that His people know something about it from experience. So what is it about the warrior that we value, or that we should value?
Writ in large letters on the warrior type is the idea we discussed a few months back, Greater love has no man than this, that a man lay down his life for his friends. Then we spoke of how this often means less than a life and death situation; but here we put life and death in stark relief. The warrior believes that there are some things, usually people, for which or whom it is worth dying, and worth killing. He doesn’t fight because it’s noble, nor because it’s fun, nor because it is profitable. He fights because it is necessary, because if he doesn’t do it maybe no one will, and if no one does it, those he loves may suffer and die.
Of course, he doesn’t always see how the current action protects his loved ones specifically. Sometimes the battle is far from home, and the people whose lives he is protecting are someone else’s family. In this, he represents something else to us. He shows us obedience to a higher authority, in the expectation that those from whom his orders come know what he does not, know how risking his life here protects his people elsewhere. It is that acceptance of the risk of death on the word of someone else that it is necessary that wins our respect.
In this, he also demonstrates the value of mutuality. I am fighting to defend your home and family, because it is ultimately in my interest to protect you. If I don’t defend you, you may fall to the enemy; then if the enemy comes to me, I, too, will fall, and will not have you to help me. Together we are strong, and the warrior teaches us of the importance of combining our strength against the foe.
The warrior is not without his vices, though. He stands forever on the edge of the danger that he will believe might makes right, that the fact that he can do something means that he has the right to do it. These are the bullies I dreaded in my youth; these are the tyrants who rule by force. He may think that because he has risked his life, those for whom he risked it must excuse his demanding attitudes and rude effrontery, that he is somehow better than the rest of us, more deserving of respect and less required to offer it. He may think our gratitude inadequate, our respect insufficient. These are the evils that exist just on the edge of those goods, the temptations to which he is subject.
Looking at the warrior, we see something of ourselves. On the positive side, we see that we value the willingness to risk death for something more important than life, to lay down our lives for our friends, and this is a good thing. We also see the value of obedience, as we often must obey God without knowing the reason at that moment. On the other side, though, there is the temptation to excuse our own actions based on the authority of someone else, someone who instructed us to do this as we have done it. There is also the temptation in all of us, the wish that we had the power to have things our own way, to force others to fit into our expectations. The demand for respect given the force of strength is a tyranny of its own, and a temptation to us all. These values and these dangers exist within our concept of the warrior.
Thus in playing the warrior, we can inform our play with these virtues, the willingness to die for those we love, and to do so obedient to someone we trust. We can also address the questions of personal responsibility in what we do, and the temptation to perceive strength as its own justification.
In looking at our first archetype, we find within it values to emulate and vices to recognize, ways to use the character to bring our faith into our games in tangible ways, even when he’s just a fighter.
This article was originally published in July 2004 on the Christian Gamers Guild’s website. The entire series remains available at its original URL.
Pingback: #208: Halloween | mark Joseph "young"
|
# Flexlay - A Generic 2D Game Editor
# Copyright (C) 2014 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pickle
from PyQt4.QtCore import Qt
from PyQt4.QtGui import (QWidget, QGridLayout, QScrollBar, QTabWidget,
QKeySequence, QShortcut, QCursor)
from flexlay.graphic_context_state import GraphicContextState
from flexlay.math import Pointf
from flexlay.util import Signal
from flexlay.workspace import Workspace
from .editor_map_widget import EditorMapWidget
from .object_selector import ObjectSelector
class EditorMapComponent:
    """Widget bundle that displays a Workspace: the map view itself plus
    horizontal and vertical scrollbars, optionally wrapped in a QTabWidget.
    """

    # Most recently constructed instance, used as a global access point.
    current = None

    def __init__(self, tabbed=True, parent=None):
        EditorMapComponent.current = self
        self.workspace = Workspace()
        self.gc_state = GraphicContextState()
        if tabbed:
            # Wrap the map widget in a tab widget so sectors can be shown
            # as tabs (see set_sector_tab_label).
            self.tab_widget = QTabWidget(parent)
            self.widget = QWidget(self.tab_widget)
            self.tab_widget.addTab(self.widget, "A Label")
        else:
            self.tab_widget = None
            self.widget = QWidget(parent)
        # Grid layout: map view at (0,0), scrollbars along the edges.
        self.layout = QGridLayout(self.widget)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layout.setHorizontalSpacing(0)
        self.layout.setVerticalSpacing(0)
        self.scroll_horz = QScrollBar(Qt.Horizontal)
        self.scroll_vert = QScrollBar(Qt.Vertical)
        self.editormap_widget = EditorMapWidget(self, None)
        # Dragging a scrollbar pans the view.
        self.scroll_horz.valueChanged.connect(self.move_to_x)
        self.scroll_vert.valueChanged.connect(self.move_to_y)
        self.layout.addWidget(self.editormap_widget, 0, 0)
        self.layout.addWidget(self.scroll_horz, 1, 0)
        self.layout.addWidget(self.scroll_vert, 0, 1)
        self.sig_drop = Signal()
        self.editormap_widget.sig_drop.connect(self.on_drop)

    def on_drop(self, data, pos):
        """sends (brush, pos)"""
        # The drag payload is a pickled brush id; resolve it back to a brush
        # via the current ObjectSelector and re-emit.
        brush_id = pickle.loads(data)
        brush = ObjectSelector.current.get_brush(brush_id)
        return self.sig_drop(brush, pos)

    def get_workspace(self):
        return self.workspace

    def grab_mouse(self):
        self.editormap_widget.grabMouse()

    def release_mouse(self):
        self.editormap_widget.releaseMouse()

    # Legacy ClanLib-era C++ code kept for reference while porting.
    # ifdef GRUMBEL
    # void
    # EditorMapComponentImpl::on_key_down(const CL_InputEvent& event)
    # {
    #     if (event.id >= 0 && event.id < 256)
    #     {
    #         Rect rect = parent.get_position()
    #         key_bindings[event.id](CL_Mouse::get_x() - rect.left,
    #                                CL_Mouse::get_y() - rect.top)
    #     }
    #     if (event.repeat_count == 0)
    #     {
    #         Rect rect = parent.get_position()
    #         CL_InputEvent ev2 = event
    #         ev2.mouse_pos = Point(CL_Mouse::get_x() - rect.left,
    #                               CL_Mouse::get_y() - rect.top)
    #         workspace.key_down(InputEvent(ev2))
    #     }
    # }
    # void
    # EditorMapComponentImpl::on_key_up(const CL_InputEvent& event)
    # {
    #     Rect rect = parent.get_position()
    #     CL_InputEvent ev2 = event
    #     ev2.mouse_pos = Point(CL_Mouse::get_x() - rect.left,
    #                           CL_Mouse::get_y() - rect.top)
    #     workspace.key_up(InputEvent(ev2))
    # }
    # void
    # EditorMapComponentImpl::draw ()
    # {
    #     if (workspace.get_map().is_null()) return
    #     Display::push_cliprect(parent.get_screen_rect())
    #     Display::push_modelview()
    #     Display::add_translate(parent.get_screen_x(), parent.get_screen_y())
    #     // Update scrollbars (FIXME: move me to function)
    #     scrollbar_v.set_range(0, workspace.get_map().get_bounding_rect().height)
    #     scrollbar_v.set_pagesize(parent.height/gc_state.get_zoom())
    #     scrollbar_v.set_pos(gc_state.get_pos().y)
    #     scrollbar_h.set_range(0, workspace.get_map().get_bounding_rect().width)
    #     scrollbar_h.set_pagesize(parent.width/gc_state.get_zoom())
    #     scrollbar_h.set_pos(gc_state.get_pos().x)
    #     gc_state.push()
    #     {
    #         GraphicContext gc(gc_state, CL_Display::get_current_window().get_gc())
    #         workspace.draw(gc)
    #     }
    #     gc_state.pop()
    #     Display::pop_modelview()
    #     Display::pop_cliprect()
    # }
    # endif

    def screen2world(self, pos):
        # Convert a screen-space point into map/world coordinates.
        return self.gc_state.screen2world(pos)

    def set_zoom(self, z):
        self.gc_state.set_zoom(z)
        self.editormap_widget.repaint()
        self.update_scrollbars()

    def zoom_out(self, pos):
        # Zoom out by a fixed factor around ``pos``.
        self.gc_state.set_zoom(self.gc_state.get_zoom() / 1.25,
                               Pointf(pos.x, pos.y))
        self.editormap_widget.repaint()
        self.update_scrollbars()

    def zoom_in(self, pos):
        # Zoom in by a fixed factor around ``pos``.
        self.gc_state.set_zoom(self.gc_state.get_zoom() * 1.25,
                               Pointf(pos.x, pos.y))
        self.editormap_widget.repaint()
        self.update_scrollbars()

    def zoom_to(self, rect):
        self.gc_state.zoom_to(rect)
        self.editormap_widget.repaint()
        self.update_scrollbars()

    def get_clip_rect(self):
        return self.gc_state.get_clip_rect()

    def move_to(self, x, y):
        self.gc_state.set_pos(Pointf(x, y))
        self.editormap_widget.repaint()
        self.update_scrollbars()

    def move_to_x(self, x):
        # Pan horizontally, keeping the current y position.
        self.gc_state.set_pos(Pointf(x, self.gc_state.get_pos().y))
        self.editormap_widget.repaint()
        self.update_scrollbars()

    def move_to_y(self, y):
        # Pan vertically, keeping the current x position.
        self.gc_state.set_pos(Pointf(self.gc_state.get_pos().x, y))
        self.editormap_widget.repaint()
        self.update_scrollbars()

    def sig_on_key(self, keyseq_str):
        """Bind a keyboard shortcut; return a Signal that fires with the
        cursor position (widget coordinates) each time it activates."""
        key_sequence = QKeySequence(keyseq_str)
        if key_sequence.isEmpty():
            raise RuntimeError("invalid key binding: '%s'" % keyseq_str)
        shortcut = QShortcut(key_sequence, self.editormap_widget)
        signal = Signal()

        def on_key(*args):
            pos = self.editormap_widget.mapFromGlobal(QCursor.pos())
            # pos = self.gc_state.screen2world(Point.from_qt(pos))
            signal(pos.x(), pos.y())

        shortcut.activated.connect(on_key)
        return signal

    def get_gc_state(self):
        return self.gc_state

    def get_widget(self):
        # Outermost widget: the tab widget when tabbed, otherwise the plain one.
        return self.tab_widget or self.widget

    def update_scrollbars(self):
        # Sync the scrollbar ranges/positions with the map bounds and the
        # current view position.
        rect = self.workspace.get_map().get_bounding_rect()
        border = 128  # extra scrollable margin around the map edges
        self.scroll_horz.setMinimum(rect.left - border)
        self.scroll_horz.setMaximum(rect.right + border)
        self.scroll_horz.setPageStep(self.editormap_widget.width())
        self.scroll_horz.setSliderPosition(int(self.gc_state.get_pos().x))
        self.scroll_vert.setMinimum(rect.top - border)
        self.scroll_vert.setMaximum(rect.bottom + border)
        self.scroll_vert.setPageStep(self.editormap_widget.height())
        self.scroll_vert.setSliderPosition(int(self.gc_state.get_pos().y))

    def set_sector_tab_label(self, index, text):
        # Only meaningful when constructed with tabbed=True.
        self.tab_widget.setTabText(index, "Sector \"%s\"" % text)
# EOF #
|
In the Bronx borough of New York City, a popular liberal arts college called Fordham University offers students the opportunity for a moderately selective higher education experience on a pretty campus with the world's largest city all around them.
They accept about 45% of applicants, and most admitted students scored in the 500s on their SAT. This admissions profile, along with an enrollment of about 15,000 students, makes it similar to Howard University or Villanova University.
|
# -*- coding: utf-8 -*-
# Copyright 2009 James Hensman and Michael Dewar
# Licensed under the Gnu General Public license, see COPYING
import numpy as np
import node
from scipy import special #needed for calculating lower bound in Gamma, Wishart
class ConjugacyError(ValueError):
    """Signals that nodes in the network are not conjugate."""
    def __init__(self, message):
        super(ConjugacyError, self).__init__(message)
class hstack(node.Node):
    """A class to represent a Matrix whose columns are Normally distributed.
    Arguments
    ----------
    parents - list
        One node per column; every parent must have the same first dimension.
    Attributes
    ----------
    shape - tuple
        (rows, number of parent columns).
    """
    def __init__(self,parents):
        dims = [e.shape[0] for e in parents]
        shape = (dims[0],len(parents))
        node.Node.__init__(self, shape)
        assert type(parents)==list
        assert np.all(dims[0]==np.array(dims)),"dimensions incompatible"
        self.parents = parents
        self.shape = shape
        # Register this node with each parent so messages can flow back up.
        [e.addChild(self) for e in self.parents]
    def pass_down_Ex(self):
        # Expected value: the parents' expectations stacked column-wise.
        return np.hstack([e.pass_down_Ex() for e in self.parents])
    def pass_down_ExxT(self):
        """Return the sum of the parents' ExxT second-moment matrices."""
        return np.sum([p.pass_down_ExxT() for p in self.parents],0)
    def pass_down_ExTx(self):
        # Not supported for stacked matrices.
        raise NotImplementedError
    def pass_up_m1_m2(self,requester):
        """Collect the children's messages and return the (m1, m2) message
        destined for the ``requester`` parent column."""
        if self.shape[1] ==1:
            #i'm a hstack of only _one_ vector.
            # a corner case I guess...
            child_messages = [c.pass_up_m1_m2(self) for c in self.children]
            return sum([e[0] for e in child_messages]),sum([e[1] for e in child_messages])
        #child messages consist of m1,m2,b,bbT
        child_messages = [c.pass_up_m1_m2(self) for c in self.children]
        # Which of our columns is asking?
        i = self.parents.index(requester)
        #here's m1 - \sum_{children} m1 bbT[i,i]
        m1 = np.sum([m[0]*float(m[-1][i,i]) for m in child_messages],0)
        #here's m2
        m2 = np.zeros((self.shape[0],1))
        m2 += sum([m[1]*float(m[2][i]) for m in child_messages])# TODO Shouldn;t this all be in the Multiplication node?
        # Subtract the cross-terms contributed by the other columns (j != i).
        m2 -= sum([sum([np.dot(m[0]*m[-1][i,j],self.parents[j].pass_down_Ex()) for j in range(self.shape[1]) if not i==j]) for m in child_messages])
        return m1,m2
class Transpose(node.Node):
    """A view of a parent node with rows and columns swapped.

    Designed to sit between a Gaussian node and a multiplication node
    (for forming inner products).
    """
    def __init__(self, parent):
        # NOTE(review): the original asserted ``isinstance(parent, Gaussian)``,
        # but no ``Gaussian`` name is defined or imported in this module, so
        # construction always raised NameError. Duck-type check instead;
        # TODO: restore the isinstance check if Gaussian becomes importable.
        assert hasattr(parent, 'pass_down_ExTx'), \
            "Can only transpose Gaussian-like nodes"
        # Bug fix: ``shape`` was previously used before assignment. Compute
        # the transposed shape first, then initialise the base Node with it.
        self.shape = parent.shape[::-1]
        node.Node.__init__(self, self.shape)
        self.parent = parent
        parent.addChild(self)
    def pass_down_Ex(self):
        # Expectation of the transpose is the transpose of the expectation.
        return self.parent.pass_down_Ex().T
    def pass_down_ExxT(self):
        # (x^T)(x^T)^T == x^T x, so delegate to the parent's ExTx.
        return self.parent.pass_down_ExTx()
    def pass_down_ExTx(self):
        return self.parent.pass_down_ExxT()
    def pass_up_m1_m2(self, requester):
        """Sum the children's messages, transposing m2 back into the
        parent's orientation."""
        # Bug fixes: the loop variable was ``a`` while ``c`` was referenced,
        # and the second sum used the non-existent ``self.child_messages``.
        child_messages = [c.pass_up_m1_m2(self) for c in self.children]
        return np.sum([m[0] for m in child_messages], 0), \
            np.sum([m[1] for m in child_messages], 0).T
class Gamma:
    """
    A Class to represent a Gamma random variable in a VB network
    Arguments
    ----------
    dim - int
        The dimension of the node (can be more than 1 - see notes)
    a0 - float
        The prior value of a
    b0 - float
        The prior value of b
    Attributes
    ----------
    qa - float
        The (variational) posterior value of a
    qb - float
        The (variational) posterior value of b
    Notes
    ----------
    The dimensionality of a Gamma node can be more than 1: this is useful for representing univariate noise. The expected value of the node is then simply a diagonal matrix with each diagonal element set to qa/qb.
    Gamma does not inherit from node.Node because it cannot be added, multiplied etc"""
    def __init__(self,dim,a0,b0):
        self.shape = (dim,dim)
        self.a0 = a0
        self.b0 = b0
        self.children = []
        self.update_a()#initialise q to correct value
        self.qb = np.random.rand()#randomly initialise solution
    def addChild(self,child):
        """Register a child node; qa is re-derived since it depends only on
        the children's dimensions."""
        self.children.append(child)
        self.update_a()#re-initialise q to correct value
    def update_a(self):
        # Closed-form update: prior a0 plus half the total dimensionality of
        # all children. Not part of the iterative VB loop.
        self.qa = self.a0
        for child in self.children:
            self.qa += 0.5*child.shape[0]
    def update(self):
        """
        Notes
        ----------
        We update only the 'b' parameter, since the 'a' parameter can be done in closed form and does not need to be iterated. Note the use of trace() allows for children whose shape is not (1,1)"""
        self.qb = self.b0
        for child in self.children:
            self.qb += 0.5*np.trace(child.pass_down_ExxT()) + 0.5*np.trace(child.mean_parent.pass_down_ExxT()) - np.trace(np.dot(child.pass_down_Ex(),child.mean_parent.pass_down_Ex().T))
    def pass_down_Ex(self):
        """Returns the expected value of the node"""
        # Diagonal matrix with qa/qb on the diagonal.
        return np.eye(self.shape[0])*self.qa/self.qb
    def pass_down_lndet(self):
        """Return the log of the determinant of the expected value of this node"""
        #return np.log(np.power(self.qa/self.qb,self.shape[0]))
        return self.shape[0]*(np.log(self.qa) - np.log(self.qb))
    def log_lower_bound(self):
        """Return this node's contribution to the log of the lower bound on the model evidence. """
        Elnx = special.digamma(self.qa)-np.log(self.qb)#expected value of the log of this node
        #terms in joint prob not covered by child nodes:
        ret = (self.a0-1)*Elnx - special.gammaln(self.a0) + self.a0*np.log(self.b0) - self.b0*(self.qa/self.qb)
        #entropy terms:
        ret -= (self.qa-1)*Elnx - special.gammaln(self.qa) + self.qa*np.log(self.qb) - self.qb*(self.qa/self.qb)
        return ret
class DiagonalGamma:
    """A class to implemet a diagonal prior for a multivariate (diagonal) Gaussian. Effectively a series of Gamma distributions
    Arguments
    ----------
    dim - int
        The dimension of the node.
    a0s, b0s - np.ndarray
        Per-dimension prior values of a and b (each of size ``dim``).
    Attributes
    ----------
    qa, qb - np.ndarray
        Per-dimension (variational) posterior values of a and b.
    """
    def __init__(self,dim,a0s,b0s):
        self.shape = (dim,dim)
        assert a0s.size==self.shape[0]
        assert b0s.size==self.shape[0]
        self.a0s = a0s.flatten()
        self.b0s = b0s.flatten()
        self.children = []
        self.update_a()#initialise q to correct value
        self.qb = np.random.rand()#randomly initialise solution
    def addChild(self,child):
        # Children must be column vectors matching our dimension.
        assert child.shape == (self.shape[0],1)
        self.children.append(child)
        self.update_a()
    def update_a(self):
        # Closed-form update; each child adds 0.5 to every element of qa.
        self.qa = self.a0s.copy()
        for child in self.children:
            self.qa += 0.5
    def update(self):
        """Iteratively update qb from the children's (diagonal) moments."""
        self.qb = self.b0s.copy()
        for child in self.children:
            self.qb += 0.5*np.diag(child.pass_down_ExxT()) + 0.5*np.diag(child.mean_parent.pass_down_ExxT()) - np.diag(np.dot(child.pass_down_Ex(),child.mean_parent.pass_down_Ex().T))
    def pass_down_Ex(self):
        """Return the expected value of the node: a diagonal matrix of qa/qb."""
        return np.diag(self.qa/self.qb)
    def pass_down_lndet(self):
        """Return the log of the determinant of the expected value of this node"""
        return np.log(np.prod(self.qa/self.qb))
    def log_lower_bound(self):
        """Return this node's contribution to the log lower bound, summed
        over the independent dimensions."""
        Elnx = special.digamma(self.qa)-np.log(self.qb)#expected value of the log of this node
        #terms in joint prob not covered by child nodes:
        ret = (self.a0s-1)*Elnx - special.gammaln(self.a0s) + self.a0s*np.log(self.b0s) - self.b0s*(self.qa/self.qb)
        ret -= (self.qa-1)*Elnx - special.gammaln(self.qa) + self.qa*np.log(self.qb) - self.qb*(self.qa/self.qb)#entropy terms
        return sum(ret)
class Wishart:
    """ A wishart random variable: the conjugate prior to the precision of a (full) multivariate Gaussian distribution"""
    def __init__(self,dim,v0,w0):
        """
        Arguments
        ----------
        dim - int : dimensionality of the node
        v0 - float : prior degrees-of-freedom parameter
        w0 - np.ndarray (dim, dim) : prior scale matrix
        """
        self.shape = (dim,dim)
        assert w0.shape==self.shape
        self.v0 = v0
        self.w0 = w0
        self.children = []
        self.update_v()#initialise qv to correct value
        #randomly initialise solution (for qw) with a rank-1 PSD matrix
        l = np.random.randn(self.shape[0],1)
        self.qw = np.dot(l,l.T)
    def addChild(self,child):
        # Children must be column vectors matching our dimension.
        assert child.shape == (self.shape[0],1)
        self.children.append(child)
        self.update_v()
    def update_v(self):
        # Closed-form update: qv depends only on the number of children, so
        # it never needs to be iterated.
        self.qv = self.v0
        for child in self.children:
            self.qv += 0.5
    def update(self):
        """Iteratively update qw from the children's moments."""
        # Bug fix: start from a *copy* of the prior. The original code did
        # ``self.qw = self.w0`` followed by ``+=``, which mutated the prior
        # array w0 in place on every update (compare DiagonalGamma.update,
        # which correctly copies its priors).
        self.qw = self.w0.copy()
        for child in self.children:
            self.qw += 0.5*child.pass_down_ExxT() + 0.5*child.mean_parent.pass_down_ExxT() - np.dot(child.pass_down_Ex(),child.mean_parent.pass_down_Ex().T)
    def pass_down_Ex(self):
        """Return the expected value of the node, qv * inv(qw)."""
        return self.qv*np.linalg.inv(self.qw)
|
My friend, Kay Haslam, lives in one of Broadway’s most beautiful and historically significant homes. Luggershill (as it was originally known) was designed and built as the home for Alfred Parsons, the Edwardian painter and garden designer.
Kay, and her husband, Red, lovingly restored the home to its previous glory. Kay very kindly offered to host the first of our Supper Club events last week.
It was a truly successful evening, not least because of the remarkable, classical setting.
Tom Rains and I initiated the evening with cocktails served in the Drawing Room for a very convivial party of 10 local guests.
The evening progressed with dinner and here is the fabulous menu which Tom produced especially for the evening.
Main course of slow cooked pork belly, black pudding, butternut squash, broccoli and apple ketchup.
Warm treacle tart with vanilla ice cream and salted caramel sauce.
Thank you again Kay for all your help and hospitality.
A magical mystery supper on 15th April. Details to follow.
Be prepared for a casual, social and fun evening and a unique dining experience.
Watch this space. We are taking provisional reservations right now.
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from rally.cmd import manage
from tests.unit import test
class CmdManageTestCase(test.TestCase):
    """Tests for the rally-manage entry point."""

    @mock.patch("rally.cmd.manage.cliutils")
    def test_main(self, mock_cliutils):
        # main() should delegate to cliutils.run with both command groups.
        manage.main()
        expected_categories = {
            "db": manage.DBCommands,
            "tempest": manage.TempestCommands,
        }
        mock_cliutils.run.assert_called_once_with(sys.argv, expected_categories)
class DBCommandsTestCase(test.TestCase):
    """Tests for the rally-manage ``db`` command group."""

    def setUp(self):
        super(DBCommandsTestCase, self).setUp()
        self.db_commands = manage.DBCommands()

    @mock.patch("rally.cmd.manage.db")
    def test_recreate(self, db_mock):
        # recreate() must drop and then re-create the database, in that order.
        self.db_commands.recreate()
        expected_calls = [mock.call.db_drop(), mock.call.db_create()]
        self.assertEqual(expected_calls, db_mock.mock_calls)
class TempestCommandsTestCase(test.TestCase):
    """Tests for the rally-manage ``tempest`` command group."""

    def setUp(self):
        super(TempestCommandsTestCase, self).setUp()
        self.tempest_commands = manage.TempestCommands()
        # NOTE(review): this mock is not referenced by any test visible in
        # this class — confirm it is still needed before removing.
        self.tempest = mock.Mock()

    @mock.patch("rally.cmd.manage.api")
    def test_install(self, api_mock):
        # install() forwards the deployment UUID (and no tempest source)
        # to the Verification API.
        uuid = "deployment_uuid"
        self.tempest_commands.install(uuid)
        api_mock.Verification.install_tempest.assert_called_once_with(
            uuid, None)
|
This luxury suite is decorated with objects from recreational boats. With its distinctive atmosphere, it makes this trip an unforgettable one for travelers.
In the sitting room, the windows opened to the mountainous forests on both sides together with the oceanic blue color and the verdant jungle give an especial freshness and brightness to the space and transfer the feeling of liveliness to the guests. Also, the chic kitchen painted with full oceanic colors and furnished with luxury furniture, besides the stone sofa presents two different experiences to the guests.
The design and decoration of the Titanic suites, as suggested by the name, are inspired by recreational boats and sailing ships. The colors used harmonize with the beautiful environment of the sea and ships. Known as oceanic colors, they remind us of the peace that reigns over the seas and oceans. This luxury suite is decorated with objects from recreational boats. With its distinctive atmosphere, it makes this trip an unforgettable one for travelers. The Titanic suite measures 75 sq. meters in area and is suitable for two people.
To reserve the hotel, forest cottages, restaurant or Ilmili coffee shop, use the online form on this website; if you encounter any problem or would like further information, contact the Hotel Baam Sabz reservation department or the Baam Sabz Tourism Agency.
© Copyrights ModiranART 2015. All rights reserved.
|
import ferris
from ..models.setting import Setting
import datetime
from google.appengine.api import memcache
class Settings(ferris.Controller):
    """Admin controller for viewing and editing application settings.

    Lists all registered :class:`Setting` classes and scaffolds an edit
    form for each one via Ferris' scaffolding component.
    """

    class Meta:
        # Only expose these actions under the /admin prefix.
        prefixes = ('admin',)
        components = (ferris.scaffold.Scaffolding,)
        Model = Setting

    def startup(self):
        # Make every registered Setting subclass available to templates.
        self.context['setting_classes'] = Setting.get_classes()

    def admin_list(self):
        """Render the list of all current settings values."""
        self.context['settings'] = ferris.settings.settings()

    def admin_edit(self, key):
        """Edit the settings class identified by *key*.

        Looks up the concrete Setting model for *key*, swaps it in as the
        scaffold's model/form, and then delegates rendering and saving to
        the generic scaffold edit action.
        """
        model = Setting.factory(key)
        # Seed the instance with the static (code-level) settings so the
        # form shows defaults even before anything has been saved.
        instance = model.get_instance(static_settings=ferris.settings.settings())
        # Re-point the scaffold at the concrete model before delegating.
        self.meta.Model = model
        self.scaffold.ModelForm = ferris.model_form(model)
        self.context['settings_class'] = model

        def reload_settings(**kwargs):
            # Saved settings may be cached per-instance; warn the admin
            # that other App Engine instances might serve stale values.
            self.components.flash_messages('Settings saved, however, the settings may not be updated on all instances. You may have to restart instances for the settings to take effect.', 'warning')

        # Fire the warning only after a successful scaffold save.
        self.events.scaffold_after_save += reload_settings
        return ferris.scaffold.edit(self, instance.key.urlsafe())
|
The Carlisle Import & Performance Nationals welcomes the world of cars, transcending all marques, scenes and styles of import and performance vehicles. This exciting event welcomes imported vehicles from around the world as well as domestics, kit cars, motorcycles, trucks and high-end performance vehicles. See the top builds, restorations, clubs and brands representing the world of automobiles and performance. Join us for your high-octane adrenaline fix with autocross, drifting, rolling exhaust competition, burnouts and more.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.