text
stringlengths 29
850k
|
|---|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from gi.repository import GLib
from gwebsockets.server import Server
from gwebsockets.server import Message
_PORT = 8080
class WebClient(object):
    """Wraps one websocket session together with its hub session id."""

    def __init__(self, session):
        self._session = session
        # Hub session id; stays None until the connection URL is parsed.
        self._session_id = None

    def send_json_message(self, data):
        """Serialize *data* to JSON and send it over the websocket."""
        payload = json.dumps(data)
        self._session.send_message(payload)

    def send_raw_message(self, data):
        """Send an already-serialized message unchanged."""
        self._session.send_message(data)

    def get_session_id(self):
        """Return the hub session id, or None if not yet assigned."""
        return self._session_id

    def set_session_id(self, value):
        """Record the hub session id parsed from the connection URL."""
        self._session_id = value
class WebServer(object):
    """WebSocket hub: groups clients by the session id in their URL and
    relays text messages between members of the same session.
    """

    def __init__(self):
        # Maps session id -> list of WebClient instances in that session.
        self._sessions = {}
        self._server = Server()
        self._server.connect('session-started', self._session_started_cb)
        self._port = self._server.start(_PORT)

    def _session_started_cb(self, server, session):
        """Wrap a new websocket session and hook up its signal handlers."""
        # perhaps reject non-sugar connections
        # how do we know if a connection comes from sugar?
        client = WebClient(session)
        session.connect('handshake-completed',
                        self._handshake_completed_cb, client)
        session.connect('message-received',
                        self._message_received_cb, client)
        # maybe disconnect the signal handler once it is received
        if session.is_ready():
            self._add_client(session, client)

    def _add_client(self, session, client):
        """Register *client* under the session id found in the request URL
        and acknowledge with an 'init-connection' message."""
        url = session.get_headers().get('http_path')
        # this should be of the form '/hub/sessionID'
        if not url or not url.startswith('/hub/'):
            return
        session_id = url[5:]
        client.set_session_id(session_id)
        self._sessions.setdefault(session_id, []).append(client)
        client.send_json_message(
            {'type': 'init-connection',
             'peerCount': len(self._sessions[session_id])})

    def _handshake_completed_cb(self, session, client):
        self._add_client(session, client)

    def _message_received_cb(self, session, message, source):
        """Relay a text message to every peer in the sender's session."""
        if message.message_type == Message.TYPE_BINARY:
            # FIXME: how to handle this?
            return
        session_id = source.get_session_id()
        if session_id is None:
            # perhaps queue
            return
        dictionary = json.loads(message.data)
        # TODO: be more strict with the protocol
        for client in self._sessions[session_id]:
            # Echo back to the sender only when it asked for 'server-echo'.
            if client != source or dictionary.get('server-echo', False):
                client.send_raw_message(message.data)

    def _session_ended_cb(self, session, client):
        """Remove *client* from its session; drop the session when empty.

        BUG FIX: this used to call self._add_client() first (copy-paste
        error), re-registering and re-greeting a client whose session had
        just ended.
        FIXME: this callback is not connected to any signal yet.
        """
        session_id = client.get_session_id()
        if session_id is None:
            return
        self._sessions[session_id].remove(client)
        if not self._sessions[session_id]:
            del self._sessions[session_id]
if __name__ == "__main__":
    # Keep a reference to the server so it is not garbage collected.
    server = WebServer()
    loop = GLib.MainLoop()
    loop.run()
|
This picturesque beach is famous for the 82 iconic bathing boxes along its seaside. This colourful beach is a great spot for photographers and foodies as it is teeming with a range of cool restaurants and cafes.
Puerto Vallarta is a colourful city and an interesting one to visit. It has a lovely mixture of modern life and proud traditional Mexican culture. The particularly beautiful thing about Puerto Vallarta (aside from the architecture) is that it’s very tourist friendly, yet incredibly authentic. There’s no shortage of activities to partake in. Be it easy boating or exciting ziplining, there’s something for the adventurer and the relaxer.
This old town of Stockholm, Sweden dates back to the 13th century, and consists of medieval alleyways, cobbled streets, and archaic architecture. Most buildings seen today were built during the 1700s to 1800s.
Whilst visiting the neighbourhood of La Boca you must head to Caminito ("little walkway" in Spanish). This colourful street has great cultural significance, as it inspired the music for the famous tango "Caminito".
Trinidad is one of the best-preserved cities in the Caribbean and has been one of UNESCO's World Heritage sites since 1988.
Jodhpur is also known as Blue City as most of the buildings are a vibrant blue colour. No one knows for certain why this colour was chosen and many theories have been thrown around. In the end it makes for one beautifully coloured city.
Wroclaw, located in western Poland, has a population of over 630,000 making it the fourth largest city in Poland. Many visit to see the buildings within the city as they have been painted every colour of the rainbow!
This stunning island off Greece’s southeastern coast is famous for its blue-roofed, whitewashed architecture.
Longyearbyen is the world’s northernmost town, and the northernmost settlement of any kind with greater than 1,000 permanent residents – it is also well known for its beautifully coloured houses.
Known as one of the most colourful places in the world, Cinque Terre is a place you need to see with your own eyes. Found along Italy’s coast on the Italian Riveria, it is composed of five villages and has been named a UNESCO World Heritage Site.
Which colourful city would you love to visit?
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2008 Gary Burton
# Copyright (C) 2010,2015 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# gtk
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from ... import widgets
from gramps.gen.lib import Place, PlaceType
from .. import build_filter_model
from . import SidebarFilter
from gramps.gen.filters import GenericFilterFactory, rules
from gramps.gen.filters.rules.place import (RegExpIdOf, HasData, IsEnclosedBy,
HasTag, HasNoteRegexp,
MatchesFilter)
GenericPlaceFilter = GenericFilterFactory('Place')
#-------------------------------------------------------------------------
#
# PlaceSidebarFilter class
#
#-------------------------------------------------------------------------
class PlaceSidebarFilter(SidebarFilter):
    """Sidebar filter panel for Place objects.

    Builds the Gtk widgets shown in the place view's filter sidebar and
    converts their contents into a GenericPlaceFilter on demand.
    """

    def __init__(self, dbstate, uistate, clicked):
        self.clicked_func = clicked
        # Plain text entries for the simple place fields.
        self.filter_id = widgets.BasicEntry()
        self.filter_name = widgets.BasicEntry()
        # Dummy Place used only as a holder for the selected place type.
        self.filter_place = Place()
        self.filter_place.set_type((PlaceType.CUSTOM, ''))
        self.ptype = Gtk.ComboBox(has_entry=True)
        # Keeps the ptype combo and the dummy place's type in sync.
        self.place_menu = widgets.MonitoredDataType(
            self.ptype,
            self.filter_place.set_type,
            self.filter_place.get_type)
        self.filter_code = widgets.BasicEntry()
        self.filter_enclosed = widgets.PlaceEntry(dbstate, uistate, [])
        self.filter_note = widgets.BasicEntry()
        self.filter_regex = Gtk.CheckButton(label=_('Use regular expressions'))
        self.tag = Gtk.ComboBox()
        self.generic = Gtk.ComboBox()
        SidebarFilter.__init__(self, dbstate, uistate, "Place")

    def create_widget(self):
        """Populate the sidebar grid with all filter widgets."""
        # Custom-filter combo renderer.
        cell = Gtk.CellRendererText()
        cell.set_property('width', self._FILTER_WIDTH)
        cell.set_property('ellipsize', self._FILTER_ELLIPSIZE)
        self.generic.pack_start(cell, True)
        self.generic.add_attribute(cell, 'text', 0)
        self.on_filters_changed('Place')
        # Tag combo renderer.
        cell = Gtk.CellRendererText()
        cell.set_property('width', self._FILTER_WIDTH)
        cell.set_property('ellipsize', self._FILTER_ELLIPSIZE)
        self.tag.pack_start(cell, True)
        self.tag.add_attribute(cell, 'text', 0)
        self.add_text_entry(_('ID'), self.filter_id)
        self.add_text_entry(_('Name'), self.filter_name)
        self.add_entry(_('Type'), self.ptype)
        self.add_text_entry(_('Code'), self.filter_code)
        self.add_text_entry(_('Enclosed By'), self.filter_enclosed)
        self.add_text_entry(_('Note'), self.filter_note)
        self.add_entry(_('Tag'), self.tag)
        self.add_filter_entry(_('Custom filter'), self.generic)
        self.add_regex_entry(self.filter_regex)

    def clear(self, obj):
        """Reset every filter widget to its empty/default state."""
        self.filter_id.set_text('')
        self.filter_name.set_text('')
        self.filter_code.set_text('')
        self.filter_enclosed.set_text('')
        self.filter_note.set_text('')
        self.ptype.get_child().set_text('')
        self.tag.set_active(0)
        self.generic.set_active(0)

    def get_filter(self):
        """Build a GenericPlaceFilter from the current widget state.

        :return: the assembled filter, or None when every widget is empty
        """
        gid = str(self.filter_id.get_text()).strip()
        name = str(self.filter_name.get_text()).strip()
        ptype = self.filter_place.get_type().xml_str()
        code = str(self.filter_code.get_text()).strip()
        enclosed = str(self.filter_enclosed.get_text()).strip()
        note = str(self.filter_note.get_text()).strip()
        regex = self.filter_regex.get_active()
        tag = self.tag.get_active() > 0
        gen = self.generic.get_active() > 0
        empty = not (gid or name or ptype or code or enclosed or note or regex
                     or tag or gen)
        if empty:
            generic_filter = None
        else:
            generic_filter = GenericPlaceFilter()
            if gid:
                rule = RegExpIdOf([gid], use_regex=regex)
                generic_filter.add_rule(rule)
            if enclosed:
                rule = IsEnclosedBy([enclosed])
                generic_filter.add_rule(rule)
            # Name, type and code are combined in a single HasData rule.
            rule = HasData([name, ptype, code], use_regex=regex)
            generic_filter.add_rule(rule)
            if note:
                rule = HasNoteRegexp([note], use_regex=regex)
                generic_filter.add_rule(rule)
            # check the Tag
            if tag:
                model = self.tag.get_model()
                node = self.tag.get_active_iter()
                attr = model.get_value(node, 0)
                rule = HasTag([attr])
                generic_filter.add_rule(rule)
            if self.generic.get_active() != 0:
                model = self.generic.get_model()
                node = self.generic.get_active_iter()
                obj = str(model.get_value(node, 0))
                rule = MatchesFilter([obj])
                generic_filter.add_rule(rule)
        return generic_filter

    def on_filters_changed(self, name_space):
        """Rebuild the custom-filter combo when the place filters change."""
        if name_space == 'Place':
            all_filter = GenericPlaceFilter()
            all_filter.set_name(_("None"))
            all_filter.add_rule(rules.place.AllPlaces([]))
            self.generic.set_model(build_filter_model('Place', [all_filter]))
            self.generic.set_active(0)

    def on_tags_changed(self, tag_list):
        """
        Update the list of tags in the tag filter.
        """
        model = Gtk.ListStore(str)
        # First (empty) row means "no tag filter".
        model.append(('',))
        for tag_name in tag_list:
            model.append((tag_name,))
        self.tag.set_model(model)
        self.tag.set_active(0)
|
Organic-Vegan Spray tanning application is finally here! We chose for you the absolute best sunless tanning product on the market! Safe, organic, and healthy for your skin, they source the world’s finest naturally derived ingredients and materials to create products you can be confident will provide you a dependable service each and every time, as a health-conscious sunless professional in the community. The best possible answer for your everyday sunless tanning needs.
Organic -Vegan Spray Tanning application is here! Organic, safe and a golden tan every time! No orange here!
Package includes a specialized body scrub designed to clean, hydrate and exfoliate dead skin to enhance the natural + customized Vegan spray tan product!
|
#
# Copyright (c) 2016 SUSE Linux GmbH. All rights reserved.
#
# This file is part of dbassembly.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, contact SUSE LLC.
#
# To contact SUSE about this file by physical or electronic mail,
# you may find current contact information at www.suse.com
from functools import wraps
import pytest
from py.path import local
from dbassembly.core import NSMAP
from dbassembly.cli import parsecli
class raises(object):  # pragma: no cover
    """
    exception decorator as used in nose, tools/nontrivial.py

    Wraps a callable and asserts that calling it raises one of the given
    exception types; any other exception propagates unchanged.
    """

    def __init__(self, *exceptions):
        self.exceptions = exceptions
        self.valid = ' or '.join([e.__name__ for e in exceptions])

    def __call__(self, func):
        name = func.__name__

        @wraps(func)
        def newfunc(*args, **kw):
            try:
                func(*args, **kw)
            except self.exceptions:
                # Expected exception was raised: the test passes.
                pass
            else:
                message = "%s() did not raise %s" % (name, self.valid)
                raise AssertionError(message)

        return newfunc
def xmldump(tree, indent=2):
    """Dump XML tree into hierarchical string

    :param tree: ElementTree or Element to dump
    :param int indent: number of spaces per indentation step
    :return: generator, yields strings
    """
    # Hoisted out of the loop: the indent unit never changes.
    indstr = indent * " "
    # NOTE(review): indentation grows with document order (enumerate index),
    # not tree depth -- looks intentional for these test dumps; confirm.
    for i, elem in enumerate(tree.iter()):
        # Missing or whitespace-only text is rendered as the string 'None'.
        if elem.text is None or (not elem.text.strip()):
            text = 'None'
        else:
            text = repr(elem.text.strip())
        yield i * indstr + "%s = %s" % (elem.tag, text)
        # Attributes go one level deeper, sorted for a stable ordering.
        for attr in sorted(elem.attrib):
            yield (i + 1) * indstr + "* %s = %r" % (attr, elem.attrib[attr])
# ------------------------------------------------------
# Fixtures
#
@pytest.fixture
def docoptdict():
    """Fixture: creates a faked dictionary object from docopt.

    :return: dictionary
    :rtype: dict
    """
    # Parse a dummy command line so tests get a realistic docopt dict.
    return parsecli(['foo.xml'])
# ------------------------------------------------------
# General
#
# http://pytest.org/latest/parametrize.html#basic-pytest-generate-tests-example
def casesdir():
    """Return the "cases" directory relative to 'conftest.py'.

    Despite the original wording this is not a pytest fixture; it is
    called directly from pytest_generate_tests.

    :return: directory pointing to 'cases'
    :rtype: :py:class:`py.path.local`
    """
    return local(__file__).dirpath() / "cases"
def structdir():
    """Return the "struct" directory relative to 'conftest.py'.

    Fixed docstring: the original claimed it returned "cases", but the
    function returns the "struct" directory. Not a pytest fixture; it is
    called directly from pytest_generate_tests.

    :return: directory pointing to 'struct'
    :rtype: :py:class:`py.path.local`
    """
    return local(__file__).dirpath() / "struct"
def get_test_cases(testcasesdir,
                   casesxml='.case.xml',
                   patternout='.out.xml',
                   patternerr='.err.xml'):
    """Generator: yield name tuple of (casexmlfile, outputfile, errorfile)

    :param testcasesdir: iterable of case files (py.path.local objects)
    :param str casesxml: file extension of XML case file
    :param str patternout: file extension of output file
    :param str patternerr: file extension of error file
    """
    for case in testcasesdir:
        stem = case.basename
        # Derive the sibling output/error file names from the case name.
        outname = stem.replace(casesxml, patternout)
        errname = stem.replace(casesxml, patternerr)
        yield (case,
               case.new(basename=outname),
               case.new(basename=errname))
def xmltestcase(metafunc, cases):
    """Compares .cases.xml files with .out.xml / .err.xml files

    HINT: The out file has to be an *exact* output. Each spaces
    is considered to be significant.
    """
    case_files = cases.listdir('*.case.xml', sort=True)
    # Each case becomes a (original, outputfile, errorfile) triple.
    triples = get_test_cases(case_files)
    case_ids = [entry.basename for entry in case_files]
    metafunc.parametrize("xmltestcase", triples, ids=case_ids)
def xmlteststruct(metafunc, struct):
    """Compares .cases.xml files with .struct.xml / .err.xml files
    """
    case_files = struct.listdir('*.case.xml', sort=True)
    # Each case becomes a (original, outputfile, errorfile) triple,
    # with the structural dump as the expected output.
    triples = get_test_cases(case_files, patternout='.out.struct')
    case_ids = [entry.basename for entry in case_files]
    metafunc.parametrize("xmlteststruct", triples, ids=case_ids)
def pytest_generate_tests(metafunc):
    """Generate testcases for all *.case.xml files.
    """
    # Map each parametrizing fixture to its handler and its data directory.
    dispatch = {
        'xmltestcase': (xmltestcase, casesdir()),
        'xmlteststruct': (xmlteststruct, structdir()),
    }
    if not metafunc.fixturenames:
        return
    handler, subdir = dispatch.get(metafunc.fixturenames[0], (None, None))
    if handler is not None:
        handler(metafunc, subdir)
|
Product Description Sustainable. By taking waste paper and recycling it into cellulose fibre for numerous applications, CIUR a.s. Technology is sequestering thousands of tonnes of carbon which would otherwise be released as methane gas at a landfill.
A longer-term solution begins with the City making investments into technology that can recycle materials into reusable and profit-making products such as insulation from recycled paper. This is attainable if the type of waste product used in consumer packaging is standardized.
Warmcel 100 is a cellulose fibre loft insulation manufactured from 100% recycled waste newspaper. Developed for the DIY loft insulation market, Warmcel 100 is supplied in convenient 8 kg bags and can be easily poured between the joists to provide full insulation, or used to top up existing insulation.
RECYCLED INSULATION Recycled plastic or paper insulation. PLANT FIBRE INSULATION Hemp or wood fibre insulation . LOFT ACCESSORIES Make the most of your loft. SOLO ONE COAT LIME PLASTER Allowing your walls to breathe . MEMBRANES AND TAPES ... Samples, Best Prices, Fast Delivery.
F&H Insulation, Inc. is a contractor AND supplier for industrial, commercial, and mechanical insulation.
Luckily, many new insulation products exist that achieve energy efficiency with natural and recycled materials, such as cotton, hemp, and paper. Bonded Logic, Inc., a member of the U.S. Green Building Council, offers Ultra Touch, a non-irritant, cotton insulation made from 85% post-industrial recycled materials.
Loft insulation alone cuts down energy usage (and heating bills) by 20%. For loft or wall insulation Warmcel 100% recycled newspaper insulation, non-toxic and recyclable, can be used instead of fibreglass.
Thermofloc is a loose cellulose insulation made from recycled newspaper combined with a mineral fire retardant that offers a sustainable and cost effective way to insulate horizontal surfaces such as lofts or floors. Thermofloc is an excellent replacement for Warmcel 100.
Made entirely from recycled newsprint, Warmcel is a high-performance cellulose fibre insulation offering very high U-values. We inject Warmcel into our panels to a specific high density, so that no settling occurs when the panels are erected upright.
Made from recycled newspaper, cellulose fibre is a superb insulation material. It's environmentally friendly and offers a good thermal barrier. In tandem with superior airtightness provided by the blown installation method, cellulose fibre outperforms many typical insulation products.
We install cellulose fibre insulation, a high-performing carbon-negative product. Cellulose fibre is made from recycled newspaper. The paper is shredded and natural inorganic salts added for resistance to fire, mould, insects and vermin.
Natural Insulation - Wall and Loft Insulation from natural materials and other eco friendly sources: Recycled Paper, Sheep's Wool, Hemp and recycled plastics.
Thermofloc replaces Warmcel 100 - similar specification, cheaper price. Thermofloc is a loose cellulose insulation made from recycled newspaper combined with a mineral fire retardant that offers a sustainable and cost effective way to insulate horizontal surfaces such as lofts or floors.
Earthborn Paints, Farrow & Ball, Auro Natural Paints, OSMO Wood Oils, Ecoezee, Baavet Wool Duvets, Ecosolutions, Warmcel Recycled Paper, Blackmountain & Thermafleece Sheepwool Insulation.
|
# -*- coding: utf-8 -*-
from utils import logger
from update_status import update_submission_status
from exception import VLoginFailed, VSubmitFailed
from bs4 import BeautifulSoup
import html5lib
import urllib, urllib2, cookielib
import time
class HDU:
    """Scraper client for the HDU online judge (acm.hdu.edu.cn).

    Logs in with a regular web session (Python 2 urllib2 + cookielib),
    posts submissions, and scrapes the status page for verdicts.
    """
    # base information:
    URL_HOME = 'http://acm.hdu.edu.cn/'
    URL_LOGIN = URL_HOME + 'userloginex.php?action=login'
    URL_SUBMIT = URL_HOME + 'submit.php?action=submit'
    URL_STATUS = URL_HOME + 'status.php?'
    # Browser-like headers so the judge accepts our requests.
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Ubuntu Chromium/52.0.2743.116 Chrome/52.0.2743.116 Safari/537.36',
        'Origin': "http://acm.hdu.edu.cn",
        'Host': "acm.hdu.edu.cn",
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
    }
    # result
    # Column headers of the status table, in scrape order.
    INFO = ['Run ID', 'Submit Time', 'Judge Status', 'Pro.ID', 'Exe.Time', 'Exe.Memory', 'Code Len.', 'Language',
            'Author']
    # map to compatible result
    # vid v_run_id v_submit_time status time memory length language v_user
    MAP = {
        'Run ID': 'v_run_id',
        'Submit Time': 'v_submit_time',
        'Judge Status': 'status',
        'Pro.ID': 'vid',
        'Exe.Time': 'time',
        'Exe.Memory': 'memory',
        'Code Len.': 'length',
        'Language': 'language',
        'Author': 'v_user',
    }
    # language
    # HDU's numeric form codes for each submission language.
    LANGUAGE = {
        'G++': '0',
        'GCC': '1',
        'C++': '2',
        'C': '3',
        'PASCAL': '4',
        'JAVA': '5',
        'C#': '6',
    }

    def __init__(self, user_id, password):
        """Store credentials and build a cookie-aware URL opener."""
        self.user_id = user_id
        self.password = password
        self.problem_id = ''
        self.run_id = ''
        # Create a CookieJar instance to store cookies.
        cookie = cookielib.CookieJar()
        # Use urllib2's HTTPCookieProcessor to build a cookie handler.
        handler = urllib2.HTTPCookieProcessor(cookie)
        # Build an opener from the handler.
        self.opener = urllib2.build_opener(handler)
        # This opener's open() works like urllib2.urlopen and also
        # accepts a Request object.

    def login(self):
        """Post the login form; True when the reply shows a 'signout' link."""
        data = dict(
            username=self.user_id,
            userpass=self.password,
            login='Sign In'
        )
        try:
            post_data = urllib.urlencode(data)
            request = urllib2.Request(HDU.URL_LOGIN, post_data, HDU.headers)
            response = self.opener.open(request).read()
            # A logged-in page contains a 'signout' link.
            if response.find('signout') > 0:
                return True
            else:
                logger.warning("Login failed.")
                return False
        except:
            logger.error("Login method error.")
            return False

    def submit(self, problem_id, language, src_code):
        """Submit *src_code* for *problem_id*; True when the POST succeeds."""
        submit_data = dict(
            problemid=problem_id,
            language=HDU.LANGUAGE[language.upper()],
            usercode=src_code,
            check='0',
        )
        self.problem_id = problem_id
        post_data = urllib.urlencode(submit_data)
        try:
            request = urllib2.Request(HDU.URL_SUBMIT, post_data, HDU.headers)
            self.opener.open(request)
            return True
        except:
            logger.info('Submit method error.')
            return False

    @staticmethod
    def str2int(string):
        """Strip the trailing unit and return the numeric value.

        Returns 0 for empty input; tries dropping one trailing character
        first (e.g. '62K'), then two (e.g. '15MS').
        """
        if not string:
            return 0
        try:
            return int(string[:-1])
        except:
            return int(string[:-2])

    def result(self):
        """Scrape the status page for the most recent verdict.

        :return: tuple (done, result_dict); done is False while the verdict
            is still queuing/compiling/running or when scraping failed.
        """
        data = {
            'first': '',
            'pid': '',
            'user': self.user_id,
        }
        # Narrow the status query to our run / problem when known.
        if self.run_id:
            data['first'] = self.run_id
        if self.problem_id:
            data['pid'] = self.problem_id
        url = HDU.URL_STATUS + urllib.urlencode(data)
        try:
            request = urllib2.Request(url, '', HDU.headers)
            page = self.opener.open(request, timeout=5)
            soup = BeautifulSoup(page, 'html5lib')
            table = soup.find('table', {'class': 'table_text'})
            table_body = table.find('tbody')
            rows = table_body.find_all('tr')
            data = []
            for row in rows:
                cols = row.find_all('td')
                cols = [ele.text.strip() for ele in cols]
                data.append([ele for ele in cols])  # No need:Get rid of empty values
            # Need at least a header row plus one submission row.
            if len(data) <= 1:
                logger.warning('get result error!')
                return False, {}
            name = data[0]
            latest = data[1]
            # Remember the run id so later polls track this submission.
            if not self.run_id:
                self.run_id = latest[0]
            wait = ['queuing', 'compiling', 'running']
            res = {}
            # Translate the scraped columns into the compatible key names.
            for i in range(9):
                res[HDU.MAP[name[i]]] = str(latest[i]).lower()
            res['time'] = self.str2int(res['time'])
            res['memory'] = self.str2int(res['memory'])
            # Non-final verdicts: report not-done together with partial data.
            for i in range(3):
                if res['status'] == wait[i]:
                    return False, res
            return True, res
        except Exception as e:
            logger.error(e)
            return False, {}
def hdu_submit(problem_id, language_name, src_code, ip=None, sid=None, username='USTBVJ', password='USTBVJ'):
    """Log in to HDU, submit *src_code* and poll until a final verdict.

    :param problem_id: HDU problem number
    :param language_name: key into HDU.LANGUAGE (case-insensitive)
    :param src_code: source code to submit
    :param ip: optional callback address for intermediate status updates
    :param sid: submission id used in status updates and log messages
    :param username, password: judge account credentials
    :return: result dict from HDU.result()
    :raises VLoginFailed: when login fails
    :raises VSubmitFailed: when the submission POST fails
    """
    hdu = HDU(username, password)
    if hdu.login():
        if hdu.submit(problem_id, language_name, src_code):
            status, result = hdu.result()
            # Poll every 2s until the verdict leaves queuing/compiling/running.
            # NOTE(review): loops forever if the judge never finishes -- confirm
            # an upper bound is enforced elsewhere.
            while not status:
                status, result = hdu.result()
                # Push intermediate status back to the caller when requested.
                if result and ip:
                    update_submission_status(ip, sid, result['status'])
                time.sleep(2)
            return result
        else:
            info = 'HDU [{pid},{lang},{sid}] submit error.'.format(pid=problem_id, lang=language_name, sid=sid)
            logger.exception(info)
            raise VSubmitFailed(info)
    else:
        info = 'HDU [{user},{sid}] login failed.'.format(user=username, sid=sid)
        logger.exception(info)
        raise VLoginFailed(info)
if __name__ == '__main__':
    # Manual smoke test: submit a trivial A-B solution for problem 1000.
    pid = 1000
    lang = 'g++'
    src = '''
#include<bits/stdc++.h>
using namespace std;
int main()
{
int a,b;
while(cin>>a>>b)cout<<a-b<<endl;
return 0;
}
'''
    print hdu_submit(pid,lang,src)
|
Athletically elegant outside, it's in the E-Class cabin where your senses come alive. Vivid screens and flowing shapes entice the eyes. And from multitouch controls to aromatherapy and a hot stone massage, wherever you touch, it caresses you back. Two smartphone-like pads control nearly all the car's features without taking a hand from the wheel. Swipe and select from the central screen with your right thumb. Change settings on the screen between the gauges with your left.
A panoramic 12.3-inch screen unites navigation, entertainment and countless comfort features. Sharper graphics and more intuitive menus are clear to the eye and the mind. A second 12.3-inch graphical instrument cluster is optional. Standard COMAND® navigation helps get you where you want to go with 3D maps, live traffic, searching via Yelp or HERE, and lane advice before turns. The touchpad controller lets you control by tapping, twisting or even handwriting. The E-Class cabin flows with the contours of nature. Fine-tailored seats are sculpted to match the muscles in your body. Exotic yet sustainable woods sweep door-to-door, their natural glow edge-lit by 64-color LED ambience.
You don't measure a car's efficiency just at the gas pump. It's also something you feel in its confident agility. How it moves into traffic, or negotiates a surprise. It's how an E-Class always feels right for the times and in the moment. The E 300's turbo engine squeezes more power from less fuel, atomizing and itemizing every drop and self-tuning every few milliseconds. A new 9-speed transmission responds more quickly yet relaxes more efficiently. While the E-Class passionately grips the pavement, it holds its favorite driver with equal affection. The enduring support of its ergonomically contoured and crafted seats can be further enhanced with active side bolsters. DYNAMIC SELECT dials in the car's performance character with the tap of a console button. The throttle, shifting, chassis and more sharpen in Sport mode, ease up in ECO. There's even a mode you can set up as you like.
This isn't just the most advanced E-Class yet. With technologies never before offered on a production car, the E-Class writes a new chapter in the story of driving: Where cars can talk to each other and look out for you, in ways you never imagined. Groundbreaking Car-to-X technology connects your E-Class to a central information resource, to send you in-car updates about driving conditions before you get to them. Your car can also report hazards, to help other drivers with Car-to-X. Mercedes-Benz engineers are human engineers first. When PRE-SAFE® Sound senses an impending collision, it emits safe pink noise via the audio system to pre-trigger your ears' natural defense against the loud noises of an accident.
Let the 2019 Mercedes-Benz E 300 impress you in person today at Walter's Mercedes-Benz of Riverside located at 3213 Adams St. Riverside, CA 92504.
|
from egnyte.tests.config import EgnyteTestCase
FOLDER_NAME = 'EVENT'
class TestEvents(EgnyteTestCase):
    """Integration tests for the Egnyte events API (filter/poll)."""

    def setUp(self):
        # Create the sandbox root folder the tests operate in.
        super(TestEvents, self).setUp()
        self.root_folder.create()

    def test_filter_poll(self):
        """Polling from the oldest event id returns events and advances the cursor."""
        events = self.egnyte.events
        events = events.filter(events.oldest_event_id)
        results = events.poll(count=1)
        self.assertNotEqual(0, len(results), "Poll results should not be empty")
        # NOTE(review): the message mentions latest_event_id but the check
        # compares start_id with oldest_event_id -- confirm the intent.
        self.assertNotEqual(events.start_id, events.oldest_event_id,
                            "latest_event_id should have been bumped after non-empty poll")

    def test_register_new_events(self):
        """Creating a folder surfaces a matching 'create' event via the API."""
        folder = self.root_folder.folder(FOLDER_NAME).create()
        events = self.egnyte.events
        # Start one event before the newest so the create event is included.
        events = events.filter(events.latest_event_id - 1)
        results = events.poll(count=1)
        self.assertEqual(results[0].action_source, 'PublicAPI')
        self.assertEqual(results[0].action, 'create')
        self.assertEqual(results[0].data['target_path'], folder.path)
        self.assertEqual(results[0].data['is_folder'], True)
|
Alexandria International Airport (AEX) and the England Economic & Industrial Development District (England Authority) are implementing the Neighborhood Noise Mitigation Program, with funding provided by the Federal Aviation Administration (FAA) and the Louisiana Department of Transportation & Development (La DOTD). The Neighborhood Noise Mitigation Program was approved by the FAA on August 14, 2007, as part of AEX’s Airport Noise Compatibility Program (NCP). View a copy of the FAA’s Record of Approval (ROA) for the AEX NCP.
AEX voluntarily conducted the Airport Noise Compatibility Study in accordance with a Federal regulation called title 14, Code of Federal Regulations (CFR), part 150, Airport Noise Compatibility Planning. The Airport Noise Compatibility Study was comprised of two parts: the Noise Exposure Maps and Supporting Documentation and the Noise Compatibility Program. The 2010 Future Condition Noise Exposure Map, with Program Implementation is the basis for determining the areas that are eligible to participate in the Neighborhood Noise Mitigation Program.
The Neighborhood Noise Mitigation Program includes residential and other noise-sensitive buildings in areas surrounding the airport that are exposed to a yearly average Day/Night Sound Level (DNL) of 65 decibels (dB) and higher as shown on the 2010 Future Condition Noise Exposure Map, with Program Implementation.
The noise contours were squared off using natural boundaries, street boundaries, and property boundaries to achieve equity in the neighborhoods. Then that area was divided into the Primary Area and the Secondary Area. The Primary Area generally includes parcels that are located within, or partially within, the DNL 70 dB or greater contour, and/or within, or partially within, the Clear Zone (CZ) or Airport Protection Zone I (APZ I). The Secondary Area includes parcels that are located within, or partially within, the DNL 65 dB contour, and/or within, or partially within, the Airport Protection Zone II (APZ II) for Runways 14 and 32. In addition there is a significant boundary between the two areas (such as I-49, LA Highway 1, or LA Highway 28).
The England Authority is currently implementing the Fee Simple Acquisition and Relocation Assistance Program in the Primary Area. In the Rapides Station neighborhood, many homes have been acquired, their residents have been relocated, and the structures have been demolished.
St. Mary’s Residential Training School and Renaissance Home for Youth have both participated in the Acoustical Treatment Program for Institutional and Community Facilities.
Please note that participation in any of these programs is completely voluntary. You are not required to participate in any of these programs.
For the most up-to-date information regarding the status of the Neighborhood Noise Mitigation Program, read our newsletter.
More information is available regarding the Rapides Parish ordinance known as the Alexandria International Airport Landing District Ordinance, which describes the land use restrictions in the CZ, APZ I, and APZ II. The City of Alexandria has a similar ordinance known as Airfield Compatibility.
Below are some of the most frequently asked questions about the AEX Neighborhood Noise Mitigation Program.
Federal Grants from the Airport Improvement Program are used to fund 95% of the Neighborhood Noise Mitigation Program. The State of Louisiana Department of Transportation & Development provides the remaining 5%. No community taxpayer dollars are used to fund any portion of the Program.
What types of properties are eligible for the Program?
The Noise Compatibility Program has determined that eligible structures should be prioritized to include single-family or multi-family residences, and institutional and community facilities.
Is there a “list” to sign-up for the Program?
No. Once the eligibility criterion is determined, letters are sent to the homes of those respective blocks for an initial meeting. The homeowner meeting is an “invitation only” meeting where the Program is discussed at length, and at that time, eligible homeowners can fill-out an application to participate in the Program. Applications for ineligible homes are not accepted.
How do I obtain a copy of a “2010 Future Condition Noise Exposure Map, with Program Implementation”?
Who can I call if I have questions about the Program?
|
from dataactbroker.handlers.submission_handler import get_submission_stats
from tests.unit.dataactcore.factories.job import SubmissionFactory
from tests.unit.dataactcore.factories.staging import TotalObligationsFactory
def test_obligation_stats_for_submission_nonzero(database):
    """get_submission_stats reflects the totals stored for the submission."""
    submission = SubmissionFactory()
    database.session.add(submission)
    database.session.commit()

    # One TotalObligations row whose parts (proc + asst) sum to the total.
    database.session.add_all([
        TotalObligationsFactory(
            total_obligations=5000,
            total_proc_obligations=2000,
            total_asst_obligations=3000,
            submission_id=submission.submission_id,
        )
    ])
    database.session.commit()

    expected = {
        "total_obligations": 5000,
        "total_procurement_obligations": 2000,
        "total_assistance_obligations": 3000
    }
    assert get_submission_stats(submission.submission_id) == expected
def test_obligation_stats_for_submission_zero(database):
    """With no TotalObligations rows, every reported total is zero."""
    submission = SubmissionFactory()
    database.session.add(submission)
    database.session.commit()

    expected = {
        "total_obligations": 0,
        "total_procurement_obligations": 0,
        "total_assistance_obligations": 0
    }
    assert get_submission_stats(submission.submission_id) == expected
|
Moving on a budget? Below is a list of affordable Sudley Springs movers that provide quality moving services at low rates. Select the 'Get Quotes' button to fill out a quote form. You will receive up to seven free estimates from professional moving companies in Sudley Springs, VA.
Read real customer reviews and compare moving estimates from our pre-screened and fully-insured Sudley Springs moving companies. We also provide moving company profiles with photos, services offered, license numbers and contact information. Determine which Sudley Springs mover best suits your needs and save on your next move.
They definitely knew what they were doing.
They were patient. They were fast paced. They definitely knew what they were doing. Very organized, very polite. You know, no damages at all. And they were just very flexible with my demands.
It was good. They showed up and they moved my stuff.
Submit information, such as move date and size of the move, on our easy online form. We will help you locate the best moving companies near Sudley Springs, VA.
Get FREE, no obligation moving quotes from professional movers in our network. All Sudley Springs moving companies are pre-screened, licensed and insured.
|
###############################################################################
# Copyright 2014 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import logging
import logging.config
import optparse
import os
import sys
# Supported interpreter range for this script (inclusive).
min_version = (3,6)
max_version = (3,9)
# Compare only (major, minor): sys.version_info is a 5-tuple, so comparing it
# directly against (3, 9) would reject every 3.9.x patch release.
if not (min_version <= sys.version_info[:2] <= max_version):
    # The original called print(sys.stderr, "..."), which writes the stream
    # object's repr to stdout; use the file= keyword to really hit stderr.
    print("Python version %d.%d through %d.%d is required" %
          (min_version + max_version), file=sys.stderr)
    sys.exit(1)
from ipf.daemon import OneProcessWithRedirect,Daemon
from ipf.engine import WorkflowEngine
from ipf.paths import *
#######################################################################################################################
# Configure logging for all workflows from the shared etc/logging.conf file.
logging.config.fileConfig(os.path.join(IPF_ETC_PATH,"logging.conf"))
#######################################################################################################################
class WorkflowDaemon(Daemon):
    """Run a single workflow as a detached daemon process."""

    def __init__(self, workflow_path):
        self.workflow_path = workflow_path
        # Derive the daemon name from the workflow file name (sans extension);
        # it names both the pid file and the log file.
        workflow_filename = os.path.split(workflow_path)[1]
        name = workflow_filename.split(".")[0]
        log_file = os.path.join(IPF_LOG_PATH,name+".log")
        Daemon.__init__(self,
                        pidfile=os.path.join(IPF_VAR_PATH,name+".pid"),
                        stdout=log_file,
                        stderr=log_file)

    def run(self):
        """Execute the workflow inside the daemon process."""
        WorkflowEngine().run(self.workflow_path)
#######################################################################################################################
class OneWorkflowOnly(OneProcessWithRedirect):
    """Run a workflow while guaranteeing at most one instance per workflow name."""

    def __init__(self, workflow_path):
        self.workflow_path = workflow_path
        # The workflow file name (sans extension) names the pid and log files,
        # so two runs of the same workflow share one pid file.
        workflow_filename = os.path.split(workflow_path)[1]
        name = workflow_filename.split(".")[0]
        log_file = os.path.join(IPF_LOG_PATH,name+".log")
        OneProcessWithRedirect.__init__(self,
                                        pidfile=os.path.join(IPF_VAR_PATH,name+".pid"),
                                        stdout=log_file,
                                        stderr=log_file)

    def run(self):
        """Execute the workflow."""
        WorkflowEngine().run(self.workflow_path)
#######################################################################################################################
def main():
    """Parse the command line and run the given workflow (optionally daemonized)."""
    parser = optparse.OptionParser(usage="Usage: %prog [options] <workflow file>")
    parser.add_option("-d","--daemon",action="store_true",default=False,dest="daemon",
                      help="run as a daemon")
    parser.add_option("-c","--cron",action="store_true",default=False,dest="cron",
                      help="running out of cron")
    (options, args) = parser.parse_args()

    # The two execution modes are mutually exclusive.
    if options.daemon and options.cron:
        parser.error("can't run as both daemon and cron")
    if len(args) != 1:
        parser.error("exactly one positional argument expected - a path to a workflow file")

    workflow_path = args[0]
    if options.daemon:
        WorkflowDaemon(workflow_path).start()
    elif options.cron:
        # don't let processes pile up if workflows aren't finishing
        OneWorkflowOnly(workflow_path).start()
    else:
        # Foreground run: execute the workflow directly.
        WorkflowEngine().run(workflow_path)
#######################################################################################################################
# Script entry point.
if __name__ == "__main__":
    main()
|
LOTUS MIXERS is your global answer to your mixing needs. Based in the USA, but supplying mixers worldwide.
Whether you are in North America, South America, Asia/Middle East, Europe, Australia, or Africa, contact us so we can help you solve your mixing needs.
LOTUS MIXERS has many regional representatives and is always looking for additional quality representatives worldwide.
|
# Copyright (C) 2011 Equinor ASA, Norway.
#
# The file 'ecl_grid.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Module to load and query ECLIPSE GRID/EGRID files.
The ecl_grid module contains functionality to load and query an
ECLIPSE grid file; it is currently not possible to manipulate or let
alone create a grid with ecl_grid module. The functionality is
implemented in the EclGrid class. The ecl_grid module is a thin
wrapper around the ecl_grid.c implementation from the libecl library.
"""
import ctypes
import warnings
import numpy
import pandas
import sys
import os.path
import math
import itertools
from cwrap import CFILE, BaseCClass, load, open as copen
from ecl import EclPrototype
from ecl.util.util import monkey_the_camel
from ecl.util.util import IntVector
from ecl import EclDataType, EclUnitTypeEnum, EclTypeEnum
from ecl.eclfile import EclKW, FortIO
from ecl.grid import Cell
class EclGrid(BaseCClass):
    """
    Class for loading and internalizing ECLIPSE GRID/EGRID files.

    Thin Python wrapper around the ecl_grid C implementation; the
    ``_xxx`` EclPrototype attributes that follow bind the C functions
    used by the methods of this class.
    """
    # Type name used to register this class with the cwrap type system.
    TYPE_NAME = "ecl_grid"
_fread_alloc = EclPrototype("void* ecl_grid_load_case__(char*, bool)", bind = False)
_grdecl_create = EclPrototype("ecl_grid_obj ecl_grid_alloc_GRDECL_kw(int, int, int, ecl_kw, ecl_kw, ecl_kw, ecl_kw)", bind = False)
_alloc_rectangular = EclPrototype("ecl_grid_obj ecl_grid_alloc_rectangular(int, int, int, double, double, double, int*)", bind = False)
_exists = EclPrototype("bool ecl_grid_exists(char*)", bind = False)
_get_numbered_lgr = EclPrototype("ecl_grid_ref ecl_grid_get_lgr_from_lgr_nr(ecl_grid, int)")
_get_named_lgr = EclPrototype("ecl_grid_ref ecl_grid_get_lgr(ecl_grid, char*)")
_get_cell_lgr = EclPrototype("ecl_grid_ref ecl_grid_get_cell_lgr1(ecl_grid, int)")
_num_coarse_groups = EclPrototype("int ecl_grid_get_num_coarse_groups(ecl_grid)")
_in_coarse_group1 = EclPrototype("bool ecl_grid_cell_in_coarse_group1(ecl_grid, int)")
_free = EclPrototype("void ecl_grid_free(ecl_grid)")
_get_nx = EclPrototype("int ecl_grid_get_nx(ecl_grid)")
_get_ny = EclPrototype("int ecl_grid_get_ny(ecl_grid)")
_get_nz = EclPrototype("int ecl_grid_get_nz(ecl_grid)")
_get_global_size = EclPrototype("int ecl_grid_get_global_size(ecl_grid)")
_get_active = EclPrototype("int ecl_grid_get_active_size(ecl_grid)")
_get_active_fracture = EclPrototype("int ecl_grid_get_nactive_fracture(ecl_grid)")
_get_name = EclPrototype("char* ecl_grid_get_name(ecl_grid)")
_ijk_valid = EclPrototype("bool ecl_grid_ijk_valid(ecl_grid, int, int, int)")
_get_active_index3 = EclPrototype("int ecl_grid_get_active_index3(ecl_grid, int, int, int)")
_get_global_index3 = EclPrototype("int ecl_grid_get_global_index3(ecl_grid, int, int, int)")
_get_active_index1 = EclPrototype("int ecl_grid_get_active_index1(ecl_grid, int)")
_get_active_fracture_index1 = EclPrototype("int ecl_grid_get_active_fracture_index1(ecl_grid, int)")
_get_global_index1A = EclPrototype("int ecl_grid_get_global_index1A(ecl_grid, int)")
_get_global_index1F = EclPrototype("int ecl_grid_get_global_index1F(ecl_grid, int)")
_get_ijk1 = EclPrototype("void ecl_grid_get_ijk1(ecl_grid, int, int*, int*, int*)")
_get_ijk1A = EclPrototype("void ecl_grid_get_ijk1A(ecl_grid, int, int*, int*, int*)")
_get_xyz3 = EclPrototype("void ecl_grid_get_xyz3(ecl_grid, int, int, int, double*, double*, double*)")
_get_xyz1 = EclPrototype("void ecl_grid_get_xyz1(ecl_grid, int, double*, double*, double*)")
_get_cell_corner_xyz1 = EclPrototype("void ecl_grid_get_cell_corner_xyz1(ecl_grid, int, int, double*, double*, double*)")
_get_corner_xyz = EclPrototype("void ecl_grid_get_corner_xyz(ecl_grid, int, int, int, double*, double*, double*)")
_get_xyz1A = EclPrototype("void ecl_grid_get_xyz1A(ecl_grid, int, double*, double*, double*)")
_get_ij_xy = EclPrototype("bool ecl_grid_get_ij_from_xy(ecl_grid, double, double, int, int*, int*)")
_get_ijk_xyz = EclPrototype("int ecl_grid_get_global_index_from_xyz(ecl_grid, double, double, double, int)")
_cell_contains = EclPrototype("bool ecl_grid_cell_contains_xyz1(ecl_grid, int, double, double, double)")
_cell_regular = EclPrototype("bool ecl_grid_cell_regular1(ecl_grid, int)")
_num_lgr = EclPrototype("int ecl_grid_get_num_lgr(ecl_grid)")
_has_numbered_lgr = EclPrototype("bool ecl_grid_has_lgr_nr(ecl_grid, int)")
_has_named_lgr = EclPrototype("bool ecl_grid_has_lgr(ecl_grid, char*)")
_grid_value = EclPrototype("double ecl_grid_get_property(ecl_grid, ecl_kw, int, int, int)")
_get_cell_volume = EclPrototype("double ecl_grid_get_cell_volume1(ecl_grid, int)")
_get_cell_thickness = EclPrototype("double ecl_grid_get_cell_thickness1(ecl_grid, int)")
_get_cell_dx = EclPrototype("double ecl_grid_get_cell_dx1(ecl_grid, int)")
_get_cell_dy = EclPrototype("double ecl_grid_get_cell_dy1(ecl_grid, int)")
_get_depth = EclPrototype("double ecl_grid_get_cdepth1(ecl_grid, int)")
_fwrite_grdecl = EclPrototype("void ecl_grid_grdecl_fprintf_kw(ecl_grid, ecl_kw, char*, FILE, double)")
_load_column = EclPrototype("void ecl_grid_get_column_property(ecl_grid, ecl_kw, int, int, double_vector)")
_get_top = EclPrototype("double ecl_grid_get_top2(ecl_grid, int, int)")
_get_top1A = EclPrototype("double ecl_grid_get_top1A(ecl_grid, int)")
_get_bottom = EclPrototype("double ecl_grid_get_bottom2(ecl_grid, int, int)")
_locate_depth = EclPrototype("int ecl_grid_locate_depth(ecl_grid, double, int, int)")
_invalid_cell = EclPrototype("bool ecl_grid_cell_invalid1(ecl_grid, int)")
_valid_cell = EclPrototype("bool ecl_grid_cell_valid1(ecl_grid, int)")
_get_distance = EclPrototype("void ecl_grid_get_distance(ecl_grid, int, int, double*, double*, double*)")
_fprintf_grdecl2 = EclPrototype("void ecl_grid_fprintf_grdecl2(ecl_grid, FILE, ecl_unit_enum) ")
_fwrite_GRID2 = EclPrototype("void ecl_grid_fwrite_GRID2(ecl_grid, char*, ecl_unit_enum)")
_fwrite_EGRID2 = EclPrototype("void ecl_grid_fwrite_EGRID2(ecl_grid, char*, ecl_unit_enum)")
_equal = EclPrototype("bool ecl_grid_compare(ecl_grid, ecl_grid, bool, bool)")
_dual_grid = EclPrototype("bool ecl_grid_dual_grid(ecl_grid)")
_init_actnum = EclPrototype("void ecl_grid_init_actnum_data(ecl_grid, int*)")
_compressed_kw_copy = EclPrototype("void ecl_grid_compressed_kw_copy(ecl_grid, ecl_kw, ecl_kw)")
_global_kw_copy = EclPrototype("void ecl_grid_global_kw_copy(ecl_grid, ecl_kw, ecl_kw)")
_create_volume_keyword = EclPrototype("ecl_kw_obj ecl_grid_alloc_volume_kw(ecl_grid, bool)")
_use_mapaxes = EclPrototype("bool ecl_grid_use_mapaxes(ecl_grid)")
_export_coord = EclPrototype("ecl_kw_obj ecl_grid_alloc_coord_kw(ecl_grid)")
_export_zcorn = EclPrototype("ecl_kw_obj ecl_grid_alloc_zcorn_kw(ecl_grid)")
_export_actnum = EclPrototype("ecl_kw_obj ecl_grid_alloc_actnum_kw(ecl_grid)")
_export_mapaxes = EclPrototype("ecl_kw_obj ecl_grid_alloc_mapaxes_kw(ecl_grid)")
_get_unit_system = EclPrototype("ecl_unit_enum ecl_grid_get_unit_system(ecl_grid)")
_export_index_frame = EclPrototype("void ecl_grid_export_index(ecl_grid, int*, int*, bool)")
_export_data_as_int = EclPrototype("void ecl_grid_export_data_as_int(int, int*, ecl_kw, int*)", bind = False)
_export_data_as_double = EclPrototype("void ecl_grid_export_data_as_double(int, int*, ecl_kw, double*)", bind = False)
_export_volume = EclPrototype("void ecl_grid_export_volume(ecl_grid, int, int*, double*)")
_export_position = EclPrototype("void ecl_grid_export_position(ecl_grid, int, int*, double*)")
_export_corners = EclPrototype("void export_corners(ecl_grid, int, int*, double*)")
@classmethod
def load_from_grdecl(cls, filename):
"""Will create a new EclGrid instance from grdecl file.
This function will scan the input file @filename and look for
the keywords required to build a grid. The following keywords
are required:
SPECGRID ZCORN COORD
In addition the function will look for and use the ACTNUM and
MAPAXES keywords if they are found; if ACTNUM is not found all
cells are assumed to be active.
Slightly more exotic grid concepts like dual porosity, NNC
mapping, LGR and coarsened cells will be completely ignored;
if you need such concepts you must have an EGRID file and use
the default EclGrid() constructor - that is also considerably
faster.
"""
if os.path.isfile(filename):
with copen(filename) as f:
specgrid = EclKW.read_grdecl(f, "SPECGRID", ecl_type=EclDataType.ECL_INT, strict=False)
zcorn = EclKW.read_grdecl(f, "ZCORN")
coord = EclKW.read_grdecl(f, "COORD")
try:
actnum = EclKW.read_grdecl(f, "ACTNUM", ecl_type=EclDataType.ECL_INT)
except ValueError:
actnum = None
try:
mapaxes = EclKW.read_grdecl(f, "MAPAXES")
except ValueError:
mapaxes = None
return EclGrid.create(specgrid, zcorn, coord, actnum, mapaxes)
else:
raise IOError("No such file:%s" % filename)
@classmethod
def load_from_file(cls, filename):
"""
Will inspect the @filename argument and create a new EclGrid instance.
"""
if FortIO.isFortranFile(filename):
return EclGrid(filename)
else:
return EclGrid.loadFromGrdecl(filename)
@classmethod
def create(cls, specgrid, zcorn, coord, actnum, mapaxes=None):
"""
Create a new grid instance from existing keywords.
This is a class method which can be used to create an EclGrid
instance based on the EclKW instances @specgrid, @zcorn,
@coord and @actnum. An ECLIPSE EGRID file contains the
SPECGRID, ZCORN, COORD and ACTNUM keywords, so a somewhat
involved way to create a EclGrid instance could be:
file = ecl.EclFile("ECLIPSE.EGRID")
specgrid_kw = file.iget_named_kw("SPECGRID", 0)
zcorn_kw = file.iget_named_kw("ZCORN", 0)
coord_kw = file.iget_named_kw("COORD", 0)
actnum_kw = file.iget_named_kw("ACTNUM", 0)
grid = EclGrid.create(specgrid_kw, zcorn_kw, coord_kw, actnum_kw)
If you are so inclined ...
"""
return cls._grdecl_create(specgrid[0], specgrid[1], specgrid[2], zcorn, coord, actnum, mapaxes)
@classmethod
def create_rectangular(cls, dims, dV, actnum=None):
"""
Will create a new rectangular grid. @dims = (nx,ny,nz) @dVg = (dx,dy,dz)
With the default value @actnum == None all cells will be active,
"""
warnings.warn("EclGrid.createRectangular is deprecated. " +
"Please used the similar method in EclGridGenerator!",
DeprecationWarning)
if actnum is None:
ecl_grid = cls._alloc_rectangular(dims[0], dims[1], dims[2], dV[0], dV[1], dV[2], None)
else:
if not isinstance(actnum, IntVector):
tmp = IntVector(initial_size=len(actnum))
for (index, value) in enumerate(actnum):
tmp[index] = value
actnum = tmp
if not len(actnum) == dims[0] * dims[1] * dims[2]:
raise ValueError("ACTNUM size mismatch: len(ACTNUM):%d Expected:%d" % (len(actnum), dims[0] * dims[1] * dims[2]))
ecl_grid = cls._alloc_rectangular(dims[0], dims[1], dims[2], dV[0], dV[1], dV[2], actnum.getDataPtr())
# If we have not succeeded in creatin the grid we *assume* the
# error is due to a failed malloc.
if ecl_grid is None:
raise MemoryError("Failed to allocated regualar grid")
return ecl_grid
    def __init__(self, filename, apply_mapaxes=True):
        """
        Will create a grid structure from an EGRID or GRID file.

        Raises IOError if the file can not be loaded as a grid.
        """
        c_ptr = self._fread_alloc(filename, apply_mapaxes)
        if c_ptr:
            super(EclGrid, self).__init__(c_ptr)
        else:
            raise IOError("Loading grid from:%s failed" % filename)
    def free(self):
        # Release the underlying C grid structure.
        self._free()
    def _nicename(self):
        """name is often full path to grid, if so, output basename, else name"""
        name = self.getName()
        if os.path.isfile(name):
            name = os.path.basename(name)
        return name
    def __repr__(self):
        """Returns, e.g.:
        EclGrid("NORNE_ATW2013.EGRID", 46x112x22, global_size=113344, active_size=44431) at 0x28c4a70
        """
        name = self._nicename()
        if name:
            name = '"%s", ' % name
        # Summarize dimensions plus global/active cell counts.
        g_size = self.getGlobalSize()
        a_size = self.getNumActive()
        xyz_s = '%dx%dx%d' % (self.getNX(),self.getNY(),self.getNZ())
        return self._create_repr('%s%s, global_size=%d, active_size=%d' % (name, xyz_s, g_size, a_size))
    def __len__(self):
        """
        len(grid) will return the total number of cells.
        """
        return self._get_global_size()
    def equal(self, other, include_lgr=True, include_nnc=False, verbose=False):
        """
        Compare the current grid with the other grid.

        Raises TypeError unless @other is an EclGrid instance.
        """
        if not isinstance(other, EclGrid):
            raise TypeError("The other argument must be an EclGrid instance")
        return self._equal(other, include_lgr, include_nnc, verbose)
    def dual_grid(self):
        """Is this grid dual porosity model?"""
        return self._dual_grid()
    def get_dims(self):
        """A tuple of four elements: (nx, ny, nz, nactive)."""
        return (self.getNX(),
                self.getNY(),
                self.getNZ(),
                self.getNumActive())
    @property
    def nx(self):
        # Number of cells along the x axis.
        return self._get_nx()
    def get_nx(self):
        """ The number of elements in the x direction"""
        return self._get_nx()
    @property
    def ny(self):
        # Number of cells along the y axis.
        return self._get_ny()
    def get_ny(self):
        """ The number of elements in the y direction"""
        return self._get_ny()
    @property
    def nz(self):
        # Number of cells along the z axis.
        return self._get_nz()
    def get_nz(self):
        """ The number of elements in the z direction"""
        return self._get_nz()
    def get_global_size(self):
        """Returns the total number of cells in this grid"""
        return self._get_global_size()
    def get_num_active(self):
        """The number of active cells in the grid."""
        return self._get_active()
    def get_num_active_fracture(self):
        """The number of active fracture cells in the grid (dual porosity)."""
        return self._get_active_fracture()
    def get_bounding_box_2d(self, layer=0, lower_left=None, upper_right=None):
        """Return four (x,y) points bounding a rectangular region of @layer.

        @lower_left and @upper_right are optional (i,j) tuples; when
        omitted the full areal extent of the grid is used. The return
        value is a tuple (p0, p1, p2, p3) of (x,y) pairs in the order
        (i1,j1), (i2,j1), (i2,j2), (i1,j2).
        """
        if 0 <= layer <= self.getNZ():
            x = ctypes.c_double()
            y = ctypes.c_double()
            z = ctypes.c_double()
            if lower_left is None:
                i1 = 0
                j1 = 0
            else:
                i1,j1 = lower_left
                # NOTE(review): these checks reject an explicit lower_left of
                # (0,0) although that is exactly the default - confirm whether
                # they should read `0 <= i1` / `0 <= j1`.
                if not 0 < i1 < self.getNX():
                    raise ValueError("lower_left i coordinate invalid")
                if not 0 < j1 < self.getNY():
                    raise ValueError("lower_left j coordinate invalid")
            if upper_right is None:
                i2 = self.getNX()
                j2 = self.getNY()
            else:
                i2,j2 = upper_right
                if not 1 < i2 <= self.getNX():
                    raise ValueError("upper_right i coordinate invalid")
                if not 1 < j2 <= self.getNY():
                    raise ValueError("upper_right j coordinate invalid")
            if not i1 < i2:
                raise ValueError("Must have lower_left < upper_right")
            if not j1 < j2:
                raise ValueError("Must have lower_left < upper_right")
            self._get_corner_xyz(i1, j1, layer, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
            p0 = (x.value, y.value)
            self._get_corner_xyz(i2, j1, layer, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
            p1 = (x.value, y.value )
            self._get_corner_xyz( i2, j2, layer, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
            p2 = (x.value, y.value )
            self._get_corner_xyz(i1, j2, layer, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
            p3 = (x.value, y.value )
            return (p0,p1,p2,p3)
        else:
            raise ValueError("Invalid layer value:%d Valid range: [0,%d]" % (layer, self.getNZ()))
    def get_name(self):
        """
        Name of the current grid, returns a string.

        For the main grid this is the filename given to the
        constructor when loading the grid; for an LGR this is the name
        of the LGR. If the grid instance has been created with the
        create() classmethod this can be None.
        """
        n = self._get_name()
        # The C layer may return a null name; normalize to an empty string.
        return str(n) if n else ''
    def cell(self, global_index=None, active_index=None, i=None, j=None, k=None):
        """Return a Cell for the cell addressed by one of the three schemes.

        NOTE(review): if none of the addressing arguments are supplied
        the method implicitly returns None.
        """
        if global_index is not None:
            return Cell(self, global_index)
        if active_index is not None:
            return Cell(self, self.global_index(active_index=active_index))
        if i is not None:
            # j and k must accompany i; global_index() bounds-checks the triple.
            return Cell(self, self.global_index(ijk=(i,j,k)))
    def __getitem__(self, global_index):
        # Accept either a global index or an (i,j,k) tuple.
        if isinstance(global_index, tuple):
            i,j,k = global_index
            return self.cell(i=i, j=j, k=k)
        return self.cell(global_index=global_index)
    def __iter__(self):
        # Iterate over every cell (active and inactive) in global-index order.
        for i in range(len(self)):
            yield self[i]
def cells(self, active=False):
"""Iterator over all the (active) cells"""
if not active:
for c in self:
yield c
else:
for i in range(self.get_num_active()):
yield self.cell(active_index=i)
    def global_index(self, active_index=None, ijk=None):
        """
        Will convert either active_index or (i,j,k) to global index.
        """
        # Delegates to the private normalization helper.
        return self.__global_index(active_index=active_index, ijk=ijk)
def __global_index(self, active_index=None, global_index=None, ijk=None):
"""
Will convert @active_index or @ijk to global_index.
This method will convert @active_index or @ijk to a global
index. Exactly one of the arguments @active_index,
@global_index or @ijk must be supplied.
The method is used extensively internally in the EclGrid
class; most methods which take coordinate input pass through
this method to normalize the coordinate representation.
"""
set_count = 0
if not active_index is None:
set_count += 1
if not global_index is None:
set_count += 1
if ijk:
set_count += 1
if not set_count == 1:
raise ValueError("Exactly one of the kewyord arguments active_index, global_index or ijk must be set")
if not active_index is None:
global_index = self._get_global_index1A( active_index)
elif ijk:
nx = self.getNX()
ny = self.getNY()
nz = self.getNZ()
i,j,k = ijk
if not 0 <= i < nx:
raise IndexError("Invalid value i:%d Range: [%d,%d)" % (i, 0, nx))
if not 0 <= j < ny:
raise IndexError("Invalid value j:%d Range: [%d,%d)" % (j, 0, ny))
if not 0 <= k < nz:
raise IndexError("Invalid value k:%d Range: [%d,%d)" % (k, 0, nz))
global_index = self._get_global_index3(i,j,k)
else:
if not 0 <= global_index < self.getGlobalSize():
raise IndexError("Invalid value global_index:%d Range: [%d,%d)" % (global_index, 0, self.getGlobalSize()))
return global_index
    def get_active_index(self, ijk=None, global_index=None):
        """
        Lookup active index based on ijk or global index.

        Will determine the active_index of a cell, based on either
        @ijk = (i,j,k) or @global_index. If the cell specified by the
        input arguments is not active the function will return -1.
        """
        gi = self.__global_index(global_index=global_index, ijk=ijk)
        return self._get_active_index1(gi)
    def get_active_fracture_index(self, ijk=None, global_index=None):
        """
        For dual porosity - get the active fracture index.
        """
        gi = self.__global_index(global_index=global_index, ijk=ijk)
        return self._get_active_fracture_index1(gi)
    def get_global_index1F(self, active_fracture_index):
        """
        Will return the global index corresponding to active fracture index.
        """
        return self._get_global_index1F(active_fracture_index)
    def cell_invalid(self, ijk=None, global_index=None, active_index=None):
        """
        Tries to check if a cell is invalid.

        Cells which are used to represent numerical aquifers are
        typically located in UTM position (0,0); these cells have
        completely whacked up shape and size, and should **NOT** be
        used in calculations involving real world coordinates. To
        protect against this a heuristic is used to identify such cells
        and mark them as invalid. There might be other sources than
        numerical aquifers to this problem.
        """
        gi = self.__global_index(global_index=global_index, ijk=ijk, active_index=active_index)
        return self._invalid_cell(gi)
    def valid_cell_geometry(self, ijk=None, global_index=None, active_index=None):
        """Checks if the cell has valid geometry.

        There are at least two reasons why a cell might have invalid
        geometry:

        1. In the case of GRID files it is not necessary to supply
           the geometry for all the cells; in that case this
           function will return false for cells which do not have
           valid coordinates.

        2. Cells which are used to represent numerical aquifers are
           typically located in UTM position (0,0); these cells have
           completely whacked up shape and size; these cells are
           identified by a heuristic - which might fail

        If the validCellGeometry() returns false for a particular
        cell functions which calculate cell volumes, real world
        coordinates and so on - should not be used.
        """
        gi = self.__global_index(global_index=global_index, ijk=ijk, active_index=active_index)
        return self._valid_cell(gi)
def active(self, ijk=None, global_index=None):
"""
Is the cell active?
See documentation og get_xyz() for explanation of parameters
@ijk and @global_index.
"""
gi = self.__global_index(global_index=global_index, ijk=ijk)
active_index = self._get_active_index1(gi)
if active_index >= 0:
return True
else:
return False
    def get_global_index(self, ijk=None, active_index=None):
        """
        Lookup global index based on ijk or active index.
        """
        gi = self.__global_index(active_index=active_index, ijk=ijk)
        return gi
    def get_ijk(self, active_index=None, global_index=None):
        """
        Lookup (i,j,k) for a cell, based on either active index or global index.

        The return value is a tuple with three elements (i,j,k).
        """
        # Output parameters for the C call.
        i = ctypes.c_int()
        j = ctypes.c_int()
        k = ctypes.c_int()
        gi = self.__global_index(active_index=active_index, global_index=global_index)
        self._get_ijk1(gi, ctypes.byref(i), ctypes.byref(j), ctypes.byref(k))
        return (i.value, j.value, k.value)
    def get_xyz(self, active_index=None, global_index=None, ijk=None):
        """
        Find true position of cell center.

        Will return world position of the center of a cell in the
        grid. The return value is a tuple of three elements:
        (utm_x, utm_y, depth).

        The cells of a grid can be specified in three different ways:

           (i,j,k)      : As a tuple of i,j,k values.

           global_index : A number in the range [0,nx*ny*nz). The
                          global index is related to (i,j,k) as:

                            global_index = i + j*nx + k*nx*ny

           active_index : A number in the range [0,nactive).

        For many of the EclGrid methods a cell can be specified using
        any of these three methods. Observe that one and only method is
        allowed:

        OK:
            pos1 = grid.get_xyz(active_index=100)
            pos2 = grid.get_xyz(ijk=(10,20,7))

        Crash and burn:
            pos3 = grid.get_xyz(ijk=(10,20,7), global_index=10)
            pos4 = grid.get_xyz()

        All the indices in the EclGrid() class are zero offset, this
        is in contrast to ECLIPSE which has an offset 1 interface.
        """
        gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
        # Output parameters for the C call.
        x = ctypes.c_double()
        y = ctypes.c_double()
        z = ctypes.c_double()
        self._get_xyz1(gi, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
        return (x.value, y.value, z.value)
def get_node_pos(self, i, j, k):
"""Will return the (x,y,z) for the node given by (i,j,k).
Observe that this method does not consider cells, but the
nodes in the grid. This means that the valid input range for
i,j and k are are upper end inclusive. To get the four
bounding points of the lower layer of the grid:
p0 = grid.getNodePos(0, 0, 0)
p1 = grid.getNodePos(grid.getNX(), 0, 0)
p2 = grid.getNodePos(0, grid.getNY(), 0)
p3 = grid.getNodePos(grid.getNX(), grid.getNY(), 0)
"""
if not 0 <= i <= self.getNX():
raise IndexError("Invalid I value:%d - valid range: [0,%d]" % (i, self.getNX()))
if not 0 <= j <= self.getNY():
raise IndexError("Invalid J value:%d - valid range: [0,%d]" % (j, self.getNY()))
if not 0 <= k <= self.getNZ():
raise IndexError("Invalid K value:%d - valid range: [0,%d]" % (k, self.getNZ()))
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
self._get_corner_xyz(i,j,k, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
return (x.value, y.value, z.value)
    def get_cell_corner(self, corner_nr, active_index=None, global_index=None, ijk=None):
        """
        Will look up xyz of corner nr @corner_nr

        Corner numbering scheme:

          lower layer:   upper layer
                         
           2---3           6---7
           |   |           |   |
           0---1           4---5
        """
        gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
        # Output parameters for the C call.
        x = ctypes.c_double()
        y = ctypes.c_double()
        z = ctypes.c_double()
        self._get_cell_corner_xyz1(gi, corner_nr, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
        return (x.value, y.value, z.value)
def get_node_xyz(self, i,j,k):
"""
This function returns the position of Vertex (i,j,k).
The coordinates are in the inclusive interval [0,nx] x [0,ny] x [0,nz].
"""
nx = self.getNX()
ny = self.getNY()
nz = self.getNZ()
corner = 0
if i == nx:
i -= 1
corner += 1
if j == ny:
j -= 1
corner += 2
if k == nz:
k -= 1
corner += 4
if self._ijk_valid(i, j, k):
return self.getCellCorner(corner, global_index=i + j*nx + k*nx*ny)
else:
raise IndexError("Invalid coordinates: (%d,%d,%d) " % (i,j,k))
    def get_layer_xyz(self, xy_corner, layer):
        # Map a flat corner index in the (nx+1) wide node lattice to (i,j),
        # then look the node up in the requested layer.
        nx = self.getNX()
        (j, i) = divmod(xy_corner, nx + 1)
        k = layer
        return self.getNodeXYZ(i,j,k)
    def distance(self, global_index1, global_index2):
        # (dx,dy,dz) between the two cells - presumably between cell
        # centers; confirm against ecl_grid_get_distance in the C layer.
        dx = ctypes.c_double()
        dy = ctypes.c_double()
        dz = ctypes.c_double()
        self._get_distance(global_index1, global_index2, ctypes.byref(dx), ctypes.byref(dy), ctypes.byref(dz))
        return (dx.value, dy.value, dz.value)
    def depth(self, active_index=None, global_index=None, ijk=None):
        """
        Depth of the center of a cell.

        Returns the depth of the center of the cell given by
        @active_index, @global_index or @ijk. See method get_xyz() for
        documentation of @active_index, @global_index and @ijk.
        """
        gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
        return self._get_depth( gi)
    def top(self, i, j):
        """
        Top of the reservoir; in the column (@i, @j).

        Returns average depth of the four top corners.
        """
        return self._get_top(i, j)
def top_active(self, i, j):
"""
Top of the active part of the reservoir; in the column (@i, @j).
Raises ValueError if (i,j) column is inactive.
"""
for k in range(self.getNZ()):
a_idx = self.get_active_index(ijk=(i,j,k))
if a_idx >= 0:
return self._get_top1A(a_idx)
raise ValueError('No active cell in column (%d,%d)' % (i,j))
    def bottom(self, i, j):
        """
        Bottom of the reservoir; in the column (@i, @j).
        """
        return self._get_bottom( i, j)
    def locate_depth(self, depth, i, j):
        """
        Will locate the k value of cell containing specified depth.

        Will scan through the grid column specified by the input
        arguments @i and @j and search for a cell containing the depth
        given by input argument @depth. The return value is the k
        value of cell containing @depth.

        If @depth is above the top of the reservoir the function will
        return -1, and if @depth is below the bottom of the reservoir
        the function will return -nz.
        """
        return self._locate_depth( depth, i, j)
def find_cell(self, x, y, z, start_ijk=None):
    """Locate the (i, j, k) of the cell containing true position (x, y, z).

    The underlying C lookup can be slow; supplying a good initial
    guess through @start_ijk (an (i, j, k) tuple) can speed it up
    substantially.  Returns None when the point is not in the grid.
    """
    start_index = 0
    if start_ijk:
        start_index = self.__global_index(ijk=start_ijk)
    global_index = self._get_ijk_xyz(x, y, z, start_index)
    if global_index < 0:
        return None
    i = ctypes.c_int()
    j = ctypes.c_int()
    k = ctypes.c_int()
    self._get_ijk1(global_index,
                   ctypes.byref(i), ctypes.byref(j), ctypes.byref(k))
    return (i.value, j.value, k.value)
def cell_contains(self, x, y, z, active_index=None, global_index=None, ijk=None):
    """Return whether the cell contains the world point (x, y, z).

    See get_xyz() for documentation of @active_index, @global_index
    and @ijk.
    """
    gi = self.__global_index(ijk=ijk,
                             active_index=active_index,
                             global_index=global_index)
    return self._cell_contains(gi, x, y, z)
def find_cell_xy(self, x, y, k):
    """Return (i, j) of the cell with utm coordinates (@x, @y) in layer @k.

    Valid layers are 0..nz inclusive; an IndexError is raised for any
    other @k, and a ValueError is raised when (@x, @y) falls outside
    the grid.
    """
    if not 0 <= k <= self.getNZ():
        raise IndexError("Invalid layer value:%d" % k)
    i = ctypes.c_int()
    j = ctypes.c_int()
    found = self._get_ij_xy(x, y, k, ctypes.byref(i), ctypes.byref(j))
    if not found:
        raise ValueError("Could not find the point:(%g,%g) in layer:%d" % (x, y, k))
    return (i.value, j.value)
def find_cell_corner_xy(self, x, y, k):
    """Return the node number of the grid corner closest to utm (@x, @y).

    The @k input is the layer you are interested in; the allowed
    values for k are [0, nz].  If (@x, @y) falls outside the grid a
    ValueError is raised (by findCellXY()).
    """
    i, j = self.findCellXY(x, y, k)
    # Layer nz addresses the bottom corners (4-7) of the last cell layer.
    if k == self.getNZ():
        k -= 1
        corner_shift = 4
    else:
        corner_shift = 0
    nx = self.getNX()
    # Node numbers of the four in-layer corners of cell (i, j); the
    # corner-node lattice is (nx + 1) nodes wide.
    node_numbers = (i + j * (nx + 1),
                    i + 1 + j * (nx + 1),
                    i + (j + 1) * (nx + 1),
                    i + 1 + (j + 1) * (nx + 1))
    candidates = []
    for corner, node in enumerate(node_numbers):
        cx, cy, _ = self.getCellCorner(corner + corner_shift, ijk=(i, j, k))
        dist = math.sqrt((cx - x) * (cx - x) + (cy - y) * (cy - y))
        candidates.append((dist, node))
    # min() over (distance, node) pairs replaces the original full sort
    # whose key lambda shadowed the parameter 'k'; ties on distance are
    # broken by the smaller node number, which matches the original
    # stable sort because node numbers increase in insertion order.
    return min(candidates)[1]
def cell_regular(self, active_index=None, global_index=None, ijk=None):
    """Return True if the cell passes a moderate sanity check.

    ECLIPSE grid models often contain degenerate cells which are
    twisted or have overlapping corners.  The check performed here is
    essentially whether the cell contains its own center point -
    which is actually not as trivial as it sounds.
    """
    gi = self.__global_index(ijk=ijk,
                             active_index=active_index,
                             global_index=global_index)
    return self._cell_regular(gi)
def cell_volume(self, active_index=None, global_index=None, ijk=None):
    """Return the total volume of a cell.

    See get_xyz() for documentation of @active_index, @global_index
    and @ijk.
    """
    gi = self.__global_index(ijk=ijk,
                             active_index=active_index,
                             global_index=global_index)
    return self._get_cell_volume(gi)
def cell_dz(self, active_index=None, global_index=None, ijk=None):
    """Return the (average) thickness of a cell.

    See get_xyz() for documentation of @active_index, @global_index
    and @ijk.
    """
    gi = self.__global_index(ijk=ijk,
                             active_index=active_index,
                             global_index=global_index)
    return self._get_cell_thickness(gi)
def get_cell_dims(self, active_index=None, global_index=None, ijk=None):
    """Return a (dx, dy, dz) tuple with the cell dimensions.

    The dx and dy values are best-effort estimates of the cell size
    along the i and j directions; the three values are guaranteed to
    satisfy dx * dy * dz == dV.  See get_xyz() for documentation of
    @active_index, @global_index and @ijk.
    """
    gi = self.__global_index(ijk=ijk,
                             active_index=active_index,
                             global_index=global_index)
    return (self._get_cell_dx(gi),
            self._get_cell_dy(gi),
            self._get_cell_thickness(gi))
def get_num_lgr(self):
    """Return the number of LGRs attached to this main grid.

    The grid instance doing the query must itself be a main grid.
    """
    return self._num_lgr()
def has_lgr(self, lgr_name):
    """Return True if the grid has an LGR with name @lgr_name."""
    # bool() normalizes the native call's truth value; the original
    # if/else returning True/False explicitly was redundant.
    return bool(self._has_named_lgr(lgr_name))
def get_lgr(self, lgr_key):
    """Return an EclGrid instance for the LGR identified by @lgr_key.

    @lgr_key can be either the name of an LGR or its grid number.
    The returned LGR grid instance is mostly like an ordinary grid
    instance; the only difference is that it can not be used for
    further LGR queries.  Raises KeyError when the grid contains no
    LGR with this name/number.
    """
    if isinstance(lgr_key, int):
        found = self._has_numbered_lgr(lgr_key)
        lgr = self._get_numbered_lgr(lgr_key) if found else None
    else:
        found = self._has_named_lgr(lgr_key)
        lgr = self._get_named_lgr(lgr_key) if found else None
    if lgr is None:
        raise KeyError("No such LGR: %s" % lgr_key)
    lgr.setParent(self)
    return lgr
def get_cell_lgr(self, active_index=None, global_index=None, ijk=None):
    """Return the LGR grid instance refined into the addressed cell.

    Queries whether the cell given by @active_index, @global_index or
    @ijk has been refined with an LGR; see get_xyz() for documentation
    of those arguments.  Raises IndexError when the cell has not been
    refined.
    """
    gi = self.__global_index(ijk=ijk,
                             active_index=active_index,
                             global_index=global_index)
    lgr = self._get_cell_lgr(gi)
    if not lgr:
        raise IndexError("No LGR defined for this cell")
    lgr.setParent(self)
    return lgr
def grid_value(self, kw, i, j, k):
    """Evaluate @kw in location (@i, @j, @k).

    ECLIPSE properties and solution vectors are stored in restart and
    init files as 1D vectors of length nx*ny*nz or nactive.  This is
    a minor convenience function: depending on len(kw) the (i, j, k)
    input is converted to either a global or an active 1D index.  Any
    other keyword length will fail hard.
    """
    return self._grid_value(kw, i, j, k)
def load_column(self, kw, i, j, column):
    """Load the k-column of @kw at fixed (@i, @j) into @column.

    The size of @kw must be either nactive or nx*ny*nz.  @column
    should be a DoubleVector instance; observe that when len(kw) ==
    nactive the entries corresponding to inactive cells are left
    untouched, so @column should be initialized with a suitable
    default value beforehand.
    """
    self._load_column(kw, i, j, column)
def create_kw(self, array, kw_name, pack):
    """Create an EclKW instance from an existing 3D numpy array.

    The method create3D() performs the inverse operation.  If @pack
    is true the resulting keyword only holds values for the active
    cells and has length nactive; otherwise it has length nx*ny*nz.
    The numpy array must have shape (nx, ny, nz); a ValueError is
    raised otherwise.
    """
    if array.ndim == 3:
        dims = array.shape
        if dims[0] == self.getNX() and dims[1] == self.getNY() and dims[2] == self.getNZ():
            dtype = array.dtype
            # Renamed from 'type' to avoid shadowing the builtin.
            if dtype == numpy.int32:
                data_type = EclDataType.ECL_INT
            elif dtype == numpy.float32:
                data_type = EclDataType.ECL_FLOAT
            elif dtype == numpy.float64:
                data_type = EclDataType.ECL_DOUBLE
            else:
                # NOTE(review): sys.exit() in a library is hostile to
                # callers; kept as-is to preserve existing behavior.
                sys.exit("Do not know how to create ecl_kw from type:%s" % dtype)

            if pack:
                size = self.getNumActive()
            else:
                size = self.getGlobalSize()

            if len(kw_name) > 8:
                # Silently truncate to length 8 - ECLIPSE has it's challenges.
                kw_name = kw_name[0:8]

            kw = EclKW(kw_name, size, data_type)
            active_index = 0
            global_index = 0
            # k-major iteration matches ECLIPSE's natural 1D cell ordering.
            for k in range(self.getNZ()):
                for j in range(self.getNY()):
                    for i in range(self.getNX()):
                        if pack:
                            if self.active(global_index=global_index):
                                kw[active_index] = array[i, j, k]
                                active_index += 1
                        else:
                            if dtype == numpy.int32:
                                kw[global_index] = int(array[i, j, k])
                            else:
                                kw[global_index] = array[i, j, k]

                        global_index += 1
            return kw
    raise ValueError("Wrong size / dimension on array")
def coarse_groups(self):
    """Return the number of coarse groups in this grid."""
    return self._num_coarse_groups()
def in_coarse_group(self, global_index=None, ijk=None, active_index=None):
    """Return True or False if the cell is part of a coarse group."""
    gi = self.__global_index(active_index=active_index,
                             ijk=ijk,
                             global_index=global_index)
    return self._in_coarse_group1(gi)
def create_3d(self, ecl_kw, default = 0):
    """
    Creates a 3D numpy array object with the data from @ecl_kw.

    Observe that 3D numpy object is a copy of the data in the
    EclKW instance, i.e. modification to the numpy object will not
    be reflected in the ECLIPSE keyword.

    The methods createKW() does the inverse operation; creating an
    EclKW instance from a 3D numpy object.

    Alternative: Creating the numpy array object is not very
    efficient; if you only need a limited number of elements from
    the ecl_kw instance it might be wiser to use the grid_value()
    method:

       value = grid.grid_value(ecl_kw, i, j, k)
    """
    if len(ecl_kw) == self.getNumActive() or len(ecl_kw) == self.getGlobalSize():
        # Start from a buffer filled with @default; a global-sized input
        # overwrites every entry below.
        array = numpy.ones([ self.getGlobalSize() ], dtype=ecl_kw.dtype) * default
        kwa = ecl_kw.array
        if len(ecl_kw) == self.getGlobalSize():
            for i in range(kwa.size):
                array[i] = kwa[i]
        else:
            # NOTE(review): this loop indexes kwa with the active index of
            # every global cell; presumably _get_active_index1() returns a
            # usable index for inactive cells as well -- if it can return
            # -1, the @default value would silently be replaced by kwa[-1].
            # TODO: confirm against the native implementation.
            for global_index in range(self.getGlobalSize()):
                active_index = self._get_active_index1(global_index)
                array[global_index] = kwa[active_index]

        # Fortran ordering matches the k-major cell numbering used by ECLIPSE.
        array = array.reshape([self.getNX(), self.getNY(), self.getNZ()], order='F')
        return array
    else:
        err_msg_fmt = 'Keyword "%s" has invalid size %d; must be either nactive=%d or nx*ny*nz=%d'
        err_msg = err_msg_fmt % (ecl_kw, len(ecl_kw), self.getNumActive(),
                                 self.getGlobalSize())
        raise ValueError(err_msg)
def save_grdecl(self, pyfile, output_unit=EclUnitTypeEnum.ECL_METRIC_UNITS):
    """Write the grid content as grdecl formatted keywords to @pyfile.

    Only the main grid is written.
    """
    self._fprintf_grdecl2(CFILE(pyfile), output_unit)
def save_EGRID(self, filename, output_unit=None):
    """Write the grid to @filename as an EGRID file.

    When @output_unit is None the grid's own unit system is used.
    """
    unit = self.unit_system if output_unit is None else output_unit
    self._fwrite_EGRID2(filename, unit)
def save_GRID(self, filename, output_unit=EclUnitTypeEnum.ECL_METRIC_UNITS):
    """Write the current grid to @filename as a GRID file."""
    self._fwrite_GRID2(filename, output_unit)
def write_grdecl(self, ecl_kw, pyfile, special_header=None, default_value=0):
    """Write an EclKW instance as an ECLIPSE grdecl formatted file.

    @ecl_kw must be an EclKW instance of size nactive or nx*ny*nz; in
    the nactive case the inactive cells are filled with
    @default_value, so nx*ny*nz elements are always written.  The
    data can be of type integer, float, double or bool; for bool the
    default value must be given as 1 (True) or 0 (False).  @pyfile
    should be a valid python filehandle opened for writing, i.e.:

       pyfile = open("PORO.GRDECL", "w")
       grid.write_grdecl(poro_kw , pyfile, default_value=0.0)
       grid.write_grdecl(permx_kw, pyfile, default_value=0.0)
       pyfile.close()

    Raises ValueError for any other keyword size.
    """
    if len(ecl_kw) not in (self.getNumActive(), self.getGlobalSize()):
        raise ValueError("Keyword: %s has invalid size(%d), must be either nactive:%d or nx*ny*nz:%d" % (ecl_kw.getName(), len(ecl_kw), self.getNumActive(), self.getGlobalSize()))
    self._fwrite_grdecl(ecl_kw, special_header, CFILE(pyfile), default_value)
def exportACTNUM(self):
    """Return an IntVector with the ACTNUM value of every global cell."""
    active_flags = IntVector(initial_size=self.getGlobalSize())
    self._init_actnum(active_flags.getDataPtr())
    return active_flags
def compressed_kw_copy(self, kw):
    """Return a copy of @kw with one element per active cell.

    A keyword of size nactive is copied as-is; a keyword of size
    nx*ny*nz is compressed down to the active cells.  Raises
    ValueError for any other size.
    """
    if len(kw) == self.getNumActive():
        return kw.copy()
    elif len(kw) == self.getGlobalSize():
        kw_copy = EclKW(kw.getName(), self.getNumActive(), kw.data_type)
        self._compressed_kw_copy(kw_copy, kw)
        return kw_copy
    else:
        # Message fixed: the original said 'nx*n*nz'.
        raise ValueError("The input keyword must have nx*ny*nz or nactive elements. Size:%d invalid" % len(kw))
def global_kw_copy(self, kw, default_value):
    """Return a copy of @kw with one element per global cell.

    A keyword of size nx*ny*nz is copied as-is; a keyword of size
    nactive is expanded to global size with the inactive cells set to
    @default_value.  Raises ValueError for any other size.
    """
    if len(kw) == self.getGlobalSize():
        return kw.copy()
    elif len(kw) == self.getNumActive():
        kw_copy = EclKW(kw.getName(), self.getGlobalSize(), kw.data_type)
        kw_copy.assign(default_value)
        self._global_kw_copy(kw_copy, kw)
        return kw_copy
    else:
        # Message fixed: the original said 'nx*n*nz'.
        raise ValueError("The input keyword must have nx*ny*nz or nactive elements. Size:%d invalid" % len(kw))
def export_ACTNUM_kw(self):
    """Return the grid's ACTNUM values as an integer EclKW."""
    actnum_kw = EclKW("ACTNUM", self.getGlobalSize(), EclDataType.ECL_INT)
    self._init_actnum(actnum_kw.getDataPtr())
    return actnum_kw
def create_volume_keyword(self, active_size=True):
    """Return an EclKW initialized with the volume of every cell.

    The keyword can be used for volume summations, e.g. oil in place:

       soil        = 1 - sgas - swat
       cell_volume = grid.createVolumeKeyword()
       tmp         = cell_volume * soil
       oip         = tmp.sum()

    Compared with iterating over a saturation keyword in Python this
    has two advantages: the expensive cell volume calculation is done
    once and can be reused, and the compact 'oip = cell_volume * soil'
    form runs the inner loop in C.  By default the keyword only has
    values for the active cells; pass @active_size=False to get
    volumes for all cells in the grid.
    """
    return self._create_volume_keyword(active_size)
def export_index(self, active_only = False):
    """Return a pandas DataFrame with index data for the grid cells.

    The frame is indexed by global_index; columns 'i', 'j', 'k' hold
    the ijk coordinates and column 'active' the active index.  With
    @active_only=True only active cells are listed, otherwise all
    cells are.  The frame is the expected input to export_data(),
    export_volume() and export_corners().
    """
    size = self.get_num_active() if active_only else self.get_global_size()
    frame_index = numpy.zeros(size, dtype=numpy.int32)
    frame_data = numpy.zeros([size, 4], dtype=numpy.int32)
    self._export_index_frame(
        frame_index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
        frame_data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
        active_only)
    return pandas.DataFrame(data=frame_data, index=frame_index,
                            columns=['i', 'j', 'k', 'active'])
def export_data(self, index_frame, kw, default = 0):
    """Return a numpy vector with the values of @kw for @index_frame.

    @index_frame must be a pandas DataFrame with the structure
    produced by export_index(); the output has one entry per frame
    row.  @kw must have size global_size or num_active; for an
    active-sized keyword the entries of inactive cells are set to
    @default.  Raises TypeError for a non-DataFrame frame or an
    unsupported keyword type, ValueError for an invalid keyword size.
    """
    if not isinstance(index_frame, pandas.DataFrame):
        raise TypeError("index_frame must be pandas.DataFrame")
    if len(kw) == self.get_global_size():
        index = numpy.array(index_frame.index, dtype=numpy.int32)
    elif len(kw) == self.get_num_active():
        index = numpy.array(index_frame["active"], dtype=numpy.int32)
    else:
        raise ValueError("The keyword must have a 3D compatible length")
    index_ptr = index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
    if kw.type is EclTypeEnum.ECL_INT_TYPE:
        data = numpy.full(len(index), default, dtype=numpy.int32)
        self._export_data_as_int(len(index), index_ptr, kw,
                                 data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)))
        return data
    if kw.type is EclTypeEnum.ECL_FLOAT_TYPE or kw.type is EclTypeEnum.ECL_DOUBLE_TYPE:
        data = numpy.full(len(index), default, dtype=numpy.float64)
        self._export_data_as_double(len(index), index_ptr, kw,
                                    data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
        return data
    raise TypeError("Keyword must be either int, float or double.")
def export_volume(self, index_frame):
    """Return a numpy vector of cell volumes for the rows of @index_frame.

    @index_frame must be a pandas DataFrame with the structure
    produced by export_index().
    """
    index = numpy.array(index_frame.index, dtype=numpy.int32)
    volumes = numpy.zeros(len(index), dtype=numpy.float64)
    self._export_volume(len(index),
                        index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
                        volumes.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
    return volumes
def export_position(self, index_frame):
    """Return an (n, 3) numpy matrix of cell position coordinates.

    Columns 0, 1, 2 hold x, y and z respectively.  @index_frame must
    be a pandas DataFrame with the structure produced by
    export_index().
    """
    index = numpy.array(index_frame.index, dtype=numpy.int32)
    positions = numpy.zeros([len(index), 3], dtype=numpy.float64)
    self._export_position(len(index),
                          index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
                          positions.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
    return positions
def export_corners(self, index_frame):
    """Return an (n, 24) numpy matrix of cell corner coordinates.

    @index_frame must be a pandas DataFrame with the structure
    produced by export_index().  Each output row holds the eight
    corners of one cell:

       0   1   2  ....  21  22  23
       x1  y1  z1 ....  x8  y8  z8

    Corners 0-3 constitute one layer and corners 4-7 the other;
    observe that the numbering does not follow a consistent rotation
    around the face:

                                        j
       6---7                           /|\\
       |   |                            |
       4---5                            |
                                        |
                                        o----------> i
       2---3
       |   |
       0---1

    Many grids are left-handed, i.e. the direction of increasing z
    points down towards the center of the earth, so layer 4-7 will be
    deeper in the reservoir than layer 0-3 and have the higher
    z-values.

    Warning: the main author of this code suspects the coordinate
    system can be right-handed as well, giving a z axis which
    increases 'towards the sky'; the safest way is to check this
    explicitly if it matters for the case at hand.
    """
    index = numpy.array(index_frame.index, dtype=numpy.int32)
    corners = numpy.zeros([len(index), 24], dtype=numpy.float64)
    self._export_corners(len(index),
                         index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
                         corners.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
    return corners
def export_coord(self):
    """Return the grid's COORD keyword data from the native layer."""
    return self._export_coord()
def export_zcorn(self):
    """Return the grid's ZCORN keyword data from the native layer."""
    return self._export_zcorn()
def export_actnum(self):
    """Return the grid's ACTNUM keyword data from the native layer."""
    return self._export_actnum()
def export_mapaxes(self):
    """Return the MAPAXES content, or None when the grid defines none."""
    if self._use_mapaxes():
        return self._export_mapaxes()
    return None
@property
def unit_system(self):
    """The unit system of the grid, as reported by the native layer."""
    return self._get_unit_system()
# Backwards-compatible camelCase aliases: each call attaches the old
# camelCase method name to EclGrid, delegating to the snake_case
# implementation (the optional third argument marks classmethods).
monkey_the_camel(EclGrid, 'loadFromGrdecl', EclGrid.load_from_grdecl, classmethod)
monkey_the_camel(EclGrid, 'loadFromFile', EclGrid.load_from_file, classmethod)
monkey_the_camel(EclGrid, 'createRectangular', EclGrid.create_rectangular, classmethod)
monkey_the_camel(EclGrid, 'dualGrid', EclGrid.dual_grid)
monkey_the_camel(EclGrid, 'getDims', EclGrid.get_dims)
monkey_the_camel(EclGrid, 'getNX', EclGrid.get_nx)
monkey_the_camel(EclGrid, 'getNY', EclGrid.get_ny)
monkey_the_camel(EclGrid, 'getNZ', EclGrid.get_nz)
monkey_the_camel(EclGrid, 'getGlobalSize', EclGrid.get_global_size)
monkey_the_camel(EclGrid, 'getNumActive', EclGrid.get_num_active)
monkey_the_camel(EclGrid, 'getNumActiveFracture', EclGrid.get_num_active_fracture)
monkey_the_camel(EclGrid, 'getBoundingBox2D', EclGrid.get_bounding_box_2d)
monkey_the_camel(EclGrid, 'getName', EclGrid.get_name)
monkey_the_camel(EclGrid, 'validCellGeometry', EclGrid.valid_cell_geometry)
monkey_the_camel(EclGrid, 'getNodePos', EclGrid.get_node_pos)
monkey_the_camel(EclGrid, 'getCellCorner', EclGrid.get_cell_corner)
monkey_the_camel(EclGrid, 'getNodeXYZ', EclGrid.get_node_xyz)
monkey_the_camel(EclGrid, 'getLayerXYZ', EclGrid.get_layer_xyz)
monkey_the_camel(EclGrid, 'findCellXY', EclGrid.find_cell_xy)
monkey_the_camel(EclGrid, 'findCellCornerXY', EclGrid.find_cell_corner_xy)
monkey_the_camel(EclGrid, 'getCellDims', EclGrid.get_cell_dims)
monkey_the_camel(EclGrid, 'getNumLGR', EclGrid.get_num_lgr)
monkey_the_camel(EclGrid, 'createKW', EclGrid.create_kw)
monkey_the_camel(EclGrid, 'create3D', EclGrid.create_3d)
monkey_the_camel(EclGrid, 'compressedKWCopy', EclGrid.compressed_kw_copy)
monkey_the_camel(EclGrid, 'globalKWCopy', EclGrid.global_kw_copy)
monkey_the_camel(EclGrid, 'exportACTNUMKw', EclGrid.export_ACTNUM_kw)
monkey_the_camel(EclGrid, 'createVolumeKeyword', EclGrid.create_volume_keyword)
|
There comes a time in every home or business owner’s life when they need to make an investment in the roof over their head. If that time is quickly approaching for you, make sure you’re getting the most out of your investment and that you’re putting your trust in a roofer in Omaha, NE who truly understands the complexities of the job.
A & P Construction Inc. has been delivering top quality residential and commercial roofing in Omaha, NE for more than four decades. Our experience and workmanship has garnered us a reputation for quality and thoroughness, giving our customers peace of mind when choosing us for any and all of their new roofing needs. Take a look at just a couple of the ways we’re dedicated to guaranteeing your new roofing investment.
Superior materials: By using only the finest-grade materials, from industry leading brands like Certainteed and Malarkey Roofing, we’ll craft a new roof for your home or business that’s resilient, reliable and aesthetically pleasing. We put our trust in the materials we use, which makes it easy for our customers to put their trust in our work. In the end, we guarantee a roof that will serve you for years to come.
Diverse experience: Whether your roofing in Omaha, NE comprises shingles, shakes, tile or asphalt, our experienced roofers in Omaha, NE have worked with it before. Our contractors have a vast depth of knowledge regarding the most common types of roofing and can install brand new materials with precision and excellence to fully replace worn or damaged roofs.
Roofing Contractor Omaha NE — As a fully licensed, bonded and insured roofer in Omaha, NE, A & P Construction Inc. is ready to prove to you the superiority of our workmanship, in a way that you can trust. We strive to always give our customers peace of mind when performing new roofing installations, and we stand by our work once it's completed.
Whether you’re a residential homeowner seeking to reinforce the roof over your head with a brand new installation or you’re a commercial business owner who needs a complete replacement of materials to protect your business, A & P Construction Inc. is here to make your investment in a new roof one that you’ll appreciate. Call us today at (402) 740-0800 to see why we have so many satisfied roofing customers!
|
import math
import scipy.interpolate as intrp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
# Global matplotlib text settings: Helvetica sans-serif and LaTeX
# rendering for all figure text.
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
# NOTE(review): 'normal' is not a standard matplotlib font family name;
# matplotlib may fall back to its default family here -- verify the
# intended font.  The size=24 setting does take effect.
font = {'family' : 'normal',
        'size' : 24}
rc('font', **font)
### The function
def f(t):
    """Test function f(t) = 1 / (1 + t^2) to be interpolated."""
    return 1.0 / (1.0 + t ** 2)
# Spline
def spline(xpts, ypts):
    """Solve for the knot second derivatives of a natural cubic spline.

    Builds the standard tridiagonal system for the second derivatives
    of the cubic spline through the points (@xpts, @ypts) and solves
    it with numpy.  Natural boundary conditions pin the second
    derivative to zero at both ends.  Returns an (n, 1) array.
    """
    n = len(xpts)
    mat = np.zeros((n, n))
    rhs = np.zeros((n, 1))
    for i in range(1, n - 1):
        # Divided-difference right hand side of the spline equations.
        rhs[i] = 6 * ((ypts[i + 1] - ypts[i]) / (xpts[i + 1] - xpts[i])
                      - (ypts[i] - ypts[i - 1]) / (xpts[i] - xpts[i - 1]))
        # Fill the tridiagonal row directly.  The original inner loop was
        # O(n) per row and stopped at column n-2, so mat[n-2][n-1] was
        # never set; that element multiplies the (natural-BC, zero) end
        # unknown, so the solution was unchanged, but the direct fill is
        # both complete and O(1) per row.
        mat[i][i - 1] = xpts[i] - xpts[i - 1]
        mat[i][i] = 2 * (xpts[i + 1] - xpts[i - 1])
        mat[i][i + 1] = xpts[i + 1] - xpts[i]
    # Natural boundary conditions: zero second derivative at both ends.
    mat[0][0] = 1
    mat[-1][-1] = 1
    rhs[0] = 0
    rhs[-1] = 0
    return np.linalg.solve(mat, rhs)
#######
# Plot f itself, then natural cubic spline fits with increasing knot
# counts, reporting the RMSE of each fit.  The original script had the
# same fit/sample/plot block copy-pasted three times; it is factored
# into a helper here.

def _plot_spline_fit(num_pts, style, label, width=None):
    """Fit, sample and plot a natural cubic spline with @num_pts knots.

    Evaluates the spline interpolant of f on 1000 samples per knot
    interval, plots it with the given line @style/@label/@width, and
    returns the RMSE against f at the sample points.
    """
    xpts = np.linspace(-5, 5, num_pts)
    ypts = [f(t) for t in xpts]
    sol = spline(xpts, ypts)
    samples = 1000
    xs = []
    ys = []
    for i in range(len(xpts) - 1):
        dx = xpts[i + 1] - xpts[i]
        for j in range(samples):
            # Hermite-style blending of values and second derivatives.
            bb = j / samples
            aa = 1 - bb
            xs.append(xpts[i] + bb * dx)
            cc = dx ** 2 * aa * (aa ** 2 - 1) / 6
            dd = dx ** 2 * bb * (bb ** 2 - 1) / 6
            ys.append(aa * ypts[i] + bb * ypts[i + 1] + cc * sol[i] + dd * sol[i + 1])
    plt.plot(xs, ys, style, label=label, linewidth=width)
    diffs = [f(xs[i]) - ys[i] for i in range(len(xs))]
    return np.linalg.norm(diffs) / np.sqrt(len(ys))

# The exact function, densely sampled for reference.
x = [i / 100 for i in range(-500, 500)]
fx = [f(i) for i in x]
plt.plot(x, fx, 'k--', label='f(t)', linewidth=5)

# Spline fits with 5, 10 and 15 knots (linewidth=None uses the default).
for num_pts, style, width in ((5, 'r', None), (10, 'b', None), (15, 'g', 3)):
    rmse = _plot_spline_fit(num_pts, style, '%d Points' % num_pts, width)
    print('Error for %d Points:' % num_pts, rmse)

plt.legend(fontsize=16)
plt.ylim([-0.2, 1.1])
plt.title('Natural Cubic Splines for $f(t)$')
plt.savefig('Problem5ii.png')
plt.show()
|
Export Import Data — Subasri Pictures is a premier company that exports and imports goods to and from countries including Sri Lanka and a host of other countries.
Subasri Pictures, is a premier company that exports and imports goods from countries including Sri Lanka, and host of other countries.
Subasri Pictures is a leading exporter of FEATURE FILM, under HS Code 37061001. The company has its head office at 4D, Mandira Apartments, 23, North Boag Road, T. Nagar, Chennai, Tamil Nadu 600017.
|
#!/usr/bin/env python
# encoding: utf-8
"""
factory.py
Created by Thomas Mangin on 2009-01-10.
Copyright (c) 2008 Exa Networks. All rights reserved.
See LICENSE for details.
"""
import time
from zope.interface import Interface, implements
from plugin import response
class IMailPolicyFactory (Interface):
    """Interface for factories that police and normalize policy messages."""

    def policeMessage (message):
        """returns what the plugins are saying about the message"""

    def sanitiseMessage (message):
        """returns the message reduced to the keys defined for the protocol version of the request"""

    def getPlugins (state):
        """returns a list of plugins which can run at this level"""

    def validateMessage (message):
        """checks that the message has the keys we need"""
from twisted.python import log
from twisted.internet import protocol
from twisted.internet import defer
from scavenger.policy.protocol import PostfixPolicyProtocol,ScavengerPolicyProtocol
from scavenger.policy.service import IMailPolicyService
# Default response template (the '%(msg)s' / '%(code)s' placeholders are
# filled from a plugin response); overridable via the service
# configuration key 'message'.
message = "[policy server reports] message %(msg)s\ncode %(code)s"
class MailPolicyFactoryFromService (protocol.ServerFactory):
implements(IMailPolicyFactory)
debug = False
postfix_21 = ['request','protocol_state','protocol_name','helo_name','queue_id','sender','recipient','recipient_count','client_address','client_name','reverse_client_name','instance',]
postfix_22 = ['sasl_method','sasl_username','sasl_sender','size','ccert_subject','ccert_issuer','ccert_fingerprint',]
postfix_23 = ['encryption_protocol','encryption_cipher','encryption_keysize','etrn_domain',]
postfix_25 = ['stress',]
scavenger_10 = ['server_address','code','origin']
states = ['VRFY','ETRN','CONNECT','EHLO','HELO','MAIL','RCPT','DATA','END-OF-DATA',]
def __init__ (self,service):
if service.getType() == 'scavenger':
self.protocol = ScavengerPolicyProtocol
elif service.getType() == 'postfix':
self.protocol = PostfixPolicyProtocol
else:
raise ValueError('unknow protocol option (scavenger,postfix)')
log.msg('+'*80)
self.plugins = {}
self.service = service
self.template = self.service.configuration.get('message',message)
self.type = self.service.getType()
self.version = {'postfix':{},'scavenger':{}}
for kv,ver in ((self.postfix_21,'2.1'),(self.postfix_22,'2.2'),(self.postfix_23,'2.3'),(self.postfix_25,'2.5')):
for k in kv:
self.version['postfix'][k] = ver
for kv,ver in ((self.postfix_21,'2.1'),(self.scavenger_10,'1.0')):
for k in kv:
self.version['scavenger'][k] = ver
for state in self.states:
self.plugins[state] = []
for plugin in self.service.getPlugins():
states = plugin.getStates()
for state in states:
self.plugins[state].append(plugin)
def getPlugins (self,message):
protocol = message['request']
state = message['protocol_state']
for plugin in self.plugins[state]:
yield plugin
def policeMessage (self,message):
self._storeMessage(message)
response = self._checkMessage(message)
print "%-15s %4s : %s" % (message['client_address'],message['protocol_state'],str(response))
return response
def _storeMessage (self,message):
# Perform database storage functions
for plugin in self.getPlugins(message):
try:
plugin.store(message)
# Errors
except response.InternalError,r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'plugin had an internal error',str(r)))
continue
except response.DataError,r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'the plugin does not like the data provided',str(r)))
continue
except response.UncheckableError,r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'uncheckable',str(r)))
continue
except response.NoResponseError, r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'no answer from the plugin',str(r)))
continue
# Uncaught Exception
except response.PluginError,r:
log.msg('plugin %s : %s' % (plugin.getName(),'no reponse'))
continue
except Exception, r:
log.msg('plugin %s : %s' % (plugin.getName(),'unknown response '+str(r)))
continue
def _checkMessage (self,message):
# Run all the plugin in order and act depending on the response returned
for plugin in self.getPlugins(message):
if self.debug: log.msg('running pluging ' + plugin.getName())
try:
r = plugin.police(message)
except Exception, e:
if plugin.debug:
import traceback
traceback.print_exc()
else:
log.msg("Plugin %s is raising an error - %s %s" % (plugin.getName(),str(type(e)), e.message))
continue
try:
raise r
# Nothing can be said about the data
except response.ResponseContinue:
if self.debug: log.msg('plugin %s : %s' % (plugin.getName(),'continue'))
continue
# Allow or Block the mail
except response.PostfixResponse, r:
# XXX: Need to create a dict class which reply '' to every unknown key
log.msg('plugin %s : %s' % (plugin.getName(),r.message))
if r.delay: log.msg('plugin %s : forcing a time of %d' % (plugin.getName(), r.delay))
return r
except response.ScavengerResponse, r:
# XXX: Need to create a dict class which reply '' to every unknown key
log.msg('plugin %s : %s' % (plugin.getName(),r.message))
if r.duration: log.msg('plugin %s : forcing a duration of %d' % (plugin.getName(), r.duration))
return r
# Nothing can be said about the data
except response.ResponseContinue:
log.msg('plugin %s : %s' % (plugin.getName(),'continue'))
continue
# Errors
except response.InternalError,r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'plugin had an internal error',str(r)))
continue
except response.DataError,r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'the plugin does not like the data provided',str(r)))
continue
except response.UncheckableError,r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'uncheckable',str(r)))
continue
except response.NoResponseError, r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'no answer from the plugin',str(r)))
continue
# Uncaught Exception
except response.PluginError,r:
log.msg('plugin %s : %s' % (plugin.getName(),'no reponse'))
continue
except Exception, r:
log.msg('plugin %s : %s' % (plugin.getName(),'unknown response '+str(r)))
continue
if self.debug: log.msg('plugins could not say anything about this message')
return response.ResponseUndetermined(self.type)
def sanitiseMessage(self, message):
    """
    Return a copy of ``message`` restricted to the keys expected for the
    current protocol version/type, with missing keys filled with ''.

    A ``timestamp`` key is always added: the message's own timestamp when
    it parses as a float, otherwise the current time, truncated to int.

    Fix: replaced ``dict.has_key`` (removed in Python 3) with the ``in`` /
    ``dict.get`` idioms, which behave identically on Python 2.
    """
    r = {}
    for k in self.version[self.type].keys():
        # Missing fields default to the empty string.
        r[k] = message.get(k, '')
    then = time.time()
    if 'timestamp' in message:
        try:
            then = float(message['timestamp'])
        except (TypeError, ValueError):
            # Unparseable timestamps fall back to "now".
            pass
    r['timestamp'] = int(then)
    return r
def validateMessage(self, message):
    """
    Return True when ``message`` is a structurally valid policy request.

    Checks, in order: mandatory keys, a known protocol state, a known
    request type, and (for scavenger requests only) the extra keys that
    mode requires.

    Fixes: replaced ``dict.has_key`` (removed in Python 3) with ``in``;
    a missing ``request`` key now yields False instead of raising
    ``KeyError`` out of a validator.
    """
    # Keys every request must carry.
    for k in ['client_address', 'protocol_state']:
        if k not in message:
            return False
    if message['protocol_state'] not in self.states:
        log.msg('invalid protocol state %s' % message['protocol_state'])
        return False
    # The original indexed message['request'] directly and could raise
    # KeyError; treat an absent key as an invalid request type instead.
    if message.get('request') not in ['smtpd_access_policy', 'scavenger_access_policy']:
        log.msg('invalid request type %s' % message.get('request'))
        return False
    if message['request'] != 'scavenger_access_policy':
        return True
    # Scavenger requests carry additional mandatory keys.
    for k in ['server_address', 'code', 'origin']:
        if k not in message:
            log.msg('scavenger message must have key %s' % k)
            return False
    return True
from twisted.python import components

# Register the adapter so Twisted can turn any IMailPolicyService into an
# IMailPolicyFactory (via MailPolicyFactoryFromService) wherever the factory
# interface is requested.
components.registerAdapter(MailPolicyFactoryFromService,
                           IMailPolicyService,
                           IMailPolicyFactory)
|
I picked up the Player's Handbook the other day (retails for $55. Phew!) and so far I'm liking what I see. They're trying to get back to the roots of what made D&D awesome. I've heard it described as a mix of 2nd and 3rd Edition, taking the best parts of both. So far, I can see that.
There is a free online version of both the Player's Handbook and the Dungeon Master's Guide here.
You had my attention, now you have my interest.
I've been looking at it, and I've been going back and forth whether or not it's worth getting the new books and upgrading my current 3.5 campaigns to 5e.
Any thoughts from current DMs? Worth upgrading? Any challenges?
I haven't finished reading the whole book yet but when I do I'll share a detailed review. For now I'll say that so far, it's a simpler, more elegant system and I'm digging it.
|
from pybitcoin.transactions.scripts import script_to_hex
from pybitcoin.hash import bin_double_sha256, bin_hash160
from pybitcoin.address import bin_hash160_to_address
# Redeem-script template: the OP_IF branch requires a 2-of-2 multisig of
# Alice and Bob; the OP_ELSE branch lets Alice spend alone after the
# relative locktime enforced by OP_CHECKSEQUENCEVERIFY has elapsed.
template = '''
OP_IF
OP_2 %(Alice)s %(Bob)s OP_2 OP_CHECKMULTISIG
OP_ELSE
%(Sequence)s OP_CHECKSEQUENCEVERIFY OP_DROP
%(Alice)s OP_CHECKSIG
OP_ENDIF
'''
# Concrete contract: two 33-byte compressed public keys plus the sequence
# value (little-endian hex -- presumably a relative-locktime encoding;
# confirm against the consensus rules before reuse).
contract = template % {'Bob' : '02578ad340083e85c739f379bbe6c6937c5da2ced52e09ac1eec43dc4c64846573',
    'Alice' : '0380990a7312b87abda80e5857ee6ebf798a2bf62041b07111287d19926c429d11',
    'Sequence' : '0a000000'}
# Example spend of the multisig branch: OP_0 (the extra dummy element
# OP_CHECKMULTISIG pops), two DER-encoded signatures, OP_1 to select the
# OP_IF branch, then the serialized redeem script pushed via OP_PUSHDATA1.
template2 = '''
OP_0
3045022100ff054d83e4f376b6b47705b8186fd1e2b61cabe70e717f052b6bf0fd00d883ec02203adaf168c7e4b32fbd66dd2adfdd42aaf6268f5e4c736978ab6c86d4e13bfcf401
304402200eab2db325b0c95dcfed00a4554b59d3422d2eef3eed50a341da55cd83e8e06302203fc97b96df2e803dfc3113cc6ee0dd5728ced316b63dfda72c808ab48826f7e601
OP_1
OP_PUSHDATA1
63522102578ad340083e85c739f379bbe6c6937c5da2ced52e09ac1eec43dc4c64846573210380990a7312b87abda80e5857ee6ebf798a2bf62041b07111287d19926c429d1152ae670164b3752102578ad340083e85c739f379bbe6c6937c5da2ced52e09ac1eec43dc4c64846573ac68
'''
# Example spend with a single signature plus a serialized redeem script
# (note the embedded 0a000000 sequence value in the hex blob); this is the
# script compiled and printed by the __main__ block below.
template3 = '''
304402204d21c19216cad74e780bd70e04518cf8f1a20108dc3bf79f7b218865524661ac022049b5de8a05d9b524ae6de3b4b221c856d16d4e3a51f7f19e685e7fc33b51abac01
OP_1
OP_PUSHDATA1
6352210380990a7312b87abda80e5857ee6ebf798a2bf62041b07111287d19926c429d112102578ad340083e85c739f379bbe6c6937c5da2ced52e09ac1eec43dc4c6484657352ae67040a000000b375210380990a7312b87abda80e5857ee6ebf798a2bf62041b07111287d19926c429d11ac68
'''
def compile(ct):
    """
    Normalize all whitespace in a script template and assemble it into a
    hex-encoded script via ``script_to_hex``.

    NOTE: the name shadows the ``compile`` builtin; kept for the existing
    callers in this module.
    """
    normalized = ' '.join(ct.split())
    return script_to_hex(normalized)
if __name__ == '__main__':
    # Compile the contract, hash it, and derive the pay-to-script-hash
    # address (version byte 5 -- presumably mainnet P2SH; confirm).
    script = compile(contract)
    script_hash = bin_hash160(script, hex_format=True)
    p2sh = bin_hash160_to_address(script_hash, 5)
    print (p2sh)
    print (script)
    print ('-' * 80)
    # Also show the hex form of the example spending script.
    print (compile(template3))
|
Not all is lost because we are not alone. If she can believe that, then so can I.
When we arrived at Tornillo, we planned to send up a balloon into the air, with a banner hanging down from it that read, “No estan solo” (You are not alone). We wanted the kids imprisoned in tents at Tornillo to know that there were people that cared about them, and that were fighting for them on the outside. It was simple, it would not have changed the world, but it would have given them hope. It would have reminded them that not all is lost. For us, that was worth the risk.
Unfortunately, the balloon never got up high enough for them to read. A local rancher, who had been encouraged to feel free to engage in vigilantism by CBP, interrupted and eventually pulled a revolver out, waving it around and threatening to shoot down the balloon.
Eventually, through peaceful dialogue, he was deescalated and perhaps began to realize how foolish he was being. He put his revolver in his front pocket. But that did not stop him from saying, “Well, I’ll let you do it if you pay me $5,000.” I wondered how much, if anything, CBP was paying him to outsource their intimidation.
Eventually, the balloon was deflated, as were our spirits, and we all went our separate ways.
Rev. Hannah Adair Bonner is partnering with Auburn to bring immediate first hand witness of both the horrors, and resilient organizing, on the ground in Texas this week. This blog first appeared on her personal blog.
Read Rev. Hannah’s other pieces here, here, here and here.
Follow Auburn on Twitter, Facebook, and Instagram.
|
#!/usr/bin/python
# -*- coding: cp1252 -*-
#
##################################################################################
#
# Copyright 2016-2017 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com)
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
__author__ = "John Doe <johndoe@mailcatch.com>"
__version__ = "1.0"
import argparse
import json
import re
import sys
import urllib2
import osrframework.utils.browser as browser
from osrframework.utils.platforms import Platform
class Bebee(Platform):
    """
    A <Platform> object for Bebee.
    """
    def __init__(self):
        """
        Set up the static configuration describing how OSRFramework
        queries Bebee: supported modes, URL patterns, credential needs,
        query validation and the markers used to scrape each field.
        """
        self.platformName = "Bebee"
        self.tags = ["jobs"]

        # Only usufy (user profile lookup) is supported; phonefy and
        # searchfy are disabled for this platform.
        self.isValidMode = {
            "phonefy": False,
            "usufy": True,
            "searchfy": False,
        }

        # URL pattern per mode; "<usufy>" is substituted with the query.
        self.url = {
            "usufy": "https://bebee.com/bee/" + "<usufy>",
        }

        # Profile pages are public, so no credentials are required.
        self.needsCredentials = {
            "usufy": False,
        }

        # Any non-empty query is considered valid for usufy.
        self.validQuery = {
            "usufy": ".+",
        }

        # If this marker appears in the response, the profile was not
        # found.  NOTE(review): the host "https://.bebee.com" (dot right
        # after "//") looks suspicious -- confirm against live markup.
        self.notFoundText = {
            "usufy": ['<link rel="canonical" href="https://.bebee.com/bees/search">'],
        }

        # start/end substrings delimiting each field scraped in usufy mode.
        self.fieldsRegExp = {
            "usufy": {
                "i3visio.fullname": {"start": '<title>', "end": '- beBee</title>'},
                "i3visio.location": {"start": '<span itemprop="addressRegion">', "end": '</span>'},
                "i3visio.alias.googleplus": {"start": '<div><a rel="nofollow" class="color_corp_three" href="https://plus.google.com/u/0/', "end": '"'},
                "i3visio.alias.linkedin": {"start": '<div><a rel="nofollow" class="color_corp_three" href="http://br.linkedin.com/in/', "end": '"'},
            },
        }

        # Populated at runtime with the values extracted for each field.
        self.foundFields = {}
|
WHEN FAMILY AND SUCCESS MATTER, RAMOS LAW GROUP GLADIATORS DELIVER RESULTS.
Mary E. Ramos and the Ramos Law Group are Sugar Land divorce lawyers who have developed comprehensive divorce and family law knowledge base to help potential clients and other Texans come to terms with a divorce, learn about the process and other potential divorce issues that may arise. The divorce and family law blog includes a wide range of topics including divorce process, child support guidelines, child custody, support enforcement and division of marital property.
If you face a high-value estate divorce, custody, domestic violence or other family law issues, then you have come to the right place. Each of our gladiators is well trained and understands all aspects of divorce and the unique considerations required with high-wealth estates and highly contested litigation. Our team is here to help you reduce the stress, help you heal and achieve the happiness you deserve. The articles linked on this page discuss specific aspects of divorce, child custody, child support, community property, modification of divorce decrees and enforcement.
Sugarland Divorce Lawyer, Mary E. Ramos, is a Family Law attorney who is Board certified in the state of Texas by the Board of Legal Specialization since 2014. This title is reserved for attorneys who demonstrate the highest level of public achievement in their respective fields. Mrs. Ramos and her team are passionate about what they do and actively work to grow their knowledge on family law and further develop their litigation skills by spending at least 80 hours in training. In addition, our attorneys and support staff attend the Advanced Family Law Course offered by the State Bar of Texas. We also require our staff to attend litigation skill building courses offered by the National Family Law Trial Institute. The articles below go over different aspects of divorce. They can be used as points of reference during divorce proceedings.
Don’t Move Out Of the House – The house is usually the biggest asset to negotiate. If you’re not in fear for your safety, do not move out of the house before speaking with an attorney.
Maintain the Status Quo – Don’t make drastic changes to your lifestyle such as buying a Corvette or cutting off your spouse from the stream of income. Guys, DON’T buy the girlfriend diamonds, since you are still considered married until the judge signs off on the final order.
Keep the Kids Out of it – NEVER use the kids as a tool to hurt the other party, and DON’T put them in the middle. Let’s let our kids grow with the love of both parents.
Stay Off Social Media – Don’t post anything you wouldn’t want your mother or the judge to see. Set your social media settings to private away from public view and make sure not to delete anything once the case has been filed without the advice of an attorney.
While these five steps do not cover every aspect of the financial preparation necessary to gain independence post-divorce, the steps are a great start to become in control of your own money and move to living with only one income.
These reasons show how valuable mediation is in divorce cases and why people should keep an open mind when it comes to settling their divorce case without going to trial.
While these all seem like obvious things to avoid, emotions will bring the worst out of you. By following these guidelines, your divorce process is likely to be much smoother than those who act in a way that leads to animosity between parties.
|
from collections.abc import Sequence
from uqbar.enums import IntEnumeration, StrictEnumeration
class AddAction(IntEnumeration):
    """
    An enumeration of scsynth node add actions.
    """

    ### CLASS VARIABLES ###

    # NOTE(review): these integer values are sent to the server as the
    # add-action argument; do not renumber without checking the protocol.
    ADD_TO_HEAD = 0
    ADD_TO_TAIL = 1
    ADD_BEFORE = 2
    ADD_AFTER = 3
    REPLACE = 4
class BinaryOperator(IntEnumeration):
    """
    An enumeration of scsynth binary operator opcodes.

    The integer values are the operator indices understood by the server;
    a few members carry the formula they compute as an inline comment.
    """

    ### CLASS VARIABLES ###

    ABSOLUTE_DIFFERENCE = 38  # |a - b|
    ADDITION = 0
    AMCLIP = 40
    ATAN2 = 22
    BIT_AND = 14
    BIT_OR = 15
    BIT_XOR = 16
    CLIP2 = 42
    DIFFERENCE_OF_SQUARES = 34  # a*a - b*b
    EQUAL = 6
    EXCESS = 43
    EXPRANDRANGE = 48
    FLOAT_DIVISION = 4
    FILL = 29
    FIRST_ARG = 46
    FOLD2 = 44
    GREATEST_COMMON_DIVISOR = 18
    GREATER_THAN_OR_EQUAL = 11
    GREATER_THAN = 9
    HYPOT = 23
    HYPOTX = 24
    INTEGER_DIVISION = 3
    LEAST_COMMON_MULTIPLE = 17
    LESS_THAN_OR_EQUAL = 10
    LESS_THAN = 8
    MAXIMUM = 13
    MINIMUM = 12
    MODULO = 5
    MULTIPLICATION = 2
    NOT_EQUAL = 7
    POWER = 25
    RANDRANGE = 47
    RING1 = 30  # a * (b + 1) == a * b + a
    RING2 = 31  # a * b + a + b
    RING3 = 32  # a*a*b
    RING4 = 33  # a*a*b - a*b*b
    ROUND = 19
    ROUND_UP = 20
    SCALE_NEG = 41
    SHIFT_LEFT = 26
    SHIFT_RIGHT = 27
    SQUARE_OF_DIFFERENCE = 37  # (a - b)^2
    SQUARE_OF_SUM = 36  # (a + b)^2
    SUBTRACTION = 1
    SUM_OF_SQUARES = 35  # a*a + b*b
    THRESHOLD = 39
    TRUNCATION = 21
    UNSIGNED_SHIFT = 28
    WRAP2 = 45
class CalculationRate(IntEnumeration):
    """
    An enumeration of scsynth calculation-rates.

    ::

        >>> import supriya.synthdefs
        >>> supriya.CalculationRate.AUDIO
        CalculationRate.AUDIO

    ::

        >>> supriya.CalculationRate.from_expr("demand")
        CalculationRate.DEMAND

    """

    ### CLASS VARIABLES ###

    AUDIO = 2
    CONTROL = 1
    DEMAND = 3
    SCALAR = 0

    ### PUBLIC METHODS ###

    @classmethod
    def from_expr(cls, expr):
        """
        Derive the calculation-rate of ``expr``.

        Accepts plain numbers (scalar), ugens and output proxies (their own
        rate), parameters (mapped via their parameter rate), strings, and
        sequences (the maximum rate of their items).

        ::

            >>> import supriya.synthdefs
            >>> import supriya.ugens

        ::

            >>> supriya.CalculationRate.from_expr(1)
            CalculationRate.SCALAR

        ::

            >>> supriya.CalculationRate.from_expr("demand")
            CalculationRate.DEMAND

        ::

            >>> collection = []
            >>> collection.append(supriya.ugens.DC.ar(0))
            >>> collection.append(supriya.ugens.DC.kr(1))
            >>> collection.append(2.0)
            >>> supriya.CalculationRate.from_expr(collection)
            CalculationRate.AUDIO

        ::

            >>> collection = []
            >>> collection.append(supriya.ugens.DC.kr(1))
            >>> collection.append(2.0)
            >>> supriya.CalculationRate.from_expr(collection)
            CalculationRate.CONTROL

        Return calculation-rate.
        """
        import supriya.synthdefs
        import supriya.ugens

        # NB: the dispatch order below is significant; do not reorder.
        if isinstance(expr, (int, float)) and not isinstance(expr, cls):
            return CalculationRate.SCALAR
        elif isinstance(expr, (supriya.synthdefs.OutputProxy, supriya.synthdefs.UGen)):
            return expr.calculation_rate
        elif isinstance(expr, supriya.synthdefs.Parameter):
            rate_name = expr.parameter_rate.name
            # Trigger-rate parameters behave as control-rate signals.
            if rate_name == "TRIGGER":
                return CalculationRate.CONTROL
            return CalculationRate.from_expr(rate_name)
        elif isinstance(expr, str):
            return super().from_expr(expr)
        elif isinstance(expr, Sequence):
            # A collection runs at the fastest rate of its members.
            return max(CalculationRate.from_expr(item) for item in expr)
        elif hasattr(expr, "calculation_rate"):
            return cls.from_expr(expr.calculation_rate)
        return super().from_expr(expr)

    ### PUBLIC PROPERTIES ###

    @property
    def token(self):
        """
        The rate-suffix used in ugen constructor names ("ir", "kr", "ar"),
        or "new" for demand rate.
        """
        suffixes = {
            CalculationRate.SCALAR: "ir",
            CalculationRate.CONTROL: "kr",
            CalculationRate.AUDIO: "ar",
        }
        return suffixes.get(self, "new")
class DoneAction(IntEnumeration):
    """
    An enumeration of ``scsynth`` UGen "done" actions.

    ::

        >>> import supriya.synthdefs
        >>> supriya.DoneAction(2)
        DoneAction.FREE_SYNTH

    ::

        >>> supriya.DoneAction.from_expr("pause synth")
        DoneAction.PAUSE_SYNTH

    """

    ### CLASS VARIABLES ###

    # Values are passed verbatim as a UGen's doneAction argument;
    # do not renumber.
    NOTHING = 0
    PAUSE_SYNTH = 1
    FREE_SYNTH = 2
    FREE_SYNTH_AND_PRECEDING_NODE = 3
    FREE_SYNTH_AND_FOLLOWING_NODE = 4
    FREE_SYNTH_AND_FREEALL_PRECEDING_NODE = 5
    FREE_SYNTH_AND_FREEALL_FOLLOWING_NODE = 6
    FREE_SYNTH_AND_ALL_PRECEDING_NODES_IN_GROUP = 7
    FREE_SYNTH_AND_ALL_FOLLOWING_NODES_IN_GROUP = 8
    FREE_SYNTH_AND_PAUSE_PRECEDING_NODE = 9
    FREE_SYNTH_AND_PAUSE_FOLLOWING_NODE = 10
    FREE_SYNTH_AND_DEEPFREE_PRECEDING_NODE = 11
    FREE_SYNTH_AND_DEEPFREE_FOLLOWING_NODE = 12
    FREE_SYNTH_AND_ALL_SIBLING_NODES = 13
    FREE_SYNTH_AND_ENCLOSING_GROUP = 14
class EnvelopeShape(IntEnumeration):
    """
    An enumeration of scsynth envelope segment shapes.
    """

    ### CLASS VARIABLES ###

    CUBED = 7
    CUSTOM = 5
    EXPONENTIAL = 2
    LINEAR = 1
    SINE = 3
    SQUARED = 6
    STEP = 0
    WELCH = 4
class HeaderFormat(IntEnumeration):
    """
    An enumeration of soundfile header formats.

    ::

        >>> supriya.HeaderFormat.AIFF
        HeaderFormat.AIFF

    ::

        >>> supriya.HeaderFormat.from_expr("wav")
        HeaderFormat.WAV

    ::

        >>> header_format = supriya.HeaderFormat.from_expr("wav")
        >>> header_format.name.lower()
        'wav'

    """

    ### CLASS VARIABLES ###

    # The lowercased member name is used as the format string elsewhere
    # (see the doctest above).
    AIFF = 0
    IRCAM = 1
    NEXT = 2
    RAW = 3
    WAV = 4
class NodeAction(IntEnumeration):
    """
    An enumeration of scsynth node actions.
    """

    ### CLASS VARIABLES ###

    NODE_CREATED = 0
    NODE_REMOVED = 1
    NODE_ACTIVATED = 2
    NODE_DEACTIVATED = 3
    NODE_MOVED = 4
    NODE_QUERIED = 5

    ### PUBLIC METHODS ###

    @classmethod
    def from_address(cls, address):
        """
        Translate a node-notification OSC address (e.g. ``/n_go``) into its
        corresponding action.  Raises ``KeyError`` for unknown addresses.
        """
        return {
            "/n_end": cls.NODE_REMOVED,
            "/n_go": cls.NODE_CREATED,
            "/n_info": cls.NODE_QUERIED,
            "/n_move": cls.NODE_MOVED,
            "/n_off": cls.NODE_DEACTIVATED,
            "/n_on": cls.NODE_ACTIVATED,
        }[address]
class ParameterRate(IntEnumeration):
    """
    An enumeration of synthdef control rates.
    """

    ### CLASS VARIABLES ###

    # NOTE(review): these values intentionally differ from
    # CalculationRate's (CONTROL is 3 here, 1 there); see
    # CalculationRate.from_expr for the mapping between the two.
    AUDIO = 2
    CONTROL = 3
    SCALAR = 0
    TRIGGER = 1
class RequestId(IntEnumeration):
    """
    An enumeration of scsynth request ids.

    Each member mirrors a command in :class:`RequestName`; use the
    ``request_name`` property to translate between the two.
    """

    ### CLASS VARIABLES ###

    BUFFER_ALLOCATE = 28
    BUFFER_ALLOCATE_READ = 29
    BUFFER_ALLOCATE_READ_CHANNEL = 54
    BUFFER_CLOSE = 33
    BUFFER_FILL = 37
    BUFFER_FREE = 32
    BUFFER_GENERATE = 38
    BUFFER_GET = 42
    BUFFER_GET_CONTIGUOUS = 43
    BUFFER_QUERY = 47
    BUFFER_READ = 30
    BUFFER_READ_CHANNEL = 55
    BUFFER_SET = 35
    BUFFER_SET_CONTIGUOUS = 36
    BUFFER_WRITE = 31
    BUFFER_ZERO = 34
    CLEAR_SCHEDULE = 51
    COMMAND = 4
    CONTROL_BUS_FILL = 27
    CONTROL_BUS_GET = 40
    CONTROL_BUS_GET_CONTIGUOUS = 41
    CONTROL_BUS_SET = 25
    CONTROL_BUS_SET_CONTIGUOUS = 26
    DUMP_OSC = 39
    ERROR = 58
    GROUP_DEEP_FREE = 50
    GROUP_DUMP_TREE = 56
    GROUP_FREE_ALL = 24
    GROUP_HEAD = 22
    GROUP_NEW = 21
    GROUP_QUERY_TREE = 57
    GROUP_TAIL = 23
    NODE_AFTER = 19
    NODE_BEFORE = 18
    NODE_COMMAND = 13
    NODE_FILL = 17
    NODE_FREE = 11
    NODE_MAP_TO_CONTROL_BUS = 14
    NODE_MAP_TO_AUDIO_BUS = 60
    NODE_MAP_TO_AUDIO_BUS_CONTIGUOUS = 61
    NODE_MAP_TO_CONTROL_BUS_CONTIGUOUS = 48
    NODE_ORDER = 62
    NODE_QUERY = 46
    NODE_RUN = 12
    NODE_SET = 15
    NODE_SET_CONTIGUOUS = 16
    NODE_TRACE = 10
    NOTHING = 0
    NOTIFY = 1
    PARALLEL_GROUP_NEW = 63
    QUIT = 3
    STATUS = 2
    SYNC = 52
    SYNTHDEF_FREE = 53
    SYNTHDEF_FREE_ALL = 8
    SYNTHDEF_LOAD = 6
    SYNTHDEF_LOAD_DIR = 7
    SYNTHDEF_RECEIVE = 5
    SYNTH_GET = 44
    SYNTH_GET_CONTIGUOUS = 45
    SYNTH_NEW = 9
    SYNTH_NEWARGS = 59
    SYNTH_NOID = 49
    SYNTH_QUERY = 65
    UGEN_COMMAND = 20
    VERSION = 64

    @property
    def request_name(self):
        # Members share names with RequestName, so a name-based lookup
        # suffices.  Raises for ids with no OSC address (e.g. NOTHING).
        return RequestName.from_expr(self.name)
class RequestName(StrictEnumeration):
    """
    An enumeration of scsynth request names.

    Each member's value is the OSC address of the command; members that
    exist in :class:`RequestId` but have no OSC address are left commented
    out below.
    """

    ### CLASS VARIABLES ###

    BUFFER_ALLOCATE = "/b_alloc"
    BUFFER_ALLOCATE_READ = "/b_allocRead"
    BUFFER_ALLOCATE_READ_CHANNEL = "/b_allocReadChannel"
    BUFFER_CLOSE = "/b_close"
    BUFFER_FILL = "/b_fill"
    BUFFER_FREE = "/b_free"
    BUFFER_GENERATE = "/b_gen"
    BUFFER_GET = "/b_get"
    BUFFER_GET_CONTIGUOUS = "/b_getn"
    BUFFER_QUERY = "/b_query"
    BUFFER_READ = "/b_read"
    BUFFER_READ_CHANNEL = "/b_readChannel"
    BUFFER_SET = "/b_set"
    BUFFER_SET_CONTIGUOUS = "/b_setn"
    BUFFER_WRITE = "/b_write"
    BUFFER_ZERO = "/b_zero"
    CLEAR_SCHEDULE = "/clearSched"
    COMMAND = "/cmd"
    CONTROL_BUS_FILL = "/c_fill"
    CONTROL_BUS_GET = "/c_get"
    CONTROL_BUS_GET_CONTIGUOUS = "/c_getn"
    CONTROL_BUS_SET = "/c_set"
    CONTROL_BUS_SET_CONTIGUOUS = "/c_setn"
    DUMP_OSC = "/dumpOSC"
    ERROR = "/error"
    GROUP_DEEP_FREE = "/g_deepFree"
    GROUP_DUMP_TREE = "/g_dumpTree"
    GROUP_FREE_ALL = "/g_freeAll"
    GROUP_HEAD = "/g_head"
    GROUP_NEW = "/g_new"
    GROUP_QUERY_TREE = "/g_queryTree"
    GROUP_TAIL = "/g_tail"
    NODE_AFTER = "/n_after"
    NODE_BEFORE = "/n_before"
    # NODE_COMMAND = None
    NODE_FILL = "/n_fill"
    NODE_FREE = "/n_free"
    NODE_MAP_TO_AUDIO_BUS = "/n_mapa"
    NODE_MAP_TO_AUDIO_BUS_CONTIGUOUS = "/n_mapan"
    NODE_MAP_TO_CONTROL_BUS = "/n_map"
    NODE_MAP_TO_CONTROL_BUS_CONTIGUOUS = "/n_mapn"
    NODE_ORDER = "/n_order"
    NODE_QUERY = "/n_query"
    NODE_RUN = "/n_run"
    NODE_SET = "/n_set"
    NODE_SET_CONTIGUOUS = "/n_setn"
    NODE_TRACE = "/n_trace"
    # NOTHING = None
    NOTIFY = "/notify"
    PARALLEL_GROUP_NEW = "/p_new"
    QUIT = "/quit"
    STATUS = "/status"
    SYNC = "/sync"
    SYNTHDEF_FREE = "/d_free"
    # SYNTHDEF_FREE_ALL = None
    SYNTHDEF_LOAD = "/d_load"
    SYNTHDEF_LOAD_DIR = "/d_loadDir"
    SYNTHDEF_RECEIVE = "/d_recv"
    SYNTH_GET = "/s_get"
    SYNTH_GET_CONTIGUOUS = "/s_getn"
    SYNTH_NEW = "/s_new"
    SYNTH_QUERY = "/s_query"
    # SYNTH_NEWARGS = None
    SYNTH_NOID = "/s_noid"
    UGEN_COMMAND = "/u_cmd"
    VERSION = "/version"

    ### PUBLIC PROPERTIES ###

    @property
    def request_id(self):
        # Inverse of RequestId.request_name; matched by member name.
        return RequestId.from_expr(self.name)
class SampleFormat(IntEnumeration):
    """
    An enumeration of soundfile sample formats.

    ::

        >>> supriya.SampleFormat.INT24
        SampleFormat.INT24

    ::

        >>> supriya.SampleFormat.from_expr("float")
        SampleFormat.FLOAT

    ::

        >>> sample_format = supriya.SampleFormat.INT24
        >>> sample_format.name.lower()
        'int24'

    """

    ### CLASS VARIABLES ###

    # NOTE(review): INT24 deliberately holds value 0 (the first member);
    # confirm before reordering or renumbering.
    INT24 = 0
    ALAW = 1
    DOUBLE = 2
    FLOAT = 3
    INT8 = 4
    INT16 = 5
    INT32 = 6
    MULAW = 7
class SignalRange(IntEnumeration):
    """
    An enumeration of scsynth UGen signal ranges.

    ::

        >>> supriya.SignalRange.UNIPOLAR
        SignalRange.UNIPOLAR

    ::

        >>> supriya.SignalRange.from_expr("bipolar")
        SignalRange.BIPOLAR

    """

    ### CLASS VARIABLES ###

    # UNIPOLAR: non-negative output; BIPOLAR: output crosses zero
    # (inferred from the names).
    UNIPOLAR = 0
    BIPOLAR = 1
class UnaryOperator(IntEnumeration):
    """
    An enumeration of scsynth unary operator opcodes.
    """

    ### CLASS VARIABLES ###

    ABSOLUTE_VALUE = 5
    AMPLITUDE_TO_DB = 22
    ARCCOS = 32
    ARCSIN = 31
    ARCTAN = 33
    AS_FLOAT = 6
    AS_INT = 7
    BILINRAND = 40
    BIT_NOT = 4
    CEILING = 8
    COIN = 44
    COS = 29
    COSH = 35
    CUBED = 13
    DB_TO_AMPLITUDE = 21
    DIGIT_VALUE = 45
    DISTORT = 42
    EXPONENTIAL = 15
    FLOOR = 9
    FRACTIONAL_PART = 10
    HZ_TO_MIDI = 18
    HZ_TO_OCTAVE = 24
    HANNING_WINDOW = 49
    IS_NIL = 2
    LINRAND = 39
    LOG = 25
    LOG10 = 27
    LOG2 = 26
    MIDI_TO_HZ = 17
    SEMITONES_TO_RATIO = 19
    NEGATIVE = 0
    NOT = 1
    NOT_NIL = 3
    OCTAVE_TO_HZ = 23
    RAMP = 52
    RAND = 37
    RAND2 = 38
    RATIO_TO_SEMITONES = 20
    RECIPROCAL = 16
    RECTANGLE_WINDOW = 48
    S_CURVE = 53
    SIGN = 11
    SILENCE = 46
    SIN = 28
    SINH = 34
    SOFTCLIP = 43
    SQUARE_ROOT = 14
    SQUARED = 12
    SUM3RAND = 41
    TAN = 30
    TANH = 36
    THRU = 47
    TRIANGLE_WINDOW = 51
    WELCH_WINDOW = 50
class Unit(IntEnumeration):
    """
    An enumeration of measurement-unit designations (e.g. for parameter
    metadata).
    """

    ### CLASS VARIABLES ###

    UNDEFINED = 0
    DECIBELS = 1
    AMPLITUDE = 2
    SECONDS = 3
    MILLISECONDS = 4
    HERTZ = 5
    SEMITONES = 6
|
If you experience any Wallet sync issues you can try to add these below Nodes, they are active and working.
In the debug window, please tell us which version of the coin you have.
Hi! My brother fixed the problem, we are using the latest v0.8.6.5-beta version, but still, thank you for your attention. Good karma to everyone!
I tried these nodes and still no success. I am currently running windows 7 Home premium with the latest wallet. (Downloaded it last night 6/10/15) I have several other wallets(bitcoin core, mintcoin, litecoin,feathercoin,to name a few) up and running, so what else can I do to get this one running?
Evidently I need more patience than I used. The wallet is currently syncing. At 39 weeks and counting down. So for anyone else reading this looking for a solution after entering in the nodes above: wait for several hours. It was not syncing when I went to bed, but was when I got back up.
Great — I was away from my computer for a few days, so I couldn't reply to you. Usually it would only take 10-30 mins before your nodes link up with a network node. Due to some network noise it seems like you are one of the few who had a hard time finding a node, even though more than 20-30 are usually online each day. Yup, the best thing to do is always just leave it on and wait.
|
#coding: utf-8
import logging
import time
from sys import argv,exit,path
import threading
from os import environ
# Make the shared helper modules importable: SCRIPTS_HOME and its parent
# directory (the [:-8] slice strips the trailing path component --
# presumably "/scripts"; confirm against the deployment layout).
path.insert(0, environ['SCRIPTS_HOME'])
path.insert(1, environ['SCRIPTS_HOME'][:-8])
# commons provides FORMAT, executeQuery, commitQuery and getBarrio.
from commons import *
logging.basicConfig(format=FORMAT,level=logging.INFO)
from daemon import runner
class App():
    """
    python-daemon application that continuously assigns neighbourhood
    (bahra) codes to ft_modalidad rows lacking one, fanning the work out
    over ten worker threads per batch.
    """

    def __init__(self):
        # Daemon plumbing: detach stdio and manage a pidfile.
        self.stdin_path = '/dev/null'
        self.stdout_path = '/dev/tty'
        self.stderr_path = '/dev/tty'
        self.pidfile_path = '/tmp/mydaemon.pid'
        self.pidfile_timeout = 5

    def run(self):
        """
        Main loop: fetch up to 500 uncoded rows, split them into ~10
        chunks, process each chunk in a Modalidad worker thread, then
        sleep before the next pass.
        """
        logging.debug("Seteo variables")
        threads = list()
        logging.info("--------- Inicio del Script ----------------")
        # Rows previously coded as 0 are reset so they get re-processed.
        commitQuery("""update ft_modalidad set cod_bahra=null where cod_bahra=0;""")
        datosSinCorregirModalidad = """select id_mod_ori,longitud,latitud from ft_modalidad
where cod_bahra is null
and longitud is not null
order by id_mod_ori asc
limit 500
"""
        while True:
            threads = list()
            rows = executeQuery(datosSinCorregirModalidad)
            print(len(rows))
            # BUG fix: the original computed len(rows) / 10 directly; with
            # fewer than 10 rows the chunk size was 0 and range(..., 0)
            # raises ValueError.  `//` keeps integer semantics on both
            # Python 2 and Python 3.
            cantidad = max(1, len(rows) // 10)
            for rows_sub in [rows[x:x + cantidad] for x in range(0, len(rows), cantidad)]:
                print(len(rows_sub))
                t = threading.Thread(target=Modalidad, args=(rows_sub,))
                threads.append(t)
                t.start()
            for t in threads:
                t.join()
            # NOTE(review): this compares the chunk size (<= 50 given the
            # LIMIT 500 above), so the daemon sleeps after every pass;
            # `len(rows) < 500` may have been intended -- confirm.
            if cantidad < 500:
                time.sleep(10000)
def Modalidad(rows):
    """
    Worker thread body: resolve the neighbourhood (bahra) code for each
    row and persist all results in a single batched commit.
    """
    logging.info("Thread %s iniciado" % threading.currentThread().getName())
    statements = []
    for row in rows:
        # row layout matches the SELECT in App.run:
        # (id_mod_ori, longitud, latitud).
        try:
            barrio = getBarrio(row[2], row[1])
        except Exception as error:
            logging.error(str(error))
            barrio = 0
        if barrio == 0:
            # Retry the lookup with the alternative extra argument.
            try:
                barrio = getBarrio(row[2], row[1], 8)
            except Exception as error:
                logging.error(str(error))
        statements.append("""update ft_modalidad set
cod_bahra=%s
where id_mod_ori= '%s'; \n""" % (barrio, row[0]))
    commitQuery(''.join(statements))
    logging.debug("Se finalizo la carga en la base de modalidad")
    logging.info("Thread %s finalizado" % threading.currentThread().getName())
#####################################
#      Database queries             #
#####################################

# Build the daemon app and let python-daemon's runner dispatch the action
# (start/stop/restart) requested on the command line.
app = App()
daemon_runner = runner.DaemonRunner(app)
daemon_runner.do_action()
|
Lauren was diagnosed with ALL (Acute Lymphoblastic Leukemia) on April 11, 2011, when she was 13 years old. At the time of diagnosis, she was a healthy girl who played on two fast pitch softball teams and participated in the local bowling league.
Lauren experienced fatigue, nausea, dizziness, headaches and body aches prior to her diagnosis and was diagnosed within days of her initial visit to her pediatrician. Lauren endured 2 ½ years of treatment at MD Anderson Children’s Cancer Hospital. Most of the treatments were on an outpatient basis, but a few were done during inpatient stays at the hospital. Lauren lost her hair twice, experienced an allergic reaction to one of the chemotherapy drugs and got very familiar with the side effects from prednisone.
Lauren finished her treatment two days prior to her 16th birthday in August 2013. We had a very wonderful Sweet 16 Celebration.
|
# -*- coding: utf-8 -*-
from knowledge_base.utils.decorators import skip_signal
from knowledge_base.utils.string_representations import make_slug
from knowledge_base.utils.thumbnails import make_thumbnail
@skip_signal()
def generate_slug(sender, instance, created, **kwargs):
    """
    Generates a slug for every given instance.

    Sets ``skip_signal`` before saving so the nested ``save()`` does not
    re-trigger this handler.
    """
    instance.slug = make_slug(instance, 'name')
    instance.skip_signal = True
    instance.save()
    # Consistency fix: clear the flag after saving, as generate_thumbnail
    # does; otherwise a stale skip_signal would suppress signal handlers
    # on every later save of this instance -- confirm against the
    # skip_signal decorator's semantics.
    del instance.skip_signal
@skip_signal()
def generate_thumbnail(sender, instance, created, *args, **kwargs):
    """
    Creates a thumbnail for the given instance.

    The model must expose a ``thumbnail_settings`` property shaped like::

        @property
        def thumbnail_settings(self):
            return {
                "dimension": "100x100",
                "original_field": "image",
                "thumbnail_field": "thumbnail"
            }
    """
    settings = instance.thumbnail_settings
    source_field_name = settings.get('original_field')
    # Only build a thumbnail when the source field actually holds a value.
    if getattr(instance, source_field_name):
        make_thumbnail(
            instance,
            source_field_name,
            settings.get('thumbnail_field'),
            settings.get('dimension')
        )
        # Guard against re-entering this handler from the nested save().
        instance.skip_signal = True
        instance.save()
        del instance.skip_signal
|
Some Props And Sets From The Netflix Film "Outlaw King"
Various paint techniques used to cover plastic/wood etc... Reproduction parts cast in fibreglass/plaster, RTV silicone moulds, build incorporated laser-cut parts, 3D printed designs, carved foam units... Where to begin!
- Oh wow! Thank you! Amazing! I love it!
Your Ideas For A Prop?
|
from kivy.logger import Logger
from screens import InteractiveScreen
from config import tutorial_path
from kivy.properties import ObjectProperty, OptionProperty, BooleanProperty, StringProperty, NumericProperty
from kivy.uix.image import Image
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.label import Label
from os import listdir
from os.path import join
from authentication import user_authenticated
from widgets.linkedin import LinkedIn
from kivy.utils import platform
from theme import anonymous_nick
class TutorialImage(FloatLayout):
    """One tutorial slide; ``source`` is the path of the slide image
    (populated from ``tutorial_path`` in WelcomeScreen.populate_slides)."""
    source = StringProperty()
class TutorialProgressImage(Image):
    """A progress dot under the carousel; ``status`` toggles between
    'dark' (reached) and 'light' (upcoming) -- see WelcomeScreen.set_index."""
    status = OptionProperty('dark', options=('dark', 'light'))
class TutorialSkipButton(ButtonBehavior, Label):
    """Skip control; ``hide`` is set once the last slide is reached
    (see WelcomeScreen.update_skip_button)."""
    hide = BooleanProperty(False)
class HiddenButton(ButtonBehavior, Label):
    """
    A label that dispatches ``on_hidden_press`` for a "hidden" gesture:
    a double tap on non-Android platforms, or a press held longer than
    ``hold_threshold`` seconds otherwise.
    """

    __events__ = ('on_hidden_press',)
    # Minimum press duration (seconds) that counts as a hidden press.
    hold_threshold = NumericProperty(10.)
    # Timestamp of the last touch-down.  Robustness fix: give it a class
    # default so on_touch_up cannot hit an AttributeError when a touch-up
    # arrives without a preceding handled touch-down.
    ts = 0.

    def on_hidden_press(self):
        pass

    def on_touch_down(self, touch):
        if super(HiddenButton, self).on_touch_down(touch):
            from time import time
            self.ts = time()

    def on_touch_up(self, touch):
        if super(HiddenButton, self).on_touch_up(touch):
            # BUG fix: the original used `platform is not 'android'`, an
            # identity comparison against a string literal (unreliable and
            # a SyntaxWarning on modern Python); use `!=`.
            if platform != 'android' and touch.is_double_tap:
                self.dispatch('on_hidden_press')
            else:
                from time import time
                self.ts = time() - self.ts
                if self.ts > self.hold_threshold:
                    self.dispatch('on_hidden_press')
class WelcomeScreen(InteractiveScreen):
    """
    Carousel-based welcome/tutorial flow.

    Shows one TutorialImage per file in ``tutorial_path`` plus, for
    unauthenticated users, a final LinkedIn login slide.  Dispatches
    ``on_complete`` when the LinkedIn login handler finishes.
    """

    __events__ = ('on_complete',)

    carousel = ObjectProperty()
    progress_indicator = ObjectProperty()
    skip_button = ObjectProperty()

    def set_index(self, index):
        """Darken the progress dots up to ``index`` and sync the skip button."""
        if self.progress_indicator:
            pis = list(reversed(self.progress_indicator.children))
            for c in pis[:index + 1]:
                c.status = 'dark'
            for c in pis[index + 1:]:
                c.status = 'light'
        self.update_skip_button(index=index)

    def update_skip_button(self, index=None):
        """Hide the skip button on the last slide; log when it hides."""
        # BUG fix: the original used `index or self.carousel.index`, which
        # treated slide 0 as "no index given" and re-read the carousel.
        if index is None:
            index = self.carousel.index
        self.skip_button.hide = (index == len(self.carousel.slides) - 1)
        if self.skip_button.hide:
            from modules.core.android_utils import LogTestFairy
            LogTestFairy('Login Screen')

    def _linkedin_login_completed(self, *largs):
        """
        Handle the LinkedIn widget's completion: extract profile details
        (best-effort), persist them, then dispatch ``on_complete``.
        """
        user_profile = largs[1] if len(largs) > 1 else None
        if user_profile:
            from config import linkedin_ds
            industry = user_profile.get('industry', 'unknown')
            expertise = 'unknown'
            if user_profile.get('skills', None):
                try:
                    skills = user_profile.get('skills').get('values', None)
                    expertise = skills[0]['skill']['name']
                except Exception:
                    # Narrowed from a bare except; parsing is best-effort.
                    print('Error parsing linkedin skills -- %s' % user_profile)
            company = 'unknown'
            position = 'unknown'
            if user_profile.get('threeCurrentPositions', None):
                try:
                    positions = user_profile.get('threeCurrentPositions').get('values', None)
                    company = positions[0]['company']['name']
                    position = positions[0]['title']
                except Exception:
                    print('Error parsing linkedin company/position -- %s' % user_profile)

            def update(ds):
                ds.update({
                    'anonymous': anonymous_nick,
                    'industry': industry,
                    'company': company,
                    'position': position,
                    'expertise': expertise
                })
            linkedin_ds.update(update)
        # NOTE(review): dispatched even without a profile so the flow
        # always completes -- confirm this matches the intended behaviour.
        self.dispatch('on_complete')

    def on_pre_enter(self):
        from modules.core.android_utils import LogTestFairy
        LogTestFairy('Tutorial')
        if self.carousel:
            self.populate()
        else:
            # Widgets may not be bound yet; populate once both arrive.
            self.bind(
                carousel=self._populate_when_ready,
                progress_indicator=self._populate_when_ready)

    def _populate_when_ready(self, *largs):
        if self.carousel and self.progress_indicator:
            self.populate()

    def populate(self):
        # Populate only once.
        if not self.carousel.slides:
            self.populate_slides()
            self.populate_progress()

    def populate_slides(self):
        """Add one slide per tutorial image, plus a LinkedIn login slide."""
        # Renamed the loop variable: `file` shadowed the builtin.
        for file_name in sorted(listdir(tutorial_path)):
            self.carousel.add_widget(
                TutorialImage(
                    source=join(tutorial_path, file_name)))
        if not user_authenticated():
            linkedin = LinkedIn()
            linkedin.bind(on_complete=self._linkedin_login_completed)
            self.carousel.add_widget(linkedin)
        self.update_skip_button()

    def populate_progress(self):
        """Add one progress dot per slide; only the first starts dark."""
        first = True
        for c in self.carousel.slides:
            self.progress_indicator.add_widget(
                TutorialProgressImage(status='dark' if first else 'light'))
            first = False

    def on_leave(self, *args):
        # Note: a bug in kivy will cause this to throw an index exception
        if self.carousel:
            self.carousel.clear_widgets()

    def skip_to_last(self):
        """Jump straight to the final slide (best-effort)."""
        try:
            self.carousel.load_slide(self.carousel.slides[-1])
            self.set_index(len(self.carousel.slides) - 1)
        except Exception:
            # Carousel may be empty or not ready yet; ignore.
            pass

    def on_complete(self):
        # store the login keys only when we complete the linkedin authentication
        from utilities.auth_store import store_keys
        store_keys()
|
The voluble author would amount to little more than a footnote in the épopée Sartrienne were it not for one masterpiece: Shoah. After seeing the film, Jean Daniel, the elegant editor of the Nouvel Observateur, told its maker: “Cela justifie une vie”. Less directed than compiled, the film lasts nine hours and, for all the flair, persistence and courage involved in its composition, remains sui generis: to call it a work of art, as flatterers have, claims too much for its formal qualities and too little for its unblinking uniqueness. No other documentaries on the Holocaust (Alain Resnais’s Nuit et Brouillard of 1956 was the first and, in its tact, the most artful) can match Shoah’s implacable pursuit of the witnesses of what Raul Hilberg (an inspiring source) called, in his pioneering 1960 history of mechanized mass murder, “The Destruction of the European Jews”. Avoiding rhetoric and discounting the agony of the victims, Hilberg adopted Primo Levi’s tone, that of a “factory report”, and concentrated on the German organizational apparatus.
Lanzmann cleaves to a similar line, but holds tight to real people rather than to statistics. The (sometimes hidden) camera lingers, sometimes unsteadily, often artlessly, on faces and places, while the microphone picks up speech that, by its raw flow or sudden caesuras, reveals what was for so long unseen and unsaid. The horror grows and grows, unalleviated by sententious phrases or clever montage. Lanzmann’s thorny genius expressed itself, over a decade of assembly impeded by lack of funds, by threats and actual incidents of violence, and by the difficulty of locating survivors and killers, bystanders and escapees, in a work which at once bears a single signature and carries no evidence of having been rigged by a selfconscious auteur. Want of tact (even with regard to the bladders of the spectators) and unevenness of texture make Shoah a film that is never a movie. Not all memory’s children are muses.
|
import logging
import time
from pymarketcap import Pymarketcap
logger = logging.getLogger(__name__)
class CryptoFiat:
    """Cached price of one crypto-currency expressed in one fiat currency."""

    # Constants
    CACHE_DURATION = 6 * 60 * 60  # 6 hours

    def __init__(self, crypto_symbol: str, fiat_symbol: str, price: float) -> None:
        """
        Create an object that will contains the price for a crypto-currency in fiat
        :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)
        :param fiat_symbol: FIAT currency you want to convert to (e.g USD)
        :param price: Price in FIAT
        """
        # Public attributes (symbols are normalized to upper case).
        # The redundant ``= None`` pre-assignments that were immediately
        # overwritten have been removed.
        self.crypto_symbol = crypto_symbol.upper()
        self.fiat_symbol = fiat_symbol.upper()
        self.price = 0.0
        # Private attributes
        self._expiration = 0.0  # epoch seconds after which the price is stale
        self.set_price(price=price)

    def set_price(self, price: float) -> None:
        """
        Set the price of the Crypto-currency in FIAT and set the expiration time
        :param price: Price of the current Crypto currency in the fiat
        :return: None
        """
        self.price = price
        self._expiration = time.time() + self.CACHE_DURATION

    def is_expired(self) -> bool:
        """
        Return if the current price is still valid or needs to be refreshed
        :return: bool, true the price is expired and needs to be refreshed, false the price is
        still valid
        """
        return self._expiration - time.time() <= 0
class CryptoToFiatConverter(object):
    """Singleton converter from crypto-currency amounts to fiat values.

    Prices come from the CoinMarketCap API and are cached as CryptoFiat
    pairs so the API is not queried on every conversion.
    """
    __instance = None
    _coinmarketcap = None

    # Constants
    SUPPORTED_FIAT = [
        "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK",
        "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY",
        "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN",
        "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR", "USD"
    ]

    def __new__(cls):
        # Classic singleton: build the shared instance (and the API
        # client) only on the first call.
        if CryptoToFiatConverter.__instance is None:
            CryptoToFiatConverter.__instance = object.__new__(cls)
            try:
                CryptoToFiatConverter._coinmarketcap = Pymarketcap()
            except Exception:
                # Narrowed from BaseException (which also caught
                # SystemExit/KeyboardInterrupt). Without a client,
                # conversions fall back to 0.0.
                CryptoToFiatConverter._coinmarketcap = None
        return CryptoToFiatConverter.__instance

    def __init__(self) -> None:
        # BUG FIX: __init__ runs on *every* CryptoToFiatConverter() call
        # even though __new__ returns the shared instance, so the pair
        # cache used to be wiped each time. Initialize it exactly once.
        if not hasattr(self, '_pairs'):
            self._pairs = []

    def convert_amount(self, crypto_amount: float, crypto_symbol: str, fiat_symbol: str) -> float:
        """
        Convert an amount of crypto-currency to fiat
        :param crypto_amount: amount of crypto-currency to convert
        :param crypto_symbol: crypto-currency used
        :param fiat_symbol: fiat to convert to
        :return: float, value in fiat of the crypto-currency amount
        """
        price = self.get_price(crypto_symbol=crypto_symbol, fiat_symbol=fiat_symbol)
        return float(crypto_amount) * float(price)

    def get_price(self, crypto_symbol: str, fiat_symbol: str) -> float:
        """
        Return the price of the Crypto-currency in Fiat
        :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)
        :param fiat_symbol: FIAT currency you want to convert to (e.g USD)
        :return: Price in FIAT
        :raises ValueError: if ``fiat_symbol`` is not in SUPPORTED_FIAT
        """
        crypto_symbol = crypto_symbol.upper()
        fiat_symbol = fiat_symbol.upper()
        # Check if the fiat convertion you want is supported
        if not self._is_supported_fiat(fiat=fiat_symbol):
            raise ValueError('The fiat {} is not supported.'.format(fiat_symbol))
        # Get the pair that interest us and return the price in fiat
        for pair in self._pairs:
            if pair.crypto_symbol == crypto_symbol and pair.fiat_symbol == fiat_symbol:
                # If the price is expired we refresh it, avoid to call the API all the time
                if pair.is_expired():
                    pair.set_price(
                        price=self._find_price(
                            crypto_symbol=pair.crypto_symbol,
                            fiat_symbol=pair.fiat_symbol
                        )
                    )
                # return the last price we have for this pair
                return pair.price
        # The pair does not exist, so we create it and return the price
        return self._add_pair(
            crypto_symbol=crypto_symbol,
            fiat_symbol=fiat_symbol,
            price=self._find_price(
                crypto_symbol=crypto_symbol,
                fiat_symbol=fiat_symbol
            )
        )

    def _add_pair(self, crypto_symbol: str, fiat_symbol: str, price: float) -> float:
        """
        Cache a new crypto/fiat pair at the given price.
        :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)
        :param fiat_symbol: FIAT currency you want to convert to (e.g USD)
        :return: price in FIAT
        """
        self._pairs.append(
            CryptoFiat(
                crypto_symbol=crypto_symbol,
                fiat_symbol=fiat_symbol,
                price=price
            )
        )
        return price

    def _is_supported_fiat(self, fiat: str) -> bool:
        """
        Check if the FIAT your want to convert to is supported
        :param fiat: FIAT to check (e.g USD)
        :return: bool, True supported, False not supported
        """
        fiat = fiat.upper()
        return fiat in self.SUPPORTED_FIAT

    def _find_price(self, crypto_symbol: str, fiat_symbol: str) -> float:
        """
        Call CoinMarketCap API to retrieve the price in the FIAT
        :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)
        :param fiat_symbol: FIAT currency you want to convert to (e.g USD)
        :return: float, price of the crypto-currency in Fiat (0.0 when the
            API call fails -- deliberate best-effort behaviour)
        :raises ValueError: if ``fiat_symbol`` is not in SUPPORTED_FIAT
        """
        # Check if the fiat convertion you want is supported
        if not self._is_supported_fiat(fiat=fiat_symbol):
            raise ValueError('The fiat {} is not supported.'.format(fiat_symbol))
        try:
            return float(
                self._coinmarketcap.ticker(
                    currency=crypto_symbol,
                    convert=fiat_symbol
                )['price_' + fiat_symbol.lower()]
            )
        except Exception:
            # Narrowed from BaseException; API/network failures (or a
            # missing client) degrade to a 0.0 price rather than crashing.
            return 0.0
|
PURPOSE: To establish and communicate expectations for both faculty and trainees regarding participation in the educational program.
POLICY: Ongoing participation in the educational program is the mutual responsibility of the faculty and the trainees. The following expectations are outlined to enhance the educational process.
•I. Attendance of Teaching Conferences: Wednesday conferences are an integral component of the educational program. These conferences are compulsory for all residents. Attendance will be documented by personnel signature recorded on the sign-in sheet. The absence of a signature will be regarded as an unexcused absence. Failure to comply with the minimal attendance standards set forth below or the failure to demonstrate an effort to attend conferences may result in probationary action for trainees.
Exposure to the continuity established in the preoperative evaluation and postoperative visit(s) is essential to the training of surgical housestaff. For this reason, all trainees to Harbor UCLA Medical Center and West Los Angeles VA Medical Center will attend the outpatient clinic for their service a minimum of one day per week (with the exception of the third Wednesday for the UCLA Neurosurgery education day), unless the trainees' immediate attention is required for other patient care.
All trainees, regardless of institutional assignment, are expected to attend the operating room.
Prior to the operation, make themselves familiar with the patient's history including diagnostic studies, and have an understanding of the indications for surgery.
Document their involvement with a preoperative note.
Consult with the attending physician regarding the operative care to be delivered.
Personally participate in the operation.
Document a summary of the operation in the form of a brief, handwritten operative note.
Personally participate in the immediate in-hospital postoperative care of the patient and document that participation in the form of a postoperative note in the medical record within the first 48 hours after operation.
Participation in the outpatient postoperative care of the patient by personally seeing the patient in the outpatient setting and determining follow-up care.
All procedures must be performed in the presence of a supervising resident physician or an attending physician.
Each trainee is expected to learn the indications, technical maneuvers, and possible complications of each procedure.
Trainees will provide the patient with informed consent regarding the procedure, and document, either by signed consent form, or by handwritten note that consent was given.
Trainees will document performance of the procedure with a handwritten note in the medical record noting the procedure, indication(s), and their supervising resident or attending physician, anesthetic used, and any complications.
Trainees will maintain a log of procedures completed to document his/her activity.
Balances are cleared annually. Meal cards are for UCLA Clinical rotations only. Some funds are assigned to each resident for vacation/emergency coverage. Funds may not be replenished. There are separate meal programs at Harbor-UCLA and West Los Angeles Veterans Administration.
2) $500 annual educational gift beginning in January of the NS1 year. The funds are intended for books but may be used for other neurosurgical essentials with prior approval from the program director.
POLICY: All residents in the Neurological Surgery Training Program are required to complete the ABNS Primary Examination the last Saturday of March each year until the exam has been passed for credit. Exam must be passed prior to program completion. Residents must take exam as 'Self-Assessment' and receive a passing score prior to taking exam for credit.
Residents take exam for Self-Assessment beginning in the NS1 year.
Board Review Courses require Program Director approval.
Residents will receive one additional academic week (in addition to contracted vacation time) during Chief Year if they have scored 85% or higher on the ABNS exam. This week will be scheduled to take place during the UCLA Chief Resident rotation with internal cross-coverage by the UCLA resident team.
The Program Director must approve exceptions to the Board Exam requirement.
The American Board of Neurological Surgeons mandates the production of scholarly work. Creative and productive scholarly work is expected of all residents. Each senior resident should work towards the production of one peer-reviewed manuscript per year.
Linda M. Liau, M.D., Ph.D. is the chair of the Resident Research Committee. Residents are required to present plans for research to Dr. Liau and the research committee. Research may be done within the Department of Neurosurgery. It is also possible to work with affiliated specialties to prepare critical literature reviews, laboratory experiments, and manuscripts.
Residents are strongly encouraged to apply for educational grants in support of their research activity. The faculty mentor is expected to support academic travel.
One resident may apply/be accepted to the UCLA STAR Ph.D. Program for a period not to exceed three (3) years.
PURPOSE: To ensure adequate operative experience and documentation of cases.
POLICY: All residents in the Neurological Surgery Training Program are required to enter cases in the ACGME Case Log system in a timely fashion. Residents must complete a minimum number of cases to advance to the next year or have a favorable review. A deviation of 20% fewer cases than the minimum may lead to disciplinary action or dismissal.
The faculty is responsible for ensuring that residents proceed through the training program in a satisfactory manner. The residents are evaluated clinically based on the six competencies: patient care, medical knowledge, professionalism, interpersonal and communication skills, practice-based learning, and systems based practices.
Faculty and senior residents, as indicated in the lines of supervision, provide formative comments daily throughout clinical rotations. The faculty complete summative evaluations at the end of each rotation via the Verinform electronic system. As part of the residents' 360° evaluation process, clinical staff and patients also submit evaluations.
Any resident receiving two or more unsatisfactory evaluations will receive an unsatisfactory evaluation for that category. The Program Director reviews unsatisfactory results and determines if disciplinary action, including remediation to achieve a satisfactory result, is necessary. The program director will also address responses from two or more rotational attendings of "cannot evaluate due to insufficient contact"
The Program Director reviews a composite of each resident's evaluations with each resident during his individual, semi-annual meeting. The residents are expected to have a continual maturation of clinical skills and to have completed academic requirements in a timely fashion. The Program Director also reviews the resident's performance on the American Board of Neurological Surgery (ABNS) In-Service exam, the residents' surgical and duty hour logs, and the residents' portfolio with him during each semi-annual meeting. Ongoing research activity and future rotations are also discussed during this evaluative and mentoring meeting.
Residents evaluate attendings teaching skills at the end of each rotation via the Verinform electronic system. The evaluation is based on a resident's clinical interaction in the operating room, ward, and clinic; and the faculty member's lecture performance, scholastic achievement, and overall performance.
The Chair and Program Director reviews the results of these evaluations with each faculty member during an annual review meeting. Meetings are scheduled on a more frequent basis as needed.
The residents evaluate the program during their monthly Residents' Meeting. This meeting is led by the UCLA Chief Resident and occurs as the first meeting on each education day to insure maximal resident attendance.
The Program Director also has a monthly meeting with the residents on Education Day. During these meetings, residents review aspects of training and patient care at each of our four training locations.
The residents have a confidential, semi-annual retreat with the resident program ombudsman. Anonymous meeting notes are distributed to both faculty and residents highlighting attributes of the training program, documenting concerns, and listing requests for programmatic updates.
Faculty evaluate the training program during the monthly faculty meetings and also submit a confidential, written evaluation of the training program annually via an electronic survey.
Beginning in 2008, resident alumni will also complete formal evaluations of the training program.
The program reviews the percentage of alumni continuing into academia and passing the ABNS Oral Exam as part of the evaluative process.
The Department of Neurosurgery requires residents to attend all Education Day conferences (third Wednesday of the month) and a minimum of seventy-five percent (75%) of all scheduled didactic core conferences. Core conferences are required teaching in addition to morning reporting and rounds.
The Education Office must have advance notice for excused absences (i.e. vacation, post-call). All other absences without proper permission will be listed as non-excused and count against the required minimum attendance.
The Division has a policy communicated to all Attendings that residents are protected from clinical obligations to attend these conferences.
Official attendance is tracked using sign-in sheets and is periodically reviewed by the Program Director. Non-satisfactory attendance will be addressed by the Program Director during the resident's semi-annual progress meeting.
The conference calendar is created and maintained by the Education Office. A monthly schedule is both forwarded to residents and faculty via email and posted in the Residents' Library. Weekly updates are forwarded via email.
All Neurosurgery Residents are required to attend the monthly Quality Assurance/Morbidity and Mortality Conference. Attendance is also required for rotating interns, sub-interns, and visiting medical students.
Interns, Sub-Interns, and rotating medical students are required to attend Neurocritical Care Rounds on Wednesday afternoons (2pm). In-house UCLA NS1 residents are required to attend this conference if not attending to an emergency clinical situation or not needed in the operating room.
To support conference attendance at other required rotations (i.e. Neurology, Pathology, Functional, Radiology, and Interventional Radiology), credit will be given for attendance if documentation is forwarded to the Resident Program Administrator. Credit will also be given for non-UCLA lectures if appropriate documentation (i.e. sign-in sheets) is forwarded to the Education Office. It is the responsibility of the resident to forward this information to the Program Administrator.
The Harbor Chief Resident and Harbor Senior Resident are required to attend the Friday morning teaching rounds, radiology lecture, neuropathology lecture, and noontime journal club.
Santa Monica Spine Conference occurs Monday evenings 5:00PM. The Santa Monica Spine Senior is required to attend. All residents on elective and research rotations are encouraged to attend.
UCLA Neurology Conferences occur on Wednesday mornings. Attendance is required for residents on the Neurology Rotation. Neurosurgery Residents on the Neurology rotation are still required to attend Education Day conferences that do not conflict with the Neurology Teaching Rounds.
There are also several additional meetings/seminars required throughout the academic year. These include R2 Orientation, Skull-Base Lab lectures, and other courses as directed by the Resident Program Director.
The UCLA Chief Resident may be excused from morning conferences to begin first start cases.
Residents who have successfully completed the American Board of Neurological Surgery Exam for credit are not required to attend the Board Preparation lectures.
Post-call residents (UCLA service) are required to attend the 7-9am lectures but are excused from the remainder of the Education or academic day lectures. Sign-out must occur before morning conference. Residents who are post-call from moonlighting activities are required to attend conferences. Therefore, UCLA residents should not be scheduled for pre-Harbor call or moonlighting on Tuesday nights.
Residents may be excused with permission from the Program Director, Vice-Chairs of Clinical Affairs, or the Chief Resident (UCLA or Harbor) to attend to emergent/urgent clinical service needs.
The Harbor Chief Resident is excused from conferences between 9am-12n on Education Days (Surgical Science, Neuroscience, and Basic Science) to run the Harbor Neurosurgery Clinic.
Joint Neurosurgery/Orthopedic teaching conference reviewing spinal surgical cases.
Required for Santa Monica Spine Resident. All residents are encouraged to attend.
This conference is an in-depth presentation of one or more cases by the neurosurgery residents. The neuroradiologic and neuropathologic findings of the case are discussed, and the historical background of the field and the relevant literature are presented. Residents and faculty are required to attend.
Faculty Host: Nader Pouratian, M.D.
Resident attendance required. Faculty attendance encouraged.
Faculty Coordinators: Ulrich Batzdorf, M.D. and Noriko Salamon, M.D.
Four to five brief case presentations of ongoing patient treatment.
Faculty Moderator: Dennis Malkasian, M.D., Ph.D.
Faculty lecture highlighting the decision-making process of determining surgical or alternative treatment plans for patient care.
Faculty Coordinator: David Hovda, Ph.D.
Faculty Coordinator: Marvin Bergsneider, M.D.
Multi-disciplinary conference reviewing clinical presentation, imaging, operative findings, and pathology for pediatric brain and spine nerve tumor cases. Clinical options are discussed and decisions are made regarding clinical care. Residents are encouraged to attend.
Faculty Coordinator: Linda Liau, M.D., Ph.D.
Faculty Coordinators: Marvin Bergsneider, M.D. and Anthony Heaney, M.D.
Faculty Coordinator: Paul Vespa, M.D.
Discussion of epilepsy patients and their treatment plans by the multidisciplinary team.
Multidisciplinary forum for discussing surgical management of refractory epilepsy patients.
Faculty Coordinator: Itzhak Fried, M.D.
Faculty Coordinator: Duncan McBride, M.D.
POLICY: Attendance to this conference is considered a compulsory component of participation in the educational program for both faculty and residents alike. Confidentiality regarding the discussion conducted during this conference is expected.
1) It is the administrative responsibility of the service chief or senior resident to report the required data on a monthly basis to the Neurosurgery Education Office. The data required is the average daily census of the service, the number of hospital admissions and discharges to and from the service, the total number of inpatient and outpatient operative cases performed, the number of reportable events occurring, and the number of deaths occurring.
2) The data accrual period is the seven (7) calendar days preceding the date of the conference. In the event that a conference is postponed, or cancelled, the data must still be reported. Any cases previously reported since the last conference was conducted, but not presented, may still be selected for presentation at the discretion of the conference moderator.
a) Unplanned need for secondary operation, whatever the nature.
b) Unanticipated admissions to any acute care facility within 30 days of discharge.
c) Unanticipated significant escalation of care required.
e) Event Codes: see attachment regarding classification of cases presented at conference.
4) The chief or senior resident involved in the critical aspects of the perioperative care, primarily the decision for operation and the operation itself will make the presentation of the case. This includes clinical training years IV to VI only. In the event, that the involved resident is no longer on service, that individual will still be responsible for the presentation of the case. The appropriate clinical data and imaging studies will be available for presentation, and that the presenting resident is expected to have reviewed the case, and considered the possible causes of the complication and potential approaches in which the complication could be avoided in the future.
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
# depends_on = (
# ("cyclope", "0015_fk_2_M2M_pictures_data"),
# )
    def forwards(self, orm):
        """Apply the migration: drop the Article.picture foreign-key column."""
        # Deleting field 'Article.picture'
        db.delete_column('articles_article', 'picture_id')
def backwards(self, orm):
# Adding field 'Article.picture'
db.add_column('articles_article', 'picture',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='picture', null=True, to=orm['medialibrary.Picture'], on_delete=models.SET_NULL, blank=True),
keep_default=False)
models = {
'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'articles.article': {
'Meta': {'ordering': "('-creation_date', 'name')", 'object_name': 'Article'},
'allow_comments': ('django.db.models.fields.CharField', [], {'default': "'SITE'", 'max_length': '4'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Author']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_index': 'True'}),
'pictures': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'pictures'", 'symmetrical': 'False', 'to': "orm['medialibrary.Picture']"}),
'pretitle': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_author': ('django.db.models.fields.CharField', [], {'default': "'SITE'", 'max_length': '6'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Source']", 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'collections.categorization': {
'Meta': {'ordering': "('order', '-id')", 'object_name': 'Categorization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categorizations'", 'to': "orm['collections.Category']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'collections.category': {
'Meta': {'unique_together': "(('collection', 'name'),)", 'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': "orm['collections.Collection']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '250', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['collections.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'collections.collection': {
'Meta': {'object_name': 'Collection'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'default_list_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '250', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'navigation_root': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'blank': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyclope.author': {
'Meta': {'ordering': "['name']", 'object_name': 'Author'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'origin': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'blank': 'True'})
},
'cyclope.relatedcontent': {
'Meta': {'ordering': "['order']", 'object_name': 'RelatedContent'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'other_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'other_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_rt'", 'to': "orm['contenttypes.ContentType']"}),
'self_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'self_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_lt'", 'to': "orm['contenttypes.ContentType']"})
},
'cyclope.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'})
},
'medialibrary.picture': {
'Meta': {'object_name': 'Picture'},
'allow_comments': ('django.db.models.fields.CharField', [], {'default': "'SITE'", 'max_length': '4'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Author']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '100'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_author': ('django.db.models.fields.CharField', [], {'default': "'SITE'", 'max_length': '6'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Source']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['articles']
|
> WebM or other non-encumbered formats is a future activity.
stuff into the platform anyway for compat reasons.
> partners and DRM licensors.
issue of what CDMs would *actually be used* with this proposal.
definitely very relevant for those of us actually implementing it.
> architecture and JS looking interfaces.
> then they won't be able to use those features.
<video> now; multiple examples show that it's completely viable.
> filtering" is factually wrong.
rendering pipeline, providing the video to the browser as an overlay.
|
import sys
import re
class SyntacticError(Exception):
    """Raised when user-typed Turing-machine input fails validation.

    The offending description is kept on ``message`` and also passed to
    ``Exception`` so that ``str(exc)`` and tracebacks display it (the old
    code skipped ``Exception.__init__``, leaving ``str(exc)`` empty).
    """

    def __init__(self, message):
        super(SyntacticError, self).__init__(message)
        self.message = message
# Validation patterns for raw terminal input.
# Tape: parenthesised comma-separated cells whose second field is a single
# character, e.g. "(a,b,c)".
user_tape_regex = r'^\s*\(.*,.,.*\)\s*$'
# States: comma-separated non-negative integers, e.g. "0,1,2".
user_states_regex = r'^([0-9]*,)*[0-9]+$'
# Initial state: one non-negative integer.
user_initial_regex = r'^[0-9]+$'
# Rule: "(state,X,Y,Z,next_state,Move)" with Move in Left/None/Right;
# X/Y/Z are single characters (presumably read/write symbols — confirm
# against the interpreter that consumes these rules).
user_rule_regex = r'^\([0-9]{1,},.,.,.,[0-9]{1,},(Left|None|Right)\)$'
def parse_tape_from_terminal(input_tape):
    """Validate and split a user-typed tape such as ``(a,b,c)``.

    Returns the list of cell symbols, or raises SyntacticError when the
    line does not match the expected parenthesised form.
    """
    cleaned = input_tape.strip('\n ')
    matched = re.match(user_tape_regex, cleaned)
    if matched is None:
        raise SyntacticError('There is syntactic error with this tape !')
    return matched.group().strip(')(').split(',')
def parse_states_from_terminal(input_states):
    """Parse a comma-separated list of state numbers into a set of ints.

    Raises SyntacticError when the line is not a plain comma list of
    non-negative integers.
    """
    matched = re.match(user_states_regex, input_states.strip('\n'))
    if matched is None:
        raise SyntacticError('There is syntactic error with these states !')
    # strip('}') tolerates a stray closing brace from set-style notation.
    return {int(token) for token in matched.group().strip('}').split(',')}
def parse_initial_from_terminal(input_initial_state):
    """Parse the initial state number typed by the user.

    Returns it as an int, or raises SyntacticError on malformed input.
    """
    matched = re.match(user_initial_regex, input_initial_state.strip('\n'))
    if matched is None:
        # NOTE(review): adjacent literals concatenate without a space, so
        # the message reads "...with theinitial state !" — kept verbatim.
        raise SyntacticError('There is syntactic error with the'
                             'initial state !')
    return int(matched.group())
def parse_rule_from_terminal(input_rule):
    """Parse a transition rule like ``(0,a,b,c,1,Right)``.

    Returns a six-element list with the two state fields converted to
    int; raises SyntacticError when the rule does not match the grammar.
    """
    matched = re.match(user_rule_regex, input_rule)
    if matched is None:
        raise SyntacticError('There is syntactic error with this rule !')
    fields = matched.group().strip('\n)(').split(',')
    return [int(fields[0])] + fields[1:4] + [int(fields[4]), fields[5]]
|
Elected under the Young Progressives Party (YPP) on Feb. 23, Ubah is one of the 11 senators-elect who were with Lawan at an interactive session with newsmen in Abuja.
Lawan, who represents Yobe North in the 8th Senate under the ruling All Progressives Congress (APC), is seeking to lead the 9th Assembly as President of the Senate.
“So, I am free, and I am pitching tent with him because I believe he is the kind of leader we need in the 9th Senate,’’ Ubah said.
“It is a choice anybody has to make. I can’t be alone in the Senate. If any group comes up and wants to lead the Senate, it is a duty for me to look at them and see if I can pitch tent with them.
“I must join a group, which I see as progressive like us in YPP. Today, I have pitched tent with Ahmed Lawan,’’ he said.
“ I run the biggest platform in the downstream sector of the economy. If you look at sports, I don’t think any individual Nigerian has invested 10 per cent of what I have invested in Nigerian football in the last five years.
“I am sure you know I am also one of the biggest employers of labour in this country. So, I am coming to the senate with a lot of goodwill for the masses of the Nigerian and most especially for my senatorial district,‘’ he said.
|
# -*- coding: utf-8 -*-
##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from cStringIO import StringIO
from twext.python.filepath import CachingFilePath as FilePath
from twisted.trial.unittest import TestCase
from twistedcaldav.config import Config, ConfigDict
from twistedcaldav.stdconfig import NoUnicodePlistParser, PListConfigProvider,\
_updateDataStore, _updateMultiProcess, _updateUtilityLog
import twistedcaldav.stdconfig
import sys
import os
# A single non-ASCII value used throughout these tests to exercise the
# parser's unicode handling.
nonASCIIValue = "→←"
# Minimal plist whose only payload is the non-ASCII string above.
nonASCIIPlist = "<plist version='1.0'><string>%s</string></plist>" % (
    nonASCIIValue,
)
# A complete config plist embedding the non-ASCII value as DataRoot.
nonASCIIConfigPList = """
<plist version="1.0">
<dict>
<key>DataRoot</key>
<string>%s</string>
</dict>
</plist>
""" % (nonASCIIValue,)
class ConfigParsingTests(TestCase):
    """
    Tests to verify the behavior of the configuration parser.
    """

    def test_noUnicodePListParser(self):
        """
        L{NoUnicodePlistParser.parse} retrieves non-ASCII property list values
        as (UTF-8 encoded) 'str' objects, so that a single type is consistently
        used regardless of the input data.
        """
        parser = NoUnicodePlistParser()
        self.assertEquals(parser.parse(StringIO(nonASCIIPlist)),
                          nonASCIIValue)

    def test_parseNonASCIIConfig(self):
        """
        Non-ASCII <string>s found as part of a configuration file will be
        retrieved as UTF-8 encoded 'str' objects, as parsed by
        L{NoUnicodePlistParser}.
        """
        cfg = Config(PListConfigProvider({"DataRoot": ""}))
        tempfile = FilePath(self.mktemp())
        tempfile.setContent(nonASCIIConfigPList)
        cfg.load(tempfile.path)
        self.assertEquals(cfg.DataRoot, nonASCIIValue)

    def test_relativeDefaultPaths(self):
        """
        The paths specified in the default configuration should be interpreted
        as relative to the paths specified in the configuration file.
        """
        cfg = Config(PListConfigProvider(
            {"AccountingLogRoot": "some-path",
             "LogRoot": "should-be-ignored"}))
        cfg.addPostUpdateHooks([_updateDataStore])
        tempfile = FilePath(self.mktemp())
        tempfile.setContent("<plist version='1.0'><dict>"
                            "<key>LogRoot</key><string>/some/root</string>"
                            "</dict></plist>")
        cfg.load(tempfile.path)
        self.assertEquals(cfg.AccountingLogRoot, "/some/root/some-path")
        # Reloading with a different LogRoot must rebase the derived path.
        tempfile.setContent("<plist version='1.0'><dict>"
                            "<key>LogRoot</key><string>/other/root</string>"
                            "</dict></plist>")
        cfg.load(tempfile.path)
        self.assertEquals(cfg.AccountingLogRoot, "/other/root/some-path")

    def test_includes(self):
        """
        A plist named in another plist's Includes array is merged in, and
        values from the included file override the including file's
        defaults when derived paths are computed.
        """
        # Parent config: defines all roots and pulls in a child plist.
        plist1 = """
<plist version="1.0">
<dict>
<key>ServerRoot</key>
<string>/root</string>
<key>DocumentRoot</key>
<string>defaultdoc</string>
<key>DataRoot</key>
<string>defaultdata</string>
<key>ConfigRoot</key>
<string>defaultconfig</string>
<key>LogRoot</key>
<string>defaultlog</string>
<key>RunRoot</key>
<string>defaultrun</string>
<key>Includes</key>
<array>
<string>%s</string>
</array>
</dict>
</plist>
"""
        # Child config: overrides DataRoot only.
        plist2 = """
<plist version="1.0">
<dict>
<key>DataRoot</key>
<string>overridedata</string>
</dict>
</plist>
"""
        tempfile2 = FilePath(self.mktemp())
        tempfile2.setContent(plist2)
        tempfile1 = FilePath(self.mktemp())
        tempfile1.setContent(plist1 % (tempfile2.path,))
        cfg = Config(PListConfigProvider({
            "ServerRoot": "",
            "DocumentRoot": "",
            "DataRoot": "",
            "ConfigRoot": "",
            "LogRoot": "",
            "RunRoot": "",
            "Includes": [],
        }))
        cfg.addPostUpdateHooks([_updateDataStore])
        cfg.load(tempfile1.path)
        # The include's DataRoot override participates in derived paths.
        self.assertEquals(cfg.DocumentRoot, "/root/overridedata/defaultdoc")
        self.assertEquals(cfg.DataRoot, "/root/overridedata")

    def test_updateDataStore(self):
        """
        L{_updateDataStore} strips a trailing slash from ServerRoot.
        """
        configDict = {
            "ServerRoot": "/a/b/c/",
        }
        _updateDataStore(configDict)
        self.assertEquals(configDict["ServerRoot"], "/a/b/c")

    def test_updateMultiProcess(self):
        """
        L{_updateMultiProcess} derives Postgres.MaxConnections and
        Postgres.SharedBuffers from the computed process count.
        """
        def stubProcessCount(*args):
            # Pin the process count so the derived values are deterministic.
            return 3
        self.patch(twistedcaldav.stdconfig, "computeProcessCount", stubProcessCount)
        configDict = ConfigDict({
            "MultiProcess": {
                "ProcessCount": 0,
                "MinProcessCount": 2,
                "PerCPU": 1,
                "PerGB": 1,
            },
            "Postgres": {
                "ExtraConnections": 5,
                "BuffersToConnectionsRatio": 1.5,
            },
            "SharedConnectionPool": False,
            "MaxDBConnectionsPerPool": 10,
        })
        _updateMultiProcess(configDict)
        # Expected values follow stdconfig's sizing formula for 3 workers.
        self.assertEquals(45, configDict.Postgres.MaxConnections)
        self.assertEquals(67, configDict.Postgres.SharedBuffers)

    def test_updateUtilityLog(self):
        """
        L{_updateUtilityLog} names the utility log after the running
        executable, and roots it under LogRoot once paths are expanded.
        """
        configDict = {
            "ServerRoot": "/a/b/c/",
            "LogRoot": "Logs",
            "UtilityLogFile": "util.txt",
        }
        _updateUtilityLog(configDict)
        self.assertEquals(configDict["UtilityLogFile"], "{}.log".format(os.path.basename(sys.argv[0])))
        # After path expansion the log lives under ServerRoot/LogRoot.
        _updateDataStore(configDict)
        _updateUtilityLog(configDict)
        self.assertEquals(configDict["UtilityLogFile"], "/a/b/c/Logs/{}.log".format(os.path.basename(sys.argv[0])))
|
Today Poland celebrates 20 years of NATO membership. 2019 is unique – we also celebrate NATO’s 70th anniversary. Let’s make it a double celebration!
By the decision of the OSCE Permanent Council adopted on 29.03.2019, the mandate of the OSCE Special Monitoring Mission (SMM) in Ukraine has been extended by another 12 months until March 31, 2020. The SMM budget has been set at the level of 100.8 million EUR. The decision provides for further expansion of the SMM and allows for creation of up to 1522 positions.
58th Session of the Legal Subcommittee (LSC) of the UN Committee on the Peaceful Uses of Outer Space (COPOUS) chaired by Poland.
|
# -*- coding: UTF-8 -*-
import urllib2
import cookielib
import urllib
import httplib2
import poster
import json
import sys
import base64
import requests
reload(sys)
sys.setdefaultencoding('utf-8')
class HttpLibrary(object):
    """Http Client"""
    # Connection settings are module-level globals shared by every keyword;
    # they can be swapped at runtime via web_environment_config().
    global host
    # host = 'https://my.qiye.yixin.im'
    host = 'https://super.qiye.yixin.im'
    # host = 'http://10.164.96.78'
    global port
    # port "0" appears to mean "no explicit port, use host as-is" —
    # confirm against __checkport().
    port = "0"
    # port = "8184"
    global ticket_path
    # Endpoint for fetching an app auth ticket (test environment address).
    ticket_path = "http://10.164.96.78:8184/app/system/getAppAuthTicketFromWeb"

    def __init__(self):
        # No per-instance state: all configuration lives in module globals.
        pass
def get_cookie(self, username, password):
"""Get cookie from username and password.
Examples:
| Get Cookie | username | password |
"""
print 'start to getcookie'
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
postdata = urllib.urlencode({'email': username, 'password': password})
# url = self.__checkport() + '/login/in' + '?' + postdata
url = self.__checkport() + '/login/in'
print 'HttpPost url is ' + url
try:
# response = opener.open(url)
response = opener.open(url, postdata)
except urllib2.URLError as e:
if hasattr(e, 'reason'):
print 'getcookie failed!'
print 'reason is ' + e.reason
elif hasattr(e, 'code'):
print 'getcookie failed!'
print 'reson is ' + e.reason + ',error code is '
else:
print 'getcookie failed! the error is not URLError and HTTPError'
else:
content = response.read()
print 'get cookie sussessful,getcookie response is ' + str(content).decode('utf-8')
return response.info()['Set-Cookie']
def get_admin_cookie(self, username, password):
"""Get admin cookie from username and password.
Examples:
| Get Admin Cookie | username | password |
"""
print 'start to getadmincookie'
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
postdata = urllib.urlencode({'account': username, 'password': password})
url = 'https://super.qiye.yixin.im' + '/checkLogin?'
print 'HttpPost url is ' + url
try:
response = opener.open(url, postdata)
except urllib2.URLError as e:
if hasattr(e, 'reason'):
print 'getadmincookie failed!'
print 'reason is ' + e.reason
elif hasattr(e, 'code'):
print 'getadmincookie failed!'
print 'reson is ' + e.reason + ',error code is '
else:
print 'getadmincookie failed! the error is not URLError and HTTPError'
else:
content = response.read()
print 'get admin cookie sussessful,getcookie admin response is ' + str(content).decode('utf-8')
return response.info()['Set-Cookie']
def web_get(self, path, parameter, cookie):
"""Issues a HTTP GET request,parameter should be a python dict,this method return a string object.
Examples:
| ${res} | WEB Get | /foo/bar.do | {'foo': '1','bar': '2'} | cookie |
"""
if parameter == 'None':
url = self.__checkport() + path
else:
# url = self.__checkport() + path + '?' + str(self.__generate_url(parameter))
url = self.__checkport() + path + '?' + str(self.__encodepara(parameter))
print 'HttpGet request url is ' + url
res = urllib2.Request(url)
res.add_header('Accept', 'application/json')
res.add_header('Content-Type', 'application/x-www-form-urlencoded')
res.add_header('Cookie', cookie)
try:
response = urllib2.urlopen(res)
except urllib2.URLError as e:
if hasattr(e, 'reason'):
print 'send HttpGet failed!'
print 'reason is ' + e.reason
elif hasattr(e, 'code'):
print 'send HttpGet failed!'
print 'reason is ' + e.reason + ',error code is ' + e.code
else:
print 'send HttpGet failed! the error is not URLError and HTTPError'
else:
info = self.__replace_null(response.read())
print 'HttpGet response is ' + str(info)
return info.decode('utf-8')
def web_post(self, path, para, data, cookie, uid=''):
"""Issues a HTTP POST request,parameter should be a python dict,data is post entity, this method return a string object.
Examples:
| ${res} | WEB POST | /foo/bar.do | {'foo': '1','bar': '2'} | {"foo": {"bar": [1,2,3]}} | cookie |
| ${res} | WEB POST | /foo/bar.do | {'foo': '1','bar': '2'} | None | cookie |
"""
http = httplib2.Http()
headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': cookie,
'uid': uid}
headers1 = {'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': cookie, 'uid': uid}
if para == 'None':
if "http" in path:
url = path
print "chenyazhi test url: ", url
else:
url = self.__checkport() + path
else:
# url = self.__checkport() + path + '?' + str(self.__generate_url(para))
url = self.__checkport() + path + '?' + str(self.__encodepara(para))
print 'HttpPost url is ' + url
try:
if data == 'None':
http = httplib2.Http(".cache", disable_ssl_certificate_validation=True)
response, content = http.request(url, 'POST', headers=headers)
res_content = self.__replace_null(content)
print 'send HttpPost successful! content is ' + res_content
return res_content.decode('utf-8')
else:
if type(eval(data)) == dict:
http = httplib2.Http(".cache", disable_ssl_certificate_validation=True)
response, content = http.request(url, 'POST', headers=headers1, body=json.dumps(eval(data)))
res_content = self.__replace_null(content)
print 'send HttpPost successful! content is ' + res_content
return res_content.decode('utf-8')
else:
print 'please confirm data type,data is not json'
except Exception, e:
raise e
def __generate_url(self, parameter):
    """Build a 'k1=v1&k2=v2' query string from a dict-literal string."""
    mapping = eval(parameter)
    # Joining with '&' is equivalent to the old append-then-trim loop.
    return '&'.join(key + '=' + mapping.get(key) for key in mapping.keys())
def web_delete(self, path, parameter, data, cookie):
"""Issues a HTTP DELETE request,parameter should be a python dict,data is delete entity, this method return a string object.
Examples:
| ${res} | WEB DELETE | /foo/bar.do | {'foo': '1','bar': '2'} | {"foo": {"bar": [1,2,3]}} | cookie |
| ${res} | WEB DELETE | /foo/bar.do | None | {"foo": {"bar": [1,2,3]}} | cookie |
"""
headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': cookie}
headers1 = {'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': cookie}
if parameter == 'None':
url = self.__checkport() + path
else:
# url = self.__checkport() + path + '?' + str(self.__generate_url(parameter))
url = self.__checkport() + path + '?' + str(self.__encodepara(parameter))
print 'HttpDelete url is ' + url
if data == 'None':
request = urllib2.Request(url, headers=headers)
else:
if type(eval(data)) == dict:
request = urllib2.Request(url, data=urllib.urlencode(data), headers=headers1)
else:
print 'please confirm data type,data is not json'
request.get_method = lambda: 'DELETE'
opener = urllib2.build_opener()
try:
# response = urllib2.urlopen(request)
response = opener.open(request)
except Exception, e:
raise e
else:
info = self.__replace_null(response.read())
print 'HttpDelete response is ' + info
return info.decode('utf-8')
def web_put(self, path, parameter, data, cookie):
"""Issues a HTTP PUT request,parameter should be a python dict,data is put entity, this method return a string object.
Examples:
| ${res} | WEB PUT | /foo/bar.do | {'foo': '1','bar': '2'} | {"foo": {"bar": [1,2,3]}} | cookie |
| ${res} | WEB PUT | /foo/bar.do | {'foo': '1','bar': '2'} | None | cookie |
"""
headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': cookie}
headers1 = {'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': cookie}
if parameter == 'None':
url = self.__checkport() + path
else:
# url = self.__checkport() + path + '?' + str(self.__generate_url(parameter))
url = self.__checkport() + path + '?' + str(self.__encodepara(parameter))
print 'HttpPut url is ' + url
http = httplib2.Http()
try:
if data == 'None':
http = httplib2.Http(".cache", disable_ssl_certificate_validation=True)
response, content = http.request(url, 'PUT', headers=headers)
elif data != 'None':
if type(eval(data)) == dict:
http = httplib2.Http(".cache", disable_ssl_certificate_validation=True)
response, content = http.request(url, 'PUT', headers=headers1, body=json.dumps(eval(data)))
else:
print 'please confirm data type,data is not json'
else:
info = self.__replace_null(str(content))
print 'Send HttpPut successful,content is ' + info
return info.decode('utf-8')
except Exception, e:
raise e
def web_post_file(self, path, parameter, entity, cookie):
"""Issues a HTTP POST FILE request,url is the URL relative to the server root,parameter should be a python dict,this method return a string object.
Examples:
| ${res} | WEB POST FILE | https://b.yixin.im/addCodeConf.p | {'file':open('Resources/Material/codeConf.csv','rb'),'name':'text码活动ffd'}| cookie |
"""
if parameter == 'None':
url = self.__checkport() + path
else:
url = self.__checkport() + path + '?' + str(self.__encodepara(parameter))
opener = poster.streaminghttp.register_openers()
datagen, headers = poster.encode.multipart_encode(eval(entity))
res = urllib2.Request(url, datagen, headers)
res.add_header('Cookie', cookie)
try:
response = urllib2.urlopen(res)
except Exception, e:
raise e
else:
info = self.__replace_null(response.read())
print 'send file successful,http response is ' + info
return info.decode('utf-8')
def web_post_filebyte(self, path, para, entity):
"""this keyword is for openplatform to post file.
Examples:
| ${res} | WEB POST FILEBYTE | /cgi-bin/file/upload | {'access_token':'ACCESS_TOKEN'} | {'content':'Resources/Material/logo.jpg','type':'jpg'}
"""
if type(eval(entity)) != dict:
print 'entity must be dict'
return
else:
entitydict = eval(entity)
filename = entitydict['content']
f = open(filename, 'rb')
fbyte = f.read()
enbyte = base64.b64encode(fbyte)
entitydict['content'] = enbyte
res = self.web_post(path, para, str(entitydict), 'None')
return res
    def __replace_null(self, response):
        # Make a raw JSON response eval()-able as a Python literal:
        # json.dumps() wraps the whole payload into one quoted string literal,
        # and the JSON keywords null/false/true (which mean nothing to
        # Python 2's eval) are rewritten into the quoted tokens
        # "null"/"false"/"true" before eval() parses the string back.
        # NOTE(review): eval() on server-controlled text is unsafe in general;
        # presumably tolerated here because responses come from in-house
        # services — confirm before reusing against external endpoints.
        strres = json.dumps(response, ensure_ascii=False)
        return eval(strres.replace('null', '\\"null\\"').replace('false', '\\"false\\"').replace('true', '\\"true\\"'))
def web_environment_config(self, h, p):
"""Set HTTP Request host and port,host and port is global variable.
host default value is https://b.yixin.im,port default value is 0.
Examples:
| WEB Environment Config| host | port |
"""
global host
global port
host = h
port = p
print 'host is ' + h
print 'port is ' + str(p)
def __checkport(self):
global host
global port
if port == "0":
url = host
else:
url = host + ':' + str(port)
return url
def __encodepara(self, para):
encodepara = urllib.urlencode(para)
return encodepara
def web_formdatapost(self, path, para, data, cookie):
http = httplib2.Http()
headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': cookie}
if para == 'None':
url = self.__checkport() + path
else:
# url = self.__checkport() + path + '?' + str(self.__generate_url(para))
url = self.__checkport() + path + '?' + str(self.__encodepara(para))
print 'HttpPost url is ' + url
try:
http = httplib2.Http(".cache", disable_ssl_certificate_validation=True)
response, content = http.request(url, 'POST', headers=headers, body=data)
res_content = self.__replace_null(content)
print 'send HttpPost successful! content is ' + res_content
return res_content.decode('utf-8')
except Exception, e:
raise e
def web_get_oauth(self, my_qiye_url, cookie, appKey, uid):
'''
内部系统访问外部应用接入OAuth免登
使用时,需要开始VPN
:param my_qiye_url: 未跳转前的url,如重要通知:"https://my.qiye.yixin.im/app/manageUrl?appId=613&url=https://inotice.qiye.yixin.im/manage/index"
:param cookie: 后台普通管理员的cookie
:param appKey: 大后台查看appKey
:param uid: 后台普通管理员的uid,和获取cookie的管理员同一个
:return: 带code的url
Examples:
| Web Get Oauth| my_qiye_url | cookie | appKey | uid |
'''
global ticket_path
###获取url值###
redirect_uri = self.get_id_from_url(my_qiye_url, "url")
###获取内部Web免登票据###
# path = "http://10.164.96.78:8184/app/system/getAppAuthTicketFromWeb"
para = 'None'
data = '{"appid": "' + appKey + '"}'
res_ticket = self.web_post(ticket_path, 'None', data, cookie, uid)
st = json.loads(res_ticket).get('result').get('st')
oauth_url = "https://oauth-test.qiye.yixin.im/authorize?response_type=code&client_id=" + appKey + "&st=" + st + "&redirect_uri=" + redirect_uri
url_code = requests.get(oauth_url).url
return url_code
def get_id_from_url(self, url, id):
"""
获取url里的关键字的值.
比如 url="https://my.qiye.yixin.im/app/manageUrl?appId=613&url=https://inotice.qiye.yixin.im/manage/index"
需要获取appId的值,在id处传入参数 "appId"
"""
if "?" not in url:
print "The url is indissociable "
else:
spliturl = url.split("?")
url_body = spliturl[1].split("&")
print url_body
for r in url_body:
if id in r:
id_long = len(id)
print r[id_long + 1:]
return r[id_long + 1:]
print "There have not " + id
def get_url_cookie(self, url):
print 'start to getcookie'
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
print 'HttpPost url is ' + url
try:
response = opener.open(url)
except urllib2.URLError as e:
if hasattr(e, 'reason'):
print 'getcookie failed!'
print 'reason is ' + e.reason
elif hasattr(e, 'code'):
print 'getcookie failed!'
print 'reson is ' + e.reason + ',error code is '
else:
print 'getcookie failed! the error is not URLError and HTTPError'
else:
content = response.read()
print 'get cookie sussessful,getcookie response is ' + str(content).decode('utf-8')
return response.info()['Set-Cookie']
# Manual smoke-test entry point; every call is kept commented out so that
# importing or running this module triggers no network traffic.
if __name__ == '__main__':
    pass
    # h = HttpLibrary()
    # cookie = h.get_admin_cookie("numen_dev@163.com", "Admin123")
    # print cookie
    # cookie = h.get_cookie("chenyazhi@yixin.im", "Abc123456")
    #
    # r = h.web_get_oauth("https://my.qiye.yixin.im/app/manageUrl?appId=613&url=https://inotice.qiye.yixin.im/manage/index", cookie, "bossnotice", "130")
    #
    # ### Administrator login ###
    # r = h.web_post("/checkLogin", "{'account':'chenyazhi@yixin.im','password':'Abc123456'}", "None", "NTESkolibri-adminSI=1658DE8D79232165A1E7A4AD47C77A79.hzabj-kolibri-1.server.163.org-8016; Path=/; HttpOnly")
    # print r
    # web_get(self,path,parameter,cookie):
    # print "!!!!!!!!!!!!!!!!!!!!!"
    # r = h.web_get('/smsquota/getCompanyInfo','{"domain":"yixin.im"}',cookie)
    # print r
    # h.get_id_from_url("https://my.qiye.yixin.im/app/manageUrl?appId=613&url=https://inotice.qiye.yixin.im/manage/index", "appId")
    # cookie = h.get_cookie('interfacetest@jiekou.com','Admin123')
    # h.web_post_filebyte('/cgi-bin/file/upload','{"access_token":"718ad40d0fbc4eba89621f86e0d23313"}','{"content":"Resources/Material/logo.jpg","type":"jpg"}')
|
The chaffinch is one of the most widespread and abundant birds in Britain and Ireland. Its patterned plumage helps it to blend in when feeding on the ground, and it becomes most obvious when it flies, revealing a flash of white on the wings and white outer tail feathers. It does not feed openly on bird feeders - it prefers to hop about under the bird table or under the hedge. You'll usually hear chaffinches before you see them, with their loud song and varied calls.
Chaffinches can be seen around the UK in woodlands, hedgerows, fields, parks and gardens anywhere.
Chaffinches can be seen all year round.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: creates two chooser test models.
    # Code is intentionally left byte-identical (comments only), because
    # Django relies on recorded migration content staying stable.

    dependencies = [
        # Both ForeignKey targets must exist before this migration runs.
        ('wagtailcore', '0002_initial_data'),
        ('tests', '0004_auto_20141008_0420'),
    ]

    operations = [
        migrations.CreateModel(
            name='PageChooserModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # b'help text' keeps the bytes literal emitted by the
                # Python-2-era migration generator.
                ('page', models.ForeignKey(help_text=b'help text', to='wagtailcore.Page')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='SnippetChooserModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('advert', models.ForeignKey(help_text=b'help text', to='tests.Advert')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
Grow your business, earn rewards and be part of the prestigious Avon Champions Club, the club for active Sales Leaders.
Avon Champions Club – with levels and rewards to support you and your business every step of the way, you won’t want to miss out on being a part of this exclusive club!
Run every quarter, the club has four levels: Contender, Pioneer, Premier and Champion. It is open to all Avon Sales Leaders, including trainees.
plus all you achieved at Contender level!
plus all you achieve at Pioneer Level!
Achieve four consecutive quarters at Champion Level and you’ll be invited to a national event.
Sales Leaders must be “Active” in every campaign. This means that they must recruit at least one new Representative into their Generation 1 team, who must then place an on-time LOA 1 order, in every campaign across the incentive period.
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-04-02 23:20
from collections import defaultdict
from hanlp.components.parsers.chu_liu_edmonds import decode_mst
import numpy as np
class Tarjan:
    """Computes Tarjan's algorithm for finding strongly connected components (cycles) of a graph"""

    def __init__(self, prediction, tokens):
        """
        Parameters
        ----------
        prediction : numpy.ndarray
            a predicted dependency tree where prediction[dep_idx] = head_idx
        tokens : numpy.ndarray
            the tokens we care about (i.e. exclude _GO, _EOS, and _PAD)
        """
        # Graph as head -> set of dependents; vertex 0 is the artificial ROOT.
        self._edges = defaultdict(set)
        self._vertices = set((0,))
        for dep, head in enumerate(prediction[tokens]):
            self._vertices.add(dep + 1)
            self._edges[head].add(dep + 1)
        self._indices = {}
        self._lowlinks = {}
        self._onstack = defaultdict(lambda: False)
        self._SCCs = []

        index = 0
        stack = []
        for v in self.vertices:
            if v not in self.indices:
                # BUG FIX: propagate the updated DFS counter out of the
                # recursion; the original discarded the return value, so
                # several vertices could share the same index and corrupt
                # the lowlink computation.
                index = self.strongconnect(v, index, stack)

    # =============================================================
    def strongconnect(self, v, index, stack):
        """Run the recursive step of Tarjan's algorithm from vertex ``v``.

        Args:
            v: the vertex to expand.
            index: the next unused DFS index.
            stack: shared DFS stack of vertices whose SCC is undecided.

        Returns:
            The next unused DFS index after exploring ``v``'s subtree
            (the original returned None; callers ignoring the return value
            keep working).
        """
        self._indices[v] = index
        self._lowlinks[v] = index
        index += 1
        stack.append(v)
        self._onstack[v] = True
        for w in self.edges[v]:
            if w not in self.indices:
                # BUG FIX: capture the advanced counter from the recursion.
                index = self.strongconnect(w, index, stack)
                self._lowlinks[v] = min(self._lowlinks[v], self._lowlinks[w])
            elif self._onstack[w]:
                self._lowlinks[v] = min(self._lowlinks[v], self._indices[w])
        if self._lowlinks[v] == self._indices[v]:
            # v is the root of an SCC: pop every vertex above it, inclusive.
            self._SCCs.append(set())
            while stack[-1] != v:
                w = stack.pop()
                self._onstack[w] = False
                self._SCCs[-1].add(w)
            w = stack.pop()
            self._onstack[w] = False
            self._SCCs[-1].add(w)
        return index

    # ======================
    @property
    def edges(self):
        return self._edges

    @property
    def vertices(self):
        return self._vertices

    @property
    def indices(self):
        return self._indices

    @property
    def SCCs(self):
        return self._SCCs
class UnionFind(object):
    """Disjoint-set forest with path compression and union by rank."""

    def __init__(self, n) -> None:
        super().__init__()
        # Every element starts as its own representative with rank 0.
        self.parent = list(range(n))
        self.height = [0] * n

    def find(self, x):
        """Return the representative of x's set, compressing the path."""
        root = self.parent[x]
        if root != x:
            root = self.find(root)
            self.parent[x] = root
        return root

    def unite(self, x, y):
        """Merge the sets containing x and y (no-op when already joined)."""
        rx, ry = self.find(x), self.find(y)
        if rx == ry:
            return
        # Attach the shallower tree under the deeper one.
        if self.height[rx] < self.height[ry]:
            self.parent[rx] = ry
        else:
            self.parent[ry] = rx
            if self.height[rx] == self.height[ry]:
                self.height[rx] += 1

    def same(self, x, y):
        """True when x and y currently share a representative."""
        return self.find(x) == self.find(y)
def tarjan(parse_probs, length, tokens_to_keep, ensure_tree=True):
    """Adopted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/models/nn.py

    Greedy head selection followed by cycle-breaking, so that the result is a
    well-formed dependency tree when ``ensure_tree`` is set.

    Args:
        parse_probs(NDArray): seq_len x seq_len, the probability of arcs
        length(NDArray): sentence length including ROOT
        tokens_to_keep(NDArray): mask matrix
        ensure_tree: (Default value = True)

    Returns:
        parse_preds: head index per token (parse_preds[dep] = head).
    """
    if ensure_tree:
        # First force exactly one ROOT attachment, then break any cycles.
        parse_preds, parse_probs, tokens = unique_root(parse_probs, tokens_to_keep, length)
        # remove cycles
        tarjan = Tarjan(parse_preds, tokens)
        for SCC in tarjan.SCCs:
            if len(SCC) > 1:
                # Collect every node reachable from the cycle: none of them
                # may become the cycle's new head, or a cycle would remain.
                dependents = set()
                to_visit = set(SCC)
                while len(to_visit) > 0:
                    node = to_visit.pop()
                    if not node in dependents:
                        dependents.add(node)
                        to_visit.update(tarjan.edges[node])
                # The indices of the nodes that participate in the cycle
                cycle = np.array(list(SCC))
                # The probabilities of the current heads
                old_heads = parse_preds[cycle]
                old_head_probs = parse_probs[cycle, old_heads]
                # Set the probability of depending on a non-head to zero
                non_heads = np.array(list(dependents))
                parse_probs[np.repeat(cycle, len(non_heads)), np.repeat([non_heads], len(cycle), axis=0).flatten()] = 0
                # Get new potential heads and their probabilities
                # NOTE(review): the +1 assumes `tokens` is the contiguous
                # range 1..length-1 (as produced by unique_root) — confirm if
                # that ever changes.
                new_heads = np.argmax(parse_probs[cycle][:, tokens], axis=1) + 1
                new_head_probs = parse_probs[cycle, new_heads] / old_head_probs
                # Select the most probable change
                change = np.argmax(new_head_probs)
                changed_cycle = cycle[change]
                old_head = old_heads[change]
                new_head = new_heads[change]
                # Make the change (re-attach one node; edges kept in sync)
                parse_preds[changed_cycle] = new_head
                tarjan.edges[new_head].add(changed_cycle)
                tarjan.edges[old_head].remove(changed_cycle)
        return parse_preds
    else:
        # block and pad heads
        parse_probs = parse_probs * tokens_to_keep
        parse_preds = np.argmax(parse_probs, axis=1)
        return parse_preds
def chu_liu_edmonds(parse_probs, length):
    """Decode a maximum spanning tree over the transposed arc scores.

    Returns the head vector with the ROOT entry (index 0) pinned to itself.
    """
    heads = decode_mst(parse_probs.T, length, False)[0]
    heads[0] = 0  # ROOT conventionally points to itself
    return heads
def unique_root(parse_probs, tokens_to_keep: np.ndarray, length):
    """Mask self-loops/padding and force exactly one ROOT attachment.

    Returns the (possibly fixed-up) head predictions, the masked probability
    matrix, and the range of real token indices ``1..length-1``.
    """
    eye = np.eye(len(tokens_to_keep))
    if tokens_to_keep.ndim == 1:
        tokens_to_keep = np.expand_dims(tokens_to_keep, -1)
    # Zero out padded rows and the diagonal (a token cannot head itself).
    parse_probs = parse_probs * tokens_to_keep * (1 - eye)
    parse_preds = np.argmax(parse_probs, axis=1)
    tokens = np.arange(1, length)
    roots = np.where(parse_preds[tokens] == 0)[0] + 1
    if len(roots) < 1:
        # Nobody chose ROOT: promote the token that loses the least by
        # switching from its current head to ROOT.
        root_probs = parse_probs[tokens, 0]
        old_head_probs = parse_probs[tokens, parse_preds[tokens]]
        new_root_probs = root_probs / old_head_probs
        new_root = tokens[np.argmax(new_root_probs)]
        parse_preds[new_root] = 0
    elif len(roots) > 1:
        # Several roots: keep the strongest one, re-head all the others.
        root_probs = parse_probs[roots, 0]
        parse_probs[roots, 0] = 0
        new_heads = np.argmax(parse_probs[roots][:, tokens], axis=1) + 1
        new_head_probs = parse_probs[roots, new_heads] / root_probs
        # The root whose alternative head is relatively weakest stays ROOT.
        new_root = roots[np.argmin(new_head_probs)]
        parse_preds[roots] = new_heads
        parse_preds[new_root] = 0
    return parse_preds, parse_probs, tokens
def dfs(graph, start, end):
    """Yield every simple path (list of nodes, excluding ``start``) that
    leads from ``start`` to ``end`` in the adjacency structure ``graph``."""
    pending = [(start, [])]
    while pending:
        node, path = pending.pop()
        # A non-empty path ending at `end` is a complete result.
        if path and node == end:
            yield path
            continue
        for succ in graph[node]:
            if succ not in path:  # keep paths simple (no revisits)
                pending.append((succ, path + [succ]))
def mst_then_greedy(arc_scores, rel_scores, mask, root_rel_idx, rel_idx=None):
    """Decode the backbone tree with MST, then add secondary arcs greedily.

    Args:
        arc_scores: (seq_len, seq_len) raw arc logits — assumed to be indexed
            [dependent, head]; TODO confirm against the caller.
        rel_scores: (seq_len, seq_len, n_rels) raw relation logits.
        mask: 1-D 0/1 token mask excluding ROOT; its sum is the token count.
        root_rel_idx: index of the designated root relation label.
        rel_idx: optional relation index to forbid (e.g. unknown 'ref' label).

    Returns:
        A (tree, graph) pair: the MST head vector and the secondary-arc graph.
    """
    from scipy.special import softmax
    from scipy.special import expit as sigmoid
    # Trim all score matrices to the real sentence length (incl. ROOT).
    length = sum(mask) + 1
    mask = mask[:length]
    arc_scores = arc_scores[:length, :length]
    arc_pred = arc_scores > 0
    arc_probs = sigmoid(arc_scores)
    rel_scores = rel_scores[:length, :length, :]
    rel_probs = softmax(rel_scores, -1)
    if not any(arc_pred[:, 0][1:]):  # no root
        # Promote the token most likely to carry the root relation.
        root = np.argmax(rel_probs[1:, 0, root_rel_idx]) + 1
        arc_probs[root, 0] = 1
    parse_preds, parse_probs, tokens = unique_root(arc_probs, mask, length)
    # Clamp arc_scores so the MST decoder keeps the chosen root attachment.
    # NOTE(review): `root` is rebound here and not used afterwards, and
    # `parse_probs`/`tokens` are unused — kept as-is for upstream parity.
    root = adjust_root_score(arc_scores, parse_preds, root_rel_idx, rel_scores)
    tree = chu_liu_edmonds(arc_scores, length)
    if rel_idx is not None:  # Unknown DEPREL label: 'ref'
        rel_scores[np.arange(len(tree)), tree, rel_idx] = -float('inf')
    return tree, add_secondary_arcs_by_scores(arc_scores, rel_scores, tree, root_rel_idx)
def adjust_root_score(arc_scores, parse_preds, root_rel_idx, rel_scores=None):
    """Clamp the score matrices so only the predicted root(s) may attach to ROOT.

    Mutates ``arc_scores`` (and ``rel_scores`` when given) in place and
    returns the indices of the predicted root token(s).
    """
    root = np.where(parse_preds[1:] == 0)[0] + 1
    # Push every ROOT attachment far below everything else ...
    floor = min(np.min(arc_scores), -1000)
    arc_scores[:, 0] = floor
    # ... then lift the chosen root(s) far above everything else.
    arc_scores[root, 0] = max(np.max(arc_scores), 1000)
    if rel_scores is not None:
        rel_scores[:, :, root_rel_idx] = -float('inf')
        rel_scores[root, 0, root_rel_idx] = float('inf')
    return root
def add_secondary_arcs_by_scores(arc_scores, rel_scores, tree, root_rel_idx, arc_preds=None):
    """Derive arc/relation predictions from raw scores, then delegate to
    :func:`add_secondary_arcs_by_preds` to build the final graph."""
    tree_arr = tree if isinstance(tree, np.ndarray) else np.array(tree)
    # By default an arc is predicted wherever its logit is positive.
    preds = (arc_scores > 0) if arc_preds is None else arc_preds
    best_rels = np.argmax(rel_scores, axis=-1)
    return add_secondary_arcs_by_preds(arc_scores, preds, best_rels, tree_arr, root_rel_idx)
def add_secondary_arcs_by_preds(arc_scores, arc_preds, rel_preds, tree, root_rel_idx=None):
    """Combine a backbone ``tree`` with extra predicted arcs into a graph.

    Candidate arcs are considered best-score-first and accepted only when
    they do not close a cycle with arcs already in the graph.

    Returns:
        parse_graph: for each dependent d, a sorted list of (head, rel) pairs.
    """
    # Candidate (dependent, head) pairs from the arc predictions.
    dh = np.argwhere(arc_preds)
    # Sort candidates by descending arc score so stronger arcs are tried first.
    sdh = sorted([(arc_scores[x[0], x[1]], list(x)) for x in dh], reverse=True)
    # graph[h] = list of dependents of h, seeded from the backbone tree.
    graph = [[] for _ in range(len(tree))]
    for d, h in enumerate(tree):
        if d:
            graph[h].append(d)
    for s, (d, h) in sdh:
        # Skip ROOT-touching arcs and duplicates of existing arcs.
        if not d or not h or d in graph[h]:
            continue
        try:
            path = next(dfs(graph, d, h))
        except StopIteration:
            # no path from d to h, so adding h -> d cannot create a cycle
            graph[h].append(d)
    parse_graph = [[] for _ in range(len(tree))]
    num_root = 0
    for h in range(len(tree)):
        for d in graph[h]:
            rel = rel_preds[d, h]
            if h == 0 and root_rel_idx is not None:
                # Arcs from ROOT are forced to carry the root relation, and
                # only one such arc may exist.
                rel = root_rel_idx
                assert num_root == 0
                num_root += 1
            parse_graph[d].append((h, rel))
            parse_graph[d] = sorted(parse_graph[d])
    return parse_graph
def adjust_root_score_then_add_secondary_arcs(arc_scores, rel_scores, tree, root_rel_idx):
    """Trim the score matrices to the tree length, forbid extra ROOT arcs,
    and collect secondary arcs on top of the given backbone ``tree``."""
    # BUG FIX: the original compared an int against the tree sequence
    # (`len(arc_scores) != tree`), which is always True for a list and raises
    # ValueError for a numpy array; compare the lengths instead.
    if len(arc_scores) != len(tree):
        arc_scores = arc_scores[:len(tree), :len(tree)]
        rel_scores = rel_scores[:len(tree), :len(tree), :]
    parse_preds = arc_scores > 0
    parse_preds[:, 0] = False  # no token may claim ROOT as a secondary head
    rel_scores[:, :, root_rel_idx] = -float('inf')
    return add_secondary_arcs_by_scores(arc_scores, rel_scores, tree, root_rel_idx, parse_preds)
|
The Dark Knight is director Christopher Nolan's 2008 sequel to Batman Begins. Christian Bale returns as Batman, this time having to save Gotham from the grips of a new foe who calls himself the Joker (Heath Ledger, who posthumously received an Academy Award for Best Supporting Actor for his performance). The film's cast includes Gary Oldman, Morgan Freeman, and Michael Caine and the sequel would be followed by 2012's The Dark Knight Rises. Note: The screenshots seen here will be of varying sizes due to the change in aspect ratio throughout the film.
The Joker (Heath Ledger) uses a Glock 17 that has been converted to full-auto, as the gun lacks the Glock 18's firing selector switch on the side of the slide. His Glock is fitted with a stainless slide and a G18's standard 33 round magazine. He is seen using it as his main weapon several times in the film. For someone as bizarre as The Joker, the gun fits him well as it is very rare, destructive, stylish although in some ways impractical. The extremely high cyclic rate makes it difficult to control, although in the hands of an experienced shooter, it is all the more deadly. The armorer referred to the gun as a Glock 18, but this is less of a mouthful then "Glock 17 converted to full-auto to resemble a Glock 18". Not to mention, he likely assumed no one would know the difference anyway. The armorer also mentioned how three Glocks were used, including two converted Glock 17s and one flashpaper model for close shooting scenes where blanks are harmful. Trivia: To make the Joker's Glock sound more impressive, the sound editors mixed in the report of a GE M134 Minigun. Despite the extreme cyclic rate difference (roughly 1300rpm to 4000rpm), this sound fits quite nicely. This particular pistol had previously been used in the 2005 film Mr. & Mrs. Smith.
Two-tone 3rd Generation Glock 17 converted to full-auto - 9x19mm. This is the screen-used firearm held by Heath Ledger in the film. A few of these Glocks had previously been used in Mr. & Mrs. Smith. Thanks to James Georgopoulos.
Disguised as Bozo, one of the low-life robbers, the Joker loads a 33 round magazine into his converted Glock.
He then locks the slide back and chambers the gun.
The Joker shoots the bank manager with his Glock.
The Joker shoots the bus driver with his Glock. Notice that he looks away while doing this.
The Joker fires a burst from his Glock at the SWAT van. Note how the gun jammed after a single shot, although the sound editor and continuity director chose to ignore this.
The Joker continues to fire his Glock on full at the SWAT van, but only manages to rake the sides with dents.
The Joker's Glock locks empty, so he switches to a Remington 870 sawed-off.
"This city deserves a better class of criminal..."
The Joker holds his Full-auto Glock 17 as he explains his philosophy about chaos to the Chechen.
As the mob's money burns, the Joker holds his Glock on The Chechen (Ritchie Coster).
Inside Harvey Dent's hospital room, the Joker shoots a police officer while crudely disguised as a nurse. Note how the gun is a flashpaper gun due to close proximity for safety concerns.
The Joker with his suppressed Glock.
"Grumpy" (Danny Goldring), one of The Joker's henchmen in the bank heist, uses a Glock 17 to kill "Happy" in the vault. He also uses it to hold up The Joker, suspecting he will kill him as well, only for Joker to tell him he is going to kill the bus driver. Several other Glocks are seen in the hands of Lau's henchmen and Gotham City police throughout the film.
"Grumpy" (Danny Goldring) leads the robbery with his Glock 17 drawn.
"Where did you learn to count?!"
Grumpy nurses a bullet wound after being hit by an unexpected sixth shot from the bank manager's shotgun.
Grumpy shoots and kills Happy in the vault.
Grumpy holds his Glock on the Joker in the bank lobby.
One of the Joker's "bounty hunters" carries a Glock 17 while the other two are armed with Beretta 92FS Inox's.
Batman disarms one of Lau's men of his Glock 17.
One of Lau's men lay unconscious with a Glock 17 in hand.
A GCPD officer with his Glock drawn apprehends Thomas Schiff, one of the Joker's men posing as part of the honor guard.
One of the Joker's men disguised as a doctor wields a Glock 17.
A SWAT officer points a Glock 17 at Batman before he is yanked out the window.
During the cross-examination occurring during the trial of mobster Sal Maroni (Eric Roberts), the witness Al Rossi (Michael Vieau) draws a small compact pistol on Harvey Dent (Aaron Eckhart). He pulls the trigger, but the gun misfires. After disarming Rossi, Dent identifies the gun as a "Carbon fiber... 28 Caliber, made in China." This pistol appears to be a prop replica based on the Glock 26. In order to disguise its identity, the pistol has the back plate covered up, the sights removed and has been fitted with a different trigger guard.
Screen used stunt casting from the propstore of london.
opposite view of the screen-used stunt gun.
This is a Glock 26, mocked up as a fictional Chinese .28 caliber pistol, handled by Michael Vieau in The Dark Knight. Thanks to James Georgopoulos.
Al Rossi (Michael Vieau) pulls the mocked up Glock 26 on Dent.
Note how the back plate has been covered up and the sights removed, further helping to hide its origins.
Dent unloads the pistol. Note that one of the officers in the background restraining the witness carries a Beretta 92FS in his holster.
Dent holds the pistol. "Carbon fiber, .28 caliber. Made in China. If you want to kill a public servant, Mr. Maroni, I recommend you buy American."
Dent puts the pistol down in front of Maroni.
Lieutenant James Gordon (Gary Oldman) uses a Smith & Wesson 5904 as his sidearm in the film.
Early Model Smith & Wesson 5904 - 9x19mm. This has the finger step contoured trigger guard that would later be replaced by the round trigger guard.
Lt. Gordon raids a bank vault with his Smith & Wesson 5904 drawn.
Lt. Gordon with his 5904 at the ready.
Gordon enters a crime scene with his 5904 at the ready.
Gordon draws his 5904 at Batman.
Gordon searches for Dent and his family.
A guard at Sal Maroni's trial carries a Beretta 92FS in a holster. An officer is also seen pointing his Beretta 92FS at The Joker when he takes Detective Stephens (Keith Szarabajka) hostage with a shard of glass, demanding to get his one phone call.
One of the officers in the background has a Beretta 92FS in his holster as Dent is seen unloading the fictional pistol.
A Beretta 92FS is seen on a tray next to a Beretta 92FS Inox and what looks like an M1911A1 variant packed in a holster.
An officer draws his Beretta 92FS on the Joker as he enters the room holding Detective Stephens (Keith Szarabajka) hostage with a shard of glass.
An officer aims his Beretta 92FS at the Joker.
The officer throws his phone to the Joker.
A Beretta 92FS Inox is seen used by a Batman copycat against Scarecrow (Cillian Murphy). Several Beretta Inox handguns are also seen used by Joker's henchmen when Gambol (Michael Jai White) is ambushed.
One of the Batman copycats tries to hold up Scarecrow with a Beretta 92FS Inox before he sprays fear toxin in his face.
The Beretta Inox is held to the head of one of Gambol's men.
Another one of Gambol's men is overcome at gunpoint.
A wide shot shows the Joker's men with Beretta Inox's as they overcome Gambol's men.
A Beretta 92FS Inox lies on a tray between what seems to be an M1911A1 variant packed in a holster and a Beretta 92FS.
A Beretta 92SB-C equipped with a suppressor is used by "Happy" (William Smillie) during the bank heist to kill "Dopey" (Michael Stoyanov), as soon as Dopey has hacked the silent alarm.
The Joker's henchman "Happy" (William Smillie), stands over "Dopey's" dead body after shooting him with his suppressed Beretta 92SB-C.
Production Images of The Dark Knight.
Production Photo of Happy's suppressed Beretta.
Lau (Chin Han) is seen pulling a Walther P99 from his desk when Batman (Christian Bale) cuts the power to the building. Lau then uses the Walther to fire several shots at Batman.
Lau chambers his Walther P99.
Lau fires his Walther P99 at Batman. This type of low grip makes recoil control difficult and would definitely throw off sight alignment.
Lau with his Walther P99.
The Chechen (Ritchie Coster) is seen using a SIG Pro SP 2009 pistol during the meeting with Scarecrow.
When the meeting with Scarecrow is interrupted by the Batman copycats and a firefight erupts, the Chechen (Ritchie Coster) draws his SIG Pro SP 2009.
The Chechen prepares to fight back with his SIG Pro.
The Chechen yells to his men while holding his SIG.
Chechen (Ritchie Coster) is about to enter his van while a Batman copycat prepares to shoot him but is thwarted by Batman.
Detective Stephens (Keith Szarabajka) uses a SIG-Sauer P229 as his sidearm in the film most notably during the funeral after the Joker fires at the mayor.
Stephens draws his SIG P229 when the Joker's men strike.
One of the men employed by the Chechen (John Turk) is seen using a SIG-Sauer P226 pistol during the meeting with the Scarecrow.
One of the Chechen's men with his SIG P226 at the ready. This thug is played by John Turk, an actor and stuntman mostly known for his portrayal of Sub-Zero, Scorpion and all the other ninjas in Mortal Kombat 3.
The Joker (Heath Ledger) gives Harvey Dent/Two-Face (Aaron Eckhart) a stainless Smith & Wesson Model 64 with a 3" heavy barrel to kill his perceived "enemies", flipping his coin to decide their fate.
The Joker gives Dent a Smith & Wesson Model 64.
Dent holds the Smith & Wesson Model 64 on The Joker.
Dent holds the Model 64 to Joker. Note the position of the Joker's thumb on the hammer.
Two-Face points the Model 64 at Det. Wuertz.
When Two-Face's coin lands dirty-side up, he executes Wuertz.
Two-Face carries the Model 64 in Maroni's limo.
A closeup of the Model 64 as Two-Face shoots Maroni's driver, sending the car flying off the road.
Two-Face holds Det. Anna Ramirez (Monique Gabriela Curnen) at gunpoint.
After firing the revolver once, Two-Face puts the gun to his own head and flips his coin.
Note how even after firing off one round and cocking the hammer, the chambers are fully loaded.
The Joker takes a Smith & Wesson Model 19 Snub Nose off of one of his men at Harvey Dent's fundraiser after Batman knocks him unconscious. He then uses it to hold Rachel Dawes (Maggie Gyllenhaal) hostage before shooting out the window behind him and dropping her out of it.
The Joker holds Rachel hostage with a snub nose Smith & Wesson Model 19.
The Joker demands Batman to take off his mask as he waves around the S&W.
Harvey Dent (Aaron Eckhart) uses a Smith & Wesson Model 36 to interrogate Thomas Schiff, one of The Joker's thugs, before Batman intervenes.
Dent swings open the cylinder on the Smith & Wesson Model 36.
Dent checks the Smith & Wesson Model 36.
Dent holds the Model 36 on Schiff.
After the Joker announces on the news that if Coleman Reese (Joshua Harto) isn't killed within the hour, he will blow up a hospital in Gotham, one citizen tries to shoot Reese with his old nickel plated Colt Police Positive before he is tackled by the police.
A citizen draws his Colt Police Positive.
The citizen fires his Police Positive.
A GCPD officer seizes the gun.
One of the Scarecrow's men at the meeting with the Russians is seen in the background with a Colt Anaconda.
One of Scarecrow's men to the right in the background is seen with an Anaconda.
Several officers in Hong Kong following the SDU teams are seen using Smith & Wesson Model 10 revolvers with heavy barrels and rubber grips.
Smith & Wesson Model 10 HB (heavy barrel) revolver (post-1950s model) - .38 Special. Later incarnations of the Model 10 had a non-tapered heavy barrel, which leads it to be commonly mistaken for a .357 revolver.
A lobby guard in Lau's building is seen with a rubber-gripped Smith & Wesson Model 10 in his holster.
A Hong Kong officer aims his Smith & Wesson Model 10 at Batman.
"Chuckles" (Matthew O'Neill) is seen holding a Smith & Wesson Model 15 in his left hand when he is shot by the Bank Manager.
Chuckles with a Smith & Wesson Model 15 in his left hand and his Ingram MAC-10 in the other as he is gunned down.
M16A2 rifles are used by National Guard troops, and by some of the GCPD officers at the funeral.
A sniper covers Commissioner Loeb's funeral procession with an M16A2.
An officer charges his M16A2 fitted with a Trijicon Reflex sight while riding in the chopper.
A National Guardsman is seen on the news report with an M16A2.
A Guardsman with an M16A2 at the docks.
Guardsmen guard the civilians on one of the carriers threatened by The Joker.
Guardsmen on the ferry are armed with M16A2s.
M4A1 Carbines are used by the GCPD's SWAT teams throughout the film, and police snipers during the funeral of Comissioner Loeb. The M4A1s are seen mainly fitted with reflex sights, foregrips, and M900 Surefire tactical lights.
A SWAT sniper fires his M4A1 at a window when a timer opens the shades to distract the snipers from The Joker and his men.
A SWAT officer armed with an M4A1 guards Harvey Dent in the transport van.
A SWAT officer loads his M4A1 fitted with a Surefire M900 light before breaching the building.
Batman disarms a SWAT officer of his M4A1.
SWAT officers ride the elevator armed with M4A1s.
SWAT officers do a sweep armed with M4A1s and MP5A3s.
Blaser 93 sniper rifles are used by the GCPD SWAT snipers over looking the building The Joker is in from the neighboring rooftop. While the poor light makes them kind of hard to see, the basic shape of the rifle and the obvious straight pull bolt-action makes it easy to identify.
A SWAT sniper trains his Blaser 93 on the clown guards, who are in fact the actual hostages in disguise.
A SWAT sniper waits for the order to fire.
A view down the scope of the rifle, showing the three hostages. Any sniper with a brain would find a shooting gallery like this to be very bizarre. The fact they are standing still with their hands bound would likely cause suspicion as well.
The sniper chambers his Blaser 93 which clearly has a straight pull bolt-action.
Like in most films, the rifles are shown to have lasers which reveal to the target they are under a sniper scope. In reality, lasers are very tactically unsound.
A sniper prepares to fire his Blaser 93.
A sniper fires his Blaser 93.
Thomas Schiff (David Dastmalchian) fires an M1 Garand into the air.
The Joker and his men fire the M1 Garands.
The Joker (Heath Ledger) fires an M1 Garand into the air.
The Joker suddenly turns and fires his M1 Garand at the mayor.
A Ruger Mini-14 is used by one of the Batman copycats who attacks the meeting between Scarecrow and the Chechen. Batman twists the barrel on the rifle with his Mangler before knocking him out.
A Batman copycat wields a Ruger Mini-14.
Batman bends the barrel on the Mini-14.
A GCPD SWAT sniper scouts windows for shooters during the funeral armed with a Remington 700PSS.
"Tightly, but frankly, there's a lot of windows up here."
An AKMSU is used by one of the Chechen's men during the meeting with the Scarecrow and his men.
A thug fires his AKMSU at the Tumbler.
Heckler & Koch MP5A2s are used by GCPD SWAT officers as well as one of Lau's men that is disarmed by Batman.
A GCPD SWAT officer under the command of Comm. Gordon wields a Heckler & Koch MP5A2.
Batman uses one of Lau's men's MP5A2 as a club to dispatch him.
GCPD SWAT storm the building armed with MP5A2s.
Heckler & Koch MP5A3s are also used by the GCPD SWAT and members of the Hong Kong Special Duties Unit.
Hong Kong police SDU teams storm Lau's building armed with Heckler & Koch MP5A3s.
GCPD SWAT raids the building armed with MP5A3s.
A SWAT officer is yanked from the building with his MP5A3 in hand.
A SWAT officer aims his MP5A3 at Batman before being yanked out of the building.
A SWAT officer points his MP5A3 at The Joker as he dangles from Batman's grappling hook line.
Two of the henchmen employed by the Chechen (Ritchie Coster) carry Skorpion SA Vz 61's during the meeting with the Scarecrow (Cillian Murphy) and his men.
Two of the Chechen's men fire their Skorpion SA Vz 61's at the Tumbler; one is holding the gun by the magazine.
One of the thugs takes cover while holding the Skorpion as the Tumbler switches to "INTIMIDATE" mode.
One of the Batman copycats is seen using an IMI Mini Uzi to shoot at the Chechen's mobsters.
One of the Batman copycats fires his IMI Mini Uzi at the mobsters.
The Batman copycat with his Mini Uzi.
One of Scarecrow's men in the garage is seen armed with a full-size IMI Uzi during the meeting with the Chechen.
One of the Scarecrow's men, the man in the middle, is seen with an IMI Uzi as they exit their van for the meeting with the Chechen.
The men stand guard as Scarecrow's about to exit the van, the man furthest to the left has the Uzi.
As the Tumbler comes crashing in on the meeting the man to the right is seen with the Uzi as he runs away.
Several of the Joker's men are seen using Beretta PM12S submachine guns throughout the film.
One of the Joker's men holds a Beretta PM12S on the partygoers at Bruce Wayne's fundraiser. Apparently they didn't have enough plastic clown masks in stock and had to paint pantyhose instead.
One of the Joker's men disguised as a "doctor" readies his Beretta PM12S as the SWAT enters.
One of the Joker's men readies his PM12S.
The thug with his PM12S.
The Joker (Heath Ledger) uses a Smith & Wesson M76 stored in his semi-truck to open fire on Batman's Batpod and several civilian cars before Batman crashes and Joker ditches the gun in favor of his knife. One of Scarecrow's men is also seen armed with one during the meeting with the Russians. Like the modified Glock 17 listed above, the report of a GE M134 Minigun is dubbed over for the sound of the gun.
One of Scarecrow's men is armed with a Smith & Wesson M76.
Due to The Joker's lack of trigger discipline , he never keeps his finger off the trigger and when he falls trying to get out of his overturned semi-truck, he lets off a burst from his Smith & Wesson M76. Curiously, there are no bullet impacts on the ground.
The Joker fires his Smith & Wesson M76.
An alternate take of the Joker firing his Smith & Wesson M76 at oncoming traffic.
As the Batpod approaches, the Joker continues to fire his Smith & Wesson M76.
Many of Joker's henchmen use MAC-10s throughout the film. "Chuckles", one of Joker's crew in the opening robbery, uses a MAC-10 fitted with the special buttstock made by Jersey Arms Works.
MAC-10 - .45 ACP. This is the screen-used firearm seen in the film The Dark Knight. Thanks to James Georgopoulos.
Jersey Arms Works Avenger - .45 ACP.
"Three of a kind. Let's do this."
Chuckles loads up his Ingram MAC-10 on the way to the bank.
Chuckles with his MAC-10 fitted with the special buttstock made by Jersey Arms Works.
One of The Joker's men points a MAC-10 at the party goers during the fundraiser.
One of the hostages disguised as a clown tries to pull the MAC-10 taped to their hands off as the SWAT team enter.
One of The Joker's men points his MAC-10 at the SWAT as they fall through the floor.
During the bank robbery, the bank manager (William Fichtner) uses a sawed-off Remington 870 shotgun to kill Chuckles. He comes out of his office, and fires at Grumpy and Bozo, firing five shots in total. Once Grumpy and Bozo are safely behind a desk, Grumpy asks Bozo if he is out and when he stands up to fire, an unexpected sixth round grazes him in his shoulder. The Joker takes the Remington and uses throughout the rest of the film. He is first seen threatening the guests at Bruce Wayne's fundraiser with the shotgun and then later seen using it to gun down a traffic cop who walks up to his semi-truck and to fire at the SWAT van transporting Harvey Dent before switching to his Type 69. Multiple times in the film he is seen firing the gun with one hand, which in reality would produce more recoil than shown. Since blanks produce little to no recoil, this factor doesn't present a problem in movie guns.
Remington 870 with shortened barrel and shortened stock - 12 gauge. This is the screen-used firearm seen handled by Heath Ledger and William Fichtner in the film The Dark Knight. Thanks to James Georgopoulos.
The bank manager readies his sawed-off Remington 870.
The bank manager fires his Remington 870.
The bank manager realizes that he has fired his last round.
The manager lays wounded with his 870 at his side.
The Joker with his 870 crashes Wayne's party.
Joker fires his 870 into the air to get the attention of everyone at Dent's fundraiser.
Joker points his 870 at random guests. Note '870 MAGNUM' on the receiver.
Joker shoots a traffic cop manning a roadblock with his 870. It would still be deafeningly loud for the driver, who doesn't even flinch.
Joker fires his 870 at the SWAT van.
Standard Remington 870 shotguns are seen as the standard long arms of the Gotham City Police Department. One of the Batman copycats is also seen firing one during the meeting between the Chechen and Scarecrow.
One of the Batman copycats fires a Remington 870.
GCPD officers stand guard armed with Remington 870s.
An officer escorts Coleman Reese (Joshua Harto) armed with a Remington 870 after the Joker makes a threat on his life.
Officer Berg (Matt Shallenberger) checks the chamber on his Remington 870 while escorting Reese.
A SWAT officer holds the Remington 870 with a saddle shell holder.
A Double Barrel Shotgun is seen wielded by one of The Joker's henchmen at the fundraiser. Bruce Wayne (Christian Bale) disarms the henchman of the shotgun and then disassembles the gun. This gun was likely selected because it is arguably one of the simplest guns to disassemble of many.
Bruce takes the shotgun from one of Joker's men.
He then removes the barrels. Simple.
Mossberg 500s can be seen in the hands of the correctional officers on the convict ferry.
A corrections officer guards the civilians on the ferry armed with a Mossberg 500.
The officer to the left of the warden is armed with a Mossberg 500 while the one to the right has a 590 on the convict carrier.
An officer holds the prisoners at bay with his Mossberg 500.
Mossberg 590s are also seen used by the correctional officers on the ferries. 590s are also used by GCPD officers.
A GCPD officer arrests one of Dent's heavy load of convicts with a Mossberg 590 in hand.
A GCPD officer with a Mossberg 590.
A correctional officer fires his Mossberg 590 into the air to break up the convicts.
A correctional officer with a Mossberg 590.
The officer with his Mossberg 590.
A Mossberg 590 Cruiser is carried by one of the Joker's party-crashing thugs.
The Joker and his men arrive at the party by elevator. The thug furthest to the right is seen with a Mossberg 590 Cruiser, identified by the protrusion on the magazine tube ahead of the foregrip, which is a mounting bar.
One of the Joker's men on the far right by the elevator is seen with a Mossberg 590 Cruiser.
Rachel attracts the Joker's attention before he's about to slice up an old man. A thug is seen holding off guests with his 590 Cruiser in the background.
"You must be Harvey's squeeze!"
The thug with the Mossberg is seen to the left; here you can glimpse the mounting bar between the foregrip and the magazine cap, indicating it is a 590 Cruiser.
The thug with the 590 Cruiser is seen in the background while Batman fights the other thugs.
A shortened Benelli M1 Super 90 with a Surefire dedicated forend is used by the GCPD officer (Nicky Katt) who rides in the front seat in the armored transport carrying Dent. It's later used to apprehend The Joker only seconds before he tries to get to Batman. Several other GCPD SWAT officers use Benelli shotguns as well.
The SWAT officer (Nicky Katt) riding shotgun is seen armed with a Benelli M1.
Note saddle mounted shell holster on the Benelli.
A Benelli M1 is held on the Joker.
A clearer view of the Benelli as the Joker is thrown to the ground.
Bruce Wayne (Christian Bale) is seen using a custom built automated gun built from a GE M134 Minigun to do ballistic tests on wall slabs to test which type of round matches the bullet he found used by the Joker to kill two policemen.
Alfred (Michael Caine) examines a round to load into the Minigun.
Alfred loads a 30 round magazine to load in the Minigun.
The Minigun automatically fires down the range.
The National Guard vehicles are seen with Browning M2HBs mounted on top of them.
The truck-mounted Browning M2HB is seen on the news report.
A member of the National Guard mans a Browning M2HB.
"Happy", one of the Joker's henchmen, uses a 40mm WZ.83 Pallad D cable launcher to break the window of an office building and to fire a grappling hook across the street to the roof of the bank (to reach the alarm box). The launcher is later seen used by two other of the Joker's henchmen to fire a line between two buildings to take down a GCPD helicopter.
WZ.83 Pallad D - 40mm.
"Happy" breaks the window with his WZ.83 Pallad D.
He then fires the grappling line over to the bank roof.
One of the Joker's men fires a cable over the street to block the police chopper.
His comrade does the same thing.
After opening fire on the GCPD armored car with his Glock and the Remington 870, the Joker takes out a Chinese Norinco Type 69 launcher. He uses it to destroy a police cruiser, but fails to hit the armored car protecting Harvey Dent (Aaron Eckhart). When Batman (Christian Bale) joins the chase, the Joker fires on the armored car again, only to hit the Tumbler inadvertently (after Batman bridges his Tumbler in front of the van). The Tumbler damaged beyond viability, Batman is forced to switch to his secondary vehicle hidden within the Tumbler, the Batpod. The SWAT officer riding shotgun incorrectly refers to this a "bazooka" (which was probably just used as simplistic slang for 'rocket launcher').
The Joker fires his Norinco Type 69.
The Joker reloads the Type 69.
Joker fires the Type 69.
Joker prepares to fire another rocket.
Joker takes out the Tumbler with the Type 69.
The Joker is a "man of simple tastes", which includes gasoline, gunpowder, dynamite, and of course, explosives. Mainly grenades. During the bank robbery, he brings a duffle bag filled with grenades including Mk 2 hand grenades, M26 hand grenades, and M67 hand grenades, which he primes in the hostages' hands so they are forced to hold the primer handles on (Grumpy says to them "Obviously we don't want you doing anything with your hands other than holding on for dear life."). No indication is made that these go off when the lobby is gassed following the Joker's escape. Later when facing the major mob bosses during their meeting, he manages to stop Gambol from attacking by revealing many of these grenades attached to the inside of his jacket, all stringed to the ring attached to his finger.
The Joker opens up his bag of grenades to keep the bank customers busy.
The Joker puts an M67 grenade into one of the bank customer's hands.
A bank customer nervously holds onto an M67 grenade. His hand is barely holding onto the primer handle!
"I believe whatever doesn't kill you, simply makes you stranger."
The Joker puts an unknown gas/smoke grenade in the bank manager's mouth.
As the Joker leaves, he attaches a wire to the grenade pin.
The Joker reveals his coat filled with grenades including M67s, M26s, and Mk 2s.
"Ah-ta-ta-ta! Let's not "blow" this out of proportion!"
A modified Heckler & Koch AG36 Grenade Launcher also forms part of the automated gun.
The Minigun locks up. The AG36 can be seen between the two M134s.
The Batpod is an two-wheeled vehicle salvaged from the Tumbler. It is equipped with mini-cannons, machine guns and even two grappling hooks, notably used to bring down (or, actually up) The Joker's semi. The mini-cannons are used to clear automobiles immediately after the Batpod breaks off the Tumbler and to remove minor obstacles such as the alley garbage container. The grappling hook guns are first fired at the engine compartment of the semi and then trailing cables are secured around two street posts and then shot again to become anchored to the street. Note that this feat is next to impossible unless the initial hooks actually grab onto the semi's chassis. If they only hook onto the radiator, it would have come off instead of lifting the entire vehicle.
A closer front view of the Batpod.
Batpod firing the cannons to clear away cars.
Batman machine-gunning down a door he's about to go through.
Close up of the batpod as Batman races to help Harvey Dent.
Batman uses a pump-action weapon to fire timed "sticky bombs" at the windows of Lau's building. He keeps the gun concealed and disassembled in the back of his utility belt. Definitely a prop made for the movie but in the storyline it was probably designed at Wayne Enterprise's R & D department.
Batman prepares to fire his sticky bomb gun at Lau's building.
Please visit the Discussion page for the bladed weapons used by the Joker.
|
#!/usr/bin/env python
import os,sys
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
__doc__ = '''
Usage: graph-school.py <dataset> <index column> <feature out> <sorted out>
'''
def avgs(dat, idx, fout, sout):
    """Plot per-school mean feature values from a whitespace-delimited table.

    Reads the dataset, min-max normalizes the feature columns (columns
    from position 3 onward), averages them per school, and writes two
    figures:

    * ``fout`` -- mean value of each feature, in feature order.
    * ``sout`` -- the same means, sorted ascending within each school,
      which shows the distribution of values independent of feature order.

    Parameters
    ----------
    dat : str
        Path to the whitespace-delimited dataset; must have a ``school``
        column.
    idx : str or int
        Index column, forwarded to ``pd.read_table``.
    fout, sout : str
        Output image paths for the unsorted and sorted plots.
    """
    # Schools to plot, in legend order, with one fixed line style each.
    schools = ['renaissance', 'baroque', 'neoclassicism',
               'romanticism', 'impressionism', 'mughal']
    styles = ['-r', '-g', '-b', '-c', '-m', '-y']

    data = pd.read_table(dat, delim_whitespace=True,
                         skipinitialspace=True, index_col=idx)
    # Feature columns start at position 3; earlier columns are metadata.
    df = data[data.columns[3:]]
    df.index = data.school
    # Min-max normalize each feature to [0, 1] so they share one axis.
    d = (df - df.min()) / (df.max() - df.min())
    d['school'] = d.index
    # After the transpose, rows are features and columns are schools.
    means = d.groupby('school').mean().transpose()[schools]

    # X axis: one tick per feature. Derived from the data rather than
    # hard-coded so the plots adapt to any feature count.
    fe = range(len(means.index))
    series = dict((s, means[s].tolist()) for s in schools)

    _plot_school_means(fe, series, schools, styles,
                       xlabel='feature', ylabel='mean value', out=fout)

    # Second figure: each school's means sorted ascending in place.
    for vals in series.values():
        vals.sort()
    _plot_school_means(fe, series, schools, styles,
                       xlabel=None, ylabel='feature mean value', out=sout)


def _plot_school_means(fe, series, schools, styles, xlabel, ylabel, out):
    """Plot one line per school and save the figure to ``out``."""
    for school, style in zip(schools, styles):
        plt.plot(fe, series[school], style, label=school)
    plt.legend(loc='upper left')
    if xlabel is not None:
        plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.savefig(out, dpi=150)
    # Close the figure so the second plot starts from a clean state.
    plt.close()
if '__main__' == __name__:
    # Validate CLI arguments before doing any work.
    if 5 != len(sys.argv):
        # Wrong arity: show usage text and exit without error.
        print(__doc__)
        sys.exit(0)
    if not os.path.isfile(sys.argv[1]):
        # Dataset path must exist; report it and fail.
        print(sys.argv[1], ': no such file')
        sys.exit(1)
    avgs(*sys.argv[1:])
|
Gartner Catalyst London Conference will be starting on 18 Sep and finishing on 19 Sep 2017.
It will be a substantial Summit at the Park Plaza Westminster Bridge in London, UK.
During the sessions of Gartner Catalyst London 2017, you will gain the latest updates and comprehensive information concerning Cloud Computing, Networking, Data Analytics, Internet Of Things, Data Security, Business Intelligence, Mobility, Application Architecture and Applications cases.
Gartner Catalyst London 2017 is organized annually.
Gartner, Inc. is the professional conference organizer (PCO) of Gartner Catalyst London 2017, the Gartner Catalyst London Conference.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_hostname
short_description: Manage the hostname of a BIG-IP
description:
- Manage the hostname of a BIG-IP.
version_added: 2.3
options:
hostname:
description:
- Hostname of the BIG-IP host.
required: True
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Matthew Lam (@mryanlam)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Set the hostname of the BIG-IP
bigip_hostname:
hostname: bigip.localhost.localdomain
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
hostname:
description: The new hostname of the device
returned: changed
type: string
sample: big-ip01.internal
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
class Parameters(AnsibleF5Parameters):
    """Parameter mapping for the bigip_hostname module.

    A single value -- ``hostname`` -- is sent to the device API, diffed
    against the current configuration, and reported back to the user.
    """

    api_attributes = ['hostname']
    updatables = ['hostname']
    returnables = ['hostname']

    def to_return(self):
        """Return the reportable parameters as a filtered dict."""
        reportable = {key: getattr(self, key) for key in self.returnables}
        return self._filter_params(reportable)

    @property
    def hostname(self):
        """Hostname coerced to ``str``; ``None`` when unset."""
        value = self._values['hostname']
        if value is None:
            return None
        return str(value)
class ApiParameters(Parameters):
    # Parameter view populated from the device's REST API responses.
    pass
class ModuleParameters(Parameters):
    # Parameter view populated from the Ansible task arguments.
    pass
class Changes(Parameters):
    # Base container for the computed want/have difference.
    pass
class UsableChanges(Changes):
    # Changes shaped for sending to the device API.
    pass
class ReportableChanges(Changes):
    # Changes shaped for reporting back to the playbook user.
    pass
class Difference(object):
    """Compute the per-parameter difference between two parameter sets.

    ``want`` holds the desired values and ``have`` the current ones.
    :meth:`compare` yields the value that should be applied for a given
    parameter name, or ``None`` when nothing needs to change.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for ``param``, or ``None``."""
        try:
            # A same-named attribute on this class is a custom comparator.
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        """Fallback comparison: plain inequality between want and have."""
        wanted = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
            if wanted != current:
                return wanted
        except AttributeError:
            # No current value available: the wanted value applies.
            return wanted
        # Values match; fall through and implicitly return None.
class ModuleManager(object):
    """Orchestrates reading the current hostname and applying updates.

    Flow (see :meth:`exec_module`): read current state from the device,
    diff it against the desired parameters, and PATCH the device when
    they differ.
    """
    def __init__(self, *args, **kwargs):
        # module: the AnsibleModule wrapper (params, check_mode, exit).
        self.module = kwargs.get('module', None)
        # client: REST client already configured with provider details.
        self.client = kwargs.get('client', None)
        self.have = ApiParameters()
        self.want = ModuleParameters(params=self.module.params)
        self.changes = UsableChanges()
    def _set_changed_options(self):
        # Seed self.changes with every non-None desired value.
        # NOTE(review): not called anywhere in this module; apparently
        # kept for parity with sibling F5 modules -- confirm before use.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        # Diff want vs. have; record changes and report whether any exist.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def exec_module(self):
        """Run the module and return the Ansible result dict."""
        result = dict()
        changed = self.update()
        # Convert internal changes into the user-facing report format.
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def _announce_deprecations(self, result):
        # Surface queued deprecation warnings through AnsibleModule.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def _read_global_settings_from_device(self):
        """GET sys/global-settings and return the parsed JSON payload.

        Raises F5ModuleError when the body is not JSON or reports an
        HTTP 400-style error payload.
        """
        uri = "https://{0}:{1}/mgmt/tm/sys/global-settings/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response
    def read_current_from_device(self):
        """Collect current global settings plus this device's CM name.

        The self device name (from cm/device) is needed later so the
        device object can be renamed to match the new hostname.
        """
        result = self._read_global_settings_from_device()
        uri = "https://{0}:{1}/mgmt/tm/cm/device/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        # 'selfDevice' arrives as the string "true"/"false", not a
        # boolean, hence the string comparison.
        self_device = next((x['name'] for x in response['items'] if x['selfDevice'] == "true"), None)
        result['self_device'] = self_device
        return ApiParameters(params=result)
    def update(self):
        """Apply the hostname change if needed.

        Returns True when a change was (or, in check mode, would be)
        made; False when the device already matches.
        """
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Report the pending change without touching the device.
            return True
        self.update_on_device()
        return True
    def should_update(self):
        # Thin predicate wrapper over the want/have diff.
        result = self._update_changed_options()
        if result:
            return True
        return False
    def update_on_device(self):
        """PATCH the new hostname, then rename the CM device to match."""
        params = self.want.api_params()
        uri = "https://{0}:{1}/mgmt/tm/sys/global-settings/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        if self.have.self_device:
            # Rename ("mv") the device object so the CM device name
            # stays in sync with the new hostname.
            uri = "https://{0}:{1}/mgmt/tm/cm/device".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
            )
            args = dict(
                command='mv',
                name=self.have.self_device,
                target=self.want.hostname
            )
            resp = self.client.api.post(uri, json=args)
            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))
            if 'code' in response and response['code'] == 400:
                if 'message' in response:
                    raise F5ModuleError(response['message'])
                else:
                    raise F5ModuleError(resp.content)
class ArgumentSpec(object):
    """Argument specification for the module.

    Combines the shared F5 connection arguments with this module's
    single required ``hostname`` option.
    """

    def __init__(self):
        self.supports_check_mode = True
        # Start from a copy of the shared F5 args, then add our own.
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(
            hostname=dict(
                required=True
            )
        )
def main():
    """Module entry point: build the AnsibleModule, run the manager, exit."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    client = F5RestClient(**module.params)
    try:
        manager = ModuleManager(module=module, client=client)
        results = manager.exec_module()
        cleanup_tokens(client)
        exit_json(module, results, client)
    except F5ModuleError as ex:
        # Always release API tokens, then report the failure to Ansible.
        cleanup_tokens(client)
        fail_json(module, ex, client)


if __name__ == '__main__':
    main()
|
Few on the earth love Dark Chocolates! Indulge yourself in our premium chocolate sponge cake with Pure Dark Belgian Chocolate Ganache in layers and all over. Treat yourself to the pure decadence today! Note : A kilogram of Cake serves 12 people approximately.
|
"""
Indivo Model for Vitals
"""
from fact import Fact
from django.db import models
from django.conf import settings
class Vitals(Fact):
    """Indivo fact model for a single vital-sign reading.

    Extends the generic :class:`Fact` with the measurement's coded
    name, who/when/how it was measured, and its result value.
    """
    # Coded name of the vital sign plus its coding-system metadata.
    name = models.CharField(max_length=100)
    name_type = models.CharField(max_length=80, null=True)
    name_value = models.CharField(max_length=40, null=True)
    name_abbrev = models.CharField(max_length=20, null=True)
    # Who performed the measurement and over what time interval.
    measured_by=models.CharField(max_length=200, null=True)
    date_measured_start=models.DateTimeField(null=True)
    date_measured_end=models.DateTimeField(null=True)
    # Measurement result: unit, free-text value, and coded unit details.
    result_unit=models.CharField(max_length=100, null=True)
    result_textvalue=models.CharField(max_length=5000, null=True)
    result_value=models.CharField(max_length=200, null=True)
    result_unit_type=models.CharField(max_length=200, null=True)
    result_unit_value=models.CharField(max_length=200, null=True)
    result_unit_abbrev=models.CharField(max_length=200, null=True)
    # Where on the body and how the reading was taken.
    site = models.CharField(max_length=40, null=True)
    position = models.CharField(max_length=40, null=True)
    technique=models.CharField(max_length=200, null=True)
    comments = models.TextField(null=True)
    def __unicode__(self):
        # Human-readable label (Python 2 Django convention).
        return 'Vitals %s' % self.id
|
The New York Times and Everlane are sharing the message about journalism’s essential role in understanding and combating climate change. Starting April 10, Everlane’s “Climate Collection” of t-shirts and sweatshirts will be available for purchase on Everlane.com. For each shirt sold, nine public school students will receive access to The Times’s Sponsor a Subscription program, which provides students with free access to NYTimes.com, for one year.
On Earth Day (April 22) The Times will also air a new TV ad featuring reporting from correspondent Nicholas Casey and photography from Josh Haner as part of its “Truth is Worth It” brand campaign. The spot takes viewers through the reporting process that led to their groundbreaking interactive investigation into the impacts of warming seas on the Galápagos Islands.
The ad campaign will air on linear and streaming television and across social channels, as well as in The Times in print and online.
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User-friendly container for Google Cloud Bigtable Table."""
from grpc import StatusCode
from google.api_core.exceptions import RetryError
from google.api_core.exceptions import NotFound
from google.api_core.retry import if_exception_type
from google.api_core.retry import Retry
from google.api_core.gapic_v1.method import wrap_method
from google.cloud._helpers import _to_bytes
from google.cloud.bigtable.column_family import _gc_rule_from_pb
from google.cloud.bigtable.column_family import ColumnFamily
from google.cloud.bigtable.batcher import MutationsBatcher
from google.cloud.bigtable.batcher import (FLUSH_COUNT, MAX_ROW_BYTES)
from google.cloud.bigtable.row import AppendRow
from google.cloud.bigtable.row import ConditionalRow
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable.row_data import PartialRowsData
from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS
from google.cloud.bigtable.row_set import RowSet
from google.cloud.bigtable.row_set import RowRange
from google.cloud.bigtable import enums
from google.cloud.bigtable_v2.proto import (
bigtable_pb2 as data_messages_v2_pb2)
from google.cloud.bigtable_admin_v2.proto import (
table_pb2 as admin_messages_v2_pb2)
from google.cloud.bigtable_admin_v2.proto import (
bigtable_table_admin_pb2 as table_admin_messages_v2_pb2)
# Maximum number of mutations in bulk (MutateRowsRequest message):
# (https://cloud.google.com/bigtable/docs/reference/data/rpc/
# google.bigtable.v2#google.bigtable.v2.MutateRowRequest)
_MAX_BULK_MUTATIONS = 100000
# Table view returning only the name field (cheapest existence probe).
VIEW_NAME_ONLY = enums.Table.View.NAME_ONLY
class _BigtableRetryableError(Exception):
    """Retry-able error expected by the default retry strategy."""
DEFAULT_RETRY = Retry(
    predicate=if_exception_type(_BigtableRetryableError),
    initial=1.0,     # first backoff delay, in seconds
    maximum=15.0,    # cap on any single backoff delay, in seconds
    multiplier=2.0,  # exponential growth factor between attempts
    deadline=120.0,  # 2 minutes
)
"""The default retry strategy to be used on retry-able errors.
Used by :meth:`~google.cloud.bigtable.table.Table.mutate_rows`.
"""
class TableMismatchError(ValueError):
    """Row from another table."""
class TooManyMutationsError(ValueError):
    """The number of mutations for bulk request is too big."""
class Table(object):
"""Representation of a Google Cloud Bigtable Table.
.. note::
We don't define any properties on a table other than the name.
The only other fields are ``column_families`` and ``granularity``,
The ``column_families`` are not stored locally and
``granularity`` is an enum with only one value.
We can use a :class:`Table` to:
* :meth:`create` the table
* :meth:`delete` the table
* :meth:`list_column_families` in the table
:type table_id: str
:param table_id: The ID of the table.
:type instance: :class:`~google.cloud.bigtable.instance.Instance`
:param instance: The instance that owns the table.
:type app_profile_id: str
:param app_profile_id: (Optional) The unique name of the AppProfile.
"""
def __init__(self, table_id, instance, app_profile_id=None):
self.table_id = table_id
self._instance = instance
self._app_profile_id = app_profile_id
@property
def name(self):
"""Table name used in requests.
.. note::
This property will not change if ``table_id`` does not, but the
return value is not cached.
The table name is of the form
``"projects/../instances/../tables/{table_id}"``
:rtype: str
:returns: The table name.
"""
project = self._instance._client.project
instance_id = self._instance.instance_id
table_client = self._instance._client.table_data_client
return table_client.table_path(
project=project, instance=instance_id, table=self.table_id)
def column_family(self, column_family_id, gc_rule=None):
"""Factory to create a column family associated with this table.
:type column_family_id: str
:param column_family_id: The ID of the column family. Must be of the
form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type gc_rule: :class:`.GarbageCollectionRule`
:param gc_rule: (Optional) The garbage collection settings for this
column family.
:rtype: :class:`.ColumnFamily`
:returns: A column family owned by this table.
"""
return ColumnFamily(column_family_id, self, gc_rule=gc_rule)
def row(self, row_key, filter_=None, append=False):
"""Factory to create a row associated with this table.
.. warning::
At most one of ``filter_`` and ``append`` can be used in a
:class:`~google.cloud.bigtable.row.Row`.
:type row_key: bytes
:param row_key: The key for the row being created.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) Filter to be used for conditional mutations.
See :class:`.ConditionalRow` for more details.
:type append: bool
:param append: (Optional) Flag to determine if the row should be used
for append mutations.
:rtype: :class:`~google.cloud.bigtable.row.Row`
:returns: A row owned by this table.
:raises: :class:`ValueError <exceptions.ValueError>` if both
``filter_`` and ``append`` are used.
"""
if append and filter_ is not None:
raise ValueError('At most one of filter_ and append can be set')
if append:
return AppendRow(row_key, self)
elif filter_ is not None:
return ConditionalRow(row_key, self, filter_=filter_)
else:
return DirectRow(row_key, self)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (other.table_id == self.table_id and
other._instance == self._instance)
def __ne__(self, other):
return not self == other
def create(self, initial_split_keys=None, column_families=None):
    """Creates this table.

    .. note::

        A create request returns a
        :class:`._generated.table_pb2.Table` but we don't use
        this response.

    :type initial_split_keys: list
    :param initial_split_keys: (Optional) list of row keys in bytes that
                               will be used to initially split the table
                               into several tablets.

    :type column_families: dict
    :param column_families: (Optional) A map of columns to create. The key
                            is the column_id str and the value is a
                            :class:`GarbageCollectionRule`.
    """
    # Bug fix: the previous signature used mutable defaults (``[]`` and
    # ``{}``) which are shared across calls; ``None`` sentinels are
    # backward compatible and safe.
    if initial_split_keys is None:
        initial_split_keys = []
    if column_families is None:
        column_families = {}
    table_client = self._instance._client.table_admin_client
    instance_name = self._instance.name
    # ``family_id`` instead of ``id`` avoids shadowing the builtin.
    families = {family_id: ColumnFamily(family_id, self, rule).to_pb()
                for (family_id, rule) in column_families.items()}
    table = admin_messages_v2_pb2.Table(column_families=families)
    split = table_admin_messages_v2_pb2.CreateTableRequest.Split
    splits = [split(key=_to_bytes(key)) for key in initial_split_keys]
    table_client.create_table(parent=instance_name, table_id=self.table_id,
                              table=table, initial_splits=splits)
def exists(self):
    """Check whether the table exists.

    :rtype: bool
    :returns: True if the table exists, else False.
    """
    table_client = self._instance._client.table_admin_client
    try:
        # A name-only view is the cheapest way to probe for existence.
        table_client.get_table(name=self.name, view=VIEW_NAME_ONLY)
    except NotFound:
        return False
    return True
def delete(self):
    """Delete this table (permanently removes it from the instance)."""
    admin_client = self._instance._client.table_admin_client
    admin_client.delete_table(name=self.name)
def list_column_families(self):
    """List the column families owned by this table.

    :rtype: dict
    :returns: Dictionary of column families attached to this table. Keys
              are strings (column family names) and values are
              :class:`.ColumnFamily` instances.
    """
    admin_client = self._instance._client.table_admin_client
    table_pb = admin_client.get_table(self.name)
    # Build one local ColumnFamily per family reported by the server.
    return {
        family_id: self.column_family(
            family_id, gc_rule=_gc_rule_from_pb(value_pb.gc_rule))
        for family_id, value_pb in table_pb.column_families.items()
    }
def get_cluster_states(self):
    """List the cluster states owned by this table.

    :rtype: dict
    :returns: Dictionary mapping cluster IDs (str) to
              :class:`ClusterState` instances for this table.
    """
    replication_view = enums.Table.View.REPLICATION_VIEW
    admin_client = self._instance._client.table_admin_client
    table_pb = admin_client.get_table(self.name, view=replication_view)
    states = {}
    for cluster_id, value_pb in table_pb.cluster_states.items():
        states[cluster_id] = ClusterState(value_pb.replication_state)
    return states
def read_row(self, row_key, filter_=None):
    """Read a single row from this table.

    :type row_key: bytes
    :param row_key: The key of the row to read from.

    :type filter_: :class:`.RowFilter`
    :param filter_: (Optional) The filter to apply to the contents of the
                    row. If unset, returns the entire row.

    :rtype: :class:`.PartialRowData`, :data:`NoneType <types.NoneType>`
    :returns: The contents of the row if any chunks were returned in
              the response, otherwise :data:`None`.
    :raises: :class:`ValueError <exceptions.ValueError>` if more than one
             row comes back for the key.
    """
    target = RowSet()
    target.add_row_key(row_key)
    stream = iter(self.read_rows(filter_=filter_, row_set=target))
    first = next(stream, None)
    # A single-key row set must yield at most one row.
    if next(stream, None) is not None:
        raise ValueError('More than one row was returned.')
    return first
def read_rows(self, start_key=None, end_key=None, limit=None,
              filter_=None, end_inclusive=False, row_set=None,
              retry=DEFAULT_RETRY_READ_ROWS):
    """Read rows from this table.

    :type start_key: bytes
    :param start_key: (Optional) The beginning of a range of row keys to
                      read from. The range will include ``start_key``. If
                      left empty, will be interpreted as the empty string.

    :type end_key: bytes
    :param end_key: (Optional) The end of a range of row keys to read from.
                    The range will not include ``end_key``. If left empty,
                    will be interpreted as an infinite string.

    :type limit: int
    :param limit: (Optional) The read will terminate after committing to N
                  rows' worth of results. The default (zero) is to return
                  all results.

    :type filter_: :class:`.RowFilter`
    :param filter_: (Optional) The filter to apply to the contents of the
                    specified row(s). If unset, reads every column in
                    each row.

    :type end_inclusive: bool
    :param end_inclusive: (Optional) Whether the ``end_key`` should be
                  considered inclusive. The default is False (exclusive).

    :type row_set: :class:`row_set.RowSet`
    :param row_set: (Optional) The row set containing multiple row keys and
                    row_ranges. Mutually exclusive with
                    ``start_key``/``end_key``.

    :type retry: :class:`~google.api_core.retry.Retry`
    :param retry:
        (Optional) Retry delay and deadline arguments. To override, the
        default value :attr:`DEFAULT_RETRY_READ_ROWS` can be used and
        modified with the :meth:`~google.api_core.retry.Retry.with_delay`
        method or the :meth:`~google.api_core.retry.Retry.with_deadline`
        method.

    :rtype: :class:`.PartialRowsData`
    :returns: A :class:`.PartialRowsData` a generator for consuming
              the streamed results.
    """
    # Range/row_set mutual exclusion is validated in _create_row_request.
    request_pb = _create_row_request(
        self.name, start_key=start_key, end_key=end_key,
        filter_=filter_, limit=limit, end_inclusive=end_inclusive,
        app_profile_id=self._app_profile_id, row_set=row_set)
    data_client = self._instance._client.table_data_client
    return PartialRowsData(
        data_client.transport.read_rows,
        request_pb, retry)
def yield_rows(self, **kwargs):
    """Read rows from this table.

    .. warning::
       This method will be removed in future releases.  Please use
       ``read_rows`` instead.

    :param kwargs: Accepts the same keyword arguments as
        :meth:`read_rows` (``start_key``, ``end_key``, ``limit``,
        ``filter_``, ``end_inclusive``, ``row_set``, ``retry``).

    :rtype: :class:`.PartialRowsData`
    :returns: The same streaming result :meth:`read_rows` would return
              for these arguments.
    """
    # Deprecated alias: delegate unchanged to read_rows.
    return self.read_rows(**kwargs)
def mutate_rows(self, rows, retry=DEFAULT_RETRY):
    """Mutates multiple rows in bulk.

    The method tries to update all specified rows.  Rows whose mutations
    fail keep their pending mutations so they can be re-applied
    individually; successfully-applied mutations are cleared.

    Optionally, a ``retry`` strategy can be specified to re-attempt
    mutations on rows that return transient errors.  This method will
    retry until all rows succeed or until the request deadline is
    reached.  To specify a ``retry`` strategy of "do-nothing", a deadline
    of ``0.0`` can be specified.

    :type rows: list
    :param rows: List or other iterable of :class:`.DirectRow` instances.

    :type retry: :class:`~google.api_core.retry.Retry`
    :param retry:
        (Optional) Retry delay and deadline arguments. To override, the
        default value :attr:`DEFAULT_RETRY` can be used and modified with
        the :meth:`~google.api_core.retry.Retry.with_delay` method or the
        :meth:`~google.api_core.retry.Retry.with_deadline` method.

    :rtype: list
    :returns: A list of response statuses (`google.rpc.status_pb2.Status`)
              corresponding to success or failure of each row mutation
              sent. These will be in the same order as the `rows`.
    """
    worker = _RetryableMutateRowsWorker(
        self._instance._client, self.name, rows,
        app_profile_id=self._app_profile_id)
    return worker(retry=retry)
def sample_row_keys(self):
    """Read a sample of row keys in the table.

    The returned row keys will delimit contiguous sections of the table
    of approximately equal size, useful for splitting up the data for
    distributed tasks like mapreduces.

    Each element of the iterator is a SampleRowKeys response exposing
    ``offset_bytes`` and ``row_key``, in sorted order.  A key containing
    the empty string indicates "end of table" and, if present, is the
    last response.

    .. note::

        Row keys in this list may not have ever been written to or read
        from; make no assumptions about their structure.

    ``offset_bytes`` is the approximate total storage used by all rows
    preceding ``row_key``; the difference between two consecutive samples
    approximates the space needed to buffer the rows between them.

    :rtype: :class:`~google.cloud.exceptions.GrpcRendezvous`
    :returns: A cancel-able iterator. Can be consumed by calling ``next()``
              or by casting to a :class:`list` and can be cancelled by
              calling ``cancel()``.
    """
    data_client = self._instance._client.table_data_client
    return data_client.sample_row_keys(
        self.name, app_profile_id=self._app_profile_id)
def truncate(self, timeout=None):
    """Truncate the table (drop every row).

    :type timeout: float
    :param timeout: (Optional) The amount of time, in seconds, to wait
                    for the request to complete.

    :raise: google.api_core.exceptions.GoogleAPICallError: If the
            request failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed
            due to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
    """
    admin_client = self._instance._client.table_admin_client
    # Only forward ``timeout`` when the caller supplied a truthy value,
    # matching the API client's default handling otherwise.
    kwargs = {'delete_all_data_from_table': True}
    if timeout:
        kwargs['timeout'] = timeout
    admin_client.drop_row_range(self.name, **kwargs)
def drop_by_prefix(self, row_key_prefix, timeout=None):
    """Delete all rows whose keys start with ``row_key_prefix``.

    :type row_key_prefix: bytes
    :param row_key_prefix: Delete all rows that start with this row key
                           prefix. Prefix cannot be zero length.

    :type timeout: float
    :param timeout: (Optional) The amount of time, in seconds, to wait
                    for the request to complete.

    :raise: google.api_core.exceptions.GoogleAPICallError: If the
            request failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed
            due to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
    """
    client = self._instance._client
    table_admin_client = client.table_admin_client
    # Pass ``timeout`` only when given, to keep the client's default.
    if timeout:
        table_admin_client.drop_row_range(
            self.name, row_key_prefix=_to_bytes(row_key_prefix),
            timeout=timeout)
    else:
        table_admin_client.drop_row_range(
            self.name, row_key_prefix=_to_bytes(row_key_prefix))
def mutations_batcher(self, flush_count=FLUSH_COUNT,
                      max_row_bytes=MAX_ROW_BYTES):
    """Factory to create a mutation batcher associated with this table.

    :type flush_count: int
    :param flush_count: (Optional) Maximum number of rows per batch. If it
        reaches the max number of rows it calls finish_batch() to
        mutate the current row batch. Default is FLUSH_COUNT (1000
        rows).

    :type max_row_bytes: int
    :param max_row_bytes: (Optional) Max number of row mutations size to
        flush. If it reaches the max number of row mutations size it
        calls finish_batch() to mutate the current row batch.
        Default is MAX_ROW_BYTES (5 MB).

    :rtype: :class:`.MutationsBatcher`
    :returns: A batcher bound to this table.
    """
    return MutationsBatcher(self, flush_count, max_row_bytes)
class _RetryableMutateRowsWorker(object):
    """A callable worker that can retry to mutate rows with transient errors.

    This class is a callable that can retry mutating rows that result in
    transient errors. After all rows are successful or none of the rows
    are retryable, any subsequent call on this callable will be a no-op.
    """

    # gRPC status codes treated as transient and therefore retryable.
    # pylint: disable=unsubscriptable-object
    RETRY_CODES = (
        StatusCode.DEADLINE_EXCEEDED.value[0],
        StatusCode.ABORTED.value[0],
        StatusCode.UNAVAILABLE.value[0],
    )
    # pylint: enable=unsubscriptable-object

    def __init__(self, client, table_name, rows, app_profile_id=None):
        self.client = client
        self.table_name = table_name
        self.rows = rows
        self.app_profile_id = app_profile_id
        # One status slot per row; ``None`` means "not attempted yet".
        self.responses_statuses = [None] * len(self.rows)

    def __call__(self, retry=DEFAULT_RETRY):
        """Attempt to mutate all rows and retry rows with transient errors.

        Will retry the rows with transient errors until all rows succeed or
        ``deadline`` specified in the `retry` is reached.

        :rtype: list
        :returns: A list of response statuses (`google.rpc.status_pb2.Status`)
                  corresponding to success or failure of each row mutation
                  sent. These will be in the same order as the ``rows``.
        """
        mutate_rows = self._do_mutate_retryable_rows
        if retry:
            mutate_rows = retry(self._do_mutate_retryable_rows)
        try:
            mutate_rows()
        except (_BigtableRetryableError, RetryError):
            # - _BigtableRetryableError raised when no retry strategy is used
            #   and a retryable error on a mutation occurred.
            # - RetryError raised when retry deadline is reached.
            # In both cases, just return current `responses_statuses`.
            pass
        return self.responses_statuses

    @staticmethod
    def _is_retryable(status):
        """Return True if the row was never tried or failed transiently."""
        return (status is None or
                status.code in _RetryableMutateRowsWorker.RETRY_CODES)

    def _do_mutate_retryable_rows(self):
        """Mutate all the rows that are eligible for retry.

        A row is eligible for retry if it has not been tried or if it
        resulted in a transient error in a previous call.

        :rtype: list
        :return: The responses statuses, which is a list of
                 :class:`~google.rpc.status_pb2.Status`.
        :raises: One of the following:

                 * :exc:`~.table._BigtableRetryableError` if any
                   row returned a transient error.
                 * :exc:`RuntimeError` if the number of responses doesn't
                   match the number of rows that were retried
        """
        retryable_rows = []
        index_into_all_rows = []
        for index, status in enumerate(self.responses_statuses):
            if self._is_retryable(status):
                retryable_rows.append(self.rows[index])
                index_into_all_rows.append(index)

        if not retryable_rows:
            # All mutations are either successful or non-retryable now.
            return self.responses_statuses

        mutate_rows_request = _mutate_rows_request(
            self.table_name, retryable_rows,
            app_profile_id=self.app_profile_id)
        data_client = self.client.table_data_client
        inner_api_calls = data_client._inner_api_calls
        if 'mutate_rows' not in inner_api_calls:
            # BUG FIX: the original line ended with a stray trailing comma,
            # which made ``default_retry`` a 1-element tuple instead of a
            # Retry object, breaking the configuration of ``wrap_method``.
            default_retry = data_client._method_configs['MutateRows'].retry
            default_timeout = data_client._method_configs['MutateRows'].timeout
            data_client._inner_api_calls['mutate_rows'] = wrap_method(
                data_client.transport.mutate_rows,
                default_retry=default_retry,
                default_timeout=default_timeout,
                client_info=data_client._client_info,
            )

        responses = data_client._inner_api_calls['mutate_rows'](
            mutate_rows_request, retry=None)

        num_responses = 0
        num_retryable_responses = 0
        for response in responses:
            for entry in response.entries:
                num_responses += 1
                # Map the entry index (relative to the retried subset)
                # back to the index in the full row list.
                index = index_into_all_rows[entry.index]
                self.responses_statuses[index] = entry.status
                if self._is_retryable(entry.status):
                    num_retryable_responses += 1
                if entry.status.code == 0:
                    # Success: clear the row's pending mutations.
                    self.rows[index].clear()

        if len(retryable_rows) != num_responses:
            raise RuntimeError(
                'Unexpected number of responses', num_responses,
                'Expected', len(retryable_rows))

        if num_retryable_responses:
            raise _BigtableRetryableError

        return self.responses_statuses
class ClusterState(object):
    """Representation of a cluster's replication state for a table.

    :type replication_state: int
    :param replication_state: enum value for cluster state
        Possible replications_state values are
        0 for STATE_NOT_KNOWN: The replication state of the table is
          unknown in this cluster.
        1 for INITIALIZING: The cluster was recently created, and the
          table must finish copying over pre-existing data from other
          clusters before it can begin receiving live replication updates
          and serving ``Data API`` requests.
        2 for PLANNED_MAINTENANCE: The table is temporarily unable to
          serve ``Data API`` requests from this cluster due to planned
          internal maintenance.
        3 for UNPLANNED_MAINTENANCE: The table is temporarily unable to
          serve ``Data API`` requests from this cluster due to unplanned
          or emergency maintenance.
        4 for READY: The table can serve ``Data API`` requests from this
          cluster. Depending on replication delay, reads may not
          immediately reflect the state of the table in other clusters.
    """

    def __init__(self, replication_state):
        self.replication_state = replication_state

    def __repr__(self):
        """Return the symbolic name of this replication state.

        :rtype: str
        :returns: The enum name matching ``replication_state``.
        """
        names = {
            enums.Table.ReplicationState.STATE_NOT_KNOWN: "STATE_NOT_KNOWN",
            enums.Table.ReplicationState.INITIALIZING: "INITIALIZING",
            enums.Table.ReplicationState.PLANNED_MAINTENANCE:
                "PLANNED_MAINTENANCE",
            enums.Table.ReplicationState.UNPLANNED_MAINTENANCE:
                "UNPLANNED_MAINTENANCE",
            enums.Table.ReplicationState.READY: "READY",
        }
        return names[self.replication_state]

    def __eq__(self, other):
        """Equality is defined by ``replication_state`` alone.

        :type other: ClusterState
        :param other: ClusterState instance to compare with.

        :rtype: bool
        :returns: True when both instances share the same state.
        """
        if not isinstance(other, self.__class__):
            return False
        return self.replication_state == other.replication_state

    def __ne__(self, other):
        """Negation of ``==`` (needed on Python 2).

        :type other: ClusterState
        :param other: ClusterState instance to compare with.

        :rtype: bool
        :returns: True if the two instances differ.
        """
        return not (self == other)
def _create_row_request(table_name, start_key=None, end_key=None,
                        filter_=None, limit=None, end_inclusive=False,
                        app_profile_id=None, row_set=None):
    """Creates a request to read rows in a table.

    :type table_name: str
    :param table_name: The name of the table to read from.

    :type start_key: bytes
    :param start_key: (Optional) The beginning of a range of row keys to
                      read from. The range will include ``start_key``. If
                      left empty, will be interpreted as the empty string.

    :type end_key: bytes
    :param end_key: (Optional) The end of a range of row keys to read from.
                    The range will not include ``end_key``. If left empty,
                    will be interpreted as an infinite string.

    :type filter_: :class:`.RowFilter`
    :param filter_: (Optional) The filter to apply to the contents of the
                    specified row(s). If unset, reads the entire table.

    :type limit: int
    :param limit: (Optional) The read will terminate after committing to N
                  rows' worth of results. The default (zero) is to return
                  all results.

    :type end_inclusive: bool
    :param end_inclusive: (Optional) Whether the ``end_key`` should be
                  considered inclusive. The default is False (exclusive).

    :type app_profile_id: str
    :param app_profile_id: (Optional) The unique name of the AppProfile.

    :type row_set: :class:`row_set.RowSet`
    :param row_set: (Optional) The row set containing multiple row keys and
                    row_ranges. Mutually exclusive with
                    ``start_key``/``end_key``.

    :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest`
    :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs.
    :raises: :class:`ValueError <exceptions.ValueError>` if both
             ``row_set`` and one of ``start_key`` or ``end_key`` are set
    """
    # Validate mutually-exclusive arguments before doing any work.
    if ((start_key is not None or end_key is not None) and
            row_set is not None):
        raise ValueError('Row range and row set cannot be '
                         'set simultaneously')

    request_kwargs = {'table_name': table_name}
    if filter_ is not None:
        request_kwargs['filter'] = filter_.to_pb()
    if limit is not None:
        request_kwargs['rows_limit'] = limit
    if app_profile_id is not None:
        request_kwargs['app_profile_id'] = app_profile_id

    message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs)

    # A bare start/end range is expressed as a single-range row set.
    if start_key is not None or end_key is not None:
        row_set = RowSet()
        row_set.add_row_range(RowRange(start_key, end_key,
                                       end_inclusive=end_inclusive))

    if row_set is not None:
        row_set._update_message_request(message)

    return message
def _mutate_rows_request(table_name, rows, app_profile_id=None):
    """Creates a request to mutate rows in a table.

    :type table_name: str
    :param table_name: The name of the table to write to.

    :type rows: list
    :param rows: List or other iterable of :class:`.DirectRow` instances.

    :type app_profile_id: str
    :param app_profile_id: (Optional) The unique name of the AppProfile.

    :rtype: :class:`data_messages_v2_pb2.MutateRowsRequest`
    :returns: The ``MutateRowsRequest`` protobuf corresponding to the inputs.
    :raises: :exc:`~.table.TooManyMutationsError` if the number of mutations is
             greater than 100,000
    """
    request_pb = data_messages_v2_pb2.MutateRowsRequest(
        table_name=table_name, app_profile_id=app_profile_id)
    total_mutations = 0
    for row in rows:
        # Each row must belong to this table and be a DirectRow.
        _check_row_table_name(table_name, row)
        _check_row_type(row)
        row_mutations = row._get_mutations()
        request_pb.entries.add(row_key=row.row_key, mutations=row_mutations)
        total_mutations += len(row_mutations)
    if total_mutations > _MAX_BULK_MUTATIONS:
        raise TooManyMutationsError('Maximum number of mutations is %s' %
                                    (_MAX_BULK_MUTATIONS,))
    return request_pb
def _check_row_table_name(table_name, row):
    """Checks that a row belongs to a table.

    :type table_name: str
    :param table_name: The name of the table.

    :type row: :class:`~google.cloud.bigtable.row.Row`
    :param row: An instance of :class:`~google.cloud.bigtable.row.Row`
                subclasses.

    :raises: :exc:`~.table.TableMismatchError` if the row does not belong to
             the table.
    """
    # Rows without a bound table are accepted as-is.
    if row.table is None:
        return
    if row.table.name == table_name:
        return
    raise TableMismatchError(
        'Row %s is a part of %s table. Current table: %s' %
        (row.row_key, row.table.name, table_name))
def _check_row_type(row):
    """Checks that a row is an instance of :class:`.DirectRow`.

    :type row: :class:`~google.cloud.bigtable.row.Row`
    :param row: An instance of :class:`~google.cloud.bigtable.row.Row`
                subclasses.

    :raises: :class:`TypeError <exceptions.TypeError>` if the row is not an
             instance of DirectRow.
    """
    if isinstance(row, DirectRow):
        return
    raise TypeError('Bulk processing can not be applied for '
                    'conditional or append mutations.')
|
There have been 88 plays since 26/10/2018.
You are currently playing the Chalk Gardens game for free on Arcade Spot. It is a single game out of a variety of games that you can play on Arcade Spot. Play more games like Chalk Gardens in the Adventure and Unity gaming categories. If you enjoyed playing the game, give it a thumbs up. Free Online Games and Arcade Games are added every day. Arcade Spot will bring you the best games without downloading and a fun online gaming experience on the internet.
|
# -*- coding: utf-8 -*-
# Open Chord Charts -- Database of free chord charts
# By: Christophe Benz <contact@openchordcharts.org>
#
# Copyright (C) 2012 Christophe Benz
# https://github.com/openchordcharts/
#
# This file is part of Open Chord Charts.
#
# Open Chord Charts is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Open Chord Charts is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Charts controller functions."""
import re
import webob
from webob.dec import wsgify
from .. import conf, contexts, conv, model, urls, wsgihelpers
# Routes
def route_api1(environ, start_response):
    """Resolve the chart named in the URL, bind it to the request context
    and dispatch to the single-chart API controllers."""
    request = webob.Request(environ)
    ctx = contexts.Ctx(request)
    # Convert the URL fragment into a Chart instance (or an error).
    to_chart = conv.pipe(
        conv.input_to_slug,
        conv.not_none,
        model.Chart.make_id_or_slug_to_instance(),
    )
    chart, error = to_chart(request.urlvars.get('id_or_slug'), state=ctx)
    if error is not None:
        not_found_app = wsgihelpers.not_found(ctx, message=error)
        return not_found_app(environ, start_response)
    ctx.node = chart
    router = urls.make_router(
        ('GET', '^$', api1_view),
        ('POST', '^$', api1_create_or_edit),
        (('GET', 'POST'), '^/delete$', api1_delete),
    )
    return router(environ, start_response)
def route_api1_class(environ, start_response):
    """Dispatch collection-level chart API requests (search, create,
    or fall through to a single chart route)."""
    routes = (
        ('GET', '^$', api1_search),
        ('POST', '^$', api1_create_or_edit),
        (None, '^/(?P<id_or_slug>[^/]+)(?=/|$)', route_api1),
    )
    router = urls.make_router(*routes)
    return router(environ, start_response)
# Controllers
@wsgify
def api1_create_or_edit(req):
    """Create a new chart or edit the chart bound to the context.

    The POST body is a JSON document validated by the converter
    pipeline below.  The requester must be authenticated; in edit mode
    it must also own the chart.  Responds with the saved chart as JSON.
    """
    ctx = contexts.Ctx(req)
    user = model.get_user(ctx, check=True)
    # Create mode when no chart was resolved from the URL (collection POST).
    is_create_mode = ctx.node is None
    # Parse and validate the JSON body into chart attributes.
    chart_attributes, errors = conv.pipe(
        conv.input_to_json_dict,
        conv.struct(
            {
                'composers': conv.validate_list_of_strings,
                'compositionYear': conv.test_isinstance(int),
                'genre': conv.pipe(
                    conv.test_isinstance(unicode),
                    conv.cleanup_line,
                ),
                'interpretations': conv.pipe(
                    conv.test_isinstance(list),
                    conv.uniform_sequence(
                        conv.pipe(
                            conv.test_isinstance(dict),
                            conv.struct(
                                {
                                    'externalLinks': conv.pipe(
                                        conv.test_isinstance(list),
                                        conv.uniform_sequence(
                                            conv.pipe(
                                                conv.test_isinstance(unicode),
                                                conv.make_str_to_url(full=True),
                                                conv.not_none,
                                            ),
                                        ),
                                    ),
                                    'interpreterName': conv.empty_to_none,
                                    'year': conv.test_isinstance(int),
                                },
                                default=None,  # Fail if unexpected item.
                            ),
                            conv.empty_to_none,
                            conv.not_none,
                        ),
                    ),
                    conv.empty_to_none,
                ),
                'key': conv.pipe(
                    conv.test_isinstance(unicode),
                    conv.cleanup_line,
                    conv.str_to_chart_key,
                    conv.not_none,
                ),
                'parts': conv.pipe(
                    conv.test_isinstance(dict),
                    conv.uniform_mapping(
                        conv.cleanup_line,
                        conv.pipe(
                            conv.test_isinstance(list),
                            conv.uniform_sequence(
                                conv.pipe(
                                    conv.test_isinstance(dict),
                                    conv.struct(
                                        {
                                            'alterations': conv.pipe(
                                                conv.test_isinstance(list),
                                                conv.uniform_sequence(
                                                    conv.pipe(
                                                        conv.test_isinstance(unicode),
                                                        conv.empty_to_none,
                                                        conv.not_none,
                                                    ),
                                                ),
                                            ),
                                            'degree': conv.pipe(
                                                conv.test_isinstance(int),
                                                conv.test_between(0, 11),
                                            ),
                                            'duration': conv.anything_to_float,
                                        },
                                        default=None,  # Fail if unexpected item.
                                    ),
                                    conv.empty_to_none,
                                    conv.not_none,
                                ),
                            ),
                        ),
                    ),
                ),
                'structure': conv.validate_list_of_strings,
                'title': conv.pipe(
                    conv.test_isinstance(unicode),
                    conv.cleanup_line,
                    conv.not_none,
                ),
            },
            default=None,  # Fail if unexpected item.
        ),
        conv.validate_structure_and_parts,
    )(req.body, state=ctx)
    if errors is not None:
        return wsgihelpers.bad_request(ctx, errors=errors, message=ctx._(u'Invalid JSON'))
    # Shared "duplicate slug" error response used by both modes below.
    chart_already_exists = lambda ctx, slug: \
        wsgihelpers.bad_request(ctx, message=ctx._(u'Chart with slug "{}" already exists'.format(slug)))
    if is_create_mode:
        # Slugs must be unique across all charts.
        slug = conv.slugify(chart_attributes['title'])
        existing_chart = model.Chart.find_one({'slug': slug})
        if existing_chart is not None:
            return chart_already_exists(ctx, slug)
        chart_attributes['owner_account_id'] = user._id
        chart = model.Chart(**chart_attributes)
    else:
        chart = ctx.node
        model.check_owner(ctx, user, chart)
        slug = conv.slugify(chart_attributes['title'])
        # Exclude the chart being edited from the duplicate-slug check.
        existing_chart = model.Chart.find_one({'_id': {'$ne': chart._id}, 'slug': slug})
        if existing_chart is not None:
            return chart_already_exists(ctx, slug)
        chart.set_attributes(**chart_attributes)
    chart.compute_attributes()
    chart.save(safe=True)
    return wsgihelpers.respond_json(ctx, {'chart': chart.to_json(state=ctx)})
@wsgify
def api1_delete(req):
    """Delete the chart bound to the context; only its owner may do so."""
    ctx = contexts.Ctx(req)
    user = model.get_user(ctx, check=True)
    chart = ctx.node
    # Ownership check raises/fails the request for non-owners.
    model.check_owner(ctx, user, chart)
    chart.delete(safe=True)
    return wsgihelpers.respond_json(ctx, {'delete': 'ok'})
@wsgify
def api1_search(req):
    """Search charts by keyword prefix and/or owner username."""
    ctx = contexts.Ctx(req)
    data, errors = conv.struct(
        {
            'ownerSlug': conv.cleanup_line,
            'q': conv.cleanup_line,
        },
        default=None,  # Fail if unexpected item.
    )(req.params, state=conv.default_state)
    if errors is not None:
        return wsgihelpers.bad_request(ctx, errors=errors)
    spec = {}
    if data['q'] is not None:
        # Every query word must prefix-match one of the chart keywords.
        keywords = data['q'].strip().split()
        prefix_patterns = [re.compile(u'^{0}'.format(re.escape(keyword)))
                           for keyword in keywords]
        spec['keywords'] = {'$all': prefix_patterns}
    if data['ownerSlug']:
        owner_account = model.Account.find_one({'username': data['ownerSlug']})
        if owner_account is None:
            return wsgihelpers.bad_request(
                ctx, message=ctx._(u'Invalid account: {}'.format(data['ownerSlug'])))
        spec['owner_account_id'] = owner_account._id
    charts_cursor = model.Chart.find(spec).sort('slug').limit(conf['charts.limit'])
    return wsgihelpers.respond_json(ctx, {
        'charts': [chart.to_json(state=ctx, with_owner=True)
                   for chart in charts_cursor],
    })
@wsgify
def api1_view(req):
    """Return the JSON representation of the chart bound to the context."""
    ctx = contexts.Ctx(req)
    chart = ctx.node
    payload = {'chart': chart.to_json(state=ctx, with_owner=True)}
    return wsgihelpers.respond_json(ctx, payload)
|
Wall Mount Lamp Architecture | Brevardbesthomes.com wall mount lamps lowes. wall mount lamp amazon. wall mount lamp with arm.
Wall Mount Lamp Brilliant Kenroy Home 20950ORB Sweep Swing Arm Oil Rubbed Bronze For 2. Wall Mount Lamp Incredible Link LED Small Design Within Reach 3. Wall Mount Lamp Amazing Buy The Anglepoise Original 1227 Brass Mounted At Nest Co Uk Inside 4. Wall Mount Lamp Popular Shop Allen Roth 15 62 In H Brushed Nickel Swing Arm Traditional Within 5. Wall Mount Lamp Popular Vintage Sconces Lights For Bathroom Kitchen Throughout 6. Wall Mount Lamp Modern Type 75 Mounted Anglepoise With Regard To 7. Wall Mount Lamp New Amazon Com CEENWE Athrun Industrial Retro Iron Creative With Regard To 8. Wall Mount Lamp Popular Shop 10 In H Steel Swing Arm Contemporary Modern Mounted Regarding 9. Wall Mount Lamp New Bronze Long Bulb Cage For 10. Wall Mount Lamp Attractive NEW Chrome Modern LED Lamps Sconces Lights Bathroom Kitchen Intended For 11. Wall Mount Lamp Incredible Anglepoise Type 75 Mini Mounted Throughout 12. Wall Mount Lamp Incredible Mounted Swing Arm Best Armwall Simple Pertaining To With Regard 13. Wall Mount Lamp Popular Chrome Bruin Moderne LED Wandlampen Sconces Verlichting Badkamer Within 14. Wall Mount Lamp Modern Led Light Black Iron With Fabric Throughout 15. Wall Mount Lamp Brilliant Tolomeo Design Within Reach With 16. Wall Mount Lamp New Kenroy Home 30110BLKP Simplicity Swing Arm Matte Black Throughout 17. Wall Mount Lamp Incredible Shop Allen Roth 15 62 In H Oil Rubbed Bronze Swing Arm Traditional With 18. Wall Mount Lamp Stylish Brass Medium Bulb Cage E27 Edison Light Globes Pty Ltd For 19.
|
# Copyright (c) 2012 Zadara Storage, Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for Zadara Virtual Private Storage Array (VPSA).
This driver requires VPSA with API ver.13.07 or higher.
"""
import httplib
from lxml import etree
from oslo.config import cfg
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.volume import driver
LOG = logging.getLogger(__name__)
# Driver configuration options, registered on the global CONF below.
zadara_opts = [
    # --- VPSA connectivity ---
    cfg.StrOpt('zadara_vpsa_ip',
               default=None,
               help='Management IP of Zadara VPSA'),
    cfg.StrOpt('zadara_vpsa_port',
               default=None,
               help='Zadara VPSA port number'),
    cfg.BoolOpt('zadara_vpsa_use_ssl',
                default=False,
                help='Use SSL connection'),
    # --- Credentials ---
    cfg.StrOpt('zadara_user',
               default=None,
               help='User name for the VPSA'),
    cfg.StrOpt('zadara_password',
               default=None,
               help='Password for the VPSA',
               secret=True),
    # --- Volume defaults ---
    cfg.StrOpt('zadara_vpsa_poolname',
               default=None,
               help='Name of VPSA storage pool for volumes'),
    cfg.BoolOpt('zadara_vol_thin',
                default=True,
                help='Default thin provisioning policy for volumes'),
    cfg.BoolOpt('zadara_vol_encrypt',
                default=False,
                help='Default encryption policy for volumes'),
    cfg.StrOpt('zadara_vol_name_template',
               default='OS_%s',
               help='Default template for VPSA volume names'),
    # --- Deletion behavior ---
    cfg.BoolOpt('zadara_vpsa_auto_detach_on_delete',
                default=True,
                help="Automatically detach from servers on volume delete"),
    cfg.BoolOpt('zadara_vpsa_allow_nonexistent_delete',
                default=True,
                help="Don't halt on deletion of non-existing volumes"), ]

CONF = cfg.CONF
CONF.register_opts(zadara_opts)
class ZadaraVPSAConnection(object):
    """Executes volume driver commands on VPSA.

    Thin wrapper around the VPSA XML-over-HTTP(S) management API.  A
    session access key is obtained once via the 'login' command and is
    then attached to every subsequent request.
    """
    def __init__(self, conf):
        # conf: oslo.config object carrying the zadara_* options.
        self.conf = conf
        # Session key returned by the VPSA 'login' call; filled lazily by
        # ensure_connection().
        self.access_key = None
        self.ensure_connection()
    def _generate_vpsa_cmd(self, cmd, **kwargs):
        """Generate command to be sent to VPSA.

        Returns a (method, url, body) triple for the named command, or
        raises exception.UnknownCmd for an unrecognized command/method.
        """
        def _joined_params(params):
            # Encode a dict as 'k1=v1&k2=v2' for URLs and POST bodies.
            # NOTE(review): values are not URL-escaped; this assumes
            # configured names/passwords never need percent-encoding.
            param_str = []
            for k, v in params.items():
                param_str.append("%s=%s" % (k, v))
            return '&'.join(param_str)
        # Dictionary of applicable VPSA commands in the following format:
        # 'command': (method, API_URL, {optional parameters})
        # All entries are built eagerly, so kwargs.get() returning None
        # for keys a given command does not use is harmless - only the
        # requested command's tuple is ever read.
        vpsa_commands = {
            'login': ('POST',
                      '/api/users/login.xml',
                      {'user': self.conf.zadara_user,
                       'password': self.conf.zadara_password}),
            # Volume operations
            'create_volume': ('POST',
                              '/api/volumes.xml',
                              {'name': kwargs.get('name'),
                               'capacity': kwargs.get('size'),
                               'pool': self.conf.zadara_vpsa_poolname,
                               'thin': 'YES'
                               if self.conf.zadara_vol_thin else 'NO',
                               'crypt': 'YES'
                               if self.conf.zadara_vol_encrypt else 'NO'}),
            'delete_volume': ('DELETE',
                              '/api/volumes/%s.xml' % kwargs.get('vpsa_vol'),
                              {}),
            'expand_volume': ('POST',
                              '/api/volumes/%s/expand.xml'
                              % kwargs.get('vpsa_vol'),
                              {'capacity': kwargs.get('size')}),
            # Snapshot operations
            # Snapshots are done per-consistency-group, hence 'cg_name'.
            'create_snapshot': ('POST',
                                '/api/consistency_groups/%s/snapshots.xml'
                                % kwargs.get('cg_name'),
                                {'display_name': kwargs.get('snap_name')}),
            'delete_snapshot': ('DELETE',
                                '/api/snapshots/%s.xml'
                                % kwargs.get('snap_id'),
                                {}),
            'create_clone_from_snap': ('POST',
                                       '/api/consistency_groups/%s/clone.xml'
                                       % kwargs.get('cg_name'),
                                       {'name': kwargs.get('name'),
                                        'snapshot': kwargs.get('snap_id')}),
            'create_clone': ('POST',
                             '/api/consistency_groups/%s/clone.xml'
                             % kwargs.get('cg_name'),
                             {'name': kwargs.get('name')}),
            # Server operations
            # A VPSA 'server' represents an iSCSI initiator (by IQN).
            'create_server': ('POST',
                              '/api/servers.xml',
                              {'display_name': kwargs.get('initiator'),
                               'iqn': kwargs.get('initiator')}),
            # Attach/Detach operations
            'attach_volume': ('POST',
                              '/api/servers/%s/volumes.xml'
                              % kwargs.get('vpsa_srv'),
                              {'volume_name[]': kwargs.get('vpsa_vol'),
                               'force': 'NO'}),
            'detach_volume': ('POST',
                              '/api/volumes/%s/detach.xml'
                              % kwargs.get('vpsa_vol'),
                              {'server_name[]': kwargs.get('vpsa_srv'),
                               'force': 'NO'}),
            # Get operations
            'list_volumes': ('GET',
                             '/api/volumes.xml',
                             {}),
            'list_pools': ('GET',
                           '/api/pools.xml',
                           {}),
            'list_controllers': ('GET',
                                 '/api/vcontrollers.xml',
                                 {}),
            'list_servers': ('GET',
                             '/api/servers.xml',
                             {}),
            'list_vol_attachments': ('GET',
                                     '/api/volumes/%s/servers.xml'
                                     % kwargs.get('vpsa_vol'),
                                     {}),
            'list_vol_snapshots': ('GET',
                                   '/api/consistency_groups/%s/snapshots.xml'
                                   % kwargs.get('cg_name'),
                                   {})}
        if cmd not in vpsa_commands.keys():
            raise exception.UnknownCmd(cmd=cmd)
        else:
            (method, url, params) = vpsa_commands[cmd]
        if method == 'GET':
            # For GET commands add parameters to the URL
            params.update(dict(access_key=self.access_key,
                               page=1, start=0, limit=0))
            url += '?' + _joined_params(params)
            body = ''
        elif method == 'DELETE':
            # For DELETE commands add parameters to the URL
            params.update(dict(access_key=self.access_key))
            url += '?' + _joined_params(params)
            body = ''
        elif method == 'POST':
            # 'login' is the one POST issued before an access key exists.
            if self.access_key:
                params.update(dict(access_key=self.access_key))
            body = _joined_params(params)
        else:
            raise exception.UnknownCmd(cmd=method)
        return (method, url, body)
    def ensure_connection(self, cmd=None):
        """Retrieve access key for VPSA connection.

        No-op if already authenticated.  The cmd == 'login' check stops
        the send_cmd() -> ensure_connection() recursion for the login
        request itself.
        """
        if self.access_key or cmd == 'login':
            return
        cmd = 'login'
        xml_tree = self.send_cmd(cmd)
        # The login response carries the key in <user><access-key>.
        user = xml_tree.find('user')
        if user is None:
            raise exception.MalformedResponse(cmd=cmd,
                                              reason='no "user" field')
        access_key = user.findtext('access-key')
        if access_key is None:
            raise exception.MalformedResponse(cmd=cmd,
                                              reason='no "access-key" field')
        self.access_key = access_key
    def send_cmd(self, cmd, **kwargs):
        """Send command to VPSA Controller.

        Returns the parsed XML response tree.  Raises on HTTP errors and
        on a non-zero VPSA status code.
        """
        # Log in first if needed (no-op for the login command itself).
        self.ensure_connection(cmd)
        (method, url, body) = self._generate_vpsa_cmd(cmd, **kwargs)
        LOG.debug(_('Sending %(method)s to %(url)s. Body "%(body)s"'),
                  {'method': method, 'url': url, 'body': body})
        # A fresh HTTP connection is opened per command.
        if self.conf.zadara_vpsa_use_ssl:
            connection = httplib.HTTPSConnection(self.conf.zadara_vpsa_ip,
                                                 self.conf.zadara_vpsa_port)
        else:
            connection = httplib.HTTPConnection(self.conf.zadara_vpsa_ip,
                                                self.conf.zadara_vpsa_port)
        connection.request(method, url, body)
        response = connection.getresponse()
        if response.status != 200:
            connection.close()
            raise exception.BadHTTPResponseStatus(status=response.status)
        data = response.read()
        connection.close()
        xml_tree = etree.fromstring(data)
        # VPSA reports success as <status>0</status> in the XML body.
        status = xml_tree.findtext('status')
        if status != '0':
            raise exception.FailedCmdWithDump(status=status, data=data)
        if method in ['POST', 'DELETE']:
            LOG.debug(_('Operation completed. %(data)s'), {'data': data})
        return xml_tree
class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
    """Zadara VPSA iSCSI volume driver.

    Maps Cinder volume operations onto the VPSA management API via
    ZadaraVPSAConnection.  Cinder volume names are translated to VPSA
    display names with the zadara_vol_name_template option.
    """
    VERSION = '13.07'
    def __init__(self, *args, **kwargs):
        super(ZadaraVPSAISCSIDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(zadara_opts)
    def do_setup(self, context):
        """Any initialization the volume driver does while starting.

        Establishes initial connection with VPSA and retrieves access_key.
        """
        self.vpsa = ZadaraVPSAConnection(self.configuration)
    def check_for_setup_error(self):
        """Returns an error (exception) if prerequisites aren't met."""
        self.vpsa.ensure_connection()
    def local_path(self, volume):
        """Return local path to existing local volume."""
        raise NotImplementedError()
    def _xml_parse_helper(self, xml_tree, first_level, search_tuple,
                          first=True):
        """Helper for parsing VPSA's XML output.

        Returns single item if first==True or list for multiple selection.
        If second argument in search_tuple is None - returns all items with
        appropriate key.  Returns None when nothing matches.
        """
        objects = xml_tree.find(first_level)
        if objects is None:
            return None
        result_list = []
        (key, value) = search_tuple
        # NOTE(review): 'object' shadows the builtin, and getchildren()
        # is deprecated in lxml/ElementTree - kept as-is for parity.
        for object in objects.getchildren():
            found_value = object.findtext(key)
            # value is None means "match every element that has the key".
            if found_value and (found_value == value or value is None):
                if first:
                    return object
                else:
                    result_list.append(object)
        return result_list if result_list else None
    def _get_vpsa_volume_name_and_size(self, name):
        """Return VPSA's name & size for the volume.

        'name' is the VPSA display name; returns (None, None) if absent.
        """
        xml_tree = self.vpsa.send_cmd('list_volumes')
        volume = self._xml_parse_helper(xml_tree, 'volumes',
                                        ('display-name', name))
        if volume is not None:
            return (volume.findtext('name'),
                    int(volume.findtext('virtual-capacity')))
        return (None, None)
    def _get_vpsa_volume_name(self, name):
        """Return VPSA's name for the volume."""
        (vol_name, size) = self._get_vpsa_volume_name_and_size(name)
        return vol_name
    def _get_volume_cg_name(self, name):
        """Return name of the consistency group for the volume.

        Snapshot/clone operations are addressed per consistency group.
        """
        xml_tree = self.vpsa.send_cmd('list_volumes')
        volume = self._xml_parse_helper(xml_tree, 'volumes',
                                        ('display-name', name))
        if volume is not None:
            return volume.findtext('cg-name')
        return None
    def _get_snap_id(self, cg_name, snap_name):
        """Return snapshot ID for particular volume."""
        xml_tree = self.vpsa.send_cmd('list_vol_snapshots',
                                      cg_name=cg_name)
        snap = self._xml_parse_helper(xml_tree, 'snapshots',
                                      ('display-name', snap_name))
        if snap is not None:
            return snap.findtext('name')
        return None
    def _get_pool_capacity(self, pool_name):
        """Return pool's total and available capacities (GB).

        Falls back to 'infinite' when the pool cannot be found.
        """
        xml_tree = self.vpsa.send_cmd('list_pools')
        pool = self._xml_parse_helper(xml_tree, 'pools',
                                      ('name', pool_name))
        if pool is not None:
            total = int(pool.findtext('capacity'))
            free = int(float(pool.findtext('available-capacity')))
            LOG.debug(_('Pool %(name)s: %(total)sGB total, %(free)sGB free'),
                      {'name': pool_name, 'total': total, 'free': free})
            return (total, free)
        return ('infinite', 'infinite')
    def _get_active_controller_details(self):
        """Return details of VPSA's active controller.

        Returns a dict with iSCSI target, IP and CHAP credentials, or
        None when no controller is in the 'active' state.
        """
        xml_tree = self.vpsa.send_cmd('list_controllers')
        ctrl = self._xml_parse_helper(xml_tree, 'vcontrollers',
                                      ('state', 'active'))
        if ctrl is not None:
            return dict(target=ctrl.findtext('target'),
                        ip=ctrl.findtext('iscsi-ip'),
                        chap_user=ctrl.findtext('chap-username'),
                        chap_passwd=ctrl.findtext('chap-target-secret'))
        return None
    def _get_server_name(self, initiator):
        """Return VPSA's name for server object with given IQN."""
        xml_tree = self.vpsa.send_cmd('list_servers')
        server = self._xml_parse_helper(xml_tree, 'servers',
                                        ('iqn', initiator))
        if server is not None:
            return server.findtext('name')
        return None
    def _create_vpsa_server(self, initiator):
        """Create server object within VPSA (if doesn't exist).

        Returns the VPSA-side server name either way.
        """
        vpsa_srv = self._get_server_name(initiator)
        if not vpsa_srv:
            xml_tree = self.vpsa.send_cmd('create_server', initiator=initiator)
            vpsa_srv = xml_tree.findtext('server-name')
        return vpsa_srv
    def create_volume(self, volume):
        """Create volume."""
        self.vpsa.send_cmd(
            'create_volume',
            name=self.configuration.zadara_vol_name_template % volume['name'],
            size=volume['size'])
    def delete_volume(self, volume):
        """Delete volume.

        Return ok if doesn't exist. Auto detach from all servers.
        """
        # Get volume name
        name = self.configuration.zadara_vol_name_template % volume['name']
        vpsa_vol = self._get_vpsa_volume_name(name)
        if not vpsa_vol:
            msg = _('Volume %(name)s could not be found. '
                    'It might be already deleted') % {'name': name}
            LOG.warning(msg)
            # Behavior on a missing volume is configurable: ignore or raise.
            if self.configuration.zadara_vpsa_allow_nonexistent_delete:
                return
            else:
                raise exception.VolumeNotFound(volume_id=name)
        # Check attachment info and detach from all
        xml_tree = self.vpsa.send_cmd('list_vol_attachments',
                                      vpsa_vol=vpsa_vol)
        # ('iqn', None) matches every attached server entry.
        servers = self._xml_parse_helper(xml_tree, 'servers',
                                         ('iqn', None), first=False)
        if servers:
            if not self.configuration.zadara_vpsa_auto_detach_on_delete:
                raise exception.VolumeAttached(volume_id=name)
            for server in servers:
                vpsa_srv = server.findtext('name')
                if vpsa_srv:
                    self.vpsa.send_cmd('detach_volume',
                                       vpsa_srv=vpsa_srv,
                                       vpsa_vol=vpsa_vol)
        # Delete volume
        self.vpsa.send_cmd('delete_volume', vpsa_vol=vpsa_vol)
    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        LOG.debug(_('Create snapshot: %s'), snapshot['name'])
        # Retrieve the CG name for the base volume
        volume_name = self.configuration.zadara_vol_name_template\
            % snapshot['volume_name']
        cg_name = self._get_volume_cg_name(volume_name)
        if not cg_name:
            msg = _('Volume %(name)s not found') % {'name': volume_name}
            LOG.error(msg)
            raise exception.VolumeNotFound(volume_id=volume_name)
        self.vpsa.send_cmd('create_snapshot',
                           cg_name=cg_name,
                           snap_name=snapshot['name'])
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        LOG.debug(_('Delete snapshot: %s'), snapshot['name'])
        # Retrieve the CG name for the base volume
        volume_name = self.configuration.zadara_vol_name_template\
            % snapshot['volume_name']
        cg_name = self._get_volume_cg_name(volume_name)
        if not cg_name:
            # If the volume isn't present, then don't attempt to delete
            LOG.warning(_("snapshot: original volume %s not found, "
                          "skipping delete operation")
                        % snapshot['volume_name'])
            return True
        snap_id = self._get_snap_id(cg_name, snapshot['name'])
        if not snap_id:
            # If the snapshot isn't present, then don't attempt to delete
            LOG.warning(_("snapshot: snapshot %s not found, "
                          "skipping delete operation")
                        % snapshot['name'])
            return True
        self.vpsa.send_cmd('delete_snapshot',
                           snap_id=snap_id)
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        LOG.debug(_('Creating volume from snapshot: %s') % snapshot['name'])
        # Retrieve the CG name for the base volume
        volume_name = self.configuration.zadara_vol_name_template\
            % snapshot['volume_name']
        cg_name = self._get_volume_cg_name(volume_name)
        if not cg_name:
            msg = _('Volume %(name)s not found') % {'name': volume_name}
            LOG.error(msg)
            raise exception.VolumeNotFound(volume_id=volume_name)
        snap_id = self._get_snap_id(cg_name, snapshot['name'])
        if not snap_id:
            msg = _('Snapshot %(name)s not found') % {'name': snapshot['name']}
            LOG.error(msg)
            raise exception.VolumeNotFound(volume_id=snapshot['name'])
        self.vpsa.send_cmd('create_clone_from_snap',
                           cg_name=cg_name,
                           name=self.configuration.zadara_vol_name_template
                           % volume['name'],
                           snap_id=snap_id)
    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        LOG.debug(_('Creating clone of volume: %s') % src_vref['name'])
        # Retrieve the CG name for the base volume
        volume_name = self.configuration.zadara_vol_name_template\
            % src_vref['name']
        cg_name = self._get_volume_cg_name(volume_name)
        if not cg_name:
            msg = _('Volume %(name)s not found') % {'name': volume_name}
            LOG.error(msg)
            raise exception.VolumeNotFound(volume_id=volume_name)
        self.vpsa.send_cmd('create_clone',
                           cg_name=cg_name,
                           name=self.configuration.zadara_vol_name_template
                           % volume['name'])
    def extend_volume(self, volume, new_size):
        """Extend an existing volume.

        VPSA expands by a delta, so the current size is fetched first.
        """
        # Get volume name
        name = self.configuration.zadara_vol_name_template % volume['name']
        (vpsa_vol, size) = self._get_vpsa_volume_name_and_size(name)
        if not vpsa_vol:
            msg = _('Volume %(name)s could not be found. '
                    'It might be already deleted') % {'name': name}
            LOG.error(msg)
            raise exception.VolumeNotFound(volume_id=name)
        if new_size < size:
            raise exception.InvalidInput(
                reason='%s < current size %s' % (new_size, size))
        expand_size = new_size - size
        self.vpsa.send_cmd('expand_volume',
                           vpsa_vol=vpsa_vol,
                           size=expand_size)
    def create_export(self, context, volume):
        """Irrelevant for VPSA volumes. Export created during attachment."""
        pass
    def ensure_export(self, context, volume):
        """Irrelevant for VPSA volumes. Export created during attachment."""
        pass
    def remove_export(self, context, volume):
        """Irrelevant for VPSA volumes. Export removed during detach."""
        pass
    def initialize_connection(self, volume, connector):
        """Attach volume to initiator/host.

        During this call VPSA exposes volume to particular Initiator. It also
        creates a 'server' entity for Initiator (if it was not created before)
        All necessary connection information is returned, including auth data.
        Connection data (target, LUN) is not stored in the DB.
        """
        # Get/Create server name for IQN
        initiator_name = connector['initiator']
        vpsa_srv = self._create_vpsa_server(initiator_name)
        if not vpsa_srv:
            raise exception.ZadaraServerCreateFailure(name=initiator_name)
        # Get volume name
        name = self.configuration.zadara_vol_name_template % volume['name']
        vpsa_vol = self._get_vpsa_volume_name(name)
        if not vpsa_vol:
            raise exception.VolumeNotFound(volume_id=name)
        # Get Active controller details
        ctrl = self._get_active_controller_details()
        if not ctrl:
            raise exception.ZadaraVPSANoActiveController()
        # Attach volume to server
        self.vpsa.send_cmd('attach_volume',
                           vpsa_srv=vpsa_srv,
                           vpsa_vol=vpsa_vol)
        # Get connection info
        # Re-list attachments to learn the target IQN and LUN assigned.
        xml_tree = self.vpsa.send_cmd('list_vol_attachments',
                                      vpsa_vol=vpsa_vol)
        server = self._xml_parse_helper(xml_tree, 'servers',
                                        ('iqn', initiator_name))
        if server is None:
            raise exception.ZadaraAttachmentsNotFound(name=name)
        target = server.findtext('target')
        lun = server.findtext('lun')
        if target is None or lun is None:
            raise exception.ZadaraInvalidAttachmentInfo(
                name=name,
                reason='target=%s, lun=%s' % (target, lun))
        properties = {}
        properties['target_discovered'] = False
        # 3260 is the standard iSCSI port.
        properties['target_portal'] = '%s:%s' % (ctrl['ip'], '3260')
        properties['target_iqn'] = target
        properties['target_lun'] = lun
        properties['volume_id'] = volume['id']
        # CHAP credentials come from the active controller.
        properties['auth_method'] = 'CHAP'
        properties['auth_username'] = ctrl['chap_user']
        properties['auth_password'] = ctrl['chap_passwd']
        LOG.debug(_('Attach properties: %(properties)s'),
                  {'properties': properties})
        return {'driver_volume_type': 'iscsi',
                'data': properties}
    def terminate_connection(self, volume, connector, **kwargs):
        """Detach volume from the initiator."""
        # Get server name for IQN
        initiator_name = connector['initiator']
        vpsa_srv = self._get_server_name(initiator_name)
        if not vpsa_srv:
            raise exception.ZadaraServerNotFound(name=initiator_name)
        # Get volume name
        name = self.configuration.zadara_vol_name_template % volume['name']
        vpsa_vol = self._get_vpsa_volume_name(name)
        if not vpsa_vol:
            raise exception.VolumeNotFound(volume_id=name)
        # Detach volume from server
        self.vpsa.send_cmd('detach_volume',
                           vpsa_srv=vpsa_srv,
                           vpsa_vol=vpsa_vol)
    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        If 'refresh' is True, run update the stats first.
        """
        if refresh:
            self._update_volume_stats()
        return self._stats
    def _update_volume_stats(self):
        """Retrieve stats info from volume group."""
        LOG.debug(_("Updating volume stats"))
        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data["volume_backend_name"] = backend_name or self.__class__.__name__
        data["vendor_name"] = 'Zadara Storage'
        data["driver_version"] = self.VERSION
        data["storage_protocol"] = 'iSCSI'
        data['reserved_percentage'] = self.configuration.reserved_percentage
        data['QoS_support'] = False
        # Capacities may be the string 'infinite' if the pool lookup fails.
        (total, free) = self._get_pool_capacity(self.configuration.
                                                zadara_vpsa_poolname)
        data['total_capacity_gb'] = total
        data['free_capacity_gb'] = free
        self._stats = data
|
Description: People are unlikely to figure out from the source distribution alone how to contribute to Doomseeker & Wadseeker.
I propose adding some text to the README, or creating a CONTRIBUTING file, with this information and more.
At a minimum, it should include a link to our issue tracker.
|
import numpy as Num
import numpy.fft as FFT
import Pgplot, ppgplot, bisect, sinc_interp, parfile
from scipy.stats import histogram
from scipy.special import ndtr, ndtri, chdtrc, chdtri, fdtr, i0, kolmogorov
from scipy.optimize import leastsq
import scipy.optimize.zeros as zeros
from psr_constants import *
# True for plain ints and (Python 2 only) longs; used to detect exact
# integer inputs so float promotion can be forced where needed.
isintorlong = lambda x: type(x) == type(0) or type(x) == type(0L)
def span(Min, Max, Number):
    """
    span(Min, Max, Number):
        Create a range of 'Num' floats given inclusive 'Min' and 'Max' values.
    """
    assert isintorlong(Number)
    # If both endpoints are integers and (Max-Min) is not an exact multiple
    # of the step count, promote Max to float so the arithmetic below is
    # done in floating point (avoids Python 2 integer-division truncation).
    if isintorlong(Min) and isintorlong(Max) and \
       (Max-Min) % (Number-1) != 0:
        Max = float(Max) # force floating points
    return Min+(Max-Min)*Num.arange(Number)/(Number-1)
def distance(width):
    """
    distance(width):
        Return a 'width' x 'width' Num Python array with each
        point set to the geometric distance from the array's center.
    """
    half = width / 2.0
    # Squared offsets of the bin centers from the array's middle.
    sq_offsets = Num.arange(-half + 0.5, half + 0.5, 1.0) ** 2
    # Broadcasting gives dx^2 + dy^2 at every grid point.
    return Num.sqrt(sq_offsets[:, None] + sq_offsets[None, :])
def choose_N(orig_N):
    """
    choose_N(orig_N):
        Choose a time series length that is larger than
        the input value but that is highly factorable.
        Note that the returned value must be divisible
        by at least the maximum downsample factor * 2.
        Currently, this is 8 * 2 = 16.
        Returns 0 for inputs below 10000.
    """
    # A list of 4-digit numbers that are highly factorable by small primes
    goodfactors = [1008, 1024, 1056, 1120, 1152, 1200, 1232, 1280, 1296,
                   1344, 1408, 1440, 1536, 1568, 1584, 1600, 1680, 1728,
                   1760, 1792, 1920, 1936, 2000, 2016, 2048, 2112, 2160,
                   2240, 2304, 2352, 2400, 2464, 2560, 2592, 2640, 2688,
                   2800, 2816, 2880, 3024, 3072, 3136, 3168, 3200, 3360,
                   3456, 3520, 3584, 3600, 3696, 3840, 3872, 3888, 3920,
                   4000, 4032, 4096, 4224, 4320, 4400, 4480, 4608, 4704,
                   4752, 4800, 4928, 5040, 5120, 5184, 5280, 5376, 5488,
                   5600, 5632, 5760, 5808, 6000, 6048, 6144, 6160, 6272,
                   6336, 6400, 6480, 6720, 6912, 7040, 7056, 7168, 7200,
                   7392, 7680, 7744, 7776, 7840, 7920, 8000, 8064, 8192,
                   8400, 8448, 8624, 8640, 8800, 8960, 9072, 9216, 9408,
                   9504, 9600, 9680, 9856]
    if orig_N < 10000:
        return 0
    # The number formed by the first four digits of orig_N.
    leading4 = int(str(orig_N)[:4])
    # Smallest good factor exceeding those four digits (fall back to the
    # largest factor if none is bigger).
    chosen = goodfactors[-1]
    for gf in goodfactors:
        if gf > leading4:
            chosen = gf
            break
    # Scale the chosen factor up by powers of ten until it covers orig_N.
    new_N = chosen
    while new_N < orig_N:
        new_N *= 10
    # Candidate power of two: the smallest one that is >= orig_N.
    two_N = 1 << (orig_N - 1).bit_length()
    # Take whichever candidate is closer to orig_N.
    return min(two_N, new_N)
def running_avg(arr, navg):
    """
    running_avg(arr, navg):
        Return an array of the running average of 'navg' bins from the
        input array 'arr'.

        len(arr) must be an integer multiple of navg; otherwise numpy
        raises a ValueError when reshaping.
    """
    a = Num.asarray(arr, 'd')
    # Use floor division: '/' is true division under Python 3, which would
    # produce a float dimension and break the shape assignment.
    a.shape = (len(a) // navg, navg)
    return Num.add.reduce(Num.transpose(a)) / navg
def hist(data, bins, range=None, laby="Number", **kwargs):
    """
    hist(data, bins, range=None, laby="Number", **kwargs):
        Return and plot a histogram in one variable.
          data  -- a sequence of data points
          bins  -- the number of bins into which the data is to be sorted
          range -- a tuple of two values, specifying the lower and
                   the upper end of the interval spanned by the bins.
                   Any data point outside this interval will be ignored.
                   If no range is given, the smallest and largest
                   data values are used to define the interval.
        Note:  This command also accepts all the keyword arge of plotbinned().
        Returns (bin_centers, counts).
    """
    # NOTE(review): scipy.stats.histogram was removed from modern SciPy;
    # this code depends on an old SciPy release.
    (ys, lox, dx, out) = histogram(data, bins, range)
    # Bin centers: left edge plus half a bin width.
    xs = Num.arange(bins, dtype='d')*dx + lox + 0.5*dx
    # Leave ~10% headroom above the tallest bin on the plot's y axis.
    maxy = int(1.1*max(ys))
    if maxy < max(ys):
        maxy = max(ys) + 1.0
    if 'rangey' not in kwargs.keys():
        kwargs['rangey']=[0,maxy]
    Pgplot.plotbinned(ys, xs, laby=laby, **kwargs)
    return (xs, ys)
def KS_test(data, cumdist, output=0):
    """
    KS_test(data, cumdist, output=0):
        Perform a Kolmogorov-Smirnov test on data compared to the
        cumulative-distribution function cumdist.
        Returns (D, P): the KS statistic and its significance.
    """
    nn = len(data)
    sdata = Num.sort(Num.asarray(data))
    # Largest deviation of the model CDF from the empirical step function,
    # evaluated just below (D1) and just above (D2) each step.
    D1 = Num.maximum.reduce(Num.absolute(cumdist(sdata)-
                                         Num.arange(nn, dtype='d')/nn))
    D2 = Num.maximum.reduce(Num.absolute(cumdist(sdata)-
                                         Num.arange(1,nn+1, dtype='d')/nn))
    D = max((D1, D2))
    # Kolmogorov survival function of sqrt(n)*D gives the probability.
    P = kolmogorov(Num.sqrt(nn)*D)
    if (output):
        print "Max distance between the cumulative distributions (D) = %.5g" % D
        print "Prob the data is from the specified distrbution (P) = %.3g" % P
    return (D, P)
def MJD_to_JD(MJD):
    """
    MJD_to_JD(MJD):
        Convert Modified Julian Date (MJD) to Julian Date (JD)
    """
    # JD and MJD differ by a fixed offset of 2400000.5 days.
    return 2400000.5 + MJD
def JD_to_MJD(JD):
    """
    JD_to_MJD(JD):
        Convert Julian Date (JD) to Modified Julian Date (MJD)
    """
    # Inverse of MJD_to_JD: subtract the fixed 2400000.5-day offset.
    offset = 2400000.5
    return JD - offset
def MJD_to_Julian_Epoch(MJD):
    """
    MJD_to_Julian_Epoch(MJD):
        Convert Modified Julian Date (MJD) to Julian Epoch
    """
    # Julian epoch J2000.0 corresponds to MJD 51544.5; years are 365.25 d.
    return (MJD - 51544.5) / 365.25 + 2000.0
def Julian_Epoch_to_MJD(jepoch):
    """
    Julian_Epoch_to_MJD(jepoch):
        Convert Julian Epoch to Modified Julian Date (MJD)
    """
    # Inverse of MJD_to_Julian_Epoch.
    return (jepoch - 2000.0) * 365.25 + 51544.5
def MJD_to_Besselian_Epoch(MJD):
    """
    MJD_to_Besselian_Epoch(MJD):
        Convert Modified Julian Date (MJD) to Besselian Epoch
    """
    # B1900.0 corresponds to MJD 15019.81352; tropical year length used.
    return (MJD - 15019.81352) / 365.242198781 + 1900.0
def Besselian_Epoch_to_MJD(bepoch):
    """
    Besselian_Epoch_to_MJD(bepoch):
        Convert Besselian Epoch to Modified Julian Date (MJD)
    """
    # Inverse of MJD_to_Besselian_Epoch.
    return (bepoch - 1900.0) * 365.242198781 + 15019.81352
def rad_to_dms(rad):
    """
    rad_to_dms(rad):
        Convert radians to degrees, minutes, and seconds of arc.
    """
    sign = -1 if rad < 0.0 else 1
    # Work with the magnitude folded into [0, 180) degrees.
    arc = RADTODEG * Num.fmod(Num.fabs(rad), PI)
    d = int(arc)
    arc = (arc - d) * 60.0
    m = int(arc)
    s = (arc - m) * 60.0
    # A zero degrees field cannot carry the sign, so put it on the
    # minutes and seconds instead.
    if sign == -1 and d == 0:
        return (sign * d, sign * m, sign * s)
    return (sign * d, m, s)
def dms_to_rad(deg, min, sec):
    """
    dms_to_rad(deg, min, sec):
        Convert degrees, minutes, and seconds of arc to radians.
    """
    # Negative iff the leading nonzero field is negative.
    negative = deg < 0.0 or (deg == 0.0 and (min < 0.0 or sec < 0.0))
    sign = -1 if negative else 1
    total_arcsec = (60.0 * (60.0 * Num.fabs(deg) +
                            Num.fabs(min)) + Num.fabs(sec))
    return sign * ARCSECTORAD * total_arcsec
def dms_to_deg(deg, min, sec):
    """
    dms_to_deg(deg, min, sec):
        Convert degrees, minutes, and seconds of arc to degrees.
    """
    # Convert to radians, then scale back out to degrees.
    return dms_to_rad(deg, min, sec) * RADTODEG
def rad_to_hms(rad):
    """
    rad_to_hms(rad):
        Convert radians to hours, minutes, and seconds of arc.
    """
    # Fold the angle into [0, 2*pi) so hours are always non-negative.
    rad = Num.fmod(rad, TWOPI)
    if rad < 0.0:
        rad += TWOPI
    hours = RADTOHRS * rad
    h = int(hours)
    minutes = (hours - h) * 60.0
    m = int(minutes)
    s = (minutes - m) * 60.0
    return (h, m, s)
def hms_to_rad(hour, min, sec):
    """
    hms_to_rad(hour, min, sec):
        Convert hours, minutes, and seconds of arc to radians
    """
    sign = -1 if hour < 0.0 else 1
    total_sec = (60.0 * (60.0 * Num.fabs(hour) +
                         Num.fabs(min)) + Num.fabs(sec))
    return sign * SECTORAD * total_sec
def hms_to_hrs(hour, min, sec):
    """
    hms_to_hrs(hour, min, sec):
        Convert hours, minutes, and seconds of arc to hours.
    """
    # Radians scaled back out to hours.
    return hms_to_rad(hour, min, sec) * RADTOHRS
def coord_to_string(h_or_d, m, s):
    """
    coord_to_string(h_or_d, m, s):
        Return a formatted string of RA or DEC values as
        'hh:mm:ss.ssss' if RA, or 'dd:mm:ss.ssss' if DEC.
    """
    # A leading '-' is needed if any field is negative; the sign can live
    # on the minutes/seconds when the degrees field is zero.
    negative = h_or_d < 0 or (abs(h_or_d) == 0 and (m < 0.0 or s < 0.0))
    prefix = "-" if negative else ""
    h_or_d, m, s = abs(h_or_d), abs(m), abs(s)
    # %.4f does not zero-pad the integer part, so pad seconds < 10 by hand
    # (9.9995 is the rounding threshold at 4 decimal places).
    if s >= 9.9995:
        return prefix + "%.2d:%.2d:%.4f" % (h_or_d, m, s)
    return prefix + "%.2d:%.2d:0%.4f" % (h_or_d, m, s)
def ra_to_rad(ra_string):
    """
    ra_to_rad(ra_string):
        Given a string containing RA information as
        'hh:mm:ss.ssss', return the equivalent decimal
        radians.
    """
    hours, minutes, seconds = ra_string.split(":")
    return hms_to_rad(int(hours), int(minutes), float(seconds))
def dec_to_rad(dec_string):
    """
    dec_to_rad(dec_string):
        Given a string containing DEC information as
        'dd:mm:ss.ssss', return the equivalent decimal
        radians.
    """
    d, m, s = dec_string.split(":")
    # int("-00") == 0 would lose the sign, so push it down onto the
    # minutes and seconds fields before converting.
    if "-" in d and int(d) == 0:
        m = "-" + m
        s = "-" + s
    return dms_to_rad(int(d), int(m), float(s))
def delta_m(flux_factor):
    """
    delta_m(flux_factor):
        Return the change in magnitudes caused by a change
        in flux of flux_factor.
    """
    # Pogson's relation: dm = -2.5 log10(F2/F1).
    return Num.log10(flux_factor) * -2.5
def flux_factor(delta_m):
    """
    flux_factor(delta_m):
        Return the change in flux caused by a change
        in magnitude of delta_m magnitudes
    """
    # Inverse of Pogson's relation.
    exponent = delta_m / -2.5
    return 10.0 ** exponent
def distance_modulus_to_distance(dm, absorption=0.0):
    """
    distance_modulus_to_distance(dm, absorption=0.0):
        Return the distance (kpc) given a distance modulus dm and
        an optional absorption.
    """
    # d(pc) = 10**((dm - A + 5) / 5); divide by 1000 to get kpc.
    corrected = dm - absorption
    return 10.0 ** ((corrected + 5.0) / 5.0) / 1000.0
def distance_to_distance_modulus(d, absorption=0.0):
    """
    distance_to_distance_modulus(d, absorption=0.0):
        Return the distance modulus given a distance d and
        an optional absorption.
    """
    # dm = 5 log10(d_pc) - 5 + A, where d is supplied in kpc.
    d_pc = d * 1000.0
    return 5.0 * Num.log10(d_pc) - 5.0 + absorption
def true_anomaly(E, ecc):
    """
    true_anomaly(E, ecc):
        Return the True Anomaly (in radians) given the Eccentric anomaly
        (E in radians) and the eccentricity (ecc)
    """
    # Standard half-angle relation between eccentric and true anomaly.
    ratio = Num.sqrt((1.0 + ecc) / (1.0 - ecc))
    return 2.0 * Num.arctan(ratio * Num.tan(E / 2.0))
def mass_funct(pb, x):
    """
    mass_funct(pb, x):
        Return the mass function of an orbit given the following:
            'pb' is the binary period in days.
            'x' is the projected semi-major axis in lt-sec.
    """
    pbs = pb * 86400.0
    # The constant folds 4*pi^2/G into these units (lt-sec, sec, Msun).
    numer = 8015123.37129 * x**3.0
    return numer / (pbs * pbs)
def mass_funct2(mp, mc, i):
    """
    mass_funct2(mp, mc, i):
        Return the mass function of an orbit given the following:
            'mp' is the mass of the primary in solar masses.
            'mc' is the mass of the companion in solar masses.
            'i' is the orbital inclination (rad).
        Note: An 'average' orbit has cos(i) = 0.5, or i = 60 deg
    """
    numerator = (mc * Num.sin(i))**3.0
    return numerator / (mc + mp)**2.0
def asini_c(pb, mf):
    """
    asini_c(pb, mf):
        Return the orbital projected semi-major axis (lt-sec) given:
            'pb' is the binary period in sec.
            'mf' is the mass function of the orbit.
    """
    # Invert the mass-function relation and take the cube root.
    cubed = mf * pb * pb / 8015123.37129
    return cubed ** (1.0 / 3.0)
def ELL1_check(par_file, output=False):
"""
ELL1_check(par_file):
Check the parfile to see if ELL1 can be safely used as the
binary model. To work properly, we should have:
asini/c * ecc**2 << timing precision / sqrt(# TOAs)
"""
psr = parfile.psr_par(par_file)
try:
lhs = psr.A1 * psr.E**2.0 * 1e6
except:
if output:
print "Can't compute asini/c * ecc**2, maybe parfile doesn't have a binary?"
return
try:
rhs = psr.TRES / Num.sqrt(psr.NTOA)
except:
if output:
print "Can't compute TRES / sqrt(# TOAs), maybe this isn't a TEMPO output parfile?"
return
if output:
print "Condition is asini/c * ecc**2 << timing precision / sqrt(# TOAs) to use ELL1:"
print " asini/c * ecc**2 = %8.3g us"%lhs
print " TRES / sqrt(# TOAs) = %8.3g us"%rhs
if lhs * 50.0 < rhs:
if output:
print "Should be fine."
return True
elif lhs * 5.0 < rhs:
if output:
print "Should be OK, but not optimal."
return True
else:
if output:
print "Should probably use BT or DD instead."
return False
def accel_to_z(accel, T, reffreq, harm=1):
    """
    accel_to_z(accel, T, reffreq, harm=1):
        Return the accelsearch 'z' (i.e. number of bins drifted)
        at a reference frequency 'reffreq', for an observation
        of duration 'T' seconds and with acceleration (in m/s/s)
        'accel'.  You can specify the harmonic number in 'harm'.
    """
    # z = a * h * f * T^2 / c  (SOL is the speed of light in m/s).
    drift = accel * harm * reffreq * T * T
    return drift / SOL
def z_to_accel(z, T, reffreq, harm=1):
    """
    z_to_accel(z, T, reffreq, harm=1):
        Return the acceleration (in m/s/s) corresponding to the
        accelsearch 'z' (i.e. number of bins drifted) at a
        reference frequency 'reffreq', for an observation
        of duration 'T'.  You can specify the harmonic number
        in 'harm'.
    """
    # Inverse of accel_to_z: a = z * c / (h * f * T^2).
    denom = harm * reffreq * T * T
    return z * SOL / denom
def bins_to_accel(z, T, f=[1.0, 1000.0], device="/XWIN"):
"""
bins_to_accel(z, T, f=[1.0, 1000.0], device="/XWIN"):
Make a plot showing the acceleration which corresponds
to a certain number of Fourier bins drifted 'z' during
an observation of length 'T'.
"""
fs = span(Num.log10(f[0]), Num.log10(f[1]), 1000)
accels = z_to_accel(z, T, 10.0**fs)
if (device):
Pgplot.plotxy(Num.log10(accels), fs, logx=1, logy=1,
labx="Frequency (Hz)",
laby="Acceleration (m/s\u2\d)", device=device)
ppgplot.pgmtxt("T", -2.0, 0.75, 0.0, "T = %.0f sec"%T)
ppgplot.pgmtxt("T", -3.5, 0.75, 0.0, "r\B\u\.\d = %.1f bins"%z)
if (device != '/XWIN'):
Pgplot.closeplot()
else:
return accels
def pulsar_mass(pb, x, mc, inc):
    """
    pulsar_mass(pb, x, mc, inc):
        Return the pulsar mass (in solar mass units) for a binary
        system with the following characteristics:
            'pb' is the binary period in days.
            'x' is the projected semi-major axis in lt-sec.
            'inc' is the orbital inclination in degrees.
            'mc' is the mass of the companion in solar mass units.
    """
    observed_mf = mass_funct(pb, x)
    incrad = inc * DEGTORAD
    # Solve mass_funct2(mp, mc, i) = observed_mf for mp by bisection.
    def _residual(mp):
        return mass_funct2(mp, mc, incrad) - observed_mf
    return zeros.bisect(_residual, 0.0, 1000.0)
def companion_mass(pb, x, inc=60.0, mpsr=1.4):
    """
    companion_mass(pb, x, inc=60.0, mpsr=1.4):
        Return the companion mass (in solar mass units) for a binary
        system with the following characteristics:
            'pb' is the binary period in days.
            'x' is the projected semi-major axis in lt-sec.
            'inc' is the orbital inclination in degrees.
            'mpsr' is the mass of the pulsar in solar mass units.
    """
    observed_mf = mass_funct(pb, x)
    incrad = inc * DEGTORAD
    # Solve mass_funct2(mpsr, mc, i) = observed_mf for mc by bisection.
    def _residual(mc):
        return mass_funct2(mpsr, mc, incrad) - observed_mf
    return zeros.bisect(_residual, 0.0, 1000.0)
def companion_mass_limit(pb, x, mpsr=1.4):
    """
    companion_mass_limit(pb, x, mpsr=1.4):
        Return the lower limit (corresponding to i = 90 degrees) of the
        companion mass (in solar mass units) in a binary system with
        the following characteristics:
            'pb' is the binary period in days.
            'x' is the projected semi-major axis in lt-sec.
            'mpsr' is the mass of the pulsar in solar mass units.
    """
    # An edge-on orbit (i = 90 deg) minimizes the inferred companion mass.
    return companion_mass(pb, x, inc=90.0, mpsr=mpsr)
def OMDOT(porb, e, Mp, Mc):
    """
    OMDOT(porb, e, Mp, Mc):
        Return the predicted advance of periastron (deg/yr) given the
        orbital period (days), eccentricity, and pulsar and companion masses.
    """
    # General-relativistic periastron advance, built up in rad/s and then
    # converted to deg/yr.
    angfreq_term = (porb*86400.0/TWOPI)**(-5.0/3.0)
    mass_term = (Tsun*(Mp+Mc))**(2.0/3.0)
    omdot_rad_per_sec = 3.0 * angfreq_term * mass_term / (1.0-e**2.0)
    return omdot_rad_per_sec * RADTODEG * SECPERJULYR
def GAMMA(porb, e, Mp, Mc):
    """
    GAMMA(porb, e, Mp, Mc):
        Return the predicted value of relativistic gamma (sec) given the
        orbital period (days), eccentricity, and pulsar and companion masses.
    """
    # Einstein delay amplitude, accumulated factor by factor.
    gamma = e * (porb*86400.0/TWOPI)**(1.0/3.0)
    gamma *= Tsun**(2.0/3.0)
    gamma *= (Mp+Mc)**(-4.0/3.0)
    gamma *= Mc
    gamma *= (Mp+2.0*Mc)
    return gamma
def PBDOT(porb, e, Mp, Mc):
    """
    PBDOT(porb, e, Mp, Mc):
        Return the predicted orbital period derivative (s/s) given the
        orbital period (days), eccentricity, and pulsar and companion
        masses.  (The docstring previously said seconds, but the code
        multiplies by 86400, i.e. it expects days - consistent with
        OMDOT() and GAMMA().)
    """
    # GR orbital decay from gravitational-wave emission.
    return -192.0*PI/5.0 * (porb*86400.0/TWOPI)**(-5.0/3.0) * \
           (1.0 + 73.0/24.0*e**2.0 + 37.0/96.0*e**4.0) * \
           (1.0-e**2.0)**(-7.0/2.0) * Tsun**(5.0/3.0) * \
           Mp * Mc * (Mp+Mc)**(-1.0/3.0)
def OMDOT_to_Mtot(OMDOT, porb, e):
    """
    OMDOT_to_Mtot(OMDOT, porb, e):
        Return the total mass (in solar units) of a system given an advance
        of periastron (OMDOT) in deg/yr.  The orbital period should be in days.
    """
    # Convert the periastron advance to rad/s.
    wd = OMDOT/SECPERJULYR*DEGTORAD
    # Invert the GR periastron-advance formula for (Mp + Mc).
    return (wd/3.0*(1.0-e*e)*
            (porb*SECPERDAY/TWOPI)**(5.0/3.0))**(3.0/2.0)/Tsun
def GAMMA_to_Mc(gamma, porb, e, Mp):
    """
    GAMMA_to_Mc(gamma, porb, e, Mp):
        Given the relativistic gamma in sec, the orbital period in days,
        the eccentricity and the pulsar mass in solar units, return the
        predicted companion mass.
    """
    # Find the companion mass whose predicted gamma matches the input.
    def _residual(mc):
        return GAMMA(porb, e, Mp, mc) - gamma
    return zeros.bisect(_residual, 0.01, 20.0)
def shklovskii_effect(pm, D):
    """
    shklovskii_effect(pm, D):
        Return the 'acceleration' due to the transverse Doppler effect
            (i.e. the Shklovskii Effect) given the proper motion (pm) in mas/yr
            and the distance (D) in kpc.  Note:  What is returned is a_pm/C,
            or equivalently, Pdot_pm/P.
    """
    # Transverse angular speed in rad/s (pm converted from mas/yr)
    mu = pm/1000.0*ARCSECTORAD/SECPERJULYR
    # mu^2 * D / c, with D in km and c in km/s
    return mu**2.0 * KMPERKPC*D / (C/1000.0)
def galactic_accel_simple(l, b, D, v_o=240.0, R_o = 8.34):
    """
    galactic_accel_simple(l, b, D, v_o=240.0, R_o = 8.34):
        Return the approximate projected acceleration/c (in s^-1)
        (a_p - a_ssb) dot n / c, where a_p and a_ssb are acceleration
        vectors, and n is the los vector.  This assumes a simple spherically
        symmetric isothermal sphere with circular velocity v_o (km/s) and
        distance R_o (kpc) from the SSB to the center of the sphere.  l and
        b are the galactic longitude and latitude (in deg) respectively,
        and D is the distance in kpc.  This is eqn 2.4 of Phinney 1992.
        The default v_o and R_o values are from Reid et al 2014.
    """
    # Solar acceleration over c: v_o^2 / (c * R_o), in s^-1
    A_sun = v_o*v_o / (C/1000.0 * R_o*KMPERKPC)
    d = D/R_o  # pulsar distance in units of R_o
    # cos(b)*cos(l) projects the line of sight onto the SSB-center direction
    cbcl = Num.cos(b*DEGTORAD) * Num.cos(l*DEGTORAD)
    return -A_sun * (cbcl + (d - cbcl) / (1.0 + d*d - 2.0*d*cbcl))
def galactic_accel(l, b, D, v_o=240.0, R_o = 8.34):
    """
    galactic_accel(l, b, D, v_o=240.0, R_o = 8.34):
        Return the approximate projected acceleration/c (in s^-1)
        (a_p - a_ssb) dot n / c, where a_p and a_ssb are acceleration
        vectors, and n is the los vector.  This assumes a circular
        velocity v_o (km/s) and distance R_o (kpc) to the center of the
        Galaxy.  l and b are the galactic longitude and latitude (in deg)
        respectively, and D is the distance in kpc.  This is eqn 5 of
        Nice & Taylor 1995.  The default v_o and R_o values are from
        Reid et al 2014.
    """
    # Solar acceleration over c: v_o^2 / (c * R_o), in s^-1
    A_sun = v_o*v_o / (C/1000.0 * R_o*KMPERKPC)
    cb = Num.cos(b*DEGTORAD)
    cl = Num.cos(l*DEGTORAD)
    sl = Num.sin(l*DEGTORAD)
    # In-plane geometry factor of Nice & Taylor eqn 5
    beta = D/R_o * cb - cl
    return -A_sun * cb * (cl + beta / (sl**2 + beta**2))
def gal_z_accel(l, b, D):
    """
    gal_z_accel(l, b, D):
        Return the approximate projected acceleration/c (in s^-1)
            (a_p - a_ssb) dot n / c, where a_p and a_ssb are acceleration
            vectors, and n is the los vector, caused by the acceleration
            of the pulsar towards the plane of the galaxy.  l and b are
            the galactic longitude and latitude (in deg) respectively, and D
            is the distance in kpc.  This is eqn 3+4 of Nice & Taylor 1995.
    """
    sinb = Num.sin(b*DEGTORAD)
    # Height above the Galactic plane in kpc
    height = D * sinb
    # Vertical acceleration law (Nice & Taylor 1995, eqns 3-4)
    a_z = 1.08e-19 * (1.25 * height / Num.sqrt(height**2 + 0.0324) +
                      0.58 * height)
    # Project back onto the line of sight
    return a_z * sinb
def beam_halfwidth(obs_freq, dish_diam):
    """
    beam_halfwidth(obs_freq, dish_diam):
        Return the telescope beam halfwidth in arcmin
            'obs_freq' = the observing frequency in MHz
            'dish_diam' = the telescope diameter in m
    """
    # 1.2 * lambda / D diffraction beam width in radians
    beam_rad = 1.2*SOL/(obs_freq*10.0**6)/dish_diam
    # Convert rad -> arcmin and take half the full width
    return beam_rad*RADTODEG*60/2
def limiting_flux_dens(Ttot, G, BW, T, P=0.01, W=0.05, polar=2, factor=15.0):
    """
    limiting_flux_dens(Ttot, G, BW, T, P=0.01, W=0.05, polar=2, factor=15.0):
        Return the approximate limiting flux density for a pulsar
            survey in mJy based of the following characteristics:
            'Ttot' = sky + system temperature (K)
            'G' = forward gain of the antenna (K/Jy)
            'BW' = observing bandwidth (MHz)
            'T' = integration time (s)
            'P' = pulsar period (s) (default = 0.01)
            'W' = duty cycle of pulsar (0-1) (default = 0.05)
            'polar' = number of polarizations (default = 2)
            'factor' = normalization factor that take into account
                limiting SNR, hardware limitations etc. (default = 15.0)
        Note:  This is a _very_ approximate calculation.  For a better
            calculation, see Cordes and Chernoff, ApJ, 482, p971, App. A.
        Observatories:
            Parkes Multibeam:  Tsys = 21 K, G = 0.735 K/Jy
    """
    # Effective pulse width in seconds
    width = W * P
    # Radiometer-equation duty-cycle factor
    duty_term = Num.sqrt(width/((P-width)*polar*BW*T))
    return duty_term*factor*Ttot/G
def dm_info(dm=None, dmstep=1.0, freq=1390.0, numchan=512, chanwidth=0.5):
    """
    dm_info(dm=None, dmstep=1.0, freq=1390.0, numchan=512, chanwidth=0.5):
        Print a summary of potential DM smearing during an observation:
        the band geometry, the smearing across the full band caused by
        a half-step DM error, and (only if 'dm' is given and non-zero)
        the smearing within one channel at that DM.  Returns None; all
        output goes to stdout.
    """
    BW = chanwidth * numchan  # total bandwidth (MHz)
    print "      Center freq (MHz) = %.3f" % (freq)
    print "     Number of channels = %d" % (numchan)
    print "    Channel width (MHz) = %.3g" % (chanwidth)
    print "  Total bandwidth (MHz) = %.3g" % (BW)
    print "   DM offset (0.5*step) = %.3g" % (0.5 * dmstep)
    print "  Smearing over BW (ms) = %.3g" % \
          (1000.0 * dm_smear(0.5 * dmstep, BW, freq))
    # Per-channel smearing is only meaningful at a specific DM
    if (dm):
        print " Smearing per chan (ms) = %.3g" % \
              (1000.0 * dm_smear(dm, chanwidth, freq))
def best_dm_step(maxsmear=0.1, dt=0.00080, dm=0.0, freq=1390.0, numchan=512, chanwidth=0.5):
    """
    best_dm_step(maxsmear=0.1, dt=0.00080, dm=0.0, freq=1390.0, numchan=512, chanwidth=0.5):
        Return the required DM step to keep the total smearing below 'maxsmear' (in ms).
        Returns 0.0 (after printing a warning) if the per-channel smearing
        plus the sample time already exceed the requested total.
    """
    BW = chanwidth * numchan          # total bandwidth (MHz)
    tau_tot = maxsmear/1000.0         # requested total smearing (s)
    tau_chan = dm_smear(dm, chanwidth, freq)  # per-channel smearing (s)
    tau_samp = dt                     # sampling time contribution (s)
    if (tau_tot**2.0 < (tau_chan**2.0+tau_samp**2.0)):
        print "The requested total smearing is smaller than one or more of the components."
        return 0.0
    else:
        # Invert the dm_smear() relation for the DM step whose half-step
        # smearing (in quadrature with the other terms) equals tau_tot
        return 0.0001205*freq**3.0*2.0/BW*Num.sqrt(tau_tot**2.0-tau_chan**2.0-tau_samp**2.0)
def dm_smear(dm, BW, center_freq):
    """
    dm_smear(dm, BW, center_freq):
        Return the smearing in sec caused by a 'dm' over a bandwidth
            of 'BW' MHz centered at 'center_freq' MHz.
    """
    # Cold-plasma dispersion smearing: dt = dm * BW / (0.0001205 * f^3)
    fcube = center_freq * center_freq * center_freq
    return dm * BW / (0.0001205 * fcube)
def diagonal_DM(dt, chanBW, center_freq):
    """
    diagonal_DM(dt, chanBW, center_freq):
        Return the so-called "diagonal DM" where the smearing across
            one channel is equal to the sample time.
    """
    # Inverse of dm_smear() solved for DM with the smearing set to dt
    fcube = center_freq * center_freq * center_freq
    return 0.0001205 * fcube * dt / chanBW
def pulse_broadening(DM, f_ctr):
    """
    pulse_broadening(DM, f_ctr):
        Return the approximate pulse broadening (tau) due to scattering
            based on the rough relation in Cordes' 'Pulsar Observations I'
            paper.  'f_ctr' should be in MHz.  The approximate error
            is 0.65 in log(tau).
    """
    logDM = Num.log10(DM)
    # Empirical fit: log10(tau in ms) as a quadratic in log10(DM)
    # plus a nu^-4.4 frequency scaling (f_ctr referenced to 1 GHz)
    exponent = -3.59 + 0.129*logDM + 1.02*logDM**2.0 - \
               4.4*Num.log10(f_ctr/1000.0)
    # NOTE(review): the /1000.0 appears to convert the fitted ms value
    # to seconds even though older docs said ms -- verify with callers.
    return 10.0**exponent/1000.0
def rrat_period(times, numperiods=20, output=True):
    """
    rrat_period(times, numperiods=20, output=True):
        Try to determine a RRAT pulse period using a brute force
            search when the input times are (real!) single-pulse
            arrival times.  numperiods is the number of integer pulses
            to try between the first two pulses.  If output is True,
            print some diagnostic information.  Returns the refined
            period (same time units as 'times').
    """
    ts = Num.asarray(sorted(times))
    # Candidate periods: the first gap divided by 1..numperiods rotations
    ps = (ts[1]-ts[0])/Num.arange(1, numperiods+1)
    dts = Num.diff(ts)
    # For each candidate period, how many (fractional) rotations fit
    # in each gap between pulses
    xs = dts / ps[:,Num.newaxis]
    # Metric: total distance of the rotation counts from whole numbers
    metric = Num.sum(Num.fabs((xs - xs.round())), axis=1)
    pnum = metric.argmin()
    # Refine the best candidate using the full time span
    numrots = xs.round()[pnum].sum()
    p = (ts[-1] - ts[0]) / numrots
    if output:
        print "Min, avg, std metric values are %.4f, %.4f, %.4f" % \
              (metric.min(), metric.mean(), metric.std())
        print " Approx period is likely:", ps[pnum]
        print "Refined period is likely:", p
        print "Rotations between pulses are:"
        print dts / p
    return p
def guess_DMstep(DM, dt, BW, f_ctr):
    """
    guess_DMstep(DM, dt, BW, f_ctr):
        Choose a reasonable DMstep by setting the maximum smearing across the
            'BW' to equal the sampling time 'dt'.  ('DM' is accepted for
            interface compatibility but is not used in the calculation.)
    """
    # Half the band smears in each direction from the center frequency
    halfband = 0.5*BW
    return dt*0.0001205*f_ctr**3.0/halfband
def delay_from_DM(DM, freq_emitted):
    """
    Return the delay in seconds caused by dispersion, given
    a Dispersion Measure (DM) in cm-3 pc, and the emitted
    frequency (freq_emitted) of the pulsar in MHz.
    Non-positive frequencies give a delay of 0.0.  Array input
    is handled element-wise.
    """
    # isinstance() is the idiomatic scalar test (the old
    # type(freq_emitted)==type(0.0) comparison also rejected
    # float subclasses such as numpy.float64)
    if isinstance(freq_emitted, float):
        if (freq_emitted > 0.0):
            return DM/(0.000241*freq_emitted*freq_emitted)
        else:
            return 0.0
    else:
        # Vectorized path: 0.0 wherever the frequency is non-positive
        return Num.where(freq_emitted > 0.0,
                         DM/(0.000241*freq_emitted*freq_emitted), 0.0)
def delay_from_foffsets(df, dfd, dfdd, times):
    """
    Return the delays in phase caused by offsets in
    frequency (df), and two frequency derivatives (dfd, dfdd)
    at the given times in seconds.
    """
    # Taylor series of the phase offset:
    #   df*t + dfd*t^2/2! + dfdd*t^3/3!
    return (df * times +
            dfd * times**2 / 2.0 +
            dfdd * times**3 / 6.0)
def smear_plot(dm=[1.0,1000.0], dmstep=1.0, subdmstep=10.0, freq=1390.0,
               numchan=512, numsub=32, chanwidth=0.5, dt=0.000125,
               device='/xwin'):
    """
    smear_plot(dm=[1.0,1000.0], dmstep=1.0, subdmstep=10.0, freq=1390.0,
               numchan=512, numsub=32, chanwidth=0.5, dt=0.000125,
               device='/xwin'):
         Show a plot that displays the expected smearing in ms
         from various effects during a radio pulsar search.
         The DM axis is logarithmic, so dm[0] must be > 0.
    """
    numpts = 500
    BW = numchan * chanwidth              # full bandwidth (MHz)
    subBW = numchan / numsub * chanwidth  # bandwidth of one subband (MHz)
    maxDMerror = 0.5 * dmstep             # worst-case trial-DM error
    maxsubDMerror = 0.5 * subdmstep       # worst-case subband-DM error
    ldms = span(Num.log10(dm[0]), Num.log10(dm[1]), numpts)
    dms = 10.0**ldms
    # Smearing from sample rate
    dts = Num.zeros(numpts) + 1000.0 * dt
    # Smearing due to the intrinsic channel width
    chan_smear = 1000.0 * dm_smear(dms, chanwidth, freq)
    # Smearing across the full BW due to max DM mismatch
    BW_smear = Num.zeros(numpts) + \
               1000.0 * dm_smear(maxDMerror, BW, freq)
    # Smearing in each subband due to max DM mismatch
    subband_smear = Num.zeros(numpts) + \
                    1000.0 * dm_smear(maxsubDMerror, subBW, freq)
    # All components added in quadrature
    total_smear = Num.sqrt(dts**2.0 + chan_smear**2.0 +
                           subband_smear**2.0 + BW_smear**2.0)
    maxval = Num.log10(2.0 * max(total_smear))
    minval = Num.log10(0.5 * min([min(dts), min(chan_smear),
                                  min(BW_smear), min(subband_smear)]))
    Pgplot.plotxy(Num.log10(total_smear), ldms, rangey=[minval, maxval],
                  logx=1, logy=1, labx="Dispersion Measure",
                  laby="Smearing (ms)", device=device)
    ppgplot.pgsch(0.8)
    # Annotate the top of the plot with the search parameters
    ppgplot.pgmtxt("t", 1.5, 1.0/12.0, 0.5, "\(2156)\dcenter\u = %gMHz" % freq)
    ppgplot.pgmtxt("t", 1.5, 3.0/12.0, 0.5, "N\dchan\u = %d" % numchan)
    ppgplot.pgmtxt("t", 1.5, 5.0/12.0, 0.5, "N\dsub\u = %d" % numsub)
    ppgplot.pgmtxt("t", 1.5, 7.0/12.0, 0.5, "BW\dchan\u = %gMHz" % chanwidth)
    ppgplot.pgmtxt("t", 1.5, 9.0/12.0, 0.5, "\gDDM = %g" % dmstep)
    ppgplot.pgmtxt("t", 1.5, 11.0/12.0, 0.5, "\gDDM\dsub\u = %g" % subdmstep)
    ppgplot.pgsch(1.0)
    # Overplot each individual component, labelled bottom-up
    ppgplot.pgmtxt("b", -7.5, 0.95, 1.0, "Total")
    Pgplot.plotxy(Num.log10(dts), ldms, color="green",
                  logx=1, logy=1)
    ppgplot.pgmtxt("b", -6.0, 0.95, 1.0, "Sample Rate")
    Pgplot.plotxy(Num.log10(chan_smear), ldms, color="purple",
                  logx=1, logy=1)
    ppgplot.pgmtxt("b", -4.5, 0.95, 1.0, "Channel")
    Pgplot.plotxy(Num.log10(BW_smear), ldms, color="red",
                  logx=1, logy=1)
    ppgplot.pgmtxt("b", -3.0, 0.95, 1.0, "Full BW")
    Pgplot.plotxy(Num.log10(subband_smear), ldms, color="blue",
                  logx=1, logy=1)
    ppgplot.pgmtxt("b", -1.5, 0.95, 1.0, "Subband")
    ppgplot.pgsci(1)
def search_sensitivity(Ttot, G, BW, chan, freq, T, dm, ddm, dt, Pmin=0.001,
                       Pmax=1.0, W=0.1, polar=2, factor=15.0, pts=1000):
    """
    (periods, S_min) = search_sensitivity(Ttot, G, BW, chan, freq, T, dm,
             ddm, dt, Pmin=0.001, Pmax=1.0, W=0.1, polar=2, factor=15.0, pts=1000):
        Return the approximate limiting flux density for a pulsar
            survey in mJy based of the following characteristics:
            'Ttot' = sky + system temperature (K)
            'G' = forward gain of the antenna (K/Jy)
            'BW' = observing bandwidth (MHz)
            'chan' = number of channels in the filterbank
            'freq' = central observing frequency (MHz)
            'T' = integration time (s)
            'dm' = Dispersion Measure in pc cm^-3
            'ddm' = Dispersion Measure stepsize in pc cm^-3
            'dt' = Sample time for each data point in sec
            'Pmin' = minimum pulsar period (s) (default = 0.001)
            'Pmax' = maximum pulsar period (s) (default = 1.0)
            'W' = duty cycle of pulsar (0-1) (default = 0.1)
            'polar' = number of polarizations (default = 2)
            'factor' = normalization factor that take into account
                limiting SNR, hardware limitations etc. (default = 15.0)
            'pts' = the number of points to calculate
        Note:  This is a _very_ approximate calculation.  For a better
            calculation, see Cordes and Chernoff, ApJ, 482, p971, App. A.
        Observatories:
            Parkes Multibeam:  Tsys = 21 K, G = 0.735 K/Jy
    """
    periods = span(Pmin, Pmax, pts)
    # Effective duty cycle: intrinsic width, per-channel smearing,
    # DM-step smearing, and sample time, all added in quadrature
    chan_term = dm_smear(dm, BW/chan, freq)**2.0
    step_term = dm_smear(ddm/2.0, BW, freq)**2.0
    widths = Num.sqrt((W * periods)**2.0 +
                      chan_term + step_term + dt**2.0) / periods
    return (periods, limiting_flux_dens(Ttot, G, BW, T, periods, widths,
                                        polar=polar, factor=factor))
def smin_noise(Ttot, G, BW, dt):
    """
    smin_noise(Ttot, G, BW, dt):
        Return the 1 sigma Gaussian noise level (mJy) for each time
            series bin in a pulsar data simulation.  Default is for a
            sinusoidal pulse (i.e. W = P / 2) with freq << Nyquist freq.
            'Ttot' = sky + system temperature (K)
            'G' = forward gain of the antenna (K/Jy)
            'BW' = observing bandwidth (MHz)
            'dt' = time per time series bin (s)
        Observatories:
            Parkes Multibeam:  Tsys = 21 K, G = 0.735 K/Jy
    """
    # Radiometer equation with 2*BW*dt independent samples per bin
    denom = G * Num.sqrt(2 * BW * dt)
    return Ttot / denom
def read_profile(filenm, normalize=0):
    """
    read_profile(filenm, normalize=0):
        Read a simple ASCII profile with one bin per line
            from the file 'filenm'.  Comments are allowed
            if they begin with '#'.  The profile is pseudo-
            normalized if 'normalize' is true.
    """
    prof = []
    # open() instead of the Python-2-only file() builtin, and a context
    # manager so the handle is always closed
    with open(filenm) as infile:
        for line in infile:
            if line.startswith("#"):
                continue
            # Last whitespace-separated column holds the bin value
            prof.append(float(line.split()[-1]))
    prof = Num.asarray(prof)
    if normalize:
        # Pseudo-normalize: shift the minimum to 0, scale the max to 1
        prof -= min(prof)
        prof /= max(prof)
    return prof
def calc_phs(MJD, refMJD, *args):
    """
    calc_phs(MJD, refMJD, *args):
        Return the rotational phase (0-1) at MJD (can be an array)
            given a reference MJD and the rotational freq (f0) and
            optional freq derivs (f1...) as ordered in the *args
            list (e.g. [f0, f1, f2, ...]).
    """
    t = (MJD-refMJD)*SECPERDAY
    norder = len(args)  # polynomial order
    # Phase polynomial: f0*t + f1*t^2/2! + ...; the prepended 0.0 is
    # the (arbitrary) constant phase term
    coeffs = Num.concatenate(([0.0], args))
    factorials = Num.concatenate(([0.0],
        Num.cumprod(1.0/(Num.arange(float(norder))+1.0))))
    phase_poly = Num.poly1d((factorials * coeffs)[::-1])
    return Num.fmod(phase_poly(t), 1.0)
def calc_freq(MJD, refMJD, *args):
    """
    calc_freq(MJD, refMJD, *args):
        Return the instantaneous frequency at an MJD (can be an array)
            given a reference MJD and the rotational freq (f0) and
            optional freq derivs (f1...) as ordered in the *args
            list (e.g. [f0, f1, f2, ...]).
    """
    t = (MJD-refMJD)*SECPERDAY
    norder = len(args)  # polynomial order
    # Frequency polynomial: f0 + f1*t + f2*t^2/2! + ...
    factorials = Num.concatenate(([1.0],
        Num.cumprod(1.0/(Num.arange(float(norder-1))+1.0))))
    freq_poly = Num.poly1d((factorials * args)[::-1])
    return freq_poly(t)
def calc_t0(MJD, refMJD, *args):
    """
    calc_t0(MJD, refMJD, *args):
        Return the closest previous MJD corresponding to phase=0 of the pulse.
            *args are the spin freq (f0) and optional freq derivs (f1...)
    """
    phs = calc_phs(MJD, refMJD, *args)
    period = 1.0 / calc_freq(MJD, refMJD, *args)
    # Step back by the current fractional phase, converted to days
    return MJD - phs*period/SECPERDAY
def write_princeton_toa(toa_MJDi, toa_MJDf, toaerr, freq, dm, obs='@', name=' '*13):
    """
    Print a single TOA line to stdout in Princeton (TEMPO) format.
        'toa_MJDi' = integer part of the TOA MJD
        'toa_MJDf' = fractional part of the TOA MJD
        'toaerr'   = TOA uncertainty (microseconds)
        'freq'     = observing frequency (MHz)
        'dm'       = DM correction (pc cm^-3); column omitted if 0.0
        'obs'      = one-character observatory code ('@' = barycenter)
        'name'     = 13-character identifier
    Princeton Format
    columns     item
    1-1     Observatory (one-character code) '@' is barycenter
    2-2     must be blank
    16-24   Observing frequency (MHz)
    25-44   TOA (decimal point must be in column 30 or column 31)
    45-53   TOA uncertainty (microseconds)
    69-78   DM correction (pc cm^-3)
    """
    # Splice together the fractional and integer MJDs
    # ("[1:]" drops the leading "0" of the fractional part)
    toa = "%5d"%int(toa_MJDi) + ("%.13f"%toa_MJDf)[1:]
    if dm!=0.0:
        print obs+" %13s %8.3f %s %8.2f %9.4f" % \
              (name, freq, toa, toaerr, dm)
    else:
        print obs+" %13s %8.3f %s %8.2f" % \
              (name, freq, toa, toaerr)
def write_tempo2_toa(toa_MJDi, toa_MJDf, toaerr, freq, dm, obs='@', name='unk', flags=""):
    """
    Write Tempo2 format TOAs.
    Note that first line of file should be "FORMAT 1"
    TOA format is "file freq sat satErr siteID <flags>"
        'toa_MJDi'/'toa_MJDf' = integer/fractional parts of the TOA MJD
        'toaerr' = TOA uncertainty (microseconds)
        'dm'     = if non-zero, appended as a "-dm" flag
    """
    # Splice the integer and fractional MJD (drop the leading "0")
    toa = "%5d"%int(toa_MJDi) + ("%.13f"%toa_MJDf)[1:]
    if dm != 0.0:
        # NOTE(review): no separator is inserted before "-dm", so a
        # non-empty caller-supplied 'flags' would run into it -- verify.
        flags += "-dm %.4f" % (dm,)
    print "%s %f %s %.2f %s %s" % (name,freq,toa,toaerr,obs,flags)
def rotate(arr, bins):
    """
    rotate(arr, bins):
        Return an array rotated by 'bins' places to the left
            (negative 'bins' rotate right; the shift is taken
            modulo the array length).
    """
    shift = bins % len(arr)
    # A zero shift is a no-op: hand back the original array object
    if not shift:
        return arr
    return Num.concatenate((arr[shift:], arr[:shift]))
def interp_rotate(arr, bins, zoomfact=10):
    """
    interp_rotate(arr, bins, zoomfact=10):
        Return a sinc-interpolated array rotated by 'bins' places to the left.
            'bins' can be fractional and will be rounded to the closest
            whole-number of interpolated bins.  The resulting vector will
            have the same length as the original.
    """
    zoomed_len = len(arr)*zoomfact
    # Round the (possibly fractional) shift to whole interpolated bins
    zoomed_bins = int(Num.floor(bins*zoomfact+0.5)) % zoomed_len
    # Rotate at high resolution, then decimate back to the input length
    zoomed = sinc_interp.periodic_interp(arr, zoomfact)
    return rotate(zoomed, zoomed_bins)[::zoomfact]
def fft_rotate(arr, bins):
    """
    fft_rotate(arr, bins):
        Return array 'arr' rotated by 'bins' places to the left.  The
            rotation is done in the Fourier domain using the Shift Theorem.
            'bins' can be fractional.  The resulting vector will have
            the same length as the original.
    """
    arr = Num.asarray(arr)
    # '//' keeps the length an int on Python 3, and the builtin float
    # dtype replaces Num.float (removed in numpy >= 1.20)
    freqs = Num.arange(arr.size // 2 + 1, dtype=float)
    # Shift theorem: a linear phase ramp in the frequency domain
    # (2.0j * pi is numerically identical to complex(0.0, TWOPI))
    phasor = Num.exp(2.0j * Num.pi * freqs * bins / float(arr.size))
    return Num.fft.irfft(phasor * Num.fft.rfft(arr), arr.size)
def corr(profile, template):
    """
    corr(profile, template):
        Cross-correlate (using FFTs) a 'profile' and a 'template'.
    """
    # Correlation theorem: IFFT(T(f) * conj(P(f))) gives the circular
    # cross-correlation of the two real series
    prof_fft = FFT.rfft(profile)
    temp_fft = FFT.rfft(template)
    return FFT.irfft(temp_fft * Num.conjugate(prof_fft), profile.size)
def autocorr(x):
    """
    autocorr(x):
        Circular normalized auto-correlation of the (real) function x
            using FFTs.  Returns only N/2+1 points as the remaining N/2-1
            points are symmetric (corresponding to negative lags).
            Normalized so that the zero-lag value is 1.0.
    """
    fftx = FFT.rfft(x)
    # '//' keeps the slice index an int (len(x)/2 is a float on
    # Python 3, which would make the slice raise a TypeError)
    acf = FFT.irfft(fftx * Num.conjugate(fftx), x.size)[:len(x) // 2 + 1]
    return acf / acf[0]
def maxphase(profile, template):
    """
    maxphase(profile, template):
        Return the phase offset required to get the 'profile' to best
            match the 'template'.
    """
    # The lag with the highest cross-correlation, as a fraction of a turn
    best_bin = Num.argmax(corr(profile, template))
    return float(best_bin) / len(profile)
def linear_interpolate(vector, zoom=10):
    """
    linear_interpolate(vector, zoom=10):
        Linearly interpolate 'vector' by a factor of 'zoom',
            treating it as periodic (the last segment interpolates
            back towards the first sample).
    """
    n = len(vector)
    # Close the circle so segment n-1 heads back to vector[0]
    wrapped = Num.concatenate((vector, vector[:1]))
    fracs = Num.arange(zoom, dtype='d')/zoom
    out = Num.zeros(zoom*n, dtype='d')
    for ii in range(n):
        lo = wrapped[ii]
        hi = wrapped[ii+1]
        out[ii*zoom:(ii+1)*zoom] = fracs*(hi-lo) + lo
    return out
def downsample(vector, factor):
    """
    downsample(vector, factor):
        Downsample (i.e. co-add consecutive numbers) a short section
            of a vector by an integer factor.  Returns 0 (after printing
            a warning) if the length is not divisible by 'factor'.
    """
    if (len(vector) % factor):
        # Fixed typo in the message ("Lenght"); parenthesized print
        # works identically under Python 2 and 3 for one argument
        print("Length of 'vector' is not divisible by 'factor'=%d!" % factor)
        return 0
    # '//' keeps the bin count an int (true division yields a float
    # on Python 3, which reshape rejects)
    newvector = Num.reshape(vector, (len(vector) // factor, factor))
    # Co-add the 'factor' consecutive samples of each row
    return Num.add.reduce(newvector, 1)
def measure_phase_corr(profile, template, zoom=10):
    """
    measure_phase_corr(profile, template, zoom=10):
        Return the phase offset required to get the 'profile' to best
            match the 'template', each of which has been interpolated
            by a factor of 'zoom'.  If the template is an integer
            multiple longer than the profile, the profile is zoomed by
            an extra factor so both end up the same length.
    """
    zoomprof = zoomtemp = zoom
    if (len(template) != len(profile)):
        if (len(template)%len(profile) == 0):
            # Zoom the profile extra so both interpolated vectors match
            zoomprof = zoom*len(template)/len(profile)
        else:
            # Incommensurate lengths: warn, then proceed anyway
            print "Warning!:  The lengths of the template (%d) and profile (%d)" % \
                  (len(template), len(profile))
            print "           are not the same!"
    #itemp = linear_interpolate(rotate(template, Num.argmax(template)), zoomtemp)
    itemp = linear_interpolate(template, zoomtemp)
    iprof = linear_interpolate(profile, zoomprof)
    return maxphase(iprof, itemp)
def spike_profile(N, phase, fwhm):
    """
    spike_profile(N, phase, fwhm):
        Return a triangular pulse profile with 'N' bins and
            an integrated 'flux' of 1 unit.
            'N' = the number of points in the profile
            'phase' = the pulse phase (0-1)
            'fwhm' = the triangular pulses full width at half-max
    """
    # Bug fix: 'mean' was referenced but never defined (every call
    # raised NameError).  Define it as in gaussian_profile() and
    # center the triangle on it.
    mean = phase % 1.0
    phsval = Num.arange(N, dtype='d') / float(N)
    # Wrap the phases so the pulse is not split at the profile edges
    if (mean < 0.5):
        phsval = Num.where(Num.greater(phsval, mean+0.5),
                           phsval-1.0, phsval)
    else:
        phsval = Num.where(Num.less(phsval, mean-0.5),
                           phsval+1.0, phsval)
    # Triangle centered on 'mean': half-base = fwhm, peak = 1/fwhm,
    # so the area (integrated flux) is exactly 1 and the full width
    # at half maximum equals fwhm.
    dist = Num.fabs(phsval - mean)
    return Num.where(Num.greater_equal(dist, fwhm), 0.0,
                     (1.0 - dist / fwhm) / fwhm)
def harm_to_sum(fwhm):
    """
    harm_to_sum(fwhm):
        For an MVMD profile returns the optimal number
            of harmonics to sum incoherently
            (narrower pulses favor summing more harmonics).
    """
    # Table of MVMD FWHMs at which the optimal number of summed
    # harmonics steps down from 40 towards 1
    fwhms = [0.0108, 0.0110, 0.0113, 0.0117, 0.0119, 0.0124, 0.0127, 0.0132,
             0.0134, 0.0140, 0.0145, 0.0151, 0.0154, 0.0160, 0.0167, 0.0173,
             0.0180, 0.0191, 0.0199, 0.0207, 0.0220, 0.0228, 0.0242, 0.0257,
             0.0273, 0.0295, 0.0313, 0.0338, 0.0366, 0.0396, 0.0437, 0.0482,
             0.0542, 0.0622, 0.0714, 0.0836, 0.1037, 0.1313, 0.1799, 0.2883]
    position = bisect.bisect(fwhms, fwhm)
    return len(fwhms) - position + 1
def expcos_profile(N, phase, fwhm):
    """
    expcos_profile(N, phase, fwhm):
        Return a pulse profile with 'N' bins and an integrated 'flux'
            of 1 unit based on the 'Exponentiated Sinusoid'
            (a von Mises-like shape, exp(k*cos(phi))).
            'N' = the number of points in the profile
            'phase' = the pulse phase (0-1)
            'fwhm' = pulse full width at half-max (0.0 < fwhm <= 0.5)
    """
    from simple_roots import secant
    # Residual of the profile FWHM for a trial shape parameter k;
    # zeroed by the root finder below
    def fwhm_func(k, fwhm=fwhm):
        if (fwhm < 0.02):
            return Num.arccos(1.0-Num.log(2.0)/k)/PI-fwhm
        else:
            return Num.arccos(Num.log(0.5*(Num.exp(k)+
                                           Num.exp(-k)))/k)/PI-fwhm
    phsval = TWOPI * Num.arange(N, dtype='d') / float(N)
    phi = -phase * TWOPI
    if (fwhm >= 0.5):
        # Very wide pulse: reduces to a raised cosine
        return Num.cos(phsval + phi) + 1.0
    elif (fwhm < 0.02):
        # The following is from expanding of iO(x) as x->Infinity.
        k = Num.log(2.0) / (1.0 - Num.cos(PI * fwhm))
        # print "Expansion:  k = %f  FWHM = %f" % (k, fwhm_func(k, 0.0))
        phsval = Num.fmod(phsval + phi, TWOPI)
        phsval = Num.where(Num.greater(phsval, PI),
                           phsval - TWOPI, phsval)
        # Asymptotic series for the I0(k) normalization at large k
        denom = ((1 + 1/(8*k) + 9/(128*k*k) + 75/(1024*k**3) +
                  3675/(32768*k**4) + 59535/(262144*k**5)) / Num.sqrt(TWOPI*k))
        # Zero everything beyond 3*FWHM of the peak to avoid underflow
        return Num.where(Num.greater(Num.fabs(phsval/TWOPI), 3.0*fwhm), 0.0,
                         Num.exp(k*(Num.cos(phsval)-1.0))/denom)
    else:
        # Solve numerically for the k that yields the requested FWHM
        k = secant(fwhm_func, 1e-8, 0.5)
        norm = 1.0 / (i0(k) - Num.exp(-k))
        # print "Full Calc:  k = %f  FWHM = %f" % (k, fwhm_func(k, 0.0))
        if (k < 0.05):
            # Small-k: Taylor expansion of exp(k*cos) keeps precision
            tmp = Num.cos(phsval + phi)
            tmp2 = tmp * tmp
            return norm * (k * (tmp + 1) +
                           k * k * (tmp2 - 1.0) / 2.0 +
                           k * k * k * (tmp2 * tmp + 1.0) / 6.0)
        else:
            return norm * (Num.exp(k * Num.cos(phsval + phi)) -
                           Num.exp(-k))
def read_gaussfitfile(gaussfitfile, proflen):
    """
    read_gaussfitfile(gaussfitfile, proflen):
        Read a Gaussian-fit file as created by the output of pygaussfit.py.
            The input parameters are the name of the file and the number of
            bins to include in the resulting template file.  A numpy array
            of that length is returned.  Returns 0.0 (after printing a
            warning) if the file does not contain matching numbers of
            phase, amplitude, and FWHM entries.
    """
    phass = []
    ampls = []
    fwhms = []
    # Lines look like "phasX = value", "amplX = value", "fwhmX = value";
    # the value is the third whitespace-separated token
    for line in open(gaussfitfile):
        if line.lstrip().startswith("phas"):
            phass.append(float(line.split()[2]))
        if line.lstrip().startswith("ampl"):
            ampls.append(float(line.split()[2]))
        if line.lstrip().startswith("fwhm"):
            fwhms.append(float(line.split()[2]))
    if not (len(phass) == len(ampls) == len(fwhms)):
        print "Number of phases, amplitudes, and FWHMs are not the same in '%s'!"%gaussfitfile
        return 0.0
    phass = Num.asarray(phass)
    ampls = Num.asarray(ampls)
    fwhms = Num.asarray(fwhms)
    # Now sort them all according to decreasing amplitude
    new_order = Num.argsort(ampls)
    new_order = new_order[::-1]
    ampls = Num.take(ampls, new_order)
    phass = Num.take(phass, new_order)
    fwhms = Num.take(fwhms, new_order)
    # Now put the biggest gaussian at phase = 0.0
    phass = phass - phass[0]
    phass = Num.where(phass<0.0, phass+1.0, phass)
    # Sum the component gaussians into the template
    template = Num.zeros(proflen, dtype='d')
    for ii in range(len(ampls)):
        template += ampls[ii]*gaussian_profile(proflen, phass[ii], fwhms[ii])
    return template
def gaussian_profile(N, phase, fwhm):
    """
    gaussian_profile(N, phase, fwhm):
        Return a gaussian pulse profile with 'N' bins and
            an integrated 'flux' of 1 unit.
            'N' = the number of points in the profile
            'phase' = the pulse phase (0-1)
            'fwhm' = the gaussian pulses full width at half-max
        Note:  The FWHM of a gaussian is approx 2.35482 sigma
    """
    sigma = fwhm / 2.35482
    mean = phase % 1.0
    phsval = Num.arange(N, dtype='d') / float(N)
    # Wrap the phases so the pulse is not split at the profile edges
    if (mean < 0.5):
        phsval = Num.where(Num.greater(phsval, mean+0.5),
                           phsval-1.0, phsval)
    else:
        phsval = Num.where(Num.less(phsval, mean-0.5),
                           phsval+1.0, phsval)
    try:
        zs = (phsval-mean)/sigma
        # Only evaluate the exponential within 20 sigma of the peak
        # to avoid underflow; everything else stays 0
        okzinds = Num.compress(Num.fabs(zs)<20.0, Num.arange(N))
        okzs = Num.take(zs, okzinds)
        retval = Num.zeros(N, 'd')
        Num.put(retval, okzinds, Num.exp(-0.5*(okzs)**2.0)/(sigma*Num.sqrt(2*PI)))
        return retval
    except OverflowError:
        print "Problem in gaussian prof:  mean = %f  sigma = %f" % \
              (mean, sigma)
        return Num.zeros(N, 'd')
def gauss_profile_params(profile, output=0):
    """
    gauss_profile_params(profile, output=0):
        Return parameters of a best-fit gaussian to a profile.
            The funtion returns a tuple containg the following values:
               ret[0] = Best-fit gaussian integrated 'flux'.
               ret[1] = Best-fit gaussian FWHM.
               ret[2] = Best-fit gaussian phase (0.0-1.0).
               ret[3] = Baseline (i.e. noise) average value.
               ret[4] = Residuals average value.
               ret[5] = Residuals standard deviation.
            If 'output' is true, the fit will be plotted and
               the return values will be printed.
    """
    profile = Num.asarray(profile)
    # Residuals of the model (amp, fwhm, phase, baseline) vs the data
    def funct(afpo, profile):
        return afpo[0] * gaussian_profile(len(profile), afpo[2], afpo[1]) \
               + afpo[3] - profile
    # Initial guesses: peak-to-peak flux, FWHM=0.25, phase of the peak
    # bin, and the profile minimum as the baseline.
    # NOTE(review): args=(profile) is not a tuple (missing trailing
    # comma); scipy appears to tolerate this -- verify.
    ret = leastsq(funct, [profile.max()-profile.min(),
                          0.25, profile.argmax()/float(len(profile)),
                          profile.min()], args=(profile))
    if (output):
        # Bin-center phases for plotting
        phases = Num.arange(0.0, 1.0,
                            1.0 / len(profile)) + 0.5 / len(profile)
        Pgplot.plotxy(profile, phases, rangex=[0.0, 1.0],
                      labx='Pulse Phase', laby='Pulse Intensity')
    bestfit = ret[0][0] * gaussian_profile(len(profile),
                                           ret[0][2], ret[0][1]) \
              + ret[0][3]
    if (output):
        Pgplot.plotxy(bestfit, phases, color='red')
        Pgplot.closeplot()
    residuals = bestfit - profile
    resid_avg = residuals.mean()
    resid_std = residuals.std()
    if (output):
        Pgplot.plotxy(residuals, phases, rangex=[0.0, 1.0],
                      rangey=[min(residuals) - 2 * resid_std,
                              max(residuals) + 2 * resid_std],
                      labx='Pulse Phase', laby='Residuals',
                      line=None, symbol=3)
        ppgplot.pgerrb(6, phases, residuals,
                       Num.zeros(len(residuals), 'd') + \
                       resid_std, 2)
        Pgplot.plotxy([resid_avg, resid_avg], [0.0, 1.0], line=2)
        Pgplot.closeplot()
        print ""
        print "  Best-fit gaussian integrated 'flux'  = ", ret[0][0]
        print "               Best-fit gaussian FWHM  = ", ret[0][1]
        print "    Best-fit gaussian phase (0.0-1.0)  = ", ret[0][2]
        print "        Baseline (i.e. noise) average  = ", ret[0][3]
        print "                    Residuals average  = ", resid_avg
        print "         Residuals standard deviation  = ", resid_std
        print ""
    return (ret[0][0], ret[0][1], ret[0][2], ret[0][3], resid_avg, resid_std)
def twogauss_profile_params(profile, output=0):
    """
    twogauss_profile_params(profile, output=0):
        Return parameters of a two best-fit gaussians to a profile.
            The function returns a tuple containg the following values:
               ret[0] = Best-fit gaussian integrated 'flux'.
               ret[1] = Best-fit gaussian FWHM.
               ret[2] = Best-fit gaussian phase (0.0-1.0).
               ret[3] = Best-fit gaussian integrated 'flux'.
               ret[4] = Best-fit gaussian FWHM.
               ret[5] = Best-fit gaussian phase (0.0-1.0).
               ret[6] = Baseline (i.e. noise) average value.
               ret[7] = Residuals average value.
               ret[8] = Residuals standard deviation.
            If 'output' is true, the fit will be plotted and
               the return values will be printed.
    """
    # Model: two gaussian components plus a constant baseline
    def yfunct(afpo, n):
        return afpo[0] * gaussian_profile(n, afpo[2], afpo[1]) + \
               afpo[3] * gaussian_profile(n, afpo[5], afpo[4]) + afpo[6]
    def min_funct(afpo, profile):
        return yfunct(afpo, len(profile)) - profile
    # Initial guesses: main component at the peak, a weaker second
    # component half a turn away, minimum as the baseline.
    # NOTE(review): '0.2 * max(profile)-min(profile)' binds as
    # (0.2*max)-min; '0.2*(max-min)' may have been intended -- it only
    # affects the starting guess, but verify.
    ret = leastsq(min_funct, [max(profile)-min(profile),
                              0.05,
                              Num.argmax(profile)/float(len(profile)),
                              0.2 * max(profile)-min(profile),
                              0.1,
                              Num.fmod(Num.argmax(profile)/float(len(profile))+0.5, 1.0),
                              min(profile)], args=(profile))
    if (output):
        # Bin-center phases for plotting
        phases = Num.arange(0.0, 1.0,
                            1.0 / len(profile)) + 0.5 / len(profile)
        Pgplot.plotxy(profile, phases, rangex=[0.0, 1.0],
                      labx='Pulse Phase', laby='Pulse Intensity')
    bestfit = yfunct(ret[0], len(profile))
    if (output):
        Pgplot.plotxy(bestfit, phases, color='red')
        Pgplot.closeplot()
    residuals = bestfit - profile
    resid_avg = residuals.mean()
    resid_std = residuals.std()
    if (output):
        Pgplot.plotxy(residuals, phases, rangex=[0.0, 1.0],
                      rangey=[min(residuals) - 2 * resid_std,
                              max(residuals) + 2 * resid_std],
                      labx='Pulse Phase', laby='Residuals',
                      line=None, symbol=3)
        ppgplot.pgerrb(6, phases, residuals,
                       Num.zeros(len(residuals), 'd') + \
                       resid_std, 2)
        Pgplot.plotxy([resid_avg, resid_avg], [0.0, 1.0], line=2)
        Pgplot.closeplot()
        print ""
        print "  Best-fit gaussian integrated 'flux'  = ", ret[0][0]
        print "               Best-fit gaussian FWHM  = ", ret[0][1]
        print "    Best-fit gaussian phase (0.0-1.0)  = ", ret[0][2]
        print "  Best-fit gaussian integrated 'flux'  = ", ret[0][3]
        print "               Best-fit gaussian FWHM  = ", ret[0][4]
        print "    Best-fit gaussian phase (0.0-1.0)  = ", ret[0][5]
        print "        Baseline (i.e. noise) average  = ", ret[0][6]
        print "                    Residuals average  = ", resid_avg
        print "         Residuals standard deviation  = ", resid_std
        print ""
    return (ret[0][0], ret[0][1], ret[0][2], ret[0][3], ret[0][4],
            ret[0][5], ret[0][6], resid_avg, resid_std)
def estimate_flux_density(profile, N, dt, Ttot, G, BW, prof_stdev, display=0):
    """
    estimate_flux_density(profile, N, dt, Ttot, G, BW, prof_stdev, display=0):
        Return an estimate of the flux density (mJy) of a pulsar.
            'profile' = the pulse profile you are using
            'N' = number of time series bins folded
            'dt' = time per time series bin (s)
            'Ttot' = sky + system temperature (K)
            'G' = forward gain of the antenna (K/Jy)
            'BW' = observing bandwidth (MHz)
            'prof_stdev' = profile standard deviation
            'display' = if set, the gaussian fit plots are shown
        Observatories:
            Parkes Multibeam: Tsys = 21 K, G = 0.735 K/Jy
    """
    # Fit a gaussian just to determine the baseline ('offset');
    # the other fit parameters are not used below
    (amp, fwhm, phase, offset, resid_avg, resid_std) = \
        gauss_profile_params(profile, display)
    T = N * dt  # total folded integration time (s)
    # Scale factor relating profile counts to the radiometer noise level
    scale = (prof_stdev * len(profile)) / \
            smin_noise(Ttot, G, BW, T / len(profile))
    # Baseline-subtracted counts, converted to mJy
    return Num.sum(profile - offset) / scale
def max_spike_power(FWHM):
    """
    max_spike_power(FWHM):
        Return the (approx.) ratio of the highest power from a
            triangular spike pulse profile to the power from a
            perfect sinusoidal pulse profile.  In other words, if a
            sine gives you a power of 1, what power does a spike profile
            give you?  Both the spike and the sine are assumed to have
            an area under one full pulse of 1 unit.  Note:  A gaussian
            profile gives almost identical powers as a spike profile
            of the same width.  This expression was determined using
            a least-squares fit (Max abs error ~ 0.016).
            'FWHM' is the full width at half-max of the spike.
            (0.0 < FWHM <= 0.5)
    """
    # Cubic least-squares fit, evaluated in Horner form
    c3 = 36.4165309504
    c2 = -32.0107844537
    c1 = 0.239948319674
    c0 = 4.00277916584
    return ((c3 * FWHM + c2) * FWHM + c1) * FWHM + c0
def num_spike_powers(FWHM):
    """
    num_spike_powers(FWHM):
        Return the (approx.) number of powers from a triangular spike
            pulse profile which are greater than one half the power
            perfect sinusoidal pulse profile.  Both the spike and the
            sine are assumed to have an area under one full pulse of 1 unit.
            Note:  A gaussian profile gives almost identical numbers of
            high powers as a spike profile of the same width.  This
            expression was determined using a least-squares fit.
            (Errors get large as FWHM -> 0).
            'FWHM' is the full width at half-max of the spike.
            (0.0 < FWHM <= 0.5)
    """
    # Least-squares fit in inverse powers of FWHM
    quad_coeff = -3.95499721563e-05
    lin_coeff = 0.562069634689
    const = -0.683604041138
    return quad_coeff / FWHM**2 + lin_coeff / FWHM + const
def incoherent_sum(amps):
    """
    incoherent_sum(amps):
        Given a series of complex Fourier amplitudes, return a vector
            showing the accumulated incoherently-summed powers.
    """
    # Power of each amplitude, then a running total
    powers = Num.absolute(amps)**2.0
    return Num.cumsum(powers)
def coherent_sum(amps):
    """
    coherent_sum(amps):
        Given a series of complex Fourier amplitudes, return a vector
            showing the accumulated coherently-summed powers.
    """
    # Phase of each amplitude, and the phase of the first one
    phss = Num.arctan2(amps.imag, amps.real)
    phs0 = phss[0]
    # De-rotate by a (wrapped) phase ramp based on the first amplitude
    # so the amplitudes add up in phase
    ramp = Num.fmod((Num.arange(len(amps), dtype='d')+1.0)*phs0, TWOPI)
    phscorr = phs0 - ramp
    summed = Num.cumsum(amps*Num.exp(1.0j*phscorr))
    return Num.absolute(summed)**2.0
def dft_vector_response(roff, z=0.0, w=0.0, phs=0.0, N=1000):
    """
    dft_vector_response(roff, z=0.0, w=0.0, phs=0.0, N=1000):
        Return a complex vector addition of N vectors showing the DFT
            response for a noise-less signal with Fourier frequency
            offset roff, (roff=0 would mean that we are exactly at the
            signal freq), average Fourier f-dot, z, and Fourier 2nd
            deriv, w.  An optional phase in radians can be added.
    """
    r0 = roff - 0.5 * z + w / 12.0 # Make symmetric for all z and w
    z0 = z - 0.5 * w
    us = Num.linspace(0.0, 1.0, N)
    # Instantaneous phase: cubic polynomial in normalized time
    phss = 2.0 * Num.pi * (us * (us * (us * w/6.0 + z0/2.0) + r0) + phs)
    # 1.0j replaces Num.complex(0.0, 1.0): the Num.complex alias was
    # removed in numpy >= 1.20
    return Num.cumsum(Num.exp(1.0j * phss)) / N
def prob_power(power):
    """
    prob_power(power):
        Return the probability for noise to exceed a normalized power
            level of 'power' in a power spectrum.
    """
    # Noise powers are exponentially distributed: P(>p) = e^-p
    return Num.exp(-power)
def Ftest(chi2_1, dof_1, chi2_2, dof_2):
    """
    Ftest(chi2_1, dof_1, chi2_2, dof_2):
        Compute an F-test to see if a model with extra parameters is
        significant compared to a simpler model.  The input values are
        the (non-reduced) chi^2 values and the numbers of DOF for '1'
        the original model and '2' for the new model (with more fit
        params).  The probability is computed exactly like Sherpa's
        F-test routine (in Ciao) and is also described in the Wikipedia
        article on the F-test: http://en.wikipedia.org/wiki/F-test
        The returned value is the probability that the improvement in
        chi2 is due to chance (i.e. a low probability means that the
        new fit is quantitatively better, while a value near 1 means
        that the new model should likely be rejected).
    """
    improvement = chi2_1 - chi2_2
    extra_dof = dof_1 - dof_2
    # F statistic: chi^2 improvement per extra DOF, measured in units
    # of the new model's reduced chi^2.
    F = (improvement / extra_dof) / (chi2_2 / dof_2)
    return 1.0 - fdtr(extra_dof, dof_2, F)
def equivalent_gaussian_sigma(p):
    """
    equivalent_gaussian_sigma(p):
        Return the equivalent gaussian sigma corresponding to the
        cumulative gaussian probability p.  In other words, return x,
        such that Q(x) = p, where Q(x) is the cumulative normal
        distribution.  For very small p, the asymptotic expansion in
        extended_equiv_gaussian_sigma() is used instead of ndtri().
    """
    logp = Num.log(p)
    if type(logp) is float:  # Scalar input
        # ndtri() loses accuracy in the far tail, so switch to the
        # asymptotic expansion once log(p) drops below -30.
        if logp > -30.0:
            return ndtri(1.0 - p)
        return extended_equiv_gaussian_sigma(logp)
    # Array input.  Note that Num.where evaluates both branches.
    return Num.where(logp > -30.0,
                     ndtri(1.0 - p),
                     extended_equiv_gaussian_sigma(logp))
def extended_equiv_gaussian_sigma(logp):
    """
    extended_equiv_gaussian_sigma(logp):
        Return the equivalent gaussian sigma corresponding to the log
        of the cumulative gaussian probability logp.  In other words,
        return x, such that Q(x) = p, where Q(x) is the cumulative
        normal distribution.  This version uses the rational
        approximation from Abramowitz and Stegun, eqn 26.2.23, which
        stays usable far into the tail where p itself would underflow.
    """
    t = Num.sqrt(-2.0 * logp)
    # A&S 26.2.23 rational approximation (|error| < 4.5e-4).
    numerator = 2.515517 + t * (0.802853 + t * 0.010328)
    denominator = 1.0 + t * (1.432788 + t * (0.189269 + t * 0.001308))
    return t - numerator / denominator
def log_asymtotic_incomplete_gamma(a, z):
    """
    log_asymtotic_incomplete_gamma(a, z):
        Return the log of the incomplete gamma function in its
        asymtotic limit as z->infty.  This is from Abramowitz
        and Stegun eqn 6.5.32.
    """
    # Sum the asymptotic series 1 + (a-1)/z + (a-1)(a-2)/z^2 + ...
    # until the terms stop contributing at double precision.
    total = 1.0
    coeff = 1.0
    term = 1.0
    k = 1
    while Num.fabs(term) > 1e-15:
        coeff *= (a - k)
        term = coeff / z**k
        total += term
        k += 1
    return (a - 1.0) * Num.log(z) - z + Num.log(total)
def log_asymtotic_gamma(z):
    """
    log_asymtotic_gamma(z):
        Return the log of the gamma function in its asymtotic limit
        as z->infty.  This is from Abramowitz and Stegun eqn 6.1.41.
    """
    # Stirling series: (z - 1/2) ln z - z + ln(2 pi)/2 plus the
    # 1/(12z) - 1/(360 z^3) + ... correction terms (through z^-7).
    result = (z - 0.5) * Num.log(z) - z + 0.91893853320467267
    y = 1.0 / (z * z)
    correction = (((- 5.9523809523809529e-4 * y
                    + 7.9365079365079365079365e-4) * y
                   - 2.7777777777777777777778e-3) * y
                  + 8.3333333333333333333333e-2) / z
    return result + correction
def prob_sum_powers(power, nsum):
    """
    prob_sum_powers(power, nsum):
        Return the probability for noise to exceed 'power' in
        the sum of 'nsum' normalized powers from a power spectrum.
    """
    # A sum of nsum normalized powers is chi^2 distributed with 2*nsum
    # degrees of freedom (with the powers doubled), so this equals:
    #   scipy.special.gammaincc(nsum, power)
    #   = Q(power*2 | nsum*2)                     (from A&S 26.4.19)
    #   = Gamma(nsum, power) / Gamma(nsum)
    return chdtrc(2 * nsum, 2.0 * power)
def log_prob_sum_powers(power, nsum):
    """
    log_prob_sum_powers(power, nsum):
        Return the log of the probability for noise to exceed
        'power' in the sum of 'nsum' normalized powers from a
        power spectrum.  Asymptotic expansions from Abramowitz and
        Stegun Chap. 6 keep this usable for very large powers, where
        the probability itself would underflow.
    """
    def _log_tail(pwr):
        # log of Gamma(nsum, pwr)/Gamma(nsum) via the asymptotic forms.
        return (log_asymtotic_incomplete_gamma(nsum, pwr)
                - log_asymtotic_gamma(nsum))

    if type(power) is float:  # Scalar input
        if power < 100.0:
            return Num.log(prob_sum_powers(power, nsum))
        return _log_tail(power)
    # Array input.  Note that Num.where evaluates both branches.
    return Num.where(power < 100.0,
                     Num.log(prob_sum_powers(power, nsum)),
                     _log_tail(power))
def sigma_power(power):
    """
    sigma_power(power):
        Return the approximate equivalent Gaussian sigma for noise
        to exceed a normalized power level given as 'power'
        in a power spectrum.
    """
    if type(power) is float:  # Scalar input
        if power > 36.0:
            # Asymptotic form, valid for very significant powers.
            return Num.sqrt(2.0 * power - Num.log(PI * power))
        return equivalent_gaussian_sigma(prob_power(power))
    # Array input.  Note that Num.where evaluates both branches.
    return Num.where(power > 36.0,
                     Num.sqrt(2.0 * power - Num.log(PI * power)),
                     extended_equiv_gaussian_sigma(log_prob_sum_powers(power, 1)))
def sigma_sum_powers(power, nsum):
    """
    sigma_sum_powers(power, nsum):
        Return the approximate equivalent Gaussian sigma for noise
        to exceed a sum of 'nsum' normalized powers given by 'power'
        in a power spectrum.
    """
    if type(power) is float:  # Scalar input
        if power < 100.0:
            return equivalent_gaussian_sigma(prob_sum_powers(power, nsum))
        # Large powers: work in log space to avoid underflow.
        return extended_equiv_gaussian_sigma(log_prob_sum_powers(power, nsum))
    # Array input.  Note that Num.where evaluates both branches.
    return Num.where(power < 100.0,
                     equivalent_gaussian_sigma(prob_sum_powers(power, nsum)),
                     extended_equiv_gaussian_sigma(log_prob_sum_powers(power, nsum)))
def power_at_sigma(sigma):
    """
    power_at_sigma(sigma):
        Return the approximate normalized power level that is
        equivalent to a detection of significance 'sigma'.
    """
    # Approximate inverse of the asymptotic sigma_power() relation.
    return sigma ** 2 / 2.0 + Num.log(Num.sqrt(PIBYTWO) * sigma)
def powersum_at_sigma(sigma, nsum):
    """
    powersum_at_sigma(sigma, nsum):
        Return the approximate sum of 'nsum' normalized powers that is
        equivalent to a detection of significance 'sigma'.
    """
    # Invert the chi^2 tail: find the power whose survival probability
    # matches the one-sided gaussian tail probability of 'sigma'.
    tail_prob = 1.0 - ndtr(sigma)
    return 0.5 * chdtri(2.0 * nsum, tail_prob)
def cand_sigma(N, power):
    """
    cand_sigma(N, power):
        Return the sigma of a candidate found in a power
        spectrum of 'N' bins after taking into account the
        number of bins searched.
    """
    # Probability of a noise spike exceeding 'power' in one bin is
    # e^-power; scale by the N independent bins (trials) searched.
    trials_prob = N * Num.exp(-power)
    return ndtri(1.0 - trials_prob)
def fft_max_pulsed_frac(N, numphot, sigma=3.0):
    """
    fft_max_pulsed_frac(N, numphot, sigma=3.0):
        Return the approximate maximum pulsed fraction for a
        sinusoidal signal that _wasn't_ found in a FFT-based
        search.  'N' is the number of bins searched in the FFT.
        'numphot' is the number of photons present in the data.
        And 'sigma' is your confidence (in sigma) that you
        have in expressing this limit.
    """
    # Power level required for a noise spike to appear somewhere in
    # the N searched bins at the requested significance.
    tail_prob = (1.0 - ndtr(sigma)) / N
    power_required = -Num.log(tail_prob)
    return Num.sqrt(4.0 * numphot * power_required) / N
def p_to_f(p, pd, pdd=None):
    """
    p_to_f(p, pd, pdd=None):
        Convert period, period derivative and period second
        derivative to the equivalent frequency counterparts.
        Will also convert from f to p (the transform is symmetric).
        Returns [f, fd] if pdd is None, else [f, fd, fdd].
    """
    f = 1.0 / p
    fd = -pd / (p * p)
    # Use 'is None' rather than '== None': identity is the correct
    # (and array-safe) way to test for an omitted argument.
    if pdd is None:
        return [f, fd]
    if pdd == 0.0:
        fdd = 0.0
    else:
        fdd = 2.0 * pd * pd / (p**3.0) - pdd / (p * p)
    return [f, fd, fdd]
def pferrs(porf, porferr, pdorfd=None, pdorfderr=None):
    """
    pferrs(porf, porferr, pdorfd=None, pdorfderr=None):
        Calculate the period or frequency errors and
        the pdot or fdot errors from the opposite one.
        Returns [forp, forperr] if pdorfd is None, else
        [forp, forperr, fdorpd, fdorpderr].
    """
    # 'is None' is the correct test for an omitted argument (and stays
    # valid if array inputs are ever passed for pdorfd).
    if pdorfd is None:
        return [1.0 / porf, porferr / porf**2.0]
    forperr = porferr / porf**2.0
    # Standard error propagation for fd = -pd / p^2.
    fdorpderr = Num.sqrt((4.0 * pdorfd**2.0 * porferr**2.0) / porf**6.0 +
                         pdorfderr**2.0 / porf**4.0)
    [forp, fdorpd] = p_to_f(porf, pdorfd)
    return [forp, forperr, fdorpd, fdorpderr]
def pdot_from_B(p, B):
    """
    pdot_from_B(p, B):
        Return a pdot (or p, actually) that a pulsar with spin
        period (or pdot) 'p' (in sec) would experience given a
        magnetic field strength 'B' in gauss.
    """
    # Inverts B = 3.2e19 * sqrt(p * pdot); symmetric in p and pdot.
    return (B / 3.2e19) ** 2.0 / p
def pdot_from_age(p, age):
    """
    pdot_from_age(p, age):
        Return the pdot that a pulsar with spin period 'p' (in sec)
        would experience given a characteristic age 'age' (in yrs).
    """
    # Characteristic age tau = p / (2 pdot), solved for pdot with the
    # age converted from Julian years to seconds.
    return p / (2.0 * age * SECPERJULYR)
def pdot_from_edot(p, edot, I=1.0e45):
    """
    pdot_from_edot(p, edot, I=1.0e45):
        Return the pdot that a pulsar with spin period 'p (in sec)
        would experience given an Edot 'edot' (in ergs/s) and a
        moment of inertia I.
    """
    # Inverts the spin-down luminosity Edot = 4 pi^2 I pdot / p^3.
    return (p**3.0 * edot) / (4.0 * PI * PI * I)
def pulsar_age(f, fdot, n=3, fo=1e99):
    """
    pulsar_age(f, fdot, n=3, fo=1e99):
        Return the age of a pulsar (in years) given the spin frequency
        and frequency derivative.  By default, the characteristic age
        is returned (assuming a braking index 'n'=3 and an initial
        spin frequency fo >> f).  But 'n' and 'fo' can be set.
    """
    # General braking-index age formula; the (f/fo) correction term
    # vanishes for the default fo = 1e99.
    correction = 1.0 - (f / fo) ** (n - 1.0)
    return -f / ((n - 1.0) * fdot) * correction / SECPERJULYR
def pulsar_edot(f, fdot, I=1.0e45):
    """
    pulsar_edot(f, fdot, I=1.0e45):
        Return the pulsar Edot (in erg/s) given the spin frequency and
        frequency derivative.  The NS moment of inertia is assumed to be
        I = 1.0e45 g cm^2
    """
    # Rotational energy loss rate: -dE/dt = -4 pi^2 I f fdot
    # (positive for a spinning-down pulsar, where fdot < 0).
    return -4.0 * PI * PI * I * f * fdot
def pulsar_B(f, fdot):
    """
    pulsar_B(f, fdot):
        Return the estimated pulsar surface magnetic field strength
        (in Gauss) given the spin frequency and frequency derivative.
    """
    # Magnetic dipole estimate B = 3.2e19 sqrt(p pdot), rewritten in
    # terms of f and fdot (fdot must be negative).
    return 3.2e19 * Num.sqrt(-fdot / f**3.0)
def pulsar_B_lightcyl(f, fdot):
    """
    pulsar_B_lightcyl(f, fdot):
        Return the estimated pulsar magnetic field strength at the
        light cylinder (in Gauss) given the spin frequency and
        frequency derivative.
    """
    # p_to_f is its own inverse, so this converts (f, fdot) -> (p, pd).
    period, pdot = p_to_f(f, fdot)
    return 2.9e8 * period**(-5.0 / 2.0) * Num.sqrt(pdot)
def psr_info(porf, pdorfd, time=None, input=None, I=1e45):
    """
    psr_info(porf, pdorfd, time=None, input=None, I=1e45):
        Print a list of standard derived pulsar parameters based
        on the period (or frequency) and its first derivative.  The
        routine will automatically assume you are using periods if
        'porf' <= 1.0 and frequencies otherwise.  You can override this
        by setting input='p' or 'f' appropriately.  If time is specified
        (duration of an observation) it will also return the Fourier
        frequency 'r' and Fourier fdot 'z'.  I is the NS moment of inertia.
    """
    # If the input looks like (or is flagged as) a frequency, convert
    # it to a period and period-derivative before deriving anything.
    if ((input==None and porf > 1.0) or
        (input=='f' or input=='F')):
        pdorfd = - pdorfd / (porf * porf)
        porf = 1.0 / porf
    # Frequency counterparts of the (possibly converted) period values.
    [f, fd] = p_to_f(porf, pdorfd)
    print ""
    print "             Period = %f s" % porf
    print "              P-dot = %g s/s" % pdorfd
    print "          Frequency = %f Hz" % f
    print "              F-dot = %g Hz/s" % fd
    # Fourier-domain quantities only make sense for a given obs length.
    if (time):
        print "       Fourier Freq = %g bins" % (f * time)
        print "      Fourier F-dot = %g bins" % (fd * time * time)
    print "              E-dot = %g ergs/s" % pulsar_edot(f, fd, I)
    print "    Surface B Field = %g gauss" % pulsar_B(f, fd)
    print " Characteristic Age = %g years" % pulsar_age(f, fd)
    print "          Assumed I = %g g cm^2" % I
    print ""
def doppler(freq_observed, voverc):
    """doppler(freq_observed, voverc):
        This routine returns the frequency emitted by a pulsar
        (in MHz) given that we observe the pulsar at frequency
        freq_observed (MHz) while moving with radial velocity
        (in units of v/c) of voverc wrt the pulsar.
    """
    # First-order (non-relativistic) Doppler correction.
    emitted = freq_observed * (1.0 + voverc)
    return emitted
|
You know, this is a chronic problem. I stopped calling these people Muslim terrorists. They're about as Muslim as I am. I mean, they have no respect for anybody else's life, that's not what the Koran says. Europe has an enormous radical problem. I think ISIS is a cult. Not an Islamic cult. I think it's a cult.
What happened today, according to current reports, is that two men went on a killing spree. Their killing spree, like most killing sprees, will have some thin rationale. Even the worst villains believe themselves to be heroes. But in truth, it was unprovoked slaughter. The fault lies with no one but them and their accomplices. Their crime isn't explained by cartoons or religion. Plenty of people read Charlie Hebdo's cartoons and managed to avoid responding with mass murder. Plenty of people follow all sorts of religions and somehow get through the day without racking up a body count. The answers to what happened today won't be found in Charlie Hebdo's pages. They can only be found in the murderers' sick minds.
. @peterbergencnn's argument: If you ignore 9/11, attacks on Americans abroad and honor killings--Muslim extremism not so bad.
While it's no doubt true that egregious violations of gay people's human rights have been -- and in some cases, continue to be -- carried out in the name of Christianity, there is no comparison here. Check out this chart, and see if you can spot the pattern. And then try to extract yourself from the rabbit hole I've led you down. We should all be able to agree that holding the horrendous actions of killers against larger groups with whom they're associated is almost always unjustifiable and wrong. We can lock arms and agree that a majority of Muslims across the world do not condone what happened in Paris yesterday, or in Iraq and Syria over the last year, or in London in 2005, or in Madrid in 2004, or in Bali in 2002, or in New York City and Washington in 2001, etc. But those important caveats should not give way to the mindless repetition of a flawed mantra in order to shield a politically correct narrative against unpleasant realities. This form of denial is frightening and self-defeating. Left unchecked, it's societal suicide.
|
################ Copyright 2005-2016 Team GoldenEye: Source #################
#
# This file is part of GoldenEye: Source's Python Library.
#
# GoldenEye: Source's Python Library is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or(at your option) any later version.
#
# GoldenEye: Source's Python Library is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GoldenEye: Source's Python Library.
# If not, see <http://www.gnu.org/licenses/>.
#############################################################################
import math
import random
from GamePlay import GEScenario
from .Utils import GetPlayers, _
from .Utils.GEPlayerTracker import GEPlayerTracker
from random import shuffle
import GEPlayer, GEUtil, GEMPGameRules as GERules, GEGlobal as Glb, GEWeapon
USING_API = Glb.API_VERSION_1_2_0
# Organized by strength, in groups of 4. Stronger weapons are higher.
# With 24 entries in tiers of 4, (index // 4) yields a 0-5 strength
# tier, where 0 is the strongest tier and 5 the weakest.
weaponList = [
    "weapon_golden_pp7", "weapon_golden_gun", "weapon_rocket_launcher", "weapon_grenade_launcher",
    "weapon_moonraker", "weapon_silver_pp7", "weapon_rcp90", "weapon_auto_shotgun",
    "weapon_cmag", "weapon_ar33", "weapon_phantom", "weapon_shotgun",
    "weapon_kf7", "weapon_knife_throwing", "weapon_sniper_rifle", "weapon_zmg",
    "weapon_d5k_silenced", "weapon_d5k", "weapon_pp7", "weapon_pp7_silenced",
    "weapon_klobb", "weapon_knife", "weapon_dd44", "weapon_grenade" ]

TR_WEPINDEX = "wepindex" # Index of the weapon the player has on the list above.
class GunTrade( GEScenario ):
    """
    Gun Trade game mode: every player holds exactly one weapon drawn
    from a shared 24-weapon pool.  Killing another player trades
    weapons with them, while slapper kills (and killbind abuse)
    substitute a fresh weapon from the queue instead.
    """

    def __init__( self ):
        GEScenario.__init__( self )

        # Queue of the weapon indexes currently NOT held by any player.
        self.indexQueue = [0] * 24
        # Per-player storage; holds each player's TR_WEPINDEX.
        self.pltracker = GEPlayerTracker( self )

    def GetPrintName( self ):
        return "#GES_GP_GT_NAME"

    def GetScenarioHelp( self, help_obj ):
        help_obj.SetDescription( "#GES_GP_GT_HELP" )

    def GetGameDescription( self ):
        if GERules.IsTeamplay():
            return "Team Gun Trade"
        else:
            return "Gun Trade"

    def GetTeamPlay( self ):
        return Glb.TEAMPLAY_TOGGLE

    def OnLoadGamePlay( self ):
        GEUtil.PrecacheSound( "GEGamePlay.Woosh" ) # Played when weapons change hands
        GERules.EnableInfiniteAmmo()

    def OnUnloadGamePlay(self):
        super( GunTrade, self ).OnUnloadGamePlay()
        self.pltracker = None

    def OnRoundBegin( self ):
        GEScenario.OnRoundBegin( self )

        GERules.DisableWeaponSpawns()
        GERules.DisableAmmoSpawns()

        # Rebuild and reshuffle the weapon index queue, then each player
        # will be issued a unique weapon when they spawn.
        self.indexQueue = [0] * 24

        # Take all the players' weapons away so we don't get duplicates.
        for player in GetPlayers():
            self.pltracker[player][TR_WEPINDEX] = -1

        for i in range(24):
            self.indexQueue[i] = i

        self.gt_QueueShuffle()

    def OnPlayerConnect( self, player ):
        # -1 marks that a new weapon must be issued on their next spawn.
        self.pltracker[player][TR_WEPINDEX] = -1

    def OnPlayerDisconnect( self, player ):
        # Put their weapon back in the queue so we don't lose it -- but
        # only if they were actually issued one.  Appending the -1
        # placeholder would later be issued and indexed as
        # weaponList[-1], permanently corrupting the pool.
        if self.pltracker[player][TR_WEPINDEX] != -1:
            self.indexQueue.append( self.pltracker[player][TR_WEPINDEX] )

    def OnPlayerSpawn( self, player ):
        if (self.pltracker[player][TR_WEPINDEX] == -1): # If we haven't been issued a weapon, pull one from the stack.
            self.gt_IssueWeapon( player )

        self.gt_SpawnWeapon( player )

        if player.IsInitialSpawn():
            GEUtil.PopupMessage( player, "#GES_GP_GT_NAME", "#GES_GPH_GT_GOAL" )

    def OnPlayerKilled( self, victim, killer, weapon ):
        # Let the base scenario behavior handle scoring so we can just worry about the gun swap mechanics.
        GEScenario.OnPlayerKilled( self, victim, killer, weapon )

        if not victim:
            return

        if killer and victim != killer:
            # Normal kill: decide how the weapons change hands.
            wepname = weapon.GetClassname().lower()

            if (wepname == "weapon_slappers" or wepname == "trigger_trap"): # Slapper kills replace the victim's weapon with a random new one.
                self.gt_SubSwapWeapon( killer, victim )
            elif wepname == "player" and self.gt_GetWeaponTierOfPlayer(victim) >= self.gt_GetWeaponTierOfPlayer(killer): # Killbind griefing protection; lower tiers are better.
                self.gt_SubSwapWeapon( killer, victim )
            else:
                self.gt_SwapWeapons( killer, victim ) # Normal swap.

            # Killer ID, Victim ID, Weapon Killer Traded Away, Weapon Victim Traded Away
            GEUtil.EmitGameplayEvent( "gt_weaponswap" , str( killer.GetUserID()), str( victim.GetUserID() ), weaponList[ self.pltracker[victim][TR_WEPINDEX] ], weaponList[ self.pltracker[killer][TR_WEPINDEX] ], True )

            self.gt_SpawnWeapon( killer ) # Only killer gets their weapon right now.

            GEUtil.PlaySoundTo( victim, "GEGamePlay.Woosh" )
            GEUtil.PlaySoundTo( killer, "GEGamePlay.Woosh" )

        victim.StripAllWeapons() # Victim always loses their weapons so they never drop anything, as there are no weapon pickups in this mode.

    # This is used to make sure we can't pick up any weapons we're not supposed to. Players shouldn't drop weapons in this
    # mode but it doesn't hurt to cut out other ways of getting weapons too.
    def CanPlayerHaveItem( self, player, item ):
        weapon = GEWeapon.ToGEWeapon( item )

        if weapon:
            name = weapon.GetClassname().lower()
            wI = self.pltracker[player][TR_WEPINDEX]

            # Only the weapon we were dealt, or slappers, are allowed.
            if name == weaponList[wI] or name == "weapon_slappers":
                return True

            return False

        return True

    # ---------------------------
    # GAMEPLAY SPECIFIC FUNCTIONS
    # ---------------------------

    # We shuffle the weapon indexes this way to make sure that there's roughly an even destribution of the different
    # weapon strengths in play at any given time. Since this que controls the weapons coming into play, having it be a controlled
    # mix means there will typically be a nice destribution of weapon strengths getting substituted in.
    # Won't be perfect with higher playercounts if a bunch of a given strength of weapon gets knocked out, but that's the name of the game.
    # If someone decides to get rid of all the weak/strong weapons then they'll have to deal with an oversaturated queue.
    # Shuffle kind of sucks since stuff like 123443211234 can happen, but it should do the job well enough.
    def gt_QueueShuffle( self ):
        holdingList = [ [],[],[],[],[],[] ]
        entries = len(self.indexQueue)

        # Sort the indexes into seperate lists based on their strength.
        # Integer floor division ('//') keeps the index an int, which is
        # required for list indexing (math.floor() returns a float on
        # Python 3 inputs like self.indexQueue[i] / 4).
        for i in range(entries):
            holdingList[self.indexQueue[i] // 4].append(self.indexQueue[i])

        self.indexQueue = [] # Wipe the index queue now that all of our data is in the holding list
        viablelists = [] # Lists ordered by weapon strength that still have one weapon in them
        unchosenlists = [] # Lists that haven't been chosen this shuffle

        # Get the lists that actually have anything in them.
        for i in range(6):
            if holdingList[i]:
                viablelists.append(i)

        # Rebuild the queue one entry at a time; iterate over however
        # many entries we actually had rather than assuming 24.
        for i in range(entries):
            if not unchosenlists: # If unchosenlists is empty, refill it with all the lists that still have entries
                unchosenlists = list(viablelists)

            pickedlist = random.choice(unchosenlists) # Pick a random list we haven't used yet
            unchosenlists.remove(pickedlist) # This is just to make sure we get a decent mix of catagories
            pickedindex = random.choice(holdingList[pickedlist]) # Pick a random weapon from that list
            holdingList[pickedlist].remove(pickedindex) # Then remove that weapon from the list so we don't pick it again

            if not holdingList[pickedlist]: # If this list no longer has anything in it, it's not viable anymore
                viablelists.remove(pickedlist)

            self.indexQueue.append(pickedindex) # Finally add it back to our index que

    # Get the strength rating of the player's weapon (0 = strongest tier).
    def gt_GetWeaponTierOfPlayer( self, player ):
        if not player:
            return -1

        # Floor division matches math.floor() for these values and
        # keeps the result an int.
        return self.pltracker[player][TR_WEPINDEX] // 4

    # Give the player a weapon from the queue and add their existing one to the queue if they have one, then return it.
    def gt_IssueWeapon( self, player ):
        if not player:
            return

        if (self.pltracker[player][TR_WEPINDEX] != -1):
            self.indexQueue.append( self.pltracker[player][TR_WEPINDEX] )

        self.pltracker[player][TR_WEPINDEX] = self.indexQueue.pop(0) # Pull the index at the bottom of the queue and give it to the player.

        return self.pltracker[player][TR_WEPINDEX]

    # Actually give the player their weapon.
    def gt_SpawnWeapon( self, player ):
        if not player:
            return

        player.StripAllWeapons()
        player.GiveNamedWeapon( "weapon_slappers", 0 )
        player.GiveNamedWeapon( weaponList[ self.pltracker[player][TR_WEPINDEX] ], 800 ) # We don't care about ammo because it is infinite.
        player.WeaponSwitch( weaponList[ self.pltracker[player][TR_WEPINDEX] ] )

    # Swap weapons between the two players.
    def gt_SwapWeapons( self, player1, player2 ):
        if not player1 or not player2:
            return

        index1 = self.pltracker[player1][TR_WEPINDEX]
        self.pltracker[player1][TR_WEPINDEX] = self.pltracker[player2][TR_WEPINDEX]
        self.pltracker[player2][TR_WEPINDEX] = index1

    # Swap weapons and substitute in a new one for player1, telling the players what got swapped.
    def gt_SubSwapWeapon( self, player1, player2 ):
        if not player1 or not player2:
            return

        self.gt_SwapWeapons( player1, player2 )
        oldwep = self.pltracker[player1][TR_WEPINDEX] # What player1 just received in the swap...
        newwep = self.gt_IssueWeapon( player1 )       # ...gets traded back into the queue for a fresh one.

        msg = _( "#GES_GP_GT_SWAP", GEWeapon.WeaponPrintName(weaponList[oldwep]), GEWeapon.WeaponPrintName(weaponList[newwep]) )
        GEUtil.PostDeathMessage( msg )
|
I agree about the little pieces of you that go on ahead. And remember, this works in reverse as well. You get home again and those little pieces can take anything up to a fortnight to straggle home behind you.
|
# This file is part of Pimlico
# Copyright (C) 2020 Mark Granroth-Wilding
# Licensed under the GNU LGPL v3.0 - https://www.gnu.org/licenses/lgpl-3.0.en.html
"""
Software licenses, for referring to in software dependency documentation.
Literals here are used to refer to the licenses that software uses.
See https://choosealicense.com/licenses/ for more details and comparison.
"""
class SoftwareLicense(object):
    """
    A software license that dependency documentation can refer to.

    name: short human-readable license name.
    description: optional summary of the license's conditions.
    url: optional link to the full license text.
    """
    def __init__(self, name, description=None, url=None):
        self.name = name
        self.description = description
        self.url = url
# ---------------------------------------------------------------------------
# Standard license instances, ordered roughly from strongest copyleft to
# most permissive.  Descriptions summarize https://choosealicense.com/licenses/
# ---------------------------------------------------------------------------

GNU_AGPL_V3 = SoftwareLicense(
    "GNU AGPLv3",
    description="""\
Permissions of this strongest copyleft license are conditioned on making available complete source code of
licensed works and modifications, which include larger works using a licensed work, under the same license. Copyright
and license notices must be preserved. Contributors provide an express grant of patent rights. When a modified
version is used to provide a service over a network, the complete source code of the modified version must be made
available.
""",
    url="https://www.gnu.org/licenses/agpl-3.0.en.html"
)

GNU_GPL_V3 = SoftwareLicense(
    "GNU GPLv3",
    description="""\
Permissions of this strong copyleft license are conditioned on making available complete source code of licensed
works and modifications, which include larger works using a licensed work, under the same license. Copyright and
license notices must be preserved. Contributors provide an express grant of patent rights.
""",
    url="https://www.gnu.org/licenses/gpl-3.0.html"
)

GNU_LGPL_V3 = SoftwareLicense(
    "GNU LGPLv3",
    description="""\
Permissions of this copyleft license are conditioned on making available complete source code of licensed works
and modifications under the same license or the GNU GPLv3. Copyright and license notices must be preserved.
Contributors provide an express grant of patent rights. However, a larger work using the licensed work through
interfaces provided by the licensed work may be distributed under different terms and without source code for the
larger work.
""",
    url="https://www.gnu.org/licenses/lgpl-3.0.html"
)

# NOTE(review): named "LGPLv2" but the URL points at the LGPL 2.1 text --
# confirm whether the name should read "GNU LGPLv2.1".
GNU_LGPL_V2 = SoftwareLicense(
    "GNU LGPLv2",
    description="""\
Permissions of this copyleft license are conditioned on making available complete source code of licensed works
and modifications under the same license or the GNU GPLv2. Copyright and license notices must be preserved.
Contributors provide an express grant of patent rights. However, a larger work using the licensed work through
interfaces provided by the licensed work may be distributed under different terms and without source code for the
larger work.
""",
    url="https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html"
)

MOZILLA_V2 = SoftwareLicense(
    "Mozilla Public License 2.0",
    description="""\
Permissions of this weak copyleft license are conditioned on making available source code of licensed files and
modifications of those files under the same license (or in certain cases, one of the GNU licenses). Copyright and
license notices must be preserved. Contributors provide an express grant of patent rights. However, a larger work
using the licensed work may be distributed under different terms and without source code for files added in the
larger work.
""",
    url="https://www.mozilla.org/en-US/MPL/"
)

APACHE_V2 = SoftwareLicense(
    "Apache License 2.0",
    description="""\
A permissive license whose main conditions require preservation of copyright and license notices. Contributors
provide an express grant of patent rights. Licensed works, modifications, and larger works may be distributed under
different terms and without source code.
""",
    url="https://www.apache.org/licenses/LICENSE-2.0"
)

MIT = SoftwareLicense(
    "MIT License",
    description="""\
A short and simple permissive license with conditions only requiring preservation of copyright and license notices.
Licensed works, modifications, and larger works may be distributed under different terms and without source code.
""",
    url="https://opensource.org/licenses/MIT"
)

BOOST = SoftwareLicense(
    "Boost Software License 1.0",
    description="""\
A simple permissive license only requiring preservation of copyright and license notices for source (and not binary)
distribution. Licensed works, modifications, and larger works may be distributed under different terms and without
source code.
""",
    url="https://www.boost.org/users/license.html"
)

UNLICENSE = SoftwareLicense(
    "The Unlicense",
    description="""\
A license with no conditions whatsoever which dedicates works to the public domain. Unlicensed works, modifications,
and larger works may be distributed under different terms and without source code.
""",
    url="https://unlicense.org/"
)

# Licenses referenced by URL only (no summary text).
BSD = SoftwareLicense(
    "BSD License, 3-clause",
    url="https://opensource.org/licenses/BSD-3-Clause",
)

BSD_2CLAUSE = SoftwareLicense(
    "BSD License, 2-clause",
    url="https://opensource.org/licenses/BSD-2-Clause"
)

PSF = SoftwareLicense(
    "Python Software Foundation License",
    url="https://docs.python.org/3/license.html"
)

NOT_RELEVANT = SoftwareLicense(
    "Not relevant for licensing",
    description="This is simply a placeholder to denote that it is not relevant to say what the license of "
                "the software in question is. For example, it might be part of some other licensed software, "
                "whose license is already covered elsewhere."
)

# Mark this license as the one we use for Pimlico itself, for easy reference
# NOTE(review): the file header above says Pimlico is licensed under the GNU
# LGPL v3.0, but this points at GNU_LGPL_V2 -- confirm which is intended.
pimlico_license = GNU_LGPL_V2
|
Awesome service and a great customer experience. Highly recommended.
User Notice: This is a free business listing provided for Sign Guy Arthur Blake. We do not guarantee the accuracy of this listing and recommend that you verify the information by calling the business or by visiting their website link, if available. User-generated content such as reviews reflects opinions expressed solely by our users. Any article submission for Sign Guy Arthur Blake should only be made by or with permission from the author. Media content such as photos and videos for Sign Guy Arthur Blake is sourced from the public domain and from social media websites, submitted and tagged by users who have access to it or who own it.
|
#******************
# MODULE DOCSTRING
#******************
"""
LOMAP: Graph generation
=====
Alchemical free energy calculations hold increasing promise as an aid to drug
discovery efforts. However, applications of these techniques in discovery
projects have been relatively few, partly because of the difficulty of planning
and setting up calculations. The Lead Optimization Mapper (LOMAP) is an
automated algorithm to plan efficient relative free energy calculations between
potential ligands within a substantial set of compounds.
"""
#*****************************************************************************
# Lomap2: A toolkit to plan alchemical relative binding affinity calculations
# Copyright 2015 - 2016 UC Irvine and the Authors
#
# Authors: Dr Gaetano Calabro' and Dr David Mobley
#
# This part of the code has been originally made by Jonathan Redmann,
# and Christopher Summa at Summa Lab, Dept. of Computer Science,
# University of New Orleans and it has just been adapded to the new Lomap code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see http://www.gnu.org/licenses/
#*****************************************************************************
#****************
# MODULE IMPORTS
#****************
import networkx as nx
import numpy as np
import sys
import matplotlib.pyplot as plt
import copy
from operator import itemgetter
from rdkit.Chem import Draw
from rdkit.Chem import AllChem
import os.path
import logging
from PyQt4 import QtGui
import tempfile
import shutil
__all__ = ['GraphGen']
#*************************
# Graph Class
#*************************
class GraphGen(object):
    """
    This class is used to set and generate the graph used to plan
    binding free energy calculation
    """
    def __init__(self, dbase):
        """
        Initialization function

        Parameters
        ----------
        dbase : dbase object
            the molecule container
        """
        self.dbase = dbase
        # Maximum allowed path length (eccentricity) between any two nodes
        self.maxPathLength = dbase.options.max
        # Hard similarity cutoff below which edges are discarded
        self.similarityScoresLimit = dbase.options.cutoff
        if dbase.options.radial:
            # Radial (star) layout: pick the hub (lead) compound up front
            self.lead_index = self.pick_lead()
        else:
            self.lead_index = None
        # A set of nodes that will be used to save nodes that are not a cycle cover for a given subgraph
        self.nonCycleNodesSet = set()
        # Draw Parameters
        # THIS PART MUST BE CHANGED
        # Max number of displayed chemical compound images as graph nodes
        self.max_images = 2000
        # Max number of displayed nodes in the graph
        self.max_nodes = 100
        # The maximum threshold distance in angstroms unit used to select if a molecule is depicted
        self.max_mol_size = 50.0
        self.edge_labels = False
        # The following section has been strongly copied/adapted from the original implementation
        # Generate a list related to the disconnected graphs present in the initial graph
        if dbase.options.fast and dbase.options.radial:
            # only enable the fast map option if the radial option is in use
            self.initialSubgraphList = self.generateInitialSubgraphList(fast_map = True)
        else:
            self.initialSubgraphList = self.generateInitialSubgraphList()
        # A list of elements made of [edge, weights] for each subgraph
        self.subgraphScoresLists = self.generateSubgraphScoresLists(self.initialSubgraphList)
        # Eliminates from each subgraph those edges whose weights are less than the hard limit
        self.removeEdgesBelowHardLimit()
        # Make a new master list of subgraphs now that there may be more disconnected components
        self.workingSubgraphsList = self.generateWorkingSubgraphsList()
        # Make a new sorted list of [edge, weights] for each subgraph now that there may be new subgraphs
        self.workingSubgraphScoresLists = self.generateSubgraphScoresLists(self.workingSubgraphsList)
        # Remove edges, whose removal does not violate constraints, from the subgraphs,
        # starting with lowest similarity score first
        if dbase.options.fast and dbase.options.radial:
            # if we use the fast and radial option, just need to add the surrounding edges from the initial graph
            self.resultGraph = self.addsurroundEdges()
            # after adding the surround edges, some subgraphs may merge into a larger graph and so need to update the current subgraphs
            #self.resultingSubgraphsList = copy.deepcopy(self.workingSubgraphsList)
            # merge all Subgraphs together for layout
            #self.resultGraph = self.mergeAllSubgraphs()
        else:
            #>>>>>>>>>>>>>>>>>>>>>>>>>>>ISSUE ORDER PROBLEM<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
            self.minimizeEdges()
            #>>>>>>>>>>>>>>>>>>>>>>>>>>>ISSUE ORDER PROBLEM<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
        # Collect together disjoint subgraphs of like charge into subgraphs
        self.resultingSubgraphsList = copy.deepcopy(self.workingSubgraphsList)
        # Combine separate subgraphs into a single resulting graph
        self.resultGraph = self.mergeAllSubgraphs()
        # Make a copy of the resulting graph for later processing in connectResultingComponents()
        self.copyResultGraph = self.resultGraph.copy()
        # Holds list of edges that were added in the connect components phase
        self.edgesAddedInFirstTreePass = []
        # Add edges to the resultingGraph to connect its components
        self.connectSubgraphs()
        return
def pick_lead(self):
if (self.dbase.nums() * (self.dbase.nums() - 1)/2) != self.dbase.strict_mtx.size:
raise ValueError("There are errors in the similarity score matrices")
if not self.dbase.options.hub == "None":
#hub radial option. Use the provided reference compound as a hub
hub_index = None
for i in range(0, self.dbase.nums()):
if os.path.basename(self.dbase[i].getName()) == self.dbase.options.hub:
hub_index = i
if hub_index is None:
logging.info("Warning: the specified center ligand %s is not in the ligand database, will not use the radial option."%self.dbase.options.hub)
return hub_index
else:
#complete radial option. Pick the compound with the highest total similarity to all other compounds to use as a hub
all_sum_i = []
for i in range(0, self.dbase.nums()):
sum_i = 0
for j in range(0, self.dbase.nums()):
sum_i += self.dbase.strict_mtx[i,j]
all_sum_i.append(sum_i)
max_value = max(all_sum_i)
max_index = [i for i, x in enumerate(all_sum_i) if x == max_value]
max_index_final = max_index[0]
return max_index_final
    def generateInitialSubgraphList(self, fast_map = False):
        """
        This function generates a starting graph connecting with edges all the
        compounds with a positive strict similarity score

        Parameters
        ----------
        fast_map : bool
            if True, only radial edges between each compound and the lead
            (hub) compound are created instead of the full pairwise graph

        Returns
        -------
        initialSubgraphList : list of NetworkX graph
            the list of connected component graphs
        """
        compound_graph = nx.Graph()
        # Sanity check: the strict matrix must hold exactly one entry per
        # unordered compound pair (the upper triangle of the score matrix).
        if (self.dbase.nums() * (self.dbase.nums() - 1)/2) != self.dbase.strict_mtx.size:
            raise ValueError("There are errors in the similarity score matrices")
        if not fast_map:
            # Full map: connect every compound pair with a positive strict score.
            for i in range(0, self.dbase.nums()):
                if i==0:
                    compound_graph.add_node(i,ID=self.dbase[i].getID(), fname_comp = os.path.basename(self.dbase[i].getName()))
                for j in range(i+1, self.dbase.nums()):
                    # All nodes are created during the first outer iteration.
                    if i == 0:
                        compound_graph.add_node(j,ID=self.dbase[j].getID(), fname_comp = os.path.basename(self.dbase[j].getName()))
                    wgt = self.dbase.strict_mtx[i,j]
                    if wgt > 0.0:
                        compound_graph.add_edge(i,j,similarity = wgt, strict_flag = True)
        else:
            # Fast map: only add radial edges between each compound and the hub.
            for i in range(0, self.dbase.nums()):
                # add the node for i
                compound_graph.add_node(i,ID=self.dbase[i].getID(), fname_comp = os.path.basename(self.dbase[i].getName()))
                if i != self.lead_index:
                    wgt = self.dbase.strict_mtx[i, self.lead_index]
                    if wgt > 0:
                        compound_graph.add_edge(i,self.lead_index,similarity = wgt, strict_flag = True)
        # Split the initial graph into its connected components.
        initialSubgraphGen = nx.connected_component_subgraphs(compound_graph)
        initialSubgraphList = [x for x in initialSubgraphGen]
        return initialSubgraphList
def generateSubgraphScoresLists(self, subgraphList):
"""
This function generate a list of lists where each inner list is the
weights of each edge in a given subgraph in the subgraphList,
sorted from lowest to highest
Returns
-------
subgraphScoresLists : list of lists
each list contains a tuple with the graph node indexes and their
similatiry as weigth
"""
subgraphScoresLists = []
for subgraph in subgraphList:
weightsDictionary = nx.get_edge_attributes(subgraph, 'similarity')
subgraphWeightsList = [(edge[0], edge[1], weightsDictionary[edge]) for edge in weightsDictionary.keys()]
subgraphWeightsList.sort(key = lambda entry: entry[2])
subgraphScoresLists.append(subgraphWeightsList)
return subgraphScoresLists
    def removeEdgesBelowHardLimit(self):
        """
        This function removes edges below the set hard limit from each subGraph
        and from each weightsList
        """
        totalEdges = 0
        for subgraph in self.initialSubgraphList:
            # Score list parallel to this subgraph, located by position.
            weightsList = self.subgraphScoresLists[self.initialSubgraphList.index(subgraph)]
            index = 0
            for edge in weightsList:
                if edge[2] < self.similarityScoresLimit:
                    subgraph.remove_edge(edge[0],edge[1])
                    # Remember the position of the last removed entry.
                    index = weightsList.index(edge)
            # Drop the removed (lowest-score) entries from the score list.
            # NOTE(review): this assumes weightsList is sorted ascending, so
            # every entry up to the last removed index is below the cutoff.
            # NOTE(review): when no edge is below the cutoff, index stays 0 and
            # the first (still valid) entry is deleted anyway -- confirm intent.
            del weightsList[:index + 1]
            totalEdges = totalEdges + subgraph.number_of_edges()
        #print "Removed = ", totalEdges
def generateWorkingSubgraphsList(self):
"""
After the deletition of the edges that have a weigth less than the
selected threshould the subgraph maybe disconnected and a new master
list of connected subgraphs is genereted
Returns
-------
workingSubgraphsList : list of lists
each list contains a tuple with the graph node indexes and their
similatiry as weigth
"""
workingSubgraphsList = []
for subgraph in self.initialSubgraphList:
newSubgraphList = nx.connected_component_subgraphs(subgraph)
for newSubgraph in newSubgraphList:
workingSubgraphsList.append(newSubgraph)
return workingSubgraphsList
    def minimizeEdges(self):
        """
        Minimize edges in each subgraph while ensuring constraints are met.
        Edges are tried for removal in ascending similarity order; an edge is
        kept only if its removal would violate a constraint.
        """
        for subgraph in self.workingSubgraphsList:
            # Score list parallel to this subgraph, sorted ascending, so the
            # weakest edges are tried for removal first.
            weightsList = self.workingSubgraphScoresLists[self.workingSubgraphsList.index(subgraph)]
            # ISSUE ORDER IS ORIGINATED HERE
            #weightsList = sorted(weightsList, key = itemgetter(1))
            # This part has been copied from the original code
            self.nonCycleNodesSet = self.findNonCyclicNodes(subgraph)
            numberOfComponents = nx.number_connected_components(subgraph)
            if len(subgraph.edges()) > 2: # Graphs must have at least 3 edges to be minimized
                for edge in weightsList:
                    if self.lead_index is not None:
                        # Radial option: never remove an edge incident to the
                        # hub (lead) compound; only edges between non-hub
                        # nodes are candidates for removal.
                        if self.lead_index not in [edge[0], edge[1]]:
                            subgraph.remove_edge(edge[0], edge[1])
                            if self.checkConstraints(subgraph, numberOfComponents) == False:
                                # Removal broke a constraint: restore the edge.
                                subgraph.add_edge(edge[0], edge[1], similarity = edge[2], strict_flag = True)
                    else:
                        subgraph.remove_edge(edge[0], edge[1])
                        if self.checkConstraints(subgraph, numberOfComponents) == False:
                            # Removal broke a constraint: restore the edge.
                            subgraph.add_edge(edge[0], edge[1], similarity = edge[2], strict_flag = True)
    def addsurroundEdges(self):
        """
        Add surrounding edges in each subgraph to make sure all nodes are in cycle
        """
        for subgraph in self.workingSubgraphsList:
            subgraph_nodes = subgraph.nodes()
            if self.lead_index in subgraph_nodes:
                # Only the subgraph containing the lead compound is processed.
                self.nonCycleNodesSet = self.findNonCyclicNodes(subgraph)
                for node in self.nonCycleNodesSet:
                    # For each out-of-cycle node, find the non-hub compound with
                    # the highest strict similarity and connect them (if the
                    # score exceeds the cutoff), closing a cycle through the hub.
                    node_score_list = []
                    for i in range(0, self.dbase.nums()):
                        if i != node and i != self.lead_index:
                            node_score_list.append(self.dbase.strict_mtx[node, i])
                        else:
                            # Placeholder keeps list positions aligned with compound indexes.
                            node_score_list.append(0.0)
                    max_value = max(node_score_list)
                    if max_value > self.similarityScoresLimit:
                        max_index = [i for i, x in enumerate(node_score_list) if x == max_value]
                        max_index_final = max_index[0]
                        subgraph.add_edge(node, max_index_final, similarity = self.dbase.strict_mtx[node, max_index_final], strict_flag = True )
        # NOTE(review): this returns only the *last* subgraph iterated (and
        # would raise NameError on an empty workingSubgraphsList); the caller
        # appears to rebuild resultGraph via mergeAllSubgraphs afterwards --
        # confirm whether returning a single subgraph here is intended.
        return subgraph
def findNonCyclicNodes(self, subgraph):
"""
Generates a list of nodes of the subgraph that are not in a cycle
Parameters
---------
subgraph : NetworkX subgraph obj
the subgraph to check for not cycle nodes
Returns
-------
missingNodesSet : set of graph nodes
the set of graph nodes that are not in a cycle
"""
missingNodesSet = set()
cycleNodes = []
cycleList = nx.cycle_basis(subgraph)
cycleNodes = [node for cycle in cycleList for node in cycle]
missingNodesSet = set([node for node in subgraph.nodes() if node not in cycleNodes])
return missingNodesSet
def checkConstraints(self, subgraph, numComp):
"""
Determine if the given subgraph still meets the constraints
Parameters
----------
subgraph : NetworkX subgraph obj
the subgraph to check for the constraints
numComp : int
the number of connected componets
Returns
-------
constraintsMet : bool
True if all the constraints are met, False otherwise
"""
constraintsMet = True
if not self.remainsConnected(subgraph, numComp):
constraintsMet = False
if constraintsMet:
if not self.checkCycleCovering(subgraph):
constraintsMet = False
if constraintsMet:
if not self.checkMaxDistance(subgraph):
constaintsMet = False
return constraintsMet
def remainsConnected(self, subgraph, numComponents):
"""
Determine if the subgraph remains connected after an edge has been
removed
Parameters
---------
subgraph : NetworkX subgraph obj
the subgraph to check for connection after the edge deletition
numComp : int
the number of connected componets
Returns
-------
isConnected : bool
True if the subgraph is connected, False otherwise
"""
isConnected = False
if numComponents == nx.number_connected_components(subgraph): isConnected = True
return isConnected
def checkCycleCovering(self, subgraph):
"""
Checks if the subgraph has a cycle covering
Parameters
---------
subgraph : NetworkX subgraph obj
the subgraph to check for connection after the edge deletition
Returns
-------
hasCovering : bool
True if the subgraph has a cycle covering, False otherwise
"""
hasCovering = False
# if it is not the same set as before
if(not self.findNonCyclicNodes(subgraph).difference(self.nonCycleNodesSet)): hasCovering = True
return hasCovering
def checkMaxDistance(self, subgraph):
"""
Check to see if the graph has paths from all compounds to all other
compounds within the specified limit
Parameters
---------
subgraph : NetworkX subgraph obj
the subgraph to check for the max distance between nodes
Returns
-------
withinMaxDistance : bool
True if the subgraph has all the nodes within the specified
max distance
"""
withinMaxDistance = True
for node in subgraph:
eccentricity = nx.eccentricity(subgraph, node)
if eccentricity > self.maxPathLength: withinMaxDistance = False
return withinMaxDistance
def mergeAllSubgraphs(self):
"""Generates a single networkx graph object from the subgraphs that have
been processed
Returns
-------
finalGraph : NetworkX graph obj
the final graph produced merging all the subgraphs. The produced
graph may have disconneted parts
"""
finalGraph = nx.Graph()
for subgraph in self.workingSubgraphsList:
finalGraph = nx.union(finalGraph, subgraph)
return finalGraph
def connectSubgraphs(self):
"""
Adds edges to the resultGraph to connect as many components of the final
graph possible
"""
connectSuccess = self.connectGraphComponents_brute_force()
while (connectSuccess) :
connectSuccess = self.connectGraphComponents_brute_force()
# WARNING: The self.workingSubgraphsList at this point is different from
# the copy self.resultingSubgraphsList made before
connectSuccess = self.connectGraphComponents_brute_force_2()
while (connectSuccess) :
connectSuccess = self.connectGraphComponents_brute_force_2()
    def connectGraphComponents_brute_force(self):
        """
        Adds edges to the resultGraph to connect all components that can be
        connected, only one edge is added per component, to form a tree like
        structure between the different components of the resultGraph

        Returns
        -------
        bool
            True if the addition of edges was possible in strict mode, False otherwise
        """
        # Refresh the component list from the current result graph.
        generator_graph = nx.connected_component_subgraphs(self.resultGraph)
        self.workingSubgraphsList = [x for x in generator_graph]
        if len(self.workingSubgraphsList) == 1:
            # Already a single component: nothing left to connect.
            return False
        edgesToCheck = []
        edgesToCheckAdditionalInfo = []
        numzeros = 0
        # Collect every candidate inter-component edge with a positive loose score.
        for i in range(0,len(self.workingSubgraphsList)):
            nodesOfI = self.workingSubgraphsList[i].nodes()
            for j in range(i+1,len(self.workingSubgraphsList)):
                nodesOfJ = self.workingSubgraphsList[j].nodes()
                for k in range(0,len(nodesOfI)):
                    for l in range(0,len(nodesOfJ)):
                        """produce an edge from nodesOfI[k] and nodesofJ[l] if nonzero weights push this edge into possibleEdgeList """
                        #print 'Molecules (%d,%d)' % (nodesOfI[k],nodesOfJ[l])
                        # I assumed that the score matrix is symmetric. In the Graph part this does not seems to be true: <<<<<<<<<<<<<DEBUG>>>>>>>>>>>>>>>
                        similarity = self.dbase.loose_mtx[nodesOfI[k],nodesOfJ[l]]
                        if similarity > 0.0 :
                            edgesToCheck.append((nodesOfI[k], nodesOfJ[l], similarity))
                            edgesToCheckAdditionalInfo.append((nodesOfI[k], nodesOfJ[l], similarity, i, j))
                        else :
                            numzeros = numzeros + 1
        if len(edgesToCheck) > 0:
            # Greedily add the single best-scoring candidate edge, then
            # recompute the component list so the next pass sees the merge.
            sortedList = sorted(edgesToCheck, key = itemgetter(2), reverse=True)
            sortedListAdditionalInfo = sorted(edgesToCheckAdditionalInfo, key = itemgetter(2), reverse=True)
            edgeToAdd = sortedList[0]
            #self.edgeFile.write("\n" + str(edgeToAdd))
            edgeToAddAdditionalInfo = sortedListAdditionalInfo[0]
            # Remember tree-pass edges so the cycle pass does not re-add them.
            self.edgesAddedInFirstTreePass.append(edgeToAdd)
            self.resultGraph.add_edge(edgeToAdd[0], edgeToAdd[1], similarity=edgeToAdd[2], strict_flag = False)
            generator_graph = nx.connected_component_subgraphs(self.resultGraph)
            self.workingSubgraphsList = [x for x in generator_graph]
            return True
        else:
            return False
    def connectGraphComponents_brute_force_2(self):
        """
        Adds a second edge between each of the (former) components of the
        resultGraph to try to provide cycles between (former) components

        Returns
        -------
        bool
            True if the addition of edges was possible in loose mode, False otherwise
        """
        if len(self.resultingSubgraphsList) == 1:
            # Only one original component: no cross-component cycle to close.
            return False
        edgesToCheck = []
        # Collect candidate inter-component edges with a positive loose score.
        for i in range(0,len(self.resultingSubgraphsList)):
            nodesOfI = self.resultingSubgraphsList[i].nodes()
            for j in range(i+1,len(self.resultingSubgraphsList)):
                nodesOfJ = self.resultingSubgraphsList[j].nodes()
                #print '(%d,%d)' % (i,j)
                for k in range(0,len(nodesOfI)):
                    for l in range(0,len(nodesOfJ)):
                        """produce an edge from nodesOfI[k] and nodesofJ[l] if nonzero weights push this edge into possibleEdgeList """
                        #print 'Molecules (%d,%d)' % (nodesOfI[k],nodesOfJ[l])
                        # I assumed that the score matrix is symmetric. In the Graph part this does not seems to be true: <<<<<<<<<<<<<DEBUG>>>>>>>>>>>>>>>
                        similarity = self.dbase.loose_mtx[nodesOfI[k],nodesOfJ[l]]
                        if (similarity > 0.0):
                            edgesToCheck.append((nodesOfI[k], nodesOfJ[l], similarity))
        # Skip candidates already added during the first (tree) pass.
        finalEdgesToCheck = [edge for edge in edgesToCheck if edge not in self.edgesAddedInFirstTreePass]
        if len(finalEdgesToCheck) > 0:
            # Add the best remaining candidate to both the live result graph
            # and the pristine copy, then refresh the former-component list.
            sortedList = sorted(finalEdgesToCheck, key = itemgetter(2), reverse=True)
            edgeToAdd = sortedList[0]
            self.resultGraph.add_edge(edgeToAdd[0], edgeToAdd[1], similarity=edgeToAdd[2], strict_flag = False)
            self.copyResultGraph.add_edge(edgeToAdd[0], edgeToAdd[1], similarity=edgeToAdd[2], strict_flag = False)
            generator_graph = nx.connected_component_subgraphs(self.copyResultGraph)
            self.resultingSubgraphsList = [x for x in generator_graph]
            return True
        else:
            return False
def getGraph(self):
"""
Returns the final generated NetworkX graph
"""
return self.resultGraph
def generate_depictions(self):
def max_dist_mol(mol):
max_dist = 0.0
conf = mol.GetConformer()
for i in range(0,conf.GetNumAtoms()):
crdi = np.array([conf.GetAtomPosition(i).x,conf.GetAtomPosition(i).y,conf.GetAtomPosition(i).z])
for j in range(i+1,conf.GetNumAtoms()):
crdj = np.array([conf.GetAtomPosition(j).x,conf.GetAtomPosition(i).y,conf.GetAtomPosition(j).z])
dist = np.linalg.norm(crdi-crdj)
if dist > max_dist:
max_dist = dist
return max_dist
directory_name = tempfile.mkdtemp()
temp_graph = self.resultGraph.copy()
if nx.number_of_nodes(temp_graph) <= self.max_images:
#Draw.DrawingOptions.atomLabelFontSize=30
#Draw.DrawingOptions.dotsPerAngstrom=100
for n in temp_graph:
id_mol = temp_graph.node[n]['ID']
mol = self.dbase[id_mol].getMolecule()
max_dist = max_dist_mol(mol)
if max_dist < self.max_mol_size:
fname = os.path.join(directory_name, self.dbase[id_mol].getName() + ".png")
#1, modify here to calculate the 2D structure for ligands cannot remove Hydrogens by rdkit
#2, change the graph size to get better resolution
try:
mol = AllChem.RemoveHs(mol)
AllChem.Compute2DCoords(mol)
from rdkit.Chem.Draw.MolDrawing import DrawingOptions
DrawingOptions.bondLineWidth = 2.5
Draw.MolToFile(mol, fname, size=(200,200), kekulize=False, fitimage=True, imageType='png', options=DrawingOptions)
except:
######need to ask RDKit to fix this if possible, see the code issue tracker for more details######
logging.info("Error attempting to remove hydrogens for molecule %s using RDKit. RDKit cannot kekulize the molecule"%self.dbase[id_mol].getName())
AllChem.Compute2DCoords(mol)
from rdkit.Chem.Draw.MolDrawing import DrawingOptions
DrawingOptions.bondLineWidth = 2.5
Draw.MolToFile(mol, fname, size=(200,200), kekulize=False, fitimage=True, imageType='png', options=DrawingOptions)
temp_graph.node[n]['image'] = fname
#self.resultGraph.node[n]['label'] = ''
temp_graph.node[n]['labelloc'] = 't'
temp_graph.node[n]['penwidth'] =2.5
#self.resultGraph.node[n]['xlabel'] = self.resultGraph.node[n]['ID']
for u,v,d in temp_graph.edges(data=True):
if d['strict_flag']==True:
temp_graph[u][v]['color'] = 'cyan'
temp_graph[u][v]['penwidth'] = 2.5
else:
temp_graph[u][v]['color'] = 'red'
temp_graph[u][v]['penwidth'] = 2.5
nx.nx_agraph.write_dot(temp_graph, self.dbase.options.name+'_tmp.dot')
cmd = 'dot -Tpng ' + self.dbase.options.name + '_tmp.dot -o ' + self.dbase.options.name + '.png'
os.system(cmd)
cmd = 'dot -Teps ' + self.dbase.options.name + '_tmp.dot -o ' + self.dbase.options.name + '.eps'
os.system(cmd)
cmd = 'dot -Tpdf ' + self.dbase.options.name + '_tmp.dot -o ' + self.dbase.options.name + '.pdf'
os.system(cmd)
os.remove(self.dbase.options.name+'_tmp.dot')
shutil.rmtree(directory_name, ignore_errors=True)
    # The function to output the score and connectivity txt file
    def layout_info(self):
        """
        Write <name>_score_with_connection.txt listing, for every compound
        pair, the ECR/strict/loose similarities and whether the pair is an
        edge of the result graph.  When the radial option is active, also
        write <name>_morph.txt with FESetup-style "morph_pairs" entries.
        """
        # pass the lead compound index if the radial option is on and generate the morph type of output required by FESetup
        # NOTE(review): the file handles opened here are never closed
        # explicitly; consider "with" blocks.
        if self.lead_index is not None:
            morph_txt = open(self.dbase.options.name+"_morph.txt", "w")
            morph_data = "morph_pairs = "
        info_txt = open(self.dbase.options.name+"_score_with_connection.txt", "w")
        all_key_id = self.dbase.dic_mapping.keys()
        data = ["%-10s,%-10s,%-25s,%-25s,%-15s,%-15s,%-15s,%-10s\n"%("Index_1", "Index_2","Filename_1","Filename_2", "Erc_sim","Str_sim", "Loose_sim", "Connect")]
        for i in range (len(all_key_id)-1):
            for j in range(i+1, len(all_key_id)):
                morph_string = None
                connected = False
                # EAFP: the edge lookup raises when (i, j) is not an edge of
                # the result graph.
                try:
                    similarity = self.resultGraph.edge[i][j]['similarity']
                    #print "Check the similarity", similarity
                    connected = True
                except:
                    pass
                Filename_i = self.dbase.dic_mapping[i]
                Filename_j = self.dbase.dic_mapping[j]
                #print "Check the filename", Filename_i, Filename_j
                strict_similarity = self.dbase.strict_mtx[i,j]
                loose_similarity = self.dbase.loose_mtx[i,j]
                ecr_similarity = self.dbase.ecr_mtx[i,j]
                if connected:
                    new_line = "%-10s,%-10s,%-25s,%-25s,%-15.2f,%-15.5f,%-15.5f,%-10s\n"%(i, j, Filename_i, Filename_j, ecr_similarity, strict_similarity, loose_similarity, "Yes")
                    # generate the morph type, and pick the start ligand based on the similarity
                    if self.lead_index is not None:
                        morph_i = Filename_i.split(".")[0]
                        morph_j = Filename_j.split(".")[0]
                        if i == self.lead_index:
                            morph_string = "%s > %s, "%(morph_i, morph_j)
                        elif j == self.lead_index:
                            morph_string = "%s > %s, "%(morph_j, morph_i)
                        else:
                            # compare i and j with the lead compound, and pick the one with the higher similarity as the start ligand
                            similarity_i = self.dbase.strict_mtx[self.lead_index, i]
                            similarity_j = self.dbase.strict_mtx[self.lead_index, j]
                            if similarity_i> similarity_j:
                                morph_string = "%s > %s, "%(morph_i, morph_j)
                            else:
                                morph_string = "%s > %s, "%(morph_j, morph_i)
                        morph_data += morph_string
                else:
                    new_line = "%-10s,%-10s,%-25s,%-25s,%-15.2f,%-15.5f,%-15.5f,%-10s\n"%(i, j, Filename_i, Filename_j, ecr_similarity, strict_similarity, loose_similarity, "No")
                data.append(new_line)
        info_txt.writelines(data)
        if self.lead_index is not None:
            morph_txt.write(morph_data)
    def writeGraph(self):
        """
        This function writes to a file the final generated NetworkX graph as
        .dot and the .ps files. The mapping between molecule IDs and compounds
        name is saved as text file

        Raises
        ------
        IOError
            if writing the mapping/score text files or generating the
            graph/depiction files fails
        """
        try:
            # Write the ID -> compound-name mapping and the score table.
            self.dbase.write_dic()
            self.layout_info()
        except Exception as e:
            raise IOError("%s: %s.txt" % (str(e), self.dbase.options.name))
        try:
            # Render the depictions and dump the annotated graph as .dot.
            self.generate_depictions()
            nx.nx_agraph.write_dot(self.resultGraph, self.dbase.options.name+'.dot')
        except Exception as e:
            raise IOError('Problems during the file generation: %s' % str(e))
        logging.info(30*'-')
        logging.info('The following files have been generated:\n%s.dot\tGraph file\n%s.png\tPng file\n%s.txt\tMapping Text file' % (self.dbase.options.name, self.dbase.options.name, self.dbase.options.name ))
        logging.info(30*'-')
        return
###### Still in developing stage ######
def draw(self):
"""
This function plots the NetworkX graph by using Matplotlib
"""
logging.info('\nDrawing....')
if nx.number_of_nodes(self.resultGraph) > self.max_nodes:
logging.info('The number of generated graph nodes %d exceed the max number of drawable nodes %s' % (nx.number_of_nodes(self.resultGraph), self.max_nodes))
return
def max_dist_mol(mol):
max_dist = 0.0
conf = mol.GetConformer()
for i in range(0,conf.GetNumAtoms()):
crdi = np.array([conf.GetAtomPosition(i).x,conf.GetAtomPosition(i).y,conf.GetAtomPosition(i).z])
for j in range(i+1,conf.GetNumAtoms()):
crdj = np.array([conf.GetAtomPosition(j).x,conf.GetAtomPosition(i).y,conf.GetAtomPosition(j).z])
dist = np.linalg.norm(crdi-crdj)
if dist > max_dist:
max_dist = dist
return max_dist
# Determine the screen resolution by using PyQt4
app = QtGui.QApplication([])
screen_resolution = app.desktop().screenGeometry()
# Canvas scale factor
scale_canvas = 0.75
# Canvas resolution
max_canvas_size = (int(screen_resolution.width() * scale_canvas) , int(screen_resolution.height() * scale_canvas))
fig = plt.figure(1,facecolor='white')
fig.set_dpi(100)
fig.set_size_inches(max_canvas_size[0]/fig.get_dpi(), max_canvas_size[1]/fig.get_dpi(), forward=True)
ax = plt.subplot(111)
plt.axis('off')
pos=nx.nx_agraph.graphviz_layout( self.resultGraph, prog="neato")
strict_edges = [(u,v) for (u,v,d) in self.resultGraph.edges(data=True) if d['strict_flag'] == True]
loose_edges = [(u,v) for (u,v,d) in self.resultGraph.edges(data=True) if d['strict_flag'] == False]
node_labels = dict([(u, d['ID']) for u,d in self.resultGraph.nodes(data=True)])
#Draw nodes
nx.draw_networkx_nodes(self.resultGraph, pos , node_size=500, node_color='r')
#Draw node labels
nx.draw_networkx_labels(self.resultGraph, pos,labels=node_labels,font_size=10)
if self.edge_labels:
edge_weight_strict = dict([((u,v,), d['similarity']) for u,v,d in self.resultGraph.edges(data=True) if d['strict_flag'] == True])
edge_weight_loose = dict([((u,v,), d['similarity']) for u,v,d in self.resultGraph.edges(data=True) if d['strict_flag'] == False])
for key in edge_weight_strict:
edge_weight_strict[key] = round(edge_weight_strict[key],2)
for key in edge_weight_loose:
edge_weight_loose[key] = round(edge_weight_loose[key],2)
#edge strict
nx.draw_networkx_edge_labels(self.resultGraph, pos, edge_labels=edge_weight_strict, font_color='g')
#edge loose
nx.draw_networkx_edge_labels(self.resultGraph, pos, edge_labels=edge_weight_loose, font_color='r')
#edges strict
nx.draw_networkx_edges(self.resultGraph, pos, edgelist=strict_edges, edge_color='g')
#edges loose
nx.draw_networkx_edges(self.resultGraph, pos, edgelist=loose_edges, edge_color='r')
if nx.number_of_nodes(self.resultGraph) <= self.max_images:
trans = ax.transData.transform
trans2 = fig.transFigure.inverted().transform
cut = 1.0
frame = 10
xmax = cut * max(xx for xx, yy in pos.values()) + frame
ymax = cut * max(yy for xx, yy in pos.values()) + frame
xmin = cut * min(xx for xx, yy in pos.values()) - frame
ymin = cut * min(yy for xx, yy in pos.values()) - frame
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
h = 20
w = 20
mol_size = (200,200)
for each_node in self.resultGraph:
id_mol = self.resultGraph.node[each_node]['ID']
#skip remove Hs by rdkit if Hs cannot be removed
try:
mol = AllChem.RemoveHs(self.dbase[id_mol].getMolecule())
except:
######need to ask RDKit to fix this if possible, see the code issue tracker for more details######
mol = self.dbase[id_mol].getMolecule()
logging.info("Error attempting to remove hydrogens for molecule %s using RDKit. RDKit cannot kekulize the molecule"%self.dbase[id_mol].getName())
# max_dist = max_dist_mol(mol)
# if max_dist > 7.0:
# continue
AllChem.Compute2DCoords(mol)
#add try exception for cases cannot be draw
try:
img_mol = Draw.MolToImage(mol,mol_size, kekulize = False)
except Exception as ex:
img_mol = None
logging.exception("This mol cannot be draw using the RDKit Draw function, need to check for more details...")
xx, yy = trans(pos[each_node])
xa, ya = trans2((xx,yy))
nodesize_1 = (300.0/(h*100))
nodesize_2 = (300.0/(w*100))
p2_2 = nodesize_2/2
p2_1 = nodesize_1/2
a = plt.axes([xa - p2_2, ya - p2_1, nodesize_2, nodesize_1])
#self.resultGraph.node[id_mol]['image'] = img_mol
#a.imshow(self.resultGraph.node[each_node]['image'])
a.imshow(img_mol)
a.axis('off')
# plt.savefig('graph.png', facecolor=fig.get_facecolor())
# print 'Graph .png file has been generated...'
plt.show()
return
|
Our team, coming from various backgrounds and experiences, has been instrumental in shaping who we are today. With customer service at the forefront, our team drives FrontStreet’s commitment of serving clients with loyalty, integrity, and collaboration. We are always on your side to accomplish your goals.
Tom joined FrontStreet as the Chief Financial Officer four years ago, to grow the company from an entrepreneurial start-up to a national brand. Bringing three decades of Wall Street experience, where he served in a C-level capacity, Tom worked with early stage funding, corporate restructuring, and global expansion of service businesses. Prior to joining, Tom partnered with Skip Warner, FrontStreet’s current COO, where they spearheaded global expansion of back-office support at the world’s largest expert network. Partnering again at FrontStreet, they endeavor to lead the team as an agile and valuable partner to our portfolio of client companies. Unknown to most, Tom is a talented chef and mixologist. He enjoys cooking for his wife and three children, as well as hosting small events. Most of Tom’s spare time is spent outdoors, near his home on the North Shore of Long Island.
Skip brings to FrontStreet over twenty years of financial and managerial experience having led accounting and operations teams in multiple service industries. Adept at translating numbers into dollars, while having an acumen for process improvements and systems automation, Skip carried FrontStreet into the new millennium. He previously spent several years at the Gerson Lehrman Group (GLG), dedicated to building a global enterprise connecting business professionals with industry experts to solve problems through a “platform for professional learning”. He then came to FrontStreet and partnered with Tom Hutzel, former colleague at GLG and FrontStreet’s current CFO, to once again grow a fledgling business. When not in the office, Skip can be found swinging a hammer, lending his handyman skills to friends and family or riding his motorcycle along the Long Island shore. He enjoys time at home with his wife and two kids and coaching his son’s soccer team.
Stephan recently joined FrontStreet as the Vice President of National Accounts. An Atlanta, GA native, he travels the country building relationships with both existing and new clients. Previously, Stephan led regional business development for Brightview Landscapes. He spent many years as an Account Executive with G&K Services, building expertise in the uniform and facility services space. Stephan enjoys spending time with his wife and children when he’s not off jet setting.
As the Vice President of Operations, Gina leads the Repairs & Maintenance operational teams to seamlessly triage work orders and communicate efficiently with clients. In Gina’s tenure at FrontStreet, she has been transformational in the execution of client service through the use of our technology platform and team building. Focusing her career exclusively in facilities management, Gina traversed both the client and service provider sides of the industry. A native of Chicago, and an avid Cubs fan, Gina now resides on Long Island, where she enjoys spending time with her husband and family.
Maria joined FrontStreet in 2017 as the Director of the newly formed Vendor Management and Procurement Department. Charged with building meaningful relationships with our vendor and technician base, the Vendor Management Department directly manages service providers to onboard them and engage them in successful partnerships with our company. Born and raised in Barcelona, Maria brings extensive experience in the Facilities Management industry nationally and internationally. She moved to New York a few years ago and enjoys travelling, nature and the beautiful beaches of Long Island.
|
#!/usr/bin/env python3
# This file is part of Checkbox.
#
# Copyright 2012 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
# Package metadata and entry points for the plainbox distribution.
setup(
    name="plainbox",
    version="0.2",
    # Automatically discover every package under the source tree.
    packages=find_packages(),
    author="Zygmunt Krynicki",
    # Run with: python setup.py test
    test_suite='plainbox.tests.test_suite',
    author_email="zygmunt.krynicki@canonical.com",
    license="GPLv3+",
    description="Simple replacement for checkbox",
    # Expose the "plainbox" command-line tool.
    entry_points={
        'console_scripts': [
            'plainbox=plainbox.public:main',
        ]
    })
|
Published 04/21/2019 02:07:46 pm at 04/21/2019 02:07:46 pm in 24000 Btu Window Air Conditioner With Heat.
24000 btu window air conditioner with heat 24000 btu through the wall air conditioner lg seer mega wall mounted value line ductless mini 24000 btu through the wall air conditioner ductless minisplit air conditioners department of energy a ductless minisplit air conditioner is one solution to cooling part of a house photo courtesy of istockphotoluke.
amazoncom ductlessaire btu seer energy star ductless ductlessaire btu seer energy star ductless mini split air conditioner and heat pump variable, btu through the wall air conditioner inverter wall mounted air btu through the wall air conditioner lg seer mega wall mounted value line ductless mini btu through the wall air conditioner , window ac unit btu window air conditioner with heat this item window ac unit btu universal window air conditioner bracket heavy duty window ac support support , ge btu window air conditioner mysaidzinfo ge btu window air conditioner window air conditioner window air conditioner with heat, btu portable air conditioner portable air conditioner with btu portable air conditioner , window air conditioner btu slide out chassis air conditioner window air conditioner btu window air conditioner heat pump with remote control timer and electric , btu wall air conditioner laverdiericlub btu wall air conditioner hyper heat zone wall mount mini split air conditioner btu, amazoncom friedrich chill series epgb room air conditioner friedrich chill series epgb room air conditioner with electric heat btu v, btu air conditioners heating venting cooling the home btu volt window air conditioner with lcd remote control energy star, window heaterair conditioner maytag btu with btu heat lg lwhr btu cooling heating window air conditioner with remote, rra freon mini window air conditioner cool and heat btu rra freon mini window air conditioner cool and heat btu window type air conditioner.
|
from __future__ import annotations
import asyncio
import logging
from datetime import datetime, timezone
from typing import Union, List, Optional, TYPE_CHECKING, Literal
from functools import wraps
import discord
from redbot.core.utils import AsyncIter
from redbot.core.utils.chat_formatting import humanize_number
from . import Config, errors, commands
from .i18n import Translator
from .errors import BankPruneError
if TYPE_CHECKING:
from .bot import Red
_ = Translator("Bank API", __file__)

# Public API of this module.
__all__ = [
    "Account",
    "get_balance",
    "set_balance",
    "withdraw_credits",
    "deposit_credits",
    "can_spend",
    "transfer_credits",
    "wipe_bank",
    "get_account",
    "is_global",
    "set_global",
    "get_bank_name",
    "set_bank_name",
    "get_currency_name",
    "set_currency_name",
    "get_default_balance",
    "set_default_balance",
    "get_max_balance",
    "set_max_balance",
    "cost",
    "AbortPurchase",
    "bank_prune",
]

# Largest balance the bank will store (largest signed 64-bit integer).
_MAX_BALANCE = 2 ** 63 - 1

# Bump when the stored data layout changes; see _migrate_config().
_SCHEMA_VERSION = 1

# Default configuration values registered with Config in _init().
_DEFAULT_GLOBAL = {
    "schema_version": 0,
    "is_global": False,
    "bank_name": "Twentysix bank",
    "currency": "credits",
    "default_balance": 100,
    "max_balance": _MAX_BALANCE,
}
_DEFAULT_GUILD = {
    "bank_name": "Twentysix bank",
    "currency": "credits",
    "default_balance": 100,
    "max_balance": _MAX_BALANCE,
}
_DEFAULT_MEMBER = {"name": "", "balance": 0, "created_at": 0}
_DEFAULT_USER = _DEFAULT_MEMBER

# Module-level Config handle; populated by _init() before any API call.
_config: Config = None
log = logging.getLogger("red.core.bank")

# Serializes data-deletion requests so concurrent clears cannot interleave.
_data_deletion_lock = asyncio.Lock()

# Lazy caches for frequently-read global settings (None = not loaded yet).
_cache_is_global = None
_cache = {"bank_name": None, "currency": None, "default_balance": None, "max_balance": None}
async def _init():
    """Initialize the bank's Config object and run schema migrations.

    Must be awaited once at startup before any other function in this
    module is used; every API call here relies on the module-level
    ``_config`` set up here.
    """
    global _config
    # Fixed identifier keeps the saved data location stable across restarts.
    _config = Config.get_conf(None, 384734293238749, cog_name="Bank", force_registration=True)
    _config.register_global(**_DEFAULT_GLOBAL)
    _config.register_guild(**_DEFAULT_GUILD)
    _config.register_member(**_DEFAULT_MEMBER)
    _config.register_user(**_DEFAULT_USER)
    await _migrate_config()
async def _migrate_config():
    """Bring stored bank data up to the current schema version.

    Runs each migration step in order, persisting the version number
    after the step succeeds so a crash never repeats completed work.
    """
    current = await _config.schema_version()
    if current == _SCHEMA_VERSION:
        return
    if current == 0:
        await _schema_0_to_1()
        current += 1
        await _config.schema_version.set(current)
async def _schema_0_to_1():
    """Schema migration 0 -> 1: coerce every stored balance to ``int``.

    Earlier versions could persist float balances; the rest of the module
    assumes integer credits.
    """
    # convert floats in bank balances to ints
    # don't use anything seen below in extensions, it's optimized and controlled for here,
    # but can't be safe in 3rd party use
    # this CANNOT use ctx manager, because ctx managers compare before and after,
    # and floats can be equal to ints: (1.0 == 1) is True
    group = _config._get_base_group(_config.USER)
    bank_user_data = await group.all()
    for user_config in bank_user_data.values():
        if "balance" in user_config:
            user_config["balance"] = int(user_config["balance"])
    await group.set(bank_user_data)
    group = _config._get_base_group(_config.MEMBER)
    bank_member_data = await group.all()
    # Member data is nested one level deeper: {guild_id: {member_id: data}}.
    for guild_data in bank_member_data.values():
        for member_config in guild_data.values():
            if "balance" in member_config:
                member_config["balance"] = int(member_config["balance"])
    await group.set(bank_member_data)
async def _process_data_deletion(
    *, requester: Literal["discord_deleted_user", "owner", "user", "user_strict"], user_id: int
):
    """
    Bank has no reason to keep any of this data
    if the user doesn't want it kept,
    we won't special case any request type

    Parameters
    ----------
    requester : Literal
        Which party asked for the deletion; unknown values are logged but
        the deletion still proceeds.
    user_id : int
        The Discord user ID whose bank data is erased everywhere.
    """
    if requester not in ("discord_deleted_user", "owner", "user", "user_strict"):
        # BUG FIX: stdlib logging does not accept arbitrary keyword arguments
        # for message formatting -- the original ``req_type=requester`` kwarg
        # raised TypeError. Use lazy %-style arguments instead.
        log.warning(
            "Got unknown data request type `%s` for user, deleting anyway", requester
        )
    async with _data_deletion_lock:
        await _config.user_from_id(user_id).clear()
        all_members = await _config.all_members()
        async for guild_id, member_dict in AsyncIter(all_members.items(), steps=100):
            if user_id in member_dict:
                await _config.member_from_ids(guild_id, user_id).clear()
class Account:
    """A single account.
    This class should ONLY be instantiated by the bank itself."""

    def __init__(self, name: str, balance: int, created_at: datetime):
        # Display name recorded for the account holder.
        self.name = name
        # Current balance in credits (always an int after schema 1).
        self.balance = balance
        # Creation time, decoded from the stored timestamp by _decode_time.
        self.created_at = created_at
def _encoded_current_time() -> int:
    """Return the current UTC time encoded as an integer timestamp.

    Returns
    -------
    int
        The current UTC timestamp.
    """
    return _encode_time(datetime.now(timezone.utc))
def _encode_time(time: datetime) -> int:
"""Convert a datetime object to a serializable int.
Parameters
----------
time : datetime.datetime
The datetime to convert.
Returns
-------
int
The timestamp of the datetime object.
"""
ret = int(time.timestamp())
return ret
def _decode_time(time: int) -> datetime:
"""Convert a timestamp to a datetime object.
Parameters
----------
time : int
The timestamp to decode.
Returns
-------
datetime.datetime
The datetime object from the timestamp.
"""
return datetime.utcfromtimestamp(time)
async def get_balance(member: discord.Member) -> int:
    """Return the given member's current account balance.

    Parameters
    ----------
    member : discord.Member
        The member whose balance to look up.

    Returns
    -------
    int
        The member's balance
    """
    account = await get_account(member)
    return account.balance
async def can_spend(member: discord.Member, amount: int) -> bool:
    """Check whether a member's balance covers ``amount``.

    Parameters
    ----------
    member : discord.Member
        The member wanting to spend.
    amount : int
        The amount the member wants to spend.

    Returns
    -------
    bool
        :code:`True` if the member's balance is at least ``amount``,
        else :code:`False` (negative amounts are never spendable).

    Raises
    ------
    TypeError
        If the amount is not an `int`.
    """
    if not isinstance(amount, int):
        raise TypeError("Amount must be of type int, not {}.".format(type(amount)))
    if _invalid_amount(amount):
        return False
    balance = await get_balance(member)
    return balance >= amount
async def set_balance(member: Union[discord.Member, discord.User], amount: int) -> int:
    """Set an account balance and return the new value.

    Parameters
    ----------
    member : Union[discord.Member, discord.User]
        The account holder.
    amount : int
        The balance to store.

    Returns
    -------
    int
        New account balance.

    Raises
    ------
    ValueError
        If attempting to set the balance to a negative number.
    RuntimeError
        If the bank is guild-specific and a discord.User object is provided.
    BalanceTooHigh
        If attempting to set the balance to a value greater than
        ``bank._MAX_BALANCE``.
    TypeError
        If the amount is not an `int`.
    """
    if not isinstance(amount, int):
        raise TypeError("Amount must be of type int, not {}.".format(type(amount)))
    if amount < 0:
        raise ValueError("Not allowed to have negative balance.")
    guild = getattr(member, "guild", None)
    maximum = await get_max_balance(guild)
    if amount > maximum:
        currency = await get_currency_name(guild)
        raise errors.BalanceTooHigh(
            user=member.display_name, max_balance=maximum, currency_name=currency
        )
    group = _config.user(member) if await is_global() else _config.member(member)
    await group.balance.set(amount)
    # First touch of the account: stamp its creation time and display name.
    if await group.created_at() == 0:
        await group.created_at.set(_encoded_current_time())
    if await group.name() == "":
        await group.name.set(member.display_name)
    return amount
def _invalid_amount(amount: int) -> bool:
    # Negative amounts are never valid for spending/withdrawing/depositing.
    return amount < 0
async def withdraw_credits(member: discord.Member, amount: int) -> int:
    """Deduct ``amount`` credits from a member's account.

    Parameters
    ----------
    member : discord.Member
        The member to withdraw credits from.
    amount : int
        The amount to withdraw.

    Returns
    -------
    int
        New account balance.

    Raises
    ------
    ValueError
        If the withdrawal amount is invalid or if the account has insufficient
        funds.
    TypeError
        If the withdrawal amount is not an `int`.
    """
    if not isinstance(amount, int):
        raise TypeError("Withdrawal amount must be of type int, not {}.".format(type(amount)))
    if _invalid_amount(amount):
        raise ValueError(
            "Invalid withdrawal amount {} < 0".format(
                humanize_number(amount, override_locale="en_US")
            )
        )
    balance = await get_balance(member)
    if amount > balance:
        raise ValueError(
            "Insufficient funds {} > {}".format(
                humanize_number(amount, override_locale="en_US"),
                humanize_number(balance, override_locale="en_US"),
            )
        )
    return await set_balance(member, balance - amount)
async def deposit_credits(member: discord.Member, amount: int) -> int:
    """Add ``amount`` credits to a member's account.

    Parameters
    ----------
    member : discord.Member
        The member to deposit credits to.
    amount : int
        The amount to deposit.

    Returns
    -------
    int
        The new balance.

    Raises
    ------
    ValueError
        If the deposit amount is invalid.
    TypeError
        If the deposit amount is not an `int`.
    """
    if not isinstance(amount, int):
        raise TypeError("Deposit amount must be of type int, not {}.".format(type(amount)))
    if _invalid_amount(amount):
        raise ValueError(
            "Invalid deposit amount {} <= 0".format(
                humanize_number(amount, override_locale="en_US")
            )
        )
    current = await get_balance(member)
    return await set_balance(member, current + amount)
async def transfer_credits(
    from_: Union[discord.Member, discord.User],
    to: Union[discord.Member, discord.User],
    amount: int,
):
    """Transfer a given amount of credits from one account to another.

    Note
    ----
    The withdrawal and deposit happen as two separate operations (not
    atomically); the destination cap is validated first so a failed
    deposit cannot strand withdrawn credits.

    Parameters
    ----------
    from_: Union[discord.Member, discord.User]
        The member to transfer from.
    to : Union[discord.Member, discord.User]
        The member to transfer to.
    amount : int
        The amount to transfer.

    Returns
    -------
    int
        The new balance of the member gaining credits.

    Raises
    ------
    ValueError
        If the amount is invalid or if ``from_`` has insufficient funds.
    TypeError
        If the amount is not an `int`.
    RuntimeError
        If the bank is guild-specific and a discord.User object is provided.
    BalanceTooHigh
        If the balance after the transfer would be greater than
        ``bank._MAX_BALANCE``.
    """
    if not isinstance(amount, int):
        raise TypeError("Transfer amount must be of type int, not {}.".format(type(amount)))
    if _invalid_amount(amount):
        raise ValueError(
            "Invalid transfer amount {} <= 0".format(
                humanize_number(amount, override_locale="en_US")
            )
        )
    # Check the recipient's cap before touching either balance.
    guild = getattr(to, "guild", None)
    max_bal = await get_max_balance(guild)
    if await get_balance(to) + amount > max_bal:
        currency = await get_currency_name(guild)
        raise errors.BalanceTooHigh(
            user=to.display_name, max_balance=max_bal, currency_name=currency
        )
    await withdraw_credits(from_, amount)
    return await deposit_credits(to, amount)
async def wipe_bank(guild: Optional[discord.Guild] = None) -> None:
    """Delete every account from the bank.

    Parameters
    ----------
    guild : discord.Guild
        The guild to clear accounts for. If unsupplied and the bank is
        per-server, all accounts in every guild will be wiped.
    """
    if not await is_global():
        await _config.clear_all_members(guild)
    else:
        await _config.clear_all_users()
async def bank_prune(bot: Red, guild: discord.Guild = None, user_id: int = None) -> None:
    """Prune bank accounts from the bank.

    Parameters
    ----------
    bot : Red
        The bot.
    guild : discord.Guild
        The guild to prune. This is required if the bank is set to local.
    user_id : int
        The id of the user whose account will be pruned.
        If supplied this will prune only this user's bank account
        otherwise it will prune all invalid users from the bank.

    Raises
    ------
    BankPruneError
        If guild is :code:`None` and the bank is Local.
    """
    global_bank = await is_global()
    if global_bank:
        _guilds = set()
        _uguilds = set()
        if user_id is None:
            # Collect large, unchunked guilds (chunked below so member lists
            # are complete) and unavailable guilds (whose members must be
            # kept, since we cannot see them right now).
            async for g in AsyncIter(bot.guilds, steps=100):
                if not g.unavailable and g.large and not g.chunked:
                    _guilds.add(g)
                elif g.unavailable:
                    _uguilds.add(g)
        group = _config._get_base_group(_config.USER)
    else:
        if guild is None:
            raise BankPruneError("'guild' can't be None when pruning a local bank")
        if user_id is None:
            _guilds = {guild} if not guild.unavailable and guild.large else set()
            _uguilds = {guild} if guild.unavailable else set()
        group = _config._get_base_group(_config.MEMBER, str(guild.id))
    if user_id is None:
        # Ensure member caches are populated before computing the keep-list.
        for _guild in _guilds:
            await _guild.chunk()
        accounts = await group.all()
        tmp = accounts.copy()
        members = bot.get_all_members() if global_bank else guild.members
        # Accounts to keep: every visible member not in an unavailable guild.
        user_list = {str(m.id) for m in members if m.guild not in _uguilds}
    async with group.all() as bank_data:  # FIXME: use-config-bulk-update
        if user_id is None:
            for acc in tmp:
                if acc not in user_list:
                    del bank_data[acc]
        else:
            user_id = str(user_id)
            if user_id in bank_data:
                del bank_data[user_id]
async def get_leaderboard(positions: int = None, guild: discord.Guild = None) -> List[tuple]:
    """
    Return the bank's leaderboard, highest balance first.

    Parameters
    ----------
    positions : `int`
        Maximum number of entries to return; ``None`` returns all of them.
    guild : discord.Guild
        With a global bank, restricts the board to this guild's members.

    Returns
    -------
    `list` of `tuple`
        The sorted leaderboard in the form of :code:`(user_id, raw_account)`

    Raises
    ------
    TypeError
        If the bank is guild-specific and no guild was specified
    """
    if await is_global():
        raw_accounts = await _config.all_users()
        if guild is not None:
            # Keep only accounts belonging to current members of the guild.
            raw_accounts = {
                uid: data for uid, data in raw_accounts.items() if guild.get_member(uid)
            }
    else:
        if guild is None:
            raise TypeError("Expected a guild, got NoneType object instead!")
        raw_accounts = await _config.all_members(guild)
    ranked = sorted(raw_accounts.items(), key=lambda entry: entry[1]["balance"], reverse=True)
    return ranked if positions is None else ranked[:positions]
async def get_leaderboard_position(
    member: Union[discord.User, discord.Member]
) -> Union[int, None]:
    """
    Get the leaderboard position for the specified user

    Parameters
    ----------
    member : `discord.User` or `discord.Member`
        The user to get the leaderboard position of

    Returns
    -------
    `int`
        The position of the user on the leaderboard, or ``None`` if the
        user is not on it

    Raises
    ------
    TypeError
        If the bank is currently guild-specific and a `discord.User` object was passed in
    """
    if await is_global():
        guild = None
    else:
        guild = getattr(member, "guild", None)
    # The previous ``try/except TypeError: raise`` wrapper here was a no-op;
    # get_leaderboard's TypeError simply propagates to the caller.
    leaderboard = await get_leaderboard(None, guild)
    # Positions are 1-based; entries are (position, (user_id, raw_account)).
    pos = discord.utils.find(lambda x: x[1][0] == member.id, enumerate(leaderboard, 1))
    if pos is None:
        return None
    return pos[0]
async def get_account(member: Union[discord.Member, discord.User]) -> Account:
    """Get the appropriate account for the given user or member.
    A member is required if the bank is currently guild specific.

    Parameters
    ----------
    member : `discord.User` or `discord.Member`
        The user whose account to get.

    Returns
    -------
    Account
        The user's account.
    """
    if await is_global():
        all_accounts = await _config.all_users()
    else:
        all_accounts = await _config.all_members(member.guild)
    if member.id not in all_accounts:
        # No stored account yet: synthesize a default one (not persisted here;
        # set_balance creates the stored record on first write).
        acc_data = {"name": member.display_name, "created_at": _DEFAULT_MEMBER["created_at"]}
        try:
            acc_data["balance"] = await get_default_balance(member.guild)
        except AttributeError:
            # discord.User has no .guild attribute -> use the global default.
            acc_data["balance"] = await get_default_balance()
    else:
        acc_data = all_accounts[member.id]
    acc_data["created_at"] = _decode_time(acc_data["created_at"])
    return Account(**acc_data)
async def is_global() -> bool:
    """Tell whether the bank is currently in global mode.

    Returns
    -------
    bool
        :code:`True` if the bank is global, otherwise :code:`False`.
    """
    global _cache_is_global
    cached = _cache_is_global
    if cached is None:
        cached = await _config.is_global()
        _cache_is_global = cached
    return cached
async def set_global(global_: bool) -> bool:
    """Switch the bank between global and per-guild mode.

    .. important::
        All accounts are reset when you switch!

    Parameters
    ----------
    global_ : bool
        :code:`True` will set bank to global mode.

    Returns
    -------
    bool
        New bank mode, :code:`True` is global.

    Raises
    ------
    RuntimeError
        If bank is becoming global and a `discord.Member` was not provided.
    """
    global _cache_is_global
    if (await is_global()) is global_:
        return global_
    # Leaving a mode wipes that mode's accounts before flipping the switch.
    if await is_global():
        await _config.clear_all_users()
    else:
        await _config.clear_all_members()
    await _config.is_global.set(global_)
    _cache_is_global = global_
    return global_
async def get_bank_name(guild: discord.Guild = None) -> str:
    """Return the bank's name.

    Parameters
    ----------
    guild : `discord.Guild`, optional
        The guild to get the bank name for (required if bank is
        guild-specific).

    Returns
    -------
    str
        The bank's name.

    Raises
    ------
    RuntimeError
        If the bank is guild-specific and guild was not provided.
    """
    if not await is_global():
        if guild is None:
            raise RuntimeError("Guild parameter is required and missing.")
        return await _config.guild(guild).bank_name()
    # Global mode: lazily fill and serve the module-level cache.
    if _cache["bank_name"] is None:
        _cache["bank_name"] = await _config.bank_name()
    return _cache["bank_name"]
async def set_bank_name(name: str, guild: discord.Guild = None) -> str:
    """Rename the bank.

    Parameters
    ----------
    name : str
        The new name for the bank.
    guild : `discord.Guild`, optional
        The guild to set the bank name for (required if bank is
        guild-specific).

    Returns
    -------
    str
        The new name for the bank.

    Raises
    ------
    RuntimeError
        If the bank is guild-specific and guild was not provided.
    """
    if await is_global():
        await _config.bank_name.set(name)
        _cache["bank_name"] = name
        return name
    if guild is None:
        raise RuntimeError("Guild must be provided if setting the name of a guild-specific bank.")
    await _config.guild(guild).bank_name.set(name)
    return name
async def get_currency_name(guild: discord.Guild = None) -> str:
    """Return the bank's currency name.

    Parameters
    ----------
    guild : `discord.Guild`, optional
        The guild to get the currency name for (required if bank is
        guild-specific).

    Returns
    -------
    str
        The currency name.

    Raises
    ------
    RuntimeError
        If the bank is guild-specific and guild was not provided.
    """
    if not await is_global():
        if guild is None:
            raise RuntimeError("Guild must be provided.")
        return await _config.guild(guild).currency()
    # Global mode: lazily fill and serve the module-level cache.
    if _cache["currency"] is None:
        _cache["currency"] = await _config.currency()
    return _cache["currency"]
async def set_currency_name(name: str, guild: discord.Guild = None) -> str:
    """Rename the bank's currency.

    Parameters
    ----------
    name : str
        The new name for the currency.
    guild : `discord.Guild`, optional
        The guild to set the currency name for (required if bank is
        guild-specific).

    Returns
    -------
    str
        The new name for the currency.

    Raises
    ------
    RuntimeError
        If the bank is guild-specific and guild was not provided.
    """
    if await is_global():
        await _config.currency.set(name)
        _cache["currency"] = name
        return name
    if guild is None:
        raise RuntimeError(
            "Guild must be provided if setting the currency name of a guild-specific bank."
        )
    await _config.guild(guild).currency.set(name)
    return name
async def get_max_balance(guild: discord.Guild = None) -> int:
    """Return the maximum balance the bank allows.

    Parameters
    ----------
    guild : `discord.Guild`, optional
        The guild to get the max balance for (required if bank is
        guild-specific).

    Returns
    -------
    int
        The maximum allowed balance.

    Raises
    ------
    RuntimeError
        If the bank is guild-specific and guild was not provided.
    """
    if not await is_global():
        if guild is None:
            raise RuntimeError("Guild must be provided.")
        return await _config.guild(guild).max_balance()
    # Global mode: lazily fill and serve the module-level cache.
    if _cache["max_balance"] is None:
        _cache["max_balance"] = await _config.max_balance()
    return _cache["max_balance"]
async def set_max_balance(amount: int, guild: discord.Guild = None) -> int:
    """Change the maximum balance the bank allows.

    Parameters
    ----------
    amount : int
        The new maximum balance.
    guild : `discord.Guild`, optional
        The guild to set the max balance for (required if bank is
        guild-specific).

    Returns
    -------
    int
        The new maximum balance.

    Raises
    ------
    RuntimeError
        If the bank is guild-specific and guild was not provided.
    ValueError
        If the amount is less than 0 or higher than 2 ** 63 - 1.
    TypeError
        If the amount is not an `int`.
    """
    if not isinstance(amount, int):
        raise TypeError("Amount must be of type int, not {}.".format(type(amount)))
    if not (0 < amount <= _MAX_BALANCE):
        raise ValueError(
            "Amount must be greater than zero and less than {max}.".format(
                max=humanize_number(_MAX_BALANCE, override_locale="en_US")
            )
        )
    if await is_global():
        await _config.max_balance.set(amount)
        _cache["max_balance"] = amount
        return amount
    if guild is None:
        raise RuntimeError(
            "Guild must be provided if setting the maximum balance of a guild-specific bank."
        )
    await _config.guild(guild).max_balance.set(amount)
    return amount
async def get_default_balance(guild: discord.Guild = None) -> int:
    """Return the balance newly created accounts start with.

    Parameters
    ----------
    guild : `discord.Guild`, optional
        The guild to get the default balance for (required if bank is
        guild-specific).

    Returns
    -------
    int
        The bank's default balance.

    Raises
    ------
    RuntimeError
        If the bank is guild-specific and guild was not provided.
    """
    if not await is_global():
        if guild is None:
            raise RuntimeError("Guild is missing and required!")
        return await _config.guild(guild).default_balance()
    # Global mode: lazily fill and serve the module-level cache.
    if _cache["default_balance"] is None:
        _cache["default_balance"] = await _config.default_balance()
    return _cache["default_balance"]
async def set_default_balance(amount: int, guild: discord.Guild = None) -> int:
    """Change the balance newly created accounts start with.

    Parameters
    ----------
    amount : int
        The new default balance.
    guild : `discord.Guild`, optional
        The guild to set the default balance for (required if bank is
        guild-specific).

    Returns
    -------
    int
        The new default balance.

    Raises
    ------
    RuntimeError
        If the bank is guild-specific and guild was not provided.
    ValueError
        If the amount is less than 0 or higher than the max allowed balance.
    TypeError
        If the amount is not an `int`.
    """
    if not isinstance(amount, int):
        raise TypeError("Amount must be of type int, not {}.".format(type(amount)))
    maximum = await get_max_balance(guild)
    if not (0 <= amount <= maximum):
        raise ValueError(
            "Amount must be greater than or equal zero and less than or equal {max}.".format(
                max=humanize_number(maximum, override_locale="en_US")
            )
        )
    if await is_global():
        await _config.default_balance.set(amount)
        _cache["default_balance"] = amount
        return amount
    if guild is None:
        raise RuntimeError("Guild is missing and required.")
    await _config.guild(guild).default_balance.set(amount)
    return amount
class AbortPurchase(Exception):
    """Raised inside a ``@cost``-decorated command to abort the purchase;
    the decorator consumes it, refunds the cost, and shows nothing to users."""

    pass
def cost(amount: int):
    """
    Decorates a coroutine-function or command to have a cost.

    If the command raises an exception, the cost will be refunded.

    You can intentionally refund by raising `AbortPurchase`
    (this error will be consumed and not show to users)

    Other exceptions will propagate and will be handled by Red's (and/or
    any other configured) error handling.

    Parameters
    ----------
    amount : int
        The number of credits withdrawn before the wrapped callable runs;
        must be a non-negative integer.

    Raises
    ------
    ValueError
        If ``amount`` is not a non-negative `int` (raised at decoration time).
    """
    # TODO: Add documentation for input/output/exceptions
    if not isinstance(amount, int) or amount < 0:
        raise ValueError("This decorator requires an integer cost greater than or equal to zero")

    def deco(coro_or_command):
        # Accept either a Command object or a bare coroutine function.
        is_command = isinstance(coro_or_command, commands.Command)
        if not is_command and not asyncio.iscoroutinefunction(coro_or_command):
            raise TypeError("@bank.cost() can only be used on commands or `async def` functions")
        coro = coro_or_command.callback if is_command else coro_or_command

        @wraps(coro)
        async def wrapped(*args, **kwargs):
            # Find the invocation context among the positional args.
            # NOTE(review): assumes a commands.Context is always present; if
            # none is found, `context` stays None and `context.guild` below
            # raises AttributeError -- confirm intended for bare coroutines.
            context: commands.Context = None
            for arg in args:
                if isinstance(arg, commands.Context):
                    context = arg
                    break
            if not context.guild and not await is_global():
                raise commands.UserFeedbackCheckFailure(
                    _("Can't pay for this command in DM without a global bank.")
                )
            # Charge up front; refund on AbortPurchase or any failure below.
            try:
                await withdraw_credits(context.author, amount)
            except Exception:
                credits_name = await get_currency_name(context.guild)
                raise commands.UserFeedbackCheckFailure(
                    _("You need at least {cost} {currency} to use this command.").format(
                        cost=humanize_number(amount), currency=credits_name
                    )
                )
            else:
                try:
                    return await coro(*args, **kwargs)
                except AbortPurchase:
                    await deposit_credits(context.author, amount)
                except Exception:
                    await deposit_credits(context.author, amount)
                    raise

        if not is_command:
            return wrapped
        else:
            wrapped.__module__ = coro_or_command.callback.__module__
            coro_or_command.callback = wrapped
            return coro_or_command

    return deco
|
1.5 In this policy, “we”, “us” and “our” refer to Simplex IT Limited. For more information about us, see Section 13.
3.12 We may process any of your personal data identified in this policy where necessary for the establishment, exercise or defence of legal claims, whether in court proceedings or in an administrative or out-of-court procedure. The legal basis for this processing is our legitimate interests, namely the protection and assertion of our legal rights, your legal rights and the legal rights of others.
6.3 We will keep your personal data for the duration of the period you are a customer of Simplex IT Ltd. We shall retain your data only for as long as necessary in accordance with applicable laws.
13.2 We are registered in England and Wales under registration number 7769491, and our registered office is at 63 Macrae Road, Eden Office Park, Ham Green, Bristol, BS20 0DD.
13.3 Our principal place of business is at 63 Macrae Road, Eden Office Park, Ham Green, Bristol, BS20 0DD.
14.1 Our data protection officer is Ray Mordy, who can be contacted at info@simplexit.co.uk.
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RNcdf4(RPackage):
    """Provides a high-level R interface to data files written using Unidata's
    netCDF library (version 4 or earlier), which are binary data files that are
    portable across platforms and include metadata information in addition to
    the data sets. Using this package, netCDF files (either version 4 or
    "classic" version 3) can be opened and data sets read in easily. It is also
    easy to create new netCDF dimensions, variables, and files, in either
    version 3 or 4 format, and manipulate existing netCDF files. This package
    replaces the former ncdf package, which only worked with netcdf version 3
    files. For various reasons the names of the functions have had to be
    changed from the names in the ncdf package. The old ncdf package is still
    available at the URL given below, if you need to have backward
    compatibility. It should be possible to have both the ncdf and ncdf4
    packages installed simultaneously without a problem. However, the ncdf
    package does not provide an interface for netcdf version 4 files."""

    homepage = "http://cirrus.ucsd.edu/~pierce/ncdf"
    url = "https://cloud.r-project.org/src/contrib/ncdf4_1.15.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/ncdf4"

    # sha256 checksums of the CRAN source tarballs.
    version('1.16.1', sha256='0dde2d6d1e8474f4abd15a61af8a2f7de564f13da00f1a01d7a479ab88587a20')
    version('1.16', sha256='edd5731a805bbece3a8f6132c87c356deafc272351e1dd07256ca00574949253')
    version('1.15', sha256='d58298f4317c6c80a041a70216126492fd09ba8ecde9da09d5145ae26f324d4d')

    # Needs the netCDF C library, version 4.1 or newer.
    depends_on('netcdf-c@4.1:')
|
If you work /intend to work in the field of humanitarian action, you can apply to the NOHA+ Erasmus Mundus Joint Master’s Degree Program to pursue your master degree in international humanitarian action.
The main purpose of this program is to educate highly committed and interdisciplinary students who are working, or intend to work, at all levels of humanitarian action; it aims to help them enhance humanitarian assistance and sustainable action, develop a global and comprehensive vision of conflict, disaster and related situations, and strengthen their ability to respond to new humanitarian challenges.
Number of scholarships: 19 (4 for program countries, 14 for partner countries, 1 for participating countries listed below).
follow and apply according to the program procedure.
Check the completed list of participating countries HERE.
Applications will be reviewed completely by the secretariat of coordinating and partner universities.
|
# -*- coding: utf-8 -*-
import os, sys
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from spectrocrunch.materials.compoundfromformula import compoundfromformula
from spectrocrunch.materials.compoundfromname import compoundfromname
from spectrocrunch.materials.mixture import mixture
from spectrocrunch.materials.types import fraction
from spectrocrunch.simulation import calcnoise
from spectrocrunch.simulation import materials
from spectrocrunch.math import noisepropagation
import numpy as np
import scipy.optimize
import matplotlib.pyplot as plt
class sample(object):
    """Base class for simulated flat-field XANES measurements.

    Subclasses must set ``self.composition`` to a multilayer material
    (see ``materials.factory``) before the measurement helpers are used.
    """

    @staticmethod
    def getnframes(totaltime, frametime, fracflat):
        """Split a total exposure time into data and flat-field frame counts.

        Parameters
        ----------
        totaltime : float
            Total measurement time.
        frametime : float
            Exposure time of a single frame.
        fracflat : float
            Fraction of frames reserved for flats (split before/after).

        Returns
        -------
        tuple
            ``(ndata, nflat)``; at least 1 data frame and 2 flat frames.
        """
        n = int(round(totaltime / frametime))
        nflat = max(int(round(fracflat * n / 2.0)), 1)
        nflat *= 2  # before and after
        ndata = max(n - nflat, 1)
        return ndata, nflat

    @staticmethod
    def getrealtime(totaltime, frametime, fracflat):
        """Return the wall-clock duration of a measurement, overhead included."""
        # BUG FIX: this is a @staticmethod, so ``self`` does not exist here
        # (the original ``self.getnframes`` raised NameError when called);
        # invoke the sibling staticmethod through the class instead.
        ndata, nflat = sample.getnframes(totaltime, frametime, fracflat)
        n = ndata + nflat
        # Empirical acquisition overhead (seconds) as a function of frame count.
        overhead = 6.50305 + 0.0131498 * n
        return frametime * n + overhead

    def xanes(
        self, I0, energy, totaltime=None, frametime=None, fracflat=None, ndark=None
    ):
        """Simulate a XANES absorbance scan; return (signal, noise) arrays."""
        ndata, nflat = self.getnframes(totaltime, frametime, fracflat)
        energy = np.asarray(energy)
        N, N0, D, D0 = calcnoise.id21_ffnoise(
            I0,
            energy,
            self.composition,
            tframe_data=frametime,
            nframe_data=ndata,
            tframe_flat=frametime,
            nframe_flat=nflat,
            nframe_dark=ndark,
        )
        T = calcnoise.transmission(
            N,
            N0,
            D=D,
            D0=D0,
            tframe_data=frametime,
            nframe_data=ndata,
            tframe_flat=frametime,
            nframe_flat=nflat,
            nframe_dark=ndark,
        )
        XAS = calcnoise.absorbance(T)
        signal = noisepropagation.E(XAS)
        noise = noisepropagation.S(XAS)
        return signal, noise

    def costfunc(self, I0, energy, **kwargs):
        """Noise-to-edge-jump ratio of the simulated scan (lower is better)."""
        signal, noise = self.xanes(I0, energy, **kwargs)
        # return np.max(noise/signal*100)
        return np.mean(noise) / (signal[-1] - signal[0])

    def __str__(self):
        return str(self.composition)

    def plotxanesnoise(self, I0, energy, **kwargs):
        """Plot the relative noise (N/S in %) of the simulated scan."""
        signal, noise = self.xanes(I0, energy, **kwargs)
        plt.plot(energy, noise / signal * 100)
        plt.xlabel("Energy (keV)")
        plt.ylabel("N/S (%)")

    def plotxanes(self, I0, energy, **kwargs):
        """Plot the simulated absorbance spectrum."""
        signal, _ = self.xanes(I0, energy, **kwargs)
        plt.plot(energy, signal)
        plt.xlabel("Energy (keV)")
        plt.ylabel("Absorbance")
class sample_hg115(sample):
def __init__(self, wpigment=10, paintthickness=10):
binder = compoundfromname("linseed oil")
pigment = compoundfromname("verdigris")
paint = mixture(
[binder, pigment], [1 - wpigment / 100.0, wpigment / 100.0], fraction.mass
)
ultralene = compoundfromname("ultralene")
sfreetape = compoundfromname("sulfur-free tape")
# ultralene = compoundfromname("vacuum")
# sfreetape = compoundfromname("vacuum")
m = [ultralene, paint, sfreetape]
thickness = [4, paintthickness, 10]
# m = [compoundfromname("vacuum"),compoundfromname("vacuum"),compoundfromname("vacuum")]
self.composition = materials.factory(
"Multilayer",
material=m,
thickness=thickness,
anglein=0,
angleout=0,
azimuth=0,
)
self.paintindex = 1
def set_wpigment(self, wpigment):
w = self.composition.material[self.paintindex].massfractions()
w["verdigris"] = wpigment / 100.0
w["linseed oil"] = 1 - wpigment / 100.0
self.composition.material[self.paintindex].change_fractions(w, fraction.mass)
def get_wpigment(self):
return (
self.composition.material[self.paintindex].massfractions()["verdigris"]
* 100
)
def set_paintthickness(self, paintthickness):
self.composition.thickness[self.paintindex] = paintthickness
def get_paintthickness(self):
return self.composition.thickness[self.paintindex]
def optimize_thickness(self, I0, energy, **kwargs):
def costfunc(paintthickness):
self.set_paintthickness(paintthickness[0])
c = self.costfunc(I0, energy, **kwargs)
return c
guess = self.get_paintthickness()
result = scipy.optimize.least_squares(costfunc, guess, gtol=1e-015, ftol=1e-015)
print result.message
return result.x[0], result.success
def optimize_wpigment(self, I0, energy, **kwargs):
def costfunc(wpigment):
self.set_wpigment(wpigment[0])
c = self.costfunc(I0, energy, **kwargs)
return c
guess = self.get_wpigment()
result = scipy.optimize.least_squares(
costfunc, guess, bounds=([0, 100]), gtol=1e-015, ftol=1e-015
)
print result.message
return result.x[0], result.success
def optimize_thickness_plot(self, I0, energy, **kwargs):
thickness = self.get_paintthickness()
t = np.linspace(max(thickness - 100, 0), thickness + 100, 50)
r = np.zeros(len(t))
for i, paintthickness in enumerate(t):
self.set_paintthickness(paintthickness)
r[i] = self.costfunc(I0, energy, **kwargs)
self.set_paintthickness(thickness)
plt.plot(t, 1 / r, "-o", label="{} %".format(self.get_wpigment()))
plt.xlabel("thickness ($\mu$m)")
plt.ylabel("Jump-to-noise")
def optimize_wpigment_plot(self, I0, energy, **kwargs):
w = self.get_wpigment()
t = np.linspace(0, 20, 50)
r = np.zeros(len(t))
for i, wpigment in enumerate(t):
self.set_wpigment(wpigment)
r[i] = self.costfunc(I0, energy, **kwargs)
self.set_wpigment(w)
plt.plot(t, 1 / r, "-o", label="{} $\mu$m".format(self.get_paintthickness()))
plt.xlabel("Verdigris (%)")
plt.ylabel("Jump-to-noise")
    def optimize(self, I0, energy, **kwargs):
        """Jointly optimize pigment content (wt%) and paint thickness.

        :returns: tuple ``(x, success_flag)`` where ``x`` is
            ``[wpigment, thickness]``.
        """
        def costfunc(p):
            # p = [wpigment, paintthickness]
            self.set_wpigment(p[0])
            self.set_paintthickness(p[1])
            return self.costfunc(I0, energy, **kwargs)
        guess = (self.get_wpigment(), self.get_paintthickness())
        result = scipy.optimize.least_squares(
            costfunc, guess, bounds=([0, 0], [100, 1e6]), gtol=1e-015
        )
        print result.message
        return result.x, result.success
def hg115_ff():
    """Full-field XANES optimization example for the hg115 sample.

    Depending on ``opt`` it optimizes either the paint thickness (opt == 0),
    the pigment content (opt == 1) or both jointly (otherwise), prints the
    result, then plots jump-to-noise curves for several thicknesses.
    """
    sample = sample_hg115()
    I0 = 1e6
    energy = np.linspace(8.9, 9.3, 100)
    totaltime = 70
    frametime = 0.07
    fracflat = 1 / 3.0
    ndark = 30
    kwargs = {
        "totaltime": totaltime,
        "frametime": frametime,
        "fracflat": fracflat,
        "ndark": ndark,
    }
    opt = 1
    # Two energies used for the optimization (presumably bracketing the
    # absorption edge — TODO confirm against sample_hg115).
    energyopt = [8.97, 9]
    if opt == 0:
        sample.set_wpigment(10)
        t, s = sample.optimize_thickness(I0, energyopt, **kwargs)
        sample.set_paintthickness(t)
    elif opt == 1:
        sample.set_paintthickness(20)
        w, s = sample.optimize_wpigment(I0, energyopt, **kwargs)
        sample.set_wpigment(w)
    else:
        wt, s = sample.optimize(I0, energy, **kwargs)
        sample.set_wpigment(wt[0])
        sample.set_paintthickness(wt[1])
    print "Thickness = {} μm".format(sample.get_paintthickness())
    print "Verdigris = {} wt%".format(sample.get_wpigment())
    print "Jump to noise = {}".format(1 / sample.costfunc(I0, energyopt, **kwargs))
    print ""
    plt.figure()
    for thickness in [10, 15, 20]:
        sample.set_paintthickness(thickness)
        sample.optimize_wpigment_plot(I0, energy, **kwargs)
    plt.legend(loc="best")
    plt.show()
    exit()
    # NOTE(review): everything below is unreachable because of exit() above.
    sample.optimize_thickness_plot(I0, energy, **kwargs)
    sample.optimize_wpigment_plot(I0, energy, **kwargs)
    plt.figure()
    sample.plotxanes(I0, energy, **kwargs)
    plt.figure()
    sample.plotxanesnoise(I0, energy, **kwargs)
    plt.show()
def hg115_xrd():
    """Plot the expected elastic-scattering signal versus paint thickness
    for a pure-verdigris paint layer at a fixed energy (8.5 keV, presumably
    — units follow the other examples; TODO confirm)."""
    sample = sample_hg115()
    energy = 8.5
    sample.set_wpigment(100)
    r = np.linspace(10, 20, 50)
    n = [None] * len(r)
    for i, t in enumerate(r):
        sample.set_paintthickness(t)
        # Expected value of the elastically scattered signal for a Poisson
        # beam of 1e7 incident photons.
        n[i] = noisepropagation.E(
            sample.composition.propagate(
                noisepropagation.poisson(1e7),
                energy,
                interaction=materials.interactionType.elastic,
            )
        )
    # Ratio between thickest and thinnest layer.
    print n[-1] / n[0]
    plt.plot(r, n)
    plt.show()
if __name__ == "__main__":
hg115_ff()
# I0 = 1e5
# energy = np.linspace(3,5,100)
# tframe = 0.07
# nframe = 100
# ndark = 30
# tframe_data=tframe,nframe_data=nframe,\
# tframe_flat=tframe,nframe_flat=nframe,\
# nframe_dark=ndark
|
Using metrics and automation to accelerate the creative process has been my obsession for some time now. I’ve found a generalization between how one builds both games and gardens.
The hypothesis: tightly integrating agile strategies into the core of your project via Automation, Architecture and Analytics produces a hyper-agile software system: one can quickly identify problems and quickly shift both code and process, while also casting meaningful projections of future behaviors from current and historical data.
A cyclic, incremental evolution strategy is discussed: it draws metrics from the current cycle to guide the next, and uses automation to accelerate each cycle. This Measure / Change / Measure model provides better focus on tangible problems than the traditional Guess / Change / Hope model, more predictable progress and manages continual growth. Creativity is enhanced by providing a higher “what ifs per day” factor for each developer, and by having stable code bases and stable play test environments to experiment in.
The integration of several agile development strategies is proposed, ranging from test driven development, short, instrumented sprints, metrics driven development and Kaizen process improvement.
I haven’t finalized the format for this page yet, so here are a few of the blog postings that form the basis for this page.
And the funny/serious side combined: You can live without metrics in the same way that you can live without bottled water, in a land where Montezuma still seeks his revenge.
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from scipy import special
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import tensor_util
class AssertCloseTest(tf.test.TestCase):
  """Tests for distribution_util.assert_close / assert_integer_form."""
  def testAssertCloseIntegerDtype(self):
    # Identical integer inputs pass in either argument order; a differing
    # component triggers the op error.
    x = [1, 5, 10, 15, 20]
    y = x
    z = [2, 5, 10, 15, 20]
    with self.test_session():
      with tf.control_dependencies([distribution_util.assert_close(x, y)]):
        tf.identity(x).eval()
      with tf.control_dependencies([distribution_util.assert_close(y, x)]):
        tf.identity(x).eval()
      with self.assertRaisesOpError("Condition x ~= y"):
        with tf.control_dependencies([distribution_util.assert_close(x, z)]):
          tf.identity(x).eval()
      with self.assertRaisesOpError("Condition x ~= y"):
        with tf.control_dependencies([distribution_util.assert_close(y, z)]):
          tf.identity(y).eval()
  def testAssertCloseNonIntegerDtype(self):
    # A 1e-8 perturbation is within tolerance for float32; a 1.0 difference
    # is not.
    x = np.array([1., 5, 10, 15, 20], dtype=np.float32)
    y = x + 1e-8
    z = [2., 5, 10, 15, 20]
    with self.test_session():
      with tf.control_dependencies([distribution_util.assert_close(x, y)]):
        tf.identity(x).eval()
      with tf.control_dependencies([distribution_util.assert_close(y, x)]):
        tf.identity(x).eval()
      with self.assertRaisesOpError("Condition x ~= y"):
        with tf.control_dependencies([distribution_util.assert_close(x, z)]):
          tf.identity(x).eval()
      with self.assertRaisesOpError("Condition x ~= y"):
        with tf.control_dependencies([distribution_util.assert_close(y, z)]):
          tf.identity(y).eval()
  def testAssertCloseEpsilon(self):
    x = [0., 5, 10, 15, 20]
    # x != y
    y = [0.1, 5, 10, 15, 20]
    # x = z
    z = [1e-8, 5, 10, 15, 20]
    with self.test_session():
      with tf.control_dependencies([distribution_util.assert_close(x, z)]):
        tf.identity(x).eval()
      with self.assertRaisesOpError("Condition x ~= y"):
        with tf.control_dependencies([distribution_util.assert_close(x, y)]):
          tf.identity(x).eval()
      with self.assertRaisesOpError("Condition x ~= y"):
        with tf.control_dependencies([distribution_util.assert_close(y, z)]):
          tf.identity(y).eval()
  def testAssertIntegerForm(self):
    # This should only be detected as an integer.
    x = [1., 5, 10, 15, 20]
    y = [1.1, 5, 10, 15, 20]
    # First component isn't less than float32.eps = 1e-7
    z = [1.0001, 5, 10, 15, 20]
    # This shouldn"t be detected as an integer.
    w = [1e-8, 5, 10, 15, 20]
    with self.test_session():
      with tf.control_dependencies([distribution_util.assert_integer_form(x)]):
        tf.identity(x).eval()
      with self.assertRaisesOpError("x has non-integer components"):
        with tf.control_dependencies([
            distribution_util.assert_integer_form(y)]):
          tf.identity(y).eval()
      with self.assertRaisesOpError("x has non-integer components"):
        with tf.control_dependencies([
            distribution_util.assert_integer_form(z)]):
          tf.identity(z).eval()
      with self.assertRaisesOpError("x has non-integer components"):
        with tf.control_dependencies([
            distribution_util.assert_integer_form(w)]):
          tf.identity(w).eval()
class GetLogitsAndProbTest(tf.test.TestCase):
  """Tests for distribution_util.get_logits_and_prob."""
  def testGetLogitsAndProbImproperArguments(self):
    # Exactly one of `logits` / `p` must be supplied.
    with self.test_session():
      with self.assertRaises(ValueError):
        distribution_util.get_logits_and_prob(logits=None, p=None)
      with self.assertRaises(ValueError):
        distribution_util.get_logits_and_prob(logits=[0.1], p=[0.1])
  def testGetLogitsAndProbLogits(self):
    # Supplying logits yields probabilities that round-trip through logit.
    p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
    logits = special.logit(p)
    with self.test_session():
      new_logits, new_p = distribution_util.get_logits_and_prob(
          logits=logits, validate_args=True)
      self.assertAllClose(p, new_p.eval())
      self.assertAllClose(logits, new_logits.eval())
  def testGetLogitsAndProbLogitsMultidimensional(self):
    # In the multidimensional case the logits are log-probabilities.
    p = np.array([0.2, 0.3, 0.5], dtype=np.float32)
    logits = np.log(p)
    with self.test_session():
      new_logits, new_p = distribution_util.get_logits_and_prob(
          logits=logits, multidimensional=True, validate_args=True)
      self.assertAllClose(new_p.eval(), p)
      self.assertAllClose(new_logits.eval(), logits)
  def testGetLogitsAndProbProbability(self):
    p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
    with self.test_session():
      new_logits, new_p = distribution_util.get_logits_and_prob(
          p=p, validate_args=True)
      self.assertAllClose(special.logit(p), new_logits.eval())
      self.assertAllClose(p, new_p.eval())
  def testGetLogitsAndProbProbabilityMultidimensional(self):
    p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
    with self.test_session():
      new_logits, new_p = distribution_util.get_logits_and_prob(
          p=p, multidimensional=True, validate_args=True)
      self.assertAllClose(np.log(p), new_logits.eval())
      self.assertAllClose(p, new_p.eval())
  def testGetLogitsAndProbProbabilityValidateArgs(self):
    # validate_args=True rejects out-of-range probabilities; with
    # validate_args=False the same inputs pass through unchecked.
    p = [0.01, 0.2, 0.5, 0.7, .99]
    # Component less than 0.
    p2 = [-1, 0.2, 0.5, 0.3, .2]
    # Component greater than 1.
    p3 = [2, 0.2, 0.5, 0.3, .2]
    with self.test_session():
      _, prob = distribution_util.get_logits_and_prob(p=p, validate_args=True)
      prob.eval()
      with self.assertRaisesOpError("Condition x >= 0"):
        _, prob = distribution_util.get_logits_and_prob(
            p=p2, validate_args=True)
        prob.eval()
      _, prob = distribution_util.get_logits_and_prob(p=p2, validate_args=False)
      prob.eval()
      with self.assertRaisesOpError("p has components greater than 1"):
        _, prob = distribution_util.get_logits_and_prob(
            p=p3, validate_args=True)
        prob.eval()
      _, prob = distribution_util.get_logits_and_prob(p=p3, validate_args=False)
      prob.eval()
  def testGetLogitsAndProbProbabilityValidateArgsMultidimensional(self):
    p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
    # Component less than 0. Still sums to 1.
    p2 = np.array([[-.3, 0.4, 0.9], [0.1, 0.5, 0.4]], dtype=np.float32)
    # Component greater than 1. Does not sum to 1.
    p3 = np.array([[1.3, 0.0, 0.0], [0.1, 0.5, 0.4]], dtype=np.float32)
    # Does not sum to 1.
    p4 = np.array([[1.1, 0.3, 0.4], [0.1, 0.5, 0.4]], dtype=np.float32)
    with self.test_session():
      _, prob = distribution_util.get_logits_and_prob(
          p=p, multidimensional=True)
      prob.eval()
      with self.assertRaisesOpError("Condition x >= 0"):
        _, prob = distribution_util.get_logits_and_prob(
            p=p2, multidimensional=True, validate_args=True)
        prob.eval()
      _, prob = distribution_util.get_logits_and_prob(
          p=p2, multidimensional=True, validate_args=False)
      prob.eval()
      with self.assertRaisesOpError(
          "(p has components greater than 1|p does not sum to 1)"):
        _, prob = distribution_util.get_logits_and_prob(
            p=p3, multidimensional=True, validate_args=True)
        prob.eval()
      _, prob = distribution_util.get_logits_and_prob(
          p=p3, multidimensional=True, validate_args=False)
      prob.eval()
      with self.assertRaisesOpError("p does not sum to 1"):
        _, prob = distribution_util.get_logits_and_prob(
            p=p4, multidimensional=True, validate_args=True)
        prob.eval()
      _, prob = distribution_util.get_logits_and_prob(
          p=p4, multidimensional=True, validate_args=False)
      prob.eval()
class LogCombinationsTest(tf.test.TestCase):
  """Tests for distribution_util.log_combinations."""
  def testLogCombinationsBinomial(self):
    n = [2, 5, 12, 15]
    k = [1, 2, 4, 11]
    # Reference values: log of the binomial coefficient from scipy.
    log_combs = np.log(special.binom(n, k))
    with self.test_session():
      n = np.array(n, dtype=np.float32)
      counts = [[1., 1], [2., 3], [4., 8], [11, 4]]
      log_binom = distribution_util.log_combinations(n, counts)
      self.assertEqual([4], log_binom.get_shape())
      self.assertAllClose(log_combs, log_binom.eval())
  def testLogCombinationsShape(self):
    # Shape [2, 2]
    n = [[2, 5], [12, 15]]
    with self.test_session():
      n = np.array(n, dtype=np.float32)
      # Shape [2, 2, 4]
      counts = [[[1., 1, 0, 0], [2., 2, 1, 0]], [[4., 4, 1, 3], [10, 1, 1, 4]]]
      log_binom = distribution_util.log_combinations(n, counts)
      self.assertEqual([2, 2], log_binom.get_shape())
class RotateTransposeTest(tf.test.TestCase):
  """Tests for distribution_util.rotate_transpose."""
  def _np_rotate_transpose(self, x, shift):
    """Numpy reference: transpose with axes rolled by `shift`."""
    if not isinstance(x, np.ndarray):
      x = np.array(x)
    return np.transpose(x, np.roll(np.arange(len(x.shape)), shift))
  def testRollStatic(self):
    # Statically-shaped inputs for ranks 1-3 and shifts -5..4.
    with self.test_session():
      with self.assertRaisesRegexp(
          ValueError, "None values not supported."):
        distribution_util.rotate_transpose(None, 1)
      for x in (np.ones(1), np.ones((2, 1)), np.ones((3, 2, 1))):
        for shift in np.arange(-5, 5):
          y = distribution_util.rotate_transpose(x, shift)
          self.assertAllEqual(self._np_rotate_transpose(x, shift),
                              y.eval())
          self.assertAllEqual(np.roll(x.shape, shift),
                              y.get_shape().as_list())
  def testRollDynamic(self):
    # Same check with fully dynamic shapes fed through placeholders.
    with self.test_session() as sess:
      x = tf.placeholder(tf.float32)
      shift = tf.placeholder(tf.int32)
      for x_value in (np.ones(1, dtype=x.dtype.as_numpy_dtype()),
                      np.ones((2, 1), dtype=x.dtype.as_numpy_dtype()),
                      np.ones((3, 2, 1), dtype=x.dtype.as_numpy_dtype())):
        for shift_value in np.arange(-5, 5):
          self.assertAllEqual(
              self._np_rotate_transpose(x_value, shift_value),
              sess.run(distribution_util.rotate_transpose(x, shift),
                       feed_dict={x: x_value, shift: shift_value}))
class PickVectorTest(tf.test.TestCase):
  """Tests for distribution_util.pick_vector."""
  def testCorrectlyPicksVector(self):
    # A true condition selects the first vector, false selects the second;
    # with a constant condition the result is returned without eval.
    with self.test_session():
      x = np.arange(10, 12)
      y = np.arange(15, 18)
      self.assertAllEqual(
          x, distribution_util.pick_vector(
              tf.less(0, 5), x, y).eval())
      self.assertAllEqual(
          y, distribution_util.pick_vector(
              tf.less(5, 0), x, y).eval())
      self.assertAllEqual(
          x, distribution_util.pick_vector(
              tf.constant(True), x, y))  # No eval.
      self.assertAllEqual(
          y, distribution_util.pick_vector(
              tf.constant(False), x, y))  # No eval.
class FillLowerTriangularTest(tf.test.TestCase):
  """Tests for distribution_util.fill_lower_triangular."""
  def setUp(self):
    # Fixed seed keeps the random fixtures reproducible across runs.
    self._rng = np.random.RandomState(42)
  def _fill_lower_triangular(self, x):
    """Numpy implementation of `fill_lower_triangular`."""
    x = np.asarray(x)
    d = x.shape[-1]
    # d = n(n+1)/2 implies n is:
    n = int(0.5 * (math.sqrt(1. + 8. * d) - 1.))
    ids = np.tril_indices(n)
    y = np.zeros(list(x.shape[:-1]) + [n, n], dtype=x.dtype)
    y[..., ids[0], ids[1]] = x
    return y
  def testCorrectlyMakes1x1LowerTril(self):
    # Batch of three length-1 vectors -> three 1x1 matrices.
    with self.test_session():
      x = tf.convert_to_tensor(self._rng.randn(3, 1))
      expected = self._fill_lower_triangular(tensor_util.constant_value(x))
      actual = distribution_util.fill_lower_triangular(x, validate_args=True)
      self.assertAllEqual(expected.shape, actual.get_shape())
      self.assertAllEqual(expected, actual.eval())
  def testCorrectlyMakesNoBatchLowerTril(self):
    # Length-10 vector -> 4x4 lower-triangular matrix (10 = 4*5/2).
    with self.test_session():
      x = tf.convert_to_tensor(self._rng.randn(10))
      expected = self._fill_lower_triangular(tensor_util.constant_value(x))
      actual = distribution_util.fill_lower_triangular(x, validate_args=True)
      self.assertAllEqual(expected.shape, actual.get_shape())
      self.assertAllEqual(expected, actual.eval())
      # Gradient is 1 exactly on the lower-triangular positions.
      g = tf.gradients(distribution_util.fill_lower_triangular(x), x)
      self.assertAllEqual(np.tri(4).reshape(-1), g[0].values.eval())
  def testCorrectlyMakesBatchLowerTril(self):
    with self.test_session():
      x = tf.convert_to_tensor(self._rng.randn(2, 2, 6))
      expected = self._fill_lower_triangular(tensor_util.constant_value(x))
      actual = distribution_util.fill_lower_triangular(x, validate_args=True)
      self.assertAllEqual(expected.shape, actual.get_shape())
      self.assertAllEqual(expected, actual.eval())
      self.assertAllEqual(
          np.ones((2, 2, 6)),
          tf.gradients(distribution_util.fill_lower_triangular(
              x), x)[0].eval())
class GenNewSeedTest(tf.test.TestCase):
  """Tests for distribution_util.gen_new_seed."""
  def testOnlyNoneReturnsNone(self):
    # A concrete seed yields a concrete new seed; a None seed stays None.
    self.assertFalse(distribution_util.gen_new_seed(0, "salt") is None)
    self.assertTrue(distribution_util.gen_new_seed(None, "salt") is None)
if __name__ == "__main__":
tf.test.main()
|
Experts say that one of the top 10 most stressful things to go through in modern life is “buying or selling a property.” That’s where I step in. As your realtor, I work purposefully and tirelessly to make your real estate experience a joyful one.
I am part of a team that has over 35 years experience in helping buyers find their dream home and investors find a profitable vacation cabin. In our office we work together to create solutions to overcome hurdles and produce win-win-win transactions.
We are your local experts, CALL US FIRST!
|
#!/usr/bin/python
import sys
import pymongo
def main(args):
    """Demo of basic pymongo usage: insert, update, query, and cleanup.

    Connects to a local MongoDB server, populates an ``adventurers``
    collection, equips one document via ``$set``, selects the party with a
    ``$gte`` query, prints the roster, and drops the collection again.

    :param args: unused command-line arguments (kept for symmetry with the
        ``sys.argv`` slicing at the call site).
    """
    # If your database server is running in auth mode, you will need user and
    # database info. Ex:
    # mongodb_uri = 'mongodb://username:password@localhost:27017/dbname'
    #
    mongodb_uri = 'mongodb://localhost:27017'
    db_name = 'roadhouse'
    try:
        # NOTE(review): pymongo.Connection is the legacy (pre-3.x) API;
        # modern pymongo uses MongoClient. Kept as-is for the old API.
        connection = pymongo.Connection(mongodb_uri)
        database = connection[db_name]
    except Exception:
        # Narrowed from a bare ``except:`` so that SystemExit and
        # KeyboardInterrupt are not silently swallowed.
        print('Error: Unable to connect to database.')
        connection = None
    # What follows is insert, update, and selection code that can vary widely
    # depending on coding style.
    #
    if connection is not None:
        # users
        #database.users.insert({'email':
        # To begin with, we'll add a few adventurers to the database. Note that
        # nothing is required to create the adventurers collection--it is
        # created automatically when we insert into it. These are simple JSON
        # objects.
        #
        database.adventurers.insert({'name': 'Cooper',
                                     'class': 'fighter',
                                     'level': 5,
                                     'equipment': {'main-hand': 'sword',
                                                   'off-hand': 'shield',
                                                   'armor': 'plate'}})
        database.adventurers.insert({'name': 'Nishira',
                                     'class': 'warlock',
                                     'level': 10,
                                     'equipment': {'main-hand': 'wand',
                                                   'off-hand': 'dagger',
                                                   'armor': 'cloth'}})
        database.adventurers.insert({'name': 'Mordo',
                                     'class': 'wizard',
                                     'level': 11,
                                     'equipment': {'off-hand': 'dagger',
                                                   'armor': 'leather'}})
        # Because it seems we forgot to equip Mordo, we'll need to get him
        # ready. Note the dot notation used to address the 'main-hand' key.
        # Don't send a JSON object describing the 'main-hand' key in the
        # context of the 'equipment' key, or MongoDB will overwrite the other
        # keys stored under 'equipment'. Mordo would be embarrassed without
        # armor.
        #
        # Note that in python, MongoDB $ operators should be quoted.
        #
        database.adventurers.update({'name': 'Mordo' },
                                    {'$set': {'equipment.main-hand': 'staff'}})
        # Now that everyone's ready, we'll send them off through standard
        # output. Unfortunately this adventure is for adventurers level 10
        # or higher. We pass a JSON object describing our query as the value
        # of the key we'd like to evaluate.
        #
        party = database.adventurers.find({'level': {'$gte': 10}})
        # Our query returns a Cursor, which can be counted and iterated
        # normally.
        #
        if party.count() > 0:
            print('The quest begins!')
            for adventurer in party:
                print('%s, level %s %s, departs wearing %s and wielding a %s and %s.'
                      % ( adventurer['name'], adventurer['level'],
                          adventurer['class'],
                          adventurer['equipment']['armor'],
                          adventurer['equipment']['main-hand'],
                          adventurer['equipment']['off-hand'] ))
            print('Good luck, you %s brave souls!' % party.count())
        else:
            print('No one is high enough level!')
        # Since this is an example, we'll clean up after ourselves.
        database.drop_collection('adventurers')
if __name__ == '__main__':
    # Forward everything after the script name to main().
    main(sys.argv[1:])
|
Ok friends. I hope you are ready to enjoy some soap, because this is the best kind around!
I have a friend named Tim. He makes soap, collects weird instruments, and is moving to Africa in January. He needs support!
The best thing about Tim Soap is that it has the most minimal amount of ingredients, but produces the most hydrated and best feeling skin. My current favorite is the chocolate peppermint, or as I like to call it, the “Peppermint Patty.” This one simply contains goat milk, silk, cocoa powder and peppermint essential oil. That’s it! All of these things are highly recommended for nourishing and naturally cleaning the skin. Peppermint essential oil also does wonders with a tight, stressful chest and a tension headache. It’s only $5 a bar!
He has several different bars that are up for grabs. Here’s a list of them and their ingredients!
Baby Skin– $5: baby powder fragrance oil, goats milk, silk and sandalwood powder. This is a great one for exfoliation!
Man Soap– $5: liquified frankincense resin, anise essential oil, sweet orange, cedar wood, ylang ylang and silk. There’s nothing feminine about this one!
Close Shave– $3: bentonite clay and juniper berry essential oil. This one gives a crazy lather, and is also a good bath soap!
He even has an unscented bar for $3 for those who simply want to clean themselves without the frill.
The packaging will look much better once the lovely AlliRay gets her hands on it, and we will be selling it at the Lake Desoto Farmers Market next weekend!
Sounds awesome right? Not only do you need to grab some of these up for yourself, but they also make fantastic stocking stuffers, leaving the entire stocking smelling so fresh and so clean.
Now I understand that soap is soap. We can buy it virtually anywhere, so why would anyone take the time to order this stuff? Well, let me tell you about Tim, and maybe that will sway your vote.
Tim goes to my church, Church on the Way. If you want to hear complaints, bickering, gossip, negativity, and all of the other crap that we tend to consume ourselves with on a daily basis, then DO NOT go to Tim. Most conversations with Tim consist of instruments, soap, coffee, music festivals, tattoos and the neat adventures he’s been on. Over the summer, he lost his house to the terrible flooding from the storms up here in North Florida, and at one point was riding his bike something like 40 miles a day to get to work. When he needs a place to rest his head, he has one, even if it’s not the most convenient. In the couple of years that I’ve had the pleasure of being in a church community with Tim, I’ve never once heard the man complain. If you want to meet a man who actually looks like a Christ follower, then this is your guy. He’s moving to Africa to join Brie Demott, our other international missionary, to love on orphans and serve Nakuru, Kenya however he can. If you want to know more about Tim’s life and his calling, then click here and contact him!
So are you ready to buy some soap? If you are interested in supporting Tim and also having the best skin, then email me at meganontheway@gmail.com and we will get your soap ordered right away!
|
from typing import Dict
from yaak import inject
import paginate
from Src.BioAnalyzer.CrossCutting.DTOs.GenePrioritization.DataIntegrationDto import DataIntegrationDto
from Src.BioAnalyzer.CrossCutting.Filters.GenePrioritization.FeSingleDataIntegration import FeSingleDataIntegration
from Src.BioAnalyzer.DataAccess.Entities.GenePrioritization.DataIntegration import DataIntegration
from Src.Core.Manager.ManagerBase import ManagerBase
class DataIntegratorManager(ManagerBase):
    """Manages persistence of data-integration results, splitting large
    adjacency lists into fixed-size pages before storage."""
    @inject.Param(repository='DataIntegrationRepositoryBase')
    def __init__(self, repository):
        """
        :param repository: repository used for all persistence operations.
        """
        super().__init__(repository)
        # Number of adjacency-list entries stored per document.
        self.__page_size = 10
    def add_one(self, data_integration_dto: DataIntegrationDto):
        """Store a data integration, replacing any existing one with the same
        data type/conditional, paginating its adjacency list.

        :param data_integration_dto: the integration payload to persist.
        """
        fe_data_integration = self._repository.get_one(FeSingleDataIntegration(data_type=data_integration_dto.data_type,
                                                                               conditional=data_integration_dto.conditional),
                                                       DataIntegration,
                                                       {'data_type': 1})
        if fe_data_integration.result:
            self._repository.delete_one(fe_data_integration)
        adj_list = data_integration_dto.adjacency_list
        # Persist page by page. BUGFIX: the previous loop tested
        # ``page.next_page`` *before* inserting, which dropped the final
        # page entirely (and inserted nothing for single-page lists).
        # Inserting first, then advancing, stores every page exactly once.
        page_number = 1
        while True:
            page = paginate.Page(adj_list, page=page_number,
                                 items_per_page=self.__page_size)
            data_integration_dto.adjacency_list = page.items
            self._repository.add_one(data_integration_dto)
            if not page.next_page:
                break
            page_number = page.next_page
    def get_one(self, fe_data_integration: FeSingleDataIntegration,
                include_or_exclude_fields: Dict[str, int] = None) -> FeSingleDataIntegration:
        """Fetch a single data integration matching the given filter.

        :param fe_data_integration: filter describing the integration.
        :param include_or_exclude_fields: optional Mongo-style projection.
        :return: the filter object populated with the query result.
        """
        return self._repository.get_one(fe_data_integration,
                                        DataIntegration,
                                        include_or_exclude_fields)
|
Lisa Raterman has been a valued member of our team since March 2017. As an Assistant Account Manager with a versatile skillset in marketing and public relations, she balances strategy and creativity for clients including Harris County Flood Control District, Houston Botanic Garden, Houston Ballet Nutcracker Market, Kidney Cancer Association and Urban Land Institute Houston.
Lisa is a careful listener and dedicated planner who is able to anticipate clients’ needs and assess the big picture. She is adept at telling an organization’s story, and enjoys working with clients to introduce new projects to media and the public. Recently, she helped announce the creation and mission of the Jung Center’s Mind, Body, Spirit Institute, and the completion of Valencia Group’s boutique hotel, The George in College Station.
Lisa’s eagle-eyed attention to detail are an asset to our clients. She is able to assimilate complex issues and promote them clearly to stakeholders. Prior to joining the Elmore PR team, Lisa was a part-time facilitator for the St. Louis Zoo coordinating private and corporate special events, a promotions coordinator for INTRAV/Clipper Cruise Line, and a communications specialist for PR and marketing firm Synergy Group.
Lisa is a graduate of Webster University in St. Louis. A native Missourian, Lisa has lived in Houston for nearly four years, following two years in Chicago and a year living in Costa Rica. She has learned a lot about the city through the incredible work of her clients and enjoys getting out in the natural beauty of Texas by exploring the state parks with her family and three dogs.
|
# wget http://stuff.mit.edu/afs/sipb/contrib/pi/pi-billion.txt
# THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python example009.py
#
# Python 2 / Theano / Lasagne experiment: train a 1-D convolutional network
# to distinguish windows of consecutive pi digits from shuffled windows.
from __future__ import division
import numpy as np
import theano
import theano.tensor as T
import lasagne as L
import argparse
import time
from six.moves import cPickle
# NOTE(review): threshold='nan' only works on old numpy; newer versions
# require a numeric threshold.
np.set_printoptions(threshold='nan')
np.set_printoptions(linewidth=200)
np.set_printoptions(formatter={'float': '{:12.8f}'.format, 'int': '{:4d}'.format})
print 'numpy ' + np.__version__
print 'theano ' + theano.__version__
print 'lasagne ' + L.__version__
# parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--ndigits', help='number of digits, default 1000000', default=1000000, type=int)
parser.add_argument('--window', help='window size, default=100', default=100, type=int)
parser.add_argument('--lr', help='learning rate, default 0.001', default=0.001, type=float)
parser.add_argument('--nepoch', help='number of epochs, default=100', default=100, type=int)
parser.add_argument('--nbatch', help='number of batches per eopch, default=100', default=100, type=int)
parser.add_argument('--batchsize', help='batch size, default 1000', default=1000, type=int)
parser.add_argument('--test', help='test fraction, default 0.2', default=0.2, type=float)
parser.add_argument('--model', help='output model filename')
args = parser.parse_args()
print args
# load data
with open('pi-billion.txt') as f:
    s = f.read()
# redundant: the with-block has already closed the file.
f.close()
# Keep the first ndigits digits of pi as float32 values 0..9.
pi = np.empty([args.ndigits],dtype='float32')
i=0
for c in s:
    if c.isdigit():
        pi[i] = float(c)
        i+=1
        if i==args.ndigits:
            break
print 'pi.shape',pi.shape
# Network: stack of strided 1-D convolutions followed by global average
# pooling and a 2-way softmax (real-vs-shuffled classification).
input_var = T.matrix(dtype=theano.config.floatX)
target_var = T.vector(dtype='int32')
network = L.layers.InputLayer((None, args.window), input_var)
print 'input', L.layers.get_output_shape(network)
network = L.layers.ReshapeLayer(network, ((-1, 1, args.window)))
print 'reshape', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
conv = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(conv)
gap = L.layers.Pool1DLayer(conv, pool_size=L.layers.get_output_shape(conv)[2], stride=None, pad=0, mode='average_inc_pad')
print 'gap', L.layers.get_output_shape(gap)
network = L.layers.DenseLayer(gap, 2, nonlinearity=L.nonlinearities.softmax)
print 'output', L.layers.get_output_shape(network)
# Earlier fully-connected architecture kept for reference:
#input_var = T.matrix(dtype=theano.config.floatX)
#target_var = T.vector(dtype='int32')
#network = L.layers.InputLayer((None, args.window), input_var)
#network = L.layers.DenseLayer(network, 10000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 100)
#network = L.layers.DenseLayer(network, 2, nonlinearity=L.nonlinearities.softmax)
# Loss, updates, and compiled train/test functions.
prediction = L.layers.get_output(network)
loss = L.objectives.aggregate(L.objectives.categorical_crossentropy(prediction, target_var), mode='mean')
params = L.layers.get_all_params(network, trainable=True)
updates = L.updates.adam(loss, params, learning_rate=args.lr)
# np.inf max_norm: no clipping, only used to report the gradient norm.
scaled_grads,norm = L.updates.total_norm_constraint(T.grad(loss,params), np.inf, return_norm=True)
train_fn = theano.function([input_var, target_var], [loss,norm], updates=updates)
test_fn = theano.function([input_var], L.layers.get_output(network, deterministic=True))
d = np.empty([args.batchsize,args.window],dtype='float32')
l = np.empty([args.batchsize],dtype='int32')
t0 = time.time()
t = time.time()
for i in range(args.nepoch):
    tloss=0
    tnorm=0
    #train
    for j in range(args.nbatch):
        for k in range(args.batchsize):
            # Draw windows from the training region (first 1-test fraction);
            # label 0 = genuine pi digits, label 1 = shuffled window.
            #w = np.random.randint(int(pi.shape[0]*args.test),pi.shape[0]-args.window)
            w = np.random.randint(0,int(pi.shape[0]*(1-args.test))-args.window)
            d[k] = pi[w:w+args.window]
            if np.random.randint(0,2)==0:
                l[k]=0
            else:
                np.random.shuffle(d[k])
                l[k]=1
        bloss,bnorm = train_fn(d,l)
        tloss += bloss
        tnorm += bnorm
    #test
    for k in range(args.batchsize):
        # Draw windows from the held-out tail of the digit sequence.
        #w = np.random.randint(0,int(pi.shape[0]*args.test-args.window))
        w = np.random.randint(int(pi.shape[0]*(1-args.test)),pi.shape[0]-args.window)
        d[k] = pi[w:w+args.window]
        if np.random.randint(0,2)==0:
            l[k]=0
        else:
            np.random.shuffle(d[k])
            l[k]=1
    val_output = test_fn(d)
    val_predictions = np.argmax(val_output, axis=1)
    tacc = np.mean(val_predictions == l)
    print 'epoch {:8d} loss {:12.8f} grad {:12.8f} accuracy {:12.8f} n_zero {:6d} n_one {:6d} t_epoch {:4d} t_total {:8d}'.format(i, tloss/args.nbatch, tnorm/args.nbatch, tacc, np.sum(val_predictions==0), np.sum(val_predictions==1), int(time.time()-t), int(time.time()-t0))
    t = time.time()
# Snapshot the model parameters after every epoch.
f = open(args.model, 'wb')
cPickle.dump(L.layers.get_all_param_values(network), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
|
Our own reproduction of the CJ2A, CJ3A and CJ3B front bumper that is bolted directly to the frame from late 1948 and up without the license plate slots in it. This bumper is made out of heavy gauge steel with all proper holes drilled and tapered ends. Made in the USA.
Note: On CJ2A models this bumper only fits the style frame that is used with no bumper gussets or after serial number 215649.
|
import os
from flask import Blueprint
from flask.ext.mako import render_template
# Blueprints for the three course sections; each renders Mako templates
# from this package's 'templates' directory.
homework = Blueprint('homework', __name__, template_folder='templates')
lectures = Blueprint('lectures', __name__, template_folder='templates')
quizzes = Blueprint('quizzes', __name__, template_folder='templates')
@homework.route('/', defaults={'page': 'index'})
@homework.route('/<page>')
def display_homework(page):
    """Render a homework page; the index lists all homework files."""
    hws = None
    if page == 'index':
        base_dir = os.path.split(__file__)[0]
        entries = os.listdir(os.path.join(base_dir, 'static', 'hw'))
        entries.extend(os.listdir(os.path.join(base_dir, 'templates', 'hw')))
        # Sort and hide the index template itself.
        hws = [entry for entry in sorted(entries) if entry != "index.mak"]
    return render_template('hw/{}.mak'.format(page), name='mako', hws=hws)
@lectures.route('/', defaults={'page': 'index'})
@lectures.route('/<page>')
def display_lecture(page):
    """Render a lecture page; the index page lists all lecture-note
    templates (excluding index.mak itself)."""
    lecture_notes = None
    if page == 'index':
        base = os.path.split(__file__)[0]
        entries = os.listdir(os.path.join(base, 'templates', 'lectures'))
        lecture_notes = [note for note in sorted(entries)
                         if not note == "index.mak"]
    return render_template('lectures/{}.mak'.format(page), name='mako',
                           lectures=lecture_notes)
@quizzes.route('/<quiz_num>')
def show_quiz(quiz_num):
    """Render the quiz template matching the number in the URL."""
    template = 'quiz/{}.mak'.format(quiz_num)
    return render_template(template, name='mako')
|
Get a complete, expert manicure with one, easy-to-use tool. 2 filing surfaces shape and smooth nail edges. 4 buffing surfaces create a satiny smooth surface and glossy, brilliant shine.
File & buffer in one. Unique patented all-in-one file and buffer for perfectly manicured looking nails. Professional length for superior control. Precision crafted, fully inspected to assure exceptional performance.
• Start with dry nails to help prevent nails from peeling.
• File nails at a 45° angle against underside of nail edge to help maintain a strong nail tip.
• Always start at the side of the nail and file toward the middle. Do not saw back and forth.
• For a squared edge, file tip of nail straight across with the Black side, then round corners slightly with White side to prevent snagging.
For a natural-looking curve, file nail to match the curve of your fingertip. Filing alternately from one side, then the other, will ensure an even shape.
Before buffing, remove any nail enamel with Revlon Nail Enamel Remover.
• Even Out. Stroke buffer back and forth across nail surface until stains and ridges are removed.
• Smooth. Buff with the same back and forth motion to produce a satin smooth surface.
• Buff. Continue to buff nail surface with back and forth motion to prepare nails for final shine.
• Shine. Finish by briskly buffing back and forth to create a natural, brilliant shine.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
from os import listdir
from models.Documento import *
from os.path import relpath,join
class CorpusRepository:
path = ''
documentos = []
def __init__(self,path):
self.path = path
def getListDocuments(self):
self.documentos = []
for documentName in listdir(relpath(self.path)):
if (documentName[0] != u'.'): # Protección para no leer archivos de sistema MAC ej: .DS_store
self.documentos.append(self.getDocument(documentName))
return self.documentos
def getFullStringFromDocument(self,documentName):
filePath = join(self.path,documentName)
with codecs.open(filePath, mode='rt', encoding='utf-8') as fp:
return fp.read()
return None
def getDocument(self,documentName):
filePath = join(self.path,documentName)
with codecs.open(filePath, mode='rt', encoding='utf-8') as fp:
return Documento(documentName,fp.read())
return None
|
A blacksmith is a traditional trade.
Vocational education (or Vocational Education and Training (VET), also called Career and Technical Education (CTE)) prepares learners for careers that are based in manual or practical activities, traditionally non-academic and directly related to a specific trade, occupation or vocation, hence the term, in which the learner participates. It is sometimes referred to as technical education, as the learner directly develops expertise in a particular group of techniques or technology.
Generally, vocation and career are used interchangeably. Vocational education might be contrasted with education in a usually broader scientific field, which might concentrate on theory and abstract conceptual knowledge, characteristic of tertiary education. Vocational education can be at the secondary or post-secondary level and can interact with the apprenticeship system. Increasingly, vocational education can be recognised in terms of recognition of prior learning and partial academic credit towards tertiary education (e.g., at a university) as credit; however, it is rarely considered in its own form to fall under the traditional definition of a higher education.
Up until the end of the twentieth century, vocational education focused on specific trades such as for example, an automobile mechanic or welder, and was therefore associated with the activities of lower social classes. As a consequence, it attracted a level of stigma. Vocational education is related to the age-old apprenticeship system of learning.
However, as the labor market becomes more specialized and economies demand higher levels of skill, governments and businesses are increasingly investing in the future of vocational education through publicly funded training organizations and subsidized apprenticeship or traineeship initiatives for businesses. At the post-secondary level vocational education is typically provided by an institute of technology, or by a local community college.
Vocational education has diversified over the 20th century and now exists in industries such as retail, tourism, information technology, funeral services and cosmetics, as well as in the traditional crafts and cottage industries.
In Australia vocational education and training is post-secondary and provided through the Vocational Education and Training (VET) system and by Registered Training Organisations. This system encompasses both Government and private providers in a nationally recognised quality system based on agreed and consistent assessment standards.
The largest and the most unified system of vocational education was created in the Soviet Union with the Professional`no-tehnicheskoye uchilische and, Tehnikum. But it became less effective with the transition of the economies of post-Soviet countries to a market economy.
There are two kinds of vocational education, secondary and post-secondary. Secondary education at a vocational school (ammattikoulu) is usually taken immediately after primary school, at ages of 16-21. Some programmes, however, require a secondary academic degree (ylioppilastutkinto, or matriculation examination). The education is primarily vocational, and little academic general education is given.
With academic or vocational secondary education one can enter higher vocational schools (ammattikorkeakoulu, or AMK). AMK degrees take 3,5-4,5 years. Legally, they are not university degrees in Finland, although in foreign countries similar degrees may be called "university level". This is reflected by some Finnish schools giving English titles such as Bachelor of Science, with no Finnish translation.
Vocational education is an important part of the education systems in Austria, Germany, Liechtenstein and Switzerland (including the French speaking part of the country).
For example, in Germany a law (the Berufsausbildungsgesetz) was passed in 1969 which regulated and unified the vocational training system and codified the shared responsibility of the state, the unions, associations and chambers of trade and industry. The system is very popular in modern Germany: in 2001, two thirds of young people aged under 22 began an apprenticeship, and 78% of them completed it, meaning that approximately 51% of all young people under 22 have completed an apprenticeship. One in three companies offered apprenticeships in 2003; in 2004 the government signed a pledge with industrial unions that all companies except very small ones must take on apprentices.
The vocational education systems in the other German speaking countries are very similar to the German system and a vocational qualification from one country is generally also recognized in the other states within this area.
Additionally there is the Fachhochschule since the 1970's in West Germany and since the 1990's in Austria, former East Germany, Liechtenstein and in Switzerland. This type of institution offers degrees (Diplom(FH), Bachelor's and Master's degrees), which are one of the worldwide rare examples of a higher education that is considered in its own form to fall also under the (local) definition of a vocational education.
New Zealand is served by 41 Industry Training Organisations. The unique element is that ITOs purchase training as well as set standards and aggregate industry opinion about skills in the labour market. Industry Training, as organised by ITOs, has expanded from apprenticeships to a more true lifelong learning situation with, for example, over 10% of trainees aged 50 or over. Moreover much of the training is generic. This challenges the prevailing idea of vocational education and the standard layperson view that it focuses on apprenticeships.
In the United States, the approach is varied from state to state. Most of the technical and vocational courses are offered by Community Colleges, though several states have their own institutes of technology which are on an equal accreditational footing with other state universities.
Historically, junior high schools and high schools have offered vocational courses such as home economics, wood and metal shop, typing, business courses, drafting and auto repair, though schools have put more emphasis on academics for all students because of standards based education reform. School to work is a series of federal and state initiatives to link academics to work, sometimes including spending time during the day on a job site without pay.
Federal involvement is principally carried out through the Carl D. Perkins Career and Technical Education Act. Accountability requirements tied to the receipt of federal funds under this Act help provide some overall leadership. The Office of Vocational and Adult Education within the US Department of Education also supervises activities funded by the Act.
The Association for Career and Technical Education (ACTE) is the largest private association dedicated to the advancement of education that prepares youth and adults for careers. Its members include CTE teachers, administrators, and researchers.
Buzzell, C.H. "Let Our Image Reflect Our Pride." VOCATIONAL EDUCATION JOURNAL 62, no. 8 (November-December 1987): 10.
O'Connor, P.J., and Trussell, S.T. "The Marketing of Vocational Education." VOCATIONAL EDUCATION JOURNAL 62, no. 8 (November-December 1987): 31-32.
Ries, E. "To 'V' or Not to 'V': for Many the Word 'Vocational' Doesn't Work." TECHNIQUES 72, no. 8 (November-December 1997): 32-36.
Tuttle, F.T. "Let's Get Serious about Image-Building." VOCATIONAL EDUCATION JOURNAL 62, no. 8 (November-December 1987): 11.
IEK: Vocational education schools in Greece.
Automotive Vocational Schools, a website devoted to vocational schools in the automotive field.
Jschool: Journalism Education & Training, an example of a vocational college in journalism education.
Medical Vocational Schools, a website devoted to vocational schools in the medical field.
Agricultural Vocational Schools, website for the TeachAManToFish network of agricultural vocational schools.
|
#!/usr/local/bin/python3
"""
Author: Jon O'Brien
Due Date: 11/9/13
Assignment: linked list homework
File: myListRec.py
Author: Sean Strout <sps@cs.rit.edu>
Language: Python 3
Description: An iterative implementation of a node based single linked list
data structure.
Purpose: LECTURE VERSION
"""
from myNode import *
###########################################################
# LINKED LIST CLASS DEFINITION
###########################################################
class MyList():
    """A class that encapsulates a node based linked list"""
    # head   - first node in the chain (an EmptyNode when the list is empty)
    # size   - number of data nodes currently in the list
    # cursor - iteration position used by reset()/hasNext()/next()
    __slots__ = ('head', 'size', 'cursor')
###########################################################
# LINKED LIST CLASS BUILDER
###########################################################
def mkMyList():
    """
    Create and return a new, empty linked list.

    Parameters:
        None
    Returns:
        MyList: a list whose head and cursor are empty nodes and whose
        size is zero
    """
    newList = MyList()
    newList.head = mkEmptyNode()
    newList.cursor = mkEmptyNode()
    newList.size = 0
    return newList
###########################################################
# LINKED LIST CURSOR FUNCTIONS
###########################################################
def reset(lst):
    """
    Reset the iteration cursor to the first node of the list.

    Parameters:
        lst (MyList) - the linked list
    Returns:
        None
    """
    lst.cursor = lst.head
def hasNext(lst):
    """
    Tell whether the iteration cursor still has an element to visit.

    Parameters:
        lst (MyList) - the linked list
    Returns:
        True (bool) when the cursor references a real node, False once it
        has run off the end of the list
    """
    if isinstance(lst.cursor, EmptyNode):
        return False
    return True
def next(lst):
    """
    Return the value at the cursor and advance the cursor one node.

    Parameters:
        lst (MyList) - the linked list
    Preconditions:
        The cursor references a real node; otherwise IndexError is raised
    Returns:
        The value (any type) stored in the node the cursor was on
    """
    node = lst.cursor
    if isinstance(node, EmptyNode):
        raise IndexError("cursor is invalid")
    lst.cursor = node.next
    return node.data
###########################################################
# LINKED LIST FUNCTIONS
###########################################################
def clear(lst):
    """
    Empty the list in place: head and cursor become empty nodes and the
    size drops to zero.

    Parameters:
        lst (MyList) - the linked list
    Returns:
        None
    """
    lst.size = 0
    lst.head = mkEmptyNode()
    lst.cursor = mkEmptyNode()
def toString(lst):
    """
    Render the list in Python's printed-list style, e.g. '[1, 2, 3]'.

    Parameters:
        lst (MyList) - the linked list
    Returns:
        A string representation of the list
    """
    parts = []
    node = lst.head
    while not isinstance(node, EmptyNode):
        parts.append(str(node.data))
        node = node.next
    return '[' + ', '.join(parts) + ']'
def append(lst, value):
    """
    Add a node holding value at the tail of the list.

    Parameters:
        lst (MyList) - the linked list
        value (any type) - the data to store in the new tail node
    Returns:
        None
    """
    newNode = mkNode(value, EmptyNode())
    if isinstance(lst.head, EmptyNode):
        # Empty list: the new node becomes the head.
        lst.head = newNode
    else:
        # Walk to the current tail and hang the new node off it.
        tail = lst.head
        while not isinstance(tail.next, EmptyNode):
            tail = tail.next
        tail.next = newNode
    lst.size += 1
def insertAt(lst, index, value):
    """
    Insert value so that it occupies position index (0 based).

    Parameters:
        lst (MyList) - the list to insert value into
        index (int) - the position the new element will occupy
        value (any type) - the data to be inserted into the list
    Preconditions:
        0 <= index <= lst.size, otherwise IndexError is raised
    Returns:
        None
    """
    if not (0 <= index <= lst.size):
        raise IndexError(str(index) + ' is out of range.')
    if index == 0:
        lst.head = mkNode(value, lst.head)
    else:
        # Walk to the node just before the insertion point.
        before = lst.head
        for _ in range(index - 1):
            before = before.next
        before.next = mkNode(value, before.next)
    lst.size += 1
def get(lst, index):
    """
    Return the element stored at position index.

    Parameters:
        lst (MyList) - the linked list
        index (int) - the 0 based position to read
    Preconditions:
        0 <= index < lst.size, otherwise IndexError is raised
    Returns:
        The value (any type) stored at that position
    """
    if not (0 <= index < lst.size):
        raise IndexError(str(index) + ' is out of range.')
    node = lst.head
    for _ in range(index):
        node = node.next
    return node.data
def set(lst, index, value):
    """
    Overwrite the element at position index with value.

    Parameters:
        lst (MyList) - the linked list
        index (int) - the 0 based position to overwrite
        value (any type) - the new data for that position
    Preconditions:
        0 <= index < lst.size, otherwise IndexError is raised
    Returns:
        None
    """
    if not (0 <= index < lst.size):
        raise IndexError(str(index) + ' is out of range.')
    node = lst.head
    for _ in range(index):
        node = node.next
    node.data = value
def pop(lst, index):
    """
    Remove the element at position index and return it.

    Parameters:
        lst (MyList) - the linked list
        index (int) - the 0 based position to remove
    Preconditions:
        0 <= index < lst.size, otherwise IndexError is raised
    Returns:
        The value (any type) that was removed
    """
    if not (0 <= index < lst.size):
        raise IndexError(str(index) + ' is out of range.')
    # Any structural change invalidates the iteration cursor.
    lst.cursor = mkEmptyNode()
    if index == 0:
        target = lst.head
        lst.head = target.next
    else:
        before = lst.head
        for _ in range(index - 1):
            before = before.next
        target = before.next
        before.next = target.next
    lst.size -= 1
    return target.data
def index(lst, value):
    """
    Return the 0 based position of the first occurrence of value.

    Parameters:
        lst (MyList) - the linked list
        value (any type) - the data being searched for
    Preconditions:
        value exists in the list, otherwise ValueError is raised
    Returns:
        The index (int) of the first node whose data equals value
    """
    position = 0
    node = lst.head
    while not isinstance(node, EmptyNode):
        if node.data == value:
            return position
        node = node.next
        position += 1
    raise ValueError(str(value) + " is not present in the list")
def count(lst, value):
    """
    Count how many nodes in the list hold data equal to value.

    Parameters:
        lst (MyList) - the linked list
        value (any type) - the data to count
    Returns:
        The number (int) of occurrences of value in the list
    """
    occurrences = 0
    node = lst.head
    while not isinstance(node, EmptyNode):
        if node.data == value:
            occurrences += 1
        node = node.next
    return occurrences
def myListToPyList(lst):
    """
    Convert a node based MyList into a built-in Python list.

    Parameters:
        lst (MyList) - the linked list
    Returns:
        A Python list holding the same values in the same order
    """
    values = []
    node = lst.head
    while not isinstance(node, EmptyNode):
        values.append(node.data)
        node = node.next
    return values
def pyListToMyList(pylst):
    """
    Convert a built-in Python list into a node based MyList.

    Parameters:
        pylst (list) - the Python list to convert
    Returns:
        A MyList holding the same values in the same order
    """
    # Use a name that does not shadow the MyList class itself.
    result = mkMyList()
    for item in pylst:
        append(result, item)
    return result
def remove(lst, value):
    """
    Remove the first occurrence of value from the list.

    Parameters:
        lst (MyList) - the linked list
        value (any type) - the data to remove
    Returns:
        True (bool) if a node was removed, False if value was not found

    Bug fixes relative to the original version:
      * removing the head node now decrements lst.size (it previously
        returned True without updating the size);
      * searching for an absent value no longer dereferences the
        terminating EmptyNode's ``data`` attribute (which raised
        AttributeError); it now returns False as documented;
      * the head is tested once instead of on every loop iteration.
    """
    # Matching the original behaviour, any call invalidates the cursor
    # even when nothing ends up being removed.
    lst.cursor = mkEmptyNode()
    if isinstance(lst.head, EmptyNode):
        return False
    if lst.head.data == value:
        lst.head = lst.head.next
        lst.size -= 1
        return True
    prev = lst.head
    while not isinstance(prev.next, EmptyNode):
        if prev.next.data == value:
            # Unlink the matching node.
            prev.next = prev.next.next
            lst.size -= 1
            return True
        prev = prev.next
    return False
|
Vinnie Vincent autographed 'All Systems Go' Invasion record album. The record is in good shape, the cover shows some wear and the signature is perfect. All autographs are 100% guaranteed and come with a certificate of authenticity from www.RKSportsPromotions.com.
Vinnie Vincent autographed Invasion record album. The record is in good shape, the cover shows some wear and the signature is perfect. All autographs are 100% guaranteed and come with a certificate of authenticity from www.RKSportsPromotions.com.
Meat Loaf autographed 'Bad Attitude' record. The record is in great shape, the cover has some wear and the signature is perfect. All autographs are 100% guaranteed and come with a certificate of authenticity from www.RKSportsPromotions.com.
Denise Nickerson (Violet from Willy Wonka & The Chocolate Factory) autographed RARE PROMO record. The record is in great shape, the cover has some wear and the signature is perfect. All autographs are 100% guaranteed and come with a certificate of authenticity from www.RKSportsPromotions.com.
Andrew Dice Clay autographed 'No Apologies' Laserdisc. The laser disc is in perfect shape, the cover has minor wear and the signature is perfect. All autographs are 100% guaranteed and come with a certificate of authenticity from www.RKSportsPromotions.com.
Anthony Michael Hall autographed 'Vacation' record album. The record is in good shape, the cover shows wear and the signature is perfect. All autographs are 100% guaranteed and come with a certificate of authenticity from www.RKSportsPromotions.com.
Cyndi Lauper autographed 'She Bop' record album. The record is in good shape, the cover shows some wear and the signature is perfect. All autographs are 100% guaranteed and come with a certificate of authenticity from www.RKSportsPromotions.com.
Martha Davis of 'The Motels' autographed 'All Four One' record album. The record is in great shape, the cover shows some wear with a punch hole thru and the signature is perfect. All autographs are 100% guaranteed and come with a certificate of authenticity from www.RKSportsPromotions.com.
Marsha Warfield autographed 'Mask' record album. The record is in great shape, the cover shows some wear and the signature is perfect. Marsha also played 'Roz' in the hit TV sitcom 'Night Court'. All autographs are 100% guaranteed and come with a certificate of authenticity from www.RKSportsPromotions.com.
Marsha Warfield autographed 'D.C. Cab' record album. The record is in great shape, the cover shows some wear and the signature is perfect. Marsha also played 'Roz' in the hit TV sitcom 'Night Court'. All autographs are 100% guaranteed and come with a certificate of authenticity from www.RKSportsPromotions.com.
|
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from horizon import tables
from horizon import views
from cloudkittydashboard.api import cloudkitty as api
from cloudkittydashboard.dashboards.admin.pyscripts import forms \
as pyscripts_forms
from cloudkittydashboard.dashboards.admin.pyscripts import tables \
as pyscripts_tables
class IndexView(tables.DataTableView):
    """Table view listing every PyScript known to CloudKitty."""

    table_class = pyscripts_tables.PyScriptsTable
    template_name = 'admin/pyscripts/pyscripts_list.html'

    def get_data(self):
        """Fetch all scripts and tag each entry for table display."""
        scripts = api.cloudkittyclient(self.request).pyscripts.scripts.list()
        return api.identify(scripts, name=False)
class ScriptCreateView(forms.ModalFormView):
    """Modal form view used to create a new PyScript."""
    form_class = pyscripts_forms.CreateScriptForm
    form_id = "create_script"
    modal_header = _("Create Script")
    page_title = _("Create Script")
    submit_url = reverse_lazy('horizon:admin:pyscripts:script_create')
    success_url = reverse_lazy('horizon:admin:pyscripts:index')
    template_name = 'admin/pyscripts/form.html'

    def get_object_id(self, obj):
        # Presumably the form's handle() already returns the new
        # script's identifier -- confirm against CreateScriptForm.
        return obj
class ScriptUpdateView(forms.ModalFormView):
    """Modal form view used to edit an existing PyScript."""

    form_class = pyscripts_forms.EditScriptForm
    form_id = "update_script"
    modal_header = _("Update Script")
    page_title = _("Update Script")
    submit_url = 'horizon:admin:pyscripts:script_update'
    success_url = 'horizon:admin:pyscripts:script_update'
    template_name = 'admin/pyscripts/form.html'

    def get_initial(self):
        """Seed the form with the script fetched from the CloudKitty API."""
        script = api.cloudkittyclient(self.request).pyscripts.scripts.get(
            script_id=self.kwargs['script_id'])
        self.initial = script.to_dict()
        # The form field is named 'script_data' while the API returns 'data'.
        self.initial['script_data'] = self.initial['data']
        return self.initial

    def get_context_data(self, **kwargs):
        """Expose the script id and a fully resolved submit URL."""
        context = super(ScriptUpdateView, self).get_context_data(**kwargs)
        script_id = self.kwargs.get('script_id')
        context['script_id'] = script_id
        context['submit_url'] = reverse_lazy(self.submit_url,
                                             args=(script_id, ))
        return context

    def get_success_url(self, **kwargs):
        """Return to the script list after a successful update."""
        return reverse('horizon:admin:pyscripts:index')
class ScriptDetailsView(views.APIView):
    """Read-only detail page for a single PyScript."""

    template_name = 'admin/pyscripts/details.html'
    page_title = _("Script Details : {{ script.name }}")

    def get_data(self, request, context, *args, **kwargs):
        """Put the requested script into the template context.

        The script is None when the API lookup fails, so the template can
        still render.
        """
        script_id = kwargs.get("script_id")
        try:
            client = api.cloudkittyclient(self.request)
            script = client.pyscripts.scripts.get(script_id=script_id)
        except Exception:
            script = None
        context['script'] = script
        return context
|
Hari Mari hits these flip flops out of the ballpark by marrying renowned baseball glove maker Nokona's rich, full-grain American leather with Hari Mari's comfort and quality-driven flip flops. Just like a seasoned leather glove, these flip flops only get better with age. The memory foam toe post gently grips toes and eliminates break-in, while the jersey-lined stitch-and-turn straps keep feet cool and comfy. Memory foam-filled straps provide softer-than-soft comfort; a firm arch delivers added foot support and all-day wear. Soft-squeeze midsole for fleet-of-foot comfort. Non-marking, boat-safe outsole made with carbon-free rubber. In brown. Imported.
|
#!/usr/bin/env python
import os, re, sys
# Template for the generated config.h.in; each ${{...}} placeholder is
# replaced below with #cmakedefine lines scraped from CMakeLists.txt.
CONFIG_H = """\
#ifndef CONFIG_H_
#define CONFIG_H_
/* Detected headers */
${{includes}}
/* Functions */
${{functions}}
/* Sizes */
${{sizes}}
/* Definitions */
${{definitions}}
#endif /* CONFIG_H_ */
"""

# Command-line arguments: source tree and build tree locations.
PATH_SRC = sys.argv[1]
PATH_BIN = sys.argv[2]

# Input CMakeLists.txt (read) and generated config.h.in (written).
FILENAME_CMK = os.path.join (PATH_SRC, 'CMakeLists.txt')
FILENAME_NEW = os.path.join (PATH_BIN, 'config.h.in')
# Parse CMakeLists.txt and collect the check macros it invokes; the
# regular expressions are unchanged from the original script.
with open(FILENAME_CMK, 'r') as fp:
    cmake_text = fp.read()

# Header checks -> one #cmakedefine per detected header symbol.
includes_t = ''
for header in re.findall (r'CHULA_CHECK_INCLUDE *\(.+? *(\w+)\)', cmake_text, re.IGNORECASE):
    includes_t += '#cmakedefine %s\n' %(header)

# Function and compile/run checks -> one #cmakedefine per symbol.
functions_t = ''
for func in re.findall (r'CHECK_FUNCTION_EXISTS *\(.+? *(\w+)\)', cmake_text, re.IGNORECASE):
    functions_t += '#cmakedefine %s\n' %(func)
for pattern in (r'CHECK_C_SOURCE_COMPILES *\(.+?(HAVE_.+?)\)\n',
                r'CHECK_C_SOURCE_RUNS *\(.+?(HAVE_.+?)\)\n'):
    for symbol in re.findall (pattern, cmake_text, re.S):
        functions_t += '#cmakedefine %s\n' %(symbol)

# Value definitions, optionally guarded with #ifndef.
definitions_t = ''
for name, value in re.findall (r'DEF_SET *\((\w+)? +(.+?)\)', cmake_text, re.IGNORECASE):
    definitions_t += '#cmakedefine %s %s\n' %(name, value)
for name, value in re.findall (r'DEF_SET_IFNDEF *\((\w+)? +(.+?)\)', cmake_text, re.IGNORECASE):
    definitions_t += '#ifndef %s\n' %(name)
    definitions_t += '#cmakedefine %s %s\n' %(name, value)
    definitions_t += '#endif\n'
for name in re.findall (r'DEF_DEFINE *\((\w+)?\)', cmake_text, re.IGNORECASE):
    definitions_t += '#cmakedefine %s\n' %(name)

# Type-size checks -> the @..._CODE@ stanza plus HAVE_ aliases.
sizes_t = ''
for symbol in re.findall (r'CHECK_TYPE_SIZE *\(.+? *(\w+)\)', cmake_text, re.IGNORECASE):
    sizes_t += '@%s_CODE@\n' %(symbol)
    sizes_t += '#cmakedefine HAVE_%s\n' %(symbol)
    sizes_t += '#ifdef HAVE_%s\n' %(symbol)
    sizes_t += '# define HAVE_%s\n' %(symbol.replace('SIZEOF_',''))
    sizes_t += '#endif\n'

# Replacements: fill each placeholder in the template.
config_h = (CONFIG_H
            .replace ("${{includes}}", includes_t)
            .replace ("${{functions}}", functions_t)
            .replace ("${{sizes}}", sizes_t)
            .replace ("${{definitions}}", definitions_t))

# Write config.h
with open(FILENAME_NEW, 'w+') as fp:
    fp.write (config_h)
|
DEER PARK, Texas - A fire reignited Friday at a chemical facility in Deer Park where a previous blaze burned for nearly four days earlier this week.
Thick smoke could be seen rising from the Intercontinental Terminals Co. facility at the corner of Independence Parkway and Tidal Road.
Video from SKY2 showed a fire burning not only in the 15-tank yard, but also along a drainage ditch leading away from the yard. The new blaze appeared to be under control within about an hour.
Officials said a dike wall that surrounds the yard was breached Friday afternoon, and a shelter-in-place order was issued for surrounding industrial facilities and the San Jacinto historic site.
Authorities closed traffic to a section of the Houston Ship Channel between Tuckers Bayou and Ship Channel Light 116.
The Beltway 8 Ship Channel Bridge was closed for a short period of time Friday afternoon while the fire was burning, but has been reopened.
The shelter-in-place was not issued for residents of Deer Park, and officials repeated that after the fire reignited.
The breach and fire came after ITC officials said they had begun pumping out pygas that remained in tank 80-7. They warned that the pumping operation could upset the foam blanket that is keeping vapors contained, which could release more vapors.
It was not immediately clear which tanks were burning Friday afternoon, but ITC officials said in a statement that the tanks are located on the west side of the yard. Officials at the La Porte Office of Emergency Management said that two tanks were on fire.
The fire started Sunday morning and was declared extinguished by Wednesday morning. There was a brief flareup on Wednesday evening before crews were able to extinguish the blaze again. On Thursday, a release of benzene prompted a shelter-in-place for Deer Park and Galena Park.
The Chemical Safety Board is conducting an investigation.
"Once we get the all clear that it’s safe to go on there, they’ll start to take a look around, talk to people, do what we do. We take photographic evidence, we talk to people, we gather documents and generally begin the process of the field phase of our investigation," a spokeswoman said.
"When we do an investigation of an incident like this, we’re looking to understand all of the facts and conditions that led to the incident. So, were there technical issues? Were there training issues? Were there management issues? How was the site being operated?," she said.
The Houston Independent School District said all campuses on the east side have canceled activities for the weekend.
HISD said, "With reports of the Deer Park industrial fire re-igniting this afternoon, all on-campus activities for east area HISD campuses this weekend have been cancelled. Also, sporting events have been rescheduled at alternate locations."
Deer Park Mayor Jerry Mouton Jr. said Friday morning that more shelter-in-place orders are possible after pumping at one of the tanks damaged by the fire.
Alice Richardson, spokeswoman for ITC, said Friday morning that the temperature of the tanks continues to drop, and crews are working to pump out the material remaining in the 80-7 tank. She said the operation could upset the foam blanket that is keeping vapors contained. If that happens, a release of volatile organic compounds is possible, she said.
Brett Weber, another ITC representative, said that about 20,000 barrels of pygas remains in the tank and it could take up to 12 hours to complete the pumping operation.
“Sometimes being safe, being methodical takes a little longer,” Weber said.
Weber said the foam layer protecting the tank will be added to as pumping continues in an effort to prevent a benzene release similar to what prompted a shelter-in-place order Thursday for Deer Park.
Mouton said that another shelter-in-place order is possible if there is another vapor release during the pumping. He said there is no scenario that has unfolded since the fire that would warrant an evacuation, but he said residents should follow their instincts.
“Everything that can be done to ensure public safety is being done,” Mouton said.
Adam Adams, the Environmental Protection Agency incident commander, said that the only time benzene was detected since monitoring began Sunday was on Thursday. He said those levels were between 1.5 and 1.8 parts per million.
Adams said that booms have been placed in multiple locations around the facility in an effort to contain any spill of fuel products.
Mouton said that tests are being conducted on the town’s water, but it takes several days to get the results of those tests.
|
import locale
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib.ticker import MaxNLocator
# ---------------------------------------------------------------------------
# Plot H-R diagram panels (Teff vs. logg, colored by [Fe/H]) for the RAVE
# Cannon DR1 test set, one panel per signal-to-noise cut, then save the
# figure as PNG and PDF.
# ---------------------------------------------------------------------------
locale.setlocale(locale.LC_ALL, 'en_US')

# Load the table once; when re-run in an interactive session the previously
# loaded `rave_cannon_dr1` is reused instead of being read again.
try:
    rave_cannon_dr1
except NameError:
    from rave_io import rave_cannon_dr1
else:
    print("Using pre-loaded data")

t = rave_cannon_dr1

#ok = (t["SNRK"] > 50) * (t["r_chi_sq_ms"] < 3) * (t["r_chi_sq_giant"] < 3) #* (t["WEIGHTED_VSINI"] < 1)

xlim = (7500, 3500)       # Teff axis (K), reversed so hot stars sit on the left
ylim = (5.5, 0)           # logg axis (dex), reversed so giants sit at the top
vmin, vmax = (-3, 0.5)    # [Fe/H] color-scale limits
snrs = (100, 50, 25, 10)  # one panel per S/N threshold

M, N = (len(snrs), 50)

# Figure geometry: M square panels of side `factor` inches plus margins.
factor = 3.5
lbdim = 0.2 * factor      # left/bottom margin (inches)
trdim = 0.1 * factor      # top/right margin (inches)
whspace = 0.05            # inter-panel spacing fraction
yspace = factor
xspace = factor * M + factor * (M - 1) * whspace + lbdim * (M - 1)
xdim = lbdim + xspace + trdim
ydim = lbdim + yspace + trdim

fig, axes = plt.subplots(1, M, figsize=(xdim, ydim))
fig.subplots_adjust(
    left=lbdim/xdim, bottom=lbdim/ydim, right=(xspace + lbdim)/xdim,
    top=(yspace + lbdim)/ydim, wspace=whspace, hspace=whspace)

for ax, snr in zip(axes, snrs):
    # Quality cuts for this panel: S/N above threshold, acceptable fit
    # chi^2, and R > 25.
    ok = (t["SNRK"] > snr) * (t["R_CHI_SQ"] < 3) * (t["R"] > 25) #* (t["VSINI"] < 1)

    ax.scatter(t["TEFF"][ok], t["LOGG"][ok], c=t["FE_H"][ok],
        vmin=vmin, vmax=vmax, alpha=0.25, s=50, edgecolor="none", cmap="plasma",
        rasterized=True)

    # Keep a fully-opaque proxy scatter from the last panel for the colorbar.
    # FIX: Axes.is_last_col()/is_first_col() were deprecated in matplotlib 3.4
    # and removed in 3.6; comparing against the axes array is equivalent here
    # (single row of subplots) and version-independent.
    if ax is axes[-1]:
        scat = ax.scatter([0], [0], c=[0], vmin=vmin, vmax=vmax, cmap="plasma")

    # FIX: locale.format() was deprecated since Python 3.7 and removed in
    # 3.12; locale.format_string() is the documented replacement and has
    # existed since Python 2.5, so this stays backward compatible.
    K = locale.format_string("%d", sum(ok), grouping=True).replace(",", "$,$")

    ax.text(0.05, 0.9, r"$S/N > {:.0f}$".format(snr),
        horizontalalignment="left", verticalalignment="bottom",
        transform=ax.transAxes, fontsize=14)
    ax.text(0.05, 0.82, r"${}$".format(K) + r" ${\rm stars}$",
        horizontalalignment="left", verticalalignment="bottom",
        transform=ax.transAxes, fontsize=14)

    ax.set_xlim(xlim)
    ax.set_ylim(ylim)

    ax.xaxis.set_major_locator(MaxNLocator(6))
    ax.xaxis.set_ticks([7000, 6000, 5000, 4000])
    ax.yaxis.set_major_locator(MaxNLocator(6))

    ax.set_xlabel(r"$T_{\rm eff}$ $({\rm K})$")
    if ax is axes[0]:
        ax.set_ylabel(r"$\log{g}$")
    else:
        ax.set_yticklabels([])

cax, kw = mpl.colorbar.make_axes(list(axes), fraction=0.075, pad=0.025, aspect=10)
cbar = plt.colorbar(scat, cax=cax, ticks=[-3, -2, -1, 0])
cbar.set_label(r"$[{\rm Fe/H}]$")

fig.savefig("hrd-test-set-color.png")
fig.savefig("hrd-test-set-color.pdf", dpi=300)
|
Everyone would be happier!! We would have more energy; our spouses, children, parents, grandparents, friends, etc. would live long, energetic lives. We would be paying less in taxes for the sick people whose bills taxpayers foot. People wouldn't go bankrupt from paying out the *** for medical bills when they are poorly insured for major illnesses. Even minor illnesses can be a major financial burden for people, in addition to all the prescription pills that they have to buy. That money could be spent building our economy; instead of being in debt forever and a day, people could pay off their credit cards, which in turn benefits the companies that loan to them. He or she is able to realise his or her potential, and play an important role in social and national development.
Health is a vital component which enables us to maintain our potential for any work done. It is well said that "a sound mind is in a sound body." So if a person is mentally and physically fit, he/she can earn and can become financially strong. Ultimately, this leads to productivity and the progression of a country.
|
"""--------------------------------------------------------------------
COPYRIGHT 2016 Stanley Innovation Inc.
Software License Agreement:
The software supplied herewith by Stanley Innovation Inc. (the "Company")
for its licensed SI Vector Platform is intended and supplied to you,
the Company's customer, for use solely and exclusively with Stanley Innovation
products. The software is owned by the Company and/or its supplier, and is
protected under applicable copyright laws. All rights are reserved. Any use in
violation of the foregoing restrictions may subject the user to criminal
sanctions under applicable laws, as well as to civil liability for the
breach of the terms and conditions of this license. The Company may
immediately terminate this Agreement upon your use of the software with
any products that are not Stanley Innovation products.
The software was written using Python programming language. Your use
of the software is therefore subject to the terms and conditions of the
OSI- approved open source license viewable at http://www.python.org/.
You are solely responsible for ensuring your compliance with the Python
open source license.
You shall indemnify, defend and hold the Company harmless from any claims,
demands, liabilities or expenses, including reasonable attorneys fees, incurred
by the Company as a result of any claim or proceeding against the Company
arising out of or based upon:
(i) The combination, operation or use of the software by you with any hardware,
products, programs or data not supplied or approved in writing by the Company,
if such claim or proceeding would have been avoided but for such combination,
operation or use.
(ii) The modification of the software by or on behalf of you
(iii) Your use of the software.
THIS SOFTWARE IS PROVIDED IN AN "AS IS" CONDITION. NO WARRANTIES,
WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED
TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. THE COMPANY SHALL NOT,
IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
\file vector_teleop_full_system.py
\brief This module contains a class for teleoperating all the vector
platform DOF with a joystick controller; only works with logitech
extreme 3d
\Platform: Linux/ROS Indigo
Edited 7/25/2016: Vivian Chu, vchu@gatech - included support for simulation
--------------------------------------------------------------------"""
from utils import *
from system_defines import *
from vector_msgs.msg import *
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
from std_msgs.msg import Bool,Float64
from trajectory_msgs.msg import JointTrajectoryPoint
from dynamixel_controllers.srv import *
from control_msgs.msg import JointTrajectoryAction, JointTrajectoryGoal, FollowJointTrajectoryAction, FollowJointTrajectoryGoal, JointTrajectoryControllerState
import rospy
import sys
import math
import actionlib
class VectorTeleopFullSystem(object):
def __init__(self):
self.is_sim = rospy.get_param('~sim',True)
self.lincmd = LinearActuatorCmd()
if (False == self.is_sim):
"""
Subscribe to the configuration message
"""
self.config_updated = False
rospy.Subscriber("/vector/feedback/active_configuration", Configuration, self._update_configuration_limits)
start_time = rospy.get_time()
while ((rospy.get_time() - start_time) < 10.0) and (False == self.config_updated):
rospy.sleep(0.05)
if (False == self.config_updated):
rospy.logerr("Timed out waiting for Vector feedback topics make sure the driver is running")
sys.exit(0)
return
"""
Initialize the linear actuator position if this is the real system
"""
vector_dynamics = rospy.wait_for_message("/vector/feedback/dynamics", Dynamics)
self.lincmd.desired_position_m = vector_dynamics.linear_actuator_position_m
else:
self.x_vel_limit_mps = rospy.get_param('~sim_teleop_x_vel_limit_mps',0.5)
self.y_vel_limit_mps = rospy.get_param('~sim_teleop_y_vel_limit_mps',0.5)
self.yaw_rate_limit_rps = rospy.get_param('~sim_teleop_yaw_rate_limit_rps',0.5)
self.accel_lim = rospy.get_param('~sim_teleop_accel_lim',0.5)
self.yaw_accel_lim = rospy.get_param('~sim_teleop_yaw_accel_lim',1.0)
# Simulation flags for linear actuator
self.linact_sub = rospy.Subscriber('/linear_actuator_controller/state', JointTrajectoryControllerState, self._update_simulation_linear_actuator, queue_size=1)
self.sim_lin_actuator_position = 0.0 # init to 0 for now
self.sim_lin_init = False
self.last_arm_update = rospy.get_time()
"""
Set the mapping for the various commands
"""
self.ctrl_map = dict({'momentary': {'dead_man' : {'is_button':True,'index':0,'set_val':1},
'man_ovvrd' : {'is_button':True,'index':1,'set_val':1},
'standby' : {'is_button':True,'index':2,'set_val':1},
'tractor' : {'is_button':True,'index':3,'set_val':1},
'estop' : {'is_button':True,'index':4,'set_val':1},
'pan_tilt_ctl' : {'is_button':True,'index':8,'set_val':1},
'base_ctl' : {'is_button':True,'index':9,'set_val':1},
'arm_ctl_right': {'is_button':True,'index':10,'set_val':1},
'arm_ctl_left' : {'is_button':True,'index':11,'set_val':1}},
'axis' : {'left_right' : {'index' :0, 'invert_axis':False},
'for_aft' : {'index' :1, 'invert_axis':False},
'twist' : {'index' :2, 'invert_axis':False},
'flipper' : {'index' :3, 'invert_axis':False},
'dpad_lr' : {'index' :4, 'invert_axis':False},
'dpad_ud' : {'index' :5, 'invert_axis':False}}})
"""
Initialize the debounce logic states
"""
self.db_cnt = dict()
self.axis_value = dict()
self.button_state = dict()
for key, value in self.ctrl_map.iteritems():
if key == 'momentary':
for key, value2 in value.iteritems():
self.db_cnt[key]=0
self.button_state[key]=False
else:
self.db_cnt[key]=0
self.axis_value[key]=0.0
self.send_cmd_none = False
self.no_motion_commands = True
self.last_motion_command_time = 0.0
self.last_joy = rospy.get_time()
self._last_gripper_val = 0.0
self.run_arm_ctl_right = False
self.run_arm_ctl_left = False
self.run_pan_tilt_ctl = False
self._init_pan_tilt = True
self._last_angles = [0.0,0.0]
self.cfg_cmd = ConfigCmd()
self.cfg_pub = rospy.Publisher('/vector/gp_command', ConfigCmd, queue_size=10)
self.motion_cmd = Twist()
self.limited_cmd = Twist()
self.motion_pub = rospy.Publisher('/vector/teleop/cmd_vel', Twist, queue_size=10)
self.override_pub = rospy.Publisher("/vector/manual_override/cmd_vel",Twist, queue_size=10)
self.linpub = rospy.Publisher("/vector/linear_actuator_cmd",LinearActuatorCmd,queue_size=1)
self.arm_pub = [0]*2
self.gripper_pub = [0]*2
self.arm_pub[0] = rospy.Publisher('/vector/right_arm/cartesian_vel_cmd', JacoCartesianVelocityCmd, queue_size=10)
self.gripper_pub[0] = rospy.Publisher('/vector/right_gripper/cmd', GripperCmd, queue_size=10)
self.arm_pub[1] = rospy.Publisher('/vector/left_arm/cartesian_vel_cmd', JacoCartesianVelocityCmd, queue_size=10)
self.gripper_pub[1] = rospy.Publisher('/vector/left_gripper/cmd', GripperCmd, queue_size=10)
self.pan_pub = rospy.Publisher('/pan_controller/command', Float64, queue_size=1)
self.tilt_pub = rospy.Publisher('/tilt_controller/command', Float64, queue_size=1)
rospy.Subscriber('/joy', Joy, self._vector_teleop)
def _update_simulation_linear_actuator(self, msg):
self.sim_lin_actuator_position = msg.actual.positions[0]
def _update_configuration_limits(self,config):
self.x_vel_limit_mps = config.teleop_x_vel_limit_mps
self.y_vel_limit_mps = config.teleop_y_vel_limit_mps
self.yaw_rate_limit_rps = config.teleop_yaw_rate_limit_rps
self.accel_lim = config.teleop_accel_limit_mps2
self.yaw_accel_lim = config.teleop_yaw_accel_limit_rps2
self.config_updated = True
def _parse_joy_input(self,joyMessage):
raw_button_states = dict()
self.button_state = dict()
for key, value in self.ctrl_map.iteritems():
if key == 'momentary':
for key2, value2 in value.iteritems():
raw_button_states[key2]=True
self.button_state[key2]=False
else:
for key2, value2 in value.iteritems():
self.axis_value[key2] = 0.0
for key, value in self.ctrl_map.iteritems():
if key == 'momentary':
for key2, item in value.iteritems():
if item['is_button']:
if item['set_val'] == joyMessage.buttons[item['index']]:
raw_button_states[key2] &= True
else:
raw_button_states[key2] = False
else:
temp = joyMessage.axes[item['index']]
if (item['invert_axis']):
temp *= -1.0
if (temp >= item['set_thresh']):
raw_button_states[key2] &= True
else:
raw_button_states[key2] = False
if (True == raw_button_states[key2]):
self.db_cnt[key2]+=1
if (self.db_cnt[key2] > 10):
self.db_cnt[key2] = 10
self.button_state[key2] = True
else:
self.button_state[key2] = False
self.db_cnt[key2] = 0
if key == 'axis':
for key2, item in value.iteritems():
temp = joyMessage.axes[item['index']]
if (item['invert_axis']):
temp *= -1.0
self.axis_value[key2] = temp
def _vector_teleop(self, joyMessage):
self._parse_joy_input(joyMessage)
if self.button_state['base_ctl']:
self.run_arm_ctl_right = False
self.run_arm_ctl_left = False
self.run_pan_tilt_ctl = False
self._init_pan_tilt = False
elif self.button_state['arm_ctl_right']:
self.run_arm_ctl_right = True
self.run_arm_ctl_left = False
self.run_pan_tilt_ctl = False
self._init_pan_tilt = False
elif self.button_state['arm_ctl_left']:
self.run_arm_ctl_right = False
self.run_arm_ctl_left = True
self.run_pan_tilt_ctl = False
self._init_pan_tilt = False
elif self.button_state['pan_tilt_ctl']:
self.run_arm_ctl = False
self.run_arm_ctl_right = False
self.run_arm_ctl_left = False
self.run_pan_tilt_ctl = True
self._init_pan_tilt = True
if self.button_state['estop']:
self.run_arm_ctl = False
self.run_pan_tilt_ctl = False
self._init_pan_tilt = False
arm_cmd = JacoCartesianVelocityCmd()
arm_cmd.header.stamp=rospy.get_rostime()
arm_cmd.header.frame_id=''
self.arm_pub[0].publish(arm_cmd)
self.arm_pub[1].publish(arm_cmd)
home = Float64()
home.data = 0.0
self.pan_pub.publish(home)
self.tilt_pub.publish(home)
if self.run_arm_ctl_right or self.run_arm_ctl_left:
arm_cmd = JacoCartesianVelocityCmd()
arm_cmd.header.stamp=rospy.get_rostime()
arm_cmd.header.frame_id=''
gripper_cmd = GripperCmd()
if self.run_arm_ctl_right:
arm_idx = 0
else:
arm_idx = 1
if self.button_state['dead_man']:
arm_cmd.x = self.axis_value['left_right'] * 0.1
arm_cmd.z = self.axis_value['for_aft'] * 0.1
if not self.button_state['man_ovvrd']:
arm_cmd.y = self.axis_value['twist'] * 0.1
else:
# Check if we're in simulation - if so set the last known position
if self.is_sim == True:
if self.sim_lin_init == False:
self.lincmd.desired_position_m = self.sim_lin_actuator_position
self.sim_lin_init = True
dt = rospy.get_time() - self.last_arm_update
self.lincmd.desired_position_m += (self.axis_value['twist'] * 0.05) * dt
if (self.lincmd.desired_position_m > 0.855):
self.lincmd.desired_position_m = 0.855
elif self.lincmd.desired_position_m < 0.0:
self.lincmd.desired_position_m = 0.0
self.lincmd.header.stamp = rospy.get_rostime()
self.lincmd.header.frame_id=''
self.linpub.publish(self.lincmd)
self.lincmd.header.seq+=1
self.last_arm_update = rospy.get_time()
arm_cmd.theta_y = self.axis_value['dpad_ud'] * 100.0
arm_cmd.theta_x = self.axis_value['dpad_lr'] * 100.0
if self.button_state['standby']:
arm_cmd.theta_z = 100.0
elif self.button_state['tractor']:
arm_cmd.theta_z = -100.0
gripper_val = (self.axis_value['flipper'] + 1.0)/2.0
if abs(self._last_gripper_val-gripper_val) > 0.05:
gripper_cmd.position = gripper_val * 0.085
gripper_cmd.speed = 0.05
gripper_cmd.force = 100.0
self.gripper_pub[arm_idx].publish(gripper_cmd)
self._last_gripper_val = gripper_val
self.arm_pub[arm_idx].publish(arm_cmd)
elif self.run_pan_tilt_ctl:
if self._init_pan_tilt:
# Check if we're in sim - if so use default speed
if self.is_sim == False:
rospy.wait_for_service('/pan_controller/set_speed')
rospy.wait_for_service('/tilt_controller/set_speed')
try:
set_speed = rospy.ServiceProxy('/pan_controller/set_speed', SetSpeed)
resp1 = set_speed(1.0)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
try:
set_speed = rospy.ServiceProxy('/tilt_controller/set_speed', SetSpeed)
resp1 = set_speed(1.0)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
self._init_pan_tilt = False
if self.button_state['dead_man']:
pan = self.axis_value['twist'] * 1.05
tilt = self.axis_value['for_aft'] * 1.4
pan_cmd = Float64()
tilt_cmd = Float64()
pan_cmd.data = pan
tilt_cmd.data = tilt
if abs(self._last_angles[0] - pan) > 0.05:
self.pan_pub.publish(pan_cmd)
self._last_angles[0] = pan
if abs(self._last_angles[1] - tilt) > 0.05:
self.tilt_pub.publish(tilt_cmd)
self._last_angles[1] = tilt
else:
if self.button_state['estop']:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = DTZ_REQUEST
elif self.button_state['standby']:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = STANDBY_REQUEST
elif self.button_state['tractor']:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = TRACTOR_REQUEST
else:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_NONE'
self.cfg_cmd.gp_param = 0
if ('GENERAL_PURPOSE_CMD_NONE' != self.cfg_cmd.gp_cmd):
self.cfg_cmd.header.stamp = rospy.get_rostime()
self.cfg_pub.publish(self.cfg_cmd)
self.cfg_cmd.header.seq
self.send_cmd_none = True
elif (True == self.send_cmd_none):
self.cfg_cmd.header.stamp = rospy.get_rostime()
self.cfg_pub.publish(self.cfg_cmd)
self.cfg_cmd.header.seq
self.send_cmd_none = False
elif (False == self.send_cmd_none):
if self.button_state['dead_man']:
self.motion_cmd.linear.x = (self.axis_value['for_aft'] * self.x_vel_limit_mps)
self.motion_cmd.linear.y = (self.axis_value['left_right'] * self.y_vel_limit_mps)
self.motion_cmd.angular.z = (self.axis_value['twist'] * self.yaw_rate_limit_rps)
self.last_motion_command_time = rospy.get_time()
else:
self.motion_cmd.linear.x = 0.0
self.motion_cmd.linear.y = 0.0
self.motion_cmd.angular.z = 0.0
dt = rospy.get_time() - self.last_joy
self.last_joy = rospy.get_time()
if (dt >= 0.01):
self.limited_cmd.linear.x = slew_limit(self.motion_cmd.linear.x,
self.limited_cmd.linear.x,
self.accel_lim, dt)
self.limited_cmd.linear.y = slew_limit(self.motion_cmd.linear.y,
self.limited_cmd.linear.y,
self.accel_lim, dt)
self.limited_cmd.angular.z = slew_limit(self.motion_cmd.angular.z,
self.limited_cmd.angular.z,
self.yaw_accel_lim, dt)
if ((rospy.get_time() - self.last_motion_command_time) < 2.0):
self.motion_pub.publish(self.limited_cmd)
if self.button_state['man_ovvrd'] and self.button_state['man_ovvrd']:
self.override_pub.publish(self.motion_cmd)
|
Truform 0846 (from the "Classic Medical" series) for both men and women utilize graduated compression technology to restore leg health. The smooth knit and traditional styling are made to perform with daily use. Opaque material conceals various conditions. Medical-grade support with functional design and durability.
|
from tests import BaseTestCase
from redash.models import Alert, db
class TestAlertAll(BaseTestCase):
    """Tests for Alert.all() group-based visibility filtering."""

    def test_returns_all_alerts_for_given_groups(self):
        # Two data sources: the factory default one and one owned by a
        # freshly created group, each with its own query and alert.
        default_ds = self.factory.data_source
        extra_group = self.factory.create_group()
        extra_ds = self.factory.create_data_source(group=extra_group)

        alert_on_default = self.factory.create_alert(
            query_rel=self.factory.create_query(data_source=default_ds))
        alert_on_extra = self.factory.create_alert(
            query_rel=self.factory.create_query(data_source=extra_ds))
        db.session.flush()

        # Asking for both groups returns both alerts.
        both = Alert.all(group_ids=[extra_group.id, self.factory.default_group.id])
        self.assertIn(alert_on_default, both)
        self.assertIn(alert_on_extra, both)

        # Asking for only the default group excludes the other alert.
        default_only = Alert.all(group_ids=[self.factory.default_group.id])
        self.assertIn(alert_on_default, default_only)
        self.assertNotIn(alert_on_extra, default_only)

        # And vice versa for the extra group.
        extra_only = Alert.all(group_ids=[extra_group.id])
        self.assertNotIn(alert_on_default, extra_only)
        self.assertIn(alert_on_extra, extra_only)

    def test_return_each_alert_only_once(self):
        # The data source belongs to two groups; the alert reachable
        # through both must still appear exactly once.
        group = self.factory.create_group()
        self.factory.data_source.add_group(group)
        alert = self.factory.create_alert()

        alerts = Alert.all(group_ids=[self.factory.default_group.id, group.id])
        self.assertEqual(1, len(list(alerts)))
        self.assertIn(alert, alerts)
|
Just to let you know, we had a very nice trip.
Everything was perfect, but we would especially thank Ajju. He was Excellent.
Subject: Article for Web Page.
Thank you, Sharad, Manjeet and Vijay for giving me two extraordinary adventures of a lifetime.
The travel urge grabbed me when I was a teenager and my father, post WWII, in a grey and weary England, said that we were going to Guernsey in the Channel Islands for a holiday. We had a wonderful time and my world became a big mystery to be explored and savoured. Travel became a passion, whether as a volunteer, a curious passenger, or both.
It arrived the following day. It was detailed, thorough and comprehensive, had many inclusions covered in the cost and became the blueprint for what was to become a 24-day adventure which included 4 National Parks — Corbett (India’s first Park), Bharatpur Bird Sanctuary, Bandhavgarh, and Kanha — plus the cities of New Delhi, Jaipur, Agra and Khajuraho. The main focus was wildlife.
Several months were spent in working out the details, a good friend was persuaded to join me, visas were granted, vaccines administered, books read, deposits sent and we were ready, more or less.
We were to find an enormous country of multiple contrasts , traffic jams that defy the imagination and a small group of men who were determined that we would have a great experience-Sharad, Manjeet Sharma, Nature Safari’s wonderful manager who took everything in his stride no matter the time of day or night, and Vijay, our driver who took his task seriously, drove the immaculate SUV well and reminded us that, if we needed anything he was there for us. Vijay became a valued friend.
We were to see many fantastic birds, a herd of wild elephants, several species of deer and monkeys wild boar, reptiles , many other creatures and, of course..Tigers!
The cities were amazing, every school youngster who saw us , surrounded us, tried out their English, took our photographs, shook our hands and gave us an unexpected and joyful experience. Gorgeous as the Taj is the human interaction was very powerful.
We left India with remarkable memories which still give us a lot of pleasure and quite a few laughs.
One more adventure in a long life beckoned-as did India.
My beloved husband Guy no longer flies long distances so I asked niece Caroline if she would join me. Caroline lives in Ireland, works hard with her own company and could only leave it for 2 weeks.
Sharad was contacted, an itinerary agreed upon with input from all of us. It was city based, beginning in New Delhi and included Jaipur, Ranthambhore for a 2 day safari, Agra, Varanasi and finishing in the hill city of Darjeeling-the last item on my bucket list!
Our departure date was October 21st. then-life happened. On October 17th Guy had a heart attack in the middle of the night. He, by some wonderful miracle, received almost immediate intervention, and not only survived but is doing well. What to do about our trip?
Sharad was called. He was compassionate, calm , reassuring and said that anytime we wished to reschedule we could. We did have some monetary losses but we did make the trip in late November. A welcoming Manjeet met me at 3AM and Caroline the following night at 4 AM. We were both very glad to see him as can be imagined.
Sharad had already emailed to say that there was an international phone waiting so that there could be as much contact with Guy and our family and friends as possible. This was a gift beyond price and we are forever grateful.
We were delighted to have Vijay as our driver, the hotels were good and well situated, breakfast was included and Ranthambhore included all meals and 4 safari drives with driver and guide. There were many other inclusions including representatives at airports and hotels, entrance fees for monuments, historical buildings, the crazy rickshaw drive in Old Delhi, Mahatma Gandhi’s home and tomb, the Toy Train in Darjeeling, car, driver and guide plus others.
I was stricken with a severe upper respiratory infection in Agra. A doctor was immediately called to the hotel, who examined, administered and said “bed rest”.
Caroline saw more of Agra than anticipated but took that in her stride.
Darjeeling was an enormous change from the flatter and warmer climates of the North. We saw Everest from the plane, visited Observatory Hill at dawn and witnessed a great sunrise, stayed at the Cedar Inn with its stunning views of mountains and sprawling Darjeeling spreading out below us — it was magical!
I wrote this because I feel that this company not only gives value for money but are reliable, available, have great integrity,are instantly responsive if the unexpected happens, take pride and care in what they offer and love what they do. They all know and understand the environment of the forests and the need to nurture and encourage the use of natural resources.
We would like to thank you for the incredible holiday we had in April 2012.
Again, thanks for your service and we hope to be in touch in the not-too-distant future to start planning our next holiday to India!
Many thanks for all you did to make our recent holiday in India such an enjoyable experience.
The organisation was superb. All the guides were very helpful and informative. The drivers were competent and made us feel quite safe despite the poor roads and chaotic traffic they had to contend with from time to time. Vijay in particular, was outstanding.
The two Tiger Den Resorts were wonderful. We particularly enjoyed our stay at Bandhavgarh. The staff were all very attentive, and our safari driver there, Sanjay, was very knowledgeable and keen to make sure we got as much as possible out of the experience. Thanks to him we finally got to see a tiger, a beautiful female. We've come home with so many good memories, and lots and lots of photos!
So again, my thanks to you, Manjeet, Vijay, Sanjay and all the others for making our holiday so exciting and interesting.
Having returned yesterday from Assam to Belgium, we can look back at a wonderful trip.
It’s hard to believe things can go so smoothly in a country like India, still considered as a difficult country to get around without hardships. Yet, from the moment of our arrival until the visit of the Lotus Temple, everything worked out perfectly as the entire trip was meticulously organized. The lodges were good to outstanding and the food – in particular in The Bagh, Bharatpur and Tiger Den’s Resort, Bandhavgarh – was delicious.
To be honest, after our last trip to India some 20 years ago and during which plenty of things went wrong, it took me a lot of effort to convince my wife to visit India for the 4th time – but, as said, everything was so well organized that we will definitely return to India for a 5th visit. And we will certainly not hesitate to contact Nature Safaris again!
Greetings from the New World once again. It seems like yesterday that I was on game drives in Indian game parks. If only those could have gone on forever. I think you know from observing my enthusiasm that the trip far surpassed my highest hopes. Indeed, I felt as though I had seen many of the greatest natural wonders on India. Thanks for making it all possible and attending to both Mariynn's and my needs throughout our safaris. We are both certain that we could have done no better than to have booked our adventure with you.
Here are my appraisals of the general trip as well as the individual game parks that we visited. Please feel free to post these on your website or otherwise use as they may promote your safari expeditions. You may post my name and use me as a reference (I have 253 photographs that can be sent via a U.S.-based film processing center [Walgreens] that will verify all of the animals I have seen... all I need to do is send an email message to a potential client of yours and they can see these photographs at their convenience).
We have just returned from our 15 day safari to three Indian game parks (i.e., Corbett, Bandhavgarh, and Kanha National Parks) with Nature Safari India. As a veteran of numerous, past African safaris in 9 different countries of that continent, allow me to assert that the best of Indian wildlife is on a par with the best of Africa. Indeed, as an extreme nature enthusiast and professional biologist, I long for finding that remote part of the world where one simply escapes civilization and finds truly wild animals in pristine habitats.
The first park we visited, Corbett, offers just that. There are dramatic vistas as one winds through the Himalayan foothill topography enroute to the Dikhala lodge. Thick sal tree forests transition into brush-laden valleys that are strewn with jagged boulders. All the while, one hears the cries and alarm calls of barking deer, chitta/spotted deer, peacocks, and the hum of cicadas (pre-monsoon season). And one also sees other mammals, including wild boars, samba deer, and perhaps the hog deer as well.
Of course, there is always the chance that one may spot His or Her majesty, the Royal Bengal Tiger or Tigress. The anticipation that one may have this encounter is always on the mind in Corbett. The forest just looks as though it may yield a tiger sighting as the safari jeep/gypsy turns the next bend in the road.
Continuing on towards the Dikhala Lodge, our driver, Sarjeet, stopped to show us a forested area where the tawny fish owl resides. I got good photographs and video here. He also showed us the gharial and mugger crocodiles residing in the Ramganga River. It was fascinating to see the highly endangered gharials lounging in the clear, flowing waters below. Corbett Park is one of the few remaining places where one can observe this primitive crocodilian, with its truncated snout and bulging- forward-directed eyes set well atop the head.
The Dikhala Lodge, with its comfortable accommodations, overlooks the Ramganga, the river flowing through an immense, flat valley. This valley is itself bordered by mountainous terrain on the side opposite the lodge. Asian elephant herds emerge from the forests to enter the grassy, clear areas surrounding much of the river. There are also herds of spotted deer here as well. And, again, there is always the possibility that the Royal Bengal Tiger may appear here as well. Driver/nature guide Sarjeet got us into plenty of excellent elephant herds for the evening game drive. Lots of uniquely elephantine socializations, including that of adults surrounding and protecting young, were observed.
The next day Sarjeet put in extra effort and found a tigress that was cooling herself in the Ramganga River near the Dikhala Lodge. We observed her for two and a half hours. It was my first wild tiger and, appropriately enough, was seen here in Corbett, the most classic of Indian game parks. For a while, she actually looked up through the openings in the trees and, no doubt, saw us in the tower above. What a once-in-a-lifetime experience! The Royal Bengal Tigress in all her glory.
By the way, the food at the Dikhala Lodge is served buffet style. It is wholesome Indian food, consisting of vegetarian and meat options. Though Nature Safari India described our Corbett Dikhala Lodge accommodations as being basic, I must note that the room we were given was large, contained a powerful cooling fan, had comfortable beds, electricity, and a large bathroom with toilet and shower.
Our elephant ride on the second evening in Corbett was most pleasurable, with sightings of small elephant herds, ring-necked parakeets, and the black shouldered kite being special highlights. Exiting Corbett the following morning, Sarjeet took time to show us additional elephant herds that we had not observed earlier. It is amazing how very keenly aware the elephants are of human presence. The adults turn and face the jeep if approached too closely. While being very safety-conscious, Sarjeet nonetheless got us close enough for good photography and video of these majestic, social animals.
After a night back in Delhi at a 5 star hotel (The Oberoi), we were off the next morning to Bandhavgarh Park and thus boarded a domestic flight for the nearest airport in Jabalpur. All of this had been pre-arranged via Nature Safari India months in advance. As was the case on all such occasions, we were cordially greeted by our driver at the airport. Everything ran with complete smoothness. Sensing that we were nature enthusiasts, the driver stopped on our way to Bandhavgarh Park so that we could photograph flying foxes residing in a tree, several white throated kingfishers, a peacock, and the Indian roller.
We arrived at the Tiger's Den resort and were warmly received with both friendly hospitality and a delicious Indian meal. After a good night's rest in the air conditioned comfort of our room, we were off on our first game drive in Banhavgarh, which means "Brother Fort". This smaller Park is home to some approximately 50 tigers, including the grandson and great grandsons of the famous male tiger Charger (who both terrified and thrilled park visitors some one to two decades earlier by making surprise mock charges on jeeps and domestic elephants used for traveling about the park).
No less than 10 minutes into Bandhavgarh we were into our first tigers, these being a tigress with two 10-month-old cubs. While the tigress was lying on her back, I obtained video footage of her caressing one of the cubs as the youngster casually ambled by her immense body. But when a wild pig started to walk near the tigers, the mother turned from tenderness to the passions of a born-hunter, her ears and eyes acutely alert as she made rapid, silent strides towards the pig. Luckily for the herbivore, it noticed one of the cubs and beat a life-saving retreat before the tigress could charge it.
While our driver, Sanjay, took us about Bandhavgarh throughout the remainder of the morning so that we could get good photographs of male peacocks displaying to peahens (it was mating season during our late May/early June visit), nice shots of the common kingfisher, and some photographs of jackals and barking deer, it was on the evening drive that his skills as a Bandhavgarh guide especially came to the fore. Indeed, Sanjay placed us (along with numerous other jeeps) in just the right location so that we could observe Kahlua, a great grandson of Charger. Kahlua emerged from a mountain valley and lumbered in his uniquely feline, confident swagger along the dry grass and forest paralleling the road. All the while Sanjay kept me about 20 feet in front of Kahlua, his orange- and black-striped, muscled beauty flowing along to the rhythm of this tiger's evening stroll. This made for once-in-a-lifetime photo and video opportunities and I took full advantage for at least 5 minutes, Sanjay always maintaining the jeep just ahead of this Royal Bengal Tiger.
After celebrating our evening of tiger successes and getting another good night's sleep at the Tiger's Den, we were off with Sanjay for even more tigers the next morning. We saw the same tigress and two cubs again. But we also saw another great grandson of Charger. This was Nilau (spelling?), the father of the two nearby cubs. We watched and photographed him as he glided along the grass so that he could quench his thirst from the clear-flowing, stream in the area's vicinity. How amazing it is to see these predators still living wild in a world that has otherwise gone completely digital!
Always giving us much more than we had dared hope for, Sanjay also found Charger's grandson, B2 (B2 was the progeny of a cross between Charger's daughter Mohini and the male tiger called the "Mahaman male"; this male is famous for having wrested mating rights of a portion of Badhavgarh Park from Charger in 1996). This B2 tiger, who I was informed is the father of Kahlua and Nilau (the two large males seen the previous day), emerged from a rocky grotto one morning and made his way up and over the mountain. I obtained excellent video footage of this fabled tiger.
Other animals seen during our visit at Bandhavgarh included sloth bears (made for great photo and video shoots), serpent eagle, changeable hawk eagle, malabar pied hornbill, chittal/spotted deer, sambar deer, ruddy mongoose, three-striped squirrel, mating Egyptian (or scavenger) vultures, jungle fowl, jackals, and bats (seen in the ancient, human-carved sandstone caves).
A visit to Bandhavgarh is greatly enriched by having one's guide take them to the water pool that occurs alongside an 80 foot-long statue of a reclined Vishnu, all carved out from a single piece of sandstone. This is located half-way up the mountainside that leads to the 1,000 year old palace/fort atop the plateau. Water from a spring flows into this sacred area and fills the pool. Black-faced monkeys in the overhanging trees are very active and yet, despite their energetic activities as they swing through the overlying branches, somehow add to the serene ambience. Though we did not see any that day, it is reported that tigers, and sometimes even leopards, visit the pool to get a drink from the cool, clear waters. That must be a fantastic scene to witness!
Upon leaving Bandhavgarh after 7 very fulfilling game drives, we were driven to Kanha Park, home to some 90 tigers. This park is about as large as Corbett. Besides tigers, it also contains the last population of swamp deer. It is also home to the gaur, incorrectly called the Indian bison. These latter animals are tremendous, bovine beasts that may reach in excess of one ton in weight. As was the case in Corbett and Bandhavgarh, I felt as though Sharad Vats had set us up with the best guides one could hope to find. My Kanha guide, also called Sanjay (this was also the name of our Bandhavgarh guide), knew very well how to track tigers, leopards, and all animals of the sal forest. Besides getting us into four tremendous tiger sightings during our stay, Sanjay of Kanha also found four leopards one evening. One of these was solitary and had just exited a tree when we saw it. The others were a mother, a father, and a cub. How Sanjay spotted those leopards in the darkening, evening forest I will never know. But we watched this family proceed through the forest, the parents keeping a watchful eye on 'Junior' all the while.
We also saw the Indian rock python and had a great tiger show from atop an elephant while in Kanha. In addition, Sanjay got us good sightings of the jungle owlet and a scops owl species. The photo opportunity with the ruddy mongoose was greatly appreciated as well. Certainly, the wildlife to be seen here is nothing short of incredible.
I must mention that the Chitvan Jungle Lodge, where Sharad Vats entertains his guests who are visiting Kanha, is easily a five star accommodation. The rooms one stays in include a giant living room equipped with comfortable, solidly built furniture, large air-conditioned bedroom (with refrigerator), and a very large indoor bathroom as well as an optional outdoor shower. There is a large swimming pool of a uniformly 3.5 foot depth for those wishing to cool off a bit and take in some exercise as well. Morning tea and bisquits/cookies are delivered early to one's room (it arrives a half hour before the game drive). Meals are served to one's tastes by a chef who always visits you to see if you are satisfied with your meal. The presentation of the meals is excellent, with dinner being served at tables set out on the grass and thus under the stars. The reception room is large and spacious. It has a good library that is stocked with local wildlife books as well as inspired writings by such authors as Gandhi, Lincoln, and various other authors presenting on theological topics.
I would recommend, without any reservations at all, Sharad Vats and his www.naturesafariindia.com services to anyone who wishes to experience the wildlife of India. I cannot imagine a more organized and congenial service anywhere that simply delivers on the wildlife one has come to see. The fact that the accommodations are superb just makes my recommendation all the stronger.
Stay in touch for next year. I shall begin inquiring about bringing a group of students to the game parks of India for next year. Naturally, we would need your services to properly arrange this.
"We have spent 16 days primarily visiting National Parks and Sharad and his team ensured the extensive logostics worked perfectly. There were someone to meet and greet at every juncture. Knowledgeable guides and helpfull staff were everywhere. We were thouroughly briefed at the start and everything went smoothly throughout the entire trip.
We visited Corbett, Bandhavgarh and Kanha and saw lots of wildlife, not least 9 Tigers which was the main goal of the trip. So all in all a great success."
Hope this is the sort of thing....please note my new mail address - new job!!
I was really impressed by the punctuality and all the services of your company and by the friendliness and the professionalism of your collaborators. My first trip to India has definitely been well organized and I spent nice and interesting days. I got to see the tigers and I had the chance to take some good pictures of birds, places and other animals. In Bandhavgarh, surprisingly, I didn't have many chances to spot tigers (just one good spotting from the jeep), probably due to the fact that my stay was shorter there than in Kanha. The Tiger Den Resort was very comfortable with a very cordial and familiar atmosphere just as Tuli Tiger Resort was, which also offers very well prepared guides for safari. In fact I did eight spottings of tigers, six of which from the jeep and one very near, about 5 meters, for 25 minutes.
Certainly I will contact you in the future to organize some other vacations maybe in Gir and Kaziranga. Until then, thanks for everything!
Hi! Sorry about the delay in answering your email. My company was moving to a new building this weekend.
We really enjoyed the travel arrangements made by Sharad. He's very easy to work with, very honest and trustworthy, and made an excellent itinerary for us, along with some great hotel reservations, and used expert nature guides.
We highly recommend him. We are very lucky that someone suggested him to us, and our trip arrangements were handled expertly and professionally by him and his assistant.
However, we did not go to see tigers (we went at the wrong season of the year); our main focus was bird watching, and that was superb.
I can honestly say that you won't go wrong if you use Sharad for your travel arrangements. We definitely will use him again in the future.
We are now back in the UK and would like to send our sincere thanks to you and your team for helping us have a marvellous holiday. We were very impressed by the thoroughness of our itinerary, the attention to detail was excellent - nothing was left to chance. Not only were we looked after every step of the way, last minute changes we requested during our trip were accommodated with speed and efficiency - your service couldn't be bettered. We also appreciated the way that you kept in contact with us during our time in India - a nice personal touch which made us feel more like welcome visitors and not just clients. Our tiger sightings were fantastic (Bandhavgarh thoroughly recommended) and we feel privileged to have seen these wonderful creatures in their natural habitat.
We read your tribute to Challenger - he was truly a magnificent animal and were glad we had the opportunity to see him but sorry that it was in such sad circumstances. We have tried to send this email with photos of Challenger attached but it has been returned by your mailbox as 'over quota'. We have also tried to send some more photos which you may like to use on your site and although we've not had them returned, we are not sure if you have received them or not, perhaps you can let us know. We will be happy to send photos again if you would like them.
Thank you again for all your help and also for the wildlife book, it's a wonderful souvenir of our visit.
We wanted our India trip to emphasize wildlife and nature (and of course we hoped to see tigers!), but we wanted also to get a feeling for the diverse spiritual and cultural history of India and her people. With the limited time we had available for travel these goals seemed almost too much to ask.
But as it turned out, from start to finish our India trip was a resounding success, thanks mostly to the superb planning and professional care provided for us by Sharad Vats and Nature Safari Ltd. At each camp, national park, and historic site, we felt like we were treated as VIP's. Transfers were generally seamless, and Sharad Vats arranged and kept viable backup plans in place in case we experienced flight delays or cancellations due to weather.
We were always met on time by a friendly and knowledgeable guide upon our arrival into a new train station or airport, and the personal sightseeing tours arranged for us in cities and historic areas were outstanding and truly informative. Our hotel and camp accommodations (most of which were suggested or recommended by Nature Safari) were always of very high quality and comfort, as was the food and service.
Our car transportation arrangements were uniformly excellent and our driving days were quite comfortable, despite the inherent difficulties of road travel in some of the remote regions we visited. To top it all off, we were fortunate beyond our hopes in sighting tigers and other wildlife in the parks, due especially to the superb skills of the naturalists and trackers arranged for our visit by Nature Safari.
We arrived home with photographs and memories to last a lifetime! I recommend most highly the expert services of Sharad Vats and Nature Safari Ltd. to all serious travelers who seek the best experiences that a cultural and wildlife-oriented visit to India can offer.
I must say that the whole experience from the booking (which can be a little daunting over the internet) to the actual tour was fantastic and handled in a very professional and thorough way - The 'Temples and Tiger Tour' which I basically followed was superb, each place being incredibly interesting with the reassurance that all reservations and more importantly transfers to the various airports, train stations and hotels are taken care of, which were absolutely faultless - Thank you!
I have also passed on your business card and my commendation to the TCI representative here at Uhde India Limited Mumbai. I hope he will be able to refer more clients to Nature Safari.
Best of luck in the future and thank you for a wonderful holiday.
Tiger, tiger, tiger, we met a tiger! Unbelievably beautiful tiger is in front of us, 5m ahead of us. He watched me, and I watched him.
I visited several national parks in India, of course including Ranthamnbore. But I had the best experience in Bandhavgarh National Park. Much more chances to see tigers, good trail to the Fort learning Hindu gods and history, and a nice friendly lodge, Tiger Den Resort. We all enjoyed staying Tiger Den Resort. With beautiful gardens, spacious rooms with hot water and especially the foods were excellent. Shailendra and all the staff at the Resort were so kind and helpful. We saw tigers every day. Sharad, thank you very much for all your efforts and services. You are the most reliable operator in India. I organized the tour to India so many times, and I feel getting tired of it. But I would like to come back here again next year. It is worth spending so much time and money. It is worth traveling a long way by train to Bandhavgarh.
We had a slide show after dinner at the Forest Lodge by the naturalist, Ansar Khan.
|
# -*- coding: utf-8 -*-
"""Tests for the user interface."""
#
# (C) Pywikibot team, 2008-2015
#
# Distributed under the terms of the MIT license.
#
# NOTE FOR RUNNING WINDOWS UI TESTS
#
# Windows UI tests have to be run using the tests\ui_tests.bat helper script.
# This will set PYTHONPATH and PYWIKIBOT2_DIR, and then run the tests. Do not
# touch mouse or keyboard while the tests are running, as this might disturb the
# interaction tests.
#
# The Windows tests were developed on a Dutch Windows 7 OS. You might need to adapt the
# helper functions in TestWindowsTerminalUnicode for other versions.
#
# For the Windows-based tests, you need the following packages installed:
# - pywin32, for clipboard access, which can be downloaded here:
# http://sourceforge.net/projects/pywin32/files/pywin32/Build%20218/
# make sure to download the package for the correct python version!
#
# - pywinauto, to send keys to the terminal, which can be installed using:
# easy_install --upgrade https://pywinauto.googlecode.com/files/pywinauto-0.4.2.zip
#
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import inspect
import io
import logging
import os
import subprocess
import sys
import time
if os.name == "nt":
from multiprocessing.managers import BaseManager
import threading
try:
import win32api
except ImportError:
win32api = None
try:
import pywinauto
except ImportError:
pywinauto = None
try:
import win32clipboard
except ImportError:
win32clipboard = None
import pywikibot
from pywikibot.bot import (
ui, DEBUG, VERBOSE, INFO, STDOUT, INPUT, WARNING, ERROR, CRITICAL
)
from pywikibot.tools import PY2
from pywikibot.userinterfaces import (
terminal_interface_win32, terminal_interface_base, terminal_interface_unix,
)
from tests.aspects import TestCase
from tests.utils import unittest, FakeModule
if sys.version_info[0] > 2:
    # Python 3 has no separate ``unicode`` type; alias it to ``str`` so the
    # rest of this module can reference ``unicode`` on both major versions.
    unicode = str
class Stream(object):

    """Handler for a StringIO or BytesIO instance able to patch itself."""

    def __init__(self, name, patched_streams):
        """
        Create a new stream with a StringIO or BytesIO instance.

        @param name: The part after 'std' (e.g. 'err').
        @type name: str
        @param patched_streams: A mapping which maps the original stream to
            the patched stream.
        @type patched_streams: dict
        """
        # Python 3 std streams carry text, Python 2 streams carry bytes.
        buffer_cls = io.StringIO if sys.version_info[0] > 2 else io.BytesIO
        self._stream = buffer_cls()
        self._name = 'std' + name
        self._original = getattr(sys, self._name)
        patched_streams[self._original] = self._stream

    def __repr__(self):
        return '<patched {0} {1!r} wrapping {2!r}>'.format(
            self._name, self._stream, self._original)

    def reset(self):
        """Reset own stream."""
        self._stream.truncate(0)
        self._stream.seek(0)
if os.name == "nt":
    # Windows-only scaffolding: the interaction tests drive a second Python
    # interpreter (the "slave") through a multiprocessing manager so that
    # keystrokes can be sent to a real terminal window.

    class pywikibotWrapper(object):

        """pywikibot wrapper class exposed over the manager connection."""

        def init(self):
            """Force pywikibot to resolve its program directory."""
            pywikibot.version._get_program_dir()

        def output(self, *args, **kwargs):
            """Proxy pywikibot.output() in the slave interpreter."""
            return pywikibot.output(*args, **kwargs)

        def request_input(self, *args, **kwargs):
            """Start pywikibot.input() on a background thread.

            The call blocks until input arrives, so it runs on a thread;
            the result is collected later via get_input().
            """
            self.input = None

            def threadedinput():
                self.input = pywikibot.input(*args, **kwargs)
            self.inputthread = threading.Thread(target=threadedinput)
            self.inputthread.start()

        def get_input(self):
            """Wait for the pending input() call and return its result."""
            self.inputthread.join()
            return self.input

        def set_config(self, key, value):
            """Set a pywikibot.config attribute in the slave interpreter."""
            setattr(pywikibot.config, key, value)

        def set_ui(self, key, value):
            """Set a pywikibot.ui attribute in the slave interpreter."""
            setattr(pywikibot.ui, key, value)

        def cls(self):
            """Clear the terminal window."""
            os.system('cls')

    class pywikibotManager(BaseManager):

        """pywikibot manager class."""

        pass

    # str() keeps the typeid a native str on Python 2 (this module imports
    # unicode_literals).
    pywikibotManager.register(str('pywikibot'), pywikibotWrapper)
    _manager = pywikibotManager(
        address=('127.0.0.1', 47228),
        authkey=b'4DJSchgwy5L5JxueZEWbxyeG')
    # When started with --run-as-slave-interpreter, this module serves the
    # wrapper object forever instead of running the test suite.
    if len(sys.argv) > 1 and sys.argv[1] == "--run-as-slave-interpreter":
        s = _manager.get_server()
        s.serve_forever()
def patched_print(text, targetStream):
    """Write *text* to the patched replacement of *targetStream*."""
    # Windows wraps std streams in UnicodeOutput; resolve those through
    # their underlying ._stream attribute.
    try:
        replacement = patched_streams[targetStream]
    except KeyError:
        assert isinstance(targetStream, pywikibot.userinterfaces.win32_unicode.UnicodeOutput)
        assert targetStream._stream
        replacement = patched_streams[targetStream._stream]
    org_print(text, replacement)
def patched_input():
    """Read one line from the fake stdin, without surrounding whitespace."""
    line = strin._stream.readline()
    return line.strip()
# Mapping from each original sys.std* stream to its in-memory replacement;
# patched_print uses it to redirect UI output.
patched_streams = {}
strout = Stream('out', patched_streams)
strerr = Stream('err', patched_streams)
# stdin is read (not written) by the UI, so it is kept out of the mapping.
strin = Stream('in', {})
# Shortcuts to the underlying buffers used throughout the tests.
newstdout = strout._stream
newstderr = strerr._stream
newstdin = strin._stream
if sys.version_info[0] == 2:
    # In Python 2 the sys.std* streams use bytes instead of unicode.
    # But this module is using unicode_literals so plain literals generate
    # unicode; convert those back into bytes before buffering them.
    original_write = newstdin.write
    def encoded_write(text):
        """Encode unicode text as UTF-8 before writing it to the buffer."""
        if isinstance(text, unicode):
            text = text.encode('utf8')
        original_write(text)
    newstdin.write = encoded_write
# Keep references to the real UI hooks so unpatch() can restore them.
org_print = ui._print
org_input = ui._raw_input
def patch():
    """Install the test doubles as UI hooks and clear all stream buffers."""
    for stream in (strout, strerr, strin):
        stream.reset()
    ui._print = patched_print
    ui._raw_input = patched_input
def unpatch():
    """Restore the original terminal print/input hooks."""
    ui._raw_input = org_input
    ui._print = org_print
# All pywikibot logging goes through the 'pywiki' logger.
logger = logging.getLogger('pywiki')
# Minimal 'extra' dict supplying the custom record fields used by the tests.
loggingcontext = {'caller_name': 'ui_tests',
                  'caller_file': 'ui_tests',
                  'caller_line': 0,
                  'newline': '\n'}
class UITestCase(unittest.TestCase):

    """Base class for tests running against the patched terminal streams."""

    net = False

    def setUp(self):
        """Install the stream doubles and force a deterministic UI config."""
        patch()
        pywikibot.config.colorized_output = True
        pywikibot.config.transliterate = False
        pywikibot.ui.transliteration_target = None
        pywikibot.ui.encoding = 'utf-8'

    def tearDown(self):
        """Put the original terminal hooks back."""
        unpatch()

    def _encode(self, string, encoding='utf-8'):
        # Python 3 streams carry text; Python 2 streams expect encoded bytes.
        return string if sys.version_info[0] > 2 else string.encode(encoding)
class TestTerminalOutput(UITestCase):

    """Terminal output tests."""

    def _expect(self, out, err):
        """Assert the exact contents captured on stdout and stderr."""
        self.assertEqual(newstdout.getvalue(), out)
        self.assertEqual(newstderr.getvalue(), err)

    def testOutputLevels_logging_debug(self):
        logger.log(DEBUG, 'debug', extra=loggingcontext)
        self._expect('', '')

    def testOutputLevels_logging_verbose(self):
        logger.log(VERBOSE, 'verbose', extra=loggingcontext)
        self._expect('', '')

    def testOutputLevels_logging_info(self):
        logger.log(INFO, 'info', extra=loggingcontext)
        self._expect('', 'info\n')

    def testOutputLevels_logging_stdout(self):
        logger.log(STDOUT, 'stdout', extra=loggingcontext)
        self._expect('stdout\n', '')

    def testOutputLevels_logging_input(self):
        logger.log(INPUT, 'input', extra=loggingcontext)
        self._expect('', 'input\n')

    def testOutputLevels_logging_WARNING(self):
        logger.log(WARNING, 'WARNING', extra=loggingcontext)
        self._expect('', 'WARNING: WARNING\n')

    def testOutputLevels_logging_ERROR(self):
        logger.log(ERROR, 'ERROR', extra=loggingcontext)
        self._expect('', 'ERROR: ERROR\n')

    def testOutputLevels_logging_CRITICAL(self):
        logger.log(CRITICAL, 'CRITICAL', extra=loggingcontext)
        self._expect('', 'CRITICAL: CRITICAL\n')

    def test_output(self):
        pywikibot.output('output', toStdout=False)
        self._expect('', 'output\n')

    def test_output_stdout(self):
        pywikibot.output('output', toStdout=True)
        self._expect('output\n', '')

    def test_warning(self):
        pywikibot.warning('warning')
        self._expect('', 'WARNING: warning\n')

    def test_error(self):
        pywikibot.error('error')
        self._expect('', 'ERROR: error\n')

    def test_log(self):
        pywikibot.log('log')
        self._expect('', '')

    def test_critical(self):
        pywikibot.critical('critical')
        self._expect('', 'CRITICAL: critical\n')

    def test_debug(self):
        pywikibot.debug('debug', 'test')
        self._expect('', '')

    def test_exception(self):
        class TestException(Exception):
            """Test exception."""
        try:
            raise TestException('Testing Exception')
        except TestException:
            pywikibot.exception('exception')
        self._expect('', 'ERROR: TestException: Testing Exception\n')

    def test_exception_tb(self):
        class TestException(Exception):
            """Test exception."""
        try:
            raise TestException('Testing Exception')
        except TestException:
            pywikibot.exception('exception', tb=True)
        self.assertEqual(newstdout.getvalue(), '')
        stderrlines = newstderr.getvalue().split('\n')
        self.assertEqual(stderrlines[0], 'ERROR: TestException: Testing Exception')
        self.assertEqual(stderrlines[1], 'Traceback (most recent call last):')
        self.assertEqual(stderrlines[3], "    raise TestException('Testing Exception')")
        self.assertTrue(stderrlines[4].endswith(': Testing Exception'))
        self.assertNotEqual(stderrlines[-1], '\n')
class TestTerminalInput(UITestCase):

    """Terminal input tests."""

    input_choice_output = 'question ([A]nswer 1, a[n]swer 2, an[s]wer 3): '

    def _feed_stdin(self, text):
        """Queue *text* on the fake stdin and rewind it for reading."""
        newstdin.write(text)
        newstdin.seek(0)

    def testInput(self):
        self._feed_stdin('input to read\n')
        answer = pywikibot.input('question')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), 'question: ')
        self.assertIsInstance(answer, unicode)
        self.assertEqual(answer, u'input to read')

    def _call_input_choice(self):
        """Run input_choice with three answers; return the user's choice."""
        choice = pywikibot.input_choice(
            'question',
            (('answer 1', u'A'), ('answer 2', u'N'), ('answer 3', u'S')),
            u'A',
            automatic_quit=False)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertIsInstance(choice, unicode)
        return choice

    def testInputChoiceDefault(self):
        # Bare newline selects the default answer 'A' (returned lowercased).
        self._feed_stdin('\n')
        self.assertEqual(self._call_input_choice(), 'a')

    def testInputChoiceCapital(self):
        self._feed_stdin('N\n')
        answer = self._call_input_choice()
        self.assertEqual(newstderr.getvalue(), self.input_choice_output)
        self.assertEqual(answer, 'n')

    def testInputChoiceNonCapital(self):
        self._feed_stdin('n\n')
        answer = self._call_input_choice()
        self.assertEqual(newstderr.getvalue(), self.input_choice_output)
        self.assertEqual(answer, 'n')

    def testInputChoiceIncorrectAnswer(self):
        # An invalid answer causes the prompt to be printed a second time.
        self._feed_stdin('X\nN\n')
        answer = self._call_input_choice()
        self.assertEqual(newstderr.getvalue(),
                         self.input_choice_output * 2)
        self.assertEqual(answer, 'n')
@unittest.skipUnless(os.name == 'posix', 'requires Unix console')
class TestTerminalOutputColorUnix(UITestCase):
    """Terminal output color tests."""
    # \03{...} is pywikibot's inline color markup (\03 is the ETX control char).
    str1 = 'text \03{lightpurple}light purple text\03{default} text'
    def testOutputColorizedText(self):
        """Color markup becomes ANSI escape sequences on a Unix console."""
        pywikibot.output(self.str1)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(
            newstderr.getvalue(),
            'text \x1b[95mlight purple text\x1b[0m text\n')
    def testOutputNoncolorizedText(self):
        """With colors disabled the markup is stripped and ' ***' appended."""
        pywikibot.config.colorized_output = False
        pywikibot.output(self.str1)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(
            newstderr.getvalue(),
            'text light purple text text ***\n')
    str2 = ('normal text \03{lightpurple} light purple ' +
            '\03{lightblue} light blue \03{previous} light purple ' +
            '\03{default} normal text')
    def testOutputColorCascade_incorrect(self):
        """Test incorrect behavior of testOutputColorCascade."""
        # NOTE: pins the current (known-wrong) handling of \03{previous}.
        pywikibot.output(self.str2)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(
            newstderr.getvalue(),
            'normal text \x1b[95m light purple ' +
            '\x1b[94m light blue \x1b[95m light purple ' +
            '\x1b[0m normal text\n')
@unittest.skipUnless(os.name == 'posix', 'requires Unix console')
class TestTerminalUnicodeUnix(UITestCase):
    """Terminal output tests for unix."""
    def testOutputUnicodeText(self):
        """Non-ASCII output reaches stderr (UTF-8 encoded bytes on Py2)."""
        pywikibot.output(u'Заглавная_страница')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(
            newstderr.getvalue(),
            self._encode(u'Заглавная_страница\n', 'utf-8'))
    def testInputUnicodeText(self):
        """A unicode prompt is echoed and unicode input returned intact."""
        newstdin.write(self._encode(u'Заглавная_страница\n', 'utf-8'))
        newstdin.seek(0)
        returned = pywikibot.input(u'Википедию? ')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(
            newstderr.getvalue(),
            self._encode(u'Википедию? ', 'utf-8'))
        self.assertIsInstance(returned, unicode)
        self.assertEqual(returned, u'Заглавная_страница')
@unittest.skipUnless(os.name == 'posix', 'requires Unix console')
class TestTransliterationUnix(UITestCase):
    """Terminal output transliteration tests."""
    def testOutputTransliteratedUnicodeText(self):
        """Characters outside the target encoding are transliterated.

        Each substituted character is wrapped in ESC[93m ... ESC[0m so it
        stands out from literal output.
        """
        pywikibot.ui.encoding = 'latin-1'
        pywikibot.config.transliterate = True
        pywikibot.output(u'abcd АБГД αβγδ あいうえお')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(
            newstderr.getvalue(),
            'abcd \x1b[93mA\x1b[0m\x1b[93mB\x1b[0m\x1b[93mG\x1b[0m'
            '\x1b[93mD\x1b[0m \x1b[93ma\x1b[0m\x1b[93mb\x1b[0m\x1b[93mg'
            '\x1b[0m\x1b[93md\x1b[0m \x1b[93ma\x1b[0m\x1b[93mi\x1b[0m'
            '\x1b[93mu\x1b[0m\x1b[93me\x1b[0m\x1b[93mo\x1b[0m\n')
@unittest.skipUnless(os.name == 'nt', 'requires Windows console')
class WindowsTerminalTestCase(UITestCase):
    """MS Windows terminal tests."""
    @classmethod
    def setUpClass(cls):
        """Skip unless all Windows automation packages are importable."""
        if os.name != 'nt':
            raise unittest.SkipTest('requires Windows console')
        if not win32api:
            raise unittest.SkipTest('requires Windows package pywin32')
        if not win32clipboard:
            raise unittest.SkipTest('requires Windows package win32clipboard')
        if not pywinauto:
            raise unittest.SkipTest('requires Windows package pywinauto')
        try:
            # pywinauto 0.5.0
            cls._app = pywinauto.Application()
        except AttributeError as e1:
            # older pywinauto exposes Application under the submodule
            try:
                cls._app = pywinauto.application.Application()
            except AttributeError as e2:
                raise unittest.SkipTest('pywinauto Application failed: %s\n%s'
                                        % (e1, e2))
        super(WindowsTerminalTestCase, cls).setUpClass()
    @classmethod
    def setUpProcess(cls, command):
        """Spawn *command* in a new console and attach pywinauto to it."""
        si = subprocess.STARTUPINFO()
        si.dwFlags = subprocess.STARTF_USESTDHANDLES
        cls._process = subprocess.Popen(command,
                                        creationflags=subprocess.CREATE_NEW_CONSOLE)
        cls._app.connect_(process=cls._process.pid)
        # set truetype font (Lucida Console, hopefully)
        try:
            window = cls._app.window_()
        except Exception as e:
            cls.tearDownProcess()
            raise unittest.SkipTest('Windows package pywinauto could not locate window: %r'
                                    % e)
        try:
            window.TypeKeys('% {UP}{ENTER}^L{HOME}L{ENTER}', with_spaces=True)
        except Exception as e:
            cls.tearDownProcess()
            raise unittest.SkipTest('Windows package pywinauto could not use window TypeKeys: %r'
                                    % e)
    @classmethod
    def tearDownProcess(cls):
        """Kill the console process started by setUpProcess."""
        cls._process.kill()
    def setUp(self):
        """Empty the clipboard before each test."""
        super(WindowsTerminalTestCase, self).setUp()
        self.setclip(u'')
    def waitForWindow(self):
        """Poll until the attached console window accepts input."""
        while not self._app.window_().IsEnabled():
            time.sleep(0.01)
    def getstdouterr(self):
        """Return the console's visible text via select-all + copy.

        Polls until the clipboard no longer holds the sentinel value,
        i.e. until the copy actually happened.
        """
        sentinel = u'~~~~SENTINEL~~~~cedcfc9f-7eed-44e2-a176-d8c73136c185'
        # select all and copy to clipboard
        self._app.window_().SetFocus()
        self.waitForWindow()
        self._app.window_().TypeKeys('% {UP}{UP}{UP}{RIGHT}{DOWN}{DOWN}{DOWN}{ENTER}{ENTER}',
                                     with_spaces=True)
        while True:
            data = self.getclip()
            if data != sentinel:
                return data
            time.sleep(0.01)
    def setclip(self, text):
        """Put *text* on the Windows clipboard as unicode."""
        win32clipboard.OpenClipboard()
        win32clipboard.SetClipboardData(win32clipboard.CF_UNICODETEXT, unicode(text))
        win32clipboard.CloseClipboard()
    def getclip(self):
        """Return the clipboard contents with normalized line endings."""
        win32clipboard.OpenClipboard()
        data = win32clipboard.GetClipboardData(win32clipboard.CF_UNICODETEXT)
        win32clipboard.CloseClipboard()
        data = data.split(u'\x00')[0]
        data = data.replace(u'\r\n', u'\n')
        return data
    def sendstdin(self, text):
        """Paste *text* into the console via the clipboard (menu Paste)."""
        self.setclip(text.replace(u'\n', u'\r\n'))
        self._app.window_().SetFocus()
        self.waitForWindow()
        self._app.window_().TypeKeys('% {UP}{UP}{UP}{RIGHT}{DOWN}{DOWN}{ENTER}', with_spaces=True)
class TestWindowsTerminalUnicode(WindowsTerminalTestCase):
    """MS Windows terminal unicode tests."""
    @classmethod
    def setUpClass(cls):
        """Launch this file as a slave interpreter and connect the manager."""
        super(TestWindowsTerminalUnicode, cls).setUpClass()
        fn = inspect.getfile(inspect.currentframe())
        cls.setUpProcess(['python', 'pwb.py', fn, '--run-as-slave-interpreter'])
        _manager.connect()
        # Proxy executing pywikibot calls inside the slave console process.
        cls.pywikibot = _manager.pywikibot()
    @classmethod
    def tearDownClass(cls):
        """Drop the proxy and kill the slave console."""
        del cls.pywikibot
        cls.tearDownProcess()
    def setUp(self):
        """Reset the slave interpreter's UI configuration; clear its screen."""
        super(TestWindowsTerminalUnicode, self).setUp()
        self.pywikibot.set_config('colorized_output', True)
        self.pywikibot.set_config('transliterate', False)
        self.pywikibot.set_config('console_encoding', 'utf-8')
        self.pywikibot.set_ui('transliteration_target', None)
        self.pywikibot.set_ui('encoding', 'utf-8')
        self.pywikibot.cls()
    def testOutputUnicodeText_no_transliterate(self):
        """Unicode output shows up unchanged on the real console."""
        self.pywikibot.output(u'Заглавная_страница')
        self.assertEqual(self.getstdouterr(), u'Заглавная_страница\n')
    def testOutputUnicodeText_transliterate(self):
        """Unicode output is transliterated for a latin-1 target."""
        self.pywikibot.set_config('transliterate', True)
        self.pywikibot.set_ui('transliteration_target', 'latin-1')
        self.pywikibot.output(u'Заглавная_страница')
        self.assertEqual(self.getstdouterr(), 'Zaglavnaya_stranica\n')
    def testInputUnicodeText(self):
        """Unicode can be typed into and read back from the console."""
        self.pywikibot.set_config('transliterate', True)
        self.pywikibot.request_input(u'Википедию? ')
        self.assertEqual(self.getstdouterr(), u'Википедию?')
        self.sendstdin(u'Заглавная_страница\n')
        returned = self.pywikibot.get_input()
        self.assertEqual(returned, u'Заглавная_страница')
class TestWindowsTerminalUnicodeArguments(WindowsTerminalTestCase):

    """MS Windows terminal unicode argument tests."""

    @classmethod
    def setUpClass(cls):
        """Open a plain cmd console to type the command line into."""
        super(TestWindowsTerminalUnicodeArguments, cls).setUpClass()
        cls.setUpProcess(['cmd', '/k', 'echo off'])

    @classmethod
    def tearDownClass(cls):
        """Kill the console process opened by setUpClass."""
        # Fixed: removed a stray dead 'pass' statement after this call.
        cls.tearDownProcess()

    def testOutputUnicodeText_no_transliterate(self):
        """Unicode command-line arguments survive handleArgs unchanged."""
        self.sendstdin(
            u"python -c \"import os, pywikibot; os.system('cls'); "
            u"pywikibot.output(u'\\n'.join(pywikibot.handleArgs()))\" "
            u"Alpha Bετα Гамма دلتا\n")
        lines = []
        # Poll the console until the output appears: os.system('cls') wipes
        # the screen first, so eventually only the four arguments remain.
        while len(lines) < 4 or lines[0] != 'Alpha':
            lines = self.getstdouterr().split('\n')
            time.sleep(1)
        # empty line is the new command line
        self.assertEqual(lines, [u'Alpha', u'Bετα', u'Гамма', u'دلتا', u''])
class FakeUITest(TestCase):
    """Test case to allow doing uncolorized general UI tests."""
    net = False
    # Expected rendering once color markup is stripped; the trailing ' ***'
    # is what the UI appends when colors are disabled.
    expected = 'Hello world you! ***'
    expect_color = False
    ui_class = terminal_interface_base.UI
    def setUp(self):
        """Create dummy instances for the test and patch encounter_color."""
        super(FakeUITest, self).setUp()
        if PY2:
            self.stream = io.BytesIO()
        else:
            self.stream = io.StringIO()
        self.ui_obj = self.ui_class()
        self._orig_encounter_color = self.ui_obj.encounter_color
        self.ui_obj.encounter_color = self._encounter_color
        # Index into self._colors; advanced by subclass _encounter_color.
        self._index = 0
    def tearDown(self):
        """Unpatch the encounter_color method."""
        self.ui_obj.encounter_color = self._orig_encounter_color
        super(FakeUITest, self).tearDown()
        # Each test sets self._colors; all expected colors must have been
        # consumed (or none, when colors are expected to be suppressed).
        self.assertEqual(self._index,
                         len(self._colors) if self.expect_color else 0)
    def _getvalue(self):
        """Get the value of the stream and also decode it on Python 2."""
        value = self.stream.getvalue()
        if PY2:
            value = value.decode(self.ui_obj.encoding)
        return value
    def _encounter_color(self, color, target_stream):
        """Patched encounter_color method."""
        # The uncolorized base UI must never emit a color at all.
        assert False, 'This method should not be invoked'
    def test_no_color(self):
        """Test a string without any colors."""
        self._colors = tuple()
        self.ui_obj._print('Hello world you!', self.stream)
        self.assertEqual(self._getvalue(), 'Hello world you!')
    def test_one_color(self):
        """Test a string using one color."""
        # Each entry is (color name, number of characters printed with it).
        self._colors = (('red', 6), ('default', 10))
        self.ui_obj._print('Hello \03{red}world you!', self.stream)
        self.assertEqual(self._getvalue(), self.expected)
    def test_flat_color(self):
        """Test using colors with defaulting in between."""
        self._colors = (('red', 6), ('default', 6), ('yellow', 3), ('default', 1))
        self.ui_obj._print('Hello \03{red}world \03{default}you\03{yellow}!',
                           self.stream)
        self.assertEqual(self._getvalue(), self.expected)
    def test_stack_with_pop_color(self):
        """Test using stacked colors and just poping the latest color."""
        self._colors = (('red', 6), ('yellow', 6), ('red', 3), ('default', 1))
        self.ui_obj._print('Hello \03{red}world \03{yellow}you\03{previous}!',
                           self.stream)
        self.assertEqual(self._getvalue(), self.expected)
    def test_stack_implicit_color(self):
        """Test using stacked colors without poping any."""
        self._colors = (('red', 6), ('yellow', 6), ('default', 4))
        self.ui_obj._print('Hello \03{red}world \03{yellow}you!', self.stream)
        self.assertEqual(self._getvalue(), self.expected)
    def test_one_color_newline(self):
        """Test with trailing new line and one color."""
        self._colors = (('red', 6), ('default', 11))
        self.ui_obj._print('Hello \03{red}world you!\n', self.stream)
        self.assertEqual(self._getvalue(), self.expected + '\n')
class FakeUIColorizedTestBase(TestCase):

    """Base class for test cases requiring that colorized output is active."""

    expect_color = True

    def setUp(self):
        """Remember the configured value and force colorized output on."""
        super(FakeUIColorizedTestBase, self).setUp()
        self._old_config = pywikibot.config2.colorized_output
        pywikibot.config2.colorized_output = True

    def tearDown(self):
        """Restore the colorized_output value remembered in setUp."""
        pywikibot.config2.colorized_output = self._old_config
        super(FakeUIColorizedTestBase, self).tearDown()
class FakeUnixTest(FakeUIColorizedTestBase, FakeUITest):

    """Test case to allow doing colorized Unix tests in any environment."""

    net = False
    expected = 'Hello world you!'
    ui_class = terminal_interface_unix.UnixUI

    def _encounter_color(self, color, target_stream):
        """Verify that the written data, color and stream are correct."""
        self.assertIs(target_stream, self.stream)
        idx = self._index
        self._index = idx + 1
        self.assertEqual(color, self._colors[idx][0])
        # Everything up to and including this color must be written already.
        written = sum(entry[1] for entry in self._colors[:idx + 1])
        self.assertEqual(len(self.stream.getvalue()), written)
class FakeWin32Test(FakeUIColorizedTestBase, FakeUITest):
    """
    Test case to allow doing colorized Win32 tests in any environment.
    This only patches the ctypes import in the terminal_interface_win32 module.
    As the Win32CtypesUI is using the std-streams from another import these will
    be unpatched.
    """
    net = False
    expected = 'Hello world you!'
    ui_class = terminal_interface_win32.Win32CtypesUI
    def setUp(self):
        """Patch the ctypes import and initialize a stream and UI instance."""
        super(FakeWin32Test, self).setUp()
        self._orig_ctypes = terminal_interface_win32.ctypes
        # Fake module tree so SetConsoleTextAttribute calls land in this test.
        ctypes = FakeModule.create_dotted('ctypes.windll.kernel32')
        ctypes.windll.kernel32.SetConsoleTextAttribute = self._handle_setattr
        terminal_interface_win32.ctypes = ctypes
        # A non-None console handle makes the UI take the colorized path.
        self.stream._hConsole = object()
    def tearDown(self):
        """Unpatch the ctypes import and check that all colors were used."""
        terminal_interface_win32.ctypes = self._orig_ctypes
        super(FakeWin32Test, self).tearDown()
    def _encounter_color(self, color, target_stream):
        """Call the original method."""
        self._orig_encounter_color(color, target_stream)
    def _handle_setattr(self, handle, attribute):
        """Dummy method to handle SetConsoleTextAttribute."""
        self.assertIs(handle, self.stream._hConsole)
        color = self._colors[self._index][0]
        self._index += 1
        # Translate the symbolic color into the Win32 attribute value.
        color = terminal_interface_win32.windowsColors[color]
        self.assertEqual(attribute, color)
        self.assertEqual(len(self.stream.getvalue()),
                         sum(e[1] for e in self._colors[:self._index]))
class FakeWin32UncolorizedTest(FakeWin32Test):
    """Test case to allow doing uncolorized Win32 tests in any environment."""
    net = False
    expected = 'Hello world you! ***'
    expect_color = False
    def setUp(self):
        """Change the local stream's console to None to disable colors."""
        super(FakeWin32UncolorizedTest, self).setUp()
        # Without a console handle the Win32 UI emits plain text only.
        self.stream._hConsole = None
if __name__ == "__main__":
    try:
        try:
            unittest.main()
        except SystemExit:
            # unittest.main() exits; swallow it so the cleanup below runs.
            pass
    finally:
        # Restore the real UI hooks even if tests left them patched.
        unpatch()
|
Last fall, the USRF awarded 10 $1,000 Kevin Higgins College Scholarships to deserving high school seniors who went on to play rugby in college, bringing the total number of Higgins Scholarships awarded to 62 since the program was created in 2008. This year, the USRF will provide up to 10 Higgins Scholarships.
These independent scholarships are open to all graduating high school rugby players in the US who will be pursuing their rugby career while continuing their education at the collegiate level.
"The Rugby Foundation is proud to be a part of keeping the Kevin Higgins' legacy alive," said USRF Executive Director Brian Vizard, himself a teammate of Higgins with OMBAC and the U.S. National Team. "It never ceases to amaze me the impressive qualifications that each year's Higgins Scholarship applicants have. Every year it gets harder and harder for the selection committee. But that's a good problem to have."
Read more about the 2014 Kevin Higgins College Scholarship recipients: Liam Wynne, Domonique Bellinger, Tanner Pope, Olivia Fiatoa, Brandon Puccini, Amy Plambeck, Brendan Murphy, Elona Williams, Reed Heynen, and Courtney Bridges.
Apply for a 2015 Kevin Higgins College Scholarship. Applications must be postmarked by July 31, 2015.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.