text
stringlengths 29
850k
|
|---|
#!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zipline.api import order, record, symbol
def initialize(context):
    """Run once before the backtest starts; this example needs no setup."""
def handle_data(context, data):
    # Called on every bar: place a market order for 10 shares of AAPL and
    # record its current price so it appears in the backtest results frame.
    order(symbol('AAPL'), 10)
    record(AAPL=data.current(symbol('AAPL'), 'price'))
def analyze(context=None, results=None):
    """Plot the backtest results: portfolio value above, AAPL price below.

    Note: this function can be removed if running this algorithm on
    quantopian.com.
    """
    import matplotlib.pyplot as plt
    # Two stacked panels; the lower one shares the time axis with the upper.
    value_axis = plt.subplot(211)
    results.portfolio_value.plot(ax=value_axis)
    value_axis.set_ylabel('Portfolio value (USD)')
    price_axis = plt.subplot(212, sharex=value_axis)
    results.AAPL.plot(ax=price_axis)
    price_axis.set_ylabel('AAPL price (USD)')
    # Enlarge the figure and display it.
    plt.gcf().set_size_inches(18, 8)
    plt.show()
def _test_args():
"""Extra arguments to use when zipline's automated tests run this example.
"""
import pandas as pd
return {
'start': pd.Timestamp('2014-01-01', tz='utc'),
'end': pd.Timestamp('2014-11-01', tz='utc'),
}
|
Billi Lacombe was born and raised in Jennings, Louisiana; from a very early age she dreamed of helping women and children in her community. Her dream was realized in 1998 when she was led to a position with Faith House.
Now, as the Executive Director of Faith House, she is able to contribute to the improvement of the lives of women and children in our community on a daily basis.
|
import re
import subprocess
from datetime import datetime as DT, date
# Path to the todo.sh shell configuration file (sourced by get_todo_env).
CONFIG_FILE="~/.todo.cfg"
# A key:value metadata tag anywhere in a token, e.g. "due:2014-01-01".
_tagTest = re.compile(r'.+:.+')
# A complete priority token, e.g. "(A)" (must end the token).
_prioTest = re.compile(r'\([A-Z]\)$')
# A bare priority letter, e.g. "A".
_validPrio = re.compile(r'[A-Z]')
def _makeDate(word):
if word is None: return None
if isinstance(word, date): return word
return DT.strptime(word, "%Y-%m-%d").date()
def _isDate(word):
    """Return True if *word* is acceptable to _makeDate, else False.

    The original used the Python-2-only ``except Exception, e`` syntax
    (a SyntaxError under Python 3) and caught every exception; strptime
    failures raise ValueError (bad format) or TypeError (non-string),
    so only those are treated as "not a date".
    """
    try:
        _makeDate(word)
    except (ValueError, TypeError):
        return False
    return True
def _isPriority(word):
    """True when *word* is a full priority token like "(A)"."""
    return _prioTest.match(word) is not None
def _isProject(word):
return word.startswith("+")
def _isContext(word):
return word.startswith("@")
def _isTag(word):
    """True when *word* carries key:value metadata (e.g. "due:2014-01-01")."""
    return _tagTest.search(word) is not None
def get_todo_env(key):
    """Source the todo.sh config file and return the value of shell variable *key*.

    Runs a shell that sources CONFIG_FILE and echoes ``$key``; the echoed
    value is returned with surrounding whitespace stripped.
    """
    cmd = ". %s; echo $%s" % (CONFIG_FILE, key)
    output = subprocess.check_output([cmd], shell=True)
    return output.strip()
class Task(object):
    """A single todo.txt task.

    Holds the task text plus its todo.txt metadata: an optional priority
    "(A)".."(Z)", creation/completion dates, +projects, @contexts and
    key:value tags.  str(task) renders the canonical todo.txt line and
    Task.parse() is (approximately) the inverse.
    """

    def __init__(self, task="", projects=None, contexts=None, tags=None, autodate=False):
        self.priority = ''
        self._create = None
        self._finish = None
        self.task = task
        self.done = False
        self.projects = projects if projects else list()
        self.contexts = contexts if contexts else list()
        self.tags = tags if tags else dict()
        if autodate:
            # Stamp today's date as the creation date.
            self.create = date.today()

    def do(self, value=True):
        """Mark the task done; pass a falsy value to "undo" it."""
        if bool(value):
            self.done = True
            self.finish = DT.now().date()
        else:
            self.done = False
            self.finish = None

    @property
    def priority(self):
        return self._priority

    @priority.setter
    def priority(self, value):
        """Accept "" (no priority), a "(A)"-style token, or a bare letter."""
        if not value:
            self._priority = ""
            return
        value = value.upper()
        if _isPriority(value):
            self._priority = value
        elif len(value) == 1 and _validPrio.match(value):
            self._priority = "(%s)" % value
        else:
            # ValueError is more precise than the bare Exception raised
            # before; existing "except Exception" callers still catch it.
            raise ValueError('Bad prio')

    @property
    def create(self):
        return self._create

    @create.setter
    def create(self, val):
        self._create = _makeDate(val)

    @property
    def finish(self):
        return self._finish

    @finish.setter
    def finish(self, val):
        self._finish = _makeDate(val)

    def __str__(self):
        """Render the canonical todo.txt line for this task."""
        # Question - strip prio as option?
        tok = []
        finish = str(self.finish) if self.finish else ""
        create = str(self.create) if self.create else ""
        if self.done:
            tok.append("x")
            # Priority is dropped for completed tasks, matching what
            # "todo.sh do [TASK]" writes.
            tok.extend([finish, create, self.task])
        else:
            tok.extend([self.priority, create, self.task])
        tok.extend(self.projects)
        tok.extend(self.contexts)
        # .items() instead of the Python-2-only .iteritems().
        tok.extend("%s:%s" % (k, v) for k, v in self.tags.items())
        return " ".join(v for v in tok if v)

    @staticmethod
    def parse(todoline):
        """Parse one todo.txt line into a Task; blank lines yield None."""
        leading_space = False
        bare_words = []
        task = Task()
        if todoline.strip(' \t\n') == "":
            return None
        if todoline.startswith(' '):
            leading_space = True
        tokens = todoline.split(" ")
        if not leading_space:
            # Drop the empty tokens produced by runs of spaces.
            tokens = [tok for tok in tokens if tok]
        else:
            # Collapse the leading whitespace into a single first token so
            # the indentation survives a round-trip through str().
            leader = []
            while tokens[0] == '':
                leader.append(tokens.pop(0))
            tokens.insert(0, " ".join(leader))
        # Header fields (done marker, completion date, priority) are only
        # recognized on lines without leading whitespace.
        if not leading_space:
            if tokens[0] == 'x':
                task.done = True
                tokens.pop(0)
                # Completion date immediately follows the "x" marker.
                # (Guard added: a bare "x" line no longer raises IndexError.)
                if tokens and _isDate(tokens[0]):
                    task.finish = tokens.pop(0)
            if tokens:
                if _isPriority(tokens[0]):
                    task.priority = tokens.pop(0)
                else:
                    bare_words.append(tokens.pop(0))
        # Creation date still valid for leading space... TODO: verify
        # NOTE(review): for completed tasks ("x <finish> <create> ...") the
        # non-priority else-branch above consumes the creation date into
        # bare_words, so task.create is never set for them -- verify intent.
        if tokens and _isDate(tokens[0]):
            task.create = tokens.pop(0)
        # Now the meat: classify the remaining tokens.
        for word in tokens:
            if _isProject(word):
                task.projects.append(word)
            elif _isContext(word):
                task.contexts.append(word)
            elif _isTag(word):
                k, v = word.partition(":")[::2]
                task.tags[k] = v
            else:
                bare_words.append(word)
        task.task = " ".join(bare_words)
        return task
class TodoFile(object):
    """A todo.txt file on disk: a list of Task objects plus load/save."""

    def __init__(self, filename=""):
        self.filename = filename
        # Initialize the task list so __str__/save work before open() is
        # called (the original raised AttributeError in that case).
        self.tasks = []

    def __str__(self):
        return "\n".join(str(task) for task in self.tasks) + "\n"

    def open(self):
        """Load tasks from self.filename; an unreadable file means no tasks.

        NOTE(review): lines are .strip()ed before parsing, which defeats
        Task.parse's leading-whitespace preservation -- verify intent.
        """
        try:
            with open(self.filename, 'r') as fd:
                self.tasks = [Task.parse(x.strip()) for x in fd.readlines()]
            self.tasks = [x for x in self.tasks if x is not None]
        except (IOError, OSError):
            # Narrowed from a bare except: only I/O failures mean
            # "no tasks yet"; parser bugs now surface instead of hiding.
            self.tasks = []

    def save(self):
        with open(self.filename, 'w') as fd:
            fd.write(str(self))
|
4518 Southwood St, Corpus Christi, TX 78415 – Property Overview – 4518 Southwood St, Corpus Christi, TX 78415 is a single family home built in 1947. The $83,000 estimated value is 20.95% less than the median listing price of $105,000 for the.
Texas A&M Corpus Christi. Wells Fargo Advisors is a trade name used by Wells Fargo Clearing Services, LLC and Wells Fargo Advisors Financial Network, LLC, Members SIPC, separate registered broker-dealers and non-bank affiliates of Wells Fargo & Company.
Mortgage Lenders In Texas | Corpus Christi FHA Loans | Cash Out. – Best mortgage broker serving houston, Spring, The Woodlands, and the entire state of Texas. We have the best FHA, VA, and USDA loans in Houston and.
Corpus Christi is a major port city on the southeast of Texas. Corpus Christi is a larger city, and has a number of thriving industries, including tourism and film. Tourists flock to North Beach to see attractions like the Texas State Aquarium every year.
Here is the definitive list of Corpus Christi’s apartment finding services as rated by the Corpus Christi, TX community. Represent clients who are looking to buy, sell and rent properties. stay knowledgeable about regional real estate prices, mortgages, market conditions and related.
Corpus Christi, TX Mortgage Lenders & Reviews | Zillow – Search for Corpus Christi, TX mortgage lenders and read thousands of customer reviews on the largest online directory of licensed lenders.
3,973 jobs available in Corpus Christi, TX on Indeed.com. Apply to Receptionist, Executive Assistant, front desk agent and more! Successful out of state candidates must be able to obtain a valid Texas driver’s license within 90 days of hire.
Home – IBC First Equity – IBC First Equity. IBC First Equity provides 2nd Mortgage solutions for your Borrowers. We deliver fast, common-sense underwriting with a wide array of comprehensive and competitively priced products.
View Corpus Christi, TX Home Values, Housing Market. – realtor.com – Corpus Christi, TX. Local community home values, housing market & schools. Corpus Christi is a city in Texas and consists of 6 neighborhoods. There are 2,136 homes for sale, ranging from $7K to $5.5M.
248 Tarlton St, Corpus Christi, TX 78415 – Property Overview – 248 Tarlton St, Corpus Christi, TX 78415 is a single family home built in 1936. The $102,500 estimated value is 2.38% less than the median listing price of $105,000 for the Central.
Corpus Christi TX Refinance: Corpus Christi TX Home Loans | San. – Corpus Christi Home Loan Refinance & San Antonio, TX Mortgage EXPERTS! The most experienced & ethical Portland Mortgage Brokers offering you the best .
222 Richard St, Corpus Christi, TX 78415 – Property Overview – 222 Richard St, Corpus Christi, TX 78415 is a single family home built in 1983. The $86,700 estimated value is 17.43% less than the median listing price of $105,000 for the Central.
|
#!/usr/bin/env python3
# Show i3's current binding mode in a centered Tk popup window.
import i3ipc
# NOTE(review): the shebang says python3 but "Tkinter" is the Python 2
# module name; under Python 3 this import fails (it would need
# "import tkinter as Tkinter") -- confirm which interpreter runs this.
import Tkinter
import sys
import re
from functools import reduce
def parse_entry(acc, entry):
    """Fold one "key=value" argument into the accumulator dict *acc*.

    Splits only on the first "=", so values may themselves contain "="
    (the original's unbounded split raised ValueError in that case).
    Returns *acc* for use with functools.reduce.
    """
    key, value = entry.split("=", 1)
    acc[key] = value
    return acc
def parse_args(args):
    """Build an option dict from every "key=value" argument after argv[0]."""
    return reduce(parse_entry, args[1:], {})
# Parse "--key=value" style options from the command line, then open an
# IPC connection to the running i3 window manager.
args = parse_args(sys.argv)
i3 = i3ipc.Connection()
def center(toplevel):
    """Move *toplevel* to the center of the screen, keeping its size."""
    toplevel.update_idletasks()
    screen_w = toplevel.winfo_screenwidth()
    screen_h = toplevel.winfo_screenheight()
    # Current geometry looks like "WxH+X+Y"; take the "WxH" part.
    win_w, win_h = (int(part) for part in toplevel.geometry().split('+')[0].split('x'))
    offset_x = screen_w / 2 - win_w / 2
    offset_y = screen_h / 2 - win_h / 2
    toplevel.geometry("%dx%d+%d+%d" % (win_w, win_h, offset_x, offset_y))
def show_window(label_list):
    """Pop up a dock-type Tk window listing the current mode's entries.

    label_list[0] is the mode title (used only for the ignore filter);
    the remaining entries become the label text, one per line.  Returns
    the Tk root, or None when the title matches --ignore_titles.
    """
    font_name = args.get("--font", "Arial")
    font_size = int(float(args.get("--font_size", "12")))
    ignore_pattern = re.compile(args.get("--ignore_titles", "$^"))
    if ignore_pattern.match(label_list[0]) is not None:
        return
    root = Tkinter.Tk()
    root.attributes("-type", "dock")
    root.minsize(width=int(float(args.get("--min_width", "500"))), height=1)
    body_text = reduce(lambda joined, line: joined + '\n' + line, label_list[1:])
    label = Tkinter.Label(root, text=body_text, justify=Tkinter.LEFT, anchor='w')
    label.config(font=(font_name, font_size),
                 background=args.get("--background", "#ffffff"),
                 foreground=args.get("--foreground", "#000000"))
    label.pack(fill='both')
    root.title("i3-pretty-mode-title")
    center(root)
    # TODO(me)figure out scaling
    #root.tk.call('tk', 'scaling', 4.0)
    return root
def destroy_root():
    """Tear down the currently displayed popup window, if any."""
    global lastRoot
    if lastRoot is not None:
        lastRoot.destroy()
        lastRoot = None
# The Tk root of the currently shown mode popup (None when nothing shown).
lastRoot = None
def on_mode(i3, e):
    """i3 "mode" event handler: show a popup for any non-default mode."""
    global lastRoot
    destroy_root()
    # Mode strings use " :: " to separate the title from the entries.
    if e.change != "default":
        lastRoot = show_window(e.change.split(" :: "))
# Subscribe to "mode" events so on_mode fires on every binding-mode change
# (including the change back to "default").
i3.on("mode", on_mode)
# Start the main loop and wait for events to come in.
i3.main()
|
My Biggest Mistake was not trusting myself to make the right decision with the information I knew at the time. I didn’t have all of the answers – how to execute, how to find the money, how to deal with the ups and downs of being an entrepreneur – and I let that feeling of being out of my comfort zone make my decision for me. The lesson for next time? Be comfortable with being uncomfortable. Trust my gut more, and be in the moment when struck by a big idea that wants to be real.
Now, you decide whether I should have gone forward.
Here are the ideas that I had and decided not to do. Read them with the knowledge that you have in 2013, and decide whether you would pursue them today: I would.
Big Idea: Make the Grocery Store Easier.
Idea #1: Imagine if the next time you went into your local grocery store, there was a way for your phone to tell you the location of every product in the store, to remember your past preferences for shopping, and even to direct you in an optimal aisle-by-aisle route to minimize the time in store? And what if you received loyalty rewards and marketing offers that pertained to you? And what if you could check out of the store simply by scanning each item with your phone as you placed into the cart. That idea sounds promising and real in 2013, and quite similar to the idea my friends are pursuing at qThru.
When I thought of a very similar idea in 1999, even though the hardware and software were off the shelf and readily available, I didn’t go and build it. I made the decision that “I wasn’t the type to do that,” and “I’m not an idea guy,” and let self-doubt make my decision for me. I can’t take that decision back, and I know that what I was really feeling in the moment was, “oh crap. I have no idea how to even begin thinking about that, much less how to build and monetize it.” And it happened again.
Big Idea #2: Make Waiting at a Restaurant Better.
Idea #2: Imagine you arrive at a popular restaurant. Because they are very busy, they ask you for your phone number so that they can text message you when your table is available. At the same time, they ask you to join their loyalty program so that you can participate in drink specials, learn about special events, and play games or trivia while you are waiting in line. It exists today – it’s called TurnStar – and I’ve used it. It’s pretty slick.
Why didn’t I build my version? It was called TextMyTable, and I was ready to go with the vision, the business plan, and the execution play. It was September 2008. Then all of a sudden the economy did a flip-flop and all of our assumptions about what was a normal business turned on their head. Or did they? I was stuck because I didn’t know how to raise the money to start the business or to grow the business in such a way that it generated operating capital.
In both of these ideas (and in others it’s not important to share here), I had an idea for a product or a service that was innovative. The ideas capitalized on a consumer need, solved an actual problem and had a reasonable chance at being successful. We could argue about the size of the market and the relative degree of success, and the fact remains that they were good ideas. And I made a mistake in not pursuing them.
What did I learn and what would I do next time?
The first thing I learned is that you can’t find out whether you’ll succeed with an idea until you try it. (Duh.) The ideas I think that would have been successful might have been abject failures, wild successes, or more likely somewhere in between. And I don’t know because I didn’t try them.
The second thing I learned from these mistakes is that collaboration is everything. I needed to do more to ask people to tear apart the idea instead of trying to build the whole business from start to finish inside my head. Groups like Startup Edition are a great place to get feedback, learn from other perspectives, and to reframe your questions.
And finally, I learned from my mistakes that it’s impossible to know what you don’t know until you do it. (Sounds like a Zen koan, doesn’t it?) What can you do about that? Admit that you’re going to make mistakes. Try to make different ones the next time you approach a problem, and learn from the results. Trust your gut.
Previous Post Makers need to make in the real world.
|
#!/usr/bin/python
from bs4 import BeautifulSoup
from datetime import date
import datetime
from icalendar import Calendar, Event
import pytz
import string
import urllib.request, urllib.error, urllib.parse
import re
# DEBUG toggles the local scratch output path vs. the production one.
DEBUG = False
# Index of the first real conference row in the source table
# (earlier rows are junk/header tags).
start_index_conf_shortnames = 2
if DEBUG:
    OUTFILE = '/tmp/conferences.ics'
else:
    OUTFILE = '/var/www/confical/conferences.ics'
class Conf():
    """One conference: short tag, submission deadline, and optional venue/dates.

    ``dates`` is a tuple of datetimes -- ``(start,)`` or ``(start, end)`` --
    and defaults to ``("", "")`` when unknown.
    """

    def __init__(self, title, deadline, description, venue, dates=("", "")):
        self.title = title              # short tag, e.g. "icalp"
        self.deadline = deadline        # datetime of the submission deadline
        self.description = description  # long name / free-text description
        self.venue = venue              # location string, may be empty
        self.dates = dates

    def getDescription(self):
        """Return a human-readable description with venue and dates."""
        # Local renamed from "string", which shadowed the stdlib module.
        text = self.description + " at "
        if self.venue:
            text += self.venue
        # Guard: the default dates are ("", ""), which has no strftime;
        # the original raised AttributeError for such conferences.
        if self.dates and hasattr(self.dates[0], "strftime"):
            text += "\n From: " + self.dates[0].strftime('%d.%m.%Y')
            if len(self.dates) == 2 and hasattr(self.dates[1], "strftime"):
                text += " to: " + self.dates[1].strftime('%d.%m.%Y')
        return text

    def __repr__(self):
        return self.title + ": " + str(self.deadline.day) + "." + str(self.deadline.month)

    def isValid(self):
        # Placeholder: deadline/date validation is currently disabled.
        return True
        # return (self.deadline!="None") and (self.dates[1]!="None")
class Website(object):
    """Template base for scrapers.

    Subclasses provide download() (raw page text) and
    parseConferencesFromString() (text -> list of Conf objects).
    """

    def parse(self):
        """Download the page and return the parsed conference list."""
        return self.parseConferencesFromString(self.download())
class Friedetzky(Website):
    """Scrape Tom Friedetzky's conference page (an HTML <ul> of entries)."""

    # NOTE(review): the original URL literal had a leading space, which
    # urllib rejects as an unknown URL type; stripped here.
    URL = 'http://www.dur.ac.uk/tom.friedetzky/conf.html'

    def download(self):
        """Fetch and return the raw HTML of the conference page."""
        response = urllib.request.urlopen(self.URL)
        return response.read()

    def parseConferencesFromString(self, string):
        """Extract a list of Conf objects from the page HTML."""
        soup = BeautifulSoup(string)
        header = [x for x in soup.findAll('ul')]
        confs = [x for x in header[0]]
        confs_cleaned = [str(x).strip() for x in confs if x]
        conferences = []
        for x in confs_cleaned:
            if not x or len(x.strip()) == 0:
                continue
            tag = re.search("<strong>(.*?)</strong>", x).group(1)
            longname = re.search("</strong>\n.*?\((.*?)\)", x).group(1)
            deadline = re.search("Submission deadline: <b>(.*?)</b>", x).group(1)
            # Entries use "Conference dates:" for ranges and
            # "Conference date:" for single-day events.
            dates_and_location = re.search("Conference dates: (.*?)<br.*\n(.*?)<br", x)
            date_and_location = re.search("Conference date: (.*?)<br.*\n(.*?)<br", x)
            match = dates_and_location if dates_and_location else date_and_location
            dates = match.group(1)
            location = match.group(2)
            deadline_date = datetime.datetime.strptime(deadline, "%B %d, %Y")
            if dates_and_location:
                # "Month d, Y - Month d, Y" date range.
                datessplit = re.search("(.*) - (.*)", dates)
                startconf_date = datetime.datetime.strptime(datessplit.group(1), "%B %d, %Y")
                endconf_date = datetime.datetime.strptime(datessplit.group(2), "%B %d, %Y")
                conf = Conf(tag, deadline_date, longname, location, (startconf_date, endconf_date))
            else:
                # Single-day conference.  The original referenced
                # startconf_date here while it was still undefined
                # (NameError); parse the lone date instead.
                startconf_date = datetime.datetime.strptime(dates, "%B %d, %Y")
                conf = Conf(tag, deadline_date, longname, location, (startconf_date,))
            conferences.append(conf)
        return conferences
class Farishi(Website):
    """Scrape Farshi's conference table from a locally saved copy of the page."""

    URL = 'http://cs.yazd.ac.ir/farshi/conf.html'

    def download(self):
        # The real download is complicated because the live page is framed;
        # parse a locally saved static copy instead.
        with open('test-farishi.html', 'r') as f:
            return f.read()

    def parseConferencesFromString(self, string):
        """Extract Conf objects (tag + deadline only) from the table rows.

        The table provides no long name or location, so those stay empty.
        """
        parsed_html = BeautifulSoup(string)
        trs = parsed_html.body.findAll('tr')
        conferences = []
        for elem in trs[2:]:  # the first two rows are junk tags
            tds = elem.findAll('td')
            tag = tds[0].text
            # Deadlines appear both as "02 Jan 2014" and "02 January 2014";
            # the bare except is narrowed to the strptime failure.
            try:
                deadline_date = datetime.datetime.strptime(tds[1].text, "%d %b %Y")
            except ValueError:
                deadline_date = datetime.datetime.strptime(tds[1].text, "%d %B %Y")
            conferences.append(Conf(tag, deadline_date, "", ""))
        return conferences
        # Removed: unreachable "return table" after the return above --
        # "table" was never defined and would have raised NameError.
def gatherTwo(list):
    """Pair up consecutive elements: [a, b, c, d, e] -> [(a, b), (c, d)].

    An unmatched trailing element is dropped (zip stops at the shorter
    half).  The parameter name shadows the builtin ``list``; kept as-is
    for interface compatibility.
    """
    evens = list[0::2]
    odds = list[1::2]
    return [(x, y) for x, y in zip(evens, odds)]
def constructCalendar(conferences):
    """Build an iCalendar with one deadline event per conference.

    Each event runs from midnight to 20:00 UTC on the deadline day and
    is titled "<TAG> Deadline".
    """
    cal = Calendar()
    cal.add('prodid', '-//conferences//mxm.dk')
    cal.add('version', '2.0')
    for c in conferences:
        event = Event()
        # str.upper() replaces string.upper(), which was removed from the
        # string module in Python 3 and crashed here with AttributeError.
        event.add('summary', c.title.upper() + ' Deadline')
        event.add('description', c.getDescription())
        year = c.deadline.year
        month = c.deadline.month
        day = c.deadline.day
        event.add('dtstart', datetime.datetime(year, month, day, 0, 0, tzinfo=pytz.utc))
        event.add('dtend', datetime.datetime(year, month, day, 20, 0, tzinfo=pytz.utc))
        event.add('dtstamp', datetime.datetime(year, month, day, 0, 0, tzinfo=pytz.utc))
        cal.add_component(event)
    return cal
def writeCal(calendar):
    # Serialize the icalendar.Calendar to OUTFILE as binary iCalendar data.
    with open(OUTFILE, 'wb') as f:
        f.write(calendar.to_ical())
# Scrape Friedetzky's conference page and publish the iCalendar file.
website = Friedetzky()
tmp = website.parse()
writeCal(tmp)
# Alternative source (local static copy), currently disabled:
# website = Farishi()
# tmp = website.parse()
# print(tmp)
|
Sony Cdx Gt07 Wiring Diagram. 2003 International 4300 Radio Wiring Diagram. 1756 Ia16 Wiring Diagram.
. 2004 Pontiac Grand Prix Dash Wiring Diagram. Basic Trailer Light Wiring Diagram.
. 2008 Chevy Malibu Stereo Wiring Diagram.
. Central Heating Wiring Diagram Pump Overrun.
. Bmw E92 Radio Wiring Diagram. Neff Cooker Hood Wiring Diagram. 2004 Pontiac Grand Prix Dash Wiring Diagram.
. Best Wiring Diagram. 1995 Acura Integra Stereo Wiring Diagram.
. Gmc Sierra Stereo Wiring Diagram.
Ceiling diagram alternator delco ford wiring obd2 12 fan volt. Vehicle security system way australia chapman gang switch one 3. Best gt07 grand 2004 prix pontiac dash cdx sony 1970. 2008 chevy transfer dart rts diagramming dodge generac sentences rules. Sierra basic club gmc car electric laser stereo malibu ignition. For beetle civic light trailer 99 honda fuse vw 1967. 2002 neff integra 1995 cooker doorbell acura wrx transformer subaru. Ia16 diagrams wisdom 2003 international 1756 hood cars teeth with. Bmw lambretta central 1999 lx 4300 radio e92 electronic box. Answers micrologix pump overrun venn heating 4runner and toyota problems. Plug 2500 silverado 7 visio volcano caldera 2013 hopkins 1400. Fence hilux uml float kubota.
coil crop digestion geology component. S10 verizon level lion fios sea pioneer anatomy tv deh. Contactor p3500 three mobile home furnace pole 2 coleman lighting. House motor duct detector submersible phase electrical est 1997 starter. 4 ohm jaguar exhaust mk4 astra xj6 caravan 1996 2001. Taurus half 1998 blank a speaker am mopar body court. Tutorials single actuator 240 basketball door power brake lock plc. Relay stator force toggle controller gy6 pin uverse 150cc nid. Erwin frank 2007 focus solar heater water boundary connection convergent. 94 ram tree seating thermostat plum center cherokee pruning heat. Elite element legacy deere kenmore dryer john space 4240 d16y8. Wire boat tobin pyramid printable 24 harness serpentine hot.
aveo. Rcd f150 carrier parts lights mitsubishi multiple eclipse belt 30ra. Dot wall cat5e electron solenoid pig cb400 real heart plate. Turbo warn schaltplan xdc cable winch setup lithium comcast sharan. Mk2 2005 subwoofer holden bosch nissan sentra av receiver granada. 1991 jeep architecture mk5 to common draw how golf software. Worksheet 85 trrs what branching trs hayward camera pool 22re. Leaf 1992 stem tacho scania tail torana is truck xterra. Engine remote avital load unit circuit dol 4105l siemens start. Rna dna neon boot vs 8 cat e46 celica 5. Paul in statistics hp gibson handle scatter define les monocot. Pickup database onan online root hfs section tongue cross prs. Simple usb 6 generator dpdt crane plan 01 gmos y. Port.
2016 briggs stratton food chain voltage ocean regulator ecosystem. Humbucker yanmar evinrude hss 90 ficht 2000 racquetball stratocaster fender. Potentiometer 13 tracker msd tractor 1978 240v fire distributor street. Chart model mosaic membrane flow tacoma ct70 of cell fluid. S e350 up set courtroom clipsal telephone pregnant 2006 dog. Jack sl1 mini bay dichotomous omron saturn my4n cooper key. Hdmi hella autocad driving uk buick 1974 photocell 24vdc lesabre. Converter phone ground series oscillator australian vga bridge ranger wein. Recon rv towbar rm2611 carburetor xplod culligan gas softener dometic. Animal expedition line t6360b1028 room reading ribosomes 1994 chambered honeywell. 7mm notebook netbook baldor switches 0 dc pdf.
5hp tablet. Kwh meter manual composite ct boiler liberty arctic plow snow. Off panel ks3 pull refraction grid push wrangler rj45 bt. 940 connector suspension pot volvo on pressure the e39 air. 2500hd class tool vivresaville sport depth tekonsha free labels p3. Creator overhead guitar rat yamaha headlight dakota muscle leviton dimmer. Systems v engineering g2 vectra vauxhall hitch parallel aircon ray. Corolla volkswagen chevrolet 1500 tahoe 1993 gt450u jetta 93 label. Battery ear led dump double 1973 1969 trail camaro outlet. Transporter t5 circuits bus batts trolling traffic 1990 cytokinesis nz. Netball standard t auris 23 glock labeled disassembly max tiida. Ice jvc spider monkey maker refrigerator transit photosynthesis audio.
amana. Horn lead 220 steps tin flasher precedent rj11 ethernet baseboard. Defy vtx camry fridge commander 1300 cat5 amplifier employee data. Turf mapping 240sx tiger dayton alarms escape scag management deck. Gt710 neutrik 96 arlec gfci fennec rx300 lexus fox soldering. Road harley z650 anleitung davidson king alarm digital sensor 6a. Jazz rb20det brakes bass cobalt marcus ecu do miller triple. Danfoss cooling valve position turn fusion mid universal signal ezgo. Pack cart xlr ez go k20 02 sharkawifarm mk1 lancer. Paragon 20 8145 farmall gm 246 gentec case origami eagle. Window blacktop sr20det gt mustang no pots standby automotive s13. W900 hand welder timing ac kenworth palmistry spot bose deer. Mount whirlpool lima four.
estate simplified m8000 801 butcher bell. Bean module 11 shapes part lunar seed process apollo kidney. Reversible sonoma atv marine human skeletal detailed can skeleton bones. 12000 gts india make kia helix lb scn badlands frontier. Lake xc90 city breakaway drum suburban spectra cem airport salt. Satellite 512 motorcycle kdc kenwood 152 kvt weathertron trane lt. M215 f avenger 150 faucet enphase schneider 350z rheem 2010. Datatool und lc1d09 network modern horns vintage drawing telecaster timer. Gretsch 8pin 6pin viper intermatic bubble variac are electromatic adapter. Sarcomere semi automatic pistol powerstroke choke 5902 isolation conditioner glow. Layout zone banshee cover baseball fisher field flower breakout problem..
Internal rodeo puch ups emergency palmar squid isuzu kit pathfinder. 120 tiller velux fuel hydro ryobi 1972 f350 98 bar. Patch 1965 charger 600 schumacher holley schematic se125a tp100 solved. Cruise caliber rostra 70 round cfm colorado carb impala control. Codes flat color web portal washer ibanez durango shed split. Template rg exit 293q z3 ahu 350 white card rodgers. Mower rtd lawn numbers their lab b project guide g. Server radiator eyeshadow cbr600rr pbt steering crossover gf30 cat6 application. Mitochondrion utility ktp alpine structure horse two easy rxv 80. Carbon rig copyright drawings conditioning architectural iron explanation z grounding. Zero crydom solid state h 39 v6 simplicity dual cctv. Cisco topology blazer v70.
mirage wan 2100 icons electrolux vt. Strat 1989 v8 550 fs type x program marathon stihl. Dishwasher mppt 1966 rca charge dollar maytag library p90 marley. Side auto upper leg by teco spa tachometer muscles gauge. Framework wye strategic 1976 star earth planning delta volleyball typical. Energy xl1200 05 help active crust sedona sailboat f250 mercury. Sets directv 990 whole brown maths blinkers david ls1 understanding. W210 360 kawasaki mercedes style benz xbox dvr 300 bayou. Batteries capacitor mand junction elbow racing laredo 480v kohler pj. Ts painless diesel canyon 9m nordyne whelen lt1 24v pv. Cadet compressor cycle foot slt1554 use cub emg extractor bathroom. Accord pro lan bike galant outlets s40 jincheng klf dta. Oil six.
trac switched ow16i allen sigma bradley r33 explorer. Bohr transcription iec microphone translation prepaid the12volt tii cord com. Wheel symbol tech 2266ub throughout rod direct dxt vapour run. Your diva lutron rice twist kyowa museum landscape amp knee. Towing stratus front 1jz workhorse 22 electrics hvac catalina p32. Gsxr transmission 325i socket vtec spotlight corsa 750 protist cucv. Work flush hampton does frigidaire speakers fans cuts arm toilet. Nose anzo telect nest aztek smell actros 8n monsoon rg321mh. Clock from ptac ididit column rds time specifications 4000 motorhome. Mirror 1000 johnson garage 115 69 genie coupe code outboard. Safety digestive vr6 explode g120 piercing 9007 karr hyundai sinamics. Mhl triumph.
induction clark i10 c500 tr6 forklift fj40 circle. At lines magnetic square forester restaurant relationship d editable entity. Motorcraft fasco impreza c4 atx citroen 3g 450w 36 smps. Drw just ms6312 have muscular sending face 2011 i picked. Smith coachmen automobile ao put ats 450 es century 289. Workflow spree antenna supermarket handler t800 86 c residential supply. Design spacer colors behind lungs 15 5mm sni ribs pac. 626 mazda shield beginners well chords ring delay autopage daikin. Player rs cd nutone gt6 involving using hyster 915 solving. Intellipak blade pulley 1 squier 110 mercruiser ata bath all. Snapper 7th portrait studio sundance mtd grade w211 plant p. Circular coronary chinese scholastic 1977 hyperion.
government tao sector 125cc. Charging chloride odyssey therapy massage example hunter tornado erosion ion. School hr lenel access urban bulldog middle ge 3sgte images. Sl2 digistat 1984 glenfield express 91 drayton marlin planner 60. 1985 autoradio mixtrax plot gift uterus magi gynecology nungsanleitung be. Corvette an allison atom ust1102 swm pass geyser seymour long. Fossil x5 da screen stepper lite sub 172 cessna motorized. Interior zip zoning sonos steam archaebacteria eubacteria arduino boost spark. Sensing sc1 thermistor motorhomes york explained fleetwood tsunami mictuning winding. Potential apc 220v wash ultra linear refrigerators channel squirrel distribution. P2900mp polo locking flying spur wiki bentley embraco alley.
molex. 200cc stuffy lifan 200 svt machine ladder 2jz mri only. 225 showing headlamp xl b2200 lincoln sled rewiring taste buds. 95 rover worksheets ladestecker land straight freelander 30ampere year carroll. Tach cruiser shunt pt 6b42 monster virago autometer leeson shift. Clarion payroll breaker sequence print trip source diabetes open lennox. 8210 step mortise 30 college l5 dance 3500 asco xmd3. Dissection poster male christmas coldplay speed peugeot cavalier frog partner. Porsche 924 alternate sun life stove erp opener craftsman layers. Emanage grote directory sample pirate blue greddy 900 l cdi. Inside passtime usq1152 sockets pilot 650 ship i320 ballast lower. Pico beaver mic hostel dmx explorelearning ritetemp outer 8030c.
breeze. Belling container fairlane oakwood 1964 multiplication insert massager prong altima. Mollier virtual 1915 bank si america 5961c8c0925d6 vmware slim units. Ss valves goldwing stadium g5 trailers crabtree chambers cb450 dressing. Tilt hps gauges watt stewart factory peavey g6 warner predator. Sonata 97 pollak b2000 venny kc disconnect low 18awg 1986. Directed dummies smart mondeo weg 2009 opel electronics 1979 spinal. Handshake bulb tl tcp credit accel bone clavicle tj l9000. Eye clutch transaction viair mono butterfly speakon firing order fungal. Travel sip 57 call oven gt57up ba samsung hotpoint read. Haul l8148e u orbit consumer duncan aquastat axiom falcon winnebago. Skull o2 chieftain dimmable polaris brain ddx318.
smoke 5th tridonic. Microscope 700r4 mosfet sagittal burner compound 45wx4 sutures gt550ui domestic. Computer 06 math minn big talon rm212f outcomes kota tex. Plumbing eiger context windstar suzuki 4x4 400 names if16 fat. 30r l14 accounting sky symbols q throttle 4l60e scooter cash. Vav dexter elantra er block freezer motorola 100w true r6. 120v trooper adder duraspark pir 1956 macaw pontoon quad piaa. Labeling r 530 08 ma airbag audi b6 pictures a4. Fibromyalgia miata wh diagramm points 18 tender 8000 ramsey x2. Lionel pertronix phrase offense ignitor sentence nuheat prepositional train pc. Alpha keeps jacuzzi turning ib 5305v price indicators igniter studies. Fill fj1200 microsoft venture borewell gen x4 mk fog 125. Rotork 997.
infrastructure e36 10v 3000 grafik 000 wiper analog. Diode inch soul trx 5901 laptop toshiba testing stop outback. Wig where lymph mp342u my obd0 nodes wag obd1 2017. Doerr hz excel vue 110cc aprilaire 07 foreman lr22132 2015. Beko bronco humidifier down hager beckett punch intertherm test 8t. Rs485 nema watershed jazzmaster professional industrial american zachman colours nikki. 480 quiz.
2006 chrysler sebring wiring diagram.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the activities app.

    Creates the Activity, Category, Episode, Evaluation and Review models,
    then attaches Activity's relation fields (category, primary_club,
    secondary_clubs, submitter).  All verbose names and choice labels are
    Arabic, written as unicode escapes.  Do not edit applied migrations.
    """

    dependencies = [
        ('clubs', '0001_initial'),
        # Resolves AUTH_USER_MODEL even when a custom user model is swapped in.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # A student activity proposal, with approval-workflow flags
        # (is_editable / is_deleted) and expected headcounts.
        migrations.CreateModel(
            name='Activity',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=200, verbose_name='\u0627\u0633\u0645 \u0627\u0644\u0646\u0634\u0627\u0637')),
                ('description', models.TextField(verbose_name='\u0648\u0635\u0641 \u0627\u0644\u0646\u0634\u0627\u0637')),
                ('public_description', models.TextField(help_text='\u0647\u0630\u0627 \u0647\u0648 \u0627\u0644\u0648\u0635\u0641 \u0627\u0644\u0630\u064a \u0633\u064a\u0639\u0631\u0636 \u0644\u0644\u0637\u0644\u0627\u0628', verbose_name='\u0627\u0644\u0648\u0635\u0641 \u0627\u0644\u0625\u0639\u0644\u0627\u0645\u064a')),
                ('requirements', models.TextField(verbose_name='\u0645\u062a\u0637\u0644\u0628\u0627\u062a \u0627\u0644\u0646\u0634\u0627\u0637', blank=True)),
                ('submission_date', models.DateTimeField(auto_now_add=True, verbose_name='\u062a\u0627\u0631\u064a\u062e \u0627\u0644\u0625\u0631\u0633\u0627\u0644')),
                ('edit_date', models.DateTimeField(auto_now=True, verbose_name='\u062a\u0627\u0631\u064a\u062e \u0627\u0644\u062a\u0639\u062f\u064a\u0644')),
                ('is_editable', models.BooleanField(default=True, verbose_name='\u0647\u0644 \u064a\u0645\u0643\u0646 \u062a\u0639\u062f\u064a\u0644\u0647\u061f')),
                ('is_deleted', models.BooleanField(default=False, verbose_name='\u0645\u062d\u0630\u0648\u0641\u061f')),
                ('inside_collaborators', models.TextField(verbose_name='\u0627\u0644\u0645\u062a\u0639\u0627\u0648\u0646\u0648\u0646 \u0645\u0646 \u062f\u0627\u062e\u0644 \u0627\u0644\u062c\u0627\u0645\u0639\u0629', blank=True)),
                ('outside_collaborators', models.TextField(verbose_name='\u0627\u0644\u0645\u062a\u0639\u0627\u0648\u0646\u0648\u0646 \u0645\u0646 \u062e\u0627\u0631\u062c \u0627\u0644\u062c\u0627\u0645\u0639\u0629', blank=True)),
                ('participants', models.IntegerField(help_text='\u0627\u0644\u0639\u062f\u062f \u0627\u0644\u0645\u062a\u0648\u0642\u0639 \u0644\u0644\u0645\u0633\u062a\u0641\u064a\u062f\u064a\u0646 \u0645\u0646 \u0627\u0644\u0646\u0634\u0627\u0637', verbose_name='\u0639\u062f\u062f \u0627\u0644\u0645\u0634\u0627\u0631\u0643\u064a\u0646')),
                ('organizers', models.IntegerField(help_text='\u0639\u062f\u062f \u0627\u0644\u0637\u0644\u0627\u0628 \u0627\u0644\u0630\u064a\u0646 \u0633\u064a\u0646\u0638\u0645\u0648\u0646 \u0627\u0644\u0646\u0634\u0627\u0637', verbose_name='\u0639\u062f\u062f \u0627\u0644\u0645\u0646\u0638\u0645\u064a\u0646')),
            ],
            options={
                'verbose_name': '\u0646\u0634\u0627\u0637',
                'verbose_name_plural': '\u0627\u0644\u0646\u0634\u0627\u0637\u0627\u062a',
                'permissions': (('view_activity', 'Can view all available activities.'), ('directly_add_activity', 'Can add activities directly, without approval.')),
            },
            bases=(models.Model,),
        ),
        # Hierarchical activity categories (self-referencing via parent).
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ar_name', models.CharField(max_length=50, verbose_name='\u0627\u0633\u0645 \u0627\u0644\u062a\u0635\u0646\u064a\u0641')),
                ('en_name', models.CharField(max_length=50, verbose_name='\u0627\u0633\u0645 \u0627\u0644\u0625\u0646\u062c\u0644\u064a\u0632\u064a')),
                ('description', models.TextField(verbose_name='\u0648\u0635\u0641 \u0627\u0644\u062a\u0635\u0646\u064a\u0641', blank=True)),
                ('parent', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='\u0627\u0644\u062a\u0635\u0646\u064a\u0641 \u0627\u0644\u0623\u0628', blank=True, to='activities.Category', null=True)),
            ],
            options={
                'verbose_name': '\u062a\u0635\u0646\u064a\u0641',
                'verbose_name_plural': '\u0627\u0644\u062a\u0635\u0646\u064a\u0641\u0627\u062a',
            },
            bases=(models.Model,),
        ),
        # A single scheduled occurrence of an activity (date/time/place).
        migrations.CreateModel(
            name='Episode',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('start_date', models.DateField()),
                ('end_date', models.DateField()),
                ('start_time', models.TimeField()),
                ('end_time', models.TimeField()),
                ('location', models.CharField(max_length=128)),
                ('allow_multiple_niqati', models.BooleanField(default=False, verbose_name='\u0627\u0633\u0645\u062d \u0628\u0625\u062f\u062e\u0627\u0644 \u0623\u0643\u062b\u0631 \u0645\u0646 \u0631\u0645\u0632 \u0646\u0642\u0627\u0637\u064a\u061f')),
                ('requires_report', models.BooleanField(default=True)),
                ('can_report_early', models.BooleanField(default=False)),
                ('requires_story', models.BooleanField(default=True)),
                ('activity', models.ForeignKey(to='activities.Activity')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # A user's rating of one episode (quality + relevance scores).
        migrations.CreateModel(
            name='Evaluation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quality', models.PositiveIntegerField(help_text='\u0643\u064a\u0641 \u062a\u0642\u064a\u0645 \u0639\u0645\u0644 \u0627\u0644\u0646\u0627\u062f\u064a \u0641\u064a \u062a\u0646\u0638\u064a\u0645 \u0627\u0644\u0646\u0634\u0627\u0637\u061f', verbose_name='\u062c\u0648\u062f\u0629 \u062a\u0646\u0638\u064a\u0645 \u0627\u0644\u0646\u0634\u0627\u0637')),
                ('relevance', models.PositiveIntegerField(help_text='\u0645\u0627 \u0645\u062f\u0649 \u0645\u0644\u0627\u0621\u0645\u0629 \u0627\u0644\u0646\u0634\u0627\u0637 \u0644\u0627\u0647\u062a\u0645\u0627\u0645 \u0627\u0644\u0637\u0644\u0627\u0628\u061f', verbose_name='\u0645\u0644\u0627\u0621\u0645\u0629 \u0627\u0644\u0646\u0634\u0627\u0637 \u0644\u0627\u0647\u062a\u0645\u0627\u0645 \u0627\u0644\u0637\u0644\u0627\u0628')),
                ('episode', models.ForeignKey(to='activities.Episode')),
                ('evaluator', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': '\u062a\u0642\u064a\u064a\u0645',
                'verbose_name_plural': '\u0627\u0644\u062a\u0642\u064a\u064a\u0645\u0627\u062a',
            },
            bases=(models.Model,),
        ),
        # A presidency/deanship review of an activity submission, with
        # per-field notes and a nullable approve/reject/pending decision.
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('review_date', models.DateTimeField(auto_now_add=True, verbose_name='\u062a\u0627\u0631\u064a\u062e \u0627\u0644\u0645\u0631\u0627\u062c\u0639\u0629')),
                ('edit_date', models.DateTimeField(auto_now=True, verbose_name='\u062a\u0627\u0631\u064a\u062e \u0627\u0644\u062a\u0639\u062f\u064a\u0644', null=True)),
                ('clubs_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0627\u0644\u0623\u0646\u062f\u064a\u0629', blank=True)),
                ('name_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0627\u0644\u0627\u0633\u0645', blank=True)),
                ('datetime_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0627\u0644\u062a\u0627\u0631\u064a\u062e \u0648\u0627\u0644\u0648\u0642\u062a', blank=True)),
                ('description_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0627\u0644\u0648\u0635\u0641', blank=True)),
                ('requirement_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0627\u0644\u0645\u062a\u0637\u0644\u0628\u0627\u062a', blank=True)),
                ('inside_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0627\u0644\u0645\u062a\u0639\u0627\u0648\u0646\u0648\u0646 \u0645\u0646 \u062f\u0627\u062e\u0644 \u0627\u0644\u062c\u0627\u0645\u0639\u0629', blank=True)),
                ('outside_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0627\u0644\u0645\u062a\u0639\u0627\u0648\u0646\u0648\u0646 \u0645\u0646 \u062e\u0627\u0631\u062c \u0627\u0644\u062c\u0627\u0645\u0639\u0629', blank=True)),
                ('participants_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0639\u062f\u062f \u0627\u0644\u0645\u0634\u0627\u0631\u0643\u064a\u0646', blank=True)),
                ('organizers_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u0639\u062f\u062f \u0627\u0644\u0645\u0646\u0638\u0645\u064a\u0646', blank=True)),
                ('submission_date_notes', models.TextField(verbose_name='\u0645\u0644\u0627\u062d\u0638\u0627\u062a \u0639\u0644\u0649 \u062a\u0627\u0631\u064a\u062e \u062a\u0642\u062f\u064a\u0645 \u0627\u0644\u0637\u0644\u0628', blank=True)),
                ('review_type', models.CharField(default=b'P', max_length=1, verbose_name='\u0646\u0648\u0639 \u0627\u0644\u0645\u0631\u0627\u062c\u0639\u0629', choices=[(b'P', '\u0631\u0626\u0627\u0633\u0629 \u0646\u0627\u062f\u064a \u0627\u0644\u0637\u0644\u0627\u0628'), (b'D', '\u0639\u0645\u0627\u062f\u0629 \u0634\u0624\u0648\u0646 \u0627\u0644\u0637\u0644\u0627\u0628')])),
                ('is_approved', models.NullBooleanField(verbose_name='\u0627\u0644\u062d\u0627\u0644\u0629', choices=[(None, '\u0623\u0628\u0642\u0650 \u0645\u0639\u0644\u0642\u064b\u0627'), (True, '\u0627\u0642\u0628\u0644'), (False, '\u0627\u0631\u0641\u0636')])),
                ('activity', models.ForeignKey(verbose_name=' \u0627\u0644\u0646\u0634\u0627\u0637', to='activities.Activity')),
                ('reviewer', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'verbose_name': '\u0645\u0631\u0627\u062c\u0639\u0629',
                'verbose_name_plural': '\u0627\u0644\u0645\u0631\u0627\u062c\u0639\u0627\u062a',
                'permissions': (('view_review', 'Can view all available reviews.'), ('add_deanship_review', 'Can add a review in the name of the deanship.'), ('add_presidency_review', 'Can add a review in the name of the presidency.'), ('view_deanship_review', 'Can view a review in the name of the deanship.'), ('view_presidency_review', 'Can view a review in the name of the presidency.')),
            },
            bases=(models.Model,),
        ),
        # Relation fields added after CreateModel to break circular creation order.
        migrations.AddField(
            model_name='activity',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='\u0627\u0644\u062a\u0635\u0646\u064a\u0641', to='activities.Category', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='activity',
            name='primary_club',
            field=models.ForeignKey(related_name='primary_activity', on_delete=django.db.models.deletion.SET_NULL, verbose_name='\u0627\u0644\u0646\u0627\u062f\u064a \u0627\u0644\u0645\u0646\u0638\u0645', to='clubs.Club', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='activity',
            name='secondary_clubs',
            field=models.ManyToManyField(related_name='secondary_activity', null=True, verbose_name='\u0627\u0644\u0623\u0646\u062f\u064a\u0629 \u0627\u0644\u0645\u062a\u0639\u0627\u0648\u0646\u0629', to='clubs.Club', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='activity',
            name='submitter',
            field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
            preserve_default=True,
        ),
    ]
|
Armaf Armaf Edition One EDP Prima donna, imperial, elite - a seductive fruity, floral bouquet for women with neroli, bitter orange, jasmine, and raspberry mingled with a heart of jasmine, gardenia and rose. The base composes of patchouli, honey and amber. Product #: Regular price: Rs.1300INR1023(Sale ends 01 January ) Available from: DeoBazaar.com Condition: New In stock! Order now!
Explore Armaf | More Eau De Parfum | Why shop from us?
Prima donna, imperial, elite - a seductive fruity, floral bouquet for women with neroli, bitter orange, jasmine, and raspberry mingled with a heart of jasmine, gardenia and rose. The base is composed of patchouli, honey and amber.
|
from django.http import Http404
import json
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from core.apps.decorators import require_request_attributes
from core.apps.tools.common import dump_and_render_json
from . import helper
# Field names exposed for quiz listings — presumably consumed by the helper
# module's serialization; not referenced in this file (TODO confirm usage).
quizzes_fields=['title', 'created_at', 'about', 'id']
@login_required
@require_http_methods(['GET', 'POST'])
def default(request):
    """List quizzes for a container (GET) or create a new quiz (POST).

    GET requires a ``cid`` query parameter identifying the container and
    accepts an optional ``module`` parameter (defaults to ``'course'``).

    Raises:
        Http404: when a GET request lacks the ``cid`` parameter.
    """
    if request.method == 'GET':
        # Which kind of container the quizzes belong to; course by default.
        module = request.GET.get('module', 'course')
        try:
            cid = request.GET['cid']
        except KeyError:
            # A listing without a container id is meaningless.
            raise Http404()
        data = helper.get_list(request, module, cid)
    else:
        # POST: delegate quiz creation to the helper.
        data = helper.create(request)
    # BUG FIX: the original had an unreachable ``raise Http404()`` after this
    # return; it has been removed.
    return dump_and_render_json(request, data)
# read , update , delete
@login_required
@require_http_methods(['GET', 'PUT', 'DELETE'])
def rud(request, id):
    """Read (GET), update (PUT) or delete (DELETE) a single quiz by id."""
    method = request.method
    if method == 'GET':
        data = helper.get_item(request, id)
    elif method == 'PUT':
        data = helper.update(request, id)
    elif method == 'DELETE':
        data = helper.delete(request, id)
    # The decorator guarantees one of the branches above ran, so ``data``
    # is always bound here.
    return dump_and_render_json(request, data)
|
You might be doing onboarding wrong.
The Society for Human Resource Management (SHRM) says employee turnover runs as high as 50% in the first 18 months of employment. Gallup reports that the biggest mistake employers make is failing to properly welcome new employees into the fold via an onboarding process. This has a hugely negative impact on employee engagement and retention.
Today’s employees expect more from a job than just a paycheck. While the paycheck still matters, we know that employees stay in organizations that have great workplace cultures. Here’s how employers can use the onboarding process to improve employee engagement, retention, and workplace culture.
From an employee perspective, taking on a new job is both scary and exciting.
It’s scary because the new employee worries whether they made the right decision. They’ll probably wonder if they’ll make new friends. Or, will the work be interesting and will they be able to learn all they need to know to do a good job?
The exciting part is that they’re on a new adventure in a new place with new people.
This swirling mix of emotions can be harnessed in a way that benefits a company, or squashed flat along with the new employee optimism that almost always follows the signing of an offer letter. What’s the key to capitalizing on these positive feelings so that a business can make every employee’s time productive?
The answer is onboarding, a process that has been proven to help businesses retain employees over the long haul. But if onboarding is so important why are so many companies neglecting it?
What’s Onboarding and Why Does it Matter?
Onboarding always matters, but particularly so during times of record unemployment where the candidate funnel is hair-thin. That's the time when companies need to work harder to retain more employees because there are lots of job opportunities for people to pick from. In some markets, highly sought candidates like Developers receive dozens of calls and emails from recruiters before they have barely walked in your door. If the onboarding process is too awkward they may jump ship. Sadly, this happens all too often.
Gallup reports that only 12% of the American workforce says they had a good onboarding experience.
But what is onboarding, really, and why is it such a big deal?
Onboarding is the process of acclimating new staff into a unique cultural setting that is your business. Most of the time, the new hire experience is pretty awkward; sometimes it can take weeks to even snag login credentials, particularly in big companies.
It gets worse; HR Dive reports that one in 10 new hires say their company forgot it was their first day. How would that make you feel?
Throwing new hires into work immediately without training or context, not socializing -- or even introducing -- them to the rest of the team, focusing on tactical work too early, or not meeting and receiving feedback from managers early and often are more the norm than the exception.
The goal of onboarding, then should be to create a work culture where employee friendships can naturally develop.
Onboarding, when handled properly, is what kicks off that process.
While this throws the traditional one-day onboarding process out the door, there’s enough data showing the correlation between employee engagement and retention and a longer onboarding process that tells us the time extension is worth the effort.
Interestingly, a recent article from HR Morning suggests that onboarding starts before the employee walks in the door. Establishing and communicating the onboarding process to a new hire before they arrive for their first day will help lessen some of the fear they feel about their first day. They suggest sharing basic details that will make their first on-site visit a little easier.
Who will meet them in the lobby?
What’s the best place to snag a coffee when they’re dragging at 1 pm?
What will their first-day schedule look like?
Will they have a mentor?
Is there anything they should read to prepare for day one?
This welcoming email can also share details such as what to wear or a reminder to bring two forms of I.D. for HR to copy. You may want to include a map of the facility if the building is bigger than a breadbox. Perhaps even send a link to the corporate mission statement, although keep in mind the pre-welcome email shouldn’t be too long.
The best part of onboarding should always be making the employee feel the love. Think of it like welcoming someone into your home (you can leave out the hugging). Onboarding on day one should be a process of inviting them in and showing them around so they start to feel comfortable in the space and with the people in it.
You can even skip a lot of the new hire paperwork that used to take up so much time on the first day. Today’s modern payroll and HR platforms can capture data online in advance, so employees can sign up for benefits in advance as part of pre-boarding.
Instead of filling out paperwork during new hire orientation, why not have HR go over employee benefits? The studies show that most employees fail to understand and take advantage of many of the benefit choices available in most companies. Personalizing the onboarding experience by having HR discuss the benefits the candidate will likely be most interested in will increase the chances that they use these perks. Using these perks will help your new employee appreciate the company more and potentially, stay longer.
Equipment needs like logins to technology tools, along with the basic place to sit, a computer, and a phone must be set up before the employee comes in. You would be shocked to realize how many companies get this wrong. Having an IT person or even a technology super user sit down with the new employee and teach them how to navigate unfamiliar (or even vaguely familiar) software or hardware will increase the new employee’s productivity. If your company has branded items like t-shirts or other goodies, why not deck out the space with a few presents that will also promote the company image?
Employer tip: Try incentivizing the mentorship function in some way. Being a mentor takes time and energy. But what about rewarding the mentor with an Amazon gift card?
Schedule a few meetings on the first day, but don’t overwhelm the new employee. Give them an hour or two for self-study so they can relax a little in their new space. It’s always a good idea to pre-load calendar meetings, adding notes on why the meeting occurs and what their role will be.
Set up a knowledge management hub, where all important documents are held. This could be something as simple as a Dropbox folder by department or even a more sophisticated knowledge platform that serves as the hub for information. This could improve employee productivity, whether the person has been on the job three days or 365 days.
Check on the employee and their mentor in the first 30 days. Talking with the mentor about how they think the new employee is doing will help you take the pulse of what needs improvement. Ask the new employee that very same question and then be prepared to spend time revamping what needs to be tweaked to help improve the experience.
Schedule a 90-day review that incorporates feedback to and from the new hire. As part of the process develop a roadmap for the new employee to follow to advance in the company. The roadmap should be based on what the employee wants to learn. At about the 90-day mark, it’s safe to say the new employee will have a decent grasp of what is expected, the company culture, and their place in the workplace. A 90-day review is a good place to really conclude the formal onboarding process and move the employee into the next phase of what will hopefully be a long-term career with the organization.
Onboarding is more than a one-day orientation. A well-designed onboarding process will help new hires learn the ropes more quickly while establishing relationships that entice them to stay with you longer.
Artisan Talent works closely with employers to ensure that new employees are ready to take on their new role. We can help you design an effective onboarding process designed to maximize the experience and effectiveness of every new hire. Contact our talent team to find out more.
Who is Responsible for Employee Engagement and Why Does It Matter?
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
import warnings
import collections
import io
from psd_tools.constants import TaggedBlock, SectionDivider
from psd_tools.decoder.actions import decode_descriptor, UnknownOSType
from psd_tools.utils import read_fmt, read_unicode_string, unpack, debug_view
from psd_tools.decoder import decoders
from psd_tools.reader.layers import Block
# Registry mapping tagged-block keys to decoder callables.  ``register`` is
# used below as a decorator to attach per-key decoder functions.
_tagged_block_decoders, register = decoders.new_registry()

# Simple fixed-format blocks that need no dedicated decoder function.
_tagged_block_decoders.update({
    TaggedBlock.BLEND_CLIPPING_ELEMENTS: decoders.boolean("I"),
    TaggedBlock.BLEND_INTERIOR_ELEMENTS: decoders.boolean("I"),
    TaggedBlock.KNOCKOUT_SETTING: decoders.boolean("I"),
    TaggedBlock.UNICODE_LAYER_NAME: decoders.unicode_string,
    TaggedBlock.LAYER_ID: decoders.single_value("I")  # XXX: there are more fields in docs, but they seem to be incorrect
})
# Parsed representations of individual tagged-block payloads.
SolidColorSettings = collections.namedtuple('SolidColorSettings', 'version data')
MetadataItem = collections.namedtuple('MetadataItem', 'sig key copy_on_sheet_duplication data')
# Commas in the field string are valid namedtuple separators.
ProtectedSetting = collections.namedtuple('ProtectedSetting', 'transparency, composite, position')
TypeToolObjectSetting = collections.namedtuple('TypeToolObjectSetting',
    'version xx xy yx yy tx ty text_version descriptor_version1 text_data')
# Fields below belong to the on-disk format but are not yet parsed — see the
# unreachable tail of _decode_type_tool_object_setting:
#'warp_version descriptor_version2 warp_data'
#'left top right bottom')
class Divider(collections.namedtuple('Divider', 'type key')):
    """A section divider setting: ``type`` is a SectionDivider value and
    ``key`` is the optional 4-character key read from 12-byte blocks."""
    def __repr__(self):
        # Include the human-readable name of the divider type for debugging.
        return "Divider(%r %s, %s)" % (self.type, SectionDivider.name_of(self.type), self.key)
def decode(tagged_blocks):
    """
    Replaces "data" attribute of a blocks from ``tagged_blocks`` list
    with parsed data structure if it is known how to parse it.
    """
    return list(map(parse_tagged_block, tagged_blocks))
def parse_tagged_block(block):
    """
    Replaces "data" attribute of a block with parsed data structure
    if it is known how to parse it.
    """
    # Registry keys are text, so decode the raw 4-byte key first.
    key = block.key.decode('ascii')
    if not TaggedBlock.is_known(key):
        warnings.warn("Unknown tagged block (%s)" % block.key)
    # Fall back to the identity decoder so unknown payloads pass through raw.
    decoder = _tagged_block_decoders.get(key, lambda data: data)
    return Block(key, decoder(block.data))
@register(TaggedBlock.SOLID_COLOR)
def _decode_soco(data):
    """Decode a solid color fill setting block into SolidColorSettings.

    Returns None (with a warning) when the descriptor uses an unknown OS type.
    """
    fp = io.BytesIO(data)
    # BUG FIX: read_fmt returns a tuple (see its other call sites, which all
    # index [0]); the original stored the whole 1-tuple as the version.
    version = read_fmt("I", fp)[0]
    try:
        data = decode_descriptor(None, fp)
        return SolidColorSettings(version, data)
    except UnknownOSType as e:
        warnings.warn("Ignoring solid color tagged block (%s)" % e)
@register(TaggedBlock.REFERENCE_POINT)
def _decode_reference_point(data):
    """Decode a reference point block: a pair of doubles."""
    stream = io.BytesIO(data)
    return read_fmt("2d", stream)
@register(TaggedBlock.SHEET_COLOR_SETTING)
def _decode_color_setting(data):
    """Decode a sheet color setting block: four unsigned 16-bit values."""
    stream = io.BytesIO(data)
    return read_fmt("4H", stream)
@register(TaggedBlock.SECTION_DIVIDER_SETTING)
def _decode_section_divider(data):
    """Decode a section divider (layer group) setting into a Divider."""
    fp = io.BytesIO(data)
    divider_type = read_fmt("I", fp)[0]
    if not SectionDivider.is_known(divider_type):
        warnings.warn("Unknown section divider type (%s)" % divider_type)
    key = None
    # The signature + 4-char key is only present in 12-byte blocks.
    if len(data) == 12:
        signature = fp.read(4)
        if signature != b'8BIM':
            warnings.warn("Invalid signature in section divider block")
        key = fp.read(4).decode('ascii')
    return Divider(divider_type, key)
@register(TaggedBlock.METADATA_SETTING)
def _decode_metadata(data):
    """Decode a metadata setting block into a list of MetadataItem."""
    fp = io.BytesIO(data)
    item_count = read_fmt("I", fp)[0]
    items = []
    for _ in range(item_count):
        sig, key, copy_on_sheet, payload_length = read_fmt("4s 4s ? 3x I", fp)
        payload = fp.read(payload_length)
        items.append(MetadataItem(sig, key, copy_on_sheet, payload))
    return items
@register(TaggedBlock.PROTECTED_SETTING)
def _decode_protected(data):
    """Decode the layer lock flags bitfield into a ProtectedSetting."""
    flags = unpack("I", data)[0]
    transparency_locked = bool(flags & 1)
    composite_locked = bool(flags & 2)
    position_locked = bool(flags & 4)
    return ProtectedSetting(transparency_locked, composite_locked, position_locked)
@register(TaggedBlock.LAYER_32)
def _decode_layer32(data):
    """Decode an embedded 32-bit-channel layer info block."""
    # Local imports avoid a circular dependency with the reader/decoder modules.
    from psd_tools.reader import layers as layers_reader
    from psd_tools.decoder.decoder import decode_layers
    fp = io.BytesIO(data)
    raw_layers = layers_reader._read_layers(fp, 'latin1', 32, length=len(data))
    return decode_layers(raw_layers)
@register(TaggedBlock.LAYER_16)
def _decode_layer16(data):
    """Decode an embedded 16-bit-channel layer info block."""
    # Local imports avoid a circular dependency with the reader/decoder modules.
    from psd_tools.reader import layers as layers_reader
    from psd_tools.decoder.decoder import decode_layers
    fp = io.BytesIO(data)
    raw_layers = layers_reader._read_layers(fp, 'latin1', 16, length=len(data))
    return decode_layers(raw_layers)
@register(TaggedBlock.TYPE_TOOL_OBJECT_SETTING)
def _decode_type_tool_object_setting(data):
    """Decode a type tool object setting: transform values plus the text
    descriptor.  Warp data and bounds are present in the format but are not
    yet parsed (see the intentionally unreachable tail below).

    Returns None (with a warning) on unsupported versions or descriptors.
    """
    fp = io.BytesIO(data)
    # version, six transform values (xx xy yx yy tx ty), text version,
    # first descriptor version.
    ver, xx, xy, yx, yy, tx, ty, txt_ver, desc_ver1 = read_fmt("H 6Q H I", fp)

    # This decoder needs to be updated if we have new formats.
    if ver != 1 or txt_ver != 50 or desc_ver1 != 16:
        warnings.warn("Ignoring type setting tagged block due to old versions")
        return

    try:
        text_data = decode_descriptor(None, fp)
    except UnknownOSType as e:
        warnings.warn("Ignoring type setting tagged block (%s)" % e)
        return

    # XXX: Until Engine Data is parsed properly, the following cannot be parsed.
    # The end of the engine data dictates where this starts.
    return TypeToolObjectSetting(ver, xx, xy, yx, yy, tx, ty, txt_ver, desc_ver1, text_data)

    # NOTE(review): everything below is deliberately unreachable dead code —
    # a sketch of the remaining format kept for when engine-data parsing
    # lands.  TypeToolObjectSetting would also need its extra fields restored
    # (see the commented-out field names at its definition).
    warp_ver, desc_ver2 = read_fmt("H I", fp)
    if warp_ver != 1 or desc_ver2 != 16:
        warnings.warn("Ignoring type setting tagged block due to old versions")
        return

    try:
        warp_data = decode_descriptor(None, fp)
    except UnknownOSType as e:
        warnings.warn("Ignoring type setting tagged block (%s)" % e)
        return

    left, top, right, bottom = read_fmt("4Q", fp)
    return TypeToolObjectSetting(ver, xx, xy, yx, yy, tx, ty, txt_ver, desc_ver1,
                                 text_data, warp_ver, desc_ver2, warp_data,
                                 left, top, right, bottom)
|
Although the Dalish elves draw their magic from Fade also, their magic is a little different from that of the Circle's mage. How do you think they are different?
According to ingame rules, they aren't, that I've seen. The distinction is just a writer's bit of making things interesting.
"As mentioned, there are more "mages" in the game than just the Circle of Magi. However, to answer the initial question, you will not play one.
There is a mage origin for player to choose that is open to humans or elves, but not one specifically for Dalish elven mages. Sorry."
There is another post from David that I can't find now, saying that the Dalish mages are different, but it did not go into detail about how. They are more nature-related than the other mages and will probably have a different repertoire of spells.
Are they, perhaps, like Druids from DnD? Or at least more tilted towards that? If so, it's certainly an idea of how to design for them.
Yes it should be something like that. They probably have different spells that are related to nature, like lightning I guess. Bioware will develop this class in their next DA game or expansions for sure, so I doubt they are going to give us more canon information about this one. I definitely would like to see this class implemented, it'd be something totally different.
Well, normal wizards already have the "Primal" section, which is effectively nature magic.
Yes you are right, there is even lightning already available. I don't know how their magic is different. The only thing I am sure of is that they also draw their magic from the Fade, but they manifest it differently somehow.
Right now, that only appears to be mana points, so while the flavour and spell access is different, I'm not sure on how the mechanics are different.
I have just noticed this thread while I was thinking of creating the same.
As I had outlined, the following changes would make sense for such a class. As you go with the dalish origins, you can grasp a bit of lore as you speak with Wynne in Ostagar and as you speak with morrigan in your camp.
It would make sense to have the basic keeper skills to be the current in-game skills of a ranger and a shapeshifter combined. This is based on the fact that the dalish are much closer to nature than any other groups in the Dragon Age lore.
The only thing I was thinking is that the spider form might be changed to a Halla?
Seems like a lot of work and this would need it's own origin as well unless you re-use the current dalish origins. However, it would mean replacing Menarill with yourself since there is only 1 keeper and 1 apprentice at the same time.
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'uis/templates.ui'
#
#
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shim: QString.fromUtf8 only exists under the old PyQt4
# string API; fall back to the identity function otherwise.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

# Compatibility shim: pass the UTF-8 encoding flag to translate() when the
# PyQt4 build provides it; otherwise call the three-argument form.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
    """Generated UI (pyuic4, from uis/templates.ui) for the code templates
    dialog: a template list, a code editor, add/delete/help buttons and a
    templates-directory picker.

    NOTE(review): widgets are attached to ``Dialog`` rather than ``self``,
    unlike stock pyuic output — presumably so callers can reach the widgets
    directly on the dialog instance; confirm before regenerating.
    """

    def setupUi(self, Dialog):
        """Create and position all widgets on ``Dialog`` and wire signals."""
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(640, 480)
        # OK / Cancel buttons, bottom-right corner.
        Dialog.buttonBox = QtGui.QDialogButtonBox(Dialog)
        Dialog.buttonBox.setGeometry(QtCore.QRect(406, 440, 225, 32))
        Dialog.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        Dialog.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        Dialog.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        # Left-hand list of available templates.
        Dialog.templatesList = QtGui.QListWidget(Dialog)
        Dialog.templatesList.setGeometry(QtCore.QRect(16, 64, 161, 353))
        Dialog.templatesList.setObjectName(_fromUtf8("templatesList"))
        # Monospace editor for the selected template's code.
        Dialog.codeEdit = QtGui.QPlainTextEdit(Dialog)
        Dialog.codeEdit.setGeometry(QtCore.QRect(192, 61, 433, 353))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Monospace"))
        font.setPointSize(12)
        Dialog.codeEdit.setFont(font)
        Dialog.codeEdit.setObjectName(_fromUtf8("codeEdit"))
        Dialog.addButton = QtGui.QPushButton(Dialog)
        Dialog.addButton.setGeometry(QtCore.QRect(16, 432, 65, 32))
        Dialog.addButton.setObjectName(_fromUtf8("addButton"))
        Dialog.delButton = QtGui.QPushButton(Dialog)
        Dialog.delButton.setGeometry(QtCore.QRect(96, 432, 81, 32))
        Dialog.delButton.setObjectName(_fromUtf8("delButton"))
        Dialog.macrosButton = QtGui.QPushButton(Dialog)
        Dialog.macrosButton.setGeometry(QtCore.QRect(258, 420, 101, 25))
        Dialog.macrosButton.setObjectName(_fromUtf8("macrosButton"))
        Dialog.label = QtGui.QLabel(Dialog)
        Dialog.label.setGeometry(QtCore.QRect(16, 16, 161, 17))
        Dialog.label.setObjectName(_fromUtf8("label"))
        # Templates directory path plus a browse ("...") button.
        Dialog.tmplDir = QtGui.QLineEdit(Dialog)
        Dialog.tmplDir.setGeometry(QtCore.QRect(192, 16, 369, 27))
        Dialog.tmplDir.setObjectName(_fromUtf8("tmplDir"))
        Dialog.tmplDirBrowseButton = QtGui.QPushButton(Dialog)
        Dialog.tmplDirBrowseButton.setGeometry(QtCore.QRect(577, 16, 49, 32))
        Dialog.tmplDirBrowseButton.setObjectName(_fromUtf8("tmplDirBrowseButton"))

        self.retranslateUi(Dialog)
        # Standard accept/reject wiring for the button box.
        QtCore.QObject.connect(Dialog.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
        QtCore.QObject.connect(Dialog.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Set all user-visible strings (translatable via Qt's tr system)."""
        Dialog.setWindowTitle(_translate("Dialog", "Code Templates", None))
        Dialog.addButton.setText(_translate("Dialog", "Add", None))
        Dialog.delButton.setText(_translate("Dialog", "Delete", None))
        Dialog.macrosButton.setText(_translate("Dialog", "Macros Help", None))
        Dialog.label.setText(_translate("Dialog", "Templates Directory:", None))
        Dialog.tmplDirBrowseButton.setText(_translate("Dialog", "...", None))
|
Tablets are quickly becoming the new netbooks: yet another company — this time GameStop, via its CEO — has confirmed that it has already chosen an Android-based tablet to carry the GameStop brand.
The device will take on the Sony Tablet S and the iPad 2 in the increasingly crowded world of tablets within the gaming demographic (via Gamesindustry.biz).
More specifically, GameStop's tablet will be a "certified platform" that will allow users to stream console games using a technology licensed from Spawn Labs.
Interestingly, the tablet will come with a dedicated gaming controller and Gamestop already tested at the beginning of the year a trade-in scheme that allows users to swap their electronic devices for instore credits.
Now given the ease with which companies can get custom-made, own branded tablets, it shouldn't come as a surprise if the likes of Game.co.uk decides to follow this strategy in order to get a captive audience away from traditional gaming platforms.
The likes of Steam, the App Store or Onlive are taking apart the very assumptions that you need a powerful gaming console in order to experience an enjoyable gaming session.
|
import pandas as pd
import numpy as np
import re
# Entity-resolution registries shared by getID()/store() below.
# namesID:  upper-cased name  -> entity id
namesID = dict();
# emailsID: upper-cased email -> entity id
emailsID = dict();
# allID: entity id -> {"names": [...], "emails": [...]} history of every
# name/email ever stored under that id.
allID = dict();
# Matches a parenthesised display name embedded in an e-mail field,
# e.g. "FOO@BAR (John Doe)".
nameRegex = re.compile("(\(.*\))")
# Next entity id to hand out; ids start at 1 so a stored id is always truthy.
currID = 1;
def getID(name, email):
    """Return the entity id for a (name, email) pair, allocating one if needed.

    Looks up both keys in the module-level registries.  If either is already
    known, the existing id is reused (and the other key is merged onto it via
    store()); otherwise a fresh id is allocated.  On a name/email id conflict
    the name-based id wins.
    """
    global currID
    nameID = False
    emailID = False
    if name is not None and name in namesID:
        nameID = namesID[name]
    if email is not None and email in emailsID:
        emailID = emailsID[email]
    if (not emailID) and (not nameID):
        # Neither key is known: allocate a fresh id.
        # BUG FIX: the original stored under currID but returned the value
        # *after* incrementing, handing the caller an id one greater than the
        # one actually stored (which then collided with the next new entity).
        new_id = currID
        currID += 1
        store(new_id, name, email)
        return new_id
    if not emailID:
        store(nameID, name, email)
        return nameID
    if not nameID:
        store(emailID, name, email)
        return emailID
    if emailID != nameID:
        # Conflicting ids for the two keys: merge onto the name-based id.
        store(nameID, name, email)
    elif emailID not in allID:
        store(emailID, name, email)
    return nameID
def store(id, name, email) :
    """Record (name, email) under the given entity id in the module registries.

    Creates the id's entry in allID on first use, points both lookup tables
    (namesID / emailsID) at the id, and appends the raw values to the id's
    history lists.
    """
    # NOTE(review): `name` or `email` may be None here; the None key is then
    # inserted into namesID/emailsID and appended to the history lists.
    if id not in allID:
        allID[id] = {"emails": list(), "names": list()}
    fullID = allID[id];
    namesID[name] = id;
    emailsID[email] = id;
    fullID["names"].append(name);
    fullID["emails"].append(email);
def name_for_id(id):
    """Return the first name recorded for an entity id, or a placeholder."""
    entry = allID.get(id)
    if entry is not None:
        names = entry.get("names")
        if names:
            return names[0]
    return "UNKNOWN " + str(id)
def entity_resolve(row, emailCol, nameCol):
    """Resolve the entity id for a row and write it into its 'Person-ID' field."""
    address = row[emailCol].upper().replace(" AT ", "@")
    name = None
    found = nameRegex.search(address)
    if found is not None:
        # A parenthesised display name was embedded in the address field.
        name = found.group(0)
        address = address.replace(name, "")
        name = name.replace("(", "").replace(")", "")
    if nameCol is not None:
        # An explicit name column takes precedence over the embedded name.
        name = row[nameCol].upper()
    row["Person-ID"] = getID(name, address)
    return row
|
190107 03172 viewsParkeston, Mineral Resources MRL class locomotive MRL 002 'Spirit of Yilgarn' serial R-0113-03/14-505 built by UGL as a GE model C44aci in 2014 stands in the Engineers Siding with a string of eighteen, or nine pairs of MHPY bottom discharge hopper waggons awaiting transfer over to West Kalgoorlie.
190107 03182 viewsParkeston, Mineral Resources MRL class locomotive MRL 002 'Spirit of Yilgarn' serial R-0113-03/14-505 built by UGL as a GE model C44aci in 2014 stands in the Engineers Siding with a string of eighteen, or nine pairs of MHPY bottom discharge hopper waggons awaiting transfer over to West Kalgoorlie.
190107 03422 viewsParkeston, Mineral Resources MRL class locomotive MRL 002 'Sprit of Yilgarn' serial R-0113-03/14-505 built by UGL as a GE model C44aci in 2014 stands in the Engineers Siding with a string of eighteen, or nine pairs of MHPY bottom discharge hopper waggons awaiting transfer over to West Kalgoorlie.
190107 03432 viewsParkeston, builders plate of Mineral Resources MRL class locomotive MRL 002 'Sprit of Yilgarn' serial R-0113-03/14-505 built by UGL as a GE model C44aci in 2014 stands in the Engineers Siding.
190107 04182 viewsParkeston, Mineral Resources MRL class locomotive MRL 002 'Sprit of Yilgarn' serial R-0113-03/14-505 built by UGL as a GE model C44aci in 2014 stands in the Engineers Siding with a string of eighteen, or nine pairs of MHPY bottom discharge hopper waggons awaiting transfer over to West Kalgoorlie.
190107 04192 viewsParkeston, Mineral Resources MRL class locomotive MRL 002 'Sprit of Yilgarn' serial R-0113-03/14-505 built by UGL as a GE model C44aci in 2014 stands in the Engineers Siding with a string of eighteen, or nine pairs of MHPY bottom discharge hopper waggons awaiting transfer over to West Kalgoorlie.
190107 04202 viewsParkeston, cab side view of Mineral Resources MRL class locomotive MRL 002 'Sprit of Yilgarn' serial R-0113-03/14-505 built by UGL as a GE model C44aci in 2014 stands in the Engineers Siding.
190107 04212 viewsParkeston, cab side view of Mineral Resources MRL class locomotive MRL 002 'Sprit of Yilgarn' serial R-0113-03/14-505 built by UGL as a GE model C44aci in 2014 stands in the Engineers Siding.
190107 04222 viewsParkeston, builders plate of Mineral Resources MRL class locomotive MRL 002 'Sprit of Yilgarn' serial R-0113-03/14-505 built by UGL as a GE model C44aci in 2014 stands in the Engineers Siding.
190107 04232 viewsParkeston, UGL plate on Mineral Resources MRL class locomotive bogie.
190107 04242 viewsParkeston, UGL plate on Mineral Resources MRL class locomotive.
190107 04252 viewsParkeston, Mineral Resources Ltd MHPY type iron ore waggon MHPY 00113 built by CSR Yangtze Co China in 2014 as a batch of 382 units, these bottom discharge hopper waggons are operated in 'married' pairs.
190107 04262 viewsParkeston, Mineral Resources Ltd MHPY type iron ore waggon MHPY 00270 built by CSR Yangtze Co China in 2014 as a batch of 382 units, these bottom discharge hopper waggons are operated in 'married' pairs.
190107 04272 viewsParkeston, Mineral Resources Ltd MHPY type iron ore waggon MHPY 00367 built by CSR Yangtze Co China in 2014 as a batch of 382 units, these bottom discharge hopper waggons are operated in 'married' pairs.
190107 04282 viewsParkeston, Mineral Resources Ltd MHPY type iron ore waggon MHPY 00368 built by CSR Yangtze Co China in 2014 as a batch of 382 units, these bottom discharge hopper waggons are operated in 'married' pairs.
190107 04292 viewsParkeston, Mineral Resources Ltd builders plate of MHPY type iron ore waggon MHPY 00368 built by CSR Yangtze Co China in 2014 as a batch of 382 units.
190107 04302 viewsParkeston, Mineral Resources Ltd builders plate of MHPY type iron ore waggon MHPY 00368 built by CSR Yangtze Co China in 2014 as a batch of 382 units.
190107 04312 viewsParkeston, Mineral Resources Ltd MHPY type iron ore waggon MHPY 00211 built by CSR Yangtze Co China in 2014 as a batch of 382 units, these bottom discharge hopper waggons are operated in 'married' pairs.
190107 04322 viewsParkeston, Mineral Resources Ltd MHPY type iron ore waggon MHPY 00212 built by CSR Yangtze Co China in 2014 as a batch of 382 units, these bottom discharge hopper waggons are operated in 'married' pairs.
190107 04332 viewsParkeston, Mineral Resources Ltd MHPY type iron ore waggon MHPY 00315 built by CSR Yangtze Co China in 2014 as a batch of 382 units, these bottom discharge hopper waggons are operated in 'married' pairs.
|
import numpy as np
def pr_transition(offspring_diploid, maternal_diploid, male_diploid, offspring_genotype, maternal_genotype, male_genotype, mu):
    """
    Calculate the Mendelian transition probability for a trio of putative
    diploid genotypes, weighted by the probability that the observed
    offspring, maternal and candidate-male marker data match those genotypes.

    Summing over all 27 possible genotype combinations is left to the caller;
    generally this is not called directly, but through lik_sampled_fathers()
    or similar.  This function works with diploid genotypes, rather than a
    genotypeArray.

    Parameters
    ----------
    offspring_diploid, maternal_diploid, male_diploid: array
        2-D arrays (individuals x loci) of diploid genotypes for the
        offspring, mothers and candidate fathers.  The mothers array is
        assumed to have one row per offspring.
    offspring_genotype, maternal_genotype, male_genotype: int
        0, 1 or 2 indicating homozygous, heterozygous or alternate homozygous
        genotype.
    mu: float
        point estimate of the genotyping error rate.

    Returns
    -------
    A 3-dimensional array of probabilities indexed as [offspring, candidate
    male, locus].  These are given in linear, rather than log space.
    """
    # Mendelian transmission probabilities indexed [offspring, mother, father].
    mendelian = np.array(
        [[[1.0,  0.5,  0.0],
          [0.5,  0.25, 0.0],
          [0.0,  0.0,  0.0]],
         [[0.0,  0.5,  1.0],
          [0.5,  0.5,  0.5],
          [1.0,  0.5,  0.0]],
         [[0.0,  0.0,  0.0],
          [0.0,  0.25, 0.5],
          [0.0,  0.5,  1.0]]])
    trans_prob = mendelian[offspring_genotype, maternal_genotype, male_genotype]

    def _match_prob(diploid, genotype):
        # P(observed marker | putative genotype): 1-mu on a match, mu otherwise.
        return np.where(diploid == genotype, 1 - mu, mu)

    pr_offs = _match_prob(offspring_diploid, offspring_genotype)
    pr_mothers = _match_prob(maternal_diploid, maternal_genotype)
    pr_males = _match_prob(male_diploid, male_genotype)
    # Broadcast to [offspring, male, locus].
    return trans_prob * pr_males[np.newaxis] * pr_mothers[:, np.newaxis] * pr_offs[:, np.newaxis]
|
British & Irish Lions great Brian O'Driscoll has called on the famous side to play a match in the Pacific Islands as part of their 2017 tour to New Zealand.
Following Friday's news the All Blacks will play a Test in Samoa next year as part of their 2015 Rugby World Cup build-up, O'Driscoll believes the Lions should follow suit and include a match in the Pacific Islands as part of their 2017 tour.
Their last jaunt to Australia was preceded by a trip to Hong Kong to face the Barbarians, a game some deemed to be teed up to satisfy the Lions' sponsors HSBC, but O'Driscoll would like to see the tourists forgo commercial opportunity in 2017.
"It is probably more of a give than a take for the Lions [to play in the Pacific Islands] but it would be great for the people there and it would probably take away from that commercial feel that the Lions are this commercial juggernaut," O'Driscoll told The Times. "It might be a clever move from the Lions' point of view to play a game down there."
And the IRB's general manager for Oceania, William Glenwright, has echoed O'Driscoll's views. "It would be magic. Rugby is so important to the fabric of the three Pacific Island nations and the strength of the brands of teams like the All Blacks, the Wallabies and the Lions is so strong that to have those countries touring the Pacific Islands sends a message that they matter, that they are an important part of world rugby -- and they are.
"It is difficult to persuade them to do it when there is no money for them, so you have to find another angle. The argument I'll be using is that a match against a Pacific Island in the Pacific Islands won't tick the finance box but it will tick the high performance box.
"It would be great to have that meeting and even just look at the possibility of it. It would send a really good message."
|
# Copyright (C) 2014 Imperial College London.
# This file is part of Firedrake-Fluids.
#
# Firedrake-Fluids is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Firedrake-Fluids is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Firedrake-Fluids. If not, see <http://www.gnu.org/licenses/>.
import sys

from firedrake import *
from firedrake_fluids import LOG
import libspud
class ExpressionFromOptions:
    """ A class for instantiating UFL Expression objects using information
    provided in a simulation's configuration/options file.

    The expression's definition may be a plain constant, a user-supplied
    Python function named 'val', or a C++ code snippet, selected by which
    sub-option exists under the given path. """

    def __init__(self, path, t=None):
        """ Retrieve the expression's value from the options file.

        :param str path: The path to the expression's definition in the options file.
        :param float t: The current time. """
        try:
            if(libspud.have_option(path + "/constant")):
                # Constant value: store it directly.
                self.val = libspud.get_option(path + "/constant")
                self.type = "constant"
            elif(libspud.have_option(path + "/python")):
                # NOTE(review): the options file supplies arbitrary Python
                # that is exec'd here; this is code execution by design.
                v = libspud.get_option(path + "/python")
                self.type = "python"
                exec v # Make the 'val' function that the user has defined available for calling.
                self.val = val
                self.t = t
            elif(libspud.have_option(path + "/cpp")):
                # For C++ expressions. The exec'd snippet is expected to
                # define a callable 'val' returning the C++ code string.
                self.type = "cpp"
                v = libspud.get_option(path + "/cpp")
                exec v
                self.val = val
                self.t = t
            else:
                raise ValueError("Unknown expression type.")
        except ValueError as e:
            LOG.exception(e)
            sys.exit()
        return

    def get_expression(self):
        """ Create a UFL Expression object, whose value is obtained from self.val.
        Note that for time-dependent Expressions, the variable 't' will need to be updated manually.

        :returns: A UFL Expression object.
        :rtype: ufl.Expression
        """
        try:
            if(self.type == "constant"):
                return Expression(self.val)
            elif(self.type == "cpp"):
                # The stored callable returns the C++ code to compile.
                return Expression(code=self.val(), t=self.t)
            elif(self.type == "python"):
                val = self.val
                t = self.t
                # Determine the value shape by plugging in some dummy coordinate and time.
                s = val(x = [0,0,0], t=t)
                class PythonExpression(Expression):
                    def eval(self, value, x, t=None):
                        value[:] = val(x, t)
                    # Only define value_shape for vector-valued expressions;
                    # scalar results (float/int) use the default shape.
                    if(not isinstance(s, float) and not isinstance(s, int)):
                        def value_shape(self):
                            return (len(s),)
                e = PythonExpression(t=t)
                return e
            else:
                raise ValueError("Unknown expression type: %s." % self.type)
        except ValueError as e:
            LOG.exception(e)
            sys.exit()
|
Fallahi, A. (2014). Learning spatial prepositions by Iranian EFL learners. Journal of Teaching English Language Studies, 2(4), 65-84.
Azam Fallahi. "Learning spatial prepositions by Iranian EFL learners". Journal of Teaching English Language Studies, 2, 4, 2014, 65-84.
Fallahi, A. (2014). 'Learning spatial prepositions by Iranian EFL learners', Journal of Teaching English Language Studies, 2(4), pp. 65-84.
Fallahi, A. Learning spatial prepositions by Iranian EFL learners. Journal of Teaching English Language Studies, 2014; 2(4): 65-84.
English language from different classes in Mehr Aein Institution, Ghorveh were randomly Chosen.
for researchers, teachers and learners.
Aminzade Arkhodi, F. ( 2013). A contrastive analysis of English language and Persian language prepositions. The Iranian EFL Journal , vol(9),No(4).
Asm, T. (2010). Transfer of simple prepositions from standard Arabic into English: the case of third year LME students of English language at Mentouri University- Consytantine. M.A Thesis. Mentouri University- Constantine, Ageria.
Bagherzadeh Kasmani, M., & Rahmani. P.(2012). Contrastive analysis: an investigation of error analysis of Kurdish and Persian speaking students majoring in translation(EFL). Asian Journal of Social sciences and Humanities, vol(1), No (4).
Byung-gon, Y.( 1992). A contrastive analysis hypothesis, 2. Pp. 133-149.
Boquist, Patricia, (2009). The second Language Acquisition of English Prepositions, Liberty University.
Evans, V., Tyler, A. (2003). The semantics of English prepositions: spatial scenes, embodied meaning and cognition. Cambridge University Press.
Fatahi,F., Oroji, M.R., & Rahbarian, S. (2013). A contrastive study of English and Persian double objects construction. Frontiers of Language and Teaching, vol(4).
Feist, M.I., & Gentner, D. (2003). Factors involved in the use of In and On. Proceeding of the twenty-fifth Annual Meeting of the Cognitive Science Society.
Finkbeiner, M. (1998). Acquisition of L2 spatial prepositions: new words for old concepts?. University of Arizona.
Fion, Ko, Y.M. (2005). The acquisition of English spatial prepositions by EFL learners, Degree of Master of Art thesis. The Chinese University of Hong Kong.
Froud, K. (2001). Prepositional and the lexical/ functional divide: Aphasic evidence. Department of Phonetics and Linguistics, University College London.
Gorden, R., & Lorincz, K. (2012). Difficulties in learning prepositions and possible solution. Linguistics portfolios, vol(1).
Hamdallah, R., & Tushyeh, Hanna. (1988). A contrastive analysis of selected English and Arabic prepositions with pedagogical implications.
Hsieh, L. Ch. (2006). Learning prepositions as part of fixed phrases in phrasal verbs and collocations. The case of "on" in the EFL classroom.
.Jalali, H., & Shojaei, M. (2012). Persian EFL students' developmental versus fossilized prepositional errors. The reading Matrix, 12(1), 80-97.
Janfaza, A., Soori, A., yousefi, S. (2014). Common preposition errors committed by Iranian students. International Journal of Applied Linguistics and English Literature, vol (3), No(3).
Kemmer, D.(2005). The spatial and temporal meanings of English prepositions can be independently impaired. Neurosychologia, 43,797-806it.
Kodachi, K. (2005). A study of the effect of learners' L1 in the process of learning the usage of prepositions from the perspective of cognitive linguisticS.
Koosha, M., & Jafarpour, A.A. (2006). Data-Driven learning and teaching collocation of prepositions: the case of Iranian EFL Adults Learners, Asian EFL, Journal Quarterly, 8, 192-209.
Mahmoodzade, M. (2012). A cross-linguistic study of prepositions in Persian and English: the effect of transfer. Theory and Practice in Language Studies, Vol. 2, No. 4, pp. 734-740.
Mohammed, M.A. (2011). The use of prepositions by Arab EFL learners: looking on the bright side. The Buckingham Journal of Language and Linguistics, vol(4), pp 84-90.
Okanlawon, B & Ojetunde, B, (2007). A Study of the Acquisition of Spatial Prepositions by Selected Nigerian Learners of English. African Research Review, vol(1), No3.
Pakhomov, S. (1999). Cognitive aspects of acquisition of prepositions in SLA . University of Minnesota.
Quynh, P (2011). An Investigation into English Complex Prepositions and their equivalents in Vietnamese. An M.A Thesis in the English language. Ministry of Education and Training. University of Danang.
Sakurai, T. (2011). Contrastive Linguistics English Prepositions for Japanese Speakers..
Retz-Schmidt, G. (1988). Various views on spatial prepositions. Al Magazine, vol(9)No(9).
Saturnina A. Castro, (2013). An Analysis of Prepositional Errors of College Students. University of the Philippines, Philippines.
Tahaineh, Y. (2012). Arab EFL University Students' errors in the use of prepositions. Modern Journal of Applied Linguistics, 1(6), 76-112.
|
"""Base types for `xcrun simctl`."""
import enum
import json
from typing import Any, Dict
import subprocess
class ErrorCodes(enum.Enum):
    """Simple lookup for all known error codes returned by `xcrun simctl`.

    Commented-out members are kept for reference; only codes the wrapper
    currently needs are active enum members.
    """

    # Tried to access a file or directory (such as by searching for an app
    # container) that doesn't exist
    #no_such_file_or_directory = 2

    # Trying to perform an action on a device type, but supplied an invalid
    # device type
    #invalid_device_type = 161

    # Tried to perform an action on the device, but there was an
    # incompatibility, such as when trying to create a new Apple TV device with
    # a watchOS runtime.
    incompatible_device = 147

    # The device was in a state where it can't be shutdown. e.g. already
    # shutdown
    #unable_to_shutdown_device_in_current_state = 164
class SimulatorControlType(enum.Enum):
    """Enumeration of the entity kinds managed by `xcrun simctl`."""

    device_pair = "pair"
    runtime = "runtime"
    device_type = "device_type"
    device = "device"

    def list_key(self):
        """Return the key passed to (and returned by) `simctl list` for this kind."""
        # `simctl list` uses the irregular plural "devicetypes".
        irregular = {"device_type": "devicetypes"}
        return irregular.get(self.name, self.value + "s")
class SimulatorControlBase:
    """Common behaviour shared by all types defined by `simctl`."""

    raw_info: Dict[str, Any]
    simctl_type: SimulatorControlType

    def __init__(self, raw_info: Dict[str, Any], simctl_type: SimulatorControlType) -> None:
        self.raw_info = raw_info
        self.simctl_type = simctl_type

    #pylint: disable=no-self-use
    def _run_command(self, command: str) -> str:
        """Convenience wrapper around the static run_command helper."""
        return SimulatorControlBase.run_command(command)
    #pylint: enable=no-self-use

    def __eq__(self, other: object) -> bool:
        """Equal when the other object has the same class, type and raw data."""
        if not isinstance(other, self.__class__):
            return False
        return self.simctl_type == other.simctl_type and self.raw_info == other.raw_info

    def __ne__(self, other: object) -> bool:
        """Inequality is the negation of __eq__."""
        return not self.__eq__(other)

    @staticmethod
    def run_command(command: str) -> str:
        """Execute `xcrun simctl <command>` and return its stdout."""
        full_command = "xcrun simctl %s" % (command,)
        # Deliberately don't catch the exception - we want it to bubble up
        completed = subprocess.run(full_command, universal_newlines=True, shell=True, check=True, stdout=subprocess.PIPE)
        return completed.stdout

    @staticmethod
    def list_type(item: SimulatorControlType) -> Any:
        """Run `xcrun simctl list` for `item` and return the parsed JSON value."""
        key = item.list_key()
        # Deliberately don't catch the exception - we want it to bubble up
        output = SimulatorControlBase.run_command("list %s --json" % (key,))
        parsed = json.loads(output)
        if not isinstance(parsed, dict):
            raise Exception("Unexpected list type: " + str(type(parsed)))
        if not parsed.get(key):
            raise Exception("Unexpected format for " + key + " list type: " + str(parsed))
        return parsed[key]
|
Multiple people were shot Friday afternoon inside a New York City hospital.
The baby had colic and was only 4 days old.
Dr. Neeki, an Iranian-American doctor who responded to the San Bernardino shooting, implored officials this morning to step up their response to future terror attacks.
President Obama has said that the Affordable Care Act will eventually make health care less expensive.
|
from mushroom_rl.algorithms import Agent
from mushroom_rl.utils.torch import update_optimizer_parameters
class DeepAC(Agent):
    """
    Base class for deep actor-critic algorithms that use the
    reparametrization trick, such as SAC, DDPG and TD3.

    Handles construction of the actor optimizer (with optional gradient
    clipping) and provides helpers for hard/soft target-network updates.
    """
    def __init__(self, mdp_info, policy, actor_optimizer, parameters):
        """
        Constructor.

        Args:
            mdp_info: information about the MDP (forwarded to ``Agent``);
            policy: the policy followed by the agent (forwarded to ``Agent``);
            actor_optimizer (dict): parameters to specify the actor optimizer
                algorithm; expects 'class' and 'params' entries, plus an
                optional 'clipping' dict with 'method' and 'params';
            parameters: policy parameters to be optimized.
        """
        if actor_optimizer is not None:
            # The optimizer needs a concrete, re-iterable list rather than a
            # one-shot generator.
            if parameters is not None and not isinstance(parameters, list):
                parameters = list(parameters)
            self._parameters = parameters

            self._optimizer = actor_optimizer['class'](
                parameters, **actor_optimizer['params']
            )

            # Optional gradient clipping callable (e.g. clip_grad_norm_).
            self._clipping = None
            if 'clipping' in actor_optimizer:
                self._clipping = actor_optimizer['clipping']['method']
                self._clipping_params = actor_optimizer['clipping']['params']

        # Register attributes with the mushroom_rl save/load machinery.
        self._add_save_attr(
            _optimizer='torch',
            _clipping='torch',
            _clipping_params='pickle'
        )

        super().__init__(mdp_info, policy)

    def fit(self, dataset):
        """
        Fit step.

        Args:
            dataset (list): the dataset.
        """
        raise NotImplementedError('DeepAC is an abstract class')

    def _optimize_actor_parameters(self, loss):
        """
        Method used to update actor parameters to maximize a given loss.

        Args:
            loss (torch.tensor): the loss computed by the algorithm.
        """
        self._optimizer.zero_grad()
        loss.backward()
        self._clip_gradient()
        self._optimizer.step()

    def _clip_gradient(self):
        # Apply the configured clipping method (if any) to the gradients of
        # the actor parameters before the optimizer step.
        if self._clipping:
            self._clipping(self._parameters, **self._clipping_params)

    @staticmethod
    def _init_target(online, target):
        # Hard copy: target networks start from the online networks' weights.
        for i in range(len(target)):
            target[i].set_weights(online[i].get_weights())

    def _update_target(self, online, target):
        # Polyak (soft) update: target <- tau * online + (1 - tau) * target.
        # NOTE(review): relies on self._tau being set by the subclass.
        for i in range(len(target)):
            weights = self._tau * online[i].get_weights()
            weights += (1 - self._tau) * target[i].get_weights()
            target[i].set_weights(weights)

    def _update_optimizer_parameters(self, parameters):
        # Re-point the optimizer at a new parameter list (e.g. after a load).
        self._parameters = list(parameters)
        if self._optimizer is not None:
            update_optimizer_parameters(self._optimizer, self._parameters)

    def _post_load(self):
        # BUG FIX: the original concatenated string literals read
        # "...Subclasses needto implement..."; a space was missing.
        raise NotImplementedError('DeepAC is an abstract class. Subclasses need '
                                  'to implement the `_post_load` method.')
|
Today we did wheel landings at Palmer, which is a pretty reliable airport for crosswind. The wind was amusingly light, given we'd come just for that - but the windsock at the head of 16 didn't agree with the windsock at the tail end of 16. Combined with the bluff on downwind to base that fools a pilot into coming in high, and the inevitable burbles over the river and the treeline, it was a good thing that I had 6000 feet of runway available. Not that I used it all, but that I didn't feel much pressure or panic to either monkey with my landings or go around. So we worked on wheel landings, and when a wheel landing has gone too far wrong to be pulled off - but can still be converted into a 3-point landing.
In the middle of this practice, we stopped in at the FSS, and met the guy on duty. (They have flush toilets instead of running into the woods! The utter plush civilization of it all!) We still have local FAA employees at the Flight Service Stations in Alaska, and unlike the outsourced version in the lower 48, no matter what the patchy automated weather information may indicate, you can take the FSS briefer's weather predictions to the bank.
After we got back, my CFI signed my logbook with all due endorsements as safe to take off and fly this thing alone, anywhere and everywhere I want. I still scheduled another lesson with him, tomorrow at 1300, to polish the last few rough edges - I'm not happy with my wheel landings yet, and want a little more crosswind practice.
No pictures today - busy flying. Maybe tomorrow?
So I celebrated by scarfing down the last of the blueberries and yesterday's donuts, like any good airport scavenger, and then pitching in on power-washing the ramp. I've wanted to do that since the first time I knelt in the accumulated dirt and sand blown in from around the city - I may not be able to pull-start the motor, but I can run the hose for a while.
One of my favorite gunnies in the whole wide world stopped by, freshly retired and with the few day's worth of beard growth and ear-to-ear grin to prove it. Our planes were long-term projects together. It's awesome for us both to see each other's birds as finally flying, and congratulate each other. My big old brick of a radio, which I uninstalled months ago, will now find a new and happy home being used in his plane, making everyone happy in the deal. Yay!
I am looking forward with glee (no, not the TV show) and gloom, to escorting my favorite long-term hangar mate to the border. Peter is a lucky guy!
|
"""
"""
# This file is part of zasim. zasim is licensed under the BSD 3-clause license.
# See LICENSE.txt for details.
# Copyright (c) 2011, Felix Bondarenko
import numpy as np
## A map from states according to the bitmask to
#  pygame blittable states ( between 0 and 28 ).
#
#  Keys are the raw bitmask-encoded cell states (see the bitmask layout
#  documented in vonNeumann.updateAllCellsWeaveInline); values are the dense
#  indices 0..28 used by the display module.  The trailing comment on each
#  entry gives the human-readable state name and the sum it decodes to.
displayableStateDict = {
    0: 0, # U
    2048: 1, #C00 2048
    2049: 2, #C10 2048+1
    2050: 3, #C01 2048+2
    2051: 4, #C11 2048+3
    4192: 5, #S000 4096+96
    4160: 6, #S00 4096+64
    4168: 7, #S01 4096+64+8
    4128: 8, #S0 4096+32
    4176: 9, #S10 4096+64+16
    4184: 10, #S11 4096+64+16+8
    4144: 11, #S1 4096+32+16
    4096: 12, #S 4096
    6144: 13, #T000 6144
    6272: 14, #T001 6144+128
    6400: 15, #T010 6144+256
    6528: 16, #T011 6144+128+256
    6656: 17, #T020 6144+512
    6784: 18, #T021 6144+128+512
    6912: 19, #T030 6144+256+512
    7040: 20, #T031 6144+128+256+512
    7168: 21, #T100 6144+1024
    7296: 22, #T101 6144+128+1024
    7424: 23, #T110 6144+256+1024
    7552: 24, #T111 6144+128+256+1024
    7680: 25, #T120 6144+512+1024
    7808: 26, #T121 6144+128+512+1024
    7936: 27, #T130 6144+256+512+1024
    8064: 28, #T131 6144+128+256+1024+512
    }
## A map from human readable vonNeumann states ( such as 'U', 'T020' and 'C11' )
#  to actual states calculated via bitmask.
nameStateDict = { "U": 0,
                  #"C00" : 2048, "C10" : 2049, "C01" : 2050, "C11" : 2051,
                  "C00" : 1, "C10" : 2, "C01" : 3, "C11" : 4,
                  "S" : 4096, "S0" : 4128, "S1" : 4144, "S00" : 4160,
                  "S01" : 4168, "S10" : 4176, "S11" : 4184, "S000": 4192,
                  "T000": 6144, "T001": 6272, "T010": 6400, "T011": 6528,
                  # BUG FIX: 6784 is T021 (6144+128+512) per
                  # displayableStateDict; the key was previously the
                  # impossible state name "T032".
                  "T020": 6656, "T021": 6784, "T030": 6912, "T031": 7040,
                  "T100": 7168, "T101": 7296, "T110": 7424, "T111": 7552,
                  "T120": 7680, "T121": 7808, "T130": 7936, "T131": 8064 }
# Reverse lookup: state bitmask -> human-readable name.
# Use .items() rather than the Python-2-only .iteritems() so this module
# also imports under Python 3.
stateNameDict = {a: b for b, a in nameStateDict.items()}
states = sorted(nameStateDict.values())
from os import path
from zasim.display.qt import generate_tile_atlas
# XXX get the absolute path if possible.
# Map each encoded state to the image file named after its human-readable
# state name, then build a single tile atlas so the Qt display can blit
# every state from one image.
filename_map = {num:path.join("images/vonNeumann", stateNameDict[num]) + ".png" for num in states}
PALETTE_JVN_IMAGE, PALETTE_JVN_RECT = generate_tile_atlas(filename_map, "images/vonNeumann")
## The cellular automaton proposed by John von Neumann
# \verbatim
# All states are encoded in a bitmask:
#
# <--MSB 10 LSB
# ...... 0 0 0 0 0 0 0 0 0 X X u a1 a0 eps sc1 sc0 s2 s1 s0 e1 e0
# | | | | | | | | | | | | |-> current e
# XX = 00 -> U <--------| | | | | | | | | | | |----> next e
# XX = 01 -> C <----------| | | | | | | | | |
# XX = 10 -> S | | | | | | | | |-------> lsb on S
# XX = 11 -> T | | | | | | | |----------> ...
# | | | | | | |-------------> msb on S
# S{} is encoded as SMASK_111 | | | | | |-----------------> s-state counter
# | | | | |---------------------> s-state counter
# | | | |
# | | | |--------------------------> excited
# | | |-----------------------------> direction
# | |--------------------------------> direction
# |----------------------------------> special
#
# \endverbatim
class vonNeumann ( object ):
palette = []
    ## The constructor.
    #  \param sizeX    grid width in cells
    #  \param sizeY    grid height in cells
    #  \param confFile path of a configuration file to import, or "" to start
    #                  from an all-zero (all-U) configuration
    def __init__( self, sizeX, sizeY, confFile ):
        ## The ca's title
        self.title = "vonNeumann"
        ## The ca's dimension
        self.dim = 2
        self.size = self.sizeX, self.sizeY = sizeX, sizeY
        ## The current configuration is held here
        # as usual, these two arrays contain the real configuration, that is used
        # in every step ... (see vonNeumann::displayConf)
        self.currConf = np.zeros( (sizeX, sizeY), int )
        ## The next configuration is computed into here
        self.nextConf = np.zeros( (sizeX, sizeY), int )
        # used when updating only some cells instead of all....
        # cActArr/nActArr flag whether a flattened cell index is already
        # queued; cList/nList hold the queued indices and cCounter/nCounter
        # the number of valid entries in each.
        self.cActArr = np.zeros( (self.sizeX*self.sizeY), bool )
        self.nActArr = np.zeros( (self.sizeX*self.sizeY), bool )
        self.cList = np.zeros( (self.sizeX*self.sizeY), int )
        self.nList = np.zeros( (self.sizeX*self.sizeY), int )
        self.cCounter = 0
        self.nCounter = 0
        if confFile != "":
            # importConf is defined outside this view; presumably it fills
            # self.currConf from the file -- TODO confirm.
            self.importConf( confFile )
            self.nextConf = self.currConf.copy()
        ## The configuration that is blittet...
        # But in this CA the states are not enumerable from 0..28, but scattered
        # between 0 and ~2^13, so we need a dict (see vonNeumann::displayableStateDict)
        # to map the states to 0..28, so the Display-module can display states
        # without knowing the difference
        self.displayConf = np.zeros( self.size, int )
## Used to append cells to the list of cells to handle in the next step
def enlist( self, x, y ):
for i in ( ( (x) + (y)*self.sizeX ),
( (x+1) + (y)*self.sizeX ),
( (x-1) + (y)*self.sizeX ),
( (x) + (y-1)*self.sizeX ),
( (x) + (y+1)*self.sizeX ) ):
if self.cActArr[ i ] == False:
self.cActArr[ i ] = True
self.cList[self.cCounter] = i
self.cCounter += 1
    ## Updates all cells using scipy.weave.inline
    def updateAllCellsWeaveInline( self ):
        """Advance every inner grid cell one step of the von Neumann
        29-state transition function via C code for scipy.weave.inline.

        The C source reads the current configuration through ``cconf`` and
        writes the successor states through ``nconf``; border cells
        (index 0 and size-1 in each dimension) are never touched.

        NOTE(review): within this chunk the C source is only assigned to a
        local; the weave.inline(...) invocation is not visible here --
        confirm against the full file.  scipy.weave is Python-2 only and
        was removed from SciPy.
        """
        #
        # All states are encoded in a bitmask:
        #
        # <--MSB 10 LSB
        # ...... 0 0 0 0 0 0 0 0 0 X X u a1 a0 eps sc1 sc0 s2 s1 s0 e1 e0
        # | | | | | | | | | | | | |-> current e
        # XX = 00 -> U <--------| | | | | | | | | | | |----> next e
        # XX = 01 -> C <----------| | | | | | | | | |
        # XX = 10 -> S | | | | | | | | |-------> lsb on S
        # XX = 11 -> T | | | | | | | |----------> ...
        # | | | | | | |-------------> msb on S
        # S{} is encoded as SMASK_111 | | | | | |-----------------> s-state counter
        # | | | | |---------------------> s-state counter
        # | | | |
        # | | | |--------------------------> excited
        # | | |-----------------------------> direction
        # | |--------------------------------> direction
        # |----------------------------------> special
        #
        vonNeumannCode = """
        #include <stdlib.h>
        #include <stdio.h>
        #line 1 "CA.py"
        #define UMASK 0
        #define CMASK 2048 // 1 << 11
        #define SMASK 4096 // 2 << 11
        #define TMASK 6144 // 3 << 11
        #define CSTATEMASK 3 // 1|2
        #define SSTATEMASK 28 // 4|8|16
        #define TSTATEMASK 1920 // 128|256|512|1024
        #define e0 1
        #define e1 2
        #define s0 4
        #define s1 8
        #define s2 16
        #define s 28 // s2|s1|s0
        #define sc0 32
        #define sc1 64
        #define sc 96 // sc1|sc0
        #define eps 128
        #define a0 256
        #define a1 512
        #define a 768 // a1|a0
        #define u 1024
        #define U(x) ((x) == 0)
        #define C(x) (((x) & CMASK) == CMASK)
        #define S(x) (((x) & SMASK) == SMASK)
        #define T(x) (((x) & TMASK) == TMASK)
        #define A_UNSHIFT(x) (((x)&a)>>8)
        #define SC_SHIFT(x) ((x)<<5)
        #define SC_UNSHIFT(x) (((x)&sc)>>5)
        int i, j, k, l;
        int nbs[4];
        int state;
        for ( i = 1; i < sizeX-1; i++ ) {
            for ( j = 1; j < sizeY-1; j++ ) {
                state = cconf( i, j );
                nbs[0] = cconf( i+1, j );
                nbs[1] = cconf( i, j-1 );
                nbs[2] = cconf( i-1, j );
                nbs[3] = cconf( i, j+1 );
                if ( T(state) ) { // transmission state
                    // transisition rule (T.1):
                    for ( k = 0; k < 4; k++ ) {
                        if ( T(nbs[k]) && ( abs(k-(A_UNSHIFT(nbs[k]))) == 2)
                             && ((nbs[k]&u) != (state&u)) && (nbs[k]&eps) ) {
                            // (T.1)(alpha)
                            nconf( i, j ) = UMASK;
                            break;
                        }
                    }
                    if ( k < 4 ) continue;
                    // (T.1)(beta)
                    for ( k = 0; k < 4; k++ ) {
                        if ( T(nbs[k]) && (abs((A_UNSHIFT(nbs[k]))-(A_UNSHIFT(state))) != 2)
                             && (abs(k-(A_UNSHIFT(nbs[k]))) == 2)
                             && ((nbs[k]&u) == (state&u) ) && (nbs[k]&eps) ) {
                            // (T.1)(beta)(a)
                            nconf( i, j ) = state | eps;
                            break;
                        }
                        if ( C(nbs[k]) && (nbs[k]&e0) && (k-(A_UNSHIFT(state)) != 0) ) {
                            // (T.1)(beta)(b)
                            nconf( i, j ) = state | eps;
                            break;
                        }
                    }
                    if ( k < 4 ) continue;
                    // (T.1)(gamma)
                    nconf( i, j ) = TMASK | (state&u) | (state&a);
                } // end of T(state)
                else if ( C(state) ) { // confluent state
                    // transistion rule (T.2)
                    for ( k = 0; k < 4; k++ ) {
                        if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2)
                             && (nbs[k]&eps) && (nbs[k]&u) ) {
                            // (T.2)(alpha)
                            nconf( i, j ) = UMASK;
                            break;
                        }
                    }
                    if ( k < 4 ) continue;
                    // (T.2)(beta)
                    for( k = 0; k < 4; k++ ) {
                        if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2)
                             && (nbs[k]&eps) && !(nbs[k]&u) ) {
                            // (T.2)(beta)(a)
                            break;
                        }
                    }
                    if ( k < 4 ) {
                        for ( k = 0; k < 4; k++ ) {
                            if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2)
                                 && !(nbs[k]&eps) && !(nbs[k]&u) ) {
                                // (T.2)(beta)(b)
                                break;
                            }
                        }
                        if ( k == 4 ) {
                            nconf( i, j ) = CMASK | e1 | ((state&e1)>>1);
                            continue;
                        }
                    }
                    // (T.2)(gamma)
                    nconf( i, j ) = CMASK | ((state&e1)>>1);
                } // end of C(state)
                else if ( U(state) ) { // unexcitable state
                    // transition rule (T.3)
                    for ( k = 0; k < 4; k++ ) {
                        if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
                            // (T.3)(alpha)
                            nconf( i, j ) = SMASK;
                            break;
                        }
                    }
                    // (T.3)(beta)
                    // doesn' change the state
                } // end of U(state)
                else if ( S(state) ) { // sensitized state
                    if ( !(state&sc1) ) {
                        // transition rule (T.4)
                        for ( k = 0; k < 4; k++ ) {
                            if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
                                // (T.4)(alpha)
                                nconf( i, j ) = state | (s0<<(2-SC_UNSHIFT(state)));
                                break;
                            }
                        }
                        // (T.4)(beta)
                        // doesn't change the state but the counter
                        nconf( i, j ) += sc0;
                    } else {
                        if ( (state&sc) == sc ) {
                            for ( k = 0; k < 4; k++ ) {
                                if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
                                    nconf( i, j ) = TMASK | a0;
                                    break;
                                }
                            }
                            if ( k == 4 ) {
                                nconf( i, j ) = TMASK;
                            }
                        } else {
                            for ( k = 0; k < 4; k++ ) {
                                if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
                                    nconf( i, j ) = state | s0;
                                    break;
                                }
                            }
                            nconf( i, j ) += sc0;
                            if ( nconf( i, j ) & s ) {
                                // make transition from sensitized to transmission or confluent state
                                l = nconf( i, j );
                                if ( (l & s) == s ) {
                                    nconf( i, j ) = CMASK;
                                } else {
                                    // other leaves of the S-to-T-transition tree of depth 3
                                    l += s0;
                                    nconf( i, j ) = TMASK | ((l&s)<<6);
                                }
                            }
                        }// else {
                        // stay for another run
                        //}
                    }
                }
                else {
                    // this state is undefined!
                }
            }
        }
        """
    ## Update cells, but only those that changed or are in the neighbourhood of one of those.
    # This is done via bitchecking, and hence admittedly difficult to read.
    # Every subsection of the transitionfunction from von Neumann's paper is marked.
    def updateAllCellsWeaveInlineFewStates( self ):
        """Advance only the active cells (those in ``cList``) one step of
        the von Neumann transition function via C code for
        scipy.weave.inline.

        Cells that change re-enlist themselves and their von Neumann
        neighbourhood through the MARKNBH macro; the C code's
        ``return_val`` is the number of cells to process next step.

        NOTE(review): within this chunk the C source is only assigned to a
        local; the weave.inline(...) invocation is not visible here --
        confirm against the full file.
        """
        #
        # All states are encoded in a bitmask:
        #
        # <--MSB 10 LSB
        # ...... 0 0 0 0 0 0 0 0 0 X X u a1 a0 eps sc1 sc0 s2 s1 s0 e1 e0
        # | | | | | | | | | | | | |-> current e
        # XX = 00 -> U <--------| | | | | | | | | | | |----> next e
        # XX = 01 -> C <----------| | | | | | | | | |
        # XX = 10 -> S | | | | | | | | |-------> lsb on S
        # XX = 11 -> T | | | | | | | |----------> ...
        # | | | | | | |-------------> msb on S
        # S{} is encoded as SMASK_111 | | | | | |-----------------> s-state counter
        # | | | | |---------------------> s-state counter
        # | | | |
        # | | | |--------------------------> excited
        # | | |-----------------------------> direction
        # | |--------------------------------> direction
        # |----------------------------------> special
        #
        #
        vonNeumannCodeFewStates = """
        #include <stdlib.h>
        #include <stdio.h>
        #line 1 "VonNeumannDefinesInCA.py"
        #define UMASK 0
        #define CMASK 2048 // 1 << 11
        #define SMASK 4096 // 2 << 11
        #define TMASK 6144 // 3 << 11
        #define CSTATEMASK 3 // 1|2
        #define SSTATEMASK 28 // 4|8|16
        #define TSTATEMASK 1920 // 128|256|512|1024
        #define e0 1
        #define e1 2
        #define s0 4
        #define s1 8
        #define s2 16
        #define s 28 // s2|s1|s0
        #define sc0 32
        #define sc1 64
        #define sc 96 // sc1|sc0
        #define eps 128
        #define a0 256
        #define a1 512
        #define a 768 // a1|a0
        #define u 1024
        /* checkers for different kinds of states */
        #define U(x) ((x) == 0)
        #define C(x) (((x) & CMASK) == CMASK)
        #define S(x) (((x) & SMASK) == SMASK)
        #define T(x) (((x) & TMASK) == TMASK)
        /* get the direction of a T-state and the 'age' of an S-state */
        #define A_UNSHIFT(x) (((x)&a)>>8)
        #define SC_SHIFT(x) ((x)<<5)
        #define SC_UNSHIFT(x) (((x)&sc)>>5)
        /* enlist a cell to be checked in the next step */
        #define ENLIST(id) if ( !nActArr( (id) ) ) {\
                               nActArr( id ) = true;\
                               nList( nCounter++ ) = id;\
                           }
        /* enlist a cell and it's neighbourhood to be checke in the next step */
        #define MARKNBH(x,y) ENLIST( (x)+(y)*sizeX );\
                             ENLIST( (x+1)+(y)*sizeX );\
                             ENLIST( (x-1)+(y)*sizeX );\
                             ENLIST( (x)+(y-1)*sizeX );\
                             ENLIST( (x)+(y+1)*sizeX );
        #include <stdio.h>
        #line 1 "VonNeumannCodeInCA.py"
        int i, j, k, l, x, y, aa;
        /* the neighbours' states */
        int nbs[4];
        /* the 'own' state */
        int state;
        /* the number of cells that have to be checked in the next step and is returned as return_val */
        int nCounter = 0;
        for ( i = 0; i < cCounter; i++ ) {
            x = cList( i ) % sizeX;
            y = cList( i ) / sizeX;
            cActArr( cList( i ) ) = false;
            state = cconf( x, y );
            nbs[0] = cconf( x+1, y );
            nbs[1] = cconf( x, y-1 );
            nbs[2] = cconf( x-1, y );
            nbs[3] = cconf( x, y+1 );
            if ( T(state) ) { // transmission state
                // transisition rule (T.1):
                for ( k = 0; k < 4; k++ ) {
                    if ( T(nbs[k]) && ( abs(k-(A_UNSHIFT(nbs[k]))) == 2)
                         && ((nbs[k]&u) != (state&u)) && (nbs[k]&eps) ) {
                        // (T.1)(alpha)
                        nconf( x, y ) = UMASK;
                        break;
                    }
                }
                if ( k < 4 ) continue;
                // (T.1)(beta)
                for ( k = 0; k < 4; k++ ) {
                    if ( T(nbs[k]) && (abs((A_UNSHIFT(nbs[k]))-(A_UNSHIFT(state))) != 2)
                         && (abs(k-(A_UNSHIFT(nbs[k]))) == 2)
                         && ((nbs[k]&u) == (state&u) ) && (nbs[k]&eps) ) {
                        // (T.1)(beta)(a)
                        nconf( x, y ) = state | eps;
                        MARKNBH( x, y );
                        break;
                    }
                    if ( C(nbs[k]) && (nbs[k]&e0) && (k-(A_UNSHIFT(state)) != 0) ) {
                        // (T.1)(beta)(b)
                        nconf( x, y ) = state | eps;
                        MARKNBH( x, y );
                        break;
                    }
                }
                if ( k < 4 ) continue;
                // (T.1)(gamma)
                // don't enlist, since cell is not active
                // MARKNBH( x, y );
                nconf( x, y ) = TMASK | (state&u) | (state&a);
            } // end of T(state)
            else if ( C(state) ) { // confluent state
                // transistion rule (T.2)
                for ( k = 0; k < 4; k++ ) {
                    if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2)
                         && (nbs[k]&eps) && (nbs[k]&u) ) {
                        // (T.2)(alpha)
                        // don't enlist, since cell is not active
                        nconf( x, y ) = UMASK;
                        break;
                    }
                }
                if ( k < 4 ) continue;
                // (T.2)(beta)
                for( k = 0; k < 4; k++ ) {
                    if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2)
                         && (nbs[k]&eps) && !(nbs[k]&u) ) {
                        // (T.2)(beta)(a)
                        MARKNBH( x, y );
                        break;
                    }
                }
                if ( k < 4 ) {
                    for ( k = 0; k < 4; k++ ) {
                        if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2)
                             && !(nbs[k]&eps) && !(nbs[k]&u) ) {
                            // (T.2)(beta)(b)
                            MARKNBH( x, y );
                            break;
                        }
                    }
                    if ( k == 4 ) {
                        nconf( x, y ) = CMASK | e1 | ((state&e1)>>1);
                        MARKNBH( x, y );
                        continue;
                    }
                }
                // (T.2)(gamma)
                nconf( x, y ) = CMASK | ((state&e1)>>1);
                MARKNBH( x, y );
            } // end of C(state)
            else if ( U(state) ) { // unexcitable state
                // transition rule (T.3)
                for ( k = 0; k < 4; k++ ) {
                    if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
                        // (T.3)(alpha)
                        nconf( x, y ) = SMASK;
                        MARKNBH( x, y );
                        break;
                    }
                }
                // (T.3)(beta)
                // doesn' change the state
            } // end of U(state)
            else if ( S(state) ) { // sensitized state
                MARKNBH( x, y );
                if ( !(state&sc1) ) {
                    // transition rule (T.4)
                    for ( k = 0; k < 4; k++ ) {
                        if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
                            // (T.4)(alpha)
                            nconf( x, y ) = state | (s0<<(2-SC_UNSHIFT(state)));
                            break;
                        }
                    }
                    // (T.4)(beta)
                    // doesn't change the state but the counter
                    nconf( x, y ) += sc0;
                } else {
                    if ( (state&sc) == sc ) {
                        for ( k = 0; k < 4; k++ ) {
                            if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
                                nconf( x, y ) = TMASK | a0;
                                break;
                            }
                        }
                        if ( k == 4 ) {
                            nconf( x, y ) = TMASK;
                        }
                    } else {
                        for ( k = 0; k < 4; k++ ) {
                            if ( T(nbs[k]) && (abs(k-A_UNSHIFT(nbs[k])) == 2) && (nbs[k]&eps) ) {
                                nconf( x, y ) = state | s0;
                                break;
                            }
                        }
                        nconf( x, y ) += sc0;
                        if ( nconf( x, y ) & s ) {
                            // make transition from sensitized to transmission or confluent state
                            l = nconf( x, y );
                            if ( (l & s) == s ) {
                                nconf( x, y ) = CMASK;
                            } else {
                                // other leaves of the S-to-T-transition tree of depth 3
                                l += s0;
                                nconf( x, y ) = TMASK | ((l&s)<<6);
                            }
                        }
                    }// else {
                    // stay for another run
                    //}
                }
            }
            else {
                // this state is undefined!
            }
        }
        return_val = nCounter;
        """
|
Beautiful made to order bridal bouquets of tiger lilies and roses, fully personalised to suit your wedding theme.
We will use stunning ivory tiger lilies and mix them with either white or ivory roses, whichever you prefer — a timeless design which is both classical and elegant.
With 8 lovely tiger lilies, these beautiful posies would complement any wedding theme. You can choose to have diamante or pearl centres added to the roses if liked, or a mix of the two, and you may also like to have crystal stems, seed pearl loops or diamante picks scattered through the design as well for added interest.
The handle will be chunky and fully wrapped in the ribbon colour you choose, and the bouquet will then be finished with matching bows.
|
#!/usr/bin/env python
"""
KDE_plot: make plots of each KDE (1D or 2D)
Usage:
KDE_plot [options] <kde>
KDE_plot -h | --help
KDE_plot --version
Options:
<kde> Pickled KDE object
('-' if input from STDIN)
-o=<o> Output file name.
[Default: KDE.png]
-n=<n> Number of taxon KDEs to plot (0 = all plotted).
[Default: 0]
--nCol=<nc> Number of subplot columns.
[Default: 1]
--xMin=<xm> Minimum x-axis value ('' = min value in dataset).
[Default: ]
--xMax=<xM> Maximum x-axis value ('' = max value in dataset).
[Default: ]
--yMin=<ym> Minimum y-axis value ('' = min value in dataset).
[Default: ]
--yMax=<yM> Maximum y-axis value ('' = max value in dataset).
[Default: ]
--xStep=<xs> X dimension granularity.
[Default: 0.0005]
--yStep=<yx> Y dimension granularity.
[Default: 100]
--xX=<xx> X dimension figure size multiplier (ncol * x)
[Default: 4]
--yX=<yx> Y dimension figure size multiplier (ncol * x)
[Default: 3.5]
--logY=<ly> Base for y-axis log scaling ('' = no log scaling).
[Default: ]
-h --help Show this screen.
--version Show version.
--debug Debug mode
Description:
Plot each KDE (1D or 2D KDEs) in the the provided multi-KDE object.
Output:
Image files written to `-o`
"""
# import
## batteries
from docopt import docopt
import sys,os
## application libraries
scriptDir = os.path.dirname(__file__)
libDir = os.path.join(scriptDir, '../lib/')
sys.path.append(libDir)
# application
from SIPSim import Utils
from SIPSim import FigGen
def main(args=None):
    """Load the pickled KDE object and write one figure per KDE set.

    args : docopt argument dictionary (see the module usage string).

    If the loaded object is a plain multi-KDE collection, a single figure
    is written to `-o`.  If it is keyed by library (FigGen.KDE_ndims
    raises AttributeError for such objects), one figure per library is
    written, with the library name spliced into the output file name.
    """
    # All figure options are shared by both output paths.
    fig_opts = dict(n_subplot=args['-n'],
                    ncol=args['--nCol'],
                    xMin=args['--xMin'],
                    xMax=args['--xMax'],
                    yMin=args['--yMin'],
                    yMax=args['--yMax'],
                    xStep=args['--xStep'],
                    yStep=args['--yStep'],
                    xX=args['--xX'],
                    yX=args['--yX'],
                    logY=args['--logY'])

    KDEs = Utils.load_kde(args['<kde>'])
    try:
        FigGen.KDE_ndims(KDEs)
    except AttributeError:
        # Per-library collection: write one image per library.
        base, ext = os.path.splitext(args['-o'])
        for lib, kde in KDEs.items():
            sys.stderr.write('Processing library: "{}"\n'.format(lib))
            outName = base + '_' + str(lib) + ext
            FigGen.make_kde_fig(kde, outName, **fig_opts)
    else:
        FigGen.make_kde_fig(KDEs, args['-o'], **fig_opts)
def opt_parse(args=None):
    """Parse command line options and dispatch to main().

    args : optional explicit argv list; None means parse sys.argv.
    """
    # docopt treats argv=None as "use sys.argv", so a single call covers
    # both the CLI entry point and programmatic invocation.
    parsed = docopt(__doc__, version='0.1', argv=args)
    main(parsed)
|
Should services be managed in a Version Control System?
We're working on a project with both the Gateway and the Developer Portal, and we want to use git as version control system.
Question: Should we be versioning only the policies and its dependencies? Or also the services being published?
Additional note - We expect to have 3 environments: dev, test and prod.
Any insights are greatly appreciated.
Thanks for your quick reply. I've already followed the instructions of the link you provided, which have been really useful for some testing we've done so far.
I guess what confuses me a little bit is when the Developer Portal comes into the picture, and how all the pieces (Policy Manager + Developer Portal + multiple environments) should align from a release strategy point of view. For example, if I migrateIn a service into a given environment, that service will not be portal managed (even if the assertion "Set as Portal Managed Service" is present). So, for these services the Portal will be more of a read-only tool, am I correct?
1) What version of the Portal are you using?
2) How many Portals are you running?
3) How does the Portal align to the various environments?
1) We're using version 3.5.
2) We just setup a Portal per environment (dev, test, and soon, prod).
3) I believe this is part of what we're trying to figure out.
To move Gateway metadata across different environments, GMU is the right approach. To move Portal metadata such as API Keys (Applications), it would need to be done manually since Portal 3.5 has no APIs. With Portal 4.x, there are system APIs that would allow for Portal metadata to be moved across programmatically.
Thanks for your reply. Do these system APIs in Portal 4.x include calls to enable a migrated API via GMU? And add API Owner groups? I've also noticed that APIs published via GMU are disabled and with no API Owner group.
The 4.x Portal API calls allow for the updates and would accommodate for the use case you described. The API Owner Groups does not exist yet in Portal 4.x, but you may be able to achieve similar functionality by using an organization to group users and APIs, and then extend the access permissions of those organization admin user to manage their own APIs.
|
#coding: utf-8
from django import forms
from localflavor.br.br_states import STATE_CHOICES
from django.contrib.auth.models import User
from .models import UserProfile, Testimonials, UserBank, Banks
class TestimonialsForm(forms.ModelForm):
    """Form for submitting a Testimonials record.

    The 'active' flag is excluded from user input -- presumably it is
    toggled by staff/moderation elsewhere; confirm against the views.
    """
    class Meta:
        model = Testimonials
        exclude = ('active',)
class UserForm(forms.ModelForm):
    """Registration form for Django's built-in User model, with
    Portuguese field labels.

    NOTE(review): 'password' is a plain CharField rendered with
    PasswordInput; hashing (e.g. user.set_password) must happen in the
    view that saves this form -- confirm.
    """
    first_name = forms.CharField(label='Nome')
    last_name = forms.CharField(label='Sobrenome')
    username = forms.CharField(label='Nome de usuário')
    email = forms.EmailField(label='E-mail')
    password = forms.CharField(label='Senha', widget=forms.PasswordInput())
    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
    """Form for the additional profile data attached to a user.

    The 'state' field is restricted to valid Brazilian states via
    django-localflavor's STATE_CHOICES.
    """
    state = forms.ChoiceField(label='Estado', choices=STATE_CHOICES)
    class Meta:
        model = UserProfile
        fields = ('cpf', 'birthday', 'gender', 'address', 'code_postal', 'neighborhood', 'state', 'city', 'phone_number')
class BanksForm(forms.ModelForm):
    """Form for Banks records; hides 'code_bank' from user input."""
    class Meta:
        model = Banks
        # Fixed: the original used 'excludes' (not a valid ModelForm Meta
        # option, so it was silently ignored) and '('code_bank')' is a
        # plain string, not a one-element tuple -- nothing was excluded.
        exclude = ('code_bank',)
class UserBankForm(forms.ModelForm):
    """Form for a user's bank account data; hides 'user' from user input
    (the owner is expected to be set in the view)."""
    class Meta:
        model = UserBank
        # Fixed: 'excludes' is not a valid ModelForm Meta option and
        # '('user')' is a string, not a tuple, so the field was never
        # actually excluded.
        exclude = ('user',)
|
Chris Masters retains the record of the longest serving reporter on Australia's longest running public affairs television program, Four Corners. Between 1983 and 2008 he made over 100 reports for the national broadcaster's flagship program, many of them well remembered and some of them nation shaping. The Big League in 1983 triggered the Street Royal Commission and reforms to judicial accountability. French Connections in 1985, an international exclusive on the sinking of the Greenpeace flagship The Rainbow Warrior, earned Chris the highest award in Australian journalism, the Gold Walkley. His most famous report, The Moonlight State, investigating police corruption in Queensland, initiated the Fitzgerald Inquiry and a raft of reforms that reached well beyond Queensland. The Dead Heart received a 1987 Penguin award from the Television Society of Australia. Other reports such as Inside a Holocaust, on genocide in Rwanda in 1994, won a Logie award and The Coward's War, on the Bosnian conflict, a further Walkley.
Chris has written three books, Inside Story (1991), Not for Publication (2002) and Jonestown (2006), the latter winning three awards, including Biography of the Year. Chris is from a well-known media family, his mother Olga, a lifelong journalist and successful author. In 1999 Chris was awarded a Public Service Medal for his anti-corruption work. In 2005 he received an honorary doctorate in Communication from RMIT University. A further honorary doctorate was awarded in 2009 by The University of Queensland where Chris is now an Adjunct Professor. Chris is also a member of the board of Swinburne University's Foundation for Public Interest Journalism.
Surrendering his permanent position at Four Corners in 2008 Chris Masters now works as a contributing editor at Sydney's Daily Telegraph and an author. He has written for a range of major newspapers and journals, most particularly The Monthly. Mission Drift on the Afghanistan conflict was reprinted in Melbourne University Press. Best Political Writing 2008. Moonlight Reflections, an account of corruption in the Bjelke-Petersen government, was included in Best Political Writing 2009.
He also teaches investigative journalism and film writing and is a regular public speaker, having delivered among others the AN Smith lecture and the Curtin address. In 2006 he received the Voltaire award from Free Speech Victoria.
Chris Masters is a national director of Red Kite, which provides support to families of children with cancer.
|
import bisect
import copy
import pyatspi
import time
from gi.repository import GLib
from . import cmdnames
from . import chnames
from . import keybindings
from . import messages
from . import input_event
from . import orca_state
from . import settings_manager
_settingsManager = settings_manager.getManager()
# define 'live' property types
# (Higher values purge lower-priority queued messages -- see
# LiveRegionManager.handleEvent; LIVE_OFF suppresses announcements and
# LIVE_NONE marks objects without live markup.)
LIVE_OFF = -1
LIVE_NONE = 0
LIVE_POLITE = 1
LIVE_ASSERTIVE = 2
LIVE_RUDE = 3
# Seconds a message is held in the queue before it is discarded
MSG_KEEPALIVE_TIME = 45 # in seconds
# The number of messages that are cached and can later be reviewed via
# LiveRegionManager.reviewLiveAnnouncement.
CACHE_SIZE = 9 # corresponds to one of nine key bindings
class PriorityQueue:
    """Thread **UNSAFE** priority queue ordered first by integer priority
    and then chronologically (the insertion timestamp breaks ties).

    TODO: experiment with Queue.Queue to make thread safe
    """
    def __init__(self):
        # Entries are (priority, timestamp, data, obj) tuples, kept sorted.
        self.queue = []

    def enqueue(self, data, priority, obj):
        """Insert an element ordered by priority, then by arrival time."""
        entry = (priority, time.time(), data, obj)
        bisect.insort_left(self.queue, entry)

    def dequeue(self):
        """Remove and return the highest priority (lowest value) entry."""
        return self.queue.pop(0)

    def clear(self):
        """Drop every queued entry."""
        self.queue = []

    def purgeByKeepAlive(self):
        """Discard entries older than MSG_KEEPALIVE_TIME seconds."""
        now = time.time()
        self.queue = [entry for entry in self.queue
                      if entry[1] + MSG_KEEPALIVE_TIME > now]

    def purgeByPriority(self, priority):
        """Discard entries whose priority is lower than or equal to the
        given priority (larger numbers mean lower priority)."""
        self.queue = [entry for entry in self.queue if entry[0] > priority]

    def __len__(self):
        """Return the number of queued entries."""
        return len(self.queue)
class LiveRegionManager:
    def __init__(self, script):
        """Set up live region support for the given script.

        script : the script this manager belongs to; provides speech
        output (presentMessage) and bookmark persistence for politeness
        overrides.
        """
        self._script = script
        # message priority queue
        self.msg_queue = PriorityQueue()
        self.inputEventHandlers = self._getInputEventHandlers()
        self.keyBindings = self._getKeyBindings()
        # This is temporary.
        self.functions = [self.advancePoliteness,
                          self.setLivePolitenessOff,
                          self.toggleMonitoring,
                          self.reviewLiveAnnouncement]
        # Message cache. Used to store up to 9 previous messages so user can
        # review if desired.
        self.msg_cache = []
        # User overrides for politeness settings.
        self._politenessOverrides = None
        self._restoreOverrides = None
        # last live obj to be announced
        self.lastliveobj = None
        # Used to track whether a user wants to monitor all live regions
        # Not to be confused with the global Gecko.liveRegionsOn which
        # completely turns off live region support. This one is based on
        # a user control by changing politeness levels to LIVE_OFF or back
        # to the bookmark or markup politeness value.
        self.monitoring = True
        # Set up politeness level overrides and subscribe to bookmarks
        # for load and save user events.
        # We are initialized after bookmarks so call the load handler once
        # to get initialized.
        #
        self.bookmarkLoadHandler()
        script.bookmarks.addSaveObserver(self.bookmarkSaveHandler)
        script.bookmarks.addLoadObserver(self.bookmarkLoadHandler)
def _getInputEventHandlers(self):
handlers = {}
handlers["advanceLivePoliteness"] = \
input_event.InputEventHandler(
self.advancePoliteness,
cmdnames.LIVE_REGIONS_ADVANCE_POLITENESS)
handlers["setLivePolitenessOff"] = \
input_event.InputEventHandler(
self.setLivePolitenessOff,
cmdnames.LIVE_REGIONS_SET_POLITENESS_OFF)
handlers["monitorLiveRegions"] = \
input_event.InputEventHandler(
self.toggleMonitoring,
cmdnames.LIVE_REGIONS_MONITOR)
handlers["reviewLiveAnnouncement"] = \
input_event.InputEventHandler(
self.reviewLiveAnnouncement,
cmdnames.LIVE_REGIONS_REVIEW)
return handlers
def _getKeyBindings(self):
keyBindings = keybindings.KeyBindings()
keyBindings.add(
keybindings.KeyBinding(
"backslash",
keybindings.defaultModifierMask,
keybindings.NO_MODIFIER_MASK,
self.inputEventHandlers.get("advanceLivePoliteness")))
keyBindings.add(
keybindings.KeyBinding(
"backslash",
keybindings.defaultModifierMask,
keybindings.SHIFT_MODIFIER_MASK,
self.inputEventHandlers.get("setLivePolitenessOff")))
keyBindings.add(
keybindings.KeyBinding(
"backslash",
keybindings.defaultModifierMask,
keybindings.ORCA_SHIFT_MODIFIER_MASK,
self.inputEventHandlers.get("monitorLiveRegions")))
for key in ["F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9"]:
keyBindings.add(
keybindings.KeyBinding(
key,
keybindings.defaultModifierMask,
keybindings.ORCA_MODIFIER_MASK,
self.inputEventHandlers.get("reviewLiveAnnouncement")))
return keyBindings
def reset(self):
# First we will purge our politeness override dictionary of LIVE_NONE
# objects that are not registered for this page
newpoliteness = {}
currenturi = self._script.bookmarks.getURIKey()
for key, value in list(self._politenessOverrides.items()):
if key[0] == currenturi or value != LIVE_NONE:
newpoliteness[key] = value
self._politenessOverrides = newpoliteness
def bookmarkSaveHandler(self):
"""Bookmark save callback"""
self._script.bookmarks.saveBookmarksToDisk(self._politenessOverrides,
filename='politeness')
def bookmarkLoadHandler(self):
"""Bookmark load callback"""
# readBookmarksFromDisk() returns None on error. Just initialize to an
# empty dictionary if this is the case.
self._politenessOverrides = \
self._script.bookmarks.readBookmarksFromDisk(filename='politeness') \
or {}
    def handleEvent(self, event):
        """Main live region event handler.

        Queues the message derived from the event according to the
        region's politeness; assertive/rude messages purge queued
        messages of strictly lower priority first.
        """
        politeness = self._getLiveType(event.source)
        if politeness == LIVE_OFF:
            return
        if politeness == LIVE_NONE:
            # All the 'registered' LIVE_NONE objects will be set to off
            # if not monitoring. We will ignore LIVE_NONE objects that
            # arrive after the user switches off monitoring.
            if not self.monitoring:
                return
        elif politeness == LIVE_POLITE:
            # Nothing to do for now
            pass
        elif politeness == LIVE_ASSERTIVE:
            self.msg_queue.purgeByPriority(LIVE_POLITE)
        elif politeness == LIVE_RUDE:
            self.msg_queue.purgeByPriority(LIVE_ASSERTIVE)
        message = self._getMessage(event)
        if message:
            # Start the GLib pump only when the queue transitions from
            # empty to non-empty; pumpMessages keeps itself alive after.
            if len(self.msg_queue) == 0:
                GLib.timeout_add(100, self.pumpMessages)
            self.msg_queue.enqueue(message, politeness, event.source)
def pumpMessages(self):
""" Main gobject callback for live region support. Handles both
purging the message queue and outputting any queued messages that
were queued up in the handleEvent() method.
"""
if len(self.msg_queue) > 0:
self.msg_queue.purgeByKeepAlive()
politeness, timestamp, message, obj = self.msg_queue.dequeue()
# Form output message. No need to repeat labels and content.
# TODO: really needs to be tested in real life cases. Perhaps
# a verbosity setting?
if message['labels'] == message['content']:
utts = message['content']
else:
utts = message['labels'] + message['content']
self._script.presentMessage(utts)
# set the last live obj to be announced
self.lastliveobj = obj
# cache our message
self._cacheMessage(utts)
# We still want to maintain our queue if we are not monitoring
if not self.monitoring:
self.msg_queue.purgeByKeepAlive()
# See you again soon, stay in event loop if we still have messages.
if len(self.msg_queue) > 0:
return True
else:
return False
def getLiveNoneObjects(self):
"""Return the live objects that are registered and have a politeness
of LIVE_NONE. """
retval = []
currenturi = self._script.bookmarks.getURIKey()
for uri, objectid in self._politenessOverrides:
if uri == currenturi and isinstance(objectid, tuple):
retval.append(self._script.bookmarks.pathToObj(objectid))
return retval
def advancePoliteness(self, script, inputEvent):
"""Advance the politeness level of the given object"""
if not _settingsManager.getSetting('inferLiveRegions'):
self._script.presentMessage(messages.LIVE_REGIONS_OFF)
return
obj = orca_state.locusOfFocus
objectid = self._getObjectId(obj)
uri = self._script.bookmarks.getURIKey()
try:
# The current priority is either a previous override or the
# live property. If an exception is thrown, an override for
# this object has never occurred and the object does not have
# live markup. In either case, set the override to LIVE_NONE.
cur_priority = self._politenessOverrides[(uri, objectid)]
except KeyError:
cur_priority = self._liveStringToType(obj)
if cur_priority == LIVE_OFF or cur_priority == LIVE_NONE:
self._politenessOverrides[(uri, objectid)] = LIVE_POLITE
self._script.presentMessage(messages.LIVE_REGIONS_LEVEL_POLITE)
elif cur_priority == LIVE_POLITE:
self._politenessOverrides[(uri, objectid)] = LIVE_ASSERTIVE
self._script.presentMessage(messages.LIVE_REGIONS_LEVEL_ASSERTIVE)
elif cur_priority == LIVE_ASSERTIVE:
self._politenessOverrides[(uri, objectid)] = LIVE_RUDE
self._script.presentMessage(messages.LIVE_REGIONS_LEVEL_RUDE)
elif cur_priority == LIVE_RUDE:
self._politenessOverrides[(uri, objectid)] = LIVE_OFF
self._script.presentMessage(messages.LIVE_REGIONS_LEVEL_OFF)
def goLastLiveRegion(self):
"""Move the caret to the last announced live region and speak the
contents of that object"""
if self.lastliveobj:
self._script.utilities.setCaretPosition(self.lastliveobj, 0)
self._script.speakContents(self._script.utilities.getObjectContentsAtOffset(
self.lastliveobj, 0))
def reviewLiveAnnouncement(self, script, inputEvent):
"""Speak the given number cached message"""
msgnum = int(inputEvent.event_string[1:])
if not _settingsManager.getSetting('inferLiveRegions'):
self._script.presentMessage(messages.LIVE_REGIONS_OFF)
return
if msgnum > len(self.msg_cache):
self._script.presentMessage(messages.LIVE_REGIONS_NO_MESSAGE)
else:
self._script.presentMessage(self.msg_cache[-msgnum])
    def setLivePolitenessOff(self, script, inputEvent):
        """User toggle to set all live regions to LIVE_OFF or back to their
        original politeness.

        When monitoring, a snapshot of the current overrides is saved to
        _restoreOverrides before everything is silenced; toggling again
        restores that snapshot.
        """
        if not _settingsManager.getSetting('inferLiveRegions'):
            self._script.presentMessage(messages.LIVE_REGIONS_OFF)
            return
        # start at the document frame
        docframe = self._script.utilities.documentFrame()
        # get the URI of the page.  It is used as a partial key.
        uri = self._script.bookmarks.getURIKey()
        # The user is currently monitoring live regions but now wants to
        # change all live region politeness on page to LIVE_OFF
        if self.monitoring:
            self._script.presentMessage(messages.LIVE_REGIONS_ALL_OFF)
            self.msg_queue.clear()
            # First we'll save off a copy for quick restoration
            self._restoreOverrides = copy.copy(self._politenessOverrides)
            # Set all politeness overrides to LIVE_OFF.
            for override in list(self._politenessOverrides.keys()):
                self._politenessOverrides[override] = LIVE_OFF
            # look through all the objects on the page and set/add to
            # politeness overrides.  This only adds live regions with good
            # markup.
            matches = pyatspi.findAllDescendants(docframe, self.matchLiveRegion)
            for match in matches:
                objectid = self._getObjectId(match)
                self._politenessOverrides[(uri, objectid)] = LIVE_OFF
            # Toggle our flag
            self.monitoring = False
        # The user wants to restore politeness levels
        else:
            for key, value in list(self._restoreOverrides.items()):
                self._politenessOverrides[key] = value
            self._script.presentMessage(messages.LIVE_REGIONS_ALL_RESTORED)
            # Toggle our flag
            self.monitoring = True
def generateLiveRegionDescription(self, obj, **args):
"""Used in conjuction with whereAmI to output description and
politeness of the given live region object"""
objectid = self._getObjectId(obj)
uri = self._script.bookmarks.getURIKey()
results = []
# get the description if there is one.
for relation in obj.getRelationSet():
relationtype = relation.getRelationType()
if relationtype == pyatspi.RELATION_DESCRIBED_BY:
targetobj = relation.getTarget(0)
try:
# We will add on descriptions if they don't duplicate
# what's already in the object's description.
# See http://bugzilla.gnome.org/show_bug.cgi?id=568467
# for more information.
#
description = targetobj.queryText().getText(0, -1)
if description.strip() != obj.description.strip():
results.append(description)
except NotImplemented:
pass
# get the politeness level as a string
try:
livepriority = self._politenessOverrides[(uri, objectid)]
liveprioritystr = self._liveTypeToString(livepriority)
except KeyError:
liveprioritystr = 'none'
# We will only output useful information
#
if results or liveprioritystr != 'none':
results.append(messages.LIVE_REGIONS_LEVEL % liveprioritystr)
return results
def matchLiveRegion(self, obj):
"""Predicate used to find a live region"""
attrs = self._getAttrDictionary(obj)
return 'container-live' in attrs
def _getMessage(self, event):
"""Gets the message associated with a given live event."""
attrs = self._getAttrDictionary(event.source)
content = ""
labels = ""
# A message is divided into two parts: labels and content. We
# will first try to get the content. If there is None,
# assume it is an invalid message and return None
if event.type.startswith('object:children-changed:add'):
if attrs.get('container-atomic') == 'true':
content = self._script.utilities.expandEOCs(event.source)
else:
content = self._script.utilities.expandEOCs(event.any_data)
elif event.type.startswith('object:text-changed:insert'):
if attrs.get('container-atomic') != 'true':
content = event.any_data
else:
text = self._script.utilities.queryNonEmptyText(event.source)
if text:
content = text.getText(0, -1)
if not content:
return None
content = content.strip()
if len(content) == 1:
content = chnames.getCharacterName(content)
# Proper live regions typically come with proper aria labels. These
# labels are typically exposed as names. Failing that, descriptions.
# Looking for actual labels seems a non-performant waste of time.
name = (event.source.name or event.source.description).strip()
if name and name != content:
labels = name
# instantly send out notify messages
if attrs.get('channel') == 'notify':
utts = labels + content
self._script.presentationInterrupt()
self._script.presentMessage(utts)
return None
return {'content':[content], 'labels':[labels]}
    def flushMessages(self):
        """Discard any queued live region messages."""
        self.msg_queue.clear()
def _cacheMessage(self, utts):
"""Cache a message in our cache list of length CACHE_SIZE"""
self.msg_cache.append(utts)
if len(self.msg_cache) > CACHE_SIZE:
self.msg_cache.pop(0)
def _getLiveType(self, obj):
"""Returns the live politeness setting for a given object. Also,
registers LIVE_NONE objects in politeness overrides when monitoring."""
objectid = self._getObjectId(obj)
uri = self._script.bookmarks.getURIKey()
if (uri, objectid) in self._politenessOverrides:
# look to see if there is a user politeness override
return self._politenessOverrides[(uri, objectid)]
else:
livetype = self._liveStringToType(obj)
# We'll save off a reference to LIVE_NONE if we are monitoring
# to give the user a chance to change the politeness level. It
# is done here for performance sake (objectid, uri are expensive)
if livetype == LIVE_NONE and self.monitoring:
self._politenessOverrides[(uri, objectid)] = livetype
return livetype
def _getObjectId(self, obj):
"""Returns the HTML 'id' or a path to the object is an HTML id is
unavailable"""
attrs = self._getAttrDictionary(obj)
if attrs is None:
return self._getPath(obj)
try:
return attrs['id']
except KeyError:
return self._getPath(obj)
def _liveStringToType(self, obj, attributes=None):
    """Returns the politeness enum for a given object.

    Any missing or unrecognised 'container-live' value maps to LIVE_NONE.
    """
    attrs = attributes or self._getAttrDictionary(obj)
    politeness_map = {
        'off': LIVE_OFF,
        'polite': LIVE_POLITE,
        'assertive': LIVE_ASSERTIVE,
        'rude': LIVE_RUDE,
    }
    return politeness_map.get(attrs.get('container-live'), LIVE_NONE)
def _liveTypeToString(self, politeness):
    """Returns the politeness level as a string given a politeness enum."""
    names = {
        LIVE_OFF: 'off',
        LIVE_POLITE: 'polite',
        LIVE_ASSERTIVE: 'assertive',
        LIVE_RUDE: 'rude',
        LIVE_NONE: 'none',
    }
    return names.get(politeness, 'unknown')
def _getAttrDictionary(self, obj):
try:
return dict([attr.split(':', 1) for attr in obj.getAttributes()])
except:
return {}
def _getPath(self, obj):
""" Returns, as a tuple of integers, the path from the given object
to the document frame."""
docframe = self._script.utilities.documentFrame()
path = []
while True:
if obj.parent is None or obj == docframe:
path.reverse()
return tuple(path)
try:
path.append(obj.getIndexInParent())
except Exception:
raise LookupError
obj = obj.parent
def toggleMonitoring(self, script, inputEvent):
    """Toggle the 'inferLiveRegions' setting and announce the new state."""
    if _settingsManager.getSetting('inferLiveRegions'):
        # Turn monitoring off and drop anything still queued.
        _settingsManager.setSetting('inferLiveRegions', False)
        self.flushMessages()
        self._script.presentMessage(messages.LIVE_REGIONS_MONITORING_OFF)
    else:
        _settingsManager.setSetting('inferLiveRegions', True)
        self._script.presentMessage(messages.LIVE_REGIONS_MONITORING_ON)
|
But, just like America's Next Top Model, the show looks like it'll be far from just fun and games. It turns out the five girls who star will not only be all living under the same roof, but also doing so with their mothers, whom Hadid will also school on becoming momagers—a move that fully allows her Tyra Banks side to shine through. "Moms, use this time to guide your daughters, and then learn to let them fly on their own," Hadid says with upward hand gestures to the crowd on the couch as she sits on the floor just like Bella, barefoot but with her pink furry Gucci slippers in arm's reach.
Gigi might cheer on the girls at photo shoots, and her biggest admirer, Tommy Hilfiger, might also send them some love, but inevitably, and thanks largely to Hadid, the drama starts to build further up and up: As the girls reveal inadequacies in tasks like jumping in a timely fashion and shouldering a giant surfboard while looking like a carefree beach babe, Hadid grows increasingly frustrated, looking straight into one model's eyes while in a boxing ring and saying, "I need to see the tiger in you," and brushing off the pain of another model who's in tears as she descends from a rope that was suspending her in midair as "part of the job."
Spoiler alert: There are many more tears where those came from, including from the moms, who end up feeling like they're "in the judgment seat as much as the girls are." The camera even turns to a black-and-white stormy New York sky as one mom states the obvious: "We're not in a normal situation." Still, they soldier on through what at one point looks like a horror show for their daughters' sakes, since the girls have a chance to win a contract with Hadid and the opportunity to sign with IMG—that is, if they have the stamina to make it past the former. "I'm the most protective mama bear you're ever gonna cross," Hadid at one point asserts to her victims, in a statement that probably should have been fact-checked by Kris Jenner.
Yolanda Hadid got to root on not just one but both of her supermodel daughters at the Prabal Gurung show, where both Bella and Gigi Hadid walked.
|
from __future__ import division
import numpy as np
import scipy.optimize as opt
import imp
import geometry
import plot
"""Problem classes"""
class Problem(object):
    """A geometric constraint problem.

    Holds a collection of :class:`~pygeosolve.parameters.Parameter`
    objects and :class:`~pygeosolve.constraints.AbstractConstraint`
    objects, and solves for the free parameter values that minimise the
    total constraint error.
    """

    def __init__(self):
        """Initialises an empty problem.

        Note: ``params``, ``constraints`` and ``error_calc_count`` used
        to be mutable *class* attributes, so every Problem instance
        shared the same parameter and constraint lists; ``solution`` did
        not exist until :meth:`solve` ran, making
        :meth:`solution_exists` raise ``AttributeError``. All state is
        now initialised per instance.
        """
        # Parameters associated with this problem.
        self.params = []
        # Constraints associated with this problem.
        self.constraints = []
        # Number of times this problem's error has been calculated.
        self.error_calc_count = 0
        # Optimisation result; None until solve() has completed.
        self.solution = None

    def add_constraint(self, constraint):
        """Adds a constraint to this problem.

        :param constraint: the \
        :class:`~pygeosolve.constraints.AbstractConstraint` to add
        """
        self.constraints.append(constraint)
        # extract the constraint's parameters into the problem
        self._add_constraint_params(constraint)

    def _add_param(self, param):
        """Adds a parameter to this problem.

        :param param: the :class:`~pygeosolve.parameters.Parameter` to add
        """
        self.params.append(param)

    def _add_constraint_params(self, constraint):
        """Adds the parameters from a constraint to this problem.

        Parameters already known to the problem are not added twice.

        :param constraint: the \
        :class:`~pygeosolve.constraints.AbstractConstraint` to extract the \
        parameters from
        """
        for param in constraint.params:
            if param not in self.params:
                self._add_param(param)

    def free_params(self):
        """Non-fixed parameters associated with this problem.

        :return: list of free :class:`~pygeosolve.parameters.Parameter` objects
        """
        return [param for param in self.params if not param.fixed]

    def free_param_vals(self):
        """Values of non-fixed parameters associated with this problem.

        :return: array of free :class:`~pygeosolve.parameters.Parameter` values
        """
        return np.array([param.value for param in self.free_params()])

    def _set_free_param_vals(self, values):
        """Sets values of non-fixed parameters in this problem.

        :param values: list of new values to set, in the same order as the \
        free parameters returned by :meth:`free_param_vals`
        """
        for param, value in zip(self.free_params(), values):
            param.value = value

    def error(self):
        """Calculates the total error associated with this problem.

        :return: total of individual \
        :class:`~pygeosolve.constraints.AbstractConstraint` errors
        """
        total = sum([constraint.error() for constraint in self.constraints])
        # track how many times the (potentially expensive) error function runs
        self.error_calc_count += 1
        return total

    def _error_with_vals(self, vals):
        """Sets new free parameter values and returns the new error.

        :param vals: the new free parameter values to set
        """
        self._set_free_param_vals(vals)
        return self.error()

    def _error_methods(self):
        """Creates a list of constraint dicts in scipy.optimize format.

        Each constraint error function is handed to the optimiser as an
        inequality constraint (COBYLA requires fun(x) >= 0).
        """
        return [{'type': 'ineq', 'fun': constraint.error}
                for constraint in self.constraints]

    def solve(self):
        """Solves the problem.

        This method attempts to minimise the error function given the
        constraints defined within the problem. A successful minimisation
        results in the new, optimised parameter values being assigned."""
        # first guess at solution - just use current values
        x0 = self.free_param_vals()

        # call optimisation routine
        self.solution = opt.minimize(fun=self._error_with_vals, x0=x0,
                                     method="COBYLA", tol=1e-10,
                                     constraints=self._error_methods())

        # update parameters from solution
        self._update()

    def solution_exists(self):
        """Checks if a solution has been computed.

        :return: True if solution exists, otherwise False"""
        return self.solution is not None

    def _update(self):
        """Updates the list of free parameters associated with this problem.

        This method retrieves the values from the optimisation result and
        updates each one's corresponding parameter.

        :raise Exception: if no solution has been computed yet
        """
        if not self.solution_exists():
            # cannot update values without a solution
            raise Exception("Solution has not been computed")

        # update values from the optimisation result's solution
        self._set_free_param_vals(self.solution.x)

    def plot(self, *args, **kwargs):
        """Plots the problem with its current values.

        Requires the PyQt4 module."""
        # try to find PyQt4 module
        try:
            imp.find_module("PyQt4")
        except ImportError:
            raise Exception("The PyQt4 module is required for plotting")

        # create canvas
        canvas = plot.Canvas()

        # add line primitives to the canvas
        # TODO: add support for different primitives
        for constraint in self.constraints:
            for primitive in constraint.primitives:
                if isinstance(primitive, geometry.Line):
                    canvas.add_line(primitive)

        # show canvas
        canvas.show(*args, **kwargs)

    def __str__(self):
        """String representation of this problem.

        :return: description of problem"""
        # build list of parameter string representations
        param_str = "\n\t" + "\n\t".join([str(param) for param in self.params])

        # return description
        return "Problem with parameters:{0}".format(param_str)
|
Actors take the stage to inhabit the lives of the Pennsylvania Amish community brought to its knees by unimaginable tragedy. In October of 2006, a shooter entered a one room schoolhouse in the Old Order Amish community and took aim, leaving five young girls dead and five injured before he turned the gun on himself. Playwright Jessica Dickey’s fictional story exploring the aftermath of this real-life event explores the unthinkable concept of forgiveness in the face of a senseless and violent act. Standing on either side of the oft-blurred lines between fact and fiction, The Amish Project is a hauntingly elegant and timely study of compassion and its rightful place in today’s changing world.
See The Country Wife for FREE!
|
import unittest
from datanator.elasticsearch_kl import batch_load
from datanator_query_python.config import config
import tempfile
import shutil
import requests
class TestMongoToES(unittest.TestCase):
    """Integration tests for batch_load.MongoToES.

    These tests talk to live AWS Elasticsearch and MongoDB services and
    therefore require the corresponding credential files to be present.
    """

    @classmethod
    def setUpClass(cls):
        cls.cache_dir = tempfile.mkdtemp()
        cls.src = batch_load.MongoToES(
            profile_name='es-poweruser',
            credential_path='~/.wc/third_party/aws_credentials',
            config_path='~/.wc/third_party/aws_config',
            elastic_path='~/.wc/third_party/elasticsearch.ini',
            cache_dir=cls.cache_dir,
            service_name='es',
            index='test',
            max_entries=float('inf'),
            verbose=True)
        cls.url = cls.src.es_endpoint + '/' + cls.src.index
        # Start every run from a clean index.
        requests.delete(cls.url, auth=cls.src.awsauth)
        conf = config.Config()
        cls.username = conf.USERNAME
        cls.password = conf.PASSWORD
        cls.server = conf.SERVER
        cls.authDB = conf.AUTHDB
        cls.db = 'datanator'

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.cache_dir)
        requests.delete(cls.url, auth=cls.src.awsauth)

    def test_connection(self):
        response = self.src.client.list_domain_names()
        self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
        self.assertIn('datanator-elasticsearch', self.src.es_endpoint)

    def test_data_from_mongo(self):
        count, _ = self.src.data_from_mongo_protein(
            self.server, self.db, self.username, self.password,
            authSource=self.authDB)
        self.assertGreaterEqual(count, 1000)

    def test_data_from_metabolite(self):
        _, count_0, _, count_1 = self.src.data_from_mongo_metabolite(
            self.server, self.db, self.username, self.password,
            authSource=self.authDB)
        self.assertGreaterEqual(count_0, 1000)
        self.assertGreaterEqual(count_1, 1000)

    def test_data_from_metabolites_meta(self):
        doc = self.src.data_from_mongo_metabolites_meta(
            self.server, self.db, self.username, self.password,
            authSource=self.authDB)
        result = [doc] * 5
        self.assertEqual(len(result), 5)
|
Our primary aim is to treat our patients' complaints and to help them return to a pain free lifestyle. We can liaise directly with G.P. surgeries on your behalf and we have experience treating patients with dementia and diabetes. From simple nail cutting to other specialist requirements we are available to treat or advise on your individual needs.
|
__author__ = 'jonathan'
import datetime
import pytz
from lib.rome.core.dataformat import get_decoder
import re
import uuid
from sqlalchemy.sql.expression import BinaryExpression
from lib.rome.core.rows.rows import get_attribute, has_attribute
from lib.rome.core.utils import DATE_FORMAT, datetime_to_int
def uncapitalize(s):
    """Return s with its first character lower-cased; '' for empty/falsy s."""
    if not s:
        return ''
    return s[0].lower() + s[1:]
def get_attribute_reccursively(obj, attr, otherwise=None):
    """ A reccursive getattr function.

    Dotted names ("a.b.c") are resolved one segment at a time; each
    segment is tried as-is, then capitalized, then uncapitalized.

    :param obj: the object that will be use to perform the search
    :param attr: the searched attribute
    :param otherwise: value returned in case attr was not found
    :return:
    """
    try:
        if not "." in attr:
            # Base case: a plain attribute name (quotes stripped first).
            return get_attribute(obj, attr.replace("\"", ""))
        else:
            # Split off the first segment; the rest is resolved recursively.
            current_key = attr[:attr.index(".")]
            next_key = attr[attr.index(".") + 1:]
            # Try the key as-is, then with alternate capitalisations,
            # falling back to the raw key (letting get_attribute handle it).
            if has_attribute(obj, current_key):
                current_object = get_attribute(obj, current_key)
            elif has_attribute(obj, current_key.capitalize()):
                current_object = get_attribute(obj, current_key.capitalize())
            elif has_attribute(obj, uncapitalize(current_key)):
                current_object = get_attribute(obj, uncapitalize(current_key))
            else:
                current_object = get_attribute(obj, current_key)
            # NOTE(review): this tests `obj` (the outer object), not
            # `current_object` — looks like it may have been intended to
            # short-circuit on the nested dict instead; confirm against callers.
            if type(obj) is dict and next_key in obj:
                return obj[next_key]
            return get_attribute_reccursively(current_object, next_key, otherwise)
    except AttributeError:
        return otherwise
class LazyDictionnary:
    """Wraps a plain dict so its entries can be read as attributes.

    Values are run through the rome decoder on first access and memoised;
    missing keys resolve to None (which is also cached). Based on the
    technique described at:
    http://stackoverflow.com/questions/1305532/convert-python-dict-to-object
    """

    def __init__(self, **entries):
        self.entries = entries
        self._cache = {}
        self.deconverter = get_decoder()

    def keys(self):
        return self.entries.keys()

    def __getattr__(self, item):
        # Only invoked for names not found through normal lookup:
        # decode the raw entry once and memoise the result.
        if item not in self._cache:
            raw_value = self.entries.get(item)
            self._cache[item] = self.deconverter.desimplify(raw_value)
        return self._cache[item]
boolean_expression_str_memory = {}
class BooleanExpression(object):
    """A boolean combination of SQLAlchemy criteria, compiled to a Python
    expression string that can be eval()-ed against row dictionaries.

    ``operator`` is "and", "or" or "NORMAL" (a single wrapped criterion).
    ``exps`` holds child BooleanExpressions and/or SQLAlchemy
    BinaryExpressions. On construction the expression is compiled once
    into ``compiled_expression`` (with bind-parameter placeholders renamed
    to unique labels) and ``raw_expression`` (the same string with the
    bound values substituted in).
    """

    def __init__(self, operator, *exps):
        def transform_exp(exp):
            # Non-NORMAL operators wrap each raw criterion in its own
            # NORMAL BooleanExpression so the tree is homogeneous.
            if type(exp) is not BooleanExpression and self.operator != "NORMAL":
                return BooleanExpression("NORMAL", exp)
            else:
                return exp
        self.operator = operator
        self.exps = map(lambda x: transform_exp(x), exps)
        self.deconverter = get_decoder()
        self.compiled_expression = ""
        # Unique suffix used to disambiguate bind-parameter labels.
        self.uuid = str(uuid.uuid1()).replace("-", "")
        self.is_joining_expression = True
        self.tables_involved = []
        """ Prepare the expression. """
        # Maps original bind placeholders to uuid-suffixed labels, and
        # those labels to their bound values, respectively.
        self.variable_substitution_dict = {}
        self.default_value_dict = {}
        self.prepare_expression()

    def is_boolean_expression(self):
        """Marker method used to distinguish this type duck-typedly."""
        return True

    def extract_hint(self):
        """Extract (table, attribute, value) Hints from equality criteria,
        used to narrow candidate rows before full evaluation."""
        from lib.rome.core.terms.terms import Hint
        result = []
        for expression in self.exps:
            try:
                if hasattr(expression, "extract_hint"):
                    result += expression.extract_hint()
                elif hasattr(expression, "right") and hasattr(expression.right, "value"):
                    table_name = str(expression.left.table)
                    attribute_name = str(expression.left.key)
                    # value = "%s" % (criterion.expression.right.value)
                    value = expression.right.value
                    # Coerce the bound value to the column's Python type.
                    if type(expression.left.type).__name__ == "Integer":
                        value = int(value)
                    if type(expression.left.type).__name__ == "Float":
                        value = float(value)
                    result += [Hint(table_name, attribute_name, value)]
            except:
                # TODO: this catch errors that occur when there are "CASE WHEN" expression (this is caused by _paginate_query in glance.db.api)
                pass
        return result

    def extract_joining_pairs(self):
        """Return sorted [attr_a, attr_b] pairs for criteria of the form
        ``table1.col == table2.col`` (i.e. join conditions)."""
        if self.operator == "NORMAL":
            word_pattern = "[_a-zA-Z0-9]+"
            joining_criterion_pattern = "%s\.%s == %s\.%s" % (word_pattern, word_pattern, word_pattern, word_pattern)
            m = re.search(joining_criterion_pattern, self.raw_expression)
            if m is not None:
                # Strip the surrounding parentheses and split on '=='.
                joining_pair = self.raw_expression[1:-1].split("==")
                joining_pair = map(lambda x: x.strip(), joining_pair)
                joining_pair = sorted(joining_pair)
                return [joining_pair]
            else:
                return []
        result = []
        for exp in self.exps:
            if type(exp).__name__ == "BooleanExpression":
                result += exp.extract_joining_pairs()
        return result

    def extract_nonjoining_criterions(self):
        """Return [self] when this expression is NOT a pure join condition
        (the complement of extract_joining_pairs)."""
        if self.operator == "NORMAL":
            word_pattern = "[_a-zA-Z0-9]+"
            joining_criterion_pattern = "%s\.%s == %s\.%s" % (word_pattern, word_pattern, word_pattern, word_pattern)
            m = re.search(joining_criterion_pattern, self.raw_expression)
            if m is None:
                return [self]
            else:
                return []
        return [self]

    def prepare_expression(self):
        """Compile the child expressions into a single eval()-able Python
        string, renaming bind parameters to uuid-unique labels and
        recording their values in default_value_dict."""
        def collect_expressions(exp):
            if type(exp) is BooleanExpression:
                return exp.compiled_expression
            if type(exp) is BinaryExpression:
                return self.prepare_criterion(exp)
            else:
                return exp
        compiled_expressions = map(lambda x: "(%s)" % (collect_expressions(x)), self.exps)
        joined_compiled_expressions = []
        if self.operator == "and":
            joined_compiled_expressions = " and ".join(compiled_expressions)
        elif self.operator == "or":
            joined_compiled_expressions = " or ".join(compiled_expressions)
        elif self.operator == "NORMAL":
            joined_compiled_expressions = " or ".join(compiled_expressions)
        self.compiled_expression = joined_compiled_expressions
        # Walk the raw criteria to collect bind-parameter values and build
        # the placeholder -> unique-label substitution table.
        for criterion_str in compiled_expressions:
            for expression in self.exps:
                if type(expression) is BinaryExpression:
                    expression_parts = [expression.right, expression.left]
                    other_part = expression.left
                    for expression_part in expression_parts:
                        # other_parts = filter(lambda x: x != expression_part,expression_parts)
                        if hasattr(expression_part, "default") and expression_part.bind is None and expression_part.default is not None:
                            expression_part.bind = expression_part.default.arg
                        if ":" in str(expression_part):
                            """ Handle right part of the expression. """
                            if " in " in criterion_str:
                                count = 1
                                parts = getattr(expression_part, "element", [])
                                like_operator_used = False
                                if len(parts) == 0:
                                    """ This case happens when the LIKE operator is used. """
                                    like_operator_used = True
                                    parts = [expression_part] if "BindParameter" in str(type(expression_part)) else []
                                for i in parts:
                                    corrected_label = ("%s_%s_%i" % (i._orig_key, self.uuid, count))
                                    key = ":%s_%i" % (i._orig_key, count)
                                    self.variable_substitution_dict[key] = corrected_label
                                    self.default_value_dict[corrected_label] = i.value
                                    if like_operator_used:
                                        """ Must remove the '%' used as the wildcard symbol in the LIKE synthax"""
                                        self.default_value_dict[corrected_label] = self.default_value_dict[corrected_label].replace("%", "")
                                    count += 1
                            elif not "." in str(expression_part):
                                original_label = str(expression_part)
                                corrected_label = ("%s_%s" % (original_label, self.uuid)).replace(":", "")
                                self.variable_substitution_dict[original_label] = corrected_label
                                value = expression_part.value
                                # if len(other_parts) > 0:
                                # other_part = other_parts[0]
                                # Coerce the bound value to the column type of
                                # the opposite side of the comparison.
                                if type(other_part.expression.type).__name__ == "Integer":
                                    value = int(value)
                                if type(other_part.expression.type).__name__ == "Float":
                                    value = float(value)
                                if isinstance(value, datetime.datetime):
                                    value = datetime_to_int(value)
                                self.default_value_dict[corrected_label] = value
                        other_part = expression.right
        # Apply the placeholder substitutions and strip remaining colons so
        # the string is valid Python.
        for sub in self.variable_substitution_dict:
            joined_compiled_expressions = joined_compiled_expressions.replace(sub, self.variable_substitution_dict[sub])
        joined_compiled_expressions = joined_compiled_expressions.replace(":", "")
        # Inherit bound values from child BooleanExpressions.
        for exp in self.exps:
            if type(exp) is BooleanExpression:
                for default_value_key in exp.default_value_dict:
                    self.default_value_dict[default_value_key] = exp.default_value_dict[default_value_key]
        self.compiled_expression = joined_compiled_expressions
        self.raw_expression = "%s" % (self.compiled_expression)
        # Substitute values into raw_expression, longest labels first so a
        # shorter label never clobbers part of a longer one.
        keys = self.default_value_dict.keys()
        keys = sorted(keys, reverse=True, key=lambda x: len(x))
        for key in keys:
            value = self.default_value_dict[key]
            if type(value).__name__ in ["int", "float"]:
                self.raw_expression = self.raw_expression.replace(key, "%s" % (self.default_value_dict[key]))
            else:
                self.raw_expression = self.raw_expression.replace(key, "\"%s\"" % (self.default_value_dict[key]))
        return self.compiled_expression

    def prepare_criterion(self, criterion):
        """Translate one SQLAlchemy criterion string into Python syntax
        (results are memoised in the module-level cache)."""
        criterion_str = criterion.__str__()
        if criterion_str in boolean_expression_str_memory:
            criterion_str = boolean_expression_str_memory[criterion_str]
        else:
            prev_criterion_str = criterion_str
            # SQL-to-Python token rewrites applied in a single regex pass.
            subs = {
                " = ": " == ",
                # ":": "",
                "\"": "",
                "IN": " in ",
                "IS": " is ",
                "NOT": " not ",
                "NULL": "None",
                "(": "[",
                ")": "]"
            }
            compiled = re.compile('|'.join(map(re.escape, subs)))
            criterion_str = compiled.sub(lambda x: subs[x.group(0)], criterion_str)
            for sub in self.variable_substitution_dict:
                criterion_str = criterion_str.replace(sub, self.variable_substitution_dict[sub])
            # handle regex
            if "REGEXP" in criterion_str:
                tab = criterion_str.split("REGEXP")
                a = tab[0]
                b = tab[1]
                criterion_str = ("""__import__('re').search(%s, %s) is not None\n""" % (b, a))
            # LIKE is approximated by symmetric substring containment.
            if "LIKE" in criterion_str:
                left = criterion_str.split("LIKE")[0]
                right = criterion_str.split("LIKE")[1]
                criterion_str = "(%s in %s) or (%s in %s)" % (left, right, right, left)
            boolean_expression_str_memory[prev_criterion_str] = criterion_str
        return criterion_str

    def evaluate(self, value, additional_parameters={}):
        """Evaluate the compiled expression against a row (dict keyed by
        table alias) and return its truth value.

        NOTE(review): the mutable default for additional_parameters is
        only read here, never mutated, so it is currently harmless; the
        eval() below runs only strings this class compiled itself.
        """
        orig_value = value
        # construct a dict with the values involved in the expression
        values_dict = {}
        if type(value) is not dict:
            for key in value.keys():
                try:
                    s = LazyDictionnary(**value[value.keys().index(key)])
                    values_dict[key] = s
                except Exception as e:
                    print("[BUG] evaluation failed: %s -> %s" % (key, value))
                    # return False
        else:
            values_dict = value
        # Fold in the bound values collected at compile time.
        for key in self.default_value_dict:
            values_dict[key] = self.default_value_dict[key]
        final_values_dict = {}
        for key in values_dict.keys():
            value = values_dict[key]
            # "id_*" keys are compared numerically.
            if key.startswith("id_"):
                value = int(value)
            final_values_dict[key] = value
        # Also expose values under their substituted (uuid-suffixed) names.
        for key in values_dict:
            if key in self.variable_substitution_dict:
                value = values_dict[key]
                if key.startswith("id_"):
                    value = int(value)
                final_values_dict[self.variable_substitution_dict[key]] = value
        # Apply column defaults for attributes missing from the row.
        for expression in self.exps:
            if type(expression) is BinaryExpression:
                expression_parts = [expression.right, expression.left]
                for expression_part in expression_parts:
                    if hasattr(expression_part, "default") and expression_part.default is not None:
                        key = str(expression_part).split(".")[0]
                        attr = str(expression_part).split(".")[1]
                        if getattr(final_values_dict[key], attr, None) is None:
                            value = expression_part.default.arg
                            setattr(final_values_dict[key], attr, value)
        # Build the eval() namespace: extra parameters first, then row values.
        second_final_values_dict = {}
        for key in additional_parameters:
            value = LazyDictionnary(**additional_parameters[key])
            second_final_values_dict[key] = value
        for key in final_values_dict:
            second_final_values_dict[key] = final_values_dict[key]
        try:
            result = eval(self.compiled_expression, second_final_values_dict)
        except:
            import traceback
            traceback.print_exc()
            # Fallback: evaluate children one by one with short-circuiting.
            if self.operator == "NORMAL":
                return False
            for exp in self.exps:
                if exp.evaluate(orig_value):
                    if self.operator in ["or"]:
                        return True
                else:
                    if self.operator in ["and"]:
                        return False
            if self.operator in ["NORMAL", "or"]:
                return False
            else:
                return True
            pass
        return result

    def __repr__(self):
        if self.operator == "NORMAL":
            return str(self.raw_expression)
        else:
            # NOTE(review): .lower() is applied to the template " %s "
            # before formatting, so the operator's own case is preserved;
            # presumably .lower() was meant for the operator itself.
            op = " %s ".lower() % (self.operator)
            return "(%s)" % (op.join(map(lambda x: str(x), self.exps)))
class JoiningBooleanExpression(BooleanExpression):
    """A BooleanExpression explicitly flagged as a joining expression.

    NOTE(review): BooleanExpression.__init__ already sets
    is_joining_expression to True, so this subclass currently adds no
    behaviour; the base class may have been intended to default to
    False — confirm against callers.
    """
    def __init__(self, operator, *exps):
        BooleanExpression.__init__(self, operator, *exps)
        self.is_joining_expression = True
|
Winter is personally my least favorite time of year – it’s cold, it’s depressing, there’s barely any sun out. But one thing that even I have to admit is that winter landscapes make for positively fantastic photographs – if you know how to approach them. There are a couple of issues that even amateur photographers will notice when they take their camera out in December with the goal of photographing a beautiful snowy landscape. The lack of contrast, the awkward low angle of the sun, and often the inability to find anything that stands out – primarily since everything is covered with snow.
If you want to get some fantastic snow shots in, I have some advice that might come in handy.
Due to the lack of contrast, it’s sometimes difficult to find a colorful element to stand out in the photo. It might seem really counterintuitive to overexpose an image when there’s sunlight bouncing off all that snow, but it does help the photo gain new life to a certain extent. I’m not talking crazy overexposure, just a touch to keep the photo from seeming overly gray and non-colorful. The highlights are actually a good point of reference for this – if they’re clipped then you’ve gone overboard and you need to turn it down a notch or two.
Choosing the right white balance can make all the difference in photographing winter landscapes. It doesn’t matter if you do it on-site or in post-processing, but winter photos will generally benefit from a cool white balance. The shadows in winter are naturally cool, so it’s better to overemphasize this with the adequate balance instead of trying to capture something warm. Not only will this reinforce the actual mood of the photograph, but it will also make it seem much more realistic with very little additional effort. It’s definitely a move you don’t want to skip, even if it’s just in post-processing.
As already mentioned, it’s sometimes difficult to find adequate contrast when everything around you is completely white and covered with snow. It can be a challenge to find an element that can stick out in the photograph, but the good news is that as soon as you find anything it will stand out perfectly. Whether it’s a lake, a house or just about any piece of scenery, it is instantly recognized by the eye so you don’t have to do any additional work to make it interesting. Don’t settle for photos without contrast because they will seem a bit boring – but remember that as soon as you find any, it’s probably all that you need.
The final problem that makes taking winter landscape photos a challenge is the focus, or rather, your camera’s inability to obtain autofocus under certain conditions. This is also partly due to the issue of low contrast. The camera lens practically works like a human eye, and if you’ve ever been exposed to snow after being in a dark room for a certain amount of time you know how hard it is to see anything (this is actually a condition known as snow blindness). There’s so much light bouncing off the snow that the camera lens has no idea what to favor. This is why it’s best to keep your camera on manual focus and adjust as needed because otherwise, the lens will just probably focus on the falling snowflakes instead.
As you can see, there’s more to taking winter photos than necessarily meets the eye. Of course, there are plenty more things to be said about winter photography, but I have a feeling that these few practical tips will be enough for now.
To sum up, remember to find contrast wherever you can, overexpose the image if necessary, always use manual focus and favor a cool white balance. Taking winter landscape photos is challenging but very rewarding if you do it right, and with enough practice, I’m sure you’ll be able to nail it.
For more on landscape photography, take a look at this link.
Alex Schult has founded PhotographyTalk in 2009, and has been featured in Forbes, Inc. Magazine, Huffington Post, and many more. The site is aimed at educating people about photography and inspiring their work.
|
#!/usr/bin/env python2.6
#Copyright (C) 2009-2010 :
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
#It's ugly I know....
from shinken_test import *
class TestConfig(ShinkenTest):
    """Checks that a deliberately broken timeperiod definition is flagged
    as incorrect by configuration validation."""

    #setUp is in shinken_test
    def setUp(self):
        # Load a configuration containing malformed timeperiod entries.
        self.setup_with_file('etc/nagios_bad_timeperiods.cfg')

    #Change ME :)
    def test_dummy(self):
        #
        # Config is not correct because of a wrong relative path
        # in the main config file
        #
        print "Get the bad timeperiod"
        tp = self.sched.timeperiods.find_by_name("24x7")
        # The malformed "24x7" timeperiod must fail validation.
        self.assert_(tp.is_correct() == False)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
Read more about Ken Schmidt here.
Read more about Alex Hunter here.
Read more about Molly Fletcher here.
Read more about Rob O'Neill here.
Read more about Ryan Estis here.
Read more about Crystal Washington here.
Read more about Josh Linkner here.
Read more about Chris Voss here.
Read more about Adam Sharp here.
If you’re interested in hearing more about these speakers, or would like additional recommendations in any specific topic area, please contact us or chat with us now.
|
import sys, os, random, constants
from pycmef.event_handler import returns_string, returns_dictionary
from pygaze import libscreen
from pygaze import libtime
from pygaze import liblog
from pygaze import libinput
from pygaze import eyetracker
class PygazeEyetracker:
    """Exposes PyGaze eye-tracking operations as pycmef event handlers."""

    def __init__(self):
        # The display must exist before the tracker can attach to it.
        self.disp = libscreen.Display()
        self.tracker = eyetracker.EyeTracker(self.disp)
        self.keyboard = libinput.Keyboard(keylist=['space'], timeout=None)
        self.log = liblog.Logfile()

    def register(self, event_manager):
        """Register this object's handlers with the experiment's event manager."""
        handlers = {
            'calibrate_eyetracker': self.calibrate_eyetracker,
            'start_eyetracker': self.start_tracking,
            'stop_eyetracker': self.stop_tracking,
            'log_to_eyetracker': self.log_to_eyetracker
        }
        event_manager.register_events(handlers)

    @returns_string
    def calibrate_eyetracker(self, args):
        """Run the tracker's interactive calibration routine."""
        self.tracker.calibrate()
        return ""

    @returns_string
    def start_tracking(self, args):
        """Begin recording gaze samples."""
        self.tracker.start_recording()
        return ""

    @returns_string
    def stop_tracking(self, args):
        """Stop recording gaze samples."""
        self.tracker.stop_recording()
        return ""

    @returns_string
    def log_to_eyetracker(self, args):
        """Write args.message into the tracker's log stream."""
        self.tracker.log(args.message)
        return ""
|
Summer and summer vacations are coming to an end but many still have travel plans in the near future. October is one of the best months for traveling and of course, there’s the holidays, and that means travel for many. Unfortunately, with travel often comes illness. What is it about traveling that sends our bodies into fits? You’re about to find out and what you can do to minimize your risks of getting sick the next time you travel.
When you travel you often experience both psychological and physiological stress. Both types work to suppress your immune system leaving you vulnerable to getting sick. Besides leaving you vulnerable to infection, stress can also weaken the immune system causing flare-ups and worsen symptoms for those with chronic health issues.
When we travel we’re exposed to more people. More people means more exposure to infectious diseases. And with the global exposure of travel, we are also exposed to new diseases our bodies lack immunity to. Bad bugs travel by air and settle on the surfaces around us. We touch the surface, pick up the microbes, and then introduce them into our bodies by touching our face or by ingestion.
Besides being exposed to disease-causing bacteria, we are also exposed to new bacteria that then inhabit our digestive tract. When we travel our gut microbiota changes, and this can significantly influence our health. Learn more about the gut microbiota.
Chill out – Give yourself extra time and relax. Getting stressed out while traveling only works against your body by suppressing the immune system. Late flight, unfriendly cabbie, long lines…go with the flow and take a deep breath.
Sleep in – Jet lag is an immune killer. Throwing off your internal body clock disrupts the immune system making you more susceptible to illness. Allow yourself and your body the extra time it needs to adjust to any time changes. This includes giving yourself time when returning home.
Get vaccinated – Most people when traveling to foreign countries get vaccinated, and that’s a good thing. But even if you’re traveling closer to home you should be current on all of your vaccinations, including your current flu and pneumonia vaccines. While it may not be flu season in your country, those traveling may be coming from regions where flu season is in full swing, giving you an early exposure to the flu.
Wash your hands – Wash your hands, a lot! Ditch the antimicrobial hand sanitizer and opt for a good soap and water washing. While hand sanitizers may kill “99.9%” of bacteria, it’s often the viruses that are getting you sick. Washing with soap and water will rid your hands of more bacteria and viruses. And while you’re at it, keep your hands away from your face!
Eat some yogurt – Traveling can change your gut microbiota and this in turn influences your immune system. Keep your gut bacteria happy by eating some yogurt or other foods rich in probiotics.
Eat healthy – I know, you’re on vacation and you want to indulge. That’s fine but make an effort to get your fresh fruits and veggies in as well. Your immune system needs those phytochemicals, antioxidants, and micronutrients to stay strong. In addition, your gut microbiota needs the fiber in plant-based foods (prebiotics) to stay healthy.
Keep your distance – Traveling will expose you to lots and lots of other people and there’s not much you can do about it. However, you can keep your distance. Airborne bacteria and viruses can spread easily through coughing and sneezes so keep your distance. No need to be rude to others just prudent.
Get some exercise – Traveling can often have you sitting for long periods and guess what? That negatively impacts your immune system. Moving often helps keep your immune system fluid (the lymph fluid) moving and the immune cells active. This improves immunity and helps prevent illness.
Take along your medications – When traveling it’s a good idea to bring along ALL of your medications. Stress can exacerbate the symptoms of chronic conditions and even bring on flare-ups. So even if you aren’t currently experiencing symptoms but have medication for a specific condition, bring it with you. You’ll feel better faster if you don’t have to go in search of medical help for a condition you can manage with the right medications.
Watch what you eat – You probably already know about watching what you eat depending upon where you’re going. Water and foods washed in that same water can pose a health threat depending on the source. Food preparation hygiene can also be lacking in some places and this can put you at risk for “food poisoning” and viral infections like hepatitis and the noro-virus. Eat smart, be selective.
You may not be able to avoid all illness when you travel but by following these simple tips, you should significantly decrease your risks. Wherever your travels take you…stay well!
Tobi Schmidt, PhD is a Personal Health Advisor, a passionate advocate of empowering personal health through lifestyle choices.
5 Common Immune Health Myths, DEBUNKED!
© Copyright Dr. Tobi Schmidt LLC. All Rights Reserved.
|
import os
from ...enums import CrashInputType
from .chess_exploit import CHESSExploit
class CHESSExploitControl(CHESSExploit):
    """
    An exploit that crashes with a controlled instruction pointer and register
    """

    def __init__(self, *args, registers=None, **kwargs):
        # A mapping of register name -> expected value is mandatory.
        if registers is None:
            raise TypeError("Need to specify registers")
        super().__init__(*args, **kwargs)
        self.registers = registers

    def _write_script(self, **kwargs):
        """Assemble the PoV script: connection preamble, recorded input
        actions, then a dump of the registers we expect to control."""
        crash_input = self.crash.input_type
        if crash_input in (CrashInputType.STDIN, CrashInputType.POV_FILE):
            actions = self._script_get_actions_stdin()
        elif crash_input == CrashInputType.TCP:
            actions = self._script_get_actions_tcp()
        else:
            raise NotImplementedError("Unsupported crash input type %s." % crash_input)

        # Boilerplate emitted at the top of the generated script.
        preamble = """
import sys
import time
import nclib
if len(sys.argv) < 3:
    print("%s: <host> <port>" % sys.argv[0])
    sys.exit(1)
r = nclib.Netcat((sys.argv[1], int(sys.argv[2])), udp=False)
"""
        # Expected register values are baked into the script at generation time.
        register_dump = "".join(
            'print("REGISTER_%s=%X")\n' % (name.upper(), value)
            for name, value in self.registers.items()
        )
        tail = "\nr.recvall()\n" + register_dump
        return preamble + os.linesep.join(actions) + tail
|
is included with each item.
lasting up to twice as long!
is most primal and exerts surprising influence over our thoughts, emotions, and energy.
|
'''
Created on Aug 4, 2014
@author: lijun
'''
import unittest
import threading
import time
from socket import *
from engine import *
from engine.DataCenter import *
from engine.LogAdapter import *
from data_source.UdpMonitor import *
class udp_send(threading.Thread):
    """Background thread that periodically sends a fixed test datagram to a
    UDP address until stop_send() is called."""

    def __init__(self, _strIP, _nPort):
        # Flag polled by run(); True asks the send loop to terminate.
        self.bExit = False
        self.threadid = 0
        self.nPort = _nPort
        self.strIP = _strIP
        threading.Thread.__init__(self)

    def run(self):
        # Send one datagram every two seconds until asked to stop.
        while self.bExit == False:
            self.SendData(" haha this is a nice tes t")
            time.sleep(2)

    def start_send(self):
        # Launch the thread and (re-)arm the exit flag.
        self.start()
        self.bExit = False

    def stop_send(self):
        # Ask run() to leave its loop at the next iteration.
        self.bExit = True

    def reTryConnectUdp(self):
        """(Re)create the UDP socket and remember the destination address."""
        try:
            self.udpsocket = socket(AF_INET, SOCK_DGRAM)
            self.address = (self.strIP, self.nPort)
            # NOTE(review): bind() is disabled (left as a bare no-op string);
            # this object only transmits, so no local bind is required.
            'self.udpsocket.bind(self.address)'
            self.net_status = True
        except Exception, e:
            self.net_status = False
            print e

    def SendData(self, _data):
        """Send one datagram; on any failure mark the link down and rebuild
        the socket for the next attempt."""
        error = False
        try:
            # The very first call raises AttributeError (no socket yet);
            # the except path below then triggers reTryConnectUdp().
            self.udpsocket.sendto(_data, self.address)
        except Exception, e:
            print e
            error = True
        if error == True:
            self.net_status = False
            self.reTryConnectUdp()
def test(_data):
    # Subscriber callback used by the test case below: just echo the payload.
    print "==>" + _data
class Test(unittest.TestCase):
    """Exercises DataCenter end-to-end with a live background UDP sender."""

    def setUp(self):
        # Background sender; note its destination (127.0.0.1:45232) differs
        # from the monitored endpoint below -- TODO confirm this is intended.
        self.udpsender = udp_send("127.0.0.1", 45232)
        self.udpsender.start_send()

    def tearDown(self):
        # Stop the sender thread's loop.
        self.udpsender.stop_send()

    def testDataCenter(self):
        # Wire a UDP data source into the DataCenter and subscribe test().
        self.DataCenterObject = DataCenter.DataCenter()
        object_src = UdpMonitor(3000, "192.168.11.5")
        self.DataCenterObject.Register("UDP", object_src)
        self.DataCenterObject.Subcribe("UDP", "test", test)
        self.DataCenterObject.Start()
        # NOTE(review): never returns -- keeps the process alive so the
        # asynchronous callbacks can run.
        while True:
            time.sleep(60)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
A voyage to the Cape of Good Hope, towards the Antarctic polar circle, and round the world: but chiefly into the country of the Hottentots and Caffres, from the year 1772, to 1776. By Andrew Sparrman, ... Translated from the Swedish original. With plates. In two volumes. ..
Voyages and travels, Travel, Accessible book, Description and travel, Early works to 1800, Voyages around the world, Birds, Cape of Good Hope (South Africa) -- Description and travel, Catalogs and collections, Cook, James, -- 1728-1779., Discovery and exploration, Pictorial works, Resolution (Ship), Voyages and travels.
Are you sure you want to remove Anders Sparrman from your list?
|
#!/usr/bin/env python3
# © 2015-19 James R. Barlow: github.com/jbarlow83
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
import os
import signal
import sys
from multiprocessing import set_start_method
from ocrmypdf import __version__
from ocrmypdf._plugin_manager import get_parser_options_plugins
from ocrmypdf._sync import run_pipeline
from ocrmypdf._validation import check_closed_streams, check_options
from ocrmypdf.api import Verbosity, configure_logging
from ocrmypdf.exceptions import (
BadArgsError,
ExitCode,
InputFileError,
MissingDependencyError,
)
# Module-level logger shared by the CLI entry point below.
log = logging.getLogger('ocrmypdf')


def sigbus(*args):
    # SIGBUS fires e.g. when a memory-mapped input file is truncated
    # underneath us; surface it as a normal input-file error.
    raise InputFileError("Lost access to the input file")
def run(args=None):
    """Parse arguments, configure logging, validate options and run the OCR
    pipeline.

    Returns an ExitCode on early failure, otherwise the pipeline's result.
    """
    _parser, options, plugin_manager = get_parser_options_plugins(args=args)

    if not check_closed_streams(options):
        return ExitCode.bad_args

    # Lower our own CPU priority so interactive work stays responsive
    # (os.nice is unavailable on some platforms, e.g. Windows).
    if hasattr(os, 'nice'):
        os.nice(5)

    verbosity = options.verbose
    # Disable the progress bar when stderr is not a terminal (redirected logs).
    if not os.isatty(sys.stderr.fileno()):
        options.progress_bar = False
    if options.quiet:
        verbosity = Verbosity.quiet
        options.progress_bar = False
    # Logging is configured before option validation so that validation
    # errors below are reported through the configured logger.
    configure_logging(
        verbosity,
        progress_bar_friendly=options.progress_bar,
        manage_root_logger=True,
        plugin_manager=plugin_manager,
    )
    log.debug('ocrmypdf %s', __version__)
    try:
        check_options(options, plugin_manager)
    except ValueError as e:
        log.error(e)
        return ExitCode.bad_args
    except BadArgsError as e:
        log.error(e)
        return e.exit_code
    except MissingDependencyError as e:
        log.error(e)
        return ExitCode.missing_dependency

    # Not every platform defines SIGBUS (e.g. Windows).
    if hasattr(signal, 'SIGBUS'):
        signal.signal(signal.SIGBUS, sigbus)

    result = run_pipeline(options=options, plugin_manager=plugin_manager)
    return result
if __name__ == '__main__':
    # macOS before Python 3.8 defaulted to fork(), which is unsafe here.
    if sys.platform == 'darwin' and sys.version_info < (3, 8):
        set_start_method('spawn')  # see python bpo-33725
    sys.exit(run())
|
Plus that time he tried to surprise her at Fashion Week.
Zayn Malik and Gigi Hadid are a ridiculously cute couple. In a new interview with The Sunday Times Style, Zayn only cemented their adorable couple reputation by divulging their nicknames.
"I call her Gee, she calls me Zee," he revealed. "There's some other nicknames too, but I'll keep those private." Of course, now we're just very intrigued by the super secret nicknames.
He also revealed that, while Gigi hasn't been to his hometown of Bradford yet, she has met his family in London "a few times."
And even though Zayn and Gigi have been dating for well over a year, he still works to keep the romance alive. Earlier this month, Zayn tried to surprise Gigi in Paris while she was there for Fashion Week, E! Online reports.
"She didn't know I was coming," he said. "I went up to the suite to knock on the door…But my number had changed to European on her phone, so it wasn't much of a surprise in the end. She played along with it, though."
Well, it's the thought that counts.
|
# $Id$
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""Diamatic dome
"""
from __future__ import print_function
# pyFormex example metadata used by the example browser.
_status = 'checked'
_level = 'beginner'
_topics = ['structure', 'domes']
_techniques = ['color']
from gui.draw import *
def run():
    """Build and draw diamatic domes with 4, 6 and 8 sectors, plus a
    circulize1() variant, then show all four side by side."""
    wireframe()
    u = 3.   # modular length
    n = 6    # number of modules in one sector
    r = 36.  # radius of the dome
    # Topology for 1 sector
    T = Formex('l:164', 3).replic2(n, n, 1, 1, 0, 1, 0, -1)
    # 4 sectors
    m = 4
    angle = 360./m
    # circulize sector
    D = T.scale(u).circulize(angle)
    # Lift every point onto the sphere of radius r.
    D = D.mapd(2, lambda d: sqrt(r**2-d**2), [0, 0, 0], [0, 1])
    dome1 = D.rosette(m, angle)
    clear()
    draw(dome1)
    # 6 sectors
    m = 6
    angle = 360./m
    a = sqrt(3.)/2
    # Shear and scale the square grid into 60-degree modules.
    D = T.shear(0, 1, 0.5).scale([1, a, 1])
    # D = T.replic2(n,n,1,a,0,1,0.5,-1)
    D = D.scale(u).circulize(angle)
    D = D.mapd(2, lambda d: sqrt(r**2-d**2), [0, 0, 0], [0, 1])
    dome2 = D.rosette(m, angle)
    clear()
    draw(dome2)
    # 8 sectors
    m = 8
    angle = 360./m
    a = sqrt(2.)/2
    T = Formex([[[0, 0], [1, 0]], [[1, 0], [a, a]], [[a, a], [0, 0]]], 3)
    D = T.replic2(n, n, 1, a, 0, 1, a, -1)
    # circulize sector
    D = D.scale(u).circulize(angle)
    D = D.mapd(2, lambda d: sqrt(r**2-d**2), [0, 0, 0], [0, 1])
    dome3 = D.rosette(m, angle)
    clear()
    draw(dome3)
    # circulize1
    m = 6
    angle = 360./m
    T = Formex('l:127', 3)
    D = T.replic2(n, n, 1, 1, 0, 1, 1, -1)
    D = D.scale(u).circulize1()
    D = D.mapd(2, lambda d: sqrt(r**2-d**2), [0, 0, 0], [0, 1])
    dome4 = D.rosette(m, angle)
    clear()
    draw(dome4)
    # Overlay the two 6-sector variants in contrasting property colors.
    clear()
    dome4.setProp(1)
    draw(dome2+dome4)
    # Finally show all four domes side by side.
    clear()
    d = 1.1*r
    draw(dome1+dome2.translate([d, 0, 0])+dome3.translate([0, d, 0])+dome4.translate([d, d, 0]))
# pyFormex executes examples with __name__ set to 'draw'.
if __name__ == 'draw':
    run()

# End
|
I would also like to test a secondary antibody conjugated to APC for this Nkx2.1 assay. Which secondary do you recommend? We ask because we will have some reporter cell lines that express eGFP.
Click here (or use the following: https://www.abcam.com/index.html?datasheet=130805).
|
# coding: utf8
u""" This module translates national characters into similar
sounding latin characters (transliteration).
At the moment, Czech, Greek, Latvian, Polish, Turkish, Russian, Ukrainian
and Kazakh alphabets are supported (it covers 99% of needs).
Python 3:
>>> from trans import trans
>>> trans('Привет, Мир!')
Python 2:
>>> import trans
>>> u'Привет, Мир!'.encode('trans')
u'Privet, Mir!'
>>> trans.trans(u'Привет, Мир!')
u'Privet, Mir!'
Source and full documentations can be found here:
https://github.com/zzzsochi/trans
"""
import sys
import codecs
__version__ = '2.1.0'
__author__ = 'Zelenyak Aleksander aka ZZZ <zzz.sochi@gmail.com>'

# True when running under Python 2 (the codec hook below only exists there).
PY2 = sys.version_info[0] == 2


class Trans(object):
    """Transliterator mapping unicode text through named translation tables.

    A table is either a plain dict of single characters, or a pair
    ``(diphthongs, characters)`` where multi-character sequences are
    replaced before the per-character pass.
    """

    def __init__(self, tables=None, default_table=None):
        self.tables = tables or {}
        self.default_table = default_table

    def __call__(self, input, table=None):
        """Translate a unicode string using `table`, which may be a
        (diphthongs, other) tuple, a plain dict, or a table name."""
        if table is None:
            if self.default_table is None:
                raise ValueError('Table not set.')
            table = self.default_table

        text_type = unicode if PY2 else str  # noqa
        if not isinstance(input, text_type):
            raise TypeError(
                'trans codec support only unicode string, {0!r} given.'.format(type(input))
            )

        name_type = basestring if PY2 else str  # noqa
        if isinstance(table, name_type):
            try:
                table = self.tables[table]
            except KeyError:
                raise ValueError(u'Table "{0}" not found in tables!'.format(table))

        # A bare dict means "no diphthongs".
        if isinstance(table, dict):
            table = ({}, table)

        # Pass 1: multi-character sequences.
        stage = input
        for sequence, replacement in table[0].items():
            stage = stage.replace(sequence, replacement)

        # Pass 2: per-character mapping with the table's fallback character.
        fallback = table[1].get(None, u'_')
        return u''.join(table[1].get(ch, fallback) for ch in stage)
# Western-European accented letters -> closest ASCII letter(s).
latin = {
    u'à': u'a', u'á': u'a', u'â': u'a', u'ã': u'a', u'ä': u'a', u'å': u'a',
    u'æ': u'ae', u'ç': u'c', u'è': u'e', u'é': u'e', u'ê': u'e', u'ë': u'e',
    u'ì': u'i', u'í': u'i', u'î': u'i', u'ï': u'i', u'ð': u'd', u'ñ': u'n',
    u'ò': u'o', u'ó': u'o', u'ô': u'o', u'õ': u'o', u'ö': u'o', u'ő': u'o',
    u'ø': u'o', u'ù': u'u', u'ú': u'u', u'û': u'u', u'ü': u'u', u'ű': u'u',
    u'ý': u'y', u'þ': u'th', u'ÿ': u'y',
    u'À': u'A', u'Á': u'A', u'Â': u'A', u'Ã': u'A', u'Ä': u'A', u'Å': u'A',
    u'Æ': u'AE', u'Ç': u'C', u'È': u'E', u'É': u'E', u'Ê': u'E', u'Ë': u'E',
    u'Ì': u'I', u'Í': u'I', u'Î': u'I', u'Ï': u'I', u'Ð': u'D', u'Ñ': u'N',
    u'Ò': u'O', u'Ó': u'O', u'Ô': u'O', u'Õ': u'O', u'Ö': u'O', u'Ő': u'O',
    u'Ø': u'O', u'Ù': u'U', u'Ú': u'U', u'Û': u'U', u'Ü': u'U', u'Ű': u'U',
    u'Ý': u'Y', u'Þ': u'TH', u'ß': u'ss',
}
# Greek letters -> latin look-alikes (8 and 3 mimic theta and xi shapes).
greek = {
    u'α': u'a', u'β': u'b', u'γ': u'g', u'δ': u'd', u'ε': u'e', u'ζ': u'z',
    u'η': u'h', u'θ': u'8', u'ι': u'i', u'κ': u'k', u'λ': u'l', u'μ': u'm',
    u'ν': u'n', u'ξ': u'3', u'ο': u'o', u'π': u'p', u'ρ': u'r', u'σ': u's',
    u'τ': u't', u'υ': u'y', u'φ': u'f', u'χ': u'x', u'ψ': u'ps', u'ω': u'w',
    u'ά': u'a', u'έ': u'e', u'ί': u'i', u'ό': u'o', u'ύ': u'y', u'ή': u'h',
    u'ώ': u'w', u'ς': u's', u'ϊ': u'i', u'ΰ': u'y', u'ϋ': u'y', u'ΐ': u'i',
    u'Α': u'A', u'Β': u'B', u'Γ': u'G', u'Δ': u'D', u'Ε': u'E', u'Ζ': u'Z',
    u'Η': u'H', u'Θ': u'8', u'Ι': u'I', u'Κ': u'K', u'Λ': u'L', u'Μ': u'M',
    u'Ν': u'N', u'Ξ': u'3', u'Ο': u'O', u'Π': u'P', u'Ρ': u'R', u'Σ': u'S',
    u'Τ': u'T', u'Υ': u'Y', u'Φ': u'F', u'Χ': u'X', u'Ψ': u'PS', u'Ω': u'W',
    u'Ά': u'A', u'Έ': u'E', u'Ί': u'I', u'Ό': u'O', u'Ύ': u'Y', u'Ή': u'H',
    u'Ώ': u'W', u'Ϊ': u'I', u'Ϋ': u'Y',
}
# Turkish-specific letters -> undotted/unaccented ASCII equivalents.
turkish = {
    u'ş': u's', u'Ş': u'S', u'ı': u'i', u'İ': u'I', u'ç': u'c', u'Ç': u'C',
    u'ü': u'u', u'Ü': u'U', u'ö': u'o', u'Ö': u'O', u'ğ': u'g', u'Ğ': u'G'
}
# Russian Cyrillic: (diphthongs replaced first, then single characters).
russian = (
    {
        u'юй': u'yuy', u'ей': u'yay',
        u'Юй': u'Yuy', u'Ей': u'Yay'
    },
    {
        u'а': u'a', u'б': u'b', u'в': u'v', u'г': u'g', u'д': u'd', u'е': u'e',
        u'ё': u'yo', u'ж': u'zh', u'з': u'z', u'и': u'i', u'й': u'y', u'к': u'k',
        u'л': u'l', u'м': u'm', u'н': u'n', u'о': u'o', u'п': u'p', u'р': u'r',
        u'с': u's', u'т': u't', u'у': u'u', u'ф': u'f', u'х': u'h', u'ц': u'c',
        u'ч': u'ch', u'ш': u'sh', u'щ': u'sh', u'ъ': u'', u'ы': u'y', u'ь': u'',
        u'э': u'e', u'ю': u'yu', u'я': u'ya',
        u'А': u'A', u'Б': u'B', u'В': u'V', u'Г': u'G', u'Д': u'D', u'Е': u'E',
        u'Ё': u'Yo', u'Ж': u'Zh', u'З': u'Z', u'И': u'I', u'Й': u'Y', u'К': u'K',
        u'Л': u'L', u'М': u'M', u'Н': u'N', u'О': u'O', u'П': u'P', u'Р': u'R',
        u'С': u'S', u'Т': u'T', u'У': u'U', u'Ф': u'F', u'Х': u'H', u'Ц': u'C',
        u'Ч': u'Ch', u'Ш': u'Sh', u'Щ': u'Sh', u'Ъ': u'', u'Ы': u'Y', u'Ь': u'',
        u'Э': u'E', u'Ю': u'Yu', u'Я': u'Ya',
    })
# Ukrainian: Russian diphthongs plus the extra Ukrainian letters; the
# update() below merges in the full Russian character table.
ukrainian = (russian[0].copy(), {
    u'Є': u'Ye', u'І': u'I', u'Ї': u'Yi', u'Ґ': u'G',
    u'є': u'ye', u'і': u'i', u'ї': u'yi', u'ґ': u'g',
})
ukrainian[1].update(russian[1])
# Czech letters with diacritics -> plain ASCII.
czech = {
    u'č': u'c', u'ď': u'd', u'ě': u'e', u'ň': u'n', u'ř': u'r', u'š': u's',
    u'ť': u't', u'ů': u'u', u'ž': u'z',
    u'Č': u'C', u'Ď': u'D', u'Ě': u'E', u'Ň': u'N', u'Ř': u'R', u'Š': u'S',
    u'Ť': u'T', u'Ů': u'U', u'Ž': u'Z',
}
# Polish letters with diacritics -> plain ASCII.
polish = {
    u'ą': u'a', u'ć': u'c', u'ę': u'e', u'ł': u'l', u'ń': u'n', u'ó': u'o',
    u'ś': u's', u'ź': u'z', u'ż': u'z',
    u'Ą': u'A', u'Ć': u'C', u'Ę': u'E', u'Ł': u'L', u'Ń': u'N', u'Ó': u'O',
    u'Ś': u'S', u'Ź': u'Z', u'Ż': u'Z',
}
# Latvian letters with diacritics -> plain ASCII.
latvian = {
    u'ā': u'a', u'č': u'c', u'ē': u'e', u'ģ': u'g', u'ī': u'i', u'ķ': u'k',
    u'ļ': u'l', u'ņ': u'n', u'š': u's', u'ū': u'u', u'ž': u'z',
    u'Ā': u'A', u'Č': u'C', u'Ē': u'E', u'Ģ': u'G', u'Ī': u'i', u'Ķ': u'k',
    u'Ļ': u'L', u'Ņ': u'N', u'Š': u'S', u'Ū': u'u', u'Ž': u'Z',
}
# Kazakh: Russian diphthongs plus the extra Kazakh letters; the update()
# below merges in the full Russian character table.
# Fix: the values for 'ң'/'Ң' lacked the u prefix, mixing byte strings
# into an otherwise all-unicode table on Python 2.
kazakh = (russian[0].copy(), {
    u'ә': u'a', u'ғ': u'g', u'қ': u'k', u'ң': u'n', u'ө': u'o', u'ұ': u'u',
    u'ү': u'u', u'һ': u'h', u'і': u'i',
    u'Ә': u'A', u'Ғ': u'G', u'Қ': u'K', u'Ң': u'N', u'Ө': u'O', u'Ұ': u'U',
    u'Ү': u'U', u'Һ': u'H', u'І': u'I',
})
kazakh[1].update(russian[1])
# Farsi/Arabic letters, including Unicode Presentation Forms-B variants
# (U+FE70..U+FEFF: isolated/final/initial/medial glyph forms).
# Fix: the key for U+FEB3 (SEEN, initial form) had a trailing space
# (u'\uFEB3 '), so that character could never match the per-character
# lookup and fell through to the default replacement.
farsi = {
    u'ا': u'a',
    u'أ': u'a', u'\uFE81': u'a', u'\uFE82': u'a',
    u'آ': u'a', u'\uFE83': u'a', u'\uFE84': u'a',
    u'ب': u'b', u'\uFE8F': u'b', u'\uFE90': u'b', u'\uFE92': u'b', u'\uFE91': u'b',
    u'ت': u't', u'\uFE95': u't', u'\uFE96': u't', u'\uFE98': u't', u'\uFE97': u't',
    u'ث': u'th', u'\uFE99': u'th', u'\uFE9A': u'th', u'\uFE9C': u'th', u'\uFE9B': u'th',
    u'ج': u'j', u'\uFE9D': u'j', u'\uFE9E': u'j', u'\uFEA0': u'j', u'\uFE9F': u'j',
    u'ح': u'h', u'\uFEA1': u'h', u'\uFEA2': u'h', u'\uFEA4': u'h', u'\uFEA3': u'h',
    u'خ': u'x', u'\uFEA5': u'x', u'\uFEA6': u'x', u'\uFEA8': u'x', u'\uFEA7': u'x',
    u'د': u'd', u'\uFEA9': u'd', u'\uFEAA': u'd',
    u'ذ': u'd', u'\uFEAB': u'd', u'\uFEAC': u'd',
    u'ر': u'r', u'\uFEAD': u'r', u'\uFEAE': u'r',
    u'ز': u'z', u'\uFEAF': u'z', u'\uFEB0': u'z',
    u'س': u's', u'\uFEB1': u's', u'\uFEB2': u's', u'\uFEB4': u's', u'\uFEB3': u's',
    u'ش': u'sh', u'\uFEB5': u'sh', u'\uFEB6': u'sh', u'\uFEB8': u'sh', u'\uFEB7': u'sh',
    u'ص': u's', u'\uFEB9': u's', u'\uFEBA': u's', u'\uFEBC': u's', u'\uFEBB': u's',
    u'ض': u'd', u'\uFEBD': u'd', u'\uFEBE': u'd', u'\uFEC0': u'd', u'\uFEBF': u'd',
    u'ط': u't', u'\uFEC1': u't', u'\uFEC2': u't', u'\uFEC4': u't', u'\uFEC3': u't',
    u'ظ': u'z', u'\uFEC5': u'z', u'\uFEC6': u'z', u'\uFEC8': u'z', u'\uFEC7': u'z',
    u'ع': u'ao', u'\uFEC9': u'ao', u'\uFECA': u'ao', u'\uFECC': u'ao', u'\uFECB': u'ao',
    u'غ': u'za', u'\uFECD': u'za', u'\uFECE': u'za', u'\uFED0': u'za', u'\uFECF': u'za',
    u'ف': u'f', u'\uFED1': u'f', u'\uFED2': u'f', u'\uFED4': u'f', u'\uFED3': u'f',
    u'ق': u'q', u'\uFED5': u'q', u'\uFED6': u'q', u'\uFED8': u'q', u'\uFED7': u'q',
    u'ك': u'k', u'\uFED9': u'k', u'\uFEDA': u'k', u'\uFEDC': u'k', u'\uFEDB': u'k',
    u'ل': u'l', u'\uFEDD': u'l', u'\uFEDE': u'l', u'\uFEE0': u'l', u'\uFEDF': u'l',
    u'م': u'm', u'\uFEE1': u'm', u'\uFEE2': u'm', u'\uFEE4': u'm', u'\uFEE3': u'm',
    u'ن': u'n', u'\uFEE5': u'n', u'\uFEE6': u'n', u'\uFEE8': u'n', u'\uFEE7': u'n',
    u'ه': u'h', u'\uFEE9': u'h', u'\uFEEA': u'h', u'\uFEEC': u'h', u'\uFEEB': u'h',
    u'و': u'wa', u'\uFEED': u'wa', u'\uFEEE': u'wa',
    u'ي': u'ya', u'\uFEF1': u'ya', u'\uFEF2': u'ya', u'\uFEF4': u'ya', u'\uFEF3': u'ya',
    u'ة': u'at', u'\uFE93': u'at', u'\uFE94': u'at',
    u'ى': u'a', u'\uFEEF': u'a', u'\uFEF0': u'a',
    u'ی': u'ye', u'\uFBFC': u'ye', u'\uFBFD': u'ye', u'\uFBFE': u'ye', u'\uFBFF': u'ye',

    # Arabic Sukun
    u'\u064B': u'', u'\u064C': u'', u'\u064D': u'', u'\u064E': u'', u'\u064F': u'',
    u'\u0650': u'', u'\u0651': u'', u'\u0652': u'', u'\u0653': u'', u'\u0670': u'',

    # Arabic punctuation
    u'،': u',', u'؍': u'.', u'؟': u'?', u'٭': u'★', u'؞': u'...', u'٬': u'\'', u'\u200C': u'',
}
# Identity mapping for printable ASCII itself.
ascii_str = (u'_0123456789'
             u'abcdefghijklmnopqrstuvwxyz'
             u'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
             u'!"#$%&\'()*+,_-./:;<=>?@[\\]^`{|}~ \t\n\r\x0b\x0c')

ascii = ({}, dict(zip(ascii_str, ascii_str)))

# Fold every national table into the combined 'ascii' table; plain dicts
# are normalized to the (diphthongs, characters) pair form first.
for t in [latin, greek, turkish, russian, ukrainian, czech, polish, latvian, kazakh, farsi]:
    if isinstance(t, dict):
        t = ({}, t)
    ascii[0].update(t[0])
    ascii[1].update(t[1])
del t

# Unknown characters become underscores.
ascii[1][None] = u'_'

# 'slug' is 'ascii' minus punctuation and whitespace (for URL slugs/ids).
slug = (ascii[0].copy(), ascii[1].copy())
for c in u'''!"#$%&'()*+,_-./:;<=>?@[\\]^`{|}~ \t\n\r\x0b\x0c''':
    del slug[1][c]

# Public table registry; 'text' and 'id' are aliases.
tables = {u'ascii': ascii, u'text': ascii, u'slug': slug, u'id': slug}

# Main Trans with default tales
# It uses for str.encode('trans')
trans = Trans(tables=tables, default_table='ascii')
# trans codec work only with python 2
if PY2:
def encode(input, errors='strict', table_name='ascii'):
try:
table = trans.tables[table_name]
except KeyError:
raise ValueError("Table {0!r} not found in tables!".format(table_name))
else:
data = trans(input, table)
return data, len(data)
def no_decode(input, errors='strict'):
raise TypeError("trans codec does not support decode.")
def trans_codec(enc):
if enc == 'trans':
return codecs.CodecInfo(encode, no_decode)
try:
enc_name, table_name = enc.split(u'/', 1)
except ValueError:
return None
if enc_name != 'trans':
return None
if table_name not in trans.tables:
raise ValueError(u"Table {0!r} not found in tables!").format(table_name)
return codecs.CodecInfo(lambda i, e='strict': encode(i, e, table_name), no_decode)
codecs.register(trans_codec)
|
Weighing 1.4kg and made from quality steel, the Buckaroo Scaffold Hammer has been manufactured to suit strict requirements as demanded by the scaffolders we have worked with during the design process. This is our improved design.
It features a non-slip handle and can be used on most system scaffolds. It has a balanced solid head at one end and a tapered podger at the other.
Will fit the TMPH and TMPH frogs.
|
#!/usr/bin/env python
from subprocess import call
from moviepy.editor import (
VideoFileClip,
CompositeVideoClip,
TextClip,
ImageClip,
concatenate
)
from numpy.testing import assert_approx_equal
from os import listdir
from os.path import expanduser, isfile, getsize
def process_video(filename, video_height=480, overwrite=False):
gif_name = 'gifs/' + filename + '.gif'
if isfile(gif_name) and overwrite == False:
print "Skipping " + gif_name + " as it already exists."
return
video_file = VideoFileClip(filename)
try:
assert_approx_equal(float(video_file.w)/float(video_file.h),16.0/9.0)
video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
except:
print "Not resizing video."
video_file = video_file.resize(height=video_height)
end_image = video_file.to_ImageClip(0).set_duration(0.7)
video_file = concatenate([video_file, end_image])
logo_size = video_height/6
text = ImageClip(expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(video_file.duration).resize(width=logo_size).set_pos((video_file.w-logo_size,video_file.h-logo_size))
composite_video_file = CompositeVideoClip([video_file, text])
composite_video_file.write_gif(gif_name,fps=20)
fuzz_amt = 5
commands = 'gifsicle "'+gif_name+'" -O3 | convert -fuzz '+str(fuzz_amt)+'% - -ordered-dither o8x8,16 -layers optimize-transparency "'+gif_name+'"'
process = call(commands, shell=True)
if getsize(gif_name) > 5*1024**2:
process_video(filename, video_height=video_height*0.75, overwrite=True)
if __name__ == '__main__':
    from multiprocessing import Pool

    # endswith() replaces find(...) != -1, which also matched names like
    # 'clip.mp4.part'; one 4-process pool handles both extensions instead
    # of two separate pools run back to back.
    videos = [x for x in listdir('.') if x.endswith(('.mp4', '.mov'))]
    pool = Pool(processes=4)
    pool.map(process_video, videos)
|
You have several projects on your plate already, and an e-mail arrives asking you to start another right away. You don’t want to say “no” to the Big Boss. What do you do?
I think there’s always a way to say “yes.” Start by guessing at the number of hours it would take you. If taking the project will not impact your ability to complete the others on time, go for it, obviously. If taking it will impact the other projects, say: “I will be glad to. Please tell me what priority this has compared to my other projects, and I will push back the deadline on the lower priorities by x hours,” with “x” being the hours you think the new project will take.
Imagine you are a judge handling labor issues in Italy. You can imagine that, right? Lawyers on both sides are pushing you to hold the first hearing on a new case. An oversight commission may hassle you if you don’t start the case within 60 days of your getting it. But you feel like you have a full load already. Do you go ahead and open the case?
The new study shows pretty clearly that you will slow down that new case, and all your other cases, if you do. It was conducted by three economists, Decio Coviello of the Univ. of Rome “Tor Vergata,” Andrea Ichino of the Univ. of Bologna, and Nicola Persico of New York Univ. It was published by the U.S. National Bureau of Economic Research (NBER).
You may be wondering why economists are looking at judges' behaviors. Economics is defined on Dictionary.com as “the science that deals with the production, distribution, and consumption of goods and services…” Judging is a service produced. The study lists a number of other studies by economists showing problems caused by workers juggling tasks.
In my “Active Listening” class, I would report on work by neurologists on the subject. Brain scans show that when you think you are focusing equally on two things at once, what your brain is actually doing is switching so fast between one task and the other that you don’t consciously notice. But every time the switch happens, there is a period where the brain isn’t focused on either task. When I told a friend who works at Red Hat about this, Director of Certification Randy Russell, he said computer programmers call this “context switching.” They try to limit how much the computer does it, because it wastes time and energy. Ditto for our brains.
The NBER study measured the case closings of 31 judges over six years. The economists accounted for some factors that might influence how long it took to get a case closed, such as the judge’s experience, ability, quality of work, and level of effort. Of course it would take longer to close a more complicated case, but by law the cases are assigned in a completely random way, so case types should have been relatively the same for all judges.
The more cases on average a judge kept open at any one time, the longer it took him or her to complete any case on average, and the bigger the backlog the judge built up. “The judges who work on few cases at the same time, and try to close them quickly before opening new ones, succeed in closing a larger number of cases per quarter and in less time from the assignment” of the case to them, the authors write. “It is important to keep in mind that these differences emerge among judges of the same office, who work in exactly the same conditions, with the same secretarial assistance and with a very similar workload in terms of quantity and quality,” they add.
Managers should note that the 60-day rule proved counterproductive. It caused some judges to open cases before finishing other ones, increasing their backlog, which would eventually harm the chances of their making the deadline on later cases. This is measurable, real-world evidence for a practice psychology and project management experts have long said actually hurts the managers who use it. Telling people to “hurry up” instead of taking a measured look at their workloads only makes the situation worse.
The next time you have to do several tasks in a short period, figure out how long you have for each (allowing for interruptions), and do each, one at a time, for that length. For example, if you have to do three items in an hour, first turn off your e-mail program and silence the phone. The world will survive without you for an hour. Then do one task for 20 minutes and stop, and repeat for the next two. I promise you will get more done at a higher quality than you would otherwise.
Source: Coviello, D., A. Ichino, and N. Persico (2010), “Don’t Spread Yourself Too Thin: The Impact of Task Juggling on Workers’ Speed of Job Completion,” National Bureau of Economic Research, Working Paper 16502.
In developed nations, the events that trigger this reaction rarely require physical action or risk injury, however. Repeated exposure to these physical changes without the ability to respond physically is blamed for the well-documented connection between what business people call stress and higher disease and death rates.
But there’s a catch. “How the individual responds to those reactions determines whether they produce feelings of distress (a negative feeling) or produce feelings of eustress (a positive feeling),” Dr. Franken wrote in his textbook, Human Motivation. Jumping from an airplane creates the fight-or-flight response. If you jumped out on purpose, you enjoy the response. If the plane has stopped working, you probably don’t.
This distinction is important because the internal physical reaction is actually a two-step process. First the reaction needed to arouse greater attention and deal with an immediate threat kicks in. About 10 seconds later, secondary reactions that provide more long-term energy occur, and that response takes between 15 minutes and one hour to return to normal. The diseases associated with distress, to use the more precise term, appear to come mostly from this second phase.
There can be a third phase. If the distress remains long enough, the adrenal glands that create all of the hormones in the two-phase reactions can eventually collapse, “often the precursor to death…” Franken wrote.
What I see in this is a 10-second window of opportunity to control your internal response to a stress event, which fits perfectly with what I teach as the “S-R/S-R” model. An external stimulus (“S”) creates a response (“R”) in us, but it’s not like the classic response test where you cross your legs and the doctor hits below your kneecap with a rubber hammer, creating an automatic external reaction. The knee jerk is due to the “autonomic nervous system.” The nerve signal from the hammer only has to travel to your spinal cord, which sends back the message to move out of the way of the perceived danger to the leg. The brain only perceives the problem later, though this happens so fast we don’t realize there’s a gap.
Instead, the stress reaction is an internal response which becomes an internal stimulus (“R/S”) for the external response. In the workplace, then, we have two opportunities to control our external responses to outside events like a harsh word from a co-worker. First, we can change our internal R such that it does not become an internal S. Second, if that doesn’t work—if we get angered by that harsh word—we can control the external R. That is, we can keep our trap shut until we calm down.
Now I know how long we have to do that before we hurt ourselves: 10 seconds, which is an eternity. Maybe that old anger-management advice about counting to 10 has a scientific basis after all.
Source: Franken, R. (1994), Human Motivation, 3rd ed. Brooks/Cole Publishing Co.: Pacific Grove, CA.
In each of these examples, I would want an autocratic leader, because you don’t have time for the group to debate orders. A slow reaction on the battlefield equals death. An American football game would last days if the team with the ball debated each play in the huddle, rather than taking orders from the coach or quarterback. Nor would members of either group question the need for that arrangement. Granted, these teams benefit from discussion when not in fighting mode: The American military is a leading funder of teamwork research. However, a study published in 2013 finds there are cases where an order-giving boss might be perfectly fine for team performance.
The researchers also asked each team’s upper manager (a level above the team leader) about overall performance. When they crunched the numbers, they found that on average, a team leader’s beliefs about obedience had zero correlation to team performance!
As found in other studies, better-performing teams tended to perceive higher levels of procedural justice. (We can’t judge which caused the other from this study.) When the leader and team members agreed on power distance, the team both saw the leader as fair and performed well. I should note that if they disagreed, it was better for the boss to be less autocratic than the team wanted, rather than too autocratic. To play it safe, err on the side of not giving orders where possible.
OCBs were related to better performance, but not the way the scientists expected. They thought the highest levels of OCBs would occur at the highest levels of perceived justice, but OCBs actually peaked at a medium level of justice. The scientists were not sure why. They speculate in a journal article it may be because a boss sometimes has to make decisions affecting individual team members whether or not the member (or boss) likes that. I can think of other cases where the team leader may act contrary to the level of order-giving both sides prefer. For instance, I have known people who just want to be told what to do, but an autocratic boss pressured to be a “leader” may ask for the team’s input anyway sometimes. In any of these cases, a team may recognize a high level of procedural justice yet not behave well when the manager goes against the preferred style.
This study is the first to look at this topic, and the subjects were mostly young, male workers in China, so we can’t claim this finding applies to everybody. But it should lead you to question sweeping generalizations made by consultants.
To be clear, I do not think this study is a license for autocrats. First, bosses tend to have warped views of what their teams really want. Most people are reluctant to criticize the boss, and managers like other humans suffer “confirmation bias,” the tendency to only see evidence that supports our opinions. Especially—but not only—in the case of the autocratic boss, the boss tends to think the team accepts their management style. You would need an anonymous survey of members like the one in this study to get an unbiased picture.
Also, since most business teams are not under gunfire or being tackled at work, we have to respect the powerful body of evidence proving teams that make their own decisions outperform those ordered around, in most cases. For interdependent workers that do not want empowerment, the answer is not to become more autocratic. Instead, educate the team on the benefits of empowerment and provide a structure that makes it less burdensome and scary, like that detailed in The SuddenTeams™ Program.
Source: Cole, M., M. Carter, and Z. Zhang (2013), “Leader–Team Congruence in Power Distance Values and Team Effectiveness: The Mediating Role of Procedural Justice Climate.” Journal of Applied Psychology, advance online publication, doi: 10.1037/a0034269.
Sometimes, dear reader, I wonder if I’m making your life harder. Many “obvious” or popular ideas about working with other people turn out to be more complicated than you thought. Some turn out to be false. And instead of letting you go on doing what worked well enough in the past, I tell you to change your ways if you want to be the best you can be.
With that worry expressed, I now have the duty to inform you that providing emotional support to a co-worker could make their depressed mood worse. For managers who recognize providing support is part of their gig or people who try to be good team players by providing it, this could get frustrating.
The culprits in this latest complication in your life are behavioral scientist Inbal Nahum-Shani of the Univ. of Michigan, organizational behaviorist Peter Bamberger of Tel Aviv Univ., and labor management professor Samuel Bacharach at Cornell Univ. Something they saw in previous studies apparently got them curious. One line of thought is that humans have an unwritten rule you should give as much emotional support as you get. When the exchange is out of balance, the over-giver feels resentment and the over-receiver feels guilty. A competing idea is that being an over-giver makes you feel important, like you matter. The journal article calls this the “mattering principle,” a phrase I love. Being an over-receiver, by contrast, makes one feels weak and inferior. Beneath both ideas is the concept of “conservation of resources,” meaning simply that we all need to keep some emotional reserves to get through life. There are studies to support each of these theories, but not overwhelming proof. Something more is going on.
A year later, the interviewers called again and re-asked the questions about the depression symptoms. This step sets this study apart from a lot of the ones you read about in the news. It makes it much more likely the emotional support levels caused any changes in depression symptoms. Too often people, including journalists, assume that because two factors viewed at one point in time seemed related, one change caused the other. I read a cute story recently about a statistics professor who pointed out that crickets chirp more when temperatures are warmer. A student asked if that meant rising temperatures cause more chirping, and the professor deadpanned that he thought cricket chirping caused temperatures to rise.
Gave about as much support as they got, higher overall levels of support at the time of the first interview decreased depression symptoms a year later, as you would expect.
Gave more support than they got, getting higher support had no effect one way or the other a year later.
Gave less support than they got, getting higher support increased depression symptoms.
Clearly it is better for all parties if people feel they are getting roughly as much emotional support as they are giving. Do not start parceling out your support based on how much you already give someone. Instead, Nahum-Shani, Bamberger, and Bacharach suggest “implementing employee support programs such as peer-based assistance programs” which would give people more opportunities to help others.
In a team setting, you could pair up people who seem to need more emotional support with others who need help in technical areas in which the first set have skills. Trading technical for emotional support might help the balance sheet. Also, when you recognize you are giving a lot of support to someone, find ways they can support you back. Perhaps you don’t really feel the need for someone’s ear when you hit a glitch in a project. But it might be worth asking for it anyway if you knew the other person had gone through the same thing. They, and your relationship, might benefit from their expressing sympathy for you.
Of course, there are people who will always demand more support than others. Almost everybody gets depressed on occasion, but science has identified genes that cause some people to be more susceptible. Unless you’re a doctor, you cannot cure them. In the U.S., it is probably illegal for a manager to even suggest that a worker might be one of those people or that they should get some help. Depression is covered under the Americans with Disabilities Act, so keep any conversations focused on harmful work behaviors, not possible causes.
Maybe you enjoy providing emotional support to others. Maybe you find it uncomfortable. Either way, human emotions are a major factor in the working world. They often are at the root of bias, bad decisions, conflicts, lost productivity, absences, and quitting. The best you can do, perhaps, is to keep your ears open for others when they need it. But sometimes, for their sake if not yours, you might need to open up about yourself.
Source: Nahum-Shani, I., P. Bamberger, and S. Bacharach (2011), “Social Support and Employee Well-Being: The Conditioning Effect of Perceived Patterns of Supportive Exchange,” Journal of Health and Social Behavior 52(1):123.
I call it the “Baby with Bathwater Syndrome” (BWBS). Many of the objections I have heard to empowering teams, especially against self-directed teams, come from some example the objector raises of a team or company where it didn’t work. Then I start asking questions. Did the teams create charters, and the most vital components thereof? Were they given measurable goals to accomplish, into which they had input? Were they given enough authority and resources to accomplish the tasks assigned, including people, training, equipment and supplies? Did they…? Did they…?
Most often the answer to each is, “I don’t know.” When the person does know, I can point out several or many things the managers did wrong in implementing empowerment. Either way, the objector had thrown the baby—empowerment, which is consistently shown in studies to do good things—out with the dirty bathwater of mistakes. Instead those mistakes could have provided information to help empower the objector’s teams correctly.
This topic’s research reflects one of the frequent objections I hear, that empowered teams end up arguing so much, nothing gets done. The assumption is that empowerment caused the arguing. The fact that many empowered teams do not result in gridlock illustrates that empowerment is not the direct cause of the arguing, but this study provides better evidence.
In the field study of 105 team leaders and 386 of their direct reports, leaders’ ratings of their team members’ behaviors replaced self-reported statements for the first two. All of the second study’s wordings were changed to refer to the worker’s job. The first study was small, more of a pilot test, so I will focus on the second. That said, both in the lab and in the field, team empowerment was linked to the level of connection members felt with the teams and their senses of personal empowerment. In both studies higher levels of all correlated with lower desire to quit their jobs, and personal empowerment or connection was linked to the likelihood of innovation and teamwork.
The correlation between empowered leadership and personal conflict showed that when one was higher, the other was lower (–0.35). We can’t say whether an empowered leadership style led to lower conflict, or lower conflict made leaders more likely to adopt an empowered style. But we can definitely say empowerment did not cause conflict. An empowered style also correlated with higher innovation and teamwork behaviors. Of course, teams with higher levels of conflict were less likely to enjoy the positive effects of empowerment.
Individuals who were more group-oriented than individual-oriented (more “collectivist”) were less influenced by the leader’s empowerment style. However, whether a team or leader was Chinese or American or mixed did not impact the data, and the various factors were related in the same ways. To repeat a theme from other topics in this hypertext, people are more alike than different.
I should note that the study’s emphasis was on the interactions between all of the variables mentioned here. Some of the findings are too preliminary to act on. The “actionable” information for you, because it agrees with earlier studies, is that conflict is usually lower in empowered teams but reduces the positive effects of empowerment when present. “Managers who encounter moderate to high levels of relationship conflict in their work teams should thus seek to mitigate its effects on employees by developing a cohesive and supportive team environment among team members,” the study article says.
BWBS has its origins in at least two biases that infect our rational thought, from a list I’ve mentioned before: “Illusory correlation,” the false belief that two factors are linked when they are not; and the “law of small numbers,” the incorrect assumption that personal experiences or examples you’ve heard reflect the way things usually work. People who have seen conflicts or other problems in empowered teams assume empowerment caused the problems, when in fact other factors independent of empowerment caused them. And, they assume that because the empowered teams they know about had problems, most empowered teams have problems. As this study illustrates, neither is true. Based on my personal experiences with empowered teams, I could have stated many years ago they usually outperform those where the bosses make most of the decisions. Still, I did research to confirm what I thought was true—research that proved me wrong on other things, I’ll add. To touch on another theme, the scientists doing this topic’s studies were forced by their data to admit they were wrong about some ideas they had going into the study.
This leads to another bias that is the biggest problem in implementing an empowered leadership style. “Information avoidance” is defined as, “People’s tendency to avoid information that might cause mental discomfort or dissonance.” Every manager I have met who thought he or she had empowered their team really had not. None had asked for objective help to question their belief.
Source: Chen, G., et al. (2011), “Motivating and Demotivating Forces in Teams: Cross-Level Influences of Empowering Leadership and Relationship Conflict,” Journal of Applied Psychology 96(3):541–557.
As you read articles about leadership, you soon come to understand that engaged workers are satisfied workers. If people are highly involved in their work, they like the job, right?
Probably, one study said. But wait—there’s more!
When first defined, scientists thought engagement was the opposite of burnout, and here we get the first reminder that assumptions can be wrong. Later research showed that greater workload demand increases engagement but lowers satisfaction, the authors report. You can be very engaged and end up burned out. This makes it reasonable to ask whether engagement and satisfaction are the same thing.
Alarcon and Lyons asked that by analyzing samples used in three previous studies. Two were of U.S. undergraduate students who also worked at least half-time. The third used responses from 394 full-time employees who had volunteered to be involved in online surveys. Each of these samples raises questions of how well the data applies to the average worker (its “external validity,” to use the scientific term). Obviously, college kids may be very different from all workers, and so might study volunteers with easy computer access and an interest in science, 74% white.
The researchers randomly combined responses from the three samples into two big ones and performed factor analysis on the results. Factor analysis tests whether answers tend to clump together in predictable ways. Alarcon and Lyons found that engagement, satisfaction, and AWL showed up as three different patterns.
An engaged nurse may still switch to a different hospital or department. An excellent software developer I know, a hard worker with unique skills on his team, left a good company abruptly. Dissatisfaction with a manager or pay, or those AWL factors above, might lead to a job change without any change in engagement.
The lesson is, just because everybody on your team works hard and voluntarily puts in long hours, that does not mean they are happy in their jobs. During the Great Recession a number of polls indicated that at least a third of U.S. workers intended to look for another job as soon as the economy turned around, while at the same time overall productivity (output per worker) was reaching new heights.
At the team level, do some serious workload planning and consider whether you really have the ideal number of people in the group. A project team I was working with got hit with a huge scope change. It spent six hours planning the next three months, with all members working late into the night. But the result was hard numbers allowing the team to go back to management and say, “We need help.” In this case the team leader was already working on it, an awareness which helps explain his team’s low turnover rate. The numbers still helped in the allocation of those new resources.
Source: Alarcon, G., and J. Lyons (2011), “The Relationship of Engagement and Job Satisfaction in Working Samples,” The Journal of Psychology 145(5):463.
If you’re a top manager wondering why you can’t get more output from your workers, you might look into whether their teams can’t coordinate as needed 33 percent of the time.
Standard operating procedures are enough for two teams who don’t have to talk to do their work in compatible ways.
Matrix organizations, those which do most of their work using project teams with members from different functional teams.
The U.S. Department of Defense wanted to know if managers usually match up the right method of coordination with the task at hand. The department funded business researchers Daniel Sherman of the Univ. of Alabama-Huntsville and Robert Keller of the Univ. of Houston to find out. The resulting study mentioned above and published in Organization Science might help you raise productivity.
One researcher spent 50 percent of his working time for four months observing the units like an anthropologist.
With this background, he talked to the four division managers who oversaw the teams and came to agreements with each about how much the teams ought to be coordinating.
The researchers interviewed each of the 20 unit managers to learn how much each thought his or her unit needed to coordinate with each other unit.
The researchers put together a panel of experts—two Ph.D.’s and two people with 25-plus years in the industry—who determined what the best methods of coordination would be based on descriptions of the units.
Interviews of the division and unit managers were used to reveal the actual coordination methods.
Questionnaires of the 327 unit employees checked the amount and quality of communication between each pair of employees (that’s 53,301 pairs!).
The results were analyzed for each possible pair of teams (190). The results were sobering. In one-third of the pairs, a team manager underestimated how often their team needed to coordinate their work with the other team. Because of this, they provided too weak a method of coordination about one-third of the time. But those are just averages. In the case of organizations that should have had the most complex methods, such as project teams, three out of four managers were wrong. A few managers in that case thought, in effect, their teams could coordinate well enough by just following SOPs! The managers almost always underestimated the coordination required, not overestimating.
I’ll restate the methods and findings as a story. Hector and Shirley, managers of the Data Entry Unit and System Architecture Unit respectively, think they only need to communicate personally to coordinate the teams. But a panel of experts would tell them they really need to create a task force with members from each team to optimize the work. Because they don’t, pairs of employees on the separate teams don’t exchange as much information as they should. And because of that, there is a gap between the amount of coordination help their division managers says the teams should have, and the amount Hector and Shirley give them. Perhaps if Shirley’s system architects better understood the problems Hector’s data entry folks ran into by watching them work, the architects would make programming decisions that led to more efficient operations. Instead, Hector and Shirley exchange e-mails and think everything is fine.
Sherman and Keller propose some possible reasons for the problem. These tie in perfectly with the topic on behavioral operations that emphasized the harmful role mental shortcuts play in management. This study provides more examples.
Say the organization installed new workflow control software. Before then, passing the work back and forth was difficult, so the unit managers (or their predecessors) set up the process to have Team A do all its work before handing off to Team B. With the new software, the teams could be working on different steps of the same process at the same time. Unfortunately, their managers are stuck in their old “mental model” about how the process works and don’t stop to think about why it was set up that way.
Another example, the authors write, would be if customer requirements had changed. Engineers used to design a product and hand it off to the Manufacturing Department to figure out how to produce it. When speed-to-market became more important, R&D and manufacturing learned to work together from the start of the process, Sherman and Keller say. Some companies are taking longer than others. Like all humans, managers tend to reject new information that conflicts with their previously formed beliefs, as the authors state.
Also like all humans, many managers tend to take a simple approach to complex problems; after all, we can only retain and process so much information. Taken together, these mental shortcuts help explain why I teach about persuasion skills for “managing upward”: You have to break through those old mental models. The findings also explain why so many studies have shown that a group of people with relevant skills will make a better decision than a single expert given the same information. Multiple brains can process more information, and multiple perceptions are more likely to challenge old models.
Sherman and Keller say about their results, “a simple awareness of this phenomenon on the part of practicing managers can lead to corrective action so the proper modes (of coordination) are implemented.” They recommend, for example, creating diagrams of interactions between teams.
Source: Sherman, D., and R. Keller (2011), “Suboptimal Assessment of Interunit Task Interdependence: Modes of Integration and Information Processing for Coordination Performance,” Organization Science 22(1):245.
Pay, you’ve no doubt heard, is not the top employee motivator. But money does play a major role in ways that fact hides, as shown by a decades-old model of human motivation that got more support from a survey by the Society for Human Resource Management (SHRM). By coincidence, the survey also supports a discussion comment I made on motivational quotations, but first we go to psychology researcher Abraham Maslow.
Safety—Security, both physical and of resources.
Love/belonging—All varieties, including romantic and family.
As I would say in my persuasion class, if someone’s financial security is under threat, there’s no point in appealing to their self-esteem. Your listener is focused on how to keep the money flowing and won’t hear anything else until the threat is resolved.
When I used to speak at events, most audience members realized pay is not a primary motivator for most workers, consistently coming in around #5 in surveys. The SHRM report, “2010 Job Satisfaction Report: Investigating What Matters Most to Employees,” concurs. Employees surveyed for it ranked “Compensation/pay” at #6 among items considered “very important” to job satisfaction. (Satisfaction and motivation are not the same thing scientifically, but I think there is enough overlap to treat them that way for this purpose.) Money still played a big role, however. Number 1 on the list was “Job security,” #2 was “Benefits,” and #4 was “Organization’s financial stability.” The company’s money matters in each, and each speaks to the bottom levels of Maslow’s triangle, ensuring you have the basics of life. Studies into happiness find that money can, in fact, buy it if you aren’t getting enough to eat. Only after the typical person has enough to cover basic needs, plus a bit left over, does extra money lose a lot of its power to motivate.
The report actually covered two groups of people, which leads to my discussion comment. One is a scientific sample of workers, based on all U.S. households with telephones (probably landlines), a total of 606 respondents. The other asked the same questions of a random selection of SHRM members who appeared to be working outside of academia, with 589 respondents. One big difference emerged in comparing these groups. The HR folks thought “Relationship with immediate supervisor” was going to come in at #1 in importance, with 72% saying it was “Very important.” But it came in #7 for employees, with 48%. “Communication between employees and senior management” came in #3 for HR professionals (65%), but #8 for employees (47%).
I understand why HR folks would think manager relations are more important than employees say they are. As a member of SHRM at the time, I know these topics come up all the time. There’s nothing wrong with that: The results show these matters are very important to half the workforce. I just find it intriguing that the emphasis in HR-group presentations and related magazines cause HR reps to miss a critical fact. If people feel their basic needs are in jeopardy, they will put up with a bad boss and poor communications at least until the job market turns around. Those who aren’t confident about their chances in that market will stick around forever.
This can lead a bad boss to think everything is hunky-dory because nobody is quitting. I addressed this in a LinkedIn discussion focused on managers who use motivational quotes instead of solving the problems that are so demotivating. That creates a cynicism that makes later change efforts difficult. People think it’s the latest “feel-good” campaign and see no point in changing. As the first commenter, I gave an example from a study in which the rah-rah didn’t work, so the company went back and tackled its operational problems through training and coaching with marvelous results.
I won’t provide a link to the discussion to protect the very nice person who defended the use of motivational quotes. She said she had been sending them out regularly at the behest of a former manager and had nothing but positive comments. When comments in the discussion turned a bit negative, I jumped back in to tell her I was sure that was true. Unfortunately, based on my experience with teams, I told her I could almost guarantee the percentage of people who hated them was nearly as large as the supporters, and another bunch of recipients ignored them, some with annoyance. In addition to the genuine supporters, many of the nonsupporters would say positive things either for reasons of office politics or because they knew her intentions were good. The only way to know people’s true opinions would be to conduct an anonymous employee survey, I said (or arrange anonymous interviews, I could have added).
Whether you are trying to raise productivity or morale, you are probably using the wrong methods unless you have a means of getting objective information about what matters to your employees. The most cost-effective way to fix that is to skip the surveys and let the employees solve the issue. Tell them what the problem is and why it is a problem; ask them for the solution; and pledge to help them put that solution in place—even if you have your doubts about it. They’re going to have to implement the solution anyway, and you don’t have to motivate them when it’s their solution.
For my fellow statistics geeks, confidence level was reported at 96% with a margin of error around 4%.
|
#!/usr/bin/env python3
__package__ = 'archivebox.cli'
__command__ = 'archivebox oneshot'
import sys
import argparse
from pathlib import Path
from typing import List, Optional, IO
from ..main import oneshot
from ..util import docstring
from ..config import OUTPUT_DIR
from ..logging_util import SmartFormatter, accept_stdin, stderr
@docstring(oneshot.__doc__)
def main(args: Optional[List[str]]=None, stdin: Optional[IO]=None, pwd: Optional[str]=None) -> None:
    """Entry point for the `archivebox oneshot` subcommand.

    Accepts a single URL/path either as a CLI argument or piped on
    stdin (exactly one of the two), then archives it into --out-dir.
    """
    parser = argparse.ArgumentParser(
        prog=__command__,
        description=oneshot.__doc__,
        add_help=True,
        formatter_class=SmartFormatter,
    )
    parser.add_argument(
        'url',
        type=str,
        default=None,
        # bug fix: without nargs='?' argparse rejects an empty command line
        # before the stdin fallback below can ever run
        nargs='?',
        help=(
            'URLs or paths to archive e.g.:\n'
            '    https://getpocket.com/users/USERNAME/feed/all\n'
            '    https://example.com/some/rss/feed.xml\n'
            '    https://example.com\n'
            '    ~/Downloads/firefox_bookmarks_export.html\n'
            '    ~/Desktop/sites_list.csv\n'
        )
    )
    parser.add_argument(
        "--extract",
        type=str,
        help="Pass a list of the extractors to be used. If the method name is not correct, it will be ignored. \
              This does not take precedence over the configuration",
        default=""
    )
    parser.add_argument(
        '--out-dir',
        type=str,
        default=OUTPUT_DIR,
        help= "Path to save the single archive folder to, e.g. ./example.com_archive"
    )
    command = parser.parse_args(args or ())
    stdin_url = None
    url = command.url
    if not url:
        stdin_url = accept_stdin(stdin)
    # Exactly one source must supply the URL: CLI arg XOR stdin.
    # bug fix: the second clause used to test `not stdin` (the stream
    # object) instead of `not stdin_url` (the value read from it), so an
    # empty pipe slipped through to oneshot() with url=None.
    if (stdin_url and url) or (not stdin_url and not url):
        stderr(
            '[X] You must pass a URL/path to add via stdin or CLI arguments.\n',
            color='red',
        )
        raise SystemExit(2)
    oneshot(
        url=stdin_url or url,
        out_dir=Path(command.out_dir).resolve(),
        extractors=command.extract,
    )
# Allow running this subcommand module directly from the shell.
if __name__ == '__main__':
    main(args=sys.argv[1:], stdin=sys.stdin)
|
The Sharper Image 3-in-1 Smart Groomer is the compact and versatile tool designed to give you a neat and groomed appearance, quickly and easily. Three separate cutting areas groom your facial hair (beard, moustache, sideburns), nose hair, ear hair and eyebrows to help you achieve a stylish and professional appearance. Cordless and portable for use at home, at the gym or when you travel. Includes protective travel caps.
Looking for a single tool to handle all your facial grooming needs? The Sharper Image 3-in-1 Smart Groomer features a trio of cutting tools for beard, moustache, nose hair and more!
• Fits in a drawer, suitcase, gym bag, etc.
|
"""
# Pytess
Pure Python tessellation of points into polygons, including
Delauney/Thiessin, and Voronoi polygons. Built as a
convenient user interface for Bill Simons/Carson Farmer python port of
Steven Fortune C++ version of a Delauney triangulator.
## Platforms
Tested on Python version 2.x.
## Dependencies
Pure Python, no dependencies.
## Installing it
Pytess is installed with pip from the commandline:
pip install pytess
## Usage
To triangulate a set of points, simply do:
import pytess
points = [(1,1), (5,5), (3,5), (8,1)]
triangles = pytess.triangulate(points)
And for voronoi diagrams:
import pytess
points = [(1,1), (5,5), (3,5), (8,1)]
voronoipolys = pytess.voronoi(points)
## More Information:
- [Home Page](http://github.com/karimbahgat/Pytess)
- [API Documentation](http://pythonhosted.org/Pytess)
## License:
This code is free to share, use, reuse,
and modify according to the MIT license, see license.txt
## Credits:
I just made it more convenient to use for end-users and uploaded it to PyPi.
The real credit goes to Bill Simons/Carson Farmer and Steven Fortune for
implementing the algorithm in the first place.
Karim Bahgat (2015)
"""
__version__ = "0.1.0"
from .main import *
|
FWIW: I have heard one bad story about Fikses. One of my instructors for a DE said he had cracked, small-hairline type, a couple sets of FM-5's. These were 18"ers on 928 GTS. AFAIK, he hasn't had the same problems with similar wheels on a 968.
That said, I would not hesitate to pick up a set of FM-5's for myself, at the right price. Just putting this out there.
Thank you for your interest in Fikse USA Inc. We have introduced a new series of wheels in 18" and 19" sizes called Profil. They have many additional benefits over our Classic line which are still available in 17" and 18" sizes.
1. Deeper spoke section dimensions results in increased stiffness for better cornering performance.
2. Deeper mounting pad thickness allows clearance for larger brakes.
3. Sculpted pockets on the mounting pad for reduced weight.
4. 3D chamfering on all spoke edges. Increased fatigue life and improved look.
5. 18"-19" diameter wheel designs in proportion with each other for staggered fitments.
6. New valve stem with lower profile for greater strength.
7. Streamlined rim flange for greater curb protection.
8. New heated treated alloy rims for stronger/lighter wheels.
9. New full polished finish with clear coating over the center section.
Usual disclaimer: I have no affiliation whatsoever with Fikse.
What do the Profil series look like? Does anyone have a picture? Can't see it on Fikse's web site.
I believe the FM10 picture on the Wheel Enhancement ad in Panorama or Excellence is the FM10 from the new Profil series.
The changes seem to be very slight from the cosmetic standpoint.
Thanks. I see that the Profil has a slightly more pronounced and deeper center cup.
Actually if you choose the K28 with the new reverse lip rim configuration (which is even nicer) the weight will increase about 2 lb per rim (according to Kinesis).
I'm sorry if this is off topic, but I just wanted to know how the Simmons wheels are. Are they lightweight? Are they good quality? They are not that expensive and they look really good. Any ideas?
|
# Node specific local settings
# NOTE: %%TOKEN%% markers in this file are placeholders substituted by the
# deployment/installation script; the file is not valid Python until then.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# The URL for this META-SHARE node django application
DJANGO_URL = 'http://localhost:{0}/metashare'.format(%%DJANGO_PORT%%)
DJANGO_BASE = 'metashare/'
# Used by Django for cryptographic signing -- must be kept secret in production.
SECRET_KEY = 'fdklsc)dscdus8f7odc$slacud%%8so7cwp2fsFDASFWR/REFEsfjskdcjsdl3W'
#STORAGE_PATH = ROOT_PATH + '/storageFolder'
STORAGE_PATH = '%%STORAGE_PATH%%'
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql',
                                       # 'postgresql', 'sqlite3', 'oracle'.
        'NAME': '{0}'.format('%%DATABASE_FILE%%'),  # Or path to file if using sqlite3.
                                       # '{0}/development.db'.format(ROOT_PATH)
        'USER': '',                    # Not used with sqlite3.
        'PASSWORD': '',                # Not used with sqlite3.
        'HOST': '',                    # Set to empty string for localhost.
                                       # Not used with sqlite3.
        'PORT': '',                    # Set to empty string for default.
                                       # Not used with sqlite3.
    }
}
# the URL of the Solr server which is used as a search backend
HAYSTACK_SOLR_URL = 'http://127.0.0.1:{0}/solr'.format(%%SOLR_PORT%%)
# the URL of the Solr server (or server core) which is used as a search backend
SOLR_URL = 'http://127.0.0.1:{0}/solr/main'.format(%%SOLR_PORT%%)
# the URL of the Solr server (or server core) which is used as a search backend
# when running tests
TESTING_SOLR_URL = 'http://127.0.0.1:{0}/solr/testing'.format(%%SOLR_PORT%%)
# List of other META-SHARE Managing Nodes from which the local node imports
# resource descriptions. Any remote changes will later be updated
# ("synchronized"). Use this if you are a META-SHARE Managing Node!
%%CORE_NODES%%
# User accounts with the permission to access synchronization information on
# this node:
%%SYNC_USERS%%
# List of other META-SHARE Nodes from which the local node imports resource
# descriptions. Any remote changes will later be updated ("synchronized"). Any
# imported resource descriptions will also be shared with other nodes that
# synchronize with this local node, i.e., this node acts as a proxy for the
# listed nodes. This setting is meant to be used by META-SHARE Managing Nodes
# which make normal META-SHARE Node resource descriptions available on the
# META-SHARE Managing Nodes.
%%PROXIED_NODES%%
|
Fitbit "Alta HR Wristband (Special Edition) | Rose Gold, gold"
The Fitbit Alta HR Special Edition Soft Pink is now available in Malaysia. The cheapest Fitbit Alta HR Special Edition Soft Pink price in Malaysia is from Lazada. Fitbit Alta HR Special Edition Soft Pink price details were updated in April 2019.
|
#! /usr/bin/env python
#
# Copyright (c) 2011 SEOmoz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import pymysql
import datetime
from sauron import logger
from sauron.metrics import Metric, MetricException
class SphinxMetric(Metric):
    """Report searchd runtime statistics from a Sphinx server.

    Connects to searchd's MySQL-protocol listener, runs ``SHOW STATUS``
    and reports uptime, query count and average per-query costs.
    (The previous docstring was copy-pasted from an unrelated
    scheduling metric and described the wrong behavior.)

    Attributes:
        host (string): the ip of your Sphinx host (default: 127.0.0.1)
        port (int): the port for Sphinx (default: 9306)
    """
    def reconfig(self, **kwargs):
        """Apply new configuration and reset cached connection state."""
        # host/port are presumably populated by Metric.reconfig from
        # kwargs -- TODO confirm against sauron.metrics.Metric
        Metric.reconfig(self, **kwargs)
        self.conn = None
        self.cur = None

    def __del__(self):
        # Best-effort cleanup in case a connection is still open.
        try:
            self.cur.close()
            self.conn.close()
        except AttributeError:
            pass

    def values(self):
        """Return a dict of searchd status counters.

        Raises:
            MetricException: if the server is unreachable or the status
                output is missing an expected counter.
        """
        try:
            self.conn = pymysql.connect(host=self.host, port=self.port)
            self.cur = self.conn.cursor()
            self.cur.execute('show status')
            r = dict(self.cur.fetchall())
            return {
                'results' : {
                    'uptime' : (r['uptime'], 'Seconds'),
                    'queries' : (r['queries'], 'Count'),
                    'avg_wall' : (r['avg_query_wall'], 'Seconds'),
                    'avg_cpu' : (r['avg_query_cpu'], 'Percent'),
                    'avg_read' : (r['avg_query_readkb'], 'Kilobytes')
                }
            }
        except pymysql.err.MySQLError:
            raise MetricException('Error connecting to sphinx searchd')
        except KeyError:
            raise MetricException('Could not find all keys in searchd status')
        finally:
            # bug fix: each call used to open a fresh connection without
            # closing the previous one, leaking one connection per poll
            # (__del__ only closed the most recent pair).
            if self.cur is not None:
                self.cur.close()
                self.cur = None
            if self.conn is not None:
                self.conn.close()
                self.conn = None
|
SAN FRANCISCO – May 7, 2018 – Francoise Gilbert and Ian C. Ballon of global law firm Greenberg Traurig, LLP will speak at the 19th Annual Institute on Privacy and Data Security Law held at The Practicing Law Institute (PLI) California Center in San Francisco, May 7-8, 2018.
Gilbert focuses her practice and research on U.S. and global data privacy and security matters. She counsels clients on complex issues and compliance requirements related to privacy, security, and e-business risks. Some of her favorite engagements include designing data protection programs that comply with the European Union General Data Protection Regulation (GDPR), shaping digital advertising campaigns, integrating privacy and security controls in data analytics and artificial intelligence projects, identifying and addressing personal data issues likely to arise from the use of products or services used in digital cities, connected devices, intelligent vehicles, and other emerging technologies. Gilbert is the author and editor of the leading two-volume treatise Global Privacy and Security Law, published by Wolters Kluwer (www.globalprivacybook.com), which analyzes in depth the privacy and data protection laws of 68 countries on all continents (WolterKluwer 2009-2018). She has received law degrees and obtained bar admissions both in the United States and in France, and holds CIPP/US, CIPP/EU, and CIPM certifications from the International Association of Privacy Professionals (IAPP).
Ballon defends data privacy, security breach, and TCPA class action suits, in addition to representing companies in other technology, IP and internet, and mobile-related complex litigation. A list of his recent cases may be found here. He is also the author of the leading treatise on internet law, E-Commerce and Internet Law: Treatise with Forms 2nd edition, the 5-volume set published by West (www.IanBallon.com), which includes extensive sections on data privacy, cybersecurity, and defending class action suits. In addition, he is the author of The Complete CAN-SPAM Act Handbook (West 2008) and The Complete State Security Breach Notification Compliance Handbook (West 2009). Ballon serves as executive director of Stanford University Law School’s Center for E-Commerce. He was most recently included on the Daily Journal’s 2018 “Top Intellectual Property Lawyers” list, as well as being recognized as a “Groundbreaker” in The Recorder’s 2017 Litigation Departments of the Year Awards and an intellectual property “Trailblazer” by the National Law Journal. He was named the Lawyer of the Year for information technology law in the 2018, 2016, and 2013 editions of The Best Lawyers in America. In addition to his J.D. and LL.M., Ballon holds the CIPP/US certification from the International Association of Privacy Professionals (IAPP).
|
import pytest
import tempfile
import numpy as np
import deepchem as dc
from deepchem.feat import MolGraphConvFeaturizer
from deepchem.models.tests.test_graph_models import get_dataset
# The torch/dgl stack is an optional dependency: probe for it once and
# record the result so tests can be skipped when it is missing.
try:
    import dgl
    import dgllife
    import torch
    from deepchem.models.torch_models import MPNNModel
    has_torch_and_dgl = True
except Exception:
    # bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. `Exception` keeps tolerance for the non-ImportError
    # failures these packages can raise on import (e.g. missing CUDA libs).
    has_torch_and_dgl = False
@pytest.mark.torch
def test_mpnn_regression():
  """MPNNModel regression: overfit a tiny set, then smoke-test on Delaney."""
  feat = MolGraphConvFeaturizer(use_edges=True)
  tasks, dataset, transformers, metric = get_dataset(
      'regression', featurizer=feat)

  # Overfit check: a full-size model trained long enough should reach a
  # low MAE on its own training data.
  model = MPNNModel(
      mode='regression', n_tasks=len(tasks), learning_rate=0.0005)
  model.fit(dataset, nb_epoch=400)
  scores = model.evaluate(dataset, [metric], transformers)
  assert scores['mean_absolute_error'] < 0.5

  # Smoke test: one epoch of a deliberately tiny model on a small
  # MoleculeNet dataset.
  from deepchem.molnet import load_delaney
  delaney_tasks, (train_set, _, _), _ = load_delaney(featurizer=feat)
  tiny = MPNNModel(
      mode='regression',
      n_tasks=len(delaney_tasks),
      node_out_feats=2,
      edge_hidden_feats=2,
      num_step_message_passing=1,
      num_step_set2set=1,
      num_layer_set2set=1)
  tiny.fit(train_set, nb_epoch=1)
@pytest.mark.torch
def test_mpnn_classification():
  """MPNNModel classification: overfit a tiny set, then smoke-test on BACE."""
  feat = MolGraphConvFeaturizer(use_edges=True)
  tasks, dataset, transformers, metric = get_dataset(
      'classification', featurizer=feat)

  # Overfit check on the model's own training data.
  model = MPNNModel(
      mode='classification', n_tasks=len(tasks), learning_rate=0.0005)
  model.fit(dataset, nb_epoch=200)
  scores = model.evaluate(dataset, [metric], transformers)
  assert scores['mean-roc_auc_score'] >= 0.80

  # Smoke test: one epoch of a deliberately tiny model on a small
  # MoleculeNet dataset.
  from deepchem.molnet import load_bace_classification
  bace_tasks, (train_set, _, _), _ = load_bace_classification(
      featurizer=feat)
  tiny = MPNNModel(
      mode='classification',
      n_tasks=len(bace_tasks),
      node_out_feats=2,
      edge_hidden_feats=2,
      num_step_message_passing=1,
      num_step_set2set=1,
      num_layer_set2set=1)
  tiny.fit(train_set, nb_epoch=1)
@pytest.mark.torch
def test_mpnn_reload():
  """Train an MPNNModel, reload it from its checkpoint directory, and
  verify the reloaded model produces identical predictions."""
  # load datasets
  featurizer = MolGraphConvFeaturizer(use_edges=True)
  tasks, dataset, transformers, metric = get_dataset(
      'classification', featurizer=featurizer)

  # initialize models
  n_tasks = len(tasks)
  model_dir = tempfile.mkdtemp()
  model = MPNNModel(
      mode='classification',
      n_tasks=n_tasks,
      model_dir=model_dir,
      batch_size=10,
      learning_rate=0.001)

  # train until it overfits its own training data, checkpointing to model_dir
  model.fit(dataset, nb_epoch=200)
  scores = model.evaluate(dataset, [metric], transformers)
  assert scores['mean-roc_auc_score'] >= 0.80

  # a second model constructed with the same hyperparameters restores
  # the trained weights from the shared model_dir
  reloaded_model = MPNNModel(
      mode='classification',
      n_tasks=n_tasks,
      model_dir=model_dir,
      batch_size=10,
      learning_rate=0.001)
  reloaded_model.restore()

  # predictions on fresh molecules must match exactly after the reload
  pred_mols = ["CCCC", "CCCCCO", "CCCCC"]
  X_pred = featurizer(pred_mols)
  random_dataset = dc.data.NumpyDataset(X_pred)
  original_pred = model.predict(random_dataset)
  reload_pred = reloaded_model.predict(random_dataset)
  assert np.all(original_pred == reload_pred)
|
As days pass by, we see tablets, most commonly the iPad from Apple, finding more and more application in businesses such as restaurants etc. The uses of these tablets are seen to change the entire process of conducting transactions as well as receiving the feedback from customers. In today’s world driven by data, we are witnessing a significant evolution in the field of street retailing.
Nowadays it’s a common sight in a fancy restaurant when the waiter comes with the bill along with a tablet in his hand. The tablet is to take the customer review but it could be just as efficiently used to pay the bill too! We often see that when we pay a bill through our debit card, the guy at the store swaps it into a machine and gives us the bill! These machines are called POS! These machines are so commonly used that the customers take it for granted! However what we keep on forgetting is that these machines are extremely costly and require a lot of bucks to be set up in a store! With the help of cloud computing and tablets, a shop owner does not need to install these POS anymore! Check out the info graphic to see how else the iPad has been changing the small businesses!
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.volume.drivers.violin.vxg.vshare import igroup as IGROUP
from cinder.volume.drivers.violin.vxg.vshare import iscsi as ISCSI
from cinder.volume.drivers.violin.vxg.vshare import lun as LUN
from cinder.volume.drivers.violin.vxg.vshare import snapshot as SNAPSHOT
CLASS_NAMES = 'VShare'
"""
Adding new classes to this module:
All new classes should be added at the bottom of this file (you can't inherit
from a class that hasn't been defined yet). Keep the most up-to-date class
named "VShare". When adding a new VShare class, rename the current VShare
to "VShare_x", where x is +1 of the highest named class in this file. This
will typically be +1 of whatever class the old "VShare" class is inheriting
from).
Here's an example snippit of old code before updating:
class VShare(VShare_5):
def __init__(self, session):
super(VShare, self).__init__(session)
...
Here's what this would change to (two updates):
class VShare_6(VShare_5):
def __init__(self, session):
super(VShare_6, self).__init__(session)
...
"""
class VShare_1(object):
    """Base V-Share client (firmware 5.0.2).

    Wraps an authenticated session object and exposes per-feature
    manager objects; at this level only LUN management is available.
    """
    # firmware version(s) this client targets
    versions = '5.0.2'
    def __init__(self, session):
        # the low-level session used for all requests
        self.basic = session
        self.lun = LUN.LUNManager(self.basic)
        # convenience alias so callers can close the session directly
        self.close = self.basic.close
    def __del__(self):
        # best-effort close; the session may already be torn down
        # during interpreter shutdown
        try:
            self.basic.close()
        except Exception:
            pass
    @property
    def debug(self):
        # proxy the underlying session's debug flag
        return self.basic.debug
    @debug.setter
    def debug(self, value):
        self.basic.debug = value
    def __repr__(self):
        return '<%s host:%s user:%s proto:%s>' % (self.__class__.__name__,
                                                  self.basic.host,
                                                  self.basic.user,
                                                  self.basic.proto)
class VShare_2(VShare_1):
    """V-Share client for firmware 5.1.0: upgrades the LUN manager."""
    versions = '5.1.0'
    def __init__(self, session):
        super(VShare_2, self).__init__(session)
        # replaces the base LUN manager with the 5.1.0 variant
        self.lun = LUN.LUNManager_1(self.basic)
class VShare_3(VShare_2):
    """V-Share client for firmware 5.2.0: adds igroup and iSCSI managers."""
    versions = '5.2.0'
    def __init__(self, session):
        super(VShare_3, self).__init__(session)
        # replaces the 5.1.0 LUN manager with the 5.2.0 variant
        self.lun = LUN.LUNManager_2(self.basic)
        self.igroup = IGROUP.IGroupManager(self.basic)
        self.iscsi = ISCSI.ISCSIManager(self.basic)
class VShare(VShare_3):
    """Current V-Share client (firmware 6.0.0): adds snapshot support.

    Per the module note above, this class must keep the name "VShare";
    older revisions are preserved as VShare_1..VShare_3.
    """
    versions = '6.0.0'
    def __init__(self, session):
        super(VShare, self).__init__(session)
        # 6.0.0 variants of the LUN and igroup managers
        self.lun = LUN.LUNManager_3(self.basic)
        self.igroup = IGROUP.IGroupManager_1(self.basic)
        self.snapshot = SNAPSHOT.SnapshotManager(self.basic)
|
Sunny styles bring sunny smiles that last all week, so fill your heart and home with sunshine, love and flowers. Make sure all your colorful celebrations start with warm and uplifting blooms of sunshine from Petals and Treasures!
Having a hard time deciding on the perfect gift for dad? Petals and Treasures can help! Send dad an artistically crafted floral tribute to show him how much you care. Perhaps your dad is the type that would prefer a gift basket filled with his favorite snacks! Whether your dad lives in Dagsboro, or clear across the country, we've got you covered!
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os, sys
from pybot.adapter import Adapter
from pybot.message import TextMessage
# Maximum number of REPL lines kept in the on-disk history file;
# falls back to 1024 when HUBOT_SHELL_HISTSIZE is unset or empty.
history_size = int(os.environ.get('HUBOT_SHELL_HISTSIZE') or 1024)
class Shell(Adapter):
def send(self, envelope, *strings):
if sys.platform is not 'win32':
for string in strings:
print "\x1b[01;32m%s\x1b[0m" % string
else:
for string in strings:
print string
self.repl.prompt()
def emote(self, envelope, *strings):
self.send(envelope, [ "* %s" for string in strings ])
def reply(self, envelope, *strings):
for string in strings:
string = envelope.user.name + ': ' + string
seld.send(envelope, *strings)
def run(self):
history_file_path = ".hubot_history"
try:
f = open(history_file_path, 'r')
history_lines = [ l[:-1] for l in f.readlines()[:history_size] ]
f.close()
except IOError:
history_lines = []
self.emit('connected')
f = open(history_file_path, 'w')
while True:
line = raw_input('> ')
if len(history_lines) >= history_size:
history_lines.pop(0)
history_lines.append(line)
if line == 'exit' or line == 'quit':
self.robot.shutdown()
break
elif line == 'history':
for history in history_lines:
print history
else:
user_id = int(os.environ.get('HUBOT_SHELL_USER_ID') or '1')
user_name = os.environ.get('HUBOT_SHELL_USER_NAME') or 'Shell'
options = { 'name': user_name, 'room': 'Shell' }
user = self.robot.brain.user_for_id(user_id, options)
self.recieve(TextMessage(user, line, 'messageId'))
for line in history_lines:
f.write(line + '\n')
f.close()
sys.exit(0)
def close(self):
pass
|
Health officials have confirmed nine additional cases of the outbreak strain of Salmonella Virchow associated with consumption of RAW Meal Organic Shake & Meal products produced by Garden of Life, LLC, bringing the total number of confirmed cases to 27. The most recent illness started on March 13, 2016.
Nine new cases reported by state agencies were spread across eight states, five of which had not previously confirmed a case tied to the ongoing outbreak. Minnesota and Texas both reported one additional outbreak case. The new illnesses brought the total number of cases confirmed in Minnesota to 3, and Texas now has confirmed two cases of the outbreak strain of Salmonella Virchow.
The five following states, none of which had previously reported a case, each confirmed one patient with Salmonella that matched the outbreak strain DNA: Alabama, California, Hawaii, Michigan, and West Virginia.
On February 19th, the CDC released an update confirming 18 Salmonella illnesses associated with the outbreak. As of March 21st, a total of 27 people and 20 states were associated with the outbreak. The broad distribution of cases – 14 of the 20 states confirmed only one case of Salmonella Virchow matching the outbreak strain – reflects the widespread distribution of the product. Retail establishments in all 50 states sold RAW Meal Organic Shake & Meal products, the source of the outbreak that sickened individuals as young as under one year of age and as old as 84. Internet retailers also sold the Garden of Life products, further increasing the reach of the powdered meal substitute.
Minnesota, the only state reporting more than two illnesses, confirmed three cases of Salmonella Virchow that matched the DNA fingerprint of the outbreak strain. The list of the five other states confirming more than one outbreak case currently includes California, New Jersey, Texas and Utah. Each of these states reported two confirmed cases.
The following states, each associated with one confirmed case of Salmonella Virchow, account for the remaining sixteen cases linked to the outbreak: Alabama, Florida, Hawaii, Massachusetts, Maryland, Michigan, New Mexico, Ohio, Oklahoma, Oregon, Rhode Island, Tennessee, Wisconsin and West Virginia.
The most recent CDC outbreak update reported that five of the outbreak cases required hospitalization. Federal and state health officials confirmed that the outbreak was not associated with any fatalities as of March 25, 2016.
The CDC continues to advise retailers and consumers to check any RAW Meal Organic Shake & Meal products prior to consumption to confirm they do not appear on the recall list. “These recalled products have a long shelf life and may still be in people’s homes, and illnesses may continue to be reported,” the CDC said.
The outbreak strain has been linked to RAW Meal nutritional powders manufactured by West Palm Beach, company Garden of Life, LLC. The first case associated with the outbreak began on December 5th, 2015, while the date of onset for the most recent confirmed illness is March 13th of this year.
The company first issued a voluntarily recall of several lots of chocolate, original, vanilla and vanilla chai RAW Meal Organic Shake & Meal Products on January 29, 2016, and Garden of Life expanded the recall to include additional RAW Meal Organic products on February 12th.
When Garden of Life officials expanded the recall Feb. 12, they announced they had determined the cause of the contamination.
“After extensive testing of the product and its ingredients, and working in collaboration with the U.S. Food and Drug Administration, the manufacturer, suppliers and other third-party experts, we have now identified the likely source of Salmonella contamination to be organic moringa leaf powder from a supplier used only in Raw Meal,” according to the expanded recall notice.
Garden of Life removed organic moringa powder from its Raw Meal products and released new products to be available in stores and through online retailers .
According to the FDA, the investigation into the outbreak is ongoing, and the agency will continue to provide updates on the investigation as they become available.
If you or a family member became ill with a Salmonella infection after consuming RAW Meal products, you can contact the attorneys at Ron Simon & Associates for a free case evaluation. Ron Simon & Associates is one of the nation’s leading law firms representing victims of Salmonella and other foodborne illnesses.
|
# icbuild - a tool to ease building collections of source packages
# Copyright (C) 2001-2006 James Henstridge
# Copyright (C) 2007-2008 Frederic Peters
# Copyright (C) 2014 Canonical Limited
# Copyright (C) 2015 Ignacio Casal Quinteiro
#
# environment.py: environment variable setup
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import os
from icbuild.errors import FatalError, CommandError
def addpath(envvar, path, prepend=True):
    '''Add *path* to the os.pathsep-separated list held in env var *envvar*.

    The new entry goes to the front (default) or the back, and duplicate
    entries are collapsed, keeping the first occurrence. If the variable
    is unset it ends up containing just *path*.
    '''
    current = os.environ.get(envvar, path).split(os.pathsep)
    entries = [path] + current if prepend else current + [path]
    # drop duplicates, preserving the order of first occurrence
    deduped = []
    for entry in entries:
        if entry not in deduped:
            deduped.append(entry)
    os.environ[envvar] = os.pathsep.join(deduped)
def setup_env(config):
    '''set environment variables for using prefix'''
    # PATH: make the MSYS2 tools visible first on the search path.
    # addpath() prepends by default and strips duplicate entries.
    msys2bindir = os.path.join(config.msys2dir, 'bin')
    addpath('PATH', msys2bindir)
|
1. A North Country transfer, or, Abraham Newland alarm'd [graphic].
2. A Will o' the wisp, or, Iohn Bull in a bog [graphic] / IC.
3. A collection of hobgoblins [graphic] : respectfuly [sic] dedicated to the admirers of blue devils and vapours! / Woodward del. ; I.C. sclul.
7. A fancy sketch to the memory of Shakespeare [graphic] Woodward del. ; Cruikshanks scp.
8. A gown metamorphose'd into a ghost!! [graphic] / Woodward del. ; Cruikshanks s.
|
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2012 Andrew Colin Kissa <andrew@topdog.za.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"Cluster functions"
from beaker.cache import cache_region
from celery.exceptions import TimeoutError, QueueNotFound
from baruwa.model.meta import Session
from baruwa.model.settings import Server
from baruwa.tasks.status import systemstatus
@cache_region('system_status', 'cluster-status')
def cluster_status():
    "Check cluster status"
    hosts = Session.query(Server.hostname)\
            .filter(Server.enabled == True).all()
    if not hosts:
        return False
    # Skip the special 'default' entry; the cluster is healthy only when
    # every remaining enabled host reports healthy.
    return all(host_status(node.hostname)
               for node in hosts
               if node.hostname != 'default')
@cache_region('system_status', 'host-status')
def host_status(hostname):
    "Check host status"
    try:
        asyncresult = systemstatus.apply_async(queue=hostname)
        asyncresult.wait(30)
        stats = asyncresult.result
    except (TimeoutError, QueueNotFound):
        return False
    # load[0] (presumably the 1-minute load average — confirm against the
    # systemstatus task) above 15 counts as unhealthy.
    if stats['load'][0] > 15:
        return False
    # Every core service must be running; 0 means stopped.
    if any(stats[service] == 0 for service in ('mta', 'scanners', 'av')):
        return False
    # No partition may be at or above 95% usage.
    return not any(part['percent'] >= 95 for part in stats['partitions'])
|
LEARN academy, a San Diego coding academy and member of Union CoWork, has created a new scholarship fund for its upcoming November class starting on the 26th. The scholarship is available to members of Union CoWork as well as their friends and family. Its goal is to help our community and their networks take advantage of the opportunities we have available here at our school.
What does LEARN academy do?
Our professional instructors will help you dive into the world of web development where you will LEARN to create web applications from the ground up. We work with all skill levels.
During the last month of our program, all students are given an opportunity for an internship at a local San Diego tech company. This internship gets our students their first professional opportunity in the tech industry.
After graduation, our in-house Career Services Manager will work with you on all aspects of job assistance. From tailoring your resume, helping you with salary negotiations, and interview prep, we do everything we can to get you started in a new career.
What is Elixir and who uses it?
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import with_statement
import logging
import os
import pipes
import random
import shutil
import string
import subprocess
import sys
import tempfile
import time
import urllib2
import warnings
from optparse import OptionParser
from sys import stderr
import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType, EBSBlockDeviceType
from boto import ec2
DEFAULT_SPARK_VERSION = "1.1.0"
SPARK_EC2_DIR = os.path.dirname(os.path.realpath(__file__))
# cfr. https://issues.apache.org/jira/browse/SPARK-3821
MESOS_SPARK_EC2_BRANCH = "v4"
# A URL prefix from which to fetch AMI information
AMI_PREFIX = "https://raw.github.com/klbostee/spark-ec2/{b}/ami-list".format(b=MESOS_SPARK_EC2_BRANCH)
class UsageError(Exception):
    """Raised when the tool is invoked incorrectly or an operation fails in
    a way the user must fix (e.g. repeated SSH failures in ssh())."""
    pass
class TimeoutError(Exception):
    """Raised by wait_for_cluster_state() when the instances do not reach
    the requested state within the allowed number of attempts."""
    pass
# Configure and parse our command-line arguments
def parse_args():
    """Parse the command line.

    Returns an ``(opts, action, cluster_name)`` tuple.  Exits with status 1
    when the two positional arguments are missing/extra, or when no boto
    configuration or AWS credential environment variables can be found.
    """
    parser = OptionParser(
        usage="spark-ec2 [options] <action> <cluster_name>"
        + "\n\n<action> can be: launch, destroy, login, stop, start, get-master, reboot-slaves",
        add_help_option=False)
    parser.add_option(
        "-h", "--help", action="help",
        help="Show this help message and exit")
    parser.add_option(
        "-s", "--slaves", type="int", default=1,
        help="Number of slaves to launch (default: %default)")
    parser.add_option(
        "-w", "--wait", type="int",
        help="DEPRECATED (no longer necessary) - Seconds to wait for nodes to start")
    parser.add_option(
        "-k", "--key-pair",
        help="Key pair to use on instances")
    parser.add_option(
        "-i", "--identity-file",
        help="SSH private key file to use for logging into instances")
    parser.add_option(
        "-t", "--instance-type", default="m1.large",
        help="Type of instance to launch (default: %default). " +
             "WARNING: must be 64-bit; small instances won't work")
    parser.add_option(
        "-m", "--master-instance-type", default="m1.medium",
        help="Master instance type (leave empty for same as instance-type)")
    parser.add_option(
        "-r", "--region", default="us-east-1",
        help="EC2 region zone to launch instances in")
    parser.add_option(
        "-z", "--zone", default="",
        help="Availability zone to launch instances in, or 'all' to spread " +
             "slaves across multiple (an additional $0.01/Gb for bandwidth" +
             "between zones applies)")
    parser.add_option("-a", "--ami", help="Amazon Machine Image ID to use")
    parser.add_option(
        "-v", "--spark-version", default=DEFAULT_SPARK_VERSION,
        help="Version of Spark to use: 'X.Y.Z' or a specific git hash (default: %default)")
    parser.add_option(
        "-p", "--python", default="python2.7",
        help="Python executable to use for PySpark (default: %default)")
    parser.add_option(
        "--spark-git-repo",
        default="https://github.com/apache/spark",
        help="Github repo from which to checkout supplied commit hash")
    parser.add_option(
        "--hadoop-major-version", default="2",
        help="Major version of Hadoop (default: %default)")
    parser.add_option(
        "-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
        help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
             "the given local address (for use with login)")
    parser.add_option(
        "--resume", action="store_true", default=False,
        help="Resume installation on a previously launched cluster " +
             "(for debugging)")
    parser.add_option(
        "--ebs-vol-size", metavar="SIZE", type="int", default=0,
        help="Size (in GB) of each EBS volume.")
    parser.add_option(
        "--ebs-vol-type", default="standard",
        help="EBS volume type (e.g. 'gp2', 'standard').")
    parser.add_option(
        "--ebs-vol-num", type="int", default=1,
        help="Number of EBS volumes to attach to each node as /vol[x]. " +
             "The volumes will be deleted when the instances terminate. " +
             "Only possible on EBS-backed AMIs. " +
             "EBS volumes are only attached if --ebs-vol-size > 0." +
             "Only support up to 8 EBS volumes.")
    parser.add_option(
        "--swap", metavar="SWAP", type="int", default=1024,
        help="Swap space to set up per node, in MB (default: %default)")
    parser.add_option(
        "--spot-price", metavar="PRICE", type="float",
        help="If specified, launch slaves as spot instances with the given " +
             "maximum price (in dollars)")
    parser.add_option(
        "--ganglia", action="store_true", default=True,
        help="Setup Ganglia monitoring on cluster (default: %default). NOTE: " +
             "the Ganglia page will be publicly accessible")
    parser.add_option(
        "--no-ganglia", action="store_false", dest="ganglia",
        help="Disable Ganglia monitoring for the cluster")
    parser.add_option(
        "-u", "--user", default="root",
        help="The SSH user you want to connect as (default: %default)")
    parser.add_option(
        "--delete-groups", action="store_true", default=False,
        help="When destroying a cluster, delete the security groups that were created.")
    parser.add_option(
        "--use-existing-master", action="store_true", default=False,
        help="Launch fresh slaves, but use an existing stopped master if possible")
    parser.add_option(
        "--worker-instances", type="int", default=1,
        help="Number of instances per worker: variable SPARK_WORKER_INSTANCES (default: %default)")
    parser.add_option(
        "--master-opts", type="string", default="",
        help="Extra options to give to master through SPARK_MASTER_OPTS variable " +
             "(e.g -Dspark.worker.timeout=180)")
    parser.add_option(
        "--user-data", type="string", default="",
        help="Path to a user-data file (most AMI's interpret this as an initialization script)")
    parser.add_option(
        "--security-group-prefix", type="string", default=None,
        help="Use this prefix for the security group rather than the cluster name.")
    parser.add_option(
        "--authorized-address", type="string", default="0.0.0.0/0",
        help="Address to authorize on created security groups (default: %default)")
    parser.add_option(
        "--additional-security-group", type="string", default="",
        help="Additional security group to place the machines in")
    parser.add_option(
        "--copy-aws-credentials", action="store_true", default=False,
        help="Add AWS credentials to hadoop configuration to allow Spark to access S3")
    (opts, args) = parser.parse_args()
    # Exactly two positional arguments are required: <action> <cluster_name>.
    if len(args) != 2:
        parser.print_help()
        sys.exit(1)
    (action, cluster_name) = args
    # Boto config check
    # http://boto.cloudhackers.com/en/latest/boto_config_tut.html
    # Fail fast when neither ~/.boto, /etc/boto.cfg, nor the AWS_* env vars
    # are present, since every action needs EC2 credentials.
    home_dir = os.getenv('HOME')
    if home_dir is None or not os.path.isfile(home_dir + '/.boto'):
        if not os.path.isfile('/etc/boto.cfg'):
            if os.getenv('AWS_ACCESS_KEY_ID') is None:
                print >> stderr, ("ERROR: The environment variable AWS_ACCESS_KEY_ID " +
                                  "must be set")
                sys.exit(1)
            if os.getenv('AWS_SECRET_ACCESS_KEY') is None:
                print >> stderr, ("ERROR: The environment variable AWS_SECRET_ACCESS_KEY " +
                                  "must be set")
                sys.exit(1)
    return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name):
    """Return the EC2 security group called *name*, creating it (with a
    generic description) when it does not exist yet."""
    groups = conn.get_all_security_groups()
    group = [g for g in groups if g.name == name]
    if len(group) > 0:
        return group[0]
    else:
        print "Creating security group " + name
        return conn.create_security_group(name, "Spark EC2 group")
# Check whether a given EC2 instance object is in a state we consider active,
# i.e. not terminating or terminated. We count both stopping and stopped as
# active since we can restart stopped clusters.
def is_active(instance):
    """Return True when *instance* is in a non-terminating state.

    Stopping/stopped instances count as active because a stopped cluster
    can be restarted.
    """
    active_states = ('pending', 'running', 'stopping', 'stopped')
    return instance.state in active_states
# Return correct versions of Spark and Shark, given the supplied Spark version
def get_spark_shark_version(opts):
    """Map the requested Spark version to its matching Shark version.

    Returns a ``(spark_version, shark_version)`` tuple; exits with an
    error for Spark versions this script does not know about.  A leading
    "v" in the requested version (e.g. "v1.1.0") is stripped first.
    """
    spark_shark_map = {
        "0.7.3": "0.7.1",
        "0.8.0": "0.8.0",
        "0.8.1": "0.8.1",
        "0.9.0": "0.9.0",
        "0.9.1": "0.9.1",
        "1.0.0": "1.0.0",
        "1.0.1": "1.0.1",
        "1.0.2": "1.0.2",
        "1.1.0": "1.1.0",
    }
    version = opts.spark_version.replace("v", "")
    if version not in spark_shark_map:
        print >> stderr, "Don't know about Spark version: %s" % version
        sys.exit(1)
    return (version, spark_shark_map[version])
# Attempt to resolve an appropriate AMI given the architecture and region of the request.
# Source: http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
# Last Updated: 2015-06-19
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
def get_spark_ami(opts):
    """Resolve the AMI ID for the configured region and instance type.

    Looks up the virtualization type (pvm/hvm) for ``opts.instance_type``,
    then fetches the AMI ID from the AMI list repository at AMI_PREFIX.
    Unknown instance types fall back to "pvm" with a warning; a fetch
    failure exits with an error.
    """
    instance_types = {
        "c1.medium": "pvm",
        "c1.xlarge": "pvm",
        "c3.large": "pvm",
        "c3.xlarge": "pvm",
        "c3.2xlarge": "pvm",
        "c3.4xlarge": "pvm",
        "c3.8xlarge": "pvm",
        "c4.large": "hvm",
        "c4.xlarge": "hvm",
        "c4.2xlarge": "hvm",
        "c4.4xlarge": "hvm",
        "c4.8xlarge": "hvm",
        "cc1.4xlarge": "hvm",
        "cc2.8xlarge": "hvm",
        "cg1.4xlarge": "hvm",
        "cr1.8xlarge": "hvm",
        "d2.xlarge": "hvm",
        "d2.2xlarge": "hvm",
        "d2.4xlarge": "hvm",
        "d2.8xlarge": "hvm",
        "g2.2xlarge": "hvm",
        "g2.8xlarge": "hvm",
        "hi1.4xlarge": "pvm",
        "hs1.8xlarge": "pvm",
        "i2.xlarge": "hvm",
        "i2.2xlarge": "hvm",
        "i2.4xlarge": "hvm",
        "i2.8xlarge": "hvm",
        "m1.small": "pvm",
        "m1.medium": "pvm",
        "m1.large": "pvm",
        "m1.xlarge": "pvm",
        "m2.xlarge": "pvm",
        "m2.2xlarge": "pvm",
        "m2.4xlarge": "pvm",
        "m3.medium": "hvm",
        "m3.large": "hvm",
        "m3.xlarge": "hvm",
        "m3.2xlarge": "hvm",
        "m4.large": "hvm",
        "m4.xlarge": "hvm",
        "m4.2xlarge": "hvm",
        "m4.4xlarge": "hvm",
        "m4.10xlarge": "hvm",
        "r3.large": "hvm",
        "r3.xlarge": "hvm",
        "r3.2xlarge": "hvm",
        "r3.4xlarge": "hvm",
        "r3.8xlarge": "hvm",
        "t1.micro": "pvm",
        "t2.micro": "hvm",
        "t2.small": "hvm",
        "t2.medium": "hvm",
        "t2.large": "hvm",
    }
    if opts.instance_type in instance_types:
        instance_type = instance_types[opts.instance_type]
    else:
        instance_type = "pvm"
        print >> stderr,\
            "Don't recognize %s, assuming type is pvm" % opts.instance_type
    ami_path = "%s/%s/%s" % (AMI_PREFIX, opts.region, instance_type)
    # NOTE(review): bare except below also swallows KeyboardInterrupt etc.
    try:
        ami = urllib2.urlopen(ami_path).read().strip()
        print "Spark AMI: " + ami
    except:
        print >> stderr, "Could not resolve AMI at: " + ami_path
        sys.exit(1)
    return ami
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master and slaves
# Fails if there already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
    """Launch a cluster named *cluster_name*.

    Creates/configures the master and slave security groups, launches the
    slaves (spot or on-demand), launches or resumes the master, tags every
    instance, and returns a ``(master_nodes, slave_nodes)`` tuple of boto
    instance objects.  Exits when instances for the name already exist.
    """
    if opts.identity_file is None:
        print >> stderr, "ERROR: Must provide an identity file (-i) for ssh connections."
        sys.exit(1)
    if opts.key_pair is None:
        print >> stderr, "ERROR: Must provide a key pair name (-k) to use on instances."
        sys.exit(1)
    user_data_content = None
    if opts.user_data:
        with open(opts.user_data) as user_data_file:
            user_data_content = user_data_file.read()
    print "Setting up security groups..."
    if opts.security_group_prefix is None:
        master_group = get_or_make_group(conn, cluster_name + "-master")
        slave_group = get_or_make_group(conn, cluster_name + "-slaves")
    else:
        master_group = get_or_make_group(conn, opts.security_group_prefix + "-master")
        slave_group = get_or_make_group(conn, opts.security_group_prefix + "-slaves")
    authorized_address = opts.authorized_address
    # Only seed rules into freshly created (empty) groups so re-runs do not
    # fail on duplicate authorizations.
    if master_group.rules == []: # Group was just now created
        master_group.authorize(src_group=master_group)
        master_group.authorize(src_group=slave_group)
        master_group.authorize('tcp', 22, 22, authorized_address)
        master_group.authorize('tcp', 8080, 8081, authorized_address)
        master_group.authorize('tcp', 18080, 18080, authorized_address)
        master_group.authorize('tcp', 19999, 19999, authorized_address)
        master_group.authorize('tcp', 50030, 50030, authorized_address)
        master_group.authorize('tcp', 50070, 50070, authorized_address)
        master_group.authorize('tcp', 60070, 60070, authorized_address)
        master_group.authorize('tcp', 4040, 4045, authorized_address)
        if opts.ganglia:
            master_group.authorize('tcp', 5080, 5080, authorized_address)
    if slave_group.rules == []: # Group was just now created
        slave_group.authorize(src_group=master_group)
        slave_group.authorize(src_group=slave_group)
        slave_group.authorize('tcp', 22, 22, authorized_address)
        slave_group.authorize('tcp', 8080, 8081, authorized_address)
        slave_group.authorize('tcp', 50060, 50060, authorized_address)
        slave_group.authorize('tcp', 50075, 50075, authorized_address)
        slave_group.authorize('tcp', 60060, 60060, authorized_address)
        slave_group.authorize('tcp', 60075, 60075, authorized_address)
    # Check if instances are already running with the cluster name
    existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
                                                             die_on_error=False)
    if existing_slaves or (existing_masters and not opts.use_existing_master):
        print >> stderr, ("ERROR: There are already instances for name: %s " % cluster_name)
        sys.exit(1)
    # Figure out Spark AMI
    if opts.ami is None:
        opts.ami = get_spark_ami(opts)
    additional_groups = []
    if opts.additional_security_group:
        additional_groups = [sg
                             for sg in conn.get_all_security_groups()
                             if opts.additional_security_group in (sg.name, sg.id)]
    print "Launching instances..."
    try:
        image = conn.get_all_images(image_ids=[opts.ami])[0]
    except:
        print >> stderr, "Could not find AMI " + opts.ami
        sys.exit(1)
    # Create block device mapping so that we can add EBS volumes if asked to.
    # The first drive is attached as /dev/sds, 2nd as /dev/sdt, ... /dev/sdz
    block_map = BlockDeviceMapping()
    if opts.ebs_vol_size > 0:
        for i in range(opts.ebs_vol_num):
            device = EBSBlockDeviceType()
            device.size = opts.ebs_vol_size
            device.volume_type = opts.ebs_vol_type
            device.delete_on_termination = True
            block_map["/dev/sd" + chr(ord('s') + i)] = device
    # AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
    if opts.instance_type.startswith('m3.'):
        for i in range(get_num_disks(opts.instance_type)):
            dev = BlockDeviceType()
            dev.ephemeral_name = 'ephemeral%d' % i
            # The first ephemeral drive is /dev/sdb.
            name = '/dev/sd' + string.letters[i + 1]
            block_map[name] = dev
    # Launch slaves
    if opts.spot_price is not None:
        # Launch spot instances with the requested price
        print ("Requesting %d slaves as spot instances with price $%.3f" %
               (opts.slaves, opts.slaves and opts.spot_price))
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        my_req_ids = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            slave_reqs = conn.request_spot_instances(
                price=opts.spot_price,
                image_id=opts.ami,
                launch_group="launch-group-%s" % cluster_name,
                placement=zone,
                count=num_slaves_this_zone,
                key_name=opts.key_pair,
                security_groups=[slave_group] + additional_groups,
                instance_type=opts.instance_type,
                block_device_map=block_map,
                user_data=user_data_content)
            my_req_ids += [req.id for req in slave_reqs]
            i += 1
        print "Waiting for spot instances to be granted..."
        try:
            # Poll every 10s until every request is active, then collect
            # the granted instances.
            while True:
                time.sleep(10)
                reqs = conn.get_all_spot_instance_requests()
                id_to_req = {}
                for r in reqs:
                    id_to_req[r.id] = r
                active_instance_ids = []
                outstanding_request_ids = []
                for i in my_req_ids:
                    if i in id_to_req:
                        if id_to_req[i].state == "active":
                            active_instance_ids.append(id_to_req[i].instance_id)
                        else:
                            outstanding_request_ids.append(i)
                if len(active_instance_ids) == opts.slaves:
                    print "All %d slaves granted" % opts.slaves
                    reservations = conn.get_all_instances(active_instance_ids)
                    slave_nodes = []
                    for r in reservations:
                        slave_nodes += r.instances
                    break
                else:
                    print "%d of %d slaves granted, waiting longer for request ids including %s" % (
                        len(active_instance_ids), opts.slaves, outstanding_request_ids[0:10])
        # NOTE(review): bare except — any interruption (incl. Ctrl-C) cancels
        # the spot requests and exits with status 0.
        except:
            print "Canceling spot instance requests"
            conn.cancel_spot_instance_requests(my_req_ids)
            # Log a warning if any of these requests actually launched instances:
            (master_nodes, slave_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            running = len(master_nodes) + len(slave_nodes)
            if running:
                print >> stderr, ("WARNING: %d instances are still running" % running)
            sys.exit(0)
    else:
        # Launch non-spot instances
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        slave_nodes = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            if num_slaves_this_zone > 0:
                slave_res = image.run(key_name=opts.key_pair,
                                      security_groups=[slave_group] + additional_groups,
                                      instance_type=opts.instance_type,
                                      placement=zone,
                                      min_count=num_slaves_this_zone,
                                      max_count=num_slaves_this_zone,
                                      block_device_map=block_map,
                                      user_data=user_data_content)
                slave_nodes += slave_res.instances
                print "Launched %d slaves in %s, regid = %s" % (num_slaves_this_zone,
                                                                zone, slave_res.id)
            i += 1
    # Launch or resume masters
    if existing_masters:
        print "Starting master..."
        for inst in existing_masters:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        master_nodes = existing_masters
    else:
        master_type = opts.master_instance_type
        if master_type == "":
            master_type = opts.instance_type
        if opts.zone == 'all':
            opts.zone = random.choice(conn.get_all_zones()).name
        master_res = image.run(key_name=opts.key_pair,
                               security_groups=[master_group] + additional_groups,
                               instance_type=master_type,
                               placement=opts.zone,
                               min_count=1,
                               max_count=1,
                               block_device_map=block_map,
                               user_data=user_data_content)
        master_nodes = master_res.instances
        # NOTE(review): 'zone' here is the leftover loop variable from the
        # slave launch, not the master's placement — likely should be opts.zone.
        print "Launched master in %s, regid = %s" % (zone, master_res.id)
    # Give the instances descriptive names
    for master in master_nodes:
        name = '{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id)
        tag_instance(master, name)
    for slave in slave_nodes:
        name = '{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id)
        tag_instance(slave, name)
    # Return all the instances
    return (master_nodes, slave_nodes)
def tag_instance(instance, name):
    """Tag *instance* with Name=*name*, retrying up to 5 times.

    EC2 tagging can fail transiently right after launch, so each failed
    attempt sleeps 5 seconds before retrying.  Raises an Exception after
    the fifth consecutive failure.
    """
    for i in range(5):
        try:
            instance.add_tag(key='Name', value=name)
            break
        except Exception:
            print("Failed attempt %i of 5 to tag %s" % ((i + 1), name))
            # Original code checked `i == 5`, which range(0, 5) never produces,
            # and raised a string (a TypeError in Python >= 2.6).  Give up on
            # the last attempt (i == 4) with a real exception instead.
            if i == 4:
                raise Exception("Error - failed max attempts to add name tag")
            time.sleep(5)
# Get the EC2 instances in an existing cluster if available.
# Returns a tuple of lists of EC2 instance objects for the masters and slaves
def get_existing_cluster(conn, opts, cluster_name, die_on_error=True):
    """Find the instances of an existing cluster named *cluster_name*.

    Returns a ``(master_nodes, slave_nodes)`` tuple of active boto instance
    objects, classified by their ``Name`` tag prefix.  Untagged instances
    behind active spot requests are re-tagged first.  When nothing (or no
    master) is found and *die_on_error* is true, prints an error and exits.
    """
    print "Searching for existing cluster " + cluster_name + "..."
    # Search all the spot instance requests, and copy any tags from the spot
    # instance request to the cluster.
    spot_instance_requests = conn.get_all_spot_instance_requests()
    for req in spot_instance_requests:
        if req.state != u'active':
            continue
        name = req.tags.get(u'Name', "")
        if name.startswith(cluster_name):
            reservations = conn.get_all_instances(instance_ids=[req.instance_id])
            for res in reservations:
                active = [i for i in res.instances if is_active(i)]
                for instance in active:
                    if instance.tags.get(u'Name') is None:
                        tag_instance(instance, name)
    # Now proceed to detect master and slaves instances.
    reservations = conn.get_all_instances()
    master_nodes = []
    slave_nodes = []
    for res in reservations:
        active = [i for i in res.instances if is_active(i)]
        for inst in active:
            name = inst.tags.get(u'Name', "")
            if name.startswith(cluster_name + "-master"):
                master_nodes.append(inst)
            elif name.startswith(cluster_name + "-slave"):
                slave_nodes.append(inst)
    if any((master_nodes, slave_nodes)):
        print "Found %d master(s), %d slaves" % (len(master_nodes), len(slave_nodes))
    if master_nodes != [] or not die_on_error:
        return (master_nodes, slave_nodes)
    else:
        # NOTE(review): the message below contains a typo ("in with name").
        if master_nodes == [] and slave_nodes != []:
            print >> sys.stderr, "ERROR: Could not find master in with name " + \
                cluster_name + "-master"
        else:
            print >> sys.stderr, "ERROR: Could not find any existing cluster"
        sys.exit(1)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster.
def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
    """Configure a freshly launched/started cluster.

    Optionally generates an SSH key on the master and distributes it to the
    slaves, chooses the setup modules, clones the spark-ec2 scripts on the
    master, deploys templated config files, and runs the setup script.
    """
    master = master_nodes[0].public_dns_name
    if deploy_ssh_key:
        print "Generating cluster's SSH key on master..."
        # Create the key only if one is not already present, then append it
        # to authorized_keys so the master can SSH to itself/slaves.
        key_setup = """
          [ -f ~/.ssh/id_rsa ] ||
            (ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
             cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
        """
        ssh(master, opts, key_setup)
        # Copy the whole ~/.ssh directory to each slave as a tar stream.
        dot_ssh_tar = ssh_read(master, opts, ['tar', 'c', '.ssh'])
        print "Transferring cluster's SSH key to slaves..."
        for slave in slave_nodes:
            print slave.public_dns_name
            ssh_write(slave.public_dns_name, opts, ['tar', 'x'], dot_ssh_tar)
    modules = ['cleanup', 'python27', 'spark', 'shark', 'ephemeral-hdfs', 'mapreduce', 'spark-standalone']
    if opts.ebs_vol_size > 0:
        modules.append('persistent-hdfs')
    if opts.hadoop_major_version == "1":
        modules = filter(lambda x: x != "mapreduce", modules)
    if opts.ganglia:
        modules.append('ganglia')
    # NOTE: We should clone the repository before running deploy_files to
    # prevent ec2-variables.sh from being overwritten
    ssh(
        host=master,
        opts=opts,
        command="rm -rf spark-ec2"
        + " && "
        + "git clone https://github.com/klbostee/spark-ec2.git -b {b}".format(b=MESOS_SPARK_EC2_BRANCH)
    )
    print "Deploying files to master..."
    deploy_files(
        conn=conn,
        root_dir=SPARK_EC2_DIR + "/" + "deploy.generic",
        opts=opts,
        master_nodes=master_nodes,
        slave_nodes=slave_nodes,
        modules=modules
    )
    print "Running setup on master..."
    setup_spark_cluster(master, opts)
    print "Done!"
def setup_standalone_cluster(master, slave_nodes, opts):
    """Write the slaves file on the master and start the standalone daemons."""
    addresses = [node.public_dns_name for node in slave_nodes]
    ssh(master, opts, "echo \"%s\" > spark/conf/slaves" % ('\n'.join(addresses)))
    ssh(master, opts, "/root/spark/sbin/start-all.sh")
def setup_spark_cluster(master, opts):
    """Run the cloned spark-ec2 setup script on the master and report the
    web UI URLs."""
    ssh(master, opts, "chmod u+x spark-ec2/setup.sh")
    ssh(master, opts, "spark-ec2/setup.sh")
    print "Spark standalone cluster started at http://%s:8080" % master
    if opts.ganglia:
        print "Ganglia started at http://%s:5080/ganglia" % master
def is_ssh_available(host, opts):
    "Checks if SSH is available on the host."
    # Run a trivial remote command with a short connect timeout; a non-zero
    # exit (raised as CalledProcessError) means SSH is not ready yet.
    probe = ssh_command(opts) + ['-t', '-t', '-o', 'ConnectTimeout=3',
                                 '%s@%s' % (opts.user, host),
                                 stringify_command('true')]
    try:
        with open(os.devnull, 'w') as devnull:
            return subprocess.check_call(probe, stdout=devnull, stderr=devnull) == 0
    except subprocess.CalledProcessError:
        return False
def is_cluster_ssh_available(cluster_instances, opts):
    """Return True only when every instance in the cluster accepts SSH."""
    return all(is_ssh_available(host=inst.ip_address, opts=opts)
               for inst in cluster_instances)
def wait_for_cluster_state(cluster_instances, cluster_state, opts, max_attempts=20):
    """Block until every instance reaches *cluster_state*.

    cluster_instances: a list of boto.ec2.instance.Instance
    cluster_state: 'ssh-ready' (all instances 'running' AND reachable over
        SSH) or a valid boto.ec2.instance.InstanceState value such as
        'running', 'terminated', etc.
        (would be nice to replace this with a proper enum: http://stackoverflow.com/a/1695250)

    Polls with a linearly growing sleep (3 * attempt seconds) and raises
    TimeoutError after *max_attempts* unsuccessful attempts.
    """
    sys.stdout.write(
        "Waiting for all instances in cluster to enter '{s}' state.".format(s=cluster_state)
    )
    sys.stdout.flush()
    num_attempts = 0
    while num_attempts <= max_attempts:
        time.sleep(3 * num_attempts)
        # Refresh each instance's cached state from EC2.
        for i in cluster_instances:
            s = i.update()  # capture output to suppress print to screen in newer versions of boto
        if cluster_state == 'ssh-ready':
            if all(i.state == 'running' for i in cluster_instances) and \
               is_cluster_ssh_available(cluster_instances, opts):
                break
        else:
            if all(i.state == cluster_state for i in cluster_instances):
                break
        num_attempts += 1
        sys.stdout.write(".")
        sys.stdout.flush()
    else:
        # while/else: this branch runs only when the loop exhausted all
        # attempts without hitting a `break` above.
        raise TimeoutError("Not able to SSH to instances even after {0} attempts.".format(num_attempts))
    sys.stdout.write("\n")
# Get number of local disks available for a given EC2 instance type.
def get_num_disks(instance_type):
    """Return the number of local (instance-store) disks for *instance_type*,
    falling back to 1 with a warning for unknown types."""
    # Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html
    # Last Updated: 2014-06-20
    # For easy maintainability, please keep this manually-inputted dictionary sorted by key.
    disks_by_instance = {
        "c1.medium": 1,
        "c1.xlarge": 4,
        "c3.2xlarge": 2,
        "c3.4xlarge": 2,
        "c3.8xlarge": 2,
        "c3.large": 2,
        "c3.xlarge": 2,
        "cc1.4xlarge": 2,
        "cc2.8xlarge": 4,
        "cg1.4xlarge": 2,
        "cr1.8xlarge": 2,
        "g2.2xlarge": 1,
        "hi1.4xlarge": 2,
        "hs1.8xlarge": 24,
        "i2.2xlarge": 2,
        "i2.4xlarge": 4,
        "i2.8xlarge": 8,
        "i2.xlarge": 1,
        "m1.large": 2,
        "m1.medium": 1,
        "m1.small": 1,
        "m1.xlarge": 4,
        "m2.2xlarge": 1,
        "m2.4xlarge": 2,
        "m2.xlarge": 1,
        "m3.2xlarge": 2,
        "m3.large": 1,
        "m3.medium": 1,
        "m3.xlarge": 2,
        "r3.2xlarge": 1,
        "r3.4xlarge": 1,
        "r3.8xlarge": 2,
        "r3.large": 1,
        "r3.xlarge": 1,
        "t1.micro": 0,
    }
    if instance_type in disks_by_instance:
        return disks_by_instance[instance_type]
    else:
        print >> stderr, ("WARNING: Don't know number of disks on instance type %s; assuming 1"
                          % instance_type)
        return 1
# Deploy the configuration file templates in a given local directory to
# a cluster, filling in any template parameters with information about the
# cluster (e.g. lists of masters and slaves). Files are only deployed to
# the first master instance in the cluster, and we expect the setup
# script to be run on that instance to copy them to other nodes.
#
# root_dir should be an absolute path to the directory with the files we want to deploy.
def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, modules):
    """Render the config templates under *root_dir* and rsync them to the
    active master.

    Template files use ``{{key}}`` placeholders, substituted from cluster
    facts (master/slave lists, data dirs, versions, module list, etc.).
    The rendered tree is staged in a temp directory, rsynced to ``/`` on
    the master, then the temp directory is removed.
    """
    active_master = master_nodes[0].public_dns_name
    num_disks = get_num_disks(opts.instance_type)
    hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
    mapred_local_dirs = "/mnt/hadoop/mrlocal"
    spark_local_dirs = "/mnt/spark"
    # With several local disks, spread data dirs across /mnt2, /mnt3, ...
    if num_disks > 1:
        for i in range(2, num_disks + 1):
            hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
            mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
            spark_local_dirs += ",/mnt%d/spark" % i
    cluster_url = "%s:7077" % active_master
    if "." in opts.spark_version:
        # Pre-built spark & shark deploy
        (spark_v, shark_v) = get_spark_shark_version(opts)
    else:
        # Spark-only custom deploy
        spark_v = "%s|%s" % (opts.spark_git_repo, opts.spark_version)
        shark_v = ""
        modules = filter(lambda x: x != "shark", modules)
    template_vars = {
        "master_list": '\n'.join([i.public_dns_name for i in master_nodes]),
        "active_master": active_master,
        "slave_list": '\n'.join([i.public_dns_name for i in slave_nodes]),
        "cluster_url": cluster_url,
        "hdfs_data_dirs": hdfs_data_dirs,
        "mapred_local_dirs": mapred_local_dirs,
        "spark_local_dirs": spark_local_dirs,
        "swap": str(opts.swap),
        "modules": '\n'.join(modules),
        "spark_version": spark_v,
        "shark_version": shark_v,
        "hadoop_major_version": opts.hadoop_major_version,
        "spark_worker_instances": "%d" % opts.worker_instances,
        "spark_master_opts": opts.master_opts,
        "pyspark_python": opts.python
    }
    if opts.copy_aws_credentials:
        template_vars["aws_access_key_id"] = conn.aws_access_key_id
        template_vars["aws_secret_access_key"] = conn.aws_secret_access_key
    else:
        template_vars["aws_access_key_id"] = ""
        template_vars["aws_secret_access_key"] = ""
    # Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
    tmp_dir = tempfile.mkdtemp()
    for path, dirs, files in os.walk(root_dir):
        if path.find(".svn") == -1:
            dest_dir = os.path.join('/', path[len(root_dir):])
            local_dir = tmp_dir + dest_dir
            if not os.path.exists(local_dir):
                os.makedirs(local_dir)
            for filename in files:
                # Skip editor backups and hidden/control files (#, ., ~).
                if filename[0] not in '#.~' and filename[-1] != '~':
                    dest_file = os.path.join(dest_dir, filename)
                    local_file = tmp_dir + dest_file
                    with open(os.path.join(path, filename)) as src:
                        with open(local_file, "w") as dest:
                            text = src.read()
                            for key in template_vars:
                                text = text.replace("{{" + key + "}}", template_vars[key])
                            dest.write(text)
                            # NOTE(review): redundant — the with-block closes
                            # the file anyway.
                            dest.close()
    # rsync the whole directory over to the master machine
    command = [
        'rsync', '-rv',
        '-e', stringify_command(ssh_command(opts)),
        "%s/" % tmp_dir,
        "%s@%s:/" % (opts.user, active_master)
    ]
    subprocess.check_call(command)
    # Remove the temp directory we created above
    shutil.rmtree(tmp_dir)
def stringify_command(parts):
    """Return *parts* as a single shell command string.

    A string is returned unchanged; a sequence is shell-quoted
    element-by-element and joined with spaces.
    """
    if isinstance(parts, str):
        return parts
    return ' '.join(pipes.quote(part) for part in parts)
def ssh_args(opts):
    """Common ssh flags: disable host-key checking and, when configured,
    select the identity file."""
    flags = ['-o', 'StrictHostKeyChecking=no',
             '-o', 'UserKnownHostsFile=/dev/null']
    if opts.identity_file is not None:
        flags.extend(['-i', opts.identity_file])
    return flags
def ssh_command(opts):
    """Full ssh argv prefix: the ssh binary followed by the common flags."""
    command = ['ssh']
    command.extend(ssh_args(opts))
    return command
# Run a command on a host through ssh, retrying up to five times
# and then throwing an exception if ssh continues to fail.
def ssh(host, opts, command):
    """Run `command` on `host` over ssh as opts.user, retrying every 30
    seconds until the attempt counter passes five.

    Raises UsageError (with a hint about --identity-file/--key-pair) when
    ssh itself keeps failing (exit status 255); re-raises the original
    CalledProcessError for any other persistent failure.
    """
    tries = 0
    while True:
        try:
            return subprocess.check_call(
                ssh_command(opts) + ['-t', '-t', '%s@%s' % (opts.user, host),
                                     stringify_command(command)])
        except subprocess.CalledProcessError as e:
            if tries > 5:
                # If this was an ssh failure, provide the user with hints.
                if e.returncode == 255:
                    # BUGFIX: the original code concatenated three string
                    # literals with `+` but called .format(host) on only the
                    # last one, so the {0} placeholder was never substituted.
                    # Adjacent literals now merge into one string before
                    # .format() runs.
                    raise UsageError(
                        "Failed to SSH to remote host {0}.\n"
                        "Please check that you have provided the correct --identity-file and "
                        "--key-pair parameters and try again.".format(host))
                else:
                    raise e
            print >> stderr, \
                "Error executing remote command, retrying after 30 seconds: {0}".format(e)
            time.sleep(30)
            tries = tries + 1
# Backported from Python 2.7 for compatiblity with 2.6 (See SPARK-1990)
def _check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def ssh_read(host, opts, command):
    """Run `command` on `host` over ssh and return its captured stdout."""
    remote = ['%s@%s' % (opts.user, host), stringify_command(command)]
    return _check_output(ssh_command(opts) + remote)
def ssh_write(host, opts, command, arguments):
    """Run `command` on `host` over ssh, feeding `arguments` to its stdin.

    Retries every 30 seconds while the remote command exits non-zero;
    raises RuntimeError once the attempt counter passes five.
    """
    tries = 0
    while True:
        proc = subprocess.Popen(
            ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)],
            stdin=subprocess.PIPE)
        proc.stdin.write(arguments)
        proc.stdin.close()
        status = proc.wait()
        if status == 0:
            break
        if tries > 5:
            raise RuntimeError("ssh_write failed with error %s" % proc.returncode)
        print >> stderr, \
            "Error {0} while executing remote command, retrying after 30 seconds".format(status)
        time.sleep(30)
        tries += 1
# Gets a list of zones to launch instances in
def get_zones(conn, opts):
    """Return the availability-zone names to use: every zone in the region
    when opts.zone is 'all', otherwise just the single requested zone."""
    if opts.zone == 'all':
        return [zone.name for zone in conn.get_all_zones()]
    return [opts.zone]
# Gets the number of items in a partition
def get_partition(total, num_partitions, current_partitions):
    """Return how many of `total` items belong in the partition with index
    `current_partitions`, spreading the remainder of total/num_partitions
    one-per-partition across the first partitions.

    BUGFIX: uses floor division (`//`) so the count stays an integer on
    both Python 2 and Python 3 -- the original `/` yields a float under
    Python 3, which then propagates into instance counts.
    """
    num_slaves_this_zone = total // num_partitions
    if (total % num_partitions) - current_partitions > 0:
        num_slaves_this_zone += 1
    return num_slaves_this_zone
def real_main():
    """Parse the command line and dispatch to the requested cluster action:
    launch, destroy, login, reboot-slaves, get-master, stop, or start.

    Exits with status 1 on invalid input, connection failure, or an
    unknown action.
    """
    (opts, action, cluster_name) = parse_args()

    # Input parameter validation
    if opts.wait is not None:
        # NOTE: DeprecationWarnings are silent in 2.7+ by default.
        # To show them, run Python with the -Wdefault switch.
        # See: https://docs.python.org/3.5/whatsnew/2.7.html
        warnings.warn(
            "This option is deprecated and has no effect. "
            "spark-ec2 automatically waits as long as necessary for clusters to startup.",
            DeprecationWarning
        )

    if opts.ebs_vol_num > 8:
        print >> stderr, "ebs-vol-num cannot be greater than 8"
        sys.exit(1)

    try:
        conn = ec2.connect_to_region(opts.region)
    except Exception as e:
        print >> stderr, (e)
        sys.exit(1)

    # Select an AZ at random if it was not specified.
    if opts.zone == "":
        opts.zone = random.choice(conn.get_all_zones()).name

    if action == "launch":
        if opts.slaves <= 0:
            print >> sys.stderr, "ERROR: You have to start at least 1 slave"
            sys.exit(1)
        if opts.resume:
            # Reuse the instances of an already-running cluster.
            (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        else:
            (master_nodes, slave_nodes) = launch_cluster(conn, opts, cluster_name)
        # Block until every node accepts ssh connections, then deploy.
        wait_for_cluster_state(
            cluster_instances=(master_nodes + slave_nodes),
            cluster_state='ssh-ready',
            opts=opts
        )
        setup_cluster(conn, master_nodes, slave_nodes, opts, True)

    elif action == "destroy":
        print "Are you sure you want to destroy the cluster %s?" % cluster_name
        print "The following instances will be terminated:"
        (master_nodes, slave_nodes) = get_existing_cluster(
            conn, opts, cluster_name, die_on_error=False)
        for inst in master_nodes + slave_nodes:
            print "> %s" % inst.public_dns_name

        msg = "ALL DATA ON ALL NODES WILL BE LOST!!\nDestroy cluster %s (y/N): " % cluster_name
        response = raw_input(msg)
        if response == "y":
            print "Terminating master..."
            for inst in master_nodes:
                inst.terminate()
            print "Terminating slaves..."
            for inst in slave_nodes:
                inst.terminate()

            # Delete security groups as well
            if opts.delete_groups:
                print "Deleting security groups (this will take some time)..."
                if opts.security_group_prefix is None:
                    group_names = [cluster_name + "-master", cluster_name + "-slaves"]
                else:
                    group_names = [opts.security_group_prefix + "-master",
                                   opts.security_group_prefix + "-slaves"]
                # Groups cannot be deleted until their instances are gone.
                wait_for_cluster_state(
                    cluster_instances=(master_nodes + slave_nodes),
                    cluster_state='terminated',
                    opts=opts
                )
                attempt = 1
                while attempt <= 3:
                    print "Attempt %d" % attempt
                    groups = [g for g in conn.get_all_security_groups() if g.name in group_names]
                    success = True
                    # Delete individual rules in all groups before deleting groups to
                    # remove dependencies between them
                    for group in groups:
                        print "Deleting rules in security group " + group.name
                        for rule in group.rules:
                            for grant in rule.grants:
                                success &= group.revoke(ip_protocol=rule.ip_protocol,
                                                        from_port=rule.from_port,
                                                        to_port=rule.to_port,
                                                        src_group=grant)

                    # Sleep for AWS eventual-consistency to catch up, and for instances
                    # to terminate
                    time.sleep(30)  # Yes, it does have to be this long :-(
                    for group in groups:
                        try:
                            conn.delete_security_group(group.name)
                            print "Deleted security group " + group.name
                        except boto.exception.EC2ResponseError:
                            success = False
                            print "Failed to delete security group " + group.name

                    # Unfortunately, group.revoke() returns True even if a rule was not
                    # deleted, so this needs to be rerun if something fails
                    if success:
                        break

                    attempt += 1

                if not success:
                    print "Failed to delete all security groups after 3 tries."
                    print "Try re-running in a few minutes."

    elif action == "login":
        (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        master = master_nodes[0].public_dns_name
        print "Logging into master " + master + "..."
        proxy_opt = []
        if opts.proxy_port is not None:
            # Open a SOCKS proxy on the requested local port.
            proxy_opt = ['-D', opts.proxy_port]
        subprocess.check_call(
            ssh_command(opts) + proxy_opt + ['-t', '-t', "%s@%s" % (opts.user, master)])

    elif action == "reboot-slaves":
        response = raw_input(
            "Are you sure you want to reboot the cluster " +
            cluster_name + " slaves?\n" +
            "Reboot cluster slaves " + cluster_name + " (y/N): ")
        if response == "y":
            (master_nodes, slave_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            print "Rebooting slaves..."
            for inst in slave_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    print "Rebooting " + inst.id
                    inst.reboot()

    elif action == "get-master":
        (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        print master_nodes[0].public_dns_name

    elif action == "stop":
        response = raw_input(
            "Are you sure you want to stop the cluster " +
            cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
            "BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
            "AMAZON EBS IF IT IS EBS-BACKED!!\n" +
            "All data on spot-instance slaves will be lost.\n" +
            "Stop cluster " + cluster_name + " (y/N): ")
        if response == "y":
            (master_nodes, slave_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            print "Stopping master..."
            for inst in master_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    inst.stop()
            print "Stopping slaves..."
            for inst in slave_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    # Spot instances cannot be stopped, only terminated.
                    if inst.spot_instance_request_id:
                        inst.terminate()
                    else:
                        inst.stop()

    elif action == "start":
        (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        print "Starting slaves..."
        for inst in slave_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        print "Starting master..."
        for inst in master_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        wait_for_cluster_state(
            cluster_instances=(master_nodes + slave_nodes),
            cluster_state='ssh-ready',
            opts=opts
        )
        # deploy_ssh_key=False: the cluster was already set up once.
        setup_cluster(conn, master_nodes, slave_nodes, opts, False)

    else:
        print >> stderr, "Invalid action: %s" % action
        sys.exit(1)
def main():
    """Entry point: run real_main() and turn a UsageError into a clean,
    message-only exit instead of a traceback."""
    try:
        real_main()
    except UsageError, e:
        print >> stderr, "\nError:\n", e
        sys.exit(1)
if __name__ == "__main__":
    # Configure the root logger before boto emits any messages.
    logging.basicConfig()
    main()
|
Are you looking for a place to spend your summer vacation or weekend getaway? Look no further than Cabo San Lucas, one of Mexico’s top five tourist destinations. It is known for the sea arch El Arco de Cabo San Lucas as well as for its marine life, scuba diving locations and its beaches. And, its top resorts have reopened just in time for the bikini season, which is perfect for that getaway on your must-do list.
Formerly known as the Capella Pedregal, the Resort at Pedregal is set on 24 acres of land located at the southernmost tip of the peninsula of Baja California. It has 22 residential villas and 66 rooms, each of which has its own private plunge pool.
When it opened its doors again in January this year, new amenities were added and these include the rehabilitated Luna y Mar spa menu with its indigenous treatments and the Champagne Terrace, which can be found at the entryway to the El Faralon restaurant perched right on top of the cliffs.
The 57-room Esperanza, an Auberge Resort was one of those that was hit the hardest by Hurricane Odile in September last year. They are back on their feet though, with their soft opening just this April.
It was held to unveil the complete renovation of all its accommodations as well as the upgrades done to the Cocina del Mar restaurant. They have also added an outdoor relaxation lounge for their Spa as well as fresh retail space. The resort is now offering spa promotions and a Rediscover Esperanza travel package.
Built originally in 1956, One&Only Palmilla went through a $140 million rehabilitation and reopened just April 20 this year. Aside from having newly-remodeled suites and guest rooms, the resort also sports a new fitness center and spa.
They also added a gastronomic addition in Seared, which is a steakhouse helmed by Michelin-starred chef Jean-Georges Vongerichten. This is meant to complement the resort’s popular Agua by Larbi restaurant, which is headed by chef Larbi Dahrouch.
A Rosewood Resort situated between Cabo San Lucas and San José del Cabo, Las Ventanas al Paraiso reopened in June 4. Aside from refurbishing its 12 villas and 71 suites, the resort has also done some refinements to its world-renowned service.
They now have butlers in for their villas, who are fluent in English, Spanish, and French, available not just to act as guides for the guests, but also to keep the kids entertained and to make the meals. Dedicated butlers have also been made available for pets of guests in the resort.
Now that you have been apprised of which top resorts in Cabo San Lucas reopened just in time for your summer getaway, it is time to book that flight now and make those reservations. Relax and enjoy some time in the sun and get away from the daily grind of city life.
Enjoying reading? Check out related articles.
|
"""
Create a setup so we can easily define families. Input is a ped file to define
the pedigree and a vector indicating the genotype.
>>> fam = TestFamily(\"\"\"
... #family_id sample_id paternal_id maternal_id sex phenotype
... 1 dad 0 0 1 1
... 1 mom 0 0 2 1
... 1 kid dad mom 1 2
... 1 kid2 dad mom 1 1
... 1 grandma 0 0 2 1
... 1 grandpa 0 0 1 1\"\"\")
>>> fam.gt_types = [HET, HET, HOM_ALT, HET, HET, HET]
>>> fam.gt_depths = [9] * 6
>>> _ = fam.dot()
<BLANKLINE>
auto_rec
--------
default True
strict=False True
only_affected=False True
both False True
<BLANKLINE>
auto_dom
--------
default False
strict=False False
only_affected=False False
both False False
>>> fam.auto_rec()
True
# attach grandparents to mom
>>> fam.subjects[1].mom = fam.subjects[4]
>>> fam.subjects[1].dad = fam.subjects[5]
>>> fam.auto_rec()
True
>>> _ = fam.dot(tests=[])
# if grandpa is affected it is no longer autosomal recessive
>>> fam.subjects[5].affected = True
>>> fam.auto_rec()
False
>>> _ = fam.dot(tests=[])
# reset.
>>> fam.subjects[5].affected = False
# set both kids to HOM_ALT (including the
>>> fam.gt_types[3] = HOM_ALT
>>> fam.auto_rec(only_affected=True)
False
>>> fam.auto_rec(only_affected=False)
True
>>> fam.auto_rec(min_depth=10)
False
>>> fam.auto_dom()
False
# dad:un, mom:un, kid:aff, kid2:un, gma:un, gpa:un
>>> fam.gt_types = [HOM_REF, HOM_REF, HET, HET, HET, HET]
>>> fam.de_novo()
False
>>> fam.de_novo(only_affected=False)
True
>>> fam.gt_types = [HOM_ALT, HOM_REF, HET, HET, HET, HET]
>>> fam.de_novo()
False
>>> fam.gt_types = [HOM_ALT, HOM_ALT, HET, HET, HET, HET]
>>> fam.de_novo()
False
>>> fam.mendel_plausible_denovo()
True
>>> cfam = TestFamily(\"\"\"
... #family_id sample_id paternal_id maternal_id sex phenotype
... 1 dad 0 0 1 1
... 1 mom 0 0 2 1
... 1 akid dad mom 1 2
... 1 ukid dad mom 1 1
... 1 bkid dad mom 1 2\"\"\")
>>> gt_types1 = [HOM_REF, HET, HET, HOM_REF, HET]
>>> gt_bases1 = ["A/A", "A/T", "A/T", "A/A", "A/T"]
>>> gt_types2 = [HET, HOM_REF, HET, HOM_REF, HET]
>>> gt_bases2 = ["A/C", "A/A", "A/C", "A/A", "A/C"]
>>> cfam.gt_types = gt_types1
>>> cfam.comp_het()
True
>>> result = cfam.comp_het_pair(gt_types1, gt_bases1, gt_types2, gt_bases2)
# note that stuff got phased in-place:
#>>> gt_bases1, gt_bases2
#(['A/A', 'A/T', 'T|A', 'A/A', 'T|A'], ['G/C', 'G/G', 'G|C', 'G/G', 'G|C'])
>>> result['candidate']
True
>>> result['affected_phased']
[Sample(akid;affected;male), Sample(bkid;affected;male)]
>>> sorted(result.keys())
['affected_phased', 'affected_skipped', 'affected_unphased', 'candidate', 'candidates', 'priority', 'unaffected_phased', 'unaffected_unphased']
>>> assert result['affected_skipped'] == result['affected_unphased'] == result['unaffected_unphased'] == []
# remove as a candidate if even one of the affecteds doesn't share the het
# pair:
>>> gt_bases1[-1], gt_types1[-1] = "A/A", HOM_REF
>>> result = cfam.comp_het_pair(gt_types1, gt_bases1, gt_types2, gt_bases2, allow_unaffected=True)
>>> result['candidate']
False
# restore.
>>> gt_bases1[-1], gt_types1[-1] = "A/T", HET
>>> cfam.comp_het_pair(gt_types1, gt_bases1, gt_types2, gt_bases2, allow_unaffected=True)['candidate']
True
# a parent (unphaseable) has the same het pair so we know they will be phased to
# the same chrom in the affected kid.
>>> gt_types1 = [HET, HOM_REF, HET, HOM_REF, HET]
>>> gt_bases1 = ["A/T", "A/A", "A/T", "A/A", "A/T"]
>>> gt_types2 = [HET, HOM_REF, HET, HOM_REF, HET]
>>> gt_bases2 = ["G/C", "G/G", "G/C", "G/G", "G/C"]
>>> cfam.gt_types = gt_types1
>>> cfam.comp_het()
True
>>> result = cfam.comp_het_pair(gt_types1, gt_bases1, gt_types2, gt_bases2, allow_unaffected=True)
>>> gt_bases1, gt_bases2
(['A/T', 'A/A', 'A|T', 'A/A', 'A|T'], ['G/C', 'G/G', 'G|C', 'G/G', 'G|C'])
# NOTE how the variants are on the same chromosome (T, then C). so it's not a candidate.
>>> result['candidate']
False
>>> result['unaffected_unphased'], result['unaffected_phased'], result['candidate']
([Sample(dad;unaffected;male)], [], False)
# phase dad so he has same het pair (won't be a candidate):
>>> gt_bases1[0], gt_bases2[0] = "A|T", "G|C"
>>> result = cfam.comp_het_pair(gt_types1, gt_bases1, gt_types2, gt_bases2)
>>> result['unaffected_unphased'], result['unaffected_phased'], result['candidate']
([], [], False)
# unaffected kid has same het pair as affected.
>>> cfam = TestFamily(\"\"\"
... #family_id sample_id paternal_id maternal_id sex phenotype
... 1 dad 0 0 1 1
... 1 mom 0 0 2 1
... 1 akid dad mom 1 2
... 1 bkid dad mom 1 1\"\"\")
>>> gt_types1 = [HOM_REF, HET, HET, HET]
>>> gt_bases1 = ["A/A", "A/T", "A/T", "A/T"]
>>> gt_types2 = [HET, HOM_REF, HET, HET]
>>> gt_bases2 = ["G/C", "G/G", "G/C", "G/C"]
>>> cfam.gt_types = gt_types1
>>> cfam.comp_het()
True
>>> result = cfam.comp_het_pair(gt_types1, gt_bases1, gt_types2, gt_bases2)
>>> result['candidate']
False
# unaffected kid is ok, parent is hom_alt (this get's filtered in the first pass
# without even considering the pair).
>>> gt_bases1[-1], gt_types1[-1] = "A/A", HOM_REF
>>> gt_bases1[0], gt_types1[0] = "T/T", HOM_ALT
>>> cfam.comp_het()
False
###################################################3
# comp_het: pattern only
###################################################3
>>> cfam = TestFamily(\"\"\"
... #family_id sample_id paternal_id maternal_id sex phenotype
... 1 dad 0 0 1 -9
... 1 mom 0 0 2 -9
... 1 akid dad mom 1 -9
... 1 bkid dad mom 1 -9\"\"\")
>>> gt_types1 = [HOM_REF, HET, HET, HOM_REF]
>>> gt_bases1 = ["A/A", "A/T", "A/T", "A/A"]
>>> gt_types2 = [HET, HOM_REF, HET, HOM_REF]
>>> gt_bases2 = ["A/C", "A/A", "A/C", "A/A"]
>>> cfam.gt_types = gt_types1
>>> cfam.comp_het_pair(gt_types1, gt_bases1,
... gt_types2, gt_bases2)['candidate']
False
>>> cfam.comp_het_pair(gt_types1, gt_bases1,
... gt_types2, gt_bases2, pattern_only=True)
{'priority': 1, 'candidates': [Sample(akid;unknown;male)], 'candidate': True}
# get a higher priority with phased parents.
>>> gt_types1 = [HOM_REF, HET, HET, HOM_REF]
>>> gt_bases1 = ["A|A", "A|T", "A|T", "A|A"]
>>> gt_types2 = [HET, HOM_REF, HET, HOM_REF]
>>> gt_bases2 = ["A|C", "A|A", "A|C", "A|A"]
>>> cfam.comp_het_pair(gt_types1, gt_bases1,
... gt_types2, gt_bases2, pattern_only=True)
{'priority': 1, 'candidates': [Sample(akid;unknown;male)], 'candidate': True}
>>> fh = open('test/from_inheritance.vcf', 'w')
>>> cfam.to_vcf(fh)
>>> cfam.gt_types = gt_types2
>>> cfam.to_vcf(fh, header=False)
>>> fh.close()
>>> cfam.family.to_ped(open("test/from_inheritance.ped", "w"))
####################################################3
# auto_dom penetrance
####################################################3
>>> dfam = TestFamily(\"\"\"
... #family_id individual_id paternal_id maternal_id sex phenotype
... 1 DS134791 DS134793 DS134792 1 2
... 1 DS134792 0 0 2 0
... 1 DS134793 0 0 1 0\"\"\")
>>> dfam.gt_types = [HET, HOM_REF, HET]
>>> dfam.auto_dom()
False
>>> dfam.auto_dom(strict=False)
True
"""
from __future__ import print_function
import os
import sys
import tempfile
import atexit
from gemini import family
import itertools as it
# Genotype codes matching the order used in gt_types arrays throughout
# this module: 0=HOM_REF, 1=HET, 2=UNKNOWN, 3=HOM_ALT.
HOM_REF, HET, UNKNOWN, HOM_ALT = range(4)
def tmp(pedstr, suf=".ped"):
    """Write the stripped, non-blank lines of *pedstr* to a temporary file
    (removed at interpreter exit) and return the file's path."""
    path = tempfile.mktemp(suffix=suf)
    atexit.register(os.unlink, path)
    with open(path, "w") as out:
        for raw in pedstr.split("\n"):
            stripped = raw.strip()
            if stripped:
                out.write(stripped + "\n")
    return path
class TestFamily(object):
    """Wrap a gemini `family.Family` so inheritance-model filters
    (auto_rec, auto_dom, de_novo, comp_het, ...) can be evaluated directly
    against in-memory gt_types / gt_depths vectors instead of a database.

    Behavior is pinned by the doctests in the module docstring.
    """

    # No per-instance __dict__; instances only carry these attributes.
    __slots__ = ('ped', 'family', 'gt_types', '_gt_types', 'gt_depths',
                 '_gt_depths', 'strict', 'subjects')

    def draw(self, tests=('auto_rec', 'auto_dom')):
        """Render the pedigree (plus test results) inline in IPython/Jupyter."""
        from IPython.display import Image, display
        if isinstance(tests, basestring):
            tests = (tests,)
        img = self.dot(tests=tests)
        return display(Image(filename=img))

    def __init__(self, ped, fam_id=None, gt_types=None, gt_depths=None):
        # can send in a family.
        if isinstance(ped, family.Family):
            self.family = ped
        else:
            # A multi-line string is treated as literal ped content and
            # written to a temp file; anything else is taken as a path.
            if isinstance(ped, basestring) and len(ped.split("\n")) > 1:
                self.ped = tmp(ped)
            else:
                self.ped = ped
            self.family = family.Family.from_ped(self.ped) # always want 1 family
            if fam_id is None:
                assert len(self.family) == 1
                self.family = self.family.values()[0]
            else:
                self.family = self.family[fam_id]
        for s in self.family.subjects:
            # Sample ids starting with a digit would not be valid names in
            # the eval() environment used by __getattr__, so prefix them.
            if s.sample_id[0].isdigit(): s.sample_id = "s" + s.sample_id
        self.subjects = self.family.subjects
        self._gt_types = None
        self.gt_types = gt_types
        self._gt_depths = None
        self.gt_depths = gt_depths

    def dot(self, comment=None, path="test.gv", view=False, tests=('auto_rec', 'auto_dom')):
        """Render the pedigree to a PNG via graphviz and print the result
        of each inheritance test under its four strict/only_affected
        option combinations. Returns the rendered file's path."""
        from graphviz import Digraph
        viz = Digraph(comment=comment)
        subjects = self.family.subjects
        # NOTE(review): "UNKOWN" spelling is preserved from the original
        # label set; it appears in the rendered node labels.
        lookup = ["HOM_REF", "HET", "UNKOWN", "HOM_ALT"]
        for i, s in enumerate(subjects):
            attrs = dict(style="filled", fontcolor="white")
            # Fill encodes phenotype (black=affected, white=unaffected,
            # gray=unknown); shape encodes sex per pedigree convention.
            attrs["fillcolor"] = {True: 'black', False: 'white', None: 'gray'}[s.affected]
            attrs["shape"] = {'male': 'square', 'female': 'circle', None: 'octagon'}[s.gender]
            if attrs["fillcolor"] == "black":
                attrs["fontcolor"] = "white"
            elif attrs["fillcolor"] == "white":
                attrs["fontcolor"] = "black"
            gt = lookup[self.gt_types[i]]
            label = s.name
            viz.node(s.name, label + "\n" + gt, **attrs)
        for s in subjects:
            # Parent -> child edges.
            if s.dad is not None:
                viz.edge(s.dad.name, s.name)
            if s.mom is not None:
                viz.edge(s.mom.name, s.name)
        for test in tests:
            res = {}
            res['default'] = getattr(self, test)()
            res['strict=False'] = getattr(self, test)(strict=False)
            res['only_affected=False'] = getattr(self, test)(only_affected=False)
            res['both False'] = getattr(self, test)(only_affected=False, strict=False)
            print("\n" + test)
            print("-" * len(test))
            for k in ("default", "strict=False", "only_affected=False", "both False"):
                print("%-20s\t%s" % (k, res[k]))
        viz._format = "png"
        return viz.render(path, view=view)

    @property
    def gt_types(self):
        # One genotype code (HOM_REF/HET/UNKNOWN/HOM_ALT) per subject.
        return self._gt_types

    @gt_types.setter
    def gt_types(self, gt_types):
        if gt_types is not None:
            assert len(gt_types) == len(self.family)
        self._gt_types = gt_types

    @property
    def gt_depths(self):
        # One sequencing depth per subject (only needed for min_depth tests).
        return self._gt_depths

    @gt_depths.setter
    def gt_depths(self, gt_depths):
        if gt_depths is not None:
            assert len(gt_depths) == len(self.family)
        self._gt_depths = gt_depths

    def __getattr__(self, gt):
        """Proxy any other attribute (auto_rec, auto_dom, de_novo, ...) to
        the underlying family object, which returns a filter *expression*
        string; the returned callable evaluates that expression against
        this object's gt_types/gt_depths vectors."""
        assert self._gt_types
        def func(*args, **kwargs):
            if 'min_depth' in kwargs:
                assert self._gt_depths is not None
            debug = kwargs.pop('debug', False)
            flt = getattr(self.family, gt)(*args, **kwargs)
            # comp_het_pair returns a result dict directly, not an expression.
            if gt == "comp_het_pair":
                return flt
            # Map each sample id to its column index for use in eval().
            env = {s.sample_id: i for i, s in enumerate(self.family.subjects)}
            if debug:
                print(flt, file=sys.stderr)
            env['gt_types'] = self.gt_types
            env['gt_depths'] = self.gt_depths
            return eval(flt, env)
        return func

    def to_vcf(self, fh, var_dict=None, header=True, _POS=[100001]):
        """Write the current genotype vector as one VCF record to *fh*.

        `var_dict` can override CHROM/POS/ID/REF/ALT/QUAL/INFO; unset
        fields get defaults. `_POS` is a deliberate mutable default used
        as a counter so successive calls get increasing positions.
        """
        if header:
            fh.write("##fileformat=VCFv4.1\n")
            fh.write("""##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n""")
            fh.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t")
            fh.write("\t".join(s.name for s in self.subjects) + "\n")
        if var_dict is None:
            var_dict = {}
        for k in ("ID", "QUAL", "INFO"):
            if k not in var_dict:
                var_dict[k] = "."
        var_dict["FILTER"] = "PASS"
        var_dict["FORMAT"] = "GT"
        if not "CHROM" in var_dict:
            var_dict["CHROM"] = "1"
        if not "POS" in var_dict:
            var_dict["POS"] = _POS[0]
            _POS[0] += 1
        if not "REF" in var_dict:
            var_dict["REF"] = "A"
        if not "ALT" in var_dict:
            var_dict["ALT"] = "G"
        # convert from number back to repr
        x = ["0/0", "0/1", "./.", "1/1"]
        formats = [x[t] for t in self.gt_types]
        if self.gt_depths:
            var_dict["FORMAT"] += ":DP"
            for i, d in enumerate(self.gt_depths):
                formats[i] += (":%d" % d)
        """
        if self.gt_phred_ll_homref:
            var_dict["FORMAT"] += ":PL"
            for i, (hom, het, alt) in enumerate(it.izip(self.gt_phred_ll_homref,
                                                        self.gt_phred_ll_het,
                                                        self.gt_phred_ll_homalt)):
                formats[i] += (":%s,%s,%s" % (hom, het, alt))
        """
        fh.write("{CHROM}\t{POS}\t{ID}\t{REF}\t{ALT}\t{QUAL}\t{FILTER}\t{INFO}\t{FORMAT}\t".format(**var_dict))
        fh.write("\t".join(formats) + "\n")
def main():
    """Ad-hoc manual exercise of TestFamily: build a small pedigree from a
    test ped file, attach grandparents, render the pedigree, and dump the
    genotypes to a.vcf / a.ped."""
    f = TestFamily("test/test.auto_rec.ped", "1")
    f.gt_types = [HET, HET, HOM_ALT]
    f.family.subjects[0].gender = "male"
    f.family.subjects[1].gender = "female"
    f.family.subjects[2].gender = "male"
    print(f.family.subjects)
    print(f.auto_rec(strict=False))
    print(f.auto_rec(strict=True))
    # Attach maternal grandparents (grandpa's phenotype unknown).
    gm = family.Sample("grandma", False, gender="female")
    f.family.subjects[1].mom = gm
    gp = family.Sample("grandpa", None, gender="male")
    f.family.subjects[1].dad = gp
    f.gt_types.extend([HOM_REF, HET])
    f.family.subjects.extend([gm, gp])
    print(f.dot("autosomal recessive"))
    #f.gt_depths = [9, 9, 9]
    #print f.auto_rec(strict=True, min_depth=8)
    #print f.auto_rec(strict=True, min_depth=18)
    #f.gt_types = [HOM_ALT, HET, HOM_ALT]
    #print f.auto_rec(strict=False)
    #print f.auto_rec(strict=True)
    f.to_vcf(open('a.vcf', 'w'))
    f.family.to_ped(open('a.ped', 'w'))
# Module self-test: run the doctests embedded in the module docstring on
# every import, reporting the summary on stderr.
import sys
import doctest
sys.stderr.write(str(doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS | doctest.REPORT_ONLY_FIRST_FAILURE, verbose=0)) + "\n")
if __name__ == "__main__":
    main()
|
Air Liquide Australia provides solutions for meeting environmental pH regulations and ammonia fish toxicity regulations. You can adjust the pH of your mine's final effluent by using Air Liquide's pH control technology with carbon dioxide. Air Liquide supplies a range of products, equipment and services for water and wastewater treatment. To explore all of our water treatment offers, visit our environment page.
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: errors.py
import mcl.status
# Success sentinel re-exported from the MCL framework.
ERR_SUCCESS = mcl.status.MCL_SUCCESS
# Module-specific error codes, allocated sequentially from the framework's
# reserved starting offset so they do not collide with framework codes.
ERR_INVALID_PARAM = mcl.status.framework.ERR_START
ERR_NOT_IMPLEMENTED = mcl.status.framework.ERR_START + 1
ERR_MARSHAL_FAILED = mcl.status.framework.ERR_START + 2
ERR_REG_OPEN_FAILED = mcl.status.framework.ERR_START + 3
ERR_ALLOC_FAILED = mcl.status.framework.ERR_START + 4
ERR_QUERY_FAILED = mcl.status.framework.ERR_START + 5
ERR_INVALID_FORMAT = mcl.status.framework.ERR_START + 6
ERR_UNHANDLED_REVISION = mcl.status.framework.ERR_START + 7
ERR_INTERNAL_ERROR = mcl.status.framework.ERR_START + 8
# Human-readable message for each failure code above (ERR_SUCCESS
# deliberately has no entry since it is not an error).
errorStrings = {ERR_INVALID_PARAM: 'Invalid parameter(s)',
 ERR_NOT_IMPLEMENTED: 'Not implemented on this platform',
 ERR_MARSHAL_FAILED: 'Marshaling data failed',
 ERR_REG_OPEN_FAILED: 'Open of registry failed',
 ERR_ALLOC_FAILED: 'Failed to allocate memory',
 ERR_QUERY_FAILED: 'Failed to query performance data',
 ERR_INVALID_FORMAT: 'Performance data is not formed correctly',
 ERR_UNHANDLED_REVISION: 'Unhandled performance data revision',
 ERR_INTERNAL_ERROR: 'Internal error parsing performance data'
}
|
This is a Browning 1886 rifle that has been restored to its current condition. The gun has been restocked with an English walnut fore grip and stock. The stock has a black leather-covered pad. This gun has been re-barreled to .45-70 with a 21 1/2 inch barrel length. The engraving and color case hardened receiver were done by Turnbull in the late 2000s. The engraving is the Winchester #9 pattern with a gold inlay of a bear on the right side and a bison on the left side. The gun comes with peep sights as well. The gun is in great condition and is being sold as is.
|
from __future__ import print_function
from __future__ import absolute_import
from time import time
import dnf
import dnf.yum
import dnf.const
import dnf.conf
import dnf.subject
class Packages:
    '''
    Get access to packages in the dnf (hawkey) sack in an easy way
    '''

    def __init__(self, base):
        self._base = base
        self._sack = base.sack
        # (name, arch) -> [installed package objects]; used to swap the
        # "available" view of a package for its installed counterpart.
        self._inst_na = self._sack.query().installed().na_dict()

    def _filter_packages(self, pkg_list, replace=True):
        '''
        Filter a list of package objects and replace
        the installed ones with the installed object, instead
        of the available object
        '''
        pkgs = []
        for pkg in pkg_list:
            key = (pkg.name, pkg.arch)
            inst_pkg = self._inst_na.get(key, [None])[0]
            if inst_pkg and inst_pkg.evr == pkg.evr:
                # NOTE(review): when replace=False a package whose exact
                # version is installed is skipped entirely rather than
                # appended -- confirm this is intentional.
                if replace:
                    pkgs.append(inst_pkg)
            else:
                pkgs.append(pkg)
        return pkgs

    @property
    def query(self):
        '''
        Get the query object from the current sack
        '''
        return self._sack.query()

    @property
    def installed(self):
        '''
        get installed packages
        '''
        return self.query.installed().run()

    @property
    def updates(self):
        '''
        get available updates
        '''
        return self.query.upgrades().run()

    @property
    def all(self,showdups = False):
        '''
        all packages in the repositories
        installed ones are replace with the install package objects
        '''
        # NOTE(review): as a property this can only be read, so callers can
        # never actually pass showdups; the default (False) branch always runs.
        if showdups:
            return self._filter_packages(self.query.available().run())
        else:
            return self._filter_packages(self.query.latest().run())

    @property
    def available(self, showdups = False):
        '''
        available packages that are not installed yet
        '''
        # NOTE(review): showdups is unreachable here as well (property).
        if showdups:
            return self._filter_packages(self.query.available().run(), replace=False)
        else:
            return self._filter_packages(self.query.latest().run(), replace=False)

    @property
    def extras(self):
        '''
        installed packages, not in current repos
        '''
        # anything installed but not in a repo is an extra
        avail_dict = self.query.available().pkgtup_dict()
        inst_dict = self.query.installed().pkgtup_dict()
        pkgs = []
        for pkgtup in inst_dict:
            if pkgtup not in avail_dict:
                pkgs.extend(inst_dict[pkgtup])
        return pkgs

    @property
    def obsoletes(self):
        '''
        packages that obsolete some installed packages
        '''
        inst = self.query.installed()
        return self.query.filter(obsoletes=inst)

    @property
    def recent(self, showdups=False):
        '''
        Get the recent packages
        '''
        recent = []
        now = time()
        # Anything built within the last `conf.recent` days counts as recent.
        recentlimit = now-(self._base.conf.recent*86400)
        if showdups:
            avail = self.query.available()
        else:
            avail = self.query.latest()
        for po in avail:
            if int(po.buildtime) > recentlimit:
                recent.append(po)
        return recent
class DnfBase(dnf.Base):
    '''
    class to encapsulate and extend the dnf.Base API
    '''

    def __init__(self, setup_sack=True):
        dnf.Base.__init__(self)
        # setup the dnf cache
        RELEASEVER = dnf.rpm.detect_releasever(self.conf.installroot)
        self.conf.substitutions['releasever'] = RELEASEVER
        # read the repository information
        self.read_all_repos()
        if setup_sack:
            # populate the dnf sack
            self.fill_sack()
            self._packages = Packages(self) # Define a Packages object

    def setup_base(self):
        """Populate the sack and the Packages helper when construction was
        done with setup_sack=False."""
        self.fill_sack()
        self._packages = Packages(self) # Define a Packages object

    @property
    def packages(self):
        ''' property to get easy access to packages'''
        return self._packages

    def cachedir_fit(self):
        """Return (cachedir, system_cachedir) computed the same way the
        dnf command-line client does."""
        conf = self.conf
        subst = conf.substitutions
        # this is not public API, same procedure as dnf cli
        suffix = dnf.conf.parser.substitute(dnf.const.CACHEDIR_SUFFIX, subst)
        cli_cache = dnf.conf.CliCache(conf.cachedir, suffix)
        return cli_cache.cachedir, cli_cache.system_cachedir

    def setup_cache(self):
        """Setup the dnf cache, same as dnf cli"""
        conf = self.conf
        # NOTE(review): detects releasever from '/' here, while __init__
        # used conf.installroot -- confirm the inconsistency is intended.
        conf.substitutions['releasever'] = dnf.rpm.detect_releasever('/')
        conf.cachedir, self._system_cachedir = self.cachedir_fit()
        print("cachedir: %s" % conf.cachedir)
|
Requires Unity 5.1.0 or higher.
The Solution for horizontal 360° perspective billboards.
Useful for Pc solutions and mobile.
For the sake of simplicity atlasing and dynamic lighting feature are now included.
Follow quick guide in manual for fast scene setup!
Now supports dynamic batching. Reduce batches and GPU thread time dramatically! Build big scenes of many thousands of objects and gain an amazing far view.
Atlasing feature now included. Lower draw calls and increase performance!
Dynamic lighting takes the base and normal map of the selected object and applies lighting in real time to a Blinn-Phong shader by default. An UBER shader now also supports nature soft occlusion tree shaders. Here a parameter map is added to capture each tree's ambient occlusion, directional occlusion and other information Unity's shader uses. In the future more supported shaders, like the standard shader, are possible.
Bake billboards for your static and moving objects. Rotation to camera is taken care of in gpu to reduce cpu load. Billboards position and scaling is now calculated even after quads are dynamically batched together.
Recent manual is included and can be downloaded here.
Shaders are now written by hand, and mobile optimization will be taken care of in the next updates.
GMT+8, 2019-4-20 02:43 , Processed in 0.048117 second(s), 38 queries , Gzip On.
|
from sqlalchemy import Column, Integer, String, DateTime, Boolean, ForeignKey
from database import Base
import datetime
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
class User(Base):
    """A player account plus its live in-game state (position, facing,
    health) and the character class it plays."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    username = Column(String(50), unique=True)
    # NOTE(review): __init__ stores the value verbatim; the 128-char width
    # suggests a hash is expected -- confirm callers hash before storing.
    password = Column(String(128), unique=False)
    # Defaults to the row's creation time (callable default, evaluated per
    # insert). Presumably marks session/subscription expiry -- TODO confirm.
    active_until = Column(DateTime, unique=False, default=(datetime.datetime.now))
    # Current world position, facing and movement state.
    x = Column(Integer, unique=False)
    y = Column(Integer, unique=False)
    direction = Column(Integer, unique=False)
    health = Column(Integer, unique=False)
    moving = Column(Boolean)
    # Character class played by this user; backref exposes Character.users.
    character_id = Column(Integer, ForeignKey('characters.id'))
    character = relationship('Character', backref=backref('users', lazy='dynamic'))

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __repr__(self):
        return '<User %r>' % (self.username)
class Character(Base):
    """A playable character class and its base stats."""
    __tablename__ = 'characters'
    id = Column(Integer, primary_key=True)
    name = Column(String(50), unique=True)
    description = Column(String(144))
    # Base stats; units/scales are game-defined elsewhere -- TODO confirm.
    max_health = Column(Integer)
    speed = Column(Integer)
    decode_time = Column(Integer)
    regen_speed = Column(Integer)

    def __init__(self, name, description, max_health, speed, decode_time, regen_speed):
        self.name = name
        self.description = description
        self.max_health = max_health
        self.speed = speed
        self.decode_time = decode_time
        self.regen_speed = regen_speed

    def __repr__(self):
        return '<Character %r>' % (self.name)
class Tile(Base):
    """One map tile at (x, y) with a type, a status and an optional
    scheduled status change."""
    __tablename__ = 'tiles'
    id = Column(Integer, primary_key=True)
    x = Column(Integer)
    y = Column(Integer)
    tile_type = Column(Integer)  # numeric tile kind; meaning defined by the game
    status = Column(Integer)
    # When the tile's status should next change; not set by __init__ —
    # populated elsewhere.
    next_change = Column(DateTime)
    def __init__(self, x: int, y: int, tile_type: int, status: int) -> None:
        self.x = x
        self.y = y
        self.tile_type = tile_type
        self.status = status
    def __repr__(self):
        return '<Tile %r>' % (self.id)
class Message(Base):
    """A chat message posted by a user, stamped with its creation time and
    (optionally) the world position where it was said."""
    __tablename__ = 'messages'
    id = Column(Integer, primary_key=True)
    text = Column(String(144), unique=False)
    created_at = Column(DateTime, unique=False, default=datetime.datetime.now)
    user_id = Column(Integer, ForeignKey('users.id'))
    # Author; reverse accessor is User.messages.
    user = relationship('User', backref=backref('messages', lazy='dynamic'))
    # Position of the speaker; not set by __init__ — populated elsewhere.
    x = Column(Integer)
    y = Column(Integer)
    def __init__(self, text: str, user) -> None:
        self.text = text
        self.user = user
    def __repr__(self):
        return '<Message %r>' % (self.text)
|
Red Mountain Road is a hiking, biking, and horse trail in Colorado. It is within Gunnison National Forest. It is 8.1 miles long and begins at 8,192 feet altitude. Traveling the entire trail is 16.3 miles round trip, with a total elevation gain of 2,769 feet.
One of Colorado's best trails, Red Mountain Road is located near Gunnison National Forest, CO. Trails' printable online topo maps offer shaded and un-shaded reliefs, and aerial photos too. Use topographic maps to find elevation, print high resolution maps, save a PNG, or just learn the topography around Red Mountain Road. You can also get free latitude and longitude coordinates from the topographical map and set your GPS. Premium members can download or print any topo and cover more terrain when you map your Red Mountain Road route ahead of time.
|
import asyncio
from collections import defaultdict
from functools import partial
import json
import logging
import random
import uuid
from again.utils import unique_hex
from retrial.retrial import retry
import aiohttp
from .services import TCPServiceClient, HTTPServiceClient
from .pubsub import PubSub
from .packet import ControlPacket, MessagePacket
from .protocol_factory import get_vyked_protocol
# Transport identifiers used when resolving services through the registry.
HTTP = 'http'
TCP = 'tcp'
_logger = logging.getLogger(__name__)
def _retry_for_client_conn(result):
    """Decide whether a client-connection attempt must be retried.

    The connection coroutine is expected to yield a
    ``(transport, protocol)`` pair; anything else — or a pair whose parts
    are not a Transport/Protocol — counts as a failed attempt.
    """
    if not isinstance(result, tuple):
        return True
    transport, protocol = result[0], result[1]
    return not (isinstance(transport, asyncio.transports.Transport)
                and isinstance(protocol, asyncio.Protocol))
def _retry_for_pub(result):
    """Retry a publish whenever the pub/sub handler reported a falsy result."""
    return False if result else True
def _retry_for_exception(_):
    """Treat every exception raised during connection setup as retryable."""
    return True
class HTTPBus:
    """Resolves a service through the registry and issues HTTP requests to it."""

    def __init__(self, registry_client):
        self._registry_client = registry_client

    def send_http_request(self, app: str, service: str, version: str, method: str, entity: str, params: dict):
        """
        A convenience method that allows you to send a well formatted http request to another service
        """
        # Ask the registry where this (service, version, entity) lives.
        host, port, node_id, service_type = self._registry_client.resolve(service, version, entity, HTTP)
        # 'path' is consumed from params to build the URL (mutates params).
        url = 'http://{}:{}{}'.format(host, port, params.pop('path'))
        # Forward only the aiohttp-recognized keyword arguments.
        http_keys = ['data', 'headers', 'cookies', 'auth', 'allow_redirects', 'compress', 'chunked']
        kwargs = {}
        for key in http_keys:
            if key in params:
                kwargs[key] = params[key]
        # Routing metadata travels in the query string.
        query_params = params.pop('params', {})
        if app is not None:
            query_params['app'] = app
        query_params['version'] = version
        query_params['service'] = service
        response = yield from aiohttp.request(method, url, params=query_params, **kwargs)
        return response
class TCPBus:
def __init__(self, registry_client):
self._registry_client = registry_client
self._client_protocols = {}
self._pingers = {}
self._node_clients = {}
self._service_clients = []
self._pending_requests = []
self.tcp_host = None
self.http_host = None
self._host_id = unique_hex()
self._ronin = False
self._registered = False
def _create_service_clients(self):
futures = []
for sc in self._service_clients:
for host, port, node_id, service_type in self._registry_client.get_all_addresses(sc.properties):
self._node_clients[node_id] = sc
future = self._connect_to_client(host, node_id, port, service_type, sc)
futures.append(future)
return asyncio.gather(*futures, return_exceptions=False)
def register(self, host, port, service, version, clients, service_type):
for client in clients:
if isinstance(client, (TCPServiceClient, HTTPServiceClient)):
client.bus = self
self._service_clients = clients
self._registry_client.register(host, port, service, version, clients, service_type)
def registration_complete(self):
if not self._registered:
f = self._create_service_clients()
self._registered = True
def fun(_):
if self.tcp_host:
self._clear_request_queue()
f.add_done_callback(fun)
def send(self, packet: dict):
packet['from'] = self._host_id
func = getattr(self, '_' + packet['type'] + '_sender')
func(packet)
def _request_sender(self, packet: dict):
"""
Sends a request to a server from a ServiceClient
auto dispatch method called from self.send()
"""
self._pending_requests.append(packet)
self._clear_request_queue()
@retry(should_retry_for_result=_retry_for_client_conn, should_retry_for_exception=_retry_for_exception, timeout=10,
strategy=[0, 2, 2, 4])
def _connect_to_client(self, host, node_id, port, service_type, service_client):
_logger.info('node_id' + node_id)
future = asyncio.async(
asyncio.get_event_loop().create_connection(partial(get_vyked_protocol, service_client), host, port))
future.add_done_callback(
partial(self._service_client_connection_callback, self._node_clients[node_id], node_id, service_type))
return future
def _service_client_connection_callback(self, sc, node_id, service_type, future):
_, protocol = future.result()
# TODO : handle pinging
# if service_type == TCP:
# pinger = Pinger(self, asyncio.get_event_loop())
# self._pingers[node_id] = pinger
# pinger.register_tcp_service(protocol, node_id)
# asyncio.async(pinger.start_ping())
self._client_protocols[node_id] = protocol
@staticmethod
def _create_json_service_name(app, service, version):
return {'app': app, 'service': service, 'version': version}
@staticmethod
def _handle_ping(packet, protocol):
protocol.send(ControlPacket.pong(packet['node_id']))
def _handle_pong(self, node_id, count):
pinger = self._pingers[node_id]
asyncio.async(pinger.pong_received(count))
def _clear_request_queue(self):
self._pending_requests[:] = [each for each in self._pending_requests if not self._send_packet(each)]
def _send_packet(self, packet):
node_id = self._get_node_id_for_packet(packet)
if node_id is not None:
client_protocol = self._client_protocols[node_id]
if client_protocol.is_connected():
packet['to'] = node_id
client_protocol.send(packet)
return True
else:
return False
return False
def _get_node_id_for_packet(self, packet):
app, service, version, entity = packet['app'], packet['service'], packet['version'], packet['entity']
node = self._registry_client.resolve(service, version, entity, TCP)
return node[2] if node else None
def handle_ping_timeout(self, node_id):
_logger.info("Service client connection timed out {}".format(node_id))
self._pingers.pop(node_id, None)
service_props = self._registry_client.get_for_node(node_id)
_logger.info('service client props {}'.format(service_props))
if service_props is not None:
host, port, _node_id, _type = service_props
asyncio.async(self._connect_to_client(host, _node_id, port, _type))
def receive(self, packet: dict, protocol, transport):
if packet['type'] == 'ping':
self._handle_ping(packet, protocol)
elif packet['type'] == 'pong':
self._handle_pong(packet['node_id'], packet['count'])
elif packet['type'] == 'publish':
self._handle_publish(packet, protocol)
else:
if self.tcp_host.is_for_me(packet['service'], packet['version']):
func = getattr(self, '_' + packet['type'] + '_receiver')
func(packet, protocol)
else:
_logger.warn('wrongly routed packet: ', packet)
def _request_receiver(self, packet, protocol):
api_fn = getattr(self.tcp_host, packet['endpoint'])
if api_fn.is_api:
from_node_id = packet['from']
entity = packet['entity']
future = asyncio.async(api_fn(from_id=from_node_id, entity=entity, **packet['payload']))
def send_result(f):
result_packet = f.result()
protocol.send(result_packet)
future.add_done_callback(send_result)
else:
print('no api found for packet: ', packet)
def _handle_publish(self, packet, protocol):
service, version, endpoint, payload, publish_id = packet['service'], packet['version'], packet['endpoint'], \
packet['payload'], packet['publish_id']
for client in self._service_clients:
if client.name == service and client.version == version:
fun = getattr(client, endpoint)
asyncio.async(fun(payload))
protocol.send(MessagePacket.ack(publish_id))
class PubSubBus:
    """Publish/subscribe bus for vyked services.

    Plain ``publish`` pushes through the pub/sub handler; ``xpublish``
    additionally delivers point-to-point over TCP and re-sends every
    ``PUBSUB_DELAY`` seconds until each subscriber acknowledges receipt.
    Most methods are generator-based coroutines (``yield from``).
    """
    PUBSUB_DELAY = 5  # seconds between xpublish redelivery attempts
    def __init__(self, registry_client):
        self._pubsub_handler = None  # set by create_pubsub_handler()
        self._registry_client = registry_client
        self._clients = None  # set by register_for_subscription()
        self._pending_publishes = {}  # publish_id -> retrying xpublish future
    def create_pubsub_handler(self, host, port):
        """Coroutine: connect the underlying pub/sub handler."""
        self._pubsub_handler = PubSub(host, port)
        yield from self._pubsub_handler.connect()
    def register_for_subscription(self, clients):
        """Coroutine: subscribe every decorated client method.

        Methods marked ``is_subscribe`` go to the pub/sub channel;
        methods marked ``is_xsubscribe`` are registered with the registry
        for reliable delivery.
        """
        self._clients = clients
        subscription_list = []
        xsubscription_list = []
        for client in clients:
            if isinstance(client, TCPServiceClient):
                for each in dir(client):
                    fn = getattr(client, each)
                    if callable(fn) and getattr(fn, 'is_subscribe', False):
                        subscription_list.append(self._get_pubsub_key(client.name, client.version, fn.__name__))
                    elif callable(fn) and getattr(fn, 'is_xsubscribe', False):
                        xsubscription_list.append((client.name, client.version, fn.__name__, getattr(fn, 'strategy')))
        self._registry_client.x_subscribe(xsubscription_list)
        yield from self._pubsub_handler.subscribe(subscription_list, handler=self.subscription_handler)
    def publish(self, service, version, endpoint, payload):
        """Fire-and-forget publish plus a reliable xpublish of the same payload."""
        endpoint_key = self._get_pubsub_key(service, version, endpoint)
        asyncio.async(self._retry_publish(endpoint_key, json.dumps(payload)))
        publish_id = str(uuid.uuid4())
        future = asyncio.async(self.xpublish(publish_id, service, version, endpoint, payload))
        self._pending_publishes[publish_id] = future
    def xpublish(self, publish_id, service, version, endpoint, payload):
        """Coroutine: deliver to each subscriber, then re-send after
        PUBSUB_DELAY until an ack cancels this future (see receive())."""
        subscribers = yield from self._registry_client.get_subscribers(service, version, endpoint)
        strategies = defaultdict(list)
        for subscriber in subscribers:
            strategies[(subscriber['service'], subscriber['version'])].append(
                (subscriber['host'], subscriber['port'], subscriber['node_id'], subscriber['strategy']))
        if not len(subscribers):
            # Nobody to deliver to: stop retrying.
            future = self._pending_publishes[publish_id]
            future.cancel()
            return
        yield from self._connect_and_publish(publish_id, service, version, endpoint, strategies, payload)
        yield from asyncio.sleep(self.PUBSUB_DELAY)
        yield from self.xpublish(publish_id, service, version, endpoint, payload)
    def receive(self, packet, transport, protocol):
        """Handle an 'ack' from a subscriber: stop the retry loop for that id."""
        if packet['type'] == 'ack':
            future = self._pending_publishes.pop(packet['request_id'])
            future.cancel()
            transport.close()
    def _retry_publish(self, endpoint, payload):
        """Coroutine: publish through the handler (retried by decorator on the handler side)."""
        return (yield from self._pubsub_handler.publish(endpoint, payload))
    def subscription_handler(self, endpoint, payload):
        """Route a pub/sub message to the matching client's endpoint method."""
        service, version, endpoint = endpoint.split('/')
        client = [sc for sc in self._clients if (sc.name == service and sc.version == int(version))][0]
        func = getattr(client, endpoint)
        asyncio.async(func(**json.loads(payload)))
    @staticmethod
    def _get_pubsub_key(service, version, endpoint):
        """Channel key: 'service/version/endpoint'."""
        return '/'.join((service, str(version), endpoint))
    def _connect_and_publish(self, publish_id, service, version, endpoint, strategies, payload):
        """Coroutine: deliver one xpublish message to each subscribing service.

        'LEADER' strategy always targets the first node; otherwise a random
        node of the service is chosen.
        """
        for key, value in strategies.items():
            if value[0][3] == 'LEADER':
                host, port = value[0][0], value[0][1]
            else:
                random_metadata = random.choice(value)
                host, port = random_metadata[0], random_metadata[1]
            transport, protocol = yield from asyncio.get_event_loop().create_connection(
                partial(get_vyked_protocol, self), host, port)
            packet = MessagePacket.publish(publish_id, service, version, endpoint, payload)
            protocol.send(packet)
|
Meet The Miracles Hair and Beauty Lounge Team!
I am the founder and owner of Miracles Hair and Beauty Lounge.
I am a fully qualified beauty therapist.
After opening the salon in 2006 I quickly became established in Durham as an intimate waxing specialist.
In my spare time I love to spend time with my husband and 2 children and researching my family history (Genealogy).
I am a fully qualified beauty therapist and I am currently in my 3rd year of hairdressing.
I enjoy eye treatments, which include high-definition brows, semi-permanent eyelashes and lash lifts.
I love to be creative with hair and especially enjoy blow drys and special occasion styling.
I am a bubbly chatty girl who likes to socialise and spend time with my family.
I am the newest and youngest member to join the team.
I am currently in my 1st year of hairdressing training and I have recently done several beauty courses.
I look forward to improving on my skills learnt in nail enhancements, blow drying and make up skills.
I love listening to music and enjoy socialising with my friends.
|
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2019, 2020 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from __future__ import division
# Public API of this module: the flash solver routines exported by
# ``from ... import *``.
__all__ = [
    'sequential_substitution_2P',
    'sequential_substitution_2P_functional',
    'sequential_substitution_GDEM3_2P',
    'dew_bubble_Michelsen_Mollerup',
    'bubble_T_Michelsen_Mollerup',
    'dew_T_Michelsen_Mollerup',
    'bubble_P_Michelsen_Mollerup',
    'dew_P_Michelsen_Mollerup',
    'minimize_gibbs_2P_transformed',
    'sequential_substitution_Mehra_2P',
    'nonlin_2P',
    'nonlin_n_2P',
    'sequential_substitution_NP',
    'minimize_gibbs_NP_transformed',
    'TPV_HSGUA_guesses_1P_methods',
    'TPV_solve_HSGUA_guesses_1P',
    'sequential_substitution_2P_HSGUAbeta',
    'sequential_substitution_2P_sat',
    'TP_solve_VF_guesses',
    'TPV_double_solve_1P',
    'nonlin_2P_HSGUAbeta',
    'sequential_substitution_2P_double',
    'cm_flash_tol',
    'nonlin_2P_newton',
    'dew_bubble_newton_zs',
    'existence_3P_Michelsen_Mollerup',
    'SS_VF_simultaneous',
    'stability_iteration_Michelsen',
    'assert_stab_success_2P',
    'nonlin_equilibrium_NP',
    'nonlin_spec_NP',
    'TPV_solve_HSGUA_guesses_VL',
    'solve_P_VF_IG_K_composition_independent',
    'solve_T_VF_IG_K_composition_independent'
]
from fluids.constants import R
from fluids.numerics import (UnconvergedError, trunc_exp, newton,
brenth, secant, translate_bound_f_jac,
numpy as np, assert_close, assert_close1d,
damping_maintain_sign, oscillation_checking_wrapper,
OscillationError, NotBoundedError, jacobian,
best_bounding_bounds, isclose, newton_system,
make_damp_initial, newton_minimize,
root, minimize, fsolve)
from fluids.numerics import py_solve, trunc_log
from chemicals.utils import (exp, log, copysign, normalize,
mixing_simple, property_mass_to_molar)
from chemicals.heat_capacity import (Dadgostar_Shaw_integral,
Dadgostar_Shaw_integral_over_T,
Lastovka_Shaw_integral,
Lastovka_Shaw_integral_over_T)
from chemicals.rachford_rice import (flash_inner_loop,
Rachford_Rice_solutionN,
Rachford_Rice_flash_error,
Rachford_Rice_solution_LN2)
from chemicals.phase_change import SMK
from chemicals.volume import COSTALD
from chemicals.flash_basic import flash_wilson, flash_Tb_Tc_Pc, flash_ideal
from chemicals.exceptions import TrivialSolutionError
from thermo.phases import Phase, CoolPropPhase, CEOSLiquid, CEOSGas, IAPWS95
from thermo.phases.phase_utils import lnphis_direct
from thermo.coolprop import CPiP_min
# String identifiers for the initial-guess strategies used by the solvers
# in this module.
LASTOVKA_SHAW = 'Lastovka Shaw'
DADGOSTAR_SHAW_1 = 'Dadgostar Shaw 1'
STP_T_GUESS = '298.15 K'
LAST_CONVERGED = 'Last converged'
FIXED_GUESS = 'Fixed guess'
IG_ENTHALPY = 'Ideal gas'
IDEAL_LIQUID_ENTHALPY = 'Ideal liquid'
WILSON_GUESS = 'Wilson'
TB_TC_GUESS = 'Tb Tc'
IDEAL_PSAT = 'Ideal Psat'
# Identifiers for the PT-flash algorithms implemented below.
PT_SS = 'SS'
PT_SS_MEHRA = 'SS Mehra'
PT_SS_GDEM3 = 'SS GDEM3'
PT_NEWTON_lNKVF = 'Newton lnK VF'
IDEAL_WILSON = 'Ideal Wilson'
SHAW_ELEMENTAL = 'Shaw Elemental'
# Guess methods applicable to single-phase T solves for H/S/G/U/A specs.
PH_T_guesses_1P_methods = [LASTOVKA_SHAW, DADGOSTAR_SHAW_1, IG_ENTHALPY,
                           IDEAL_LIQUID_ENTHALPY, FIXED_GUESS, STP_T_GUESS,
                           LAST_CONVERGED]
TPV_HSGUA_guesses_1P_methods = PH_T_guesses_1P_methods
def sequential_substitution_2P(T, P, V, zs, xs_guess, ys_guess, liquid_phase,
                               gas_phase, maxiter=1000, tol=1E-13,
                               trivial_solution_tol=1e-5, V_over_F_guess=None,
                               check_G=False, check_V=False, dZ_allow=0.1):
    """Two-phase successive-substitution (SS) flash at fixed T and P.

    Each iteration updates K-values from the fugacity coefficients of the
    trial liquid and gas phases, then solves the Rachford-Rice equation with
    ``flash_inner_loop`` for the vapor fraction and phase compositions.

    `zs` is the feed composition; `xs_guess`/`ys_guess` are the initial
    liquid/gas composition guesses; `liquid_phase`/`gas_phase` are trial
    Phase objects. `check_G` enables an experimental Gibbs-energy damping
    step and `check_V` an experimental damping of large compressibility (Z)
    jumps (limited to `dZ_allow`) — NOTE(review): both experimental paths
    contain debug prints and look unfinished; confirm before enabling.

    Returns ``(V_over_F, xs, ys, l, g, iteration, err)``.

    Raises TrivialSolutionError if both phases converge to the same
    composition, OscillationError if the error cycles without progress, and
    UnconvergedError when `maxiter` is exhausted.
    """
    xs, ys = xs_guess, ys_guess
    if V_over_F_guess is None:
        V_over_F = 0.5
    else:
        V_over_F = V_over_F_guess
    cmps = range(len(zs))
    # err1..err3 hold the previous three errors for oscillation detection.
    err, err1, err2, err3 = 0.0, 0.0, 0.0, 0.0
    G_old = None
    V_over_F_old = V_over_F
    restrained = 0
    restrained_switch_count = 300
    # Code for testing phis at zs
    l, g = liquid_phase, gas_phase
    if liquid_phase.T != T or liquid_phase.P != P:
        liquid_phase = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
    if gas_phase.T != T or gas_phase.P != P:
        gas_phase = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
    for iteration in range(maxiter):
#        g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
#        l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
#        l = liquid_phase.to(xs, T=T, P=P, V=V)
#        g = gas_phase.to(ys, T=T, P=P, V=V)
#        lnphis_g = g.lnphis()
#        lnphis_l = l.lnphis()
        lnphis_g = gas_phase.lnphis_at_zs(ys)
        lnphis_l = liquid_phase.lnphis_at_zs(xs)
        limited_Z = False
        try:
            Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]  # K_value(phi_l=l, phi_g=g)
        except OverflowError:
            Ks = [trunc_exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]  # K_value(phi_l=l, phi_g=g)
        V_over_F_old = V_over_F
        try:
            V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
        except Exception as e:
            # Fall back to the checked (slower, more robust) inner loop.
            V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F, check=True)
#            K_low, K_high = False, False
#            for zi, Ki in zip(zs, Ks):
#                if zi != 0.0:
#                    if Ki > 1.0:
#                        K_high = True
#                    else:
#                        K_low = True
#                    if K_high and K_low:
#                        break
#            if not (K_high and K_low):
#                raise TrivialSolutionError("Converged to trivial condition, all K same phase",
#                                           comp_difference, iteration, err)
#            else:
        # Experimental: only accept K steps that decrease total Gibbs energy.
        if check_G:
            V_over_F_G = min(max(V_over_F_old, 0), 1)
            G = g.G()*V_over_F_G + (1.0 - V_over_F_G)*l.G()
            print('new G', G, 'old G', G_old)
            if G_old is not None:
                if G > G_old:
                    step = .5
                    while G > G_old and step > 1e-4:
#                        ys_working = normalize([step*xo + (1.0 - step)*xi for xi, xo in zip(xs, xs_old)])
#                        xs_working = normalize([step*xo + (1.0 - step)*xi for xi, xo in zip(ys, ys_old)])
#                        ys_working = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(xs, xs_old)])
#                        xs_working = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(ys, ys_old)])
#                        g = gas_phase.to(ys_working, T=T, P=P, V=V)
#                        l = liquid_phase.to(xs_working, T=T, P=P, V=V)
#                        lnphis_g = g.lnphis()
#                        lnphis_l = l.lnphis()
#                        try:
#                            Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
#                        except OverflowError:
#                            Ks = [trunc_exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
                        Ks_working = [step*xo + (1.0 - step)*xi for xo, xi in zip(Ks_old, Ks)]
                        V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks_working, guess=V_over_F)
#                        V_over_F_G = min(max(V_over_F, 0), 1)
                        g = gas_phase.to(ys_new, T=T, P=P, V=V)
                        l = liquid_phase.to(xs_new, T=T, P=P, V=V)
                        G = g.G()*V_over_F_G + (1.0 - V_over_F_G)*l.G()
                        print('step', step, G, V_over_F, Ks)
                        step *= 0.5
#                        xs, ys = xs_working, ys_working
#                print('Gibbs increased', G/G_old)
            G_old = G
        # Experimental: damp composition steps that change Z too much.
        if check_V and iteration > 2:
            big_Z_change = (abs(1.0 - l_old.Z()/l.Z()) > dZ_allow or abs(1.0 - g_old.Z()/g.Z()) > dZ_allow)
            if restrained <= restrained_switch_count and big_Z_change:
                limited_Z = True
                step = .5  # .5
                while (abs(1.0 - l_old.Z()/l.Z()) > dZ_allow or abs(1.0 - g_old.Z()/g.Z()) > dZ_allow ) and step > 1e-8:
#                    Ks_working = [step*xo + (1.0 - step)*xi for xo, xi in zip(Ks, Ks_old)]
#                    Ks_working = [Ks[i]*(Ks_old[i]/Ks[i])**(1.0 - step) for i in cmps] # step = 0 - all new; step = 1 - all old
#                    Ks_working = [Ks_old[i]*(exp(lnphis_l[i])/exp(lnphis_g[i])/Ks_old[i])**(1.0 - step) for i in cmps]
                    ys_new = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(ys, ys_old)])
                    xs_new = normalize([step*xo + (1.0 - step)*xi for xo, xi in zip(xs, xs_old)])
#                    V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks_working, guess=V_over_F)
                    l = liquid_phase.to(xs_new, T=T, P=P, V=V)
                    g = gas_phase.to(ys_new, T=T, P=P, V=V)
#                    lnphis_g = g.lnphis()
#                    lnphis_l = l.lnphis()
                    print('step', step, V_over_F, g.Z())
                    step *= 0.5
                xs, ys = xs_new, ys_new
                lnphis_g = g.lnphis()
                lnphis_l = l.lnphis()
                Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
                V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
                restrained += 1
            elif restrained > restrained_switch_count and big_Z_change:
                restrained = 0
        # Check for negative fractions - normalize only if needed
        for xi in xs_new:
            if xi < 0.0:
                xs_new_sum_inv = 1.0/sum(abs(i) for i in xs_new)
                for i in cmps:
                    xs_new[i] = abs(xs_new[i])*xs_new_sum_inv
                break
        for yi in ys_new:
            if yi < 0.0:
                ys_new_sum_inv = 1.0/sum(abs(i) for i in ys_new)
                for i in cmps:
                    ys_new[i] = abs(ys_new[i])*ys_new_sum_inv
                break
        # Calculate the error using the new Ks and old compositions
        # Claimed error function in CONVENTIONAL AND RAPID FLASH
        # CALCULATIONS FOR THE SOAVE-REDLICH-KWONG AND PENG-ROBINSON EQUATIONS OF STATE
        err = 0.0
        # Suggested tolerance 1e-15
        try:
            for Ki, xi, yi in zip(Ks, xs, ys):
                # equivalent of fugacity ratio
                # Could divide by the old Ks as well.
                err_i = Ki*xi/yi - 1.0
                err += err_i*err_i
        except ZeroDivisionError:
            # A zero mole fraction in ys; redo the sum skipping those terms.
            err = 0.0
            for Ki, xi, yi in zip(Ks, xs, ys):
                try:
                    err_i = Ki*xi/yi - 1.0
                    err += err_i*err_i
                except ZeroDivisionError:
                    pass
        if err > 0.0 and err in (err1, err2, err3):
            raise OscillationError("Converged to cycle in errors, no progress being made")
        # Accept the new compositions
        xs_old, ys_old, Ks_old = xs, ys, Ks
        # if not limited_Z:
        #     assert xs == l.zs
        #     assert ys == g.zs
        xs, ys = xs_new, ys_new
        lnphis_g_old, lnphis_l_old = lnphis_g, lnphis_l
        l_old, g_old = l, g
#        print(err, V_over_F, Ks) # xs, ys
        # Check for a trivial (single-phase) solution.
        comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)])
        if comp_difference < trivial_solution_tol:
            raise TrivialSolutionError("Converged to trivial condition, compositions of both phases equal",
                                       comp_difference, iteration, err)
        if err < tol and not limited_Z:
            # Temporary!
            # err_mole_balance = 0.0
            # for i in cmps:
            #     err_mole_balance += abs(xs_old[i] * (1.0 - V_over_F_old) + ys_old[i] * V_over_F_old - zs[i])
            # if err_mole_balance < mole_balance_tol:
            #     return V_over_F, xs, ys, l, g, iteration, err
            if iteration == 0:
                # We are composition independent!
                g = gas_phase.to(ys_new, T=T, P=P, V=V)
                l = liquid_phase.to(xs_new, T=T, P=P, V=V)
                return V_over_F, xs_new, ys_new, l, g, iteration, err
            else:
                g = gas_phase.to(ys_old, T=T, P=P, V=V)
                l = liquid_phase.to(xs_old, T=T, P=P, V=V)
                return V_over_F_old, xs_old, ys_old, l, g, iteration, err
        # elif err < tol and limited_Z:
        #     print(l.fugacities()/np.array(g.fugacities()))
        err1, err2, err3 = err, err1, err2
    raise UnconvergedError('End of SS without convergence')
def sequential_substitution_2P_functional(zs, xs_guess, ys_guess,
                                liquid_args, gas_args, maxiter=1000, tol=1E-13,
                                trivial_solution_tol=1e-5, V_over_F_guess=0.5):
    """Lightweight two-phase successive-substitution flash.

    Functional variant of :func:`sequential_substitution_2P`: the phases are
    represented by pre-packed argument tuples consumed by ``lnphis_direct``
    instead of Phase objects, and no Phase objects are returned.

    Returns ``(V_over_F, xs, ys, iteration, err)`` from the last accepted
    iteration once ``err < tol``.

    Raises ValueError on a trivial solution or when `maxiter` is exhausted.
    """
    xs, ys = xs_guess, ys_guess
    V_over_F = V_over_F_guess
    N = len(zs)
    err = 0.0
    V_over_F_old = V_over_F
    Ks = [0.0]*N
    for iteration in range(maxiter):
        lnphis_g = lnphis_direct(ys, *gas_args)
        lnphis_l = lnphis_direct(xs, *liquid_args)
        for i in range(N):
            Ks[i] = exp(lnphis_l[i] - lnphis_g[i])
        V_over_F_old = V_over_F
        try:
            V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
        except:
            # Fall back to the checked (slower, more robust) inner loop.
            V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F, check=True)
        for xi in xs_new:
            if xi < 0.0:
                # Remove negative mole fractions - may help or may still fail
                xs_new_sum_inv = 0.0
                for xj in xs_new:
                    xs_new_sum_inv += abs(xj)
                xs_new_sum_inv = 1.0/xs_new_sum_inv
                for i in range(N):
                    xs_new[i] = abs(xs_new[i])*xs_new_sum_inv
                break
        for yi in ys_new:
            if yi < 0.0:
                ys_new_sum_inv = 0.0
                for yj in ys_new:
                    ys_new_sum_inv += abs(yj)
                ys_new_sum_inv = 1.0/ys_new_sum_inv
                for i in range(N):
                    ys_new[i] = abs(ys_new[i])*ys_new_sum_inv
                break
        err = 0.0
        for Ki, xi, yi in zip(Ks, xs, ys):
            # equivalent of fugacity ratio
            # Could divide by the old Ks as well.
            err_i = Ki*xi/yi - 1.0
            err += err_i*err_i
        xs_old, ys_old = xs, ys
        xs, ys = xs_new, ys_new
        comp_difference = 0.0
        for xi, yi in zip(xs, ys):
            comp_difference += abs(xi - yi)
        if comp_difference < trivial_solution_tol:
            raise ValueError("Converged to trivial condition, compositions of both phases equal")
        if err < tol:
            return V_over_F_old, xs_old, ys_old, iteration, err
    raise ValueError('End of SS without convergence')
def sequential_substitution_NP(T, P, zs, compositions_guesses, betas_guesses,
                               phases, maxiter=1000, tol=1E-13,
                               trivial_solution_tol=1e-5, ref_phase=2):
    """N-phase successive-substitution flash at fixed T and P.

    K-values of every non-reference phase are computed against the phase at
    index `ref_phase`, and the multiphase Rachford-Rice system is solved
    with ``Rachford_Rice_solutionN`` each iteration.

    `compositions_guesses` and `betas_guesses` are per-phase initial
    compositions and phase fractions (a missing final beta is filled in so
    the fractions sum to 1).

    Returns ``(betas, compositions, phases, iteration, err)``.

    Raises UnconvergedError when `maxiter` is exhausted.
    """
    compositions = compositions_guesses
    cmps = range(len(zs))
    phase_count = len(phases)
    phases_iter = range(phase_count)
    phase_iter_n1 = range(phase_count - 1)
    betas = betas_guesses
    if len(betas) < len(phases):
        # Fill in the missing phase fraction so that sum(betas) == 1.
        betas.append(1.0 - sum(betas))
    # Compositions excluding the reference phase, in K-value order.
    compositions_K_order = [compositions[i] for i in phases_iter if i != ref_phase]
    compositions_ref = compositions_guesses[ref_phase]
    for iteration in range(maxiter):
        phases = [phases[i].to_TP_zs(T=T, P=P, zs=compositions[i]) for i in phases_iter]
        lnphis = [phases[i].lnphis() for i in phases_iter]
        Ks = []
        lnphis_ref = lnphis[ref_phase]
        for i in phases_iter:
            if i != ref_phase:
                lnphis_i = lnphis[i]
                try:
                    Ks.append([exp(lnphis_ref[j] - lnphis_i[j]) for j in cmps])
                except OverflowError:
                    Ks.append([trunc_exp(lnphis_ref[j] - lnphis_i[j]) for j in cmps])
        beta_guesses = [betas[i] for i in phases_iter if i != ref_phase]
        #if phase_count == 3:
        #    Rachford_Rice_solution2(zs, Ks[0], Ks[1], beta_y=beta_guesses[0], beta_z=beta_guesses[1])
        betas_new, compositions_new = Rachford_Rice_solutionN(zs, Ks, beta_guesses)
        # Sort the order back
        beta_ref_new = betas_new[-1]
        betas_new = betas_new[:-1]
        betas_new.insert(ref_phase, beta_ref_new)
        compositions_ref_new = compositions_new[-1]
        compositions_K_order_new = compositions_new[:-1]
        compositions_new = list(compositions_K_order_new)
        compositions_new.insert(ref_phase, compositions_ref_new)
        err = 0.0
        for i in phase_iter_n1:
            Ks_i = Ks[i]
            ys = compositions_K_order[i]
            try:
                for Ki, xi, yi in zip(Ks_i, compositions_ref, ys):
                    err_i = Ki*xi/yi - 1.0
                    err += err_i*err_i
            except ZeroDivisionError:
                # A zero mole fraction; redo the sum skipping those terms.
                err = 0.0
                for Ki, xi, yi in zip(Ks_i, compositions_ref, ys):
                    try:
                        err_i = Ki*xi/yi - 1.0
                        err += err_i*err_i
                    except ZeroDivisionError:
                        pass
#        print(betas, Ks, 'calculated', err)
#        print(err)
        compositions = compositions_new
        compositions_K_order = compositions_K_order_new
        compositions_ref = compositions_ref_new
        betas = betas_new
        # TODO trivial solution check - how to handle - drop phase?
        # Check for
        # comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)])
        # if comp_difference < trivial_solution_tol:
        #     raise ValueError("Converged to trivial condition, compositions of both phases equal")
        if err < tol:
            return betas, compositions, phases, iteration, err
        # if iteration > 100:
        #     return betas, compositions, phases, iteration, err
    raise UnconvergedError('End of SS without convergence')
def sequential_substitution_Mehra_2P(T, P, zs, xs_guess, ys_guess, liquid_phase,
                                     gas_phase, maxiter=1000, tol=1E-13,
                                     trivial_solution_tol=1e-5,
                                     acc_frequency=3, acc_delay=5,
                                     lambda_max=3, lambda_min=0.0,
                                     V_over_F_guess=None):
    """Two-phase flash by successive substitution with Mehra acceleration.

    Every `acc_frequency` iterations (once more than `acc_delay` iterations
    have passed), the K-value update is accelerated with per-component
    relaxation factors computed from successive fugacity residual vectors
    and clamped to [`lambda_min`, `lambda_max`]; otherwise a plain
    fugacity-ratio update is applied.

    `zs` is the feed composition; `xs_guess`/`ys_guess` the initial phase
    composition guesses; `liquid_phase`/`gas_phase` trial Phase objects.

    Returns ``(V_over_F, xs, ys, l, g, iteration, err)``.

    Raises TrivialSolutionError if both phases converge to the same
    composition, and UnconvergedError when `maxiter` is exhausted.
    """
    xs, ys = xs_guess, ys_guess
    V_over_F = 0.5 if V_over_F_guess is None else V_over_F_guess
    N = len(zs)
    cmps = range(N)
    lambdas = [1.0]*N
    Ks = [ys[i]/xs[i] for i in cmps]
    gs = []  # history of fugacity residual vectors, consumed by the accelerator
    # `np` comes from the module-level fluids.numerics import; the redundant
    # function-local `import numpy as np` was removed.
    for iteration in range(maxiter):
        g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
        l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
        fugacities_g = g.fugacities()
        fugacities_l = l.fugacities()
        phis_g = g.phis()
        phis_l = l.phis()
        # Mehra, R. K., R. A. Heidemann, and K. Aziz. “An Accelerated
        # Successive Substitution Algorithm.” The Canadian Journal of
        # Chemical Engineering 61, no. 4 (August 1, 1983): 590-96.
        # https://doi.org/10.1002/cjce.5450610414.
        gis = np.log(fugacities_g) - np.log(fugacities_l)
        if not (iteration % acc_frequency) and iteration > acc_delay:
            gis_old = np.array(gs[-1])
            # Eq. 34 of Mehra et al.: accelerated relaxation factors.
            lambdas = np.abs(gis.T*gis/(gis_old.T*(gis - gis_old))).tolist()
            lambdas = [min(max(li, lambda_min), lambda_max) for li in lambdas]
            Ks = [Ks[i]*(phis_l[i]/phis_g[i]/Ks[i])**lambdas[i] for i in cmps]
        else:
            # Plain successive-substitution K update via fugacity ratios.
            Ks = [Ks[i]*fugacities_l[i]/fugacities_g[i] for i in cmps]
        gs.append(gis)
        V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
        # Check for negative fractions - normalize only if needed
        for xi in xs_new:
            if xi < 0.0:
                xs_new_sum = sum(abs(i) for i in xs_new)
                xs_new = [abs(i)/xs_new_sum for i in xs_new]
                break
        for yi in ys_new:
            if yi < 0.0:
                ys_new_sum = sum(abs(i) for i in ys_new)
                ys_new = [abs(i)/ys_new_sum for i in ys_new]
                break
        # Fugacity-ratio based error; suggested tolerance 1e-15.
        err = 0.0
        for Ki, xi, yi in zip(Ks, xs, ys):
            err_i = Ki*xi/yi - 1.0
            err += err_i*err_i
        # Accept the new compositions
        xs, ys = xs_new, ys_new
        comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)])
        if comp_difference < trivial_solution_tol:
            raise TrivialSolutionError("Converged to trivial condition, compositions of both phases equal",
                                       comp_difference, iteration, err)
        if err < tol:
            return V_over_F, xs, ys, l, g, iteration, err
    raise UnconvergedError('End of SS without convergence')
def sequential_substitution_GDEM3_2P(T, P, zs, xs_guess, ys_guess, liquid_phase,
                                     gas_phase, maxiter=1000, tol=1E-13,
                                     trivial_solution_tol=1e-5, V_over_F_guess=None,
                                     acc_frequency=3, acc_delay=3,
                                     ):
    """Two-phase sequential-substitution flash at fixed `T` and `P`,
    periodically accelerated by applying the three-point GDEM (General
    Dominant Eigenvalue Method) extrapolation to the ln(K) vector.

    Parameters mirror `sequential_substitution_2P`; additionally
    `acc_frequency` sets how often (in iterations) a GDEM step is
    attempted and `acc_delay` how many plain iterations run before
    any acceleration is allowed.

    Returns (V_over_F, xs, ys, liquid, gas, iteration, err) on success.
    Raises TrivialSolutionError if both phase compositions collapse
    together, or UnconvergedError if `maxiter` is exhausted.
    """
    xs, ys = xs_guess, ys_guess
    if V_over_F_guess is None:
        V_over_F = 0.5
    else:
        V_over_F = V_over_F_guess
    cmps = range(len(zs))
    # History of ln(K) vectors consumed by the GDEM extrapolation
    all_lnKs = []
    for iteration in range(maxiter):
        g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
        l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
        lnphis_g = g.lnphis()
        lnphis_l = l.lnphis()
        # Mehra, R. K., R. A. Heidemann, and K. Aziz. “An Accelerated Successive Substitution Algorithm.” The Canadian Journal of Chemical Engineering 61, no. 4 (August 1, 1983): 590-96. https://doi.org/10.1002/cjce.5450610414.
        lnKs = [(l - g) for l, g in zip(lnphis_l, lnphis_g)]
        # Periodic GDEM acceleration; the len() guard ensures at least three
        # stored histories exist so the three-point extrapolation is defined
        # even for small acc_delay/acc_frequency settings.
        if (not (iteration % acc_frequency) and iteration > acc_delay
                and len(all_lnKs) >= 3):
            dlnKs = gdem(lnKs, all_lnKs[-1], all_lnKs[-2], all_lnKs[-3])
            lnKs = [lnKs[i] + dlnKs[i] for i in cmps]
        all_lnKs.append(lnKs)
        Ks = [exp(lnKi) for lnKi in lnKs]
        V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
        # Check for negative fractions - normalize only if needed
        for xi in xs_new:
            if xi < 0.0:
                xs_new_sum = sum(abs(i) for i in xs_new)
                xs_new = [abs(i)/xs_new_sum for i in xs_new]
                break
        for yi in ys_new:
            if yi < 0.0:
                ys_new_sum = sum(abs(i) for i in ys_new)
                ys_new = [abs(i)/ys_new_sum for i in ys_new]
                break
        # Convergence measure: sum of squared (K_i*x_i/y_i - 1), an
        # equivalent of the fugacity ratio. Suggested tolerance 1e-15.
        err = 0.0
        for Ki, xi, yi in zip(Ks, xs, ys):
            err_i = Ki*xi/yi - 1.0
            err += err_i*err_i
        # Accept the new compositions
        xs, ys = xs_new, ys_new
        # Guard against converging to the trivial root (identical phases)
        comp_difference = sum([abs(xi - yi) for xi, yi in zip(xs, ys)])
        if comp_difference < trivial_solution_tol:
            raise TrivialSolutionError("Converged to trivial condition, compositions of both phases equal",
                                       comp_difference, iteration, err)
        if err < tol:
            return V_over_F, xs, ys, l, g, iteration, err
    raise UnconvergedError('End of SS without convergence')
def nonlin_equilibrium_NP(T, P, zs, compositions_guesses, betas_guesses,
                          phases, maxiter=1000, tol=1E-13,
                          trivial_solution_tol=1e-5, ref_phase=-1,
                          method='hybr', solve_kwargs=None, debug=False):
    """Solve an N-phase equilibrium at fixed T and P as a nonlinear system.

    The unknowns are the component mole flows of every phase except the
    reference phase (`ref_phase`, negative indices allowed); the reference
    phase holds whatever feed material remains. The residuals are the
    differences in ln(fugacity) of each component between each non-reference
    phase and the reference phase.

    Returns (betas, compositions, phases, errs, jac, iterations); when
    `debug` is True, additionally returns the final flows and the residual
    closure itself.

    NOTE(review): iteration state is shared through module-level globals
    `iterations` and `info` — this is not re-entrant/thread-safe.
    """
    if solve_kwargs is None:
        solve_kwargs = {}
    compositions = compositions_guesses
    N = len(zs)
    Nm1 = N - 1
    cmps = range(N)
    phase_count = len(phases)
    phase_iter = range(phase_count)
    # Normalize a negative reference-phase index to a positive one
    if ref_phase < 0:
        ref_phase = phase_count + ref_phase
    # Indices of the non-reference phases, and 0-based positions for them
    phase_iter_n1 = [i for i in phase_iter if i != ref_phase]
    phase_iter_n1_0 = range(phase_count-1)
    betas = betas_guesses
    # If one beta is missing, infer it so the phase fractions sum to 1
    if len(betas) < len(phases):
        betas.append(1.0 - sum(betas))
    # Initial mole flows: composition times phase fraction, non-ref phases only
    flows_guess = [compositions_guesses[j][i]*betas[j] for j in phase_iter_n1 for i in cmps]
    jac = True
    # These scipy methods cannot consume an analytical Jacobian
    if method in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
                  'diagbroyden', 'excitingmixing', 'krylov'):
        jac = False
    global iterations, info
    iterations = 0
    info = []
    def to_solve(flows, jac=jac):
        # Residual (and optionally Jacobian) closure over the flow unknowns.
        global iterations, info
        try:
            flows = flows.tolist()
        except:
            flows = list(flows)
        iterations += 1
        iter_flows = []
        iter_comps = []
        iter_betas = []
        iter_phases = []
        jac_arr = None
        remaining = zs
        # Clamp negative flows to a tiny positive value
        for i in range(len(flows)):
            if flows[i] < 0.0:
                flows[i] = 1e-100
        for j, k in zip(phase_iter_n1, phase_iter_n1_0):
            # Slice out this phase's component flows
            v = flows[k*N:k*N+N]
            vs = v
            vs_sum = sum(abs(i) for i in vs)
            if vs_sum == 0.0:
                # Handle the case an optimizer takes all of all compounds already
                ys = zs
            else:
                vs_sum_inv = 1.0/vs_sum
                ys = [abs(vs[i]*vs_sum_inv) for i in cmps]
                ys = normalize(ys)
            iter_flows.append(vs)
            iter_comps.append(ys)
            iter_betas.append(vs_sum) # Would be divided by feed but feed is zs = 1
            iter_phases.append(phases[j].to_TP_zs(T=T, P=P, zs=ys))
            remaining = [remaining[i] - vs[i] for i in cmps]
        # The reference phase receives whatever feed was not allocated
        flows_ref = remaining
        iter_flows.insert(ref_phase, remaining)
        beta_ref = sum(remaining)
        iter_betas.insert(ref_phase, beta_ref)
        xs_ref = normalize([abs(i) for i in remaining])
        iter_comps.insert(ref_phase, xs_ref)
        phase_ref = phases[ref_phase].to_TP_zs(T=T, P=P, zs=xs_ref)
        iter_phases.insert(ref_phase, phase_ref)
        lnphis_ref = phase_ref.lnphis()
        dlnfugacities_ref = phase_ref.dlnfugacities_dns()
        errs = []
        for k in phase_iter_n1:
            phase = iter_phases[k]
            lnphis = phase.lnphis()
            xs = iter_comps[k]
            for i in cmps:
                # This is identical to lnfugacity(i)^j - lnfugacity(i)^ref
                gi = trunc_log(xs[i]/xs_ref[i]) + lnphis[i] - lnphis_ref[i]
                errs.append(gi)
        if jac:
            # Analytical Jacobian: d(residual)/d(flow) via composition
            # derivatives of the log-fugacities of each phase pair
            jac_arr = [[0.0]*N*(phase_count-1) for i in range(N*(phase_count-1))]
            for ni, nj in zip(phase_iter_n1, phase_iter_n1_0):
                p = iter_phases[ni]
                dlnfugacities = p.dlnfugacities_dns()
                # Begin with the first row using ni, nj;
                for i in cmps:
                    for ki, kj in zip(phase_iter_n1, phase_iter_n1_0):
                        for j in cmps:
                            delta = 1.0 if nj == kj else 0.0
                            v_ref = dlnfugacities_ref[i][j]/beta_ref
                            jac_arr[nj*N + i][kj*N + j] = dlnfugacities[i][j]*delta/iter_betas[ni] + v_ref
        # Stash latest state so results can be recovered after the solve
        info[:] = iter_betas, iter_comps, iter_phases, errs, jac_arr, flows
        if jac:
            return errs, jac_arr
        return errs
    if method == 'newton_system':
        comp_val, iterations = newton_system(to_solve, flows_guess, jac=True,
                                             xtol=tol, damping=1,
                                             damping_func=damping_maintain_sign)
    else:
        def f_jac_numpy(flows_guess):
            # needed
            ans = to_solve(flows_guess)
            if jac:
                return np.array(ans[0]), np.array(ans[1])
            return np.array(ans)
        sln = root(f_jac_numpy, flows_guess, tol=tol, jac=(True if jac else None), method=method, **solve_kwargs)
        iterations = sln['nfev']
    # Recover the converged state from the last residual evaluation
    betas, compositions, phases, errs, jac, flows = info
    sln = (betas, compositions, phases, errs, jac, iterations)
    if debug:
        return sln, flows, to_solve
    return sln
def nonlin_spec_NP(guess, fixed_val, spec_val, zs, compositions_guesses, betas_guesses,
                   phases, iter_var='T', fixed_var='P', spec='H',
                   maxiter=1000, tol=1E-13,
                   trivial_solution_tol=1e-5, ref_phase=-1,
                   # method='hybr',
                   method='fsolve',
                   solve_kwargs=None, debug=False,
                   analytical_jac=True):
    """Solve an N-phase flash with one state variable fixed (`fixed_var` =
    `fixed_val`) and one property specification (`spec` = `spec_val`,
    e.g. 'H' or 'S'), iterating on `iter_var` (starting at `guess`).

    Unknowns are the component mole flows of every non-reference phase plus
    `iter_var`; residuals are ln-fugacity differences versus the reference
    phase plus the property-balance error. Returns
    (iter_val, betas, compositions, phases, errs, jac, iterations); with
    `debug` True, also the final flows and the residual closure.

    NOTE(review): uses module-level globals `iterations`/`info` for state
    sharing and Jacobian caching — not re-entrant/thread-safe.
    """
    if solve_kwargs is None:
        solve_kwargs = {}
    # Keyword dict reused for every phase construction; iter_var is updated
    # on each residual evaluation
    phase_kwargs = {fixed_var: fixed_val, iter_var: guess}
    compositions = compositions_guesses
    N = len(zs)
    Nm1 = N - 1
    cmps = range(N)
    phase_count = len(phases)
    phase_iter = range(phase_count)
    # Normalize a negative reference-phase index
    if ref_phase < 0:
        ref_phase = phase_count + ref_phase
    phase_iter_n1 = [i for i in phase_iter if i != ref_phase]
    phase_iter_n1_0 = range(phase_count-1)
    betas = betas_guesses
    if len(betas) < len(phases):
        betas.append(1.0 - sum(betas))
    # Unknown vector: non-reference phase flows, then the iteration variable
    guesses = [compositions_guesses[j][i]*betas[j] for j in phase_iter_n1 for i in cmps]
    guesses.append(guess)
    # Unbound methods looked up once per phase class for speed
    spec_callables = [getattr(phase.__class__, spec) for phase in phases]
    dlnphis_diter_s = 'dlnphis_d' + iter_var
    dlnphis_diter_callables = [getattr(phase.__class__, dlnphis_diter_s) for phase in phases]
    dspec_diter_s = 'd%s_d%s' %(spec, iter_var)
    dspec_diter_callables = [getattr(phase.__class__, dspec_diter_s) for phase in phases]
    dspec_dn_s = 'd%s_dns' %(spec)
    dspec_dn_callables = [getattr(phase.__class__, dspec_dn_s) for phase in phases]
    jac = True
    # These solvers cannot accept an analytical Jacobian directly
    if method in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
                  'diagbroyden', 'excitingmixing', 'krylov', 'fsolve'):
        jac = False
    global iterations, info
    iterations = 0
    info = []
    def to_solve(flows, jac=jac, skip_err=False):
        # Residual/Jacobian closure. With skip_err=True, the residual part
        # is skipped and state is recovered from `info` (Jacobian-only call
        # for fsolve's fprime, reusing the last residual evaluation).
        global iterations, info
        try:
            flows = flows.tolist()
        except:
            flows = list(flows)
        iter_val = flows[-1]
        phase_kwargs[iter_var] = iter_val
        flows = flows[:-1]
        iter_flows = []
        iter_comps = []
        iter_betas = []
        iter_phases = []
        jac_arr = None
        remaining = zs
        if not skip_err:
            # print(flows, iter_val)
            iterations += 1
            # Clamp negative flows to a tiny positive value
            for i in range(len(flows)):
                if flows[i] < 0.0:
                    flows[i] = 1e-100
            for j, k in zip(phase_iter_n1, phase_iter_n1_0):
                v = flows[k*N:k*N+N]
                vs = v
                vs_sum = sum(abs(i) for i in vs)
                if vs_sum == 0.0:
                    # Handle the case an optimizer takes all of all compounds already
                    ys = zs
                else:
                    vs_sum_inv = 1.0/vs_sum
                    ys = [abs(vs[i]*vs_sum_inv) for i in cmps]
                    ys = normalize(ys)
                iter_flows.append(vs)
                iter_comps.append(ys)
                iter_betas.append(vs_sum) # Would be divided by feed but feed is zs = 1
                iter_phases.append(phases[j].to_TP_zs(zs=ys, **phase_kwargs))
                remaining = [remaining[i] - vs[i] for i in cmps]
            # Reference phase takes the unallocated feed
            flows_ref = remaining
            iter_flows.insert(ref_phase, remaining)
            beta_ref = sum(remaining)
            iter_betas.insert(ref_phase, beta_ref)
            xs_ref = normalize([abs(i) for i in remaining])
            iter_comps.insert(ref_phase, xs_ref)
            phase_ref = phases[ref_phase].to_TP_zs(zs=xs_ref, **phase_kwargs)
            iter_phases.insert(ref_phase, phase_ref)
            lnphis_ref = phase_ref.lnphis()
            errs = []
            for k in phase_iter_n1:
                phase = iter_phases[k]
                lnphis = phase.lnphis()
                xs = iter_comps[k]
                for i in cmps:
                    # This is identical to lnfugacity(i)^j - lnfugacity(i)^ref
                    gi = trunc_log(xs[i]/xs_ref[i]) + lnphis[i] - lnphis_ref[i]
                    errs.append(gi)
            # Property balance residual: beta-weighted sum of the per-phase
            # spec property minus the specified value
            spec_phases = []
            spec_calc = 0.0
            for k in phase_iter:
                spec_phase = spec_callables[k](iter_phases[k])
                spec_phases.append(spec_phase)
                spec_calc += spec_phase*iter_betas[k]
            errs.append(spec_calc - spec_val)
        else:
            # Jacobian-only call: reuse state from the last residual pass
            iter_betas, iter_comps, iter_phases, errs, jac_arr, flows, iter_val_check, spec_phases = info
            beta_ref = iter_betas[ref_phase]
            xs_ref = iter_comps[ref_phase]
            phase_ref = iter_phases[ref_phase]
            lnphis_ref = phase_ref.lnphis()
        # print(errs[-1], 'err', iter_val, 'T')
        if jac:
            dlnfugacities_ref = phase_ref.dlnfugacities_dns()
            jac_arr = [[0.0]*(N*(phase_count-1) + 1) for i in range(N*(phase_count-1)+1)]
            for ni, nj in zip(phase_iter_n1, phase_iter_n1_0):
                p = iter_phases[ni]
                dlnfugacities = p.dlnfugacities_dns()
                # Begin with the first row using ni, nj;
                for i in cmps:
                    for ki, kj in zip(phase_iter_n1, phase_iter_n1_0):
                        for j in cmps:
                            delta = 1.0 if nj == kj else 0.0
                            v_ref = dlnfugacities_ref[i][j]/beta_ref
                            jac_arr[nj*N + i][kj*N + j] = dlnfugacities[i][j]*delta/iter_betas[ni] + v_ref
            # Last column: derivatives of the fugacity residuals with
            # respect to the iteration variable
            dlnphis_dspec = [dlnphis_diter_callables[i](phases[i]) for i in phase_iter]
            dlnphis_dspec_ref = dlnphis_dspec[ref_phase]
            for ni, nj in zip(phase_iter_n1, phase_iter_n1_0):
                p = iter_phases[ni]
                for i in cmps:
                    jac_arr[nj*N + i][-1] = dlnphis_dspec[ni][i] - dlnphis_dspec_ref[i]
            # last =
            # Bottom-right element: d(spec balance)/d(iter_var)
            dspec_calc = 0.0
            for k in phase_iter:
                dspec_calc += dspec_diter_callables[k](iter_phases[k])*iter_betas[k]
            jac_arr[-1][-1] = dspec_calc
            # Bottom row: d(spec balance)/d(flows)
            dspec_dns = [dspec_dn_callables[i](phases[i]) for i in phase_iter]
            dspec_dns_ref = dspec_dns[ref_phase]
            last_jac_row = jac_arr[-1]
            for ni, nj in zip(phase_iter_n1, phase_iter_n1_0):
                for i in cmps:
                    # What is wrong?
                    # H is multiplied by the phase fraction, of which this n is a part of
                    # So there must be two parts here
                    last_jac_row[nj*N + i] = ((iter_betas[ni]*dspec_dns[ni][i]/iter_betas[ni] - beta_ref*dspec_dns_ref[i]/beta_ref)
                                              + (spec_phases[ni] - spec_phases[ref_phase]))
            if skip_err:
                return jac_arr
        info[:] = iter_betas, iter_comps, iter_phases, errs, jac_arr, flows, iter_val, spec_phases
        if jac:
            return errs, jac_arr
        return errs
    if method == 'newton_system':
        comp_val, iterations = newton_system(to_solve, guesses, jac=True,
                                             xtol=tol, damping=1,
                                             damping_func=damping_maintain_sign)
    else:
        def f_jac_numpy(flows_guess):
            # needed
            ans = to_solve(flows_guess)
            if jac:
                return np.array(ans[0]), np.array(ans[1])
            return np.array(ans)
        def jac_numpy(flows_guess):
            # If fsolve asks for the Jacobian at the same point as the last
            # residual evaluation, reuse that state instead of recomputing
            if flows_guess.tolist() == info[5] + [info[6]]:
                a = np.array(to_solve(flows_guess, jac=True, skip_err=True))
                # b = np.array(to_solve(flows_guess, jac=True)[1])
                # from numpy.testing import assert_allclose
                # assert_allclose(a, b, rtol=1e-10)
                return a
            # print('fail jac', tuple(flows_guess.tolist()), tuple(info[5]))
            # print('new jac')
            return np.array(to_solve(flows_guess, jac=True)[1])
        if method == 'fsolve':
            # Need a function cache! 2 wasted fevals, 1 wasted jaceval
            if analytical_jac:
                jac = False
                sln, infodict, _, _ = fsolve(f_jac_numpy, guesses, fprime=jac_numpy, xtol=tol, full_output=1, **solve_kwargs)
            else:
                sln, infodict, _, _ = fsolve(f_jac_numpy, guesses, xtol=tol, full_output=1, **solve_kwargs)
            iterations = infodict['nfev']
        else:
            sln = root(f_jac_numpy, guesses, tol=tol, jac=(True if jac else None), method=method, **solve_kwargs)
            iterations = sln['nfev']
    # Recover the converged state from the last residual evaluation
    betas, compositions, phases, errs, jac, flows, iter_val, spec_phases = info
    sln = (iter_val, betas, compositions, phases, errs, jac, iterations)
    if debug:
        return sln, flows, to_solve
    return sln
def nonlin_2P(T, P, zs, xs_guess, ys_guess, liquid_phase,
              gas_phase, maxiter=1000, tol=1E-13,
              trivial_solution_tol=1e-5, V_over_F_guess=None,
              method='hybr'):
    """Two-phase TP flash solved as a single nonlinear system with
    ``scipy.optimize.root``.

    The unknowns are ln(K_i) for each component plus a logit-transformed
    vapor fraction (which keeps V/F strictly inside (0, 1)); the residuals
    are the K-value updates from the fugacity coefficients plus the
    Rachford-Rice error.

    Returns (V_over_F, xs, ys, liquid, gas, evaluations, total_error).
    """
    N = len(zs)
    cmps = range(N)
    xs, ys = xs_guess, ys_guess
    V_over_F = 0.5 if V_over_F_guess is None else V_over_F_guess

    Ks_guess = [yi/xi for yi, xi in zip(ys, xs)]

    # [function evaluation count, liquid phase, gas phase, error vector]
    state = [0, None, None, None]

    def residuals(unknowns):
        Ks = [trunc_exp(v) for v in unknowns[:-1]]
        # Logistic transform keeps the vapor fraction in (0, 1)
        V_over_F = 1.0/(1.0 + trunc_exp(-unknowns[-1]))

        xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]
        ys = [Ks[i]*xs[i] for i in cmps]

        g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
        l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
        lnphis_g = g.lnphis()
        lnphis_l = l.lnphis()

        new_Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
        VF_err = Rachford_Rice_flash_error(V_over_F, zs, new_Ks)
        err = [new_Ks[i] - Ks[i] for i in cmps] + [VF_err]
        state[1:] = l, g, err
        state[0] += 1
        return err

    # Inverse logit of the vapor-fraction guess
    VF_guess_in_basis = -log((1.0-V_over_F)/(V_over_F-0.0))
    guesses = [log(Ki) for Ki in Ks_guess]
    guesses.append(VF_guess_in_basis)

    sol = root(residuals, guesses, tol=tol, method=method)
    # No reliable way to get number of iterations from OptimizeResult;
    # report the residual-evaluation count instead.
    solution = sol.x.tolist()
    V_over_F = 1.0/(1.0 + exp(-solution[-1]))
    Ks = [exp(solution[i]) for i in cmps]
    xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]
    ys = [Ks[i]*xs[i] for i in cmps]

    tot_err = 0.0
    for ei in state[3]:
        tot_err += abs(ei)
    return V_over_F, xs, ys, state[1], state[2], state[0], tot_err
def nonlin_2P_HSGUAbeta(spec, spec_var, iter_val, iter_var, fixed_val,
                        fixed_var, zs, xs_guess, ys_guess, liquid_phase,
                        gas_phase, maxiter=1000, tol=1E-13,
                        trivial_solution_tol=1e-5, V_over_F_guess=None,
                        method='hybr'
                        ):
    """Two-phase flash with one state variable fixed (`fixed_var`) and one
    property specification (`spec_var` = `spec`, e.g. H/S/G/U/A), solved as
    a nonlinear system over ln(K_i), a logit-transformed vapor fraction, and
    the iteration variable `iter_var` (initial value `iter_val`).

    Returns (V_over_F, iter_val, xs, ys, liquid, gas, evaluations,
    total_error).
    """
    cmps = range(len(zs))
    xs, ys = xs_guess, ys_guess
    if V_over_F_guess is None:
        V_over_F = 0.5
    else:
        V_over_F = V_over_F_guess
    Ks_guess = [ys[i]/xs[i] for i in cmps]
    # Phase-construction kwargs, mutated in place on each evaluation
    kwargs_l = {'zs': xs_guess, fixed_var: fixed_val}
    kwargs_g = {'zs': ys_guess, fixed_var: fixed_val}
    # [eval count, liquid, gas, error vector, spec-balance error]
    info = [0, None, None, None, None]
    def to_solve(lnKsVFTransHSGUABeta):
        # Unknowns: ln K_i..., logit(V/F), iter_var value
        Ks = [trunc_exp(i) for i in lnKsVFTransHSGUABeta[:-2]]
        V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + trunc_exp(-lnKsVFTransHSGUABeta[-2]))) # Translation function - keep it zero to 1
        iter_val = lnKsVFTransHSGUABeta[-1]
        xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]
        ys = [Ks[i]*xs[i] for i in cmps]
        kwargs_l[iter_var] = iter_val
        kwargs_l['zs'] = xs
        kwargs_g[iter_var] = iter_val
        kwargs_g['zs'] = ys
        g = gas_phase.to(**kwargs_g)
        l = liquid_phase.to(**kwargs_l)
        lnphis_g = g.lnphis()
        lnphis_l = l.lnphis()
        new_Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]
        VF_err = Rachford_Rice_flash_error(V_over_F, zs, new_Ks)
        # Beta-weighted property balance against the specification
        val_l = getattr(l, spec_var)()
        val_g = getattr(g, spec_var)()
        val = V_over_F*val_g + (1.0 - V_over_F)*val_l
        other_err = val - spec
        err = [new_Ks[i] - Ks[i] for i in cmps] + [VF_err, other_err]
        info[1:] = l, g, err, other_err
        info[0] += 1
        # print(lnKsVFTransHSGUABeta, err)
        return err
    # Inverse logit of the vapor-fraction guess
    VF_guess_in_basis = -log((1.0-V_over_F)/(V_over_F-0.0))
    guesses = [log(i) for i in Ks_guess]
    guesses.append(VF_guess_in_basis)
    guesses.append(iter_val)
    # solution, iterations = broyden2(guesses, fun=to_solve, jac=False, xtol=1e-7,
    #                                 maxiter=maxiter, jac_has_fun=False, skip_J=True)
    sol = root(to_solve, guesses, tol=tol, method=method)
    solution = sol.x.tolist()
    # Undo the transforms to recover V/F, iter_var, and the K values
    V_over_F = (0.0 + (1.0 - 0.0)/(1.0 + exp(-solution[-2])))
    iter_val = solution[-1]
    Ks = [exp(solution[i]) for i in cmps]
    xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]
    ys = [Ks[i]*xs[i] for i in cmps]
    # Total error: sum of absolute residuals from the last evaluation
    tot_err = 0.0
    for v in info[3]:
        tot_err += abs(v)
    return V_over_F, solution[-1], xs, ys, info[1], info[2], info[0], tot_err
#def broyden2(xs, fun, jac, xtol=1e-7, maxiter=100, jac_has_fun=False,
# skip_J=False):
def nonlin_n_2P(T, P, zs, xs_guess, ys_guess, liquid_phase,
                gas_phase, maxiter=1000, tol=1E-13,
                trivial_solution_tol=1e-5, V_over_F_guess=None,
                method='hybr'):
    """Two-phase TP flash using the vapor-phase component mole numbers as
    the independent variables.

    The residuals are the component fugacity differences between the gas
    and liquid phases, which vanish at equilibrium. Returns only the
    converged (xs, ys) compositions.
    """
    N = len(zs)
    cmps = range(N)
    xs, ys = xs_guess, ys_guess
    V_over_F = 0.45 if V_over_F_guess is None else V_over_F_guess

    # Initial vapor mole numbers from the composition guess and beta
    vapor_ns = [yi*V_over_F for yi in ys]

    # [evaluation count, liquid phase, gas phase, error vector]
    progress = [0, None, None, None]

    def fugacity_errs(ns):
        ys = normalize(ns)
        # Liquid holds whatever feed is not in the vapor
        liquid_ns = [zi - ni for zi, ni in zip(zs, ns)]
        xs = normalize(liquid_ns)
        g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
        l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
        fugacities_g = g.fugacities()
        fugacities_l = l.fugacities()
        # Equal fugacities in both phases at the solution
        err = [fg - fl for fg, fl in zip(fugacities_g, fugacities_l)]
        progress[1:] = l, g, err
        progress[0] += 1
        return err

    sol = root(fugacity_errs, vapor_ns, tol=tol, method=method)
    ns_sln = sol.x.tolist()
    ys = normalize(ns_sln)
    xs = normalize([zi - ni for zi, ni in zip(zs, ns_sln)])
    return xs, ys
def nonlin_2P_newton(T, P, zs, xs_guess, ys_guess, liquid_phase,
                     gas_phase, maxiter=1000, xtol=1E-10,
                     trivial_solution_tol=1e-5, V_over_F_guess=None):
    """Two-phase TP flash solved with a full Newton method and an
    analytically assembled Jacobian.

    Unknowns are ln(K_i) for each component plus the vapor fraction;
    residuals are ln(K_i) - ln(phi_l_i) + ln(phi_g_i) plus the
    Rachford-Rice error. Returns
    (V_over_F, xs, ys, liquid, gas, total_error, J, iterations).
    """
    N = len(zs)
    cmps = range(N)
    xs, ys = xs_guess, ys_guess
    if V_over_F_guess is None:
        V_over_F = 0.5
    else:
        V_over_F = V_over_F_guess
    Ks_guess = [ys[i]/xs[i] for i in cmps]
    info = []
    def to_solve(lnKsVF):
        # Jacobian verified. However, very sketchy - mole fractions may want
        # to go negative.
        lnKs = lnKsVF[:-1]
        Ks = [exp(lnKi) for lnKi in lnKs]
        VF = float(lnKsVF[-1])
        # if VF > 1:
        #     VF = 1-1e-15
        # if VF < 0:
        #     VF = 1e-15
        xs = [zi/(1.0 + VF*(Ki - 1.0)) for zi, Ki in zip(zs, Ks)]
        ys = [Ki*xi for Ki, xi in zip(Ks, xs)]
        g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
        l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
        lnphis_g = g.lnphis()
        lnphis_l = l.lnphis()
        size = N + 1
        J = [[None]*size for i in range(size)]
        d_lnphi_dxs = l.dlnphis_dzs()
        d_lnphi_dys = g.dlnphis_dzs()
        J[N][N] = 1.0
        # Last column except last value; believed correct
        # Was not correct when compared to numerical solution
        Ksm1 = [Ki - 1.0 for Ki in Ks]
        # 1/(1 + VF*(K-1))^2 terms of the Rachford-Rice derivatives
        RR_denoms_inv2 = []
        for i in cmps:
            t = 1.0 + VF*Ksm1[i]
            RR_denoms_inv2.append(1.0/(t*t))
        RR_terms = [zs[k]*Ksm1[k]*RR_denoms_inv2[k] for k in cmps]
        for i in cmps:
            value = 0.0
            d_lnphi_dxs_i, d_lnphi_dys_i = d_lnphi_dxs[i], d_lnphi_dys[i]
            for k in cmps:
                value += RR_terms[k]*(d_lnphi_dxs_i[k] - Ks[k]*d_lnphi_dys_i[k])
            J[i][-1] = value
        # Main body - expensive to compute! Lots of elements
        zsKsRRinvs2 = [zs[j]*Ks[j]*RR_denoms_inv2[j] for j in cmps]
        one_m_VF = 1.0 - VF
        for i in cmps:
            Ji = J[i]
            d_lnphi_dxs_is, d_lnphi_dys_is = d_lnphi_dxs[i], d_lnphi_dys[i]
            for j in cmps:
                value = 1.0 if i == j else 0.0
                value += zsKsRRinvs2[j]*(VF*d_lnphi_dxs_is[j] + one_m_VF*d_lnphi_dys_is[j])
                Ji[j] = value
        # Last row except last value - good, working
        # Diff of RR w.r.t each log K
        # NOTE(review): the expression below algebraically reduces to
        # zsKsRRinvs2[j]*(one_m_VF + VF) == zsKsRRinvs2[j]; the split form
        # is kept as-is — confirm against the derivation before simplifying.
        bottom_row = J[-1]
        for j in cmps:
            bottom_row[j] = zsKsRRinvs2[j]*(one_m_VF) + VF*zsKsRRinvs2[j]
        # Last value - good, working, being overwritten
        dF_ncp1_dB = 0.0
        for i in cmps:
            dF_ncp1_dB -= RR_terms[i]*Ksm1[i]
        J[-1][-1] = dF_ncp1_dB
        err_RR = Rachford_Rice_flash_error(VF, zs, Ks)
        Fs = [lnKi - lnphi_l + lnphi_g for lnphi_l, lnphi_g, lnKi in zip(lnphis_l, lnphis_g, lnKs)]
        Fs.append(err_RR)
        info[:] = VF, xs, ys, l, g, Fs, J
        return Fs, J
    guesses = [log(i) for i in Ks_guess]
    guesses.append(V_over_F)
    # TODO trust-region
    sln, iterations = newton_system(to_solve, guesses, jac=True, xtol=xtol,
                                    maxiter=maxiter,
                                    damping_func=make_damp_initial(steps=3),
                                    damping=.5)
    # Recover the converged state from the last residual evaluation
    VF, xs, ys, l, g, Fs, J = info
    tot_err = 0.0
    for Fi in Fs:
        tot_err += abs(Fi)
    return VF, xs, ys, l, g, tot_err, J, iterations
def gdem(x, x1, x2, x3):
    """General Dominant Eigenvalue Method (GDEM) extrapolation increment.

    Given the current iterate vector `x` and the three preceding iterates
    `x1`, `x2`, `x3` (most recent first), compute the two dominant
    eigenvalue multipliers of the successive-substitution sequence and
    return the extrapolation step to add to `x`.
    """
    N = len(x)
    dx2 = [x[i] - x3[i] for i in range(N)]
    dx1 = [x[i] - x2[i] for i in range(N)]
    dx = [x[i] - x1[i] for i in range(N)]

    # Inner products between the difference vectors
    b01 = b02 = b12 = b11 = b22 = 0.0
    for i in range(N):
        a, b, c = dx[i], dx1[i], dx2[i]
        b01 += a*b
        b02 += a*c
        b12 += b*c
        b11 += b*b
        b22 += c*c

    # Solve the 2x2 normal equations for the two multipliers
    den_inv = 1.0/(b11*b22 - b12*b12)
    mu1 = den_inv*(b02*b12 - b01*b22)
    mu2 = den_inv*(b01*b12 - b02*b11)

    factor = 1.0/(1.0 + mu1 + mu2)
    return [factor*(dx[i] - mu2*dx1[i]) for i in range(N)]
def minimize_gibbs_2P_transformed(T, P, zs, xs_guess, ys_guess, liquid_phase,
                                  gas_phase, maxiter=1000, tol=1E-13,
                                  trivial_solution_tol=1e-5, V_over_F_guess=None):
    """Two-phase TP flash by direct minimization of the dimensionless
    mixture Gibbs energy G/(R*T) over transformed vapor-flow variables.

    Returns (V_over_F, xs, ys, G_liquid, G_gas, nfev, objective).

    NOTE(review): the transform vs_i = zs_i/(1 - flows_v_i) does not bound
    the vapor flows to [0, zs_i] for arbitrary optimizer steps — confirm
    the intended variable transformation against the caller.
    """
    if V_over_F_guess is None:
        V_over_F = 0.5
    else:
        V_over_F = V_over_F_guess
    # Initial vapor flows from the composition guess and beta
    flows_v = [yi*V_over_F for yi in ys_guess]
    cmps = range(len(zs))
    # Holds (G_liquid, G_gas) from the most recent objective evaluation
    calc_phases = []
    def G(flows_v):
        # Map the transformed variables back to vapor/liquid mole numbers
        vs = [(0.0 + (zs[i] - 0.0)/(1.0 - flows_v[i])) for i in cmps]
        ls = [zs[i] - vs[i] for i in cmps]
        xs = normalize(ls)
        ys = normalize(vs)
        VF = flows_v[0]/ys[0]
        g = gas_phase.to_TP_zs(T=T, P=P, zs=ys)
        l = liquid_phase.to_TP_zs(T=T, P=P, zs=xs)
        G_l = l.G()
        G_g = g.G()
        calc_phases[:] = G_l, G_g
        # Beta-weighted, nondimensionalized total Gibbs energy
        GE_calc = (G_g*VF + (1.0 - VF)*G_l)/(R*T)
        return GE_calc
    ans = minimize(G, flows_v)
    # Recover compositions and vapor fraction from the optimizer solution
    flows_v = ans['x']
    vs = [(0.0 + (zs[i] - 0.0) / (1.0 - flows_v[i])) for i in cmps]
    ls = [zs[i] - vs[i] for i in cmps]
    xs = normalize(ls)
    ys = normalize(vs)
    V_over_F = flows_v[0] / ys[0]
    return V_over_F, xs, ys, calc_phases[0], calc_phases[1], ans['nfev'], ans['fun']
def minimize_gibbs_NP_transformed(T, P, zs, compositions_guesses, phases,
                                  betas, tol=1E-13,
                                  method='L-BFGS-B', opt_kwargs=None, translate=False):
    """N-phase TP flash by direct minimization of the mixture Gibbs energy
    over the component mole flows of the first (N-1) phases; the last phase
    receives the unallocated feed.

    With `translate` True (forced for 'differential_evolution'), the flows
    are represented through a logistic transform that keeps them within the
    remaining feed. Returns
    (betas, compositions, phases, iterations, objective).

    NOTE(review): this routine is experimental — it contains dead branches
    (`if 0:`), debug printing on each new minimum, Jacobian/Hessian code
    paths that appear unfinished (e.g. `flows[k][i]` indexing into a flat
    list in the jac loop), and module-level global state (`min_G`,
    `iterations`), so it is not re-entrant. Verify before relying on the
    'newton_minimize' or generic `minimize` paths.
    """
    if opt_kwargs is None:
        opt_kwargs = {}
    N = len(zs)
    cmps = range(N)
    phase_count = len(phases)
    phase_iter = range(phase_count)
    phase_iter_n1 = range(phase_count-1)
    if method == 'differential_evolution':
        translate = True
#    RT_inv = 1.0/(R*T)
    # Only exist for the first n phases
    # Do not multiply by zs - we are already multiplying by a composition
    flows_guess = [compositions_guesses[j][i]*betas[j] for j in range(phase_count - 1) for i in cmps]
    # Convert the flow guesses to the basis used
    remaining = zs
    if translate:
        # Inverse logistic transform of each phase's flows against the
        # feed remaining after the previous phases
        flows_guess_basis = []
        for j in range(phase_count-1):
            phase_guess = flows_guess[j*N:j*N+N]
            flows_guess_basis.extend([-trunc_log((remaining[i]-phase_guess[i])/(phase_guess[i]-0.0)) for i in cmps])
            remaining = [remaining[i] - phase_guess[i] for i in cmps]
    else:
        flows_guess_basis = flows_guess
    global min_G, iterations
    jac, hess = False, False
    real_min = False
    min_G = 1e100
    iterations = 0
    info = []
    last = []
    def G(flows):
        # Objective closure: maps the (possibly transformed) flow variables
        # to phase compositions/betas and evaluates the total Gibbs energy.
        global min_G, iterations
        try:
            flows = flows.tolist()
        except:
            flows = list(flows)
        iterations += 1
        iter_flows = []
        iter_comps = []
        iter_betas = []
        iter_phases = []
        remaining = zs
        if not translate:
            # Clamp tiny/negative flows in the untransformed basis
            for i in range(len(flows)):
                if flows[i] < 1e-10:
                    flows[i] = 1e-10
        for j in phase_iter:
            v = flows[j*N:j*N+N]
            # Mole flows of phase0/vapor
            if j == phase_count - 1:
                # Last phase takes whatever feed remains
                vs = remaining
            else:
                if translate:
                    vs = [(0.0 + (remaining[i] - 0.0)/(1.0 + trunc_exp(-v[i]))) for i in cmps]
                else:
                    vs = v
            vs_sum = sum(abs(i) for i in vs)
            if vs_sum == 0.0:
                # Handle the case an optimizer takes all of all compounds already
                ys = zs
            else:
                vs_sum_inv = 1.0/vs_sum
                ys = [abs(vs[i]*vs_sum_inv) for i in cmps]
                ys = normalize(ys)
            iter_flows.append(vs)
            iter_comps.append(ys)
            iter_betas.append(vs_sum) # Would be divided by feed but feed is zs = 1
            remaining = [remaining[i] - vs[i] for i in cmps]
        G = 0.0
        jac_array = []
        for j in phase_iter:
            comp = iter_comps[j]
            phase = phases[j].to_TP_zs(T=T, P=P, zs=comp)
            lnphis = phase.lnphis()
            if real_min:
                # fugacities = phase.fugacities()
                # fugacities = phase.phis()
                #G += sum([iter_flows[j][i]*trunc_log(fugacities[i]) for i in cmps])
                G += phase.G()*iter_betas[j]
            else:
                # Reduced Gibbs energy contribution: n_i*(ln x_i + ln phi_i)
                for i in cmps:
                    G += iter_flows[j][i]*(trunc_log(comp[i]) + lnphis[i])
            iter_phases.append(phase)
        if 0:
            # Dead branch kept from development — fugacity-difference metric
            fugacities_last = iter_phases[-1].fugacities()
#            G = 0.0
            for j in phase_iter_n1:
                fugacities = iter_phases[j].fugacities()
                G += sum([abs(fugacities_last[i] - fugacities[i]) for i in cmps])
#            lnphis = phase.lnphis()
#        if real_min:
#            G += G_base
#        # if not jac:
#        for j in phase_iter:
#            comp = iter_comps[j]
#            G += phase.G()*iter_betas[j]
#            if jac:
#                r = []
#                for i in cmps:
#                    v = (log())
#                    jac_array.append([log()])
        jac_arr = []
        comp = iter_comps[0]
        phase = iter_phases[0]
        lnphis = phase.lnphis()
        base = [log(xi) + lnphii for xi, lnphii in zip(comp, lnphis)]
        if jac:
            for j in range(1, phase_count):
                comp = iter_comps[j]
                phase = iter_phases[j]
                lnphis = phase.lnphis()
                jac_arr.extend([ref - (log(xi) + lnphii) for ref, xi, lnphii in zip(base, comp, lnphis)])
        # NOTE(review): jac_arr is rebuilt from scratch here, discarding the
        # values computed just above — presumably a newer derivation in
        # progress; the inner loop below does not store its result either.
        jac_arr = []
        comp_last = iter_comps[-1]
        phase_last = iter_phases[-1]
        flows_last = iter_flows[-1]
        lnphis_last = phase_last.lnphis()
        dlnphis_dns_last = phase_last.dlnphis_dns()
        for j in phase_iter_n1:
            comp = iter_comps[j]
            phase = iter_phases[j]
            flows = iter_flows[j]
            lnphis = phase.lnphis()
            dlnphis_dns = phase.dlnphis_dns()
            for i in cmps:
                v = 0
                for k in cmps:
                    v += flows[k][i]*lnphis[k][i]
                    v -= flows_last[i]*dlnphis_dns_last[k][i]
                v += lnphis[i] + log(comp[i])
        if G < min_G:
            # 'phases', iter_phases
            print('new min G', G, 'betas', iter_betas, 'comp', iter_comps)
            info[:] = iter_betas, iter_comps, iter_phases, G
            min_G = G
        last[:] = iter_betas, iter_comps, iter_phases, G
        if hess:
            base = iter_phases[0].dlnfugacities_dns()
            p1 = iter_phases[1].dlnfugacities_dns()
            dlnphis_dns = [i.dlnphis_dns() for i in iter_phases]
            dlnphis_dns0 = iter_phases[0].dlnphis_dns()
            dlnphis_dns1 = iter_phases[1].dlnphis_dns()
            xs, ys = iter_comps[0], iter_comps[1]
            hess_arr = []
            beta = iter_betas[0]
            hess_arr = [[0.0]*N*(phase_count-1) for i in range(N*(phase_count-1))]
            for n in range(1, phase_count):
                for m in range(1, phase_count):
                    for i in cmps:
                        for j in cmps:
                            delta = 1.0 if i == j else 0.0
                            v = 1.0/iter_betas[n]*(1.0/iter_comps[n][i]*delta
                                                   - 1.0 + dlnphis_dns[n][i][j])
                            v += 1.0/iter_betas[0]*(1.0/iter_comps[0][i]*delta
                                                    - 1.0 + dlnphis_dns[0][i][j])
                            hess_arr[(n-1)*N+i][(m-1)*N+j] = v
#
#            for n in range(1, phase_count):
#                for i in cmps:
#                    r = []
#                    for j in cmps:
#                        v = 0.0
#                        for m in phase_iter:
#                            delta = 1.0 if i ==j else 0.0
#                            v += 1.0/iter_betas[m]*(1.0/iter_comps[m][i]*delta
#                                               - 1.0 + dlnphis_dns[m][i][j])
#
#                        # How the heck to make this multidimensional?
#                        # v = 1.0/(beta*(1.0 - beta))*(zs[i]*delta/(xs[i]*ys[i])
#                        #                  - 1.0 + (1.0 - beta)*dlnphis_dns0[i][j]
#                        #                  + beta*dlnphis_dns1[i][j])
#
#                        # v = base[i][j] + p1[i][j]
#                        r.append(v)
#                    hess_arr.append(r)
            # Going to be hard to figure out
#            for j in range(1, phase_count):
#                comp = iter_comps[j]
#                phase = iter_phases[j]
#                dlnfugacities_dns = phase.dlnfugacities_dns()
#                row = [base[i] + dlnfugacities_dns[i] for i in cmps]
#                hess_arr = row
#                hess_arr.append(row)
            return G, jac_arr, hess_arr
        if jac:
            return G, np.array(jac_arr)
        return G
#    ans = None
    if method == 'differential_evolution':
        from scipy.optimize import differential_evolution
        real_min = True
        translate = True
        # Baseline Gibbs energy: minimum single-phase value of the feed
        G_base = 1e100
        for p in phases:
            G_calc = p.to(T=T,P=P, zs=zs).G()
            if G_base > G_calc:
                G_base = G_calc
        jac = hess = False
#        print(G(list(flows_guess_basis)))
        ans = differential_evolution(G, [(-30.0, 30.0) for i in cmps for j in range(phase_count-1)], **opt_kwargs)
#        ans = differential_evolution(G, [(-100.0, 100.0) for i in cmps for j in range(phase_count-1)], **opt_kwargs)
        objf = float(ans['fun'])
    elif method == 'newton_minimize':
        import numdifftools as nd
        jac = True
        hess = True
        initial_hess = nd.Hessian(lambda x: G(x)[0], step=1e-4)(flows_guess_basis)
        ans, iters = newton_minimize(G, flows_guess_basis, jac=True, hess=True, xtol=tol, ytol=None, maxiter=100, damping=1.0,
                                     damping_func=damping_maintain_sign)
        objf = None
    else:
        jac = True
        hess = True
        import numdifftools as nd
        def hess_fun(flows):
            return np.array(G(flows)[2])
#        hess_fun = lambda flows_guess_basis: np.array(G(flows_guess_basis)[2])
#        nd.Jacobian(G, step=1e-5)
        # trust-constr special handling to add constraints
        def fun_and_jac(x):
            x, j, _ = G(x)
            return x, np.array(j)
        ans = minimize(fun_and_jac, flows_guess_basis, jac=True, hess=hess_fun, method=method, tol=tol, **opt_kwargs)
        objf = float(ans['fun'])
#    G(ans['x']) # Make sure info has right value
#    ans['fun'] *= R*T
    # Best solution found (recorded whenever a new minimum G was seen)
    betas, compositions, phases, objf = info#info
    return betas, compositions, phases, iterations, objf
def TP_solve_VF_guesses(zs, method, constants, correlations,
                        T=None, P=None, VF=None,
                        maxiter=50, xtol=1E-7, ytol=None,
                        bounded=False,
                        user_guess=None, last_conv=None):
    """Dispatch to an initialization model to obtain a T/P/VF guess for a
    vapor-fraction flash.

    `method` selects among the ideal-Psat, Wilson, and Tb/Tc/Pc correlation
    guesses, a fixed STP evaluation, or the previously converged result
    (`last_conv`). Raises ValueError for a missing last-converged value or
    an unknown method.
    """
    if method == IDEAL_PSAT:
        return flash_ideal(zs=zs, funcs=correlations.VaporPressures,
                           Tcs=constants.Tcs, T=T, P=P, VF=VF)
    if method == WILSON_GUESS:
        return flash_wilson(zs, Tcs=constants.Tcs, Pcs=constants.Pcs,
                            omegas=constants.omegas, T=T, P=P, VF=VF)
    if method == TB_TC_GUESS:
        return flash_Tb_Tc_Pc(zs, Tbs=constants.Tbs, Tcs=constants.Tcs,
                              Pcs=constants.Pcs, T=T, P=P, VF=VF)
    # Simple return values - not going through a model
    if method == STP_T_GUESS:
        return flash_ideal(zs=zs, funcs=correlations.VaporPressures,
                           Tcs=constants.Tcs, T=298.15, P=101325.0)
    if method == LAST_CONVERGED:
        if last_conv is None:
            raise ValueError("No last converged")
        return last_conv
    raise ValueError("Could not converge")
def dew_P_newton(P_guess, T, zs, liquid_phase, gas_phase,
                 maxiter=200, xtol=1E-10, xs_guess=None,
                 max_step_damping=1e5,
                 trivial_solution_tol=1e-4):
    """Dew-point pressure at fixed T by a Newton solve over ln(K_i) and P,
    using a numerical Jacobian of the fugacity-equality residuals.

    At the dew point V/F is fixed at 1.0, so the vapor composition equals
    the feed `zs`; that is why `zs` is returned in the vapor-composition
    slot. Returns (P, xs, ys, iterations).
    """
    # Trial function only
    V = None
    N = len(zs)
    cmps = range(N)
    xs = zs if xs_guess is None else xs_guess
    # Dew point: all feed is vapor
    V_over_F = 1.0
    def to_solve(lnKsP):
        # d(fl_i - fg_i)/d(ln K,i) -
        # rest is less important
        # d d(fl_i - fg_i)/d(P) should be easy
        Ks = [trunc_exp(i) for i in lnKsP[:-1]]
        P = lnKsP[-1]
        xs = [zs[i]/(1.0 + V_over_F*(Ks[i] - 1.0)) for i in cmps]
        ys = [Ks[i]*xs[i] for i in cmps]
        g = gas_phase.to(ys, T=T, P=P, V=V)
        l = liquid_phase.to(xs, T=T, P=P, V=V)
        fugacities_l = l.fugacities()
        fugacities_g = g.fugacities()
        VF_err = Rachford_Rice_flash_error(V_over_F, zs, Ks)
        # Residuals: fugacity equality per component, then the RR error
        errs = [fi_l - fi_g for fi_l, fi_g in zip(fugacities_l, fugacities_g)]
        errs.append(VF_err)
        return errs
    lnKs_guess = [log(zs[i]/xs[i]) for i in cmps]
    lnKs_guess.append(P_guess)
    def jac(lnKsP):
        # Numerical Jacobian of the residual vector
        j = jacobian(to_solve, lnKsP, scalar=False)
        return j
    lnKsP, iterations = newton_system(to_solve, lnKs_guess, jac=jac, xtol=xtol)
    xs = [zs[i]/(1.0 + V_over_F*(exp(lnKsP[i]) - 1.0)) for i in cmps]
#    ys = [exp(lnKsP[i])*xs[i] for i in cmps]
    return lnKsP[-1], xs, zs, iterations
def dew_bubble_newton_zs(guess, fixed_val, zs, liquid_phase, gas_phase,
                         iter_var='T', fixed_var='P', V_over_F=1, # 1 = dew, 0 = bubble
                         maxiter=200, xtol=1E-10, comp_guess=None,
                         max_step_damping=1e5, damping=1.0,
                         trivial_solution_tol=1e-4, debug=False,
                         method='newton', opt_kwargs=None):
    '''Solve a dew (`V_over_F` = 1) or bubble (`V_over_F` = 0) point with a
    full multidimensional Newton method. The unknown vector is the incipient
    phase's composition plus the iteration variable (`iter_var`, 'T' or 'P');
    the equations are the N equality-of-fugacity conditions plus the
    constraint that the incipient composition sums to one.

    Returns ``[iter_val, comp, iter_phase_obj, const_phase_obj, iterations,
    tot_err]``; if `debug` is True, the objective function closure is
    appended as a second return value.

    Raises ValueError for an unsupported `V_over_F`, for convergence to the
    trivial (equal-composition) solution, or for convergence to an
    implausibly large pressure.
    '''
    V = None
    N = len(zs)
    cmps = range(N)
    if comp_guess is None:
        comp_guess = zs
    # The "iter" phase is the incipient phase whose composition is unknown;
    # the "const" phase keeps the feed composition zs.
    if V_over_F == 1.0:
        iter_phase, const_phase = liquid_phase, gas_phase
    elif V_over_F == 0.0:
        iter_phase, const_phase = gas_phase, liquid_phase
    else:
        raise ValueError("Supports only VF of 0 or 1")
    # Preallocated working arrays, reused across objective evaluations
    lnKs = [0.0]*N
    size = N + 1
    errs = [0.0]*size
    comp_invs = [0.0]*N
    J = [[0.0]*size for i in range(size)]
    #J[N][N] = 0.0 as well
    # Last Jacobian row: derivative of the sum-to-one constraint w.r.t. comp
    JN = J[N]
    for i in cmps:
        JN[i] = -1.0
    # Unbound-method lookups for dlnphi/d(iter_var) and dlnphi/dzs
    s = 'dlnphis_d%s' %(iter_var)
    dlnphis_diter_var_iter = getattr(iter_phase.__class__, s)
    dlnphis_diter_var_const = getattr(const_phase.__class__, s)
    dlnphis_dzs = getattr(iter_phase.__class__, 'dlnphis_dzs')
    info = []
    kwargs = {}
    kwargs[fixed_var] = fixed_val
    kwargs['V'] = None
    def to_solve_comp(iter_vals, jac=True):
        # Objective: errs[i] = ln K_i - ln phi_iter_i + ln phi_const_i,
        # errs[-1] = 1 - sum(comp); optionally also the analytical Jacobian.
        comp = iter_vals[:-1]
        iter_val = iter_vals[-1]
        kwargs[iter_var] = iter_val
        p_iter = iter_phase.to(comp, **kwargs)
        p_const = const_phase.to(zs, **kwargs)
        lnphis_iter = p_iter.lnphis()
        lnphis_const = p_const.lnphis()
        for i in cmps:
            comp_invs[i] = comp_inv = 1.0/comp[i]
            lnKs[i] = log(zs[i]*comp_inv)
            errs[i] = lnKs[i] - lnphis_iter[i] + lnphis_const[i]
        errs[-1] = 1.0 - sum(comp)
        if jac:
            dlnphis_dxs = dlnphis_dzs(p_iter)
            dlnphis_dprop_iter = dlnphis_diter_var_iter(p_iter)
            dlnphis_dprop_const = dlnphis_diter_var_const(p_const)
            for i in cmps:
                Ji = J[i]
                Ji[-1] = dlnphis_dprop_const[i] - dlnphis_dprop_iter[i]
                for j in cmps:
                    Ji[j] = -dlnphis_dxs[i][j]
                Ji[i] -= comp_invs[i]
            # Stash the last-evaluated phases/errors for the return value
            info[:] = [p_iter, p_const, errs, J]
            return errs, J
        return errs
    damping = 1.0
    guesses = list(comp_guess)
    guesses.append(guess)
    if method == 'newton':
        comp_val, iterations = newton_system(to_solve_comp, guesses, jac=True,
                                             xtol=xtol, damping=damping,
                                             solve_func=py_solve,
                                             # solve_func=lambda x, y:np.linalg.solve(x, y).tolist(),
                                             damping_func=damping_maintain_sign)
    elif method == 'odeint':
        # Not even close to working
        # equations are hard
        from scipy.integrate import odeint
        def fun_and_jac(x, t):
            x, j = to_solve_comp(x.tolist() + [t])
            return np.array(x), np.array(j)
        def fun(x, t):
            x, j = to_solve_comp(x.tolist() +[t])
            return np.array(x)
        def jac(x, t):
            x, j = to_solve_comp(x.tolist() + [t])
            return np.array(j)
        ans = odeint(func=fun, y0=np.array(guesses), t=np.linspace(guess, guess*2, 5), Dfun=jac)
        return ans
    else:
        # Fall through to a scipy.optimize.root method, with the variables
        # translated so the bounds are respected
        if opt_kwargs is None:
            opt_kwargs = {}
        # def fun_and_jac(x):
        #     x, j = to_solve_comp(x.tolist())
        #     return np.array(x), np.array(j)
        low = [.0]*N
        low.append(1.0) # guess at minimum pressure
        high = [1.0]*N
        high.append(1e10) # guess at maximum pressure
        f_j, into, outof = translate_bound_f_jac(to_solve_comp, jac=True, low=low, high=high, as_np=True)
        ans = root(f_j, np.array(into(guesses)), jac=True, method=method, tol=xtol, **opt_kwargs)
        comp_val = outof(ans['x']).tolist()
        iterations = ans['nfev']
    iter_val = comp_val[-1]
    comp = comp_val[:-1]
    comp_difference = 0.0
    for i in cmps: comp_difference += abs(zs[i] - comp[i])
    if comp_difference < trivial_solution_tol:
        raise ValueError("Converged to trivial condition, compositions of both phases equal")
    if iter_var == 'P' and iter_val > 1e10:
        raise ValueError("Converged to unlikely point")
    sln = [iter_val, comp]
    sln.append(info[0])
    sln.append(info[1])
    sln.append(iterations)
    tot_err = 0.0
    for err_i in info[2]:
        tot_err += abs(err_i)
    sln.append(tot_err)
    if debug:
        return sln, to_solve_comp
    return sln
# Error message templates for phases that cannot be evaluated at the
# requested condition; formatted with (value, composition).
# Fixed typo: "fracions" -> "fractions".
l_undefined_T_msg = "Could not calculate liquid conditions at provided temperature %s K (mole fractions %s)"
g_undefined_T_msg = "Could not calculate vapor conditions at provided temperature %s K (mole fractions %s)"
l_undefined_P_msg = "Could not calculate liquid conditions at provided pressure %s Pa (mole fractions %s)"
g_undefined_P_msg = "Could not calculate vapor conditions at provided pressure %s Pa (mole fractions %s)"
def dew_bubble_Michelsen_Mollerup(guess, fixed_val, zs, liquid_phase, gas_phase,
                                  iter_var='T', fixed_var='P', V_over_F=1,
                                  maxiter=200, xtol=1E-10, comp_guess=None,
                                  max_step_damping=.25, guess_update_frequency=1,
                                  trivial_solution_tol=1e-7, V_diff=.00002, damping=1.0):
    '''Solve a dew (`V_over_F` = 1) or bubble (`V_over_F` = 0) point by
    successive substitution on the incipient-phase composition combined with
    a damped Newton step on the iteration variable (`iter_var`, 'T' or 'P'),
    in the Michelsen-Mollerup formulation.

    When `V_diff` is not None, the molar volumes of the two phases are
    compared each iteration; if they are too close (suggesting both phase
    objects have collapsed onto the same EOS root), the iteration variable
    is moved just past an EOS discriminant zero to force two distinct roots,
    with temporarily increased damping for the next few iterations.

    Returns ``(iter_val, comp_guess, iter_phase, const_phase, iteration,
    err)`` where ``err`` is the final change in the iteration variable.
    '''
    # for near critical, V diff very wrong - .005 seen, both g as or both liquid
    kwargs = {fixed_var: fixed_val}
    N = len(zs)
    cmps = range(N)
    comp_guess = zs if comp_guess is None else comp_guess
    damping_orig = damping  # saved so damping can be restored after a forced split
    # The "iter" phase is the incipient phase whose composition is iterated on
    if V_over_F == 1.0:
        iter_phase, const_phase, bubble = liquid_phase, gas_phase, False
    elif V_over_F == 0.0:
        iter_phase, const_phase, bubble = gas_phase, liquid_phase, True
    else:
        raise ValueError("Supports only VF of 0 or 1")
    if iter_var == 'T':
        if V_over_F == 1.0:
            iter_msg, const_msg = l_undefined_T_msg, g_undefined_T_msg
        else:
            iter_msg, const_msg = g_undefined_T_msg, l_undefined_T_msg
    elif iter_var == 'P':
        if V_over_F == 1.0:
            iter_msg, const_msg = l_undefined_P_msg, g_undefined_P_msg
        else:
            iter_msg, const_msg = g_undefined_P_msg, l_undefined_P_msg
    # Unbound-method lookups for d(ln phi)/d(iter_var) of each phase class
    s = 'dlnphis_d%s' %(iter_var)
    dlnphis_diter_var_iter = getattr(iter_phase.__class__, s)
    dlnphis_diter_var_const = getattr(const_phase.__class__, s)
    # skip > 0 means a forced root-split happened recently; extra damping applies
    skip = 0
    guess_old = None
    V_ratio, V_ratio_last = None, None
    V_iter_last, V_const_last = None, None
    expect_phase = 'g' if V_over_F == 0.0 else 'l'
    unwanted_phase = 'l' if expect_phase == 'g' else 'g'
    successive_fails = 0
    for iteration in range(maxiter):
        kwargs[iter_var] = guess
        try:
            const_phase = const_phase.to_TP_zs(zs=zs, **kwargs)
            lnphis_const = const_phase.lnphis()
            dlnphis_dvar_const = dlnphis_diter_var_const(const_phase)
        except Exception as e:
            if guess_old is None:
                raise ValueError(const_msg %(guess, zs), e)
            # Retry from the last good point with a damped version of the step
            successive_fails += 1
            guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)
            continue
        try:
            skip -= 1
            iter_phase = iter_phase.to_TP_zs(zs=comp_guess, **kwargs)
            if V_diff is not None:
                V_iter, V_const = iter_phase.V(), const_phase.V()
                V_ratio = V_iter/V_const
                # Trigger a split if the phase volumes are nearly identical, a
                # split happened recently, or the iter phase volume jumped
                if 1.0 - V_diff < V_ratio < 1.0 + V_diff or skip > 0 or V_iter_last and (abs(min(V_iter, V_iter_last)/max(V_iter, V_iter_last)) < .8):
                    # Relax the constraint for the iterating on variable so two different phases exist
                    #if iter_phase.eos_mix.phase in ('l', 'g') and iter_phase.eos_mix.phase == const_phase.eos_mix.phase:
                    # Alternatively, try a stability test here
                    if iter_phase.eos_mix.phase == unwanted_phase:
                        if skip < 0:
                            skip = 4
                            damping = .15
                        # Jump just past a discriminant zero so the EOS has
                        # two real roots again
                        if iter_var == 'P':
                            split = min(iter_phase.eos_mix.P_discriminant_zeros()) # P_discriminant_zero_l
                            if bubble:
                                split *= 0.999999999
                            else:
                                split *= 1.000000001
                        elif iter_var == 'T':
                            split = iter_phase.eos_mix.T_discriminant_zero_l()
                            if bubble:
                                split *= 0.999999999
                            else:
                                split *= 1.000000001
                        kwargs[iter_var] = guess = split
                        iter_phase = iter_phase.to(zs=comp_guess, **kwargs)
                        const_phase = const_phase.to(zs=zs, **kwargs)
                        lnphis_const = const_phase.lnphis()
                        dlnphis_dvar_const = dlnphis_diter_var_const(const_phase)
                        # NOTE(review): leftover debug print; consider removing
                        print('adj iter phase', split)
                    elif const_phase.eos_mix.phase == expect_phase:
                        if skip < 0:
                            skip = 4
                            damping = .15
                        if iter_var == 'P':
                            split = min(const_phase.eos_mix.P_discriminant_zeros())
                            if bubble:
                                split *= 0.999999999
                            else:
                                split *= 1.000000001
                        elif iter_var == 'T':
                            split = const_phase.eos_mix.T_discriminant_zero_l()
                            if bubble:
                                split *= 0.999999999
                            else:
                                split *= 1.000000001
                        kwargs[iter_var] = guess = split
                        const_phase = const_phase.to(zs=zs, **kwargs)
                        lnphis_const = const_phase.lnphis()
                        dlnphis_dvar_const = dlnphis_diter_var_const(const_phase)
                        iter_phase = iter_phase.to(zs=comp_guess, **kwargs)
                        # Also need to adjust the other phase to keep it in sync
                        # NOTE(review): leftover debug print; consider removing
                        print('adj const phase', split)
            lnphis_iter = iter_phase.lnphis()
            dlnphis_dvar_iter = dlnphis_diter_var_iter(iter_phase)
        except Exception as e:
            if guess_old is None:
                raise ValueError(iter_msg %(guess, zs), e)
            successive_fails += 1
            guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)
            continue
        if successive_fails > 2:
            raise ValueError("Stopped convergence procedure after multiple bad steps")
        successive_fails = 0
        # K-values and successive-substitution composition update
        Ks = [exp(a - b) for a, b in zip(lnphis_const, lnphis_iter)]
        comp_guess = [zs[i]*Ks[i] for i in cmps]
        y_sum = sum(comp_guess)
        comp_guess = [y/y_sum for y in comp_guess]
        if iteration % guess_update_frequency: # or skip > 0
            continue
        elif skip == 0:
            damping = damping_orig
        # Newton step on the iteration variable for f = sum(K_i*z_i) - 1
        f_k = sum([zs[i]*Ks[i] for i in cmps]) - 1.0
        dfk_dvar = 0.0
        for i in cmps:
            dfk_dvar += zs[i]*Ks[i]*(dlnphis_dvar_const[i] - dlnphis_dvar_iter[i])
        guess_old = guess
        step = -f_k/dfk_dvar
        # if near_critical:
        adj_step = copysign(min(max_step_damping*guess, abs(step), abs(step)*damping), step)
        if guess + adj_step <= 0.0:
            adj_step *= 0.5
        guess = guess + adj_step
        # else:
        #     guess = guess + step
        comp_difference = 0.0
        for i in cmps: comp_difference += abs(zs[i] - comp_guess[i])
        if comp_difference < trivial_solution_tol and iteration:
            for zi in zs:
                if zi == 1.0:
                    # Turn off trivial check for pure components
                    trivial_solution_tol = -1.0
            if comp_difference < trivial_solution_tol:
                raise ValueError("Converged to trivial condition, compositions of both phases equal")
        if abs(guess - guess_old) < xtol: #and not skip:
            guess = guess_old
            break
        if V_diff is not None:
            V_iter_last, V_const_last, V_ratio_last = V_iter, V_const, V_ratio
    if abs(guess - guess_old) > xtol:
        raise ValueError("Did not converge to specified tolerance")
    return guess, comp_guess, iter_phase, const_phase, iteration, abs(guess - guess_old)
# NOTE(review): these four templates duplicate the earlier definitions in
# this module; kept so runtime lookups are unaffected. Fixed the
# "fracions" -> "fractions" typo here as well.
l_undefined_T_msg = "Could not calculate liquid conditions at provided temperature %s K (mole fractions %s)"
g_undefined_T_msg = "Could not calculate vapor conditions at provided temperature %s K (mole fractions %s)"
l_undefined_P_msg = "Could not calculate liquid conditions at provided pressure %s Pa (mole fractions %s)"
g_undefined_P_msg = "Could not calculate vapor conditions at provided pressure %s Pa (mole fractions %s)"
def existence_3P_Michelsen_Mollerup(guess, fixed_val, zs, iter_phase, liquid0, liquid1,
                                    iter_var='T', fixed_var='P',
                                    maxiter=200, xtol=1E-10, comp_guess=None,
                                    liquid0_comp=None, liquid1_comp=None,
                                    max_step_damping=.25, SS_tol=1e-10,
                                    trivial_solution_tol=1e-7, damping=1.0,
                                    beta=0.5):
    '''Search for the condition (`iter_var`, 'T' or 'P') at which a third
    phase `iter_phase` comes into existence while the two existing phases
    `liquid0` and `liquid1` (either may actually be a gas or solid) remain
    in equilibrium, with overall composition `zs` and phase compositions
    `liquid0_comp`/`liquid1_comp`.

    Each iteration takes a damped Newton step on the iteration variable for
    the incipient-phase criterion ``sum(K_i*x_i) = 1``, then performs one
    successive-substitution update of the two existing phases' compositions
    via :func:`flash_inner_loop`.

    Returns ``(iter_val, [iter_phase, liquid0, liquid1],
    [0.0, 1.0-beta, beta], err_VF, err_SS, iteration)``.

    Raises
    ------
    ValueError
        On evaluation failure at the initial guess, repeated failed steps,
        approach to the trivial (equal-composition) solution, or failure to
        converge within `maxiter` iterations.
    '''
    # For convenience call the two phases that exist already liquid0, liquid1
    # But one of them can be a gas, solid, etc.
    kwargs = {fixed_var: fixed_val}
    N = len(zs)
    cmps = range(N)
    comp_guess = zs if comp_guess is None else comp_guess
    if iter_var == 'T':
        iter_msg, const_msg = g_undefined_T_msg, l_undefined_T_msg
    elif iter_var == 'P':
        iter_msg, const_msg = g_undefined_P_msg, l_undefined_P_msg
    # Unbound-method lookups for d(ln phi)/d(iter_var)
    s = 'dlnphis_d%s' %(iter_var)
    dlnphis_diter_var_iter = getattr(iter_phase.__class__, s)
    dlnphis_diter_var_liquid0 = getattr(liquid0.__class__, s)
#    dlnphis_diter_var_liquid1 = getattr(liquid1.__class__, s)
    guess_old = None
    successive_fails = 0
    # Bugfix: bind these ahead of the loop so they cannot be referenced
    # unbound if maxiter is exhausted without the break being taken
    err_VF = err_SS = 0.0
    for iteration in range(maxiter):
        kwargs[iter_var] = guess
        try:
            liquid0 = liquid0.to_TP_zs(zs=liquid0_comp, **kwargs)
            lnphis_liquid0 = liquid0.lnphis()
            dlnphis_dvar_liquid0 = dlnphis_diter_var_liquid0(liquid0)
        except Exception as e:
            if guess_old is None:
                raise ValueError(const_msg %(guess, liquid0_comp), e)
            # Retry from the last good point with a damped version of the step
            successive_fails += 1
            guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)
            continue
        try:
            liquid1 = liquid1.to_TP_zs(zs=liquid1_comp, **kwargs)
            lnphis_liquid1 = liquid1.lnphis()
#            dlnphis_dvar_liquid1 = dlnphis_diter_var_liquid1(liquid1)
        except Exception as e:
            if guess_old is None:
                raise ValueError(const_msg %(guess, liquid0_comp), e)
            successive_fails += 1
            guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)
            continue
        try:
            iter_phase = iter_phase.to_TP_zs(zs=comp_guess, **kwargs)
            lnphis_iter = iter_phase.lnphis()
            dlnphis_dvar_iter = dlnphis_diter_var_iter(iter_phase)
        except Exception as e:
            if guess_old is None:
                raise ValueError(iter_msg %(guess, zs), e)
            successive_fails += 1
            guess = guess_old + copysign(min(max_step_damping*guess, abs(step)), step)
            continue
        if successive_fails > 2:
            raise ValueError("Stopped convergence procedure after multiple bad steps")
        successive_fails = 0
        # Incipient-phase K-values and composition update
        Ks = [exp(a - b) for a, b in zip(lnphis_liquid0, lnphis_iter)]
        comp_guess = [liquid0_comp[i]*Ks[i] for i in cmps]
        y_sum_inv = 1.0/sum(comp_guess)
        comp_guess = [y*y_sum_inv for y in comp_guess]
        # Damped Newton step on the iteration variable for f = sum(K_i*x_i) - 1
        f_k = sum([liquid0_comp[i]*Ks[i] for i in cmps]) - 1.0
        dfk_dvar = 0.0
        for i in cmps:
            dfk_dvar += liquid0_comp[i]*Ks[i]*(dlnphis_dvar_liquid0[i] - dlnphis_dvar_iter[i])
        guess_old = guess
        step = -f_k/dfk_dvar
        adj_step = copysign(min(max_step_damping*guess, abs(step), abs(step)*damping), step)
        if guess + adj_step <= 0.0:
            adj_step *= 0.5
        guess = guess + adj_step
        comp_difference = 0.0
        for i in cmps:
            comp_difference += abs(liquid0_comp[i] - comp_guess[i])
        if comp_difference < trivial_solution_tol and iteration:
            raise ValueError("Converged to trivial condition, compositions of both phases equal")
        # Do the SS part for the two phases
        try:
            Ks_SS = [exp(lnphis_liquid0[i] - lnphis_liquid1[i]) for i in cmps]
        except OverflowError:
            Ks_SS = [trunc_exp(lnphis_liquid0[i] - lnphis_liquid1[i]) for i in cmps]
        beta, liquid0_comp_new, liquid1_comp_new = flash_inner_loop(zs, Ks_SS, guess=beta)
        # Rachford-Rice may return negative compositions; renormalize magnitudes
        for xi in liquid0_comp_new:
            if xi < 0.0:
                xs_new_sum_inv = 1.0/sum(abs(i) for i in liquid0_comp_new)
                for i in cmps:
                    liquid0_comp_new[i] = abs(liquid0_comp_new[i])*xs_new_sum_inv
                break
        for xi in liquid1_comp_new:
            if xi < 0.0:
                xs_new_sum_inv = 1.0/sum(abs(i) for i in liquid1_comp_new)
                for i in cmps:
                    liquid1_comp_new[i] = abs(liquid1_comp_new[i])*xs_new_sum_inv
                break
        err_SS = 0.0
        try:
            for Ki, xi, yi in zip(Ks_SS, liquid0_comp, liquid1_comp):
                err_i = Ki*xi/yi - 1.0
                err_SS += err_i*err_i
        except ZeroDivisionError:
            # Bugfix: this branch previously iterated over undefined names
            # (`xs`, `ys`) and raised NameError; redo the same computation,
            # skipping terms with a zero composition instead.
            err_SS = 0.0
            for Ki, xi, yi in zip(Ks_SS, liquid0_comp, liquid1_comp):
                try:
                    err_i = Ki*xi/yi - 1.0
                    err_SS += err_i*err_i
                except ZeroDivisionError:
                    pass
        liquid0_comp, liquid1_comp = liquid0_comp_new, liquid1_comp_new
        if abs(guess - guess_old) < xtol and err_SS < SS_tol:
            err_VF = abs(guess - guess_old)
            guess = guess_old
            break
    if abs(guess - guess_old) > xtol:
        raise ValueError("Did not converge to specified tolerance")
    return guess, [iter_phase, liquid0, liquid1], [0.0, 1.0-beta, beta], err_VF, err_SS, iteration
def bubble_T_Michelsen_Mollerup(T_guess, P, zs, liquid_phase, gas_phase,
                                maxiter=200, xtol=1E-10, ys_guess=None,
                                max_step_damping=5.0, T_update_frequency=1,
                                trivial_solution_tol=1e-4):
    '''Solve for the bubble temperature of a liquid of composition `zs` at
    pressure `P` by successive substitution on the vapor composition with a
    damped Newton step on temperature (Michelsen-Mollerup formulation).

    Returns ``(T_bubble, ys, liquid, gas, iterations, err)`` where ``err``
    is the final change in the temperature guess.

    Raises ValueError on evaluation failure at the initial guess, repeated
    failed steps, approach to the trivial (equal-composition) solution, or
    failure to converge within `maxiter` iterations.
    '''
    N = len(zs)
    cmps = range(N)
    ys = zs if ys_guess is None else ys_guess
    T_guess_old = None
    successive_fails = 0
    for iteration in range(maxiter):
        try:
            g = gas_phase.to_TP_zs(T=T_guess, P=P, zs=ys)
            lnphis_g = g.lnphis()
            dlnphis_dT_g = g.dlnphis_dT()
        except Exception as e:
            if T_guess_old is None:
                raise ValueError(g_undefined_T_msg %(T_guess, ys), e)
            # Retry from the last good temperature with a damped step
            successive_fails += 1
            T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step)
            continue
        try:
            l = liquid_phase.to_TP_zs(T=T_guess, P=P, zs=zs)
            lnphis_l = l.lnphis()
            dlnphis_dT_l = l.dlnphis_dT()
        except Exception as e:
            if T_guess_old is None:
                raise ValueError(l_undefined_T_msg %(T_guess, zs), e)
            successive_fails += 1
            T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step)
            continue
        if successive_fails > 2:
            raise ValueError("Stopped convergence procedure after multiple bad steps")
        successive_fails = 0
        # K-values from the fugacity coefficient ratio
        Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)]
        ys = [zs[i]*Ks[i] for i in cmps]
        if iteration % T_update_frequency:
            continue
        # Damped Newton step on T for the objective f = sum(K_i*z_i) - 1
        f_k = sum([zs[i]*Ks[i] for i in cmps]) - 1.0
        dfk_dT = 0.0
        for i in cmps:
            dfk_dT += zs[i]*Ks[i]*(dlnphis_dT_l[i] - dlnphis_dT_g[i])
        T_guess_old = T_guess
        step = -f_k/dfk_dT
        # if near_critical:
        T_guess = T_guess + copysign(min(max_step_damping, abs(step)), step)
        # else:
        #     T_guess = T_guess + step
        comp_difference = sum([abs(zi - yi) for zi, yi in zip(zs, ys)])
        if comp_difference < trivial_solution_tol:
            raise ValueError("Converged to trivial condition, compositions of both phases equal")
        y_sum = sum(ys)
        ys = [y/y_sum for y in ys]
        if abs(T_guess - T_guess_old) < xtol:
            T_guess = T_guess_old
            break
    if abs(T_guess - T_guess_old) > xtol:
        raise ValueError("Did not converge to specified tolerance")
    return T_guess, ys, l, g, iteration, abs(T_guess - T_guess_old)
def dew_T_Michelsen_Mollerup(T_guess, P, zs, liquid_phase, gas_phase,
                             maxiter=200, xtol=1E-10, xs_guess=None,
                             max_step_damping=5.0, T_update_frequency=1,
                             trivial_solution_tol=1e-4):
    '''Solve for the dew temperature of a vapor of composition `zs` at
    pressure `P` by successive substitution on the liquid composition with a
    damped Newton step on temperature (Michelsen-Mollerup formulation).

    Returns ``(T_dew, xs, liquid, gas, iterations, err)`` where ``err`` is
    the final change in the temperature guess.

    Raises ValueError on evaluation failure at the initial guess, repeated
    failed steps, approach to the trivial (equal-composition) solution, or
    failure to converge within `maxiter` iterations.
    '''
    N = len(zs)
    cmps = range(N)
    xs = zs if xs_guess is None else xs_guess
    T_guess_old = None
    successive_fails = 0
    for iteration in range(maxiter):
        try:
            g = gas_phase.to_TP_zs(T=T_guess, P=P, zs=zs)
            lnphis_g = g.lnphis()
            dlnphis_dT_g = g.dlnphis_dT()
        except Exception as e:
            if T_guess_old is None:
                raise ValueError(g_undefined_T_msg %(T_guess, zs), e)
            # Retry from the last good temperature with a damped step
            successive_fails += 1
            T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step)
            continue
        try:
            l = liquid_phase.to_TP_zs(T=T_guess, P=P, zs=xs)
            lnphis_l = l.lnphis()
            dlnphis_dT_l = l.dlnphis_dT()
        except Exception as e:
            if T_guess_old is None:
                raise ValueError(l_undefined_T_msg %(T_guess, xs), e)
            successive_fails += 1
            T_guess = T_guess_old + copysign(min(max_step_damping, abs(step)), step)
            continue
        if successive_fails > 2:
            raise ValueError("Stopped convergence procedure after multiple bad steps")
        successive_fails = 0
        # K-values from the fugacity coefficient ratio; liquid comp x_i = z_i/K_i
        Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)]
        xs = [zs[i]/Ks[i] for i in cmps]
        if iteration % T_update_frequency:
            continue
        # Damped Newton step on T for the objective f = sum(z_i/K_i) - 1
        f_k = sum(xs) - 1.0
        dfk_dT = 0.0
        for i in cmps:
            dfk_dT += xs[i]*(dlnphis_dT_g[i] - dlnphis_dT_l[i])
        T_guess_old = T_guess
        step = -f_k/dfk_dT
        # if near_critical:
        T_guess = T_guess + copysign(min(max_step_damping, abs(step)), step)
        # else:
        #     T_guess = T_guess + step
        comp_difference = sum([abs(zi - xi) for zi, xi in zip(zs, xs)])
        if comp_difference < trivial_solution_tol:
            raise ValueError("Converged to trivial condition, compositions of both phases equal")
        y_sum = sum(xs)
        xs = [y/y_sum for y in xs]
        if abs(T_guess - T_guess_old) < xtol:
            T_guess = T_guess_old
            break
    if abs(T_guess - T_guess_old) > xtol:
        raise ValueError("Did not converge to specified tolerance")
    return T_guess, xs, l, g, iteration, abs(T_guess - T_guess_old)
def bubble_P_Michelsen_Mollerup(P_guess, T, zs, liquid_phase, gas_phase,
                                maxiter=200, xtol=1E-10, ys_guess=None,
                                max_step_damping=1e5, P_update_frequency=1,
                                trivial_solution_tol=1e-4):
    '''Solve for the bubble pressure of a liquid of composition `zs` at
    temperature `T` by successive substitution on the vapor composition with
    a damped Newton step on pressure (Michelsen-Mollerup formulation).

    Returns ``(P_bubble, ys, liquid, gas, iterations, err)`` where ``err``
    is the final change in the pressure guess.

    Raises ValueError on evaluation failure at the initial guess, repeated
    failed steps, approach to the trivial (equal-composition) solution, or
    failure to converge within `maxiter` iterations.
    '''
    N = len(zs)
    cmps = range(N)
    ys = zs if ys_guess is None else ys_guess
    P_guess_old = None
    successive_fails = 0
    for iteration in range(maxiter):
        try:
            g = gas_phase = gas_phase.to_TP_zs(T=T, P=P_guess, zs=ys)
            lnphis_g = g.lnphis()
            dlnphis_dP_g = g.dlnphis_dP()
        except Exception as e:
            if P_guess_old is None:
                raise ValueError(g_undefined_P_msg %(P_guess, ys), e)
            # Retry from the last good pressure with a damped step
            successive_fails += 1
            P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step)
            continue
        try:
            l = liquid_phase = liquid_phase.to_TP_zs(T=T, P=P_guess, zs=zs)
            lnphis_l = l.lnphis()
            dlnphis_dP_l = l.dlnphis_dP()
        except Exception as e:
            if P_guess_old is None:
                raise ValueError(l_undefined_P_msg %(P_guess, zs), e)
            successive_fails += 1
            # Bugfix: this previously assigned to `T_guess`, leaving
            # `P_guess` unchanged and retrying the same failed pressure
            P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step)
            continue
        if successive_fails > 2:
            raise ValueError("Stopped convergence procedure after multiple bad steps")
        successive_fails = 0
        # K-values from the fugacity coefficient ratio
        Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)]
        ys = [zs[i]*Ks[i] for i in cmps]
        if iteration % P_update_frequency:
            continue
        # Damped Newton step on P for the objective f = sum(K_i*z_i) - 1
        f_k = sum([zs[i]*Ks[i] for i in cmps]) - 1.0
        dfk_dP = 0.0
        for i in cmps:
            dfk_dP += zs[i]*Ks[i]*(dlnphis_dP_l[i] - dlnphis_dP_g[i])
        P_guess_old = P_guess
        step = -f_k/dfk_dP
        P_guess = P_guess + copysign(min(max_step_damping, abs(step)), step)
        comp_difference = sum([abs(zi - yi) for zi, yi in zip(zs, ys)])
        if comp_difference < trivial_solution_tol:
            raise ValueError("Converged to trivial condition, compositions of both phases equal")
        y_sum = sum(ys)
        ys = [y/y_sum for y in ys]
        if abs(P_guess - P_guess_old) < xtol:
            P_guess = P_guess_old
            break
    if abs(P_guess - P_guess_old) > xtol:
        raise ValueError("Did not converge to specified tolerance")
    return P_guess, ys, l, g, iteration, abs(P_guess - P_guess_old)
def dew_P_Michelsen_Mollerup(P_guess, T, zs, liquid_phase, gas_phase,
                             maxiter=200, xtol=1E-10, xs_guess=None,
                             max_step_damping=1e5, P_update_frequency=1,
                             trivial_solution_tol=1e-4):
    '''Solve for the dew pressure of a vapor of composition `zs` at
    temperature `T` by successive substitution on the liquid composition
    with a damped Newton step on pressure (Michelsen-Mollerup formulation).

    Returns ``(P_dew, xs, liquid, gas, iterations, err)`` where ``err`` is
    the final change in the pressure guess.

    Raises ValueError on evaluation failure at the initial guess, repeated
    failed steps, approach to the trivial (equal-composition) solution, or
    failure to converge within `maxiter` iterations.
    '''
    N = len(zs)
    cmps = range(N)
    xs = zs if xs_guess is None else xs_guess
    P_guess_old = None
    successive_fails = 0
    for iteration in range(maxiter):
        try:
            g = gas_phase = gas_phase.to_TP_zs(T=T, P=P_guess, zs=zs)
            lnphis_g = g.lnphis()
            dlnphis_dP_g = g.dlnphis_dP()
        except Exception as e:
            if P_guess_old is None:
                raise ValueError(g_undefined_P_msg %(P_guess, zs), e)
            # Retry from the last good pressure with a damped step
            successive_fails += 1
            P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step)
            continue
        try:
            l = liquid_phase = liquid_phase.to_TP_zs(T=T, P=P_guess, zs=xs)
            lnphis_l = l.lnphis()
            dlnphis_dP_l = l.dlnphis_dP()
        except Exception as e:
            if P_guess_old is None:
                raise ValueError(l_undefined_P_msg %(P_guess, xs), e)
            successive_fails += 1
            # Bugfix: this previously assigned to `T_guess`, leaving
            # `P_guess` unchanged and retrying the same failed pressure
            P_guess = P_guess_old + copysign(min(max_step_damping, abs(step)), step)
            continue
        if successive_fails > 2:
            raise ValueError("Stopped convergence procedure after multiple bad steps")
        successive_fails = 0
        # K-values from the fugacity coefficient ratio; liquid comp x_i = z_i/K_i
        Ks = [exp(a - b) for a, b in zip(lnphis_l, lnphis_g)]
        xs = [zs[i]/Ks[i] for i in cmps]
        if iteration % P_update_frequency:
            continue
        # Damped Newton step on P for the objective f = sum(z_i/K_i) - 1
        f_k = sum(xs) - 1.0
        dfk_dP = 0.0
        for i in cmps:
            dfk_dP += xs[i]*(dlnphis_dP_g[i] - dlnphis_dP_l[i])
        P_guess_old = P_guess
        step = -f_k/dfk_dP
        P_guess = P_guess + copysign(min(max_step_damping, abs(step)), step)
        comp_difference = sum([abs(zi - xi) for zi, xi in zip(zs, xs)])
        if comp_difference < trivial_solution_tol:
            raise ValueError("Converged to trivial condition, compositions of both phases equal")
        x_sum_inv = 1.0/sum(xs)
        xs = [x*x_sum_inv for x in xs]
        if abs(P_guess - P_guess_old) < xtol:
            P_guess = P_guess_old
            break
    if abs(P_guess - P_guess_old) > xtol:
        raise ValueError("Did not converge to specified tolerance")
    return P_guess, xs, l, g, iteration, abs(P_guess - P_guess_old)
# spec, iter_var, fixed_var
strs_to_ders = {('H', 'T', 'P'): 'dH_dT_P',
('S', 'T', 'P'): 'dS_dT_P',
('G', 'T', 'P'): 'dG_dT_P',
('U', 'T', 'P'): 'dU_dT_P',
('A', 'T', 'P'): 'dA_dT_P',
('H', 'T', 'V'): 'dH_dT_V',
('S', 'T', 'V'): 'dS_dT_V',
('G', 'T', 'V'): 'dG_dT_V',
('U', 'T', 'V'): 'dU_dT_V',
('A', 'T', 'V'): 'dA_dT_V',
('H', 'P', 'T'): 'dH_dP_T',
('S', 'P', 'T'): 'dS_dP_T',
('G', 'P', 'T'): 'dG_dP_T',
('U', 'P', 'T'): 'dU_dP_T',
('A', 'P', 'T'): 'dA_dP_T',
('H', 'P', 'V'): 'dH_dP_V',
('S', 'P', 'V'): 'dS_dP_V',
('G', 'P', 'V'): 'dG_dP_V',
('U', 'P', 'V'): 'dU_dP_V',
('A', 'P', 'V'): 'dA_dP_V',
('H', 'V', 'T'): 'dH_dV_T',
('S', 'V', 'T'): 'dS_dV_T',
('G', 'V', 'T'): 'dG_dV_T',
('U', 'V', 'T'): 'dU_dV_T',
('A', 'V', 'T'): 'dA_dV_T',
('H', 'V', 'P'): 'dH_dV_P',
('S', 'V', 'P'): 'dS_dV_P',
('G', 'V', 'P'): 'dG_dV_P',
('U', 'V', 'P'): 'dU_dV_P',
('A', 'V', 'P'): 'dA_dV_P',
}
# (fixed_var, spec) pairs for which the HSGUA objective can have more than
# one solution: any combination where temperature is paired with an energy
# or entropy spec, in either order.
multiple_solution_sets = {pair
                          for spec in ('S', 'H', 'U', 'A', 'G')
                          for pair in (('T', spec), (spec, 'T'))}
def TPV_solve_HSGUA_1P(zs, phase, guess, fixed_var_val, spec_val,
                       iter_var='T', fixed_var='P', spec='H',
                       maxiter=200, xtol=1E-10, ytol=None, fprime=False,
                       minimum_progress=0.3, oscillation_detection=True,
                       bounded=False, min_bound=None, max_bound=None,
                       multi_solution=False):
    r'''Solve a single-phase flash where one of `T`, `P`, or `V` are specified
    and one of `H`, `S`, `G`, `U`, or `A` are also specified. The iteration
    (changed input variable) variable must be specified as one of `T`, `P`,
    or `V`, but it cannot be the same as the fixed variable.

    This method is a secant or newton based solution method, optionally with
    oscillation detection to bail out of trying to solve the problem to handle
    the case where the spec cannot be met because of a phase change (as in a
    cubic eos case).

    Parameters
    ----------
    zs : list[float]
        Mole fractions of the phase, [-]
    phase : `Phase`
        The phase object of the mixture, containing the information for
        calculating properties at new conditions, [-]
    guess : float
        The guessed value for the iteration variable,
        [K or Pa or m^3/mol]
    fixed_var_val : float
        The specified value of the fixed variable (one of T, P, or V);
        [K or Pa, or m^3/mol]
    spec_val : float
        The specified value of H, S, G, U, or A, [J/(mol*K) or J/mol]
    iter_var : str
        One of 'T', 'P', 'V', [-]
    fixed_var : str
        One of 'T', 'P', 'V', [-]
    spec : str
        One of 'H', 'S', 'G', 'U', 'A', [-]
    maxiter : float
        Maximum number of iterations, [-]
    xtol : float
        Tolerance for secant-style convergence of the iteration variable,
        [K or Pa, or m^3/mol]
    ytol : float or None
        Tolerance for convergence of the spec variable,
        [J/(mol*K) or J/mol]

    Returns
    -------
    iter_var_val, phase, iterations, err

    Notes
    -----
    '''
    # Needs lots of work but the idea is here
    # Can iterate chancing any of T, P, V with a fixed other T, P, V to meet any
    # H S G U A spec.
    store = []
    # NOTE(review): module-global iteration counter, incremented by the
    # objective closure below; not thread-safe
    global iterations
    iterations = 0
    if fixed_var == iter_var:
        raise ValueError("Fixed variable cannot be the same as iteration variable")
    if fixed_var not in ('T', 'P', 'V'):
        raise ValueError("Fixed variable must be one of `T`, `P`, `V`")
    if iter_var not in ('T', 'P', 'V'):
        raise ValueError("Iteration variable must be one of `T`, `P`, `V`")
    # Little point in enforcing the spec - might want to repurpose the function later
    if spec not in ('H', 'S', 'G', 'U', 'A'):
        raise ValueError("Spec variable must be one of `H`, `S`, `G` `U`, `A`")

    multiple_solutions = (fixed_var, spec) in multiple_solution_sets

    phase_kwargs = {fixed_var: fixed_var_val, 'zs': zs}
    # Unbound-method lookup for the spec property (H, S, ...)
    spec_fun = getattr(phase.__class__, spec)
    # print('spec_fun', spec_fun)
    if fprime:
        try:
            # Gotta be a lookup by (spec, iter_var, fixed_var)
            der_attr = strs_to_ders[(spec, iter_var, fixed_var)]
        except KeyError:
            der_attr = 'd' + spec + '_d' + iter_var
        der_attr_fun = getattr(phase.__class__, der_attr)
        # print('der_attr_fun', der_attr_fun)
    def to_solve(guess, solved_phase=None):
        # Objective: spec(phase at guess) - spec_val, plus its derivative
        # when fprime is on; `solved_phase` allows reuse of a phase that
        # has already been evaluated at the candidate condition
        global iterations
        iterations += 1

        if solved_phase is not None:
            p = solved_phase
        else:
            phase_kwargs[iter_var] = guess
            p = phase.to(**phase_kwargs)

        err = spec_fun(p) - spec_val
        # err = (spec_fun(p) - spec_val)/spec_val
        store[:] = (p, err)
        if fprime:
            # print([err, guess, p.eos_mix.phase, der_attr])
            derr = der_attr_fun(p)
            # derr = der_attr_fun(p)/spec_val
            return err, derr
        # print(err)
        return err

    arg_fprime = fprime
    high = None # Optional and not often used bound for newton
    if fixed_var == 'V':
        # At fixed volume there is a physical maximum T or P; tighten the
        # upper bound with it if available
        if iter_var == 'T':
            max_phys = phase.T_max_at_V(fixed_var_val)
        elif iter_var == 'P':
            max_phys = phase.P_max_at_V(fixed_var_val)
        if max_phys is not None:
            if max_bound is None:
                max_bound = high = max_phys
            else:
                max_bound = high = min(max_phys, max_bound)

    # TV iterations
    ignore_bound_fail = (fixed_var == 'T' and iter_var == 'P')

    # Attempt to bracket the solution around a phase transition before
    # starting the newton/secant iteration
    if fixed_var in ('T',) and ((fixed_var == 'T' and iter_var == 'P') or (fixed_var == 'P' and iter_var == 'T') or (fixed_var == 'T' and iter_var == 'V') ) and 1:
        try:
            fprime = False
            if iter_var == 'V':
                dummy_iter = 1e8
            else:
                dummy_iter = guess
            phase_kwargs[iter_var] = dummy_iter # Dummy pressure does not matter
            phase_temp = phase.to(**phase_kwargs)

            lower_phase, higher_phase = None, None
            delta = 1e-9
            if fixed_var == 'T' and iter_var == 'P':
                transitions = phase_temp.P_transitions()
                # assert len(transitions) == 1
                under_trans, above_trans = transitions[0] * (1.0 - delta), transitions[0] * (1.0 + delta)

            elif fixed_var == 'P' and iter_var == 'T':
                transitions = phase_temp.T_transitions()
                under_trans, above_trans = transitions[0] * (1.0 - delta), transitions[0] * (1.0 + delta)
                assert len(transitions) == 1

            elif fixed_var == 'T' and iter_var == 'V':
                transitions = phase_temp.P_transitions()
                delta = 1e-11
                # not_separated = True
                # while not_separated:
                P_higher = transitions[0]*(1.0 + delta) # Dummy pressure does not matter
                lower_phase = phase.to(T=fixed_var_val, zs=zs, P=P_higher)
                P_lower = transitions[0]*(1.0 - delta) # Dummy pressure does not matter
                higher_phase = phase.to(T=fixed_var_val, zs=zs, P=P_lower)
                under_trans, above_trans = lower_phase.V(), higher_phase.V()
                not_separated = isclose(under_trans, above_trans, rel_tol=1e-3)
                # delta *= 10

            # TODO is it possible to evaluate each limit at once, so half the work is avoided?
            bracketed_high, bracketed_low = False, False
            if min_bound is not None:
                f_min = to_solve(min_bound)
                f_low_trans = to_solve(under_trans, lower_phase)
                if f_min*f_low_trans <= 0.0:
                    bracketed_low = True
                    bounding_pair = (min(min_bound, under_trans), max(min_bound, under_trans))
            if max_bound is not None and (not bracketed_low or multiple_solutions):
                f_max = to_solve(max_bound)
                f_max_trans = to_solve(above_trans, higher_phase)
                if f_max*f_max_trans <= 0.0:
                    bracketed_high = True
                    bounding_pair = (min(max_bound, above_trans), max(max_bound, above_trans))

            if max_bound is not None and max_bound is not None and not bracketed_low and not bracketed_high:
                if not ignore_bound_fail:
                    raise NotBoundedError("Between phases")

            if bracketed_high or bracketed_low:
                # A sign change was found; disable oscillation detection and
                # restrict the search to the bracketing interval
                oscillation_detection = False
                high = bounding_pair[1] # restrict newton/secant just in case
                min_bound, max_bound = bounding_pair
                if not (min_bound < guess < max_bound):
                    guess = 0.5*(min_bound + max_bound)
            else:
                if min_bound is not None and transitions[0] < min_bound and not ignore_bound_fail:
                    raise NotBoundedError("Not likely to bound")
                if max_bound is not None and transitions[0] > max_bound and not ignore_bound_fail:
                    raise NotBoundedError("Not likely to bound")

        except NotBoundedError as e:
            raise e
        except Exception:
            # Bracketing is best-effort only; fall back to the plain solvers
            pass

        fprime = arg_fprime

    # Plot the objective function
    # tests = logspace(log10(10.6999), log10(10.70005), 15000)
    # tests = logspace(log10(10.6), log10(10.8), 15000)
    # tests = logspace(log10(min_bound), log10(max_bound), 1500)
    # values = [to_solve(t)[0] for t in tests]
    # values = [abs(t) for t in values]
    # import matplotlib.pyplot as plt
    # plt.loglog(tests, values)
    # plt.show()

    if oscillation_detection and ytol is not None:
        to_solve2, checker = oscillation_checking_wrapper(to_solve, full=True,
                                                          minimum_progress=minimum_progress,
                                                          good_err=ytol*1e6)
    else:
        to_solve2 = to_solve
        checker = None
    solve_bounded = False

    try:
        # All three variables P, T, V are positive but can grow unbounded, so
        # for the secant method, only set the one variable
        if fprime:
            iter_var_val = newton(to_solve2, guess, xtol=xtol, ytol=ytol, fprime=True,
                                  maxiter=maxiter, bisection=True, low=min_bound, high=high, gap_detection=False)
        else:
            iter_var_val = secant(to_solve2, guess, xtol=xtol, ytol=ytol,
                                  maxiter=maxiter, bisection=True, low=min_bound, high=high)
    except (UnconvergedError, OscillationError, NotBoundedError):
        solve_bounded = True
        # Unconverged - from newton/secant; oscillation - from the oscillation detector;
        # NotBounded - from when EOS needs to solve T and there is no solution
    fprime = False

    if solve_bounded:
        # Fall back to a bounded brenth solve, tightening the bounds with the
        # best bracketing points seen by the oscillation checker if possible
        if bounded and min_bound is not None and max_bound is not None:
            if checker:
                min_bound_prev, max_bound_prev, fa, fb = best_bounding_bounds(min_bound, max_bound,
                                        f=to_solve, xs_pos=checker.xs_pos, ys_pos=checker.ys_pos,
                                        xs_neg=checker.xs_neg, ys_neg=checker.ys_neg)
                if abs(min_bound_prev/max_bound_prev - 1.0) > 2.5e-4:
                    # If the points are too close, odds are there is a discontinuity in the newton solution
                    min_bound, max_bound = min_bound_prev, max_bound_prev
                    # maxiter = 20
                else:
                    fa, fb = None, None
            else:
                fa, fb = None, None

            # try:
            iter_var_val = brenth(to_solve, min_bound, max_bound, xtol=xtol,
                                  ytol=ytol, maxiter=maxiter, fa=fa, fb=fb)
            # except:
            #     # Not sure at all if good idea
            #     iter_var_val = secant(to_solve, guess, xtol=xtol, ytol=ytol,
            #                           maxiter=maxiter, bisection=True, low=min_bound)
    phase, err = store

    return iter_var_val, phase, iterations, err
def solve_PTV_HSGUA_1P(phase, zs, fixed_var_val, spec_val, fixed_var,
                       spec, iter_var, constants, correlations, last_conv=None,
                       oscillation_detection=True, guess_maxiter=50,
                       guess_xtol=1e-7, maxiter=80, xtol=1e-10):
    '''Single-phase flash driver: choose physically sensible bounds for the
    iteration variable `iter_var`, generate an initial guess via a sequence
    of estimation methods, and solve the (`fixed_var`, `spec`) specification
    with :func:`TPV_solve_HSGUA_1P`.

    Returns ``(T, P, phase, iterations, err)``.

    Raises
    ------
    ValueError
        If no estimation method can produce an initial guess.
    '''
    # TODO: replace oscillation detection with bounding parameters and translation
    # The cost should be less.
    if iter_var == 'T':
        if isinstance(phase, CoolPropPhase):
            min_bound = phase.AS.Tmin()
            max_bound = phase.AS.Tmax()
        else:
            min_bound = phase.T_MIN_FIXED
            max_bound = phase.T_MAX_FIXED
    elif iter_var == 'P':
        min_bound = Phase.P_MIN_FIXED*(1.0 - 1e-12)
        max_bound = Phase.P_MAX_FIXED*(1.0 + 1e-12)
        if isinstance(phase, CoolPropPhase):
            AS = phase.AS
            max_bound = AS.pmax()*(1.0 - 1e-7)
            min_bound = AS.trivial_keyed_output(CPiP_min)*(1.0 + 1e-7)
    elif iter_var == 'V':
        min_bound = Phase.V_MIN_FIXED
        max_bound = Phase.V_MAX_FIXED
        if isinstance(phase, (CEOSLiquid, CEOSGas)):
            # The EOS covolume b is a hard lower limit on molar volume
            c2R = phase.eos_class.c2*R
            Tcs, Pcs = constants.Tcs, constants.Pcs
            b = sum([c2R*Tcs[i]*zs[i]/Pcs[i] for i in range(constants.N)])
            min_bound = b*(1.0 + 1e-15)

    # Guess methods, in decreasing order of preference
    if phase.is_gas:
        methods = [LAST_CONVERGED, FIXED_GUESS, STP_T_GUESS, IG_ENTHALPY,
                   LASTOVKA_SHAW]
    elif phase.is_liquid:
        methods = [LAST_CONVERGED, FIXED_GUESS, STP_T_GUESS, IDEAL_LIQUID_ENTHALPY,
                   DADGOSTAR_SHAW_1]
    else:
        methods = [LAST_CONVERGED, FIXED_GUESS, STP_T_GUESS]

    for method in methods:
        try:
            guess = TPV_solve_HSGUA_guesses_1P(zs, method, constants, correlations,
                               fixed_var_val, spec_val,
                               iter_var=iter_var, fixed_var=fixed_var, spec=spec,
                               maxiter=guess_maxiter, xtol=guess_xtol, ytol=abs(spec_val)*1e-5,
                               bounded=True, min_bound=min_bound, max_bound=max_bound,
                               user_guess=None, last_conv=last_conv, T_ref=298.15,
                               P_ref=101325.0)
            break
        except Exception:
            pass
    else:
        # Bugfix: previously, failure of every guess method left `guess`
        # unbound and a confusing NameError was raised below
        raise ValueError("Could not generate an initial guess for iteration "
                         "variable %s" % (iter_var,))

    ytol = 1e-8*abs(spec_val)

    if iter_var == 'T' and spec in ('S', 'H'):
        ytol = ytol/100
    if isinstance(phase, IAPWS95):
        # Objective function isn't quite as nice and smooth as desired
        ytol = None
    _, phase, iterations, err = TPV_solve_HSGUA_1P(zs, phase, guess, fixed_var_val=fixed_var_val, spec_val=spec_val, ytol=ytol,
                                                   iter_var=iter_var, fixed_var=fixed_var, spec=spec, oscillation_detection=oscillation_detection,
                                                   minimum_progress=1e-4, maxiter=maxiter, fprime=True, xtol=xtol,
                                                   bounded=True, min_bound=min_bound, max_bound=max_bound)

    T, P = phase.T, phase.P
    return T, P, phase, iterations, err
def TPV_solve_HSGUA_guesses_1P(zs, method, constants, correlations,
                               fixed_var_val, spec_val,
                               iter_var='T', fixed_var='P', spec='H',
                               maxiter=20, xtol=1E-7, ytol=None,
                               bounded=False, min_bound=None, max_bound=None,
                               user_guess=None, last_conv=None, T_ref=298.15,
                               P_ref=101325.0):
    '''Generate an initial guess of the iteration variable (`T`, `P`, or `V`)
    for a single-phase flash specified by one state variable (`fixed_var`)
    and one property specification (`spec`, one of `H`, `S`, `G`, `U`, `A`),
    using the simplified property model selected by `method`.

    The trivial methods (`STP_T_GUESS`, `LAST_CONVERGED`, `FIXED_GUESS`)
    return a value immediately; the model-based methods solve a small 1D
    problem with the secant method and fall back to `brenth` between
    `min_bound` and `max_bound` when the secant solver fails.

    Parameters
    ----------
    zs : list[float]
        Mole fractions of the mixture, [-]
    method : str
        Guess-method constant, e.g. LASTOVKA_SHAW, IG_ENTHALPY
    constants : ChemicalConstantsPackage
        Constant properties package (MWs, Tcs, omegas, Vcs, atomss, ...)
    correlations : PropertyCorrelationsPackage
        Temperature-dependent property correlations package
    fixed_var_val : float
        Value of the fixed state variable, [K], [Pa], or [m^3/mol]
    spec_val : float
        Value of the property specification, [J/mol] or [J/(mol*K)]

    Returns
    -------
    guess : float
        Initial guess for the iteration variable, [K], [Pa], or [m^3/mol]
    '''
    if fixed_var == iter_var:
        raise ValueError("Fixed variable cannot be the same as iteration variable")
    if fixed_var not in ('T', 'P', 'V'):
        raise ValueError("Fixed variable must be one of `T`, `P`, `V`")
    if iter_var not in ('T', 'P', 'V'):
        raise ValueError("Iteration variable must be one of `T`, `P`, `V`")
    if spec not in ('H', 'S', 'G', 'U', 'A'):
        raise ValueError("Spec variable must be one of `H`, `S`, `G` `U`, `A`")

    cmps = range(len(zs))

    iter_T = iter_var == 'T'
    iter_P = iter_var == 'P'
    iter_V = iter_var == 'V'

    fixed_P = fixed_var == 'P'
    fixed_T = fixed_var == 'T'
    fixed_V = fixed_var == 'V'

    # Which intermediate properties each spec requires
    always_S = spec in ('S', 'G', 'A')
    always_H = spec in ('H', 'G', 'U', 'A')
    always_V = spec in ('U', 'A')

    if always_S:
        P_ref_inv = 1.0/P_ref
        dS_ideal = R*sum([zi*log(zi) for zi in zs if zi > 0.0]) # ideal composition entropy composition

    def err(guess):
        '''Objective: difference between the model-predicted spec property and
        `spec_val` at the guessed value of the iteration variable.'''
        # Translate the fixed variable to a local variable
        if fixed_P:
            P = fixed_var_val
        elif fixed_T:
            T = fixed_var_val
        elif fixed_V:
            V = fixed_var_val
            T = None
        # Translate the iteration variable to a local variable
        if iter_P:
            P = guess
            if not fixed_V:
                V = None
        elif iter_T:
            T = guess
            if not fixed_V:
                V = None
        elif iter_V:
            V = guess
            # Bug fix: previously `T` was unconditionally set to None here,
            # clobbering a fixed temperature and then requiring a pressure
            # that is undefined when fixed_var == 'T'.
            # NOTE(review): the fixed_var == 'T' with iter_var == 'V'
            # combination still has no pressure model in this function -
            # confirm it is never requested with an S/U/A spec.
            if not fixed_T:
                T = None
        if T is None:
            T = T_from_V(V, P)

        # Compute S, H, V as necessary
        if always_S:
            S = S_model(T, P) - dS_ideal - R*log(P*P_ref_inv)
        if always_H:
            H = H_model(T, P)
        if always_V and V is None:
            V = V_model(T, P)

        # Return the objective function
        if spec == 'H':
            err = H - spec_val
        elif spec == 'S':
            err = S - spec_val
        elif spec == 'G':
            err = (H - T*S) - spec_val
        elif spec == 'U':
            err = (H - P*V) - spec_val
        elif spec == 'A':
            err = (H - P*V - T*S) - spec_val
        return err

    # Precompute some things depending on the method
    if method in (LASTOVKA_SHAW, DADGOSTAR_SHAW_1):
        MW = mixing_simple(zs, constants.MWs)
        n_atoms = [sum(i.values()) for i in constants.atomss]
        # Similarity variable - atoms per unit mass
        sv = mixing_simple(zs, n_atoms)/MW

    if method == IG_ENTHALPY:
        HeatCapacityGases = correlations.HeatCapacityGases
        def H_model(T, P=None):
            H_calc = 0.
            for i in cmps:
                H_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral(T_ref, T)
            return H_calc

        def S_model(T, P=None):
            S_calc = 0.
            for i in cmps:
                S_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T)
            return S_calc

        # Ideal-gas volume relations
        def V_model(T, P):  return R*T/P
        def T_from_V(V, P): return P*V/R

    elif method == LASTOVKA_SHAW:
        H_ref = Lastovka_Shaw_integral(T_ref, sv)
        S_ref = Lastovka_Shaw_integral_over_T(T_ref, sv)

        def H_model(T, P=None):
            H1 = Lastovka_Shaw_integral(T, sv)
            dH = H1 - H_ref
            return property_mass_to_molar(dH, MW)

        def S_model(T, P=None):
            S1 = Lastovka_Shaw_integral_over_T(T, sv)
            dS = S1 - S_ref
            return property_mass_to_molar(dS, MW)

        # Ideal-gas volume relations
        def V_model(T, P):  return R*T/P
        def T_from_V(V, P): return P*V/R

    elif method == DADGOSTAR_SHAW_1:
        Tc = mixing_simple(zs, constants.Tcs)
        omega = mixing_simple(zs, constants.omegas)
        H_ref = Dadgostar_Shaw_integral(T_ref, sv)
        S_ref = Dadgostar_Shaw_integral_over_T(T_ref, sv)

        def H_model(T, P=None):
            H1 = Dadgostar_Shaw_integral(T, sv)
            Hvap = SMK(T, Tc, omega)
            return (property_mass_to_molar(H1 - H_ref, MW) - Hvap)

        def S_model(T, P=None):
            S1 = Dadgostar_Shaw_integral_over_T(T, sv)
            dSvap = SMK(T, Tc, omega)/T
            return (property_mass_to_molar(S1 - S_ref, MW) - dSvap)

        Vc = mixing_simple(zs, constants.Vcs)
        def V_model(T, P=None):  return COSTALD(T, Tc, Vc, omega)
        # Bug fix: this previously had no `return` statement (always returned
        # None) and sought a root of COSTALD itself rather than of
        # COSTALD(T) - V, so it never inverted the volume model.
        def T_from_V(V, P): return secant(lambda T: COSTALD(T, Tc, Vc, omega) - V, .65*Tc)

    elif method == IDEAL_LIQUID_ENTHALPY:
        HeatCapacityGases = correlations.HeatCapacityGases
        EnthalpyVaporizations = correlations.EnthalpyVaporizations
        def H_model(T, P=None):
            # Gas sensible enthalpy minus the heat of vaporization
            H_calc = 0.
            for i in cmps:
                H_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral(T_ref, T) - EnthalpyVaporizations[i](T))
            return H_calc

        def S_model(T, P=None):
            S_calc = 0.
            T_inv = 1.0/T
            for i in cmps:
                S_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T) - T_inv*EnthalpyVaporizations[i](T))
            return S_calc

        VolumeLiquids = correlations.VolumeLiquids
        def V_model(T, P=None):
            V_calc = 0.
            for i in cmps:
                V_calc += zs[i]*VolumeLiquids[i].T_dependent_property(T)
            return V_calc
        def T_from_V(V, P):
            # Mole-fraction-weighted inversion of each pure volume correlation
            T_calc = 0.
            for i in cmps:
                T_calc += zs[i]*VolumeLiquids[i].solve_property(V)
            return T_calc

    # Simple return values - not going through a model
    if method == STP_T_GUESS:
        if iter_T:
            return 298.15
        elif iter_P:
            return 101325.0
        elif iter_V:
            # Ideal-gas molar volume at 298.15 K and 101325 Pa
            return 0.024465403697038125
    elif method == LAST_CONVERGED:
        if last_conv is None:
            raise ValueError("No last converged")
        return last_conv
    elif method == FIXED_GUESS:
        if user_guess is None:
            raise ValueError("No user guess")
        return user_guess

    try:
        # All three variables P, T, V are positive but can grow unbounded, so
        # for the secant method, only set the one variable
        if iter_T:
            guess = 298.15
        elif iter_P:
            guess = 101325.0
        elif iter_V:
            guess = 0.024465403697038125
        return secant(err, guess, xtol=xtol, ytol=ytol,
                      maxiter=maxiter, bisection=True, low=min_bound)
    except (UnconvergedError,):
        # G and A specs are NOT MONOTONIC and the brackets will likely NOT BRACKET
        # THE ROOTS!
        return brenth(err, min_bound, max_bound, xtol=xtol, ytol=ytol, maxiter=maxiter)
def PH_secant_1P(T_guess, P, H, zs, phase, maxiter=200, xtol=1E-10,
                 minimum_progress=0.3, oscillation_detection=True):
    '''Solve for the temperature of a single phase at pressure `P` matching
    the specified molar enthalpy `H`, using the secant method.

    Returns (T, converged_phase, iterations, err).
    '''
    global iterations
    iterations = 0
    last = []  # holds (phase, err) from the most recent objective evaluation

    def objective(T_trial):
        global iterations
        iterations += 1
        trial = phase.to_TP_zs(T_trial, P, zs)
        residual = trial.H() - H
        last[:] = (trial, residual)
        return residual

    solver_fun = objective
    if oscillation_detection:
        solver_fun, checker = oscillation_checking_wrapper(objective, full=True,
                                                           minimum_progress=minimum_progress)

    T = secant(solver_fun, T_guess, xtol=xtol, maxiter=maxiter)
    converged_phase, err = last
    return T, converged_phase, iterations, err
def PH_newton_1P(T_guess, P, H, zs, phase, maxiter=200, xtol=1E-10,
                 minimum_progress=0.3, oscillation_detection=True):
    '''Solve for the temperature of a single phase at pressure `P` matching
    the specified molar enthalpy `H`, using Newton's method with the
    analytical derivative dH/dT.

    Returns (T, converged_phase, iterations, err).
    '''
    global iterations
    iterations = 0
    last = []  # holds (phase, err) from the most recent objective evaluation

    def objective(T_trial):
        global iterations
        iterations += 1
        trial = phase.to_TP_zs(T_trial, P, zs)
        residual = trial.H() - H
        dresidual_dT = trial.dH_dT()
        last[:] = (trial, residual)
        return residual, dresidual_dT

    solver_fun = objective
    if oscillation_detection:
        solver_fun, checker = oscillation_checking_wrapper(objective, full=True,
                                                           minimum_progress=minimum_progress)

    T = newton(solver_fun, T_guess, fprime=True, xtol=xtol, maxiter=maxiter)
    converged_phase, err = last
    return T, converged_phase, iterations, err
def TVF_pure_newton(P_guess, T, liquids, gas, maxiter=200, xtol=1E-10):
    '''Solve the saturation pressure of a pure component at temperature `T`
    by driving the liquid and gas fugacities equal, using Newton's method
    with the analytical pressure derivatives of the fugacities. When several
    liquid models are given, the one with the lowest Gibbs energy is used
    at each pressure.

    Returns (Psat, liquid_phase, gas_phase, iterations, err).
    '''
    # Bug fix: `len(liquids)` is truthy for any non-empty list, which made
    # the multiple-liquid branch below unreachable; compare against 1.
    one_liquid = len(liquids) == 1
    zs = [1.0]
    store = []
    global iterations
    iterations = 0
    def to_solve_newton(P):
        global iterations
        iterations += 1
        g = gas.to_TP_zs(T, P, zs)
        fugacity_gas = g.fugacities()[0]
        dfugacities_dP_gas = g.dfugacities_dP()[0]
        if one_liquid:
            lowest_phase = liquids[0].to_TP_zs(T, P, zs)
        else:
            # Multiple liquid models - use the one with the lowest Gibbs energy
            ls = [l.to_TP_zs(T, P, zs) for l in liquids]
            G_min, lowest_phase = 1e100, None
            for l in ls:
                G = l.G()
                if G < G_min:
                    G_min, lowest_phase = G, l

        fugacity_liq = lowest_phase.fugacities()[0]
        dfugacities_dP_liq = lowest_phase.dfugacities_dP()[0]

        err = fugacity_liq - fugacity_gas
        derr_dP = dfugacities_dP_liq - dfugacities_dP_gas
        store[:] = (lowest_phase, g, err)
        return err, derr_dP
    Psat = newton(to_solve_newton, P_guess, xtol=xtol, maxiter=maxiter,
                  low=Phase.P_MIN_FIXED,
                  require_eval=True, bisection=False, fprime=True)
    l, g, err = store

    return Psat, l, g, iterations, err
def TVF_pure_secant(P_guess, T, liquids, gas, maxiter=200, xtol=1E-10):
    '''Solve the saturation pressure of a pure component at temperature `T`
    by driving the liquid and gas fugacities equal, using the secant method.
    When several liquid models are given, the one with the lowest Gibbs
    energy is used at each pressure.

    Returns (Psat, liquid_phase, gas_phase, iterations, err).
    '''
    # Bug fix: `len(liquids)` is truthy for any non-empty list, which made
    # the multiple-liquid branch below unreachable; compare against 1.
    one_liquid = len(liquids) == 1
    zs = [1.0]
    store = []
    global iterations
    iterations = 0
    def to_solve_secant(P):
        global iterations
        iterations += 1
        g = gas.to_TP_zs(T, P, zs)
        fugacity_gas = g.fugacities()[0]
        if one_liquid:
            lowest_phase = liquids[0].to_TP_zs(T, P, zs)
        else:
            # Multiple liquid models - use the one with the lowest Gibbs energy
            ls = [l.to_TP_zs(T, P, zs) for l in liquids]
            G_min, lowest_phase = 1e100, None
            for l in ls:
                G = l.G()
                if G < G_min:
                    G_min, lowest_phase = G, l

        fugacity_liq = lowest_phase.fugacities()[0]
        err = fugacity_liq - fugacity_gas
        store[:] = (lowest_phase, g, err)
        return err
    if P_guess < Phase.P_MIN_FIXED:
        raise ValueError("Too low.")
    Psat = secant(to_solve_secant, P_guess, xtol=xtol, maxiter=maxiter, low=Phase.P_MIN_FIXED*(1-1e-10))
    l, g, err = store

    return Psat, l, g, iterations, err
def PVF_pure_newton(T_guess, P, liquids, gas, maxiter=200, xtol=1E-10):
    '''Solve the saturation temperature of a pure component at pressure `P`
    by driving the liquid and gas fugacities equal, using Newton's method
    with the analytical temperature derivatives of the fugacities. When
    several liquid models are given, the one with the lowest Gibbs energy
    is used at each temperature.

    Returns (Tsat, liquid_phase, gas_phase, iterations, err).
    '''
    # Bug fix: `len(liquids)` is truthy for any non-empty list, which made
    # the multiple-liquid branch below unreachable; compare against 1.
    one_liquid = len(liquids) == 1
    zs = [1.0]
    store = []
    global iterations
    iterations = 0
    def to_solve_newton(T):
        global iterations
        iterations += 1
        g = gas.to_TP_zs(T, P, zs)
        fugacity_gas = g.fugacities()[0]
        dfugacities_dT_gas = g.dfugacities_dT()[0]
        if one_liquid:
            lowest_phase = liquids[0].to_TP_zs(T, P, zs)
        else:
            # Multiple liquid models - use the one with the lowest Gibbs energy
            ls = [l.to_TP_zs(T, P, zs) for l in liquids]
            G_min, lowest_phase = 1e100, None
            for l in ls:
                G = l.G()
                if G < G_min:
                    G_min, lowest_phase = G, l

        fugacity_liq = lowest_phase.fugacities()[0]
        dfugacities_dT_liq = lowest_phase.dfugacities_dT()[0]

        err = fugacity_liq - fugacity_gas
        derr_dT = dfugacities_dT_liq - dfugacities_dT_gas
        store[:] = (lowest_phase, g, err)
        return err, derr_dT
    Tsat = newton(to_solve_newton, T_guess, xtol=xtol, maxiter=maxiter,
                  low=Phase.T_MIN_FIXED,
                  require_eval=True, bisection=False, fprime=True)
    l, g, err = store

    return Tsat, l, g, iterations, err
def PVF_pure_secant(T_guess, P, liquids, gas, maxiter=200, xtol=1E-10):
    '''Solve the saturation temperature of a pure component at pressure `P`
    by driving the liquid and gas fugacities equal, using the secant method.
    When several liquid models are given, the one with the lowest Gibbs
    energy is used at each temperature.

    Returns (Tsat, liquid_phase, gas_phase, iterations, err).
    '''
    # Bug fix: `len(liquids)` is truthy for any non-empty list, which made
    # the multiple-liquid branch below unreachable; compare against 1.
    one_liquid = len(liquids) == 1
    zs = [1.0]
    store = []
    global iterations
    iterations = 0
    def to_solve_secant(T):
        global iterations
        iterations += 1
        g = gas.to_TP_zs(T, P, zs)
        fugacity_gas = g.fugacities()[0]
        if one_liquid:
            lowest_phase = liquids[0].to_TP_zs(T, P, zs)
        else:
            # Multiple liquid models - use the one with the lowest Gibbs energy
            ls = [l.to_TP_zs(T, P, zs) for l in liquids]
            G_min, lowest_phase = 1e100, None
            for l in ls:
                G = l.G()
                if G < G_min:
                    G_min, lowest_phase = G, l

        fugacity_liq = lowest_phase.fugacities()[0]
        err = fugacity_liq - fugacity_gas
        store[:] = (lowest_phase, g, err)
        return err
    Tsat = secant(to_solve_secant, T_guess, xtol=xtol, maxiter=maxiter,
                  low=Phase.T_MIN_FIXED)
    l, g, err = store

    return Tsat, l, g, iterations, err
def TSF_pure_newton(P_guess, T, other_phases, solids, maxiter=200, xtol=1E-10):
    '''Solve the sublimation/melting pressure of a pure component at
    temperature `T` by driving the fugacity of the solid and the lowest-Gibbs
    fluid phase equal, using Newton's method with analytical pressure
    derivatives of the fugacities.

    Returns (Psub, other_phase, solid_phase, iterations, err).
    '''
    # Bug fix: `len(...)` is truthy for any non-empty list, which made the
    # multiple-phase minimum-Gibbs branches below unreachable; compare
    # against 1 explicitly.
    one_other = len(other_phases) == 1
    one_solid = len(solids) == 1
    zs = [1.0]
    store = []
    global iterations
    iterations = 0
    def to_solve_newton(P):
        global iterations
        iterations += 1
        if one_solid:
            lowest_solid = solids[0].to_TP_zs(T, P, zs)
        else:
            # Multiple solid models - use the one with the lowest Gibbs energy
            ss = [s.to_TP_zs(T, P, zs) for s in solids]
            G_min, lowest_solid = 1e100, None
            for o in ss:
                G = o.G()
                if G < G_min:
                    G_min, lowest_solid = G, o

        fugacity_solid = lowest_solid.fugacities()[0]
        dfugacities_dP_solid = lowest_solid.dfugacities_dP()[0]

        if one_other:
            lowest_other = other_phases[0].to_TP_zs(T, P, zs)
        else:
            # Multiple fluid models - use the one with the lowest Gibbs energy
            others = [l.to_TP_zs(T, P, zs) for l in other_phases]
            G_min, lowest_other = 1e100, None
            for o in others:
                G = o.G()
                if G < G_min:
                    G_min, lowest_other = G, o

        fugacity_other = lowest_other.fugacities()[0]
        dfugacities_dP_other = lowest_other.dfugacities_dP()[0]

        err = fugacity_other - fugacity_solid
        derr_dP = dfugacities_dP_other - dfugacities_dP_solid
        store[:] = (lowest_other, lowest_solid, err)
        return err, derr_dP

    Psub = newton(to_solve_newton, P_guess, xtol=xtol, maxiter=maxiter,
                  require_eval=True, bisection=False, fprime=True)
    other, solid, err = store

    return Psub, other, solid, iterations, err
def PSF_pure_newton(T_guess, P, other_phases, solids, maxiter=200, xtol=1E-10):
    '''Solve the sublimation/melting temperature of a pure component at
    pressure `P` by driving the fugacity of the solid and the lowest-Gibbs
    fluid phase equal, using Newton's method with analytical temperature
    derivatives of the fugacities.

    Returns (Tsub, other_phase, solid_phase, iterations, err).
    '''
    # Bug fix: `len(...)` is truthy for any non-empty list, which made the
    # multiple-phase minimum-Gibbs branches below unreachable; compare
    # against 1 explicitly.
    one_other = len(other_phases) == 1
    one_solid = len(solids) == 1
    zs = [1.0]
    store = []
    global iterations
    iterations = 0
    def to_solve_newton(T):
        global iterations
        iterations += 1
        if one_solid:
            lowest_solid = solids[0].to_TP_zs(T, P, zs)
        else:
            # Multiple solid models - use the one with the lowest Gibbs energy
            ss = [s.to_TP_zs(T, P, zs) for s in solids]
            G_min, lowest_solid = 1e100, None
            for o in ss:
                G = o.G()
                if G < G_min:
                    G_min, lowest_solid = G, o

        fugacity_solid = lowest_solid.fugacities()[0]
        dfugacities_dT_solid = lowest_solid.dfugacities_dT()[0]

        if one_other:
            lowest_other = other_phases[0].to_TP_zs(T, P, zs)
        else:
            # Multiple fluid models - use the one with the lowest Gibbs energy
            others = [l.to_TP_zs(T, P, zs) for l in other_phases]
            G_min, lowest_other = 1e100, None
            for o in others:
                G = o.G()
                if G < G_min:
                    G_min, lowest_other = G, o

        fugacity_other = lowest_other.fugacities()[0]
        dfugacities_dT_other = lowest_other.dfugacities_dT()[0]

        err = fugacity_other - fugacity_solid
        derr_dT = dfugacities_dT_other - dfugacities_dT_solid
        store[:] = (lowest_other, lowest_solid, err)
        return err, derr_dT

    Tsub = newton(to_solve_newton, T_guess, xtol=xtol, maxiter=maxiter,
                  require_eval=True, bisection=False, fprime=True)
    other, solid, err = store

    return Tsub, other, solid, iterations, err
def solve_T_VF_IG_K_composition_independent(VF, T, zs, gas, liq, xtol=1e-10):
    '''Solve for the pressure at which a mixture with composition-independent
    K values (ideal-gas vapor, so all gas fugacity coefficients are 1; liquid
    fugacity coefficients independent of composition) has vapor fraction `VF`
    at temperature `T`. A Newton iteration in ln(P) is used between the
    analytically-estimated bubble and dew pressures.

    Returns (P, xs, ys, iterations, err).

    Derivation of the Newton objective (sympy):

    from sympy import *
    zi, P, VF = symbols('zi, P, VF')
    l_phi, g_phi = symbols('l_phi, g_phi', cls=Function)
    # g_phi = symbols('g_phi')
    # Ki = l_phi(P)/g_phi(P)
    Ki = l_phi(P)#/g_phi
    err = zi*(Ki-1)/(1+VF*(Ki-1))
    cse([diff(err, P), err], optimizations='basic')
    '''
    # gas phis are all one in IG model
    # gas.to(T=T, P=P, zs=zs)
    cmps = range(liq.N)
    global Ks, iterations, err
    iterations = 0
    err = 0.0
    def to_solve(lnP):
        # Rachford-Rice objective in ln(P) space; returns (err, derr/dlnP)
        global Ks, iterations, err
        iterations += 1
        P = exp(lnP)
        l = liq.to(T=T, P=P, zs=zs)
        Ks = liquid_phis = l.phis()
        dlnphis_dP_l = l.dphis_dP()
        err = derr = 0.0
        for i in cmps:
            x1 = liquid_phis[i] - 1.0
            x2 = VF*x1
            x3 = 1.0/(x2 + 1.0)
            x4 = x3*zs[i]
            err += x1*x4
            derr += x4*(1.0 - x2*x3)*dlnphis_dP_l[i]
        # Chain rule: d(err)/d(lnP) = P*d(err)/dP
        return err, P*derr

    # estimate bubble point and dew point
    # Make sure to overwrite the phase so the Psats get cached
    P_base = 1e5
    liq = liq.to(T=T, P=P_base, zs=zs)
    phis = liq.phis()
    P_bub, P_dew = 0.0, 0.0
    for i in range(liq.N):
        P_bub += phis[i]*zs[i]
        P_dew += zs[i]/(phis[i]*P_base)
    P_bub = P_bub*liq.P
    P_dew = 1.0/P_dew
    # Linear interpolation between the two limits as the starting point
    P_guess = VF*P_dew + (1.0 - VF)*P_bub

    # When Poynting is on, the are only an estimate; otherwise it is dead on
    # and there is no need for a solver
    if liq.use_Poynting or 0.0 < VF < 1.0:
        lnP = newton(to_solve, log(P_guess), xtol=xtol, fprime=True)
        P = exp(lnP)
    else:
        # Bubble or dew point without Poynting: the analytical estimates are exact
        if VF == 0.0:
            Ks = liq.to(T=T, P=P_bub, zs=zs).phis()
            P = P_bub
        elif VF == 1.0:
            Ks = liq.to(T=T, P=P_dew, zs=zs).phis()
            P = P_dew
        else:
            raise ValueError("Vapor fraction outside range 0 to 1")

    xs = [zs[i]/(1.+VF*(Ks[i]-1.)) for i in cmps]
    for i in cmps:
        # Ks list is reused in place as the vapor compositions ys = Ks*xs
        Ks[i] *= xs[i]
    ys = Ks
    return P, xs, ys, iterations, err
def solve_P_VF_IG_K_composition_independent(VF, P, zs, gas, liq, xtol=1e-10):
    '''Solve for the temperature at which a mixture with composition-independent
    K values (ideal-gas vapor; liquid fugacity coefficients independent of
    composition) has vapor fraction `VF` at pressure `P`. A Newton iteration
    is attempted first, with a bracketing fallback.

    Returns (T, xs, ys, iterations, err).
    '''
    # gas phis are all one in IG model
    cmps = range(liq.N)
    global Ks, iterations, err
    iterations = 0
    def to_solve(T):
        # Rachford-Rice objective and its temperature derivative
        global Ks, iterations, err
        iterations += 1
        dlnphis_dT_l, liquid_phis = liq.dphis_dT_at(T, P, zs, phis_also=True)
        Ks = liquid_phis
        err = derr = 0.0
        for i in cmps:
            x1 = liquid_phis[i] - 1.0
            x2 = VF*x1
            x3 = 1.0/(x2 + 1.0)
            x4 = x3*zs[i]
            err += x1*x4
            derr += x4*(1.0 - x2*x3)*dlnphis_dT_l[i]
        return err, derr
    # Bug fix: bare `except:` narrowed to `except Exception:` so
    # KeyboardInterrupt/SystemExit are no longer swallowed by the fallbacks.
    try:
        T = newton(to_solve, 300.0, xtol=xtol, fprime=True, low=1e-6)
    except Exception:
        try:
            # Bracketing fallback over a plausible temperature range
            T = brenth(lambda x: to_solve(x)[0], 300, 1000)
        except Exception:
            # Last resort: Newton again from a different starting point
            T = newton(to_solve, 400.0, xtol=xtol, fprime=True, low=1e-6)
    xs = [zs[i]/(1.+VF*(Ks[i]-1.)) for i in cmps]
    for i in cmps:
        # Ks list is reused in place as the vapor compositions ys = Ks*xs
        Ks[i] *= xs[i]
    ys = Ks
    return T, xs, ys, iterations, err
def sequential_substitution_2P_sat(T, P, V, zs_dry, xs_guess, ys_guess, liquid_phase,
                                   gas_phase, idx, z0, z1=None, maxiter=1000, tol=1E-13,
                                   trivial_solution_tol=1e-5, damping=1.0):
    '''Sequential substitution two-phase flash in which the feed amount of
    one component (index `idx`) is varied by a damped secant iteration until
    the mixture is exactly at saturation (vapor fraction of 1.0).

    Returns (V_over_F, xs, zs, l, g, iteration, err, err0) where `zs` is the
    adjusted (saturated) overall composition.
    '''
    xs, ys = xs_guess, ys_guess
    V_over_F = 1.0
    cmps = range(len(zs_dry))

    if z1 is None:
        # Generate a second secant point close to the first, staying in [0, 1]
        z1 = z0*1.0001 + 1e-4
        if z1 > 1:
            z1 = z0*1.0001 - 1e-4
    # secant step/solving
    p0, p1, err0, err1 = None, None, None, None
    def step(p0, p1, err0, err1):
        # First two calls return the two starting points; afterwards a
        # damped secant update on the saturation error
        if p0 is None:
            return z0
        if p1 is None:
            return z1
        else:
            new = p1 - err1*(p1 - p0)/(err1 - err0)*damping
            return new

    for iteration in range(maxiter):
        p0, p1 = step(p0, p1, err0, err1), p0
        zs = list(zs_dry)
        zs[idx] = p0
        zs = normalize(zs)
        # print(zs, p0, p1)

        g = gas_phase.to(ys, T=T, P=P, V=V)
        l = liquid_phase.to(xs, T=T, P=P, V=V)
        lnphis_g = g.lnphis()
        lnphis_l = l.lnphis()

        Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]

        V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)
        # Saturation error: drive the vapor fraction to exactly 1.0
        err0, err1 = 1.0 - V_over_F, err0

        # Check for negative fractions - normalize only if needed
        for xi in xs_new:
            if xi < 0.0:
                xs_new_sum = sum(abs(i) for i in xs_new)
                xs_new = [abs(i)/xs_new_sum for i in xs_new]
                break

        for yi in ys_new:
            if yi < 0.0:
                ys_new_sum = sum(abs(i) for i in ys_new)
                ys_new = [abs(i)/ys_new_sum for i in ys_new]
                break

        # Equilibrium error combines the K-value mismatch with the deviation
        # of the vapor composition from the (saturated) feed composition
        err, comp_diff = 0.0, 0.0
        for i in cmps:
            err_i = Ks[i]*xs[i]/ys[i] - 1.0
            err += err_i*err_i + abs(ys[i] - zs[i])
            comp_diff += abs(xs[i] - ys[i])

        # Accept the new compositions
        # xs, ys = xs_new, zs # This has worse convergence behavior?
        xs, ys = xs_new, ys_new

        if comp_diff < trivial_solution_tol:
            raise ValueError("Converged to trivial condition, compositions of both phases equal")

        if err < tol and abs(err0) < tol:
            return V_over_F, xs, zs, l, g, iteration, err, err0
    raise UnconvergedError('End of SS without convergence')
def SS_VF_simultaneous(guess, fixed_val, zs, liquid_phase, gas_phase,
                       iter_var='T', fixed_var='P', V_over_F=1,
                       maxiter=200, xtol=1E-10, comp_guess=None,
                       damping=0.8, tol_eq=1e-12, update_frequency=3):
    '''Wrapper which solves for the `iter_var` giving a specified vapor
    fraction `V_over_F` at a fixed state variable, via the sequential
    substitution routine with a 'beta' spec.

    Returns (iter_val, comp_guess, iter_phase, const_phase, iteration,
    {'err_eq': ..., 'spec_err': ...}).
    '''
    if comp_guess is None:
        comp_guess = zs

    # A vapor fraction of one half or more means the incipient (iterated)
    # phase is the liquid - a dew-point-like calculation
    dew = V_over_F == 1 or V_over_F > 0.5
    xs_guess, ys_guess = (comp_guess, zs) if dew else (zs, comp_guess)

    sln = sequential_substitution_2P_HSGUAbeta(zs=zs, xs_guess=xs_guess, ys_guess=ys_guess, liquid_phase=liquid_phase,
                                               gas_phase=gas_phase, fixed_var_val=fixed_val, spec_val=V_over_F, tol_spec=xtol,
                                               iter_var_0=guess, update_frequency=update_frequency,
                                               iter_var=iter_var, fixed_var=fixed_var, spec='beta', damping=damping, tol_eq=tol_eq)
    guess, _, xs, ys, l, g, iteration, err_eq, spec_err = sln

    if dew:
        comp_guess, iter_phase, const_phase = xs, l, g
    else:
        comp_guess, iter_phase, const_phase = ys, g, l

    return guess, comp_guess, iter_phase, const_phase, iteration, {'err_eq': err_eq, 'spec_err': spec_err}
def sequential_substitution_2P_HSGUAbeta(zs, xs_guess, ys_guess, liquid_phase,
                                         gas_phase, fixed_var_val, spec_val,
                                         iter_var_0, iter_var_1=None,
                                         iter_var='T', fixed_var='P', spec='H',
                                         maxiter=1000, tol_eq=1E-13, tol_spec=1e-9,
                                         trivial_solution_tol=1e-5, damping=1.0,
                                         V_over_F_guess=None, fprime=True,
                                         update_frequency=1, update_eq=1e-7):
    '''Sequential substitution two-phase flash for a fixed state variable
    (`fixed_var`) plus one property specification (`spec`; may also be
    'beta', meaning vapor fraction), iterating `iter_var` with a mixed
    secant/Newton step.

    Returns (iter_val, V_over_F, xs, ys, l, g, iteration, err_eq, spec_err).
    '''
    xs, ys = xs_guess, ys_guess
    if V_over_F_guess is None:
        V_over_F = 0.5
    else:
        V_over_F = V_over_F_guess

    cmps = range(len(zs))
    if iter_var_1 is None:
        # Second secant starting point close to the first
        iter_var_1 = iter_var_0*1.0001 + 1e-4

    # Absolute spec tolerance scaled by the spec magnitude, with a floor
    # for zero-valued specifications
    tol_spec_abs = tol_spec*abs(spec_val)
    if tol_spec_abs == 0.0:
        if spec == 'beta':
            tol_spec_abs = 1e-9
        else:
            tol_spec_abs = 1e-7

    # secant step/solving
    p0, p1, spec_err, spec_err_old = None, None, None, None
    def step(p0, p1, spec_err, spec_err_old, step_der):
        # First two calls return the starting points; then take whichever of
        # the Newton or damped secant step is smaller in magnitude
        if p0 is None:
            return iter_var_0
        if p1 is None:
            return iter_var_1
        else:
            secant_step = spec_err_old*(p1 - p0)/(spec_err_old - spec_err)*damping
            if fprime and step_der is not None:
                if abs(step_der) < abs(secant_step):
                    step = step_der
                    new = p0 - step
                else:
                    step = secant_step
                    new = p1 - step
            else:
                new = p1 - secant_step
            if new < 1e-7:
                # Only handle positive values, damped steps to .5
                new = 0.5*(1e-7 + p0)
            return new

    TPV_args = {fixed_var: fixed_var_val, iter_var: iter_var_0}

    VF_spec = spec == 'beta'
    if not VF_spec:
        # Property functions and the fixed-variable-constant derivatives,
        # looked up once as unbound methods
        spec_fun_l = getattr(liquid_phase.__class__, spec)
        spec_fun_g = getattr(gas_phase.__class__, spec)

        s_der = 'd%s_d%s_%s'%(spec, iter_var, fixed_var)
        spec_der_fun_l = getattr(liquid_phase.__class__, s_der)
        spec_der_fun_g = getattr(gas_phase.__class__, s_der)
    else:
        V_over_F = iter_var_0

    step_der = None
    # Robustness fix: initialize the equilibrium error before it can be read
    # in the update condition below (previously only the short-circuit order
    # of the `or` chain kept it from being referenced before assignment).
    err_eq = 1e100
    for iteration in range(maxiter):
        if (not (iteration % update_frequency) or err_eq < update_eq) or iteration < 2:
            p0, p1 = step(p0, p1, spec_err, spec_err_old, step_der), p0
        TPV_args[iter_var] = p0

        g = gas_phase.to(ys, **TPV_args)
        l = liquid_phase.to(xs, **TPV_args)
        lnphis_g = g.lnphis()
        lnphis_l = l.lnphis()

        Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]

        V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)

        if not VF_spec:
            # Phase-fraction-weighted property and its derivative
            spec_calc = spec_fun_l(l)*(1.0 - V_over_F) + spec_fun_g(g)*V_over_F
            spec_der_calc = spec_der_fun_l(l)*(1.0 - V_over_F) + spec_der_fun_g(g)*V_over_F
        else:
            spec_calc = V_over_F
        if (not (iteration % update_frequency) or err_eq < update_eq) or iteration < 2:
            spec_err_old = spec_err # Only update old error on an update iteration
        spec_err = spec_calc - spec_val

        try:
            step_der = spec_err/spec_der_calc
        except Exception:
            # spec_der_calc may be zero, or undefined when spec == 'beta'
            pass

        # Check for negative fractions - normalize only if needed
        for xi in xs_new:
            if xi < 0.0:
                xs_new_sum_inv = 1.0/sum(abs(i) for i in xs_new)
                xs_new = [abs(i)*xs_new_sum_inv for i in xs_new]
                break

        for yi in ys_new:
            if yi < 0.0:
                ys_new_sum_inv = 1.0/sum(abs(i) for i in ys_new)
                ys_new = [abs(i)*ys_new_sum_inv for i in ys_new]
                break

        err_eq, comp_diff = 0.0, 0.0
        for i in cmps:
            err_i = Ks[i]*xs[i]/ys[i] - 1.0
            err_eq += err_i*err_i
            comp_diff += abs(xs[i] - ys[i])

        # Accept the new compositions
        xs, ys = xs_new, ys_new
        # Allow the first iteration to start with the same composition
        if comp_diff < trivial_solution_tol and iteration:
            raise ValueError("Converged to trivial condition, compositions of both phases equal")

        # Bug fix: removed an unconditional debug `print` that wrote to stdout
        # on every iteration of this library routine.
        if err_eq < tol_eq and abs(spec_err) < tol_spec_abs:
            return p0, V_over_F, xs, ys, l, g, iteration, err_eq, spec_err
    raise UnconvergedError('End of SS without convergence')
def sequential_substitution_2P_double(zs, xs_guess, ys_guess, liquid_phase,
                                      gas_phase, guess, spec_vals,
                                      iter_var0='T', iter_var1='P',
                                      spec_vars=['H', 'S'],
                                      maxiter=1000, tol_eq=1E-13, tol_specs=1e-9,
                                      trivial_solution_tol=1e-5, damping=1.0,
                                      V_over_F_guess=None, fprime=True):
    '''Sequential substitution two-phase flash constrained by two property
    specifications (e.g. H and S), iterating two state variables (e.g. T and
    P) with a damped Newton step on the analytical 2x2 Jacobian.

    Returns ((iter0_val, iter1_val), V_over_F, xs, ys, l, g, iteration,
    err_eq, spec_errs).
    '''
    xs, ys = xs_guess, ys_guess
    if V_over_F_guess is None:
        V_over_F = 0.5
    else:
        V_over_F = V_over_F_guess

    cmps = range(len(zs))

    iter0_val = guess[0]
    iter1_val = guess[1]

    spec0_val = spec_vals[0]
    spec1_val = spec_vals[1]

    spec0_var = spec_vars[0]
    spec1_var = spec_vars[1]

    # Property functions and cross/direct derivative functions, looked up
    # once as unbound methods
    spec0_fun_l = getattr(liquid_phase.__class__, spec0_var)
    spec0_fun_g = getattr(gas_phase.__class__, spec0_var)

    spec1_fun_l = getattr(liquid_phase.__class__, spec1_var)
    spec1_fun_g = getattr(gas_phase.__class__, spec1_var)

    spec0_der0 = 'd%s_d%s_%s'%(spec0_var, iter_var0, iter_var1)
    spec1_der0 = 'd%s_d%s_%s'%(spec1_var, iter_var0, iter_var1)
    spec0_der1 = 'd%s_d%s_%s'%(spec0_var, iter_var1, iter_var0)
    spec1_der1 = 'd%s_d%s_%s'%(spec1_var, iter_var1, iter_var0)

    spec0_der0_fun_l = getattr(liquid_phase.__class__, spec0_der0)
    spec0_der0_fun_g = getattr(gas_phase.__class__, spec0_der0)

    spec1_der0_fun_l = getattr(liquid_phase.__class__, spec1_der0)
    spec1_der0_fun_g = getattr(gas_phase.__class__, spec1_der0)

    spec0_der1_fun_l = getattr(liquid_phase.__class__, spec0_der1)
    spec0_der1_fun_g = getattr(gas_phase.__class__, spec0_der1)

    spec1_der1_fun_l = getattr(liquid_phase.__class__, spec1_der1)
    spec1_der1_fun_g = getattr(gas_phase.__class__, spec1_der1)

    # Absolute spec tolerances, scaled by the magnitude of each spec value,
    # with a floor for zero-valued specifications
    tol0_abs = tol_specs*abs(spec0_val)
    tol1_abs = tol_specs*abs(spec1_val)
    if tol0_abs == 0.0:
        tol0_abs = 1e-7
    if tol1_abs == 0.0:
        tol1_abs = 1e-7

    # Bug fix: `TPV_args` was never initialized, raising NameError on the
    # first iteration.
    TPV_args = {}

    for iteration in range(maxiter):
        TPV_args[iter_var0] = iter0_val
        TPV_args[iter_var1] = iter1_val

        g = gas_phase.to(zs=ys, **TPV_args)
        l = liquid_phase.to(zs=xs, **TPV_args)
        lnphis_g = g.lnphis()
        lnphis_l = l.lnphis()

        Ks = [exp(lnphis_l[i] - lnphis_g[i]) for i in cmps]

        V_over_F, xs_new, ys_new = flash_inner_loop(zs, Ks, guess=V_over_F)

        # Phase-fraction-weighted mixture properties and derivatives
        spec0_calc = spec0_fun_l(l)*(1.0 - V_over_F) + spec0_fun_g(g)*V_over_F
        spec1_calc = spec1_fun_l(l)*(1.0 - V_over_F) + spec1_fun_g(g)*V_over_F

        spec0_der0_calc = spec0_der0_fun_l(l)*(1.0 - V_over_F) + spec0_der0_fun_g(g)*V_over_F
        spec0_der1_calc = spec0_der1_fun_l(l)*(1.0 - V_over_F) + spec0_der1_fun_g(g)*V_over_F

        spec1_der0_calc = spec1_der0_fun_l(l)*(1.0 - V_over_F) + spec1_der0_fun_g(g)*V_over_F
        spec1_der1_calc = spec1_der1_fun_l(l)*(1.0 - V_over_F) + spec1_der1_fun_g(g)*V_over_F

        errs = [spec0_calc - spec0_val, spec1_calc - spec1_val]
        jac = [[spec0_der0_calc, spec0_der1_calc], [spec1_der0_calc, spec1_der1_calc]]

        # Do the newton step
        dx = py_solve(jac, [-v for v in errs])
        iter0_val, iter1_val = [xi + dxi*damping for xi, dxi in zip([iter0_val, iter1_val], dx)]

        # Check for negative fractions - normalize only if needed
        for xi in xs_new:
            if xi < 0.0:
                xs_new_sum = sum(abs(i) for i in xs_new)
                xs_new = [abs(i)/xs_new_sum for i in xs_new]
                break

        for yi in ys_new:
            if yi < 0.0:
                ys_new_sum = sum(abs(i) for i in ys_new)
                ys_new = [abs(i)/ys_new_sum for i in ys_new]
                break

        err, comp_diff = 0.0, 0.0
        for i in cmps:
            err_i = Ks[i]*xs[i]/ys[i] - 1.0
            err += err_i*err_i
            comp_diff += abs(xs[i] - ys[i])

        xs, ys = xs_new, ys_new

        if comp_diff < trivial_solution_tol:
            raise ValueError("Converged to trivial condition, compositions of both phases equal")

        # Bug fix: the original convergence test and return referenced
        # undefined names (`err0`, `tol_spec_abs`, `p0`); converge on the
        # equilibrium error plus both spec errors instead.
        if err < tol_eq and abs(errs[0]) < tol0_abs and abs(errs[1]) < tol1_abs:
            return (iter0_val, iter1_val), V_over_F, xs, ys, l, g, iteration, err, errs
    raise UnconvergedError('End of SS without convergence')
def stability_iteration_Michelsen(trial_phase, zs_test, test_phase=None,
                                  maxiter=20, xtol=1E-12):
    '''Run Michelsen's direct-substitution stability iteration: starting from
    the trial composition `zs_test`, iterate fictional K values towards a
    stationary point of the tangent-plane distance for the feed held in
    `trial_phase`.

    Returns (sum_zs_test, Ks, zs_test, V_over_F, trial_zs, appearing_zs,
    dG_RT). An unnormalized `sum_zs_test` > 1 at convergence indicates an
    unstable feed; `dG_RT` is the (scaled) Gibbs energy change of forming the
    incipient phase.

    Raises UnconvergedError if `maxiter` iterations pass without meeting
    `xtol`.
    '''
    # So long as for both trial_phase, and test_phase use the lowest Gibbs energy fugacities, no need to test two phases.
    # Very much no need to converge using acceleration - just keep a low tolerance
    # At any point, can use the Ks working, assume a drop of the new phase, and evaluate two new phases and see if G drops.
    # If it does, drop out early! This implementation does not do that.
    # Should be possible to tell if converging to trivial solution during the process - and bail out then
    # It is possible to switch this function to operated on lnphis e.g.
    # corrections[i] = ci = zs[i]/zs_test[i]*trunc_exp(lnphis_trial[i] - lnphis_test[i])*sum_zs_test_inv
    # however numerical differences seem to be huge and operate better on fugacities with the trunc_exp function
    # then anything else.
    # Can this whole function be switched to the functional approach?
    # Should be possible
    if test_phase is None:
        test_phase = trial_phase
    T, P, zs = trial_phase.T, trial_phase.P, trial_phase.zs
    N = trial_phase.N
    fugacities_trial = trial_phase.fugacities_lowest_Gibbs()

    # Go through the feed composition - and the trial composition - if we have zeros, need to make them a trace;
    # copy zs_test first so the caller's list is never mutated
    zs_test2 = [0.0]*N
    for i in range(N):
        zs_test2[i] = zs_test[i]
    zs_test = zs_test2
    for i in range(N):
        if zs_test[i] == 0.0:
            zs_test[i] = 1e-50
            # break
    for i in range(N):
        if zs[i] == 0.0:
            zs2 = [0.0]*N
            for i in range(N):
                if zs[i] == 0.0:
                    zs2[i] = 1e-50
                else:
                    zs2[i] = zs[i]
            zs = zs2
            # Requires another evaluation of the trial phase
            trial_phase = trial_phase.to(T=T, P=P, zs=zs)
            fugacities_trial = trial_phase.fugacities_lowest_Gibbs()
            break

    # Basis of equations is for the test phase being a gas, the trial phase assumed is a liquid
    # makes no real difference
    Ks = [0.0]*N
    corrections = [1.0]*N

    # Model converges towards fictional K values which, when evaluated, yield the
    # stationary point composition
    for i in range(N):
        Ks[i] = zs_test[i]/zs[i]

    sum_zs_test = sum_zs_test_inv = 1.0
    converged = False
    for _ in range(maxiter):
        # test_phase = test_phase.to(T=T, P=P, zs=zs_test)
        # fugacities_test = test_phase.fugacities_lowest_Gibbs()
        fugacities_test = test_phase.fugacities_at_zs(zs_test)

        err = 0.0
        try:
            for i in range(N):
                corrections[i] = ci = fugacities_trial[i]/fugacities_test[i]*sum_zs_test_inv
                Ks[i] *= ci
                err += (ci - 1.0)*(ci - 1.0)
        except:
            # A test fugacity became zero
            # May need special handling for this outside.
            converged = True
            break

        if err < xtol:
            converged = True
            break

        # Update compositions for the next iteration - might as well move this above the break check
        for i in range(N):
            zs_test[i] = Ks[i]*zs[i] # new test phase comp

        # Cannot move the normalization above the error check - returning
        # unnormalized sum_zs_test is used also to detect a trivial solution
        sum_zs_test = 0.0
        for i in range(N):
            sum_zs_test += zs_test[i]
        try:
            sum_zs_test_inv = 1.0/sum_zs_test
        except:
            # Fugacities are all zero
            converged = True
            break

        for i in range(N):
            zs_test[i] *= sum_zs_test_inv

    if converged:
        try:
            # Chained assignment: xs/ys alias trial_zs/appearing_zs
            V_over_F, xs, ys = V_over_F, trial_zs, appearing_zs = flash_inner_loop(zs, Ks)
        except:
            # Converged to trivial solution so closely the math does not work
            V_over_F, xs, ys = V_over_F, trial_zs, appearing_zs = 0.0, zs, zs

        # Calculate the dG of the feed
        dG_RT = 0.0
        if V_over_F != 0.0:
            lnphis_test = test_phase.lnphis_at_zs(zs_test) #test_phase.lnphis()
            for i in range(N):
                dG_RT += zs_test[i]*(log(zs_test[i]) + lnphis_test[i])
            dG_RT *= V_over_F
        return sum_zs_test, Ks, zs_test, V_over_F, trial_zs, appearing_zs, dG_RT
    else:
        raise UnconvergedError('End of stability_iteration_Michelsen without convergence')
def TPV_double_solve_1P(zs, phase, guesses, spec_vals,
                        goal_specs=('V', 'U'), state_specs=('T', 'P'),
                        maxiter=200, xtol=1E-10, ytol=None, spec_funs=None):
    '''Solve a single phase for the two state variables in `state_specs`
    such that the two goal properties in `goal_specs` match `spec_vals`,
    using a damped Newton iteration on the analytical 2x2 Jacobian.

    Returns (states, phase, iterations, err, jac).
    '''
    kwargs = {'zs': zs}
    phase_cls = phase.__class__
    # Names of the four derivative methods, e.g. 'dV_dT_P'
    s00 = 'd%s_d%s_%s' %(goal_specs[0], state_specs[0], state_specs[1])
    s01 = 'd%s_d%s_%s' %(goal_specs[0], state_specs[1], state_specs[0])
    s10 = 'd%s_d%s_%s' %(goal_specs[1], state_specs[0], state_specs[1])
    s11 = 'd%s_d%s_%s' %(goal_specs[1], state_specs[1], state_specs[0])
    try:
        err0_fun = getattr(phase_cls, goal_specs[0])
        err1_fun = getattr(phase_cls, goal_specs[1])
        j00 = getattr(phase_cls, s00)
        j01 = getattr(phase_cls, s01)
        j10 = getattr(phase_cls, s10)
        j11 = getattr(phase_cls, s11)
    except AttributeError:
        # Bug fix: bare `except:` narrowed. The phase class does not expose
        # these methods directly; the `.value()` fallback inside `to_solve`
        # is used instead.
        pass

    cache = []

    def to_solve(states):
        kwargs[state_specs[0]] = float(states[0])
        kwargs[state_specs[1]] = float(states[1])
        new = phase.to(**kwargs)
        try:
            v0, v1 = err0_fun(new), err1_fun(new)
            jac = [[j00(new), j01(new)],
                   [j10(new), j11(new)]]
        except Exception:
            # Includes NameError when the fast getattr path above failed
            v0, v1 = new.value(goal_specs[0]), new.value(goal_specs[1])
            jac = [[new.value(s00), new.value(s01)],
                   [new.value(s10), new.value(s11)]]

        if spec_funs is not None:
            # Specs may themselves be functions of the new state
            err0 = v0 - spec_funs[0](new)
            err1 = v1 - spec_funs[1](new)
        else:
            err0 = v0 - spec_vals[0]
            err1 = v1 - spec_vals[1]
        errs = [err0, err1]
        cache[:] = [new, errs, jac]
        # Bug fix: removed an unconditional debug `print` of kwargs/errs
        # that wrote to stdout on every iteration.
        return errs, jac

    states, iterations = newton_system(to_solve, x0=guesses, jac=True, xtol=xtol,
                                       ytol=ytol, maxiter=maxiter, damping_func=damping_maintain_sign)
    phase = cache[0]
    err = cache[1]
    jac = cache[2]

    return states, phase, iterations, err, jac
def assert_stab_success_2P(liq, gas, stab, T, P, zs, guess_name, xs=None,
                           ys=None, VF=None, SS_tol=1e-15, rtol=1e-7):
    r'''Basic test helper - perform the named stability test, then a two-phase
    sequential-substitution flash from its result, and assert the calculated
    liquid composition, vapor composition, and/or vapor fraction match the
    expected values supplied by the caller (any of which may be omitted).
    The fugacities of both converged phases are always checked for equality.
    '''
    gas = gas.to(T=T, P=P, zs=zs)
    liq = liq.to(T=T, P=P, zs=zs)
    trial_comp = stab.incipient_guess_named(T, P, zs, guess_name)
    # The phase with the lower Gibbs energy is the stable (trial) phase
    if liq.G() < gas.G():
        min_phase, other_phase = liq, gas
    else:
        min_phase, other_phase = gas, liq
    _, _, _, V_over_F, trial_zs, appearing_zs, dG_RT = stability_iteration_Michelsen(min_phase, trial_comp, test_phase=other_phase, maxiter=100)
    V_over_F, xs_calc, ys_calc, l, g, iteration, err = sequential_substitution_2P(T=T, P=P, V=None,
                                                                                  zs=zs, xs_guess=trial_zs, ys_guess=appearing_zs,
                                                                                  liquid_phase=min_phase, tol=SS_tol,
                                                                                  gas_phase=other_phase)
    # Bug fix: guard on the *expected* values supplied by the caller (which
    # default to None), not the calculated ones which are always present;
    # previously leaving xs/ys at their defaults crashed inside
    # assert_close1d(None, ...).
    if xs is not None:
        assert_close1d(xs, xs_calc, rtol)
    if ys is not None:
        assert_close1d(ys, ys_calc, rtol)
    if VF is not None:
        assert_close(V_over_F, VF, rtol)
    assert_close1d(l.fugacities(), g.fugacities(), rtol)
def TPV_solve_HSGUA_guesses_VL(zs, method, constants, correlations,
                               fixed_var_val, spec_val,
                               iter_var='T', fixed_var='P', spec='H',
                               maxiter=20, xtol=1E-7, ytol=None,
                               bounded=False, min_bound=None, max_bound=None,
                               user_guess=None, last_conv=None, T_ref=298.15,
                               P_ref=101325.0):
    """Generate an initial guess for the iteration variable of a flash where
    one state variable is fixed (`fixed_var` = `fixed_var_val`) and a bulk
    property specification (`spec` in H/S/G/U/A, value `spec_val`) must be
    matched, using a simplified vapor-liquid model selected by `method`
    (IDEAL_WILSON or SHAW_ELEMENTAL).

    Returns a tuple ``(val, VF, xs, ys)`` where `val` is the converged value
    of `iter_var` and ``VF, xs, ys`` are the vapor fraction and phase
    compositions from the last flash evaluation.

    Raises ValueError for inconsistent/unknown variable specifications; on
    secant convergence failure, falls back to bounded `brenth` between
    `min_bound` and `max_bound`.
    """
    # Warm-start V/F shared between successive flash_model calls so the
    # Rachford-Rice solver starts from the previous solution.
    # NOTE(review): this is a module-level global -- not thread-safe or
    # reentrant; confirm that is acceptable for all callers.
    global V_over_F_guess
    V_over_F_guess = 0.5
    cmps = range(constants.N)
    Tcs, Pcs, omegas = constants.Tcs, constants.Pcs, constants.omegas
    # Validate the requested combination of fixed/iterated/spec variables.
    if fixed_var == iter_var:
        raise ValueError("Fixed variable cannot be the same as iteration variable")
    if fixed_var not in ('T', 'P', 'V'):
        raise ValueError("Fixed variable must be one of `T`, `P`, `V`")
    if iter_var not in ('T', 'P', 'V'):
        raise ValueError("Iteration variable must be one of `T`, `P`, `V`")
    if spec not in ('H', 'S', 'G', 'U', 'A'):
        raise ValueError("Spec variable must be one of `H`, `S`, `G` `U`, `A`")
    # Redundant with the range(constants.N) above; kept as-is (assumes
    # len(zs) == constants.N).
    cmps = range(len(zs))
    # Boolean flags describing which variable is iterated and which is fixed.
    iter_T = iter_var == 'T'
    iter_P = iter_var == 'P'
    iter_V = iter_var == 'V'
    fixed_P = fixed_var == 'P'
    fixed_T = fixed_var == 'T'
    fixed_V = fixed_var == 'V'
    # Bind the fixed variable to a local name for use at definition time
    # (e.g. precomputing Wilson K-values when T is fixed).
    if fixed_P:
        P = fixed_var_val
    elif fixed_T:
        T = fixed_var_val
    elif fixed_V:
        V = fixed_var_val
    # Which property models the chosen spec requires at each iteration.
    always_S = spec in ('S', 'G', 'A')
    always_H = spec in ('H', 'G', 'U', 'A')
    always_V = spec in ('U', 'A')

    # Two-phase property mixers. These reference H_model_g/H_model_l etc.,
    # which are defined later in this function body -- late binding makes
    # that legal because they are only called from err() at solve time.
    def H_model(T, P, xs, ys, V_over_F):
        if V_over_F >= 1.0:
            return H_model_g(T, P, zs)
        elif V_over_F <= 0.0:
            return H_model_l(T, P, zs)
        H_liq = H_model_l(T, P, xs)
        H_gas = H_model_g(T, P, ys)
        return H_liq*(1.0 - V_over_F) + V_over_F*H_gas

    def S_model(T, P, xs, ys, V_over_F):
        if V_over_F >= 1.0:
            return S_model_g(T, P, zs)
        elif V_over_F <= 0.0:
            return S_model_l(T, P, zs)
        S_liq = S_model_l(T, P, xs)
        S_gas = S_model_g(T, P, ys)
        return S_liq*(1.0 - V_over_F) + V_over_F*S_gas

    def V_model(T, P, xs, ys, V_over_F):
        if V_over_F >= 1.0:
            return V_model_g(T, P, zs)
        elif V_over_F <= 0.0:
            return V_model_l(T, P, zs)
        V_liq = V_model_l(T, P, xs)
        V_gas = V_model_g(T, P, ys)
        return V_liq*(1.0 - V_over_F) + V_over_F*V_gas

    # Reference-state corrections, needed whenever entropy enters the
    # objective (S, G, and A specs).
    if always_S:
        P_ref_inv = 1.0/P_ref
        dS_ideal = R*sum([zi*log(zi) for zi in zs if zi > 0.0]) # ideal composition entropy composition
    # Holds (VF, xs, ys) from the most recent err() evaluation so the
    # converged phase split can be returned alongside the root.
    info = []

    def err(guess):
        # Translate the fixed variable to a local variable
        if fixed_P:
            P = fixed_var_val
        elif fixed_T:
            T = fixed_var_val
        elif fixed_V:
            V = fixed_var_val
            T = None
        # Translate the iteration variable to a local variable
        if iter_P:
            P = guess
            if not fixed_V:
                V = None
        elif iter_T:
            T = guess
            if not fixed_V:
                V = None
        elif iter_V:
            V = guess
            T = None
        if T is None:
            # NOTE(review): `T_from_V` is not defined in this scope (only
            # T_from_V_l and T_from_V_g exist below) -- any V-involving
            # combination reaching this line would raise NameError; confirm
            # whether those paths are ever exercised.
            T = T_from_V(V, P, zs)
        # Phase split from the simplified flash; cache for the caller.
        VF, xs, ys = flash_model(T, P, zs)
        info[:] = VF, xs, ys
        # Compute S, H, V as necessary
        if always_S:
            S = S_model(T, P, xs, ys, VF) - dS_ideal - R*log(P*P_ref_inv)
        if always_H:
            H = H_model(T, P, xs, ys, VF)
        if always_V and V is None:
            V = V_model(T, P, xs, ys, VF)
        # Return the objective function (residual of the property spec)
        if spec == 'H':
            err = H - spec_val
        elif spec == 'S':
            err = S - spec_val
        elif spec == 'G':
            err = (H - T*S) - spec_val
        elif spec == 'U':
            err = (H - P*V) - spec_val
        elif spec == 'A':
            err = (H - P*V - T*S) - spec_val
        # print(T, P, V, 'TPV', err)
        return err

    # Common models (shared by both `method` choices)
    VolumeLiquids = correlations.VolumeLiquids

    def V_model_l(T, P, zs):
        # Ideal mixing of pure saturated-liquid molar volumes.
        V_calc = 0.
        for i in cmps:
            V_calc += zs[i]*VolumeLiquids[i].T_dependent_property(T)
        return V_calc

    def T_from_V_l(V, P, zs):
        # Mole-fraction-weighted inversion of the liquid volume correlations.
        T_calc = 0.
        for i in cmps:
            T_calc += zs[i]*VolumeLiquids[i].solve_property(V)
        return T_calc

    def V_model_g(T, P, zs):
        # Ideal-gas molar volume.
        return R*T/P

    def T_from_V_g(V, P, zs):
        # Ideal-gas temperature from molar volume.
        return P*V/R

    # Wilson-correlation K-value flash used by both methods. When P is the
    # iteration variable and T is fixed, the T-dependent part of the
    # K-values is precomputed once outside the closure.
    if method == IDEAL_WILSON or method == SHAW_ELEMENTAL:
        if iter_P:
            if fixed_T:
                T_inv = 1.0/T
                Ks_P = [Pcs[i]*exp((5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]*T_inv))) for i in cmps]
            def flash_model(T, P, zs):
                global V_over_F_guess
                P_inv = 1.0/P
                if not fixed_T:
                    T_inv = 1.0/T
                    Ks_P_local = [Pcs[i]*exp((5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]*T_inv))) for i in cmps]
                    Ks = [Ki*P_inv for Ki in Ks_P_local]
                else:
                    Ks = [Ki*P_inv for Ki in Ks_P]
                # Two-phase only if at least one K > 1 and one K <= 1 among
                # components actually present.
                K_low, K_high = False, False
                for i in cmps:
                    if zs[i] != 0.0:
                        if Ks[i] > 1.0:
                            K_high = True
                        else:
                            K_low = True
                        if K_high and K_low:
                            break
                if K_high and K_low:
                    V_over_F_guess, xs, ys = Rachford_Rice_solution_LN2(zs, Ks, V_over_F_guess)
                    return V_over_F_guess, xs, ys
                elif K_high:
                    # All K > 1: feed is entirely vapor.
                    return 1.0, zs, zs
                else:
                    # All K <= 1: feed is entirely liquid.
                    return 0.0, zs, zs
        else:
            # P is fixed (or derived) for the whole solve; hoist 1/P.
            # NOTE(review): if fixed_var == 'V' this line would hit an
            # unbound P -- confirm such combinations are rejected upstream.
            P_inv = 1.0/P
            def flash_model(T, P, zs):
                global V_over_F_guess
                T_inv = 1.0/T
                Ks = [Pcs[i]*P_inv*exp((5.37*(1.0 + omegas[i])*(1.0 - Tcs[i]*T_inv))) for i in cmps]
                K_low, K_high = False, False
                for i in cmps:
                    if zs[i] != 0.0:
                        if Ks[i] > 1.0:
                            K_high = True
                        else:
                            K_low = True
                        if K_high and K_low:
                            break
                if K_high and K_low:
                    V_over_F_guess, xs, ys = Rachford_Rice_solution_LN2(zs, Ks, V_over_F_guess)
                    return V_over_F_guess, xs, ys
                elif K_high:
                    return 1.0, zs, zs
                else:
                    return 0.0, zs, zs

    if method == SHAW_ELEMENTAL:
        # Elemental H/S models: Lastovka-Shaw for gas, Dadgostar-Shaw for
        # liquid, with SMK for the heat of vaporization.
        VolumeLiquids = correlations.VolumeLiquids
        MWs, n_atoms = constants.MWs, constants.n_atoms

        def H_model_g(T, P, zs):
            # Mix MW and "similarity variable" (atoms per unit mass).
            MW_g, sv_g = 0.0, 0.0
            for i in cmps:
                MW_g += MWs[i]*zs[i]
                sv_g += n_atoms[i]*zs[i]
            sv_g /= MW_g
            H_ref_LS = Lastovka_Shaw_integral(T_ref, sv_g)
            H1 = Lastovka_Shaw_integral(T, sv_g)
            dH = H1 - H_ref_LS
            # Convert J/kg to J/mol.
            H_gas = 1e-3*dH*MW_g #property_mass_to_molar(dH, MW_g)
            return H_gas

        def S_model_g(T, P, zs):
            MW_g, sv_g = 0.0, 0.0
            for i in cmps:
                MW_g += MWs[i]*zs[i]
                sv_g += n_atoms[i]*zs[i]
            sv_g /= MW_g
            S_ref_LS = Lastovka_Shaw_integral_over_T(T_ref, sv_g)
            S1 = Lastovka_Shaw_integral_over_T(T, sv_g)
            dS = S1 - S_ref_LS
            S_gas = 1e-3*dS*MW_g
            return S_gas

        def H_model_l(T, P, zs):
            MW_l, sv_l, Tc_l, omega_l = 0.0, 0.0, 0.0, 0.0
            for i in cmps:
                MW_l += MWs[i]*zs[i]
                sv_l += n_atoms[i]*zs[i]
                Tc_l += Tcs[i]*zs[i]
                omega_l += omegas[i]*zs[i]
            sv_l /= MW_l
            H_ref_DS = Dadgostar_Shaw_integral(T_ref, sv_l)
            H1 = Dadgostar_Shaw_integral(T, sv_l)
            # Heat of vaporization from the pseudo-critical mixture values.
            Hvap = SMK(T, Tc_l, omega_l)
            dH = H1 - H_ref_DS
            H_liq = 1e-3*dH*MW_l #property_mass_to_molar(dH, MW_l)
            return (H_liq - Hvap)

        def S_model_l(T, P, zs):
            MW_l, sv_l, Tc_l, omega_l = 0.0, 0.0, 0.0, 0.0
            for i in cmps:
                MW_l += MWs[i]*zs[i]
                sv_l += n_atoms[i]*zs[i]
                Tc_l += Tcs[i]*zs[i]
                omega_l += omegas[i]*zs[i]
            sv_l /= MW_l
            S_ref_DS = Dadgostar_Shaw_integral_over_T(T_ref, sv_l)
            S1 = Dadgostar_Shaw_integral_over_T(T, sv_l)
            Hvap = SMK(T, Tc_l, omega_l)
            dS = S1 - S_ref_DS
            S_liq = 1e-3*dS*MW_l
            # Entropy of vaporization approximated as Hvap/T.
            return (S_liq - Hvap/T)

    elif method == IDEAL_WILSON:
        # Ideal-gas heat capacity integrals plus heats of vaporization.
        # Note this replaces the Wilson-K flash_model defined above with the
        # full flash_wilson solver.
        HeatCapacityGases = correlations.HeatCapacityGases
        EnthalpyVaporizations = correlations.EnthalpyVaporizations

        def flash_model(T, P, zs):
            _, _, VF, xs, ys = flash_wilson(zs, constants.Tcs, constants.Pcs, constants.omegas, T=T, P=P)
            return VF, xs, ys

        def H_model_g(T, P, zs):
            H_calc = 0.
            for i in cmps:
                H_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral(T_ref, T)
            return H_calc

        def S_model_g(T, P, zs):
            S_calc = 0.
            for i in cmps:
                S_calc += zs[i]*HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T)
            return S_calc

        def H_model_l(T, P, zs):
            H_calc = 0.
            for i in cmps:
                H_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral(T_ref, T) - EnthalpyVaporizations[i](T))
            return H_calc

        def S_model_l(T, P, zs):
            S_calc = 0.
            T_inv = 1.0/T
            for i in cmps:
                S_calc += zs[i]*(HeatCapacityGases[i].T_dependent_property_integral_over_T(T_ref, T) - T_inv*EnthalpyVaporizations[i](T))
            return S_calc

    try:
        # All three variables P, T, V are positive but can grow unbounded, so
        # for the secant method, only set the one variable
        if iter_T:
            guess = 298.15
        elif iter_P:
            guess = 101325.0
        elif iter_V:
            # Ideal-gas molar volume at 298.15 K and 101325 Pa (R*T/P).
            guess = 0.024465403697038125
        val = secant(err, guess, xtol=xtol, ytol=ytol,
                     maxiter=maxiter, bisection=True, low=min_bound, require_xtol=False)
        return val, info[0], info[1], info[2]
    except (UnconvergedError,) as e:
        # Fall back to a bounded bracketing solver; requires both bounds.
        val = brenth(err, min_bound, max_bound, xtol=xtol, ytol=ytol, maxiter=maxiter)
        return val, info[0], info[1], info[2]
global cm_flash
# Lazily-built, cached colormap; see cm_flash_tol().
cm_flash = None


def cm_flash_tol():
    """Return a cached matplotlib ``ListedColormap`` for flash-tolerance plots.

    The map runs grey -> green -> blue -> red with increasing error
    magnitude (the bands below are labelled with the approximate error
    ranges they are meant to represent). Built on first call and cached in
    the module-level ``cm_flash``; matplotlib is imported lazily so merely
    importing this module does not require it.

    Returns:
        matplotlib.colors.ListedColormap: the 100-entry colormap.
    """
    global cm_flash
    if cm_flash is not None:
        return cm_flash
    from matplotlib.colors import ListedColormap
    N = 100
    vals = np.zeros((N, 4))
    # Full opacity for every entry.
    vals[:, 3] = np.ones(N)
    # Grey for 1e-10 to 1e-7: equal fade on all three RGB channels.
    low = 40
    grey_fade = np.linspace(100/256, 1, low)[::-1]
    vals[:low, 0] = grey_fade
    vals[:low, 1] = grey_fade
    vals[:low, 2] = grey_fade
    # green 1e-6 to 1e-5
    ok = 50
    vals[low:ok, 1] = np.linspace(100/256, 1, ok-low)[::-1]
    # Blue 1e-5 to 1e-3
    mid = 70
    vals[ok:mid, 2] = np.linspace(100/256, 1, mid-ok)[::-1]
    # Red 1e-3 and higher. (Previously written as vals[mid:101] with a
    # hard-coded 101 that only worked because numpy clips slice bounds;
    # use N so the band tracks the array size.)
    vals[mid:N, 0] = np.linspace(100/256, 1, N-mid)[::-1]
    cm_flash = ListedColormap(vals)
    return cm_flash
def deduplicate_stab_results(results, tol_frac_err=5e-3):
    """Collapse near-duplicate stability-test results.

    `results` is a list of tuples whose first two items are composition
    vectors (xs, ys) and whose third item participates in the sort key.
    The list is sorted in place by ``(xs[0], third_item)``; a result is
    kept only if its mean absolute composition difference from the most
    recently kept result exceeds `tol_frac_err` in either phase.

    Returns the filtered list (the input list itself when empty).
    """
    if not results:
        return results
    n = len(results[0][0])
    results.sort(key=lambda res: (res[0][0], res[2]))
    unique = [results[0]]
    for candidate in results[1:]:
        ref_xs, ref_ys = unique[-1][0], unique[-1][1]
        cand_xs, cand_ys = candidate[0], candidate[1]
        err_x = sum(abs(a - b) for a, b in zip(cand_xs, ref_xs))/n
        err_y = sum(abs(a - b) for a, b in zip(cand_ys, ref_ys))/n
        if err_x > tol_frac_err or err_y > tol_frac_err:
            unique.append(candidate)
    return unique
# Placeholder convergence metadata for trivial flash results (no iteration
# performed, no stability guess used).
empty_flash_conv = {'iterations': 0, 'err': 0.0, 'stab_guess_name': None}
# Shared singletons reused to avoid repeated small allocations.
# NOTE(review): these are mutable module-level objects -- callers must not
# modify them in place.
one_in_list = [1.0]
empty_list = []
|
I have mentioned before that typically I am not a heels girl. I like comfortable shoes. Cowboy boots. Flats. Flip flops. It’s not that I don’t love wearing heels, it’s that for some reason my body HATES ME when I wear heels. Within 10 minutes my feet are aching and by the end of the day I’d rather cut my feet off because that would hurt LESS than how badly my feet hurt when I wear heels.
I admire girls who can wear heels all the live long day… I am not one of them.
UNLESS I find the perfect pair of comfortable heels. I have one pair of heels that I found a few months ago that are comfortable and I love, but recently I found out about Hotter.com and they have a TON of comfortable shoe options – INCLUDING comfortable heel options. I was skeptical. Comfortable heels are like Crystal Pepsi: a myth, or something. Usually you find a pair of shoes that is either comfortable OR cute. Not both.
I got a pair of the Donna Heels from Hotter.com (If you order from them, make sure you order from the US website and not the UK website… unless you’re in the UK of course… 🙂) and when they came in the mail, I got them out of the box and immediately fell in love. And then I put them on. HOLY MOLY THEY ARE SO COMFORTABLE. I seriously didn’t know it was possible. They are so cute, so practical, and go with so much. AND THEY ARE SO COMFORTABLE. I want to scream it from the rooftops. Comfortable heels are such a rarity for me.
I can tell that these are going to be a serious staple for me for years to come. So, with that, I wanted to show y’all FIVE different ways to wear the Donna Heels from Hotter.com!
ONE: The casual style with jeans look.
You know, for the everyday outfit.
TWO: The shorts and a tee look.
The perfect weekend look, running errands look, or just hanging out doing a whole lot of nothing look.
THREE: The trendy all-white look.
PRO TIP: If you’re going to rock the all-white trend, make sure you have varying textures and patterns. Plus, a pop of color always helps – whether it’s in a shoe or in a bag.
FOUR: The comfortable, casual maxi-skirt look.
To me, there’s nothing more comfortable than a maxi skirt and tee. If I could live in this outfit, I think I would.
FIVE: The fancy wedding guest look.
For the fancy occasions! Because isn’t that what heels are actually meant for?
So, what do you think? Which look is your favorite? How would YOU style the Donna heels? Do you have a pair of comfortable heels that you love?
Shop Black Pj Sets, Wide Leg Jeans, Cutout Dresses, Frayed Shorts and more. Get this widget.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train helper for source separation using tf.estimator."""
import tensorflow.compat.v1 as tf
from . import inference_graph
def execute(model_fn, input_fn, **params):
  """Execute train or eval and/or inference graph writing.

  Builds a ``tf.estimator.Estimator`` from `model_fn` and runs
  ``tf.estimator.train_and_evaluate``, optionally first writing an
  inference graph to ``params['model_dir']``.

  Args:
    model_fn: An estimator compatible function taking parameters
      (features, labels, mode, params) that returns a EstimatorSpec.
    input_fn: An estimator compatible function taking 'params' that returns a
      dataset
    **params: Dict of additional params to pass to both model_fn and input_fn.
      Required keys include 'model_dir', 'input_data_train',
      'input_data_eval', 'train_batch_size', 'eval_batch_size',
      'randomize_training', 'train_steps', 'eval_examples', 'eval_suffix',
      'save_summary_steps', 'save_checkpoints_secs' and
      'keep_checkpoint_every_n_hours'. Optional keys:
      'write_inference_graph' (default False) and 'eval_throttle_secs'
      (default 600).
  """
  # Use .get so a missing flag means "don't write" instead of KeyError,
  # consistent with the eval_throttle_secs default below.
  if params.get('write_inference_graph', False):
    inference_graph.write(model_fn, input_fn, params, params['model_dir'])

  def estimator_model_fn(features, labels, mode, params):
    # Thin adapter so the Estimator supplies its own params dict.
    spec = model_fn(features, labels, mode, params)
    return spec

  def train_input_fn():
    # Copy params so per-phase overrides do not leak into the shared dict.
    train_params = params.copy()
    train_params['input_data'] = params['input_data_train']
    train_params['batch_size'] = params['train_batch_size']
    if params['randomize_training']:
      train_params['randomize_order'] = True
    return input_fn(train_params)

  def eval_input_fn():
    eval_params = params.copy()
    eval_params['input_data'] = params['input_data_eval']
    eval_params['batch_size'] = params['eval_batch_size']
    return input_fn(eval_params)

  train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                      max_steps=params['train_steps'])

  # Enough eval steps to cover eval_examples once per evaluation.
  eval_steps = int(round(params['eval_examples'] / params['eval_batch_size']))
  eval_spec = tf.estimator.EvalSpec(
      name=params['eval_suffix'], input_fn=eval_input_fn, steps=eval_steps,
      throttle_secs=params.get('eval_throttle_secs', 600))

  run_config = tf.estimator.RunConfig(
      model_dir=params['model_dir'],
      save_summary_steps=params['save_summary_steps'],
      save_checkpoints_secs=params['save_checkpoints_secs'],
      keep_checkpoint_every_n_hours=params['keep_checkpoint_every_n_hours'])
  estimator = tf.estimator.Estimator(
      model_fn=estimator_model_fn,
      params=params,
      config=run_config)

  tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
|
3) Help "change the lens" on youth of color, especially African American boys, whose portrayal in the media is too often narrow and negative.
Media coverage from 25+ outlets including USAToday, Huffington Post, Take Part and Dropout Nation.
Dozens of high-profile speaking opportunities for those featured in the films, including this TED Talk, The Atlantic's Race & Justice Summit (broadcast on C-Span), and events such as at the Museum of the Moving Image, Kapor Center for Social Impact, SXSWEdu, LinkedIn, Salesforce, ASU-GSV Education Innovation Summit, the U.S. Department of Education and numerous governmental offices, community organizations, faith-based entities, colleges, universities, businesses and more.
By being in Code Oakland, I am showing other girls what they can do! I am a living example to other young girls of color that we can have a career in technology and have an impact on our community and world. I am able to show that coding is fun and can open a world of possibilities.
Being a part of Unchartered Territory impacted me in ways I could not have imagined. I have been asked by schools, districts and government agencies to speak all across the country. It has given me the opportunity to shed light on serious issues surrounding education and race.
For me, having TEACHED as the central basis for Education Equality Week was the best decision we made. It was perfect, because it built a foundation for the rest of the programs. The films should definitely be screened first before any kind of discussion of education equality, especially because everyone has different levels of knowledge on the issue.
TEACHED provided a platform and audience that I otherwise never would have had exposure to. Director Kelly Amis has launched something bigger than a film...people like me have started to mobilize and make things happen. How do I explain the impact? It was transformative on so many different levels. The beautiful part is that it isn't over.
We’re examining education and race equality more deeply in our first feature-length documentary (now in production in Oakland, CA!). Please help make this indie film a reality with a donation today.
Engage your community in education equality issues by hosting a TEACHED film screening. You'll receive digital discussion guides, promotional materials and event-planning assistance. Questions? Contact us.
Please share our website with friends and colleagues, follow us on social media and join our mailing list.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-27 17:27
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the VirtualChassis model and link Device rows to it."""

    dependencies = [
        ('dcim', '0051_rackreservation_tenant'),
    ]

    operations = [
        # New table: a virtual chassis groups devices under one master.
        migrations.CreateModel(
            name='VirtualChassis',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(blank=True, max_length=30)),
                # NOTE(review): default=1 assumes a Device with pk=1 exists;
                # presumably a placeholder so the non-nullable column can be
                # added -- confirm against deployment data. PROTECT prevents
                # deleting a device that is a chassis master.
                ('master', models.OneToOneField(default=1, on_delete=django.db.models.deletion.PROTECT, related_name='vc_master_for', to='dcim.Device')),
            ],
        ),
        # Devices point back at their chassis; SET_NULL detaches members
        # when the chassis is deleted.
        migrations.AddField(
            model_name='device',
            name='virtual_chassis',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='members', to='dcim.VirtualChassis'),
        ),
        # Position of the device within its chassis (0-255).
        migrations.AddField(
            model_name='device',
            name='vc_position',
            field=models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(255)]),
        ),
        # Election priority of the device within its chassis (0-255).
        migrations.AddField(
            model_name='device',
            name='vc_priority',
            field=models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(255)]),
        ),
        # A chassis slot may hold only one device; rack/position/face
        # uniqueness is restated alongside the new constraint.
        migrations.AlterUniqueTogether(
            name='device',
            unique_together=set([('virtual_chassis', 'vc_position'), ('rack', 'position', 'face')]),
        ),
    ]
|
DR DOPING sells medicines, vitamins, nootropics, additives, celebrex drug category flash and other healthy products at the international level.
DR DOPING continuously strives to improve your online shopping experience. Initially, our company promoted the benefits of doping Mildronate and generic name for lamotrigine; hence the name - DR DOPING. We are confident that the DR DOPING offers the best conditions for the ordering of health products available on the market.
|
import re
import os
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
try:
import collections.abc as collections
except ImportError:
import collections
from music_app.settings import DATABASE_NAME
from tinydb import TinyDB, Query
def parse_listing(data):
    """Extract song dicts from a reddit listing response.

    Keeps only the url/score/created_utc/thumbnail/title fields of each
    't3' (link) child, then merges in the artist/title/genre/year parsed
    from the post title. Posts whose title does not parse keep just the
    base fields.
    """
    wanted_keys = ('url', 'score', 'created_utc', 'thumbnail', 'title')
    songs = []
    for child in data['data']['children']:
        # Only 't3' things are link posts; skip comments, listings, etc.
        if child['kind'] != 't3':
            continue
        flat = flatten(child['data'])
        songs.append({key: flat[key] for key in flat if key in wanted_keys})
    for song in songs:
        details = parse_title(song['title'])
        if details is None:
            # Title did not match the "Artist - Title [Genre] (Year)" form.
            continue
        song.update(details)
    return songs
def flatten(d, parent_key='', sep='_'):
    """Flatten nested mappings into one dict with `sep`-joined keys."""
    flat = {}
    for key, value in d.items():
        full_key = parent_key + sep + key if parent_key else key
        if isinstance(value, collections.MutableMapping):
            # Recurse into sub-mappings, prefixing with the current path.
            flat.update(flatten(value, full_key, sep=sep))
        else:
            flat[full_key] = value
    return flat
def parse_title(title):
    """Parse a post title of the form "Artist - Title [Genre] (Year)".

    Returns a dict with 'artist', 'title', 'genre' and 'year' keys, or
    None when the title does not match.
    """
    pattern = re.compile(r"""
    (?P<artist>.+[^- ]+) # The artist
    \s*-+\s* # Skip some spaces and dashes
    (?P<title>.*) # The title
    \s*\[ # Skip some spaces and opening bracket
    (?P<genre>.*) # The genre
    \]\s*\( # Skip closing bracket, spaces and opening parenthesis
    (?P<year>\d+) # The year
    \) # Skip closing parenthesis
    """, re.VERBOSE | re.IGNORECASE)
    match = pattern.search(title)
    if match is None:
        return
    # The four named groups are exactly the keys callers expect.
    return match.groupdict()
def get_genres(database_name):
    """Utility method to get all the genres as a set.

    Combined entries like "Rock/Pop" are split on '/' and whitespace is
    stripped from each piece.
    """
    db = TinyDB(os.path.join(os.getcwd(), database_name))
    combined = {entry['genre'] for entry in db.all()}
    split_up = set()
    for combo in combined:
        split_up |= set(combo.strip().split('/'))
    db.close()
    return _strip_spaces(split_up)
def _strip_spaces(genres):
"""Helper method to strip spaces and remove duplicates from genres """
return { x.strip() for x in genres }
def get_total_songs(database_name):
    """Utility method returning how many songs the database holds."""
    db = TinyDB(os.path.join(os.getcwd(), database_name))
    records = db.all()
    db.close()
    return len(records)
|
Photo taken from my hotel, probably in 2002. For me the Chao Phraya IS Bangkok. But maybe that is because I come from a city with a big river and a big harbor.
|
import datetime
# Approximate representation of PRISM 400m grid
# Approximate representation of PRISM 400m grid
# Each grid dict gives, per axis, the starting coordinate (degrees), the
# cell step (degrees) and the number of cells.
bc_400m = {'lon': {'start': -140, 'step': 0.008333333, 'count': 1680 },
           'lat': {'start': 48, 'step': 0.008333333, 'count': 3241 } }

# Approximate representation of BCSD/BCCAQ grid
canada_5k = {'lon': {'start': -141, 'step': 0.08333333, 'count': 1068 },
             'lat': {'start': 41, 'step': 0.08333333, 'count': 510 } }

# Approximate representation of high res CMIP5 (MIROC5) grid
world_125k = {'lon': {'start': 0, 'step': 1.40625, 'count': 256 },
              'lat': {'start': -89.296875, 'step': 1.40625, 'count': 128 } }

# Approximate representation of standard CMIP5 (CanESM) grid
world_250k = {'lon': {'start': 0, 'step': 2.8125, 'count': 128 },
              'lat': {'start': -88.59375, 'step': 2.8125, 'count': 64 } }

# Timescales: index ranges for the simulated period 1950-01-01..2100-01-01.
start_day = datetime.date(1950,1,1)
end_day = datetime.date(2100,1,1)

timescales = {'seasonal': range(17), # the seasonal index
              'annual': range(1950, 2100), # the year
              'monthly': range(12 * 150), # months since January 1950
              'daily':range((end_day - start_day).days)} # days since January 1, 1950
|
Ulaanbaatar /MONTSAME/ The National Information Technology Park (NITP), ACE Singaporean organization and the Asian Business Association are to co-implement Export Accelerator Program for encouraging Mongolian small and medium sized enterprises and start-up companies to enter Asian market.
The Export Accelerator Program will run for ten weeks from April 25 to June 25, and three companies selected from among its participants will take part in the Market Access program to be held in Singapore this July. Moreover, the Asian Young Entrepreneur Program will run its activities in Mongolia this autumn and the winners will compete in Singapore, said NITP Director Ts.Sodnomdamba.
Companies which are locally recognized, have its own customers, products to export and financial and human resources are qualified to take part in the program and they will be trained under guidance of successful businessmen, investors and experienced mentors for 10 weeks. Registration of the program will run till April 20.
|
"""init
Revision ID: 7afc95e24778
Revises:
Create Date: 2017-08-03 11:51:08.190298
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7afc95e24778'
down_revision = None  # first migration in the history (no parent)
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: accounts, oauth_tokens, posts, sessions
    and twitter_archives.

    All child tables cascade updates/deletes from their parent account.
    """
    # Twitter/Mastodon accounts plus their per-account deletion policy.
    op.create_table('accounts',
    sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('id', sa.String(), nullable=False),
    sa.Column('policy_enabled', sa.Boolean(), server_default='FALSE', nullable=False),
    sa.Column('policy_keep_latest', sa.Integer(), server_default='0', nullable=False),
    sa.Column('policy_keep_favourites', sa.Boolean(), server_default='TRUE', nullable=False),
    sa.Column('policy_delete_every', sa.Interval(), server_default='0', nullable=False),
    sa.Column('policy_keep_younger', sa.Interval(), server_default='0', nullable=False),
    sa.Column('display_name', sa.String(), nullable=True),
    sa.Column('screen_name', sa.String(), nullable=True),
    sa.Column('avatar_url', sa.String(), nullable=True),
    sa.Column('last_fetch', sa.DateTime(), server_default='epoch', nullable=True),
    sa.Column('last_delete', sa.DateTime(), server_default='epoch', nullable=True),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_accounts'))
    )
    # OAuth credentials tied to an account.
    op.create_table('oauth_tokens',
    sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('token', sa.String(), nullable=False),
    sa.Column('token_secret', sa.String(), nullable=False),
    sa.Column('account_id', sa.String(), nullable=True),
    sa.ForeignKeyConstraint(['account_id'], ['accounts.id'], name=op.f('fk_oauth_tokens_account_id_accounts'), onupdate='CASCADE', ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('token', name=op.f('pk_oauth_tokens'))
    )
    # Cached posts authored by an account.
    op.create_table('posts',
    sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('id', sa.String(), nullable=False),
    sa.Column('body', sa.String(), nullable=True),
    sa.Column('author_id', sa.String(), nullable=False),
    sa.Column('favourite', sa.Boolean(), server_default='FALSE', nullable=False),
    sa.ForeignKeyConstraint(['author_id'], ['accounts.id'], name=op.f('fk_posts_author_id_accounts'), onupdate='CASCADE', ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_posts'))
    )
    # Login sessions for an account.
    op.create_table('sessions',
    sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('id', sa.String(), nullable=False),
    sa.Column('account_id', sa.String(), nullable=False),
    sa.ForeignKeyConstraint(['account_id'], ['accounts.id'], name=op.f('fk_sessions_account_id_accounts'), onupdate='CASCADE', ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_sessions'))
    )
    # Uploaded Twitter archive blobs and their chunked import progress.
    op.create_table('twitter_archives',
    sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('account_id', sa.String(), nullable=False),
    sa.Column('body', sa.LargeBinary(), nullable=False),
    sa.Column('chunks', sa.Integer(), nullable=True),
    sa.Column('chunks_successful', sa.Integer(), server_default='0', nullable=False),
    sa.Column('chunks_failed', sa.Integer(), server_default='0', nullable=False),
    sa.ForeignKeyConstraint(['account_id'], ['accounts.id'], name=op.f('fk_twitter_archives_account_id_accounts'), onupdate='CASCADE', ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_twitter_archives'))
    )
def downgrade():
    """Drop the initial schema; children before 'accounts' to satisfy FKs."""
    op.drop_table('twitter_archives')
    op.drop_table('sessions')
    op.drop_table('posts')
    op.drop_table('oauth_tokens')
    op.drop_table('accounts')
|
If the world was coming to an end and we were to send one last message out into the cosmos that summed up the beauty of life on Earth, what would it be? Jane Goodall, Mohsin Hamid, Oscar Murillo, James Dyson, Richard Dawkins, Kyung-sook Shin and Daniel Humm tell us.
Tell us how you would sum up the beauty of life on Earth on the Times Opinion Facebook page. We may highlight your response in a follow-up to this piece.
@OP – If the world was coming to an end and we were to send one last message out into the cosmos that summed up the beauty of life on Earth, . . . . .. . . . . . . . .
Blow sending messages into the cosmos!!
“You just have time for one more launch from Earth – and we can accommodate one last ship-load of humans on the Titan Base.
Elsewhere, this was my response as a final message.
Sorry Alan, my last message would be: "Do not allow humans from Earth to immigrate. They already screwed up one planet."
And also, The Humans managed to screw up the planet in less than two thousand years.
They may also still be infected with the “God Delusion”………..
For human bases to operate in the outer Solar-System where sunlight is weak, some fission or fusion power source is required.
Such power sources are currently under development!
The agency’s Space Technology Mission Directorate (STMD) has provided multi-year funding to the Kilopower project.
Testing is due to start in November and go through early next year, with NASA partnering with the Department of Energy’s (DOE) Nevada National Security Site to appraise fission power technologies.
Alan, its not that I don’t see the need for research and space exploration, I am starting to have a morality issue with the spread of Humans.
And, if massive planet outposts are developed, what motivation is there to save Earth? Only the rich and famous will survive. And they will look upon Earth as a garbage dump. Economic cleansing. Then the tribe mentality would set in. So we would, in effect, be splitting the human race into the Martians and the Earthlings.
Message: Hello, from planet Earth. A very fine chap named Dan made this soon-to-be dead world a little brighter.
I’m not sure about that! We certainly sent god-deluded astronauts into space, but ONLY by a huge back-up force of highly educated and dedicated scientists and engineers facilitating this!
The authority obeying faith-thinkers, would not have lasted a couple of days, without these rationalists providing the technical support services and instructions.
There is a very big difference in OUTCOMES between those trusting evidence based expert instructions from a team of reputable scientists, and those blindly following faith in “authoritative” priestly interpretations of mythology or political or corporate ideology!
And, if massive planet outpost are developed, what motivation is there to save Earth?
There will never be facilities to transport more than a tiny percentage of humans to exo-colonies, so while there may be political manipulations by the rich to acquire such postings for themselves, the harsh selective environment of space will quickly remove any anti-science wish-thinkers individually or en-block.
Only the rich and famous will survive. And they will look upon Earth as a garbage dump. Economic cleansing.
I think the colonists (especially remote ones), will not see themselves as having a commitment to Earth, but it will be up to the remaining Earth population to solve their own problems, while the colonists will have enough of their own challenges to meet in settlements which need artificial power generators, just to provide life support to survive!
Then the Tribe mentality would set in.
You are probably right that the separate settlements will go their own ways, although some trading of specialist materials and ideas will happen.
The exceptions to this, would be trading infrastructure colonies in Earth or Lunar orbit, selling services to Earth-based enterprises.
These would however be vulnerable to Earth related disasters such as asteroid showers or Solar flares.
|
#
# BreakPoint.py
#
# https://github.com/assafnativ/NativDebugging.git
# Nativ.Assaf@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
# Breakpoint states.
BREAK_POINT_HIDE = 2
BREAK_POINT_ACTIVE = 1
BREAK_POINT_DEACTIVE = 0  # Cancelled; use ~BREAK_POINT_ACTIVE instead
# The byte written over the original instruction (0xCC, the x86 INT3 opcode).
BREAK_POINT_BYTE = ord('\xcc')


class BreakPoint:
    """A single software breakpoint in a debugged process.

    Records where the breakpoint lives, whether it is currently set, and
    the byte it replaced so the instruction can be restored later.
    """

    def __init__(self,
                 address=-1,
                 state=BREAK_POINT_DEACTIVE,
                 original_byte=None,
                 proc=None):
        """
        Constructor of the BreakPoint class.

        address: target address of the breakpoint (-1 means unset).
        state: one of the BREAK_POINT_* constants above.
        original_byte: the byte overwritten by BREAK_POINT_BYTE, or None
            if the breakpoint has never been written.
        proc: handle of the process the breakpoint belongs to, if any.
        """
        self.address = address
        self.state = state
        self.original_byte = original_byte
        self.proc = proc

    def __repr__(self):
        # Debug-friendly representation; proc is omitted as it is usually
        # an opaque process handle.
        return ('BreakPoint(address=%r, state=%r, original_byte=%r)'
                % (self.address, self.state, self.original_byte))
|
I know I’ve shared my various desired actresses/actors for Something Strange & Deadly, the Movie (coming to a theater near you in the summer of never), but I’m not sure I’ve ever shared my ideal movie cast on my website.
I know I’ve said before that Chapter 19 is my favorite in Something Strange & Deadly. It’s not that I don’t love all the other chapters, but I actually wrote chapter 19 first.
Yep–it’s true! It was originally the opening scene in Something Strange & Deadly, and then the story jumped back to Eleanor waiting on her brother at the train station. OBVIOUSLY, that was a terrible way to open the book, and a wise critique partner quickly pointed out just how terrible.
But the fact remains that once-upon-a-time, Chapter 19 came first. So it’s special to me.
And because it’s special to me, I thought I’d share with YOU a playlist that goes along with the entire dynamite factory kerfuffle.
First, I’ll share the actual playlist so you can listen (and possibly be inspired to write your own stuff…?), and then I’ll break down which piece I associate with which pages. Happy listening!
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.