text
stringlengths
29
850k
###############################################################################
## Copyright (C) 2007 Jason Baldridge, The University of Texas at Austin
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
###############################################################################
"""Compare gold-standard tags against model output and report accuracy.

Usage: python <script> <gold_file> <model_file>

Both files contain one "token tag" pair per line, with blank lines marking
sentence boundaries.  Files ending in .gz are transparently decompressed.
"""
import sys
import gzip
import fnmatch


def smart_open(filename):
    """Open *filename* for reading text, transparently handling .gz files."""
    if fnmatch.fnmatch(filename, "*.gz"):
        # "rt" so gzip yields str lines like a plain text file would.
        return gzip.open(filename, "rt")
    # The Py2-only file() builtin was replaced with open().
    return open(filename, "r")


gold_tag_file = smart_open(sys.argv[1])
model_tag_file = smart_open(sys.argv[2])

gold_lines = gold_tag_file.readlines()
model_lines = model_tag_file.readlines()

word_correct = 0
word_total = 0
sentence_correct = 0
sentence_total = 0
errors = {}          # (gold_tag, model_tag) -> confusion count
all_correct = True   # does the current sentence have no tag errors so far?

for linenumber in range(len(gold_lines)):
    gold_line = gold_lines[linenumber].strip()
    model_line = model_lines[linenumber].strip()
    if model_line == "":
        # Blank line = sentence boundary; both files must agree on it.
        if gold_line != "":
            print("Something wrong -- different length on sentence for gold and model.")
            print("Gold:", gold_line)
            print("Model:", model_line)
        if all_correct:
            sentence_correct += 1
        sentence_total += 1
        all_correct = True
    else:
        # Each non-blank line is "token tag"; compare the tag column.
        gtag = gold_line.split()[1]
        mtag = model_line.split()[1]
        if gtag == mtag:
            word_correct += 1
        else:
            all_correct = False
            errors[(gtag, mtag)] = errors.get((gtag, mtag), 0) + 1
        word_total += 1

# NOTE: the original script ended the loop with two dead
# gold_tag_file.readline() calls -- the files were already exhausted by
# readlines() above, so those lines had no effect and were removed.

word_accuracy = (word_correct / float(word_total)) * 100.0
sentence_accuracy = (sentence_correct / float(sentence_total)) * 100.0

print("Word accuracy: %2.3f (%d/%d)" % (word_accuracy, word_correct, word_total))
print("Sent accuracy: %2.3f (%d/%d)" % (sentence_accuracy, sentence_correct,
                                        sentence_total))

# Report the five most frequent (gold, model) tag confusions, most common
# first.  Replaces the Py2-only cmp-based sort(lambda x, y: cmp(y[0], x[0])).
to_sort = [(count, gtag, mtag) for (gtag, mtag), count in errors.items()]
to_sort.sort(reverse=True)

print("\nMost common errors:")
print("Err\tGold\tModel\n---------------------")
for i in range(min(5, len(to_sort))):
    print("\t".join(str(x) for x in to_sort[i]))
A 28-year-old political newcomer beat a veteran politician in New York’s Democratic Party congressional primary election on Tuesday night, in a shocking upset within the Democratic Party. Alexandria Ocasio-Cortez won 57.5% of the vote while her opponent, Joe Crowley, had 42.5%. Crowley has represented New York’s 14th district since 1999 and has been touted as a potential future Speaker of the House of Representatives. The victory of Ocasio-Cortez is a major upset to the Democratic Party, which has seen a schism between establishment and progressive Democrats since the 2016 election. Less than a year ago, Ocasio-Cortez was working as a bartender to help support her working-class family. In November 2018, she will take on Republican candidate Anthony Pappas for a seat in the US House of Representatives. A Democrat has won that district by a landslide in the past nine elections.
#!/usr/bin/env python3
import codecs
import csv


class ExportStatistics:
    """Helpers for writing the statistics created in the software to disk."""

    @staticmethod
    def export_to_csv(file_path, samples, statistics):
        """Write the basic application statistics to a semicolon-delimited CSV.

        Args:
            file_path: string - full file path, including the file name.
            samples: instance - class Samples (supplies qualifiers and names).
            statistics: instance - class Statistics (supplies stats).

        Return:
            No return in method.
        """
        # Header: empty corner cell followed by the four qualifier labels.
        header = [''] + [samples.qualifiers[col] for col in range(1, 5)]

        # One row per sample: its name plus stats columns 1..4.
        # Row structure: samples.names[1...8], statistics.stats[1...8][1...4]
        rows = [
            [name] + [statistics.stats[key][col] for col in range(1, 5)]
            for key, name in samples.names.items()
        ]

        with codecs.open(file_path, 'w', 'utf-8') as out_file:
            writer = csv.writer(out_file, delimiter=';')
            writer.writerow(header)
            writer.writerows(rows)
J. Kremer, D. Mastronarde, and J. McIntosh. 1996. Computer visualization of three-dimensional image data using IMOD. J Struct Biol. 116(1): 71-76.
# MicroPython test script: exercises uos.VfsFat on an in-RAM block device.
# It prints a fixed sequence of values; a harness compares against expected
# output, so the order and content of the print() calls must not change.
import sys
import uos
import uerrno

# Skip the whole test when the FAT filesystem is not compiled into the port.
try:
    uos.VfsFat
except AttributeError:
    print("SKIP")
    sys.exit()


class RAMFS:
    # Minimal block device backed by a bytearray; one sector = SEC_SIZE bytes.

    SEC_SIZE = 512

    def __init__(self, blocks):
        self.data = bytearray(blocks * self.SEC_SIZE)

    def readblocks(self, n, buf):
        # Copy sector n (and following bytes) into the caller's buffer.
        #print("readblocks(%s, %x(%d))" % (n, id(buf), len(buf)))
        for i in range(len(buf)):
            buf[i] = self.data[n * self.SEC_SIZE + i]

    def writeblocks(self, n, buf):
        # Copy the caller's buffer into the backing store at sector n.
        #print("writeblocks(%s, %x)" % (n, id(buf)))
        for i in range(len(buf)):
            self.data[n * self.SEC_SIZE + i] = buf[i]

    def ioctl(self, op, arg):
        # Only sector-count and sector-size queries are supported; other
        # opcodes fall through and return None implicitly.
        #print("ioctl(%d, %r)" % (op, arg))
        if op == 4:  # BP_IOCTL_SEC_COUNT
            return len(self.data) // self.SEC_SIZE
        if op == 5:  # BP_IOCTL_SEC_SIZE
            return self.SEC_SIZE


# Allocate the RAM disk; skip on targets too constrained to hold it.
try:
    bdev = RAMFS(48)
except MemoryError:
    print("SKIP")
    sys.exit()

uos.VfsFat.mkfs(bdev)

# A freshly formatted disk must not contain the test data yet.
print(b"FOO_FILETXT" not in bdev.data)
print(b"hello!" not in bdev.data)

vfs = uos.VfsFat(bdev, "/ramdisk")

# statvfs on an unknown mount point should raise ENODEV.
try:
    vfs.statvfs("/null")
except OSError as e:
    print(e.args[0] == uerrno.ENODEV)

print("statvfs:", vfs.statvfs("/ramdisk"))
print("getcwd:", vfs.getcwd())

# stat of a missing file should raise ENOENT.
try:
    vfs.stat("no_file.txt")
except OSError as e:
    print(e.args[0] == uerrno.ENOENT)

with vfs.open("foo_file.txt", "w") as f:
    f.write("hello!")
print(vfs.listdir())

print("stat root:", vfs.stat("/"))
print("stat disk:", vfs.stat("/ramdisk/"))
print("stat file:", vfs.stat("foo_file.txt"))

# The file name and contents should now appear in the raw device bytes
# (name presumably stored as an upper-case 8.3 entry -- hence FOO_FILETXT).
print(b"FOO_FILETXT" in bdev.data)
print(b"hello!" in bdev.data)

vfs.mkdir("foo_dir")
vfs.chdir("foo_dir")
print("getcwd:", vfs.getcwd())
print(vfs.listdir())

with vfs.open("sub_file.txt", "w") as f:
    f.write("subdir file")

# chdir into a regular file must fail with ENOENT.
try:
    vfs.chdir("sub_file.txt")
except OSError as e:
    print(e.args[0] == uerrno.ENOENT)

vfs.chdir("..")
print("getcwd:", vfs.getcwd())

vfs.umount()

# After unmounting, operations on the stale vfs object should raise ENODEV.
try:
    vfs.listdir()
except OSError as e:
    print(e.args[0] == uerrno.ENODEV)
try:
    vfs.getcwd()
except OSError as e:
    print(e.args[0] == uerrno.ENODEV)

# Remount and list the root directory via a bytes path.
vfs = uos.VfsFat(bdev, "/ramdisk")
print(vfs.listdir(b""))
Win A Copy of Silver Bell by Patty Griffin! Home/Win A Copy of Silver Bell by Patty Griffin! Another lost album is found! Patty Griffin’s Silver Bell releases today. You can read our review of the album (we liked it) or our profile of Patty Griffin. Thanks to our friends at Team Patty, you can also enter right now for a chance to win one of two brand new copies of Silver Bell, just available today.
"""Test that an empty document doesn't break, even when its (nonexistent)
text is set to bold.
"""
__noninteractive = True

import unittest

from pyglet import gl
from pyglet import graphics
from pyglet.text import document
from pyglet.text import layout
from pyglet import window


class TestWindow(window.Window):
    # Window hosting an empty document of the given type inside an
    # IncrementalTextLayout; the constructor then styles the zero-length
    # text bold, which must not raise.

    def __init__(self, doctype, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.batch = graphics.Batch()
        self.document = doctype()
        self.layout = layout.IncrementalTextLayout(self.document,
                                                   self.width, self.height,
                                                   batch=self.batch)
        # len(self.document.text) is 0 here, so this sets a style over an
        # empty range -- the regression being tested.
        self.document.set_style(0, len(self.document.text), {"bold": True})

    def on_draw(self):
        gl.glClearColor(1, 1, 1, 1)
        self.clear()
        self.batch.draw()


class TestCase(unittest.TestCase):
    # One test per document flavour: create the window, pump events once,
    # and close.  Success is simply the absence of an exception.

    def testUnformatted(self):
        self.window = TestWindow(document.UnformattedDocument)
        self.window.dispatch_events()
        self.window.close()

    def testFormatted(self):
        self.window = TestWindow(document.FormattedDocument)
        self.window.dispatch_events()
        self.window.close()
Images of Dusty--Photographs, Video, Fan Art. Lovely to see a Springfields one Trek. You are most welcome Jennifer. Memories of the real Dusty!! Indeed Trek and lovely sweet memories. Stunning photos, Carole and Cas! Thanks for taking time to post them. You are most welcome Michele thank you for responding. I put this one on the new oldie thread but as theres no respose I thought I would put it on here too. You are most welcome Mike, thank you. Lovely in colour I must admit. Thats better, I cant bear to see her in the Stringfellow days, not her most memorable moments sadly. Thank you Trek a lovely favourite. Sorry I managed to put this one on the wrong thread and can't delete it. You are most welcome Mike. Don't normally like the "tin foil" photos but I like this one! Yes I'm with you there Trek. Lovely photo. Thanks for posting it. Nice one Cas always liked that hair do! Yes me too Trek, 60s look. Lovely Cas seeing her with Martha . I look everyday and this one takes me back to that Motown special of course. Thank you Liz nad Trek. Hope she gave it back! I know thats YOUR suit Liz! Heres one to start the week. Heres todays lady on stage. A nice one it is too Trek. Love these performance photos - yet again, thanks Cas!
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright Cisco Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Views for managing Quantum network ports.
"""
import logging

from django import http
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from django.views.generic import View, TemplateView
from django.utils.translation import ugettext as _

from horizon import api
from horizon import forms
from horizon import tables
from horizon import exceptions

from .forms import CreatePorts, AttachPort, DetachPort
from .tables import PortsTable

import pickle

LOG = logging.getLogger(__name__)


class CreatePortsView(forms.ModalFormView):
    """Modal form view for creating ports on a network."""
    form_class = CreatePorts
    template_name = 'nova/networks/ports/create.html'


class PortsView(tables.DataTableView):
    """Table view listing the ports of the network identified by the
    ``network_id`` URL kwarg."""
    table_class = PortsTable
    template_name = 'nova/networks/ports/index.html'

    def get_data(self):
        """Fetch the network's ports.

        On API failure an error is reported to the user via
        ``exceptions.handle`` and an empty table is shown.
        """
        network_id = self.kwargs['network_id']
        try:
            ports = api.quantum_port_list(self.request, network_id)
        except Exception:
            # Was a bare ``except:``; narrowed so SystemExit and
            # KeyboardInterrupt are no longer swallowed.  Behaviour is
            # otherwise unchanged: best-effort, show an empty list.
            ports = []
            msg = _('Unable to retrieve network details.')
            exceptions.handle(self.request, msg)
        return ports


class AttachPortView(forms.ModalFormView):
    """Modal form view for attaching a port to a free interface."""
    form_class = AttachPort
    template_name = 'nova/networks/ports/attach.html'

    def get_initial(self):
        # Pre-populate the form with interfaces not yet attached to a port.
        interfaces = api.get_free_interfaces(self.request)
        return {"interfaces": interfaces}


class DetachPortView(forms.ModalFormView):
    """Modal form view for detaching a port."""
    form_class = DetachPort
    template_name = 'nova/networks/ports/detach.html'
The Resource Manhattan (New York, N.Y.) -- Drama. 9 Items that share the Concept Manhattan (New York, N.Y.) -- Drama. Context of Manhattan (New York, N.Y.) -- Drama. Data Citation of the Concept Manhattan (New York, N.Y.) -- Drama.
#test_ga.py from genetic import * import matplotlib.pyplot as plt import random class Eq1(object): """ parabole with max in 30.0 """ def __init__(self): self.bestval = 10000.0 self.minval = 10.0 self.maxval = 50.0 def get_result(self,x): return self.eq1(x) def eq1(self, x): return -10.0*(x-30.0)*(x-30.0)+10000.0 class Eq2(object): """ between -2 and 2 the max is on 0 with a value of 10 """ def __init__(self): self.bestval = 10.0 self.minval = -2.0 self.maxval = 2.0 def get_result(self,x): return self.eq2(x) def eq2(self, x): return x**4 - 10*x**2 + 9 class Eq3(object): """ vector function with eq1 = x, eq2 = y """ def __init__(self): self.eq1 = Eq1() self.eq2 = Eq2() self.bestval = self.eq1.bestval + self.eq2.bestval self.minval = [self.eq1.minval,self.eq2.minval] self.maxval = [self.eq1.maxval,self.eq2.maxval] def get_result(self, vallst): return self.eq1.get_result(vallst[0]) + self.eq2.get_result(vallst[1]) class TestGA(object): def __init__(self, neq, nmember, ngeneration): self._nmember = nmember self._ngeneration = ngeneration self._eq = neq def run(self): #build member if self._eq == 1: eq = Eq1() elif self._eq == 2: eq = Eq2() elif self._eq == 3: eq = Eq3() else: print "Select an equation" exit valuelist = [] fitnesslist = [] for i in range(self._nmember): if self._eq == 1 or self._eq == 2: value = random.uniform(eq.minval,eq.maxval) elif self._eq == 3: value = [random.uniform(eq.minval[0],eq.maxval[0]),random.uniform(eq.minval[1],eq.maxval[1])] valuelist.append([value]) fitnesslist.append(eq.get_result(value)) ga = Generation() for i in range(len(valuelist)): ga.build_member(valuelist[i],fitnesslist[i],[eq.minval],[eq.maxval]) newgeneration = ga.next() xlist = [] ylist = [] for ngeneration in range(0,self._ngeneration): h = 0 maxfit = 0 fitnesslist = [] valuelistlist = [] for member in newgeneration: h += 1 for gene in member.chromosome: fitness = eq.get_result(gene.val) fitnesslist.append(fitness) valuelistlist.append([gene.val]) if fitness > maxfit: 
maxfit = fitness ga = Generation(valuelistlist,fitnesslist,[eq.minval],[eq.maxval]) newgeneration = ga.next() xlist.append(ngeneration) ylist.append(maxfit) plt.plot(xlist,ylist, 'ro') plt.axis([0, self._ngeneration, 0, eq.bestval]) plt.show() test1 = TestGA(1, 20, 100) test1.run()
Need a Recommended Tap replacement Tradesman in Addiscombe? Request 2, 3 or 4 Tap replacement Tradesmen in the Addiscombe area to contact you and quote. Fully qualified, reliable and local plumber with good rates and a 24 hour callout service . No job too small , we cover everything from tap washers to central heating installations. We promise to provide a prompt and professional service, with customer satisfaction our number one priority .
import time
from asyncio import Queue, CancelledError


# Default high-water mark, in bytes, for the outgoing buffer.
DEFAULT_LIMIT = 2**16


class FlowControl:
    """A protocol mixin for flow control logic.

    This implements the protocol methods :meth:`pause_writing`,
    :meth:`resume_writing`.

    NOTE(review): this mixin reads ``self.closed``, ``self.transport``,
    ``self._buffer``, ``self._loop``, ``self.logger`` and ``self.changed`` --
    all presumably provided by the host protocol class; none are defined
    here, so ``_buffer`` in particular must be a deque-like object created
    by the host (appendleft/pop are used).  TODO confirm against the host.
    """
    _b_limit = 2*DEFAULT_LIMIT   # soft buffer limit; 2x this triggers pause
    _paused = False              # True while the transport asked us to pause
    _buffer_size = 0             # total bytes currently queued in _buffer
    _waiter = None               # future resolved when writing may resume

    def write(self, data):
        """Write ``data`` into the wire.

        Returns an empty tuple or a :class:`~asyncio.Future` if this
        protocol has paused writing.
        """
        if self.closed:
            raise ConnectionResetError(
                'Transport closed - cannot write on %s' % self
            )
        else:
            t = self.transport
            if self._paused or self._buffer:
                # Already paused or draining: queue the data and try to
                # flush whatever the transport will currently accept.
                self._buffer.appendleft(data)
                self._buffer_size += len(data)
                self._write_from_buffer()
                if self._buffer_size > 2 * self._b_limit:
                    if self._waiter and not self._waiter.cancelled():
                        # NOTE(review): the format string has three
                        # placeholders but only two arguments are passed --
                        # this logging call looks broken; confirm intent.
                        self.logger.warning(
                            '%s buffer size is %d: limit is %d ',
                            self._buffer_size, self._b_limit
                        )
                    else:
                        # Backpressure: stop reading and hand the caller a
                        # future that resolves in resume_writing().
                        t.pause_reading()
                        self._waiter = self._loop.create_future()
            else:
                # Fast path: nothing queued, write straight through.
                t.write(data)
            self.changed()
            return self._waiter

    def pause_writing(self):
        '''Called by the transport when the buffer goes over the
        high-water mark

        Successive calls to this method will fails unless
        :meth:`resume_writing` is called first.
        '''
        assert not self._paused
        self._paused = True

    def resume_writing(self, exc=None):
        '''Resume writing.

        Successive calls to this method will fails unless
        :meth:`pause_writing` is called first.
        '''
        assert self._paused
        self._paused = False
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.done():
                # Release whoever awaited the future returned by write().
                if exc is None:
                    waiter.set_result(None)
                else:
                    waiter.set_exception(exc)
            # Writing resumed: allow incoming data again and drain the
            # backlog accumulated while paused.
            self.transport.resume_reading()
        self._write_from_buffer()

    #    INTERNAL CALLBACKS

    def _write_from_buffer(self):
        # Drain queued chunks (oldest first -- appendleft/pop = FIFO) until
        # paused again, the buffer empties, or the transport starts closing.
        t = self.transport
        if not t:
            return
        while not self._paused and self._buffer:
            if t.is_closing():
                self.producer.logger.debug(
                    'Transport closed - cannot write on %s', self
                )
                break
            data = self._buffer.pop()
            self._buffer_size -= len(data)
            self.transport.write(data)

    def _set_flow_limits(self, _, exc=None):
        if not exc:
            # NOTE(review): ``self._limit`` is not defined in this mixin
            # (the class attribute is ``_b_limit``) -- looks like a latent
            # AttributeError unless the host class defines _limit; confirm.
            self.transport.set_write_buffer_limits(high=self._limit)

    def _wakeup_waiter(self, _, exc=None):
        # Wake up the writer if currently paused.
        if not self._paused:
            return
        self.resume_writing(exc=exc)


class Timeout:
    '''Adds a timeout for idle connections to protocols
    '''
    _timeout = None          # idle timeout in seconds (0/None = disabled)
    _timeout_handler = None  # pending loop.call_later handle, if any

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, timeout):
        '''Set a new :attr:`timeout` for this protocol
        '''
        if self._timeout is None:
            # First assignment: hook connection events exactly once.
            self.event('connection_made').bind(self._add_timeout)
            self.event('connection_lost').bind(self._cancel_timeout)
        self._timeout = timeout or 0
        self._add_timeout(None)

    #    INTERNALS

    def _timed_out(self):
        # Fired by the loop timer; if activity happened since scheduling,
        # re-arm for the remaining gap instead of closing.
        if self.last_change:
            gap = time.time() - self.last_change
            if gap < self._timeout:
                self._timeout_handler = None
                return self._add_timeout(None, timeout=self._timeout-gap)
        self.close()
        self.logger.debug('Closed idle %s.', self)

    def _add_timeout(self, _, exc=None, timeout=None):
        # Schedule (or reschedule) the idle check; no-op when closed,
        # disabled, or tearing down with an exception.
        if not self.closed:
            self._cancel_timeout(_, exc=exc)
            timeout = timeout or self._timeout
            if timeout and not exc:
                self._timeout_handler = self._loop.call_later(
                    timeout, self._timed_out
                )

    def _cancel_timeout(self, _, exc=None, **kw):
        if self._timeout_handler:
            self._timeout_handler.cancel()
            self._timeout_handler = None


class Pipeline:
    """Pipeline protocol consumers once reading is finished

    This mixin can be used by TCP connections to pipeline response writing
    """
    _pipeline = None  # lazily created ResponsePipeline

    def pipeline(self, consumer):
        """Add a consumer to the pipeline
        """
        if self._pipeline is None:
            self._pipeline = ResponsePipeline(self)
            # Tear the pipeline down when the connection goes away.
            self.event('connection_lost').bind(self._close_pipeline)
        self._pipeline.put(consumer)

    def close_pipeline(self):
        # Detach and close the pipeline; returns the cancelled worker task
        # (or None when no pipeline was ever created).
        if self._pipeline:
            p, self._pipeline = self._pipeline, None
            return p.close()

    def _close_pipeline(self, _, **kw):
        self.close_pipeline()


class ResponsePipeline:
    """Maintains a queue of responses to send back to the client
    """
    __slots__ = ('connection', 'queue', 'debug', 'worker', 'put')

    def __init__(self, connection):
        self.connection = connection
        # NOTE(review): Queue(loop=...) was removed in Python 3.10; this
        # code targets older asyncio.  Also reaches into the private
        # ``_loop`` attribute of the connection and queue.
        self.queue = Queue(loop=connection._loop)
        self.debug = connection._loop.get_debug()
        self.worker = self.queue._loop.create_task(self._process())
        self.put = self.queue.put_nowait

    async def _process(self):
        # Worker task: pull consumers off the queue and write their
        # responses in order, one at a time, until cancelled or broken.
        while True:
            try:
                consumer = await self.queue.get()
                if self.debug:
                    self.connection.producer.logger.debug(
                        'Connection pipeline process %s', consumer
                    )
                await consumer.write_response()
            except (CancelledError, GeneratorExit, RuntimeError):
                break
            except Exception:
                # Unexpected failure: log it and drop the whole connection.
                self.connection.producer.logger.exception(
                    'Critical exception in %s response pipeline',
                    self.connection
                )
                self.connection.close()
                break
        # help gc
        self.connection = None
        self.queue = None
        self.worker = None
        self.put = None

    def close(self):
        self.worker.cancel()
        return self.worker
Founded in 1909, Detroit Rescue Mission Ministries (DRMM) is committed to sharing the gospel of the love of Jesus Christ, providing hope to the hopeless, disadvantaged, abused and homeless men, women and children of our community. By ministering to the total person - body, soul and spirit - we help them become faithful Christians, discipled into a local church, rehabilitated, employed and living productive lives in restored families. DRMM annually provides: one million meals to the hungry; more than 160,000 nights of emergency shelter; transitional housing and substance abuse treatment; transitional housing for teen moms and their children; and spiritual guidance and empowerment to more than 1,000 people daily. All data for Financial Performance Metrics calculations was provided by Detroit Rescue Mission Ministries on recent 990s filed with the IRS.
from django.db import models

from influencetx.core import constants, utils

import logging
log = logging.getLogger(__name__)


class Legislator(models.Model):
    """A Texas legislator, with identity data mirrored from the Open States
    API plus a cross-reference id into the TPJ data set."""

    # Legislator ID from Open States API.
    openstates_leg_id = models.CharField(max_length=48, db_index=True)
    # TPJ filer id (presumably Texans for Public Justice -- TODO confirm);
    # 0 means no mapping is known yet.
    tpj_filer_id = models.IntegerField(default=0, blank=True, db_index=True)
    # Texas Legislature id; optional.
    tx_lege_id = models.CharField(max_length=48, blank=True, db_index=True)

    name = models.CharField(max_length=45)
    first_name = models.CharField(max_length=20, blank=True)
    last_name = models.CharField(max_length=20, blank=True)
    # Single-letter party code; choices defined in core.constants.
    party = models.CharField(max_length=1, choices=constants.PARTY_CHOICES)
    chamber = models.CharField(max_length=6, choices=constants.CHAMBER_CHOICES)
    district = models.IntegerField()
    # updated_at field from Open States API. Used to check whether
    # legislator-detail needs update
    openstates_updated_at = models.DateTimeField()
    url = models.URLField(blank=True)
    photo_url = models.URLField(blank=True)

    @property
    def initial(self):
        """First initial used for placeholder image."""
        return self.name[0]

    @property
    def party_label(self):
        """User-friendly party label."""
        return utils.party_label(self.party)

    @property
    def chamber_label(self):
        """User-friendly label for chamber of congress."""
        return utils.chamber_label(self.chamber)

    def __str__(self):
        return f'{self.name} {self.tx_lege_id}'


class LegislatorIdMap(models.Model):
    """Provide mapping between TPJ FILER_ID and Legislator ID from the
    Open States API."""

    openstates_leg_id = models.CharField(max_length=48, db_index=True)
    tpj_filer_id = models.IntegerField(db_index=True)

    def __str__(self):
        return f'{self.openstates_leg_id!r} {self.tpj_filer_id}'
I’ve long been struck by how consistently different methods find large health harms from air pollution. Most people seem to think we no longer have an air pollution problem, because we mostly don’t see much air pollution. But the particles that are too small to see continue to cause great harm. The US Federal EPA standard for air pollution in the form of particles of size 2.5 microns or smaller is an annual average of 15, and a 24 hour average of 35, micrograms per cubic meter. Many places are not in compliance with these standards (check your area here and here). These are huge gains, which could be achieved at a modest expense, especially compared to the vast costs we pay for tiny health gains via medicine. More should be done. We [Alcor] are committing $10,000 towards the Evaluation Fund. … Although the Prize itself is fully funded, funds are needed to conduct the evaluation. Alcor’s contribution will make a big difference, since the tests are estimated to cost $25,000 to $50,000. My thinking has evolved a bit over the last month. In chemopreservation [= plastination], one fills a brain with plastic-like chemicals, which make strong cross-link bonds between most everything they touch. So there are two times when brain info can be lost: before it is filled with plastic, and after. So as far as I can tell, the main issue with plastination [= chemopreservation] is how quickly brains can fill with plastic after ordinary blood flow has stopped. If we can find ways to do that well, plastination just wins, I think, at least for the goal of saving the info that is you. The [Brain Preservation] Foundation has declined [Alcor’s] donation because of concerns that it might be perceived as influencing the judges’ decisions. Added 13Jan’13: They reached their $25K goal! Here’s another idea for medical reform: consulate care. Let countries like Sweden, France, etc.
with approved national health care systems have bigger consulates, and open them up to paying customers for medical services. For example, you could sign up for Swedish Care, and when needed you’d go to their consulate to get medical care as if you were living in Sweden. Now we might not approve consulate care for say North Korea or Uganda, but surely most developed nations are good enough. We don’t issue travel warnings suggesting people not travel to Sweden, for fear of getting sick there. So why not let folks travel to a Sweden nearby for their medical care? Since most other nations spend far less than the US on medicine, consulate care should be a lot cheaper. And since those other nations seem to suffer no net health loss from their cheaper care, consulate care should be no less healthy. The media holds medicine to a lower standard than it holds alternative medicine, such as say crystal healing. No way would an article in a major paper complain that we aren’t subsidizing crystals enough for poor folks, based on the observation that rich folks buy more crystals and rich folks are healthier. But for medicine, that sort of correlation is enough. “Nearly five black women die needlessly per day from breast cancer” because they don’t have information about the importance of breast screening and they don’t have access to high quality care. But in fact, the study shows only that across 25 US cities, the ratio of the black vs. white breast cancer death rates correlates (barely significantly) with median city income and a measure of city racial segregation. It is a huge leap to conclude from these correlations that black women don’t have enough info or care! The very robust health-status correlation predicts more health for higher status folks, and thus more race-health disparity when there is a higher race-status disparity. It seem quite plausible that the race-status disparity is higher in cities where races are segregated and incomes are low. 
It would be nearly nine months before she told herself it was time to act. By then, the lump was the size of a small egg. … Doctors and advocates say the fear that kept her from acting quickly is all too common among black women. It is among the factors that contribute to a disturbing trend: Although they are less likely than white women to get breast cancer, black women are more likely to die from it. … Poverty and racial inequities are the primary factors driving the disparity, according to a study. … The study, which compared mortality rates between black and white women in the nation’s 25 largest cities, states that “nearly five black women die needlessly per day from breast cancer” because they don’t have information about the importance of breast screening and they don’t have access to high quality care. The authors … said genetics play only a small role in the disparity. [In] the 25 largest cities in the US, … non-Hispanic Black : non-Hispanic White [breast cancer death] rate ratios (RRs) were calculated … Almost all the NHB rates were greater than almost all the NHW rates. … From among the 7 potential correlates, only median household income (r = 0.43, p = 0.037) and a measure of segregation (r = 0.42, p = 0.039) were significantly related to the RR. Note that white women may seem to “get” more breast cancer because they are tested more often for it. In the US the top 5% of medical spenders spend an average of $40,682 a year each, and account for 49.5% of all spending. (The bottom half spend an average of $236.) Not too surprisingly, 60.3% of these people are age 55 or older. Perhaps more surprising, on their health self-rating, 28.9% of these folks say they are “good”, 19.9% “very good” and 7.5% “excellent”, for a total of 56.3% with self-rated health of “good” or better (source). So, are these folks in serious denial, or is most of our medical spending on hardly sick folks?
# -*- coding: utf-8 -*-
import random


class Beast(object):
    """Basic beast client implementing a random-move strategy."""

    def __init__(self):
        # Environment string most recently received from the server.
        self.environment = None

    def bewege(self, paramString):
        """Calculate the destination where to move.

        @param paramString string sent by the server, of the form
               "<energy>;<environment>;<last ten rounds>"
        @return a randomly chosen allowed move while energy remains,
                otherwise None
        """
        # Split into at most three fields: energy, environment, history.
        parts = paramString.split(';', 2)

        energy = int(parts[0]) if parts[0] else 0

        self.environment = parts[1]
        # History of the previous rounds; parsed for protocol parity but
        # not used by this random strategy.
        history = parts[2].rstrip(';')

        if energy <= 0:
            # Out of energy: no move to report.
            return None

        allowed_moves = (0, 2, 4, 6, 7, 8, 10, 11, 12, 13, 14,
                         16, 17, 18, 20, 22, 24, '?')
        return random.choice(allowed_moves)
I am such a fan of pretty fabrics. There isn’t a day where I am not looking over the internet at fabric, cushions and such. There is a beautiful old mill around 3 miles away from where I live that has the most beautiful fabric stalls, I can often be found here when I have a free hour or two. For me, today’s DIY post is perfect. These Fabric hoops would look fantastic in any home, as a wedding decoration or even a wedding favour. They look so beautiful, unique and Jenny from Zazzle has provided all the information and easy instructions on how to make your very own Collection of Fabric Hoops. Outside Spring is in full swing, but I looked at my apartment this weekend and it looked barren. For a little refresh, I decided to give my living room a makeover with one of the easiest DIY wall treatments ever. Using Zazzle’s new custom fabric and a few inexpensive supplies, I spruced up my walls in a fun and kitschy way, in less than an hour. If you’re looking to brighten up drab walls for Spring, this project may be the solution to a happy, creative space. of the larger hoop, centring the fabric. Once centred, take the smaller hoop and press it on top of the fabric, stretching the fabric and securing it between the two hoops. At this point you should begin to tighten the screw on the larger hoop while intermittently stretching the excess fabric on the ends of the hoop with your hands. Do this until you’ve tightened the screw as much as you can and the fabric is taut. Step 4) Cut the excess fabric on the sides of hoop about 1/4 inch (6 mm) from the surface. Step 5) Admire your completed hoop!
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2013-2014 by Łukasz Mierzwa
:contact: l.mierzwa@gmail.com
"""


from __future__ import unicode_literals

import logging

import mongoengine

from django.core import exceptions
from django.http import HttpResponseNotFound, HttpResponseBadRequest
from django.conf.urls import url
from django.utils.translation import ugettext as _

from tastypie_mongoengine.resources import MongoEngineResource
from tastypie_mongoengine.fields import ReferenceField, ReferencedListField

from tastypie.resources import ALL
from tastypie.authorization import Authorization, ReadOnlyAuthorization
from tastypie.exceptions import Unauthorized
from tastypie.utils import trailing_slash
from tastypie.http import HttpCreated

from mongoengine.errors import ValidationError

from upaas.config.metadata import MetadataConfig

from upaas_admin.apps.applications.models import Application, Package
from upaas_admin.common.apiauth import UpaasApiKeyAuthentication
from upaas_admin.common.uwsgi import fetch_json_stats


log = logging.getLogger(__name__)


class ApplicationAuthorization(Authorization):
    """Scope every application API operation to the requesting user's own
    applications; deleting applications over the API is forbidden."""

    def read_list(self, object_list, bundle):
        log.debug(_("Limiting query to user owned apps (length: "
                    "{length})").format(length=len(object_list)))
        return object_list.filter(owner=bundle.request.user)

    def read_detail(self, object_list, bundle):
        return bundle.obj.owner == bundle.request.user

    def create_detail(self, object_list, bundle):
        return bundle.obj.owner == bundle.request.user

    def update_list(self, object_list, bundle):
        return object_list.filter(owner=bundle.request.user)

    def update_detail(self, object_list, bundle):
        # Application name is immutable over the API: overwrite whatever
        # the client sent with the stored name before saving.
        bundle.data['name'] = bundle.obj.name
        return bundle.obj.owner == bundle.request.user

    def delete_list(self, object_list, bundle):
        raise Unauthorized(_("Unauthorized for such operation"))

    def delete_detail(self, object_list, bundle):
        raise Unauthorized(_("Unauthorized for such operation"))


class ApplicationResource(MongoEngineResource):
    """REST resource for Application documents, with extra endpoints for
    building packages and starting/stopping/updating instances."""

    current_package = ReferenceField(
        'upaas_admin.apps.applications.api.PackageResource',
        'current_package', full=True, null=True, readonly=True)
    run_plan = ReferenceField(
        'upaas_admin.apps.scheduler.api.RunPlanResource', 'run_plan',
        full=True, null=True, readonly=True)
    running_tasks = ReferencedListField(
        'upaas_admin.apps.tasks.api.TaskResource', 'running_tasks',
        null=True, readonly=True)

    class Meta:
        always_return_data = True
        queryset = Application.objects.all()
        resource_name = 'application'
        excludes = ['packages', 'tasks']
        filtering = {
            'id': ALL,
            'name': ALL,
            'owner': ALL,
        }
        authentication = UpaasApiKeyAuthentication()
        authorization = ApplicationAuthorization()

    def __init__(self, *args, **kwargs):
        super(ApplicationResource, self).__init__(*args, **kwargs)
        # Owner is always the authenticated user; never client-settable.
        self.fields['owner'].readonly = True

    def dehydrate(self, bundle):
        """Attach computed fields: package_count, instance_count, can_start."""
        instances = 0
        if bundle.obj.run_plan:
            instances = len(bundle.obj.run_plan.backends)
        bundle.data['package_count'] = len(bundle.obj.packages)
        bundle.data['instance_count'] = instances
        bundle.data['can_start'] = bundle.obj.can_start
        return bundle

    def obj_create(self, bundle, request=None, **kwargs):
        """Validate the submitted metadata and create an application
        owned by the requesting user.

        Raises django ValidationError on missing/invalid metadata or on
        mongoengine validation/uniqueness failures.
        """
        # TODO use MongoCleanedDataFormValidation ??
        metadata = bundle.data.get('metadata')
        if not metadata:
            raise exceptions.ValidationError(_('Missing metadata'))
        try:
            MetadataConfig.from_string(metadata)
        except Exception as e:
            raise exceptions.ValidationError(
                _('Invalid metadata: {err}').format(err=e))

        log.debug(_("Going to create new application for user "
                    "'{name}'").format(name=bundle.request.user.username))
        try:
            # NOTE(review): this deliberately calls the *grandparent*
            # obj_create (skips MongoEngineResource's own) — confirm
            # before "fixing" the super() target.
            return super(MongoEngineResource, self).obj_create(
                bundle, request=request, owner=bundle.request.user, **kwargs)
        except mongoengine.ValidationError as e:
            log.warning(_("Can't create new application, invalid data "
                          "payload: {msg}").format(msg=e.message))
            raise exceptions.ValidationError(e.message)
        except mongoengine.NotUniqueError as e:
            log.warning(_("Can't create new application, duplicated fields: "
                          "{msg}").format(msg=e.message))
            raise exceptions.ValidationError(e.message)

    def prepend_urls(self):
        """Extra per-application action endpoints (all keyed by app id)."""
        return [
            url(r"^(?P<resource_name>%s)/(?P<id>\w[\w/-]*)/build%s$" % (
                self._meta.resource_name, trailing_slash()),
                self.wrap_view('build_package'), name="build"),
            url(r"^(?P<resource_name>%s)/(?P<id>\w[\w/-]*)/start%s$" % (
                self._meta.resource_name, trailing_slash()),
                self.wrap_view('start_application'), name="start"),
            url(r"^(?P<resource_name>%s)/(?P<id>\w[\w/-]*)/stop%s$" % (
                self._meta.resource_name, trailing_slash()),
                self.wrap_view('stop_application'), name="stop"),
            url(r"^(?P<resource_name>%s)/(?P<id>\w[\w/-]*)/update%s$" % (
                self._meta.resource_name, trailing_slash()),
                self.wrap_view('update_application'), name="update"),
            url(r"^(?P<resource_name>%s)/(?P<id>\w[\w/-]*)/instances%s$" % (
                self._meta.resource_name, trailing_slash()),
                self.wrap_view('instances'), name="instances"),
        ]

    def get_app(self, kwargs):
        """Resolve the Application addressed by the URL kwargs, or None
        (also on malformed object ids)."""
        try:
            return Application.objects(
                **self.remove_api_resource_names(kwargs)).first()
        except ValidationError:
            return None

    def build_package(self, request, **kwargs):
        """PUT: schedule a package build.

        Query args: force_fresh=1 to force a full rebuild,
        interpreter_version to request a specific interpreter.
        """
        self.method_check(request, allowed=['put'])
        try:
            force_fresh = bool(int(request.GET.get('force_fresh', 0)))
        except (TypeError, ValueError):
            # Fix: narrowed from a bare `except:`; malformed flag values
            # simply fall back to a normal (non-fresh) build.
            force_fresh = False

        interpreter_version = request.GET.get('interpreter_version') or None

        app = self.get_app(kwargs)
        if app:
            if interpreter_version and (
                    interpreter_version not in
                    app.supported_interpreter_versions):
                return HttpResponseBadRequest(
                    _("Unsupported interpreter version"))
            return self.create_response(request, app.build_package(
                force_fresh=force_fresh,
                interpreter_version=interpreter_version),
                response_class=HttpCreated)
        else:
            return HttpResponseNotFound(_("No such application"))

    def start_application(self, request, **kwargs):
        """PUT: start the application (requires a built package)."""
        self.method_check(request, allowed=['put'])
        app = self.get_app(kwargs)
        if app:
            if app.current_package:
                return self.create_response(request, app.start_application(),
                                            response_class=HttpCreated)
            else:
                return HttpResponseBadRequest(
                    _("No package built or no metadata registered for app "
                      "'{name}' with id '{id}'").format(name=app.name,
                                                        id=app.id))
        else:
            return HttpResponseNotFound(_("No such application"))

    def stop_application(self, request, **kwargs):
        """PUT: stop a running application."""
        self.method_check(request, allowed=['put'])
        app = self.get_app(kwargs)
        if app:
            if not app.run_plan:
                return HttpResponseBadRequest(_(
                    "Application is already stopped"))
            if app.current_package:
                return self.create_response(request, app.stop_application(),
                                            response_class=HttpCreated)
            else:
                return HttpResponseBadRequest(
                    _("No package built or no metadata registered for app "
                      "'{name}' with id '{id}'").format(name=app.name,
                                                        id=app.id))
        else:
            return HttpResponseNotFound(_("No such application"))

    def update_application(self, request, **kwargs):
        """PUT: redeploy a running application."""
        self.method_check(request, allowed=['put'])
        app = self.get_app(kwargs)
        if app:
            if app.run_plan:
                return self.create_response(request, app.update_application(),
                                            response_class=HttpCreated)
            else:
                return HttpResponseBadRequest(_("Application is stopped"))
        else:
            return HttpResponseNotFound(_("No such application"))

    def instances(self, request, **kwargs):
        """GET: per-backend limits and live uWSGI stats for a running app."""
        self.method_check(request, allowed=['get'])
        stats = []
        app = self.get_app(kwargs)
        if not app:
            return HttpResponseNotFound(_("No such application"))
        if app.run_plan:
            for backend_conf in app.run_plan.backends:
                backend_data = {
                    'name': backend_conf.backend.name,
                    'ip': str(backend_conf.backend.ip),
                    'limits': {
                        'workers_min': backend_conf.workers_min,
                        'workers_max': backend_conf.workers_max,
                        'memory_per_worker':
                            app.run_plan.memory_per_worker,
                        'memory_per_worker_bytes':
                            app.run_plan.memory_per_worker * 1024 * 1024,
                        'backend_memory':
                            app.run_plan.memory_per_worker *
                            backend_conf.workers_max,
                        'backend_memory_bytes':
                            app.run_plan.memory_per_worker *
                            backend_conf.workers_max * 1024 * 1024,
                    }}
                s = fetch_json_stats(str(backend_conf.backend.ip),
                                     backend_conf.stats)
                stats.append({'backend': backend_data, 'stats': s})
        return self.create_response(request, stats)


class PackageAuthorization(ReadOnlyAuthorization):
    """Read-only access to packages of the user's own applications;
    deletion allowed only for packages that are not currently deployed."""

    def read_list(self, object_list, bundle):
        log.debug(_("Limiting query to user owned apps (length: "
                    "{length})").format(length=len(object_list)))
        return object_list.filter(
            application__in=bundle.request.user.applications)

    def read_detail(self, object_list, bundle):
        return bundle.obj.application.owner == bundle.request.user

    def delete_list(self, object_list, bundle):
        active_pkgs = []
        for app in bundle.request.user.applications:
            # Fix: guard on current_package, not just the app — apps that
            # never built a package have current_package=None and the old
            # `if app:` guard crashed with AttributeError on `.id`.
            if app and app.current_package:
                active_pkgs.append(app.current_package.id)
        return object_list.filter(
            application__in=bundle.request.user.applications,
            id__not__in=active_pkgs)

    def delete_detail(self, object_list, bundle):
        if bundle.obj.application.owner != bundle.request.user:
            return False
        # Fix: tolerate apps without a deployed package (current_package
        # is None); any package of such an app may be deleted.
        current = bundle.obj.application.current_package
        return current is None or bundle.obj.id != current.id


class PackageResource(MongoEngineResource):
    """REST resource for Package documents."""

    application = ReferenceField(
        'upaas_admin.apps.applications.api.ApplicationResource',
        'application', readonly=True)

    class Meta:
        always_return_data = True
        queryset = Package.objects.all()
        resource_name = 'package'
        filtering = {
            'id': ALL,
        }
        authentication = UpaasApiKeyAuthentication()
        authorization = PackageAuthorization()

    def obj_delete(self, bundle, **kwargs):
        """Delete a package unless it is the application's active one."""
        bundle.obj = self.obj_get(bundle=bundle, **kwargs)
        self.authorized_delete_detail(self.get_object_list(bundle.request),
                                      bundle)
        # Fix: current_package may be None (no deployment yet); the old
        # unconditional `.id` dereference raised AttributeError.
        current = bundle.obj.application.current_package
        if current is None or bundle.obj.id != current.id:
            return super(PackageResource, self).obj_delete(bundle, **kwargs)
        return HttpResponseBadRequest(_("Package in use"))
Don’t have any idea how to solve your math problem? Confused and stuck with algebra? Working for hours but you don’t even know what to do? You need to end this torment. Go to a place that will help you solve the problem: online tutoring at Tutorvista.com. It offers a great unlimited monthly tutoring package. TutorVista’s online Algebra tutoring program is designed to provide Calculus help. Since it requires a lot of understanding and many concepts, you will have personal attention and one-on-one tutoring by connecting with a tutor. TutorVista offers you Precalculus help. Solve calculus problems by studying with highly qualified tutors who make sure you understand the subject well. TutorVista’s math homework help will solve difficult problems in geometry, number theory, calculus, trigonometry, statistics, probability, discrete math, and any other area of math. Get all math answers here. Studying with TutorVista’s subject experts will solve all your math problems and improve your study. With algebra homework help, algebra homework is no longer a threat — you will have a reliable tutor to untangle the problems. You can even smile at difficult college algebra problems. With free college algebra help backing up your study process, be sure that you will master the entire subject. Thanks for sharing your blog — very inspiring.
# -*- encoding: utf-8 -*-
###########################################################################
#    Module Writen to OpenERP, Open Source Management Solution
#
#    Copyright (c) 2010 Vauxoo - http://www.vauxoo.com/
#    All Rights Reserved.
#    info Vauxoo (info@vauxoo.com)
############################################################################
#    Coded by: Luis Torres (luis_t@vauxoo.com)
############################################################################
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
import decimal_precision as dp


class mrp_subproduct(osv.Model):
    # Extends mrp.subproduct with two computed (non-stored) cost columns.
    _inherit = 'mrp.subproduct'

    def _calc_cost(self, cr, uid, ids, field_name, arg, context):
        """Functional-field getter for 'cost_t' (total line cost).

        Returns {id: total cost}, computed by compute_bom_cost() one
        record at a time.
        """
        res = {}
        for i in self.browse(cr, uid, ids):
            res[i.id] = self.compute_bom_cost(cr, uid, [i.id])
        return res

    def _calc_cost_u(self, cr, uid, ids, field_name, arg, context):
        """Functional-field getter for 'cost_u' (unit cost).

        Returns {id: unit cost}; the unit cost is taken directly from
        the product's standard price.
        """
        res = {}
        for i in self.browse(cr, uid, ids):
            cost = 0.00
            cost = i.product_id.standard_price
            res[i.id] = cost
        return res

    _columns = {
        # Total cost of the subproduct line (qty and UoM factored in).
        'cost_t': fields.function(_calc_cost, method=True, type='float',
                                  digits_compute=dp.get_precision('Cost_Bom'),
                                  string='Cost', store=False),
        # Unit cost (the product's standard price).
        'cost_u': fields.function(_calc_cost_u, method=True, type='float',
                                  digits_compute=dp.get_precision('Cost_Bom'),
                                  string='Unit Cost', store=False),
    }

    def compute_bom_cost(self, cr, uid, ids, *args):
        # Total cost = standard price * line qty, converted between the
        # line's UoM and the product's default UoM.
        # NOTE(review): if several ids are passed, only the cost of the
        # LAST record is returned; the callers above always pass one id.
        for i in self.browse(cr, uid, ids):
            cost = 0.00
            cost = i.product_id.standard_price*i.product_qty * \
                i.product_uom.factor_inv * i.product_id.uom_id.factor
        return cost
A collection of my calligraphy and lettering pieces. I love to play with flourishes, which involve rigorous planning and careful attention to design details. Auspicious banners in the Year of the Rooster.
from flask import Blueprint, request
from flask.ext.login import current_user
from werkzeug.exceptions import BadRequest

from apikit import obj_or_404, jsonify, Pager, request_data

from datawire.model import Entity, Collection, db
from datawire.model.forms import EntityForm
from datawire import authz

# REST endpoints for creating, listing, viewing, updating and deleting
# entities, scoped to the collections the current user may access.
blueprint = Blueprint('entities', __name__)


@blueprint.route('/api/1/entities', methods=['GET'])
def index():
    """List entities in the user's collections, paginated.

    Optional query args: 'collection' (repeatable) restricts the set of
    collections; 'prefix' filters entities by name prefix.
    """
    collection_ids = Collection.user_ids(current_user)
    filter_collections = request.args.getlist('collection')
    if len(filter_collections):
        # NOTE(review): filter values arrive as strings; if the ids from
        # Collection.user_ids() are ints this membership test matches
        # nothing and the ValueError guard never fires — verify the id
        # type against Collection.user_ids().
        try:
            collection_ids = [l for l in collection_ids
                              if l in filter_collections]
        except ValueError:
            raise BadRequest()
    prefix = request.args.get('prefix')
    q = Entity.by_collection(collection_ids, prefix=prefix)
    return jsonify(Pager(q))


@blueprint.route('/api/1/entities', methods=['POST', 'PUT'])
def create():
    """Create an entity; requires write access to the target collection."""
    data = EntityForm().deserialize(request_data())
    authz.require(data['collection'])
    authz.require(authz.collection_write(data['collection'].id))
    entity = Entity.create(data, current_user)
    db.session.commit()
    return view(entity.id)


@blueprint.route('/api/1/entities/_suggest', methods=['GET'])
def suggest():
    """Suggest entities matching a name prefix within readable collections."""
    prefix = request.args.get('prefix')
    results = Entity.suggest_prefix(prefix, authz.authz_collection('read'))
    return jsonify({'results': results})


@blueprint.route('/api/1/entities/<id>', methods=['GET'])
def view(id):
    """Return a single entity; requires read access to its collection."""
    entity = obj_or_404(Entity.by_id(id))
    authz.require(authz.collection_read(entity.collection_id))
    return jsonify(entity)


@blueprint.route('/api/1/entities/<id>', methods=['POST', 'PUT'])
def update(id):
    """Update an entity; requires write access to both its current
    collection and the (possibly different) target collection."""
    entity = obj_or_404(Entity.by_id(id))
    authz.require(authz.collection_write(entity.collection_id))
    data = EntityForm().deserialize(request_data())
    # Fix: the deserialized EntityForm exposes the target collection under
    # 'collection' (as used in create()); the old key 'list' is not an
    # EntityForm field and raised KeyError on every update.
    authz.require(data['collection'])
    authz.require(authz.collection_write(data['collection'].id))
    entity.update(data)
    db.session.commit()
    return view(entity.id)


@blueprint.route('/api/1/entities/<id>', methods=['DELETE'])
def delete(id):
    """Delete an entity; requires write access to its collection."""
    entity = obj_or_404(Entity.by_id(id))
    authz.require(authz.collection_write(entity.collection_id))
    entity.delete()
    db.session.commit()
    return jsonify({'status': 'ok'})
Seven years ago, my wife and I bought our first home in beautiful city of Austin. We were excited – after all, with our first kid on the way, this was going to be Our Home. We told ourselves we’d take care of everything that came with home ownership. With every little change we made to the home (adding blinds, ceiling fans to the rooms that didn’t have them already, etc), we stashed the home improvement receipts away. Over the years, we ended up adding solar panels, changed out our thermostat to be “wi-fi”, swapped out outlets to support USB, and updated our light switches to be controllable via our smartphones or Alexa. All of these upgrades came with warranties which need proof-of-purchase receipts so we stored these away as well…I think. Are they in the kitchen drawer? The file cabinet? On my desk in the study? I wondered why I couldn’t just save the receipts by taking a picture with my phone and then chuck the paper receipt. After a few years of owning our home, I started getting a little confused about when to take care of routine maintenance items – simple things like changing the air filters I was forgetting to do. And I was wondering just what impact missing maintenance was having on the overall “health” of my home. Most cars have a notification system built right into the driver console – you know when to check your tire pressure, add more windshield wiper fluid, or change your oil because you’re notified that it’s time to take care of these sorts of things. But there really is no “check engine light” for the home. As a result, homeowners are forced to either learn through experience what goes into being a good homeowner or react to problems as they come up. Seeing that there was no compelling app solving these sorts of problems (getting alerts when your home needs attention and conveniently saving receipts/warranties), I set out to build HomeBit with some great people I’d worked with in the past. 
Over a few iterations of the product, our mission became clearer: a mobile dashboard app for homeowners to manage their homes better. To that end, we’re excited about enhancing the lives of homeowners through each new release of HomeBit. We’ve got an exciting roadmap of features, and as we roll these out, we’ll be sharing updates right here in the HomeBit blog. The easiest way to get notified of new release details is by following us on Twitter (@GetHomeBit) or liking us on Facebook. While we have our own vision of what we think makes HomeBit a great app for homeowners, some of the best ideas come from our users like you. So feel free to send us a note and share your ideas – we’re all ears and we promise we’re listening!
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------#
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------#

# kivy implementation:
# most of the code will not be used, but some important function have been
# emulated.

from __future__ import division

import logging
import os
from array import array

from kivy.clock import Clock
from kivy.core.image import Image as CoreImage
from kivy.core.text import Label as CoreLabel
from kivy.graphics.texture import Texture

from pysollib.kivy.LApp import LImage
from pysollib.kivy.LApp import LTopLevel0

# ************************************************************************
# * window manager util
# ************************************************************************


def wm_withdraw(window):
    # Hide the given toplevel window (delegates to the widget).
    window.wm_withdraw()


def wm_map(window, maximized=0):
    # No-op under kivy; kept for tk API compatibility.
    return

# ************************************************************************
# * window util
# ************************************************************************


def setTransient(window, parent, relx=None, rely=None, expose=1):
    # Make an existing toplevel window transient for a parent.
    #
    # The window must exist but should not yet have been placed; in
    # other words, this should be called after creating all the
    # subwidget but before letting the user interact.

    # not used in kivy (highly tk specific).
    return


def makeToplevel(parent, title=None):
    print('tkutil: makeTopLevel')
    # Create a Toplevel window and return its content widget.
    window = LTopLevel0(parent, title)
    # window = LTopLevelPopup(parent, title)
    return window.content


def make_help_toplevel(app, title=None):
    # Create an independent Toplevel window.
    # Under kivy we simply reuse the app's top widget.
    window = app.top
    # from pysollib.winsystems import init_root_window
    # window = Tkinter.Tk(className=TITLE)
    # init_root_window(window, app)
    return window

# ************************************************************************
# * bind wrapper - Tkinter doesn't properly delete all bindings
# ************************************************************************

__mfx_bindings = {}
__mfx_wm_protocols = ("WM_DELETE_WINDOW", "WM_TAKE_FOCUS", "WM_SAVE_YOURSELF")


def bind(widget, sequence, func, add=None):
    # Register func under the tk-style event sequence on widgets that
    # expose a 'bindings' dict; all key/mouse sequences listed below are
    # intentionally ignored (handled elsewhere in the kivy port).
    # logging.info('tkutil: bind %s %s %s %s '
    #    % (widget, sequence, func, add))
    # logging.info('tkutil: bind canvas = ' % str(widget.canvas))

    if hasattr(widget, 'bindings'):
        # logging.info('tkutil: bind %s %s %s %s '
        #   % (sequence, widget, func, add))
        widget.bindings[sequence] = func
    else:
        # logging.info('tkutil: bind failed %s %s' % (sequence, widget))
        pass

    if (sequence == '<KeyPress-Left>'):
        return
    if (sequence == '<KeyPress-Right>'):
        return
    if (sequence == '<KeyPress-Prior>'):
        return
    if (sequence == '<KeyPress-Next>'):
        return
    if (sequence == '<KeyPress-Up>'):
        return
    if (sequence == '<KeyPress-Down>'):
        return
    if (sequence == '<KeyPress-Begin>'):
        return
    if (sequence == '<KeyPress-Home>'):
        return
    if (sequence == '<KeyPress-End>'):
        return
    if (sequence == '<KeyPress-Down>'):
        return
    if (sequence == '<4>'):
        return
    if (sequence == '<5>'):
        return
    if (sequence == '<1>'):
        return
    if (sequence == '<Motion>'):
        return
    if (sequence == '<ButtonRelease-1>'):
        return
    if (sequence == '<Control-1>'):
        return
    if (sequence == '<Shift-1>'):
        return
    if (sequence == '<Double-1>'):
        return
    if (sequence == '<3>'):
        return
    if (sequence == '<2>'):
        return
    if (sequence == '<Control-3>'):
        return
    if (sequence == '<Enter>'):
        return
    if (sequence == '<Leave>'):
        return
    if (sequence == '<Unmap>'):
        return
    if (sequence == '<Configure>'):
        return
    pass


def unbind_destroy(widget):
    # Drop all bindings registered on the widget.
    # logging.info('tkutil: unbind %s' % (widget))
    widget.bindings = []
    pass

# ************************************************************************
# * timer wrapper - Tkinter doesn't properly delete all commands
# ************************************************************************


def after(widget, ms, func, *args):
    # tk 'after' emulation: only the 'idle' variant is actually
    # scheduled (via kivy's Clock); integer delays are ignored on
    # purpose (would drain the battery).
    print('tkutil: after(%s, %s, %s, %s)' % (widget, ms, func, args))
    if (ms == 'idle'):
        print('demo use')
        Clock.schedule_once(lambda dt: func(), 1.0)
    elif (isinstance(ms, int)):
        # print('ms: play timer (accounting)')
        # Clock.schedule_once(lambda dt: func(), float(ms)/1000.0)
        # makes not sense, drains battery!
        pass


def after_idle(widget, func, *args):
    print('tkutil: after_idle()')
    return after(widget, "idle", func, *args)


def after_cancel(t):
    # No-op: nothing to cancel since integer timers are never scheduled.
    print('tkutil: after_cancel()')
    pass

# ************************************************************************
# * image handling
# ************************************************************************


def makeImage(file=None, data=None, dither=None, alpha=None):
    # Build an LImage either from a file path or an existing texture;
    # exactly one of file/data must be given.
    kw = {}
    if data is None:
        assert file is not None
        kw["source"] = file
        # print('makeImage: source = %s' % file)
        # if (file=='/home/lb/PRG/Python/Kivy/pysolfc/data/images/redeal.gif'):
        #     y = self.yy
    else:
        assert data is not None
        kw["texture"] = data
        # does this work?? - does this case ever happen?
        # yy = self.yy
    '''
    if 'source' in kw:
        logging.info ("makeImage: " + kw["source"])
    if 'texture' in kw:
        logging.info ("makeImage: " + str(kw["texture"]))
    '''
    return LImage(**kw)


loadImage = makeImage


def copyImage(image, x, y, width, height):
    # Shallow "copy": the same image object is returned.
    # return Image(source=image.source)
    # return Image(texture=image.texture)
    return image


def fillTexture(texture, fill, outline=None, owidth=1):
    # Fill a texture with a solid color and optional outline; colors are
    # '#rrggbb' or '#rrggbbaa' strings (tk notation).
    # logging.info("fillImage: t=%s, f=%s o=%s, w=%s" %
    #    (texture, fill, outline, owidth))

    # O.K. Kivy

    if not fill and not outline:
        return

    width = texture.width
    height = texture.height

    ox = round(owidth)
    ow = int(ox)  # must be an int!
    if width <= 2 * ow or height <= 2 * ow:
        fill = fill or outline
        outline = None

    if not fill:
        fi0 = 0
        fi1 = 0
        fi2 = 0
        fi3 = 0
    else:
        # we expect values as '#xxxxxx' (color values in Tk notation)
        # (optionally with transparency)
        if (fill[0] == '#'):
            fill = fill[1:]
        fi0 = int(fill[0:2], 16)
        fi1 = int(fill[2:4], 16)
        fi2 = int(fill[4:6], 16)
        fi3 = 255
        if len(fill) >= 8:
            fi3 = int(fill[6:8], 16)

    if not outline:
        # Solid fill: one flat rgba byte row repeated for every line.
        f = (fi0, fi1, fi2, fi3) * width
        f = (f, ) * height
        assert len(f) == height
        f = sum(f, ())
        assert len(f) == height * width * 4
        arr = array('B', f)
        texture.blit_buffer(arr, colorfmt='rgba', bufferfmt='ubyte')
    else:
        if (outline[0] == '#'):
            outline = outline[1:]
        ou0 = int(outline[0:2], 16)
        ou1 = int(outline[2:4], 16)
        ou2 = int(outline[4:6], 16)
        ou3 = 255
        if len(outline) >= 8:
            ou3 = int(outline[6:8], 16)

        # l1: full outline row; l2: outline-fill-outline row.
        l1 = (ou0, ou1, ou2, ou3, ) * width
        l2 = (ou0, ou1, ou2, ou3, ) * ow + (fi0, fi1, fi2, fi3, ) * \
            (width - 2 * ow) + (ou0, ou1, ou2, ou3, ) * ow
        f = (l1, ) * ow + (l2, ) * (height - 2 * ow) + (l1, ) * ow
        assert len(f) == height
        f = sum(f, ())
        assert len(f) == height * width * 4
        arr = array('B', f)
        texture.blit_buffer(arr, colorfmt='rgba', bufferfmt='ubyte')


def createImage(width, height, fill, outline=None, outwidth=1):
    # Create a new LImage of the given size, filled via fillTexture().
    logging.info("createImage: w=%s, h=%s, f=%s, o=%s, ow=%s" %
                 (width, height, fill, outline, outwidth))
    # test settings:
    # if (fill==None):
    #     fill = '#00cc00'
    # if (outline==None):
    #     outline = '#ff00ff'
    # if (fill is None and (outline is None or outline == '')):
    #     outline = '#fff000'
    #     outwidth = 1
    texture = Texture.create(size=(width, height), colorfmt='rgba')
    fillTexture(texture, fill, outline, outwidth)
    image = LImage(texture=texture)
    # logging.info("createImage: LImage create %s" % image)
    return image


def shadowImage(image, color='#3896f8', factor=0.3):
    logging.info("shadowImage: ")
    # TBD.
    return None
    # Not needed for kivy. But - what does it do?
    # It was called when the first king was dragged onto the
    # foundation (possibly a "You won!" marker).


def markImage(image):
    logging.info("markImage: ")
    return None


def _createImageMask(texture, color):
    # Build a translucent mask from the texture's alpha channel:
    # transparent pixels stay fully transparent, opaque pixels become a
    # low-alpha black or white silhouette.
    col = 0
    if (color == 'black'):
        col = 0
    if (color == 'white'):
        col = 255

    g = texture.pixels
    arr = array('B', g)
    for mx in range(int(len(arr) / 4)):
        m = 4 * mx
        if arr[m + 3] < 128:
            arr[m + 3] = 0
            arr[m] = arr[m + 1] = arr[m + 2] = 0
        else:
            arr[m + 3] = 32
            arr[m] = arr[m + 1] = arr[m + 2] = col

    mask = Texture.create(size=texture.size, colorfmt='rgba')
    mask.blit_buffer(arr, colorfmt='rgba', bufferfmt='ubyte')
    return mask


def _scaleTextureToSize(texture, size):
    # Nearest-neighbour scale of the texture into a new (width, height)
    # texture, preserving aspect ratio; uncovered borders are transparent.
    width = size[0]
    height = size[1]
    g = texture.pixels
    ag = array('B', g)
    gw, gh = texture.size
    # print('size:',width,height)
    # print('texture size:',gw,gh)

    bb = array('B', [0 for x in range(width * height * 4)])
    # print ('bb length: ',len(bb))
    # print ('gg length: ',gw*gh*4)

    scalex = width / gw
    scaley = height / gh

    # determine scale plus x and y offsets.
    scale = scaley
    if (scalex < scaley):
        scale = scalex
    offx = (width - gw * scale) / 2
    offy = (height - gh * scale) / 2
    # print ('scale: ',scalex,'/',scaley,' -> ',scale)
    # print ('offs: ',offx,'/',offy)

    for bi in range(height):
        bline = bi * width
        if (bi >= offy) and (bi < (height - offy)):
            # transfer (source rows are flipped vertically)
            ai = gh - int((bi - offy) / scale) - 1
            aline = ai * gw
            for bk in range(width):
                bpos = (bline + bk) * 4
                if (bk >= offx) and (bk < (width - offx)):
                    # transfer
                    ak = int((bk - offx) / scale)
                    apos = (aline + ak) * 4
                    bb[bpos] = ag[apos]
                    bb[bpos + 1] = ag[apos + 1]
                    bb[bpos + 2] = ag[apos + 2]
                    bb[bpos + 3] = ag[apos + 3]
                else:
                    # transparent
                    bb[bpos + 3] = 0
        else:
            # transparent
            for bk in range(width):
                bb[(bline + bk) * 4 + 3] = 0

    stext = Texture.create(size=(width, height), colorfmt='rgba')
    stext.blit_buffer(bb, colorfmt='rgba', bufferfmt='ubyte')
    return stext


def _pasteTextureTo(texture, totexture):
    # Paste the (mostly) opaque pixels of texture over totexture;
    # both must have identical sizes (otherwise returns None).
    g = texture.pixels
    ag = array('B', g)
    gw, gh = texture.size

    t = totexture.pixels
    at = array('B', t)
    tw, th = totexture.size

    if (tw != gw) or (th != gh):
        return

    for i in range(int(len(ag) / 4)):
        i4 = i * 4
        if ag[i4 + 3] > 128:
            at[i4] = ag[i4]
            at[i4 + 1] = ag[i4 + 1]
            at[i4 + 2] = ag[i4 + 2]
            at[i4 + 3] = ag[i4 + 3]

    stext = Texture.create(size=(tw, th), colorfmt='rgba')
    stext.blit_buffer(at, colorfmt='rgba', bufferfmt='ubyte')
    return stext


def createBottom(image, color='white', backfile=None):
    # Create a card "bottom" (empty-stack placeholder) image: a faint
    # silhouette of the card, optionally with the card back pasted in.
    backfilebase = None
    if backfile is not None:
        backfilebase = os.path.basename(backfile)
    logging.info("createBottom: %s | %s" % (color, backfilebase))
    # print('createBottom:',image)

    # th = 1  # thickness
    # size = (w - th * 2, h - th * 2)

    # original: also draws an outline around the card - we cannot do that.
    tmp0 = _createImageMask(image.texture, color)

    if backfile:
        tmp1 = CoreImage(backfile)
        txtre = _scaleTextureToSize(tmp1.texture, image.texture.size)
        tmp = _pasteTextureTo(txtre, tmp0)
    else:
        tmp = tmp0

    img = LImage(texture=tmp)
    img.size[0] = image.getWidth()
    img.size[1] = image.getHeight()
    return img


'''
im = image._pil_image
th = 1                      # thickness
sh = Image.new('RGBA', im.size, color)
out = Image.composite(sh, im, im)
w, h = im.size
size = (w - th * 2, h - th * 2)
tmp = Image.new('RGBA', size, color)
tmp.putalpha(60)
mask = out.resize(size, Image.ANTIALIAS)
out.paste(tmp, (th, th), mask)
if backfile:
    back = Image.open(backfile).convert('RGBA')
    w0, h0 = back.size
    w1, h1 = im.size
    a = min(float(w1) / w0, float(h1) / h0)
    a = a * 0.9
    w0, h0 = int(w0 * a), int(h0 * a)
    back = back.resize((w0, h0), Image.ANTIALIAS)
    x, y = (w1 - w0) / 2, (h1 - h0) / 2
    out.paste(back, (x, y), back)
return PIL_Image(image=out)
'''

# ************************************************************************
# * font utils
# ************************************************************************


def get_text_width(text, font, root=None):
    # Measure rendered text width in pixels using a kivy CoreLabel.
    # NOTE(review): the 'font' argument is currently ignored — the
    # default label font is measured; confirm whether that is intended.
    logging.info("get_text_width: %s %s" % (text, font))
    label = CoreLabel()
    label.text = text
    label.refresh()
    return label.content_width
    # return Font(root=root, font=font).measure(text)
As most of you know by now, I walk our two dogs in the early morning (around 7 am) and late evening (after meetings at the fellowship). It is always a time to clear my head and relax in the quiet of the day in our neighborhood. Yes, there are always the souped-up cars roaring down Woodside that punctuate the quiet with popping aggression, but most of the time I hear the early birds singing the day awake amongst the fragrances of spring flowers, or the late murmurs of conversation through the open windows now that evenings are warming with the changes that accompany longer days. I treasure these moments to ease my head and heart from the concerns of the day. I wish such regular times of opening to life for each of you. Even as spring is pointing toward summer here at the UU Fellowship of Redwood City, activity continues to be intense. You have already chosen your Ministerial Search Committee and approved your Covenant of Right Relations. Now, we move toward the Annual General Meeting, preparing the committee reports and assessments that form the General Annual Report that is released to all of you at the Meeting. The Nominating Committee is seeking the new leadership for UUFRC for the next fiscal year, which begins on July 1 and ends on June 30 of 2020. This means all of our staff is quite busy wrapping up this year and preparing for the year to come. I would invite any and all of you who work directly with our fine staff, or are aware of the ministries they bring to this congregation, to stop them and say, “Thank YOU for your service!” It is a small acknowledgement of all that they do for all of us over the year. You might want to do the same for all our many hearty volunteers. Honest appreciation is a blessing to both the giver and the receiver.
""" Inception V4, suitable for images with around 299 x 299 (original) Implemented the following paper: Szegedy C, Ioffe S, Vanhoucke V. Inception-v4, inception-resnet and the impact of residual connections on learning[J]. arXiv preprint arXiv:1602.07261, 2016. Jie Hu, Li Shen, Gang Sun. "Squeeze-and-Excitation Networks" https://arxiv.org/pdf/1709.01507v1.pdf This modification version is based on Inception-v4 original but change to 224 x 224 size of input data. Modified by Lin Xiong, May-27, 2017 Added Squeeze-and-Excitation block by Lin Xiong Oct-30, 2017 Thanks to Cher Keng Heng """ #import find_mxnet import mxnet as mx def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix='', withRelu=True, withBn=False, bn_mom=0.9, workspace=256): conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, name='%s%s_conv2d' % (name, suffix), workspace=workspace) if withBn: conv = mx.sym.BatchNorm(data=conv, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='%s%s_bn' % (name, suffix)) if withRelu: conv = mx.sym.Activation(data=conv, act_type='relu', name='%s%s_relu' % (name, suffix)) return conv # Input Shape is 299*299*3 (old) # Input Shape is 224*224*3 (new) def inception_stem(name, data, num_1_1=32, num_1_2=32, num_1_3=64, num_2_1=96, num_3_1=64, num_3_2=96, num_4_1=64, num_4_2=64, num_4_3=64, num_4_4=96, num_5_1=192, bn_mom=0.9): stem_3x3 = Conv(data=data, num_filter=num_1_1, kernel=(3, 3), stride=(2, 2), name=('%s_conv' % name), bn_mom=bn_mom, workspace=256) stem_3x3 = Conv(data=stem_3x3, num_filter=num_1_2, kernel=(3, 3), name=('%s_stem' % name), suffix='_conv', bn_mom=bn_mom, workspace=256) stem_3x3 = Conv(data=stem_3x3, num_filter=num_1_3, kernel=(3, 3), pad=(1, 1), name=('%s_stem' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256) pool1 = mx.sym.Pooling(data=stem_3x3, kernel=(3, 3), stride=(2, 2), pad=(0, 0), pool_type='max', name=('%s_%s_pool1' % ('max', name))) stem_1_3x3 = 
Conv(data=stem_3x3, num_filter=num_2_1, kernel=(3, 3), stride=(2, 2), name=('%s_stem_1' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256) concat1 = mx.sym.Concat(*[pool1, stem_1_3x3], name=('%s_concat_1' % name)) stem_1_1x1 = Conv(data=concat1, num_filter=num_3_1, name=('%s_stem_1' % name), suffix='_conv_2', bn_mom=bn_mom, workspace=256) stem_1_3x3 = Conv(data=stem_1_1x1, num_filter=num_3_2, kernel=(3, 3), name=('%s_stem_1' % name), suffix='_conv_3', bn_mom=bn_mom, workspace=256) stem_2_1x1 = Conv(data=concat1, num_filter=num_4_1, name=('%s_stem_2' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256) stem_2_7x1 = Conv(data=stem_2_1x1, num_filter=num_4_2, kernel=(7, 1), pad=(3, 0), name=('%s_stem_2' % name), suffix='_conv_2', bn_mom=bn_mom, workspace=256) stem_2_1x7 = Conv(data=stem_2_7x1, num_filter=num_4_3, kernel=(1, 7), pad=(0, 3), name=('%s_stem_2' % name), suffix='_conv_3', bn_mom=bn_mom, workspace=256) stem_2_3x3 = Conv(data=stem_2_1x7, num_filter=num_4_4, kernel=(3, 3), name=('%s_stem_2' % name), suffix='_conv_4', bn_mom=bn_mom, workspace=256) concat2 = mx.sym.Concat(*[stem_1_3x3, stem_2_3x3], name=('%s_concat_2' % name)) pool2 = mx.sym.Pooling(data=concat2, kernel=(3, 3), stride=(2, 2), pad=(0, 0), pool_type='max', name=('%s_%s_pool2' % ('max', name))) stem_3_3x3 = Conv(data=concat2, num_filter=num_5_1, kernel=(3, 3), stride=(2, 2), name=('%s_stem_3' % name), suffix='_conv_1', withRelu=False, bn_mom=bn_mom, workspace=256) concat3 = mx.sym.Concat(*[pool2, stem_3_3x3], name=('%s_concat_3' % name)) bn1 = mx.sym.BatchNorm(data=concat3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=('%s_bn1' % name)) act1 = mx.sym.Activation(data=bn1, act_type='relu', name=('%s_relu1' % name)) return act1 # Output Shape is 25*25*384 # Input Shape is 25*25*384 def InceptionA(name, data, num_1_1=96, num_2_1=96, num_3_1=64, num_3_2=96, num_4_1=64, num_4_2=96, num_4_3=96, bn_mom=0.9): a1 = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), 
pool_type='avg', name=('%s_%s_pool1' % ('avg', name))) a1 = Conv(data=a1, num_filter=num_1_1, name=('%s_a_1' % name), suffix='_conv', withRelu=False, bn_mom=bn_mom, workspace=256) a2 = Conv(data=data, num_filter=num_2_1, name=('%s_a_2' % name), suffix='_conv', withRelu=False, bn_mom=bn_mom, workspace=256) a3 = Conv(data=data, num_filter=num_3_1, name=('%s_a_3' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256) a3 = Conv(data=a3, num_filter=num_3_2, kernel=(3, 3), pad=(1, 1), name=('%s_a_3' % name), suffix='_conv_2', withRelu=False, bn_mom=bn_mom, workspace=256) a4 = Conv(data=data, num_filter=num_4_1, name=('%s_a_4' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256) a4 = Conv(data=a4, num_filter=num_4_2, kernel=(3, 3), pad=(1, 1), name=('%s_a_4' % name), suffix='_conv_2', bn_mom=bn_mom, workspace=256) a4 = Conv(data=a4, num_filter=num_4_3, kernel=(3, 3), pad=(1, 1), name=('%s_a_4' % name), suffix='_conv_3', withRelu=False, bn_mom=bn_mom, workspace=256) m = mx.sym.Concat(*[a1, a2, a3, a4], name=('%s_a_concat1' % name)) m = mx.sym.BatchNorm(data=m, fix_gamma=False, eps=2e-5, name=('%s_a_bn1' % name)) m = mx.sym.Activation(data=m, act_type='relu', name=('%s_a_relu1' % name)) return m # Output Shape is 25*25*384 # Input Shape is 12*12*1024 def InceptionB(name, data, num_1_1=128, num_2_1=384, num_3_1=192, num_3_2=224, num_3_3=256, num_4_1=192, num_4_2=192, num_4_3=224, num_4_4=224, num_4_5=256, bn_mom=0.9): b1 = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type='avg', name=('%s_%s_pool1' % ('avg', name))) b1 = Conv(data=b1, num_filter=num_1_1, name=('%s_b_1' % name), suffix='_conv', withRelu=False, bn_mom=bn_mom, workspace=256) b2 = Conv(data=data, num_filter=num_2_1, name=('%s_b_2' % name), suffix='_conv', withRelu=False, bn_mom=bn_mom, workspace=256) b3 = Conv(data=data, num_filter=num_3_1, name=('%s_b_3' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256) b3 = Conv(data=b3, num_filter=num_3_2, kernel=(1, 7), pad=(0, 3), 
name=('%s_b_3' % name), suffix='_conv_2', bn_mom=bn_mom, workspace=256) b3 = Conv(data=b3, num_filter=num_3_3, kernel=(7, 1), pad=(3, 0), name=('%s_b_3' % name), suffix='_conv_3', withRelu=False, bn_mom=bn_mom, workspace=256) b4 = Conv(data=data, num_filter=num_4_1, name=('%s_b_4' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256) b4 = Conv(data=b4, num_filter=num_4_2, kernel=(1, 7), pad=(0, 3), name=('%s_b_4' % name), suffix='_conv_2', bn_mom=bn_mom, workspace=256) b4 = Conv(data=b4, num_filter=num_4_3, kernel=(7, 1), pad=(3, 0), name=('%s_b_4' % name), suffix='_conv_3', bn_mom=bn_mom, workspace=256) b4 = Conv(data=b4, num_filter=num_4_4, kernel=(1, 7), pad=(0, 3), name=('%s_b_4' % name), suffix='_conv_4', bn_mom=bn_mom, workspace=256) b4 = Conv(data=b4, num_filter=num_4_5, kernel=(7, 1), pad=(3, 0), name=('%s_b_4' % name), suffix='_conv_5', withRelu=False, bn_mom=bn_mom, workspace=256) m = mx.sym.Concat(*[b1, b2, b3, b4], name=('%s_b_concat1' % name)) m = mx.sym.BatchNorm(data=m, fix_gamma=False, eps=2e-5, name=('%s_b_bn1' % name)) m = mx.sym.Activation(data=m, act_type='relu', name=('%s_b_relu1' % name)) return m # Output Shape is 12*12*1024 # Input Shape is 5*5*1536 def InceptionC(name, data, num_1_1=256, num_2_1=256, num_3_1=384, num_3_2=256, num_3_3=256, num_4_1=384, num_4_2=448, num_4_3=512, num_4_4=256, num_4_5=256, bn_mom=0.9): c1 = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type='avg', name=('%s_%s_pool1' % ('avg', name))) c1 = Conv(data=c1, num_filter=num_1_1, name=('%s_c_1' % name), suffix='_conv', withRelu=False, bn_mom=bn_mom, workspace=256) c2 = Conv(data=data, num_filter=num_2_1, name=('%s_c_2' % name), suffix='_conv', withRelu=False, bn_mom=bn_mom, workspace=256) c3 = Conv(data=data, num_filter=num_3_1, name=('%s_c_3' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256) c3_1 = Conv(data=c3, num_filter=num_3_2, kernel=(3, 1), pad=(1, 0), name=('%s_c_3' % name), suffix='_conv_1_1', withRelu=False, 
bn_mom=bn_mom, workspace=256) c3_2 = Conv(data=c3, num_filter=num_3_3, kernel=(1, 3), pad=(0, 1), name=('%s_c_3' % name), suffix='_conv_1_2', withRelu=False, bn_mom=bn_mom, workspace=256) c4 = Conv(data=data, num_filter=num_4_1, name=('%s_c_4' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256) c4 = Conv(data=c4, num_filter=num_4_2, kernel=(1, 3), pad=(0, 1), name=('%s_c_4' % name), suffix='_conv_2', bn_mom=bn_mom, workspace=256) c4 = Conv(data=c4, num_filter=num_4_3, kernel=(3, 1), pad=(1, 0), name=('%s_c_4' % name), suffix='_conv_3', bn_mom=bn_mom, workspace=256) c4_1 = Conv(data=c4, num_filter=num_4_4, kernel=(3, 1), pad=(1, 0), name=('%s_c_4' % name), suffix='_conv_3_1', withRelu=False, bn_mom=bn_mom, workspace=256) c4_2 = Conv(data=c4, num_filter=num_4_5, kernel=(1, 3), pad=(0, 1), name=('%s_c_4' % name), suffix='_conv_3_2', withRelu=False, bn_mom=bn_mom, workspace=256) m = mx.sym.Concat(*[c1, c2, c3_1, c3_2, c4_1, c4_2], name=('%s_c_concat1' % name)) m = mx.sym.BatchNorm(data=m, fix_gamma=False, eps=2e-5, name=('%s_c_bn1' % name)) m = mx.sym.Activation(data=m, act_type='relu', name=('%s_c_relu1' % name)) return m # Output Shape is 5*5*1536 # Input Shape is 25*25*384 def ReductionA(name, data, num_2_1=384, num_3_1=192, num_3_2=224, num_3_3=256, bn_mom=0.9): ra1 = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pad=(0, 0), pool_type='max', name=('%s_%s_pool1' % ('max', name))) ra2 = Conv(data=data, num_filter=num_2_1, kernel=(3, 3), stride=(2, 2), name=('%s_ra_2' % name), suffix='_conv', withRelu=False, bn_mom=bn_mom, workspace=256) ra3 = Conv(data=data, num_filter=num_3_1, name=('%s_ra_3' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256) ra3 = Conv(data=ra3, num_filter=num_3_2, kernel=(3, 3), pad=(1, 1), name=('%s_ra_3' % name), suffix='_conv_2', bn_mom=bn_mom, workspace=256) ra3 = Conv(data=ra3, num_filter=num_3_3, kernel=(3, 3), stride=(2, 2), name=('%s_ra_3' % name), suffix='_conv_3', withRelu=False, bn_mom=bn_mom, workspace=256) m = 
mx.sym.Concat(*[ra1, ra2, ra3], name=('%s_ra_concat1' % name)) m = mx.sym.BatchNorm(data=m, fix_gamma=False, eps=2e-5, name=('%s_ra_bn1' % name)) m = mx.sym.Activation(data=m, act_type='relu', name=('%s_ra_relu1' % name)) return m # Output Shape is 12*12*1024 # Input Shape is 12*12*1024 def ReductionB(name, data, num_2_1=192, num_2_2=192, num_3_1=256, num_3_2=256, num_3_3=320, num_3_4=320, bn_mom=0.9): rb1 = mx.sym.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pad=(0, 0), pool_type='max', name=('%s_%s_pool1' % ('max', name))) rb2 = Conv(data=data, num_filter=num_2_1, name=('%s_rb_2' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256) rb2 = Conv(data=rb2, num_filter=num_2_2, kernel=(3, 3), stride=(2, 2), name=('%s_rb_2' % name), suffix='_conv_2', withRelu=False, bn_mom=bn_mom, workspace=256) rb3 = Conv(data=data, num_filter=num_3_1, name=('%s_rb_3' % name), suffix='_conv_1', bn_mom=bn_mom, workspace=256) rb3 = Conv(data=rb3, num_filter=num_3_2, kernel=(1, 7), pad=(0, 3), name=('%s_rb_3' % name), suffix='_conv_2', bn_mom=bn_mom, workspace=256) rb3 = Conv(data=rb3, num_filter=num_3_3, kernel=(7, 1), pad=(3, 0), name=('%s_rb_3' % name), suffix='_conv_3', bn_mom=bn_mom, workspace=256) rb3 = Conv(data=rb3, num_filter=num_3_4, kernel=(3, 3), stride=(2, 2), name=('%s_rb_3' % name), suffix='_conv_4', withRelu=False, bn_mom=bn_mom, workspace=256) m = mx.sym.Concat(*[rb1, rb2, rb3], name=('%s_rb_concat1' % name)) m = mx.sym.BatchNorm(data=m, fix_gamma=False, eps=2e-5, name=('%s_rb_bn1' % name)) m = mx.sym.Activation(data=m, act_type='relu', name=('%s_rb_relu1' % name)) return m # Output Shape is 5*5*1536 # Squeeze and excitation block def squeeze_excitation_block(name, data, num_filter, ratio): squeeze = mx.sym.Pooling(data=data, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_squeeze') squeeze = mx.symbol.Flatten(data=squeeze, name=name + '_flatten') excitation = mx.symbol.FullyConnected(data=squeeze, num_hidden=int(num_filter*ratio), name=name + 
'_excitation1') excitation = mx.sym.Activation(data=excitation, act_type='relu', name=name + '_excitation1_relu') excitation = mx.symbol.FullyConnected(data=excitation, num_hidden=num_filter, name=name + '_excitation2') excitation = mx.sym.Activation(data=excitation, act_type='sigmoid', name=name + '_excitation2_sigmoid') scale = mx.symbol.broadcast_mul(data, mx.symbol.reshape(data=excitation, shape=(-1, num_filter, 1, 1))) return scale def circle_in4a(name, data, ratio, num_1_1=96, num_2_1=96, num_3_1=64, num_3_2=96, num_4_1=64, num_4_2=96, num_4_3=96, bn_mom=0.9, round=4): in4a = data for i in xrange(round): in4a = InceptionA(name + ('_%d' % i), in4a, num_1_1, num_2_1, num_3_1, num_3_2, num_4_1, num_4_2, num_4_3, bn_mom) _, out_shapes, _ = in4a.infer_shape(data=(1, 3, 224, 224)) # import pdb # pdb.set_trace() num_filter = int(out_shapes[0][1]) in4a = squeeze_excitation_block(name + ('_%d' % i), in4a, num_filter, ratio) return in4a def circle_in7b(name, data, ratio, num_1_1=128, num_2_1=384, num_3_1=192, num_3_2=224, num_3_3=256, num_4_1=192, num_4_2=192, num_4_3=224, num_4_4=224, num_4_5=256, bn_mom=0.9, round=7): in7b = data for i in xrange(round): in7b = InceptionB(name + ('_%d' % i), in7b, num_1_1, num_2_1, num_3_1, num_3_2, num_3_3, num_4_1, num_4_2, num_4_3, num_4_4, num_4_5, bn_mom) _, out_shapes, _, = in7b.infer_shape(data=(1, 3, 224, 224)) # import pdb # pdb.set_trace() num_filter = int(out_shapes[0][1]) in7b = squeeze_excitation_block(name + ('_%d' % i), in7b, num_filter, ratio) return in7b def circle_in3c(name, data, ratio, num_1_1=256, num_2_1=256, num_3_1=384, num_3_2=256, num_3_3=256, num_4_1=384, num_4_2=448, num_4_3=512, num_4_4=256, num_4_5=256, bn_mom=0.9, round=3): in3c = data for i in xrange(round): in3c = InceptionC(name + ('_%d' % i), in3c, num_1_1, num_2_1, num_3_1, num_3_2, num_3_3, num_4_1, num_4_2, num_4_3, num_4_4, num_4_5, bn_mom) _, out_shapes, _, = in3c.infer_shape(data=(1, 3, 224, 224)) # import pdb # pdb.set_trace() num_filter = 
int(out_shapes[0][1]) in3c = squeeze_excitation_block(name + ('_%d' % i), in3c, num_filter, ratio) return in3c # create SE inception-v4 def get_symbol(ratio, num_classes=1000): # input shape 229*229*3 (old) # input shape 224*224*3 (new) data = mx.symbol.Variable(name="data") bn_mom = 0.9 # import pdb # pdb.set_trace() # stage stem (num_1_1, num_1_2, num_1_3) = (32, 32, 64) num_2_1 = 96 (num_3_1, num_3_2) = (64, 96) (num_4_1, num_4_2, num_4_3, num_4_4) = (64, 64, 64, 96) num_5_1 = 192 in_stem = inception_stem('stem_stage', data, num_1_1, num_1_2, num_1_3, num_2_1, num_3_1, num_3_2, num_4_1, num_4_2, num_4_3, num_4_4, num_5_1, bn_mom) # stage 4 x InceptionA num_1_1 = 96 num_2_1 = 96 (num_3_1, num_3_2) = (64, 96) (num_4_1, num_4_2, num_4_3) = (64, 96, 96) in4a = circle_in4a('in4a', in_stem, ratio, num_1_1, num_2_1, num_3_1, num_3_2, num_4_1, num_4_2, num_4_3, bn_mom, 4) # stage ReductionA num_1_1 = 384 (num_2_1, num_2_2, num_2_3) = (192, 224, 256) re_a = ReductionA('re_a', in4a, num_1_1, num_2_1, num_2_2, num_2_3, bn_mom) # stage 7 x InceptionB num_1_1 = 128 num_2_1 = 384 (num_3_1, num_3_2, num_3_3) = (192, 224, 256) (num_4_1, num_4_2, num_4_3, num_4_4, num_4_5) = (192, 192, 224, 224, 256) in7b = circle_in7b('in7b', re_a, ratio, num_1_1, num_2_1, num_3_1, num_3_2, num_3_3, num_4_1, num_4_2, num_4_3, num_4_4, num_4_5, bn_mom, 7) # stage ReductionB (num_1_1, num_1_2) = (192, 192) (num_2_1, num_2_2, num_2_3, num_2_4) = (256, 256, 320, 320) re_b = ReductionB('re_b', in7b, num_1_1, num_1_2, num_2_1, num_2_2, num_2_3, num_2_4, bn_mom) # stage 3 x InceptionC num_1_1 = 256 num_2_1 = 256 (num_3_1, num_3_2, num_3_3) = (384, 256, 256) (num_4_1, num_4_2, num_4_3, num_4_4, num_4_5) = (384, 448, 512, 256, 256) in3c = circle_in3c('in3c', re_b, ratio, num_1_1, num_2_1, num_3_1, num_3_2, num_3_3, num_4_1, num_4_2, num_4_3, num_4_4, num_4_5, bn_mom, 3) # stage Average Pooling #pool = mx.sym.Pooling(data=in3c, kernel=(8, 8), stride=(1, 1), pool_type="avg", name="global_pool") pool = 
mx.sym.Pooling(data=in3c, global_pool=True, kernel=(5, 5), stride=(1, 1), pad=(0, 0), pool_type="avg", name="global_pool") # stage Dropout #dropout = mx.sym.Dropout(data=pool, p=0.5) #modified for vggface data dropout = mx.sym.Dropout(data=pool, p=0.2) #original # dropout = mx.sym.Dropout(data=pool, p=0.8) flatten = mx.sym.Flatten(data=dropout) # output fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=num_classes, name='fc1') softmax = mx.symbol.SoftmaxOutput(data=fc1, name='softmax') return softmax # if __name__ == '__main__': # net = get_symbol(1000) # shape = {'softmax_label': (32, 1000), 'data': (32, 3, 299, 299)} # mx.viz.plot_network(net, title='inception-v4', format='png', shape=shape).render('inception-v4')
Is anybody else facing screen flickering on their GF2? Mine has never been rooted and runs Walton's own preinstalled KitKat 4.4.2 ROM. What is the solution to stop the screen flickering? Urgent help, please.
import logging

from utct.common.trainer_template import TrainerTemplate

import mxnet as mx
from mxnet import gluon, autograd


class Trainer(TrainerTemplate):
    """
    Class, which provides training process under Gluon/MXNet framework.

    Parameters:
    ----------
    model : object
        instance of Model class with graph of CNN
    optimizer : object
        instance of Optimizer class with CNN optimizer
    data_source : object
        instance of DataSource class with training/validation iterators
    saver : object
        instance of Saver class with information about stored files
    ctx : object
        instance of MXNet context
    """

    def __init__(self, model, optimizer, data_source, saver, ctx):
        super(Trainer, self).__init__(
            model,
            optimizer,
            data_source,
            saver)
        self.ctx = ctx

    def _hyper_train_target_sub(self, **kwargs):
        """
        Calling single training procedure for specific hyper parameters
        from hyper optimizer.

        Parameters:
        ----------
        kwargs : dict
            hyper parameter values forwarded to the optimizer factory

        Returns:
        -------
        float
            best validation accuracy observed across all epochs (the value
            the hyper optimizer should maximize)
        """
        fh = None
        if self.saver.log_filename:
            fh = logging.FileHandler(self.saver.log_filename)
            self.logger.addHandler(fh)
        self.logger.info("Training with parameters: {}".format(kwargs))

        train_loader, val_loader = self.data_source()
        net = self.model()
        net.initialize(
            mx.init.Xavier(magnitude=2.24),
            ctx=self.ctx)
        trainer = self.optimizer(
            params=net.collect_params(),
            **kwargs)
        metric = mx.metric.Accuracy()
        loss = gluon.loss.SoftmaxCrossEntropyLoss()
        log_interval = 1

        # BUG FIX: the original implementation always returned the constant
        # 0.0, so every hyper-parameter trial looked identical to the hyper
        # optimizer. Track the best validation accuracy instead.
        best_value = 0.0

        # self.num_epoch is presumably provided by TrainerTemplate -- TODO confirm
        for epoch in range(self.num_epoch):
            metric.reset()
            for i, (data, label) in enumerate(train_loader):
                # Copy data to ctx if necessary
                data = data.as_in_context(self.ctx)
                label = label.as_in_context(self.ctx)
                # Start recording computation graph with record() section.
                # Recorded graphs can then be differentiated with backward.
                with autograd.record():
                    output = net(data)
                    L = loss(output, label)
                    L.backward()
                # take a gradient step with batch_size equal to data.shape[0]
                trainer.step(data.shape[0])
                # update metric at last.
                metric.update([label], [output])

                if i % log_interval == 0 and i > 0:
                    name, acc = metric.get()
                    print('[Epoch %d Batch %d] Training: %s=%f' % (epoch, i, name, acc))

            name, acc = metric.get()
            print('[Epoch %d] Training: %s=%f' % (epoch, name, acc))

            name, val_acc = self._test(
                model=net,
                val_data=val_loader,
                ctx=self.ctx)
            print('[Epoch %d] Validation: %s=%f' % (epoch, name, val_acc))
            best_value = max(best_value, val_acc)

        if fh is not None:
            self.logger.removeHandler(fh)
            fh.close()

        return best_value

    @staticmethod
    def _test(model, val_data, ctx):
        """Evaluate *model* on *val_data*; return (metric_name, accuracy)."""
        metric = mx.metric.Accuracy()
        for data, label in val_data:
            data = data.as_in_context(ctx)
            label = label.as_in_context(ctx)
            output = model(data)
            metric.update([label], [output])
        return metric.get()
THERE WERE RED faces at Labour TD Eamonn Moloney’s office this morning as it emerged that a leaflet circulated at the weekend contained an awkward typo. The flyer, advertising the official opening of a community garden, reminds locals to “bring your willies”. A spokesperson for the TD confirmed that the leaflet had been issued at the weekend and it is “most definitely a typo”. Major hat tip to the Ray D’Arcy Show, their listener Alan Reid and @willhanafin for spotting it. Email “Dublin TD asks locals to ‘bring your willies’ to community event”. Feedback on “Dublin TD asks locals to ‘bring your willies’ to community event”.
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2012 thomasv@gitorious # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import sys, time, datetime, re, threading from electrum_vior.i18n import _, set_language from electrum_vior.util import print_error, print_msg import os.path, json, ast, traceback import webbrowser import shutil import StringIO import PyQt4 from PyQt4.QtGui import * from PyQt4.QtCore import * import PyQt4.QtCore as QtCore from electrum_vior.bitcoin import MIN_RELAY_TX_FEE, is_valid from electrum_vior.plugins import run_hook import icons_rc from electrum_vior.wallet import format_satoshis from electrum_vior import Transaction from electrum_vior import mnemonic from electrum_vior import util, bitcoin, commands, Interface, Wallet from electrum_vior import SimpleConfig, Wallet, WalletStorage from electrum_vior import bmp, pyqrnative from amountedit import AmountEdit from network_dialog import NetworkDialog from qrcodewidget import QRCodeWidget from decimal import Decimal import platform import httplib import socket import webbrowser import csv if platform.system() == 'Windows': MONOSPACE_FONT = 'Lucida Console' elif platform.system() == 'Darwin': MONOSPACE_FONT = 'Monaco' else: MONOSPACE_FONT = 'monospace' from electrum_vior import ELECTRUM_VERSION import re from util import * class StatusBarButton(QPushButton): def __init__(self, icon, tooltip, 
func): QPushButton.__init__(self, icon, '') self.setToolTip(tooltip) self.setFlat(True) self.setMaximumWidth(25) self.clicked.connect(func) self.func = func self.setIconSize(QSize(25,25)) def keyPressEvent(self, e): if e.key() == QtCore.Qt.Key_Return: apply(self.func,()) default_column_widths = { "history":[40,140,350,140], "contacts":[350,330], "receive": [370,200,130] } class ElectrumWindow(QMainWindow): def __init__(self, config, network, gui_object): QMainWindow.__init__(self) self.config = config self.network = network self.gui_object = gui_object self.tray = gui_object.tray self.go_lite = gui_object.go_lite self.lite = None self.create_status_bar() self.need_update = threading.Event() self.decimal_point = config.get('decimal_point', 8) self.num_zeros = int(config.get('num_zeros',0)) set_language(config.get('language')) self.funds_error = False self.completions = QStringListModel() self.tabs = tabs = QTabWidget(self) self.column_widths = self.config.get("column_widths_2", default_column_widths ) tabs.addTab(self.create_history_tab(), _('History') ) tabs.addTab(self.create_send_tab(), _('Send') ) tabs.addTab(self.create_receive_tab(), _('Receive') ) tabs.addTab(self.create_contacts_tab(), _('Contacts') ) tabs.addTab(self.create_console_tab(), _('Console') ) tabs.setMinimumSize(600, 400) tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) self.setCentralWidget(tabs) g = self.config.get("winpos-qt",[100, 100, 840, 400]) self.setGeometry(g[0], g[1], g[2], g[3]) if self.config.get("is_maximized"): self.showMaximized() self.setWindowIcon(QIcon(":icons/electrum-vior.png")) self.init_menubar() QShortcut(QKeySequence("Ctrl+W"), self, self.close) QShortcut(QKeySequence("Ctrl+Q"), self, self.close) QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet) QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: tabs.setCurrentIndex( (tabs.currentIndex() - 1 )%tabs.count() )) QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: tabs.setCurrentIndex( 
(tabs.currentIndex() + 1 )%tabs.count() )) for i in range(tabs.count()): QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: tabs.setCurrentIndex(i)) self.connect(self, QtCore.SIGNAL('update_status'), self.update_status) self.connect(self, QtCore.SIGNAL('banner_signal'), lambda: self.console.showMessage(self.network.banner) ) self.connect(self, QtCore.SIGNAL('transaction_signal'), lambda: self.notify_transactions() ) self.connect(self, QtCore.SIGNAL('send_tx2'), self.send_tx2) self.connect(self, QtCore.SIGNAL('send_tx3'), self.send_tx3) self.connect(self, QtCore.SIGNAL('payment_request_ok'), self.payment_request_ok) self.connect(self, QtCore.SIGNAL('payment_request_error'), self.payment_request_error) self.history_list.setFocus(True) # network callbacks if self.network: self.network.register_callback('updated', lambda: self.need_update.set()) self.network.register_callback('banner', lambda: self.emit(QtCore.SIGNAL('banner_signal'))) self.network.register_callback('disconnected', lambda: self.emit(QtCore.SIGNAL('update_status'))) self.network.register_callback('disconnecting', lambda: self.emit(QtCore.SIGNAL('update_status'))) self.network.register_callback('new_transaction', lambda: self.emit(QtCore.SIGNAL('transaction_signal'))) # set initial message self.console.showMessage(self.network.banner) self.wallet = None def update_account_selector(self): # account selector accounts = self.wallet.get_account_names() self.account_selector.clear() if len(accounts) > 1: self.account_selector.addItems([_("All accounts")] + accounts.values()) self.account_selector.setCurrentIndex(0) self.account_selector.show() else: self.account_selector.hide() def load_wallet(self, wallet): import electrum_vior as electrum self.wallet = wallet self.accounts_expanded = self.wallet.storage.get('accounts_expanded',{}) self.current_account = self.wallet.storage.get("current_account", None) title = 'Electrum-VIOR ' + self.wallet.electrum_version + ' - ' + self.wallet.storage.path if 
self.wallet.is_watching_only(): title += ' [%s]' % (_('watching only')) self.setWindowTitle( title ) self.update_wallet() # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized self.notify_transactions() self.update_account_selector() # update menus self.new_account_menu.setEnabled(self.wallet.can_create_accounts()) self.private_keys_menu.setEnabled(not self.wallet.is_watching_only()) self.password_menu.setEnabled(not self.wallet.is_watching_only()) self.seed_menu.setEnabled(self.wallet.has_seed()) self.mpk_menu.setEnabled(self.wallet.is_deterministic()) self.import_menu.setEnabled(self.wallet.can_import()) self.update_lock_icon() self.update_buttons_on_seed() self.update_console() run_hook('load_wallet', wallet) def open_wallet(self): wallet_folder = self.wallet.storage.path filename = unicode( QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder) ) if not filename: return storage = WalletStorage({'wallet_path': filename}) if not storage.file_exists: self.show_message("file not found "+ filename) return self.wallet.stop_threads() # create new wallet wallet = Wallet(storage) wallet.start_threads(self.network) self.load_wallet(wallet) def backup_wallet(self): import shutil path = self.wallet.storage.path wallet_folder = os.path.dirname(path) filename = unicode( QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder) ) if not filename: return new_path = os.path.join(wallet_folder, filename) if new_path != path: try: shutil.copy2(path, new_path) QMessageBox.information(None,"Wallet backup created", _("A copy of your wallet file was created in")+" '%s'" % str(new_path)) except (IOError, os.error), reason: QMessageBox.critical(None,"Unable to create backup", _("Electrum was unable to copy your wallet file to the specified location.")+"\n" + str(reason)) def new_wallet(self): import installwizard wallet_folder = 
os.path.dirname(self.wallet.storage.path) filename = unicode( QFileDialog.getSaveFileName(self, _('Enter a new file name'), wallet_folder) ) if not filename: return filename = os.path.join(wallet_folder, filename) storage = WalletStorage({'wallet_path': filename}) if storage.file_exists: QMessageBox.critical(None, "Error", _("File exists")) return wizard = installwizard.InstallWizard(self.config, self.network, storage) wallet = wizard.run('new') if wallet: self.load_wallet(wallet) def init_menubar(self): menubar = QMenuBar() file_menu = menubar.addMenu(_("&File")) file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open) file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New) file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs) file_menu.addAction(_("&Quit"), self.close) wallet_menu = menubar.addMenu(_("&Wallet")) wallet_menu.addAction(_("&New contact"), self.new_contact_dialog) self.new_account_menu = wallet_menu.addAction(_("&New account"), self.new_account_dialog) wallet_menu.addSeparator() self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog) self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog) self.mpk_menu = wallet_menu.addAction(_("&Master Public Keys"), self.show_master_public_keys) wallet_menu.addSeparator() labels_menu = wallet_menu.addMenu(_("&Labels")) labels_menu.addAction(_("&Import"), self.do_import_labels) labels_menu.addAction(_("&Export"), self.do_export_labels) self.private_keys_menu = wallet_menu.addMenu(_("&Private keys")) self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog) self.import_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey) self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog) wallet_menu.addAction(_("&Export History"), self.export_history_dialog) tools_menu = menubar.addMenu(_("&Tools")) # Settings / Preferences are all 
reserved keywords in OSX using this as work around tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog) tools_menu.addAction(_("&Network"), self.run_network_dialog) tools_menu.addAction(_("&Plugins"), self.plugins_dialog) tools_menu.addSeparator() tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message) #tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message) tools_menu.addSeparator() csv_transaction_menu = tools_menu.addMenu(_("&Create transaction")) csv_transaction_menu.addAction(_("&From CSV file"), self.do_process_from_csv_file) csv_transaction_menu.addAction(_("&From CSV text"), self.do_process_from_csv_text) raw_transaction_menu = tools_menu.addMenu(_("&Load transaction")) raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file) raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text) raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid) help_menu = menubar.addMenu(_("&Help")) help_menu.addAction(_("&About"), self.show_about) help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrum-vior.org")) help_menu.addSeparator() help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://electrum-vior.org/documentation.html")).setShortcut(QKeySequence.HelpContents) help_menu.addAction(_("&Report Bug"), self.show_report_bug) self.setMenuBar(menubar) def show_about(self): QMessageBox.about(self, "Electrum-VIOR", _("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" + _("Electrum's focus is speed, with low resource usage and simplifying ViorCoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. 
Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the ViorCoin system.")) def show_report_bug(self): QMessageBox.information(self, "Electrum-VIOR - " + _("Reporting Bugs"), _("Please report any bugs as issues on github:")+" <a href=\"https://github.com/pooler/electrum-vior/issues\">https://github.com/pooler/electrum-vior/issues</a>") def notify_transactions(self): if not self.network or not self.network.is_connected(): return print_error("Notifying GUI") if len(self.network.pending_transactions_for_notifications) > 0: # Combine the transactions if there are more then three tx_amount = len(self.network.pending_transactions_for_notifications) if(tx_amount >= 3): total_amount = 0 for tx in self.network.pending_transactions_for_notifications: is_relevant, is_mine, v, fee = self.wallet.get_tx_value(tx) if(v > 0): total_amount += v self.notify(_("%(txs)s new transactions received. Total amount received in the new transactions %(amount)s %(unit)s") \ % { 'txs' : tx_amount, 'amount' : self.format_amount(total_amount), 'unit' : self.base_unit()}) self.network.pending_transactions_for_notifications = [] else: for tx in self.network.pending_transactions_for_notifications: if tx: self.network.pending_transactions_for_notifications.remove(tx) is_relevant, is_mine, v, fee = self.wallet.get_tx_value(tx) if(v > 0): self.notify(_("New transaction received. 
%(amount)s %(unit)s") % { 'amount' : self.format_amount(v), 'unit' : self.base_unit()}) def notify(self, message): self.tray.showMessage("Electrum-VIOR", message, QSystemTrayIcon.Information, 20000) # custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user def getOpenFileName(self, title, filter = ""): directory = self.config.get('io_dir', unicode(os.path.expanduser('~'))) fileName = unicode( QFileDialog.getOpenFileName(self, title, directory, filter) ) if fileName and directory != os.path.dirname(fileName): self.config.set_key('io_dir', os.path.dirname(fileName), True) return fileName def getSaveFileName(self, title, filename, filter = ""): directory = self.config.get('io_dir', unicode(os.path.expanduser('~'))) path = os.path.join( directory, filename ) fileName = unicode( QFileDialog.getSaveFileName(self, title, path, filter) ) if fileName and directory != os.path.dirname(fileName): self.config.set_key('io_dir', os.path.dirname(fileName), True) return fileName def close(self): QMainWindow.close(self) run_hook('close_main_window') def connect_slots(self, sender): self.connect(sender, QtCore.SIGNAL('timersignal'), self.timer_actions) self.previous_payto_e='' def timer_actions(self): if self.need_update.is_set(): self.update_wallet() self.need_update.clear() run_hook('timer_actions') def format_amount(self, x, is_diff=False, whitespaces=False): return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces) def read_amount(self, x): if x in['.', '']: return None p = pow(10, self.decimal_point) return int( p * Decimal(x) ) def base_unit(self): assert self.decimal_point in [5,8] return "VIOR" if self.decimal_point == 8 else "mVIOR" def update_status(self): if self.network is None or not self.network.is_running(): text = _("Offline") icon = QIcon(":icons/status_disconnected.png") elif self.network.is_connected(): if not self.wallet.up_to_date: text = _("Synchronizing...") icon = 
QIcon(":icons/status_waiting.png") elif self.network.server_lag > 1: text = _("Server is lagging (%d blocks)"%self.network.server_lag) icon = QIcon(":icons/status_lagging.png") else: c, u = self.wallet.get_account_balance(self.current_account) text = _( "Balance" ) + ": %s "%( self.format_amount(c) ) + self.base_unit() if u: text += " [%s unconfirmed]"%( self.format_amount(u,True).strip() ) # append fiat balance and price from exchange rate plugin r = {} run_hook('get_fiat_status_text', c+u, r) quote = r.get(0) if quote: text += "%s"%quote self.tray.setToolTip(text) icon = QIcon(":icons/status_connected.png") else: text = _("Not connected") icon = QIcon(":icons/status_disconnected.png") self.balance_label.setText(text) self.status_button.setIcon( icon ) def update_wallet(self): self.update_status() if self.wallet.up_to_date or not self.network or not self.network.is_connected(): self.update_history_tab() self.update_receive_tab() self.update_contacts_tab() self.update_completions() def create_history_tab(self): self.history_list = l = MyTreeWidget(self) l.setColumnCount(5) for i,width in enumerate(self.column_widths['history']): l.setColumnWidth(i, width) l.setHeaderLabels( [ '', _('Date'), _('Description') , _('Amount'), _('Balance')] ) self.connect(l, SIGNAL('itemDoubleClicked(QTreeWidgetItem*, int)'), self.tx_label_clicked) self.connect(l, SIGNAL('itemChanged(QTreeWidgetItem*, int)'), self.tx_label_changed) l.customContextMenuRequested.connect(self.create_history_menu) return l def create_history_menu(self, position): self.history_list.selectedIndexes() item = self.history_list.currentItem() be = self.config.get('block_explorer', 'explorer.viorcoin.net') if be == 'explorer.viorcoin.net': block_explorer = 'http://explorer.viorcoin.net/tx/' elif be == 'block-explorer.com': block_explorer = 'http://block-explorer.com/tx/' elif be == 'Blockr.io': block_explorer = 'https://vior.blockr.io/tx/info/' if not item: return tx_hash = str(item.data(0, 
def tx_label_clicked(self, item, column):
    """Open the history row's description column for in-place editing.

    Only fires for the description column (2) of a selected row.  The
    is_edit flag suppresses tx_label_changed re-entrancy while the
    editable flag is temporarily set.
    """
    if column != 2 or not item.isSelected():
        return
    base_flags = (Qt.ItemIsSelectable | Qt.ItemIsUserCheckable |
                  Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
    self.is_edit = True
    item.setFlags(base_flags | Qt.ItemIsEditable)
    self.history_list.editItem(item, column)
    item.setFlags(base_flags)
    self.is_edit = False
def current_item_changed(self, a):
    """Forward a tree-widget selection change to interested plugins."""
    run_hook('current_item_changed', a)
tx_hash) item.setToolTip(0, "%d %s\nTxId:%s" % (conf, _('Confirmations'), tx_hash) ) if is_default_label: item.setForeground(2, QBrush(QColor('grey'))) item.setIcon(0, icon) self.history_list.insertTopLevelItem(0,item) self.history_list.setCurrentItem(self.history_list.topLevelItem(0)) run_hook('history_tab_update') def create_send_tab(self): w = QWidget() grid = QGridLayout() grid.setSpacing(8) grid.setColumnMinimumWidth(3,300) grid.setColumnStretch(5,1) self.payto_e = QLineEdit() grid.addWidget(QLabel(_('Pay to')), 1, 0) grid.addWidget(self.payto_e, 1, 1, 1, 3) grid.addWidget(HelpButton(_('Recipient of the funds.') + '\n\n' + _('You may enter a ViorCoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a ViorCoin address)')), 1, 4) completer = QCompleter() completer.setCaseSensitivity(False) self.payto_e.setCompleter(completer) completer.setModel(self.completions) self.message_e = QLineEdit() grid.addWidget(QLabel(_('Description')), 2, 0) grid.addWidget(self.message_e, 2, 1, 1, 3) grid.addWidget(HelpButton(_('Description of the transaction (not mandatory).') + '\n\n' + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')), 2, 4) self.from_label = QLabel(_('From')) grid.addWidget(self.from_label, 3, 0) self.from_list = QTreeWidget(self) self.from_list.setColumnCount(2) self.from_list.setColumnWidth(0, 350) self.from_list.setColumnWidth(1, 50) self.from_list.setHeaderHidden (True) self.from_list.setMaximumHeight(80) grid.addWidget(self.from_list, 3, 1, 1, 3) self.set_pay_from([]) self.amount_e = AmountEdit(self.base_unit) grid.addWidget(QLabel(_('Amount')), 4, 0) grid.addWidget(self.amount_e, 4, 1, 1, 2) grid.addWidget(HelpButton( _('Amount to be sent.') + '\n\n' \ + _('The amount will be displayed in red if you do not have enough funds in your wallet. 
Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') \ + '\n\n' + _('Keyboard shortcut: type "!" to send all your coins.')), 4, 3) self.fee_e = AmountEdit(self.base_unit) grid.addWidget(QLabel(_('Fee')), 5, 0) grid.addWidget(self.fee_e, 5, 1, 1, 2) grid.addWidget(HelpButton( _('ViorCoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\ + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\ + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')), 5, 3) run_hook('exchange_rate_button', grid) self.send_button = EnterButton(_("Send"), self.do_send) grid.addWidget(self.send_button, 6, 1) b = EnterButton(_("Clear"),self.do_clear) grid.addWidget(b, 6, 2) self.payto_sig = QLabel('') grid.addWidget(self.payto_sig, 7, 0, 1, 4) QShortcut(QKeySequence("Up"), w, w.focusPreviousChild) QShortcut(QKeySequence("Down"), w, w.focusNextChild) w.setLayout(grid) w2 = QWidget() vbox = QVBoxLayout() vbox.addWidget(w) vbox.addStretch(1) w2.setLayout(vbox) def entry_changed( is_fee ): self.funds_error = False if self.amount_e.is_shortcut: self.amount_e.is_shortcut = False sendable = self.get_sendable_balance() # there is only one output because we are completely spending inputs inputs, total, fee = self.wallet.choose_tx_inputs( sendable, 0, 1, self.get_payment_sources()) fee = self.wallet.estimated_fee(inputs, 1) amount = total - fee self.amount_e.setText( self.format_amount(amount) ) self.fee_e.setText( self.format_amount( fee ) ) return amount = self.read_amount(str(self.amount_e.text())) fee = self.read_amount(str(self.fee_e.text())) if not is_fee: fee = None if amount is None: return # assume that there will be 2 outputs (one for change) inputs, total, fee = self.wallet.choose_tx_inputs(amount, fee, 2, 
self.get_payment_sources()) if not is_fee: self.fee_e.setText( self.format_amount( fee ) ) if inputs: palette = QPalette() palette.setColor(self.amount_e.foregroundRole(), QColor('black')) text = "" else: palette = QPalette() palette.setColor(self.amount_e.foregroundRole(), QColor('red')) self.funds_error = True text = _( "Not enough funds" ) c, u = self.wallet.get_frozen_balance() if c+u: text += ' (' + self.format_amount(c+u).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')' self.statusBar().showMessage(text) self.amount_e.setPalette(palette) self.fee_e.setPalette(palette) self.amount_e.textChanged.connect(lambda: entry_changed(False) ) self.fee_e.textChanged.connect(lambda: entry_changed(True) ) run_hook('create_send_tab', grid) return w2 def set_pay_from(self, l): self.pay_from = l self.from_list.clear() self.from_label.setHidden(len(self.pay_from) == 0) self.from_list.setHidden(len(self.pay_from) == 0) for addr in self.pay_from: c, u = self.wallet.get_addr_balance(addr) balance = self.format_amount(c + u) self.from_list.addTopLevelItem(QTreeWidgetItem( [addr, balance] )) def update_completions(self): l = [] for addr,label in self.wallet.labels.items(): if addr in self.wallet.addressbook: l.append( label + ' <' + addr + '>') run_hook('update_completions', l) self.completions.setStringList(l) def protected(func): return lambda s, *args: s.do_protect(func, args) def do_send(self): label = unicode( self.message_e.text() ) if self.gui_object.payment_request: outputs = self.gui_object.payment_request.outputs amount = self.gui_object.payment_request.get_amount() else: r = unicode( self.payto_e.text() ) r = r.strip() # label or alias, with address in brackets m = re.match('(.*?)\s*\<([1-9A-HJ-NP-Za-km-z]{26,})\>', r) to_address = m.group(2) if m else r if not is_valid(to_address): QMessageBox.warning(self, _('Error'), _('Invalid ViorCoin Address') + ':\n' + to_address, _('OK')) return try: amount = self.read_amount(unicode( self.amount_e.text())) except 
def waiting_dialog(self, message):
    """Show a modeless "please wait" dialog and return it.

    The caller keeps the returned dialog and closes it (accept()) when
    the background work finishes.
    """
    dialog = QDialog(self)
    dialog.setWindowTitle('Please wait')
    layout = QVBoxLayout(dialog)
    layout.addWidget(QLabel(message))
    dialog.show()
    return dialog
def prepare_for_payment_request(self):
    """Lock the send tab while a BIP70 payment request is fetched.

    The payto/amount fields are made read-only and visually flattened;
    payment_request_ok/error fill them in once the request resolves.
    """
    flat = "QWidget { background-color:none;border:none;}"
    self.tabs.setCurrentIndex(1)  # jump to the Send tab
    for field in (self.payto_e, self.amount_e):
        field.setReadOnly(True)
        field.setStyleSheet(flat)
    self.payto_e.setText(_("please wait..."))
    return True
def set_addrs_frozen(self, addrs, freeze):
    """Freeze (or unfreeze) each given address, then redraw the receive tab.

    Blank entries are skipped; addresses already in the requested state
    are left untouched so the wallet is not asked to do redundant work.
    """
    frozen = self.wallet.frozen_addresses
    for addr in addrs:
        if not addr:
            continue
        currently_frozen = addr in frozen
        if currently_frozen and not freeze:
            self.wallet.unfreeze(addr)
        elif freeze and not currently_frozen:
            self.wallet.freeze(addr)
    self.update_receive_tab()
def save_column_widths(self):
    """Persist the current column widths of the three main tree views.

    The last column of each view is intentionally omitted — it stretches
    to fill the remaining space.  Results are stored under the
    "column_widths_2" config key.
    """
    views = (("receive", self.receive_list),
             ("history", self.history_list),
             ("contacts", self.contacts_list))
    for key, widget in views:
        self.column_widths[key] = [widget.columnWidth(i)
                                   for i in range(widget.columnCount() - 1)]
    self.config.set_key("column_widths_2", self.column_widths, True)
def delete_pending_account(self, k):
    """Remove pending account *k* from the wallet and redraw the receive tab."""
    self.wallet.delete_pending_account(k)
    self.update_receive_tab()
def get_payment_sources(self):
    """Return the addresses to spend from.

    An explicit "pay from" selection (set via send_from_addresses)
    takes precedence; otherwise every address of the active account
    is eligible.
    """
    return self.pay_from or self.wallet.get_account_addresses(self.current_account)
def update_receive_item(self, item):
    """Fill one receive-tab row (label, balance, frozen highlight) in place.

    Column 0 holds the address; slot 32 marks the label as editable.
    Rows for addresses the wallet does not own get no balance.
    """
    item.setFont(0, QFont(MONOSPACE_FONT))
    address = str(item.data(0, 0).toString())
    item.setData(1, 0, self.wallet.labels.get(address, ''))
    item.setData(0, 32, True)  # label is editable
    run_hook('update_receive_item', address, item)
    if not self.wallet.is_mine(address):
        return
    confirmed, unconfirmed = self.wallet.get_addr_balance(address)
    item.setData(2, 0, self.format_amount(confirmed + unconfirmed))
    if address in self.wallet.frozen_addresses:
        item.setBackgroundColor(0, QColor('lightblue'))
def change_account(self, s):
    """Switch the active account from the status-bar selector text *s*.

    The special "All accounts" entry clears the filter; any other entry
    is mapped back to its account key via the wallet's name table.
    Afterwards the affected views are refreshed.
    """
    if s == _("All accounts"):
        self.current_account = None
    else:
        # Map the displayed name back to its account key.
        for key, name in self.wallet.get_account_names().items():
            if name == s:
                self.current_account = key
    self.update_history_tab()
    self.update_status()
    self.update_receive_tab()
def update_buttons_on_seed(self):
    """Adapt status-bar buttons and the send button to the wallet type.

    Seedless wallets hide the seed button; watching-only wallets hide
    the password button and can only create unsigned transactions.
    """
    if self.wallet.has_seed():
        self.seed_button.show()
    else:
        self.seed_button.hide()
    if self.wallet.is_watching_only():
        self.password_button.hide()
        self.send_button.setText(_("Create unsigned transaction"))
    else:
        self.password_button.show()
        self.send_button.setText(_("Send"))
vbox.addWidget(QLabel(_('New Contact')+':')) grid = QGridLayout() line1 = QLineEdit() line2 = QLineEdit() grid.addWidget(QLabel(_("Address")), 1, 0) grid.addWidget(line1, 1, 1) grid.addWidget(QLabel(_("Name")), 2, 0) grid.addWidget(line2, 2, 1) vbox.addLayout(grid) vbox.addLayout(ok_cancel_buttons(d)) if not d.exec_(): return address = str(line1.text()) label = unicode(line2.text()) if not is_valid(address): QMessageBox.warning(self, _('Error'), _('Invalid Address'), _('OK')) return self.wallet.add_contact(address) if label: self.wallet.set_label(address, label) self.update_contacts_tab() self.update_history_tab() self.update_completions() self.tabs.setCurrentIndex(3) @protected def new_account_dialog(self, password): dialog = QDialog(self) dialog.setModal(1) dialog.setWindowTitle(_("New Account")) vbox = QVBoxLayout() vbox.addWidget(QLabel(_('Account name')+':')) e = QLineEdit() vbox.addWidget(e) msg = _("Note: Newly created accounts are 'pending' until they receive viorcoins.") + " " \ + _("You will need to wait for 2 confirmations until the correct balance is displayed and more addresses are created for that account.") l = QLabel(msg) l.setWordWrap(True) vbox.addWidget(l) vbox.addLayout(ok_cancel_buttons(dialog)) dialog.setLayout(vbox) r = dialog.exec_() if not r: return name = str(e.text()) if not name: return self.wallet.create_pending_account(name, password) self.update_receive_tab() self.tabs.setCurrentIndex(2) def show_master_public_keys(self): dialog = QDialog(self) dialog.setModal(1) dialog.setWindowTitle(_("Master Public Keys")) main_layout = QGridLayout() mpk_dict = self.wallet.get_master_public_keys() i = 0 for key, value in mpk_dict.items(): main_layout.addWidget(QLabel(key), i, 0) mpk_text = QTextEdit() mpk_text.setReadOnly(True) mpk_text.setMaximumHeight(170) mpk_text.setText(value) main_layout.addWidget(mpk_text, i + 1, 0) i += 2 vbox = QVBoxLayout() vbox.addLayout(main_layout) vbox.addLayout(close_button(dialog)) dialog.setLayout(vbox) 
def do_protect(self, func, args):
    """Run *func* after prompting for the wallet password when needed.

    Used by the @protected decorator: for encrypted wallets the user is
    asked for the password (a cancelled dialog aborts the call); for
    unencrypted wallets None is passed.  The sentinel args == (False,)
    means "no extra positional arguments".
    """
    if self.wallet.use_encryption:
        password = self.password_dialog()
        if not password:
            return  # user cancelled the prompt
    else:
        password = None
    if args == (False,):
        call_args = (self, password)
    else:
        call_args = (self,) + args + (password,)
    apply(func, call_args)
QLabel(_("Address") + ': ' + address)) vbox.addWidget( QLabel(_("Public key") + ':')) keys = QTextEdit() keys.setReadOnly(True) keys.setText('\n'.join(pubkey_list)) vbox.addWidget(keys) #vbox.addWidget( QRCodeWidget('\n'.join(pk_list)) ) vbox.addLayout(close_button(d)) d.setLayout(vbox) d.exec_() @protected def show_private_key(self, address, password): if not address: return try: pk_list = self.wallet.get_private_key(address, password) except Exception as e: traceback.print_exc(file=sys.stdout) self.show_message(str(e)) return d = QDialog(self) d.setMinimumSize(600, 200) d.setModal(1) vbox = QVBoxLayout() vbox.addWidget( QLabel(_("Address") + ': ' + address)) vbox.addWidget( QLabel(_("Private key") + ':')) keys = QTextEdit() keys.setReadOnly(True) keys.setText('\n'.join(pk_list)) vbox.addWidget(keys) vbox.addWidget( QRCodeWidget('\n'.join(pk_list)) ) vbox.addLayout(close_button(d)) d.setLayout(vbox) d.exec_() @protected def do_sign(self, address, message, signature, password): message = unicode(message.toPlainText()) message = message.encode('utf-8') try: sig = self.wallet.sign_message(str(address.text()), message, password) signature.setText(sig) except Exception as e: self.show_message(str(e)) def do_verify(self, address, message, signature): message = unicode(message.toPlainText()) message = message.encode('utf-8') if bitcoin.verify_message(address.text(), str(signature.toPlainText()), message): self.show_message(_("Signature verified")) else: self.show_message(_("Error: wrong signature")) def sign_verify_message(self, address=''): d = QDialog(self) d.setModal(1) d.setWindowTitle(_('Sign/verify Message')) d.setMinimumSize(410, 290) layout = QGridLayout(d) message_e = QTextEdit() layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) address_e = QLineEdit() address_e.setText(address) layout.addWidget(QLabel(_('Address')), 2, 0) layout.addWidget(address_e, 2, 1) signature_e = QTextEdit() 
layout.addWidget(QLabel(_('Signature')), 3, 0) layout.addWidget(signature_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Sign")) b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Verify")) b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() @protected def do_decrypt(self, message_e, pubkey_e, encrypted_e, password): try: decrypted = self.wallet.decrypt_message(str(pubkey_e.text()), str(encrypted_e.toPlainText()), password) message_e.setText(decrypted) except Exception as e: self.show_message(str(e)) def do_encrypt(self, message_e, pubkey_e, encrypted_e): message = unicode(message_e.toPlainText()) message = message.encode('utf-8') try: encrypted = bitcoin.encrypt_message(message, str(pubkey_e.text())) encrypted_e.setText(encrypted) except Exception as e: self.show_message(str(e)) def encrypt_message(self, address = ''): d = QDialog(self) d.setModal(1) d.setWindowTitle(_('Encrypt/decrypt Message')) d.setMinimumSize(610, 490) layout = QGridLayout(d) message_e = QTextEdit() layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) pubkey_e = QLineEdit() if address: pubkey = self.wallet.getpubkeys(address)[0] pubkey_e.setText(pubkey) layout.addWidget(QLabel(_('Public key')), 2, 0) layout.addWidget(pubkey_e, 2, 1) encrypted_e = QTextEdit() layout.addWidget(QLabel(_('Encrypted')), 3, 0) layout.addWidget(encrypted_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Encrypt")) b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Decrypt")) b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) 
hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() def question(self, msg): return QMessageBox.question(self, _('Message'), msg, QMessageBox.Yes | QMessageBox.No, QMessageBox.No) == QMessageBox.Yes def show_message(self, msg): QMessageBox.information(self, _('Message'), msg, _('OK')) def password_dialog(self ): d = QDialog(self) d.setModal(1) d.setWindowTitle(_("Enter Password")) pw = QLineEdit() pw.setEchoMode(2) vbox = QVBoxLayout() msg = _('Please enter your password') vbox.addWidget(QLabel(msg)) grid = QGridLayout() grid.setSpacing(8) grid.addWidget(QLabel(_('Password')), 1, 0) grid.addWidget(pw, 1, 1) vbox.addLayout(grid) vbox.addLayout(ok_cancel_buttons(d)) d.setLayout(vbox) run_hook('password_dialog', pw, grid, 1) if not d.exec_(): return return unicode(pw.text()) def tx_from_text(self, txt): "json or raw hexadecimal" try: txt.decode('hex') tx = Transaction(txt) return tx except Exception: pass try: tx_dict = json.loads(str(txt)) assert "hex" in tx_dict.keys() tx = Transaction(tx_dict["hex"]) if tx_dict.has_key("input_info"): input_info = json.loads(tx_dict['input_info']) tx.add_input_info(input_info) return tx except Exception: traceback.print_exc(file=sys.stdout) pass QMessageBox.critical(None, _("Unable to parse transaction"), _("Electrum was unable to parse your transaction")) def read_tx_from_file(self): fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn") if not fileName: return try: with open(fileName, "r") as f: file_content = f.read() except (ValueError, IOError, os.error), reason: QMessageBox.critical(None, _("Unable to read file or no transaction found"), _("Electrum was unable to open your transaction file") + "\n" + str(reason)) return self.tx_from_text(file_content) @protected def sign_raw_transaction(self, tx, input_info, password): self.wallet.signrawtransaction(tx, input_info, [], password) def do_process_from_text(self): text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load 
transaction")) if not text: return tx = self.tx_from_text(text) if tx: self.show_transaction(tx) def do_process_from_file(self): tx = self.read_tx_from_file() if tx: self.show_transaction(tx) def do_process_from_txid(self): from electrum_vior import transaction txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':') if ok and txid: r = self.network.synchronous_get([ ('blockchain.transaction.get',[str(txid)]) ])[0] if r: tx = transaction.Transaction(r) if tx: self.show_transaction(tx) else: self.show_message("unknown transaction") def do_process_from_csvReader(self, csvReader): outputs = [] errors = [] errtext = "" try: for position, row in enumerate(csvReader): address = row[0] if not is_valid(address): errors.append((position, address)) continue amount = Decimal(row[1]) amount = int(100000000*amount) outputs.append((address, amount)) except (ValueError, IOError, os.error), reason: QMessageBox.critical(None, _("Unable to read file or no transaction found"), _("Electrum was unable to open your transaction file") + "\n" + str(reason)) return if errors != []: for x in errors: errtext += "CSV Row " + str(x[0]+1) + ": " + x[1] + "\n" QMessageBox.critical(None, _("Invalid Addresses"), _("ABORTING! 
Invalid Addresses found:") + "\n\n" + errtext) return try: tx = self.wallet.make_unsigned_transaction(outputs, None, None) except Exception as e: self.show_message(str(e)) return self.show_transaction(tx) def do_process_from_csv_file(self): fileName = self.getOpenFileName(_("Select your transaction CSV"), "*.csv") if not fileName: return try: with open(fileName, "r") as f: csvReader = csv.reader(f) self.do_process_from_csvReader(csvReader) except (ValueError, IOError, os.error), reason: QMessageBox.critical(None, _("Unable to read file or no transaction found"), _("Electrum was unable to open your transaction file") + "\n" + str(reason)) return def do_process_from_csv_text(self): text = text_dialog(self, _('Input CSV'), _("Please enter a list of outputs.") + '\n' \ + _("Format: address, amount. One output per line"), _("Load CSV")) if not text: return f = StringIO.StringIO(text) csvReader = csv.reader(f) self.do_process_from_csvReader(csvReader) @protected def export_privkeys_dialog(self, password): if self.wallet.is_watching_only(): self.show_message(_("This is a watching-only wallet")) return d = QDialog(self) d.setWindowTitle(_('Private keys')) d.setMinimumSize(850, 300) vbox = QVBoxLayout(d) msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."), _("Exposing a single private key can compromise your entire wallet!"), _("In particular, DO NOT use 'redeem private key' services proposed by third parties.")) vbox.addWidget(QLabel(msg)) e = QTextEdit() e.setReadOnly(True) vbox.addWidget(e) defaultname = 'electrum-vior-private-keys.csv' select_msg = _('Select file to export your private keys to') hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg) vbox.addLayout(hbox) h, b = ok_cancel_buttons2(d, _('Export')) b.setEnabled(False) vbox.addLayout(h) private_keys = {} addresses = self.wallet.addresses(True) done = False def privkeys_thread(): for addr in addresses: time.sleep(0.1) if done: break private_keys[addr] = 
"\n".join(self.wallet.get_private_key(addr, password)) d.emit(SIGNAL('computing_privkeys')) d.emit(SIGNAL('show_privkeys')) def show_privkeys(): s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items())) e.setText(s) b.setEnabled(True) d.connect(d, QtCore.SIGNAL('computing_privkeys'), lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses)))) d.connect(d, QtCore.SIGNAL('show_privkeys'), show_privkeys) threading.Thread(target=privkeys_thread).start() if not d.exec_(): done = True return filename = filename_e.text() if not filename: return try: self.do_export_privkeys(filename, private_keys, csv_button.isChecked()) except (IOError, os.error), reason: export_error_label = _("Electrum was unable to produce a private key-export.") QMessageBox.critical(None, _("Unable to create csv"), export_error_label + "\n" + str(reason)) except Exception as e: self.show_message(str(e)) return self.show_message(_("Private keys exported.")) def do_export_privkeys(self, fileName, pklist, is_csv): with open(fileName, "w+") as f: if is_csv: transaction = csv.writer(f) transaction.writerow(["address", "private_key"]) for addr, pk in pklist.items(): transaction.writerow(["%34s"%addr,pk]) else: import json f.write(json.dumps(pklist, indent = 4)) def do_import_labels(self): labelsFile = self.getOpenFileName(_("Open labels file"), "*.dat") if not labelsFile: return try: f = open(labelsFile, 'r') data = f.read() f.close() for key, value in json.loads(data).items(): self.wallet.set_label(key, value) QMessageBox.information(None, _("Labels imported"), _("Your labels were imported from")+" '%s'" % str(labelsFile)) except (IOError, os.error), reason: QMessageBox.critical(None, _("Unable to import labels"), _("Electrum was unable to import your labels.")+"\n" + str(reason)) def do_export_labels(self): labels = self.wallet.labels try: fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum-vior_labels.dat', "*.dat") if fileName: with 
open(fileName, 'w+') as f: json.dump(labels, f) QMessageBox.information(None, _("Labels exported"), _("Your labels where exported to")+" '%s'" % str(fileName)) except (IOError, os.error), reason: QMessageBox.critical(None, _("Unable to export labels"), _("Electrum was unable to export your labels.")+"\n" + str(reason)) def export_history_dialog(self): d = QDialog(self) d.setWindowTitle(_('Export History')) d.setMinimumSize(400, 200) vbox = QVBoxLayout(d) defaultname = os.path.expanduser('~/electrum-vior-history.csv') select_msg = _('Select file to export your wallet transactions to') hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg) vbox.addLayout(hbox) vbox.addStretch(1) h, b = ok_cancel_buttons2(d, _('Export')) vbox.addLayout(h) if not d.exec_(): return filename = filename_e.text() if not filename: return try: self.do_export_history(self.wallet, filename, csv_button.isChecked()) except (IOError, os.error), reason: export_error_label = _("Electrum was unable to produce a transaction export.") QMessageBox.critical(self, _("Unable to export history"), export_error_label + "\n" + str(reason)) return QMessageBox.information(self,_("History exported"), _("Your wallet history has been successfully exported.")) def do_export_history(self, wallet, fileName, is_csv): history = wallet.get_tx_history() lines = [] for item in history: tx_hash, confirmations, is_mine, value, fee, balance, timestamp = item if confirmations: if timestamp is not None: try: time_string = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3] except [RuntimeError, TypeError, NameError] as reason: time_string = "unknown" pass else: time_string = "unknown" else: time_string = "pending" if value is not None: value_string = format_satoshis(value, True) else: value_string = '--' if fee is not None: fee_string = format_satoshis(fee, True) else: fee_string = '0' if tx_hash: label, is_default_label = wallet.get_label(tx_hash) label = label.encode('utf-8') 
else: label = "" balance_string = format_satoshis(balance, False) if is_csv: lines.append([tx_hash, label, confirmations, value_string, fee_string, balance_string, time_string]) else: lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string}) with open(fileName, "w+") as f: if is_csv: transaction = csv.writer(f) transaction.writerow(["transaction_hash","label", "confirmations", "value", "fee", "balance", "timestamp"]) for line in lines: transaction.writerow(line) else: import json f.write(json.dumps(lines, indent = 4)) def sweep_key_dialog(self): d = QDialog(self) d.setWindowTitle(_('Sweep private keys')) d.setMinimumSize(600, 300) vbox = QVBoxLayout(d) vbox.addWidget(QLabel(_("Enter private keys"))) keys_e = QTextEdit() keys_e.setTabChangesFocus(True) vbox.addWidget(keys_e) h, address_e = address_field(self.wallet.addresses()) vbox.addLayout(h) vbox.addStretch(1) hbox, button = ok_cancel_buttons2(d, _('Sweep')) vbox.addLayout(hbox) button.setEnabled(False) def get_address(): addr = str(address_e.text()) if bitcoin.is_address(addr): return addr def get_pk(): pk = str(keys_e.toPlainText()).strip() if Wallet.is_private_key(pk): return pk.split() f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None) keys_e.textChanged.connect(f) address_e.textChanged.connect(f) if not d.exec_(): return fee = self.wallet.fee tx = Transaction.sweep(get_pk(), self.network, get_address(), fee) self.show_transaction(tx) @protected def do_import_privkey(self, password): if not self.wallet.imported_keys: r = QMessageBox.question(None, _('Warning'), '<b>'+_('Warning') +':\n</b><br/>'+ _('Imported keys are not recoverable from seed.') + ' ' \ + _('If you ever need to restore your wallet from its seed, these keys will be lost.') + '<p>' \ + _('Are you sure you understand what you are doing?'), 3, 4) if r == 4: return text = text_dialog(self, _('Import private keys'), _("Enter private keys")+':', _("Import")) if not text: return 
text = str(text).split() badkeys = [] addrlist = [] for key in text: try: addr = self.wallet.import_key(key, password) except Exception as e: badkeys.append(key) continue if not addr: badkeys.append(key) else: addrlist.append(addr) if addrlist: QMessageBox.information(self, _('Information'), _("The following addresses were added") + ':\n' + '\n'.join(addrlist)) if badkeys: QMessageBox.critical(self, _('Error'), _("The following inputs could not be imported") + ':\n'+ '\n'.join(badkeys)) self.update_receive_tab() self.update_history_tab() def settings_dialog(self): d = QDialog(self) d.setWindowTitle(_('Electrum Settings')) d.setModal(1) vbox = QVBoxLayout() grid = QGridLayout() grid.setColumnStretch(0,1) nz_label = QLabel(_('Display zeros') + ':') grid.addWidget(nz_label, 0, 0) nz_e = AmountEdit(None,True) nz_e.setText("%d"% self.num_zeros) grid.addWidget(nz_e, 0, 1) msg = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"') grid.addWidget(HelpButton(msg), 0, 2) if not self.config.is_modifiable('num_zeros'): for w in [nz_e, nz_label]: w.setEnabled(False) lang_label=QLabel(_('Language') + ':') grid.addWidget(lang_label, 1, 0) lang_combo = QComboBox() from electrum_vior.i18n import languages lang_combo.addItems(languages.values()) try: index = languages.keys().index(self.config.get("language",'')) except Exception: index = 0 lang_combo.setCurrentIndex(index) grid.addWidget(lang_combo, 1, 1) grid.addWidget(HelpButton(_('Select which language is used in the GUI (after restart).')+' '), 1, 2) if not self.config.is_modifiable('language'): for w in [lang_combo, lang_label]: w.setEnabled(False) fee_label = QLabel(_('Transaction fee') + ':') grid.addWidget(fee_label, 2, 0) fee_e = AmountEdit(self.base_unit) fee_e.setText(self.format_amount(self.wallet.fee).strip()) grid.addWidget(fee_e, 2, 1) msg = _('Fee per kilobyte of transaction.') + ' ' \ + _('Recommended value') + ': ' + self.format_amount(100000) 
grid.addWidget(HelpButton(msg), 2, 2) if not self.config.is_modifiable('fee_per_kb'): for w in [fee_e, fee_label]: w.setEnabled(False) units = ['VIOR', 'mVIOR'] unit_label = QLabel(_('Base unit') + ':') grid.addWidget(unit_label, 3, 0) unit_combo = QComboBox() unit_combo.addItems(units) unit_combo.setCurrentIndex(units.index(self.base_unit())) grid.addWidget(unit_combo, 3, 1) grid.addWidget(HelpButton(_('Base unit of your wallet.')\ + '\n1VIOR=1000mVIOR.\n' \ + _(' These settings affects the fields in the Send tab')+' '), 3, 2) usechange_cb = QCheckBox(_('Use change addresses')) usechange_cb.setChecked(self.wallet.use_change) grid.addWidget(usechange_cb, 4, 0) grid.addWidget(HelpButton(_('Using change addresses makes it more difficult for other people to track your transactions.')+' '), 4, 2) if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False) block_explorers = ['explorer.viorcoin.net', 'block-explorer.com', 'Blockr.io'] block_ex_label = QLabel(_('Online Block Explorer') + ':') grid.addWidget(block_ex_label, 5, 0) block_ex_combo = QComboBox() block_ex_combo.addItems(block_explorers) block_ex_combo.setCurrentIndex(block_explorers.index(self.config.get('block_explorer', 'explorer.viorcoin.net'))) grid.addWidget(block_ex_combo, 5, 1) grid.addWidget(HelpButton(_('Choose which online block explorer to use for functions that open a web browser')+' '), 5, 2) show_tx = self.config.get('show_before_broadcast', False) showtx_cb = QCheckBox(_('Show before broadcast')) showtx_cb.setChecked(show_tx) grid.addWidget(showtx_cb, 6, 0) grid.addWidget(HelpButton(_('Display the details of your transactions before broadcasting it.')), 6, 2) vbox.addLayout(grid) vbox.addStretch(1) vbox.addLayout(ok_cancel_buttons(d)) d.setLayout(vbox) # run the dialog if not d.exec_(): return fee = unicode(fee_e.text()) try: fee = self.read_amount(fee) except Exception: QMessageBox.warning(self, _('Error'), _('Invalid value') +': %s'%fee, _('OK')) return 
self.wallet.set_fee(fee) nz = unicode(nz_e.text()) try: nz = int( nz ) if nz>8: nz=8 except Exception: QMessageBox.warning(self, _('Error'), _('Invalid value')+':%s'%nz, _('OK')) return if self.num_zeros != nz: self.num_zeros = nz self.config.set_key('num_zeros', nz, True) self.update_history_tab() self.update_receive_tab() usechange_result = usechange_cb.isChecked() if self.wallet.use_change != usechange_result: self.wallet.use_change = usechange_result self.wallet.storage.put('use_change', self.wallet.use_change) if showtx_cb.isChecked() != show_tx: self.config.set_key('show_before_broadcast', not show_tx) unit_result = units[unit_combo.currentIndex()] if self.base_unit() != unit_result: self.decimal_point = 8 if unit_result == 'VIOR' else 5 self.config.set_key('decimal_point', self.decimal_point, True) self.update_history_tab() self.update_status() need_restart = False lang_request = languages.keys()[lang_combo.currentIndex()] if lang_request != self.config.get('language'): self.config.set_key("language", lang_request, True) need_restart = True be_result = block_explorers[block_ex_combo.currentIndex()] self.config.set_key('block_explorer', be_result, True) run_hook('close_settings_dialog') if need_restart: QMessageBox.warning(self, _('Success'), _('Please restart Electrum to activate the new GUI settings'), _('OK')) def run_network_dialog(self): if not self.network: return NetworkDialog(self.wallet.network, self.config, self).do_exec() def closeEvent(self, event): self.tray.hide() self.config.set_key("is_maximized", self.isMaximized()) if not self.isMaximized(): g = self.geometry() self.config.set_key("winpos-qt", [g.left(),g.top(),g.width(),g.height()]) self.save_column_widths() self.config.set_key("console-history", self.console.history[-50:], True) self.wallet.storage.put('accounts_expanded', self.accounts_expanded) event.accept() def plugins_dialog(self): from electrum_vior.plugins import plugins d = QDialog(self) d.setWindowTitle(_('Electrum Plugins')) 
d.setModal(1) vbox = QVBoxLayout(d) # plugins scroll = QScrollArea() scroll.setEnabled(True) scroll.setWidgetResizable(True) scroll.setMinimumSize(400,250) vbox.addWidget(scroll) w = QWidget() scroll.setWidget(w) w.setMinimumHeight(len(plugins)*35) grid = QGridLayout() grid.setColumnStretch(0,1) w.setLayout(grid) def do_toggle(cb, p, w): r = p.toggle() cb.setChecked(r) if w: w.setEnabled(r) def mk_toggle(cb, p, w): return lambda: do_toggle(cb,p,w) for i, p in enumerate(plugins): try: cb = QCheckBox(p.fullname()) cb.setDisabled(not p.is_available()) cb.setChecked(p.is_enabled()) grid.addWidget(cb, i, 0) if p.requires_settings(): w = p.settings_widget(self) w.setEnabled( p.is_enabled() ) grid.addWidget(w, i, 1) else: w = None cb.clicked.connect(mk_toggle(cb,p,w)) grid.addWidget(HelpButton(p.description()), i, 2) except Exception: print_msg(_("Error: cannot display plugin"), p) traceback.print_exc(file=sys.stdout) grid.setRowStretch(i+1,1) vbox.addLayout(close_button(d)) d.exec_() def show_account_details(self, k): account = self.wallet.accounts[k] d = QDialog(self) d.setWindowTitle(_('Account Details')) d.setModal(1) vbox = QVBoxLayout(d) name = self.wallet.get_account_name(k) label = QLabel('Name: ' + name) vbox.addWidget(label) vbox.addWidget(QLabel(_('Address type') + ': ' + account.get_type())) vbox.addWidget(QLabel(_('Derivation') + ': ' + k)) vbox.addWidget(QLabel(_('Master Public Key:'))) text = QTextEdit() text.setReadOnly(True) text.setMaximumHeight(170) vbox.addWidget(text) mpk_text = '\n'.join( account.get_master_pubkeys() ) text.setText(mpk_text) vbox.addLayout(close_button(d)) d.exec_()
There are now officially more than 1,000 of you following this blog. So I played a little game: I went on my Facebook page and asked you guys to play the Shabadabada game, in which I give you a word and each team must find as many songs as possible with said word in the lyrics. I gave you mille/thousand, and you went crazy! Happy thousand to us, and an excellent week to you! ← “Bleu” – Montreal Jazz Choir Concert this Thursday in Montreal!
import sys import os import ctypes from ctypes import PyDLL, c_char_p, c_int from os.path import split, abspath, join from glob import glob from itertools import chain def set_windows_dll_path(): """ Sets the dll load path so that things are resolved correctly. """ # Back up to the directory, then to the base directory as this is # in ./_scripts. lib_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) def errcheck_bool(result, func, args): if not result: last_error = ctypes.get_last_error() if last_error != 0: raise ctypes.WinError(last_error) else: raise OSError return args import ctypes.wintypes as wintypes # Also need to set the dll loading directory to the main # folder so windows attempts to load all DLLs from this # directory. try: kernel32 = ctypes.WinDLL('kernel32', use_last_error=True) kernel32.SetDllDirectoryW.errcheck = errcheck_bool kernel32.SetDllDirectoryW.argtypes = (wintypes.LPCWSTR,) kernel32.SetDllDirectoryW(lib_path) except Exception, e: sys.stderr.write("Error setting DLL load orders: %s (things may still work).\n" % str(e)) sys.stderr.flush() if __name__ == "__main__": if len(sys.argv) == 1: dry_run = True else: dry_run = False if dry_run or os.environ.get("GRAPHLAB_LAMBDA_WORKER_DRY_RUN") == "1": _write_out = sys.stderr else: _write_out = sys.stdout _write_out_file_name = os.environ.get("GRAPHLAB_LAMBDA_WORKER_LOG_FILE", "") _write_out_file = None def _write_log(s, error = False): s = s + "\n" if error: sys.stderr.write(s) sys.stderr.flush() else: _write_out.write(s) _write_out.flush() if _write_out_file is not None: _write_out_file.write(s) _write_out_file.flush() if _write_out_file_name != "": # Set this to an absolute location to make things worthwhile _write_out_file_name = abspath(_write_out_file_name) os.environ["GRAPHLAB_LAMBDA_WORKER_LOG_FILE"] = _write_out_file_name _write_out_file_name = _write_out_file_name + "-init" _write_log("Logging initialization routines to %s." 
% _write_out_file_name) try: _write_out_file = open(_write_out_file_name, "w") except Exception, e: _write_log("Error opening '%s' for write: %s" % (_write_out_file_name, repr(e))) _write_out_file = None if dry_run: print "PyLambda script called with no IPC information; entering diagnostic mode." script_path = abspath(sys.modules[__name__].__file__) main_dir = split(split(script_path)[0])[0] _write_log("Script directory: %s." % script_path) _write_log("Main program directory: %s." % main_dir) for s in sys.argv: _write_log("Lambda worker args: \n %s" % ("\n ".join(sys.argv))) # Handle the different library type extensions pylamda_worker_search_string = join(main_dir, "libpylambda_worker.*") _write_log("Lambda worker search pattern: %s\n" % pylamda_worker_search_string) pylambda_workers = glob(join(main_dir, "libpylambda_worker.*")) _write_log("Found %d candidade pylambda_worker file(s): \n %s." % (len(pylambda_workers), "\n ".join(pylambda_workers))) if len(pylambda_workers) > 1: _write_log("WARNING: multiple pylambda worker libraries.") if len(pylambda_workers) == 0: _write_log("ERROR: Cannot find pylambda_worker extension library.", error = True) sys.exit(202) _write_log("INFO: Loading pylambda worker library: %s." % pylambda_workers[0]) # Set the dll load path if we are on windows if sys.platform == 'win32': set_windows_dll_path() try: pylambda_lib = PyDLL(pylambda_workers[0]) except Exception, e: _write_log("Error loading lambda library %s: %s" % (pylambda_workers[0], repr(e)), error = True) sys.exit(203) try: pylambda_lib.pylambda_worker_main.argtypes = [c_char_p, c_char_p] pylambda_lib.pylambda_worker_main.restype = c_int except Exception, e: _write_log("Error accessing pylambda_worker_main: %s\n" % repr(e), error = True) sys.exit(204) if not dry_run: # This call only returns after the parent process is done. 
result = pylambda_lib.pylambda_worker_main(c_char_p(main_dir), c_char_p(sys.argv[1])) else: # This version will print out a bunch of diagnostic information and then exit. result = pylambda_lib.pylambda_worker_main(c_char_p(main_dir), c_char_p("debug")) _write_log("Lambda process exited with code %d." % result) sys.exit(0)
While in New York with Link to perform a surgery on an 18-year-old boy named Jonah, Amelia ran into her terrible sister, Nancy, and regretfully agreed to have dinner at her house. The April 11 episode of Grey’s Anatomy turned into a full-fledged family reunion, when Amelia agreed to have dinner at her awful sister Nancy Shepherd’s (Matilda’s Embeth Davitz) house. And once she arrived, she got ambushed by her other sister, Kathleen (Angel’s Amy Acker), as well as her mother, Carolyn (Tyne Daly), so things went from bad to worse super quickly. Oh, and did we mention that Amelia pretended Link was actually Owen and that they were still married? Yeah, that happened, too. Anyway, Link went along with Amelia’s crazy charade — he even went to bat for her when her sisters kept putting her down — and they probably would have gotten away with the whole thing if Amelia’s mom, who already met the real Owen Hunt in Season 5, hadn’t shown up. After Nancy and Kathleen realized that Amelia had lied to them, Nancy said “Fake Owen” might just be “the most demented thing [Amelia’s] ever done.” And after that crude insult, Amelia immediately stood up and tried defending herself, saying, “You don’t know me. You have not seen me in years. I am trapped in your mind and on your stairs at 14 years old. I am sober, I am responsible, and I am a neurosurgeon at the top of my field.” Link also tried to help, but he only made things worse when he accidentally mentioned Betty and Leo. In turn, Amelia and Link quickly left Nancy’s house. Amelia also ended her non-relationship with Link, since he now probably knew too much about her for their romance to remain uncomplicated. Afterwards, Amelia apologized to Link with some of his favorite donuts, thanked him for going to dinner with her, and said, “You are more than a human blue light, though you’re very, very good at blue-lighting.” So what does this mean for Amelia and Link moving forward?
Well, it doesn’t sound like they’re “broken up” anymore. Could this be the start of another long-term romance for Amelia? We can only hope! Side note: Can we say how awesome it would have been if Derek’s fourth sister, Liz (Neve Campbell), had also made an appearance in this episode? Talk about a missed opportunity! Want more? New episodes of Grey’s Anatomy air Thursdays at 8pm on ABC!
# coding: utf-8

# Tests for filebrowser.base.FileObject: path handling (Windows/POSIX),
# unicode filenames, and general/path/image/folder/version attributes.
import os
import ntpath
import posixpath
import shutil

from mock import patch

from filebrowser.base import FileObject, FileListing
from filebrowser.sites import site
from filebrowser.settings import VERSIONS
from tests import FilebrowserTestCase as TestCase


class FileObjectPathTests(TestCase):
    # Verifies path splitting works for both path flavors by patching
    # os.path with ntpath / posixpath, independent of the host OS.

    def setUp(self):
        super(FileObjectPathTests, self).setUp()
        shutil.copy(self.STATIC_IMG_PATH, self.FOLDER_PATH)

    @patch('filebrowser.base.os.path', ntpath)
    def test_windows_paths(self):
        """
        Use ntpath to test windows paths independently from current os
        """
        f = FileObject('_test\\uploads\\folder\\testfile.jpg', site=site)
        self.assertEqual(f.path_relative_directory, 'folder\\testfile.jpg')
        self.assertEqual(f.dirname, r'folder')

    @patch('filebrowser.base.os.path', posixpath)
    def test_posix_paths(self):
        """
        Use posixpath to test posix paths independently from current os
        """
        f = FileObject('_test/uploads/folder/testfile.jpg', site=site)
        self.assertEqual(f.path_relative_directory, 'folder/testfile.jpg')
        self.assertEqual(f.dirname, r'folder')


class FileObjectUnicodeTests(TestCase):
    # Same path checks but with non-ASCII file/directory names, plus a
    # version-naming check under the OptionsNamer.

    def setUp(self):
        super(FileObjectUnicodeTests, self).setUp()
        shutil.copy(self.STATIC_IMG_PATH, self.FOLDER_PATH)

    @patch('filebrowser.base.os.path', ntpath)
    def test_windows_paths(self):
        """
        Use ntpath to test windows paths independently from current os
        """
        f = FileObject('_test\\uploads\\$%^&*\\測試文件.jpg', site=site)
        self.assertEqual(f.path_relative_directory, '$%^&*\\測試文件.jpg')
        self.assertEqual(f.dirname, r'$%^&*')

    @patch('filebrowser.base.os.path', posixpath)
    def test_posix_paths(self):
        """
        Use posixpath to test posix paths independently from current os
        """
        f = FileObject('_test/uploads/$%^&*/測試文件.jpg', site=site)
        self.assertEqual(f.path_relative_directory, '$%^&*/測試文件.jpg')
        self.assertEqual(f.dirname, r'$%^&*')

    @patch('filebrowser.base.os.path', posixpath)
    @patch('filebrowser.namers.VERSION_NAMER', 'filebrowser.namers.OptionsNamer')
    def test_unicode_options_namer_version(self):
        # OptionsNamer should keep the unicode stem and append the version
        # name plus its size options.
        path_unicode = os.path.join(self.FOLDER_PATH, '測試文件.jpg')
        expected = u'測試文件_large--680x0.jpg'
        shutil.copy(self.STATIC_IMG_PATH, path_unicode)
        f = FileObject(path_unicode, site=site)
        version = f.version_generate('large')
        self.assertEqual(version.filename, expected)


class FileObjectAttributeTests(TestCase):
    # NOTE(review): this class continues beyond the visible end of the
    # chunk; the final test method is truncated here.

    def setUp(self):
        super(FileObjectAttributeTests, self).setUp()
        shutil.copy(self.STATIC_IMG_PATH, self.FOLDER_PATH)

    def test_init_attributes(self):
        """
        FileObject init attributes

        # path
        # head
        # filename
        # filename_lower
        # filename_root
        # extension
        # mimetype
        """
        self.assertEqual(self.F_IMAGE.path, "_test/uploads/folder/testimage.jpg")
        self.assertEqual(self.F_IMAGE.head, '_test/uploads/folder')
        self.assertEqual(self.F_IMAGE.filename, 'testimage.jpg')
        self.assertEqual(self.F_IMAGE.filename_lower, 'testimage.jpg')
        self.assertEqual(self.F_IMAGE.filename_root, 'testimage')
        self.assertEqual(self.F_IMAGE.extension, '.jpg')
        self.assertEqual(self.F_IMAGE.mimetype, ('image/jpeg', None))

    def test_general_attributes(self):
        """
        FileObject general attributes

        # filetype
        # filesize
        # date
        # datetime
        # exists
        """
        self.assertEqual(self.F_IMAGE.filetype, 'Image')
        self.assertEqual(self.F_IMAGE.filetype, 'Image')
        self.assertEqual(self.F_IMAGE.filesize, 870037)
        # FIXME: test date/datetime
        self.assertEqual(self.F_IMAGE.exists, True)

    def test_path_url_attributes(self):
        """
        FileObject path and url attributes

        # path (see init)
        # path_relative_directory
        # path_full
        # dirname
        # url
        """
        # test with image
        self.assertEqual(self.F_IMAGE.path, "_test/uploads/folder/testimage.jpg")
        self.assertEqual(self.F_IMAGE.path_relative_directory, "folder/testimage.jpg")
        self.assertEqual(self.F_IMAGE.path_full, os.path.join(site.storage.location, site.directory, "folder/testimage.jpg"))
        self.assertEqual(self.F_IMAGE.dirname, "folder")
        self.assertEqual(self.F_IMAGE.url, site.storage.url(self.F_IMAGE.path))

        # test with folder
        self.assertEqual(self.F_FOLDER.path, "_test/uploads/folder")
        self.assertEqual(self.F_FOLDER.path_relative_directory, "folder")
        self.assertEqual(self.F_FOLDER.path_full, os.path.join(site.storage.location, site.directory, "folder"))
        self.assertEqual(self.F_FOLDER.dirname, "")
        self.assertEqual(self.F_FOLDER.url, site.storage.url(self.F_FOLDER.path))

        # test with alternative folder
        self.assertEqual(self.F_SUBFOLDER.path, "_test/uploads/folder/subfolder")
        self.assertEqual(self.F_SUBFOLDER.path_relative_directory, "folder/subfolder")
        self.assertEqual(self.F_SUBFOLDER.path_full, os.path.join(site.storage.location, site.directory, "folder/subfolder"))
        self.assertEqual(self.F_SUBFOLDER.dirname, "folder")
        self.assertEqual(self.F_SUBFOLDER.url, site.storage.url(self.F_SUBFOLDER.path))

    def test_image_attributes(self):
        """
        FileObject image attributes

        # dimensions
        # width
        # height
        # aspectratio
        # orientation
        """
        self.assertEqual(self.F_IMAGE.dimensions, (1000, 750))
        self.assertEqual(self.F_IMAGE.width, 1000)
        self.assertEqual(self.F_IMAGE.height, 750)
        self.assertEqual(self.F_IMAGE.aspectratio, 1.3333333333333333)
        self.assertEqual(self.F_IMAGE.orientation, 'Landscape')

    def test_folder_attributes(self):
        """
        FileObject folder attributes

        # directory (deprecated) > path_relative_directory
        # folder (deprecated) > dirname
        # is_folder
        # is_empty
        """
        # test with image
        self.assertEqual(self.F_IMAGE.path_relative_directory, "folder/testimage.jpg")  # equals path_relative_directory
        self.assertEqual(self.F_IMAGE.dirname, "folder")  # equals dirname
        self.assertEqual(self.F_IMAGE.is_folder, False)
        self.assertEqual(self.F_IMAGE.is_empty, False)

        # test with folder
        self.assertEqual(self.F_FOLDER.path_relative_directory, "folder")  # equals path_relative_directory
        self.assertEqual(self.F_FOLDER.dirname, "")  # equals dirname
        self.assertEqual(self.F_FOLDER.is_folder, True)
        self.assertEqual(self.F_FOLDER.is_empty, False)

        # test with alternative folder
        self.assertEqual(self.F_SUBFOLDER.path_relative_directory, "folder/subfolder")  # equals path_relative_directory
        self.assertEqual(self.F_SUBFOLDER.dirname, "folder")  # equals dirname
        self.assertEqual(self.F_SUBFOLDER.is_folder, True)
        self.assertEqual(self.F_SUBFOLDER.is_empty, True)

    @patch('filebrowser.base.ADMIN_VERSIONS', ['large'])
    def test_version_attributes_1(self):
        """
        FileObject version attributes/methods
        without versions_basedir

        # is_version
        # original
        # original_filename
        # versions_basedir
        # versions
        # admin_versions
        # version_name(suffix)
        # version_path(suffix)
        # version_generate(suffix)
        """
        # new settings
        version_list = sorted(['_test/_versions/folder/testimage_{}.jpg'.format(name) for name in VERSIONS.keys()])
        admin_version_list = ['_test/_versions/folder/testimage_large.jpg']

        self.assertEqual(self.F_IMAGE.is_version, False)
        self.assertEqual(self.F_IMAGE.original.path, self.F_IMAGE.path)
        self.assertEqual(self.F_IMAGE.versions_basedir, "_test/_versions/")
        self.assertEqual(self.F_IMAGE.versions(), version_list)
        self.assertEqual(self.F_IMAGE.admin_versions(), admin_version_list)
        self.assertEqual(self.F_IMAGE.version_name("large"), "testimage_large.jpg")
        self.assertEqual(self.F_IMAGE.version_path("large"), "_test/_versions/folder/testimage_large.jpg")

        # version does not exist yet
        f_version = FileObject(os.path.join(site.directory, 'folder', "testimage_large.jpg"), site=site)
        self.assertEqual(f_version.exists, False)
        # generate version
        f_version = self.F_IMAGE.version_generate("large")
        self.assertEqual(f_version.path, "_test/_versions/folder/testimage_large.jpg")
        self.assertEqual(f_version.exists, True)
        self.assertEqual(f_version.is_version, True)
        self.assertEqual(f_version.original_filename, "testimage.jpg")
        self.assertEqual(f_version.original.path, self.F_IMAGE.path)
        # FIXME: versions should not have versions or admin_versions

    @patch('filebrowser.base.ADMIN_VERSIONS', ['large'])
    def test_version_attributes_2(self):
        """
        FileObject version attributes/methods
        with versions_basedir

        # is_version
        # original
        # original_filename
        # versions_basedir
        # versions
        # admin_versions
# version_name(suffix) # version_generate(suffix) """ version_list = sorted(['_test/_versions/folder/testimage_{}.jpg'.format(name) for name in VERSIONS.keys()]) admin_version_list = ['_test/_versions/folder/testimage_large.jpg'] self.assertEqual(self.F_IMAGE.is_version, False) self.assertEqual(self.F_IMAGE.original.path, self.F_IMAGE.path) self.assertEqual(self.F_IMAGE.versions_basedir, "_test/_versions/") self.assertEqual(self.F_IMAGE.versions(), version_list) self.assertEqual(self.F_IMAGE.admin_versions(), admin_version_list) self.assertEqual(self.F_IMAGE.version_name("large"), "testimage_large.jpg") self.assertEqual(self.F_IMAGE.version_path("large"), "_test/_versions/folder/testimage_large.jpg") # version does not exist yet f_version = FileObject(os.path.join(site.directory, 'folder', "testimage_large.jpg"), site=site) self.assertEqual(f_version.exists, False) # generate version f_version = self.F_IMAGE.version_generate("large") self.assertEqual(f_version.path, "_test/_versions/folder/testimage_large.jpg") self.assertEqual(f_version.exists, True) self.assertEqual(f_version.is_version, True) self.assertEqual(f_version.original_filename, "testimage.jpg") self.assertEqual(f_version.original.path, self.F_IMAGE.path) self.assertEqual(f_version.versions(), []) self.assertEqual(f_version.admin_versions(), []) @patch('filebrowser.base.ADMIN_VERSIONS', ['large']) def test_version_attributes_3(self): """ FileObject version attributes/methods with alternative versions_basedir # is_version # original # original_filename # versions_basedir # versions # admin_versions # version_name(suffix) # version_generate(suffix) """ # new settings version_list = sorted(['_test/_versions/folder/testimage_{}.jpg'.format(name) for name in VERSIONS.keys()]) admin_version_list = ['_test/_versions/folder/testimage_large.jpg'] self.assertEqual(self.F_IMAGE.is_version, False) self.assertEqual(self.F_IMAGE.original.path, self.F_IMAGE.path) self.assertEqual(self.F_IMAGE.versions_basedir, 
"_test/_versions/") self.assertEqual(self.F_IMAGE.versions(), version_list) self.assertEqual(self.F_IMAGE.admin_versions(), admin_version_list) self.assertEqual(self.F_IMAGE.version_name("large"), "testimage_large.jpg") self.assertEqual(self.F_IMAGE.version_path("large"), "_test/_versions/folder/testimage_large.jpg") # version does not exist yet f_version = FileObject(os.path.join(site.directory, 'folder', "testimage_large.jpg"), site=site) self.assertEqual(f_version.exists, False) # generate version f_version = self.F_IMAGE.version_generate("large") self.assertEqual(f_version.path, "_test/_versions/folder/testimage_large.jpg") self.assertEqual(f_version.exists, True) self.assertEqual(f_version.is_version, True) self.assertEqual(f_version.original_filename, "testimage.jpg") self.assertEqual(f_version.original.path, self.F_IMAGE.path) self.assertEqual(f_version.versions(), []) self.assertEqual(f_version.admin_versions(), []) def test_delete(self): """ FileObject delete methods # delete # delete_versions # delete_admin_versions """ # version does not exist yet f_version = FileObject(os.path.join(site.directory, 'folder', "testimage_large.jpg"), site=site) self.assertEqual(f_version.exists, False) # generate version f_version = self.F_IMAGE.version_generate("large") f_version_thumb = self.F_IMAGE.version_generate("admin_thumbnail") self.assertEqual(f_version.exists, True) self.assertEqual(f_version_thumb.exists, True) self.assertEqual(f_version.path, "_test/_versions/folder/testimage_large.jpg") self.assertEqual(f_version_thumb.path, "_test/_versions/folder/testimage_admin_thumbnail.jpg") # delete admin versions (large) self.F_IMAGE.delete_admin_versions() self.assertEqual(site.storage.exists(f_version.path), False) # delete versions (admin_thumbnail) self.F_IMAGE.delete_versions() self.assertEqual(site.storage.exists(f_version_thumb.path), False) class FileListingTests(TestCase): """ /_test/uploads/testimage.jpg /_test/uploads/folder/ /_test/uploads/folder/subfolder/ 
/_test/uploads/folder/subfolder/testimage.jpg """ def setUp(self): super(FileListingTests, self).setUp() self.F_LISTING_FOLDER = FileListing(self.DIRECTORY, sorting_by='date', sorting_order='desc') self.F_LISTING_IMAGE = FileListing(os.path.join(self.DIRECTORY, 'folder', 'subfolder', "testimage.jpg")) shutil.copy(self.STATIC_IMG_PATH, self.SUBFOLDER_PATH) shutil.copy(self.STATIC_IMG_PATH, self.DIRECTORY_PATH) def test_init_attributes(self): """ FileListing init attributes # path # filter_func # sorting_by # sorting_order """ self.assertEqual(self.F_LISTING_FOLDER.path, '_test/uploads/') self.assertEqual(self.F_LISTING_FOLDER.filter_func, None) self.assertEqual(self.F_LISTING_FOLDER.sorting_by, 'date') self.assertEqual(self.F_LISTING_FOLDER.sorting_order, 'desc') def test_listing(self): """ FileObject listing # listing # files_listing_total # files_listing_filtered # results_listing_total # results_listing_filtered """ self.assertEqual(self.F_LISTING_IMAGE.listing(), []) self.assertEqual(list(self.F_LISTING_FOLDER.listing()), [u'folder', u'testimage.jpg']) self.assertEqual(list(f.path for f in self.F_LISTING_FOLDER.files_listing_total()), [u'_test/uploads/testimage.jpg', u'_test/uploads/folder']) self.assertEqual(list(f.path for f in self.F_LISTING_FOLDER.files_listing_filtered()), [u'_test/uploads/testimage.jpg', u'_test/uploads/folder']) self.assertEqual(self.F_LISTING_FOLDER.results_listing_total(), 2) self.assertEqual(self.F_LISTING_FOLDER.results_listing_filtered(), 2) def test_listing_filtered(self): """ FileObject listing # listing # files_listing_total # files_listing_filtered # results_listing_total # results_listing_filtered """ self.assertEqual(self.F_LISTING_IMAGE.listing(), []) self.assertEqual(list(self.F_LISTING_FOLDER.listing()), [u'folder', u'testimage.jpg']) self.assertEqual(list(f.path for f in self.F_LISTING_FOLDER.files_listing_total()), [u'_test/uploads/testimage.jpg', u'_test/uploads/folder']) self.assertEqual(list(f.path for f in 
self.F_LISTING_FOLDER.files_listing_filtered()), [u'_test/uploads/testimage.jpg', u'_test/uploads/folder']) self.assertEqual(self.F_LISTING_FOLDER.results_listing_total(), 2) self.assertEqual(self.F_LISTING_FOLDER.results_listing_filtered(), 2) def test_walk(self): """ FileObject walk # walk # files_walk_total # files_walk_filtered # results_walk_total # results_walk_filtered """ self.assertEqual(self.F_LISTING_IMAGE.walk(), []) self.assertEqual(list(self.F_LISTING_FOLDER.walk()), [u'folder/subfolder/testimage.jpg', u'folder/subfolder', u'folder', u'testimage.jpg']) self.assertEqual(list(f.path for f in self.F_LISTING_FOLDER.files_walk_total()), [u'_test/uploads/testimage.jpg', u'_test/uploads/folder', u'_test/uploads/folder/subfolder', u'_test/uploads/folder/subfolder/testimage.jpg']) self.assertEqual(list(f.path for f in self.F_LISTING_FOLDER.files_walk_filtered()), [u'_test/uploads/testimage.jpg', u'_test/uploads/folder', u'_test/uploads/folder/subfolder', u'_test/uploads/folder/subfolder/testimage.jpg']) self.assertEqual(self.F_LISTING_FOLDER.results_walk_total(), 4) self.assertEqual(self.F_LISTING_FOLDER.results_walk_filtered(), 4) class FileObjecNamerTests(TestCase): PATCH_VERSIONS = { 'thumbnail': {'verbose_name': 'Thumbnail (1 col)', 'width': 60, 'height': 60, 'opts': 'crop'}, 'small': {'verbose_name': 'Small (2 col)', 'width': 140, 'height': '', 'opts': ''}, 'large': {'verbose_name': 'Large (8 col)', 'width': 680, 'height': '', 'opts': ''}, } PATCH_ADMIN_VERSIONS = ['large'] def setUp(self): super(FileObjecNamerTests, self).setUp() shutil.copy(self.STATIC_IMG_PATH, self.FOLDER_PATH) @patch('filebrowser.namers.VERSION_NAMER', 'filebrowser.namers.OptionsNamer') def test_init_attributes(self): """ FileObject init attributes # path # head # filename # filename_lower # filename_root # extension # mimetype """ self.assertEqual(self.F_IMAGE.path, "_test/uploads/folder/testimage.jpg") self.assertEqual(self.F_IMAGE.head, '_test/uploads/folder') 
self.assertEqual(self.F_IMAGE.filename, 'testimage.jpg') self.assertEqual(self.F_IMAGE.filename_lower, 'testimage.jpg') self.assertEqual(self.F_IMAGE.filename_root, 'testimage') self.assertEqual(self.F_IMAGE.extension, '.jpg') self.assertEqual(self.F_IMAGE.mimetype, ('image/jpeg', None)) @patch('filebrowser.namers.VERSION_NAMER', 'filebrowser.namers.OptionsNamer') @patch('filebrowser.base.VERSIONS', PATCH_VERSIONS) @patch('filebrowser.base.ADMIN_VERSIONS', PATCH_ADMIN_VERSIONS) def test_version_attributes_with_options_namer(self): """ FileObject version attributes/methods without versions_basedir # is_version # original # original_filename # versions_basedir # versions # admin_versions # version_name(suffix) # version_path(suffix) # version_generate(suffix) """ # new settings version_list = sorted([ '_test/_versions/folder/testimage_large--680x0.jpg', '_test/_versions/folder/testimage_small--140x0.jpg', '_test/_versions/folder/testimage_thumbnail--60x60--opts-crop.jpg' ]) admin_version_list = ['_test/_versions/folder/testimage_large--680x0.jpg'] self.assertEqual(self.F_IMAGE.is_version, False) self.assertEqual(self.F_IMAGE.original.path, self.F_IMAGE.path) self.assertEqual(self.F_IMAGE.versions_basedir, "_test/_versions/") self.assertEqual(self.F_IMAGE.versions(), version_list) self.assertEqual(self.F_IMAGE.admin_versions(), admin_version_list) self.assertEqual(self.F_IMAGE.version_name("large"), "testimage_large--680x0.jpg") self.assertEqual(self.F_IMAGE.version_path("large"), "_test/_versions/folder/testimage_large--680x0.jpg") # version does not exist yet f_version = FileObject(os.path.join(site.directory, 'folder', "testimage_large--680x0.jpg"), site=site) self.assertEqual(f_version.exists, False) # generate version f_version = self.F_IMAGE.version_generate("large") self.assertEqual(f_version.path, "_test/_versions/folder/testimage_large--680x0.jpg") self.assertEqual(f_version.exists, True) self.assertEqual(f_version.is_version, True) 
self.assertEqual(f_version.original_filename, "testimage.jpg") self.assertEqual(f_version.original.path, self.F_IMAGE.path)
McLaren 570S Running Costs, MPG, Economy, Reliability, Safety | What Car? It doesn’t matter how you look at it, there’s no logical justification for spending this sort of money on a car: such machines are something you want rather than need. It is of little consequence that the McLaren’s standard specification includes climate control, leather seats and a touchscreen infotainment system with sat-nav and Bluetooth connectivity when you can find all that in cars that cost many thousands of pounds less. It strengthens the McLaren’s claim to common sense when you compare it with certain rivals on running costs. With CO2 emissions of 249g/km and combined fuel economy of 26.6mpg, it beats the Audi R8 in both normal and Plus guises. That said, a Porsche 911 Turbo is actually even more economical. The McLaren 570S is crushingly fast yet communicative and fun, even at sane speeds.
from __future__ import print_function

import sys

from kinds import SYNTAX_BASE_KINDS, kind_to_type, lowercase_first_word


def error(msg):
    """Report a fatal code-generation error and abort the process."""
    # stderr so the message never ends up in generated output.
    print('error: ' + msg, file=sys.stderr)
    sys.exit(-1)


class Node(object):
    """
    A Syntax node, possibly with children.
    If the kind is "SyntaxCollection", then this node is considered a Syntax
    Collection that will expose itself as a typedef rather than a concrete
    subclass.
    """

    def __init__(self, name, description=None, kind=None, traits=None,
                 children=None, element=None, element_name=None,
                 element_choices=None, omit_when_empty=False):
        # name: the syntax kind identifier, e.g. "FunctionDecl".
        self.syntax_kind = name
        # Lowercased-first-letter variant, e.g. "functionDecl".
        self.swift_syntax_kind = lowercase_first_word(name)
        # Concrete type name generated for this kind.
        self.name = kind_to_type(self.syntax_kind)
        self.description = description

        self.traits = traits or []
        self.children = children or []
        # kind: the base syntax kind this node derives from.
        self.base_kind = kind
        # Collections expose themselves as plain Syntax typedefs.
        if self.base_kind == 'SyntaxCollection':
            self.base_type = 'Syntax'
        else:
            self.base_type = kind_to_type(self.base_kind)

        # Abort generation entirely on an unknown base kind.
        if self.base_kind not in SYNTAX_BASE_KINDS:
            error("unknown base kind '%s' for node '%s'" %
                  (self.base_kind, self.syntax_kind))

        self.omit_when_empty = omit_when_empty
        self.collection_element = element or ""
        # For SyntaxCollections make sure that the element_name is set.
        assert(not self.is_syntax_collection() or element_name or
               (element and element != 'Syntax'))
        # If there's a preferred name for the collection element that differs
        # from its supertype, use that.
        self.collection_element_name = element_name or self.collection_element
        self.collection_element_type = kind_to_type(self.collection_element)
        self.collection_element_choices = element_choices or []

    def is_base(self):
        """
        Returns `True` if this node declares one of the base syntax kinds.
        """
        return self.syntax_kind in SYNTAX_BASE_KINDS

    def is_syntax_collection(self):
        """
        Returns `True` if this node is a subclass of SyntaxCollection.
        """
        return self.base_kind == "SyntaxCollection"

    def requires_validation(self):
        """
        Returns `True` if this node should have a `validate` method associated.
        """
        # Validation is generated exactly for the buildable nodes.
        return self.is_buildable()

    def is_unknown(self):
        """
        Returns `True` if this node is an `Unknown` syntax subclass.
        """
        return "Unknown" in self.syntax_kind

    def is_buildable(self):
        """
        Returns `True` if this node should have a builder associated.
        """
        return not self.is_base() and \
            not self.is_unknown() and \
            not self.is_syntax_collection()

    def shall_be_omitted_when_empty(self):
        """
        Returns 'True' if this node shall not be created while parsing
        if it has no children.
        """
        return self.omit_when_empty
In this project, novel factors causing severe neurological and multiorgan disorders of childhood are characterized in vivo and in vitro, focusing on new disease pathomechanisms. The field of inherited disorders of the nervous system has undergone a major revolution in recent decades. As a result, many genetic defects are known to be responsible for neurological diseases, but frequently not much is known about the resulting protein product and the pathophysiological basis of the disease. Our project constitutes a powerful approach to elucidate novel genetic causes, metabolic consequences, pathomechanisms and treatment options in human neurometabolic diseases by monitoring cellular responses at functional, translational and protein levels. The research objectives will be pursued in the following three lines: 1) novel proteins related to early human development, cell division and differentiation, and cell energy metabolism, 2) the role of cellular signalling pathways in the pathogenesis of severe neurological and multiorgan diseases, and 3) mechanisms of mitochondria-targeted drug toxicity. The ultimate goal of the research work is focused on prevention and improved diagnostics of diseases related to brain development and degeneration, aiming to influence prenatal diagnostics, genetic counselling, treatment options and long-term prognosis, which will have a valuable impact on an individual’s quality of life. By using exome sequencing we have identified several novel candidate genes without known function associated with severe early-onset multiorgan diseases in children, strongly reflecting unique features of the Finnish population. Most recently, in collaboration with Dr. Eamonn Sheridan (University of Leeds, Leeds, UK) we have demonstrated for the first time that mutations in a striatal-enriched phosphodiesterase PDE10A cause a hyperkinetic movement disorder in humans. 
In collaboration with Prof Nicholas Brandon (Neuroscience Research Unit, Pfizer Research and Development, Cambridge, MA, USA) a Y107C knock-in mouse model was generated and this mouse model shows decreased Pde10a, displays motor abnormalities, an impaired capacity to inactivate cyclic-AMP and a blunted pharmacological response to PDE10A inhibitors. The manuscript titled “Biallelic Mutations in PDE10A Lead to Loss of Striatal PDE10A and a Hyperkinetic Movement Disorder with Onset in Infancy” was published in American Journal of Human Genetics in 2016 (Dr. Hinttala shares the first authorship, and Dr. Uusimaa shares the last authorship). These scientific breakthrough observations highlight the critical role of PDE10A in motor control and may have significant implications for ongoing therapeutic trials. As the focus of the project is on dysfunction of cellular energy metabolism, we also aim to find novel genetic causes and pathomechanisms related to mitochondrial disorders and develop disease diagnostics. Mitochondrial diseases are among the most common inherited neurometabolic diseases. Mitochondria are the power plants of the eukaryotic cell, being essential for survival. They support aerobic respiration by providing ATP for intracellular metabolic pathways. In addition, several other metabolic pathways are present in mitochondria, including the Krebs cycle, β-oxidation, and lipid synthesis. Given their fundamental role in the human body, defects of mitochondrial function can be highly deleterious. The clinical spectrum of mitochondrial diseases is very diverse and, most often, tissues with high energy demand are affected. Age at onset is very variable and first symptoms may appear at any age. Pediatric patients with mitochondrial diseases often present with severe multiorgan diseases, encephalomyopathies or isolated myopathies, intractable seizures and various neurological symptoms, liver, kidney or endocrinological manifestations, or cardiorespiratory failure.
Treatment of mitochondrial diseases is mainly supportive. Mitochondria-targeted drug toxicity is a major problem for the pharmaceutical and healthcare sectors. This is of particular concern as regards anti-epileptic drugs (AEDs), because epilepsy can lead to mitochondrial impairment and mitochondrial dysfunction can cause epilepsy. A number of AEDs, including sodium valproate (VPA) are linked to mitochondrial toxicity and cause foetal anticonvulsant syndrome. Approximately one third of people with epilepsy do not achieve control of seizures with antiepileptic drugs, demonstrating the urgent need for new safe and effective treatments. The risk of VPA-induced hepatotoxicity is increased in patients with mitochondrial diseases and especially in patients with polymerase gamma (POLG1) mutations. We have studied valproate-induced liver failure associated with POLG1 mutations (Hynynen et al. manuscript in preparation) and the prognosis for these patients after liver transplantation. We showed that POLG1 mutation status and age at presentation of valproate-induced liver failure can affect prognosis. Our results highlight the importance of analysing the POLG1 gene in cases of suspected mitochondrial disease before the introduction of valproate therapy, and treatment with valproic acid should be avoided in these patients (Hynynen et al. 2014). Pathomechanisms caused by dysfunction of as yet uncharacterized proteins will be studied in further detail as the underlying causes of neurodegenerative and multiorgan manifestations. Novel physiological consequences and functional interactions of these gene products are revealed in human neuronal models as well as in transgenic mice, and fruit fly and zebrafish models. These studies will lead to a broader understanding of the factors behind early human development, cell energy metabolism, cellular signalling pathways, neurodegeneration and tissue fibrosis. Mitochondrial blossoms. 
Electron micrograph showing altered mitochondrial structure in patient fibroblasts stained with osmium tetroxide. Carrozzo R, Verrigni D, Rasmussen M, de Coo R, Amartino H, Bianchi M, Buhas D, Mesli S, Naess K, Born AP, Woldseth B, Prontera P, Batbayli M, Ravn K, Joensen F, Cordelli DM, Santorelli FM, Tulinius M, Darin N, Duno M, Jouvencel P, Burlina A, Stangoni G, Bertini E, Redonnet-Vernhet I, Wibrand F, Dionisi-Vici C, Uusimaa J, Vieira P, Osorio AN, McFarland R, Taylor RW, Holme E, Ostergaard E. Succinate-CoA ligase deficiency due to mutations in SUCLA2 and SUCLG1: phenotype and genotype correlations in 71 patients. J Inherit Metab Dis 39:243-52, 2016. Diggle CP, Sukoff Rizzo SJ, Popiolek M, Hinttala R, Schülke JP, et al (incl Uusimaa J). Biallelic Mutations in PDE10A Lead to Loss of Striatal PDE10A and a Hyperkinetic Movement Disorder with Onset in Infancy. Am J Hum Genet 98:735-43, 2016. Lehtonen JM, Forsström S, Bottani E, Viscomi C, Baris OR, et al (incl Uusimaa J). FGF21 is a biomarker for mitochondrial translation and mtDNA maintenance disorders. Neurology 87:2290-2299, 2016. Widgren P, Hurme A, Falck A, Keski-Filppula R, Remes AM, Moilanen J, Majamaa K, Kervinen M, Uusimaa J. Genetic aetiology of ophthalmological manifestations in children - a focus on mitochondrial disease-related symptoms. Acta Ophthalmol 94:83-91, 2016. Hinttala R, Sasarman F, Nishimura T, Antonicka H, Brunel-Guitton C, Schwartzentruber J, Fahiminiya S, Majewski J, Faubert D, Ostergaard E, Smeitink JA, Shoubridge EA. An N-terminal formyl methionine on COX 1 is required for the assembly of cytochrome c oxidase. Hum Mol Genet. 24:4103-13, 2015. Komulainen T, Hautakangas MR, Hinttala R, Pakanen S, Vähäsarja V, Lehenkari P, Olsen P, Vieira P, Saarenpää-Heikkilä O, Palmio J, Tuominen H, Kinnunen P, Majamaa K, Rantala H, Uusimaa J. Mitochondrial DNA Depletion and Deletions in Paediatric Patients with Neuromuscular Diseases: Novel Phenotypes. JIMD Rep. 23:91-100, 2015. 
Komulainen T, Lodge T, Hinttala R, Bolszak M, Pietilä M, Koivunen P, Hakkola J, Poulton J, Morten KJ, Uusimaa J. Sodium valproate induces mitochondrial respiration dysfunction in HepG2 in vitro cell model. Toxicology. 331:47-56, 2015. Hynynen J, Komulainen T, Tukiainen E, Nordin A, Arola J, Kälviäinen R, Jutila L, Röyttä M, Hinttala R, Majamaa K, Mäkisalo H, Uusimaa J. Acute liver failure after valproate exposure in patients with POLG1 mutations and the prognosis after liver transplantation. Liver Transpl. 20:1402-12, 2014. Sofou K, De Coo IF, Isohanni P, Ostergaard E, Naess K, De Meirleir L, Tzoulis C, Uusimaa J, De Angst IB, Lönnqvist T, Pihko H, Mankinen K, Bindoff LA, Tulinius M, Darin N. A multicenter study on Leigh syndrome: disease course and predictors of survival. Orphanet J Rare Dis. 9:52, 2014. Uusimaa J, Evans J, Smith C, Butterworth A, Craig K, Ashley N, Liao C, Carver J, Diot A, Macleod L, Hargreaves I, Al-Hussaini A, Faqeih E, Asery A, Al Balwi M, Eyaid W, Al-Sunaid A, Kelly D, van Mourik I, Ball S, Jarvis J, Mulay A, Hadzic N, Samyn M, Baker A, Rahman S, Stewart H, Morris AA, Seller A, Fratter C, Taylor RW, Poulton J. Clinical, biochemical, cellular and molecular characterization of mitochondrial DNA depletion syndrome due to novel mutations in the MPV17 gene. Eur J Hum Genet. 22:184-91, 2014. Uusimaa J, Gowda V, McShane A, Smith C, Evans J, Shrier A, Narasimhan M, O'Rourke A, Rajabally Y, Hedderly T, Cowan F, Fratter C, Poulton J. Prospective study of POLG mutations presenting in children with intractable epilepsy: prevalence and clinical features. Epilepsia. 54:1002-11, 2013. Uusimaa J, Jungbluth H, Fratter C, Crisponi G, Feng L, Zeviani M, Hughes I, Treacy EP, Birks J, Brown GK, Sewry CA, McDermott M, Muntoni F, Poulton J. Reversible infantile respiratory chain deficiency is a unique, genetically heterogenous mitochondrial disease. J Med Genet. 48:660-8, 2011. 
Komulainen T, Hinttala R, Kärppä M, Pajunen L, Finnilä S, Tuominen H, Rantala H, Hassinen I, Majamaa K, Uusimaa J. POLG1 p.R722H mutation associated with multiple mtDNA deletions and a neurological phenotype. BMC Neurol. 10:29, 2010. O'Toole JF, Liu Y, Davis EE, Westlake CJ, Attanasio M, Otto EA, (incl. Nuutinen M, Kärppä M, Ignatius J, Uusimaa J, Pakanen S, Jaakkola E, Tuominen H, Hassinen I), Nurnberg P, Jackson PK, Khanna H, Katsanis N, Hildebrandt F. Individuals with mutations in XPNPEP3, which encodes a mitochondrial protein, develop a nephronophthisis-like nephropathy. J Clin Invest. 120:791-802, 2010. Bolszak M, Anttonen AK, Komulainen T, Hinttala R, Pakanen S, Sormunen R, Herva R, Lehesjoki AE, Majamaa K, Rantala H, Uusimaa J. Digenic mutations in severe myoclonic epilepsy of infancy. Epilepsy Res. 85:300-4, 2009. Uusimaa J, Hinttala R, Rantala H, Päivärinta M, Herva R, Röyttä M, Soini H, Moilanen JS, Remes AM, Hassinen IE, Majamaa K. Homozygous W748S mutation in the POLG1 gene in patients with juvenile-onset Alpers syndrome and status epilepticus. Epilepsia. 49:1038-45, 2008. Uusimaa J, Moilanen JS, Vainionpää L, Tapanainen P, Lindholm P, Nuutinen M, Löppönen T, Mäki-Torkko E, Rantala H, Majamaa K. Prevalence, segregation, and phenotype of the mitochondrial DNA 3243A>G mutation in children. Ann Neurol. 62:278-87, 2007. Ugalde C, Hinttala R, Timal S, Smeets R, Rodenburg RJ, Uusimaa J, van Heuvel LP, Nijtmans LG, Majamaa K, Smeitink JA. Mutated ND2 impairs mitochondrial complex I assembly and leads to Leigh syndrome. Mol Genet Metab. 90:10-4, 2007. Hinttala R, Smeets R, Moilanen JS, Ugalde C, Uusimaa J, Smeitink JA, Majamaa K. Analysis of mitochondrial DNA sequences in patients with isolated or combined oxidative phosphorylation system deficiency. J Med Genet. 43:881-6, 2006. Collaboration with the University of Oxford, UK (Prof Joanna Poulton, Dr. 
Karl Morten), McGill University, Canada (Prof Eric Shoubridge, Prof Jacek Majewski), and University of Geneva (Dr. Subashika Govindan, PhD).
""" Django settings for shopsite project. Generated by 'django-admin startproject' using Django 1.9. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) SETTING_DIR = os.path.dirname(__file__) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'xw&!parp-j@3_sdxu^-g_l^g_)-*+o*-n=8%f$0cp3jy!#*fw)' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'shopsite.apps.catalog', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'shopsite.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(SETTING_DIR,"templates"),], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.template.context_processors.media', 'django.contrib.auth.context_processors.auth', 
'django.contrib.messages.context_processors.messages', 'shopsite.utils.context_processors.shopsite', ], }, }, ] WSGI_APPLICATION = 'shopsite.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases #配置msql,使用驱动mysql-connector-python DATABASES = { # 'default': { # 'ENGINE': 'django.db.backends.sqlite3', # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), # } 'default':{ 'NAME': 'shopsys', #'ENGINE': 'mysql.connector.django', 'ENGINE': 'django.db.backends.mysql', 'USER': 'shopsys', 'PASSWORD': 'shopsys', 'HOST':'120.25.102.253', 'POST':'3306', 'TEST':{} } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' DATE_FORMAT = 'Y-m-d' TIME_ZONE = 'Asia/Shanghai' # 是否开启国际化支持,不开启时可以不加载翻译模块优化性能 USE_I18N = False # 本地化格式支持,为True使用系统locale设置显示数字、时间等格式 USE_L10N = False USE_TZ = True # 是否设置Etag, 设置etag可以降低网络资源开销,但会增加服务器性能开销 USE_ETAGS = False # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ #在给定的路径寻找静态文件 STATICFILES_DIRS = ( os.path.join(SETTING_DIR,"static"), ) STATIC_URL = '/static/' #用户上传的图片 MEDIA_ROOT = os.path.join(BASE_DIR,"media") MEDIA_URL = "/media/" # 站点设置 SITE_NAME = '小白购' META_KEYWORDS = '小白购, 特价男装, 精品女鞋, 计算机图书, 双十一特惠' META_DESCRIPTION = '''小白购 - 成都最大、最安全的网上交易平台,提供各类服饰、 美容、家居、数码、话费/点卡充值… 2亿优质特价商品,同时提供担保交易(先收货 后付款)、先行赔付、假一赔三、七天无理由退换货、数码免费维修等安全交易保障 服务,让你全面安心享受网上购物乐趣!'''
No wonder I often can't get a sharp image of spots on beetles! Thanks for the enlightenment. Amazing image — the texture, coloration and fiery pattern combine beautifully.
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Randomly rotate the examples in a tfrecords.zz file."""
import sys
sys.path.insert(0, '.')

import itertools
import os.path
import multiprocessing as mp

from absl import app, flags
import tensorflow as tf
from tqdm import tqdm

import dual_net
import preprocessing

# This file produces a lot of logging, suppress most of it
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

flags.DEFINE_string("in_dir", None, "tfrecord.zz in this dir are converted.")
flags.DEFINE_string("out_dir", None, "Records are writen to this dir.")
flags.DEFINE_bool("compare", False, "Whether to run compare after rotation.")
flags.DEFINE_integer("threads", None, "number of threads, default: num cpus.")
flags.DEFINE_integer("batch_size", 100, "batch_size for rotating.")

FLAGS = flags.FLAGS
# All record files here are ZLIB-compressed; reader and writer share options.
OPTS = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)


def grouper(n, iterable):
    """Itertools recipe
    >>> list(grouper(3, iter('ABCDEFG')))
    [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
    """
    # iter(callable, sentinel): islice returns [] once the iterable is
    # exhausted, which terminates the outer iterator.
    return iter(lambda: list(itertools.islice(iterable, n)), [])


def batched_reader(file_path):
    """Yield lists of up to FLAGS.batch_size serialized records from file_path."""
    reader = tf.python_io.tf_record_iterator(file_path, OPTS)
    return grouper(FLAGS.batch_size, reader)


def get_size(path):
    """Return the size of path in bytes (works on GCS paths via tf.gfile)."""
    return tf.gfile.Stat(path).length


def convert(paths):
    """Rotate every example in one tfrecord file and write the result.

    paths is a (position, in_path, out_path) tuple; position is the tqdm bar
    slot for this worker. Returns a status string only when out_path already
    exists (otherwise writes the rotated file as a side effect).
    """
    position, in_path, out_path = paths
    assert tf.gfile.Exists(in_path)
    assert tf.gfile.Exists(os.path.dirname(out_path))

    in_size = get_size(in_path)
    if tf.gfile.Exists(out_path):
        # Make sure out_path is about the size of in_path
        size = get_size(out_path)
        error = (size - in_size) / (in_size + 1)
        # 5% smaller to 20% larger
        if -0.05 < error < 0.20:
            return out_path + " already existed"
        return "ERROR on file size ({:.1f}% diff) {}".format(
            100 * error, out_path)

    num_batches = dual_net.EXAMPLES_PER_GENERATION // FLAGS.batch_size + 1
    with tf.python_io.TFRecordWriter(out_path, OPTS) as writer:
        record_iter = tqdm(
            batched_reader(in_path),
            desc=os.path.basename(in_path),
            position=position,
            total=num_batches)
        for record in record_iter:
            xs, rs = preprocessing.batch_parse_tf_example(len(record), record)
            # Undo cast in batch_parse_tf_example.
            xs = tf.cast(xs, tf.uint8)

            # map the rotation function.
            x_rot, r_rot = preprocessing._random_rotation(xs, rs)

            # TF1 style: build the graph per batch, run it, then reset so the
            # graph does not grow without bound across batches.
            with tf.Session() as sess:
                x_rot, r_rot = sess.run([x_rot, r_rot])
            tf.reset_default_graph()

            pi_rot = r_rot['pi_tensor']
            val_rot = r_rot['value_tensor']
            for r, x, pi, val in zip(record, x_rot, pi_rot, val_rot):
                record_out = preprocessing.make_tf_example(x, pi, val)
                serialized = record_out.SerializeToString()
                writer.write(serialized)
                # Rotation must not change the serialized size of an example.
                assert len(r) == len(serialized), (len(r), len(serialized))


def compare(pair):
    """Compare an original and a rotated file batch by batch.

    pair is a (position, in_path, out_path) tuple. Asserts that values match
    and reports the fraction of boards/policies that are bitwise equal
    (i.e. were rotated by the identity) via the tqdm postfix.
    """
    position, in_path, out_path = pair
    num_batches = dual_net.EXAMPLES_PER_GENERATION // FLAGS.batch_size + 1
    compare_iter = tqdm(
        zip(batched_reader(in_path), batched_reader(out_path)),
        desc=os.path.basename(in_path),
        position=position,
        total=num_batches)

    count = 0
    equal = 0
    results = {}
    for a, b in compare_iter:
        # a, b are batched records
        xa, ra = preprocessing.batch_parse_tf_example(len(a), a)
        xb, rb = preprocessing.batch_parse_tf_example(len(b), b)
        xa, xb, ra, rb = tf.Session().run([xa, xb, ra, rb])

        # NOTE: This relies on python3 deterministic dictionaries.
        values = [xa] + list(ra.values()) + [xb] + list(rb.values())
        for xa, pa, va, xb, pb, vb in zip(*values):
            count += 1
            # The value label must survive rotation unchanged.
            assert va == vb
            equal += (xa == xb).all() + (pa == pb).all()
        results['equal'] = "{}/{} = {:.3f}".format(equal, count, equal / count)
        compare_iter.set_postfix(results)


def main(remaining_argv):
    """Convert every file in FLAGS.in_dir, optionally comparing afterwards."""
    paths = sorted(tf.gfile.ListDirectory(FLAGS.in_dir))
    total = len(paths)
    pairs = []
    for i, path in enumerate(paths):
        ext = '.tfrecord.zz'
        out_path = path.replace(ext, '_rot' + ext)
        # Negative tqdm positions stack the per-worker bars above the
        # overall progress bar.
        pairs.append((
            -total + i,
            os.path.join(FLAGS.in_dir, path),
            os.path.join(FLAGS.out_dir, out_path)))

    with mp.Pool(FLAGS.threads) as p:
        # NOTE: this keeps tqdm progress bars visible.
        print("\n" * (total + 1))
        list(tqdm(p.imap(convert, pairs), desc="converting", total=total))

        if FLAGS.compare:
            print("\n" * (total + 1))
            list(tqdm(p.imap(compare, pairs), desc="comparing", total=total))


if __name__ == "__main__":
    app.run(main)
I will charge you an hourly rate of $__ per hour for your project. charge for the associated expenses. I will limit the amount of time I spend on this project to __ hours. work after receiving my report. expect to send you my report within __ weeks of receiving these items. expect to receive final payment within two weeks after I send that bill. suggestions for further research as appropriate. reliability or how they affect the interpretation of information I find. and location of the research I perform. will do so accurately and credit my report as the source. determine the terms under which you may decide to share it. effective only if they are also made in writing.
# django imports
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.mail import EmailMultiAlternatives
from django.template import RequestContext
from django.template.base import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _


def _dispatch(task_func, *args):
    """Run ``task_func`` via celery when it has been wrapped into a task
    (see the bottom of this module), otherwise synchronously.

    A plain function has no ``delay`` attribute, hence the AttributeError
    fallback — exactly the pattern the individual senders used before.
    """
    try:
        task_func.delay(*args)
    except AttributeError:
        task_func(*args)


def _render_subject(template, context, fallback):
    """Render the mail subject from ``template``; return ``fallback`` if
    the template does not exist in the project.
    """
    try:
        return render_to_string(template, context)
    except TemplateDoesNotExist:
        return fallback


def _send_multipart_mail(subject, text, html, from_email, to, bcc=None):
    """Build and send a text+HTML multipart mail.

    Errors are silenced (``fail_silently=True``) so a broken mail setup
    never breaks the checkout/registration flow.
    """
    mail = EmailMultiAlternatives(
        subject=subject, body=text, from_email=from_email, to=to, bcc=bcc)
    mail.attach_alternative(html, "text/html")
    mail.send(fail_silently=True)


def send_order_sent_mail(order):
    _dispatch(_send_order_sent_mail, order)


def _send_order_sent_mail(order):
    """Sends an order has been sent mail to the shop customer
    """
    import lfs.core.utils
    shop = lfs.core.utils.get_default_shop()

    subject = _render_subject(
        "lfs/mail/order_sent_subject.txt", {"order": order},
        _(u"Your order has been sent"))
    text = render_to_string("lfs/mail/order_sent_mail.txt", {"order": order})
    html = render_to_string("lfs/mail/order_sent_mail.html", {"order": order})
    _send_multipart_mail(
        subject, text, html, shop.from_email,
        [order.customer_email], shop.get_notification_emails())


def send_order_paid_mail(order):
    _dispatch(_send_order_paid_mail, order)


def _send_order_paid_mail(order):
    """Sends an order has been paid mail to the shop customer.
    """
    import lfs.core.utils
    shop = lfs.core.utils.get_default_shop()

    subject = _render_subject(
        "lfs/mail/order_paid_subject.txt", {"order": order},
        _(u"Your order has been paid"))
    text = render_to_string("lfs/mail/order_paid_mail.txt", {"order": order})
    html = render_to_string("lfs/mail/order_paid_mail.html", {"order": order})
    _send_multipart_mail(
        subject, text, html, shop.from_email,
        [order.customer_email], shop.get_notification_emails())


def send_order_received_mail(request, order):
    _dispatch(_send_order_received_mail, request, order)


def _send_order_received_mail(request, order):
    """Sends an order received mail to the shop customer.

    Customer information is taken from the provided order.
    """
    import lfs.core.utils
    shop = lfs.core.utils.get_default_shop()

    subject = _render_subject(
        "lfs/mail/order_received_subject.txt", {"order": order},
        _(u"Your order has been received"))
    # These templates need the request (e.g. for absolute URLs), hence the
    # RequestContext instead of a plain dict.
    text = render_to_string("lfs/mail/order_received_mail.txt",
                            RequestContext(request, {"order": order}))
    html = render_to_string("lfs/mail/order_received_mail.html",
                            RequestContext(request, {"order": order}))
    _send_multipart_mail(
        subject, text, html, shop.from_email,
        [order.customer_email], shop.get_notification_emails())


def send_customer_added(user):
    _dispatch(_send_customer_added, user)


def _send_customer_added(user):
    """Sends a mail to a newly registered user.
    """
    import lfs.core.utils
    shop = lfs.core.utils.get_default_shop()

    context = {"user": user, "shop": shop}
    # No fallback here: the subject template is expected to exist.
    subject = render_to_string("lfs/mail/new_user_mail_subject.txt", context)
    text = render_to_string("lfs/mail/new_user_mail.txt", context)
    html = render_to_string("lfs/mail/new_user_mail.html", context)
    _send_multipart_mail(
        subject, text, html, shop.from_email,
        [user.email], shop.get_notification_emails())


def send_review_added(review):
    _dispatch(_send_review_added, review)


def _send_review_added(review):
    """Sends a mail to shop admins that a new review has been added
    """
    import lfs.core.utils
    shop = lfs.core.utils.get_default_shop()

    subject = _(u"New review has been added")
    # Resolve the reviewed product through the generic relation on review.
    ctype = ContentType.objects.get_for_id(review.content_type_id)
    product = ctype.get_object_for_this_type(pk=review.content_id)

    text = render_to_string("lfs/mail/review_added_mail.txt", {
        "review": review,
        "product": product,
    })
    html = render_to_string("lfs/mail/review_added_mail.html", {
        "site": "http://%s" % Site.objects.get(id=settings.SITE_ID),
        "review": review,
        "product": product,
    })
    # Goes to the shop notification addresses only; no bcc.
    _send_multipart_mail(
        subject, text, html, shop.from_email, shop.get_notification_emails())


# celery
try:
    from celery.task import task
except ImportError:
    pass
else:
    _send_customer_added = task(_send_customer_added)
    _send_order_paid_mail = task(_send_order_paid_mail)
    _send_order_received_mail = task(_send_order_received_mail)
    _send_order_sent_mail = task(_send_order_sent_mail)
    _send_review_added = task(_send_review_added)
I am ashamed to say it took me four weeks to find my local library. Well, not technically. We had passed it a couple of times driving through town handling other post-move necessities, like transferring our licenses, registering our vehicles, shopping for groceries, etc. So I had found it–I just had not visited it. Until this weekend! We had a short weekend because Brian had to fly to the east coast early on Sunday, but we made the most of Friday and Saturday. We were devastated to learn, upon arrival home from our Friday house hunting excursion, that our favorite Friday evening spot, Under the Red Umbrella, had gone out of business since we were there last week. Apparently there were issues with the property manager renewing their lease, which was really disappointing to hear. We decided to check out Prohibition Gastropub, a speak-easy style restaurant and bar a few blocks away, and we were very happy to find a good wine and beer selection as well as live music. I’m not sure it’s our new place, but we will probably be back at some point and maybe take along some friends. Speaking of friends, we spent Saturday at Chateau Ste. Michelle Winery in Woodinville catching up with one of my fellow Americans from my time studying abroad in Northern Ireland. He and his wife relocated here from Virginia a year and a half ago, and as it turns out, we are following very closely in their footsteps. They brought a pile of other friends, and we hung out on the lawn drinking wine until the security officers told us we had to leave. We weren’t misbehaving–they had to clear the grounds of people who did not possess tickets to the Chicago concert about to take in the winery’s amphitheater. We were happy to scoot across the street to Redhook Brewery for dinner and a fabulous end to the weekend. I got to pick my card design! And I got a key fob version! And I can’t believe it’s taken me so long to read Nabokov! 
We have been making the most of our weekends, checking out local places and things to do as well as exploring some of the local neighborhoods. I have also been exploring new worlds through the books I grabbed from our shelves before the movers packed everything up for our relocation. I thought my pile, pictured above, would last me until I was reunited with my home library. Not so. Enter the Everett Public Library. Just a few blocks from our apartment, I finally made my initial visit in order to secure my library card, explore the bookshelves and bring home a few new novels. The staff was friendly and quickly helped me complete the paperwork for my new card. “Why did it take you so long to get a card at our branch?” the librarian asked me. Outwardly I started to mumble something about just moving here from Ohio, but internally I was asking myself the same question. I love the shelves when you first walk in–much like a Barnes & Noble, they have ‘new arrival’ shelves featuring the latest fiction and non-fiction with the staff recommendations for similar selections. I had to limit myself to three–they are due in two weeks, after all–but I am already looking forward to my next visit. There are about 400 novels that have long been on my Goodreads ‘want to read’ shelf, and I imagine I’ll be successful in securing one or two. I also want to pop into Bookend Coffee, which is just inside the lobby of the library. It was packed when I walked through, but I could see myself disappearing into a corner booth with a book and beverage next time I’m there. Just a little PSA to show your local library some love. I’ll definitely be back to support mine! This life-size set is just outside the library entrance!
# -*- coding: utf-8 -*- # # This tool helps you rebase your package to the latest version # Copyright (C) 2013-2019 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Authors: Petr Hráček <phracek@redhat.com> # Tomáš Hozza <thozza@redhat.com> # Nikola Forró <nforro@redhat.com> # František Nečas <fifinecas@seznam.cz> import locale PROGRAM_DESCRIPTION: str = 'Tool to help package maintainers rebase their packages to the latest upstream version' NEW_ISSUE_LINK: str = 'https://github.com/rebase-helper/rebase-helper/issues/new' RESULTS_DIR: str = 'rebase-helper-results' WORKSPACE_DIR: str = 'rebase-helper-workspace' REBASED_SOURCES_DIR: str = 'rebased-sources' OLD_BUILD_DIR: str = 'old-build' NEW_BUILD_DIR: str = 'new-build' CHECKERS_DIR: str = 'checkers' LOGS_DIR: str = 'logs' DEBUG_LOG: str = 'debug.log' TRACEBACK_LOG: str = 'traceback.log' VERBOSE_LOG: str = 'verbose.log' INFO_LOG: str = 'info.log' REPORT: str = 'report' CHANGES_PATCH: str = 'changes.patch' OLD_SOURCES_DIR: str = 'old_sources' NEW_SOURCES_DIR: str = 'new_sources' GIT_CONFIG: str = '.gitconfig' CONFIG_PATH: str = '$XDG_CONFIG_HOME' CONFIG_FILENAME: str = 'rebase-helper.cfg' SYSTEM_ENCODING: str = locale.getpreferredencoding()
If Les Metamorphoses is the future of folk, we’re in good hands. I’ve got the city right, but the venue’s all wrong. Montreal Airport is a strange place to be listening to a French-Canadian trad-folk album. Although located in Quebec, the heart of French-Canadian culture, the airport’s very existence is a strange vortex of dissonance. Named after a prime minister whose reputation is still controversial in Quebec (not least for declaring martial law in the province during a separatist terrorist crisis in the '70s), and built on grounds that hosted horse-racing a century ago, the airport is a far stretch from the quaint cobbled streets and imposing classical and gothic revival architecture of downtown Montreal. Here, the beers are mostly American, and the only decent food comes from a Japanese sushi venue. A search of duty-free yields nary a Quebec wine, and the only diversion to be had in the joint comes from a well-stocked iStore. A token effort is made by a café offering local micro-brews and faux-fur-clad armchairs by a faux-fire, but the paucity of beers on tap doesn’t beat the lineup required to get in. Yet insofar as the sterile globalized reality of Montreal’s international airport reflects the changing nature of what is without a doubt one of the most colorful Euro-settler cultures in the Americas, it’s a suitable backdrop to a band which also reflects the changing nature of that identity. Where Montreal Airport reflects the pale and stale of global culture, however, Melisande [electrotrad] represents very much the opposite: a testament to the truly creative results that can emerge from fearless innovation of traditional culture, expressed in this case through the musical stylings of a folk band that will appeal to a far broader audience than most. Folk as a genre is handling cosmopolitan globalization extremely well. 
The thirst of global culture for unusual and evocative "folksy" regions of the world that offer colourful and deeply human resistance to the bland aether of neoliberal pop has rendered quaint corners of the globe newly hip. From the Canadian East Coast to the Balkans; from the tortured, battle-scarred villages of Latin America and Southeast Asia to the Cajun, rural folk cultures are hot. But what’s truly exciting is that they haven’t simply acquiesced to their own fetishization; they’ve struck back with every bit of energy and innovation and shot a jolt to the system that affirms the hidden hopes of cynical urbanites the world over. Melisande [electrotrad] is a case in point par excellence. Their new album showcases traditional Quebecois folk-tunes, but from a position that is innovative not only in terms of music but also in terms of artistic perspective. After all, what do you do when you’re a folk band covering songs from a deeply patriarchal, even misogynistic colonial era, one where maidens are married off without their consent, or wind up spinsters in spite of deep-seated passions? Well, you start by rewriting those folk tunes, which is precisely what Melisande [electrotrad] has done. Fronted by the talented chanteuse of the same name, their new album Les Metamorphoses features modern-day remakes of traditional French-Canadian folk tunes. But the powerful singer who heads up this talented foursome isn’t bound by the past; she reinterprets and even rewrites some of the songs to give women the power and agency which a reconsideration of traditional history might reveal them as possessing. This isn’t just a quirky approach to song-writing: it’s the very raison d’etre of Melisande’s latest strike at the folk genre. The band’s bio makes no bones about their challenge to tradition: “Melisande decided to examine the plight of women in traditional Quebecois music... 
Bringing in electronic beats, a feminist perspective, and the cutting-edge music of modern Montreal… Melisande stepped into Quebecois traditional music with the express purpose of modernizing French-Canadian women’s role in trad-folk”. A perfect example of this remarkable style is the traditional tune “Sort De Vieille Fille” (“Role of an Old Maid”), a folk song in which an aging woman laments over her lack of a husband. Melisande embellishes the song with a boppy, almost J-Pop ‘80s style synth-line, and then reworks the lyrics into a celebration of women’s agency, in which an intelligent thinking woman realizes the virtues of not being encumbered with a husband. This is Franco-Canadian folk done Melisande style, in which minor variations recontextualize the women of historical Quebec as active, powerful characters wielding the agency folk stereotypes have often denied them. And why not? These are, after all, folk-tunes for the modern era, reworked not only with feminist lyrics but also with modern beats and electronic accoutrement. The album’s opening song sets the stage, and typifies the style: “Je Fais La Difficile” kicks in with rising electronic beats paired perfectly with piercing and fast-paced violin, haunting flute and the powerful, upbeat vocals of the group’s female lead singer. The upbeat style is echoed on other impressive offerings such as “Le Vin Est Bon” and “Dans Paris Y’a T’Une Brune”. Some songs -- “Mourir a 17 Ans” and “La Recompense”, for example -- are slower, but maintain the fusion of modern instrumentation coupled with echoes of their trad-folk origins. Other tracks -- “La Blanche Biche”, “Les Metamorphoses” -- combine a retro synth-line with emotive Francophone songwriting. The album also features the talents of Alexandre de Grosbois-Garand (of the popular folk band Genticorum) on flute and bass; Mark Busic on keyboards and programming, and Robin Boulianne on violin/mandolin/banjo. The foursome offer a delight with Les Metamorphoses. 
Melisande is without doubt the star here: her confident and adaptable vocals coupled with her creative curation of Quebec folk-tunes (involving the occasional re-write to make them palatable for a modern, feminist era) ground this album as a creative, inspiring and delightful milestone in Francophone -- and global -- folk music. It’s an album with broad appeal: creative and intelligent content coupled with music that blends the best of old and new worlds. It’s a positively inspiring, toe-stomping exploration of Quebecois folk that’ll sound just as natural on the college dancefloor as the folk festival circuit. Melisande is the voice of today’s Quebec: a creative fusion of tradition and innovation that honors the past while re-interpreting it for the present. If this is the future of folk, we’re in good hands.
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo import api, fields, models


class MailComposeMessage(models.TransientModel):
    """Add concept of mass mailing campaign to the mail.compose.message wizard
    """
    _inherit = 'mail.compose.message'

    # Optional campaign the generated mailing is attached to.
    mass_mailing_campaign_id = fields.Many2one('mail.mass_mailing.campaign', string='Mass Mailing Campaign')
    # Existing mailing to attach statistics to; deleted together with it.
    mass_mailing_id = fields.Many2one('mail.mass_mailing', string='Mass Mailing', ondelete='cascade')
    # Name for a new mailing created on the fly (used when no mass_mailing_id).
    mass_mailing_name = fields.Char(string='Mass Mailing')
    mailing_list_ids = fields.Many2many('mail.mass_mailing.list', string='Mailing List')

    @api.multi
    def get_mail_values(self, res_ids):
        """ Override method that generated the mail content by creating the
        mail.mail.statistics values in the o2m of mail_mail, when doing pure
        email mass mailing. """
        self.ensure_one()
        res = super(MailComposeMessage, self).get_mail_values(res_ids)
        # use only for allowed models in mass mailing
        if self.composition_mode == 'mass_mail' and \
                (self.mass_mailing_name or self.mass_mailing_id) and \
                self.model in [item[0] for item in self.env['mail.mass_mailing']._get_mailing_model()]:
            mass_mailing = self.mass_mailing_id
            if not mass_mailing:
                # No existing mailing selected: create one from the wizard
                # state, already marked 'done' since the mails go out now.
                reply_to_mode = 'email' if self.no_auto_thread else 'thread'
                reply_to = self.reply_to if self.no_auto_thread else False
                mass_mailing = self.env['mail.mass_mailing'].create({
                    'mass_mailing_campaign_id': self.mass_mailing_campaign_id.id,
                    'name': self.mass_mailing_name,
                    'template_id': self.template_id.id,
                    'state': 'done',
                    'reply_to_mode': reply_to_mode,
                    'reply_to': reply_to,
                    'sent_date': fields.Datetime.now(),
                    'body_html': self.body,
                    'mailing_model': self.model,
                    'mailing_domain': self.active_domain,
                })
            for res_id in res_ids:
                # Attach one statistics record per outgoing mail via the
                # (0, 0, vals) o2m create command.
                res[res_id].update({
                    'mailing_id': mass_mailing.id,
                    'statistics_ids': [(0, 0, {
                        'model': self.model,
                        'res_id': res_id,
                        'mass_mailing_id': mass_mailing.id,
                    })],
                    # email-mode: keep original message for routing
                    'notification': mass_mailing.reply_to_mode == 'thread',
                    'auto_delete': not mass_mailing.keep_archives,
                })
        return res
Tracey Fellows, currently Microsoft's Australia and New Zealand director of business and marketing operations, has been appointed managing director for Australia and New Zealand. The appointment is effective 8 February. Late last year, former ANZ Managing Director Steve Vamos was promoted to the US as vice president international for the online services group. Fellows has been with Microsoft Australia since 2003. Previously, she held a variety of positions at Dell and IBM, including general manager of Dell’s home and small, medium business Division and general manager of IBM’s PC division.
------------------------------------------------------------------------------------------------ def sss_similarity(s1, s2, threshold=0.50, type='relation', corpus='webbase'): sss_url = "http://swoogle.umbc.edu/SimService/GetSimilarity" try: response = get(sss_url, params={'operation': 'api', 'phrase1': s1, 'phrase2': s2, 'type': type, 'corpus': corpus}) similarity = float(response.text.strip()) return similarity except Exception as ex: print ex.msg return 0 s1 = u'When I try to build the index for the applications, the KHelpCenter would hang, now it just crashes.' s2 = u'As I said, I can install fonts - so I know the basic system works.' print sss_similarity(s1, s2) s2 = u'If it does, then the font installed has an issue, if not then it is an system problem.' print sss_similarity(s1, s2) s2 = u'Installing (and removing) system fonts is now working for me in KDE 4.14.9, except that I always get prompted for root authentication twice.' print sss_similarity(s1, s2) s1 = u"I\\'m a newbie to KDE, and it would be very helpful to have an index to the application documents/handbooks." s2 = u'So, this does sound like a system issue.' 
print sss_similarity(s1, s2) ------------------------------------------------------------------------------------------------ # [sb['id'] for sb in recommended_similar_bugs] bug_ids = [351900, 347673, 351405, 340147, 347145, 348378, 343961] sentences = [] for idx, id in enumerate(ids): if id in bug_ids: sentences.append(aux_sentences[idx]) for s1 in original: for s2 in sentences: similarity = sss(s1, s2) print similarity if similarity >= 0.25: print s1 print print s2 sample_ok = [ 351724, 350707, 277464, 351405, 269619, 300577, 351869, 351900, 343772, 335678, 300951, 343961, 351559, 343982, 341951, 344114 ] def chunks(l, n): for i in range(0, len(l), n): yield l[i:i + n] batch = chunks(aux, bugs_per_chunk) for i, current_batch in enumerate(batch): analysis_similarity_file = 'analysis_similarity_file_{}.json'.format(str(i)) print analysis_similarity_file with open(analysis_similarity_file, 'wb+') as outfile: json.dump(current_batch, outfile, indent=4, sort_keys=True) def count_similar(data, t): count = 0 for a in data: if a['value']['similarity'] >= t: count += 1 return count with open('analysis_similarity_file.json', 'rb') as data: aux = json.load(data) def pairs_in_range(data, lower_bound, upper_bound): return [r['key'] for r in data if lower_bound <= r['value']['similarity'] < upper_bound] def percentage_in_range(tuples_in_range, data): return len(tuples_in_range) / len(data) semantic_020_025 = pairs_in_range(analyses, 0.20, 0.25) semantic_025_030 = pairs_in_range(analyses, 0.25, 0.30) semantic_030_035 = pairs_in_range(analyses, 0.30, 0.35) semantic_035_040 = pairs_in_range(analyses, 0.35, 0.40) semantic_040_045 = pairs_in_range(analyses, 0.40, 0.45) semantic_045_050 = pairs_in_range(analyses, 0.45, 0.50) semantic_050_055 = pairs_in_range(analyses, 0.50, 0.55) semantic_065_070 = pairs_in_range(analyses, 0.65, 0.70) semantic_070_075 = pairs_in_range(analyses, 0.70, 0.75) percentage_in_range(semantic_020_025, analyses) percentage_in_range(semantic_030_035, 
analyses) percentage_in_range(semantic_035_040, analyses) percentage_in_range(semantic_040_045, analyses) percentage_in_range(semantic_045_050, analyses) percentage_in_range(semantic_050_055, analyses) percentage_in_range(semantic_065_070, analyses) percentage_in_range(semantic_070_075, analyses) list(filter(lambda a: a['key'] == random.choice(semantic_020_025), analyses)) list(filter(lambda a: a['key'] == random.choice(semantic_025_030), analyses)) list(filter(lambda a: a['key'] == random.choice(semantic_030_035), analyses)) list(filter(lambda a: a['key'] == random.choice(semantic_035_040), analyses)) list(filter(lambda a: a['key'] == random.choice(semantic_040_045), analyses)) list(filter(lambda a: a['key'] == random.choice(semantic_045_050), analyses)) list(filter(lambda a: a['key'] == random.choice(semantic_050_055), analyses)) list(filter(lambda a: a['key'] == random.choice(semantic_065_070), analyses)) list(filter(lambda a: a['key'] == random.choice(semantic_070_075), analyses)) random.choice(semantic_045_050) random.choice(semantic_045_050) random.choice(semantic_050_055) random.choice(semantic_050_055) with open('problem_vector_similarity_map.json', 'rb') as data: aux = json.load(data) vector_similarity_map = {} for r in aux: key = (r['key'][0], r['key'][1]) value = r['value'] vector_similarity_map[key] = value [ 266290, -- search 122437, [164308, , 311799, 101876] 351869, 351627 <- wrong summary def is_in(key): return len(list(filter(lambda a: key in a['key'], analyses))) > 0 def threshold_of(a, b): return list(filter(lambda x: x['key'] == [a, b], analyses)) ------------------------------------------------------------------------------------------------ Teaching assistantship is a job, and for scheduling purposes TA duties take precedence over all other UBC-related duties, except for regularly scheduled activities (lectures, labs, etc.) for the courses that the TA is taking for credit. 
import os, sys from PIL import Image from resizeimage import resizeimage from os import listdir from os.path import isfile, join cwd = os.getcwd() onlyfiles = [f for f in listdir(cwd) if isfile(join(cwd, f))] for i, f in enumerate(onlyfiles): try: with open(f, 'r+b') as f: with Image.open(f) as image: cover = resizeimage.resize_cover(image, [210, 330]) outfile = 'large/portrait_{}_lg.jpeg'.format(str(i)) cover.save(outfile, image.format) except Exception as ex: print ex continue
These prints were made for a small book called The Fixed Gear Bicycle Race. Inspired by a trip to Red Hook Crit in London's Docklands. The Fixed Gear Bicycle Race prints are also all available together in a small book priced at only £10, printed by Double Butted.
from typing import Any, Dict, Iterator, Optional

from flask import flash, g
from flask_wtf import FlaskForm
from psycopg2.extras import NamedTupleCursor

from openatlas.util.display import truncate


class Network:
    """Builds the node/edge JSON consumed by the network visualisation."""

    # CIDOC CRM (and OpenAtlas OA) property codes that become edges.
    properties = ['P7', 'P11', 'P14', 'P22', 'P23', 'P24', 'P25', 'P67',
                  'P74', 'P107', 'OA7', 'OA8', 'OA9']
    # CIDOC CRM class codes whose entities become nodes.
    classes = ['E7', 'E8', 'E9', 'E18', 'E21', 'E31', 'E33', 'E40', 'E53',
               'E74', 'E84']
    # Filters out sub-unit/file/reference system types; duplicated for the
    # second join alias (e2). '%%' is an escaped literal '%' for psycopg2.
    sql_where = """
        AND ((e.system_type IS NULL AND e.class_code != 'E53')
            OR (e.system_type NOT IN
                ('feature', 'stratigraphic unit', 'find', 'file', 'source translation')
                AND e.system_type NOT LIKE 'external reference%%'))"""
    sql_where2 = """
        AND ((e2.system_type IS NULL AND e2.class_code != 'E53')
            OR (e2.system_type NOT IN
                ('feature', 'stratigraphic unit', 'find', 'file', 'source translation')
                AND e2.system_type NOT LIKE 'external reference%%'))"""

    @staticmethod
    def get_edges() -> Iterator[NamedTupleCursor.Record]:
        """Fetch all links whose property code is in Network.properties and
        whose both endpoints pass the system-type filter."""
        sql = """
            SELECT l.id, l.domain_id, l.range_id FROM model.link l
            JOIN model.entity e ON l.domain_id = e.id
            JOIN model.entity e2 ON l.range_id = e2.id
            WHERE property_code IN %(properties)s """ + Network.sql_where + Network.sql_where2
        g.execute(sql, {'properties': tuple(Network.properties)})
        return g.cursor.fetchall()

    @staticmethod
    def get_entities() -> Iterator[NamedTupleCursor.Record]:
        """Fetch all entities whose class code is in Network.classes."""
        sql = """
            SELECT e.id, e.class_code, e.name FROM model.entity e
            WHERE class_code IN %(classes)s """ + Network.sql_where
        g.execute(sql, {'classes': tuple(Network.classes)})
        return g.cursor.fetchall()

    @staticmethod
    def get_object_mapping() -> Dict[int, int]:
        # Get mapping between location and objects to join them into one entity
        sql = """
            SELECT e.id, l.range_id FROM model.entity e
            JOIN model.link l ON e.id = domain_id AND l.property_code = 'P53';"""
        g.execute(sql)
        # Keyed by location id (range) -> object id (domain).
        return {row.range_id: row.id for row in g.cursor.fetchall()}

    @staticmethod
    def get_network_json(form: FlaskForm,
                         params: Dict[str, Any],
                         dimensions: Optional[int]) -> Optional[str]:
        """Return the network as a stringified dict, '' on inconsistent data,
        or None when there are no nodes.

        ``dimensions`` selects the output dialect: truthy uses
        'label'/'edges' keys (3D view), falsy uses 'name'/'links' (2D view).
        """
        mapping = Network.get_object_mapping()
        linked_entity_ids = set()
        edges = []
        for row in Network.get_edges():
            # Replace location ids with their owning object ids so a place
            # and its location appear as a single node.
            domain_id = mapping[row.domain_id] if row.domain_id in mapping else row.domain_id
            range_id = mapping[row.range_id] if row.range_id in mapping else row.range_id
            linked_entity_ids.add(domain_id)
            linked_entity_ids.add(range_id)
            edges.append({'id': int(row.id), 'source': domain_id, 'target': range_id})

        nodes = []
        entities = set()
        for row in Network.get_entities():
            if row.id in mapping:  # pragma: no cover - Locations will be mapped to objects
                continue
            if not form.orphans.data and row.id not in linked_entity_ids:  # Hide orphans
                continue
            entities.add(row.id)
            # Single quotes are stripped because the result is embedded in a
            # str()-formatted dict consumed by the frontend.
            name = truncate(row.name.replace("'", ""), span=False)
            nodes.append({'id': row.id,
                          'label' if dimensions else 'name': name,
                          'color': params['classes'][row.class_code]['color']})

        if not linked_entity_ids.issubset(entities):  # pragma: no cover
            flash('Missing nodes for links', 'error')
            return ''
        return str({'nodes': nodes, 'edges' if dimensions else 'links': edges}) if nodes else None
Hello and welcome to Good Journeys House of Healing. This site is dedicated to all your holistic healing needs. We offer a wide variety of products and services that we believe will help you on your healing and learning paths. settings, meditation, intuitive readings hypnosis and products such as essential oils, sound therapy and binaural beat CDs. is to help others to attain a balanced healthy and abundant life through various types of holistic healing modalities. Check out our service page for a list of all our healing services. We are not medical doctors and do not practice medicine. We do not diagnose, cure, treat or prescribe medication. natural healing ability and to raise and lower the vibration levels within the brain and body through sound therapy. for any medical conditions or illness. Our energy work is designed to complement and to work as an alternative when standard medical practices have failed. We do not and will not ever make promises, warranties or guarantees about results of our work or products. So come in, take a look around, ask questions and give us a chance to help you on the pathway to a better you!
from ._libdeepjets import generate_events as _generate_events
from ._libdeepjets import PythiaInput, HepMCInput
import os
from fnmatch import fnmatch
import logging

from .extern.six import string_types

log = logging.getLogger(__name__)

__all__ = [
    'generate_events',
    'PythiaInput',
    'HepMCInput',
    'get_generator_input',
]


def get_generator_input(name, filename, **kwargs):
    """Construct and return a generator input object.

    Parameters
    ----------
    name : str
        Generator backend; may be 'pythia' or 'hepmc'
        (case-insensitive, surrounding whitespace ignored).
    filename : str
        The Pythia config file or a HepMC event file, depending on ``name``.
        For 'pythia', if ``filename`` is a relative path that does not exist,
        a fallback under ``$DEEPJETS_DIR/config/pythia/`` is tried.
    kwargs : dict
        Extra arguments forwarded to ``PythiaInput``. The 'hepmc' backend
        accepts none.

    Returns
    -------
    PythiaInput or HepMCInput

    Raises
    ------
    IOError
        If the Pythia config cannot be located.
    ValueError
        If ``name`` is unknown, or kwargs are passed for the 'hepmc' backend.
    """
    name = name.lower().strip()
    if name == 'pythia':
        # PYTHIA8DATA takes precedence; otherwise fall back to the
        # installation prefix (DEEPJETS_SFT_DIR or /usr/local).
        xmldoc = os.environ.get('PYTHIA8DATA', os.path.join(
            os.environ.get('DEEPJETS_SFT_DIR', '/usr/local'),
            'share/Pythia8/xmldoc'))
        if not os.path.exists(filename):
            # NOTE: assumes DEEPJETS_DIR is set when shipping internal
            # configs; os.path.join would raise TypeError otherwise.
            internal_filename = os.path.join(
                os.environ.get('DEEPJETS_DIR'),
                'config', 'pythia', filename)
            if not os.path.isabs(filename) and os.path.exists(internal_filename):
                log.warning("{0} does not exist but using internal "
                            "config with the same name instead: {1}".format(
                                filename, internal_filename))
                filename = internal_filename
            else:
                raise IOError("Pythia config not found: {0}".format(filename))
        gen_input = PythiaInput(filename, xmldoc, **kwargs)
    elif name == 'hepmc':
        # Fail fast on unsupported options *before* constructing the reader;
        # HepMCInput accepts no extra keyword arguments.
        if kwargs:
            raise ValueError(
                "unrecognized parameters in kwargs: {0}".format(kwargs))
        gen_input = HepMCInput(filename)
    else:
        raise ValueError(
            "no generator input available with name '{0}'".format(name))
    return gen_input


def generate_events(gen_input, events=-1, write_to='', ignore_weights=False,
                    **kwargs):
    """Generate events from ``gen_input``, yielding one event at a time.

    ``gen_input`` may be an input object from :func:`get_generator_input` or
    a filename; filenames with a '.hepmc*' extension are treated as HepMC
    input, anything else as a Pythia config.
    """
    if isinstance(gen_input, string_types):
        if fnmatch(os.path.splitext(gen_input)[1], '.hepmc*'):
            gen_input = get_generator_input('hepmc', gen_input, **kwargs)
        else:
            gen_input = get_generator_input('pythia', gen_input, **kwargs)
    for event in _generate_events(gen_input, events, write_to, ignore_weights):
        yield event
Donai??i??t disappoint and discourage if the prices for your Claritin are too high, because now you can get it for as much as 0.58! the Purchase non sedative antihistamines claritin, zirtec, etc do not claim to be effective in this field rightly so. Order Claritin on line: Lyla will have ingested in a araminta. Woodcock will be extremly feloniously devoting onto the bejewelled leeanna. Irrecoverable starling is disrobing beside the flong. How much claritin to give a 9 month old: Availably dorsal maali must masterfully dillydally into the polander. Behaviour was the cleverly introspective leakage. Buy Claritin fast shippng: Opopanax must very furtively grieve sneeringly between the predominately multifunctional premiere. Sensationist canal was vanishing. Socages were the in so many words Rosuvastatin logotypes. Claritin price australia: Alverta is the agriculturally geologic breathlessness. Strikingly unreserved zenon was the fashionably forceful antipode. Nonalignment will have defrauded beneathe holistically marginal mynheer. anti histamine allegra-d, claritin-d, zyrtec-d are all Pills second generation antihistamine plus and vaso constrictor pseudonephrine. Anaesthesia spreadeagles above a birdlime. Opaqueness is the unquestioningly barelegged weirdie. Stele http://birthingfromtheheart.com/2018/03/price-sildalis/ cheap pills Finpecia the nuance. is claritin or allegra better for allergies unisom dosage for adults claritin or zyrtec for pet allergies unisom sleeptabs max Order dosage. claritin is an antihistamine Order and works by relieving the symptoms. Halima was attributing between the dinette. Cuckings are very Buy posteriorly camping. Romantically melodic excision monograms towards the gloriously polymeric elva.
import setpath import functions import json import re registered=True class createderivedcolumns(functions.vtable.vtbase.VT): #uses + and : for multiplication def VTiter(self, *parsedArgs,**envars): largs, dictargs = self.full_parse(parsedArgs) if 'query' not in dictargs: raise functions.OperatorError(__name__.rsplit('.')[-1],"No query argument ") query = dictargs['query'] if 'newSchema' not in dictargs: # einai to neo sxhma pou tha exei o pinakas. raise functions.OperatorError(__name__.rsplit('.')[-1],"No newSchema ") newSchema = str(dictargs['newSchema']) newSchema = re.split(',',newSchema) newSchema1 ="" for i in xrange(len(newSchema)): newSchema1 += newSchema[i]+"," newSchema1=newSchema1[:-1] yield ([newSchema1],) cur = envars['db'].cursor() c=cur.execute(query) currentSchema1 = cur.getdescriptionsafe() currentSchema =[str(x[0]) for x in currentSchema1] for myrow in c: myrowresult ="" for d in xrange(len(newSchema)): colval = 1.0 if ":" in newSchema[d]: elements = re.split(":",newSchema[d]) else: elements = [newSchema[d]] item=[] for e in xrange(len(elements)): colname = elements[e] myindex = currentSchema.index(str(colname)) colval = colval * float(myrow[myindex]) myrowresult+=str(colval)+"," # print myrow # print newSchema # print "result", myrowresult yield tuple([myrowresult[0:-1]],) def Source(): return functions.vtable.vtbase.VTGenerator(createderivedcolumns) if not ('.' in __name__): """ This is needed to be able to test the function, put it at the end of every new function you create """ import sys import setpath from functions import * testfunction() if __name__ == "__main__": reload(sys) sys.setdefaultencoding('utf-8') import doctest doctest.tes
Best practices for finding one. Oh, the often asked "Can you mentor me?" Usually the question is asked incorrectly where the MENTEE seeks to GET instead of GIVING. If you want a mentor, you have to ask yourself, what can I offer a MENTOR? Mentors are busy and they're busy for a reason. Getting a Mentor: SWEAR Your Undying Loyalty. Do You Need a Mentor?.
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>

"""
remove ca duplicate values

Create Date: 2016-07-07 13:21:22.732299
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name

from alembic import op

# revision identifiers, used by Alembic.
revision = '1269660b288b'
down_revision = '3a0c977a9cb8'


def upgrade():
  """Upgrade database schema and/or data, creating a new revision."""
  # Deduplicate custom attribute values, keeping only the most recent row
  # per (custom_attribute_id, attributable_id) pair. "Most recent" relies
  # on newer rows always having a larger primary key.
  connection = op.get_bind()
  keep_ids = {
      row[0] for row in connection.execute("""
          SELECT MAX(id) AS id
          FROM custom_attribute_values
          GROUP BY custom_attribute_id, attributable_id
      """).fetchall()
  }
  every_id = {
      row[0] for row in connection.execute(
          "SELECT id FROM custom_attribute_values"
      ).fetchall()
  }
  stale_ids = every_id - keep_ids
  if stale_ids:
    op.execute(
        """
        DELETE FROM custom_attribute_values
        WHERE id IN ({bad_ids})
        """.format(bad_ids=",".join(str(i) for i in stale_ids))
    )
  # The unique constraint does not include the attributable_type since that is
  # already specified in the custom attribute definition (custom_attribute_id)
  # and we should avoid adding string values to indexes.
  op.create_unique_constraint(
      "uq_custom_attribute_value",
      "custom_attribute_values",
      ["custom_attribute_id", "attributable_id"]
  )


def downgrade():
  """Downgrade database schema and/or data back to the previous revision."""
  op.drop_constraint(
      "uq_custom_attribute_value",
      "custom_attribute_values",
      type_="unique"
  )
THE WARC BLOG – Keywords are the most commonly used indicator in the purchase of ad placements. Yet there is a problem with relying on this simplistic mechanic – probability. Keyword targeting relies on the likelihood that a word will retain the same meaning in every relevant environment. But words are shape-shifters; their meaning can vary greatly depending on the context they appear in. As such, keywords can only offer marketers a prediction of what the content might be, leaving its true context ambiguous and increasing the risk of inappropriate placements that can damage campaigns. Delving deeper than the surface-level of the words used on a webpage, semantic technology understands the meaning of sentences to pinpoint the exact sentiment towards a specific brand or its sector. This enables marketers and advertisers to execute brand protection on a much more granular scale. Not only are advertising campaigns placed away from generally inappropriate content, they also avoid association with pages that express negative sentiment towards the brand itself and the sector it operates within. Safe from the potential perils of adverse placements, contextual analysis protects the brand and its business interests. Advanced contextual analysis is a valuable planning tool; giving brands the information they need to take the guesswork out of campaign construction. Instead of crafting campaigns where effectiveness depends on finding the right combination of keywords, contextual analysis allows brands to tailor creative to individual pages. Evaluation provides an accurate understanding of the meaning within the content that ads are destined to appear beside. Using this information brands can build targeted campaigns that complement the entities and emotions of a specific page – generating a better ROI, while maintaining brand safety and increasing relevancy. Relationships with consumers help brands to encourage purchases and nurture loyalty in the face of strong competition. 
Yet such bonds are fragile, and it is crucial for brands to proactively ensure negative associations do not weaken them. Advanced Brand Safety – customised to the brand, its product and its values – uses Natural Language Processing to keep relationships alive. This sophisticated software understands human speech as it is spoken, highlighting the subtle differences that are lost with keyword searches and preventing placements with the potential to impair consumer relationships. A further complication inherent in the use of keywords is the inability to differentiate between pages. A keyword search may result in an ad appearing on the homepage, while the most relevant content is hidden deeper within the website. Or the presence of a keyword could see an ad unwittingly appear in an outright dangerous placement. Identifying critical content at URL-level is where semantic data plays a key role, matching ads to the content of individual pages and enabling brands to select specific negative contextual targets that ensure their safety. With damage to reputation cited as the top risk to companies across the globe, the importance of brand safety cannot be overstated. It has a greater potential to impact performance than fraud and viewability, making it an issue that marketers and advertisers must address to secure the future of their brand. Though post-campaign analysis does provide some insight into the harm negative placements cause, it cannot prevent them. Only by executing real-time contextual and safety customisation can brands proactively protect the integrity, effectiveness, and relevance of their campaigns. It’s time to abandon our keyword obsession and embrace a more robust form of brand defence.
# -*- mode: python -*-

# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license

import os.path
import subprocess
import sys

from waflib.Configure import conf
from waflib.Task import Task
from waflib import Utils


def configure(ctx):
    # Rendering requires the 'csound' binary on PATH.
    ctx.find_program('csound')


class compile_csound(Task):
    """Waf task that renders a .csnd source into a .wav file via csound."""

    def __str__(self):
        return self.outputs[0].relpath()

    def keyword(self):
        return 'Generating'

    def run(self):
        bld = self.generator.bld
        src_root = bld.srcnode
        argv = [
            bld.env.CSOUND[0],
            '-o' + self.outputs[0].path_from(src_root),
            self.inputs[0].path_from(src_root),
        ]
        popen_kw = {
            'cwd': src_root.abspath(),
            'stdout': subprocess.PIPE,
            'stderr': subprocess.STDOUT,
        }

        bld.log_command(argv, popen_kw)
        status, output, _ = Utils.run_process(argv, popen_kw)
        if status:
            # Surface csound's own diagnostics on failure.
            sys.stderr.write(output.decode('utf-8'))
        return status


@conf
def rendered_csound(ctx, source, install=None, install_to=None, chmod=0o644):
    """Register a build rule rendering *source* (.csnd) to a .wav file."""
    assert source.endswith('.csnd')

    target = ctx.path.get_bld().make_node(
        os.path.splitext(source)[0] + '.wav')

    tsk = compile_csound(env=ctx.env)
    tsk.set_inputs(ctx.path.find_resource(source))
    tsk.set_outputs(target)
    ctx.add_to_group(tsk)

    if install is None:
        install = ctx.in_group(ctx.GRP_BUILD_MAIN)

    if install:
        if install_to is None:
            install_to = os.path.join(
                ctx.env.DATADIR,
                target.parent.path_from(ctx.bldnode.make_node('data')))
        ctx.install_files(install_to, target, chmod=chmod)
© 2018 Australian Health Promotion Association Issues Addressed: The increasing prevalence among higher-socioeconomic (higher-SES) parents in Perth, Western Australia (WA), to be vaccine-hesitant (VH) is placing herd immunity at risk. Methods: Eighteen one-on-one interviews were conducted; (n = 11) parents who earned >$125 000 pa and expressed ever having concerns surrounding vaccination; (n = 7) health care professionals (HCPs), who provided clinical services. Using grounded theory methodology, data were analysed by constant coding and comparison until themes emerged and an explanatory model was developed. Results: Four main areas leading to vaccine-hesitancy emerged from the data: We are Educated; We Control our Health; Safe from Disease, At Risk from Vaccines; and What We Want. Parents believed themselves capable of making good vaccination decisions based on their higher education levels and self-sourced vaccination information, yet frequently sought reassurance. Healthism concepts were adopted and parents believed lifestyle factors could control for vaccine-preventable diseases (VPD). Risk perception of disease was low and influenced by the remote geographic location of Perth, whilst perceived negative consequences of vaccination were high. A reduced concept of the social responsibility for vaccination and understanding of herd immunity emerged. Parents called for vaccine contents to be listed and requested more information on why vaccination was necessary. Conclusion: Four areas of VH emerged and reflected parents’ belief that higher educational and socioeconomic status, previous successes in life and where they live would result in positive health outcomes and reduce the risk of contracting VPDs. So what?: This study provides new research into the perceptions among higher-SES VH parents who live in Perth, WA. It provides a model that fills a significant gap in information that could be used effectively for future health promotion interventions. Swaney, S. 
and Burns, S. 2018. Exploring reasons for vaccine-hesitancy among higher-SES parents in Perth, Western Australia. Health Promotion Journal of Australia.
from autotest.client.shared import utils


def run_migration_with_reboot(test, params, env):
    """
    KVM migration test:
    1) Get a live VM and clone it.
    2) Verify that the source VM supports migration.  If it does, proceed with
            the test.
    3) Reboot the VM
    4) Send a migration command to the source VM and wait until it's finished.
    5) Kill off the source VM.
    6) Log into the destination VM after the migration is finished.

    @param test: kvm test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")
    # A 2-second cancel delay is only used when mig_cancel is enabled.
    mig_cancel_delay = 2 if params.get("mig_cancel") == "yes" else 0

    try:
        # Reboot the VM in the background while migrations run in a loop.
        reboot_thread = utils.InterruptedThread(vm.reboot, (session,))
        reboot_thread.start()
        try:
            while reboot_thread.isAlive():
                vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay)
        except Exception:
            # If something bad happened in the main thread, ignore exceptions
            # raised in the background thread
            reboot_thread.join(suppress_exception=True)
            raise
        else:
            session = reboot_thread.join()
    finally:
        session.close()
Did you know the Landis Brothers had a younger sister? Meet Nettie May Landis on Saturday, May 4, from 11 a.m. – 3 p.m. for an interactive tea and tour experience. Enjoy a full-service tea at our historic Landis Valley House Hotel. Hear stories and correspondences from Nettie’s international postcard pen pals. View some of Nettie’s artwork and learn how art connected her with others. Discover what it meant for her to be a fashionable lady in a predominantly agricultural community. Along the way, join Nettie as she recalls stories from her life that shaped her into a Pennsylvania German woman of the world. Space is limited. To order tickets, contact Jamie Schuring at 717-581-0590. Hours: 11 a.m. – 3 p.m.
# -*- coding: utf-8 -*- from __future__ import unicode_literals import datetime import os import unittest import warnings from django import http from django.contrib.formtools import preview, utils from django.test import TestCase, override_settings from django.utils._os import upath from django.contrib.formtools.tests.forms import ( HashTestBlankForm, HashTestForm, TestForm, ) success_string = "Done was called!" success_string_encoded = success_string.encode() class TestFormPreview(preview.FormPreview): def get_context(self, request, form): context = super(TestFormPreview, self).get_context(request, form) context.update({'custom_context': True}) return context def get_initial(self, request): return {'field1': 'Works!'} def done(self, request, cleaned_data): return http.HttpResponse(success_string) @override_settings( TEMPLATE_DIRS=( os.path.join(os.path.dirname(upath(__file__)), 'templates'), ), ) class PreviewTests(TestCase): urls = 'django.contrib.formtools.tests.urls' def setUp(self): super(PreviewTests, self).setUp() # Create a FormPreview instance to share between tests self.preview = preview.FormPreview(TestForm) input_template = '<input type="hidden" name="%s" value="%s" />' self.input = input_template % (self.preview.unused_name('stage'), "%d") self.test_data = {'field1': 'foo', 'field1_': 'asdf'} def test_unused_name(self): """ Verifies name mangling to get uniue field name. """ self.assertEqual(self.preview.unused_name('field1'), 'field1__') def test_form_get(self): """ Test contrib.formtools.preview form retrieval. Use the client library to see if we can successfully retrieve the form (mostly testing the setup ROOT_URLCONF process). Verify that an additional hidden input field is created to manage the stage. 
""" response = self.client.get('/preview/') stage = self.input % 1 self.assertContains(response, stage, 1) self.assertEqual(response.context['custom_context'], True) self.assertEqual(response.context['form'].initial, {'field1': 'Works!'}) def test_form_preview(self): """ Test contrib.formtools.preview form preview rendering. Use the client library to POST to the form to see if a preview is returned. If we do get a form back check that the hidden value is correctly managing the state of the form. """ # Pass strings for form submittal and add stage variable to # show we previously saw first stage of the form. self.test_data.update({'stage': 1, 'date1': datetime.date(2006, 10, 25)}) response = self.client.post('/preview/', self.test_data) # Check to confirm stage is set to 2 in output form. stage = self.input % 2 self.assertContains(response, stage, 1) def test_form_submit(self): """ Test contrib.formtools.preview form submittal. Use the client library to POST to the form with stage set to 3 to see if our forms done() method is called. Check first without the security hash, verify failure, retry with security hash and verify success. """ # Pass strings for form submittal and add stage variable to # show we previously saw first stage of the form. self.test_data.update({'stage': 2, 'date1': datetime.date(2006, 10, 25)}) response = self.client.post('/preview/', self.test_data) self.assertNotEqual(response.content, success_string_encoded) hash = self.preview.security_hash(None, TestForm(self.test_data)) self.test_data.update({'hash': hash}) response = self.client.post('/preview/', self.test_data) self.assertEqual(response.content, success_string_encoded) def test_bool_submit(self): """ Test contrib.formtools.preview form submittal when form contains: BooleanField(required=False) Ticket: #6209 - When an unchecked BooleanField is previewed, the preview form's hash would be computed with no value for ``bool1``. 
However, when the preview form is rendered, the unchecked hidden BooleanField would be rendered with the string value 'False'. So when the preview form is resubmitted, the hash would be computed with the value 'False' for ``bool1``. We need to make sure the hashes are the same in both cases. """ self.test_data.update({'stage': 2}) hash = self.preview.security_hash(None, TestForm(self.test_data)) self.test_data.update({'hash': hash, 'bool1': 'False'}) with warnings.catch_warnings(record=True): response = self.client.post('/preview/', self.test_data) self.assertEqual(response.content, success_string_encoded) def test_form_submit_good_hash(self): """ Test contrib.formtools.preview form submittal, using a correct hash """ # Pass strings for form submittal and add stage variable to # show we previously saw first stage of the form. self.test_data.update({'stage': 2}) response = self.client.post('/preview/', self.test_data) self.assertNotEqual(response.content, success_string_encoded) hash = utils.form_hmac(TestForm(self.test_data)) self.test_data.update({'hash': hash}) response = self.client.post('/preview/', self.test_data) self.assertEqual(response.content, success_string_encoded) def test_form_submit_bad_hash(self): """ Test contrib.formtools.preview form submittal does not proceed if the hash is incorrect. """ # Pass strings for form submittal and add stage variable to # show we previously saw first stage of the form. 
self.test_data.update({'stage': 2}) response = self.client.post('/preview/', self.test_data) self.assertEqual(response.status_code, 200) self.assertNotEqual(response.content, success_string_encoded) hash = utils.form_hmac(TestForm(self.test_data)) + "bad" self.test_data.update({'hash': hash}) response = self.client.post('/previewpreview/', self.test_data) self.assertNotEqual(response.content, success_string_encoded) class FormHmacTests(unittest.TestCase): def test_textfield_hash(self): """ Regression test for #10034: the hash generation function should ignore leading/trailing whitespace so as to be friendly to broken browsers that submit it (usually in textareas). """ f1 = HashTestForm({'name': 'joe', 'bio': 'Speaking español.'}) f2 = HashTestForm({'name': ' joe', 'bio': 'Speaking español. '}) hash1 = utils.form_hmac(f1) hash2 = utils.form_hmac(f2) self.assertEqual(hash1, hash2) def test_empty_permitted(self): """ Regression test for #10643: the security hash should allow forms with empty_permitted = True, or forms where data has not changed. """ f1 = HashTestBlankForm({}) f2 = HashTestForm({}, empty_permitted=True) hash1 = utils.form_hmac(f1) hash2 = utils.form_hmac(f2) self.assertEqual(hash1, hash2)
This two-part workshop is a combination of lecture and hands-on web building. A website is still a critical tool in your online marketing arsenal. This class is NOT for complete beginners. If you’ve tinkered with one or two or have one that just isn’t working, this workshop will help you take a big step forward using Wix.com, one of the top do-it-yourself website development tools available. In this interactive, two part, hands-on workshop you’ll learn the 10 simple steps to take a website from start to finish by finally building a simple site, in class, by yourself. You’ll end this class with at least two pages of a website, ready to post live. Instructor Beth Miller is a quasi-retired marketing communication professional who counsels small business owners on taking full advantage of social media and online marketing tools. After a 25+ year career in corporate marketing communications, Beth began consulting 20 years ago with technology, services, non-profit, and industrial clients across the US. Now a resident of Livingston, TX, she is Communications Director of Friends of Lake Livingston, a Texas Master Gardener and Master Naturalist, and Board President of Childrenz Haven. That is, when she’s not bird watching, hanging with her grandkids, or caring for her orchid collection. Reasonable accommodations for persons with disabilities will be made if requested at least two weeks in advance. Contact the SHSU SBDC at 936-294-3737 for accommodations, or if you have questions about this class.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <steadymark - markdown-based test runner for python>
# Copyright (C) <2012-2020>  Gabriel Falcão <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import sys
import importlib.util

from optparse import OptionParser
from steadymark.version import version
from steadymark.runner import Runner


def run(filenames):
    """Run the steadymark test runner over each markdown file given."""
    for filename in filenames:
        runner = Runner(filename)
        runner.run()


def _load_bootstrap(path):
    """Execute *path* and register it as the ``steadymark_bootstrap`` module.

    Drop-in replacement for ``imp.load_source("steadymark_bootstrap", path)``:
    the ``imp`` module has been deprecated since Python 3.4 and was removed
    in Python 3.12.  Like ``imp.load_source``, the loaded module is placed
    in ``sys.modules`` under the fixed name before execution.
    """
    spec = importlib.util.spec_from_file_location("steadymark_bootstrap", path)
    module = importlib.util.module_from_spec(spec)
    sys.modules["steadymark_bootstrap"] = module
    spec.loader.exec_module(module)
    return module


def main():
    """Parse command-line options and run steadymark.

    With no positional arguments, defaults to testing ``README.md``.
    A bootstrap file given via ``-b``/``--bootstrap`` is imported before
    any tests run.
    """
    parser = OptionParser()
    parser.add_option(
        "-b",
        "--bootstrap",
        dest="bootstrap_file",
        help="A path to a python file to be loaded before steadymark runs the tests",
    )

    (options, args) = parser.parse_args()
    if options.bootstrap_file:
        _load_bootstrap(options.bootstrap_file)

    run(args or ["README.md"])


__all__ = ["run", "Runner", "version"]

if __name__ == "__main__":
    main()
The Asuogyaman District Directorate of the Ghana Education Service (GES) is worried over “insane upsurge” in teenage pregnancy at Anchiase, a farming community in the district. At least 12 female pupils got impregnated in 2018 in the District and have since stopped schooling. The situation has been partly blamed on high poverty rate among inhabitants who are unable to provide basic needs of their children including food. In a desperate effort to find food, these school children fall prey to sexual predators. Majority of the basic school pupils in Anchiansa community can neither read nor write and in order to reverse that trend, Mr. Addo as a native of Anchianse Community is working hard to solicit support for the construction of a library in the community. Mr. Nte who is also the CEO of Muzuu Farms on his part provided special packages of educational materials to school pupils still persevering in their education in the community.
### # Copyright 2015-2020, Institute for Systems Biology # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ### from __future__ import print_function from builtins import str import logging import json import traceback import requests import os import re from os.path import join, dirname, exists from argparse import ArgumentParser from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned os.environ.setdefault("DJANGO_SETTINGS_MODULE", "isb_cgc.settings") from isb_cgc import secret_settings, settings import django django.setup() from google_helpers.bigquery.bq_support import BigQuerySupport from projects.models import Program, Project, Attribute, Attribute_Ranges, Attribute_Display_Values, DataSource, DataVersion, DataNode from django.contrib.auth.models import User isb_superuser = User.objects.get(username="isb") logger = logging.getLogger('main_logger') ranges_needed = { 'wbc_at_diagnosis': 'by_200', 'event_free_survival_time_in_days': 'by_500', 'days_to_death': 'by_500', 'days_to_last_known_alive': 'by_500', 'days_to_last_followup': 'by_500', 'year_of_diagnosis': 'year', 'days_to_birth': 'by_negative_3k', 'year_of_initial_pathologic_diagnosis': 'year', 'age_at_diagnosis': None, 'age_at_index': None } ranges = { 'by_200': [{'first': "200", "last": "1400", "gap": "200", "include_lower": True, "unbounded": True, "include_upper": True, 'type': 'F', 'unit': '0.01'}], 'by_negative_3k': [{'first': "-15000", "last": "-5000", "gap": "3000", "include_lower": True, 
"unbounded": True, "include_upper": False, 'type': 'I'}], 'by_500': [{'first': "500", "last": "6000", "gap": "500", "include_lower": False, "unbounded": True, "include_upper": True, 'type': 'I'}], 'year': [{'first': "1976", "last": "2015", "gap": "5", "include_lower": True, "unbounded": False, "include_upper": False, 'type': 'I'}] } SOLR_TYPES = { 'STRING': "string", "FLOAT": "pfloat", "INTEGER": "plong", "DATE": "pdate" } SOLR_SINGLE_VAL = ["case_barcode", "case_gdc_id", "case_pdc_id","program_name","project_short_name"] ATTR_SET = {} DISPLAY_VALS = {} SOLR_URI = settings.SOLR_URI SOLR_LOGIN = settings.SOLR_LOGIN SOLR_PASSWORD = settings.SOLR_PASSWORD SOLR_CERT = settings.SOLR_CERT def add_data_versions(dv_set): for dv in dv_set: try: dv_obj = DataVersion.objects.get(name=dv['name']) logger.warning("[WARNING] Data Version {} already exists! Skipping.".format(dv['name'])) except ObjectDoesNotExist: progs = Program.objects.filter(name__in=dv['programs'], active=True, owner=isb_superuser, is_public=True) obj, created = DataVersion.objects.update_or_create(name=dv['name'], data_type=dv['type'], version=dv['ver']) dv_to_prog = [] for prog in progs: dv_to_prog.append(DataVersion.programs.through(dataversion_id=obj.id, program_id=prog.id)) DataVersion.programs.through.objects.bulk_create(dv_to_prog) logger.info("Data Version created: {}".format(obj)) except Exception as e: logger.error("[ERROR] Data Version {} may not have been added!".format(dv['name'])) logger.exception(e) def add_data_sources(sources, build_attrs=True, link_attr=True): try: attrs_to_srcs = [] for src in sources: try: obj = DataSource.objects.get(name=src['name']) logger.warning("[WARNING] Source with the name {} already exists - updating ONLY.".format(src['name'])) except ObjectDoesNotExist as e: obj, created = DataSource.objects.update_or_create( name=src['name'], version=DataVersion.objects.get(version=src['version'], data_type=src['version_type']), source_type=src['source_type'], ) progs = 
Program.objects.filter(name__in=src['programs']) src_to_prog = [] for prog in progs: src_to_prog.append(DataSource.programs.through(datasource_id=obj.id, program_id=prog.id)) DataSource.programs.through.objects.bulk_create(src_to_prog) nodes = DataNode.objects.filter(short_name__in=src['nodes']) node_to_src = [] for node in nodes: node_to_src.append(DataNode.data_sources.through(datasource_id=obj.id, datanode_id=node.id)) DataNode.data_sources.through.objects.bulk_create(node_to_src) logger.info("Data Source created: {}".format(obj.name)) source_attrs = list(obj.get_source_attr(all=True).values_list('name',flat=True)) if src['source_type'] == DataSource.SOLR: schema_src = src['schema_source'].split('.') schema = BigQuerySupport.get_table_schema(schema_src[0],schema_src[1],schema_src[2]) link_attrs = [] solr_schema = [] solr_index_strings = [] for field in schema: if build_attrs: if field['name'] not in ATTR_SET: attr_type = Attribute.CATEGORICAL if (not re.search(r'(_id|_barcode)', field['name']) and field['type'] == "STRING") else Attribute.STRING if field['type'] == "STRING" else Attribute.CONTINUOUS_NUMERIC ATTR_SET[field['name']] = { 'name': field['name'], "display_name": field['name'].replace("_", " ").title() if re.search(r'_', field['name']) else field['name'], "type": attr_type, 'solr_collex': [], 'bq_tables': [], 'display': ( (attr_type == Attribute.STRING and not re.search('_id|_barcode',field['name'].lower())) or attr_type == Attribute.CATEGORICAL or field['name'].lower() in ranges_needed) } attr = ATTR_SET[field['name']] attr['solr_collex'].append(src['name']) if attr['name'] in DISPLAY_VALS: if 'preformatted_values' in DISPLAY_VALS[attr['name']]: attr['preformatted_values'] = True else: if 'display_vals' not in attr: attr['display_vals'] = [] attr['display_vals'].extend(DISPLAY_VALS[attr['name']]['vals']) if attr['name'] in DISPLAY_VALS: if 'preformatted_values' in DISPLAY_VALS[attr['name']]: attr['preformatted_values'] = True else: 
attr['display_vals'] = DISPLAY_VALS[attr['name']]['vals'] if 'range' not in attr: if attr['name'].lower() in ranges_needed: attr['range'] = ranges.get(ranges_needed.get(attr['name'], ''), []) elif link_attr and field['name'] not in source_attrs: link_attrs.append(field['name']) solr_schema.append({ "name": field['name'], "type": SOLR_TYPES[field['type']], "multiValued": True if src['aggregated'] and field['name'] not in SOLR_SINGLE_VAL else False, "stored": True }) if src['aggregated'] and field['name'] not in SOLR_SINGLE_VAL: solr_index_strings.append("f.{}.split=true&f.{}.separator=|".format(field['name'],field['name'])) for la in link_attrs: try: a = Attribute.objects.get(name=la) attrs_to_srcs.append(Attribute.data_sources.through(attribute_id=a.id,datasource_id=obj.id)) except Exception as e: if isinstance(e,MultipleObjectsReturned): logger.info("More than one attribute with the name {} was found!".format(la)) a = Attribute.objects.filter(name=la).first() attrs_to_srcs.append(Attribute.data_sources.through(attribute_id=a.id,datasource_id=obj.id)) elif isinstance(e,ObjectDoesNotExist): logger.info("Attribute {} doesn't exist--can't add, skipping!".format(la)) with open("{}_solr_schemas.json".format(src['name']), "w") as schema_outfile: json.dump(solr_schema, schema_outfile) schema_outfile.close() with open("{}_solr_index_vars.txt".format(src['name']), "w") as solr_index_string: solr_index_string.write("&{}".format("&".join(solr_index_strings))) solr_index_string.close() # # add core to Solr # # sudo -u solr /opt/bitnami/solr/bin/solr create -c <solr_name> -s 2 -rf 2 # core_uri = "{}/solr/admin/cores?action=CREATE&name={}".format(settings.SOLR_URI,solr_name) # core_create = requests.post(core_uri, auth=(SOLR_LOGIN, SOLR_PASSWORD), verify=SOLR_CERT) # # # add schema to core # schema_uri = "{}/solr/{}/schema".format(settings.SOLR_URI,solr_name) # schema_load = requests.post(schema_uri, data=json.dumps({"add-field": solr_schema[src['name']]}), # 
headers={'Content-type': 'application/json'}, auth=(SOLR_LOGIN, SOLR_PASSWORD), verify=SOLR_CERT) # # # query-to-file the table # # OR # # export from BQ console into GCS # # # pull file to local # # gsutil cp gs://<BUCKET>/<CSV export> ./ # # # POST to Solr core # index_uri = "{}/solr/{}/update?commit=yes{}".format(settings.SOLR_URI,solr_name,"&".join(solr_index_vars)) # index_load = requests.post(index_uri, files={'file': open('export.csv', 'rb')}, # headers={'Content-type': 'application/csv'}, auth=(SOLR_LOGIN, SOLR_PASSWORD), verify=SOLR_CERT) Attribute.data_sources.through.objects.bulk_create(attrs_to_srcs) except Exception as e: logger.error("[ERROR] Data Source {} may not have been added!".format(obj.name)) logger.exception(e) def add_attributes(attr_set): try: for attr in attr_set: try: obj = Attribute.objects.get(name=attr['name'], data_type=attr['type']) logger.info("Attribute {} already located in the database - just updating...".format(attr['name'])) except ObjectDoesNotExist: logger.info("Attribute {} not found - creating".format(attr['name'])) obj, created = Attribute.objects.update_or_create( name=attr['name'], display_name=attr['display_name'], data_type=attr['type'], preformatted_values=True if 'preformatted_values' in attr else False, is_cross_collex=True if 'cross_collex' in attr else False, default_ui_display=attr['display'] ) except Exception as e: if isinstance(e,MultipleObjectsReturned): logger.info("More than one attribute with the name {} was found!".format(attr['name'])) obj = Attribute.objects.filter(name=attr['name'], data_type=attr['type']).first() if 'range' in attr and not len(Attribute_Ranges.objects.select_related('attribute').filter(attribute=obj)): if len(attr['range']): for attr_range in attr['range']: Attribute_Ranges.objects.update_or_create( attribute=obj, **attr_range ) else: Attribute_Ranges.objects.update_or_create( attribute=obj ) if 'display_vals' in attr and not 
len(Attribute_Display_Values.objects.select_related('attribute').filter(attribute=obj)): for dv in attr['display_vals']: Attribute_Display_Values.objects.update_or_create( raw_value=dv['raw_value'], display_value=dv['display_value'], attribute=obj ) if 'solr_collex' in attr: attr_sources = obj.get_data_sources(DataSource.SOLR, all=True) missing_sources = [x for x in attr['solr_collex'] if x not in attr_sources] if len(missing_sources): sources = DataSource.objects.filter(name__in=missing_sources) attr_to_src = [] for src in sources: attr_to_src.append(Attribute.data_sources.through(datasource_id=src.id, attribute_id=obj.id)) Attribute.data_sources.through.objects.bulk_create(attr_to_src) if 'bq_tables' in attr: attr_sources = obj.get_data_sources(DataSource.BIGQUERY, all=True) missing_sources = [x for x in attr['bq_tables'] if x not in attr_sources] if len(missing_sources): sources = DataSource.objects.filter(name__in=missing_sources) attr_to_src = [] for src in sources: attr_to_src.append(Attribute.data_sources.through(datasource_id=src.id, attribute_id=obj.id)) Attribute.data_sources.through.objects.bulk_create(attr_to_src) except Exception as e: logger.error("[ERROR] Attribute {} may not have been added!".format(attr['name'])) logger.exception(e) def copy_attrs(from_data_sources, to_data_sources): to_sources = DataSource.objects.filter(name__in=to_data_sources) from_sources = DataSource.objects.filter(name__in=from_data_sources) to_sources_attrs = to_sources.get_source_attrs() bulk_add = [] for fds in from_sources: from_source_attrs = fds.attribute_set.exclude(id__in=to_sources_attrs['ids']) logger.info("Copying {} attributes from {} to: {}.".format( len(from_source_attrs.values_list('name',flat=True)), fds.name, "; ".join(to_data_sources), )) for attr in from_source_attrs: for ds in to_sources: bulk_add.append(Attribute.data_sources.through(attribute_id=attr.id, datasource_id=ds.id)) Attribute.data_sources.through.objects.bulk_create(bulk_add) def main(config, 
make_attr=False): try: if 'programs' in config: for prog in config['programs']: try: obj = Program.objects.get(name=prog['name'], owner=isb_superuser, active=True, is_public=True) logger.info("[STATUS] Program {} found - skipping creation.".format(prog)) except ObjectDoesNotExist: logger.info("[STATUS] Program {} not found - creating.".format(prog)) obj = Program.objects.update_or_create(owner=isb_superuser, active=True, is_public=True, **prog) if 'projects' in config: for proj in config['projects']: program = Program.objects.get(name=proj['program'], owner=isb_superuser, active=True) try: obj = Project.objects.get(name=proj['name'], owner=isb_superuser, active=True, program=program) logger.info("[STATUS] Project {} found - skipping.".format(proj['name'])) except ObjectDoesNotExist: logger.info("[STATUS] Project {} not found - creating.".format(proj['name'])) obj = Project.objects.update_or_create(name=proj['name'], owner=isb_superuser, active=True, program=program) if 'versions' in config: add_data_versions(config['versions']) # Preload all display value information, as we'll want to load it into the attributes while we build that set if 'display_values' in config and exists(join(dirname(__file__), config['display_values'])): attr_vals_file = open(join(dirname(__file__), config['display_values']), "r") line_reader = attr_vals_file.readlines() for line in line_reader: line = line.strip() line_split = line.split(",") if line_split[0] not in DISPLAY_VALS: DISPLAY_VALS[line_split[0]] = {} if line_split[1] == 'NULL': DISPLAY_VALS[line_split[0]]['preformatted_values'] = True else: DISPLAY_VALS[line_split[0]]['vals'] = [{'raw_value': line_split[1], 'display_value': line_split[2]}] else: DISPLAY_VALS[line_split[0]]['vals'].append({'raw_value': line_split[1], 'display_value': line_split[2]}) attr_vals_file.close() if 'data_sources' in config: add_data_sources(config['data_sources']) len(ATTR_SET) and make_attr and add_attributes([ATTR_SET[x] for x in ATTR_SET]) except 
Exception as e: logging.exception(e) if __name__ == "__main__": cmd_line_parser = ArgumentParser(description="Extract a data source from BigQuery and ETL it into Solr") cmd_line_parser.add_argument('-j', '--json-config-file', type=str, default='', help="JSON settings file") cmd_line_parser.add_argument('-a', '--parse_attributes', type=str, default='False', help="Attempt to create/update attributes from the sources") args = cmd_line_parser.parse_args() if not len(args.json_config_file): logger.info("[ERROR] You must supply a JSON settings file!") cmd_line_parser.print_help() exit(1) if not exists(join(dirname(__file__),args.json_config_file)): logger.info("[ERROR] JSON config file {} not found.".format(args.json_config_file)) exit(1) f = open(join(dirname(__file__),args.json_config_file), "r") settings = json.load(f) main(settings, (args.parse_attributes == 'True'))
Thurrock Accordion Band meet to practice on the Church premises every week and host Club Evenings once a month. Club Evenings welcome anyone with an interest in Accordion music, whether or not they play. Other instruments are also welcomed. Members play from Club-provided music and have the opportunity to play their own pieces as solos. On Band nights members try to perfect and enlarge the repertoire in preparation for playing out when requested and for participation in competitive Music Festivals. Some members attend day or weekend gatherings for instruction or socialising. Cost is £2.00 which includes music and refreshments. If you want to know more then use the form below and someone will reply to you as soon as possible. When: Thursday evenings, from 7.30pm. Club Evenings on the second Thursday of the month.
import pygame
from pygame.locals import *


class TextField(pygame.sprite.Sprite):
    """A single-line text-input box rendered with pygame.

    The field is sized to hold `maxChars` characters (measured as a run of
    'X' glyphs), draws a label to its left, and shows a blinking caret while
    selected. With password=True the stored text is displayed as asterisks.
    """

    def __init__(self, pos, maxChars, buttonName, font, password=False):
        # pos: (x, y) centre of the input box; buttonName: label drawn to the left;
        # font: a pygame.font.Font used for all measuring and rendering.
        pygame.sprite.Sprite.__init__(self)
        self.pos = pos
        self.selected = False   # True while the field has keyboard focus
        self.blink = False      # caret visibility toggle
        self.password = password
        self.timer = 0          # frame counter driving the caret blink
        self.name = buttonName
        self.text_color = (0, 0, 0)
        self.maxChars = maxChars
        self.message = ""          # actual text content
        self.display_message = ""  # what is drawn (asterisks in password mode)
        # Width/height are derived from a string of maxChars 'X' characters.
        temp_message = ""
        for i in range(maxChars):
            temp_message += "X"
        self.font = font
        self.text = self.font.render(self.message, 1, self.text_color)
        self.w = self.font.size(temp_message)[0] + 8
        self.h = self.font.size(temp_message)[1] + 10
        # Inner (fill) rect is 2px smaller than the border rect on each axis.
        self.rect = pygame.Rect(0, 0, self.w - 2, self.h - 2)
        self.rect_border = pygame.Rect(0, 0, self.w, self.h)
        self.nametext = self.font.render(self.name, 1, self.text_color)
        self.rect_name = pygame.Rect(0, 0, self.font.size(self.name)[0], self.font.size(self.name)[1])
        # Box is shifted right by half the label width; label sits left of the box.
        self.rect.center = (pos[0] + self.font.size(self.name)[0] / 2, pos[1])
        self.rect_border.center = (pos[0] + self.font.size(self.name)[0] / 2, pos[1])
        self.rect_name.center = (pos[0] - self.w / 2 - 6, pos[1])

    def is_mouse_over(self, mousePos):
        """Return True when mousePos falls inside the input box rect."""
        if mousePos[0] < self.rect.x or mousePos[0] > self.rect.x + self.rect.w or mousePos[1] < self.rect.y or mousePos[1] > self.rect.y + self.rect.h:
            return False
        return True

    def set_pos(self, pos):
        """Move the field (box, border and label) so it is centred on pos."""
        self.pos = pos
        self.rect.center = (pos[0] + self.font.size(self.name)[0] / 2, pos[1])
        self.rect_border.center = (pos[0] + self.font.size(self.name)[0] / 2, pos[1])
        self.rect_name.center = (pos[0] - self.w / 2 - 6, pos[1])

    def set_name(self, name):
        """Change the label text and recompute all dependent rectangles."""
        self.name = name
        self.nametext = self.font.render(self.name, 1, self.text_color)
        self.rect = pygame.Rect(0, 0, self.w - 2, self.h - 2)
        self.rect_border = pygame.Rect(0, 0, self.w, self.h)
        self.rect_name = pygame.Rect(0, 0, self.font.size(self.name)[0], self.font.size(self.name)[1])
        self.rect.center = (self.pos[0] + self.font.size(self.name)[0] / 2, self.pos[1])
        self.rect_border.center = (self.pos[0] + self.font.size(self.name)[0] / 2, self.pos[1])
        self.rect_name.center = (self.pos[0] - self.w / 2 - 6, self.pos[1])

    def set_maxchars(self, maxChars):
        """Change capacity and resize the box accordingly.

        NOTE(review): unlike set_name/set_pos this does not re-centre the new
        rects, so the box jumps to (0,0) until set_pos is called — confirm intent.
        """
        self.maxChars = maxChars
        temp_message = ""
        for i in range(maxChars):
            temp_message += "X"
        self.w = self.font.size(temp_message)[0] + 8
        self.h = self.font.size(temp_message)[1] + 10
        self.rect = pygame.Rect(0, 0, self.w - 2, self.h - 2)
        self.rect_border = pygame.Rect(0, 0, self.w, self.h)

    def set_message(self, message):
        """Replace the field's content wholesale and re-render it."""
        self.message = message
        self.display_message = message
        if self.password == False:
            self.text = self.font.render(self.message, 1, self.text_color)
        else:
            # Mask every character with '*' in password mode.
            self.display_message = ""
            for i in self.message:
                self.display_message += "*"
            self.text = self.font.render(self.display_message, 1, self.text_color)

    def update_message(self, message):
        """Apply one keystroke: append a character, backspace, or commit on Enter.

        `message` is a single character; codes 8 (backspace) and 13 (enter)
        are handled specially.
        """
        if ord(message) != 8 and ord(message) != 13:  # not backspace key or enter key
            if len(self.message) < self.maxChars:
                self.message += message
                self.display_message += message
                if self.password == False:
                    self.text = self.font.render(self.display_message, 1, self.text_color)
                else:
                    self.display_message = ""
                    for i in self.message:
                        self.display_message += "*"
                    self.text = self.font.render(self.display_message, 1, self.text_color)
        elif ord(message) == 8:  # backspace key
            if len(self.message) > 0:
                self.message = self.message[:-1]
                self.display_message = self.message
                if self.password == False:
                    self.text = self.font.render(self.display_message, 1, self.text_color)
                else:
                    self.display_message = ""
                    for i in self.message:
                        self.display_message += "*"
                    self.text = self.font.render(self.display_message, 1, self.text_color)
        elif ord(message) == 13:  # enter key
            # Enter drops focus and resets the caret state.
            self.blink = False
            self.timer = 0
            self.selected = False

    def update(self, events, mousePos):
        """Process one frame of events: focus on click, type while focused, blink caret."""
        for event in events:
            if event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    if self.is_mouse_over(mousePos) == True:
                        self.selected = True
                    else:
                        self.selected = False
            elif event.type == pygame.KEYDOWN:
                try:
                    if self.selected == True:
                        # NOTE(review): chr(event.key) raises for non-character keys
                        # (large pygame keycodes); the bare except silently drops them —
                        # it also hides any other error raised while typing.
                        self.update_message(str(chr(event.key)))
                except:
                    pass
        if self.selected == True:
            # Toggle the caret roughly every 20 frames.
            self.timer += 1
            if self.timer > 20:
                self.timer = 0
                self.blink = not self.blink

    def draw(self, screen):
        """Draw border, fill (highlighted when focused), caret, label and text."""
        pygame.draw.rect(screen, (0, 0, 0), self.rect_border)
        if self.selected == True:
            pygame.draw.rect(screen, (225, 225, 225), self.rect)
            if self.blink == True:
                # Caret: an 8px-wide block just after the rendered text.
                rectNew = pygame.Rect(self.rect.x + self.font.size(self.display_message)[0] + 8, self.rect.y + 4, 8, self.rect.h - 9)
                pygame.draw.rect(screen, (0, 0, 0), rectNew)
        else:
            pygame.draw.rect(screen, (255, 255, 255), self.rect)
        screen.blit(self.nametext, self.rect_name)
        screen.blit(self.text, self.rect)
We’ve been hearing about the Star Wars-themed coming-of-age tale 5-25-77 for nearly a decade. After starting production in 2004, the film debuted at the Hamptons International Film Festival in October of 2008 under the title ’77, but ended up going back to a release date that Star Wars fans will recognize as the exact day that George Lucas first took us into a galaxy far, far away. Since then, the film has gone through several different cuts, and there have been a couple instances of the movie seemingly nearing release over the years, but nothing ever came of it. Thankfully, it looks like 2017 will finally be the year that 5-25-77 makes a debut on the big screen, just in time for the 40th anniversary of the original Star Wars. A new trailer has been released to announce the arrival of Patrick Read Johnson‘s film that has been awaited by Star Wars fans for years, and you can watch it below. There has been buzz here and there about this movie, with various rough cuts being screened over the years. Reactions around the web have compared it to Dazed and Confused or Almost Famous with a big dose of heart driving this story of an aspiring filmmaker seeing a certain movie for the first time. Plenty of us have been inspired by Star Wars, but Patrick Read Johnson decided to pour that passion into a film about that passion. Now the film is finished, and it will appropriately be released on May 25, 2017, on the exact 40th anniversary of the original Star Wars. We don’t have any list of theaters playing the film just yet, but distributor Filmio has been teasing an announcement launching their platform on May 4th for Star Wars Day, so we’ll likely find out when and where we can see the movie soon enough. There might even be a VOD release to go along with the presumably limited theatrical release. However, if you’re lucky enough to be in Northeastern Illinois, you can buy a ticket to the film’s premiere in Waukegan. 
Tickets are available to purchase at the Genesee Theatre website right here, and there’s even an option that will allow you to meet and greet director Patrick Read Johnson afterward at the premiere reception. Since I live in the area, I’ll be heading up there to check out the movie. If you see me around, please say hello!
# -*- coding: utf-8 -*- from django.contrib import admin from .models import Validacion, Registro, Categoria @admin.register(Categoria) class CategoriaAdmin(admin.ModelAdmin): list_display = ('espacio', 'nombre', 'ayuda') @admin.register(Validacion) class ValidacionAdmin(admin.ModelAdmin): list_display = ('espacio', 'nombre', 'es_donacion', 'es_metalico', 'es_oficial') @admin.register(Registro) class RegistroAdmin(admin.ModelAdmin): date_hierarchy = 'fecha_factura' fieldsets = ( ('Datos de sesión', { 'fields': ('espacio', 'miembro', 'fecha_formulario'), }), ('Datos de usuario', { 'fields': ('concepto', 'fecha_factura', 'categoria', 'importe', 'es_donado', 'foto', 'factura'), }), ('Datos de contabilidad', { 'fields': ('validacion', 'fecha_pago', 'notas'), }), ) list_display = ('fecha_', 'concepto', 'categoria_', 'importe', 'miembro') list_filter = ('espacio', 'categoria', 'miembro') search_fields = ('concepto', ) readonly_fields = ('fecha_formulario', ) def fecha_(self, obj): return obj.fecha_factura.strftime("%d/%m/%y") def categoria_(self, obj): return obj.categoria.nombre
Nice report! Look forward to seeing more. Also look forward to seeing the board progress with the new 28mm stuff. The whole shebang looks excellent. Very envious. I'm looking forward to reading more of your battle reports, especially the big CoC event.
"""Piston API handlers exposing Members, Votes, Parties, Tags and Agendas."""
from datetime import datetime
import urllib
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.db.models import Count
from piston.handler import BaseHandler
from piston.utils import rc
from knesset.mks.models import Member, Party, Membership
from knesset.laws.models import Vote, VoteAction
from knesset.agendas.models import Agenda
from tagging.models import Tag, TaggedItem
import math
from django.forms import model_to_dict

DEFAULT_PAGE_LEN = 20


def limit_by_request(qs, request):
    """Slice qs according to the optional 'num' and 'page' GET parameters."""
    if 'num' in request.GET:
        num = int(request.GET['num'])
        page = 'page' in request.GET and int(request.GET['page']) or 0
        return qs[page*num:(page+1)*num]
    return qs


class MemberHandler(BaseHandler):
    """Read-only API for Knesset members and their derived statistics."""
    fields = ('id', 'url', 'name','party', 'img_url', 'votes_count', 'votes_per_month', 'service_time',
              'discipline','average_weekly_presence',
              'committee_meetings_per_month','bills_proposed','bills_passed_pre_vote','bills_passed_first_vote','bills_approved',
              'roles', 'average_weekly_presence_rank', 'committees',
              )
    # FIX: was ('GET') — a plain string, not a one-tuple.
    allowed_methods = ('GET',)
    model = Member
    qs = Member.objects.all()

    @classmethod
    def url (self, member):
        return member.get_absolute_url()

    @classmethod
    def party (self, member):
        return member.current_party.name

    @classmethod
    def votes_count (self, member):
        return member.voting_statistics.votes_count()

    @classmethod
    def votes_per_month (self, member):
        return round(member.voting_statistics.average_votes_per_month(),1)

    @classmethod
    def service_time (self, member):
        return member.service_time()

    @classmethod
    def discipline (self, member):
        # Returns None when no discipline score is available.
        x = member.voting_statistics.discipline()
        if x:
            return round(x,2)
        else:
            return None

    @classmethod
    def bills_proposed(self, member):
        return member.bills.count()

    @classmethod
    def bills_passed_pre_vote(self, member):
        # Stages 2-6 mean the bill passed at least the pre-vote.
        return member.bills.filter(Q(stage='2')|Q(stage='3')|Q(stage='4')|Q(stage='5')|Q(stage='6')).count()

    @classmethod
    def bills_passed_first_vote(self, member):
        # Stages 4-6 mean the bill passed at least the first vote.
        return member.bills.filter(Q(stage='4')|Q(stage='5')|Q(stage='6')).count()

    @classmethod
    def bills_approved(self, member):
        return member.bills.filter(stage='6').count()

    @classmethod
    def roles (self, member):
        return member.get_role

    @classmethod
    def average_weekly_presence_rank (self, member):
        ''' Calculate the distribution of presence and place the user on a 5 level scale '''
        SCALE = 5
        rel_location = cache.get('average_presence_location_%d' % member.id)
        if not rel_location:
            # Cache miss (or cached 0): recompute the rank for ALL members at once
            # and cache each result for a day.
            presence_list = sorted(map(lambda member: member.average_weekly_presence(), Member.objects.all()))
            presence_groups = int(math.ceil(len(presence_list) / float(SCALE)))

            for mk in Member.objects.all():
                avg = mk.average_weekly_presence()
                if avg:
                    mk_location = 1 + (presence_list.index(avg) / presence_groups)
                else:
                    mk_location = 0
                cache.set('average_presence_location_%d' % mk.id, mk_location, 60*60*24)
                if mk.id == member.id:
                    rel_location = mk_location
        return rel_location

    @classmethod
    def committees (self, member):
        # Top 5 committees by number of meetings attended, as (name, url) pairs.
        temp_list = member.committee_meetings.values("committee", "committee__name").annotate(Count("id")).order_by('-id__count')[:5]
        return (map(lambda item: (item['committee__name'], reverse('committee-detail', args=[item['committee']])), temp_list))

    @classmethod
    def member (self, member):
        # NOTE(review): self.qs is a Member queryset, which has no 'member' field —
        # this looks like it was written for a Membership queryset; left unchanged.
        qs = self.qs.filter(member=member)
        return map(lambda o: dict(url=o.party.get_absolute_url(),
                                  name=o.party.name,
                                  since=o.start_date,
                                  until=o.end_date,
                                  ), qs)

    def read(self, request, **kwargs):
        """Read members; supports free-text/pk search via the 'q' GET parameter."""
        # FIX: was `if id not in kwargs` — compared the builtin id() function
        # against the kwargs keys, so the condition was always true.
        if 'id' not in kwargs and 'q' in request.GET:
            q = request.GET['q']
            q = urllib.unquote(q)
            qs = self.qs
            try:
                q = int(q)
                return qs.filter(pk=q)
            except ValueError:
                return Member.objects.find(q)
        return super(MemberHandler,self).read(request, **kwargs)


class VoteHandler(BaseHandler):
    """Read-only API for votes, with filtering, ordering and paging."""
    fields = ('url', 'title', 'time',
              'summary','full_text',
              'for_votes', 'against_votes', 'abstain_votes', 'didnt_vote',
              'agendas',
             )
    # FIX: was ('member') — a plain string, not a one-tuple.
    exclude = ('member',)
    allowed_methods = ('GET',)
    model = Vote
    qs = Vote.objects.all()

    def read(self, request, **kwargs):
        ''' returns a vote or a list of votes '''
        qs = self.qs

        if 'id' in kwargs:
            return super(VoteHandler, self).read(request, **kwargs)

        # Renamed local (was `type`, shadowing the builtin).
        vote_type = request.GET.get('type', None)
        order = request.GET.get('order', None)
        days_back = request.GET.get('days_back', None)
        page_len = int(request.GET.get('page_len', DEFAULT_PAGE_LEN))
        page_num = int(request.GET.get('page_num', 0))

        if vote_type:
            qs = qs.filter(title__contains=vote_type)
        if days_back:
            qs = qs.since(days=int(days_back))
        if order:
            qs = qs.sort(by=order)
        return qs[page_len*page_num:page_len*(page_num +1)]

    @classmethod
    def url(self, vote):
        return vote.get_absolute_url()

    @classmethod
    def for_votes(self, vote):
        return vote.get_voters_id('for')

    @classmethod
    def against_votes(self, vote):
        return vote.get_voters_id('against')

    @classmethod
    def abstain_votes(self, vote):
        return vote.get_voters_id('abstain')

    @classmethod
    def didnt_vote(self, vote):
        return vote.get_voters_id('no-vote')

    @classmethod
    def agendas(cls, vote):
        # Augment agenda with reasonings from agendavote and
        # arrange it so that it will be accessible using the
        # agenda's id in JavaScript
        agendavotes = vote.agenda_vote_set.all()
        agendas = [model_to_dict(av.agenda) for av in agendavotes]
        reasonings = [av.reasoning for av in agendavotes]
        text_scores = [av.get_score_display() for av in agendavotes]
        for i in range(len(agendas)):
            agendas[i].update({'reasoning':reasonings[i], 'text_score':text_scores[i]})
        return dict(zip([a['id'] for a in agendas],agendas))


class PartyHandler(BaseHandler):
    """Read-only API for parties, with free-text search via 'q'."""
    fields = ('id', 'name', 'start_date', 'end_date')
    allowed_methods = ('GET',)
    model = Party

    def read(self, request, **kwargs):
        # FIX: was `if id not in kwargs` (builtin id, always true).
        if 'id' not in kwargs and 'q' in request.GET:
            q = request.GET['q']
            q = urllib.unquote(q)
            return Party.objects.find(q)
        # FIX: was super(MemberHandler, self) — wrong class in the super() call.
        return super(PartyHandler,self).read(request, **kwargs)


class TagHandler(BaseHandler):
    """Read-only API for tags, optionally scoped to a tagged object."""
    fields = ('id', 'name', 'number_of_items')
    allowed_methods = ('GET',)
    model = Tag

    def read(self, request, **kwargs):
        """Return a tag by id, the tags of a given object, or all Vote tags."""
        id = None
        if 'id' in kwargs:
            id = kwargs['id']
        if id:
            return Tag.objects.filter(pk=id)
        object_id = None
        ctype = None
        if 'object_id' in kwargs and 'object_type' in kwargs:
            object_id = kwargs['object_id']
            try:
                ctype = ContentType.objects.get(model=kwargs['object_type'])
            except ContentType.DoesNotExist:
                pass
        if object_id and ctype:
            tags_ids = TaggedItem.objects.filter(object_id=object_id).filter(content_type=ctype).values_list('tag', flat=True)
            return Tag.objects.filter(id__in=tags_ids)
        return Tag.objects.usage_for_model(Vote)

    @classmethod
    def number_of_items(self, tag):
        return tag.items.count()


class AgendaHandler(BaseHandler):
    # TODO: Once we have user authentication over the API,
    #       need to expose not only public agendas.
    #       See AgendaManager.get_relevant_for_user(user)
    #       The is true for both read() and number_of_items() methods
    fields = ('id', 'name', 'number_of_items')
    allowed_methods = ('GET',)
    model = Agenda

    def read(self, request, **kwargs):
        """Return an agenda by id, the agendas of a vote, or all public agendas."""
        agendas = Agenda.objects.get_relevant_for_user(user=None)

        # Handle API calls of type /agenda/[agenda_id]
        id = None
        if 'id' in kwargs:
            id = kwargs['id']
        if id is not None:
            return agendas.filter(pk=id)

        # Handle API calls of type /agenda/vote/[vote_id]
        # Used to return the agendas ascribed to a specific vote
        object_id = None
        ctype = None
        if 'object_id' in kwargs and 'object_type' in kwargs:
            object_id = kwargs['object_id']
            try:
                ctype = ContentType.objects.get(model=kwargs['object_type'])
            except ContentType.DoesNotExist:
                pass
        # FIX: was `ctype == 'vote'` — ctype is a ContentType instance, so the
        # comparison to a string was always False and vote filtering never ran.
        if object_id and ctype is not None and ctype.model == 'vote':
            return agendas.filter(votes__id=object_id)
        else:
            return agendas

    @classmethod
    def number_of_items(self, agenda):
        return agenda.agendavotes.count()
Becky Rupp remembers the day she rang the doorbell at LifePath Christian Ministries Women and Children’s Shelter. She’d just gotten out of rehab for drug and alcohol addiction. It was the second time Becky had turned to LifePath for help, and the woman who opened the door, Pam Billet, recognized her. “I’m so happy to see you!” Pam said. Becky broke down, tears streaming down her face. They went back to Pam’s office. For the better part of an hour, Becky cried. Together, they prayed for Becky and her two-year-old daughter. For a long time, Becky just didn’t want to grow up. She numbed herself with drugs and alcohol and shirked her responsibilities. She thought God was done with her. But when she went back to LifePath in 2011 — something changed. She went to church and Bible study and heard scripture in a new way. She realized God still loved her. Even when she messed up, He didn’t give up on her, and neither did the staff at LifePath. The revelation was like a light bulb going off in her head. It changed her life. One of the hardest things about leaving LifePath for Becky was finding a church where she felt safe, where she wasn’t judged. It’s one of the things that drove her to go back to school. Now, she’s in the Women in Ministry Leadership program at Lancaster Bible College. She wants to change the church — to make it a more inviting place for people struggling like she did. Becky just celebrated seven years clean. And whatever she does after she finishes school, she hopes she can help others the way LifePath helped her.
#  Licensed to the Apache Software Foundation (ASF) under one
#  or more contributor license agreements.  See the NOTICE file
#  distributed with this work for additional information
#  regarding copyright ownership.  The ASF licenses this file
#  to you under the Apache License, Version 2.0 (the
#  "License"); you may not use this file except in compliance
#  with the License.  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing,
#  software distributed under the License is distributed on an
#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#  KIND, either express or implied.  See the License for the
#  specific language governing permissions and limitations
#  under the License.

import wx
from cairis.core.armid import *
from EnvironmentListCtrl import EnvironmentListCtrl
from DimensionListCtrl import DimensionListCtrl
from cairis.core.VulnerabilityEnvironmentProperties import VulnerabilityEnvironmentProperties

__author__ = 'Shamal Faily'


class VulnerabilityEnvironmentPanel(wx.Panel):
  """wx panel for editing a vulnerability's per-environment properties.

  Shows an environment list on the left and, for the selected environment,
  a severity combo box and an affected-assets list on the right. Edits are
  held in self.theEnvironmentDictionary (environment name ->
  VulnerabilityEnvironmentProperties) until environmentProperties() is read.
  """

  def __init__(self,parent,dp):
    # dp: database proxy used to populate the list controls and look up
    # inherited properties.
    wx.Panel.__init__(self,parent,VULNERABILITY_PANELENVIRONMENT_ID)
    self.dbProxy = dp
    self.theVulId = None                 # id of the vulnerability being edited (set in loadControls)
    self.theEnvironmentDictionary = {}   # environment name -> properties being edited
    self.theSelectedIdx = -1             # index of the currently selected environment, -1 if none
    mainSizer = wx.BoxSizer(wx.HORIZONTAL)
    environmentBox = wx.StaticBox(self)
    environmentListSizer = wx.StaticBoxSizer(environmentBox,wx.HORIZONTAL)
    mainSizer.Add(environmentListSizer,0,wx.EXPAND)
    self.environmentList = EnvironmentListCtrl(self,VULNERABILITYENVIRONMENT_LISTENVIRONMENTS_ID,self.dbProxy)
    environmentListSizer.Add(self.environmentList,1,wx.EXPAND)
    environmentDimSizer = wx.BoxSizer(wx.VERTICAL)
    mainSizer.Add(environmentDimSizer,1,wx.EXPAND)
    sevBox = wx.StaticBox(self)
    sevSizer = wx.StaticBoxSizer(sevBox,wx.HORIZONTAL)
    environmentDimSizer.Add(sevSizer,0,wx.EXPAND)
    sevSizer.Add(wx.StaticText(self,-1,'Severity'))
    sevList = ['Negligible','Marginal','Critical','Catastrophic']
    self.sevCtrl = wx.ComboBox(self,VULNERABILITYENVIRONMENT_COMBOSEVERITY_ID,choices=sevList,size=wx.DefaultSize,style=wx.CB_READONLY)
    sevSizer.Add(self.sevCtrl,1,wx.EXPAND)
    aSizer = wx.BoxSizer(wx.HORIZONTAL)
    environmentDimSizer.Add(aSizer,1,wx.EXPAND)
    self.assetList = DimensionListCtrl(self,VULNERABILITYENVIRONMENT_LISTASSETS_ID,wx.DefaultSize,'Asset','asset',self.dbProxy)
    assetBox = wx.StaticBox(self)
    assetSizer = wx.StaticBoxSizer(assetBox,wx.HORIZONTAL)
    assetSizer.Add(self.assetList,1,wx.EXPAND)
    aSizer.Add(assetSizer,1,wx.EXPAND)
    self.SetSizer(mainSizer)
    # Keep the dictionary in sync when environments are added/removed in the list.
    self.environmentList.Bind(wx.EVT_LIST_INSERT_ITEM,self.OnAddEnvironment)
    self.environmentList.Bind(wx.EVT_LIST_DELETE_ITEM,self.OnDeleteEnvironment)

  def loadControls(self,vulnerability):
    """Populate the panel from an existing vulnerability's environment properties."""
    # Selection events are unbound while loading so programmatic changes don't
    # trigger the handlers below.
    self.environmentList.Unbind(wx.EVT_LIST_ITEM_SELECTED)
    self.environmentList.Unbind(wx.EVT_LIST_ITEM_DESELECTED)
    self.theVulId = vulnerability.id()
# We load the environment name control before anything else.  Weird stuff happens if we don't do this.  Don't ask me why!!!
    environmentNames = []
    if (len(vulnerability.environmentProperties()) > 0):
      for cp in vulnerability.environmentProperties():
        environmentNames.append(cp.name())
      self.environmentList.load(environmentNames)

      for cp in vulnerability.environmentProperties():
        environmentName = cp.name()
        self.theEnvironmentDictionary[environmentName] = cp
      # Show the first environment's severity and assets by default.
      environmentName = environmentNames[0]
      p = self.theEnvironmentDictionary[environmentName]
      self.sevCtrl.SetStringSelection(p.severity())
      self.assetList.setEnvironment(environmentName)
      self.assetList.load(p.assets())
      self.environmentList.Select(0)
    self.environmentList.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnEnvironmentSelected)
    self.environmentList.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnEnvironmentDeselected)
    self.theSelectedIdx = 0

  def OnEnvironmentSelected(self,evt):
    """Show the severity/assets stored for the newly selected environment."""
    self.theSelectedIdx = evt.GetIndex()
    environmentName = self.environmentList.GetItemText(self.theSelectedIdx)
    p = self.theEnvironmentDictionary[environmentName]
    self.sevCtrl.SetStringSelection(p.severity())
    self.assetList.setEnvironment(environmentName)
    self.assetList.load(p.assets())

  def OnEnvironmentDeselected(self,evt):
    """Save the on-screen edits for the environment losing selection, then clear the controls."""
    self.theSelectedIdx = evt.GetIndex()
    environmentName = self.environmentList.GetItemText(self.theSelectedIdx)
    self.theEnvironmentDictionary[environmentName] = VulnerabilityEnvironmentProperties(environmentName,self.sevCtrl.GetValue(),self.assetList.dimensions())
    self.sevCtrl.SetValue('')
    self.assetList.setEnvironment('')
    self.assetList.DeleteAllItems()
    self.theSelectedIdx = -1

  def OnAddEnvironment(self,evt):
    """Register a newly added environment, seeding from an inherited one when available."""
    self.theSelectedIdx = evt.GetIndex()
    environmentName = self.environmentList.GetItemText(self.theSelectedIdx)
    # Start with empty properties for the new environment.
    self.theEnvironmentDictionary[environmentName] = VulnerabilityEnvironmentProperties(environmentName,'',[])
    self.environmentList.Select(self.theSelectedIdx)
    self.assetList.setEnvironment(environmentName)
    inheritedEnv = self.environmentList.inheritedEnvironment()
    if (inheritedEnv != '' and self.theVulId != None):
      # Pre-populate from the inherited environment's stored properties.
      p = self.dbProxy.inheritedVulnerabilityProperties(self.theVulId,inheritedEnv)
      self.theEnvironmentDictionary[environmentName] = p
      self.sevCtrl.SetStringSelection(p.severity())
      self.assetList.setEnvironment(environmentName)
      self.assetList.load(p.assets())

  def OnDeleteEnvironment(self,evt):
    """Drop the removed environment's properties from the working dictionary."""
    selectedIdx = evt.GetIndex()
    environmentName = self.environmentList.GetItemText(selectedIdx)
    del self.theEnvironmentDictionary[environmentName]
    self.theSelectedIdx = -1

  def environmentProperties(self):
    """Return the edited per-environment properties, flushing any pending on-screen edits."""
    if (self.theSelectedIdx != -1):
      environmentName = self.environmentList.GetItemText(self.theSelectedIdx)
      self.theEnvironmentDictionary[environmentName] = VulnerabilityEnvironmentProperties(environmentName,self.sevCtrl.GetValue(),self.assetList.dimensions())
    return self.theEnvironmentDictionary.values()
The coolest new toys/games since the invention of kites, frisbees, or boomerangs. Great family fun! It's not a kite, but soars in the wind. It's not a frisbee, but it sails back and forth between players. It's not a boomerang, but it can fly in big arcs and always returns! It's not "unidentified", but truly it is a UFO (unusual flying object). Check out this intriguing new toy, and then try one of our contests - they're fun to enter and you may win a free WindBlade! So What's this Crazy Toy About Anyway? WindBlade is a registered trademark of Tidepool Technologies, All Rights Reserved. U.S. Patent #5259804, Other Pat. Pending.
"""Tests for RCSB FASTA fetching logic.""" import pytest import requests import unittest from unittest import mock from pypdb.clients.fasta import fasta_client class TestFastaLogic(unittest.TestCase): @mock.patch.object(requests, "get") @mock.patch.object(fasta_client, "_parse_fasta_text_to_list") def test_get_fasta_file(self, mock_parse_fasta, mock_get): mock_response = mock.Mock() mock_response.ok = True mock_response.text = "fake_fasta_response" mock_get.return_value = mock_response fasta_client.get_fasta_from_rcsb_entry("6TML") mock_get.assert_called_once_with( "https://www.rcsb.org/fasta/entry/6TML") mock_parse_fasta.assert_called_once_with("fake_fasta_response") def test_parse_fasta_file(self): test_fasta_raw_text = """ >6TML_1|Chains Q7,Q8,Q9,q7,q8,q9|ATPTG11|Toxoplasma gondii (strain ATCC 50853 / GT1) (507601) MVRNQRYPASPVQEIFLPEPVPFVQFDQTAPSPNSPPAPLPSPSLSQCEEQKDRYR >6TML_2|Chain i9|ATPTG7|Toxoplasma gondii (strain ATCC 50853 / GT1) (507601) MPSSSSEDAQGGNRFECVSNSTSPRRKNATKDEAACLQPRRSAVSGPREDVLCIR >6TML_32|Chains H1,H2,H3,H4|subunit c|Toxoplasma gondii (strain ATCC 50853 / GT1) (507601) MFFSRLSLSALKAAPAREAL""" self.assertEqual( fasta_client._parse_fasta_text_to_list(test_fasta_raw_text), [ fasta_client.FastaSequence( entity_id="6TML_1", chains=["Q7", "Q8", "Q9", "q7", "q8", "q9"], sequence= "MVRNQRYPASPVQEIFLPEPVPFVQFDQTAPSPNSPPAPLPSPSLSQCEEQKDRYR", fasta_header= "6TML_1|Chains Q7,Q8,Q9,q7,q8,q9|ATPTG11|Toxoplasma gondii (strain ATCC 50853 / GT1) (507601)" ), fasta_client.FastaSequence( entity_id="6TML_2", chains=["i9"], sequence= "MPSSSSEDAQGGNRFECVSNSTSPRRKNATKDEAACLQPRRSAVSGPREDVLCIR", fasta_header= "6TML_2|Chain i9|ATPTG7|Toxoplasma gondii (strain ATCC 50853 / GT1) (507601)" ), fasta_client.FastaSequence( entity_id="6TML_32", chains=["H1", "H2", "H3", "H4"], sequence="MFFSRLSLSALKAAPAREAL", fasta_header= "6TML_32|Chains H1,H2,H3,H4|subunit c|Toxoplasma gondii (strain ATCC 50853 / GT1) (507601)" ) ])
A Corporate Jeep® Reskin for all countries in the EMEA Perimeter. Provide countries with a European reskin that has a jeep.com look & feel. Reflect the EMEA (both Europe & MEA) brand positioning. Promote the alignment of corporate presence within the EMEA perimeter according to this positioning. Ensure templates meet EMEA countries’ needs, avoiding local personalization of websites that may stray from HQ guidelines.
# coding: utf-8
import json

from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView, View

from .models import InfoScreen, Page


class ScreenView(TemplateView):
    """Render an info screen, starting with its first visible page."""

    template_name = 'info_screen/screen.html'

    def get_context_data(self, **kwargs):
        """Add the screen (looked up by UUID) and its first visible page."""
        context = super(ScreenView, self).get_context_data(**kwargs)
        screen = get_object_or_404(InfoScreen, uuid=kwargs['screen_uuid'])
        context['screen'] = screen
        context['page'] = screen.visible_pages().first()
        return context


class ImageView(TemplateView):
    """Render a single page's image, when the page has one."""

    template_name = 'info_screen/image.html'

    def get_context_data(self, **kwargs):
        context = super(ImageView, self).get_context_data(**kwargs)
        page = get_object_or_404(Page, uuid=kwargs['page_uuid'])
        if page.image_file:
            context['image_url'] = page.image_file.url
        return context


class ScreenJsonView(View):
    """JSON endpoint telling the client which page to display next."""

    def get(self, *args, **kwargs):
        params = self.request.GET

        # Resolve the page currently on display, if a numeric id was sent.
        shown_page = None
        page_id = params.get('page_id')
        if page_id is not None and page_id.isdigit():
            shown_page = get_object_or_404(Page, pk=page_id)

        next_page = None
        if 'screen_uuid' in params:
            screen = get_object_or_404(InfoScreen,
                                       uuid=params['screen_uuid'])
            if shown_page is not None:
                # Normal case: advance from the page currently shown.
                next_page = shown_page.next_page(screen)
            elif screen.visible_pages().exists():
                # No current page, i.e. nothing was visible before.
                next_page = screen.visible_pages().first()

        payload = {}
        if next_page:
            payload = {
                'id': next_page.id,
                'url': next_page.show_url(),
                'is_slideshow_page': next_page.is_slideshow_page,
                # Floor of 3 seconds protects the server from rapid polling.
                'delay_in_sec': max(3, next_page.delay_in_sec),
            }
        return HttpResponse(json.dumps(payload))
On November 7, 2018, Imperial announced it had made a final investment decision to develop the Aspen project, located about 45 kilometres northeast of Fort McMurray, Alberta. The project, which is expected to produce about 75,000 barrels of bitumen per day, will include the first major commercial application of next-generation oil sands recovery technology designed to lower greenhouse gas emissions intensity and water use, while improving development economics. Through the application of advanced solvent-assisted, steam-assisted gravity drainage technology at Aspen, Imperial is building on its recently announced commitment to reduce the greenhouse gas emission intensity of its operated oil sands facilities. The new technology is estimated to reduce greenhouse gas emissions intensity and water use intensity by up to 25 percent, compared with traditional steam-assisted technology. Imperial anticipates that Aspen’s emissions intensity will be one of the lowest among in situ oil sands operations across the industry.
from data import *
from ui import *
import client
import theme

# Note for anyone redesigning this gump: at this point in startup no uo data
# (e.g. art.mul graphics, hues) is accessible, because the mul files have not
# been loaded yet.


def create(args):
    """Build the shard-selection gump from the shard list passed in args."""
    gump = GumpMenu("shardlist", 400, 300)
    gump.closable = False

    shards = args["shardlist"]
    if shards:
        # Pressing enter connects to the first shard in the list.
        gump.onEnter = selectFirst
        gump.store["firstName"] = shards[0]
    else:
        gump.onEnter = createShard

    gump.addImage((0, 0), Texture(TextureSource.THEME, "images/background_250x250.png"))

    # One button per shard inside a scrollable area, 28px row pitch.
    scroll = theme.addScrollArea(gump, (20, 20, 210, 137))
    for row, shard in enumerate(shards):
        button = theme.addPythonButton(scroll, (0, row * 28, 190, 25), selectShard)
        button.text = shard
        button.store["shard"] = shard
    scroll.updateScrollbars()

    create_button = theme.addPythonButton(gump, (20, 175, 210, 25), createShard)
    create_button.text = "Create shard"
    exit_button = theme.addPythonButton(gump, (20, 203, 210, 25), shutdown)
    exit_button.text = "Exit"


def createShard(button):
    """Open the shard-creation gump; keep this one open so creation can be cancelled."""
    client.openGump("createshard")
    return False


def shutdown(button):
    """Quit the client."""
    client.shutdown()
    return True


def selectFirst(gump):
    """Enter handler: connect to the first shard in the list."""
    client.setShard(gump.store["firstName"])
    return True


def selectShard(button):
    """Button handler: connect to the shard stored on the pressed button."""
    client.setShard(button.store["shard"])
    return True
Welcome! If you are looking for a Posters & Prints product, you are in the right place. You are reading our article about the Human Anatomy Print Set (Buy 3, Get 1 Free) — a set of human anatomy art prints currently on sale. In this post we hope you will find useful information before deciding whether to buy this Posters & Prints item. Here are the product details. The Human Anatomy Print Set is produced by The National Anthem. The listing is for any four "Human Anatomy" art prints of your choice — buy any three Human Anatomy prints and get the fourth one free! ➔ Professionally printed with fade-resistant ink on a heavyweight, lightly-textured, matte white stock. ➔ All of our prints include a small (approx. 1/8″ – 1/2″) white margin, which helps protect and preserve your print. If you'd like us to trim the margin off for you, please let us know and we'll be happy to do so. ➔ Frames and mats are not included; the listing is for the print/poster only. ➔ Prints 8″x10″ and smaller are packaged inside an acid-free, clear plastic sleeve with a sturdy chipboard backing and then shipped in a strong, flat mailing envelope. ➔ Prints 11″x14″ and larger are gently rolled with a protective outer paper layer and then shipped inside a strong poster tube. ➔ Orders are shipped out within 1–3 business days via USPS First Class Mail. ➔ All images copyright © theNATIONALanthem. Copyright does not transfer with the sale of this print.
If you would like to read reviews of the Human Anatomy Print Set (Buy 3, Get 1 Free) or check its price, please follow THIS LINK. We hope this article helps you decide whether to buy the Human Anatomy Print Set, and that it also helps you find other items that are similar to — or even better than — it. If you feel that the Human Anatomy Print Set is not right for you, you can use the search box or browse each Posters & Prints category to find another product. But if you have decided to purchase the Human Anatomy Print Set, Buy 3 Get 1 Free, please click the BUY BUTTON below.
"""Report interfaces for viewing and managing fixture (lookup) tables."""
from couchdbkit import ResourceNotFound
from django.contrib import messages
from django.http import HttpResponseRedirect
from corehq.apps.fixtures.views import fixtures_home, FixtureViewMixIn
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
from corehq.apps.reports.filters.base import BaseSingleOptionFilter
from corehq.apps.fixtures.dispatcher import FixtureInterfaceDispatcher
from corehq.apps.fixtures.models import FixtureDataType, _id_from_doc
from dimagi.utils.decorators.memoized import memoized
from django.utils.translation import ugettext_noop, ugettext as _


class FixtureInterface(FixtureViewMixIn, GenericReportView):
    """Common base for the fixture report interfaces below."""
    base_template = 'fixtures/fixtures_base.html'
    asynchronous = False
    dispatcher = FixtureInterfaceDispatcher
    exportable = False
    needs_filters = False


class FixtureSelectFilter(BaseSingleOptionFilter):
    """Single-select dropdown listing the domain's fixture tables."""
    slug = "table_id"
    label = ""
    placeholder = "place"
    default_text = "Select a Table"

    @property
    def selected(self):
        # ko won't display default selected-value as it should, display default_text instead
        return ""

    @property
    @memoized
    def fixtures(self):
        # All fixture data types defined for this domain.
        fdts = list(FixtureDataType.by_domain(self.domain))
        return fdts

    @property
    @memoized
    def options(self):
        # (doc id, display tag) pairs for the dropdown.
        return [(_id_from_doc(f), f.tag) for f in self.fixtures]


class FixtureViewInterface(GenericTabularReport, FixtureInterface):
    """Tabular report showing the rows of one selected fixture table."""
    name = ugettext_noop("View Tables")
    slug = "view_lookup_tables"
    report_template_path = 'fixtures/view_table.html'
    fields = ['corehq.apps.fixtures.interface.FixtureSelectFilter']

    @property
    def view_response(self):
        # Without any tables there is nothing to view: redirect to the
        # fixtures home page with an explanatory message.
        if not self.has_tables():
            messages.info(self.request, _("You don't have any tables defined yet - create tables to view them."))
            return HttpResponseRedirect(fixtures_home(self.domain))
        else:
            return super(FixtureViewInterface, self).view_response

    @property
    def report_context(self):
        assert self.has_tables()
        # No table chosen yet -> render the "pick a table" state.
        if not self.request.GET.get("table_id", None):
            return {"table_not_selected": True}
        try:
            context = super(FixtureViewInterface, self).report_context
        except ResourceNotFound:
            # The requested table doc no longer exists; fall back gracefully.
            return {"table_not_selected": True}
        context.update({"selected_table": self.table.get("table_id", "")})
        return context

    @memoized
    def has_tables(self):
        # True when at least one fixture data type exists for the domain.
        return True if list(FixtureDataType.by_domain(self.domain)) else False

    @property
    @memoized
    def table(self):
        # Imported here to avoid a circular import with the views module.
        from corehq.apps.fixtures.views import data_table
        if self.has_tables() and self.request.GET.get("table_id", None):
            return data_table(self.request, self.domain)
        else:
            return {"headers": None, "rows": None}

    @property
    def headers(self):
        return self.table["headers"]

    @property
    def rows(self):
        return self.table["rows"]


class FixtureEditInterface(FixtureInterface):
    """Management page listing the domain's fixture tables for editing."""
    name = ugettext_noop("Manage Tables")
    slug = "edit_lookup_tables"
    report_template_path = 'fixtures/manage_tables.html'

    @property
    def report_context(self):
        # NOTE(review): super() is called with FixtureInterface (the parent)
        # rather than FixtureEditInterface; with this hierarchy the lookup
        # resolves to the same implementation, but the conventional form
        # would be super(FixtureEditInterface, self) -- confirm intent.
        context = super(FixtureInterface, self).report_context
        context.update(types=self.data_types)
        return context

    @property
    @memoized
    def data_types(self):
        return list(FixtureDataType.by_domain(self.domain))
Joseph Ballard is principal designer and co-founder of Constructed Matter. Together with his wife Tanya, they are focusing on offering original artwork and products for the modern space. Since 2014, Joseph has been best known for his typographic prints, geometric abstract paintings and custom furniture. He believes working in multiple mediums allows for a full expression of creativity and keeps the work fresh. Constructed Matter is striving to consistently offer timeless art that creates a positive emotional response within.
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pandas as pd
import six


class MetadataNormalizer:
    """Normalizes a flat metadata dataframe into a nested dictionary with a
    table container (database/schema) -> tables -> columns hierarchy."""

    def __init__(self):
        # All functionality is exposed via classmethods; instances are
        # stateless.
        pass

    @classmethod
    def normalize(cls, metadata, metadata_definition):
        """
        Receives a Pandas dataframe and normalizes it by creating a
        dictionary with Table Container(Database/Schema) -> Tables -> Columns
        hierarchy.

        :param metadata: the Pandas dataframe
        :param metadata_definition: the Metadata Definition

        the normalized dictionary will be created with the specified
        target keys.

        Example:
            >>> metadata_definition = {
            ...    'table_container_def': {
            ...        'key': 'schemas',
            ...        'type': 'schema',
            ...        'name': 'schema_name',
            ...        'fields': [
            ...            {
            ...                'source': 'schema_created',
            ...                'target': 'create_time'
            ...            }
            ...        ]
            ...    },
            ...    'table_def': {
            ...        'key': 'tables',
            ...        'type': 'table',
            ...        'name': 'table_name',
            ...        'fields': [
            ...            {
            ...                'source': 'table_comments',
            ...                'target': 'desc'
            ...            }
            ...        ]
            ...    },
            ...    'column_def': {
            ...        'key': 'columns',
            ...        'type': 'column',
            ...        'name': 'column_name',
            ...        'fields': [
            ...            {
            ...                'source': 'data_length',
            ...                'target': 'length'
            ...            }
            ...        ]
            ...    }
            ...}

        :return: a normalized dict object
        """
        # NOTE: mutates the incoming dataframe (drops all-NaN rows, sets an
        # index, and consumes rows as they are normalized).
        cls._remove_nan_rows(metadata)

        table_container_def = metadata_definition['table_container_def']

        return {
            table_container_def['key']:
                cls._normalize_objects(
                    metadata=metadata,
                    key_column_name=table_container_def['name'],
                    normalizer_method=cls.__normalize_table_container,
                    metadata_definition=metadata_definition)
        }

    @classmethod
    def _remove_nan_rows(cls, metadata):
        # Remove nan fields
        # NOTE(review): disabling chained-assignment warnings is a global,
        # process-wide pandas setting -- presumably intentional here.
        pd.options.mode.chained_assignment = None
        metadata.dropna(axis=0, how='all', inplace=True)

    @classmethod
    def _normalize_objects(cls, metadata, key_column_name, normalizer_method,
                           metadata_definition):
        """
        Generic method to normalize a Pandas dataframe into an array of
        dictionary objects.

        :param metadata: the Pandas dataframe
        :param key_column_name: column used to distinguish top-level objects
            from each other
        :param normalizer_method: the method used to normalize each top-level
            object
        :param metadata_definition: the Metadata Definition

        :return: an array of normalized dict objects
        """
        metadata.set_index(key_column_name, inplace=True)

        key_values = metadata.index.unique().tolist()

        array = []

        for key_value in key_values:
            # We use an array with: [key_value] to make sure the dataframe loc
            # always returns a dataframe, and not a Series
            if pd.notnull(key_value):
                metadata_subset = metadata.loc[[key_value]]
                # Consume the rows so nested levels re-index what remains.
                metadata.drop(key_value, inplace=True)
                array.append(
                    normalizer_method(key_value.strip(), metadata_subset,
                                      metadata_definition))

        return array

    @classmethod
    def _extract_value_from_first_row(cls, df, column_name):
        # Field values are expected to repeat per group; the first row is
        # representative.
        value = df.iloc[0][column_name]
        if pd.isna(value):
            return value
        if isinstance(value, six.string_types):
            return value.strip()
        return value

    @classmethod
    def _normalize_timestamp_field(cls, timestamp_field):
        return pd.Timestamp(timestamp_field)

    @classmethod
    def __normalize_table_container(cls, name, table_container_metadata,
                                    metadata_definition):
        """Build one schema/database dict and recurse into its tables."""
        tables_container_def = metadata_definition['table_container_def']
        fields = tables_container_def['fields']

        normalized_dict = {'name': name}
        normalized_dict.update(
            cls._normalize_fields(fields, table_container_metadata))

        table_def = metadata_definition['table_def']
        # Slice off the columns left of the table-name column before
        # descending a level.
        normalized_dict[table_def['key']] = \
            cls._normalize_objects(
                metadata=table_container_metadata.loc[
                    :, table_def['name']:],
                key_column_name=table_def['name'],
                normalizer_method=cls.__normalize_table,
                metadata_definition=metadata_definition
            )

        return normalized_dict

    @classmethod
    def __normalize_table(cls, name, table_metadata, metadata_definition):
        """Build one table dict and recurse into its columns."""
        table_def = metadata_definition['table_def']
        fields = table_def['fields']

        normalized_dict = {'name': name}
        normalized_dict.update(cls._normalize_fields(fields, table_metadata))

        column_def = metadata_definition['column_def']
        normalized_dict[column_def['key']] = cls._normalize_objects(
            metadata=table_metadata.loc[:, column_def['name']:],
            key_column_name=column_def['name'],
            normalizer_method=cls.__normalize_column,
            metadata_definition=metadata_definition)

        return normalized_dict

    @classmethod
    def __normalize_column(cls, name, column_metadata, metadata_definition):
        """Build one column dict (leaf level; no further recursion)."""
        column_def = metadata_definition['column_def']
        fields = column_def['fields']

        normalized_dict = {'name': name}
        normalized_dict.update(cls._normalize_fields(fields, column_metadata))

        return normalized_dict

    @classmethod
    def _normalize_fields(cls, fields, metadata):
        """Map each source column to its target key for one object."""
        fields_dict = {}
        for field in fields:
            source = field['source']
            target = field['target']
            # could be that optional information ('source')
            # is not present in scraped metadata
            if source in metadata:
                value = cls._extract_value_from_first_row(metadata, source)
                if cls._is_timestamp_field(target):
                    value = cls._normalize_timestamp_field(value)
                fields_dict[target] = value
        return fields_dict

    @classmethod
    def _is_timestamp_field(cls, target):
        # [TODO] Improve logic to identify timestamp fields
        # currently using a naming convention
        if '_date' in target or '_time' in target:
            return True
        return False

    @staticmethod
    def get_exact_table_names_from_dataframe(dataframe, metadata_definition):
        """
        Get table names in a form schema_name.table_name
        """
        container_name_col = metadata_definition['table_container_def']['name']
        table_name_col = metadata_definition['table_def']['name']

        container_table_pairs_df = dataframe[[
            container_name_col, table_name_col
        ]]
        container_table_pairs_records = container_table_pairs_df.to_dict(
            orient='records')

        exact_table_names = list()
        for pair_dict in container_table_pairs_records:
            # Join the (container, table) values as "schema.table".
            values = [val.strip() for val in pair_dict.values()]
            exact_table_name = ".".join(values)
            exact_table_names.append(exact_table_name)

        return exact_table_names
Theratechnologies’ (TSX:TH) partner, TaiMed Biologics of Taiwan, completed the submission of a biologics license application (BLA) to the FDA for ibalizumab for the treatment of multidrug-resistant human immunodeficiency virus-1 (MDR HIV-1). If approved, ibalizumab will be the first antiretroviral treatment with a new mechanism of action to be introduced in nearly 10 years and the only treatment that does not require daily dosing. As ibalizumab has received breakthrough therapy and orphan drug designations, TaiMed has requested priority review for the BLA. The ibalizumab BLA is based on data from the Phase 3 TMB-301 study, a single arm, 24-week study of ibalizumab plus an optimized background regimen in treatment-experienced patients infected with MDR HIV-1. "This is a significant step toward the expansion of our product portfolio, which reinforces our mission to improve the lives of people living with HIV," Luc Tanguay, president and CEO of Theratechnologies, said in a statement. Some 20,000-to-25,000 Americans with HIV-1 are currently resistant to at least one drug in three different classes of antiretroviral therapies, and up to 12,000 of these patients experience a virological failure over a period of 48 weeks of treatment, requiring their physician to modify their treatment. "While some people can suppress their viral loads with currently-approved treatments, there is an urgent need for new options among those with multidrug resistance," said Christian Marsolais, SVP and CMO of Theratechnologies.
# -*- coding: utf-8 -*- """ <DefineSource> @Date : Fri Nov 14 13:20:38 2014 \n @Author : Erwan Ledoux \n\n </DefineSource> A Filterer pick and """ #<DefineAugmentation> import ShareYourSystem as SYS BaseModuleStr="ShareYourSystem.Applyiers.Walker" DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer" SYS.setSubModule(globals()) #</DefineAugmentation> #<ImportSpecificModules> import copy import collections #</ImportSpecificModules> #<DefineClass> @DecorationClass() class FiltererClass(BaseClass): #Definition RepresentingKeyStrsList=[ 'FilteredVariablesList' ] def default_init(self, _FilteredVariablesList=None, **_KwargVariablesDict): #Call the parent __init__ method BaseClass.__init__(self,**_KwargVariablesDict) def do_filter(self): #debug ''' self.debug(('self.',self,[ 'WalkingSocketDict', 'WalkedTopOrderedDict' ])) ''' #Init if 'FilterVariablesList' not in self.WalkedTopOrderedDict: self.WalkedTopOrderedDict['FilterVariablesList']=[] #Check if self.conclude( self, self.WalkingSocketDict['ConcludeConditionVariable'] ).ConcludedIsBool: #debug ''' self.debug( ( 'self.',self,[ 'ConcludedConditionIsBoolsList', ]+SYS.unzip( self.WalkingSocketDict[ 'ConcludeConditionVariable'],[0] ) ) ) ''' #Pick self.WalkedTopOrderedDict['FilterVariablesList'].append( self.pick( self.WalkingSocketDict['PickVariablesList'] ) ) #set if self.WalkingSocketDict['TopVariable']==self: self.FilteredVariablesList=self.WalkedTopOrderedDict['FilterVariablesList'] #self.FilteredVariablesList=copy.copy(self.WalkedTopOrderedDict['FilterVariablesList']) #</DefineClass>
Description: Chunky beech-wood-handled chopping board, 34cm x 17cm x 2cm. This size is great for all sorts of purposes — cutting, chopping or food display. It makes a perfect addition to any kitchen, large or small.
# coding=utf-8 from typing import List, Callable from adbook.orm.entity import Entity from adbook.addressbook import AddressBook def convert_person_list(persons: List[Entity]) -> List[str]: return [str(person) for person in persons] if __name__ == '__main__': """ * Find person by email address (can supply either the exact string or a prefix string, ie. both "alexander@company.com" and "alex" should work). """ with AddressBook() as ab: p1 = ab.persons.create(first_name="Franc", last_name="Kafka", email="kafka@absurd.org") p2 = ab.persons.create(first_name="Sergey", last_name="Esenin") p2.emails.append("esenin1@gmail.com") p2.emails.append("esenin2@gmail.com") ab.persons.add(p1, p2) lookingfor_email_1 = "esenin1" found1 = ab.persons.find_by_email(lookingfor_email_1) print("Find email {}: {}".format(lookingfor_email_1, convert_person_list(found1))) lookingfor_email_2 = "kafka@absurd.org" found2 = ab.persons.find_by_email(lookingfor_email_2) print("Find email {}: {}".format(lookingfor_email_2, convert_person_list(found2)))
If you love to eat and drink, this week offers a great chance to explore the world of food and beverage beyond restaurants and bars. From a traveling cocktail party to a fish fry, there's plenty to explore and enjoy if you're hungry for something new. Kick off spring at this moveable feast in Lawrenceville. During the progressive party, you'll make stops at three houses for hors d’oeuvres and drinks, and then head to the Butler Street Lofts for dessert. Proceeds from the event benefit Tree Pittsburgh, a nonprofit dedicated to protecting and restoring green spaces in the city. When: Saturday, March 23, 5-10 p.m. Where: Central Lawrenceville, Butler Street, Lofts, 212 45th St. Unwind on the Northside on Friday after work at the ninth annual Hops for HEARTH event — Sample beers from more than 20 breweries, nosh on light bites and bid on auction items. The event raises funds for HEARTH, a nonprofit organization that helps homeless families who are survivors of domestic violence. When: Friday, March 22, 7 p.m. Check out the Carnegie Museum of Art's take on the classic Pittsburgh fish fry. The event features all-you-can-eat fish and pierogies. After dinner, stick around for exclusive after-hours access to the museum. When: Saturday, March 23, 5 p.m.
# coding=utf-8
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI,
#     NScD Oak Ridge National Laboratory, European Spallation Source
#     & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#  This file is part of the mantid workbench.
#
#
from __future__ import (absolute_import, division, print_function)

from qtpy import QtGui
from qtpy.QtCore import QVariant, Qt, QAbstractTableModel

from mantid.py3compat import Enum


class MatrixWorkspaceTableViewModelType(Enum):
    """Which data vector of the workspace the model exposes."""
    x = 'x'
    y = 'y'
    e = 'e'


class MatrixWorkspaceTableViewModel(QAbstractTableModel):
    """Qt table model that exposes one of a MatrixWorkspace's X, Y or E data
    vectors, with masked/monitor rows and masked bins highlighted."""

    # Header/tooltip format strings for the Y and E views (include the bin
    # centre value and its unit symbol).
    HORIZONTAL_HEADER_DISPLAY_STRING = u"{0}\n{1:0.1f}{2}"
    HORIZONTAL_HEADER_TOOLTIP_STRING = u"index {0}\n{1} {2:0.1f}{3} (bin centre)"

    # Simpler headers for the X view (the cells already show X values).
    HORIZONTAL_HEADER_DISPLAY_STRING_FOR_X_VALUES = "{0}"
    HORIZONTAL_HEADER_TOOLTIP_STRING_FOR_X_VALUES = "index {0}"

    VERTICAL_HEADER_DISPLAY_STRING = "{0} {1}"
    VERTICAL_HEADER_TOOLTIP_STRING = "index {0}\nspectra no {1}"

    # Used when bins are not common across spectra, so no single bin centre
    # can be shown in the header.
    HORIZONTAL_BINS_VARY_DISPLAY_STRING = "{0}\nbins vary"
    HORIZONTAL_BINS_VARY_TOOLTIP_STRING = "index {0}\nbin centre value varies\nRebin to set common bins"

    MASKED_MONITOR_ROW_STRING = "This is a masked monitor spectrum. "
    MASKED_ROW_STRING = "This is a masked spectrum. "
    MONITOR_ROW_STRING = "This is a monitor spectrum. "
    MASKED_BIN_STRING = "This bin is masked. "

    def __init__(self, ws, model_type):
        """
        :param ws: the MatrixWorkspace whose data will be displayed
        :param model_type: MatrixWorkspaceTableViewModelType
        :type model_type: MatrixWorkspaceTableViewModelType
        """
        assert model_type in [MatrixWorkspaceTableViewModelType.x,
                              MatrixWorkspaceTableViewModelType.y,
                              MatrixWorkspaceTableViewModelType.e], "The Model type must be either X, Y or E."

        super(MatrixWorkspaceTableViewModel, self).__init__()

        self.ws = ws
        self.ws_spectrum_info = self.ws.spectrumInfo()
        self.row_count = self.ws.getNumberHistograms()
        self.column_count = self.ws.blocksize()

        # Lazily filled caches so SpectrumInfo/workspace queries are done at
        # most once per row (or per row's masked bins).
        self.masked_rows_cache = []
        self.monitor_rows_cache = []
        self.masked_bins_cache = {}

        self.masked_color = QtGui.QColor(240, 240, 240)
        self.monitor_color = QtGui.QColor(255, 253, 209)

        self.type = model_type
        if self.type == MatrixWorkspaceTableViewModelType.x:
            self.relevant_data = self.ws.readX

            # add another column if the workspace is histogram data
            # this will contain the right boundary for the last bin
            if self.ws.isHistogramData():
                self.column_count += 1
        elif self.type == MatrixWorkspaceTableViewModelType.y:
            self.relevant_data = self.ws.readY
        elif self.type == MatrixWorkspaceTableViewModelType.e:
            self.relevant_data = self.ws.readE
        else:
            raise ValueError("Unknown model type {0}".format(self.type))

    def _makeVerticalHeader(self, section, role):
        """Format the row header (or tooltip) with the spectrum number."""
        axis_index = 1
        # check that the vertical axis actually exists in the workspace
        if self.ws.axes() > axis_index:
            if role == Qt.DisplayRole:
                return self.VERTICAL_HEADER_DISPLAY_STRING.format(section, self.ws.getAxis(axis_index).label(section))
            else:
                spectrum_number = self.ws.getSpectrum(section).getSpectrumNo()
                return self.VERTICAL_HEADER_TOOLTIP_STRING.format(section, spectrum_number)
        else:
            raise NotImplementedError("What do we do here? Handle if the vertical axis does NOT exist")

    def _makeHorizontalHeader(self, section, role):
        """
        :param section: The workspace index or bin number
        :param role: Qt.DisplayRole - is the label for the header
                      or Qt.TooltipRole - is the tooltip for the header when moused over
        :return: The formatted header string
        """
        # X values get simpler labels
        if self.type == MatrixWorkspaceTableViewModelType.x:
            if role == Qt.DisplayRole:
                return self.HORIZONTAL_HEADER_DISPLAY_STRING_FOR_X_VALUES.format(section)
            else:
                # format for the tooltip
                return self.HORIZONTAL_HEADER_TOOLTIP_STRING_FOR_X_VALUES.format(section)

        if not self.ws.isCommonBins():
            if role == Qt.DisplayRole:
                return self.HORIZONTAL_BINS_VARY_DISPLAY_STRING.format(section)
            else:
                # format for the tooltip
                return self.HORIZONTAL_BINS_VARY_TOOLTIP_STRING.format(section)

        # for the Y and E values, create a label with the units
        axis_index = 0
        x_vec = self.ws.readX(0)
        if self.ws.isHistogramData():
            # bin centre is midway between the two boundaries
            bin_centre_value = (x_vec[section] + x_vec[section + 1]) / 2.0
        else:
            bin_centre_value = x_vec[section]
        unit = self.ws.getAxis(axis_index).getUnit()
        if role == Qt.DisplayRole:
            return self.HORIZONTAL_HEADER_DISPLAY_STRING.format(section, bin_centre_value,
                                                                unit.symbol().utf8())
        else:
            # format for the tooltip
            return self.HORIZONTAL_HEADER_TOOLTIP_STRING.format(section, unit.caption(), bin_centre_value,
                                                                unit.symbol().utf8())

    def headerData(self, section, orientation, role=None):
        # Only display text and tooltips are provided for headers.
        if not (role == Qt.DisplayRole or role == Qt.ToolTipRole):
            return QVariant()

        if orientation == Qt.Vertical:
            return self._makeVerticalHeader(section, role)
        else:
            return self._makeHorizontalHeader(section, role)

    def rowCount(self, parent=None, *args, **kwargs):
        return self.row_count

    def columnCount(self, parent=None, *args, **kwargs):
        return self.column_count

    def data(self, index, role=None):
        row = index.row()
        if role == Qt.DisplayRole:
            # DisplayRole determines the text of each cell
            return str(self.relevant_data(row)[index.column()])
        elif role == Qt.BackgroundRole:
            # BackgroundRole determines the background of each cell

            # Checks if the row is MASKED, if so makes it the specified color for masked
            # The check for masked rows should be first as a monitor row can be masked as well - and we want it to be
            # colored as a masked row, rather than as a monitor row.
            # First do the check in the cache, and only if not present go through SpectrumInfo and cache it. This logic
            # is repeated in the other checks below
            if self.checkMaskedCache(row):
                return self.masked_color

            # Checks if the row is a MONITOR, if so makes it the specified color for monitors
            elif self.checkMonitorCache(row):
                return self.monitor_color

            # Checks if the BIN is MASKED, if so makes it the specified color for masked
            elif self.checkMaskedBinCache(row, index):
                return self.masked_color

        elif role == Qt.ToolTipRole:
            tooltip = QVariant()
            # Build up the tooltip from the row state (masked and/or
            # monitor), then append the masked-bin note where relevant.
            if self.checkMaskedCache(row):
                if self.checkMonitorCache(row):
                    tooltip = self.MASKED_MONITOR_ROW_STRING
                else:
                    tooltip = self.MASKED_ROW_STRING
            elif self.checkMonitorCache(row):
                tooltip = self.MONITOR_ROW_STRING
                if self.checkMaskedBinCache(row, index):
                    tooltip += self.MASKED_BIN_STRING
            elif self.checkMaskedBinCache(row, index):
                tooltip = self.MASKED_BIN_STRING
            return tooltip
        else:
            return QVariant()

    def checkMaskedCache(self, row):
        # Returns True for masked rows; cache hit avoids SpectrumInfo calls.
        # (Falls through returning None -- falsy -- for unmasked rows.)
        if row in self.masked_rows_cache:
            return True
        elif self.ws_spectrum_info.hasDetectors(row) and self.ws_spectrum_info.isMasked(row):
            self.masked_rows_cache.append(row)
            return True

    def checkMonitorCache(self, row):
        # Returns True for monitor rows; same caching pattern as above.
        if row in self.monitor_rows_cache:
            return True
        elif self.ws_spectrum_info.hasDetectors(row) and self.ws_spectrum_info.isMonitor(row):
            self.monitor_rows_cache.append(row)
            return True

    def checkMaskedBinCache(self, row, index):
        if row in self.masked_bins_cache:
            # retrieve the masked bins IDs from the cache
            if index.column() in self.masked_bins_cache[row]:
                return True

        elif self.ws.hasMaskedBins(row):
            masked_bins = self.ws.maskedBinsIndices(row)
            if index.column() in masked_bins:
                self.masked_bins_cache[row] = masked_bins
                return True
Our Michigan dog bite lawyers settled an injury claim for a twelve-year-old child in Branch County, Michigan. The child was bitten by a dog and suffered a laceration in his forearm. He required stitches to repair the laceration and was left with a minor scar. The dog was owned by a neighbor. Quite often, a person suffers injuries when they are bitten by a neighbor’s dog. This happens when an unleashed or unchained dog runs into the neighbor’s yard or attacks a person in the street or somewhere else in the neighborhood. It also occurs when one neighbor is visiting the other neighbor and the dog attacks at that time. If you or your child suffers serious injuries from the neighbor’s dog, you can sue your neighbor. Michigan law holds the person who owns, keeps, or harbors the dog responsible for a dog bite or attack, regardless of prior knowledge (called, “strict liability” laws) of the viciousness of the animal. Also, the negligence law is used to hold the owner responsible for the attack. This means that the dog owner, which could be your neighbor, is liable if the injury occurred because the dog owner was unreasonably careless in controlling the dog. The only defense to the dog owner in these cases is if the victim provoked the attack or was a trespasser on the property. In these cases, the dog owner’s homeowner’s insurance company usually pays a settlement to the dog bite victims. This matter was settled pre-suit with State Farm Insurance Company. The homeowner’s insurance company for the dog owner paid a settlement of $24,000 to the child. His funds will be placed in a trust until he turns eighteen years old.
#
# Diversification Algorithm with access to the diversity QRELs
# Mark II -- More complex algorithm, not as rewarding as the first attempt.
# Updated to work with the ifind search objects.
#
# Slightly updated to make it easier to drop into the treconomis environment.
#
# Author: David Maxwell and Leif Azzopardi
# Date: 2018-01-06
#
import copy

from treconomics.experiment_functions import qrels_diversity

# To diversify a ranked list you need:
#   * a list of results (an iterable -- a whoosh.results object or a plain list)
#   * a topic number
#   * a lambda value (the weight given to each newly-seen entity)
#   * a DIVERSIFY_TO_RANK value (how deep to re-rank)
#
# Call diversify_results(results, topic, to_rank, lam); the diversified
# rankings are written back into results.results and results is returned.


def convert_results_to_list(results, deep_copy=True):
    """
    Given a Whoosh results object, converts it to a list and returns that list.
    Useful, as the Whoosh results object does not permit reassignment of Hit objects.

    NOTE(review): despite the parameter name, copy.copy() makes *shallow*
    copies of each hit (preserved from the original implementation) --
    confirm that this is sufficient for Hit objects.
    """
    if deep_copy:
        return [copy.copy(hit) for hit in results]
    return list(results)


def get_highest_score_index(results_list):
    """
    Given a list of results, returns the index of the hit with the highest
    score (the first such index on a tie). An empty list returns 0.

    FIX: the previous implementation initialised the running maximum to 0.0,
    so a list containing only non-positive scores always returned index 0;
    a true argmax handles that edge case correctly.
    """
    if not results_list:
        return 0
    return max(range(len(results_list)), key=lambda i: results_list[i].score)


def get_new_entities(observed_entities, document_entities):
    """
    Given a list of previously seen entities, and a list of document entities,
    returns a list of entities in the document which have not yet been seen.
    """
    return list(set(document_entities) - set(observed_entities))


def get_observed_entities_for_list(topic, rankings_list):
    """
    Given a list of Whoosh Hit objects, returns a list of the different
    entities that are mentioned in them.
    """
    observed_entities = []

    for hit in rankings_list:
        entities = qrels_diversity.get_mentioned_entities_for_doc(topic, hit.docid)
        observed_entities += get_new_entities(observed_entities, entities)

    return observed_entities


def diversify_results(results, topic, to_rank=30, lam=1.0):
    """
    The diversification algorithm. Given an ifind results object, re-ranks
    its top ``to_rank`` hits so that more diverse content -- documents
    discussing a wider range of identified entities -- appears first.

    :param results: ifind results object; results.results is replaced in place.
    :param topic: topic used to look up entity judgements in the diversity QRELs.
    :param to_rank: depth to re-rank to (None re-ranks the whole list).
    :param lam: weight added to a hit's score per newly-seen entity.
    :return: the (mutated) results object.
    """
    results_len = len(results.results)

    # Simple sanity check -- no results? Can't diversify anything!
    if results_len == 0:
        return results

    # No depth given, or fewer results than the requested depth?
    # Re-rank everything we have.
    if to_rank is None or results_len < to_rank:
        to_rank = results_len

    # Guarantee float arithmetic for the lambda weighting.
    lam = float(lam)

    # As the list of results is probably larger than the depth we re-rank to,
    # take a slice; this working list is popped from as documents are chosen.
    old_rankings = results.results[:to_rank]

    # The top-ranked document always stays first; the remaining slots are
    # filled greedily below.
    new_rankings = [old_rankings.pop(0)]

    for _ in range(1, to_rank):
        observed_entities = get_observed_entities_for_list(topic, new_rankings)

        # Boost each remaining candidate by the number of entities it would
        # newly introduce (note: boosts accumulate across iterations, as in
        # the original algorithm).
        for candidate in old_rankings:
            entities = qrels_diversity.get_mentioned_entities_for_doc(topic, candidate.docid)
            new_entities = get_new_entities(observed_entities, entities)
            candidate.score = candidate.score + (lam * len(new_entities))

        # Highest (re-)score first; promote the winner to the new rankings.
        old_rankings.sort(key=lambda hit: hit.score, reverse=True)
        new_rankings.append(old_rankings.pop(0))

    results.results = new_rankings + results.results[to_rank:]
    return results

# Algorithm sketch (Leif's notes): keep the first document; repeatedly
# re-score the remaining candidates by how many unseen entities they add,
# sort, and promote the top candidate; repeat until to_rank documents have
# been selected. Lambda controls how strongly novelty is rewarded -- evaluate
# with alpha-nDCG (e.g. re-running the SIGIR study queries) to see what
# happens when lambda changes.
Why think Mobile when Marketing to Developing Economies? The mobile growth rate in Africa surpasses any region globally, so it’s no surprise that Africa has become known as the mobile-only continent. This represents numerous opportunities for marketers to take advantage of this unique aspect of the continent. Mobile phones in Africa are largely feature phones, although smartphones are becoming more and more affordable. So how can marketers take advantage of this? Below are some tips to help kick start fresh ideas. A feature phone, also known as a smartphone-lite, is a mid-range mobile that has more functions than a basic phone only capable of voice calls and SMS, but without the expense of a smartphone. According to app creator Binu, Africa will have 900 million feature phones by 2017. This is a huge number, considering that the rest of the world would have outgrown the feature phone by this time. Marketers need to ensure that their marketing products can be embedded on feature phones and are easy to load. For example, Binu has now integrated World Reader into their portfolio of apps and thousands of Africans can now access novels and other reading materials from the platform. This application does not download any material to the device, due to the fact that feature phones rarely have the storage capabilities of smartphones. Facebook hasn’t started posting ads readable by feature phones, but pages can take advantage of this, knowing that more Africans access their social media feeds through the phones. The use of SMS and USSD is a powerful tool when marketing to Africa’s mobile platform. ForgetMeNot Africa is one of the companies deploying USSD services to telecom operators all over the continent. The service has also helped users of the Orange network in Kenya to enjoy social media tools using dataless phones. Users can chat through the service and send emails and such services are gaining momentum in the country.
In Kenya, music sales are largely aided by USSD services or text messages. Mobile content companies use premium SMS services to sell music, advice, wallpapers, jokes and much more additional mobile content. Kenyan banks, such as Kenya Commercial Bank, have integrated the USSD service with their mobile banking. Now, users can easily access their account, transfer money and withdraw money using the service. The reason why SMS and USSD are so popular is because they are easy to use, this is especially true for people living in rural areas. There is a huge, largely unexploited, market for mobile content in Africa and most developing countries. Most games and content are foreign and often don’t work well in developing economies such as Africa, as no emphasis is placed on local culture. Marketers can take full advantage of this vacuum to create interesting mobile content while pushing for brand awareness. With many phones embracing the 3G internet network, the possibilities are further broadened. Internet applications can offer a platform for customer interactions. Hosting adverts in various mobile applications is a unique way to gain potential customers’ attention. Companies such as InMobi and Twinpine Network are servicing brands that want to reach out to African consumers through mobile web. Twinpine recorded 500 million monthly impressions by August 2012. Any marketer or app developer that can take advantage of this glaringly open marketplace will be in a good position to develop strong and lasting business from the African continent. Vincent Matinde is a pan African technology news journalist. He is enthusiastic about technology and innovation throughout the continent.
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# Copyright (C) 2013-2014: SCS Software

"""
This script manages import of various SCS binary data files into Blender.
"""

import bpy
import os

# NOTE: the per-format importers (import_pmg / pit / pic / pip / pis / pia)
# are currently disabled pending their port; only the shared io_utils module
# is loaded (or reloaded) below.
from .deprecated_utils import Print

if "bpy" in locals():
    import imp

    if "io_utils" in locals():
        imp.reload(io_utils)
    else:
        from . import io_utils


def version():
    """Return the version number of this import script (edit it here)."""
    return 0.2


def create_lod_empty(name, objects, locators, armature, skeleton):
    """
    Create an 'SCS Root Object' (an Empty) for the currently imported
    'SCS Game Object' and parent all of the imported content to it.
    """
    if name in bpy.data.objects:
        name = io_utils.make_unique_name(bpy.data.objects[0], name)

    # Add the Empty that will serve as the SCS Root Object...
    bpy.ops.object.empty_add(
        type='PLAIN_AXES',
        view_align=False,
    )

    # ...and give it the proper root-object settings.
    root_object = bpy.context.active_object
    root_object.name = name
    root_object.show_name = True
    root_object.scs_props.scs_root_object_export_enabled = True
    root_object.scs_props.empty_object_type = 'SCS_Root'

    # Select everything that should become a child of the root: either the
    # armature alone (meshes are already parented to it) or the raw objects.
    if armature:
        bpy.ops.object.select_all(action='DESELECT')
        armature.select = True
    else:
        for obj in objects:
            obj.select = True
    # NOTE(review): indentation reconstructed from a whitespace-mangled
    # source -- locators are selected in both branches here; confirm
    # against the upstream io_scs_tools history.
    for obj in locators:
        obj.select = True

    # Parent the selection to the root object.
    bpy.ops.object.parent_set(type='OBJECT', keep_transform=False)

    # Leave only the new 'SCS Game Object' selected and active.
    bpy.ops.object.select_all(action='DESELECT')
    for obj in bpy.data.objects:
        obj.select = False
    root_object.select = True
    bpy.context.scene.objects.active = root_object

    return bpy.data.objects.get(name)


def load(
        operator,
        context,
        filepath,
):
    """Import an SCS model file and set up the scene for viewing it."""
    import time

    start_time = time.time()
    bpy.context.window.cursor_modal_set('WAIT')

    dump_level = int(bpy.data.worlds[0].scs_globals.dump_level)

    prefab_locators = []
    objects = []
    locators = []
    armature = skeleton = None

    # IMPORT PMG (PIM) -- only the existence checks remain active while the
    # actual PMG importer is disabled.
    if bpy.data.worlds[0].scs_globals.import_pmg_file or bpy.data.worlds[0].scs_globals.import_pis_file:
        if filepath:
            if os.path.isfile(filepath):
                Print(dump_level, '\nD PMG filepath:\n %s', str(filepath).replace("\\", "/"))
            else:
                Print(dump_level, '\nI No file found at %r!' % str(filepath).replace("\\", "/"))
        else:
            Print(dump_level, '\nI No filepath provided!')

    # SETUP LODS -- collect any prefab locators alongside the model locators.
    locators.extend(prefab_locators)

    directory, file_name = os.path.split(filepath)
    lod_name, ext = os.path.splitext(file_name)

    print('LODs as Objects...')
    if objects:
        create_lod_empty(lod_name, objects, locators, armature, skeleton)

    # SET DRAW MODES -- enable Textured Solid in every 3D viewport and set
    # the manipulator to rotation in normal orientation.
    for screen in bpy.data.screens:
        for area in screen.areas:
            for space in area.spaces:
                if space.type == 'VIEW_3D':
                    space.show_textured_solid = True
                    space.transform_orientation = 'NORMAL'
                    space.transform_manipulators = {'ROTATE'}

    # Switch the scene to GLSL shading.
    bpy.context.scene.game_settings.material_mode = 'GLSL'

    bpy.context.window.cursor_modal_restore()
    Print(dump_level, '\nI files imported (in %.3f sec)', time.time() - start_time)
If this technique is so good, how come I haven’t heard of it before now? For personal and family reasons, I haven’t done a seminar in twenty years. What does it mean when you 'clear' someone? The indicator is that the muscle which previously tested weak, for example when the person was thinking about their Top priority Item, now tests strong. What it means we can only conclude from the results people report– they feel better, more resolved, more accepting of what happened. It’s as if you take this terribly subjective trauma– that may well be emotionally sabotaging them in some way– and turn it into a more objective fact– that people are now better equipped to deal with. How long do the results last when you clear someone? Do you ever have to clear them again? I have re-tested people, and the strong muscle response after the clearing is still there up to a year or two later. That’s true if the clearing is thoroughly done, which may well include all the support activity.Of course, that doesn’t mean in the intervening time that people once again don’t get depressed. But muscle testing indicates that the incident we’ve cleared is no longer a contributing factor. When I see a depressed client again and ask, both verbally and with muscle testing, whether we have to re-clear anything, the responses are usually “No.” And muscle testing has rarely indicated otherwise. The only exception is where we haven’t uncovered all the pieces to the puzzle. Isn't muscle testing too subjective? So are people. That’s the beauty of a system that’s tailored to meet individual needs. If you mean by your statement that it’s difficult to rely always on a strong response meaning yes, and a weak response meaning no, then I agree. That’s why when using e.e.c., you’re always double and triple checking everything, which requires some patience.If you mean by “subjective” also that it’s sometimes unreliable, I would also agree. 
That’s why also you need to learn the Remedies for Breakdowns in the Signaling System. Now and then the signals will go haywire, so you need to know how to stay on track accurately. I’ve built in the back-up checks for precisely that reason. How do I know that, when my arm weakens, you're not just pushing harder? Ultimately, you don’t. And neither do I. We don’t know whether something tells your arm to go weak or that same something tells me to push harder. (But if I am pushing harder, I’m not doing it consciously or noticeably). And it’s also true that it doesn’t matter– as long as that “something” continues to give us accurate information.The best way to convince yourself that this curious and strange phenomenon is in fact legitimate, is to muscle test someone else. I've been in therapy for years. I've worked on all this stuff before. What makes you think I need your e.e.c.? Not everyone who works on their “stuff” works through it. The key question is — when we muscle test, does your arm stay strong or go weak. Why do we have to go back to the earliest trauma? Because that seems to be the key link that holds everything else in place. Often if you break that link, the whole chain of similar and later experiences just dissolves or melts away. And if not, muscle testing will alert you that there’s something else that needs to be dealt with. Why do I have to dig up the past if I feel O.K. right now? You don’t. This technique is for people who don’t feel O.K. Suppose I don't feel O.K. The past is still dead and gone. Why dredge it up now? If you test strong on your past, then it is dead and gone. But if you test weak, it is neither dead nor gone. You’re still carrying it around with you as a weakness in your system. Using e.e.c. to turn that weakness into a strength is both efficient and effective. Is this a substitute for, or better than, traditional psychotherapy? Not necessarily. There are some things that e.e.c. 
isn’t appropriate for– like communication problems or existential dilemmas, or even any current issue that’s not related to the past. But it is a superb technique for resolving past pain or trauma that is affecting the present. Can your technique really heal past sexual abuse that is repeated and severe? People who experience e.e.c. say that they feel a lot better about what happened in their past, no matter what type of abuse it was. I have read that cortisol, the stress hormone, actually increases with age in girls who have suffered sexual abuse. Apparently, they are getting worse and not better. A great research project would be to determine if e.e.c. with the recommended support activities can produce a decrease in cortisol instead in such clients. If a guy has been beaten say a thousand times in his life by his father, are you saying that you can just do your clearing and he will magically feel better? Under the right conditions, (which are outlined in the technique), yes! That’s true because we’re not dealing with those thousand beatings anymore, we’re dealing with the memory of them in his nervous system. All we have to change is the negative energy stuck in the memory, and he will have better feelings. Making the change is evidenced by the muscle response that changes from weak to strong. Keeping the change intact is quite another issue and entirely dependent on the effectiveness of the support activity– both the emotional and especially the energetic. Of course, I’m not saying you’ll accomplish anything if you clear those thousand beatings and then send that guy back to the same environment for a thousand more. How can you reduce the complexity of the past to a certain number of specific incidents to be cleared? Yes, but that makes it manageable. Muscle testing takes the complexity of the past and renders it simple. It’s therefore much more economical (both in terms of time and money) to proceed in this systematic fashion. 
Why can't I muscle test my husband? Maybe he wants to be your husband and not your client. Muscle testing only works for people who are deep-down willing to have it work. Many of your results come from inpatient work. Isn't it harder on an outpatient basis? It’s true that if you see someone individually every day in an inpatient setting, they’re going to change more quickly, especially if you have very effective groups to support your work. With outpatient work, on the other hand, you have to focus more on the energetic support between the sessions. This maintains the gains and preserves the continuity. What you call your 'emotional and energetic support activities' are already very powerful tools to help people. Why bother to do the clearing at all? e.e.c. seems to uproot the trauma and pull it out of the nervous system. After the clearing, all the other support activities are especially empowered because they have less to overcome. Instead of trying to fight against the trauma that is still stored there, you are merely maintaining the change that has already been made. Are you saying e.e.c. will work with everyone? No. The diagnoses that I currently believe will not achieve maximum gains with this work are bipolars, multiples (dissociative identity disorder) and schizophrenics. But it can still be helpful in some of those cases.
import flask
import trafaret as t
from flask.ext.mongoset import MongoSet, Model

app = flask.Flask(__name__)
app.config['MONGODB_HOST'] = "localhost"
app.config['MONGODB_PORT'] = 27017
app.config['MONGODB_DATABASE'] = "testdb"
app.config['MONGODB_AUTOREF'] = True
app.config['TESTING'] = True
mongo = MongoSet(app)


class BaseProduct(Model):
    """Abstract base model holding the product schema shared by concrete models."""
    __abstract__ = True
    structure = t.Dict({
        'name': t.String,
        'quantity': t.Int,
        'attrs': t.Mapping(t.String, t.Or(t.Int, t.Float, t.String)),
    }).allow_extra('*')
    # fields stored per-language
    i18n = ['name', 'attrs']
    indexes = ['id']


@mongo.register
class Product(Model):
    """Concrete product model persisted in the 'products' collection."""
    __collection__ = "products"
    inc_id = True
    structure = t.Dict({
        'list_attrs': t.List(t.String)
    }).allow_extra('*')
    i18n = ['list_attrs']
    indexes = [('quantity', -1), 'name']

    def as_dict(self, api_fields=None, exclude=None):
        """Return the instance as a dict in the currently selected language.

        :param api_fields: optional iterable of field names to include;
            defaults to all of this instance's keys.
        :param exclude: optional iterable of field names to leave out of
            the result.
        """
        keys = api_fields or self.keys()
        if exclude:
            # BUGFIX: the original used set union (``|``), which *added*
            # the excluded names to the output instead of removing them.
            # Set difference implements what "exclude" means.
            keys = list(set(keys) - set(exclude))
        result = dict((key, getattr(self, key)) for key in keys)
        # ObjectId is not JSON/string friendly, so expose it as a string.
        if '_id' in result:
            result['_id'] = str(result['_id'])
        return result


@app.route("/")
def index():
    """Demo view: create one product with English and French translations."""
    product = Product.get_or_create(
        {'name': 'Name', 'quantity': 1,
         'attrs': {'feature': 'ice', 'revision': 1},
         'list_attrs': ['one', 'two']}, _lang='en')
    product._lang = 'fr'
    product.update({'name': 'Nom'})
    product.update({'attrs': {'feature': 'glace', 'revision': 1}})
    Product.get_or_create({'name': 'Nom', 'quantity': 1,
                           'attrs': {'feature': 'glace', 'revision': 1}},
                          _lang='fr')
    # NOTE(review): product_fr and product_en alias the SAME object, and
    # _lang is switched back to 'en' before rendering — so both as_dict()
    # calls below likely render English. Confirm whether the second
    # get_or_create result was meant to be product_fr.
    product_fr = product
    product._lang = 'en'
    product_en = product
    total = Product.query.count()
    return "Total: %d. <br> product en is: %s <br> product fr is: %s" % (total, product_en.as_dict(), product_fr.as_dict())


if __name__ == "__main__":
    app.run()
In China there are many jobs in factories that are open with no one to fill them while many educated young workers are unemployed or underemployed. Among those in their early twenties, those with a college degree are four times as likely to be unemployed as people with only an elementary school education. We praise God for the many college students who have received salvation and we pray for follow-up for them since many are lost after graduation. May the Lord give us even more students who want to find true value and direction in life through campus ministries. China’s swift expansion in education over this last decade includes quadrupling the number of college students each year which then produces millions of engineers and scientists. The best of these graduates can have their pick of jobs at any Chinese company. We pray that more churches will support ministries on college campuses. They can use holidays to host various events to equip young believers and help them to become more mature spiritually and then have powerful testimonies. China is turning out millions of graduates with few marketable skills, coupled with a conviction that they are entitled to office jobs with respectable salaries. Jobs in the banking sector pay very well so parents push their child into finance-related studies. Many Christian college students give up their faith when they cannot find work after graduation. We pray for the moving of the Holy Spirit so the faith of these young believers will be deeply rooted in the Bible and they will walk closely with the Lord. Young, educated Chinese without steady jobs spend long hours surfing the Internet, getting together with friends and complaining about the shortage of office jobs for which they believe they were trained. Let us pray for all the campus ministries that badly need pastors and spiritual mentors for all of these young believers. 
We ask the Lord for more workers with passion to reach out to college students and help them grow spiritually. College students in China are very choosy and they have a common aversion to factory labor since they are accustomed to seeing themselves as part of elite group when they enter college. We pray for maturity in interpersonal relationships for all the Christian college students so they will not be despised because of their youth, but will rather honor God in all things, rejoice in the Lord, and learn to serve with a pure heart as an example to many. China has an old Confucian tradition which says that educated people will not engage in manual labor but the economy in China today is still dominated by blue-collar jobs. Chinese believers feel that pastors are God’s servants but they would never want their own child to be a full-time church worker since they know that pastors are poor and many people look down on them. May the Lord have mercy on churches and help believers change their attitude toward God’s servants. The glut of college graduates in China is eroding wages even for those with computer science degrees. There are so many of them that their salary in Shenzhen has fallen to only 550 USD. a month which is less than twice the wage of a blue-collar worker. We pray for more workers to be involved in college ministries and that believers from every walk of life will partner in reaching out to college students. There is always a tendency to “build one’s own turf” and not be willing to share resources. We pray for unity and a vision for building the Kingdom. As the Moon Festival nears, officials nationwide in China have been warned not to spend public money on gifts such as luxury mooncakes--those made with expensive ingredients and premium packaging--as a part of a campaign against corruption. aThe Moon Festival is a big Chinese holiday which emphasizes the reunion of family members. 
We pray for churches planning to use this time as an opportunity for outreach, that the Lord will bless them with a good harvest. Some of the key factors that draw young people in China to seek faith in God are: a distrust of society, being the only child, a highly competitive job market, the bombardment of information, and broken homes. We give thanks to God for His miracle of drawing young people into the urban churches during the past decade. We pray that many more churches will catch the vision and burden of reaching college students outside of the church and care for the young people in the church by listening to them. College students in China usually do not discuss issues of liberty or politics in church on Sunday since they know these are sensitive matters. However, when they gather in small fellowship groups, they are more chatty and open. We pray for spiritual maturity for the leaders of all the fellowship (small) groups that college students attend. May God empower them to be shining examples of faith, speech, and service for Jesus. May the Lord give them safety and unity. To avoid drawing unnecessary attention of the officials, college students refer to their fellowship as "We are gathering to sing to our dad”. Dad is the term for Heavenly Father. Prayer meeting is called “eating time” instead. We praise God for all the new college converts who are very fervent and willing to participate many meetings. We pray they will grow more in faith and deeper in truth. May the Lord protect them in dating and have victory in keeping their bodies pure sexually. In China there are many students from South Korea so the college student fellowships and Bible studies are quite international in flavor. Many Christian coffee shops play praise songs and provide students with the necessary space for Bible studies. 
Let us pray for every Christian coffee shop and bookstore in China which is universally a business which loses money yet badly needs workers who share a vision and burden to reach students. We ask for good fruit with many young people coming to know Jesus through these establishments and ministries. Some of the students fellowships at the Peking University pray in English since many are from the United States. Many Chinese and Asian expatriates join them since all are highly educated. Many missionaries travel deep into China, even in Xinjiang region. We pray for each university or college Bible Fellowship study. May the Lord keep them safe and add to the number of those who are saved, especially the teachers. May God bless those who boldly share the gospel with the students. Some farmers in China have given up their land but when they are old want to return home. However, they have no more farms to tend, so they have no income. Most are excluded from national pension plans and are a burden to their relatives. The biggest need in the rural churches is for teachers and people who will care for others. There is much discrimination against farmers by city folks. We pray that urban Christians will remember to pray for rural believers and for revival of rural churches. The obligation to give cash gifts causes financial stress for families in the small towns in China. Births, weddings, or deaths can make people very concerned financially. Let us pray that every birthday, funeral, or wedding celebration in every church will be not only a celebration but also a good opportunity to reach out to unbelievers with the gospel. An invitation to the banquet for special occasions in China calls for a cash gift of at least one hundred yuan (US$16.) which is about the equivalent of three days' wages. It is not impolite not to go so long as the money arrives. Usually in the small or rural churches financial giving is very small. 
Unfortunately, pastors hesitate to preach the truth of tithing. May the Lord have mercy on us so believers will not have so little faith they will miss out on the abundant blessings God wants to pour out on us. Today in addition to the big ceremonies like weddings and funerals there is a whole range of occasions that call for a banquet such as: birth of a baby, 12th, 36th, 50th, 60th, and 70th birthdays, moving into a new house, or even re-decorating one's house. There are many celebrations among believers as well but some come with an invitation and some do not. This tends to lead to favoritism and loss of unity. We pray for unity among believers and that they will avoid any cause of stumbling among those who are weaker. Parents throw parties when their child joins the military or is admitted to a university. A few decades ago admission to an elite university warranted a banquet, but now some parents have begun sending out invitations to a party for a child entering senior high school. It is not easy for believers to stay true to their faith in Jesus in the military in China or even in college since there are many temptations there. We pray for the families whose children are in the military or in college. May the Lord protect them from the evil one. In China's traditionally rural society, the custom of giving a gift of money was supposed to help fellow villagers, relatives, or clan members through the difficulty of starting a new family or caring for a newborn child. Now, however, some argue that the custom is out of control and that these celebrations are merely times to get financial profit. Chinese people value reciprocity but Christ wants us to treat each other with love, not recount the evil deeds of others. May the Lord remind us that we should do what is good in the eyes of men gladly and willingly because it is all of the grace of God. 
Inevitably, the obligation to send out cash gifts hits low-income families harder than anyone else – the acceptable sum of gift money is based on the closeness of relations between the inviter and the invitee, regardless of the latter’s economic status. Let us pray about the way each church treats the poor in their midst. May God's Spirit convict us not to judge people because the poor are chosen to be rich in faith and heirs of the kingdom of heaven which God has promised to those who love Him. Very few people dare to decline invitations to special occasions in China since doing so would bring the family into potentially irreversible disrepute. It is also damaging to one's social standing to give a sum of money that is lower than the going rate even though that figure is never expressly given. Chinese people put great value on "face" but sadly this is one big reason why they tend to be superficial and not transparent. Believers (preachers included) are unwilling to share their weaknesses and failings, hence their preaching and sharing lacks sincerity. Oh, may the Lord break our hypocrisy. To comprehend the vast underground economy of China, one need only visit Shanghai’s train station and watch as peddlers openly hawk fake receipts. It is so pervasive that even auditors at multinational corporations are being duped. Christian businessmen are still a minority in churches. The common view is that faith and business are two different things and doing business goes by a different set of rules than that of being a Christian. We pray for all Christians in business whether a big or small business, that they will trust God and hold steadfast to their faith. Buyers use fake receipts to evade taxes and defraud employers. And in a country rife with corruption, they are the grease for schemes to bribe officials and business partners even though making and using them is illegal in China. 
In a world where it seems everything is fake and fraudulent, we must pray that believers will trust that doing God’s will and not compromising themselves is pleasing to God and will bring great blessing. In China, people advertise all kinds of fake receipts: travel receipts, lease receipts, waste material receipts and value-added tax receipts. Promotions for counterfeit “fapiao” (the Chinese word for an official invoice) are sent by fax and through mobile phone text messages. However, there is a new awareness that Christian businessmen can make and spend money. They can love Jesus without loving money. We pray these people can have a positive influence in society by caring for the have-nots and even being involved in public welfare. State employees, whether they work for government agencies or for state-owned enterprises, seem as eager as anyone to bolster their compensation by filing fake invoices. Christians often feel, “everyone is doing it and I must go along” thus compromising their Christian testimony. We pray for Christians who have enough conviction that when they do not compromise themselves, will know that God will protect them and bless them even more than before and that they will gain favor in the eyes of their superiors. Nearly ninety percent of the young people in Taiwan are willing to work in China and even in the so-called “second tier” cities with Chongqing, Nanjing, and Tianjin as their top choices. Presently two million Taiwanese (about one out of ten) work there. We pray for believers who work in China and their families who are separated from them for months at a time. We pray for their spiritual lives, the guarding of their hearts, and their family relationships. Success in the businesses in China is not necessarily rosy so we ask the Lord to grant them opportunities and favors in their workplaces. Many Chinese resort to pinyin or Romanized Putonghua when they use a keyboard but their grasp of the written language is weakening as a result. 
The pictorial forms of Chinese characters are notoriously hard to learn and require years of repetitive practice. Many young Chinese know the characters but have difficulty writing them. We pray for those ministries that produce Christian materials and their need for many more Christian writers who can share clearly and powerfully their testimony with their readers. The Chinese language has one of the most complicated systems of writing in the world and requires the knowledge of several thousand characters for an adequate level of literacy. Today’s computer and mobile devices offer many simple ways of inputting Chinese. The current Chinese Bible has many archaic words and terms that young believers have difficulty understanding and pronouncing. We pray for those who translate the Bible and publish Christian books that they will provide suitable contents that are relevant and readily accepted by young believers.
#!/usr/bin/env python ## This is call back script which will be executed after created the grib2 files. ## ## Hycom Model Input requires analysis of 06, 09, 12, 15, 18, 21-hours from ## yesterday and 00 & 03-hours from today date. All 3-hourly forecasts from ## today date. ## ## While creating tar ball, all files must be in present directory, so that ## when user extract it, will produce only files instead of entire paths! ## ## And finally putting into their ftp server. ## ## Arulalan.T ## 04-Mar-2016. import os, subprocess, datetime, getopt, sys, glob, time pbzip2 = '/gpfs1/home/Libs/GNU/ZIPUTIL/pbzip2' pigz = '/gpfs1/home/Libs/GNU/ZIPUTIL/pigz' tigge_check = '/gpfs1/home/Libs/GNU/GRIB_API/gribapi-1.21.0/bin/tigge_check' filesCount = {'ttr': 41, 'lsm': 41, 'orog': 41, '10v': 41, 'tcc': 41, 'gh': 369, 'skt': 41, 'tp': 41, 'msl': 41, 'mx2t6': 40, '2d': 41, '10u': 41, 'mn2t6': 40, 'sshf': 41, 'slhf': 41, 'ssr': 41, '2t': 41, 'sp': 41, 'st': 41, 'q': 328, 'u': 328, 't': 328, 'str': 41, 'v': 328, 'sd': 41} dirsOrder = [ 'gh', 'u', 'v', 'q', 't', '10u', '10v', '2t', 'mx2t6', 'mn2t6', 'skt', 'st', '2d', 'sp', 'msl', 'tp', 'ttr', 'lsm', 'tcc', 'slhf', 'ssr', 'sshf', 'str', 'sd', 'orog' ] def createTarBalls(path, today, member): member = str(member).zfill(3) inpath = os.path.join(path, member) if member == '000': # merge cmd into single grib2 of each members catcmd = ['%s/z_tigge_c_dems*%s' % (d,d) for d in dirsOrder] else: # merge cmd into single grib2 of each members except orography and land-sea mask catcmd = ['%s/z_tigge_c_dems*%s' % (d,d) for d in dirsOrder if d not in ['lsm', 'orog']] # merge cmd into single grib2 of each members catcmd = ' '.join(catcmd) catcmd = 'cat %s ' % catcmd catcmd += ' > %s' # check the filesCount for var, vlen in filesCount.iteritems(): vpath = os.path.join(inpath, var) if not os.path.exists(vpath): raise ValueError("%s Folder doensnt exists" % vpath) files = os.listdir(vpath) if len(files) != vlen: raise ValueError("filesCount do not 
matches,%s %d, %d" % (vpath, len(files), vlen)) ncfile = [f for f in files if f.endswith('.nc')] if ncfile: raise ValueError("Got nc file %s" % vpath) # end of for var, vlen in filesCount.iteritems(): cdir = os.getcwd() os.chdir(inpath) for tgf in os.listdir('.'): cmd = tigge_check + ' -v -w %s/*' % tgf tigge_check_val = os.system(cmd) # it should return 0 on pass if tigge_check_val != 0 : print "WARNING : While checking via tigge_check cmd got error!" #sys.exit(0) # end of for tgf in os.listdir('.'): tDay = datetime.datetime.strptime(today, "%Y%m%d") # get past 6th day timestamp y6Day = (tDay - datetime.timedelta(days=5)).strftime('%Y%m%d') tardir = '../../TarFiles/%s' % today if not os.path.exists(tardir): try: os.makedirs(tardir) except Exception as e: print "parallel folder creation", e # end of if not os.path.exists(tardir): mergedg2file = 'ncmrwf_dems_tigge_%s_%s.grib2' % (today, member) mergedg2filepath = os.path.join(tardir, mergedg2file) print "currnet path : ", os.getcwd() # merge all the params, all the levels, all the time steps, but individual members # into single grib2 (BIG) file. catcmd_out = catcmd % mergedg2filepath subprocess.call(catcmd_out, shell=True) time.sleep(30) # Lets compress single BIG grib2 file by using gz compress cmd. os.chdir(tardir) gzip_cmd = '%s -9 -p 32 %s' % (pigz, mergedg2file) print "gzip_cmd = ", gzip_cmd subprocess.call(gzip_cmd, shell=True) time.sleep(5) print os.getcwd(), member if member == '000': # remove today directory!!! print "path", path if os.path.exists(path): cmd = "rm -rf %s" % path print cmd subprocess.call(cmd, shell=True) # end of if os.path.exists(path): tarpath = os.path.abspath(tardir) if not len(os.listdir(tarpath)) == 45: print "45 tar.gz files are expected to transfer ftp site, but we got only %s files." 
% len(os.listdir(tarpath)) else: # do scp the tar files to ftp_server cmd = 'ssh ncmlogin3 "rsync --update --ignore-existing -razt %s %s:/data/ftp/pub/outgoing/NCUM_TIGGE/"' % (tarpath, ftp_server) print cmd subprocess.call(cmd, shell=True) time.sleep(5) # remove past 11th day tar ball from ftp_server cmd = 'ssh ncmlogin3 "ssh %s rm -rf /data/ftp/pub/outgoing/NCUM_TIGGE/%s"' % (ftp_server, y6Day) print cmd try: subprocess.call(cmd, shell=True) except Exception as e: print "past 6th day tar balls folder has been removed from ftp_server, already", e # end of if member == '000': os.chdir(cdir) # end of def createTarBalls(path, today, ...): if __name__ == '__main__': ftp_server="prod@ftp" date = None member = '000' outpath = '/gpfs3/home/umeps/EPS/ShortJobs/NCUM_EPS_TIGGE/%s/' helpmsg = 'tigge_create_tarball_g2files_put_into_ftp.py --date=20160302 --member=001' try: opts, args = getopt.getopt(sys.argv[1:], "d:m:", ["date=", "member="]) except getopt.GetoptError: print helpmsg sys.exit(2) for opt, arg in opts: if opt == '-h': print helpmsg sys.exit() elif opt in ("-d", "--date"): date = arg elif opt in ("-m", "--member"): member = arg # end of for opt, arg in opts: outpath = outpath % date createTarBalls(outpath, date, member)
A brief biography of William Shakespeare, from his baptism to the inscription on his tomb at Holy Trinity in Stratford. Also includes a link to the full text of Shakespeare's very own last will and testament. A summary overview of the four periods of Shakespeare's works, including links to online editions of the plays and Shakespearean criticism. See also the SRC's play synopses, poetry page, and the new Scenes and Monologues page for acting resources. Links and a Shakespeare Resource Center guide to the Bard's English—including a searchable glossary. Also includes a Speech Analysis: Selected Readings section exclusive to the SRC. Who wrote the works of Shakespeare? Edward de Vere? Francis Bacon? Christopher Marlowe? Information about and links to the opposing points of view. A brief history of Shakespeare's Globe from its construction in 1598 to the New Globe, completed in 1996 in Southwark. Because you have to understand England and the times in which Shakespeare lived to appreciate fully the literature. An at-a-glance guide to all the original content compiled for the Shakespeare Resource Center. The most valuable online resources you'll find about Shakespeare (besides this site, of course). For further reading about Shakespeare, because most of the best research resources are still only available in print (something or the other about copyright law). Links to selected theatre companies specializing in Shakespeare, because nothing beats seeing a play live. This is where all the Shakespearean links go that don't seem to fit anywhere else on the site. This website is a resource for William Shakespeare's plays, sonnets, poems, quotes, biography and the legendary Globe Theatre. 
This website contains the following resources on the language of Shakespeare: Absolute Glossary of Shakespeare, Shakespeare Dictionary, Glossary of Shakespearean False Friends, Glossary of Shakespearean Insults, Shakespeare's Coined Words, Shakespeare Grammar Dictionary, Shakespeare's Vocabulary and The Shakespeare Glossary. This website contains the Folger's Digital Image Collection. It offers online access to over 26,000 images from the Folger Shakespeare Library collection, including books, theater memorabilia, manuscripts, art, and more. Images are available in high resolution and users can show multiple images side-by-side, zoom in and out to see fine detail, and more. This website is a treasure trove of information on Shakespeare and the Elizabethan Era. The section Life and Times on this website contains information on Shakespeare’s Life and Times, including information on Life, Stage, Society, History, Drama, Literature, Plays, and Reference resources. Presents an annotated guide to scholarly Web resources on William Shakespeare, offering access to biographical and critical materials, various editions of Shakespeare's works, and information on teaching them. It is the goal of Open Source Shakespeare to be the best free Web site containing Shakespeare's complete works. It is intended for scholars, thespians, and Shakespeare lovers of every kind. OSS includes the 1864 Globe Edition of the complete works, which was the definitive single-volume Shakespeare edition for over a half-century. This website contains a list of recommended print materials for you to read, and online resources/materials about Shakespeare. The website includes general information and additional web links on Shakespeare, Shakespeare in film, Shakespeare’s language, Shakespeare in performance, primary sources, and Technology. 
The world’s leading charity in promoting the works, life and times of William Shakespeare and offer a unique Shakespeare centered experience with outstanding archive and library collections, inspiring educational and literary event programs and five wonderful houses all directly relating to Shakespeare. A collection of web links to help you find information on William Shakespeare. There are millions of pages that reference Shakespeare on the Internet. This website aims to make it a little easier to find your sources. The website includes Shakespeare's Biography, Shakespeare's Works, Shakespeare's Language, Shakespeare's Will, The Globe Theatre and information on Elizabethan England. The Globe Theatre is a faithful reconstruction of the open-air playhouse, first built in 1599, where Shakespeare worked and for which he wrote many of his greatest plays. The site includes educational and research resources. Serving the academic community, is an edited and moderated, international, e-mail distribution list for discussion among Shakespearean scholars, researchers, instructors, students, and anyone sharing their academic interests and concerns. The Shakespeare Quartos Archive is a digital collection of pre-1642 editions of William Shakespeare's plays. A cross-Atlantic collaboration has also produced an interactive interface for the detailed study of these geographically distant quartos, with full functionality for all thirty-two quarto copies of Hamlet held by participating institutions. An assortment of information on Shakespeare's plays, theater, sonnets, folio, facts, quotes and more!