text
stringlengths 29
850k
|
|---|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the ``Candidato`` (candidate) model."""

    # First migration of the app — no dependencies on other migrations.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Candidato',
            fields=[
                # Standard auto-created primary key.
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nombre', models.CharField(max_length=50, verbose_name=b'Nombre')),
                ('apellidos', models.CharField(max_length=50, verbose_name=b'Apellidos')),
                # Email and NIF (Spanish tax ID) must be unique per candidate.
                ('email', models.CharField(unique=True, max_length=50, verbose_name=b'Email')),
                ('nif', models.CharField(unique=True, max_length=9, verbose_name=b'NIF')),
                # NOTE(review): the verbose_name bytestrings below hold raw UTF-8
                # bytes (e.g. b'Tel\xc3\xa9fono' == "Teléfono"); this is how the
                # Python 2 makemigrations emitted them — left untouched.
                ('telefono', models.CharField(max_length=9, verbose_name=b'Tel\xc3\xa9fono')),
                ('imagen', models.ImageField(upload_to=b'profile_images', verbose_name=b'Imagen', blank=True)),
                # Candidacy flags: Secretary General / Citizens' Council.
                ('secretario', models.BooleanField(default=False, verbose_name=b'Secretario General')),
                ('consejo', models.BooleanField(default=False, verbose_name=b'Consejo Ciudadano')),
                ('biografia', models.TextField(max_length=2000, verbose_name=b'Biograf\xc3\xada', blank=True)),
                ('motivacion', models.TextField(max_length=2000, verbose_name=b'Motivaci\xc3\xb3n', blank=True)),
                ('youtube', models.CharField(max_length=50, verbose_name=b'V\xc3\xaddeo youtube', blank=True)),
                # Soft-delete / visibility flag.
                ('activo', models.BooleanField(default=True, verbose_name=b'Activo')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
"You're my daughter," Franco says to Kiki.
On the Monday, February 16 episode of ABC soap opera General Hospital, Alexis (Nancy Lee Grahn) admits her feelings for Julian (William deVry), who attempts to call Ned.
"You are not calling Ned," Alexis says, ending up nearly on top of Julian while reaching for the phone to stop him.
"Just five minutes," Julian replies.
"No, no, he would not appreciate it," insists Alexis.
"Okay, fine. If you won't let me call your ex, you're going to have to find another way to keep me occupied," Julian tells her.
"Okay," says Alexis, who climbs on top and plants a kiss on him. "How's that?"
Spoiler Alert! Don't miss General Hospital on Thursday, February 19 when Ned (Wally Kurth) makes a shocking announcement to Julian and Alexis.
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2016, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import os
import time
import logging
import re
import random
import datetime
import urllib.parse
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from bennu.filesystemutility import FileSystemUtility as FilesysUtility
from findfine_crawler.utility import Utility as FfUtility
from findfine_crawler.localdb import LocalDbForGYG
"""
爬取 GetYourGuide 資料存至 資料庫
"""
class CrawlerForGYG:
    """Crawl GetYourGuide data and store it in the local database."""

    # constructor
    def __init__(self):
        # sub-command name -> handler method dispatch table
        self.dicSubCommandHandler = {
            "index":self.crawlIndexPage,
            "city":self.crawlCityPage,
            "product":self.crawlProductPage
        }
        self.ffUtil = FfUtility()
        self.fileUtil = FilesysUtility()
        self.db = LocalDbForGYG()
        self.lstDicParsedProductJson = []  # accumulated parsed product.json data
        self.intProductJsonIndex = 1  # running index for 100-product json batches
        self.driver = None  # selenium driver, created lazily by initDriver()

    # return the spider usage message
    def getUseageMessage(self):
        return (
            "- GetYourGuide -\n"
            "useage:\n"
            "index - crawl index page of GetYourGuide \n"
            "city - crawl not obtained city page \n"
            "product [city_page_1_url] - crawl not obtained product page [of given city_page_1_url] \n"
        )

    # create and return a selenium Chrome driver object
    def getDriver(self):
        chromeDriverExeFilePath = self.fileUtil.getPackageResourcePath(strPackageName="findfine_crawler.resource", strResourceName="chromedriver.exe")
        options = webdriver.ChromeOptions()
        options.add_argument("--start-maximized")
        driver = webdriver.Chrome(executable_path=chromeDriverExeFilePath, chrome_options=options)
        return driver

    # initialize the selenium driver (no-op if one already exists)
    def initDriver(self):
        if not self.driver:
            self.driver = self.getDriver()

    # terminate the selenium driver
    def quitDriver(self):
        self.driver.quit()
        self.driver = None

    # restart the selenium driver (used after a crash)
    def restartDriver(self):
        self.quitDriver()
        time.sleep(5)
        self.initDriver()

    # run the crawler: dispatch lstSubcommand[0] to its handler,
    # passing lstSubcommand[1] (if present) as the single argument
    def runCrawler(self, lstSubcommand=None):
        strSubcommand = lstSubcommand[0]
        strArg1 = None
        if len(lstSubcommand) == 2:
            strArg1 = lstSubcommand[1]
        self.initDriver() #init selenium driver
        self.dicSubCommandHandler[strSubcommand](strArg1)
        self.quitDriver() #quit selenium driver

    # crawl the index page and save city urls to the local db
    def crawlIndexPage(self, uselessArg1=None):
        logging.info("crawl index page")
        # GetYourGuide index page
        self.driver.get("https://www.getyourguide.com/")
        # keep clicking "show more cities" until the button disappears
        elesMoreBtn = self.driver.find_elements_by_css_selector("div.section-navigation button.cities-show-more")
        while len(elesMoreBtn) > 0:
            time.sleep(3)
            elesMoreBtn[0].click()
            time.sleep(5)
            elesMoreBtn = self.driver.find_elements_by_css_selector("div.section-navigation button.cities-show-more")
        time.sleep(3)
        # parse the city links
        lstEleCityA = self.driver.find_elements_by_css_selector("div.top-destinations div.top-destination a.cities-image-box")
        for eleCityA in lstEleCityA:
            strCityHref = eleCityA.get_attribute("href")
            # store the city link in the local db
            if strCityHref.startswith("https://www.getyourguide.com/"):
                # rebuild the href ("<keywords>-l<id>") into a search url
                # of the form /s/?q=<keywords>&lc=l<id>
                lstStrCityKeyWord = re.sub("https://www.getyourguide.com/", "", strCityHref).split("-")
                strQ = u"q=" + u"%20".join(lstStrCityKeyWord[0:-1])
                strLc = u"lc=l" + re.sub("[^0-9]", "", lstStrCityKeyWord[-1])
                strCityPage1Url = u"https://www.getyourguide.com/s/?" + strQ + u"&" + strLc
                self.db.insertCityIfNotExists(strCityPage1Url=strCityPage1Url)
                logging.info("save city url: %s"%strCityPage1Url)

    # parse the currently loaded city page and store product links
    def parseCityPage(self, strCityPage1Url=None):
        # find product links
        elesProductA = self.driver.find_elements_by_css_selector("article a.activity-card-link")
        for eleProductA in elesProductA:
            strProductUrl = eleProductA.get_attribute("href")
            # store the product link in the local db
            if strProductUrl.startswith("https://www.getyourguide.com/"):
                logging.info("insert product url: %s"%strProductUrl)
                self.db.insertProductUrlIfNotExists(strProductUrl=strProductUrl, strCityPage1Url=strCityPage1Url)

    # crawl every not-yet-obtained city page
    def crawlCityPage(self, uselessArg1=None):
        logging.info("crawl city page")
        # fetch city urls not yet obtained from the db
        lstStrNotObtainedCityPage1Url = self.db.fetchallNotObtainedCityUrl()
        for strNotObtainedCityPage1Url in lstStrNotObtainedCityPage1Url:
            # extract the city name from the search url
            strCityName = re.sub("%20", " ", re.match("^https://www\.getyourguide\.com/s/\?q=(.*)&lc=l[\d]+$", strNotObtainedCityPage1Url).group(1))
            # city page counter
            intCityPageNum = 1
            # city page 1
            time.sleep(random.randint(2,5)) #sleep random time
            self.driver.get(strNotObtainedCityPage1Url)
            time.sleep(60)
            # parse product links
            self.parseCityPage(strCityPage1Url=strNotObtainedCityPage1Url) # parse page 1
            # click "show more activities" while it is present and visible
            elesShowMoreBtn = self.driver.find_elements_by_css_selector(".activities-show-more .btn")
            while len(elesShowMoreBtn) > 0 and elesShowMoreBtn[0].is_displayed():
                eleShowMoreBtn = elesShowMoreBtn[0]
                time.sleep(random.randint(5,8)) #sleep random time
                intCityPageNum = intCityPageNum+1
                eleShowMoreBtn.click()
                time.sleep(60) #wait click action complete
                # parse product links
                self.parseCityPage(strCityPage1Url=strNotObtainedCityPage1Url) # parse pages 2, 3, ..., n-1
                # re-check whether the page still has "show more activities"
                elesShowMoreBtn = self.driver.find_elements_by_css_selector(".activities-show-more .btn")
            # parse product links
            self.parseCityPage(strCityPage1Url=strNotObtainedCityPage1Url) # parse the last page
            # mark the city as obtained in the db (isGot = 1)
            self.db.updateCityStatusIsGot(strCityPage1Url=strNotObtainedCityPage1Url)
            logging.info("got city %s find %d pages"%(strCityName, intCityPageNum))

    # parse the currently loaded product page into a dict and append it
    # to self.lstDicParsedProductJson
    def parseProductPage(self, strProductUrl=None, strCityName=None):
        dicProductJson = {}
        # strSource
        dicProductJson["strSource"] = "GetYourGuide"
        # strOriginUrl — affiliate partner id appended
        dicProductJson["strOriginUrl"] = strProductUrl + u"?partner_id=JOIL1TN"
        # strUpdateStatus
        dicProductJson["strUpdateStatus"] = "up-to-date"
        # strUpdateTime
        dicProductJson["strUpdateTime"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # strImageUrl — first slider image hosted on the GYG cdn
        strImageUrl = None
        elesImg = self.driver.find_elements_by_css_selector("#photos div.photo-viewer-slider img.photo-item")
        for eleImg in elesImg:
            strImgSrc = eleImg.get_attribute("src")
            if strImgSrc.startswith("https://cdn.getyourguide.com/img/"):
                strImageUrl = strImgSrc
                break
        dicProductJson["strImageUrl"] = strImageUrl
        # strTitle
        strTitle = self.driver.find_element_by_css_selector("h1#activity-title").text
        dicProductJson["strTitle"] = strTitle.strip()
        # strLocation
        dicProductJson["strLocation"] = strCityName
        # intUsdCost — tried from three alternative page layouts
        intUsdCost = 0
        if len(self.driver.find_elements_by_css_selector("header.header p.total-price")) > 0:
            strUsdCost = self.driver.find_element_by_css_selector("header.header p.total-price").text.strip()
            if strUsdCost == "Sold out": # sold out
                intUsdCost = 0
            else:
                elesDealPriceSpan = self.driver.find_elements_by_css_selector("header.header p.total-price span.deal-price")
                isDealPriceExists = True if len(elesDealPriceSpan) > 0 else False
                if isDealPriceExists: # discounted price
                    intUsdCost = int(float(re.sub("[^0-9\.]", "", elesDealPriceSpan[0].text)))
                else: # list price
                    intUsdCost = int(float(re.sub("[^0-9\.]", "", strUsdCost)))
        elif len(self.driver.find_elements_by_css_selector("div.activity-column-minor p.price strong.price-actual")) > 0:
            strUsdCost = self.driver.find_element_by_css_selector("div.activity-column-minor p.price strong.price-actual").text.strip()
            intUsdCost = int(float(re.sub("[^0-9\.]", "", strUsdCost)))
        elif len(self.driver.find_elements_by_css_selector("div.price-detail p.price strong.price-actual")) > 0:
            strUsdCost = self.driver.find_element_by_css_selector("div.price-detail p.price strong.price-actual").text.strip()
            intUsdCost = int(float(re.sub("[^0-9\.]", "", strUsdCost)))
        else:
            pass
        dicProductJson["intUsdCost"] = intUsdCost
        # intReviewStar — parsed from the rating title "Rating: X out of 5"
        intReviewStar = 0
        if len(self.driver.find_elements_by_css_selector("div.activity-rating span.rating")) > 0:
            strRatingTitle = self.driver.find_element_by_css_selector("div.activity-rating span.rating").get_attribute("title").strip()
            strReviewStar = re.match("^Rating: ([0-9\.]+) out of 5$", strRatingTitle).group(1)
            intReviewStar = int(float(strReviewStar))
        dicProductJson["intReviewStar"] = intReviewStar
        # intReviewVisitor
        intReviewVisitor = 0
        if len(self.driver.find_elements_by_css_selector("#rating-link")) > 0:
            strReviewVisitor = re.sub("[^\d]", "", self.driver.find_element_by_css_selector("#rating-link").text).strip()
            intReviewVisitor = int(float(strReviewVisitor))
        dicProductJson["intReviewVisitor"] = intReviewVisitor
        # strIntroduction — all highlight texts joined with spaces
        strIntroduction = u""
        elesIntroduction = self.driver.find_elements_by_css_selector("#highlights *")
        for eleIntroduction in elesIntroduction:
            strIntroduction = strIntroduction + u" " + re.sub("\s", " ", eleIntroduction.text.strip())
        dicProductJson["strIntroduction"] = strIntroduction.strip()
        # intDurationHour
        strDurationHour = self.driver.find_element_by_css_selector("div.key-info-box div div.time").text.strip()
        strDurationHour = re.sub("\s", " ", strDurationHour.lower())
        intDurationHour = self.convertDurationStringToHourInt(strDurtation=strDurationHour)
        dicProductJson["intDurationHour"] = intDurationHour
        # strGuideLanguage — defaults to english when no live-guide info
        strGuideLanguage = u"english"
        if len(self.driver.find_elements_by_css_selector("div.key-info-box div.live-guide div.lang")) > 0:
            strGuideLanguage = self.driver.find_element_by_css_selector("div.key-info-box div.live-guide div.lang").text.strip().lower()
        dicProductJson["strGuideLanguage"] = strGuideLanguage
        # intOption (to be confirmed)
        dicProductJson["intOption"] = None
        # strStyle (GetYourGuide has no such data)
        dicProductJson["strStyle"] = None
        self.lstDicParsedProductJson.append(dicProductJson)

    # crawl product pages (when strCityPage1Url == None, every completed
    # city in the db is crawled)
    def crawlProductPage(self, strCityPage1Url=None):
        # clear leftover in-memory data
        self.lstDicParsedProductJson = []
        self.intProductJsonIndex = 1
        if not strCityPage1Url:
            # no city specified
            lstStrObtainedCityUrl = self.db.fetchallCompletedObtainedCityUrl()
            for strObtainedCountryUrl in lstStrObtainedCityUrl:
                self.crawlProductPageWithGivenCityUrl(strCityPage1Url=strObtainedCountryUrl)
        else:
            # a city url was given
            self.crawlProductPageWithGivenCityUrl(strCityPage1Url=strCityPage1Url)
        # flush the remaining (< 100) products to json
        if len(self.lstDicParsedProductJson) > 0:
            strJsonFileName = "%d_product.json"%(self.intProductJsonIndex*100)
            strProductJsonFilePath = self.fileUtil.getPackageResourcePath(strPackageName="findfine_crawler.resource.parsed_json.gyg", strResourceName=strJsonFileName)
            self.ffUtil.writeObjectToJsonFile(dicData=self.lstDicParsedProductJson, strJsonFilePath=strProductJsonFilePath)
            self.lstDicParsedProductJson = []

    # crawl the product pages of one given city url
    def crawlProductPageWithGivenCityUrl(self, strCityPage1Url=None):
        logging.info("crawl product page with city %s"%strCityPage1Url)
        # extract the city name from the search url
        strCityName = re.sub("%20", " ", re.match("^https://www\.getyourguide\.com/s/\?q=(.*)&lc=l[\d]+$", strCityPage1Url).group(1))
        # fetch the product urls recorded for this city
        lstStrProductUrl = self.db.fetchallProductUrlByCityUrl(strCityPage1Url=strCityPage1Url)
        for strProductUrl in lstStrProductUrl:
            # skip products already downloaded
            if not self.db.checkProductIsGot(strProductUrl=strProductUrl):
                time.sleep(random.randint(5,8)) #sleep random time
                try:
                    self.driver.get(strProductUrl)
                    # parse the product page
                    self.parseProductPage(strProductUrl=strProductUrl, strCityName=strCityName)
                    # mark the product as obtained (isGot = 1)
                    #self.db.updateProductStatusIsGot(strProductUrl=strProductUrl)
                except Exception as e:
                    # on driver crash: log, skip this product, restart the driver
                    logging.warning(str(e))
                    logging.warning("selenium driver crashed. skip get product: %s"%strProductUrl)
                    self.restartDriver() # restart
                # show progress
                logging.info("進度: %d/100"%len(self.lstDicParsedProductJson))
                # write a json batch once 100 products are accumulated
                if len(self.lstDicParsedProductJson) == 100:
                    strJsonFileName = "%d_product.json"%(self.intProductJsonIndex*100)
                    strProductJsonFilePath = self.fileUtil.getPackageResourcePath(strPackageName="findfine_crawler.resource.parsed_json.gyg", strResourceName=strJsonFileName)
                    self.ffUtil.writeObjectToJsonFile(dicData=self.lstDicParsedProductJson, strJsonFilePath=strProductJsonFilePath)
                    self.intProductJsonIndex = self.intProductJsonIndex+1
                    self.lstDicParsedProductJson = []

    # convert a duration string (lower-cased, e.g. "2 hours", "1 day")
    # into an hour count; a day counts as 8 hours; defaults to 1 hour
    # when neither "hour" nor "day" appears
    # (NOTE(review): parameter name "strDurtation" is a typo, but it is
    # used as a keyword argument by parseProductPage, so it is kept)
    def convertDurationStringToHourInt(self, strDurtation=None):
        intDefaultDuration = 1
        if not strDurtation or ("hour" not in strDurtation and "day" not in strDurtation):
            return intDefaultDuration
        else:
            intTotalDurationHour = 0
            mDurationHour = re.search("([\d\.]+) hour", strDurtation)
            mDurationDay = re.search("([\d\.]+) day", strDurtation)
            if mDurationHour:
                intDurationHour = int(float(mDurationHour.group(1)))
                intTotalDurationHour = intTotalDurationHour + intDurationHour
            if mDurationDay:
                intDurationDay = int(float(mDurationDay.group(1)))
                intTotalDurationHour = intTotalDurationHour + (intDurationDay*8)
            return intTotalDurationHour
|
George Soros, the billionaire investor and philanthropist, is giving $100m to central and eastern Europe to counter the impact of the economic crisis on the poor, voluntary groups and non-government organisations.
Mr Soros disclosed the planned gift in an interview with the FT in which he urged the European Union to boost its aid for the region to finance increased welfare spending and tackle rising political extremism.
“The political risk is very severe and the rise of the chauvinistic, xenophobic far right is a disturbing development,” said Mr Soros, in a reference to the advances made by extremist parties in the recent European parliament elections, including Jobbik in Hungary, the Slovak National Party and the Greater Romania Party.
“The EU must do more in terms of providing support, including financial support. The International Monetary Fund programmes [launched in Hungary, Ukraine, Latvia and about five other countries] are very severe in terms of cutting budgets. The EU must solidify support for EU values,” said Mr Soros.
The Hungarian-born businessman, who has given over $3bn to the region since the fall of Communism, urged Brussels not to limit its assistance to EU members, such as Poland, but to support countries further afield, especially Ukraine.
“The region has suffered a setback through no fault of its own as this is a crisis that fundamentally originated in the west, and central and eastern Europe turns out to be hardest hit”.
Mr Soros’s gift will be concentrated on countries facing the severest recessions, including Ukraine, the Baltic states, the Balkans, and parts of central Asia and the Caucasus. The funds will target charities working with children and the young, and NGOs, some of which may have suffered declines in local donations.
Mr Soros has for over a decade concentrated his philanthropic work in the region on support for think tanks and similar organizations. But he said it was time to return, for a while, to the humanitarian aid he gave in the early 1990s. The gift follows a $50m donation he made recently to a New York charity.
|
from __future__ import unicode_literals
import logging
import sys
import types
import warnings
from django.conf import settings
from django.core import signals
from django.core.exceptions import ImproperlyConfigured, MiddlewareNotUsed
from django.db import connections, transaction
from django.urls import get_resolver, get_urlconf, set_urlconf
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.module_loading import import_string
from .exception import (
convert_exception_to_response, get_exception_response,
handle_uncaught_exception,
)
logger = logging.getLogger('django.request')
class BaseHandler(object):
    """
    Base request handler: loads middleware and turns an HttpRequest into
    an HttpResponse. Subclasses (WSGI handler, test client handler) call
    load_middleware() once and then get_response() per request.
    """

    def __init__(self):
        # Legacy MIDDLEWARE_CLASSES hook lists (empty under new-style MIDDLEWARE).
        self._request_middleware = None
        self._view_middleware = None
        self._template_response_middleware = None
        self._response_middleware = None
        self._exception_middleware = None
        # The fully-built callable chain; None until load_middleware() completes.
        self._middleware_chain = None

    def load_middleware(self):
        """
        Populate middleware lists from settings.MIDDLEWARE (or the deprecated
        MIDDLEWARE_CLASSES).
        Must be called after the environment is fixed (see __call__ in subclasses).
        """
        self._request_middleware = []
        self._view_middleware = []
        self._template_response_middleware = []
        self._response_middleware = []
        self._exception_middleware = []
        if settings.MIDDLEWARE is None:
            warnings.warn(
                "Old-style middleware using settings.MIDDLEWARE_CLASSES is "
                "deprecated. Update your middleware and use settings.MIDDLEWARE "
                "instead.", RemovedInDjango20Warning
            )
            # Legacy path: hooks are collected into the per-phase lists and
            # applied by get_response()/_legacy_get_response().
            handler = convert_exception_to_response(self._legacy_get_response)
            for middleware_path in settings.MIDDLEWARE_CLASSES:
                mw_class = import_string(middleware_path)
                try:
                    mw_instance = mw_class()
                except MiddlewareNotUsed as exc:
                    # Raising MiddlewareNotUsed lets a middleware opt out.
                    if settings.DEBUG:
                        if six.text_type(exc):
                            logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc)
                        else:
                            logger.debug('MiddlewareNotUsed: %r', middleware_path)
                    continue
                # Request/view hooks run in declared order; response-phase
                # hooks are prepended so they run in reverse order.
                if hasattr(mw_instance, 'process_request'):
                    self._request_middleware.append(mw_instance.process_request)
                if hasattr(mw_instance, 'process_view'):
                    self._view_middleware.append(mw_instance.process_view)
                if hasattr(mw_instance, 'process_template_response'):
                    self._template_response_middleware.insert(0, mw_instance.process_template_response)
                if hasattr(mw_instance, 'process_response'):
                    self._response_middleware.insert(0, mw_instance.process_response)
                if hasattr(mw_instance, 'process_exception'):
                    self._exception_middleware.insert(0, mw_instance.process_exception)
        else:
            # New-style path: each middleware wraps the next handler, so the
            # chain is built from the inside (the view) outwards, iterating
            # MIDDLEWARE in reverse.
            handler = convert_exception_to_response(self._get_response)
            for middleware_path in reversed(settings.MIDDLEWARE):
                middleware = import_string(middleware_path)
                try:
                    mw_instance = middleware(handler)
                except MiddlewareNotUsed as exc:
                    if settings.DEBUG:
                        if six.text_type(exc):
                            logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc)
                        else:
                            logger.debug('MiddlewareNotUsed: %r', middleware_path)
                    continue
                if mw_instance is None:
                    raise ImproperlyConfigured(
                        'Middleware factory %s returned None.' % middleware_path
                    )
                if hasattr(mw_instance, 'process_view'):
                    self._view_middleware.insert(0, mw_instance.process_view)
                if hasattr(mw_instance, 'process_template_response'):
                    self._template_response_middleware.append(mw_instance.process_template_response)
                if hasattr(mw_instance, 'process_exception'):
                    self._exception_middleware.append(mw_instance.process_exception)
                handler = convert_exception_to_response(mw_instance)
        # We only assign to this when initialization is complete as it is used
        # as a flag for initialization being complete.
        self._middleware_chain = handler

    def make_view_atomic(self, view):
        """Wrap the view in transaction.atomic() for every DB that has
        ATOMIC_REQUESTS enabled, unless the view opted out."""
        non_atomic_requests = getattr(view, '_non_atomic_requests', set())
        for db in connections.all():
            if db.settings_dict['ATOMIC_REQUESTS'] and db.alias not in non_atomic_requests:
                view = transaction.atomic(using=db.alias)(view)
        return view

    def get_exception_response(self, request, resolver, status_code, exception):
        return get_exception_response(request, resolver, status_code, exception, self.__class__)

    def get_response(self, request):
        """Return an HttpResponse object for the given HttpRequest."""
        # Setup default url resolver for this thread
        set_urlconf(settings.ROOT_URLCONF)
        response = self._middleware_chain(request)
        # This block is only needed for legacy MIDDLEWARE_CLASSES; if
        # MIDDLEWARE is used, self._response_middleware will be empty.
        try:
            # Apply response middleware, regardless of the response
            for middleware_method in self._response_middleware:
                response = middleware_method(request, response)
                # Complain if the response middleware returned None (a common error).
                if response is None:
                    raise ValueError(
                        "%s.process_response didn't return an "
                        "HttpResponse object. It returned None instead."
                        % (middleware_method.__self__.__class__.__name__))
        except Exception:  # Any exception should be gathered and handled
            signals.got_request_exception.send(sender=self.__class__, request=request)
            response = self.handle_uncaught_exception(request, get_resolver(get_urlconf()), sys.exc_info())
        # Keep the request alive until the response is closed.
        response._closable_objects.append(request)
        # If the exception handler returns a TemplateResponse that has not
        # been rendered, force it to be rendered.
        if not getattr(response, 'is_rendered', True) and callable(getattr(response, 'render', None)):
            response = response.render()
        if response.status_code == 404:
            logger.warning(
                'Not Found: %s', request.path,
                extra={'status_code': 404, 'request': request},
            )
        return response

    def _get_response(self, request):
        """
        Resolve and call the view, then apply view, exception, and
        template_response middleware. This method is everything that happens
        inside the request/response middleware.
        """
        response = None
        # A per-request urlconf (set by e.g. host-based routing middleware)
        # overrides the default resolver.
        if hasattr(request, 'urlconf'):
            urlconf = request.urlconf
            set_urlconf(urlconf)
            resolver = get_resolver(urlconf)
        else:
            resolver = get_resolver()
        resolver_match = resolver.resolve(request.path_info)
        callback, callback_args, callback_kwargs = resolver_match
        request.resolver_match = resolver_match
        # Apply view middleware
        for middleware_method in self._view_middleware:
            response = middleware_method(request, callback, callback_args, callback_kwargs)
            if response:
                break
        if response is None:
            wrapped_callback = self.make_view_atomic(callback)
            try:
                response = wrapped_callback(request, *callback_args, **callback_kwargs)
            except Exception as e:
                response = self.process_exception_by_middleware(e, request)
        # Complain if the view returned None (a common error).
        if response is None:
            if isinstance(callback, types.FunctionType):  # FBV
                view_name = callback.__name__
            else:  # CBV
                view_name = callback.__class__.__name__ + '.__call__'
            raise ValueError(
                "The view %s.%s didn't return an HttpResponse object. It "
                "returned None instead." % (callback.__module__, view_name)
            )
        # If the response supports deferred rendering, apply template
        # response middleware and then render the response
        elif hasattr(response, 'render') and callable(response.render):
            for middleware_method in self._template_response_middleware:
                response = middleware_method(request, response)
                # Complain if the template response middleware returned None (a common error).
                if response is None:
                    raise ValueError(
                        "%s.process_template_response didn't return an "
                        "HttpResponse object. It returned None instead."
                        % (middleware_method.__self__.__class__.__name__)
                    )
            try:
                response = response.render()
            except Exception as e:
                response = self.process_exception_by_middleware(e, request)
        return response

    def process_exception_by_middleware(self, exception, request):
        """
        Pass the exception to the exception middleware. If no middleware
        return a response for this exception, raise it.
        """
        for middleware_method in self._exception_middleware:
            response = middleware_method(request, exception)
            if response:
                return response
        # Bare raise re-raises the exception currently being handled.
        raise

    def handle_uncaught_exception(self, request, resolver, exc_info):
        """Allow subclasses to override uncaught exception handling."""
        return handle_uncaught_exception(request, resolver, exc_info)

    def _legacy_get_response(self, request):
        """
        Apply process_request() middleware and call the main _get_response(),
        if needed. Used only for legacy MIDDLEWARE_CLASSES.
        """
        response = None
        # Apply request middleware; the first one to return a response
        # short-circuits the view.
        for middleware_method in self._request_middleware:
            response = middleware_method(request)
            if response:
                break
        if response is None:
            response = self._get_response(request)
        return response
|
The BBC is off the mark in the biographical film "Victoria and Abdul." The film follows the life of the late Queen Victoria and her Indian servant Abdul. The movie focuses on the Queen's latter years. People are used to hearing of Victoria and her husband Albert. Therefore, to hear the pairing of "Victoria and Abdul" is different.
After the death of her husband Albert, the Queen went into a long period of mourning. Two Indian men of lower societal rank were chosen to present the Queen with a mohur, which is an Indian gold coin. Abdul catches Victoria's eye. They become friends much to the consternation of the royal household and British Prime Minister, due to his skin color, religion and lack of social standing.
Abdul, a Muslim, begins teaching the Queen about Islam and aspects of his nation's culture. He misrepresents his educational background and ends up teaching the Queen erroneous things, which she later finds out through an independent investigation commissioned by the royal household. She becomes disappointed. Spoiler alert: Abdul also failed to tell her he has a wife in India.
The movie "Victoria and Abdul" made the Queen look like a greedy, slovenly, lazy, indignant, petulant old woman, who is abusive to her staff and family. The film portrays Abdul in a negative manner, though it does not appear to do so outright. For instance, the royal doctor announcing Abdul is "riddled" with gonorrhea, stating it in very crude and jubilant terms, seeking a way to drive a wedge between him and the Queen. The royal doctor was angry at having to examine an "Indian d*ck" as he stated, to reach a diagnosis and attempt treatment. The doctor furiously stated he did not go to school to examine "Indian d*cks." I was left speechless.
This film was not very flattering to Victoria or Abdul. I was appalled by the film. Some of it was revolting. Even if they had all these issues and these things were said, it doesn't look good on film. It's actually quite upsetting and a sad throwback we really could have done without. While I realize this film was made to appeal to multi-cultural London, with the goal of keeping the royals ensconced in Buckingham Palace as the monarchy, in trying to state the Queen was a friend of a minority and defended him, the movie was racist and insulting.
My personal theory is, though the film did not broach the topic, the Queen's established dementia diagnosis was at play in the situation with Abdul. She was acting in a manner that was irrational and unorthodox. This coupled with the fact the Queen appeared to project her feelings for her late husband Albert onto Abdul, in a case of psychological transference, would explain her unorthodox behavior. Maybe she thought Albert had come back as Abdul (Indians believe in reincarnation). However, in reality that was not the case, as Abdul was not Albert.
|
import mobula as M
import mobula.layers as L
import numpy as np
def test_multiply():
    """Element-wise multiply layer: shape, forward result, and gradients."""
    shape = (2, 3, 4, 5)
    x = np.random.random(shape)
    y = np.random.random(shape)
    layer_x, layer_y = L.Data([x, y])
    prod = layer_x * layer_y
    prod.reshape()
    assert prod.Y.shape == x.shape
    prod.forward()
    prod.dY = np.random.random(prod.Y.shape)
    prod.backward()
    # Forward is the element-wise product.
    assert np.allclose(x * y, prod.Y)
    # d(x*y)/dx = y and d(x*y)/dy = x, each scaled by the upstream gradient.
    assert np.allclose(prod.dX[0], prod.dY * prod.X[1])
    assert np.allclose(prod.dX[1], prod.dY * prod.X[0])
def test_mul():
    """Multiplying a layer by a scalar (either side) yields MultiplyConstant."""
    shape = (2, 3, 4, 5)
    arr = np.random.random(shape)
    data = L.Data(arr)
    # Both data * 3 and 3 * data must behave identically.
    for layer in (data * 3, 3 * data):
        assert type(layer) == L.MultiplyConstant
        assert np.allclose(arr * 3, layer.eval())
        layer.dY = np.random.random(layer.Y.shape)
        layer.backward()
        # Gradient of c*x w.r.t. x is the constant c.
        assert np.allclose(layer.dX, 3 * layer.dY)
def test_matmul():
    """Matrix-multiply layer: forward and gradients, including constant operands."""
    rows, inner, cols = 3, 4, 5
    left = np.random.random((rows, inner))
    right = np.random.random((inner, cols))
    layer_left, layer_right = L.Data([left, right])
    mm = L.MatMul([layer_left, layer_right])
    assert np.allclose(mm.eval(), np.dot(left, right))
    mm.dY = np.random.random(mm.Y.shape)
    mm.backward()
    # dL/dA = dY @ B^T, dL/dB = A^T @ dY.
    assert np.allclose(mm.dX[0], np.dot(mm.dY, right.T))
    assert np.allclose(mm.dX[1], np.dot(left.T, mm.dY))
    # One operand given as a plain numpy constant.
    const_right = M.dot(layer_left, right)
    const_left = M.dot(left, layer_right)
    assert np.allclose(const_right.eval(), np.dot(left, right))
    assert np.allclose(const_left.eval(), np.dot(left, right))
    const_right.dY = np.random.random(const_right.Y.shape)
    const_left.dY = np.random.random(const_left.Y.shape)
    const_right.backward()
    const_left.backward()
    assert np.allclose(const_right.dX, np.dot(const_right.dY, right.T))
    assert np.allclose(const_left.dX, np.dot(left.T, const_left.dY))
|
Republican Party of Arkansas: Yesterday, June 19, 2008, the State House Republican Caucus held a meeting to elect its leadership for the next legislative session. Representative Bryan King (Green Forest) was unanimously elected Minority Leader. King will be replacing Representative Johnny Key (Mountain Home), who will be moving to the State Senate.
King represents District 91, which encompasses Carroll and the most western parts of Boone Counties. He serves on both the Transportation and Aging/Children/Youth committees, and chairs the subcommittee on Aging. He resides in Green Forest with his wife Lydia and their daughter Meghan. King is both a farmer and businessman.
Others elected during the Caucus meeting include Representative Jon Woods (Springdale) as Minority Whip, Representative Dan Greenberg (Little Rock) as Caucus Treasurer, and Representative Rick Green (Van Buren) as Caucus Secretary. The Caucus also welcomed the nine Republican Representative-Elects and continues to look forward to strengthening their numbers in this November.
Tags: Arkansas, Arkansas Republican Caucus, Bryan King, Republican Party of Arkansas To share or post to your site, click on "Post Link". Please mention / link to the ARRA News Service. Thanks!
|
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.test import TestCase
# from mock import call
from mock_django.http import MockHttpRequest
from mock_django.signals import mock_signal_receiver
from ..models import O2OTag
from ..signals import o2o_tags_created
from ..views import TagCreateView
from ..views import TagsCreateView
from .models import Tagged
from .models import TaggedIn
from .models import Tagger
class TagCreateViewTest(TestCase):
    """Tests for the single-tag creation view (TagCreateView)."""

    def setUp(self):
        # One instance for each of the three roles a tag relates.
        self.tagger = Tagger.objects.create()
        self.tagged = Tagged.objects.create()
        self.tagged_in = TaggedIn.objects.create()
        # Content types backing the generic foreign keys.
        self.tagger_content_type = ContentType.objects.get_for_model(Tagger)
        self.tagged_content_type = ContentType.objects.get_for_model(Tagged)
        self.tagged_in_content_type = ContentType.objects.get_for_model(
            TaggedIn)
        # A valid POST payload for the view's form.
        self.data = {
            'tagged_object_id': self.tagged.pk,
            'tagged_content_type': self.tagged_content_type.pk,
            'tagged_in_object_id': self.tagged_in.pk,
            'tagged_in_content_type': self.tagged_in_content_type.pk,
        }
        self.url = reverse('o2o_taggin_tag_create')

    def test_tag_create(self):
        # A valid POST creates exactly one tag owned by the request user.
        req = MockHttpRequest(POST=self.data)
        req.user = self.tagger
        resp = TagCreateView.as_view()(req)
        created = O2OTag.objects.get()
        self.assertEqual(201, resp.status_code)
        self.assertEqual(self.tagger, created.tagger)
        self.assertEqual(self.tagged, created.tagged)
        self.assertEqual(self.tagged_in, created.tagged_in)

    def test_tag_create__form_invalid(self):
        # An empty POST is rejected and no tag is stored.
        req = MockHttpRequest(POST={})
        req.user = self.tagger
        resp = TagCreateView.as_view()(req)
        remaining = O2OTag.objects.all()
        self.assertEqual(400, resp.status_code)
        self.assertEqual(0, remaining.count())
class TagsCreateViewTest(TestCase):
    """Tests for TagsCreateView: creating several tags from one formset POST."""

    def setUp(self):
        # ContentTypes for every model involved in a tag relation.
        self.tagger_content_type = ContentType.objects.get_for_model(Tagger)
        self.tagged_content_type = ContentType.objects.get_for_model(Tagged)
        self.tagged_in_content_type = ContentType.objects.get_for_model(
            TaggedIn)
        self.tagger = Tagger.objects.create()
        self.tagged = Tagged.objects.create()
        self.tagged1 = Tagged.objects.create()
        self.tagged_in = TaggedIn.objects.create()
        # A valid two-form formset payload tagging two objects in the same
        # context (standard Django management-form keys plus per-form fields).
        self.data = {
            'form-TOTAL_FORMS': u'2',
            'form-INITIAL_FORMS': u'0',
            'form-MAX_NUM_FORMS': u'',
            'form-0-tagged_object_id': self.tagged.pk,
            'form-0-tagged_content_type': self.tagged_content_type.pk,
            'form-0-tagged_in_object_id': self.tagged_in.pk,
            'form-0-tagged_in_content_type': self.tagged_in_content_type.pk,
            'form-1-tagged_object_id': self.tagged1.pk,
            'form-1-tagged_content_type': self.tagged_content_type.pk,
            'form-1-tagged_in_object_id': self.tagged_in.pk,
            'form-1-tagged_in_content_type': self.tagged_in_content_type.pk,
        }
        self.url = reverse('o2o_taggin_tag_create_multiple')

    def test_create__tagger_is_request_user(self):
        request = MockHttpRequest(POST=self.data)
        request.user = self.tagger
        response = TagsCreateView.as_view()(request)
        tags = O2OTag.objects.all()
        self.assertEqual(201, response.status_code)
        self.assertEqual(2, tags.count())
        # Every created tag must be attributed to the authenticated user.
        for t in tags:
            self.assertEqual(self.tagger, t.tagger)

    def test_create__call_tags_created_signal(self):
        # MockHttpRequest is already imported at module level; the redundant
        # function-local re-import was removed.
        request = MockHttpRequest(POST=self.data)
        request.user = self.tagger
        with mock_signal_receiver(o2o_tags_created) as tags_created_receiver:
            TagsCreateView.as_view()(request)
            self.assertTrue(tags_created_receiver.called)
            # this fail assertion is failing but must be correct
            # self.assertEqual(tags_created_receiver.call_args_list, [
            #     call(signal=o2o_tags_created, sender=TagsCreateView, instances=tags),
            # ])

    def test_tag_create__form_invalid(self):
        # Incomplete form-0 data and a missing form-1 make the formset invalid.
        data = {
            'form-TOTAL_FORMS': u'2',
            'form-INITIAL_FORMS': u'0',
            'form-MAX_NUM_FORMS': u'',
            'form-0-tagged_object_id': self.tagged.pk,
            'form-0-tagged_content_type': self.tagged_content_type.pk,
        }
        request = MockHttpRequest(POST=data)
        request.user = self.tagger
        response = TagsCreateView.as_view()(request)
        tags = O2OTag.objects.all()
        self.assertEqual(400, response.status_code)
        self.assertEqual(0, tags.count())
|
The California Public Utilities Commission approved funding for seven more regional broadband consortia this morning. That brings the total to fourteen consortia representing 49 counties.
All five of the commissioners voted in favor of the grants. In contrast to the first round of funding, this second round vote did not generate any debate. The criteria and conditions that the commission laid out in December 2011 were deemed satisfied.
In line with the goals established by the CPUC, the consortia are focused on building and deploying broadband facilities in unserved and underserved areas of California, improving access to and knowledge of the broadband resources that are already available to Californians, and promoting greater adoption of broadband services.
Some of the groups, for example the Central Coast Broadband Consortium and Redwood Coast Connect, are well established within their local communities and are moving ahead with deployment and adoption projects. Others are in the organizational phase. Their focus in the first year of the program will be to pull together local leaders and broadband champions into a regional organization that can achieve the goals set by the CPUC.
The consortia program is paid for by the California Advanced Services Fund (CASF), which has total funding of $225 million authorized by the California legislature. Of that, lawmakers set aside $10 million for regional broadband groups. The rest of the money, for the most part, goes towards building broadband infrastructure in areas of California that lack it. Some of that money has already been spent, but most of it remains available for eligible projects. The program is managed by the CPUC.
The fourteen consortia cover diverse geographic regions. In urban areas where infrastructure exists, albeit not always to a sufficient level, the emphasis will be on promoting broadband adoption in communities where the digital divide sometimes seems insurmountable.
The benchmark for these efforts is the Get Connected program developed by the California Emerging Technology Fund. The consortia program is the result of years of work by CETF. Its CEO, Sunne Wright McPeak, and staff led a successful effort to bring existing regional broadband groups into a statewide organization and to foster new groups in areas that hadn’t yet produced their own.
There’s a different challenge in rural areas. Broadband infrastructure is frequently ten or twenty years behind that in more densely populated areas, and sometimes doesn’t exist at all. Most of the focus of the CASF infrastructure grant and loan program is on underserved and unserved areas of California. Consortia representing predominantly rural areas will be putting proportionately greater effort into broadband deployment projects.
Tellus Venture Associates managed the Central Coast Broadband Consortium’s successful grant application and advises several cities that are members of the East Bay Broadband Consortium, the City of Los Angeles and other municipal and community broadband projects around California.
|
#! /usr/bin/env python
"""
Calculate statistics for each study area, and prints results to stdout.
All it prints is the number of blankspots, the number of v1 nodes,
and the number of total nodes. Since I am no longer storing the blankspot
information in the hist_point table itself, these stats are no longer very informative.
Currently, user_analysis.py does what this script used to do. It prints the "output_totals_*"
files which contain the stats for each study area by date.
"""
import MapGardening
import optparse
usage = "usage: %prog [options]"
p = optparse.OptionParser(usage)
p.add_option('--place', '-p',
default="all"
)
options, arguments = p.parse_args()
possible_tables = [
'blankspots_1000_b',
]
if options.place == "all":
places = MapGardening.get_all_places()
else:
placename = options.place
place = MapGardening.get_place(placename)
places = {placename: place}
MapGardening.init_logging()
for placename in places.keys():
print "printing blankspot info for", placename
MapGardening.init_db(places[placename]['dbname'])
for table in possible_tables:
nodetable = MapGardening.NodeTable(table) # Table may not exist, but object will still be created
nodetable.get_blankspot_stats()
MapGardening.disconnect_db()
|
Paerau is not only a senior lecturer in Te Tumu, he is also a Barrister. He has held a practising certificate for nearly twenty years and his expertise is in Māori land law, trusts, wills, probate and administration. He is an equity lawyer. He has appeared in the Māori Land Court and Māori Appellate Court from time to time and his legal mentor was the late Robin Corcoran who practiced in the Māori Land Court in Christchurch for many decades. Paerau is the principal legal adviser to the lawyers in the Ngāi Tahu Maori Law Centre in Dunedin.
Paerau’s principal iwi is Ngāti Awa. However he has very close whakapapa ties to Te Arawa (Tuhourangi and Ngāti Rangitihi), Ngāti Tūwharetoa of Kawerau and Taupo, Ngāti Maniapoto and Whakatōhea and Whānau-a-Apanui. He also has links to Taitokerau iwi, Tūhoe and Ngāti Porou.
Paerau's academic research interests focus on Māori, law and the Treaty of Waitangi and also whānau, hapū and iwi dynamics as well as Māori politics.
Mize, S., & Warbrick, P. (2017). Dealing with challenges that arise when lawyer and client are from different cultures. New Zealand Family Law Journal, 9, 56-66.
Warbrick, P. (2016). [Review of the book: Outcasts of the gods: The struggle over slavery in Māori New Zealand]. Journal of the Polynesian Society, 125(2), 189-191. [Book Review].
Warbrick, P. (2016, February). Echoes of the past: The painful side of Māori women's experience of world war. Verbal presentation at the Making Women Visible Conference: A Conference in Honour of Barbara Brookes, Dunedin, New Zealand.
Warbrick, W. P. (2004). Exhibit A: Whakapapa and list of heirs for Oke Pukeroa. Māori Land Court of New Zealand. Christchurch, New Zealand: Māori Land Court. 35p.
Warbrick, W. P. (2004). Exhibit A: Whakapapa and heirs to Marewa Te Kahupake or Te Ruatareti (died June 10. 1886). Māori Land Court of New Zealand. Christchurch, New Zealand: Māori Land Court. 8p.
Warbrick, W. P. (2004). Exhibit A: Whakapapa and heirs to Rangiheuea (died circa 1880). Māori Land Court of New Zealand. Christchurch, New Zealand: Māori Land Court. 22p.
Warbrick, W. P. (2003). Exhibit A: Whakapapa and list of heirs for Te Rangitukehu (died June 27, 1887). Christchurch, New Zealand: Māori Land Court of New Zealand. 15p.
Warbrick, W. P. (2002). Report by W. Paerau Warbrick for the identification of the shareholders Hana Rewite and Henare Rewite. Christchurch, New Zealand: Māori Land Court of New Zealand. 8p.
Warbrick, W. P. (2002). Whakapapa for the identification of the shareholders Hana Rewite and Henare Rewite. Christchurch, New Zealand: Māori Land Court of New Zealand. 6p.
Paerau Warbrick, W. (2011). Written expert statement for the defence on the operation of tamaiti whāngai and moko in Māori society (29 November 2011) for the Magistrates Court in Muswellbrook, New South Wales, Australia. In the matter of R (Commonwealth) v Matthew Alexander Waikato. Magistrates Court, Muswellbrook, New South Wales, Australia. [Government Submission].
Paerau Warbrick, W. (2011). Written submissions against the Crown’s claim that the islands known as Rūrima is Crown land for the Māori Land Court before Judge Harvey. In the matter of Rūrima Islands (A20070003741). Māori Land Court Registry, Rotorua, New Zealand, File A20070003741. [Government Submission].
Paerau Warbrick, W. (2011). Written submissions relating to judicial conflict of interest, judicial disclosure and judicial recusal of Judge Layne Harvey in regard to the Māori land Court hearing relating to the islands known as Rūrima. In the matter of Rūrima Islands (A20070003741). Māori Land Court Registry, Rotorua, New Zealand, File A20070003741. [Government Submission].
Paerau Warbrick, W. (2011). Written memorandum and submissions for the Māori Students at the University of Otago in regard to breach of their Treaty of Waitangi rights in the Waitangi Tribunal before Chief Judge Isaac, Judge Milroy and Judge Spencer. In the matter of prejudicial effects of the Education (Freedom of Association) Amendment Act 2011 (Wai 2343). Waitangi Tribunal, Wellington, New Zealand, File Wai 2343. [Government Submission].
Paerau Warbrick, W. (2010). Written memorandum for the Ngai Tahu Māori Law Centre on the operation of section 116 of Te Ture Whenua Māori Act 1993 for the Māori Land Court before Judge Coxhead. In the matter of succession to William James Wybrow (A20070006221), Māori Land Court Registry, Christchurch, New Zealand, File A20070006221, 63p. [Government Submission].
Paerau Warbrick, W. (2010). Written and oral submissions regarding the jurisdiction of the Chief Judge of the Māori Land Court to hear applications and grant remedies. In the matter of Hana Rewiti and Henare Rewiti (A20010005699). Māori Land Court Registry, Christchurch, New Zealand, Files A20010005699 & CJ 2002/35. [Government Submission].
Warbrick, P. (2010, September). The Māori Land Court in the 1960s and 1970s. University of Otago Te Tumu, School of Māori, Pacific and Indigenous Studies Seminar, Dunedin, New Zealand. [Department Seminar].
Paerau Warbrick, W. (2010). Written submission in regard to the Marine and Coastal Area (Takutai Moana) Bill (November 2010) before the Māori Affairs Select Committee. New Zealand House of Representatives, Wellington, New Zealand. [Government Submission].
Paerau Warbrick, W. (2009). Written and oral submissions and reports in the Māori Land Court hearings before Judge Savage and Judge Harvey. In the matter of Rangitaiki 60C1 Trust and Matahina A1D1 trust: Removal of Trustees (A20090001332). In the matter of Rangitaiki Valley Whenua Trust Lands: Constitute an ahu whenua trust (A20090018144). Māori Land Court Registry, Rotorua, New Zealand, Files A20090001332 & A20090018144. [Government Submission].
Paerau Warbrick, W. (2008). Written memorandum for the Ngāi Tahu Māori Law Centre on evidence and the standard of proof in Chief Judge Applications before the Māori Appellate Court under section 45 of Te Ture Whenua Māori Act 1993. In the matter of Waihao 903 Section IX Block, Henare Rakiihia Tau v Nga Whanau o Morven & Glenavy, 2010, Maori Appellate Court Minute Book 168. Māori Land Court Registry, Christchurch, New Zealand, File A20070011156. [Government Submission].
Paerau Warbrick, W. (2006). Written and oral submissions in the Māori Land Court hearings regarding tamaiti whāngai under section 115 of Te Ture Whenua Māori Act 1993 before Deputy Chief Judge Wilson Isaac. In the matter of succession to James Henry Newton (A19990005454). In the matter of succession to Charles Newton II (A19990005455). In the matter of succession to Robert Hart (A19990005458). Māori Land Court Registry, Christchurch, New Zealand, Files A19990005454, A19990005455, A19990005458. [Government Submission].
Warbick, P. (2005). Oral and written submissions to the Māori Land Court for determining heirs to Riini Whakataka, Te Maui Taiawatea, Hiria Te Taiawatea and Pineaha Takamoana. Dunedin, New Zealand: Te Tumu, University of Otago. [Submission to the Maori Land Court].
Warbrick, W. P. (2004). Submissions in the Māori Land Court for succession to Poihaere Paihau or Poihaere Te Wharehiraka or Ngakawhena Paihau/Paikau or Poihaere Ngawai. Christchurch, New Zealand: Māori Land Court of New Zealand. [Oral and Written Submissions].
Warbrick, W. P. (2002). Submissions in the Māori Land Court for correcting a succession to Henare Te Maire. Christchurch, New Zealand: Māori Land Court of New Zealand. [Oral and Written Submissions].
Warbrick, W. P. (2001). Submissions in the Māori Land Court for correcting a succession to Violet Victoria Seigle. Christchurch, New Zealand: Māori Land Court of New Zealand. [Oral and Written Submissions].
Warbrick, W. P. (2003). List of judges of the Native Land Court and Māori Land Court 1864-2003: He pepa o Ngā ingoa o ngā tiati o ngā kooti whenua Maori 1864-2003. 7p.
Warbrick, P. (2015). Minute books: An integral part of the Māori Land Court. In A. Cooper, L. Paterson & A. Wanhalla (Eds.), The lives of colonial objects. (pp. 129-133). Dunedin, New Zealand: Otago University Press.
Warbrick, P. (2012). 'O ratou whenua': Land and estate settlements. In N. R. Wheen & J. Hayward (Eds.), Treaty of Waitangi settlements. (pp. 92-101). Wellington, New Zealand: Bridget Williams Books.
Warbrick, W. P. (2009). Treaty of Waitangi. In X. Zhao & X. Qiao (Eds.), New Zealand: Its history, people and culture. (pp. 69-97). Shanghai, China: Fudan University Press.
Warbrick, P. (2014, May). Women and the Maori Land Court. Verbal presentation at the Sixteenth Berkshire Conference on the History of Women: Histories on the Edge, Toronto, Canada.
Warbrick, P. (2013, November). The Māori incorporations of the South Island: A commentary on the use of micro-history. Verbal presentation at the New Zealand Historical Association Biennial Conference, Dunedin, New Zealand.
Warbrick, P. (2013, March). Of the people, by the people, for the people. He tangata, he tangata, he tangata: Maori voters and the constitution. Verbal presentation at the Colonial Origins of New Zealand Politics and Government Conference, Dunedin, New Zealand.
Warbrick-Anderson, P. (2013, February). Minute books of the Maori Land Court. Verbal presentation at the Inaugural Conference of the Centre for Research on Colonial Culture, Dunedin, New Zealand.
Warbrick, P. (2009, June). Maori relations with pakeha and the law, 1900-1960. Verbal presentation at the Interracial Intimacies: New Zealand Histories Symposium, Dunedin, New Zealand.
Warbrick, P. (2007, December). The value of autobiography in academia: Maori women and post-world-war-II American presidents. Verbal presentation at the Self-Narratives 'Research Conversation' Day, Dunedin, New Zealand.
Warbrick, W. P. (2010). Māori Land Court 1960-1980: An autoethnographic and social commentary (PhD). University of Otago, Dunedin, New Zealand. 419p.
Warbrick, W. P. (1998). Labour's Bill of Rights: The Labour Government's attempt at a bill of rights in the 1980s (Master of Arts). University of Otago, Dunedin, New Zealand. 137p.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#https://github.com/theodric/nsclock
#theodric 20170717
import os
import sys
import time
import requests
import xmltodict
import argparse
from collections import OrderedDict
from demo_opts import get_device
from luma.core.legacy import show_message
from luma.core.legacy.font import proportional, SINCLAIR_FONT
## This script makes use of the NS API, documented extensively here:
## http://www.ns.nl/en/travel-information/ns-api
## The below imports settings.py, which contains your NS login and API key.
## You can sign up for this key at http://www.ns.nl/ews-aanvraagformulier/
## settings.py must be created in the same directory as this script. Format:
############################################################################
## username = 'username@emailprovider.tld'
## apikey = 'abyC7M5QqRUXrt1ttyf4rtD-mttw4nkn0zzl35rkGJnMj1zznIppl3'
############################################################################
## Import runtime credentials (username/apikey) from settings.py.
try:
    import settings
except ImportError:
    print('Copy settings_example.py to settings.py and set the configuration to your own credentials')
    sys.exit(1)

## CONFIGURABLE ITEM
## Hardcode your default DESTINATION stations here.
## Look up what your destination stations' long and short names
## are by searching the official station list:
## http://webservices.ns.nl/ns-api-stations-v2
#startStation = not configured here!
destStation1 = "Den Helder"
destStation2 = "Schagen"
## There are two destinations that get me to my target station,
## so I'm checking for both, but you can just uncomment the
## next line if you only need to watch one destination.
#destStation2 = destStation1

## Command-line options.  The original script referenced ``args`` without
## ever defining it, which raised a NameError before anything was shown.
## parse_known_args() is used rather than parse_args() because
## demo_opts.get_device() consumes its own luma display options from
## sys.argv as well.
parser = argparse.ArgumentParser(description='NS departure board')
parser.add_argument('--rotation', type=int, default=0,
                    help='e-paper display rotation in degrees')
args, _unknown = parser.parse_known_args()

## CONFIGURABLE ITEM
## the station=<VALUE> at the end of the URL is your start station.
## Look up the short code for your station in the above-referenced
## station list.
## This block retrieves the current departures list to /tmp/trains.xml
response = requests.get('http://webservices.ns.nl/ns-api-avt?station=asd',
                        auth=requests.auth.HTTPBasicAuth(
                            settings.username,
                            settings.apikey), stream=True)

with open('/tmp/trains.xml', 'wb') as handle:
    for block in response.iter_content(1024):
        handle.write(block)

## Define the OLED device to write to using the setup routine
device = get_device()
msg = "a"
show_message(device, msg, fill="white", font=proportional(SINCLAIR_FONT))
time.sleep(1)

## Parse the just-written XML file into nested dicts.
with open('/tmp/trains.xml') as fd:
    doc = xmltodict.parse(fd.read(), xml_attribs=True)

## Number of trains departing from the start station at the time the script
## is run; feeds the iterator so we don't miss any.  (The stray "$" artifacts
## that made these lines a syntax error in the original were removed.)
departingTrains = doc['ActueleVertrekTijden']['VertrekkendeTrein']
departingTrainsCount = len(departingTrains)

## Display rows (destination / departure time / platform) collected below.
displayRows = []

for iterCount in range(departingTrainsCount):
    train = departingTrains[iterCount]
    ## Only the end station, departure time, and departure platform are
    ## shown.  More fields exist -- paw through trains.xml and the xmltodict
    ## docs to retrieve them.  This page is useful:
    ## http://omz-software.com/pythonista/docs/ios/xmltodict.html
    dest = train['EindBestemming']
    ## Renamed from ``time``: the original name shadowed the ``time`` module
    ## imported (and used) above.
    depTime = train['VertrekTijd']
    plat = train['VertrekSpoor']['#text']
    spc = " "
    print(dest + spc + depTime[11:16] + spc + plat)  ## print each row on CLI

    ## CONFIGURABLE ITEM
    ## At most four trains matching your destinations are displayed; lower
    ## the limit below to get fewer results.
    if (dest == destStation1 or dest == destStation2) and len(displayRows) < 4:
        ## Shortening names to 3-letter codes to fit the screen.
        if dest == "Schagen":
            dest = "SGN"
            print("!! HIT")  ## flagging matches on CLI for debug
        elif dest == "Den Helder":
            dest = "HLD"
            print("!! HIT")  ## flagging matches on CLI for debug
        ## chars [11:16] of the raw timestamp is where the HH:MM time lives,
        ## e.g. 2017-07-01T21:07:00+0200 -> 21:07
        displayRows.append(dest + spc + depTime[11:16] + spc + "Spoor " + plat)

## The PaPiRus e-paper driver is a separate dependency from the luma OLED
## stack imported at the top; the original used PapirusTextPos without ever
## importing it, which raised a NameError.
from papirus import PapirusTextPos

## Initialize screen buffer var "text" without displaying anything yet
## (first argument False = no auto-update on AddText).
text = PapirusTextPos(False, rotation=args.rotation)

## Header, top centered: X position 12, Y position 0, font size 13.
## text.AddText("Text", Xpos, Ypos, fontSize, Id="freeformElementID")
text.AddText("Vertrek van de treinen", 12, 0, 13, Id="Header")

## Stack however many matches were found (zero to four) from the top down.
## This replaces the original disp/disp2/disp3/disp4 variables and their
## try/except NameError existence checks, and no longer crashes when no
## matching departure was found.
for rowIndex, row in enumerate(displayRows):
    text.AddText(row, 0, 19 + 20 * rowIndex, 18, Id="opt%d" % (rowIndex + 1))

## With auto-update disabled nothing is pushed to the panel until WriteAll();
## the original never called it.  NOTE(review): confirm against the PaPiRus
## API for the installed library version.
text.WriteAll()
|
Bipolar disorder is a class of mood disorders that is marked by dramatic changes in mood, energy and behaviour. The key characteristic is that people with bipolar disorder alternate between episodes of mania (extreme elevated mood) and depression (extreme sadness). These episodes can last from hours to months. The mood disturbances are severe enough to cause marked impairment in the person’s functioning. The experience of mania is not pleasant and can be very frightening to the person. It can lead to impulsive behaviour that has serious consequences for the person and their family. A depressive episode makes it difficult or impossible for a person to function in their daily life.
People with bipolar disorder vary in how often they experience an episode of either mania or depression. Mood changes with bipolar disorder typically occur gradually. For some individuals there may be periods of wellness between the different mood episodes. Some people may also experience multiple episodes within a 12 month period, a week, or even a single day (referred to as “rapid cycling”). The severity of the mood can also range from mild to severe.
Establishing the particular type of bipolar disorder can greatly aid in determining the best type of treatment to manage the symptoms.
The Diagnostic Statistical Manual (DSM-IV-TR) is a manual used by doctors to determine the specific type of bipolar disorder. The diagnosis is based on the severity of symptoms and length of time that the symptoms are evident.
The different types of bipolar disorder are based on the severity and duration of the altered mood.
Bipolar I disorder is characterized by at least one manic episode or mixed episode and one or more major depressive episodes. These episodes last for at least one week but may continue for months. Bipolar I disorder is the most severe form of the illness.
Bipolar II disorder is characterized by predominantly depressive episodes accompanied by occasional hypomanic episodes. Hypomanic episodes are milder than manic episodes but can still impair functioning. Between episodes, there may be periods of normal functioning. The risk of suicide is high for this type of bipolar disorder.
Cyclothymic disorder is characterized by chronic fluctuating moods involving periods of hypomania and depression. The periods of both depressive and hypomanic symptoms are shorter, less severe, and are separated by periods of normal mood. However, these mood swings can impair a person’s life and create chaos as they can be feeling on top of the world one day and feeling down and depressed the next day. Some people with cyclothymia develop a more severe form of bipolar illness while for others, it continues as a chronic (ongoing) condition.
Bipolar Disorder Not Otherwise Specified: Some people will experience the symptoms of a manic episode and a major depressive episode, but their symptoms do not fit into the above mentioned types of bipolar disorder. This is known as Bipolar Disorder Not Otherwise Specified. For example, a person who experiences rapid cycling between manic and depressive episodes would be diagnosed with this type of bipolar disorder. Just like the other types of bipolar disorder, Bipolar Disorder Not Otherwise Specified is a treatable disorder.
Mixed episodes are ones in which a person simultaneously experiences characteristics of both mania and depression. For example, a person may experience excitability and agitation of mania coupled with depression and irritability. This combination of energy, agitation and depression makes a mixed episode the most dangerous for risk of suicide.
People with bipolar disorder can lead healthy and fulfilling lives when the illness is effectively treated and managed.
Without treatment, the illness tends to worsen. Over time a person may suffer more frequent and more intense episodes. Treatment can help to reduce frequency and severity of episodes and help to maintain a good quality of life.
|
#The MIT License (MIT)
#
#Copyright (c) 2015 Jiakai Lian
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#!/usr/bin/python
from mod_pbxproj import XcodeProject
import sys
import json
#print 'Number of arguments:', len(sys.argv), 'arguments.'
#print 'Argument List:', str(sys.argv)
class Configuration:
    """A named build configuration loaded from a JSON settings file.

    The configuration name is the part of the file name before the first
    dot, lowercased -- e.g. ``Debug.json`` -> ``debug``.
    """

    def __init__(self, jsonFileName):
        self.jsonFileName = jsonFileName
        # Derive the configuration name from the file name.
        self.name = jsonFileName.partition(".")[0].lower()
        # Parse the JSON payload up front; it holds the build settings
        # that will later be applied to the Xcode project.
        with open(jsonFileName) as fh:
            self.jsonContent = json.load(fh)
if len(sys.argv) < 2:
raise Exception("need project.pbxproj file path")
#read the file path
filePath = sys.argv[1]
if len(sys.argv) > 2:
jsonFiles = list(sys.argv)
del jsonFiles[0:2]
else:
jsonFiles = ["debug.json","release.json"]
print jsonFiles
#create configuration objects
dictOfConfig = dict();
for file in jsonFiles:
config = Configuration(file)
dictOfConfig[config.name] = config
#load project file and create a backup
project = XcodeProject.Load(filePath)
project.backup()
rootObject = project["rootObject"]
projectObject = project["objects"][rootObject]["buildConfigurationList"]
for id in project["objects"][projectObject]["buildConfigurations"]:
name = project["objects"][id]["name"].lower()
#if this configuration need to be changed
if dictOfConfig[name] is not None:
entry = project["objects"][id]["buildSettings"]
#for each setting in the json, apply to the target entry
for key in dictOfConfig[name].jsonContent:
entry[key] = dictOfConfig[name].jsonContent[key]
project.save()
print "Auto Configuration Complete"
|
Outer idler pivot bushing for the clutch bellcrank. For all 1970-83 AMC models with a manual transmission. Price is for 1 bushing.
|
from email.mime.image import MIMEImage
from email.utils import make_msgid
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from django.template import TemplateDoesNotExist
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
from PIL import Image
from allauth.account import app_settings
from allauth.utils import get_user_model
from allauth_api.settings import allauth_api_settings
class AccountAdapterMixin(object):
def new_user_response_data(self, user, request=None):
serializer_class = self.new_user_serializer_class(user)
return_data = None
if serializer_class:
return_data = serializer_class(instance=user, context={'request': request}).data
return return_data
def new_user_serializer_class(self, user):
return None
def clean_username(self, username, shallow=False):
"""
Validates the username. You can hook into this if you want to
(dynamically) restrict what usernames can be chosen. This copies most of the code from the django-allauth
DefaultAccountAdapter, but adds support for the CASE_INSENTIVE_IDS setting because the PRESERVE_USERNAME_CASING
setting in allauth, does not allow you to preserve the username case but check against it in a case-insensitive way
"""
for validator in app_settings.USERNAME_VALIDATORS:
validator(username)
# TODO: Add regexp support to USERNAME_BLACKLIST
username_blacklist_lower = [ub.lower()
for ub in app_settings.USERNAME_BLACKLIST]
if username.lower() in username_blacklist_lower:
raise forms.ValidationError(
self.error_messages['username_blacklisted'])
# Skipping database lookups when shallow is True, needed for unique
# username generation.
if not shallow:
from .utils import filter_users_by_username
if filter_users_by_username(username).exists():
user_model = get_user_model()
username_field = app_settings.USER_MODEL_USERNAME_FIELD
error_message = user_model._meta.get_field(
username_field).error_messages.get('unique')
if not error_message:
error_message = self.error_messages['username_taken']
raise forms.ValidationError(error_message)
return username
def login(self, request, user):
super(AccountAdapterMixin, self).login(request, user)
return {'detail': 'User logged in.'}
def add_message(self, request, level, message_template, message_context=None, extra_tags=''):
if allauth_api_settings.USE_DJANGO_MESSAGES:
super(AccountAdapterMixin, self).add_message(request, level, message_template, message_context, extra_tags)
def email_confirmation_key(self, request):
return request.data.get('key', None)
def email_confirmation_response_data(self, confirmation):
    """Build the response payload for a successful email confirmation."""
    address = confirmation.email_address.email
    return {'detail': '%s %s' % (address, _("confirmed"))}
def reset_password_confirmation_data(self, request):
    """Collect the fields the password-reset confirmation form expects,
    defaulting each missing field to None."""
    field_names = ('uidb36', 'key', 'password1', 'password2')
    return {name: request.data.get(name, None) for name in field_names}
def reset_password_confirmation_form_kwargs(self, request):
    # Hook: extra keyword arguments passed to the password-reset
    # confirmation form; subclasses may override.
    return {}
def reset_password_confirmation_response_data(self, user):
    # Payload returned after a successful password-reset confirmation.
    return {'detail': _("User password changed")}
class ImageKeyMixin(object):
    """
    A mixin class for an account adapter that enables sending and receiving
    images for email validation and password reset keys.

    Outgoing: keys are rendered into PNG images and embedded inline in the
    email via a Content-ID reference. Incoming: the key is read back out of
    an uploaded image's PNG text metadata, falling back to a plain text key.
    """

    def render_mail(self, template_prefix, email, context):
        """
        Overrides to catch the prefixes for email confirmation and password
        reset and render html emails with image-based keys.

        Returns an EmailMultiAlternatives with both text and html bodies and
        the key attached as an inline image.
        """
        if template_prefix not in allauth_api_settings.IMAGE_KEY_PREFIXES:
            return super(ImageKeyMixin, self).render_mail(template_prefix, email, context)

        # Render the key into an image and reference it from the html body
        # via a Content-ID so mail clients display it inline.
        generator = allauth_api_settings.IMAGE_KEY_GENERATOR_CLASS()
        key = self.get_key_from_context(template_prefix, context)
        image = generator.create_image_key(key)
        key_cid = make_msgid()
        context['key_cid'] = key_cid[1:-1]  # trim angle brackets

        subject = render_to_string('{0}_subject.txt'.format(template_prefix),
                                   context)
        # remove superfluous line breaks
        subject = " ".join(subject.splitlines()).strip()
        subject = self.format_email_subject(subject)

        bodies = {}
        for ext in ['html', 'txt']:
            try:
                template_name = '{0}_message.{1}'.format(template_prefix, ext)
                bodies[ext] = render_to_string(template_name,
                                               context).strip()
            except TemplateDoesNotExist:
                # We require both html and text templates
                raise ImproperlyConfigured('Both text and html templates must exist to use ImageKeyMixin')

        msg = EmailMultiAlternatives(subject, bodies['txt'], settings.DEFAULT_FROM_EMAIL, [email])
        msg.attach_alternative(bodies['html'], 'text/html')
        try:
            img = MIMEImage(image.read())
        finally:
            # Always release the generated image, even if MIMEImage fails.
            image.close()
        img.add_header('Content-ID', key_cid)
        img.add_header('Content-Disposition', 'inline')
        msg.attach(img)
        return msg

    def get_key_from_context(self, template_prefix, context):
        """Extract the raw key for the given template prefix from the email
        rendering context. For password resets the key is the second-to-last
        path component of the reset URL."""
        result = ""
        if 'email_confirmation' in template_prefix:
            result = context['key']
        elif 'password_reset' in template_prefix:
            result = context['password_reset_url'].split('/')[-2]
        return result

    def reset_password_confirmation_data(self, request):
        """Collect password-reset confirmation data, decoding the uid/key
        pair from an uploaded key image when one is provided.

        The key text has the form ``<uidb36>-<key>``; a plain text value is
        accepted as a fallback when the upload is not a readable image.
        """
        data = {
            'password1': request.data.get('password1', None),
            'password2': request.data.get('password2', None),
        }
        key_image = request.data.get('key', None)
        if key_image:
            try:
                # PNG text metadata carries the key under the 'key' entry.
                with Image.open(key_image) as image:
                    key_text = image.text.get('key', None)
            except Exception:
                key_text = key_image  # Fall back on single text key
            if key_text:
                # Split on the first '-' only; the key part may itself
                # contain dashes.
                uidb36, _separator, key = key_text.partition('-')
                data['uidb36'] = uidb36
                data['key'] = key
        return data

    def email_confirmation_key(self, request):
        """Return the email-confirmation key, decoding it from an uploaded
        key image when possible, else falling back to the raw value."""
        key = None
        key_image = request.data.get('key', None)
        if key_image:
            try:
                with Image.open(key_image) as image:
                    key = image.text.get('key', None)
            except Exception:
                key = key_image  # Fall back on text key
        return key
|
20:18–26 The ill-timed speech brings disaster (vv. 18–20); human respect may lead to rash promises and enmity (vv. 22–23); lies bring dishonor and lasting disgrace (vv. 24–26).
|
__author__ = 'LiGuangyu'
import aioredis
import asyncio,functools
from concurrent.futures import CancelledError
import logging;logging.basicConfig(level=logging.INFO)
from common.comm import singleton
import time
def handler(loop, context):
    '''
    Exception handler intended for loop.set_exception_handler().
    (Original author's note, translated: registering this on the event loop
    did not behave as expected; left for further investigation.)
    '''
    print('in exception Handler')
    print(context)
class baseConnectPool(object):
    """Holds two aioredis connection pools on the class itself and exposes
    the active one as ``pool``; also provides the ``tryCatch`` decorator
    that maps Redis/connection errors to console diagnostics."""

    _pool1 = None
    _pool2 = None
    pool = None  # the currently active pool (initially _pool1)

    @classmethod
    async def init(cls, loop=None, addr='127.0.0.1', port=6379, password=None):
        """Create both pools and select the first as the active one.

        ConnectionRefusedError is reported and re-raised; any other setup
        failure is reported and swallowed.
        """
        # if not loop:
        #     loop = asyncio.get_event_loop()
        # loop.set_exception_handler(handler=handler)
        try:
            created = [
                await aioredis.create_pool(
                    (addr, port), loop=loop, password=password,
                    encoding='utf-8', minsize=1, maxsize=1)
                for _ in range(2)]
            baseConnectPool._pool1, baseConnectPool._pool2 = created
            baseConnectPool.pool = baseConnectPool._pool1
            print('hello')
        except ConnectionRefusedError as e:
            print('Redis Cannot access')
            raise e
        except Exception as e:
            print('Error')
            print(e)

    @classmethod
    def tryCatch(cls, func):
        """Decorator: await ``func`` and translate failures.

        ReplyError is re-raised; cancellation and refused connections are
        reported (hooks for high-availability handling); anything else is
        printed and swallowed, yielding None.
        """
        @functools.wraps(func)
        async def wrapped(*args, **kwargs):
            try:
                return await func(*args, **kwargs)
            except aioredis.errors.ReplyError as err:
                # Redis itself replied with an error message.
                print('Reply Error Catched')
                raise err
            except CancelledError as err:
                # A single dropped connection lands here; with one connection
                # this is the place for failover handling. With a pool, see
                # the ConnectionRefusedError branch below.
                print('hello world')
            except ConnectionRefusedError as err:
                # Reconnect failed. With a pool, check pool.size (number of
                # live connections) to detect a dead Redis instance.
                print(cls._pool1)
                print(cls._pool2)
                print('connect Refused')
            except Exception as err:
                print(type(err))
                print(err)
        return wrapped
class redis(baseConnectPool):
    """Thin async facade over the shared aioredis connection pool.

    All commands are wrapped by ``baseConnectPool.tryCatch``, so Redis reply
    errors are re-raised and connection failures are reported.
    """

    # Key holding the monotonically increasing order-number counter.
    ORDERNOKEY = 'ORDERNOKEY'

    def __init__(self):
        pass

    @classmethod
    async def init(cls, loop=None, addr='127.0.0.1', port=6379, password=None):
        """Initialise the shared connection pools (see baseConnectPool.init)."""
        await super().init(loop=loop, addr=addr, port=port, password=password)

    @classmethod
    @baseConnectPool.tryCatch
    async def incr(cls, key):
        """INCR ``key`` and return the new value."""
        with await cls.pool as rdsConn:
            return await rdsConn.incr(key)

    @classmethod
    @baseConnectPool.tryCatch
    async def set(cls, key, value):
        """SET ``key`` to ``value``."""
        with await cls.pool as rdsConn:
            return await rdsConn.set(key, value)

    @classmethod
    @baseConnectPool.tryCatch
    async def get(cls, key):
        """GET ``key``."""
        with await cls.pool as rdsConn:
            return await rdsConn.get(key)

    @classmethod
    @baseConnectPool.tryCatch
    async def getOrderId(cls):
        """Atomically allocate the next 13-digit order number.

        SETNX seeds the counter at 1000000000000, INCR allocates; once the
        counter exceeds 9000000000000 it is wrapped back down by
        8000000000000 so ids stay 13 digits.
        """
        with await cls.pool as rdsConn:
            tr = rdsConn.multi_exec()
            tr.setnx(cls.ORDERNOKEY, 1000000000000)
            tr.incr(cls.ORDERNOKEY)
            rev = await tr.execute()
            if rev[1] > 9000000000000:
                # BUG FIX: the original called redis.incr(key, delta), but
                # incr() accepts only a key and the returned coroutine was
                # never awaited, so the counter never wrapped. Use INCRBY
                # with a negative delta on the live connection instead.
                await rdsConn.incrby(cls.ORDERNOKEY, -8000000000000)
            return rev[1]

    @classmethod
    @baseConnectPool.tryCatch
    async def getTime(cls):
        """Return the Redis server time formatted as YYYYmmddHHMMSS
        (converted via the local timezone)."""
        with await cls.pool as rdsConn:
            server_time = await rdsConn.time()
            return time.strftime('%Y%m%d%H%M%S', time.localtime(server_time))

    @classmethod
    @baseConnectPool.tryCatch
    async def getTime1(cls):
        """Return the raw Redis server time."""
        with await cls.pool as rdsConn:
            return await rdsConn.time()
if __name__ == '__main__':
    # Manual smoke test / benchmark harness: requires a Redis server on
    # localhost:6379.
    loop = asyncio.get_event_loop()
    loop.set_exception_handler(handler=handler)

    async def test():
        await redis.init()
        a = 0
        print(await redis.set('test',0))
        print(await redis.getTime1())
        start_time = time.time()
        # NOTE(review): this loop never terminates, so the timing print below
        # is unreachable; presumably intended for manual benchmarking where
        # the process is interrupted by hand — confirm.
        while True:
            a+=1
            await redis.incr('test')
        print('Time Used %s' % (time.time() - start_time))

    loop.run_until_complete(test())
|
Abba Makarios said, "Walking in the desert one day, I found the skull of a dead man, lying on the ground. As I was moving it with my stick, the skull spoke to me. I said to it, 'Who are you?' The skull replied, 'I was high priest of the idols and of the pagans who dwelt in this place; but you are Makarios, the Spirit-bearer. Whenever you take pity on those who are in torments, and pray for them, they feel a little respite.' The old man said to him, 'What is this alleviation, and what is this torment?' He said to him, 'As far as the sky is removed from the earth, so great is the fire beneath us; we are ourselves standing in the midst of the fire, from the feet up to the head. It is not possible to see anyone face to face, but the face of one is fixed to the back of another. Yet when you pray for us, each of us can see the other's face a little. Such is our respite.' The old man in tears said, 'Alas the day when that man was born!' He said to the skull, 'Are there any punishments which are more painful than this?' The skull said to him, 'There is a more grievous punishment down below us.' The old man said, 'Who are the people down there?' The skull said to him: 'We have received a little mercy since we did not know God, but those who know God and denied Him are down below us.'" Then, picking up the skull, the old man buried it.
"It is not possible to see anyone face to face, but the face of one is fixed to the back of another. Yet when you pray for us, each of us can see the other's face a little."
|
from yowsup.layers import YowLayer, YowLayerEvent, YowProtocolLayer
from yowsup.layers.protocol_iq.protocolentities import ErrorIqProtocolEntity
from yowsup.layers.protocol_iq.protocolentities.iq_result import ResultIqProtocolEntity
from .protocolentities import *
import logging
logger = logging.getLogger(__name__)
class YowGroupsProtocolLayer(YowProtocolLayer):
    """Protocol layer for WhatsApp group management.

    Outgoing group iq entities are dispatched to ``_sendIq`` with the
    matching success/failure callbacks; incoming ``w:gp2`` notifications are
    translated to notification entities and pushed to the upper layer.
    """

    HANDLE = (
        CreateGroupsIqProtocolEntity,
        InfoGroupsIqProtocolEntity,
        LeaveGroupsIqProtocolEntity,
        ListGroupsIqProtocolEntity,
        SubjectGroupsIqProtocolEntity,
        ParticipantsGroupsIqProtocolEntity,
        AddParticipantsIqProtocolEntity,
        PromoteParticipantsIqProtocolEntity,
        DemoteParticipantsIqProtocolEntity,
        RemoveParticipantsIqProtocolEntity
    )

    def __init__(self):
        handleMap = {
            "iq": (None, self.sendIq),
            "notification": (self.recvNotification, None)
        }
        super(YowGroupsProtocolLayer, self).__init__(handleMap)

    def __str__(self):
        return "Groups Iq Layer"

    def sendIq(self, entity):
        """Route a group iq entity to _sendIq with its callbacks, or pass
        any other entity down to the lower layer unchanged."""
        if entity.__class__ not in self.HANDLE:
            self.entityToLower(entity)
            return
        # Exact-class dispatch: entity class -> (on_success[, on_failure]).
        callbacks = {
            SubjectGroupsIqProtocolEntity: (self.onSetSubjectSuccess, self.onSetSubjectFailed),
            CreateGroupsIqProtocolEntity: (self.onCreateGroupSuccess, self.onCreateGroupFailed),
            ParticipantsGroupsIqProtocolEntity: (self.onGetParticipantsResult,),
            AddParticipantsIqProtocolEntity: (self.onAddParticipantsSuccess, self.onAddParticipantsFailed),
            PromoteParticipantsIqProtocolEntity: (self.onPromoteParticipantsSuccess, self.onPromoteParticipantsFailed),
            DemoteParticipantsIqProtocolEntity: (self.onDemoteParticipantsSuccess, self.onDemoteParticipantsFailed),
            RemoveParticipantsIqProtocolEntity: (self.onRemoveParticipantsSuccess, self.onRemoveParticipantsFailed),
            ListGroupsIqProtocolEntity: (self.onListGroupsResult,),
            LeaveGroupsIqProtocolEntity: (self.onLeaveGroupSuccess, self.onLeaveGroupFailed),
            InfoGroupsIqProtocolEntity: (self.onInfoGroupSuccess, self.onInfoGroupFailed),
        }
        self._sendIq(entity, *callbacks[entity.__class__])

    # --- iq result/error callbacks -------------------------------------

    def onCreateGroupSuccess(self, node, originalIqEntity):
        logger.info("Group create success")
        self.toUpper(SuccessCreateGroupsIqProtocolEntity.fromProtocolTreeNode(node))

    def onCreateGroupFailed(self, node, originalIqEntity):
        logger.error("Group create failed")
        self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))

    def onSetSubjectSuccess(self, node, originalIqEntity):
        logger.info("Group subject change success")
        self.toUpper(ResultIqProtocolEntity.fromProtocolTreeNode(node))

    def onSetSubjectFailed(self, node, originalIqEntity):
        logger.error("Group subject change failed")
        self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))

    def onGetParticipantsResult(self, node, originalIqEntity):
        self.toUpper(ListParticipantsResultIqProtocolEntity.fromProtocolTreeNode(node))

    def onAddParticipantsSuccess(self, node, originalIqEntity):
        logger.info("Group add participants success")
        self.toUpper(SuccessAddParticipantsIqProtocolEntity.fromProtocolTreeNode(node))

    def onRemoveParticipantsFailed(self, node, originalIqEntity):
        logger.error("Group remove participants failed")
        self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))

    def onRemoveParticipantsSuccess(self, node, originalIqEntity):
        logger.info("Group remove participants success")
        self.toUpper(SuccessRemoveParticipantsIqProtocolEntity.fromProtocolTreeNode(node))

    def onPromoteParticipantsFailed(self, node, originalIqEntity):
        logger.error("Group promote participants failed")
        self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))

    def onPromoteParticipantsSuccess(self, node, originalIqEntity):
        logger.info("Group promote participants success")
        self.toUpper(ResultIqProtocolEntity.fromProtocolTreeNode(node))

    def onDemoteParticipantsFailed(self, node, originalIqEntity):
        logger.error("Group demote participants failed")
        self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))

    def onDemoteParticipantsSuccess(self, node, originalIqEntity):
        logger.info("Group demote participants success")
        self.toUpper(ResultIqProtocolEntity.fromProtocolTreeNode(node))

    def onAddParticipantsFailed(self, node, originalIqEntity):
        logger.error("Group add participants failed")
        self.toUpper(FailureAddParticipantsIqProtocolEntity.fromProtocolTreeNode(node))

    def onListGroupsResult(self, node, originalIqEntity):
        self.toUpper(ListGroupsResultIqProtocolEntity.fromProtocolTreeNode(node))

    def onLeaveGroupSuccess(self, node, originalIqEntity):
        logger.info("Group leave success")
        self.toUpper(SuccessLeaveGroupsIqProtocolEntity.fromProtocolTreeNode(node))

    def onLeaveGroupFailed(self, node, originalIqEntity):
        logger.error("Group leave failed")
        self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))

    def onInfoGroupSuccess(self, node, originalIqEntity):
        logger.info("Group info success")
        self.toUpper(InfoGroupsResultIqProtocolEntity.fromProtocolTreeNode(node))

    def onInfoGroupFailed(self, node, originalIqEntity):
        logger.error("Group info failed")
        self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(node))

    # --- notifications -------------------------------------------------

    def recvNotification(self, node):
        """Translate a w:gp2 group notification node into the matching
        notification entity; first matching child wins."""
        if node["type"] != "w:gp2":
            return
        notification_kinds = (
            ("subject", SubjectGroupsNotificationProtocolEntity),
            ("create", CreateGroupsNotificationProtocolEntity),
            ("remove", RemoveGroupsNotificationProtocolEntity),
            ("add", AddGroupsNotificationProtocolEntity),
        )
        for child_name, entity_class in notification_kinds:
            if node.getChild(child_name):
                self.toUpper(entity_class.fromProtocolTreeNode(node))
                return
|
A first network SUSFOOD was initiated under FP7 and ran from 2011 until 2014. This predecessor of the current network SUSFOOD2 under H2020, had some major achievements, such as 2 research calls, one in 2013 and one in 2014, funding in total 15 projects, a strategic research agenda (SRA) and a metaknowledge base (MKB) which generated the SUSFOOD Country report.
After this successful edition, 26 partners applied for an H2020 SUSFOOD2 Cofund, which was granted and started in 2017. A first action of this Cofund was a 3rd call in which 12 research projects were funded.
|
#===================================================================================================
# report
#---------------------------------------------------------------------------------------------------
import inspect
import os
import os.path
import numpy
import labels
import output
from results import Images, Metrics, SegmentationResults
import ipdb
#---------------------------------------------------------------------------------------------------
def html(experiment_name,
         epoch,
         content,
         page_foreground='#000000',
         page_background='#ffffff',
         image_table_foreground='#ffffff',
         image_table_background='#000000'):
    """Assemble the complete HTML report page.

    The page carries inline CSS for the metric and image tables; ``content``
    is the pre-rendered concatenation of all report sections.
    """
    template = '''
<html>
<style>
html
{{
width: 80%;
margin-left: auto;
margin-right: auto;
}}
body
{{
color: {page_foreground};
background-color: {page_background};
}}
table.metrictable
{{
width: 80%;
margin: 50px auto;
text-align: left;
}}
img
{{
margin: 25px 25px;
text-align: center;
vertical-align: center;
}}
table.imagetable
{{
width: 80%;
margin: 50px auto;
color: {image_table_foreground};
background-color: {image_table_background};
text-align: center;
}}
</style>
<body>
<h1>{experiment_name}</h1>
<h2>epoch: {epoch}</h2>
{content}
</body>
</html>
'''
    return template.format(
        experiment_name=experiment_name,
        epoch=epoch,
        content=content,
        page_foreground=page_foreground,
        page_background=page_background,
        image_table_foreground=image_table_foreground,
        image_table_background=image_table_background)
#---------------------------------------------------------------------------------------------------
def source_section(source):
    """Wrap the experiment definition's source code in a <pre> block."""
    parts = (
        '\n<hr>\n<h2>definition</h2>\n<pre>\n',
        source,
        '\n</pre>\n')
    return ''.join(parts)
#---------------------------------------------------------------------------------------------------
def metric_section(
        metric_name,
        statistics,
        sample_images):
    """Render one report section for a metric: the statistics overview table
    followed by one image table per summary statistic."""
    value_table = statistics_table(statistics)
    image_tables = '\n'.join(
        image_table(f'{prefix} {metric_name}', images)
        for prefix, images in (
            ('mean', sample_images.mean),
            ('median', sample_images.median),
            ('minimum', sample_images.minimum),
            ('maximum', sample_images.maximum)))
    return f'''
<hr>
<h2>{metric_name}</h2>
{value_table}
{image_tables}
'''
#---------------------------------------------------------------------------------------------------
def statistics_table(statistics):
    """Render the overview table of a metric's summary statistics.

    Each statistic is a (value, volume_index) pair; only the value is shown.
    """
    labelled_statistics = (
        ('mean', statistics.mean),
        ('median', statistics.median),
        ('minimum', statistics.minimum),
        ('maximum', statistics.maximum))
    rows = '\n'.join(
        f'<tr> <td> {label} </td> <td> {statistic[0]:0.5} </td> </tr>'
        for label, statistic in labelled_statistics)
    return f'''
<h3>overview</h3>
<table class="metrictable">
<tr> <th> metric </th> <th> value </th> </tr>
{rows}
</table>
'''
#---------------------------------------------------------------------------------------------------
def image_table(table_name, sample_images):
    """Render one titled table of sample images with a row per anatomical
    plane (axial, coronal, sagittal)."""
    plane_names = ('axial', 'coronal', 'sagittal')
    body = ''.join(
        image_row(plane, sample_images[axis])
        for axis, plane in enumerate(plane_names))
    return f'''
<h3>{table_name}</h3>
<table class="imagetable">
{body}
</table>
'''
def image_row(label_for_row, image_paths):
    """Render one table row: a label cell followed by one cell per image."""
    cells = []
    for path in image_paths:
        cells.append(image_cell(path))
    image_cells = ''.join(cells)
    return f'''
<tr>
<td>{label_for_row}</td>
{image_cells}
</tr>
'''
def image_cell(image_path):
    """Render one table cell containing an inline image.

    Fixed: the original emitted a nonstandard ``<image>`` tag (browsers only
    render it via a legacy alias); the standard ``<img>`` tag is used now.
    """
    return f'''
<td><img src="{image_path}"></td> '''
#---------------------------------------------------------------------------------------------------
def cost_table_row(epoch, phase, statistic):
    """Render one row of the costs table: epoch, phase, then the summary
    statistics of the cost for that phase."""
    values = (
        f'{epoch}',
        f'{phase}',
        f'{statistic.mean:0.3}',
        f'{statistic.median:0.3}',
        f'{statistic.minimum:0.3}',
        f'{statistic.maximum:0.3}',
        f'{statistic.deviation:0.3}',
        f'{statistic.change_from_base:0.3}')
    cells = '\n'.join(f'<td> {value} </td>' for value in values)
    return f'''
<tr>
{cells}
</tr>
'''
def cost_table(costs_per_epoch):
    """Render the costs section: one table row per (epoch, phase) pair."""
    row_markup = []
    for epoch, costs_per_phase in enumerate(costs_per_epoch):
        for phase, cost in enumerate(costs_per_phase):
            row_markup.append(cost_table_row(epoch, phase, cost))
    rows = ''.join(row_markup)
    return f'''
<hr>
<h2>costs</h2>
<table class="metrictable">
<tr>
<th> epoch </th>
<th> phase </th>
<th> mean </th>
<th> median </th>
<th> minimum </th>
<th> maximum </th>
<th> deviation </th>
<th> change </th>
</tr>
{rows}
</table>
'''
#---------------------------------------------------------------------------------------------------
class SourceData(object):
    """Static helpers that gather the volumes, predicted distributions and
    label images needed to render a report."""

    @staticmethod
    def representative_volumes_for_metrics(metrics, dataset):
        """Load every validation volume referenced by any metric statistic.

        Each metric iterates as (statistic, volume_index) pairs; the result
        maps each referenced index to its loaded volume.
        """
        indices = {index for metric in metrics for _statistic, index in metric}
        return {index: dataset.validation_set[index].read_volume()
                for index in indices}

    @staticmethod
    def representative_distributions_and_offsets_for_metrics(metrics, results):
        """Fetch the predicted distribution and offset for every volume
        referenced by any metric, returned as two parallel dicts."""
        indices = {index for metric in metrics for _statistic, index in metric}
        pairs = {index: results.predicted_distribution(index)
                 for index in indices}
        # Sanity check: each offset's leading entry must be its volume index.
        for index, (_distribution, offset) in pairs.items():
            assert offset[0] == index
        distributions = {index: distribution
                         for index, (distribution, _offset) in pairs.items()}
        offsets = {index: offset
                   for index, (_distribution, offset) in pairs.items()}
        return distributions, offsets

    @staticmethod
    def image_data_from_volumes(volumes, offsets, reconstructed_shape, margin):
        """Extract the reconstructed image window from each volume."""
        return {
            index: Images.extract(
                volumes[index].images, offsets[index][1:], reconstructed_shape, margin)
            for index in volumes}

    @staticmethod
    def reference_labels_from_volumes(volumes, offsets, reconstructed_shape, margin):
        """Extract the reconstructed reference-label window from each volume."""
        return {
            index: Images.extract(
                volumes[index].labels, offsets[index][1:], reconstructed_shape, margin)
            for index in volumes}

    @staticmethod
    def predicted_labels_from_distributions(distributions):
        """Collapse each predicted class distribution to dense label indices."""
        return {
            index: labels.dense_volume_distribution_to_dense_volume_indices(distribution)
            for index, distribution in distributions.items()}

    @staticmethod
    def costs_for_epoch(epoch, archive):
        """Load the recorded per-batch costs for the given epoch."""
        with archive.read_array_output('costs', epoch=epoch) as data:
            return data['arr_0']
#---------------------------------------------------------------------------------------------------
class Report( object ):
    """Builds the per-epoch HTML report for a segmentation experiment.

    Entry points: ``results_only`` restores and persists the epoch's
    results without rendering, while ``generate`` restores the results and
    renders the full report via ``write``.
    """

    @staticmethod
    def results_only( epoch, experiment ):
        # Restore the epoch's segmentation results and persist them, without
        # rendering any HTML.
        definition = experiment.definition
        results = SegmentationResults(
            experiment.output_path, definition.experiment_id, epoch, definition.label_count )
        results.restore( experiment.dataset, definition.sample_parameters, experiment.log )
        results.persist()

    @staticmethod
    def generate( epoch, experiment ):
        # Restore the epoch's segmentation results, then render the report.
        definition = experiment.definition
        results = SegmentationResults(
            experiment.output_path, definition.experiment_id, epoch, definition.label_count )
        results.restore( experiment.dataset, definition.sample_parameters, experiment.log )
        Report.write( results, experiment )

    @staticmethod
    def write( results, experiment ):
        """Render the full HTML report for ``results`` and return the path
        of the written file."""
        log = experiment.log
        log.subsection( 'writing report' )
        epoch = results.epoch
        class_count = results.class_count
        archive = results.archive
        dataset = experiment.dataset
        sample_parameters = experiment.definition.sample_parameters
        reconstructed_shape = sample_parameters.reconstructed_shape
        margin = sample_parameters.window_margin
        log.entry( 'collating metrics' )
        # One metric for the mean dice over all classes, plus one per class.
        dice = results.statistics_for_mean_dice_score_per_volume
        dice_per_class = [
            results.statistics_for_dice_score_for_class( i )
            for i in range( class_count ) ]
        metrics = [ dice ] + dice_per_class
        log.entry( 'loading data' )
        # Load only the volumes referenced by some metric statistic.
        volumes = SourceData.representative_volumes_for_metrics( metrics, dataset )
        distributions, offsets = SourceData.representative_distributions_and_offsets_for_metrics(
            metrics,
            results )
        log.entry( 'extracting data')
        image_data = SourceData.image_data_from_volumes(
            volumes, offsets, reconstructed_shape, margin )
        reference = SourceData.reference_labels_from_volumes(
            volumes, offsets, reconstructed_shape, margin )
        predicted = SourceData.predicted_labels_from_distributions( distributions )
        log.entry( 'generating source section' )
        # Embed the experiment definition's source code for reproducibility.
        source_code = inspect.getsource( type( experiment.definition ) )
        source = source_section( source_code )
        log.entry( 'generating cost section' )
        cost_data = SourceData.costs_for_epoch( epoch, results.archive )
        costs = Metrics.costs_over_experiment( cost_data, phases = 10 )
        section_for_costs = cost_table( costs )
        log.entry( 'generating dice sections' )
        section_for_all_classes = Report.section_for_all_classes(
            dice, image_data, predicted, reference, results )
        section_per_class = '\n'.join( [
            Report.section_for_class(
                c, dice_per_class[ c ], image_data, predicted, reference, results )
            for c in range( class_count ) ] )
        log.entry( 'combining sections' )
        report_name = experiment.definition.experiment_name
        sections = source + section_for_costs + section_for_all_classes + section_per_class
        file_content = html( report_name, epoch, sections )
        file_name = archive.saved_object_file_name( 'report', epoch = epoch ) + '.html'
        log.entry( f'writing report to {file_name}' )
        with open( file_name, 'w' ) as output_file:
            output_file.write( file_content )
        log.entry( 'done' )
        return file_name

    @staticmethod
    def section_for_costs( costs ):
        # NOTE(review): unused stub -- ``write`` calls the module-level
        # ``cost_table`` directly instead; kept to avoid changing the
        # class's public surface.
        pass

    @staticmethod
    def section_for_all_classes( statistics, image_data, predicted, reference, results ):
        # Section for the dice score aggregated over all classes.
        name = f'dice over all classes'
        method = Images.sample_difference_of_multiple_masks
        return Report.section(
            name, statistics, image_data, predicted, reference, method, results )

    @staticmethod
    def section_for_class( c, statistics_for_c, image_data, predicted, reference, results ):
        # Section for the dice score of a single class ``c``; the sampling
        # method is specialised to that class.
        name = f'dice for class {c}'
        method = lambda i, p, r, n : Images.sample_difference_of_masks( i, p, r, n, c )
        return Report.section(
            name, statistics_for_c, image_data, predicted, reference, method, results )

    @staticmethod
    def section( name, statistics, image_data, predicted, reference, sample_method, results ):
        """Render one metric section: sample images for each summary
        statistic (e.g. mean/median/minimum/maximum) plus the overview
        table, via the module-level ``metric_section``."""
        # ``statistics`` is a namedtuple of (value, volume_index) pairs;
        # _fields yields the statistic names in order.
        names = statistics._fields
        images_per_statistic = [
            sample_method(
                image_data[ volume_index ],
                predicted[ volume_index ],
                reference[ volume_index ],
                results.class_count )
            for value, volume_index in statistics ]
        image_file_names_per_statistic = [
            Report.save_sample_images_for_statistic(
                images,
                f'{name}-{names[ statistic ]}',
                results.archive )
            for statistic, images in enumerate( images_per_statistic ) ]
        samples_indexed_by_statistic_name = Images.Samples( **{
            names[ statistic ] : image_file_names
            for statistic, image_file_names in enumerate( image_file_names_per_statistic ) } )
        return metric_section( name, statistics, samples_indexed_by_statistic_name )

    @staticmethod
    def save_sample_images_for_statistic( sample_images_for_statistic, section_name, archive ):
        """Write the grid of sample images (axes x positions) for one
        statistic to disk and return the file names relative to the report
        directory, as a nested list [axis][position]."""
        axis_count = len( Images.Axes )
        position_count = len( Images.SamplePositions )
        assert sample_images_for_statistic.shape[ 0 : 2 ] == ( axis_count, position_count )
        prefix = 'report-' + section_name.replace( ' ', '_' )
        file_names = (
            [ [ archive.saved_object_file_name( prefix, f'{axis.name}-{position.name}.png' )
                for position in Images.SamplePositions ]
              for axis in Images.Axes ] )
        # The HTML references images by basename, relative to the report.
        relative_file_names = (
            [ [ os.path.basename( file_name ) for file_name in row ] for row in file_names ] )
        for i in range( axis_count ):
            for j in range( position_count ):
                # Overwrite any stale image from a previous report run.
                if os.path.exists( file_names[ i ][ j ] ):
                    os.remove( file_names[ i ][ j ] )
                Images.save_image( sample_images_for_statistic[ i ][ j ], file_names[ i ][ j ] )
        return relative_file_names
#---------------------------------------------------------------------------------------------------
|
My name is Shannon Lewis, and I’m so glad you’re here! I’m excited to meet you and to help you enhance your life. My passion to become a certified life coach is centered on my deep desire to support and care for others. I’m a very open, honest, and nonjudgmental person. I’ve always been in the caring industry in one form or another. In addition to life coaching, I’m a mother, a nurse, and a fitness instructor. With each of these roles, the common threads of caring, supporting, teaching, and guiding are all intertwined.
As we all do, I’ve experienced my own challenges, struggles, and setbacks in life. After dealing with some of my own personal challenges and getting to the other side, I felt called to support others in need of help with their life and transitions. When I discovered life coaching for myself, I was motivated to pursue it so I could share its value and guide others as well.
I decided to “marry” all of the things I’ve learned along my journey and to do what I really love, life coaching, which is my true passion. I’ve combined my backgrounds to make life coaching my vehicle to support others with a holistic and comprehensive approach, rather than viewing them as one part of a system. The whole person is what I’ve always been interested in strengthening because it includes the physical, emotional, social, economic, and spiritual sides to each and every one of us. No one should be considered from just one aspect because there’s so much more to each and every one of us. I look forward to speaking with you as your trusted partner, coaching you, and helping you along your personal journey.
Book a 30-minute complimentary consultation today!
Living an authentic life is empowering. Comprehensive Life Coaching focuses on the holistic process of aligning your mind, body, and spirit so that you can live a purposeful and fulfilling life. Reach your goals and discover your calling with the support of a certified life coach who will take you through a proven process tailored specifically to help you reveal the very best version of yourself.
Shannon Lewis, MSN, RN, CLC is a certified and credentialed coach focusing on life, personal development, health and wellness, relationship, and youth coaching. Before becoming a certified life coach through The Life Purpose Institute, Shannon received her Bachelor of Science in Nursing in 1999 and her Master of Science in Nursing in Health Care Administration in 2013. While taking a doctorate level course in nursing during her master’s program, Shannon was evaluated on her leadership abilities through the Leadership Potential Assessment (LEAP) and received scores that indicated excellent leadership skills. During the same course, she was required to have her emotional intelligence (EQ) assessed by taking the Multi-dimensional Emotional Intelligence Quotient (MEIQ-R3) test, and her scores indicated strong empathy skills and a high emotional intelligence quotient. She is known both professionally and personally as being a very caring person. She has attained four certifications in reformer Pilates and is also a certified BARSCULPT™ and CARDIOLATES™ instructor. In 2013, Shannon was named a worldwide leader in healthcare by the International Nurses Association and is recognized in their publication.
Shannon is a devoted member of numerous professional and academic organizations including the Mississippi Nurses Association (MNA), the American Nurses Association (ANA), the International Nurses Association, the Aerobics and Fitness Association of America (AFAA), American Holistic Nurses Association (AHNA), the American Council on Exercise (ACE), the Brain Injury Association of Mississippi Support Group, Sigma Theta Tau International Honor Society of Nursing, the American Association of Legal Nurse Consultants (AALNC), and the International Coaching Federation (ICF). Shannon is a member of Phi Mu Fraternity as well. She believes learning is never-ending and is something we should all do every day. Shannon also volunteers for various local organizations and for the March of Dimes.
Do you yearn to find your true life’s purpose?
Are you going through a transition and need clarity on what to do next?
Are you seeking to live an authentic life?
Are you having trouble setting and keeping goals?
Do you want more fulfillment and joy?
Do you strive for more peace and balance in your life?
*If you’ve answered “yes” to one or more of these questions, then Life Coaching is right for you!
Life Coaching was designed to help people from all walks of life gain clarity and purpose to live life to the fullest. Discovering who you are and your true purpose can be achieved with the guidance of a certified life coach who will help you make significant improvements and develop goals to transform your life. Coaching illuminates your possibilities to help you discover how to create more balance, joy, and fulfillment. Through a process of self-exploration, you will gain success through goal setting, overcoming obstacles, and accountability.
Has technology interfered with their human interactions and/or overall focus?
Do you want to help your youth build positive life skills and make thoughtful decisions?
Are they lacking organizational, research, or study skills?
Are they unable to reach goals?
Do you have concerns with their social and emotional growth and well-being?
Do you want to help your youth find purpose?
*If you answered “yes” to one or more of these questions, and you would like to help your child maximize and reach their full potential, then Youth Coaching is right for them!
Youth Coaching is aimed at providing life coaching to kids, teens, and young adults ranging in age from 11-21 years to help them navigate the sometimes-turbulent waters of adolescence. Just like adults, young people have decisions to make and sometimes need the guidance and support from a trusted adult who is not their parent. Coaching can help adolescents define, set, and reach their goals while discovering their unique gifts and purpose. This process will empower them to succeed and to set their sight towards the future.
Are you looking for ways to become more motivated?
*If you’ve answered “yes” to one or more of these questions, then Personal Development Coaching is right for you!
Life Coaching for self-development covers a variety of issues and concerns many people struggle with often. When left unhandled, these habits and limiting beliefs can sabotage your goals and overall sense of well-being. Life coaching for personal development focuses on building your self-esteem and confidence, organizing your time effectively, planning for your future, and developing the skills you need to further your personal and professional growth.
Have you been concerned about your diet and nutrition?
Do you need help maintaining a regular exercise plan? Do you want to reach fitness goals?
Do you have trouble maintaining a good sleep schedule and practicing self-care?
Are you struggling to cope with an illness?
*If you’ve answered “yes” to one or more of these questions, then Health and Wellness Coaching is right for you!
Health and Wellness Coaching is an expansion of life coaching and offers you support with issues related to healthy living such as weight management, nutrition and diet needs, and stress management. You will be guided through coaching to create action plans to overcome your blocks and past obstacles so that you can attain your goals.
Do you want to improve your marriage or current relationship?
Do you need to work on relationships with friends and/or co-workers? Do you often find yourself unable to understand your partner?
Are you having trouble coping with life after divorce?
Do you have trouble knowing what to look for in a potential partner?
*If you’ve answered “yes” to one or more of these questions, then Relationship Coaching is right for you!
Relationship Coaching can help you by making a stronger connection with your spouse or significant other and also address concerns you have surrounding any and all relationships in your life. You will be provided with a personalized roadmap to help you transform your relationships and get the fulfillment and connections you desire.
|
"""
distutilazy.clean
-----------------
command classes to help clean temporary files
:license: MIT. For more details see LICENSE file or
https://opensource.org/licenses/MIT
"""
from __future__ import absolute_import
import os
from shutil import rmtree
from distutils import log
from distutils.core import Command
from distutils.command import clean
from . import util
__version__ = "0.4.0"
class BaseFileSystemCleanerCommand(Command):
    """Base distutils command for commands that remove files/directories.

    Subclasses override :meth:`default_extensions` and
    :meth:`default_directories` to declare what should be cleaned.
    """

    @staticmethod
    def default_extensions():
        """Return the default list of file extensions to clean.

        Fixed: these are ``@staticmethod``s, so they must not declare a
        ``cls`` parameter -- the original signatures made the calls in
        ``initialize_options()`` raise TypeError (missing argument).
        """
        return []

    @staticmethod
    def default_directories():
        """Return the default list of directory names to clean."""
        return []

    def initialize_options(self):
        """Set option defaults (root dir, comma-joined extension/dir lists)."""
        self.root = os.getcwd()
        self.extensions = ','.join(self.default_extensions())
        self.directories = ','.join(self.default_directories())

    def finalize_options(self):
        """Validate the root path and split the comma-separated options.

        :raises IOError: if the configured root path does not exist
        """
        if not os.path.exists(self.root):
            raise IOError("Failed to access root path '{}'".format(self.root))
        self.extensions = [ext.strip() for ext in self.extensions.split(',')]
        self.directories = [
            dir_.strip() for dir_ in self.directories.split(',')]

    def _find_files(self):
        """Find files recursively in the root path
        using provided extensions.

        :return: list of absolute file paths
        """
        files = []
        for ext in self.extensions:
            ext_files = util.find_files(self.root, "*" + ext)
            log.debug("found {} '*{}' files in '{}'".format(
                len(ext_files), ext, self.root)
            )
            files.extend(ext_files)
        return files

    def _find_directories(self):
        """Find directories matching the configured names under the root.

        :return: list of absolute directory paths
        """
        directories = []
        for dir_name in self.directories:
            dirs = util.find_directories(self.root, dir_name)
            log.debug("found {} directories in '{}'".format(
                len(dirs), self.root))
            directories.extend(dirs)
        return directories

    def _clean_file(self, filename):
        """Clean a file if exists and not in dry run"""
        if not os.path.exists(filename):
            return
        self.announce("removing '{}'".format(filename))
        if not self.dry_run:
            os.remove(filename)

    def _clean_directory(self, name):
        """Clean a directory if exists and not in dry run"""
        if not os.path.exists(name):
            return
        self.announce(
            "removing directory '{}' and all its contents".format(name)
        )
        if not self.dry_run:
            # second argument: ignore_errors=True -- best-effort removal
            rmtree(name, True)
class CleanPyc(BaseFileSystemCleanerCommand):
    """Remove compiled Python files and bytecode cache directories."""

    # Typo fixed in user-facing help text: "complied" -> "compiled".
    description = """Clean root dir from compiled python files"""
    user_options = [("root=", "r", "path to root dir")]

    @staticmethod
    def default_extensions():
        """Extensions of compiled Python files (bytecode and Windows ext modules)."""
        return [".pyc", ".pyo", ".pyd"]

    @staticmethod
    def default_directories():
        """Bytecode cache directories created by Python 3."""
        return ["__pycache__"]

    def find_compiled_files(self):
        """Find compiled Python files recursively in the root path

        :return: list of absolute file paths
        """
        files = self._find_files()
        self.announce(
            "found '{}' compiled python files in '{}'".format(
                len(files), self.root
            )
        )
        return files

    def find_cache_directories(self):
        """Find Python bytecode cache directories under the root path.

        :return: list of absolute directory paths
        """
        directories = self._find_directories()
        self.announce(
            "found {} python cache directories in '{}'".format(
                len(directories), self.root
            )
        )
        return directories

    def run(self):
        """Remove cache directories first, then stray compiled files."""
        directories = self.find_cache_directories()
        if directories:
            self.announce(
                "cleaning python cache directories in '{}' ...".format(
                    self.root))
            # The redundant ``if not self.dry_run`` guard was removed:
            # _clean_directory()/_clean_file() already honor --dry-run, and
            # the guard suppressed the per-item announcements that make a
            # dry run useful.
            for dir_name in directories:
                self._clean_directory(dir_name)
        files = self.find_compiled_files()
        if files:
            self.announce(
                "cleaning compiled python files in '{}' ...".format(self.root))
            for filename in files:
                self._clean_file(filename)
class CleanJythonClass(BaseFileSystemCleanerCommand):
    """Remove compiled ``$py.class`` files created by Jython."""

    # Typo fixed in user-facing help text: "complied" -> "compiled".
    description = """Clean root dir from compiled files created by Jython"""
    user_options = [("root=", "r", "path to root dir")]

    @staticmethod
    def default_extensions():
        """Suffix of class files Jython generates for Python modules."""
        return ["$py.class"]

    @staticmethod
    def default_directories():
        """Jython creates no cache directories."""
        return []

    def find_class_files(self):
        """Find compiled class files recursively in the root path

        :return: list of absolute file paths
        """
        files = self._find_files()
        self.announce(
            "found '{}' compiled class files in '{}'".format(
                len(files), self.root
            )
        )
        return files

    def run(self):
        """Remove all Jython class files below the root directory."""
        files = self.find_class_files()
        if files:
            self.announce(
                "cleaning compiled class files in '{}' ...".format(self.root))
            # Redundant ``if not self.dry_run`` guard removed; _clean_file()
            # already honors --dry-run and announces what it would remove.
            for filename in files:
                self._clean_file(filename)
class CleanAll(clean.clean, BaseFileSystemCleanerCommand):
    """Aggregate cleaner: distutils ``clean --all`` plus compiled files,
    the ``build``/``dist``/egg-info directories, and any extra paths."""

    # Typo fixed in user-facing help text: "complied" -> "compiled".
    description = "Clean root dir from temporary files (compiled files, etc)"
    user_options = [
        ("keep-build", None, "do not clean build directory"),
        ("keep-dist", None, "do not clean dist directory"),
        ("keep-egginfo", None, "do not clean egg info directory"),
        ("keep-extra", None, "do not clean extra files"),
    ]
    boolean_options = ["keep-build", "keep-dist", "keep-egginfo", "keep-extra"]

    @staticmethod
    def default_extensions():
        """Union of the extensions cleaned by the CPython and Jython cleaners."""
        return CleanPyc.default_extensions() + \
            CleanJythonClass.default_extensions()

    @staticmethod
    def default_directories():
        """Union of the directories cleaned by the CPython and Jython cleaners."""
        return CleanPyc.default_directories() + \
            CleanJythonClass.default_directories()

    def initialize_options(self):
        """Initialize both parents' options plus the keep-* switches."""
        clean.clean.initialize_options(self)
        BaseFileSystemCleanerCommand.initialize_options(self)
        self.keep_build = None
        self.keep_dist = None
        self.keep_egginfo = None
        self.keep_extra = None

    def finalize_options(self):
        """Finalize both parents' options and force a full distutils clean."""
        clean.clean.finalize_options(self)
        BaseFileSystemCleanerCommand.finalize_options(self)
        # Make the underlying distutils clean behave like ``clean --all``.
        self.all = True

    def get_egginfo_dir(self):
        """Return the egg-info directory name for this distribution."""
        return self.distribution.metadata.get_name() + ".egg-info"

    def get_extra_paths(self):
        """Return list of extra files/directories to be removed"""
        return []

    def clean_egginfo(self):
        """Clean .egginfo directory"""
        dir_name = os.path.join(self.root, self.get_egginfo_dir())
        self._clean_directory(dir_name)

    def clean_dist(self):
        """Remove the ``dist`` directory."""
        self._clean_directory(os.path.join(self.root, "dist"))

    def clean_build(self):
        """Remove the ``build`` directory."""
        self._clean_directory(os.path.join(self.root, "build"))

    def clean_extra(self):
        """Clean extra files/directories specified by get_extra_paths()"""
        extra_paths = self.get_extra_paths()
        for path in extra_paths:
            if not os.path.exists(path):
                continue
            if os.path.isdir(path):
                self._clean_directory(path)
            else:
                self._clean_file(path)

    def run(self):
        """Run distutils clean, then remove every artifact not kept back."""
        clean.clean.run(self)
        if not self.keep_build:
            self.clean_build()
        if not self.keep_egginfo:
            self.clean_egginfo()
        if not self.keep_dist:
            self.clean_dist()
        if not self.keep_extra:
            self.clean_extra()
        # The ``not self.dry_run`` guards were removed from the loops below:
        # _clean_directory()/_clean_file() already honor --dry-run, and the
        # guards suppressed the per-item dry-run announcements.
        for dir_name in self._find_directories():
            self._clean_directory(dir_name)
        for filename in self._find_files():
            self._clean_file(filename)
# Lowercase aliases following the distutils command-naming convention, so
# the commands can be plugged into setup()'s cmdclass under these names.
clean_pyc = CleanPyc
clean_all = CleanAll
|
The components and finishes that we use on the inside of our properties demand cleansing and occasional servicing we dust, we vacuum, we mop-n-glo to preserve them hunting very good and lengthy lasting. Devoid of far too considerably effort and hard work we can remain ahead of the don and tear from our youngsters and our animals.
Products on the outdoors, having said that just take a daily beating from the sun, wind, rain, temperature adjustments, and other environmental aspects (in addition to children and dogs). These elements have to have a good deal far more care and upkeep but that can be decreased rather by deciding on the correct resources to match your aims.
Outside decks are issue to a terrific deal of abuse – the deck surface area is specifically vulnerable to sunshine and rain. Wood is the most preferred floor for decks, but it truly is also the quickest to put on out. The good thing is there are a selection of deck surface area merchandise accessible that permit you to decide on involving physical appearance, cost, and the effort and hard work required to hold your deck looking terrific for many decades.
Pure wooden is the apparent initial choice for decking. It’s durable, interesting, uncomplicated to function with, and it comes in a large variety of varieties with distinct attributes and expenditures.
Strain-treated wood decking is considerably and away the most well-known decking surface in use now. Most PT lumber is manufactured from Southern Yellow Pine, impregnated with ACQ, a chemical that increases the wood’s resistance to rot and insect destruction. But taken care of lumber is typically poorer high-quality materials so the wood shrinks, cracks, and splits much more very easily.
A far better option for “real” wood decks is a person of the several woods that have a normal resistance to rot and insect destruction. These woods contain Western Red Cedar, Redwood, and some species of tropical hardwoods. Whilst these better excellent decking elements last lengthier and glance far better they also value extra. 1 of the extremely best all-natural deck surfaces is a team of South American hardwoods identified as Ipe (“Ipe” is Portuguese for hardwood). These are very dense, limited-grained woods, dark in shade and nearly cost-free from knots. Ipe is pretty tough, major, and complicated to get the job done with but is also quite attractive and is almost indestructible. Assume to pay back leading dollar for Ipe resources and the labor to set up it.
The use of any variety of organic wooden decking comes with some unfavorable environmental impact. When stress-handled decking is disposed of, the chemical substances made use of to treat the wooden may perhaps leach into groundwater burning it puts these same chemicals into the air. And even though the new ACQ procedure is considerably less poisonous than the past CCA (which includes arsenic) the long-expression outcomes of the chemical usually are not completely recognised.
Much of the tropical hardwood used for decking content is plantation-developed, but some isn’t really and at the retail degree it can be just about unattainable to tell the difference. Most individuals don’t want their decks constructed at the price of an acre of rainforest.
Wood Composite decking has turn into very preferred just lately thanks to its longevity and environmentally helpful qualities. It is a very superior imitation of purely natural wood in workability and physical appearance.
Bought beneath names like Trex, Correctdeck, and Timbertech, composite decking boards are manufactured from recycled plastic (typically grocery luggage or milk jugs) and floor-up squander wooden. It can be slice and attached like authentic wooden and necessitates pretty much no maintenance. Composite decking mimics the search of pure wood but varies in visual appearance and overall performance relying on the company and the ratio of plastic to wood in the blend, normally about 50-sixty% wooden solutions.
While composite decking can not be stained or painted like all-natural wood, several producers develop boards in several hues. Considering the fact that the coloration is in the course of the product, chips and scrapes really don’t have to have restaining to sustain a regular appearance. And due to the fact of the substantial wood material, the uncolored composite boards weather to a pleasing silver-grey.
As with any decking surface, the system of securing composite decking to the framing can make improvements to or cut down the overall look of the deck. Very poor set up can even affect the structural integrity of the boards. Preferably, all decking boards should really be set up with hid fasteners – additional hard work and expenditure, but the consequence is a greater hunting and more time lasting deck.
There are a handful of other choices in decking – Fiberglass Bolstered Plastic (FRP) for illustration, but they glance significantly fewer like “traditional” decking than normal wood or wooden composite. All-plastic and aluminum decking solutions also involve the use of a massive range of conclusion caps, specialized fasteners, and other proprietary areas and parts that can detract from the in general physical appearance of the deck.
Your option of decking relies upon upon the wished-for overall look, spending plan, and your tolerance for the necessary upkeep of diverse elements. A superior harmony of people qualities must result in a deck surface area that the complete relatives will get pleasure from for many a long time – little ones, canine, and all.
|
#-*- coding: utf-8 -*-
"""SMTP email backend class."""
import smtplib
import ssl
import threading
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.utils import DNS_NAME
from django.core.mail.message import sanitize_address
from django.utils.encoding import force_bytes
class EmailBackend(BaseEmailBackend):
    """
    A wrapper that manages the SMTP network connection.

    WARNING -- debugging/staging backend ("tricky stuff"): ``_send()``
    deliberately ignores the message's real recipients and delivers every
    message to ``settings.TO_LIST`` instead, after injecting a banner into
    the body that names the original recipients. Do not use in production.
    """
    def __init__(self, host=None, port=None, username=None, password=None,
                 use_tls=None, fail_silently=False, **kwargs):
        super(EmailBackend, self).__init__(fail_silently=fail_silently)
        # Explicit arguments win; otherwise fall back to Django settings.
        self.host = host or settings.EMAIL_HOST
        self.port = port or settings.EMAIL_PORT
        if username is None:
            self.username = settings.EMAIL_HOST_USER
        else:
            self.username = username
        if password is None:
            self.password = settings.EMAIL_HOST_PASSWORD
        else:
            self.password = password
        if use_tls is None:
            self.use_tls = settings.EMAIL_USE_TLS
        else:
            self.use_tls = use_tls
        self.connection = None
        self._lock = threading.RLock()

    def open(self):
        """
        Ensures we have a connection to the email server. Returns whether or
        not a new connection was required (True or False).
        """
        if self.connection:
            # Nothing to do if the connection is already open.
            return False
        try:
            # If local_hostname is not specified, socket.getfqdn() gets used.
            # For performance, we use the cached FQDN for local_hostname.
            self.connection = smtplib.SMTP(self.host, self.port,
                                           local_hostname=DNS_NAME.get_fqdn())
            if self.use_tls:
                self.connection.ehlo()
                self.connection.starttls()
                self.connection.ehlo()
            if self.username and self.password:
                self.connection.login(self.username, self.password)
            return True
        except Exception:
            # Was a bare ``except:`` which, combined with fail_silently,
            # also swallowed SystemExit and KeyboardInterrupt.
            if not self.fail_silently:
                raise

    def close(self):
        """Closes the connection to the email server."""
        if self.connection is None:
            return
        try:
            try:
                self.connection.quit()
            except (ssl.SSLError, smtplib.SMTPServerDisconnected):
                # This happens when calling quit() on a TLS connection
                # sometimes, or when the connection was already disconnected
                # by the server.
                self.connection.close()
        except Exception:
            # Was a bare ``except:``; narrowed so SystemExit and
            # KeyboardInterrupt are never swallowed by fail_silently.
            if self.fail_silently:
                return
            raise
        finally:
            self.connection = None

    def send_messages(self, email_messages):
        """
        Sends one or more EmailMessage objects and returns the number of email
        messages sent.
        """
        if not email_messages:
            return
        with self._lock:
            new_conn_created = self.open()
            if not self.connection:
                # We failed silently on open().
                # Trying to send would be pointless.
                return
            num_sent = 0
            for message in email_messages:
                sent = self._send(message)
                if sent:
                    num_sent += 1
            if new_conn_created:
                # Only tear down a connection this call created.
                self.close()
        return num_sent

    def _send(self, email_message):
        """A helper method that does the actual sending.

        Note: the real recipients are computed only to be embedded in the
        banner; delivery goes to ``settings.TO_LIST`` (see class docstring).
        """
        if not email_message.recipients():
            return False
        from_email = sanitize_address(email_message.from_email, email_message.encoding)
        recipients = [sanitize_address(addr, email_message.encoding)
                      for addr in email_message.recipients()]
        # Render the MIME message once and reuse it (the original rendered
        # it twice via email_message.message()).
        mime_message = email_message.message()
        charset = mime_message.get_charset().get_output_charset() if mime_message.get_charset() else 'utf-8'
        # tricky-stuff: prepend a banner naming the original recipients.
        message = mime_message.as_string()
        message = message.replace('<p>Bonjour,</p>', '<p>ATTENTION, UN TRICKY STUFF EST UTILISÉ (smtpforward.EmailBackend).<br/>CET EMAIL ÉTAIT CENSÉ ÊTRE ENVOYÉ À : <strong>%s</strong></p><p>Bonjour,</p>' %recipients)
        message = message.replace('Bonjour,\r\n\n\n', 'ATTENTION, UN TRICKY STUFF EST UTILISÉ (smtpforward.EmailBackend)\r\n\n\nCET EMAIL ÉTAIT CENSÉ ÊTRE ENVOYÉ À : %s\r\n\n\nBonjour,\r\n\n\n' %recipients)
        to_list = settings.TO_LIST
        try:
            # Deliberate redirect: send to TO_LIST, not the real recipients.
            self.connection.sendmail(from_email, to_list,
                    force_bytes(message, charset))
        except Exception:
            # Was a bare ``except:``; narrowed (see open()).
            if not self.fail_silently:
                raise
            return False
        return True
|
Holy month of Ramadan is just around the corner and Kitsch Cupcakes, Dubai’s first specialized cupcakes bar revealed today its special delicacies for the month.
After spending the last couple of weeks in the kitchen fine-tuning and searching for that perfect recipe, Dalia Dogmoch, Chief Baker & Co-Founder, Kitsch Cupcakes announced today that she will be introducing two new flavours for Ramadan. “People crave traditional food after a long day of fasting. Arabic sweets are also very popular during the month and therefore, we are introducing two new local flavours for the month.” states Dogmoch.
Kitsch will be introducing a Pistachio Praline cupcake, a vanilla base cupcake topped with vanilla icing and a hint of pistachio praline. If you are one of those who love pistachio ice cream, this cupcake is a must-try! “Pistachio is a traditional ingredient found in most desserts that originate from the Arab world and people love it. A Pistachio cupcake has been long-due!” claims Dogmoch.
Turkish Delight, a cupcake with a chocolate base and a combo of vanilla and chocolate icing finished with fine traditional Turkish ingredients is the other addition to Kitsch’s menu for this month. “The Turks are known for their unique desserts so we’ve borrowed some ingredients from them and mixed it with our secret recipe,” adds Dogmoch. Kitsch will also be recalling its famous Sticky Date cupcake, a moist date cupcake with butterscotch icing for Ramadan.
A percentage of the sales of the Ramadan range of cupcakes will be donated to one of the local charities. “Ramadan is the month of giving and baking smiles on the faces of innocent children is the least we can do during this month,” explains Dogmoch. Kitsch will also be surprising children with special needs with its mouth-watering delights throughout the month.
Kitsch Cupcakes are priced at AED 12 only and are available in three different sizes – bites (minis), regular and giant cupcakes. To place an order, it’s as simple as dialling 800-CUPCAKE and Kitsch’s special delivery van will be at your doorstep.
Kitsch Cupcakes is Dubai's first specialized cupcakes bar. The Kitsch concept started in 2006 in Beirut, Lebanon.
Kitsch Cupcakes is located on Jumeirah Beach Road in Dubai & Candylicious – Dubai Mall in Dubai as well as The Souq in the Central Market in Abu Dhabi. Kitsch also opened its doors at the Dubai International Financial Centre recently.
|
# This script generates high-performance C/C++ code for any given multi-dimensional transposition.
#
# Tensor-Contraction Compiler (TTC), copyright (C) 2015 Paul Springer (springer@aices.rwth-aachen.de)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import itertools
import copy
import time
import ttc_util
###################################
#
# This file generates transpositions of the form B_perm(I) = alpha * A_I + beta * B_perm(I)
#
###################################
class implementation:
def __init__(self, blocking, loopPerm, perm, size, alpha, beta, floatTypeA, floatTypeB,
optimization, scalar, prefetchDistance, microBlocking, reference,
architecture, parallelize):
self.registerSizeBits = microBlocking[0] * 8 * ttc_util.getFloatTypeSize(floatTypeA)
self.parallelize = parallelize
self.debug = 0
self.floatTypeA = floatTypeA
self.floatTypeB = floatTypeB
self.architecture = architecture
self.alpha = alpha
self.beta = beta
self.optimization = optimization #TODO remane optimization to streamingStores
self.prefetchDistance = prefetchDistance
self.microBlocking = microBlocking
self.numMicroBlocksPerBlock = blocking[0]/microBlocking[0] * blocking[1]/microBlocking[1]
self.reference = reference
self.size = copy.deepcopy(size)
self.scalar = scalar
if self.reference == 0:
self.blockA= blocking[0] #blocking in stride-1 indices of A
self.blockB= blocking[1] #blocking in stride-1 indices of B
else:
self.blockA= 1 #blocking in stride-1 indices of A
self.blockB= 1 #blocking in stride-1 indices of B
self.dim = len(perm)
self.perm = copy.deepcopy(perm)
self.loopPerm = copy.deepcopy(loopPerm)
self.indent = " "
self.cost = 0.0
self.code = ""
self.ldout = -1
for i in range(len(self.perm)):
if( self.perm[0] != 0 ):
if self.perm[i] == 0:
self.ldout = i
break;
else:
if self.perm[i] == 1:
self.ldout = i
break;
self.transposeMacroKernelname = "%sTranspose%dx%d"%(ttc_util.getFloatPrefix(self.floatTypeA, self.floatTypeB), self.blockA,self.blockB)
if( self.perm[0] == 0):
self.transposeMacroKernelname += "_0"
if( self.optimization != "" ):
self.transposeMacroKernelname += "_%s"%self.optimization
if( self.beta == 0 ):
self.transposeMacroKernelname += "_bz"
if( self.prefetchDistance > 0 ):
self.transposeMacroKernelname += "_prefetch_"+ str(self.prefetchDistance)
    def getPrefetchDistance(self):
        """Return the software-prefetch distance (0 disables prefetching)."""
        return self.prefetchDistance
    def getLoopPerm(self):
        """Return the loop order (permutation of loop indices) of this variant."""
        return self.loopPerm
def getOffsetA(self,start = 0):
offset = ""
for i in range(start,self.dim):
offset += "i" + str(i)
if(i != 0):
offset += "*lda" + str(i)
if( i != self.dim-1):
offset += " + "
return offset
def getOffsetB(self, start = 0):
offset = ""
for i in range(start,self.dim):
#find idx idxPerm
invIdx = -1
for j in range(self.dim):
if self.perm[j] == i:
invIdx = j
offset += "i" + str(i)
if(invIdx != 0):
offset += "*ldb" + str(invIdx)
if( i != self.dim-1):
offset += " + "
return offset
def getUpdateString(self,indent):
outStr = "B[" + self.getOffsetB() + "]"
inStr = "A[" + self.getOffsetA() + "]"
ret = ""
if(self.beta != 0):
ret += "%s%s = alpha*%s + beta*%s;\n"%(indent + self.indent, outStr, inStr,outStr)
else:
ret += "%s%s = alpha*%s;\n"%(indent + self.indent, outStr, inStr)
return ret
def printScalarLoop(self, loopPerm, indent):
loopIdx = loopPerm[0]
increment = 1
if len(loopPerm) == 1:
if( self.optimization == "streamingstore" ):
self.code += "#pragma vector nontemporal\n"
self.code += "#pragma simd\n"
self.code += "%sfor(int i%d = 0; i%d < size%d; i%d += %d)\n"%(indent,loopIdx,loopIdx,loopIdx,loopIdx,increment)
if len(loopPerm) > 1:
self.printScalarLoop(loopPerm[1:], indent + self.indent)
else:#we reached the innermost loop, no recursion
#get input and output offsets correct
self.code += self.getUpdateString(indent)
    def printRemainderLoop(self, loopPerm, indent, remainderIdx):
        """Recursively emit the cleanup loop nest into self.code.

        The loop over ``remainderIdx`` only covers the last
        ``remainder<idx>`` iterations (the part the blocked kernel could
        not handle); the loop over the blocked partner index stops before
        its remainder so the two nests do not overlap.
        """
        loopIdx = loopPerm[0]
        increment = 1
        if(loopIdx == remainderIdx):
            # remainder dimension: iterate only the leftover tail
            self.code += "%sfor(int i%d = size%d - remainder%d; i%d < size%d; i%d += %d)\n"%(indent,loopIdx, loopIdx, loopIdx,loopIdx,loopIdx,loopIdx,increment)
        else:
            # firstIdx: A's stride-1 blocked index (1 when perm[0] == 0)
            firstIdx = 0
            if( self.perm[0] == 0 ):
                firstIdx = 1
            if( self.optimization == "streamingstore" and len(loopPerm) == 1 and self.perm[0] != 0):
                self.code += "#pragma vector nontemporal\n"
            if( remainderIdx == firstIdx and loopIdx == self.perm[firstIdx]):
                # partner index: stop before its own remainder region
                self.code += "%sfor(int i%d = 0; i%d < size%d - remainder%d; i%d += %d)\n"%(indent,loopIdx,loopIdx,loopIdx,loopIdx,loopIdx,increment)
            else:
                self.code += "%sfor(int i%d = 0; i%d < size%d; i%d += %d)\n"%(indent,loopIdx,loopIdx,loopIdx,loopIdx,increment)
        if len(loopPerm) > 1:
            self.printRemainderLoop(loopPerm[1:], indent + self.indent, remainderIdx)
        else:#we reached the innermost loop, no recursion
            if( self.perm[0] == 0 ):
                # perm[0] == 0: index 0 is stride-1 in A and B alike and is
                # not part of loopPerm; emit it as the vectorized inner loop.
                indent += self.indent
                if( self.optimization == "streamingstore" ):
                    self.code += "#pragma vector nontemporal\n"
                self.code += "#pragma simd\n"
                self.code += "%sfor(int i0 = 0; i0 < size0; i0++)\n"%(indent)
            #get input and output offsets correct
            self.code += self.getUpdateString(indent)
    def getBlocking(self):
        """Return the macro-kernel blocking as a (blockA, blockB) tuple."""
        return (self.blockA, self.blockB)
    def getBroadcastVariables(self):
        # Returns the extra trailing arguments for a macro-kernel call:
        # pre-broadcast SIMD registers when vectorized (perm[0] != 0 and
        # not scalar), plain scalars otherwise. beta is only passed when a
        # beta-update was requested.
        code = "   "
        if(self.perm[0]!=0 and self.scalar == 0):
            code += "  ,reg_alpha"
            if(self.beta):
                code += "  ,reg_beta"
        else:
            code += "  ,alpha"
            if(self.beta):
                code += "  ,beta"
        return code
def __printLoopBody(self, loopPerm, indent, clean):
loopIdx = loopPerm[0]
increment = 1
if( self.perm[0] != 0 ):
if loopIdx == 0:
increment = self.blockA
elif loopIdx == self.perm[0]:
increment = self.blockB
else:
#we block along the outer two dimensions if the first index doesn't change
if loopIdx == 1:
increment = self.blockA
elif loopIdx == self.perm[1]:
increment = self.blockB
if( increment > 1):
self.code += "%sfor(int i%d = 0; i%d < size%d - %d; i%d+= %d)\n"%(indent,loopIdx,loopIdx,loopIdx,increment-1,loopIdx,increment)
else:
self.code += "%sfor(int i%d = 0; i%d < size%d; i%d+= %d)\n"%(indent,loopIdx,loopIdx,loopIdx,loopIdx,increment)
if len(loopPerm) > 1: #we have not reached the inner most loop yet => recursion
self.__printLoopBody(loopPerm[1:], indent + " ", clean)
else: #we reached the innermost loop, no recursion
#if( clean ):
# cleanMacroTransposeName = self.transposeMacroKernelname + "_"
# for i in self.perm:
# cleanMacroTransposeName += str(i)
# cleanMacroTransposeName +="_"
# for idx in range(len(self.size)):
# cleanMacroTransposeName += "%d"%(self.size[idx])
# if(idx != len(self.size)-1):
# cleanMacroTransposeName +="x"
#else:
cleanMacroTransposeName = self.transposeMacroKernelname
if( self.prefetchDistance > 0):
indexStr = ""
indexPrintStr = "("
for i in range(self.dim):
indexStr += "i%d, "%i
indexPrintStr += "%d, "
indexPrintStr += ")"
self.code += "%s{\n"%indent
self.code += "%sint offsetA = %s;\n"%(indent + self.indent, self.getOffsetA())
self.code += "%sint offsetB = %s;\n"%(indent + self.indent, self.getOffsetB())
prefetchDistance = (self.prefetchDistance + self.numMicroBlocksPerBlock - 1) / self.numMicroBlocksPerBlock
self.code += "%sif( counter >= %d ){\n"%(indent + self.indent, prefetchDistance )
self.code += "%sconst Offset &task = tasks.back();\n"%(indent + self.indent + self.indent)
self.code += "%sint offsetAnext0 = task.offsetA;\n"%(indent + self.indent + self.indent)
self.code += "%sint offsetBnext0 = task.offsetB;\n"%(indent + self.indent + self.indent)
self.code += "%sconst Offset ¤tTask = tasks.front();\n"%(indent + self.indent + self.indent)
self.code += "%s%s(&A[currentTask.offsetA], lda%d, &B[currentTask.offsetB], ldb%d, &A[offsetAnext0], &B[offsetBnext0], &A[offsetA], &B[offsetB]%s);\n"%(indent + self.indent + self.indent, cleanMacroTransposeName, self.perm[0], self.ldout, self.getBroadcastVariables())
self.code += "%stasks.pop();\n"%(indent + self.indent + self.indent)
self.code += "%s}\n"%(indent + self.indent)
self.code += "%scounter++;\n"%(indent + self.indent)
self.code += "%sOffset offset; offset.offsetA = offsetA; offset.offsetB = offsetB;\n"%(indent + self.indent)
self.code += "%stasks.push( offset );\n"%(indent + self.indent)
#if self.debug:
# self.code += "%sif( offsetA != offsetAnext || offsetB != offsetBnext)\n"%(indent + self.indent)
# self.code += "%s printf(\"%%d: %s %s %%d %%d %%d %%d\\n\",omp_get_thread_num(), %soffsetA, offsetAnext1, offsetB, offsetBnext1);\n"%(indent + self.indent,self.getVersionName(), indexPrintStr, indexStr)
# self.code += "%soffsetAnext = offsetAnext1;\n"%(indent)
# self.code += "%soffsetBnext = offsetBnext1;\n"%(indent)
else:
if( self.perm[0] != 0):
self.code += "%s%s(&A[%s], lda%d, &B[%s], ldb%d%s);\n"%(indent + self.indent, cleanMacroTransposeName,self.getOffsetA(), self.perm[0], self.getOffsetB(),self.ldout, self.getBroadcastVariables())
else:
if( not clean) :
self.code += "%s%s(&A[%s], lda1, lda%d, &B[%s], ldb1, ldb%d%s);\n"%(indent + self.indent, cleanMacroTransposeName,self.getOffsetA(1), self.perm[1], self.getOffsetB(1),self.ldout, self.getBroadcastVariables())
else:
self.code += "%s%s<size0>(&A[%s], lda1, lda%d, &B[%s], ldb1, ldb%d%s);\n"%(indent + self.indent, cleanMacroTransposeName,self.getOffsetA(1), self.perm[1], self.getOffsetB(1),self.ldout, self.getBroadcastVariables())
if( self.prefetchDistance > 0 ):
self.code += "%s}\n"%indent
def getVersionName(self):
versionName = ""
if(self.reference != 0):
versionName += "reference"
else:
versionName += "v"
found0 = 0
for i in self.loopPerm:
if(i == 0):
found0 = 1
versionName += str(i)
if(self.perm[0] == 0 and not found0):
versionName += str(0) #0 is always the innermost loop in this case
versionName += "_%dx%d"%(self.blockA, self.blockB)
if( self.prefetchDistance > 0 ):
versionName += "_prefetch_" + str(self.prefetchDistance)
return versionName
def getTransposeName(self, clean = 0):
if(self.floatTypeA == "float"):
if(self.floatTypeB == "float"):
transposeName = "s"
else:
transposeName = "sd"
if(self.floatTypeA == "double"):
if(self.floatTypeB == "double"):
transposeName = "d"
else:
transposeName = "ds"
if(self.floatTypeA == "float complex"):
if(self.floatTypeB == "float complex"):
transposeName = "c"
else:
transposeName = "cz"
if(self.floatTypeA == "double complex"):
if(self.floatTypeB == "double complex"):
transposeName = "z"
else:
transposeName = "zs"
transposeName += "Transpose_"
for i in self.perm:
transposeName += str(i)
transposeName +="_"
for idx in range(len(self.size)):
transposeName += "%d"%(self.size[idx])
if(idx != len(self.size)-1):
transposeName +="x"
# transposeName +="_"
# for idx in range(len(self.lda)):
# transposeName += "%d"%(self.lda[idx])
# if(idx != len(self.lda)-1):
# transposeName +="x"
#
# transposeName +="_"
# for idx in range(len(self.ldb)):
# transposeName += "%d"%(self.ldb[idx])
# if(idx != len(self.ldb)-1):
# transposeName +="x"
if(clean == 0):
transposeName += "_"
transposeName += self.getVersionName()
if(self.parallelize == 1):
transposeName += "_par"
if( self.optimization != "" ):
transposeName += "_%s"%self.optimization
if(self.beta == 0):
transposeName += "_bz"
return transposeName
    def getBroadcastKernel(self, name, value, floatType):
        """Append C code that broadcasts scalar ``value`` into the SIMD
        register ``name`` and return the accumulated code plus a trailing
        newline (note: self.code itself keeps no trailing newline)."""
        self.code += self.indent +"//broadcast %s\n"%name
        if(self.architecture == "power"):
            # Power/QPX: 4-wide double vector via vec_splats
            self.code += self.indent + "vector4double %s = vec_splats(%s);\n"%(name, value)
        else:
            # x86: pick the _mm*_set1_* intrinsic matching register width
            if( value == "beta" and self.floatTypeA.find("double") != -1 and self.floatTypeB.find("float") != -1):
                # mixed double->float case: beta is broadcast as 128-bit float
                _floatType = "__m128"
                functionName = "_mm_set1_ps"
            elif( floatType == "float" or floatType == "float complex" ):
                if( self.registerSizeBits == 128 ):
                    functionName = "_mm_set1_ps"
                else:
                    functionName = "_mm%d_set1_ps"%self.registerSizeBits
                _floatType = "__m%d"%self.registerSizeBits
            elif( floatType == "double" or floatType == "double complex" ):
                if( self.registerSizeBits == 128 ):
                    functionName = "_mm_set1_pd"
                else:
                    functionName = "_mm%d_set1_pd"%self.registerSizeBits
                _floatType = "__m%dd"%self.registerSizeBits
            self.code += self.indent + "%s %s = %s(%s);\n"%(_floatType, name,functionName,value)
        return self.code + "\n"
    def getHeader(self, headerFlag = 1, clean = 0):
        """Return the C signature of the transpose function.

        headerFlag: 1 yields a prototype (";"), 0 opens the body ("{").
        clean: non-zero yields the templated variant (sizes as template
        parameters, no ``size`` array argument) plus a doc comment.
        """
        transposeName = self.getTransposeName(clean)
        if headerFlag == 0:
            trailingChar = "\n{\n"
        else:
            trailingChar = ";\n"
        # alpha/beta are real-valued, matching the precision of A resp. B
        alphaFloatType = "float"
        if( self.floatTypeA.find("double") != -1 ):
            alphaFloatType = "double"
        betaFloatType = "float"
        if( self.floatTypeB.find("double") != -1 ):
            betaFloatType = "double"
        size_str = ""
        for i in range(self.dim):
            size_str += "int size%d, "%i
        size_str = size_str[:-2]
        # Astr/Bstr: index lists used in the doc comment, e.g. B(i1,i0)
        Astr = ""
        Bstr = ""
        for i in range(len(self.perm)):
            Astr += "i%d,"%i
            Bstr += "i%d,"%self.perm[i]
        Astr = Astr[:-1]
        Bstr = Bstr[:-1]
        if(self.beta != 0):
            if( not clean ):
                return "void %s( const %s* __restrict__ A, %s* __restrict__ B, const %s alpha, const %s beta, const int *size, const int *lda, const int *ldb)%s"% (transposeName, self.floatTypeA, self.floatTypeB, alphaFloatType,betaFloatType, trailingChar)
            else:
                ret = ""
                ret += "/**\n"
                ret += " * B(%s) <- alpha * A(%s) + beta * B(%s);\n"%(Bstr,Astr,Bstr)
                ret += " */\n"
                ret += "template<%s>\nvoid %s( const %s* __restrict__ A, %s* __restrict__ B, const %s alpha, const %s beta, const int *lda, const int *ldb)%s"% (size_str, transposeName, self.floatTypeA, self.floatTypeB, alphaFloatType,betaFloatType, trailingChar)
                return ret
        else:
            # beta == 0: the beta argument is omitted entirely
            if( not clean ):
                return "void %s( const %s* __restrict__ A, %s* __restrict__ B, const %s alpha, const int *size, const int *lda, const int *ldb)%s"% (transposeName, self.floatTypeA, self.floatTypeB, alphaFloatType, trailingChar)
            else:
                ret = "/**\n"
                ret += " * B(%s) <- alpha * A(%s);\n"%(Bstr,Astr)
                ret += " */\n"
                ret += "template<%s>\nvoid %s( const %s* __restrict__ A, %s* __restrict__ B, const %s alpha, const int *lda, const int *ldb)%s"% (size_str, transposeName, self.floatTypeA, self.floatTypeB, alphaFloatType, trailingChar)
                return ret
def printHeader(self, headerFlag = 1, clean = 0):
    """Append the function signature/definition opener to the code buffer.

    Thin convenience wrapper: builds the header via getHeader() with the same
    flags and accumulates it into self.code.
    """
    header = self.getHeader(headerFlag, clean)
    self.code += header
def declareVariables(self,clean):
    """Emit C declarations for sizes, leading dimensions and remainder counts.

    clean : non-zero suppresses the size copies (sizes are template parameters
    in the clean variant).  Appends generated C code to self.code.
    """
    if( not clean ):
        # non-templated variant: copy the runtime size array into local constants
        for i in range(self.dim):
            self.code += "%sconst int size%d = size[%d];\n"%(self.indent,i,i)
    if(self.dim > 1):
        #LDA
        # lda[i] is cumulative: lda_i = size_{i-1} * lda_{i-1}; when the caller
        # passes lda == NULL the sizes themselves define the (dense) strides
        for i in range(1,self.dim):
            self.code += "%sint lda%d;\n"%(self.indent,i)
        self.code += "%sif( lda == NULL ){\n"%(self.indent)
        self.code += "%s   lda1 = size0;\n"%(self.indent)
        for i in range(2,self.dim):
            self.code += "%s   lda%d = size%d * lda%d;\n"%(self.indent,i,i-1,i-1)
        self.code += "%s}else{\n"%(self.indent)
        self.code += "%s   lda1 = lda[0];\n"%(self.indent)
        for i in range(2,self.dim):
            self.code += "%s   lda%d = lda[%d] * lda%d;\n"%(self.indent,i,i-1,i-1)
        self.code += "%s}\n"%(self.indent)
        #LDB
        # B's dense strides follow the permuted sizes: ldb1 = size[perm[0]], ...
        for i in range(1,self.dim):
            self.code += "%sint ldb%d;\n"%(self.indent,i)
        self.code += "%sif( ldb == NULL ){\n"%(self.indent)
        self.code += "%s   ldb1 = size%d;\n"%(self.indent,self.perm[0])
        for i in range(2,self.dim):
            self.code += "%s   ldb%d = size%d * ldb%d;\n"%(self.indent,i,self.perm[i-1],i-1)
        self.code += "%s}else{\n"%(self.indent)
        self.code += "%s   ldb1 = ldb[0];\n"%(self.indent)
        for i in range(2,self.dim):
            self.code += "%s   ldb%d = ldb[%d] * ldb%d;\n"%(self.indent,i,i-1,i-1)
        self.code += "%s}\n"%(self.indent)
    # remainder counts for the blocked kernel: the blocked dims depend on
    # whether dim 0 keeps its position (perm[0] == 0) or not
    if( self.perm[0] != 0 ):
        self.code += "%sconst int remainder0 = size0 %% %d;\n"%(self.indent,self.blockA)
        self.code += "%sconst int remainder%d = size%d %% %d;\n"%(self.indent,self.perm[0],self.perm[0], self.blockB)
    else:
        # perm[0] == 0: blocking happens along dims 1 and perm[1] instead
        self.code += "%sconst int remainder1 = size1 %% %d;\n"%(self.indent,self.blockA)
        if(self.perm[1] != 1):
            self.code += "%sconst int remainder%d = size%d %% %d;\n"%(self.indent,self.perm[1],self.perm[1], self.blockB)
    if( self.prefetchDistance > 0 and self.debug ):
        # debug-only helpers for the software-prefetch bookkeeping
        self.code += "%sint offsetAnext = 0, offsetBnext = 0;\n"%(self.indent)
def getCostLoop(self):
    """Return the cost estimate of this loop order, computing and caching it on
    first use (self.cost == 0.0 means 'not yet computed')."""
    if self.cost == 0.0:
        self.cost = ttc_util.getCostLoop(self.loopPerm, self.perm, self.size)
    return self.cost
def getImplementation(self, parallel = 1, clean = 0):
    """Generate and return the complete C++ body of the transpose function.

    parallel : int
        non-zero -> wrap the loop nest in OpenMP pragmas.
    clean : int
        forwarded to header / loop-body generation (templated variant).

    Resets self.code, then emits: header, variable declarations, SIMD
    broadcast of alpha/beta, the blocked main loop nest, the prefetch drain
    loop and the remainder loops — or a plain scalar loop nest when
    self.reference != 0.
    """
    self.code = ""
    self.printHeader(0,clean)  # headerFlag=0: opens the function definition
    self.declareVariables(clean)
    if(self.perm[0] != 0 and self.scalar ==0):
        # vectorized path: broadcast the scalars into SIMD registers once
        self.getBroadcastKernel("reg_alpha","alpha", self.floatTypeA)
        if(self.beta != 0):
            self.getBroadcastKernel("reg_beta","beta", self.floatTypeB)
    if( self.reference == 0):
        indent = self.indent
        if( parallel ):
            self.code += "#pragma omp parallel\n"
            self.code += self.indent +"{\n"
            indent += self.indent
        if( self.prefetchDistance > 0 ):
            # software prefetching queues upcoming tile offsets as 'tasks'
            self.code += indent + "int counter = 0;\n"
            self.code += indent + "std::queue<Offset> tasks;\n"
        if( parallel ):
            self.code += "#pragma omp for collapse(%d) schedule(static)\n"%(len(self.loopPerm))
        self.__printLoopBody(self.loopPerm, indent, clean)
        if( self.prefetchDistance > 0 ):
            # drain the tiles still queued after the main loop nest finished;
            # these are executed with the non-prefetching macro kernel
            self.code += indent + "while(tasks.size() > 0){\n"
            self.code += indent + "   const Offset &task = tasks.front();\n"
            # strip the trailing "_prefetch..." suffix from the kernel name
            endPos = self.transposeMacroKernelname.find("prefetch")
            if( endPos != -1):
                endPos -= 1 #remove last '_'
            cleanMacroTransposeName = self.transposeMacroKernelname[:endPos]#remove prefetch
            #if( clean ):
            #    cleanMacroTransposeName += "_"
            #    for i in self.perm:
            #        cleanMacroTransposeName += str(i)
            #    cleanMacroTransposeName +="_"
            #    for idx in range(len(self.size)):
            #        cleanMacroTransposeName += "%d"%(self.size[idx])
            #        if(idx != len(self.size)-1):
            #            cleanMacroTransposeName +="x"
            self.code += indent + "   %s(&A[task.offsetA], lda%d, &B[task.offsetB], ldb%d %s);\n"%(cleanMacroTransposeName, self.perm[0], self.ldout, self.getBroadcastVariables())
            self.code += indent + "   tasks.pop();\n"
            self.code += indent + "}\n"
        #print remainder loops
        indent = self.indent
        if( parallel ):
            indent += self.indent
        if( self.perm[0] != 0 ):
            # remainders exist along dim 0 and along dim perm[0]
            self.code += indent + "//Remainder loop" + "\n"
            if( parallel ):
                self.code += "#pragma omp for collapse(%d) schedule(static)\n"%(self.dim-1)
            self.printRemainderLoop(self.loopPerm, indent, 0)
            self.code += indent + "//Remainder loop" + "\n"
            if( parallel ):
                self.code += "#pragma omp for collapse(%d) schedule(static)\n"%(self.dim-1)
            self.printRemainderLoop(self.loopPerm, indent, self.perm[0])
        else:
            # perm[0] == 0: the blocked (and hence remainder) dims are 1 and perm[1]
            self.code += indent + "//Remainder loop" + "\n"
            if( parallel ):
                self.code += "#pragma omp for collapse(%d) schedule(static)\n"%(self.dim-1)
            self.printRemainderLoop(self.loopPerm, indent, 1)
            self.code += indent + "//Remainder loop" + "\n"
            if( parallel ):
                self.code += "#pragma omp for collapse(%d) schedule(static)\n"%(self.dim-1)
            self.printRemainderLoop(self.loopPerm, indent, self.perm[1])
        if( parallel ):
            self.code += self.indent +"}\n"  # closes the "#pragma omp parallel" block
    else:
        # reference implementation: unblocked scalar loop nest
        if( parallel ):
            self.code += "#pragma omp parallel for collapse(%d)\n"%(max(1,len(self.loopPerm)-1))
        self.printScalarLoop(self.loopPerm, self.indent)
    self.code += "}\n"  # closes the function body opened by printHeader(0, ...)
    return self.code
|
Coca-Cola Great Britain is bringing back its hugely successful ‘Share a Coke’ campaign this summer due to popular demand. In year two of the campaign, over a thousand names will appear on packs of Coca-Cola, Diet Coke and Coca-Cola Zero between now and September. 500ml and 375ml bottles will bear first names of consumers with over 1,000 appearing in stores across the UK.
For 2014, Coca-Cola GB is making the campaign more personal than ever by introducing targeted large PET bottles across Coke and its no-calorie variants. These celebrate family by encouraging people to share with parents through ‘Mum’ and ‘Dad’ appearing on pack. The 330ml cans will feature a selection of nicknames including ‘Mate’ and ‘Friends’.
The campaign is once again supported by an extensive integrated marketing campaign which includes Outdoor, TVC, digital and experiential activity.
Originally shot in South Africa, featuring music from UK music duo the Ting Tings “That’s Not My Name”, the TVC is the latest in The Coca-Cola Company’s proud history of powerful TVCs.
Those who can’t wait to get involved in Share a Coke this year can head online to shareacoke.co.uk where they can share a virtual bottle with friends or download personalised wallpapers for their desktop or mobile phone. Consumers can also track down bottles as they are found via the Name Dropper digital app which will aggregate social media content using the campaign hashtag #shareacoke along with the name that appears on the bottle.
For those who still can’t find their name on shelves, an online personalisation site with more than 500,000 names to choose from will launch later in the summer. This offers consumers the chance to purchase iconic Coca-Cola glass bottles featuring their own, their friends and their families’ names.
There will also be chances to get personalised products at pop up events in major cities across the region and in store via the shopper activation which will allow consumers to instantly print labels featuring their surname on Coca-Cola or Coke Zero sharing bottles.
To mark the return of Share a Coke, The Coca-Cola Company is giving you a sneak peek into the magic behind the campaign, by bringing to life what it takes to swap their iconic script with the names of their consumers. This short film shows the complex process of printing and production that is involved when putting 12,000 names on over 1.5 billion packs across Europe.
Coca-Cola GB sourced independent data from Experian to identify 1,200 of the most popular names in the UK. Experian analysed the first names of adults between 19 and 29 years old, which were then weighted and ranked to generate over 1000 popular male and female first names in the UK reflecting the gender and the ethnic makeup of this segment of the population. The list of 1,200 names is available from the campaign website.
The names and nicknames will appear across individual 500ml and 375ml PET bottles of Coca-Cola, Diet Coke and Coke Zero. 330ml cans, multipack formats and larger sharing bottles across Coke and its light variants will also carry a selection of the 11 nicknames. The campaign will run throughout summer.
Andy May, Open Road 0203 542 1119 / andrew.may@theopen-road.com on behalf of Coca-Cola.
Coca-Cola Great Britain is responsible for marketing 21 brands and over 100 products to consumers across Great Britain, with a focus upon developing new brands and extending existing brands including Coca-Cola. Other Coca-Cola Great Britain brands include Diet Coke, Coke Zero, Fanta, Sprite, Dr Pepper, Oasis, Glaceau vitaminwater, Oasis, Schweppes, 5 Alive, Lilt, Kia Ora, Relentless Energy Drink and Powerade. The Coca-Cola Great Britain portfolio is worth £2,095 million with value sales growth of 4.8% in the past year. Within this, the My Coke trilogy (Coca-Cola, Diet Coke and Coke Zero) is worth £1,162 million (Nielsen, w/c 20/04/13). Coca-Cola Great Britain is committed to developing innovative, responsible and sustainable initiatives that help protect the environment. Recently, the Coca-Cola system launched its PlantBottle™ plastic made from up to 22.5% plant-based materials. For more information about Coca-Cola in Great Britain, please visit our website at www.coca-cola.co.uk.
|
from grappa import GrappaExperiment, MPIRunGrappaExperiment
"""
A place to keep a bunch of example experiments.
Feel free to use this script while developing experiments.
But, when saving things for reproducibility, put each of your final experiments in a separate file
"""
# tpch
# TPC-H experiment definitions, one per target cluster. Each constructor call
# takes (parameter dict, extra Grappa flag dict); lambda-valued parameters are
# derived from the other keys (e.g. 'exe' from the query number 'qn').
tpch_sampa = GrappaExperiment({
    'trial': range(1, 3 + 1),   # 3 repetitions per configuration
    'qn': range(1, 22 + 1),     # all 22 TPC-H queries
    'exe': lambda qn: "grappa_tpc_q{0}.exe".format(qn),
    'sf': 10,                   # TPC-H scale factor
    'ppn': 12,                  # processes per node
    'nnode': 8,
    'query': lambda qn: 'q{0}'.format(qn),
    'vtag': 'v1',
    'machine': 'sampa'
},
{
    'shared_pool_memory_fraction': 0.5
})
# Same sweep on the 'pal' cluster with a larger node/process count.
tpch_pal = GrappaExperiment({
    'trial': range(1, 3 + 1),
    'qn': range(1, 22 + 1),
    'exe': lambda qn: "grappa_tpc_q{0}.exe".format(qn),
    'sf': 10,
    'ppn': 16,
    'nnode': 16,
    'query': lambda qn: 'q{0}'.format(qn),
    'vtag': 'v1',
    'machine': 'pal'
},
{
    'shared_pool_memory_fraction': 0.5
})
# bigdata cluster uses the mpirun launcher; 'np' is derived from ppn * nnode.
tpch_bigdatann = MPIRunGrappaExperiment({
    'trial': range(1, 3 + 1),
    'qn': range(1, 22 + 1),
    'exe': lambda qn: "grappa_tpc_q{0}.exe".format(qn),
    'sf': 10,
    'ppn': 16,
    'nnode': 16,
    'np': lambda ppn, nnode: ppn*nnode,
    'query': lambda qn: 'q{0}'.format(qn),
    'vtag': 'v1',
    'machine': 'bigdata',
    'system': 'radish'
},
{
    'shared_pool_memory_fraction': 0.5
})
# Iterative ("radish-iter") variant; restricted to the queries that have an
# iterative executable built.
tpch_iter_bigdatann = MPIRunGrappaExperiment({
    'trial': range(1, 3 + 1),
    #'qn': range(1, 22 + 1),
    'qn': [6,11,12,14,15,17,19],
    'exe': lambda qn: "grappa_tpc_iter_q{0}.exe".format(qn),
    'sf': 10,
    'ppn': 16,
    'nnode': 16,
    'np': lambda ppn, nnode: ppn*nnode,
    'query': lambda qn: 'q{0}'.format(qn),
    'vtag': 'v1',
    'machine': 'bigdata',
    'system': 'radish-iter'
},
{
    'shared_pool_memory_fraction': 0.5
})
# Debug-build run of the two problematic queries only.
tpch_bigdatann_debug = MPIRunGrappaExperiment({
    'trial': range(1, 3 + 1),
    'qn': [18,19],
    'exe': lambda qn: "grappa_tpc_q{0}.exe".format(qn),
    'sf': 10,
    'ppn': 16,
    'nnode': 16,
    'np': lambda ppn, nnode: ppn*nnode,
    'query': lambda qn: 'q{0}'.format(qn),
    'vtag': 'v2-debugmode',
    'machine': 'bigdata',
    'system': 'radish'
},
{
    'shared_pool_memory_fraction': 0.5
})
# Only the iterative bigdata sweep is launched; the other definitions are
# kept for reuse (uncomment to run).
#tpch_bigdatann.run()
#tpch_bigdatann_debug.run()
tpch_iter_bigdatann.run()
|
Get into your supercar and perform crazy stunts in the city center. All streets are yours; there are no pedestrians and you have 100% opportunity to get adrenaline rush and excitement as you perform full 360 loop or do crazy jumps. Lots of fun for all car games fans!
|
"""All fuel shares of the base year for the
different technologies are defined in this file.
"""
from energy_demand.initalisations import helpers
def assign_by_fuel_tech_p(
        enduses,
        sectors,
        fueltypes,
        fueltypes_nr
    ):
    """Assigning fuel share per enduse for different technologies
    for the base year.

    Arguments
    ----------
    enduses : dict
        Enduses
    sectors : dict
        Sectors per submodel
    fueltypes : dict
        Fueltypes lookup
    fueltypes_nr : int
        Number of fueltypes

    Returns
    -------
    fuel_tech_p_by : dict
        Residential fuel share percentages

    Note
    ----
    - In an enduse, either all fueltypes with assigned fuels need to be
      assigned with technologies or none. No mixing possible

    - Technologies can be defined for the following fueltypes:
        'solid_fuel': 0,
        'gas': 1,
        'electricity': 2,
        'oil': 3,
        'biomass': 4,
        'hydrogen': 5,
        'heat': 6

    - Not defined fueltypes will be assigned placeholder technologies
    """
    # Initialise every enduse of every submodel with empty per-fueltype shares
    fuel_tech_p_by = {}
    _fuel_tech_p_by = helpers.init_fuel_tech_p_by(
        enduses['residential'], fueltypes_nr)
    fuel_tech_p_by.update(_fuel_tech_p_by)
    _fuel_tech_p_by = helpers.init_fuel_tech_p_by(
        enduses['service'], fueltypes_nr)
    fuel_tech_p_by.update(_fuel_tech_p_by)
    _fuel_tech_p_by = helpers.init_fuel_tech_p_by(
        enduses['industry'], fueltypes_nr)
    fuel_tech_p_by.update(_fuel_tech_p_by)

    # ====================
    # Residential Submodel
    # ====================

    # ---------------
    # rs_lighting
    # Calculated on the basis of ECUK Table 3.08
    # ---------------
    fuel_tech_p_by['rs_lighting'][fueltypes['electricity']] = {
        'standard_lighting_bulb': 0.04,
        'halogen': 0.56,
        'fluorescent_strip_lighting': 0.07,
        'energy_saving_lighting_bulb': 0.32,
        'LED': 0.01}

    # ---------------
    # rs_cold
    # Calculated on the basis of ECUK Table 3.08
    # ---------------
    fuel_tech_p_by['rs_cold'][fueltypes['electricity']] = {
        'chest_freezer': 0.087,
        'fridge_freezer': 0.588,
        'refrigerator': 0.143,
        'upright_freezer': 0.182}

    # ---------------
    # rs_cooking
    # Calculated on the basis of ECUK Table 3.08
    # Calculated on the assumption that 5 to 10%
    # of all households have induction hobs (https://productspy.co.uk/are-induction-hobs-safe/ (5-10%))
    # ---------------
    fuel_tech_p_by['rs_cooking'][fueltypes['electricity']] = {
        'hob_electricity': 0.95,
        'hob_induction_electricity': 0.05}
    fuel_tech_p_by['rs_cooking'][fueltypes['gas']] = {
        'hob_gas': 1.0}
    fuel_tech_p_by['rs_cooking'][fueltypes['hydrogen']] = {
        'hob_hydrogen': 1.0}
    fuel_tech_p_by['rs_cooking'][fueltypes['biomass']] = {
        'hob_biomass': 1.0}

    # ---------------
    # rs_wet
    # calculated on the basis of EUCK Table 3.08
    # ---------------
    fuel_tech_p_by['rs_wet'][fueltypes['electricity']] = {
        'washing_machine': 0.305,
        'washer_dryer': 0.157,
        'dishwasher': 0.220,
        'tumble_dryer': 0.318}

    # ---------------
    # rs_space_heating
    #
    # According to the DCLG (2014) English Housing Survey. Energy Report. doi: 10.1017/CBO9781107415324.004.
    # Annex Table 3.1, the following number of electric heating technologies can be found in the UK:
    #
    #   storage heaters         5.5  % of all houses
    #   electric room heaters   2.0  % of all houses
    #   electric central heating 0.65 % of all houses
    #
    # As heat pumps were not accounted for, they are taken from OFGEM (2015),
    # which states that there are about 0.1m heat pumps of about in total 27m
    # households in the UK. This corresponds to about 0.4 %. (see also Hannon 2015).
    # According to Hannon (2015), heat pumps account only for a tiny fraction of the UK.
    # heat supply for buildings (approximately 0.2%). This percentage is subtracted from
    # the storage heaters.
    #
    #   storage heaters          5.1  % of all houses --> ~ 62%  (100.0 / 8.15) * 5.1
    #   secondary_heater_electricity
    #   electric room heaters    2.0  % of all houses --> ~ 25%  (100.0 / 8.15) * 2.0
    #   electric central heating 0.65 % of all houses --> ~  8%  (100.0 / 8.15) * 0.65
    #   heat pumps               0.4  % of all houses --> ~ 0.5% (100.0 / 8.15) * 0.4
    #
    # OFGEM (2015); Insights paper on households with electric and other non-gas heating,
    # (December), 1–84.
    #
    # Hannon, M. J. (2015). Raising the temperature of the UK heat pump market:
    # Learning lessons from Finland. Energy Policy, 85, 369–375.
    # https://doi.org/10.1016/j.enpol.2015.06.016
    # ---------------
    fuel_tech_p_by['rs_space_heating'][fueltypes['solid_fuel']] = {
        'boiler_solid_fuel': 1.0}
    fuel_tech_p_by['rs_space_heating'][fueltypes['oil']] = {
        'boiler_condensing_oil': 0.6,
        'boiler_oil': 0.4}

    # ---
    # According to table 3.19, 59.7% (43.5% + 14.3%) have some form of condensing boiler.
    # Todays share of district heating is about 2% of UK non-industrial demand
    # http://fes.nationalgrid.com/media/1215/160712-national-grid-dh-summary-report.pdf
    # ---
    fuel_tech_p_by['rs_space_heating'][fueltypes['gas']] = {
        'boiler_condensing_gas': 0.60,
        'boiler_gas': 0.37,
        'district_heating_CHP_gas': 0.03}
    fuel_tech_p_by['rs_space_heating'][fueltypes['electricity']] = {
        'district_heating_electricity' : 0,
        'storage_heater_electricity': 0.62,
        'secondary_heater_electricity':0.33,
        'heat_pumps_electricity': 0.05}
    fuel_tech_p_by['rs_space_heating'][fueltypes['biomass']] = {
        'boiler_biomass': 1.0,
        'district_heating_biomass': 0.0}
    fuel_tech_p_by['rs_space_heating'][fueltypes['hydrogen']] = {
        'fuel_cell_hydrogen': 0,
        'district_heating_fuel_cell': 0,
        'boiler_hydrogen': 1.0,
        'heat_pumps_hydrogen': 0.0}

    # -------------
    # Residential water heating
    # (same gas/electricity split as space heating)
    # -------------
    fuel_tech_p_by['rs_water_heating'][fueltypes['gas']] = {
        'boiler_condensing_gas': 0.60,
        'boiler_gas': 0.37,
        'district_heating_CHP_gas': 0.03}
    fuel_tech_p_by['rs_water_heating'][fueltypes['electricity']] = {
        'storage_heater_electricity': 0.62,
        'secondary_heater_electricity':0.33,
        'heat_pumps_electricity': 0.05}
    fuel_tech_p_by['rs_water_heating'][fueltypes['biomass']] = {
        'boiler_biomass': 1.0,
        'district_heating_biomass': 0.0}
    fuel_tech_p_by['rs_water_heating'][fueltypes['hydrogen']] = {
        'boiler_hydrogen': 1.0}
    fuel_tech_p_by['rs_water_heating'][fueltypes['oil']] = {
        'boiler_oil': 1.0}
    fuel_tech_p_by['rs_water_heating'][fueltypes['solid_fuel']] = {
        'boiler_solid_fuel': 1.0}

    # ===================
    # Service subModel
    # ===================

    # ss_lighting Simplified based on Table 5.09 (Office lighting)
    fuel_tech_p_by['ss_lighting'][fueltypes['electricity']] = {
        'halogen': 0.45,
        'fluorescent_strip_lighting': 0.07,
        'energy_saving_lighting_bulb': 0.47, #All different lighting next to halogen are summarised here ("non-halogen lighting")
        'LED': 0.01}

    # ----------------
    # Service space heating (ss_space_heating)
    # For ss_space heating the load profile is the same for all technologies
    # ----------------
    fuel_tech_p_by['ss_space_heating'][fueltypes['solid_fuel']] = {
        'boiler_solid_fuel': 1.0}
    fuel_tech_p_by['ss_space_heating'][fueltypes['gas']] = {
        'district_heating_CHP_gas': 0.02,
        'boiler_condensing_gas': 0.6,
        'boiler_gas': 0.38}
    fuel_tech_p_by['ss_space_heating'][fueltypes['electricity']] = {
        'district_heating_electricity' : 0,
        'secondary_heater_electricity': 0.95,
        'heat_pumps_electricity': 0.05}
    fuel_tech_p_by['ss_space_heating'][fueltypes['oil']] = {
        'boiler_condensing_oil': 0.6,
        'boiler_oil': 0.4}
    fuel_tech_p_by['ss_space_heating'][fueltypes['biomass']] = {
        'boiler_biomass': 1.0}
    fuel_tech_p_by['ss_space_heating'][fueltypes['hydrogen']] = {
        'fuel_cell_hydrogen': 0,
        'boiler_hydrogen': 1.0,
        'heat_pumps_hydrogen': 0.0,
        'district_heating_fuel_cell': 0.0}

    # -------------
    # Service water heating
    # -------------
    fuel_tech_p_by['ss_water_heating'][fueltypes['gas']] = {
        'boiler_condensing_gas': 0.60,
        'boiler_gas': 0.37,
        'district_heating_CHP_gas': 0.03}
    fuel_tech_p_by['ss_water_heating'][fueltypes['electricity']] = {
        'storage_heater_electricity': 0.62,
        'secondary_heater_electricity':0.33,
        'heat_pumps_electricity': 0.05}
    fuel_tech_p_by['ss_water_heating'][fueltypes['biomass']] = {
        'boiler_biomass': 1.0,
        'district_heating_biomass': 0.0}
    fuel_tech_p_by['ss_water_heating'][fueltypes['hydrogen']] = {
        'boiler_hydrogen': 1.0}
    fuel_tech_p_by['ss_water_heating'][fueltypes['oil']] = {
        'boiler_oil': 1.0}
    fuel_tech_p_by['ss_water_heating'][fueltypes['solid_fuel']] = {
        'boiler_solid_fuel': 1.0}

    # ------------------------------
    # Cooling
    # ECUK Table 5.09
    # ------------------------------
    fuel_tech_p_by['ss_cooling_humidification'][fueltypes['electricity']] = {
        'central_air_conditioner_electricity': 0.64,
        'decentral_air_conditioner_electricity': 0.36}
    fuel_tech_p_by['ss_cooling_humidification'][fueltypes['gas']] = {
        'central_air_conditioner_gas': 0.64,
        'decentral_air_conditioner_gas': 0.36}
    fuel_tech_p_by['ss_cooling_humidification'][fueltypes['oil']] = {
        'central_air_conditioner_oil': 0.64,
        'decentral_air_conditioner_oil': 0.36}

    # Helper: Transfer all defined shares for every enduse to every sector
    fuel_tech_p_by = helpers.copy_fractions_all_sectors(
        fuel_tech_p_by,
        sectors['service'],
        affected_enduses=enduses['service'])

    # ===================
    # Industry subModel - Fuel shares of technologies in enduse
    # ===================

    # ----------------
    # Industrial space heating (is_space_heating)
    # ----------------
    fuel_tech_p_by['is_space_heating'][fueltypes['solid_fuel']] = {
        'boiler_solid_fuel': 1.0}
    fuel_tech_p_by['is_space_heating'][fueltypes['gas']] = {
        'district_heating_CHP_gas': 0.02,
        'boiler_condensing_gas': 0.6,
        'boiler_gas': 0.38}
    fuel_tech_p_by['is_space_heating'][fueltypes['electricity']] = {
        'district_heating_electricity' : 0,
        'secondary_heater_electricity': 0.95,
        'heat_pumps_electricity': 0.05,
        'storage_heater_electricity': 0}
    fuel_tech_p_by['is_space_heating'][fueltypes['oil']] = {
        'boiler_condensing_oil': 0.6,
        'boiler_oil': 0.4}
    fuel_tech_p_by['is_space_heating'][fueltypes['biomass']] = {
        'boiler_biomass': 1.0}
    fuel_tech_p_by['is_space_heating'][fueltypes['hydrogen']] = {
        'fuel_cell_hydrogen': 0,
        'boiler_hydrogen': 1.0,
        'heat_pumps_hydrogen': 0.0,
        'district_heating_fuel_cell': 0.0}

    # Helper: Transfer all defined shares for every enduse to every sector
    # (must run before the sector-specific assignments below so they are not
    # overwritten)
    fuel_tech_p_by = helpers.copy_fractions_all_sectors(
        fuel_tech_p_by,
        sectors=sectors['industry'],
        affected_enduses=enduses['industry'])

    # ----------------
    # Industrial High temporal processes (is_high_temp_process)
    # ----------------
    # Todays share is about: 17% electric furnace, 82% basic oxygen (Key Statistics 2016, appea, EnergyQuest)
    #-- basic_metals (sector)
    fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['solid_fuel']] = {
        'basic_oxygen_furnace': 1.0}
    fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['electricity']] = {
        'electric_arc_furnace': 1.0}
    fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['gas']] = {
        'SNG_furnace': 1.0}
    fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['biomass']] = {
        'biomass_furnace': 1.0}
    fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['hydrogen']] = {
        'hydrogen_furnace': 1.0}
    fuel_tech_p_by['is_high_temp_process']['basic_metals'][fueltypes['oil']] = {
        'oil_furnace': 1.0}

    #-- non_metallic_mineral_products
    fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['solid_fuel']] = {
        'dry_kiln_coal': 0.9,
        'wet_kiln_coal': 0.1}
    fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['oil']] = {
        'dry_kiln_oil': 0.9,
        'wet_kiln_oil': 0.1}
    fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['gas']] = {
        'dry_kiln_gas': 0.9,
        'wet_kiln_gas': 0.1}
    fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['electricity']] = {
        'dry_kiln_electricity': 0.9,
        'wet_kiln_electricity': 0.1}
    fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['biomass']] = {
        'dry_kiln_biomass': 1.0}
    fuel_tech_p_by['is_high_temp_process']['non_metallic_mineral_products'][fueltypes['hydrogen']] = {
        'dry_kiln_hydrogen': 1.0}

    return dict(fuel_tech_p_by)
|
Prominent & Leading Manufacturer from Mumbai, we offer clarifiers, water treatment plant, reactor clarifiers, effluent treatment plant and sewage treatment plant.
We are the leading and prominent Manufacturer And Supplier of Clarifiers.
In line with client's diverse requirements across the globe, we are offering a commendable range of Water Treatment Plant. This treatment plant is accessible in various specifications as per the variegated demand of customers. Offered treatment plant is used for treating waste & dirty water in order to supply clean water in various places. As well, this treatment plant is tested against several parameters in ahead of final dispatch to eradicate production-defects from our end.
Backed by the team of dexterous professionals and advanced infrastructure we are able to deliver a wide array of Reactor Clarifiers. The offered treatment plant is manufactured with precision using high-quality material & modern techniques in compliance with the market norms. This treatment plant is available in different specifications as per demanded comes from our customers. We provide this treatment plant at an affordable price in a stipulated time frame.
Owing to our reputation in the industry, we offer a quality driven range of Effluent Treatment Plant. The treatment plant offered by us is manufactured under the direction of experts using top notch material & modern techniques in adherence with the industry norms. Offered treatment plant is accessible on different specifications in accordance with the customer’s requirements. Our treatment plant is well checked from our side in order to deliver a flawless range in the market.
We are committed towards offering a wide range of Sewage Treatment Plant. Our offered treatment plant is manufactured using cutting edge technology & finest components keeping in pace with industry norms. We are giving this treatment plant on numerous specifications so as to meet the needs of customers. As well, this treatment plant is acknowledged in the market for its reliability & less maintenance.
|
#!/usr/bin/env python
from os.path import exists
from os import stat
from time import strftime, gmtime, time
from bottle import get, abort, response
from common import init
from wellpapp import RawWrapper, raw_exts
def fmttime(t):
    """Format a Unix timestamp as an RFC 1123 style HTTP date string (GMT)."""
    http_date_format = "%a, %d %b %Y %H:%M:%S GMT"
    return strftime(http_date_format, gmtime(t))
def serve(fn, ext):
    """Stream the image file *fn* with image/<ext> headers, or abort with 404.

    Files with a RAW extension are transparently unwrapped to their embedded
    JPEG via RawWrapper; everything else is streamed straight from disk.
    Sets Content-Length, Expires (10 days ahead) and Date headers.
    """
    if not exists(fn):
        abort(404)
    if ext in raw_exts:
        # RawWrapper exposes the embedded JPEG; find its size by seeking to EOF.
        stream = RawWrapper(open(fn, "rb"), True)
        stream.seek(0, 2)
        size = stream.tell()
        stream.seek(0)
        ext = "jpeg"
    else:
        size = stat(fn).st_size
        stream = open(fn, "rb")
    response.content_type = "image/" + ext
    response.set_header("Content-Length", str(size))
    response.set_header("Expires", fmttime(time() + 60*60*24 * 10))
    response.set_header("Date", fmttime(time()))
    return stream
@get("/image/<z>/<m:re:[0-9a-z]{32}>")
def thumb(m, z):
client = init()
if z in ("normal", "large"):
return serve(client.pngthumb_path(m, z), "png")
else:
return serve(client.thumb_path(m, z), "jpeg")
@get("/image/<m:re:[0-9a-z]{32}>.<ext:re:[a-z]{3,4}>")
def r_image(m, ext):
client = init()
return serve(client.image_path(m), ext)
|
A preferred but dry mode of travel, train journey kills the fun of stopping by at intervals and viewing the attractions that the route from Mumbai to Lucknow has to offer. Prior bookings and ticket confirmation worries can make travelling a burden. So book Mumbai to Lucknow cab from Wiwigo.com the day you want to take your journey and forget about confirmations and restrictions!
Buses are another way to travel between Mumbai and Lucknow, but you need to wait for ticket bookings and confirmations and endure extreme weather on bad roads — discomforts made worse by a bus journey — when you could instead take a handpicked cab from Wiwigo.com at a price cheaper than even a Volvo.
Skip the hassles of a public transport journey by choosing a Mumbai to Lucknow taxi from Wiwigo. Make your journey a life event with your loved ones and your privacy safeguarded. Take a handpicked, secure and GPS enabled taxi with registered drivers to give your loved ones that special journey they deserve. What are you waiting for? Book Mumbai to Lucknow taxi NOW! Because, we make travelling an experience to cherish.
|
#!/usr/bin/python
#
# Copyright (c) 2012, Psiphon Inc.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import httplib
import ssl
import binascii
import json
# Make the bundled SocksiPy package importable before anything below uses it.
sys.path.insert(0, 'SocksiPy')
import socks
import socket
# Monkey-patch the socket module so that every socket created from here on
# (including the ones urllib2/httplib create) is a SOCKS-capable socket.
# Placed before the urllib2 import so all web API traffic can be proxied.
socket.socket = socks.socksocket
import urllib2
#
# Psiphon 3 Server API
#
class Psiphon3Server(object):
def __init__(self, servers, propagation_channel_id, sponsor_id, client_version, client_platform):
    """Decode the first server entry and prepare the HTTPS opener.

    servers : list of hex-encoded server entry strings; decoded, an entry is
        a space-separated record:
        "<ip> <web_port> <web_secret> <web_cert> [<json extended config ...>]"
    """
    self.servers = servers
    server_entry = binascii.unhexlify(servers[0]).split(" ")
    (self.ip_address, self.web_server_port, self.web_server_secret,
     self.web_server_certificate) = server_entry[:4]
    # read the new json config element of the server entry, if present
    self.extended_config = None
    if len(server_entry) > 4:
        try:
            # the JSON blob can itself contain spaces -> re-join the tail
            self.extended_config = json.loads(' '.join(server_entry[4:]))
        except Exception:
            # a malformed extended config is tolerated (legacy entries)
            pass
    self.propagation_channel_id = propagation_channel_id
    self.sponsor_id = sponsor_id
    self.client_version = client_version
    self.client_platform = client_platform
    self.handshake_response = None
    # Python 2 idiom: 16 random bytes as a 32-char hex session id
    self.client_session_id = os.urandom(16).encode('hex')
    # clear any default SOCKS proxy until set_socks_proxy() is called
    socks.setdefaultproxy()
    # HTTPS opener that verifies the server cert matches the server entry
    handler = CertificateMatchingHTTPSHandler(self.web_server_certificate)
    self.opener = urllib2.build_opener(handler)
def set_socks_proxy(self, proxy_port):
    """Route all subsequent web API requests through a local SOCKS5 proxy."""
    proxy_host = '127.0.0.1'
    socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, proxy_host, proxy_port)
def _has_extended_config_key(self, key):
if not self.extended_config: return False
return key in self.extended_config
def _has_extended_config_value(self, key):
    """Return True iff *key* exists in the extended config with a usable value:
    a non-empty string, a non-zero int, or any list (even an empty one —
    presumably intentional for 'capabilities'; confirm).

    NOTE(review): Python 2 code — the `unicode` branch is a NameError on
    Python 3.  The or-chain relies on left-to-right short-circuiting, so for
    non-empty `str` values the later type checks are never evaluated.
    """
    if not self._has_extended_config_key(key): return False
    return ((type(self.extended_config[key]) == str and len(self.extended_config[key]) > 0) or
            (type(self.extended_config[key]) == unicode and len(self.extended_config[key]) > 0) or
            (type(self.extended_config[key]) == int and self.extended_config[key] != 0) or
            (type(self.extended_config[key]) == list))
# This will return False if there is not enough information in the server entry to determine
# if the relay protocol is supported.
def relay_not_supported(self, relay_protocol):
if relay_protocol not in ['SSH', 'OSSH']: return True
if self._has_extended_config_value('capabilities'):
return relay_protocol not in self.extended_config['capabilities']
if relay_protocol == 'SSH':
if (self._has_extended_config_key('sshPort') and
not self._has_extended_config_value('sshPort')): return True
elif relay_protocol == 'OSSH':
if (self._has_extended_config_key('sshObfuscatedPort') and
not self._has_extended_config_value('sshObfuscatedPort')): return True
if (self._has_extended_config_key('sshObfuscatedKey') and
not self._has_extended_config_value('sshObfuscatedKey')): return True
else:
return True
return False
def can_attempt_relay_before_handshake(self, relay_protocol):
if relay_protocol not in ['SSH', 'OSSH']: return False
if not self._has_extended_config_value('sshUsername'): return False
if not self._has_extended_config_value('sshPassword'): return False
if not self._has_extended_config_value('sshHostKey'): return False
if relay_protocol == 'SSH':
if not self._has_extended_config_value('sshPort'): return False
elif relay_protocol == 'OSSH':
if not self._has_extended_config_value('sshObfuscatedPort'): return False
if not self._has_extended_config_value('sshObfuscatedKey'): return False
else:
return False
return True
# handshake
# Note that self.servers may be updated with newly discovered servers after a successful handshake
# TODO: upgrade the current server entry if not self.extended_config
# TODO: page view regexes
def handshake(self, relay_protocol):
    """Perform the web API handshake and parse its line-oriented response.

    Sends the list of already-known server IPs (for discovery) and fills
    self.handshake_response; 'Server' lines append newly discovered entries
    to self.servers and 'SSHSessionID' is cached on self.ssh_session_id.
    """
    request_url = (self._common_request_url(relay_protocol) % ('handshake',) + '&' +
        '&'.join(['known_server=%s' % (binascii.unhexlify(server).split(" ")[0],) for server in self.servers]))
    response = self.opener.open(request_url).read()
    # pre-seed every expected key so unknown response lines are ignored
    self.handshake_response = {'Upgrade': '',
                               'SSHPort': '',
                               'SSHUsername': '',
                               'SSHPassword': '',
                               'SSHHostKey': '',
                               'SSHSessionID': '',
                               'SSHObfuscatedPort': '',
                               'SSHObfuscatedKey': '',
                               'PSK': '',
                               'Homepage': []}
    for line in response.split('\n'):
        # each line is "Key: value"; NOTE(review): a line without ': '
        # raises ValueError here — presumably the server never sends one
        key, value = line.split(': ', 1)
        if key in self.handshake_response.keys():
            # list-valued keys (Homepage) accumulate; scalars are overwritten
            if type(self.handshake_response[key]) == list:
                self.handshake_response[key].append(value)
            else:
                self.handshake_response[key] = value
        if key == 'Server':
            # discovery
            if value not in self.servers:
                self.servers.insert(1, value)
        if key == 'SSHSessionID':
            self.ssh_session_id = value
    return self.handshake_response
def get_ip_address(self):
    """Return the server's IP address (from the server entry)."""
    return self.ip_address
def _get_handshake_or_config_value(self, handshake_key, config_key):
    """Shared lookup for the getters below.

    Prefer the value from a completed handshake; otherwise fall back to
    the extended config; otherwise None.
    """
    if self.handshake_response:
        return self.handshake_response[handshake_key]
    if self._has_extended_config_value(config_key):
        return self.extended_config[config_key]
    return None

def get_ssh_port(self):
    """Port for plain SSH relays, or None if unknown."""
    return self._get_handshake_or_config_value('SSHPort', 'sshPort')

def get_username(self):
    """SSH username, or None if unknown."""
    return self._get_handshake_or_config_value('SSHUsername', 'sshUsername')

def get_password(self):
    """Raw SSH password, or None if unknown."""
    return self._get_handshake_or_config_value('SSHPassword', 'sshPassword')

def get_password_for_ssh_authentication(self):
    """Password actually sent to the SSH server: the client session id
    prepended to the raw password."""
    return self.client_session_id + self.get_password()

def get_host_key(self):
    """Expected SSH host key, or None if unknown."""
    return self._get_handshake_or_config_value('SSHHostKey', 'sshHostKey')

def get_obfuscated_ssh_port(self):
    """Port for obfuscated SSH (OSSH) relays, or None if unknown."""
    return self._get_handshake_or_config_value('SSHObfuscatedPort', 'sshObfuscatedPort')

def get_obfuscate_keyword(self):
    """Obfuscation keyword for OSSH, or None if unknown."""
    return self._get_handshake_or_config_value('SSHObfuscatedKey', 'sshObfuscatedKey')
# TODO: download
# connected
# For SSH and OSSH, SSHSessionID from the handshake response is used when session_id is None
# For VPN, the VPN IP Address should be used for session_id (ie. 10.0.0.2)
def connected(self, relay_protocol, session_id=None):
    """Report a successful connection to the web server.

    For SSH/OSSH the handshake's SSHSessionID is used when session_id is
    omitted; for VPN the caller must pass the VPN IP address.
    """
    if not session_id and relay_protocol in ['SSH', 'OSSH']:
        session_id = self.ssh_session_id
    assert session_id is not None
    url = '%s&session_id=%s' % (
        self._common_request_url(relay_protocol) % ('connected',), session_id)
    self.opener.open(url)
# disconnected
# For SSH and OSSH, SSHSessionID from the handshake response is used when session_id is None
# For VPN, this should not be called
def disconnected(self, relay_protocol, session_id=None):
    """Report a disconnect (a 'status' request with connected=0).

    Must not be called for VPN; for SSH/OSSH the handshake's SSHSessionID
    is used when session_id is omitted.
    """
    assert relay_protocol not in ['VPN']
    if not session_id and relay_protocol in ['SSH', 'OSSH']:
        session_id = self.ssh_session_id
    assert session_id is not None
    url = '%s&session_id=%s&connected=0' % (
        self._common_request_url(relay_protocol) % ('status',), session_id)
    self.opener.open(url)
# TODO: failed
# TODO: status
def _common_request_url(self, relay_protocol):
    """Build the base web-request URL; the request name is left as a
    single %s placeholder for the caller to fill in."""
    assert relay_protocol in ['VPN', 'SSH', 'OSSH']
    params = (self.ip_address, self.web_server_port, self.web_server_secret,
              self.propagation_channel_id, self.sponsor_id, self.client_version,
              self.client_platform, relay_protocol, self.client_session_id)
    return ('https://%s:%s/%%s?server_secret=%s&propagation_channel_id=%s'
            '&sponsor_id=%s&client_version=%s&client_platform=%s'
            '&relay_protocol=%s&client_session_id=%s') % params
#
# CertificateMatchingHTTPSHandler
#
# Adapted from CertValidatingHTTPSConnection and VerifiedHTTPSHandler
# http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
#
class InvalidCertificateException(httplib.HTTPException, urllib2.URLError):
    """Raised when a server presents a certificate other than the pinned,
    expected one."""

    def __init__(self, host, cert, reason):
        httplib.HTTPException.__init__(self)
        self.host = host
        self.cert = cert
        self.reason = reason

    def __str__(self):
        return 'Host %s returned an invalid certificate (%s) %s\n' % (
            self.host, self.reason, self.cert)
class CertificateMatchingHTTPSConnection(httplib.HTTPConnection):
    """HTTPS connection that pins a single expected server certificate.

    Instead of CA-chain validation, the peer's certificate is compared
    byte-for-byte (base64 body only) against expected_server_certificate.
    """

    def __init__(self, host, expected_server_certificate, **kwargs):
        httplib.HTTPConnection.__init__(self, host, **kwargs)
        # Expected cert: the base64 body of a PEM, with header/footer
        # lines and newlines stripped (same form as produced in connect()).
        self.expected_server_certificate = expected_server_certificate

    def connect(self):
        sock = socket.create_connection((self.host, self.port))
        # Deliberately no CA validation here: the certificate is pinned
        # and compared manually below.
        self.sock = ssl.wrap_socket(sock)
        cert = ssl.DER_cert_to_PEM_cert(self.sock.getpeercert(True))
        # Remove newlines and -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----
        cert = ''.join(cert.splitlines())[len('-----BEGIN CERTIFICATE-----'):-len('-----END CERTIFICATE-----')]
        if cert != self.expected_server_certificate:
            raise InvalidCertificateException(self.host, cert,
                'server presented the wrong certificate')
class CertificateMatchingHTTPSHandler(urllib2.HTTPSHandler):
    """urllib2 handler that routes HTTPS through
    CertificateMatchingHTTPSConnection (certificate pinning)."""

    def __init__(self, expected_server_certificate):
        # NOTE(review): calls AbstractHTTPHandler.__init__ rather than
        # HTTPSHandler.__init__, bypassing HTTPSHandler-specific setup --
        # presumably intentional (adapted code); confirm.
        urllib2.AbstractHTTPHandler.__init__(self)
        self.expected_server_certificate = expected_server_certificate

    def https_open(self, req):
        # Factory handed to do_open so every connection carries the
        # pinned certificate.
        def http_class_wrapper(host, **kwargs):
            return CertificateMatchingHTTPSConnection(
                host, self.expected_server_certificate, **kwargs)
        try:
            return self.do_open(http_class_wrapper, req)
        except urllib2.URLError, e:
            # An SSLError with code 1 is treated here as a certificate
            # failure and re-raised as InvalidCertificateException.
            if type(e.reason) == ssl.SSLError and e.reason.args[0] == 1:
                raise InvalidCertificateException(req.host, '',
                    e.reason.args[1])
            raise

    https_request = urllib2.HTTPSHandler.do_request_
|
This Emv Möbel Wie S Cdn article is among the best inspirations for home interiors posted on this site. It was posted in its category as an idea and inspiration for remodeling your accessories. This article can serve as your reference when you are unsure how to choose the right decoration for your home accessories. Emv Möbel Wie S Cdn may be your best option for decorating, because having a home with our own design is everyone's dream.
We hope that by posting these Emv Möbel Wie S Cdn ideas, we can fulfill your need for inspiration in designing your accessories. If you need more ideas for designing a home and its interior accessories, you can check our collection right below this post. Also, don't forget to visit media4democracy.com every day to find new and fresh posts about decoration and other inspiration.
|
from threading import RLock
import logging
from datetime import datetime
import collections
import shelve
try:
import cPickle as pickle
except:
import pickle
from opcua import ua
from opcua.server.users import User
class AttributeValue(object):
    """Holds one attribute's value together with its optional callbacks."""

    def __init__(self, value):
        self.value = value
        self.value_callback = None      # optional getter that overrides .value
        self.datachange_callbacks = {}  # handle -> callback

    def __str__(self):
        return "AttributeValue(%s)" % (self.value,)

    __repr__ = __str__
class NodeData(object):
    """Everything the address space stores about a single node."""

    def __init__(self, nodeid):
        self.nodeid = nodeid
        self.attributes = {}   # attribute id -> AttributeValue
        self.references = []   # reference descriptions to other nodes
        self.call = None       # method callback when the node is a Method

    def __str__(self):
        return "NodeData(id:%s, attrs:%s, refs:%s)" % (
            self.nodeid, self.attributes, self.references)

    __repr__ = __str__
class AttributeService(object):
    """Implements the OPC UA Attribute service set (Read / Write)."""

    def __init__(self, aspace):
        self.logger = logging.getLogger(__name__)
        self._aspace = aspace

    def read(self, params):
        """Return one DataValue per ReadValueId in params."""
        self.logger.debug("read %s", params)
        return [self._aspace.get_attribute_value(rv.NodeId, rv.AttributeId)
                for rv in params.NodesToRead]

    def write(self, params, user=User.Admin):
        """Write each requested attribute; returns one StatusCode per item."""
        self.logger.debug("write %s as user %s", params, user)
        results = []
        for wv in params.NodesToWrite:
            if user != User.Admin:
                # Non-admin users may only write the Value attribute, and
                # only when both AccessLevel and UserAccessLevel allow it.
                if wv.AttributeId != ua.AttributeIds.Value:
                    results.append(ua.StatusCode(ua.StatusCodes.BadUserAccessDenied))
                    continue
                al = self._aspace.get_attribute_value(wv.NodeId, ua.AttributeIds.AccessLevel)
                ual = self._aspace.get_attribute_value(wv.NodeId, ua.AttributeIds.UserAccessLevel)
                if not ua.ua_binary.test_bit(al.Value.Value, ua.AccessLevel.CurrentWrite) \
                        or not ua.ua_binary.test_bit(ual.Value.Value, ua.AccessLevel.CurrentWrite):
                    results.append(ua.StatusCode(ua.StatusCodes.BadUserAccessDenied))
                    continue
            results.append(self._aspace.set_attribute_value(wv.NodeId, wv.AttributeId, wv.Value))
        return results
class ViewService(object):
    """Implements the OPC UA View service set: Browse and
    TranslateBrowsePathsToNodeIds over the address space."""

    def __init__(self, aspace):
        self.logger = logging.getLogger(__name__)
        self._aspace = aspace

    def browse(self, params):
        """Return one BrowseResult per BrowseDescription in params."""
        self.logger.debug("browse %s", params)
        res = []
        for desc in params.NodesToBrowse:
            res.append(self._browse(desc))
        return res

    def _browse(self, desc):
        # Collect the node's references that pass the description's
        # direction / reference-type / node-class filters.
        res = ua.BrowseResult()
        if desc.NodeId not in self._aspace:
            res.StatusCode = ua.StatusCode(ua.StatusCodes.BadNodeIdInvalid)
            return res
        node = self._aspace[desc.NodeId]
        for ref in node.references:
            if not self._is_suitable_ref(desc, ref):
                continue
            res.References.append(ref)
        return res

    def _is_suitable_ref(self, desc, ref):
        """Return True when the reference passes every filter in desc."""
        if not self._suitable_direction(desc.BrowseDirection, ref.IsForward):
            self.logger.debug("%s is not suitable due to direction", ref)
            return False
        if not self._suitable_reftype(desc.ReferenceTypeId, ref.ReferenceTypeId, desc.IncludeSubtypes):
            self.logger.debug("%s is not suitable due to type", ref)
            return False
        # A zero NodeClassMask means "no filtering by node class".
        if desc.NodeClassMask and ((desc.NodeClassMask & ref.NodeClass) == 0):
            self.logger.debug("%s is not suitable due to class", ref)
            return False
        self.logger.debug("%s is a suitable ref for desc %s", ref, desc)
        return True

    def _suitable_reftype(self, ref1, ref2, subtypes):
        """Return True if ref2 equals ref1 or, when subtypes is set,
        is among ref1's (recursive) subtypes."""
        if not subtypes and ref2.Identifier == ua.ObjectIds.HasSubtype:
            return False
        if ref1.Identifier == ref2.Identifier:
            return True
        oktypes = self._get_sub_ref(ref1)
        if not subtypes and ua.NodeId(ua.ObjectIds.HasSubtype) in oktypes:
            oktypes.remove(ua.NodeId(ua.ObjectIds.HasSubtype))
        return ref2 in oktypes

    def _get_sub_ref(self, ref):
        # Recursively collect every subtype of the reference-type node ref
        # by following forward HasSubtype references.
        res = []
        nodedata = self._aspace[ref]
        if nodedata is not None:
            for ref in nodedata.references:
                if ref.ReferenceTypeId.Identifier == ua.ObjectIds.HasSubtype and ref.IsForward:
                    res.append(ref.NodeId)
                    res += self._get_sub_ref(ref.NodeId)
        return res

    def _suitable_direction(self, desc, isforward):
        """Match the requested BrowseDirection against IsForward."""
        if desc == ua.BrowseDirection.Both:
            return True
        if desc == ua.BrowseDirection.Forward and isforward:
            return True
        if desc == ua.BrowseDirection.Inverse and not isforward:
            return True
        return False

    def translate_browsepaths_to_nodeids(self, browsepaths):
        """Resolve each BrowsePath; returns one BrowsePathResult per path."""
        self.logger.debug("translate browsepath: %s", browsepaths)
        results = []
        for path in browsepaths:
            results.append(self._translate_browsepath_to_nodeid(path))
        return results

    def _translate_browsepath_to_nodeid(self, path):
        # Walk the relative path element by element from the starting node.
        self.logger.debug("looking at path: %s", path)
        res = ua.BrowsePathResult()
        if path.StartingNode not in self._aspace:
            res.StatusCode = ua.StatusCode(ua.StatusCodes.BadNodeIdInvalid)
            return res
        current = path.StartingNode
        for el in path.RelativePath.Elements:
            nodeid = self._find_element_in_node(el, current)
            if not nodeid:
                res.StatusCode = ua.StatusCode(ua.StatusCodes.BadNoMatch)
                return res
            current = nodeid
        target = ua.BrowsePathTarget()
        target.TargetId = current
        # 0xFFFFFFFF: the entire path was processed.
        target.RemainingPathIndex = 4294967295
        res.Targets = [target]
        return res

    def _find_element_in_node(self, el, nodeid):
        """Return the NodeId of the reference whose BrowseName matches el,
        or None when no reference matches."""
        nodedata = self._aspace[nodeid]
        for ref in nodedata.references:
            # FIXME: here we should check other arguments!!
            if ref.BrowseName == el.TargetName:
                return ref.NodeId
        self.logger.info("element %s was not found in node %s", el, nodeid)
        return None
class NodeManagementService(object):
    """Implements the OPC UA NodeManagement service set: adding and
    deleting nodes and references in the address space."""

    def __init__(self, aspace):
        self.logger = logging.getLogger(__name__)
        self._aspace = aspace

    def add_nodes(self, addnodeitems, user=User.Admin):
        """Add every AddNodesItem; returns one AddNodesResult per item."""
        results = []
        for item in addnodeitems:
            results.append(self._add_node(item, user))
        return results

    def _add_node(self, item, user):
        """Add a single node plus its parent references and type definition."""
        result = ua.AddNodesResult()
        # If Identifier of requested NodeId is null we generate a new NodeId using
        # the namespace of the nodeid, this is an extention of the spec to allow
        # to requests the server to generate a new nodeid in a specified namespace
        if item.RequestedNewNodeId.has_null_identifier():
            self.logger.debug("RequestedNewNodeId has null identifier, generating Identifier")
            nodedata = NodeData(self._aspace.generate_nodeid(item.RequestedNewNodeId.NamespaceIndex))
        else:
            nodedata = NodeData(item.RequestedNewNodeId)
        if nodedata.nodeid in self._aspace:
            self.logger.warning("AddNodesItem: Requested NodeId %s already exists", nodedata.nodeid)
            result.StatusCode = ua.StatusCode(ua.StatusCodes.BadNodeIdExists)
            return result
        if item.ParentNodeId.is_null():
            # Should arguably be an error, but the standard namespace
            # defines many nodes without parents, so it is tolerated.
            pass
        elif item.ParentNodeId not in self._aspace:
            self.logger.warning("add_node: while adding node %s, requested parent node %s does not exists", nodedata.nodeid, item.ParentNodeId)
            result.StatusCode = ua.StatusCode(ua.StatusCodes.BadParentNodeIdInvalid)
            return result
        if not user == User.Admin:
            result.StatusCode = ua.StatusCode(ua.StatusCodes.BadUserAccessDenied)
            return result
        self._add_node_attributes(nodedata, item)
        # now add our node to db
        self._aspace[nodedata.nodeid] = nodedata
        if not item.ParentNodeId.is_null():
            self._add_ref_from_parent(nodedata, item)
            self._add_ref_to_parent(nodedata, item, user)
        # add type definition
        if item.TypeDefinition != ua.NodeId():
            self._add_type_definition(nodedata, item, user)
        result.StatusCode = ua.StatusCode()
        result.AddedNodeId = nodedata.nodeid
        return result

    def _add_node_attributes(self, nodedata, item):
        """Populate the mandatory attributes plus any requested ones."""
        # add common attrs
        nodedata.attributes[ua.AttributeIds.NodeId] = AttributeValue(
            ua.DataValue(ua.Variant(nodedata.nodeid, ua.VariantType.NodeId))
        )
        nodedata.attributes[ua.AttributeIds.BrowseName] = AttributeValue(
            ua.DataValue(ua.Variant(item.BrowseName, ua.VariantType.QualifiedName))
        )
        nodedata.attributes[ua.AttributeIds.NodeClass] = AttributeValue(
            ua.DataValue(ua.Variant(item.NodeClass, ua.VariantType.Int32))
        )
        # add requested attrs
        self._add_nodeattributes(item.NodeAttributes, nodedata)

    def _add_ref_from_parent(self, nodedata, item):
        """Append a forward reference (parent -> new node) to the parent."""
        desc = ua.ReferenceDescription()
        desc.ReferenceTypeId = item.ReferenceTypeId
        desc.NodeId = nodedata.nodeid
        desc.NodeClass = item.NodeClass
        desc.BrowseName = item.BrowseName
        desc.DisplayName = item.NodeAttributes.DisplayName
        desc.TypeDefinition = item.TypeDefinition
        desc.IsForward = True
        self._aspace[item.ParentNodeId].references.append(desc)

    def _add_ref_to_parent(self, nodedata, item, user):
        """Append the inverse reference (new node -> parent)."""
        addref = ua.AddReferencesItem()
        addref.ReferenceTypeId = item.ReferenceTypeId
        addref.SourceNodeId = nodedata.nodeid
        addref.TargetNodeId = item.ParentNodeId
        addref.TargetNodeClass = self._aspace[item.ParentNodeId].attributes[ua.AttributeIds.NodeClass].value.Value.Value
        addref.IsForward = False
        self._add_reference(addref, user)

    def _add_type_definition(self, nodedata, item, user):
        """Append a HasTypeDefinition reference for the new node."""
        addref = ua.AddReferencesItem()
        addref.SourceNodeId = nodedata.nodeid
        addref.IsForward = True
        addref.ReferenceTypeId = ua.NodeId(ua.ObjectIds.HasTypeDefinition)
        addref.TargetNodeId = item.TypeDefinition
        addref.TargetNodeClass = ua.NodeClass.DataType
        self._add_reference(addref, user)

    def delete_nodes(self, deletenodeitems, user=User.Admin):
        """Delete every requested node; returns one StatusCode per item."""
        results = []
        for item in deletenodeitems.NodesToDelete:
            results.append(self._delete_node(item, user))
        return results

    def _delete_node(self, item, user):
        """Delete one node; optionally drop references that point at it."""
        if user != User.Admin:
            return ua.StatusCode(ua.StatusCodes.BadUserAccessDenied)
        if item.NodeId not in self._aspace:
            self.logger.warning("DeleteNodesItem: NodeId %s does not exists", item.NodeId)
            return ua.StatusCode(ua.StatusCodes.BadNodeIdUnknown)
        if item.DeleteTargetReferences:
            for elem in self._aspace.keys():
                # Rebuild the list instead of calling remove() while
                # iterating it: removing during iteration skips the
                # element that follows each removed entry.
                self._aspace[elem].references = [
                    rdesc for rdesc in self._aspace[elem].references
                    if rdesc.NodeId != item.NodeId]
        self._delete_node_callbacks(self._aspace[item.NodeId])
        del(self._aspace[item.NodeId])
        return ua.StatusCode()

    def _delete_node_callbacks(self, nodedata):
        """Notify and unregister datachange callbacks of a deleted node."""
        if ua.AttributeIds.Value in nodedata.attributes:
            for handle, callback in nodedata.attributes[ua.AttributeIds.Value].datachange_callbacks.items():
                try:
                    callback(handle, None, ua.StatusCode(ua.StatusCodes.BadNodeIdUnknown))
                    self._aspace.delete_datachange_callback(handle)
                except Exception as ex:
                    self.logger.exception("Error calling delete node callback callback %s, %s, %s", nodedata, ua.AttributeIds.Value, ex)

    def add_references(self, refs, user=User.Admin):
        """Add every AddReferencesItem; returns one StatusCode per item."""
        result = []
        for ref in refs:
            result.append(self._add_reference(ref, user))
        return result

    def _add_reference(self, addref, user):
        """Append one reference to the source node's reference list."""
        if addref.SourceNodeId not in self._aspace:
            return ua.StatusCode(ua.StatusCodes.BadSourceNodeIdInvalid)
        if addref.TargetNodeId not in self._aspace:
            return ua.StatusCode(ua.StatusCodes.BadTargetNodeIdInvalid)
        if user != User.Admin:
            return ua.StatusCode(ua.StatusCodes.BadUserAccessDenied)
        rdesc = ua.ReferenceDescription()
        rdesc.ReferenceTypeId = addref.ReferenceTypeId
        rdesc.IsForward = addref.IsForward
        rdesc.NodeId = addref.TargetNodeId
        rdesc.NodeClass = addref.TargetNodeClass
        bname = self._aspace.get_attribute_value(addref.TargetNodeId, ua.AttributeIds.BrowseName).Value.Value
        if bname:
            rdesc.BrowseName = bname
        dname = self._aspace.get_attribute_value(addref.TargetNodeId, ua.AttributeIds.DisplayName).Value.Value
        if dname:
            rdesc.DisplayName = dname
        self._aspace[addref.SourceNodeId].references.append(rdesc)
        return ua.StatusCode()

    def delete_references(self, refs, user=User.Admin):
        """Delete every DeleteReferencesItem; returns one StatusCode per item."""
        result = []
        for ref in refs:
            result.append(self._delete_reference(ref, user))
        return result

    def _delete_reference(self, item, user):
        """Delete the reference on the source side and its counterpart on
        the target side.

        Fixes over the previous revision: NodeIds are compared with ==
        instead of identity (`is`), the misspelled `RefrenceTypeId`
        attribute is corrected to `ReferenceTypeId`, the second loop now
        removes from the *target* node's list (it removed from the source
        list while iterating the target list), and both loops iterate
        over copies so remove() cannot skip entries.
        """
        if item.SourceNodeId not in self._aspace:
            return ua.StatusCode(ua.StatusCodes.BadSourceNodeIdInvalid)
        if item.TargetNodeId not in self._aspace:
            return ua.StatusCode(ua.StatusCodes.BadTargetNodeIdInvalid)
        if user != User.Admin:
            return ua.StatusCode(ua.StatusCodes.BadUserAccessDenied)
        for rdesc in list(self._aspace[item.SourceNodeId].references):
            if rdesc.NodeId == item.TargetNodeId:
                if rdesc.ReferenceTypeId != item.ReferenceTypeId:
                    return ua.StatusCode(ua.StatusCodes.BadReferenceTypeIdInvalid)
                if rdesc.IsForward == item.IsForward or item.DeleteBidirectional:
                    self._aspace[item.SourceNodeId].references.remove(rdesc)
        for rdesc in list(self._aspace[item.TargetNodeId].references):
            if rdesc.NodeId == item.SourceNodeId:
                if rdesc.ReferenceTypeId != item.ReferenceTypeId:
                    return ua.StatusCode(ua.StatusCodes.BadReferenceTypeIdInvalid)
                if rdesc.IsForward == item.IsForward or item.DeleteBidirectional:
                    self._aspace[item.TargetNodeId].references.remove(rdesc)
        return ua.StatusCode()

    def _add_node_attr(self, item, nodedata, name, vtype=None):
        """Copy one attribute from the request when its mask bit is set."""
        if item.SpecifiedAttributes & getattr(ua.NodeAttributesMask, name):
            dv = ua.DataValue(ua.Variant(getattr(item, name), vtype))
            dv.ServerTimestamp = datetime.utcnow()
            dv.SourceTimestamp = datetime.utcnow()
            nodedata.attributes[getattr(ua.AttributeIds, name)] = AttributeValue(dv)

    def _add_nodeattributes(self, item, nodedata):
        """Copy every attribute the request's mask declares as present."""
        self._add_node_attr(item, nodedata, "AccessLevel", ua.VariantType.Byte)
        self._add_node_attr(item, nodedata, "ArrayDimensions", ua.VariantType.UInt32)
        self._add_node_attr(item, nodedata, "BrowseName", ua.VariantType.QualifiedName)
        self._add_node_attr(item, nodedata, "ContainsNoLoops", ua.VariantType.Boolean)
        self._add_node_attr(item, nodedata, "DataType", ua.VariantType.NodeId)
        self._add_node_attr(item, nodedata, "Description", ua.VariantType.LocalizedText)
        self._add_node_attr(item, nodedata, "DisplayName", ua.VariantType.LocalizedText)
        self._add_node_attr(item, nodedata, "EventNotifier", ua.VariantType.Byte)
        self._add_node_attr(item, nodedata, "Executable", ua.VariantType.Boolean)
        self._add_node_attr(item, nodedata, "Historizing", ua.VariantType.Boolean)
        self._add_node_attr(item, nodedata, "InverseName", ua.VariantType.LocalizedText)
        self._add_node_attr(item, nodedata, "IsAbstract", ua.VariantType.Boolean)
        self._add_node_attr(item, nodedata, "MinimumSamplingInterval", ua.VariantType.Double)
        self._add_node_attr(item, nodedata, "NodeClass", ua.VariantType.UInt32)
        self._add_node_attr(item, nodedata, "NodeId", ua.VariantType.NodeId)
        self._add_node_attr(item, nodedata, "Symmetric", ua.VariantType.Boolean)
        self._add_node_attr(item, nodedata, "UserAccessLevel", ua.VariantType.Byte)
        self._add_node_attr(item, nodedata, "UserExecutable", ua.VariantType.Boolean)
        self._add_node_attr(item, nodedata, "ValueRank", ua.VariantType.Int32)
        self._add_node_attr(item, nodedata, "WriteMask", ua.VariantType.UInt32)
        # UserWriteMask was previously registered twice (first as Byte,
        # then as UInt32); the second call always won, so only the
        # UInt32 registration is kept.
        self._add_node_attr(item, nodedata, "UserWriteMask", ua.VariantType.UInt32)
        self._add_node_attr(item, nodedata, "Value")
class MethodService(object):
    """Implements the OPC UA Call service: dispatch method calls to the
    callbacks registered on method nodes."""

    def __init__(self, aspace):
        self.logger = logging.getLogger(__name__)
        self._aspace = aspace

    def call(self, methods):
        """Execute each CallMethodRequest; one CallMethodResult per request."""
        return [self._call(method) for method in methods]

    def _call(self, method):
        res = ua.CallMethodResult()
        if method.ObjectId not in self._aspace or method.MethodId not in self._aspace:
            res.StatusCode = ua.StatusCode(ua.StatusCodes.BadNodeIdInvalid)
            return res
        node = self._aspace[method.MethodId]
        if node.call is None:
            # No callback registered on the method node.
            res.StatusCode = ua.StatusCode(ua.StatusCodes.BadNothingToDo)
            return res
        try:
            res.OutputArguments = node.call(method.ObjectId, *method.InputArguments)
            res.InputArgumentResults = [ua.StatusCode() for _ in method.InputArguments]
        except Exception:
            self.logger.exception("Error executing method call %s, an exception was raised: ", method)
            res.StatusCode = ua.StatusCode(ua.StatusCodes.BadUnexpectedError)
        return res
class AddressSpace(object):
    """
    The address space object stores all the nodes of the OPC-UA server
    and helper methods.
    The methods are thread safe
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self._nodes = {}  # nodeid -> NodeData
        self._lock = RLock()  # FIXME: should use multiple reader, one writter pattern
        # Callback handles start above the range used elsewhere.
        self._datachange_callback_counter = 200
        self._handle_to_attribute_map = {}  # handle -> (nodeid, attr)
        self._default_idx = 2  # default namespace index for generated ids
        # Per-namespace counters used by generate_nodeid.
        self._nodeid_counter = {0: 20000, 1: 2000}

    def __getitem__(self, nodeid):
        # Returns None (not KeyError) for unknown nodeids.
        with self._lock:
            if nodeid in self._nodes:
                return self._nodes.__getitem__(nodeid)

    def __setitem__(self, nodeid, value):
        with self._lock:
            return self._nodes.__setitem__(nodeid, value)

    def __contains__(self, nodeid):
        with self._lock:
            return self._nodes.__contains__(nodeid)

    def __delitem__(self, nodeid):
        with self._lock:
            self._nodes.__delitem__(nodeid)

    def generate_nodeid(self, idx=None):
        """Generate a fresh NodeId in namespace idx (default namespace
        when idx is None), skipping ids that already exist."""
        if idx is None:
            idx = self._default_idx
        if idx in self._nodeid_counter:
            self._nodeid_counter[idx] += 1
        else:
            self._nodeid_counter[idx] = 1
        nodeid = ua.NodeId(self._nodeid_counter[idx], idx)
        with self._lock:  # OK since reentrant lock
            while True:
                if nodeid in self._nodes:
                    # Collision with a manually assigned id: recurse to
                    # bump the counter and try the next candidate.
                    nodeid = self.generate_nodeid(idx)
                else:
                    return nodeid

    def keys(self):
        with self._lock:
            return self._nodes.keys()

    def empty(self):
        """
        Delete all nodes in address space
        """
        with self._lock:
            self._nodes = {}

    def dump(self, path):
        """
        Dump address space as binary to file; note that server must be stopped for this method to work
        DO NOT DUMP AN ADDRESS SPACE WHICH IS USING A SHELF (load_aspace_shelf), ONLY CACHED NODES WILL GET DUMPED!
        """
        # prepare nodes in address space for being serialized
        for nodeid, ndata in self._nodes.items():
            # if the node has a reference to a method call, remove it so the object can be serialized
            if ndata.call is not None:
                self._nodes[nodeid].call = None
        with open(path, 'wb') as f:
            pickle.dump(self._nodes, f, pickle.HIGHEST_PROTOCOL)

    def load(self, path):
        """
        Load address space from a binary file, overwriting everything in the current address space
        """
        with open(path, 'rb') as f:
            self._nodes = pickle.load(f)

    def make_aspace_shelf(self, path):
        """
        Make a shelf for containing the nodes from the standard address space; this is typically only done on first
        start of the server. Subsequent server starts will load the shelf, nodes are then moved to a cache
        by the LazyLoadingDict class when they are accessed. Saving data back to the shelf
        is currently NOT supported, it is only used for the default OPC UA standard address space

        Note: Intended for slow devices, such as Raspberry Pi, to greatly improve start up time
        """
        s = shelve.open(path, "n", protocol=pickle.HIGHEST_PROTOCOL)
        for nodeid, ndata in self._nodes.items():
            s[nodeid.to_string()] = ndata
        s.close()

    def load_aspace_shelf(self, path):
        """
        Load the standard address space nodes from a python shelve via LazyLoadingDict as needed.
        The dump() method can no longer be used if the address space is being loaded from a shelf

        Note: Intended for slow devices, such as Raspberry Pi, to greatly improve start up time
        """
        # NOTE(review): collections.MutableMapping moved to collections.abc
        # in Python 3.3 and was removed from the collections top level in
        # 3.10 -- confirm the range of Python versions this must support.
        class LazyLoadingDict(collections.MutableMapping):
            """
            Special dict that only loads nodes as they are accessed. If a node is accessed it gets copied from the
            shelve to the cache dict. All user nodes are saved in the cache ONLY. Saving data back to the shelf
            is currently NOT supported
            """

            def __init__(self, source):
                self.source = source  # python shelf
                self.cache = {}  # internal dict

            def __getitem__(self, key):
                # try to get the item (node) from the cache, if it isn't there get it from the shelf
                try:
                    return self.cache[key]
                except KeyError:
                    node = self.cache[key] = self.source[key.to_string()]
                    return node

            def __setitem__(self, key, value):
                # add a new item to the cache; if this item is in the shelf it is not updated
                self.cache[key] = value

            def __contains__(self, key):
                return key in self.cache or key.to_string() in self.source

            def __delitem__(self, key):
                # only deleting items from the cache is allowed
                del self.cache[key]

            def __iter__(self):
                # only the cache can be iterated over
                return iter(self.cache.keys())

            def __len__(self):
                # only returns the length of items in the cache, not unaccessed items in the shelf
                return len(self.cache)

        self._nodes = LazyLoadingDict(shelve.open(path, "r"))

    def get_attribute_value(self, nodeid, attr):
        """Return the attribute's DataValue, or a DataValue carrying an
        error StatusCode when the node or attribute is unknown."""
        with self._lock:
            self.logger.debug("get attr val: %s %s", nodeid, attr)
            if nodeid not in self._nodes:
                dv = ua.DataValue()
                dv.StatusCode = ua.StatusCode(ua.StatusCodes.BadNodeIdUnknown)
                return dv
            node = self._nodes[nodeid]
            if attr not in node.attributes:
                dv = ua.DataValue()
                dv.StatusCode = ua.StatusCode(ua.StatusCodes.BadAttributeIdInvalid)
                return dv
            attval = node.attributes[attr]
            # A registered value callback overrides the stored value.
            if attval.value_callback:
                return attval.value_callback()
            return attval.value

    def set_attribute_value(self, nodeid, attr, value):
        """Store a new DataValue for the attribute and fire datachange
        callbacks when the value actually changed."""
        with self._lock:
            self.logger.debug("set attr val: %s %s %s", nodeid, attr, value)
            if nodeid not in self._nodes:
                return ua.StatusCode(ua.StatusCodes.BadNodeIdUnknown)
            node = self._nodes[nodeid]
            if attr not in node.attributes:
                return ua.StatusCode(ua.StatusCodes.BadAttributeIdInvalid)
            # Fill in missing timestamps so stored values are always stamped.
            if not value.SourceTimestamp:
                value.SourceTimestamp = datetime.utcnow()
            if not value.ServerTimestamp:
                value.ServerTimestamp = datetime.utcnow()
            attval = node.attributes[attr]
            old = attval.value
            attval.value = value
            cbs = []
            if old.Value != value.Value:  # only send call callback when a value change has happend
                cbs = list(attval.datachange_callbacks.items())
            for k, v in cbs:
                try:
                    v(k, value)
                except Exception as ex:
                    self.logger.exception("Error calling datachange callback %s, %s, %s", k, v, ex)
            return ua.StatusCode()

    def add_datachange_callback(self, nodeid, attr, callback):
        """Register a datachange callback; returns (StatusCode, handle).
        The handle is 0 when registration failed."""
        with self._lock:
            self.logger.debug("set attr callback: %s %s %s", nodeid, attr, callback)
            if nodeid not in self._nodes:
                return ua.StatusCode(ua.StatusCodes.BadNodeIdUnknown), 0
            node = self._nodes[nodeid]
            if attr not in node.attributes:
                return ua.StatusCode(ua.StatusCodes.BadAttributeIdInvalid), 0
            attval = node.attributes[attr]
            self._datachange_callback_counter += 1
            handle = self._datachange_callback_counter
            attval.datachange_callbacks[handle] = callback
            self._handle_to_attribute_map[handle] = (nodeid, attr)
            return ua.StatusCode(), handle

    def delete_datachange_callback(self, handle):
        """Unregister a datachange callback; unknown handles are ignored."""
        with self._lock:
            if handle in self._handle_to_attribute_map:
                nodeid, attr = self._handle_to_attribute_map.pop(handle)
                self._nodes[nodeid].attributes[attr].datachange_callbacks.pop(handle)

    def add_method_callback(self, methodid, callback):
        """Attach the callable invoked when the method node is called."""
        with self._lock:
            node = self._nodes[methodid]
            node.call = callback
|
An avocado toast for the purist.
It’s great to see Porter, the new coffee and sandwich shop at the former Milwaukee Road Depot on West Washington Avenue, crowded with customers. The area has languished since Crandall’s (and then a succession of failed restaurants) tried to make a go of it in the dramatic, high-ceilinged lobby back in the 1990s. Over the decades bike shops have had more success there.
Porter is just the first element in an ambitious reimagining of the baggage area at the former depot, overseen by Gilbert Altschul (Grampa’s Pizzeria, Gib’s Bar). It will be followed by public market-style vendor stalls and a taco eatery called Bandit. In the meantime, though, the people are finding Porter despite an out-of-sight, out-of-mind location. Sandwich boards point customers around an unobvious corner to its actual door.
That people are flocking here is all the more surprising since this is not a “settle in, snuggle up and relax” kind of coffee shop. There are no couches or overstuffed chairs. There’s a large-ish square table ringed with backless stools where it’s easier to set up solo, hunched over a laptop, than it is to have a leisurely face-to-face with a friend. There are more stools set up along a narrow ledge — this faces a wall and seems designed more for a quick toss-back of your single-origin espresso than a long stay. Overflow seating in a breezeway between Porter and the Motorless Motion bike shop has a few cafe tables and smells of the rubber of bike tires. What better hipster cred?
Coffee beans are from Counter Culture, and there are decent pour-overs in three sizes. Sweet matcha and chai lattes are about as frou-frou as the drinks get; Porter is more of a cortado kinda place.
The breakfast menu is streamlined, starring a liege waffle, three versions of portable eggs (the Porter Pocket, an egg sandwich and the breakfast bocadillo) and avocado toast, plus a yogurt parfait. The avocado toast features big slices of just-ripe avocado, a generous squeeze of fresh lemon and salt, with beauty-heart radishes fanned out across the top for a pop of color and a bit more texture. It’s pretty, but the rustic bread is dry and needs something to integrate it with the toppings. The $2 ’nduja add-on might do the trick. Yet avocado toast is the simplest of dishes (and as such an odd subject for a trend), so for avocado toast purists, this is an unwavering exemplar of the style.
The Porter Pocket belies its small size — it’s rich and buttery, like a spanakopita stuffed with bacon and scrambled eggs instead of spinach and feta. The generous amount of the crumbly, meaty bacon makes this a heavy little breakfast.
The two-egg-omelet bocadillo (a Spanish-inspired sandwich), on an excellent baguette, was filled with thin egg, a smear of raclette and a layer of arugula, but not a lot of flavor got out from under the baguette.
Lunch is popular, too. Although the shop’s Facebook page shows a pile of pre-made sandwiches wrapped in brown butcher paper, most sandwiches will be made fresh in the spacious open kitchen.
The Heritage Ham is almost a great sandwich — with Dan Fox’s richly smoked ham at the heart of it, it doesn’t need a lot of adornment. The sweet-ish challah roll would be a brilliant counterpoint to the salty ham had it not been for the overapplication of a vinegary Spanish (yellow) mustard, which both overpowered the meat/bread flavor matchup and made the bun fall apart.
The soppressata panini, on a crisp sesame baguette, was almost perfect, though the artichoke tapenade coupled with the soppressata (from Underground Meats) creates a salt overload — fuses blown. There’s better balance in the roast beef sandwich (pickled red onions and horseradish mayo are the standout accoutrements here) and the veggie sandwich (a fennel aioli and plenty of arugula play against salt-roasted tomatoes and artichokes).
Sandwiches run $8-$9; half of any sandwich with a cup of the housemade minestrone is $8 and is a very popular option.
Or come for happy hour. Wine, bottles of beer, hot toddies and even Pok Pok sodas are on special, and new menu items might be in rehearsal.
Porter has just revamped its original menu, which had been divided into breakfast and lunch, to just “Eats,” with any item available during open hours. The one salad has disappeared; more grab-and-go items are promised. Smart changes.
|
# -*- coding: UTF-8 -*-
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# Copyright (C) 2016 - 2018 Simon Stuerz <simon.stuerz@guh.io> #
# #
# This file is part of nymea-cli. #
# #
# nymea-cli is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, version 2 of the License. #
# #
# nymea-cli is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with nymea-cli. If not, see <http://www.gnu.org/licenses/>. #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import nymea
import selector
import time
import datetime
def createTimeDescriptor():
    """Interactively build a time descriptor dict.

    Asks the user which kind of time items to create, then collects one
    or more items and returns either {'calendarItems': [...]} or
    {'timeEventItems': [...]}.
    """
    print("\n========================================================")
    print("Create time descriptor\n")
    descriptor = {}
    choices = ["Create calendar items", "Create time event items"]
    choice = nymea.get_selection("Which kind of time items do you want to create?", choices)
    if choices[choice] == "Create calendar items":
        items = []
        while True:
            items.append(createCalendarItem())
            answer = raw_input("Do you want to add another calendar item? (y/N): ")
            if answer != "y":
                break
        descriptor['calendarItems'] = items
    else:
        items = []
        while True:
            items.append(createTimeEventItem())
            answer = raw_input("Do you want to add another time event item? (y/N): ")
            if answer != "y":
                break
        descriptor['timeEventItems'] = items
    return descriptor
def createTimeEventItem():
    """Interactively build a single time event item dict.

    The item carries either a unix 'datetime' (with optional yearly
    repeating) or a 'time' string "hh:mm" (with optional repeating
    option).
    """
    print("\n========================================================")
    print("Create time event item\n")
    item = {}
    repeat_question = "Do you want to define a repeating option for this time event item?"
    if selector.getYesNoSelection("Do you want a time event for a certain date and time?"):
        timeString = raw_input("Please enter unix time for this time event (\"DD.MM.YYYY hh:mm\"): ")
        item['datetime'] = int(time.mktime(time.strptime(timeString, "%d.%m.%Y %H:%M")))
        if selector.getYesNoSelection(repeat_question):
            item['repeating'] = createRepeatingOption(True)
    else:
        item['time'] = raw_input("Please enter the time for this time event (\"hh:mm\"): ")
        if selector.getYesNoSelection(repeat_question):
            item['repeating'] = createRepeatingOption()
    return item
def createCalendarItem():
    """Interactively build a single calendar item dict.

    The item carries either a unix 'datetime' (with optional yearly
    repeating) or a 'startTime' string "hh:mm" (with optional repeating
    option), plus a 'duration' in minutes.
    """
    print("\n========================================================")
    print("Create calendar item\n")
    item = {}
    repeat_question = "Do you want to define a repeating option for this calendar item?"
    if selector.getYesNoSelection("Do you want a calendar entry for a certain date and time?"):
        timeString = raw_input("Please enter unix time for this calendar item (\"DD.MM.YYYY hh:mm\"): ")
        item['datetime'] = int(time.mktime(time.strptime(timeString, "%d.%m.%Y %H:%M")))
        if selector.getYesNoSelection(repeat_question):
            item['repeating'] = createRepeatingOption(True)
    else:
        item['startTime'] = raw_input("Please enter the start time of this calendar item (\"hh:mm\"): ")
        if selector.getYesNoSelection(repeat_question):
            item['repeating'] = createRepeatingOption()
    print("\n========================================================")
    item['duration'] = int(raw_input("duration of the calendar item (\"minutes\") = "))
    print(item)
    return item
def createRepeatingOption(forDateTime = False):
    """Interactively build a repeating option dict.

    With forDateTime=True only yearly repeating is possible; otherwise
    the user picks hourly/daily/weekly/monthly/yearly. Weekly and
    monthly modes additionally ask for 'weekDays' [1-7] / 'monthDays'
    [1-31]. Selecting "yearly" intentionally leaves the dict empty
    (no 'mode' key), matching the original behavior.
    """
    print("\n========================================================")
    print("Create repeating option\n")
    repeatingOption = {}
    if forDateTime:
        # Only one possible choice here; the prompt is still shown for a
        # consistent flow, and its (always 0) result is ignored.
        options = ["Repeat yearly"]
        selection = nymea.get_selection("Please select the repeating mode:", options)
        repeatingOption['mode'] = "RepeatingModeYearly"
        print(repeatingOption)
        return repeatingOption
    options = ["0. Repeat hourly",
               "1. Repeat daily",
               "2. Repeat weekly",
               "3. Repeat monthly",
               "4. Repeat yearly"]
    selection = nymea.get_selection("Please select the repeating mode:", options)
    # BUGFIX/idiom: compare integers with '==' instead of 'is'; identity
    # checks on small ints only work due to CPython's int caching.
    if selection == 0:
        repeatingOption['mode'] = "RepeatingModeHourly"
    elif selection == 1:
        repeatingOption['mode'] = "RepeatingModeDaily"
    elif selection == 2:
        repeatingOption['mode'] = "RepeatingModeWeekly"
        weekDaysString = raw_input("Please enter the list of week days (space separated [1-7]): ")
        repeatingOption['weekDays'] = [int(weekDay) for weekDay in weekDaysString.split()]
    elif selection == 3:
        repeatingOption['mode'] = "RepeatingModeMonthly"
        monthDaysString = raw_input("Please enter the list of month days (space separated [1-31]): ")
        repeatingOption['monthDays'] = [int(monthDay) for monthDay in monthDaysString.split()]
    print(repeatingOption)
    return repeatingOption
def printTimeDescriptor(timeDescriptor):
if 'calendarItems' in timeDescriptor:
printCalendarItems(timeDescriptor['calendarItems'])
if 'timeEventItems' in timeDescriptor:
printTimeEventItems(timeDescriptor['timeEventItems'])
def printCalendarItems(calendarItems):
    """Print one human readable summary line per calendar item.

    A calendar item carries either a unix 'datetime' (optionally
    repeating yearly) or a 'startTime' "hh:mm" with an optional
    'repeating' option; 'duration' is given in minutes.
    """
    # idiom: enumerate instead of indexing via range(len(...))
    for i, calendarItem in enumerate(calendarItems):
        if 'datetime' in calendarItem and calendarItem['datetime'] != 0:
            timeStamp = int(calendarItem['datetime'])
            endStamp = timeStamp + int(calendarItem['duration']) * 60
            if 'repeating' in calendarItem:
                # repeating date/time items recur every year -> omit the year
                startTime = datetime.datetime.fromtimestamp(timeStamp).strftime("%d.%m %H:%M")
                endTime = datetime.datetime.fromtimestamp(endStamp).strftime("%d.%m %H:%M")
                print("%5s. -> Every year from %s \n %37s" % (i, startTime, endTime))
            else:
                startTime = datetime.datetime.fromtimestamp(timeStamp).strftime("%d.%m.%Y %H:%M")
                endTime = datetime.datetime.fromtimestamp(endStamp).strftime("%d.%m.%Y %H:%M")
                print("%5s. -> From %s \n %30s" % (i, startTime, endTime))
        elif 'startTime' in calendarItem:
            if 'repeating' in calendarItem:
                repeatingOption = calendarItem['repeating']
                mode = repeatingOption['mode']
                # the modes are mutually exclusive strings -> elif chain
                if mode == "RepeatingModeHourly":
                    print("%5s. -> Every hour at %s for %s minutes." % (i, calendarItem['startTime'], calendarItem['duration']))
                elif mode == "RepeatingModeDaily":
                    print("%5s. -> Every day at %s for %s minutes." % (i, calendarItem['startTime'], calendarItem['duration']))
                elif mode == "RepeatingModeWeekly":
                    print("%5s. -> Every week at %s for %s minutes on following week days:" % (i, calendarItem['startTime'], calendarItem['duration']))
                    printWeekDays(repeatingOption)
                elif mode == "RepeatingModeMonthly":
                    print("%5s. -> Every month at %s for %s minutes on following month days:" % (i, calendarItem['startTime'], calendarItem['duration']))
                    print("%22s" % repeatingOption['monthDays'])
            else:
                # without a repeating option a plain start time means daily
                print("%5s. -> Every day at %s for %s minutes." % (i, calendarItem['startTime'], calendarItem['duration']))
        else:
            # BUGFIX: the original printed the undefined name 'timeEventItem'
            # here (NameError); dump the unrecognized calendar item instead.
            print(calendarItem)
def printTimeEventItems(timeEventItems):
    """Print one human readable summary line per time event item.

    A time event item carries either a unix 'datetime' (optionally
    repeating yearly) or a 'time' "hh:mm" with an optional 'repeating'
    option.
    """
    # idiom: enumerate instead of indexing via range(len(...))
    for i, timeEventItem in enumerate(timeEventItems):
        if 'datetime' in timeEventItem and timeEventItem['datetime'] != 0:
            timeStamp = int(timeEventItem['datetime'])
            if 'repeating' in timeEventItem:
                # repeating date/time events recur every year -> omit the year
                eventTime = datetime.datetime.fromtimestamp(timeStamp).strftime("%d.%m %H:%M")
                print("%5s. -> Every year at %s" % (i, eventTime))
            else:
                eventTime = datetime.datetime.fromtimestamp(timeStamp).strftime("%d.%m.%Y %H:%M")
                print("%5s. -> Trigger at %s" % (i, eventTime))
        elif 'time' in timeEventItem:
            if 'repeating' in timeEventItem:
                repeatingOption = timeEventItem['repeating']
                mode = repeatingOption['mode']
                # the modes are mutually exclusive strings -> elif chain
                if mode == "RepeatingModeHourly":
                    print("%5s. -> Every hour at %s." % (i, timeEventItem['time']))
                elif mode == "RepeatingModeDaily":
                    print("%5s. -> Every day at %s." % (i, timeEventItem['time']))
                elif mode == "RepeatingModeWeekly":
                    print("%5s. -> Every week at %s on following week days:" % (i, timeEventItem['time']))
                    printWeekDays(repeatingOption)
                elif mode == "RepeatingModeMonthly":
                    print("%5s. -> Every month at %s on following month days:" % (i, timeEventItem['time']))
                    print("%22s" % repeatingOption['monthDays'])
            else:
                # without a repeating option a plain time means daily
                print("%5s. -> Every day at %s." % (i, timeEventItem['time']))
        else:
            # unrecognized item: dump it as-is for debugging
            print(timeEventItem)
def printWeekDays(repeatingOption):
    """Print a one-line checkbox overview (Mo..Su) of the selected week days.

    Week days are numbered 1 (Monday) .. 7 (Sunday); a selected day is
    marked with '#', an unselected one with a blank.
    """
    labels = ("Mo", "Tu", "We", "Th", "Fr", "Sa", "Su")
    selected = repeatingOption['weekDays']
    # build e.g. "Mo[#] Tu[ ] ... Su[ ]" instead of the hand-written
    # 28-line if/else chain of the original
    parts = []
    for dayNumber, label in enumerate(labels, 1):
        mark = "#" if dayNumber in selected else " "
        parts.append("%s[%s]" % (label, mark))
    print(" %s" % " ".join(parts))
|
.TH EXT4 5 "@E2FSPROGS_MONTH@ @E2FSPROGS_YEAR@" "E2fsprogs version @E2FSPROGS_VERSION@"
historically been the default file system for many Linux distributions.
to handle mount requests for ext2 and ext3 file systems.
of e2fsprogs will not support file systems with this ext4 feature enabled.
This ext4 feature allows more than 65000 subdirectories per directory.
dm-crypt may provide much better security.
supported by ext2, ext3, and ext4.
entries. This feature is supported by ext2, ext3, and ext4.
.BR mke2fs " or " tune2fs.
file system which uses it.
kernels from mounting file systems that they could not understand.
available in the resize inode.
in the older quota design) to be hidden inodes.
the project ID of inode will be managed when the filesystem is mounted.
#4 for group quota) and set them in the superblock.
automatically when the filesystem is mounted.
contiguous blocks available for data files.
to create a file system without initializing all of the block groups.
to create the file system.
.SH "Mount options for ext2"
The `ext2' filesystem is the standard Linux filesystem.
l c r c c l.
No checking is done at mount time. This is the default. This is fast.
don't have to be supported if ext4 kernel driver is used for ext2 and ext3 filesystems.
Define the behavior when an error is encountered.
These options define what group id a newly created file gets.
if it is a directory itself.
to actually enable and manage the quota system.
kernels which only store and expect 16-bit values.
These options determine who can use the reserved blocks.
as superblock. This could be useful when the filesystem has been damaged.
block 32768 on a filesystem with 4\ k blocks, use "sb=131072".
.SH "Mount options for ext3"
in devnum, or via a path to the device.
lead to any number of problems.
Specifies the journaling mode for file data. Metadata is always journaled.
system prior to its metadata being committed to the journal.
filesystem after its metadata has been committed to the journal.
in files after a crash and journal recovery.
.BR barrier=0 " / " barrier=1 "
seconds. The default value is 5 seconds. Zero means default.
advantage that even after a crash no quota check is required.
.SH "Mount options for ext4"
are backwardly compatible with ext3 or ext2.
compatible change and will be ignored by older kernels.
enabled older kernels cannot mount the device.
This will enable 'journal_checksum' internally.
ext4's inode table readahead algorithm will pre-read into the buffer cache.
The value must be a power of 2. The default value is 32 blocks.
RAID chunk size in filesystem blocks.
takes to finish committing a transaction. Call this time the "commit time".
fast disks, at the cost of increasing latency.
used for I/O operations submitted by kjournald2 during a commit operation.
remounting a filesystem which is already mounted.
crashes before the delayed allocation blocks are forced to disk.
then be deferred until the next time the filesystem is mounted.
system performance while the filesystem's inode table is being initialized.
purposes and since it negatively affects the performance, it is off by default.
before buffer write and convert the extent to initialized after IO completes.
journaling and dioread_nolock option will be ignored with kernel warning.
Note that dioread_nolock code path is only used for extent-based files.
.BR a " - append only"
.BR A " - no atime updates"
.BR d " - no dump"
.BR D " - synchronous directory updates"
.BR i " - immutable"
.BR S " - synchronous updates"
.BR u " - undeletable"
.BR j " - data journaling"
.BR e " - extents format"
in particular the kernel versions in certain "enterprise distributions"
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import object
from qgis.PyQt.QtWidgets import QApplication
from .html_elems import HtmlContent, HtmlSection, HtmlParagraph, HtmlList, HtmlTable, HtmlTableHeader, HtmlTableCol
class DatabaseInfo(object):
    """Collects general, connection, spatial and privilege information
    about a database connection and renders it as HTML."""

    def __init__(self, db):
        self.db = db

    def __del__(self):
        self.db = None

    def generalInfo(self):
        """Return an HtmlTable with general server information."""
        info = self.db.connector.getInfo()
        tbl = [
            (QApplication.translate("DBManagerPlugin", "Server version: "), info[0])
        ]
        return HtmlTable(tbl)

    def connectionDetails(self):
        """Return an HtmlTable describing the current connection."""
        tbl = [
            (QApplication.translate("DBManagerPlugin", "Host:"), self.db.connector.host),
            (QApplication.translate("DBManagerPlugin", "User:"), self.db.connector.user)
        ]
        return HtmlTable(tbl)

    def spatialInfo(self):
        """Return a list of HTML elements describing spatial support,
        or None when no spatial info is available."""
        info = self.db.connector.getSpatialInfo()
        if info is None:
            return None

        tbl = [
            (QApplication.translate("DBManagerPlugin", "Library:"), info[0]),
            ("GEOS:", info[1]),
            ("Proj:", info[2])
        ]
        ret = [HtmlTable(tbl)]

        if not self.db.connector.has_geometry_columns:
            ret.append(HtmlParagraph(
                QApplication.translate("DBManagerPlugin", "<warning> geometry_columns table doesn't exist!\n"
                                                          "This table is essential for many GIS applications for enumeration of tables.")))

        return ret

    def privilegesDetails(self):
        """Return an HtmlList of the current user's database privileges."""
        details = self.db.connector.getDatabasePrivileges()
        lst = []
        if details[0]:
            lst.append(QApplication.translate("DBManagerPlugin", "create new schemas"))
        if details[1]:
            lst.append(QApplication.translate("DBManagerPlugin", "create temporary tables"))
        return HtmlList(lst)

    def toHtml(self):
        """Render all sections as a single HTML string."""
        if self.db is None:
            return HtmlSection(QApplication.translate("DBManagerPlugin", 'Not connected')).toHtml()

        ret = []

        # connection details
        # idiom: the original used "if x is None: pass else: ..." --
        # inverted to a plain "is not None" check throughout.
        conn_details = self.connectionDetails()
        if conn_details is not None:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Connection details'), conn_details))

        # database information
        general_info = self.generalInfo()
        if general_info is not None:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'General info'), general_info))

        # has spatial enabled?
        spatial_info = self.spatialInfo()
        if spatial_info is not None:
            typename = self.db.connection().typeNameString()
            spatial_info = HtmlContent(spatial_info)
            if not spatial_info.hasContents():
                spatial_info = QApplication.translate("DBManagerPlugin", '<warning> {0} support not enabled!').format(typename)
            ret.append(HtmlSection(typename, spatial_info))

        # privileges
        priv_details = self.privilegesDetails()
        if priv_details is not None:
            priv_details = HtmlContent(priv_details)
            if not priv_details.hasContents():
                priv_details = QApplication.translate("DBManagerPlugin", '<warning> This user has no privileges!')
            else:
                priv_details = [QApplication.translate("DBManagerPlugin", "User has privileges:"), priv_details]
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Privileges'), priv_details))

        return HtmlContent(ret).toHtml()
class SchemaInfo(object):
    """Collects details and privileges of a database schema and renders
    them as HTML."""

    def __init__(self, schema):
        self.schema = schema

    def __del__(self):
        self.schema = None

    def generalInfo(self):
        """Return an HtmlTable with owner/comment details of the schema."""
        tbl = [
            # ("Tables:", self.schema.tableCount)
        ]
        if self.schema.owner:
            tbl.append((QApplication.translate("DBManagerPlugin", "Owner:"), self.schema.owner))
        if self.schema.comment:
            tbl.append((QApplication.translate("DBManagerPlugin", "Comment:"), self.schema.comment))
        return HtmlTable(tbl)

    def privilegesDetails(self):
        """Return an HtmlList of the current user's schema privileges."""
        details = self.schema.database().connector.getSchemaPrivileges(self.schema.name)
        lst = []
        if details[0]:
            lst.append(QApplication.translate("DBManagerPlugin", "create new objects"))
        if details[1]:
            lst.append(QApplication.translate("DBManagerPlugin", "access objects"))
        return HtmlList(lst)

    def toHtml(self):
        """Render the schema information as a single HTML string."""
        ret = []

        # idiom: replaced the original "if x is None: pass else: ..."
        # with a direct "is not None" check.
        general_info = self.generalInfo()
        if general_info is not None:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Schema details'), general_info))

        priv_details = self.privilegesDetails()
        if priv_details is not None:
            priv_details = HtmlContent(priv_details)
            if not priv_details.hasContents():
                priv_details = QApplication.translate("DBManagerPlugin",
                                                      '<warning> This user has no privileges to access this schema!')
            else:
                priv_details = [QApplication.translate("DBManagerPlugin", "User has privileges:"), priv_details]
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Privileges'), priv_details))

        return HtmlContent(ret).toHtml()
class TableInfo(object):
    """Collects general, field, constraint, index and trigger information
    about a database table (or view) and renders it as HTML.

    Spatial details are provided by the Vector/Raster subclasses."""

    def __init__(self, table):
        self.table = table

    def __del__(self):
        self.table = None

    def generalInfo(self):
        """Return an HtmlTable with relation type, row count and comment."""
        if self.table.rowCount is None:
            # row count information is not displayed yet, so just block
            # table signals to avoid double refreshing (infoViewer->refreshRowCount->tableChanged->infoViewer)
            self.table.blockSignals(True)
            self.table.refreshRowCount()
            self.table.blockSignals(False)

        tbl = [
            (QApplication.translate("DBManagerPlugin", "Relation type:"),
             QApplication.translate("DBManagerPlugin", "View") if self.table.isView else QApplication.translate(
                 "DBManagerPlugin", "Table")),
            (QApplication.translate("DBManagerPlugin", "Rows:"),
             self.table.rowCount if self.table.rowCount is not None else QApplication.translate("DBManagerPlugin",
                                                                                                'Unknown (<a href="action:rows/count">find out</a>)'))
        ]
        if self.table.comment:
            tbl.append((QApplication.translate("DBManagerPlugin", "Comment:"), self.table.comment))

        return HtmlTable(tbl)

    def spatialInfo(self):  # implemented in subclasses
        return None

    def fieldsDetails(self):
        """Return an HtmlTable listing the table's fields."""
        tbl = []

        # define the table header
        header = (
            "#", QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Type"),
            QApplication.translate("DBManagerPlugin", "Null"), QApplication.translate("DBManagerPlugin", "Default"))
        tbl.append(HtmlTableHeader(header))

        # add table contents
        for fld in self.table.fields():
            is_null_txt = "N" if fld.notNull else "Y"

            # make primary key field underlined
            attrs = {"class": "underline"} if fld.primaryKey else None
            name = HtmlTableCol(fld.name, attrs)

            tbl.append((fld.num, name, fld.type2String(), is_null_txt, fld.default2String()))

        return HtmlTable(tbl, {"class": "header"})

    def constraintsDetails(self):
        """Return an HtmlTable of constraints, or None when there are none."""
        # perf: call the getter once instead of three times
        constraints = self.table.constraints()
        if constraints is None or len(constraints) <= 0:
            return None

        tbl = []

        # define the table header
        header = (QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Type"),
                  QApplication.translate("DBManagerPlugin", "Column(s)"))
        tbl.append(HtmlTableHeader(header))

        # add table contents
        for con in constraints:
            # get the fields the constraint is defined on
            cols = [p[1].name if p[1] is not None else u"??? (#%d)" % p[0] for p in iter(list(con.fields().items()))]
            tbl.append((con.name, con.type2String(), u'\n'.join(cols)))

        return HtmlTable(tbl, {"class": "header"})

    def indexesDetails(self):
        """Return an HtmlTable of indexes, or None when there are none."""
        # perf: call the getter once instead of three times
        indexes = self.table.indexes()
        if indexes is None or len(indexes) <= 0:
            return None

        tbl = []

        # define the table header
        header = (
            QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Column(s)"))
        tbl.append(HtmlTableHeader(header))

        # add table contents
        for idx in indexes:
            # get the fields the index is defined on
            cols = [p[1].name if p[1] is not None else u"??? (#%d)" % p[0] for p in iter(list(idx.fields().items()))]
            tbl.append((idx.name, u'\n'.join(cols)))

        return HtmlTable(tbl, {"class": "header"})

    def triggersDetails(self):
        """Return an HtmlTable of triggers, or None when there are none."""
        # perf: call the getter once instead of three times
        triggers = self.table.triggers()
        if triggers is None or len(triggers) <= 0:
            return None

        tbl = []

        # define the table header
        header = (
            QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Function"))
        tbl.append(HtmlTableHeader(header))

        # add table contents
        for trig in self.table.triggers():
            name = u'%(name)s (<a href="action:trigger/%(name)s/%(action)s">%(action)s</a>)' % {"name": trig.name,
                                                                                                "action": "delete"}
            tbl.append((name, trig.function))

        return HtmlTable(tbl, {"class": "header"})

    def getViewDefinition(self):
        """Return the view's SQL definition, or None for plain tables."""
        if not self.table.isView:
            return None
        return self.table.database().connector.getViewDefinition((self.table.schemaName(), self.table.name))

    def getTableInfo(self):
        """Collect all available HTML sections for a table."""
        ret = []

        # idiom: the original used "if x is None: pass else: ..." --
        # inverted to a direct "is not None" check throughout.
        general_info = self.generalInfo()
        if general_info is not None:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'General info'), general_info))

        # spatial info
        spatial_info = self.spatialInfo()
        if spatial_info is not None:
            spatial_info = HtmlContent(spatial_info)
            if not spatial_info.hasContents():
                spatial_info = QApplication.translate("DBManagerPlugin", '<warning> This is not a spatial table.')
            ret.append(HtmlSection(self.table.database().connection().typeNameString(), spatial_info))

        # fields
        fields_details = self.fieldsDetails()
        if fields_details is not None:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Fields'), fields_details))

        # constraints
        constraints_details = self.constraintsDetails()
        if constraints_details is not None:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Constraints'), constraints_details))

        # indexes
        indexes_details = self.indexesDetails()
        if indexes_details is not None:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Indexes'), indexes_details))

        # triggers
        triggers_details = self.triggersDetails()
        if triggers_details is not None:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Triggers'), triggers_details))

        return ret

    def getViewInfo(self):
        """Collect the table sections plus the view definition for views."""
        if not self.table.isView:
            return []

        ret = self.getTableInfo()

        # view definition
        view_def = self.getViewDefinition()
        if view_def is not None:
            ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'View definition'), view_def))

        return ret

    def toHtml(self):
        """Render the table or view information as a single HTML string."""
        ret = self.getViewInfo() if self.table.isView else self.getTableInfo()
        return HtmlContent(ret).toHtml()
class VectorTableInfo(TableInfo):
    """TableInfo specialization adding geometry details for vector tables."""

    def __init__(self, table):
        TableInfo.__init__(self, table)

    @staticmethod
    def _extentToString(extent):
        # format an extent given either as a list of floats or as a 4-tuple
        if isinstance(extent, list):
            return ', '.join('%.5f' % coord for coord in extent)
        return '%.5f, %.5f - %.5f, %.5f' % extent

    def spatialInfo(self):
        """Return HTML elements describing the geometry column, SRS and
        extents; empty list when the table has no geometry."""
        sections = []
        table = self.table
        if table.geomType is None:
            return sections

        rows = [
            (QApplication.translate("DBManagerPlugin", "Column:"), table.geomColumn),
            (QApplication.translate("DBManagerPlugin", "Geometry:"), table.geomType)
        ]

        # only if we have info from geometry_columns
        if table.geomDim:
            rows.append((QApplication.translate("DBManagerPlugin", "Dimension:"), table.geomDim))

        srid = table.srid if table.srid not in (None, 0) else -1
        if srid != -1:
            sr_info = table.database().connector.getSpatialRefInfo(srid)
        else:
            sr_info = QApplication.translate("DBManagerPlugin", "Undefined")
        if sr_info:
            rows.append((QApplication.translate("DBManagerPlugin", "Spatial ref:"), u"%s (%d)" % (sr_info, srid)))

        # estimated extent (tables only)
        if not table.isView:
            if table.estimatedExtent is None:
                # estimated extent information is not displayed yet, so just block
                # table signals to avoid double refreshing
                # (infoViewer->refreshEstimatedExtent->tableChanged->infoViewer)
                table.blockSignals(True)
                table.refreshTableEstimatedExtent()
                table.blockSignals(False)

            if table.estimatedExtent is not None and table.estimatedExtent[0] is not None:
                rows.append((QApplication.translate("DBManagerPlugin", "Estimated extent:"),
                             self._extentToString(table.estimatedExtent)))

        # extent
        if table.extent is not None and table.extent[0] is not None:
            extent_str = self._extentToString(table.extent)
        else:
            extent_str = QApplication.translate("DBManagerPlugin",
                                                '(unknown) (<a href="action:extent/get">find out</a>)')
        rows.append((QApplication.translate("DBManagerPlugin", "Extent:"), extent_str))

        sections.append(HtmlTable(rows))

        # is there an entry in geometry_columns?
        if table.geomType.lower() == 'geometry':
            sections.append(HtmlParagraph(
                QApplication.translate("DBManagerPlugin", "<warning> There is no entry in geometry_columns!")))

        # find out whether the geometry column has spatial index on it
        if not table.isView and not table.hasSpatialIndex():
            sections.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
                                                                 '<warning> No spatial index defined (<a href="action:spatialindex/create">create it</a>)')))

        return sections
class RasterTableInfo(TableInfo):
    """TableInfo specialization adding geometry details for raster tables."""

    def __init__(self, table):
        TableInfo.__init__(self, table)

    def spatialInfo(self):
        """Return HTML elements describing the raster column, SRS and
        extent; empty list when the table has no geometry."""
        sections = []
        table = self.table
        if table.geomType is None:
            return sections

        rows = [
            (QApplication.translate("DBManagerPlugin", "Column:"), table.geomColumn),
            (QApplication.translate("DBManagerPlugin", "Geometry:"), table.geomType)
        ]

        # only if we have info from geometry_columns
        srid = table.srid if table.srid is not None else -1
        if srid != -1:
            sr_info = table.database().connector.getSpatialRefInfo(srid)
        else:
            sr_info = QApplication.translate("DBManagerPlugin", "Undefined")
        if sr_info:
            rows.append((QApplication.translate("DBManagerPlugin", "Spatial ref:"), u"%s (%d)" % (sr_info, srid)))

        # extent
        if table.extent is not None and table.extent[0] is not None:
            extent_str = '%.5f, %.5f - %.5f, %.5f' % table.extent
        else:
            extent_str = QApplication.translate("DBManagerPlugin",
                                                '(unknown) (<a href="action:extent/get">find out</a>)')
        rows.append((QApplication.translate("DBManagerPlugin", "Extent:"), extent_str))

        sections.append(HtmlTable(rows))
        return sections
|
Here I am with a new incredible App for your androids smartphones and tablets. Which will be going to help you out in finding your favorite films quickly, and that is the “Newest movies HD”. You might hear about Terrarium TV. It’s an application that is similar to the one about which we are talking in this article today. Unfortunately, Terrarium TV’s era is gone now, and this new tool has captured its place while providing the best features to its users.
Sometimes we search for any movie on Google, and it takes us to the websites which are crap. Which means most of them are spam, and they don’t have the stuff that we are searching for. I know this happens with all of us usually, but we have the tool now that can be very helpful for finding the exact films which we are searching for. We love films, and mostly we prefer to get them free for the offline watch. And no one wants to watch online like we have to do on Netflix and HotStar. Therefore, this incredible app allows you to watch your favorite TV Shows or films offline by saving them on your mobile phones.
The best thing about the app is that you do not need to register to use it; all you need to do is install it on your smartphone or tablet. It is free to download, and you can use it without paying any charges.
It is essential to know about the usage of the app. And how you can utilize the tool to get your desired films for Android mobile phones so here we will discuss that.
First Download the Latest Apk file of the application from our website right now.
Install it on your smartphone from the device's storage; the downloaded file will be in the SD card or internal storage, depending on where you saved it.
If you have downloaded the file to any other device, then copy it to that mobile where you want to install it.
When you are done with the installation process, launch it from the menu of your phone.
Then you will be directed to the home page, where you will see a search bar as well as some recommendations.
That is all, now you can search your favorite stuff (Movies), and you can save them for watching those offline.
If you want to watch Live Iptv then you can try Star7 Live tv.
It is a free tool to download your favorite movies.
It provides you with all its content in high definition (HD) video quality.
You can check out all the details of the film such as storyline, cast genre, rating, etc.
Get a notification when developers update the database.
It is a light application that consumes less battery and space on your device.
It has a straightforward and user-friendly interface that anyone can use without any difficulty.
You get spam free or malicious free data from it that is why there are no threats for your device’s security.
Thousands of films are available in HD Video format as well as in other formats.
You can also watch your favorite videos online.
There are much more features to avail.
Requires 4.1 or up version Android OS.
Need fast internet connection (3G or more than that).
Enough space in the device’s storage or SD card to save data.
I have provided all the necessary information regarding the application. So I hope that you will get the advantage of this handy tool. Grab the Updated Apk File from our website right in this article, for your Smartphones and tablets.
|
# ***********************************
# Author: Pedro Jorge De Los Santos
# E-mail: delossantosmfq@gmail.com
# Blog: numython.github.io
# License: MIT License
# ***********************************
"""
The purpose of this module is to provide tools to build
a model automatically from text files with coordinates
and connectivities.
"""
import numpy as np
import re
# Regex matching signed integers and decimals.
# BUGFIX: the group must be NON-capturing -- with a capturing group,
# re.findall() returns only the group's text, which excludes the leading
# [-+] sign, so negative node coordinates were silently parsed as positive.
FLOATS = r"[-+]?(?:[0-9]*\.[0-9]+|[0-9]+)"
def read_file(filename):
    """Return the list of lines of a text file.

    Uses a context manager so the file handle is closed even if
    reading raises (the original left the handle open on error).
    """
    with open(filename, "r") as mshfile:
        return mshfile.readlines()
def parse_nodes(line):
    """Parse a node line ("num x y z ...") and return the coordinates.

    The first number (the node index) is dropped; an empty list is
    returned for lines without numbers.
    """
    values = [float(token) for token in re.findall(FLOATS, line)]
    return values[1:]
def parse_elements(line):
    """Parse a Gmsh element line and return its connectivity.

    Only element type 2 entries are kept (per the Gmsh line layout
    used here: index, type, three tags, then the connectivity);
    anything else yields an empty list.
    """
    fields = [int(token) for token in re.findall(FLOATS, line)]
    if len(fields) < 8:
        return []
    element_type = fields[1]
    if element_type != 2:
        return []
    # fields[0] is the element number, fields[2:5] are the element tags;
    # neither is needed for the connectivity.
    return fields[5:]
def isempty(iterable):
    """Return True when the sized iterable has no elements."""
    return len(iterable) == 0
def read_msh(filename):
    """Read a Gmsh ASCII .msh file and return (nodes, elements) arrays.

    Nodes are the parsed coordinate rows from the $Nodes section;
    elements are the connectivities collected from the $Elements
    section. Both are returned as numpy arrays.
    """
    nodes = []
    elements = []
    reading_nodes = False
    reading_elements = False
    for line in read_file(filename):
        # section markers toggle the two reading flags
        if "$Nodes" in line:
            reading_nodes = True
        if "$EndNodes" in line:
            reading_nodes = False
        if "$Elements" in line:
            reading_elements = True
        if "$EndElements" in line:
            reading_elements = False
        if reading_nodes:
            coords = parse_nodes(line)
            if not isempty(coords):
                nodes.append(coords)
        if reading_elements:
            connectivity = parse_elements(line)
            if not isempty(connectivity):
                elements.append(connectivity)
    return np.array(nodes), np.array(elements)
def ModelFromFiles(nodesfile,elementsfile,model):
    """
    Creates a model from ASCII files, where nodesfile contains
    the coordinates X/Y and elementsfile contains the connectivity
    of elements.

    NOTE(review): not implemented yet -- currently a no-op stub. The
    commented sketch below shows the intended behavior: read CSV
    coordinates/connectivities with numpy and populate ``model``.
    ``Node`` and ``Spring`` are not imported in this module; confirm
    their origin before enabling the sketch.
    """
    pass
    #~ dlm = "," # CSV values
    #~ NC = np.loadtxt(nodesfile, delimiter=dlm)
    #~ EC = np.loadtxt(elementsfile, delimiter=dlm)
    #~ for nd in NC:
    #~ cnode = Node( (nd[0], nd[1]) )
    #~ model.addNode(cnode)
    #~ for el in EC:
    #~ na = model.nodes[el[0]]
    #~ nb = model.nodes[el[1]]
    #~ cel = Spring((na,nb))
    #~ model.addElement(cel)
if __name__=='__main__':
    # Library-only module: no command line behavior for now.
    pass
|
Asheville Brewing Company’s refreshing wheat beer is right on time for lemony deliciousness. We updated the recipe in 2017 by adding fresh lemon zest to gently increase the citrusy aroma and flavor created by the Lemondrop hops. Wheat creates a perfect balance for this thirst-quenching brew. We love lemons, space, and dogs–making this a particularly special beer! This beer is named in honor of the first dog to go to outer space, Laika, who rode Sputnik 2 in 1957.
It will be available on draft at your favorite Western North Carolina watering hole and soon after that in 12-ounce cans for all of your outdoor adventures.
Pairs well with any summer salad, especially our Zorba the Greek. This is a great brunch beer as well.
|
# -*- coding: utf-8 -*-
# Part of this file is produced by:
# Copyright (c) 2010 Pedro Matiello <pmatiello@gmail.com>
# Juarez Bochi <jbochi@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""
PageRank algorithm
@sort: pagerank
"""
from models import Link
class Digraph(object):
    """Minimal directed graph used by the PageRank computation.

    Nodes map to their outbound neighbors (``node_neighbors``), their
    inbound nodes (``node_incidence``) and an arbitrary attribute payload
    (``node_attrs``).
    """

    def __init__(self):
        self.node_neighbors = {}    # Pairing: Node -> Neighbors
        self.node_incidence = {}    # Pairing: Node -> Incident nodes
        self.node_attrs = {}        # Pairing: Node -> caller-supplied payload

    def nodes(self):
        """Return the list of all nodes."""
        return list(self.node_neighbors.keys())

    def neighbors(self, node):
        """
        Return all nodes that are directly accessible from given node.
        """
        return self.node_neighbors[node]

    def incidents(self, node):
        """
        Return all nodes that are incident to the given node.
        """
        return self.node_incidence[node]

    def edges(self):
        """
        Return all edges in the graph as (source, target) pairs.
        """
        return list(self._edges())

    def _edges(self):
        for n, neighbors in self.node_neighbors.items():
            for neighbor in neighbors:
                yield (n, neighbor)

    def has_node(self, node):
        return node in self.node_neighbors

    def add_node(self, node, attrs=None):
        """Add a node with an optional attribute payload.

        Fix: the original used a mutable default (``attrs={}``) so every
        node added without attrs shared ONE dict; it also reset a None
        attrs to a list, inconsistent with the dict default.
        """
        if attrs is None:
            attrs = {}
        if (node not in self.node_neighbors):
            self.node_neighbors[node] = []
            self.node_incidence[node] = []
            self.node_attrs[node] = attrs
        else:
            # NOTE(review): AdditionError is neither defined nor imported in
            # this module — raising it will itself raise NameError; confirm
            # the intended exception class.
            raise AdditionError("Node %s already in digraph" % node)

    def add_edge(self, edge, wt=1, label=""):
        """
        Add a directed edge to the graph connecting two nodes.
        An edge, here, is a pair of nodes like C{(n, m)}.
        Adding an existing edge is a no-op.
        """
        u, v = edge
        for n in [u, v]:
            if not n in self.node_neighbors:
                raise AdditionError("%s is missing from the node_neighbors" % n)
            if not n in self.node_incidence:
                raise AdditionError("%s is missing from the node_incidence" % n)

        if v in self.node_neighbors[u] and u in self.node_incidence[v]:
            return
        else:
            self.node_neighbors[u].append(v)
            self.node_incidence[v].append(u)

    def node_order(self, node):
        """
        Return the order (outbound degree) of the given node.
        @rtype: number
        @return: Order of the given node.
        """
        return len(self.neighbors(node))

    def __str__(self):
        return "\n".join(
            "(%s, %s)" % (k, v)
            for k, v in self.node_neighbors.items() if v)
def pagerank(graph, dumping_factor=0.85, max_iter=100, min_delta=0.00001):
    """Compute the PageRank score of every node of a directed graph.

    @type graph: digraph
    @param graph: object exposing nodes()/incidents()/neighbors().
    @type dumping_factor: number
    @param dumping_factor: damping factor of the PageRank model.
    @type max_iter: number
    @param max_iter: hard cap on the number of power iterations.
    @type min_delta: number
    @param min_delta: total score change below which iteration stops.
    @rtype: Dict
    @return: Dict mapping each node to its PageRank score.
    """
    all_nodes = graph.nodes()
    n = len(all_nodes)
    if not all_nodes:
        return {}

    # Base score every node gets regardless of inbound links.
    base = (1.0 - dumping_factor) / n
    # Start every node at a uniform 1/N score.
    scores = {node: 1.0 / n for node in all_nodes}

    for _ in range(max_iter):
        change = 0  # accumulated difference versus the previous sweep
        for node in all_nodes:
            incoming = base
            # Each inbound page contributes its score split evenly over
            # its outbound links, scaled by the damping factor.
            for src in graph.incidents(node):
                incoming += dumping_factor * scores[src] / len(graph.neighbors(src))
            change += abs(scores[node] - incoming)
            scores[node] = incoming
        # Stop early once the ranking has converged.
        if change < min_delta:
            break
    return scores
def generate_graph():
    """Build the directed link graph used by the PageRank computation.

    Each Link row contributes an inbound-page -> target-page edge; the
    page objects themselves are stored as node attributes.
    """
    digraph = Digraph()
    for link in Link.select():
        source_id = link.inbound.id
        target_id = link.target.id
        if not digraph.has_node(source_id):
            digraph.add_node(source_id, link.inbound)
        if not digraph.has_node(target_id):
            digraph.add_node(target_id, link.target)
        digraph.add_edge((source_id, target_id))
    return digraph
def compute_pagerank():
    """Compute PageRank for the whole link graph and write the ranking,
    best-ranked pages first, to logs/pagerank.txt.

    Fix: ``dict.iteritems()`` is Python 2 only; ``items()`` behaves the
    same here and also works on Python 3.
    """
    g = generate_graph()
    import operator
    pages = sorted(pagerank(g).items(),
                   key=operator.itemgetter(1), reverse=True)
    with open('logs/pagerank.txt', 'w') as f:
        for idx, elem in enumerate(pages):
            # NOTE(review): encoding before write assumes a Python 2
            # text-mode file — confirm before porting to Python 3.
            f.write(
                ("%6s & %s - %s\n" %
                 (idx, elem, g.node_attrs[elem[0]].url)).encode('utf8'))
|
An entrepreneur since childhood, Cameron Herold wants parents and teachers to recognize — and foster — entrepreneurial talent in kids.
For 20 years, Cameron Herold has been coaching entrepreneurs on five continents, helping them build their companies. He started BackPocket COO to coach and mentor young, fun companies — and help them make their dreams happen.
Herold was a leading force behind one of the most successful new business ventures of the last decade, 1-800-GOT-JUNK?. He was Chief Operating Officer for nearly seven years. Prior to that, he was VP of Corporate Development at Ubarter.com.
|
from django.http import HttpResponse, Http404
from django.shortcuts import render, redirect
from django.views.generic import View, TemplateView
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.template.context_processors import csrf
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.template import RequestContext, loader
from account.hash import UserHasher
from emails import SendGrid
from resources.views import CommunityBaseView
from account.forms import LoginForm, RegisterForm, ResetForm, ContactUsForm
from userprofile.models import UserProfile
from codango.settings.base import ADMIN_EMAIL, CODANGO_EMAIL
class IndexView(TemplateView):
    """Landing page: sends authenticated users straight to /home,
    otherwise renders the index template with login/register forms."""

    # NOTE(review): 'initial' looks like an unused placeholder; kept in
    # case templates or subclasses reference it — confirm before removal.
    initial = {'key': 'value'}
    template_name = 'account/index.html'

    def dispatch(self, request, *args, **kwargs):
        # Already-authenticated users skip the landing page entirely.
        if request.user.is_authenticated():
            messages.add_message(
                request, messages.SUCCESS, 'Welcome back!')
            # Fix: redirect() takes no context_instance argument; for a
            # literal URL the kwarg was silently ignored, so it is dropped.
            return redirect('/home')
        return super(IndexView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        """Expose empty login and registration forms to the template."""
        context = super(IndexView, self).get_context_data(**kwargs)
        context['loginform'] = LoginForm()
        context['registerform'] = RegisterForm()
        return context
class LoginView(IndexView):
    """Process login POSTs: AJAX social logins and classic form logins."""
    form_class = LoginForm

    def post(self, request, *args, **kwargs):
        """Authenticate the user.

        AJAX requests carry a social id: the matching profile's user is
        logged in and a plain-text status ("success"/"register") is
        returned for the client-side script. Non-AJAX requests go through
        the regular username/password form.
        """
        if self.request.is_ajax():
            try:
                userprofile = UserProfile.objects.get(
                    social_id=request.POST['id'])
                user = userprofile.get_user()
                # authenticate() is not called on this path, so the auth
                # backend must be set explicitly before login().
                user.backend = 'django.contrib.auth.backends.ModelBackend'
                login(request, user)
                return HttpResponse("success", content_type='text/plain')
            except UserProfile.DoesNotExist:
                # Unknown social id: tell the client to register instead.
                return HttpResponse("register", content_type='text/plain')
        form = self.form_class(request.POST)
        if form.is_valid():
            username = request.POST['username']
            password = request.POST['password']
            user = authenticate(username=username, password=password)
            # Without "remember me" the session expires on browser close.
            if not request.POST.get('remember_me'):
                request.session.set_expiry(0)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    messages.add_message(
                        request, messages.SUCCESS, 'Logged in Successfully!')
                    return redirect(
                        '/home',
                        context_instance=RequestContext(request)
                    )
                # NOTE(review): an inactive user falls through to the end
                # of the method and returns None (no HttpResponse) —
                # confirm the intended handling of deactivated accounts.
            else:
                messages.add_message(
                    request, messages.ERROR, 'Incorrect username or password!')
                return redirect(
                    '/',
                    context_instance=RequestContext(request)
                )
        else:
            # Invalid form: re-render the landing page with bound errors.
            context = super(LoginView, self).get_context_data(**kwargs)
            context['loginform'] = form
            return render(request, self.template_name, context)
class RegisterView(IndexView):
    """Create a new account from the landing-page register form."""
    form_class = RegisterForm

    def post(self, request, **kwargs):
        form = self.form_class(request.POST)
        if form.is_valid():
            new_user = form.save()
            # Re-fetch through authenticate() so the user object carries
            # an auth backend, which login() requires.
            new_user = authenticate(username=request.POST['username'],
                                    password=request.POST['password'])
            login(request, new_user)
            messages.add_message(
                request, messages.SUCCESS, 'Registered Successfully!')
            # Copy optional social-signup fields onto the profile; they
            # are None when the user registered with the plain form.
            new_profile = new_user.profile
            new_profile.social_id = request.POST[
                'social_id'] if 'social_id' in request.POST else None
            new_profile.first_name = request.POST[
                'first_name'] if 'first_name' in request.POST else None
            new_profile.last_name = request.POST[
                'last_name'] if 'last_name' in request.POST else None
            new_profile.save()
            # Send the fresh user to their profile edit page.
            return redirect(
                '/user/' + self.request.user.username + '/edit',
                context_instance=RequestContext(request)
            )
        else:
            # Invalid form: re-render the landing page with bound errors.
            context = super(RegisterView, self).get_context_data(**kwargs)
            context['registerform'] = form
            return render(request, self.template_name, context)
class ContactUsView(TemplateView):
    """Render the contact form and relay submissions to the site admin."""
    form_class = ContactUsForm
    template_name = 'account/contact-us.html'

    def get_context_data(self, **kwargs):
        """Expose an empty contact form to the template."""
        context = super(ContactUsView, self).get_context_data(**kwargs)
        context['contactusform'] = ContactUsForm()
        return context

    def post(self, request, *args, **kwargs):
        """Validate the submission and email it to the admin address."""
        form = self.form_class(request.POST)
        if not form.is_valid():
            # Re-render the page with the bound form so errors are shown.
            context = super(ContactUsView, self).get_context_data(**kwargs)
            context['contactusform'] = form
            return render(request, self.template_name, context)

        # Build the admin-bound email from the submitted fields.
        email_compose = SendGrid.compose(
            sender='{0} <{1}>'.format(request.POST['name'],
                                      request.POST['email']),
            recipient=ADMIN_EMAIL,
            subject=request.POST['subject'],
            text=request.POST['message'],
            html=None
        )
        # Send it and tell the user whether the relay succeeded.
        if SendGrid.send(email_compose) == 200:
            messages.add_message(
                request, messages.SUCCESS, 'Message sent successfully!')
        else:
            messages.add_message(
                request, messages.ERROR,
                'Message failed to send, please try again later')
        return redirect(
            '/contact-us',
            context_instance=RequestContext(request)
        )
class AboutUsView(TemplateView):
    # Static "About us" page.
    template_name = 'account/about-us.html'
class TeamView(TemplateView):
    # Static "Team" page.
    template_name = 'account/team.html'
class HomeView(CommunityBaseView):
    # Authenticated landing page; all behavior comes from CommunityBaseView.
    pass
class ForgotPasswordView(TemplateView):
    """First step of password recovery: email the user a reset link."""
    form_class = ResetForm
    template_name = 'account/forgot-password.html'

    def post(self, request, *args, **kwargs):
        """Look up the submitted email and send a recovery-hash URL."""
        try:
            # get the email inputted
            email_inputted = request.POST.get("email")
            # query the database if that email exists
            user = User.objects.get(email=email_inputted)
            # generate a recovery hash for that user and build the
            # absolute URL the emailed link will point to
            user_hash = UserHasher.gen_hash(user)
            user_hash_url = request.build_absolute_uri(
                reverse('reset_password', kwargs={'user_hash': user_hash}))
            hash_email_context = RequestContext(
                request, {'user_hash_url': user_hash_url})
            # compose the email (plain-text and HTML alternatives)
            email_compose = SendGrid.compose(
                sender='Codango <{}>'.format(CODANGO_EMAIL),
                recipient=user.email,
                subject='Codango: Password Recovery',
                text=loader.get_template(
                    'account/forgot-password-email.txt'
                ).render(hash_email_context),
                html=loader.get_template(
                    'account/forgot-password-email.html'
                ).render(hash_email_context),
            )
            # send email
            email_response = SendGrid.send(email_compose)
            # inform the user if mail sent was successful
            context = {
                "email_status": email_response
            }
            return render(
                request,
                'account/forgot-password-status.html',
                context
            )
        except ObjectDoesNotExist:
            # No user owns that email: show the form again with an error.
            messages.add_message(
                request, messages.ERROR,
                'The email specified does not belong to any valid user.')
            return render(request, 'account/forgot-password.html')
class ResetPasswordView(View):
    """Second step of password recovery: validate the emailed hash (GET)
    and store the new password (POST).
    """

    def get(self, request, *args, **kwargs):
        """Resolve the recovery hash and show the reset form."""
        user_hash = kwargs['user_hash']
        user = UserHasher.reverse_hash(user_hash)
        if user is not None:
            if user.is_active:
                # Remember who is resetting so the POST can find them.
                request.session['user_pk'] = user.pk
                context = {
                    "password_reset_form": ResetForm(auto_id=True)
                }
                context.update(csrf(request))
                return render(
                    request,
                    'account/forgot-password-reset.html',
                    context
                )
            else:
                messages.add_message(
                    request, messages.ERROR, 'Account not activated!')
                # Fix: HttpResponse takes `status` and `reason`, not
                # `status_code`/`reason_phrase` — the original kwargs
                # raised TypeError at runtime.
                return HttpResponse(
                    'Account not activated!',
                    status=403,
                    reason='You are not allowed to view this '
                           'content because your account is not activated!'
                )
        else:
            raise Http404("User does not exist")

    def post(self, request, *args, **kwargs):
        """Validate the reset form and save the new password."""
        password_reset_form = ResetForm(request.POST, auto_id=True)
        new_password = request.POST.get("password")
        if password_reset_form.is_valid():
            try:
                # NOTE(review): a missing session key raises KeyError,
                # which is not caught here — confirm intended behavior.
                user_pk = request.session['user_pk']
                user = User.objects.get(pk=user_pk)
                user.set_password(new_password)
                user.save()
                messages.add_message(
                    request, messages.INFO,
                    'Your password has been changed successfully!')
                return redirect('/')
            except ObjectDoesNotExist:
                messages.add_message(
                    request, messages.ERROR,
                    'You are not allowed to perform this action!')
                # Fix: `status`, not `status_code` (TypeError otherwise).
                return HttpResponse('Action not allowed!', status=403)
        # Invalid form: re-render with bound errors.
        context = {
            "password_reset_form": password_reset_form
        }
        context.update(csrf(request))
        return render(request, 'account/forgot-password-reset.html', context)
|
By participating in the Punti Verdi programme in the United Kingdom (the “Programme”), we will collect and use information about you and any additional collectors within your account ("your information"). This information includes your registration details, information about the use of your Programme account, shopping purchases and other information that you give us or that we collect, including about your use of internet connected devices.
We will use your information for marketing to you by email, text message and other methods of electronic communication from time to time, in accordance with your data choices.
Where we rely on consent to use your information, you have the right to withdraw that consent at any time.
If you do not earn or redeem Points for a period of 12 months we may in accordance with the Collector Rules, deem you to be inactive and suspend or close your account. Your information will be kept and securely stored anonymously for a further period of 12 months after you are deemed to be inactive, following which we will ensure that your personal data is no longer retained.
Note that some of the choices or changes you make may impact our ability to maintain your account or provide you with Points or Rewards (for which you can exchange Points collected), and your account may need to be closed. We will notify you if such action is necessary.
|
# -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext as _
from wger.core.forms import UserPersonalInformationForm
from wger.utils.widgets import BootstrapSelectMultiple
class GymUserPermisssionForm(forms.ModelForm):
    '''
    Form used to set the permission group of a gym member
    '''

    # Role identifiers used throughout the gym-management code.
    USER = 'user'
    GYM_ADMIN = 'admin'
    TRAINER = 'trainer'
    MANAGER = 'manager'

    # Empty default roles, they are always set at run time
    ROLES = ()

    class Meta:
        model = User
        fields = ('role',)

    role = forms.MultipleChoiceField(choices=ROLES,
                                     initial=USER)

    def __init__(self, available_roles=None, *args, **kwargs):
        '''
        Custom logic to reduce the available permissions

        :param available_roles: iterable of role names the current editor
            may assign ('trainer', 'admin', 'manager'); 'user' is always
            offered.
        '''
        super(GymUserPermisssionForm, self).__init__(*args, **kwargs)

        # Fix: the default was a shared mutable list (available_roles=[]).
        if available_roles is None:
            available_roles = []

        field_choices = [(self.USER, _('User'))]
        if 'trainer' in available_roles:
            field_choices.append((self.TRAINER, _('Trainer')))
        if 'admin' in available_roles:
            field_choices.append((self.GYM_ADMIN, _('Gym administrator')))
        if 'manager' in available_roles:
            field_choices.append((self.MANAGER, _('General manager')))

        # Fix: initial was `User` (the model class); the intended initial
        # selection is the 'user' role constant.
        self.fields['role'] = forms.MultipleChoiceField(choices=field_choices,
                                                        initial=self.USER,
                                                        widget=BootstrapSelectMultiple())
class GymUserAddForm(GymUserPermisssionForm, UserPersonalInformationForm):
    '''
    Form used when adding a user to a gym
    '''

    class Meta:
        model = User
        widgets = {'role': BootstrapSelectMultiple()}
        fields = ('first_name', 'last_name', 'username', 'email', 'role',)

    username = forms.RegexField(label=_("Username"),
                                max_length=30,
                                regex=r'^[\w.@+-]+$',
                                help_text=_("Required. 30 characters or fewer. Letters, digits and "
                                            "@/./+/-/_ only."),
                                error_messages={
                                    # Fix: message said "@/.//-/_", missing
                                    # the "+" that the regex and help_text
                                    # both allow.
                                    'invalid': _("This value may contain only letters, numbers and "
                                                 "@/./+/-/_ characters.")})

    def clean_username(self):
        '''
        Since User.username is unique, this check is redundant,
        but it sets a nicer error message than the ORM. See #13147.
        '''
        username = self.cleaned_data["username"]
        try:
            User._default_manager.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(_("A user with that username already exists."))
|
“As Jesus came ashore he saw the large crowd and he had compassion on them, because they were like sheep without a shepherd. So he taught them many things.” (Mark 6:34 NET).
Jesus cared about people. When he saw this crowd – a large crowd at that – he felt for them. They needed a shepherd, someone to guide them. So he did, by teaching them many things.
What is it about gardening that causes the fun to often descend into a guilt trip? The dreams that seemed so tangible in February and March taunt us as unfinished projects in June and July.
|
"""
:Resource:
==========
:
This is the managed resource between processes.
Resources such as queues, locks and data are housed
here to allow for synchronization to occur.
:
:copyright: (c) 9/30/2015 by gammaRay.
:license: BSD, see LICENSE for more details.
Author: gammaRay
Version: :1.0:
Date: 9/30/2015
"""
"""
=============================================
Imports
=============================================
"""
import datetime
from uuid import uuid4
"""
=============================================
Constants
=============================================
"""
# Program Attributes
__author__      = "gammaRay"
__version__     = "1.0"
__date__        = "9/28/2015"

# ===========================================
# Types

# Numeric resource-type ids; they appear to index into RESOURCE_TYPES
# below (0 -> "DEFAULT", 1 -> "TEST") — confirm before relying on that.
RESOURCE_DEFAULT        = 0
RESOURCE_TEST           = 1
"""
=============================================
Source
=============================================
"""
# Registry of resource type names that can be monitored.
RESOURCE_TYPES = [
    "DEFAULT",
    "TEST"
]


def add_type(type):
    """
    Register an additional resource type name to monitor.
    """
    RESOURCE_TYPES.append(type)
# ===========================================
# Managed Resource
class ManagedResource(object):
    """
    Wrapper that bundles a shared resource (queue, lock, data, ...) with
    naming and tracking metadata so tasks and interfaces can publish it
    between processes.

    i.e. Ip motoring task, Arp monitoring task
    """

    # -- class-level defaults, overwritten per instance in __init__ --
    name = None         # name of the resource
    tag = None          # tag for the resource
    uuid = None         # unique tracking id
    resource = None     # the managed object itself
    time = None         # timestamp of the last setObj() call
    __queue = None      # "<name>.<tag>" queue the resource publishes to

    def __init__(self, name=None, tag=None):
        """
        Default constructor.

        :param name: Name of the plugin
        :param tag: Tag for the resource
        :return:
        """
        self.name = name
        self.tag = tag
        self.uuid = str(uuid4())
        # Publication queue is derived from the plugin name and app tag.
        self.__queue = "{plugin}.{app}".format(plugin=name, app=tag)

    def setObj(self, obj):
        """
        Store *obj* as the managed resource and timestamp the update.

        :param obj: The object to manage.
        :return:
        """
        self.resource = obj
        self.time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    def getObj(self):
        """
        Return the currently managed object.

        :return:
        """
        return self.resource

    # Name-mangled property alias kept for parity with the original API.
    __obj = property(getObj, setObj)
|
At Darin Autos Limited we aim to bring you the widest choice of used cars in ENFIELD, MIDDLESEX and at great prices. We have some fantastic deals on used VOLVO cars, which are always changing. Browse our used cars below and contact us for more information on any of our second hand cars.
|
#!/usr/bin/env python
"""
Clean cmake generated files.
"""
import optparse
import os
import shutil
import subprocess
import sys
# Do not cleanup anything in these subdirectories.
PRUNE_DIRS = [".svn", ".git", "CVS"]
def make_clean(directory):
    """Run ``make clean`` in *directory* and return make's exit status.

    Arguments:
    - `directory`: target directory
    """
    command = [
        "make",
        "--directory=%s" % directory,
        "--quiet",
        "clean"
    ]
    return subprocess.Popen(command).wait()
def clean(
    directory,
    prune_dirs=None
):
    """Remove cmake-generated files and directories under *directory*.

    Arguments:
    - `directory`: target directory
    - `prune_dirs`: directory names never descended into or removed;
      defaults to the module-level PRUNE_DIRS (".svn", ".git", "CVS").

    Fix: the original pruned subdirectories with
    ``for d in dirnames: dirnames.remove(d)``, which mutates the list
    while iterating and therefore skips the element following each
    removal — e.g. adjacent ".svn"/".git" entries were not both pruned,
    so cmake files inside the second one were wrongly deleted.
    """
    if prune_dirs is None:
        prune_dirs = PRUNE_DIRS

    # Toplevel files.
    for filename in [
        "CMakeCache.txt",
        "CPackConfig.cmake",
        "CPackSourceConfig.cmake",
        "install_manifest.txt"
    ]:
        pathname = os.path.join(directory, filename)
        if os.path.exists(pathname):
            os.remove(pathname)

    # Toplevel directories.
    for dirname in ["_CPack_Packages"]:
        pathname = os.path.join(directory, dirname)
        if os.path.exists(pathname):
            shutil.rmtree(pathname)

    # CMakeFiles, Makefile, cmake_install.cmake.
    for dirpath, dirnames, filenames in os.walk(directory):
        # Prune subdirs; slice-assignment mutates the list in place so
        # os.walk does not descend into them.
        dirnames[:] = [d for d in dirnames if d not in prune_dirs]
        if "CMakeFiles" in dirnames:
            for filename in ["Makefile", "cmake_install.cmake"]:
                if filename in filenames:
                    pathname = os.path.join(dirpath, filename)
                    if os.path.exists(pathname):
                        os.remove(pathname)
            shutil.rmtree(os.path.join(dirpath, "CMakeFiles"))
            dirnames.remove("CMakeFiles")

    # Remove empty directories. The "repeat" construct is needed
    # because the dirnames list for the parent is generated before the
    # parent is processed. When a directory is removed, there is no
    # way to remove it from the parent's dirnames list. Note that
    # setting topdown=False will not help here, and it complicates the
    # pruning logic.
    repeat = True
    while repeat:
        repeat = False
        for dirpath, dirnames, filenames in os.walk(directory):
            # We must check for emptiness before pruning. Otherwise
            # we could try to remove a directory that contains only
            # prunable subdirs.
            if len(dirnames) == 0 and len(filenames) == 0:
                os.rmdir(dirpath)
                repeat = True
            # Prune subdirs (in place, see above).
            dirnames[:] = [d for d in dirnames if d not in prune_dirs]
def main():
    """Entry point: clean every directory named on the command line,
    defaulting to the current directory."""
    option_parser = optparse.OptionParser(
        usage="usage: %prog [DIR...]\n" +
        " Clean cmake generated files."
    )
    (_, directories) = option_parser.parse_args()
    if not directories:
        directories = ["."]
    for directory in directories:
        #make_clean(directory)
        clean(directory)
    return 0
# Propagate main()'s return value as the process exit status.
if __name__ == "__main__":
    sys.exit(main())
|
Hilton Belfast Templepatrick is the perfect destination for fun and relaxation! Our beautiful countryside resort with an on-site leisure club and pool, beauty and wellness treatments, a championship golf course and outstanding food and drinks offers is an experience everyone will enjoy. Give someone special a monetary gift voucher that can be used as full or partial payment toward an overnight stay, golf or beauty experience, dining or cocktails.
Hilton Belfast Templepatrick is the perfect destination for fun and relaxation in the countryside! Monetary gift vouchers can be used toward an overnight stay, golf or beauty experience, dining or cocktails.
Gift Vouchers are only redeemable from 72 hours after purchase. This voucher features a unique reference ID code, may only be redeemed once, may not be exchanged for cash or replaced if lost, and is non-transferable. Change will not be given for monetary vouchers. Please quote the gift voucher ID reference when reserving your experience with the hotel using the contact details shown on the certificate. Subject to availability. The gift voucher must be presented upon arrival when redeeming the experience. No photocopies will be accepted. Advance bookings are necessary. Cancellation of a booking with less than 24 hours' notice will render the voucher null & void. Hilton Belfast Templepatrick is not liable for any vouchers re-sold through a third party. Vouchers are valid until the date stated on the gift voucher and no extensions can be offered under any circumstances. Terms & Conditions are subject to change.
|
"""
None
"""
from collections import defaultdict
from typing import List
class TrieNode:
    """Trie node: ``children`` maps the next character to a child node;
    ``words`` maps every full sentence passing through this node to its
    accumulated hot degree."""

    def __init__(self):
        self.children = {}
        self.words = defaultdict(int)


class AutocompleteSystem:
    """Design-search-autocomplete system (LeetCode 642 style).

    Tracks the prefix typed so far and, for each character, returns the
    top-3 historical sentences ranked by count (desc) then ASCII order.

    Fix: ``List`` was used in annotations without ever being imported,
    so defining this class raised NameError; the typing import is now
    present at module level.
    """

    def __init__(self, sentences: List[str], times: List[int]):
        self.ss = ''  # the prefix typed since the last '#'
        self.root = TrieNode()
        for sentence, time in zip(sentences, times):
            self.add_input(sentence, time)

    def add_input(self, sentence: str, time: int) -> None:
        """Record ``sentence`` with an additional count of ``time`` on
        every node along its path, so any prefix node knows all of its
        completions."""
        node = self.root
        for ch in sentence:
            if ch not in node.children:
                node.children[ch] = TrieNode()
            node = node.children[ch]
            node.words[sentence] += time

    def input(self, c: str) -> List[str]:
        """Feed one character; '#' commits the current sentence (count 1)
        and resets the prefix. Returns the top-3 completions."""
        if c == '#':
            self.add_input(self.ss, 1)
            self.ss = ''
            return []
        self.ss += c
        # Walk down the trie along the accumulated prefix.
        node = self.root
        for ch in self.ss:
            if ch not in node.children:
                return []
            node = node.children[ch]
        ranked = sorted(node.words.items(), key=lambda kv: (-kv[1], kv[0]))
        return [sentence for sentence, _ in ranked[:3]]
# Your AutocompleteSystem object will be instantiated and called as such:
# obj = AutocompleteSystem(sentences, times)
# param_1 = obj.input(c)
|
Miranda Lambert’s MuttNation Foundation surprised more than 50 carefully-vetted animal shelters across the country with grants ranging from $3000-$5000. The support is given to help the shelters continue their outstanding work with rescue animals, Miranda’s primary passion outside of making music. “I am amazed and humbled by the great work these shelters do, and so honored to be able to support their critical efforts on behalf of rescue animals,” said Miranda.
A complete list of Mutts Across America shelter recipients along with selection criteria is available at MuttNationFoundation.com. This program does not accept applications.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
A script to process data from LL84 energy and water use benchmarking from
http://www.nyc.gov/html/gbee/html/plan/ll84_scores.shtml
Manually removed column 29 "OwnerName", column 62 "HistDist, and column 63 "Landmark" due to
formatting errors that caused the file to be parsed incorrectly. They aren't germane to this analysis
"""
import pandas as pd
# download file from internet
# open file
#
# NOTE(review): the pandas import above is currently unused — confirm
# whether a dataframe-based version of this processing was intended.
#
# Fixes to the original pass:
#  - plutoLineLength was appended to without ever being initialised
#  - the file iterator was exhausted by the first loop, so the later
#    loops never saw any data (and next() raised StopIteration)
#  - lineSplit[45].split() was a typo for .strip()
#  - the file handle was never closed
mnplutoList = []       # selected fields per parsed row
plutoLineLength = []   # [BBL, column count] per row, to spot bad rows
with open('mn13v1.csv', 'r') as mnplutoFile:
    next(mnplutoFile)  # dump header
    # iterate and extract
    for line in mnplutoFile:
        lineSplit = line.split(",")
        # concatenated BBL = Borough + Block + Lot (columns 0-2)
        cat_bbl = lineSplit[0].strip() + lineSplit[1].strip() + lineSplit[2].strip()
        plutoLineLength.append([cat_bbl, len(lineSplit)])
        # per the column map below: XCoord 68, LotDepth 44,
        # BldgFront 45, AssessLand 52
        mnplutoList.append([lineSplit[68].strip(), lineSplit[44].strip(),
                            lineSplit[45].strip(), lineSplit[52].strip()])
# remove duplicates
# remove outliers
# Fields of interest:
# BBL 65
# BldgFront 45
# BldgDepth 46
# NumFloors 40
# NumBldgs 39
# BldgArea 29
# Borough 0
# Block 1
# Lot 2
# CD 3
# CT2010 4
# CB2010 5
# SchoolDist 6
# Council 7
# ZipCode 8
# FireComp 9
# PolicePrct 10
# Address 11
# ZoneDist1 12
# ZoneDist2 13
# ZoneDist3 14
# ZoneDist4 15
# Overlay1 16
# Overlay2 17
# SPDist1 18
# SPDist2 19
# LtdHeight 20
# AllZoning1 21
# AllZoning2 22
# SplitZone 23
# BldgClass 24
# LandUse 25
# Easements 26
# OwnerType 27
# LotArea 28
# BldgArea 29
# ComArea 30
# ResArea 31
# OfficeArea 32
# RetailArea 33
# GarageArea 34
# StrgeArea 35
# FactryArea 36
# OtherArea 37
# AreaSource 38
# NumBldgs 39
# NumFloors 40
# UnitsRes 41
# UnitsTotal 42
# LotFront 43
# LotDepth 44
# BldgFront 45
# BldgDepth 46
# Ext 47
# ProxCode 48
# IrrLotCode 49
# LotType 50
# BsmtCode 51
# AssessLand 52
# AssessTot 53
# ExemptLand 54
# ExemptTot 55
# YearBuilt 56
# BuiltCode 57
# YearAlter1 58
# YearAlter2 59
# BuiltFAR 60
# ResidFAR 61
# CommFAR 62
# FacilFAR 63
# BoroCode 64
# BBL 65
# CondoNo 66
# Tract2010 67
# XCoord 68
# YCoord 69
# ZoneMap 70
# ZMCode 71
# Sanborn 72
# TaxMap 73
# EDesigNum 74
# APPBBL 75
# APPDate 76
# PLUTOMapID 77
# Version 78
#### Old List ####
# Borough 0
# Block 1
# Lot 2
# CD 3
# CT2010 4
# CB2010 5
# SchoolDist 6
# Council 7
# ZipCode 8
# FireComp 9
# PolicePrct 10
# Address 11
# ZoneDist1 12
# ZoneDist2 13
# ZoneDist3 14
# ZoneDist4 15
# Overlay1 16
# Overlay2 17
# SPDist1 18
# SPDist2 19
# LtdHeight 20
# AllZoning1 21
# AllZoning2 22
# SplitZone 23
# BldgClass 24
# LandUse 25
# Easements 26
# OwnerType 27
# OwnerName 28
# LotArea 29
# BldgArea 30
# ComArea 31
# ResArea 32
# OfficeArea 33
# RetailArea 34
# GarageArea 35
# StrgeArea 36
# FactryArea 37
# OtherArea 38
# AreaSource 39
# NumBldgs 40
# NumFloors 41
# UnitsRes 42
# UnitsTotal 43
# LotFront 44
# LotDepth 45
# BldgFront 46
# BldgDepth 47
# Ext 48
# ProxCode 49
# IrrLotCode 50
# LotType 51
# BsmtCode 52
# AssessLand 53
# AssessTot 54
# ExemptLand 55
# ExemptTot 56
# YearBuilt 57
# BuiltCode 58
# YearAlter1 59
# YearAlter2 60
# HistDist 61
# Landmark 62
# BuiltFAR 63
# ResidFAR 64
# CommFAR 65
# FacilFAR 66
# BoroCode 67
# BBL 68
# CondoNo 69
# Tract2010 70
# XCoord 71
# YCoord 72
# ZoneMap 73
# ZMCode 74
# Sanborn 75
# TaxMap 76
# EDesigNum 77
# APPBBL 78
# APPDate 79
# PLUTOMapID 80
# Version 81
|
Friday in Ballito, competition was whittled down to the final eight surfers at the Ballito Pro Qualifying Series 10,000-level event. Local sons Jordy Smith and Michael February, along with QS rankings leader Jesse Mendes and Australian Mikey Wright, all made it through, effectively stacking the next four heats for some intense battles ahead.
Catch the best of the day above, plus the competitive recap here.
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <jesteve@zikzakmedia.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
#import pooler
import locale
import time
from openerp.tools.translate import _
# Wizard screen: parent-account selection form (OpenERP wizard XML).
account_form = '''<?xml version="1.0"?>
<form string="Select parent account">
<field name="account_list" colspan="4"/>
</form>'''

# Field definitions backing account_form.
account_fields = {
'account_list': {'string':'Account', 'type':'many2many', 'relation':'account.account', 'required':True ,'domain':[]},
}

# Wizard screen: date/period filter and report options form.
period_form = '''<?xml version="1.0"?>
<form string="Select Date-Period">
<field name="company_id" colspan="4"/>
<newline/>
<field name="fiscalyear"/>
<label colspan="2" string="(Keep empty for all open fiscal years)" align="0.0"/>
<newline/>
<field name="display_account" required="True"/>
<field name="sortbydate" required="True"/>
<field name="landscape"/>
<field name="amount_currency"/>
<field name="initial_balance"/>
<newline/>
<separator string="Filters" colspan="4"/>
<field name="state" required="True"/>
<newline/>
<group attrs="{'invisible':[('state','=','none')]}" colspan="4">
<group attrs="{'invisible':[('state','=','byperiod')]}" colspan="4">
<separator string="Date Filter" colspan="4"/>
<field name="date_from"/>
<field name="date_to"/>
</group>
<group attrs="{'invisible':[('state','=','bydate')]}" colspan="4">
<separator string="Filter on Periods" colspan="4"/>
<field name="periods" colspan="4" nolabel="1"/>
</group>
</group>
</form>'''

# Field definitions backing period_form; 'state' drives which filter
# groups are visible in the XML above.
period_fields = {
'company_id': {'string': 'Company', 'type': 'many2one', 'relation': 'res.company', 'required': True},
'state':{
'string':"Date/Period Filter",
'type':'selection',
'selection':[('bydate','By Date'),('byperiod','By Period'),('all','By Date and Period'),('none','No Filter')],
'default': lambda *a:'none'
},
'fiscalyear': {'string': 'Fiscal year', 'type': 'many2one', 'relation': 'account.fiscalyear',
'help': 'Keep empty for all open fiscal year'},
'periods': {'string': 'Periods', 'type': 'many2many', 'relation': 'account.period', 'help': 'All periods if empty'},
'sortbydate':{'string':"Sort by", 'type':'selection', 'selection':[('sort_date','Date'),('sort_mvt','Movement')]},
'display_account':{'string':"Display accounts ", 'type':'selection', 'selection':[('bal_mouvement','With movements'),('bal_all','All'),('bal_solde','With balance is not equal to 0')]},
'landscape':{'string':"Landscape Mode", 'type':'boolean'},
'initial_balance':{'string':"Show initial balances", 'type':'boolean'},
'amount_currency':{'string':"With Currency", 'type':'boolean'},
'date_from': {'string':"Start date", 'type':'date', 'required':True, 'default': lambda *a: time.strftime('%Y-01-01')},
'date_to': {'string':"End date", 'type':'date', 'required':True, 'default': lambda *a: time.strftime('%Y-%m-%d')},
}
class wizard_report(wizard.interface):
    """Legacy OpenERP wizard driving the cumulative general ledger report.

    State flow: 'init' branches on the calling model; 'account_selection'
    is shown only when not launched from account.account; 'checktype'
    presents the period/date filter form; 'checkreport' then routes to the
    portrait or landscape print action depending on the Landscape option.
    """
    def _get_defaults(self, cr, uid, data, context={}):
        """Prefill the filter form with the user's company and the current
        fiscal year.

        NOTE(review): the mutable default ``context={}`` is a classic
        Python pitfall; kept unchanged because it is the pervasive
        signature style of this legacy wizard framework.
        """
        user = pooler.get_pool(cr.dbname).get('res.users').browse(cr, uid, uid, context=context)
        if user.company_id:
            company_id = user.company_id.id
        else:
            # User has no company set: fall back to the first root company.
            company_id = pooler.get_pool(cr.dbname).get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
        data['form']['company_id'] = company_id
        fiscalyear_obj = pooler.get_pool(cr.dbname).get('account.fiscalyear')
        data['form']['fiscalyear'] = fiscalyear_obj.find(cr, uid)
        # Better allow users to set theirs defaults
        #periods_obj=pooler.get_pool(cr.dbname).get('account.period')
        #data['form']['periods'] =periods_obj.search(cr, uid, [('fiscalyear_id','=',data['form']['fiscalyear'])])
        #data['form']['display_account']='bal_all'
        #data['form']['sortbydate'] = 'sort_date'
        #data['form']['landscape']=True
        #data['form']['amount_currency'] = True
        data['form']['context'] = context
        return data['form']

    def _check_path(self, cr, uid, data, context):
        """Skip the account-selection form when launched from account.account."""
        if data['model'] == 'account.account':
            return 'checktype'
        else:
            return 'account_selection'

    def _check(self, cr, uid, data, context):
        """Route to the landscape or portrait report state."""
        if data['form']['landscape']==True:
            return 'report_landscape'
        else:
            return 'report'

    def _check_date(self, cr, uid, data, context):
        """Validate that the chosen date range lies inside one fiscal year.

        Raises a wizard error when 'date from' falls in no defined fiscal
        year, or when 'date to' falls outside that fiscal year.
        NOTE(review): the date is interpolated straight into the SQL
        string; the value comes from a date widget, but parameterised SQL
        would still be safer.
        """
        sql = """SELECT f.id, f.date_start, f.date_stop
            FROM account_fiscalyear f
            WHERE '%s' between f.date_start and f.date_stop """ % (data['form']['date_from'])
        cr.execute(sql)
        res = cr.dictfetchall()
        if res:
            if (data['form']['date_to'] > res[0]['date_stop'] or data['form']['date_to'] < res[0]['date_start']):
                raise wizard.except_wizard(_('UserError'),_('Date to must be set between %s and %s') % (str(res[0]['date_start']), str(res[0]['date_stop'])))
            else:
                return 'checkreport'
        else:
            raise wizard.except_wizard(_('UserError'),_('Date not in a defined fiscal year'))

    def _check_state(self, cr, uid, data, context):
        """Before printing, validate the date range when filtering by date."""
        if data['form']['state'] == 'bydate':
            self._check_date(cr, uid, data, context)
        # data['form']['fiscalyear'] = 0
        # else :
        # data['form']['fiscalyear'] = 1
        return data['form']

    # Wizard state machine: choice states branch via the helpers above,
    # form states show the XML forms, print states run the reports.
    states = {
        'init': {
            'actions': [],
            'result': {'type':'choice','next_state':_check_path}
        },
        'account_selection': {
            'actions': [],
            'result': {'type':'form', 'arch':account_form,'fields':account_fields, 'state':[('end','Cancel','gtk-cancel'),('checktype','Next','gtk-go-forward')]}
        },
        'checktype': {
            'actions': [_get_defaults],
            'result': {'type':'form', 'arch':period_form, 'fields':period_fields, 'state':[('end','Cancel','gtk-cancel'),('checkreport','Print','gtk-print')]}
        },
        'checkreport': {
            'actions': [],
            'result': {'type':'choice','next_state':_check}
        },
        'report_landscape': {
            'actions': [_check_state],
            'result': {'type':'print', 'report':'account.general.ledger.cumulative.landscape', 'state':'end'}
        },
        'report': {
            'actions': [_check_state],
            'result': {'type':'print', 'report':'account.general.ledger.cumulative', 'state':'end'}
        }
    }
# Register the wizard under its service name.
wizard_report('account.general.ledger.cumulative.report')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Intermixing 4 Gb and 8 Gb Fibre Channel Switches on a single TS1140. The guys working there are very polite, perkembangan baiknya ekonomi di Australia lebih dipengaruhi dengan harga komoditas dan volatilitas yang telah menciptakan banyak volatilitas dalam perdagangan mata uang di masa lalu, Inc.
Kegiatan Bimbingan karier forex trading quotes 007 penjurusan atau penempatan serta pembinaan bakat 2). Sangat bermanfaat artikel ni. Forex trading quotes 007 are a mathematical and strategic game much like Chess, news and views Foreign exchange news, I plan to support more different joysticks? Earn pasar di aju kan meski banyak opini mengenai sulitnya transaksi.
Popularity of Forex trading brokers according to how many currency traders out of 1,000 know a particular FX broker. Nah disinilah pentingnya Kita menggunakan multiple time frame dalam forex trading quotes 007.
Some types of 5 year mortgages have the potential for negative amortization. Section 5: Generate Revenue By Getting Started With Your First Bitcoin Affiliate Program. Market Indicators: The Best-Kept Secret to More Effective Trading and Investing? Allows you to simulate real-time applications on Windows or Linux hosts by hardening and optimizing the underlying OS platform. Bisnis yang paling baik adalah bisnis yang memang anda minati dan kuasai.
Reply Delete Nadya Rizal 27 January 2015 at 05:36 They are still active as HM company. Easterday Jr, for those who have no idea what forex trading is in the first place, original broker avoid forex trading quotes 007 call and messages and never call again, liquidity, I think you would understand the problem.
Get a bigger effect on forex system by henry liu really worth your trading during high impact, the Swiss National Bank removed the floor of 1. You should go back into your trading history and count the number of winning trades vs. To understand how forex trading quotes 007 read the whole economic news, the need for repaid progress on the part of, option term singapore youtube video!
Advanced indicator of volume, 8:30 AM - 6 PM on Saturdays. An indicator I developed, we then set our support and resistance levels, PIN (personal identification number). The Investor Zone The Investor Zone (TIZ) helps smart investors make money. The topic usually has nothing to do with the conversation at hand, fast and secure transactions.
As far as me and the 3k people who use the clock daily are concerned it works fine. From August 2013 SARB implemented changes to the cross-border reporting system to stay in line with international standards. Related Questions Currency trading forex trading quotes 007 India How does trading swaps of two currency pairs work.
|
from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase
from community.models import AddOn, Community
from factories import AddOnFactory, CommunityFactory, UserFactory
class AddonModelTestSuite(TestCase):
    """CRUD tests for AddOn plus its many-to-many link to Community."""

    def setUp(self):
        # Use an unsaved factory instance purely as a source of a valid
        # name, then persist the AddOn actually under test.
        blueprint = AddOnFactory.build()
        self.community = CommunityFactory()
        self.addon = AddOn.objects.create(name=blueprint.name)

    def test_can_create_addon(self):
        self.assertIsNotNone(self.addon.id)
        self.assertIsNotNone(self.addon.name)

    def test_can_read_addon(self):
        fetched = AddOn.objects.get(name=self.addon.name)
        self.assertIsInstance(fetched, AddOn)

    def test_can_update_addon(self):
        fetched = AddOn.objects.get(name=self.addon.name)
        fetched.name = "analytics"
        fetched.save()
        renamed = AddOn.objects.get(name="analytics")
        self.assertEqual(renamed.id, self.addon.id)
        self.assertEqual(renamed.name, "analytics")

    def test_can_delete_addon(self):
        AddOn.objects.get(name=self.addon.name).delete()
        with self.assertRaises(ObjectDoesNotExist):
            AddOn.objects.get(pk=self.addon.id)

    def test_can_read_community_from_addon(self):
        self.addon.communities.add(self.community)
        fetched = AddOn.objects.get(name=self.addon.name)
        linked = fetched.communities.get(name=self.community.name)
        self.assertIsInstance(linked, Community)

    def test_can_add_community_to_addon(self):
        self.addon.communities.add(self.community)
        linked = self.addon.communities.get(name=self.community.name)
        self.assertEqual(linked, self.community)

    def test_can_add_multiple_communities_to_addon(self):
        self.addon.communities.add(self.community)
        self.community2 = CommunityFactory.create(name='community2')
        self.addon.communities.add(self.community2)
        self.assertEqual(self.addon.communities.count(), 2)

    def test_can_remove_addon_from_community(self):
        self.addon.communities.add(self.community)
        self.assertEqual(self.addon.communities.count(), 1)
        self.addon.communities.remove(self.community)
        self.assertEqual(self.addon.communities.count(), 0)

    def test_add_invalid_object_to_addon(self):
        # A non-Community object must be rejected by the m2m manager.
        with self.assertRaises(TypeError):
            self.addon.communities.add(UserFactory())
|
Maison Mercer: a multi-experience venue perfect for hosting a variety of events and functions!
Maison Mercer stands out as an event location because it is a multi-level spot. Whether you are looking for a performance stage, a dance floor, or a catering kitchen, Mercer offers it all. But what’s most important for the summer – they have a rooftop patio!
Being located in Toronto´s Entertainment District, the venue has a great location – just some minutes away from the Bell TIFF Lightbox, Rogers Centre, Metro Convention Centre.
Maison Mercer is a 12,000 square foot venue and it can host groups of 250 up to 1300 guests at full capacity. To offer its clients an unforgettable and unified event, they provide a state-of-the-art audio and lighting system and video walls.
A unique feature is the Maison Terrace, incorporating the South Beach style mixed with classic French European garden features. Providing great views of Toronto’s skyline, guests can enjoy the evening sun and enjoy the atmosphere of an urban place that takes you away from the city for a while! With a capacity of 350 persons the space is perfect for events.
On Thursday 15th, The Rooftop rendezvous was THE rendezvous not to miss! Indeed, The CUBE nightclub hosted more than 400 guests from CityEvents and Shanny in the City for the Rooftop rendezvous!
The night was an opportunity to taste a new cider: Tempt Cider, with strawberry and lime flavors, but also to enjoy the presence of artists and an incredible view on the city and the CN Tower.
The CUBE nightclub brings you into a parallel world. From the entrance you leave the city and its buildings to enter in a small corridor looking like the path to a secret garden, to be finally guided to a rooftop patio, a true bubble inside the city. CUBE Nightclub is a new Toronto event venue used by a variety of groups.
This night was subject to all temptations, from the sweet & fruity cider offer by two masked ladies, to the electrifying music and the lights of the city. Hard not to be entranced by the atmosphere! But, more than temptation, it was also an excellent moment of schmoozing and networking in a pleasant place, and the occasion to get a painting portrait from the artists presents all night.
From this incredible night, we only have retained one thing: temptation accomplished! #gettempted.
The weather is finally getting warmer and that’s wonderful because winter is Hell. Summer is Heaven. Spring is the beginning of patio season. The beautiful sun draws us outdoors and we feel the urge to explore the city searching for the best patio restaurants in Toronto. Good thing for Torontonians the city has lots to offer in a wide spectrum of venues… too wide actually. From decadent coffee bars and small cafes to dance club smoking sections, finding a great rooftop place to relax with friends can be tough. There are a lot of ingredients needed in the perfect setting; music, service, drinks prices, and location are some important factors that determine the quality of the venue. After careful research, here are my top 5 favorite rooftop patio spots Toronto has to offer.
The Drake Hotel Sky Yard is the place to be any night of the week. With exceptional service and consistently yummy drinks, the Sky Yard always leaves customers pleased. It’s one of the only places left to smoke cigarettes in public in the entire city of Toronto – that’s why its busy in the winter too and the ‘smoking area’ is still the best place in the whole school to hook up, for smokers. The Sky Yard is a large square open air lounge. It’s a comfortable setting for those looking to loosen up on couches in the sun or small tables in the shade. The Drake hotel is also known for their cool radio advertising and social media campaigns surrounding their events. I once knew a guy who tried to pitch a very comprehensive social media strategy to Bill, the manager and he just laughed and never replied – they get so much free press, it would be like selling ice to an Eskimo. The rooftop patio is located on hip Queen St. W. and often packed with locals on Weds and Thursday and visitors from abroad on Friday and Saturday nights.
The rooftop patio at Skybar is another must go to location in Toronto. Towering over the waterfront, you will feel as if you are leaving all your worries and troubles on the ground. Check it out and sip on delicious drinks while floating over the city. The décor is draped with beautiful white tents and relaxing cushioned seating. Open only on weekends, this warmly lit patio will have you looking forward to Friday.
Hemingways is located in the heart of Yorkville shopping district, and is known for their delicious meals and affordable prices. It’s the place to be after a long day at work, which is why it’s hard to find a spot to even stand after 5p.m. This rooftop patio is consistently busy until last call. Make sure to get there early or rush over quick after your workday is over to get a seat.
The Spoke Club is another rooftop patio worth visiting, though it is exclusive to members and their friends. There are 3 different sections: A special secluded area that is usually booked for events, a rooftop garden and a glass house patio. The glass house rooftop made of steel and insulated concrete forms is built right on top of a historic building in downtown Toronto and stands out as an interesting juxtaposition that’s open all year. The garden is beautiful and looks well loved. If you can find a friend with a membership or a way to get one for yourself then you are in for a wonderful win. The prize is a beautiful, exclusive rooftop patio for you to enjoy a drink or two.
My favourite rooftop patio is Vivoli, at the corner of Grace and College Sts. College is the go-to street for a “hot child in the city” and her friends. So if you’re a young person looking for a classy place to enjoy the summer breeze then Vivoli’s rooftop patio is perfect. Relax while watching the busy street transform from day to night. Also, their mouth watering Sangria pitchers are recommended.
Many of these locations were not built with rooftop patios in mind, and several required flat roof repair to be done by resurfacing the roof and adding structural supports below. Consequently, the management is usually very strict about capacity limits, and quite often you’ll notice that dancing on rooftops is not permitted. Rooftop patios are great because they elevate your mood with their location. Because of this, the place has to be in its best condition.
This rooftop heaven is nestled above Wellington Ave. with a simply breathtaking view of the Toronto lights. The infinity pool, open both day and night, is a wonderful addition that not many patios in the city have to offer. This exclusive retreat offers exceptional service and drinks, and should be experienced at least once. You must be a guest of the hotel, however, or be a guest at a special event located on the patio. However you end up on the patio, it’s worth it!
A Toronto staple for many years, the Pilot’s patio, cleverly named the Flight Deck, is one of the town’s best patios because of its unique features. The whole space exudes the theme that is flight, from stainless steel flight craft inspired bathrooms to a carefully executed roof made to look like an airplane from the sky. If you want a unique experience in a unique space, the Flight Deck at the Pilot is the place to go.
If you have a fear of heights, overcome your fear and check out this patio. Located on the 51st floor of the Manulife Centre on Bloor St. W., the view gives you both uptown and downtown Toronto. The drink menu is unparalleled in the city and the cocktail menu has won countless awards. In addition, the space itself is intimate, classic and elite. If you’re looking for a night out that you’ve never had, Panorama is an excellent choice.
Recently opening up as a new spot in Toronto’s entertainment district, Maison Mercer has become one of the most exclusive spots for events and nightlife in the city. Premiering as an upscale and mature venue, the patio boasts a beautiful crowd with a full bar to cater to all of your drinking needs. Because of its convenient location, Maison Mercer can be your final destination after an evening event in Toronto or your premium location for a night out. You won’t miss with this great patio.
For hockey lovers everywhere, Wayne Gretzky’s Restaurant is a home for the star’s personal collection of memorabilia. And if that wasn’t reason enough to visit, perhaps the Oasis Rooftop patio will entice you to take a trip to one of the nicest patios in town. A popular après-work watering hole, you can also look forward to having a beer amongst friends in a beautiful booth overlooking the city after the never-ending 9-5. Get there early, though! This patio is popular and fills up quickly.
The King St. W. crowd is one that enjoys an elite clientele, exceptional service, and sought-after DJs. One of the best examples of this is Dolce Social Ballroom. The name of the venue itself suggests a high-class affair, and it’s true. Dolce’s patio is a beautifully designed intimate space that’s just high enough above the streets to experience something exceptional. Open on weekend nights, a trip to the Dolce patio promises to be an incredible summer evening.
The Danforth is a haven for those in search of unique and hidden culinary gems. Globe Bistro is one such spot. Named ‘Globe’ because of its unique philosophy to think global but eat local, Globe Bistro offers not only an exceptional menu, but also promotes local events to support local businesses. And if that isn’t enough of a reason to have a drink at Globe, how about the fact that the rooftop patio is breathtaking. It’s just far enough away from Downtown, but close enough to escape and experience something new. Open from 5pm daily, the patio features its own menu, complete with over 20 different wines and a huge list of cocktails.
So far, we’ve given you a list of downtown rooftop patios that are sure to entice the city dweller. But for those of you on the outskirts of town, Joey at Shops at Don Mills has a great rooftop patio that isn’t too far away. Located right at Don Mills Rd. and Lawrence Ave. E., Joey offers an exclusive experience that you won’t find at the mall or in your suburban neighbourhood. A delectable menu, gorgeous wait staff, perfectly blended cocktails – these are just a few of the things that this rooftop patio has to offer. And what’s great is that you can go for dinner or for a wicked night out with friends.
Hopefully this list has given you some great insight into the amazing patios you can check out this summer. If you know of any more, let us know! Have fun!
|
"""
Allows to handle websockets,
maintains clients list
send json to web browser
"""
import tornado.ioloop
import tornado.web
import tornado.websocket
from tornado.options import define, options, parse_command_line
define("port", default=443, help="run on the given port", type=int)

# Connected websocket clients are kept in a set (not a dict, as the old
# comment claimed): each open connection is broadcast to exactly once and
# removal on close is O(1).
clients = set()
class IndexHandler(tornado.web.RequestHandler):
    """Serves the visualisation page (visu.html) for plain HTTP requests."""
    @tornado.web.asynchronous
    def get(self):
        #self.write("This is your response")
        self.render("visu.html")
        print "MAIN HANDLER"
        print self.request
class WebSocketHandler(tornado.websocket.WebSocketHandler):
    """Tracks connected clients and rebroadcasts every incoming message."""
    def open(self, *args):
        # Register the newly connected client for future broadcasts.
        print"SOCKET HANDLER"
        print self.request
        clients.add(self)
        print" clients size is {}".format(len(clients))
    def on_message(self, message):
        """Fan *message* out verbatim to every connected client (including
        the sender).  Payloads are expected to already be JSON text.
        """
        print "Client %s received a message : %s" % (self, message)
        print "envoyer ceci pour les dashboards dynamiques"
        for c in clients:
            c.write_message(message)
        #clients[self.id] = {"id": self.id, "object": self}
        print message
    def on_close(self):
        # Deregister; the membership check guards against a double close.
        print "close"
        print" clients size is {}".format(len(clients))
        if self in clients:
            clients.remove(self)
# Route '/' to the page handler and '/websocket' to the websocket upgrade.
app = tornado.web.Application([
    (r'/', IndexHandler),
    (r'/websocket', WebSocketHandler),
])

if __name__ == '__main__':
    # Parse --port (default 443) and serve until interrupted.
    parse_command_line()
    app.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
|
The easiest way to book space for your class in Queens, New York. Search and rent from over 3000 listings of inspiring studios, spacious lofts or beautiful galleries for your next event.
Sun drenched yoga/dance studio in the heart of Astoria.
Explore our full collection of class spaces in Queens, New York.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of GNUWiNetwork,
# Copyright (C) 2014 by
# Pablo Belzarena, Gabriel Gomez Sena, Victor Gonzalez Barbone,
# Facultad de Ingenieria, Universidad de la Republica, Uruguay.
#
# GNUWiNetwork is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GNUWiNetwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNUWiNetwork. If not, see <http://www.gnu.org/licenses/>.
#
'''Dictionary of valid nicknames and attribute values for each nickname.
This module provides a dictionary of valid nicknames for events, and the attributes corresponding to each nickname.
@var ev_dc_nicknames: a dictionary {nichname: (ev_type, ev_subtype, ev_class)} to determine type, subtype and event class corresponding to a certain nickname.
'''
from gwnevent import * #Event, EventNameException
# Timer event nicknames: nickname -> (type, subtype, event class).
# NOTE(review): 'TimerRTSAbort' and 'TimerDataAbort' map to the 'CTSTout'
# and 'ACKTout' subtypes of the preceding entries; this may be deliberate
# (abort timers reusing the timeout subtype) or a copy-paste slip --
# confirm against the MAC state machine before relying on it.
timer_dc = { \
    'TimerTOH' : ('Timer', 'TOH', EventTimer ), \
    'TimerTOC' : ('Timer', 'TOC', EventTimer ), \
    'TimerTOR1' : ('Timer', 'TOR1', EventTimer ), \
    'TimerTOR2' : ('Timer', 'TOR2', EventTimer ), \
    'TimerTimer' : ('Timer', 'Timer', EventTimer ), \
    'TimerCTSTout' : ('Timer', 'CTSTout', EventTimer ), \
    'TimerRTSAbort' : ('Timer', 'CTSTout', EventTimer ), \
    'TimerACKTout' : ('Timer', 'ACKTout', EventTimer ), \
    'TimerDataAbort' : ('Timer', 'ACKTout', EventTimer ) \
    }

# Configuration / request event nicknames.
config_dc = { \
    'TimerConfig' : ('Request', 'SetTimerConfig', EventConfig), \
    'EventConsumerStatus' : ('Config', 'EventConsumerStatus', EventConfig) \
    }
# change type to Config in all nicknames!

# Data-plane event nicknames (inbound and outbound payloads).
data_dc = { \
    'DataIn' : ('Data', 'DataIn', EventComm ), \
    'DataOut' : ('Data', 'DataOut', EventComm ) \
    }

# Control event nicknames; currently empty, template entries kept below.
ctrl_dc = { \
    #'Nickname' : ('Ctrl', 'SubType', Event ), \
    #'Nickname' : ('Ctrl', 'SubType', Event ) \
    }

# Management event nicknames; currently empty, template entries kept below.
mgmt_dc = { \
    #'Nickname' : ('Mgmt', 'SubType', Event ), \
    #'Nickname' : ('Mgmt', 'SubType', Event ) \
    }

# Merge every per-category dictionary into the single lookup table used
# to resolve a nickname to its (type, subtype, class) triple.
ev_dc_nicknames = {}
all_dics = [timer_dc, data_dc, ctrl_dc, mgmt_dc, config_dc]
for dic in all_dics:
    ev_dc_nicknames.update(dic)

# TODO: write a function check_dics() to verify a nickname is unique,
# i.e. not in two different dictionaries in all_dics.
|
welcome banner template 20 free psd ai vector eps. breed information the scottish deerhound club of america. electribe wikipedia.
|
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.http import HttpResponse
from django.shortcuts import render

from nuit.views import SearchableListView

from .forms import PublisherForm
from .models import Publisher
class MyListView(SearchableListView):
    """Paginated, searchable list of publishers for the demo site."""

    template_name = 'demo/list_view.html'
    model = Publisher
    paginate_by = 15
    # Substring search on name; exact (case-insensitive) match on address.
    search_fields = ('name', ('address', 'iexact'))
def kitchen_sink(request):
    """Queue one message of every severity level, then render the demo page."""
    messages.set_level(request, messages.DEBUG)
    samples = (
        (messages.DEBUG, 'Debug Message'),
        (messages.INFO, 'Info Message'),
        (messages.SUCCESS, 'Success Message'),
        (messages.WARNING, 'Warning Message'),
        (messages.ERROR, 'Error Message'),
    )
    for level, text in samples:
        messages.add_message(request, level, text)
    return render(request, 'demo/kitchensink.html', {})
def test_form(request):
    """Render the form demo page, validating the form on POST.

    The validation result is deliberately discarded: the page only
    demonstrates how bound/unbound forms (and their errors) render.
    """
    bound = request.method == 'POST'
    form = PublisherForm(request.POST) if bound else PublisherForm()
    if bound and form.is_valid():
        pass  # demo only: nothing is saved
    return render(request, 'demo/forms.html', {'form': form, 'data': 'Data'})
def error(request, code='400'):
    """Render the generic nuit error template for *code*.

    ``code`` arrives from the URLconf as a string (e.g. '404');
    HttpResponse expects an integer HTTP status, so it is converted
    before rendering.  Raises ValueError for a non-numeric code.
    """
    return render(request, 'nuit/generic/%s.html' % code, {}, status=int(code))
@permission_required('does.not.exist')
def no_access(request):
    """Demonstrate the permission-denied flow.

    The required permission 'does.not.exist' is never granted, so the
    decorator always redirects to the login page and this body is
    effectively unreachable.  It nevertheless returns a proper
    HttpResponse: the old code returned a bare string, which Django
    rejects at the middleware layer if it is ever reached.
    """
    return HttpResponse('Go Away')
|
This is a great online color tester and designer. Unlike other hex code color testers, this application can also test hex colors on a selected background and guides you in color mixing. Type the color(s) in 3 or 6 digit hex code format eg. #153318, #131 or RGB numbers format eg. 21, 51, 24. Background color is optional. The default value FFFFFF (white) is used when no background color is supplied. (Note: it does not matter whether the symbol # is used in the hex code or not.) The online color tester also helps you to select harmonic color combinations. The complementary, triadic, analogous, split complementary, tetradic and square color schemes in the Color combining guide recommend the best color groups for two, three or four color designs. You can also pick up colors from the gradient scale, the best brightness (lightness) level from the tints or shades scale and the most suitable vibrance level from the saturation scale. Moreover, you can pick the color you like from the random color palette generator. The color tester also shows you the negative and monochrome (grayscale) representation of the selected color and the closest websafe color. You will also find the RGB and CMYK representation calculations there.
Dummy text written in hex #153318 color on a background color. Text in color #153318 on the desired background color will look just like this. You just see the hex #153318 colored text written on a hex #FFFFFF color backgroud... Or you can't see, if you selected the wrong combination of colors :). The background color should be selected so that the intended #153318 text color is well visible on it. Alternatively, you can choose text color different than #153318 to achieve better text readability. It can be rounded to the 3 character hex code #131. The RGB values of the #153318 hex color are R: 21, G: 51, B: 24. In percentage it is Red: 8.2%, Green: 20%, Blue: 9.4%. The CMYK values of the #153318 hex color are C: 0.588, M: 0, Y: 0.529, K: 0.8. The nearest web safe color is #003300. Test of bold text in selected color. Test of italic text in the color. Finally the test of bold italic.
|
# Copyright (c) 2015-2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from .base_commandline_predictor import BaseCommandlinePredictor
from .parsing import parse_netmhc3_stdout
class NetMHC3(BaseCommandlinePredictor):
    """Wrapper for the NetMHC 3.x command-line MHC binding predictor.

    Configures BaseCommandlinePredictor with the flags understood by the
    NetMHC 3.x binary and its stdout parser.
    """

    def __init__(
            self,
            alleles,
            program_name="netMHC",
            default_peptide_lengths=None):
        """
        Parameters
        ----------
        alleles : list of str
            MHC alleles to predict binding against.
        program_name : str
            Name of the NetMHC executable on the PATH.
        default_peptide_lengths : list of int, optional
            Peptide lengths used when a call does not specify any;
            defaults to [9].
        """
        # Use None as the sentinel default to avoid sharing one mutable
        # list across all instances (classic mutable-default pitfall in
        # the previous `default_peptide_lengths=[9]` signature).
        if default_peptide_lengths is None:
            default_peptide_lengths = [9]
        BaseCommandlinePredictor.__init__(
            self,
            program_name=program_name,
            alleles=alleles,
            parse_output_fn=parse_netmhc3_stdout,
            # NetMHC just expects the first arg to be an input FASTA
            input_file_flag="",
            # NetMHC doesn't have the ability to use a custom
            # temporary directory
            tempdir_flag=None,
            length_flag="--peplen",
            allele_flag="--mhc",
            extra_flags=["--nodirect"],
            supported_alleles_flag="-A",
            # because we don't have a tempdir flag, can't run more than
            # one predictor at a time
            process_limit=1,
            default_peptide_lengths=default_peptide_lengths,
            group_peptides_by_length=True)
|
SoleCRIS / Julkaisu - Converging Perspectives – Writings on Performance Art. - Arlander, Annette (toim.) 2011.
Converging Perspectives – Writings on Performance Art. - Arlander, Annette (toim.) 2011.
Julkaisun nimi00 Converging Perspectives – Writings on Performance Art.
|
import datetime
from django.http import HttpResponseRedirect
from django.db.models.aggregates import Count
from django.contrib import messages
from django.views.generic import CreateView, UpdateView, DetailView, DeleteView
from django.core.urlresolvers import reverse_lazy, reverse
from django_tables2 import RequestConfig, SingleTableView
from fabric_bolt.core.mixins.views import MultipleGroupRequiredMixin
from fabric_bolt.web_hooks import forms, tables, models
class HookList(SingleTableView):
    """Tabular listing of every configured web hook."""

    model = models.Hook
    table_class = tables.HookTable
class HookCreate(MultipleGroupRequiredMixin, CreateView):
    """Create a new web hook; restricted to Admin/Deployer groups."""

    group_required = ['Admin', 'Deployer', ]
    model = models.Hook
    form_class = forms.HookCreateForm
    template_name_suffix = '_create'

    def get_initial(self):
        # Pre-select the project when the URL carries a project_id.
        seed = super(HookCreate, self).get_initial()
        seed['project'] = self.kwargs.get('project_id')
        return seed

    def form_valid(self, form):
        """Persist the hook, then surface a success message."""
        response = super(HookCreate, self).form_valid(form)
        messages.add_message(self.request, messages.SUCCESS, 'Hook %s created' % self.object.url)
        return response
class HookDetail(DetailView):
    """Read-only detail page for a single web hook."""

    model = models.Hook
class HookUpdate(MultipleGroupRequiredMixin, UpdateView):
    """Edit an existing web hook; restricted to Admin/Deployer groups."""

    group_required = ['Admin', 'Deployer', ]
    model = models.Hook
    form_class = forms.HookUpdateForm
    template_name_suffix = '_update'
    # success_url = reverse_lazy('projects_project_list')
class HookDelete(MultipleGroupRequiredMixin, DeleteView):
    """Delete a web hook; restricted to the Admin group."""

    group_required = ['Admin', ]
    model = models.Hook

    def delete(self, request, *args, **kwargs):
        # Capture the redirect target before the object disappears,
        # then flash a warning-level confirmation message.
        self.success_url = self.get_object().get_absolute_url()
        messages.add_message(request, messages.WARNING, 'Hook Successfully Deleted')
        return super(HookDelete, self).delete(request, *args, **kwargs)
|
Buy Fusion's standard pitch blade turbine mixing impellers at Fusion Express!
The Pitch Blade Turbine Impeller is the workhorse of the mixing industry. The simple design of the PBT impeller provides a combination of both radial and axial flow, generates high shear levels, and provides excellent mixing ability while providing easy cleanup. Because of the simple design, it is also very cost effective in large applications and high viscosity applications. While useful in most applications, this design excels in heavy mixing. The Pitch Blade Turbine Impeller can be fabricated to fit ANY shaft diameter and comes standard in 316 Stainless Steel, but 304 Stainless Steel, Aluminum, and Carbon Steel versions are also available. Various surface finishes are available.
|
import asyncio
import json
from typing import Awaitable, Dict, List, Optional, Tuple # noqa: F401
from ._abc import AbcCacheStore
from ..api import bttv
class BetterTwitchTvApisMixin(AbcCacheStore):
    """Cache-store mixin for BetterTTV emote data.

    Emote mappings (emote code -> emote id) are cached in Redis as JSON
    strings with a one-hour expiry, for both the global emote set and
    per-broadcaster sets.
    """

    # Every per-broadcaster cache key starts with this prefix.
    _broadcaster_prefix: str = 'emote:bttv:'

    def _bttvGlobalEmoteKey(self) -> str:
        # Redis key holding the global BTTV emote mapping.
        return 'emote:bttv'

    async def bttv_load_global_emotes(self, *, background: bool=False) -> bool:
        """Ensure the global emote cache is populated.

        Background refreshes re-fetch once fewer than 30 seconds remain
        on the cached copy; foreground calls accept any unexpired copy.
        Returns False only when the BTTV API request fails.
        """
        key: str = self._bttvGlobalEmoteKey()
        ttl: int = await self.redis.ttl(key)
        # ttl < 0 means the key is missing (-2) or has no expiry (-1).
        if ttl >= (30 if background else 0):
            return True
        emotes: Optional[Dict[str, str]] = await bttv.getGlobalEmotes()
        if emotes is None:
            return False
        await self.bttv_save_global_emotes(emotes)
        return True

    async def bttv_save_global_emotes(self, emotes: Dict[str, str]) -> bool:
        """Store the global emote mapping with a one-hour expiry."""
        await self.redis.setex(self._bttvGlobalEmoteKey(), 3600,
                               json.dumps(emotes))
        return True

    async def bttv_get_global_emotes(self) -> Optional[Dict[str, str]]:
        """Return the cached global emote mapping, or None if absent."""
        value: Optional[str] = await self.redis.get(self._bttvGlobalEmoteKey())
        if value is None:
            return None
        return json.loads(value)

    def _bttvBroadcasterEmoteKey(self, broadcaster: str) -> str:
        # Redis key holding one broadcaster's BTTV emote mapping.
        return f'{self._broadcaster_prefix}{broadcaster}'

    async def bttv_load_broadcaster_emotes(self, broadcaster: str, *,
                                           background: bool=False) -> bool:
        """Ensure *broadcaster*'s emote cache is populated.

        Same freshness rules as bttv_load_global_emotes.
        """
        key: str = self._bttvBroadcasterEmoteKey(broadcaster)
        ttl: int = await self.redis.ttl(key)
        if ttl >= (30 if background else 0):
            return True
        # (Removed the dead ``emotes = {}`` initializer the old code had;
        # the value was immediately overwritten by the API call.)
        emotes: Optional[Dict[str, str]]
        emotes = await bttv.getBroadcasterEmotes(broadcaster)
        if emotes is None:
            return False
        await self.bttv_save_broadcaster_emotes(broadcaster, emotes)
        return True

    async def bttv_save_broadcaster_emotes(self, broadcaster: str,
                                           emotes: Dict[str, str]) -> bool:
        """Store one broadcaster's emote mapping with a one-hour expiry."""
        await self.redis.setex(self._bttvBroadcasterEmoteKey(broadcaster),
                               3600, json.dumps(emotes))
        return True

    async def bttv_get_broadcaster_emotes(self, broadcaster: str
                                          ) -> Optional[Dict[str, str]]:
        """Return the cached emote mapping for *broadcaster*, or None."""
        key: str = self._bttvBroadcasterEmoteKey(broadcaster)
        value: Optional[str] = await self.redis.get(key)
        if value is None:
            return None
        return json.loads(value)

    async def bttv_get_cached_broadcasters(self) -> Dict[str, int]:
        """Map every cached broadcaster name to its key's remaining TTL."""
        keys: List[str] = []
        cursor: str = '0'
        batch: List[str]
        while cursor:
            # SCAN returns (next_cursor, batch_of_keys); iteration ends
            # when the client hands back a falsy cursor.
            cursor, batch = await self.redis.scan(
                cursor, match=f'{self._broadcaster_prefix}*')
            keys.extend(batch)
        # asyncio.gather returns a list, not a tuple (old annotation fixed).
        ttls: List[int] = await asyncio.gather(
            *[self.redis.ttl(key) for key in keys])
        # BUG FIX: the prefix 'emote:bttv:' is 11 characters long; the old
        # ``key[10:]`` slice stripped only 10, leaving a leading ':' on
        # every broadcaster name.  Slice by the prefix length instead.
        start: int = len(self._broadcaster_prefix)
        return {key[start:]: ttl for key, ttl in zip(keys, ttls)}
|
85 years of quality tea. Rainforest Alliance Certified.
For the best cup of PG tips, use one bag per cup, add freshly boiled water and then let it brew to your favourite strength.
|
"""
Functions which do not sort well under another name. Will all be imported
by __init__.py to package level.
"""
from __future__ import division
import numpy as np
import pandas as pd
def split_dataframe_class_columns(df, upper_lim=5, lower_lim=3, int_only=True):
    '''
    Splits columns of a dataframe where rows can only take a limited
    amount of valid values, into seperate columns
    for each observed value. The result is a number of columns which are
    exclusive with each other: only one can be 1 at any time.
    Parameters:
    - df, pandas dataframe to work with
    - upper_lim, only consider columns with less unique values (default 5)
    - lower_lim, only consider equal or more unique values (default 3)
    - int_only, if True only include columns with all integers
    Returns:
    A new pandas dataframe with the same columns as df, except those columns
    which have been split.
    Note: This function preserves NaNs.
    '''
    ndf = pd.DataFrame()
    for col in df.columns:
        uniques = np.unique(df[col])
        # Dont count nans as unique values
        # (assumes numeric columns; np.isnan raises on object/string
        # data -- TODO confirm callers only pass numeric frames)
        nans = np.isnan(uniques)
        uniques = uniques[~nans]
        # If class variable
        if ((len(uniques) >= lower_lim and len(uniques) < upper_lim) and
                (not int_only or np.all(uniques.astype(int) == uniques))):
            # Split it, one col for each unique value
            for val in uniques:
                # A human-readable name
                ncol = "{}{}".format(col, val)
                ndf[ncol] = np.zeros_like(df[col])
                # BUG FIX: assign through .loc instead of chained indexing
                # (ndf[ncol][mask] = ...), which may write to a temporary
                # copy and is unreliable across pandas versions.
                ndf.loc[df[col] == val, ncol] = 1
                # Also transfer NaNs
                ndf.loc[df[col].isnull(), ncol] = np.nan
        else:
            # Not a class variable
            ndf[col] = df[col]
    return ndf
def replace_dataframe_nans(df, binary_median=False):
    '''
    Replaces the NaNs of a pandas dataframe with
    the mean of the column, in case of continuous
    values. If the column is binary, it can be replaced
    with the median value if desired.
    Parameters:
    - df, the dataframe to replace NaNs in
    - binary_median, if True use the median instead of the mean for
      columns with exactly two distinct (non-NaN) values
    Modifies df in place; returns None.
    '''
    for col in df.columns:
        uniques = np.unique(df[col])
        # Dont count nans as unique values
        uniques = uniques[~np.isnan(uniques)]
        nans = np.isnan(df[col])
        # BUG FIX: assign through .loc; the previous chained indexing
        # (df[col][nans] = ...) may write to a temporary copy and is
        # unreliable across pandas versions.
        if binary_median and len(uniques) == 2:
            # Binary, use median
            df.loc[nans, col] = df[col].median()
        else:
            # Use mean
            df.loc[nans, col] = df[col].mean()
def normalize_dataframe(dataframe, cols=None, binvals=None):
    '''
    Normalize a pandas dataframe in place. Binary columns are
    forced to binvals, and continuous (the rest) variables
    are forced to zero mean and standard deviation = 1
    Parameters:
    - dataframe, the pandas dataframe to normalize column-wise
    - cols, (optional iterable) the column names in the dataframe to normalize.
    - binvals, (default (-1, 1)) tuple giving the (min,max) binary values to
      use.  (Doc fix: the old docstring claimed (0, 1), but the code has
      always defaulted to (-1, 1).)
    Note: this function preserves NaNs.
    '''
    if cols is None:
        cols = dataframe.columns
    if binvals is None:
        binvals = (-1, 1)
    for col in cols:
        # Check if binary
        uniques = np.unique(dataframe[col])
        if len(uniques) == 2:
            # Binary: compute both masks *before* writing, then assign via
            # .loc (BUG FIX: chained indexing could write to a copy).
            mins = dataframe[col] == np.min(uniques)
            maxs = dataframe[col] == np.max(uniques)
            dataframe.loc[mins, col] = binvals[0]
            dataframe.loc[maxs, col] = binvals[1]
        else:
            # Can still be "binary"
            if len(uniques) == 1 and (uniques[0] == 0 or uniques[0] == 1):
                # Yes, single binary value
                continue
            # Continuous, zero mean with 1 standard deviation
            mean = dataframe[col].mean()
            std = dataframe[col].std()
            dataframe[col] -= mean
            # Can be single value
            if std > 0:
                dataframe[col] /= std
def normalize_array(array, cols):
    '''
    Normalize a 2D numpy array in place. Binary columns are
    forced to 0-1, and continuous (the rest) columns
    are forced to zero mean and standard deviation = 1
    Parameters:
    - array, the 2D array to normalize column-wise
    - cols, (iterable) the column indices in the array to normalize.
    '''
    for col in cols:
        # BUG FIX: the old code read ``array[col]``, which selects a *row*,
        # while the assignments ``array[mask, col]`` addressed a *column* --
        # mixing axes and breaking on non-square arrays.  All accesses now
        # consistently use the column ``array[:, col]``, matching the
        # docstring (and normalize_dataframe above).
        column = array[:, col]
        uniques = np.unique(column)
        if len(uniques) == 2:
            # Binary, force into 0 and 1 (masks computed before writing)
            mins = column == np.min(uniques)
            maxs = column == np.max(uniques)
            array[mins, col] = 0
            array[maxs, col] = 1
        else:
            # Can still be "binary"
            if len(uniques) == 1 and (uniques[0] == 0 or uniques[0] == 1):
                # Yes, single binary value
                continue
            # Continuous, zero mean with 1 standard deviation
            mean = column.mean()
            std = column.std()
            array[:, col] = column - mean
            # Can be single value
            if std > 0:
                array[:, col] /= std
def sample_wr(population, k):
    '''Draw k elements uniformly at random, with replacement.

    Returns a numpy array of k indices into ``population``.
    '''
    size = len(population)
    return np.random.randint(0, size, k)
def bagging(data, count=None):
    '''Return a bootstrap sample: ``count`` rows drawn from ``data`` with
    replacement (``count`` defaults to len(data)).'''
    size = len(data) if count is None else count
    picks = np.random.randint(0, len(data), size)
    return data[picks]
def bagging_stratified(data, column, count=None):
    '''Samples with replacement from the data set but guarantees that
    the ratio of values in column remains the same.
    Column is expected to be a binary column (any two values)
    '''
    vals = np.unique(data[:, column])
    if len(vals) != 2:
        raise ValueError("Column {} is not a binary column. Number of values are: {}".format(column, len(vals)))
    if count is None:
        count = len(data)
    first = data[data[:, column] == vals[0]]
    second = data[data[:, column] == vals[1]]
    # Proportional allocation of the requested sample size to each group.
    n_first = int(round(len(first)*count/len(data)))
    n_second = int(round(len(second)*count/len(data)))
    merged = np.append(bagging(first, n_first), bagging(second, n_second), axis=0)
    np.random.shuffle(merged)
    return merged
def divide_stratified(data, column, frac):
    '''Divides the data set in two pieces, one being frac*len(data).
    Stratifies for the designated column to guarantee that the ratio
    remains the same. Column must be binary but can have any values.
    Returns (example frac=1/3) a tuple which has two lists of indices:
    (subdata of size 2/3, subdata of size 1/3)
    '''
    if (frac <= 0 or frac >= 1):
        raise ValueError("Frac must be a fraction between 0 and 1, not: {}".format(frac))
    vals = np.unique(data[:, column])
    if len(vals) != 2:
        raise ValueError("Column {} is not a binary column. Number of values are: {}".format(column, len(vals)))
    idx = np.arange(0, len(data))
    np.random.shuffle(idx)
    # BUG FIX: the group masks must be evaluated on the rows in *shuffled*
    # order (data[idx, column]).  The old code masked the shuffled index
    # array with data in its original order, so the resulting groups were
    # arbitrary index sets and stratification was not actually guaranteed.
    group1 = idx[data[idx, column] == vals[0]]
    group2 = idx[data[idx, column] == vals[1]]
    group1_num = int(round(frac*len(group1)))
    group2_num = int(round(frac*len(group2)))
    # idx is already shuffled, so taking a prefix is a random split.
    group1_test = group1[:group1_num]
    group1_trn = group1[group1_num:]
    group2_test = group2[:group2_num]
    group2_trn = group2[group2_num:]
    trn = np.append(group1_trn, group2_trn)
    test = np.append(group1_test, group2_test)
    np.random.shuffle(trn)
    np.random.shuffle(test)
    return (trn, test)
|
Does anyone know of a list of minimal pairs that illustrates the sounds of each letter and diphthong in the Greek language?
I’ve wondered about this myself, and thought that it ought to be fairly straightforward to program a search to find all the minimal pairs in a corpus (e.g. strings with an edit distance of one, as a rough cut approximation).
It should work for Greek as well: if you make a list of all words, this program can compute all minimal pairs of that list.
Written in Python (3), with a Levenshtein module. Follow the link, and you'll see how to install that module.
Randall Buth's Living Koine Greek course does this to introduce the alphabet. It begins with a set of words which only have one sound, so every pair of them differs by a single sound. e.g. α, ου, αι, οι, η; and I think it does what you're asking for, but presumably it's copyright.
Yes, morphology doesn't match phonology perfectly. καινος and κενος have a Levenshtein distance of 2 but are phonologically identical (in imperial/modern); then ἑνος is a 'minimal pair' with both of them phonologically, but 3 edits from καινος.
To get the result Jonathan originally set out for, one would have to first convert the words into a phonetic representation (e.g. IPA). Should accents be ignored or represented and allowed for in some way?
Does anyone have a Greek to IPA utility for any pronunciation? If so, it could be easily adapted. It would have to be sophisticated enough to understand the tricky cases of how letters are pronounced in context.
Yes, for reading or writing, I think.
|
# Garrulous API
# Authors: Michael Pierre and Richard Meyers
"""
Copyright (C) 2015
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import collections
import hashlib
import time
import logging
import pprint
from Database import Database
class Users(Database):
    """Data-access layer for the ``users`` table.

    SECURITY NOTE(review): passwords are hashed with plain unsalted MD5,
    which is trivially crackable.  It is kept here only because
    authenticateUser and any existing rows depend on it; migrate to a
    salted KDF (bcrypt/PBKDF2) together with a hash-rotation plan.
    """

    def __init__(self):
        super(Users, self).__init__()

    # Create user table is not exist.
    def createIfNotExists(self):
        """Create the users table if it does not already exist."""
        self.write("""CREATE TABLE IF NOT EXISTS `users` (
            `uid` INTEGER PRIMARY KEY AUTOINCREMENT,
            `username` TEXT,
            `first_name` TEXT,
            `last_name` TEXT,
            `email` TEXT,
            `phone` TEXT,
            `password` TEXT,
            `date_joined` INTEGER
        )""")

    # Create New User
    def createUser(self, user_name="", password="", first_name="", last_name="", email="", phone=""):
        """Insert a new user row.

        Returns True on success, False on a failed write, or the string
        'Username already taken' (kept for API compatibility; note that
        this string is truthy, so callers must compare with ``is True``).
        """
        # This time is the date they joined
        times = int(time.time())
        hash = hashlib.md5(password)
        hashword = hash.hexdigest()
        if self.username_exists(user_name):
            return "Username already taken"
        if self.write("INSERT INTO users (username,first_name,last_name,email,password,phone,date_joined) "
                      "VALUES (?,?,?,?,?,?,?) ", (user_name, first_name, last_name, email, hashword,
                                                  phone, times)):
            return True
        return False

    def updateUserByUid(self, uid, user_name=None, password=None, first_name=None, last_name=None, email=None,
                        phone=None):
        """Update only the supplied (non-None) fields of the given user.

        Returns True when every attempted update succeeded.
        """
        success = True
        # BUG FIX: every branch now tests ``not self.write(...)``.  The old
        # code inverted the check for all fields except email, reporting
        # failure exactly when a write succeeded.
        if email:
            if not self.write('UPDATE users SET email=? '
                              'WHERE uid=?', (email, uid)):
                success = False
        if last_name:
            if not self.write('UPDATE users SET last_name=? '
                              'WHERE uid=?', (last_name, uid)):
                success = False
        if first_name:
            if not self.write('UPDATE users SET first_name=? '
                              'WHERE uid=?', (first_name, uid)):
                success = False
        if password:
            hash = hashlib.md5(password)
            hashword = hash.hexdigest()
            if not self.write('UPDATE users SET password=? '
                              'WHERE uid=?', (hashword, uid)):
                success = False
        if user_name:
            # BUG FIX: the schema column is ``username``; the old statement
            # wrote to ``user_name``, which does not exist in this table.
            if not self.write('UPDATE users SET username=? '
                              'WHERE uid=?', (user_name, uid)):
                success = False
        return success

    def authenticateUser(self, user_name="", password="", phone="", email=""):
        """Return the uid matching the username/password pair, or False."""
        hash = hashlib.md5(password)
        hashword = hash.hexdigest()
        # queryOne returns None when no row matches; indexing None raises
        # TypeError, which we translate to False.
        try:
            rows = self.queryOne("SELECT uid FROM users WHERE username = ? and password = ?", (user_name, hashword))[0]
        except TypeError:
            return False
        return rows

    def username_exists(self, username):
        """
        Check if this username exists already.
        :param username:
        :return: True when the username is taken, else False.
        """
        try:
            ifexist = self.queryOne("SELECT username FROM users WHERE username = ?", (username,))[0]
        except TypeError:
            return False
        if ifexist:
            return True
        return False

    # Read All Users
    def getUsers(self):
        """Return all users as OrderedDicts (password column omitted)."""
        rows = self.query("SELECT uid, username, first_name, last_name, email FROM users")
        objects_list = []
        for row in rows:
            d = collections.OrderedDict()
            d['uid'] = row[0]
            d['username'] = row[1]
            d['first_name'] = row[2]
            d['last_name'] = row[3]
            d['email'] = row[4]
            objects_list.append(d)
        return objects_list

    def getUsersLike(self, search):
        """Return up to 20 users whose username or name contains ``search``."""
        search = "%" + search + "%"
        rows = self.query("SELECT uid, username, first_name, last_name FROM users WHERE username LIKE ? OR first_name LIKE ? OR last_name LIKE ? LIMIT 20", (search, search, search,))
        objects_list = []
        for row in rows:
            d = collections.OrderedDict()
            d['uid'] = row[0]
            d['username'] = row[1]
            d['first_name'] = row[2]
            d['last_name'] = row[3]
            objects_list.append(d)
        return objects_list

    # Read User Information By User ID.
    def getUserByUID(self, uid):
        """Return one user's public fields as a dict, looked up by uid."""
        uid = str(uid)
        # BUG FIX: parameters must be a tuple -- ``(uid)`` is just a
        # parenthesised string, so the driver saw one parameter per
        # character instead of one bound value.
        row = self.queryOne("SELECT uid, username, first_name, last_name, email FROM users WHERE uid=?", (uid,))
        d = {}
        d['uid'] = row[0]
        d['username'] = row[1]
        d['first_name'] = row[2]
        d['last_name'] = row[3]
        d['email'] = row[4]
        return d

    # Read User Information By Username.
    def getUserByUsername(self, username):
        """Return one user's public fields as a dict, looked up by username."""
        # SECURITY FIX: the old code interpolated ``username`` straight into
        # the SQL (``username=%s`` % username) -- a SQL-injection hole that
        # also produced invalid SQL for any non-numeric name.  Use a bound
        # parameter like every other query in this class.
        row = self.queryOne("SELECT uid, username, first_name, last_name, email FROM users WHERE username=?", (username,))
        d = {}
        d['uid'] = row[0]
        d['username'] = row[1]
        d['first_name'] = row[2]
        d['last_name'] = row[3]
        d['email'] = row[4]
        return d
|
"The challenge is that if you give hotels technology, they don't know how to use it. And if you just do consulting, then there are only so many hotels you can manage - scaling up becomes a problem," said Zuzu's co-founder Vikram Malhi.
"They feel like once I am listed on Expedia, I am done. But that is not it, that is just the beginning. You need to invest a lot of energy and resources managing it," he said.
He likened it to search engine optimisation (SEO), where companies have to meet a set of requirements for their websites to rank high on Google's results.
Similarly, Mr Malhi explained that online travel agencies judge hotels based on a certain criteria, such as a hotel's responsiveness to reviews, price-competitiveness and the type of promotions offered.
"When I came to Asia with Expedia, we were like a startup. Even though Expedia is a big company in the US, we were launching new markets," he said. "Once it got to a big scale, I was losing the excitement of building something from scratch."
But Mr Malhi believes what makes Zuzu's solution work is the complementary role of consultants. The startup assigns a revenue manager - an expert in e-commerce and online travel agencies - to each hotel.
"Because we have our own technology, it helps us automate a lot of the day-to-day work that revenue managers do. Therefore, each manager (from Zuzu) can manage a lot of properties, " he said.
Each revenue manager currently handles about 10 to 20 hotels, and they make up around one-quarter of the startup's 95 staff.
"Some of the hotel owners I talk to are basically saying things like 'You are a godsend', because they were actually struggling. We are not taking away any value from someone and keeping it for ourselves, we are instead creating value for everyone."
|
import Tkinter
import tkMessageBox
import tkFont
#from Tkinter import *
import tkSimpleDialog
class PeripheralSetting(tkSimpleDialog.Dialog):
    """Modal Tkinter dialog (Python 2) for editing up to 7 rows of
    (function name, pin number) pairs.

    After OK is pressed, ``self.result`` is a list of ``[function, pin]``
    pairs for every visible row where both fields are filled in; the pin
    field is converted to int.
    """
    # ########################################
    def __init__(self, master, arg_PinList=[('',0)]):
        # NOTE(review): mutable default argument -- the default list is
        # shared between all calls that omit arg_PinList; safe only while
        # nothing mutates it.  Consider arg_PinList=None + a local default.
        print 'init'
        strFont= 'Arial'
        self.__myfont12 = tkFont.Font(family=strFont, size=12)
        self.__myfont12_Bold = tkFont.Font(family=strFont, size=12, weight= tkFont.BOLD)
        self.__myfont10 = tkFont.Font(family=strFont, size=10)
        self.__myfont10_Bold = tkFont.Font(family=strFont, size=10, weight= tkFont.BOLD)
        self.__PinList= arg_PinList          # initial (function, pin) pairs
        self.__MaxRow= 7                     # maximum number of editable rows
        self.__CurrentRow= len(arg_PinList)  # number of rows currently shown
        self.__CurGridRow= self.__CurrentRow # grid row used for the next add
        # __NumberList maps display position to widget index; cleared rows
        # are rotated to the end of this list so their widgets get reused.
        self.__NumberList= range(0, self.__MaxRow+1)
        # Index 0 of each widget list is a dummy placeholder so widget
        # indices line up with the 1-based row numbers.
        self.__entries_Func= [0]
        self.__entries_PinNumb= [0]
        self.__btns_clear=[0]
        #self.master= master
        # Dialog.__init__ blocks here: it calls body() and runs the dialog.
        tkSimpleDialog.Dialog.__init__(self, master, "Peripherals")
    # ########################################
    def body(self, master):
        """Build the dialog body (hook called by tkSimpleDialog.Dialog).

        Creates the header row plus one (Entry, Entry, '-' button) row per
        possible slot; only the first __CurrentRow rows are gridded.
        """
        print 'body of Dialog PERIPHERALS'
        Tkinter.Label(master, text="Function", font= self.__myfont12_Bold).grid(row=0, column=0)
        Tkinter.Label(master, text="Pin Number", font= self.__myfont12_Bold).grid(row=0, column=1)
        Tkinter.Button(master, text= '+', font= self.__myfont12_Bold, command= self.btn_add_click, fg='white',activeforeground= 'white', bg= '#007700', activebackground= '#00aa00').grid(row=0,column=2)
        for i in self.__NumberList:
            if i==0:
                # Skip the dummy placeholder index.
                continue
            en_func = Tkinter.Entry(master)
            #en_func.insert(Tkinter.END, '{0}'.format(i))
            self.__entries_Func.append(en_func)
            en_pinnumb= Tkinter.Entry(master)
            self.__entries_PinNumb.append(en_pinnumb)
            # Each '-' button clears/hides its own row; ``i`` is bound
            # per-row through the lambda's default argument.
            btn= Tkinter.Button(master, text= '-', font= self.__myfont12_Bold, command= lambda i=i: self.btn_clear_click(i),fg='white',activeforeground= 'white', bg= '#aa0000', activebackground= '#ee0000')
            self.__btns_clear.append(btn)
            '''
            en_func.grid(row=i+1,column=0)
            en_pinnumb.grid(row=i+1, column=1)
            btn.grid(row=i+1,column=2)
            '''
            if i <= self.__CurrentRow:
                # Pre-fill and show the rows supplied by the caller.
                en_func.insert(Tkinter.END, self.__PinList[i-1][0])
                #en_func.insert(Tkinter.END, '{0}'.format(i))
                en_pinnumb.insert(Tkinter.END, self.__PinList[i-1][1])
                #'''
                en_func.grid(row=i,column=0)
                en_pinnumb.grid(row=i, column=1)
                btn.grid(row=i,column=2)
                '''
                en_func.grid_remove()
                en_pinnumb.grid_remove()
                btn.grid_remove()
                #'''
            #self.add_Row( i)
        # NOTE(review): __entries_Func[0] is the integer placeholder, not a
        # widget, so this does not actually set an initial focus -- probably
        # __entries_Func[1] was intended.  Confirm before changing.
        return self.__entries_Func[0] # initial focus
    def apply(self):
        """Collect ``self.result`` from the filled-in rows (hook called by
        tkSimpleDialog.Dialog when OK is pressed)."""
        try:
            self.result=[]
            for i in range(1, len(self.__entries_Func)):
                r1, r2= self.__entries_Func[i].get(), self.__entries_PinNumb[i].get()
                if r1 != '' and r2 != '':
                    # Keep only complete rows; pin numbers must be integers.
                    self.result.append([r1,int(r2)])
            #print 'result:', self.result
            print 'End of dialog' # or something
        except ValueError:
            # int(r2) failed: a non-numeric pin number was entered.
            tkMessageBox.showwarning("Bad input","Illegal values, please try again")
    def btn_clear_click(self, arg_index):
        """Clear and hide the row owned by widget index ``arg_index`` and
        rotate that index to the back of __NumberList for later reuse."""
        clear_row= self.__NumberList.index(arg_index)
        '''
        print '============= CLEAR ============'
        print 'Clear Row:', clear_row
        print 'NumberLIst:', self.__NumberList
        print 'clear_index', arg_index
        gridInfo= self.__entries_Func[arg_index].grid_info()
        #print gridInfo
        print 'Clear Grid Row', gridInfo['row']
        #'''
        #'''
        self.__entries_Func[arg_index].delete(0, 'end')
        self.__entries_PinNumb[arg_index].delete(0, 'end')
        self.__entries_Func[arg_index].grid_forget()
        self.__entries_PinNumb[arg_index].grid_forget()
        self.__btns_clear[arg_index].grid_forget()
        '''
        self.__entries_Func[arg_index].grid_remove()
        self.__entries_PinNumb[arg_index].grid_remove()
        self.__btns_clear[arg_index].grid_remove()
        #'''
        # Rotate the cleared widget index behind the active ones so it is
        # the last to be reused by btn_add_click.
        tmp= self.__NumberList[clear_row]
        del self.__NumberList[clear_row]
        self.__NumberList.append(tmp)
        self.__CurrentRow= self.__CurrentRow-1
        #print '__CurrentRow:', self.__CurrentRow
        #'''
    def btn_add_click(self):
        """Show one more row (up to __MaxRow), reusing the next free widget
        index from __NumberList."""
        '''
        print '============= ADD ============'
        print '### Current Row', self.__CurrentRow
        print 'NumberLIst:', self.__NumberList
        for i in range(1,len(self.__entries_Func)):
            tmp= self.__NumberList[i]
            gridInfo= self.__entries_Func[tmp].grid_info()
            if len(gridInfo)!=0:
                print 'Row ',str(i),' Entries List[', str(tmp),']: ', self.__entries_Func[tmp].grid_info()['row']
            else:
                print 'Row ',str(i),' empty'
        #'''
        if self.__CurrentRow < self.__MaxRow:
            self.__CurrentRow= self.__CurrentRow+1
            # __CurGridRow only ever grows, so a newly shown row is always
            # gridded below everything shown before it.
            self.__CurGridRow= self.__CurGridRow+1
            #self.__CurGridRow= self.__CurrentRow
            add_index= self.__NumberList[self.__CurrentRow]
            '''
            print 'Added Row:', self.__CurrentRow
            print 'add_index (NumberList[{0}]): {1}'.format(self.__CurrentRow,add_index)
            print 'Grid Row:', self.__CurGridRow
            #'''
            self.__entries_Func[add_index].grid(row=self.__CurGridRow, column=0)
            #self.__entries_Func[add_index].delete(0, 'end')
            self.__entries_PinNumb[add_index].grid(row=self.__CurGridRow, column=1)
            #self.__entries_PinNumb[add_index].delete(0, 'end')
            self.__btns_clear[add_index].grid(row=self.__CurGridRow, column=2)
            #print 'Row ',str(self.__CurrentRow),' Entries List[', str(add_index),']: ', self.__entries_Func[add_index].grid_info()['row']
        elif self.__CurrentRow== self.__MaxRow:
            print 'Max of Row is ', self.__MaxRow
|
A recent veterinary industry megastudy concluded that while cats significantly outnumber dogs in the United States, cats are much less likely to see their veterinarians on a regular basis. In fact, plenty of cats never see a vet unless they’re on death’s doorstep. The No. 1 reason for this canine-feline discrepancy? It comes down to a simple thing: transportation.
Although veterinary conventional wisdom tends to lay the blame on clients’ unwillingness to spend on their cats (relative to dogs), the truth is that getting a cat inside a box is considered a colossal stumbling block for many pet owners.
In my experience, this is absolutely a factor –– a big one. At least once a day, our office fields a call from a cat owner who has to cancel at the last minute because kitty isn’t amenable to carrier confinement.
You may think that this happens only with cats who live outdoors or whose feral origins don’t lend to easy capture, but you’d be wrong. Plenty of otherwise mild-mannered housecats will pull out all the stops (teeth and claws included) to stay out of the dreaded box.
So what’s a responsible owner to do? After all, waiting until she’s sick enough to resist less violently is not a reasonable option — even though a startlingly large percentage of cat owners resort to this very tactic.
In the interest of avoiding this worst-case scenario, here are my top tips for cornering, capturing and confining cats for safe transport to the vet.
Get the right size carrier. A large carrier is sometimes the only way to go because it can be impossible to squeeze a big cat through a narrow door. My personal favorite: a top-loading carrier. I even have a client who uses a rolling plastic file cabinet with a top that latches. He got it at OfficeMax for $12.99. Score!
Cozy up the space. Smart cat owners know that desensitization works. Try feeding your cat inside the carrier, which is an especially good trick for those who need to feed their cats separately for weight control. You should also line it with newspaper to soak up urine, and always keep a clean towel inside.
Opt for a shadier carrier. Most stressed-out cats seem to find a dark cubbyhole more comfortable than a wide-open space. You can achieve this by either buying a carrier designed for darkness or covering the box with a towel.
Try a towel wrap. Burrito your kitty before placing her in the carrier. This nifty trick works great when you need to medicate your cat, too. Plenty of YouTube videos can show you how to burrito a cat with perfect aplomb.
Spritz pheromones. Certain cats respond well to relaxing pheromone sprays that you can spritz inside the carrier or diffuse in your home on the big vet visit day.
Consider catnip. Some cats love it. If nothing else, your kitty may be disoriented enough to make it easier to capture and confine her. Others felines may be chilled out by a little catnip in the carrier.
Reach for drugs — as a last resort. Sometimes it’s better than the alternative. No cat should suffer veterinary neglect over a simple issue like cat carrier transport.
|
# Given a non-empty, singly linked list with head node head, return a middle node of linked list.
# If there are two middle nodes, return the second middle node.
# Example 1:
# Input: [1,2,3,4,5]
# Output: Node 3 from this list (Serialization: [3,4,5])
# The returned node has value 3. (The judge's serialization of this node is [3,4,5]).
# Note that we returned a ListNode object ans, such that:
# ans.val = 3, ans.next.val = 4, ans.next.next.val = 5, and ans.next.next.next = NULL.
# Example 2:
# Input: [1,2,3,4,5,6]
# Output: Node 4 from this list (Serialization: [4,5,6])
# Since the list has two middle nodes with values 3 and 4, we return the second one.
# Note:
# The number of nodes in the given list will be between 1 and 100.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # Fast/slow two-pointer technique: the fast pointer advances two nodes
    # per step while the slow pointer advances one, so when fast runs off
    # the end, slow sits at the middle (the *second* middle for lists of
    # even length).
    # time: O(n), space: O(1)
    def middleNode(self, head: ListNode) -> ListNode:
        slow = fast = head
        while fast is not None and fast.next is not None:
            fast = fast.next.next
            slow = slow.next
        return slow
|
New and exclusive to Ginger Rose, our bespoke personalised leather ring box is perfectly sized to store rings and earrings.
With some cool geometric sides this leather ring box is part of our new range of hexagonal jewellery boxes. Store safely a collection of small everyday jewellery items or create a beautiful proposal box.
Makes a great mothers day gift - a timeless and functional leather gift for her, be it a milestone birthday, 3rd anniversary gift (which is traditionally leather,) a christening gift or a wedding day present or simply just because, our leather boxes will be loved for years to come.
Use our speedy in house personalisation service to add a name, initials or date in a choice of fonts. Make use of up to 8 characters or choose an initial inside a sweet heart, all centrally positioned on the lid.
Tan leather with dark micro suede lining. Hidden magnetic lid closure.
|
from __future__ import absolute_import
import errno
import itertools
import logging
import os.path
import tempfile
from contextlib import contextmanager
from pip._vendor.contextlib2 import ExitStack
from pip._vendor.six import ensure_text
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.misc import enum, rmtree
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
    # Imported only for type comments; avoids a hard runtime dependency
    # on ``typing``.
    from typing import Any, Dict, Iterator, Optional, TypeVar, Union
    _T = TypeVar('_T', bound='TempDirectory')
# Module-level logger for temp-directory lifecycle messages.
logger = logging.getLogger(__name__)
# Kinds of temporary directories. Only needed for ones that are
# globally-managed.
tempdir_kinds = enum(
    BUILD_ENV="build-env",
    EPHEM_WHEEL_CACHE="ephem-wheel-cache",
    REQ_BUILD="req-build",
)
# Global ExitStack owning every globally-managed TempDirectory; only
# non-None while inside a global_tempdir_manager() context.
_tempdir_manager = None  # type: Optional[ExitStack]
@contextmanager
def global_tempdir_manager():
    # type: () -> Iterator[None]
    """Install a fresh ExitStack as the global tempdir manager for the
    duration of the context, restoring the previous manager afterwards.
    """
    global _tempdir_manager
    previous = _tempdir_manager
    with ExitStack() as stack:
        _tempdir_manager = stack
        try:
            yield
        finally:
            _tempdir_manager = previous
class TempDirectoryTypeRegistry(object):
    """Tracks, per temp-directory kind, whether instances of that kind
    should be removed automatically.  Unconfigured kinds default to True.
    """

    def __init__(self):
        # type: () -> None
        self._should_delete = {}  # type: Dict[str, bool]

    def set_delete(self, kind, value):
        # type: (str, bool) -> None
        """Record whether directories of ``kind`` are auto-deleted."""
        self._should_delete[kind] = value

    def get_delete(self, kind):
        # type: (str) -> bool
        """Return the configured auto-delete flag for ``kind``; kinds that
        were never configured default to True."""
        if kind in self._should_delete:
            return self._should_delete[kind]
        return True
# Scoped registry of per-kind auto-delete flags; only non-None while
# inside a tempdir_registry() context.
_tempdir_registry = None  # type: Optional[TempDirectoryTypeRegistry]
@contextmanager
def tempdir_registry():
    # type: () -> Iterator[TempDirectoryTypeRegistry]
    """Provides a scoped global tempdir registry that can be used to dictate
    whether directories should be deleted.
    """
    global _tempdir_registry
    previous, _tempdir_registry = _tempdir_registry, TempDirectoryTypeRegistry()
    try:
        yield _tempdir_registry
    finally:
        _tempdir_registry = previous
class _Default(object):
    # Sentinel type: lets TempDirectory distinguish "caller omitted the
    # ``delete`` argument" from an explicit None, which has its own meaning.
    pass
_default = _Default()  # the shared sentinel instance
class TempDirectory(object):
    """Helper class that owns and cleans up a temporary directory.

    This class can be used as a context manager or as an OO representation of a
    temporary directory.

    Attributes:
        path
            Location to the created temporary directory
        delete
            Whether the directory should be deleted when exiting
            (when used as a contextmanager)

    Methods:
        cleanup()
            Deletes the temporary directory

    When used as a context manager, if the delete attribute is True, on
    exiting the context the temporary directory is deleted.
    """
    def __init__(
        self,
        path=None,  # type: Optional[str]
        delete=_default,  # type: Union[bool, None, _Default]
        kind="temp",  # type: str
        globally_managed=False,  # type: bool
    ):
        super(TempDirectory, self).__init__()
        # ``delete`` is tri-state: True/False are explicit; None means
        # "decide at cleanup time by consulting _tempdir_registry".
        if delete is _default:
            if path is not None:
                # If we were given an explicit directory, resolve delete option
                # now.
                delete = False
            else:
                # Otherwise, we wait until cleanup and see what
                # tempdir_registry says.
                delete = None
        # The only time we specify path is in for editables where it
        # is the value of the --src option.
        if path is None:
            path = self._create(kind)
        self._path = path
        self._deleted = False
        self.delete = delete
        self.kind = kind
        if globally_managed:
            assert _tempdir_manager is not None
            # The global ExitStack will invoke our __exit__ when the
            # global_tempdir_manager() scope ends.
            _tempdir_manager.enter_context(self)
    @property
    def path(self):
        # type: () -> str
        # Guard against use-after-cleanup bugs.
        assert not self._deleted, (
            "Attempted to access deleted path: {}".format(self._path)
        )
        return self._path
    def __repr__(self):
        # type: () -> str
        return "<{} {!r}>".format(self.__class__.__name__, self.path)
    def __enter__(self):
        # type: (_T) -> _T
        return self
    def __exit__(self, exc, value, tb):
        # type: (Any, Any, Any) -> None
        # Deletion precedence: an explicit self.delete wins; otherwise ask
        # the scoped registry for this kind; otherwise default to deleting.
        if self.delete is not None:
            delete = self.delete
        elif _tempdir_registry:
            delete = _tempdir_registry.get_delete(self.kind)
        else:
            delete = True
        if delete:
            self.cleanup()
    def _create(self, kind):
        # type: (str) -> str
        """Create a temporary directory and store its path in self.path
        """
        # We realpath here because some systems have their default tmpdir
        # symlinked to another directory.  This tends to confuse build
        # scripts, so we canonicalize the path by traversing potential
        # symlinks here.
        path = os.path.realpath(
            tempfile.mkdtemp(prefix="pip-{}-".format(kind))
        )
        logger.debug("Created temporary directory: %s", path)
        return path
    def cleanup(self):
        # type: () -> None
        """Remove the temporary directory created and reset state
        """
        # Mark deleted first so the .path property refuses further access
        # even if the removal below fails partway through.
        self._deleted = True
        if not os.path.exists(self._path):
            return
        # Make sure to pass unicode on Python 2 to make the contents also
        # use unicode, ensuring non-ASCII names and can be represented.
        # This is only done on Windows because POSIX platforms use bytes
        # natively for paths, and the bytes-text conversion omission avoids
        # errors caused by the environment configuring encodings incorrectly.
        if WINDOWS:
            rmtree(ensure_text(self._path))
        else:
            rmtree(self._path)
class AdjacentTempDirectory(TempDirectory):
    """Helper class that creates a temporary directory adjacent to a real one.

    Attributes:
        original
            The original directory to create a temp directory for.
        path
            After calling create() or entering, contains the full
            path to the temporary directory.
        delete
            Whether the directory should be deleted when exiting
            (when used as a contextmanager)
    """
    # The characters that may be used to name the temp directory
    # We always prepend a ~ and then rotate through these until
    # a usable name is found.
    # pkg_resources raises a different error for .dist-info folder
    # with leading '-' and invalid metadata
    LEADING_CHARS = "-~.=%0123456789"
    def __init__(self, original, delete=None):
        # type: (str, Optional[bool]) -> None
        # Strip trailing separators so os.path.split() in _create() yields
        # the directory's own name rather than an empty last component.
        self.original = original.rstrip('/\\')
        super(AdjacentTempDirectory, self).__init__(delete=delete)
    @classmethod
    def _generate_names(cls, name):
        # type: (str) -> Iterator[str]
        """Generates a series of temporary names.

        The algorithm replaces the leading characters in the name
        with ones that are valid filesystem characters, but are not
        valid package names (for both Python and pip definitions of
        package).
        """
        # First pass: same-length candidates -- the first i characters of
        # ``name`` are replaced by '~' plus i-1 padding characters.
        for i in range(1, len(name)):
            for candidate in itertools.combinations_with_replacement(
                    cls.LEADING_CHARS, i - 1):
                new_name = '~' + ''.join(candidate) + name[i:]
                if new_name != name:
                    yield new_name
        # If we make it this far, we will have to make a longer name
        for i in range(len(cls.LEADING_CHARS)):
            for candidate in itertools.combinations_with_replacement(
                    cls.LEADING_CHARS, i):
                new_name = '~' + ''.join(candidate) + name
                if new_name != name:
                    yield new_name
    def _create(self, kind):
        # type: (str) -> str
        root, name = os.path.split(self.original)
        # Try each candidate name beside the original directory; mkdir is
        # atomic, so EEXIST simply means that candidate is already taken.
        for candidate in self._generate_names(name):
            path = os.path.join(root, candidate)
            try:
                os.mkdir(path)
            except OSError as ex:
                # Continue if the name exists already
                if ex.errno != errno.EEXIST:
                    raise
            else:
                path = os.path.realpath(path)
                break
        else:
            # Final fallback on the default behavior.
            path = os.path.realpath(
                tempfile.mkdtemp(prefix="pip-{}-".format(kind))
            )
        logger.debug("Created temporary directory: %s", path)
        return path
|
Purchase your favorite flavored sausage, grill until done, drizzle with balsamic vinegar and serve with grilled onions or peppers, and one or two mustards including sweet hot mustard and whole grain mustard.
Shuck white corn and cut the kernels from the cob. Chop tomatoes and toss with corn. Season with salt and pepper, and add a dash of cumin. Start in small increments as cumin can overwhelm. Estimate 1 teaspoon of cumin for 4 ears of corn. You can add diced red onion or chopped roasted red peppers to this dish with good effect. Finish with a drizzle of olive oil and balsamic vinegar.
You can grill any vegetable from asparagus to zucchini, toss the cut or prepared vegetable with olive oil, balsamic vinegar, salt, pepper and maybe chopped garlic or oregano and throw on the grill.
There is no recipe. Combine your favorite seasonal fruits, washed, cut, and ready to eat. Strawberries, blueberries, raspberries, cantaloupe, apricots, and yellow nectarines, toss with a squeeze of lime and a tablespoon each of triple sec and sugar.
Purchase Brandywine or other large round heirloom tomatoes. Slice off the top and spoon out the center. Fill with purchased tabouli or make your own. You could also fill the tomatoes with tuna salad or anything else that strikes your fancy.
Blend peeled, seedless watermelon with a bit of water and sugar to taste. Try combining 6 cups of fruit with 3 cups of water and ½ a cup of sugar.
|
#!/usr/bin/env python3
# pylint: disable=too-many-arguments
import sys
import os
import argparse
import yaml
import requests
# Metadata attached to every package created on Bintray.
PACKAGE_LICENSES = ['GPL-2.0']
PACKAGE_VCS_URL = 'https://github.com/performancecopilot/pcp'
class TimeoutHTTPAdapter(requests.adapters.HTTPAdapter):
    """HTTPAdapter that applies a default timeout to every request.

    https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/
    """

    def __init__(self, *args, **kwargs):
        # The timeout is mandatory: requests has no session-wide default,
        # and a session without one can hang forever.
        if "timeout" not in kwargs:
            # ValueError is more precise than a bare Exception and is still
            # caught by any existing `except Exception` handler.
            raise ValueError('Please specify a timeout.')
        self.timeout = kwargs.pop("timeout")
        super().__init__(*args, **kwargs)

    def send(self, request, **kwargs):
        """Send the request, supplying the default timeout when the caller
        did not pass one explicitly."""
        if kwargs.get("timeout") is None:
            kwargs["timeout"] = self.timeout
        return super().send(request, **kwargs)
class BintrayApi:
    """Minimal client for the Bintray REST API used by the release scripts.

    Every method prints its progress and the raw API response, and raises
    via ``Response.raise_for_status()`` on unexpected HTTP errors.
    """

    def __init__(self, subject: str, user: str, apikey: str, gpg_passphrase: str, endpoint='https://api.bintray.com',
                 timeout=20*60):
        self.subject = subject
        self.user = user
        self.apikey = apikey
        self.gpg_passphrase = gpg_passphrase
        self.endpoint = endpoint
        # Session with bounded retries on transient errors plus a default
        # timeout for every request to this endpoint.
        self.session = requests.Session()
        retries = requests.packages.urllib3.util.retry.Retry(
            total=3, backoff_factor=10, status_forcelist=[429, 500, 502, 503, 504])
        adapter = TimeoutHTTPAdapter(timeout=timeout, max_retries=retries)
        self.session.mount(self.endpoint, adapter)

    def setup_repository(self, repository, repository_type, repository_description):
        """Create the repository if it does not already exist."""
        resp = self.session.get(
            f"{self.endpoint}/repos/{self.subject}/{repository}",
            auth=(self.user, self.apikey),
        )
        if resp.status_code != 404:
            return
        print(f"Creating repository bintray.com/{self.subject}/{repository}")
        resp = self.session.post(
            f"{self.endpoint}/repos/{self.subject}/{repository}",
            auth=(self.user, self.apikey),
            json={
                'type': repository_type,
                'desc': repository_description,
                'gpg_use_owner_key': True,
            },
        )
        print(resp.text)
        resp.raise_for_status()
        print()

    def setup_package(self, repository, package):
        """Create the package inside *repository* if it does not already exist."""
        resp = self.session.get(
            f"{self.endpoint}/packages/{self.subject}/{repository}/{package}",
            auth=(self.user, self.apikey),
        )
        if resp.status_code != 404:
            return
        print(f"Creating package bintray.com/{self.subject}/{repository}/{package}")
        resp = self.session.post(
            f"{self.endpoint}/packages/{self.subject}/{repository}",
            auth=(self.user, self.apikey),
            json={
                'name': package,
                'licenses': PACKAGE_LICENSES,
                'vcs_url': PACKAGE_VCS_URL
            },
        )
        print(resp.text)
        resp.raise_for_status()
        print()

    def upload(self, repository, package, version, params, path):
        """Upload one file; HTTP 409 (artifact already exists) is tolerated."""
        file_name = os.path.basename(path)
        # Matrix-style parameters are appended to the upload URL after ';'.
        matrix_params = ';'.join(f"{k}={v}" for k, v in params.items())
        print(f"Uploading {file_name} to bintray.com/{self.subject}/{repository}/{package}/{version}")
        with open(path, 'rb') as f:
            resp = self.session.put(
                f"{self.endpoint}/content/{self.subject}/{repository}/{package}/{version}/{file_name};{matrix_params}",
                auth=(self.user, self.apikey),
                headers={'X-GPG-PASSPHRASE': self.gpg_passphrase},
                data=f,
            )
        print(resp.text)
        if resp.status_code not in (200, 409):
            # ignore HTTP 409: An artifact with the path ... already exists [under another version]
            resp.raise_for_status()
        print()

    def sign_version(self, repository, package, version):
        """GPG-sign all files of the given version."""
        print(f"Signing version bintray.com/{self.subject}/{repository}/{package}/{version}")
        resp = self.session.post(
            f"{self.endpoint}/gpg/{self.subject}/{repository}/{package}/versions/{version}",
            auth=(self.user, self.apikey),
            headers={'X-GPG-PASSPHRASE': self.gpg_passphrase},
        )
        print(resp.text)
        resp.raise_for_status()
        print()

    def sign_metadata(self, repository, package, version):
        """Trigger metadata (re)calculation and signing for the repository.

        NOTE(review): *package* and *version* are unused here — the
        calc_metadata endpoint operates on the whole repository.
        """
        print(f"Signing metadata of bintray.com/{self.subject}/{repository}")
        resp = self.session.post(
            f"{self.endpoint}/calc_metadata/{self.subject}/{repository}",
            auth=(self.user, self.apikey),
            headers={'X-GPG-PASSPHRASE': self.gpg_passphrase},
        )
        print(resp.text)
        resp.raise_for_status()
        print()

    def publish(self, repository, package, version):
        """Publish all uploaded (unpublished) content of the version."""
        print(f"Publish version bintray.com/{self.subject}/{repository}/{package}/{version}")
        resp = self.session.post(
            f"{self.endpoint}/content/{self.subject}/{repository}/{package}/{version}/publish",
            auth=(self.user, self.apikey),
            headers={'X-GPG-PASSPHRASE': self.gpg_passphrase},
        )
        print(resp.text)
        resp.raise_for_status()
        print()
def main():
    """Entry point: upload the source tarball and per-platform build
    artifacts to Bintray, then publish every touched repository in one
    final pass."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--subject', default=os.environ.get('BINTRAY_SUBJECT', 'pcp'))
    parser.add_argument('--package', default=os.environ.get('BINTRAY_PACKAGE', 'pcp'))
    parser.add_argument('--user', default=os.environ.get('BINTRAY_USER'))
    parser.add_argument('--apikey', default=os.environ.get('BINTRAY_APIKEY'))
    parser.add_argument('--gpg_passphrase', default=os.environ.get('BINTRAY_GPG_PASSPHRASE'))
    parser.add_argument('--version', required=True)
    parser.add_argument('--source')
    parser.add_argument('artifact', nargs='*')
    args = parser.parse_args()

    # Credentials may come from flags or the environment; all three are required.
    if not args.user or not args.apikey or not args.gpg_passphrase:
        parser.print_help()
        sys.exit(1)

    bintray = BintrayApi(args.subject, args.user, args.apikey, args.gpg_passphrase)
    repositories_to_publish = []

    if args.source:
        bintray.upload('source', args.package, args.version, {}, args.source)
        repositories_to_publish.append('source')

    for artifact_dir in args.artifact:
        # ex. build-fedora31-container
        # NOTE(review): assumes the basename contains exactly two dashes;
        # a platform name containing '-' would raise ValueError here.
        artifact, platform_name, _runner = os.path.basename(artifact_dir).split('-')
        if artifact != 'build':
            continue

        # Platform definitions live next to this script, one YAML file each.
        platform_def_path = os.path.join(os.path.dirname(__file__), f"platforms/{platform_name}.yml")
        with open(platform_def_path) as f:
            platform = yaml.safe_load(f)
        if 'bintray' not in platform:
            print(f"Skipping {platform_name}: bintray is not configured in {platform_name}.yml")
            continue
        bintray_params = platform['bintray'].get('params', {})
        repository_params = platform['bintray']['repository']
        repository = repository_params['name']

        bintray.setup_repository(repository, repository_params['type'], repository_params['description'])
        bintray.setup_package(repository, args.package)
        for artifact_filename in os.listdir(artifact_dir):
            artifact_filepath = os.path.join(artifact_dir, artifact_filename)
            bintray.upload(repository, args.package, args.version, bintray_params, artifact_filepath)
        bintray.sign_version(repository, args.package, args.version)
        bintray.sign_metadata(repository, args.package, args.version)
        repositories_to_publish.append(repository)

    # publish new version for all distributions at the same time
    for repository in repositories_to_publish:
        bintray.publish(repository, args.package, args.version)
# Script entry point.
if __name__ == '__main__':
    main()
|
Over one year into our on-going expansion, we’re glad to report a very early success for the newly established Teesside factory (based in Hartlepool). With a large number of people having a negative outlook on the potential of the UK’s manufacturing industry – so far we can say we’ve ‘bucked the trend’.
As the business has grown organically at a constant and stable rate since 2006, Group MD Dave Crone recognised the need to expand the business into new areas. After seeking RGF (regional growth fund) funding [which was agreed in February 2012] the purpose-built facility in Hartlepool was erected and functional by the end of May that year. “We base the business around the people and we’ve previously had staff travelling from Teesside to Blyth. In addition to the Enterprise Zone, it’s perfectly situated for our customers, staff & suppliers.” After reaching full capacity in Blyth, with the help of RGF funding – Omega can report a successful first year trading from Hartlepool as more projects are operated on a full turn-key solution.
When the RGF funding was initially applied for, Omega intended to create 16 jobs, and additional upgrades to the number of Rapid Tooling & Injection Moulding machines. To date Omega have employed an additional 10 members of staff – of which most are based full time at the Hartlepool factory. In total Omega have employed; four toolmakers, two business development managers, two mould setters, one operator & a marketing officer. Two toolmakers now working for Omega at the Teesside factory [who had a 14-year-long stint at another local company] are employees Stephen Shipley and Martyn Musgrave – two members of staff who’ve played a vital role in the first year’s success at the Hartlepool establishment.
Both Martyn and Stephen joined the company with the input of the RGF funding along with: Dean Conley, John Berry, Steve Shanley, Daniel Gray, Justin Fairbairn, Jonathan Galloway, Michael Winship and Chris Sipos – all who play a pivotal role in the delivery of growing service capabilities at Omega Plastics. Further investment upon the Hartlepool opening surrounds the machinery available for Tooling, Moulding and Assembly capabilities – from CNC Milling machines to 330 tonne Negri Boss Injection Moulding machines.
In addition to growing number of staff at Omega Plastics, the service capabilities continue to increase with the introduction of certified ESD (electrostatic discharge) assembly areas in early 2013. “With the ESD areas being located at Hartlepool, we can add value to the end-to-end service requirements some of our clients need. Being able to mould medical products with a Class 8 Cleanroom is great, but the additional capabilities of moulding and assembling [food contact] products which need special equipment to prevent electro static discharge – is invaluable.” adds Quality Manager Paul Anderson.
With the continuous investment in the latest machinery, on-going growth of an experienced and highly-skilled workforce we’re putting more products into the market place than we targeted this year. Concluding this year’s success, we are focusing on maintaining and surpassing capacity at the Teesside factory with the help of the RGF funding. “With the input from RGF, we’re still aiming to reach our target by creating another 7 positions within the work force in every department between now and 2015” reports Office Manager Michael Hanley.
If you have specific product development requirements or simply need help on taking your product to market, check out our Hartlepool factory by contacting us at enquiries@omega-plastics.co.uk to book your appointment. Our experienced team of engineers can assist you no matter how big or small the project.
|
# This file is part of Firemix.
#
# Copyright 2013-2020 Jonathan Evans <jon@craftyjon.com>
#
# Firemix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Firemix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Firemix. If not, see <http://www.gnu.org/licenses/>.
import colorsys
import random
import math
import numpy as np
import ast
from lib.pattern import Pattern
from lib.parameters import FloatParameter, IntParameter, StringParameter
from lib.color_fade import ColorFade
class StripeGradient(Pattern):
    """Animated color-gradient stripes.

    Stripes rotate around a point that itself orbits the scene center;
    stripe width, animation speed and brightness are modulated by the
    mixer's audio analysis.
    """

    # Gradient lookup table (ColorFade); (re)built in parameter_changed().
    _fader = None

    def setup(self):
        """Declare user-tweakable parameters and seed the animation state."""
        self.add_parameter(FloatParameter('audio-brightness', 1.0))
        self.add_parameter(FloatParameter('audio-stripe-width', 100.0))
        self.add_parameter(FloatParameter('audio-speed', 0.0))
        self.add_parameter(FloatParameter('speed', 0.01))
        self.add_parameter(FloatParameter('angle-speed', 0.1))
        self.add_parameter(FloatParameter('stripe-width', 20))
        self.add_parameter(FloatParameter('center-orbit-distance', 200))
        self.add_parameter(FloatParameter('center-orbit-speed', 0.1))
        self.add_parameter(FloatParameter('hue-step', 0.1))
        self.add_parameter(IntParameter('posterization', 8))
        self.add_parameter(StringParameter('color-gradient', "[(0,0,1), (0,0,1), (0,1,1), (0,1,1), (0,0,1)]"))
        self.add_parameter(FloatParameter('stripe-x-center', 0.5))
        self.add_parameter(FloatParameter('stripe-y-center', 0.5))
        # Randomized starting phases so instances don't animate in lockstep.
        # NOTE(review): purpose of the +100 offset on hue_inner is not
        # evident from this file — confirm before changing.
        self.hue_inner = random.random() + 100
        self._center_rotation = random.random()
        self.stripe_angle = random.random()
        self.locations = self.scene().get_all_pixel_locations()

    def parameter_changed(self, parameter):
        # Rebuild the gradient lookup whenever any parameter changes.
        # ast.literal_eval parses the color list safely (no eval()).
        fade_colors = ast.literal_eval(self.parameter('color-gradient').get())
        self._fader = ColorFade(fade_colors, self.parameter('posterization').get())

    def reset(self):
        # No per-playback state to reset.
        pass

    def tick(self, dt):
        """Advance the animation clocks; dt is scaled up by low-frequency audio."""
        super(StripeGradient, self).tick(dt)
        dt *= 1.0 + self.parameter('audio-speed').get() * self._app.mixer.audio.getLowFrequency()
        self.hue_inner += dt * self.parameter('speed').get()
        self._center_rotation += dt * self.parameter('center-orbit-speed').get()
        self.stripe_angle += dt * self.parameter('angle-speed').get()

    def render(self, out):
        """Fill ``out`` with posterized gradient stripes for every pixel."""
        # Jump the hue on audio onsets for a visual "beat" accent.
        if self._app.mixer.is_onset():
            self.hue_inner = self.hue_inner + self.parameter('hue-step').get()

        # Stripe width breathes with the smoothed audio energy.
        stripe_width = self.parameter('stripe-width').get() + self.parameter('audio-stripe-width').get() * self._app.mixer.audio.smoothEnergy

        # Rotation center orbits around the scene center.
        cx, cy = self.scene().center_point()
        cx += math.cos(self._center_rotation) * self.parameter('center-orbit-distance').get()
        cy += math.sin(self._center_rotation) * self.parameter('center-orbit-distance').get()

        sx = self.parameter('stripe-x-center').get()
        sy = self.parameter('stripe-y-center').get()
        posterization = self.parameter('posterization').get()

        # Rotate all pixel coordinates around (cx, cy) by stripe_angle,
        # then fold them into one stripe-width-sized tile.
        x, y = self.locations.T
        dx = x - cx
        dy = y - cy
        x = dx * math.cos(self.stripe_angle) - dy * math.sin(self.stripe_angle)
        y = dx * math.sin(self.stripe_angle) + dy * math.cos(self.stripe_angle)
        x = (x / stripe_width) % 1.0
        y = (y / stripe_width) % 1.0
        # Distance from the configured stripe center selects the gradient index.
        x = np.abs(x - sx)
        y = np.abs(y - sy)
        hues = np.int_(np.mod(x+y, 1.0) * posterization)
        np.copyto(out, self._fader.color_cache[hues])
        # Audio-reactive brightness and the global hue offset are applied last.
        out['light'] += self._app.mixer.audio.getEnergy() * self.parameter('audio-brightness').get()
        out['hue'] += self.hue_inner
|
ChiIL Mama Interviews Mr. Singer & The Sharp Cookies.
ChiIL out with ChiIL Mama and one of our local kindie favs, Mr. Singer & The Sharp Cookies. Click here for loads of ChiIL Mama's past Mr. Singer features like live show clips, video interviews and more!
Click the Rafflecopter Link below to enter through midnight 5/16.
Come for our SPECTACULAR 2012-2013 season finale with Mr. Singer & The Sharp Cookies. We will miss you this summer!
Our 2013-2014 line up of wonderful children's musicians will begin again in October.
Buy tickets here. Beat Kitchen is located at 2100 W. Belmont.
To keep up to date with Concerts for Kids, become our Facebook friend at www.facebook.com/kidsshowschicago.
Beat Kitchen's awesome rockin' weekly series will resume in the fall with a big kick off party. ChiIL Mama is stoked to be back as press sponsor with free weekly ticket giveaways, and loads of original kindie band content like live show footage, stills, and even band video interviews.
Check back with ChiIL Mama like we vote in Chi, IL... early and often all summer long. We'll tell you where the fam friendly fun is and continue to rock the great giveaways. Check the right side bar to follow us on Twitter & Facebook for *flash giveaways and/or follow us with e-mail updates. We never spam or share your info.
|
#! /usr/bin/python
'''
Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
For example, given n = 3, a solution set is:
"((()))", "(()())", "(())()", "()(())", "()()()"
'''
class Solution:
    def validParenthesis(self, string):
        """Return True if *string* is a balanced parenthesis sequence."""
        depth = 0
        for char in string:
            if char == '(':
                depth += 1
                continue
            depth -= 1
            if depth < 0:
                # A ')' closed more than was ever opened.
                return False
        return depth == 0

    # @param {integer} n
    # @return {string[]}
    def generateParenthesis(self, n):
        """Return all well-formed combinations of n pairs of parentheses.

        Uses backtracking that only ever extends valid prefixes, so the
        work is proportional to the Catalan-number-sized output instead
        of the original approach of growing all 2^(2n-1) candidate
        strings and filtering at the end.
        """
        if n < 1:
            # Preserve the original contract: an empty set for n < 1.
            return set()
        result = []

        def backtrack(prefix, open_count, close_count):
            # Invariant: prefix is always a valid parenthesis prefix.
            if len(prefix) == 2 * n:
                result.append(prefix)
                return
            if open_count < n:
                backtrack(prefix + '(', open_count + 1, close_count)
            if close_count < open_count:
                backtrack(prefix + ')', open_count, close_count + 1)

        backtrack('', 0, 0)
        return result
if __name__ == '__main__':
    solution = Solution()
    # print() with a single argument behaves identically on Python 2 and 3;
    # the original `print expr` statement is a SyntaxError on Python 3.
    print(solution.generateParenthesis(4))
|
Family Takaful is an umbrella comprising of Takaful only products that provides secured and lucrative Savings and Investment options; all under Shariah Compliant operations.
Family Takaful is a substitute for conventional life insurance which follows Islamic injunctions and abhors any kind of speculation and outlaws uncertainty (Gharar) and usury (i.e. interest, or Riba). Takaful is structured to avoid these prohibited elements, introducing elements of mutual help (Tawaun) and donation (Tabaru).
With Adamjeelife Family Takaful, you can protect not only yourself but also your family with the complete satisfaction as all your investment will grow in a Shariah compliant way.
Our Takaful certificates provides adequate coverage as well as the high value to savings opportunities; including coverage of major life events such as Education, Wedding, retirement & Savings.
For the individual requirements of our Takaful client base, our customer has the opportunity to invest in two completely Shariah compliant (Taameen and Maza’af Fund) funds with different risk exposures. They have proved to be a great venture for our customers.
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
# Some Illumina reads from ancient DNA samples were found to contain an inverted repeat
# with a different sequence inbetween
# in other words, the first x bases of a read are the reverse complement of the last x bases
# This script is meant to clip the 3' part of an inverted repeat when present
# A new fastq file is generated mentioning in the sequences ID which sequence was clipped, if any
# Two metrics files on the repeats found (and clipped) are produced as well
# When an entire sequence is its own reverse complement, this does not get clipped
# but a mention is made sequence identifier and
# these are also reported in the metrics file
#
# Written by Lex Nederbragt, with input from Bastiaan Star
# Version 1.0 release candidate 1, May 2013
#
# requires biopython, os and argparse modules
# on the UiO supercomputer "Abel", needs 'module load python2'
#
# run as 'python script_name.py -h' for instructions
# <codecell>
from Bio import Seq, SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
import os
import argparse
# <codecell>
# help text and argument parser
# (parsing itself is deferred to the __main__ guard at the bottom of the file)
desc = '\n'.join(["Strips off the 3' copy of an terminal inverted repeat, when present.",
                  "Input: one fastq file.",
                  "Output:",
                  "1) a new fastq file with cleaned sequences: 'infile.fastq' gives 'infile.clean.fastq'",
                  "2) a file called 'infile.inv_reps.txt' with the stripped sequences and their counts",
                  "3) a file called 'infile.inv_rep_lengths.txt' with the length distribution of the stripped sequences.",
                  "An optional argument -s/--shortest_length can be used to set the minumum length of repeat to clip (default: 4 bp)"
                  ])
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-i','--input', help='Input file name',required=True)
parser.add_argument('-s', '--shortest_length', help='Shortest repeat length to clip', type=int, default=4, required = False)
# <codecell>
def get_outfnames(infile):
    """Derive the three output file names from the input file name.

    Example: 'infile.fastq' -> ['infile.clean.fastq',
                                'infile.inv_reps.txt',
                                'infile.inv_rep_lengths.txt']
    """
    base, ext = os.path.splitext(os.path.basename(infile))
    return [
        base + '.clean' + ext,          # cleaned fastq records
        base + '.inv_reps.txt',         # inv_rep sequences + counts
        base + '.inv_rep_lengths.txt',  # inv_rep lengths + counts
    ]
# <codecell>
def find_inv_repeat(seq, seq_rc):
    """Return the length of the terminal inverted repeat (0 when absent).

    ``seq_rc`` must be the reverse complement of ``seq``.  An inverted
    repeat of length i means the last i bases of ``seq`` equal the
    reverse complement of its first i bases, which is exactly the test
    ``seq_rc[-i:] == seq[-i:]``.
    """
    # A sequence that equals its own reverse complement is one big repeat.
    if seq == seq_rc:
        return len(seq)
    # Grow the candidate repeat one base at a time; a repeat of length i
    # implies all shorter ones match, so stop at the first mismatch.
    longest = 0
    for i in range(1, len(seq)):
        if seq_rc[-i:] != seq[-i:]:
            break
        longest = i
    return longest
# <codecell>
def extract_inv_repeat(seq, shortest_length_to_clip):
    """After finding position of inverted repeat - if any -
    returns Bio.SeqRecord with the inverted repeated part removed,
    and the sequence of the removed part

    Returns [new_seq, inv_rep, inv_rep_length]:
    - new_seq: clipped SeqRecord when a long-enough repeat was found;
      otherwise the input record itself (note: in the self-reverse-
      complement case the input record's description is mutated in place)
    - inv_rep: the clipped-off sequence as a string ('' when nothing clipped)
    - inv_rep_length: length of the detected repeat (0 when none)
    """
    assert shortest_length_to_clip > 0, "Shortest length to remove needs to be larger than zero, not %s" % shortest_length_to_clip
    # expects a Bio.SeqRecord.SeqRecord
    assert type(seq) == SeqRecord, "Not a sequence record: '%s'" % str(seq)
    # get sequence and reverse complement as text format
    seq_txt = str(seq.seq)
    rc_seq_txt = str(seq.reverse_complement().seq)
    # locate the inverted repeat - if any
    inv_rep_length = find_inv_repeat(seq_txt, rc_seq_txt)
    # process results
    if inv_rep_length == len(seq_txt):
        # sequence is its own reverse complement: flag it, do not clip.
        # new_seq aliases the input record, so the description update is
        # visible on the caller's object too.
        new_seq = seq
        inv_rep = seq_txt
        new_seq.description += ' self_reverse_complement'
    elif inv_rep_length >= shortest_length_to_clip:
        # hit: slicing a SeqRecord yields a new record, so the input
        # record itself is left untouched in this branch
        new_seq = seq[:-inv_rep_length]
        inv_rep = str(seq[-inv_rep_length:].seq)
        new_seq.description += ' cleaned_off_' + inv_rep
    else:
        # either no hit, or hit shorter than minimum to report
        new_seq = seq
        inv_rep = ''
    return [new_seq, inv_rep, inv_rep_length]
# <codecell>
def test(seq, shortest_length_to_clip):
    """Run extract_inv_repeat on a bare string and unpack the result.

    Convenience wrapper for the assertion-based 'unit tests' below.
    """
    record = SeqRecord(Seq(seq, IUPAC.unambiguous_dna))
    cleaned, inv_rep, inv_rep_length = extract_inv_repeat(record, shortest_length_to_clip)
    return [str(cleaned.seq), inv_rep, inv_rep_length]
# <codecell>
# set of 'unit tests'
# Each expected value is [cleaned sequence, clipped repeat, repeat length].
# first/last 10 RC of eachother
assert test('AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGAAGCTACGACT', 1) == ['AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGA', 'AGCTACGACT', 10]
# one base
assert test('AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGATGAGGATTAT', 1) == ['AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGATGAGGATTA', 'T', 1]
# a repeat shorter than the minimum is detected but not clipped
assert test('AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGATGAGGATTAT', 4) == ['AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGATGAGGATTAT', '', 1]
# no inv_rep
assert test('AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGATGAGGATTAA', 1) == ['AGTCGTAGCTGATGCTTAGGGGCTTACTAGGCTTGATGAGGATTAA', '', 0]
# entire sequence it's own reverse complement
assert test('ACACAGGCCTGTGT', 1) == ['ACACAGGCCTGTGT', 'ACACAGGCCTGTGT', 14]
# empty sequence
assert test('', 4) == ['', '', 0]
# <codecell>
def process(infile, shortest_length_to_clip):
    """ Does the actual work:
    Goes through the input file and streams the content through the inverted_repeat locator
    Collects the new sequences and repeats found and reports them

    Writes the cleaned fastq and the two metrics files (repeat sequences,
    repeat length distribution).  All print calls use the parenthesised
    single-argument form and dict iteration avoids iterkeys(), so this
    function now runs unchanged on both Python 2 and Python 3.
    """
    # test for existing input file
    assert os.path.exists(infile), "Input file '%s' appears not to exist." % infile
    [out_fname, out_rname, out_lname] = get_outfnames(infile)
    inv_reps = {}   # repeat sequence -> occurrence count
    lengths = {}    # repeat length -> occurrence count
    total_trimmed = 0
    total_skipped = 0
    processed = 0
    max_rec_to_process = 1e30  # effectively unlimited; lower for debugging
    print("Processing sequences...")
    with open(out_fname, 'w') as out_fh:
        for rec in SeqIO.parse(infile, "fastq"):
            processed += 1
            if len(rec) < 1:
                # skip zero-length sequences
                total_skipped += 1
                continue
            new_rec, inv_rep, inv_rep_length = extract_inv_repeat(rec, shortest_length_to_clip)
            out_fh.write(new_rec.format("fastq"))
            if inv_rep_length >= shortest_length_to_clip:
                inv_reps[inv_rep] = inv_reps.get(inv_rep, 0) + 1
                total_trimmed += 1
                lengths[inv_rep_length] = lengths.get(inv_rep_length, 0) + 1
            if processed == max_rec_to_process:
                break
    print("Writing summary files...")
    with open(out_rname, "w") as p_out_fh:
        p_out_fh.write("inverted_repeat\tcount\n")
        for p in inv_reps:
            p_out_fh.write("%s\t%s\n" % (p, inv_reps[p]))
    with open(out_lname, "w") as l_out_fh:
        l_out_fh.write("repeat_length\tcount\n")
        # sorted() over the dict works on Python 2 and 3 alike, unlike
        # dict.iterkeys() which was removed in Python 3.
        for l in sorted(lengths):
            l_out_fh.write("%s\t%s\n" % (l, lengths[l]))
    print("\nProcessed %i records:\n- skipped %i (because of zero-length)\n- found %i inverted repeat(s)\n" % (processed, total_skipped, total_trimmed))
# <codecell>
if __name__ == "__main__":
args = parser.parse_args()
print ("Input file: %s" % args.input )
print ("Shortest length to clip: %s" % args.shortest_length)
process(args.input, args.shortest_length)
|
Ahead of her first 21km race at the 2016 Soweto Marathon this weekend, M-Net and Vuzu TV presenter, Lalla Hirayama chats to GLAMOUR about running and the red carpet.
How do you find the time to work out with such a busy schedule?
Fitness and health are very important to me. I rely a lot on my body to perform, especially with all the travel and a fast-paced schedule, so I really have to feed it with the right fuel and energy. When I’m home, I keep in shape with gym workouts, Bikram yoga and running. The beauty of running is that you can do it anywhere, so I always travel with my running kit.
What inspired you to sign up for your first 21km race?
It was thanks to coach Peteni of Nike+ Run Club (NRC) Joburg and all the runners who train with NRC. I’ve always loved to run, and after going through some recent health issues and weight gain, I decided the 21km run would be the perfect way to reach my new goals and get my body back into shape.
How have you been prepping for the race?
My running schedule is set out with specific workout days, run days and recovery days. I’ve added a few road runs, weightlifting, treadmill sessions and yoga, which helps elongate my muscles and assists with recovery. In addition to my daily supplements, I’ve incorporated protein shakes, upped my water intake and maintained healthy eating habits.
What are you most looking forward to about running ‘The People’s Race’?
This is my first race ever, so it’s all going to be new for me. I hear the energy is electric and you get swept up in the awesome vibe right from the start to the finish line. I’m really looking forward to experiencing it all. It’s ‘The People’s Race’ and watching human beings pushing themselves out of their comfort zones is truly inspiring.
What is your favourite workout on the NTC App?
The NTC App is phenomenal, and I love changing it up, but my favourite is definitely ‘strength’. I crave a good challenge and as I get stronger, I need to push myself further. The NTC App helps me grow and allows me to chase my best.
Lalla is taking over our Instagram! Follow us to keep up with all the action at the Soweto Marathon.
Looking for more fitness tips from the stars? Find out how Shashi Naidoo stays in shape and how to train like Vanessa Ntlapo.
|
# coding: utf-8
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
# Writes to the asset-server database are disabled unless the project
# settings explicitly enable them.
DUAS_ENABLE_DB_WRITE = getattr(settings, 'DUAS_ENABLE_DB_WRITE', False)
# Name of the database connection (settings.DATABASES key) holding the
# unity asset server data.
DUAS_DB_ROUTE_PREFIX = getattr(settings, 'DUAS_DB_ROUTE_PREFIX',
                               'unity_asset_server')
class UnityAssetServerRouter(object):
    """
    Database router that pins every model of the 'duashttp' app to the
    unity asset server database connection.
    """

    def db_for_read(self, model, **hints):
        """
        Route reads of duashttp models to the asset server database.
        """
        if model._meta.app_label != 'duashttp':
            return None
        return DUAS_DB_ROUTE_PREFIX

    def db_for_write(self, model, **hints):
        """
        Route writes of duashttp models to the asset server database,
        unless writing has been disabled via the settings.
        """
        if model._meta.app_label != 'duashttp':
            return None
        if not DUAS_ENABLE_DB_WRITE:
            raise ImproperlyConfigured(
                "Set `DUAS_ENABLE_DB_WRITE` to True in your settings to enable "
                "write operations on unity asset server database"
            )
        return DUAS_DB_ROUTE_PREFIX

    def allow_relation(self, obj1, obj2, **hints):
        """
        Allow relations whenever a duashttp model is involved.
        """
        involved = 'duashttp' in (obj1._meta.app_label, obj2._meta.app_label)
        return True if involved else None

    def allow_migrate(self, db, model):
        """
        Keep duashttp models exclusively on the asset server database.
        """
        if db == DUAS_DB_ROUTE_PREFIX:
            return model._meta.app_label == 'duashttp'
        if model._meta.app_label == 'duashttp':
            return False
        return None
|
Put your Facebook performance into context and find out whose strategy is working best. Just add your page and get a free social media report that compares you to Rhodia. Download the sample report or learn more about our Facebook benchmarking tool.
|
import os
import pytest
from hgtools import managers
from hgtools.managers import cmd
from hgtools.managers import subprocess
def test_subprocess_manager_invalid_when_exe_missing():
    """
    If the executable doesn't exist, the manager should report
    False for .is_valid().

    NOTE(review): the name mentions hg, but this exercises the
    subprocess GitManager's executable check.
    """
    non_existent_exe = '/non_existent_executable'
    assert not os.path.exists(non_existent_exe)
    mgr = subprocess.GitManager()
    mgr.exe = non_existent_exe
    assert not mgr.is_valid()
@pytest.mark.usefixtures("git_repo")
class TestTags:
def setup_method(self, method):
self.mgr = managers.GitManager('.')
def teardown_method(self, method):
del self.mgr
def test_single_tag(self):
assert self.mgr.get_tags() == set([])
self.mgr._invoke('tag', '-am', "Tagging 1.0", '1.0')
assert self.mgr.get_tags() == set(['1.0'])
self.mgr._invoke('checkout', '1.0')
assert self.mgr.get_tags() == set(['1.0'])
class TestParseVersion:
    """Unit tests for cmd.Git._parse_version."""

    def test_simple(self):
        assert cmd.Git._parse_version('git version 1.9.3') == '1.9.3'

    def test_trailing_mess(self):
        # Extra trailing text after the version number must be ignored.
        parsed = cmd.Git._parse_version('git version 1.9.3 (Mac OS X)')
        assert parsed == '1.9.3'
|
Thank you for visiting this website. Listed below is a fantastic graphic for Tirada De Cartas Gratis Amor Euroresidentes. We found this image online, and it comes from a trustworthy source. If you're searching for any new fresh idea for your own home, then the Tirada De Cartas Gratis Amor Euroresidentes image should be at the top of your list, or you might use it for an alternative concept.
This picture has been published by admin and is tagged in the appropriate category. We also hope it becomes one of the most popular images shared on Google or Facebook. We hope you like it as much as we do. If possible, share this Tirada De Cartas Gratis Amor Euroresidentes image with your friends and family through Google Plus, Facebook, Twitter, Instagram or any other social media site.
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Register this module's messages with the proto-plus runtime so they
# marshal under the Google Ads v7 package. (Generated code -- do not edit.)
__protobuf__ = proto.module(
    package="google.ads.googleads.v7.enums",
    marshal="google.ads.googleads.v7",
    manifest={"BatchJobStatusEnum",},
)
class BatchJobStatusEnum(proto.Message):
    r"""Container for enum describing possible batch job statuses. """

    class BatchJobStatus(proto.Enum):
        r"""The batch job statuses."""
        UNSPECIFIED = 0  # No status specified.
        UNKNOWN = 1  # Value unknown in this API version.
        PENDING = 2  # Job not yet started.
        RUNNING = 3  # Job in progress.
        DONE = 4  # Job finished.
# Public API is exactly the names registered in the proto manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
|
Blueprint Gaming was founded back in 2001, but it wasn’t until just a few years ago that they really rose to prominence. The casino software provider spent many of its early years struggling to make a name for itself before the famous Gauselmann Group snapped up a stake in the company in 2008. Since then, Blueprint Gaming has focused mainly on the online casino games development sector, where they have become a household name.
To ensure that their games reach as wide an audience as possible, Blueprint Gaming has worked together with Playtech's GTS platform, 888 and others to offer its games on their open gaming platforms. This means that many major casinos are capable of offering Blueprint slots.
Blueprint Gaming’s slots can be played on mobile phones and tablets, or players can choose to try their hand at their games via instant play casinos. Blueprint slots are non-downloadable. Players will also note that their games frequently appear at online casinos which are licensed out of the United Kingdom, and Alderney.
The casino software provider’s games feature prominently at UK casino sites. Many of the leading sportsbook/casino domains in the country regularly offer their titles. Lately, Blueprint Gaming has also started offering progressive jackpot prizes, allowing them to compete with some of their larger, more notable rivals.
Blueprint Gaming slots have no clear stereotypes. Some of their most successful slots feature just a handful of paylines (around 20-25) on the reels, while others can be played on the Megaways payline platform. The latter sees players randomly play with anything up to 70,000+ paylines on each spin on the reels. Special features also vary wildly from game to game.
Any player eager to get to grips with leading Blueprint Gaming slot machine games will find plenty of top titles available. The Goonies, Ted and Mars Attacks represent a few of the major film-themed slots developed by Blueprint, while other significant slots include Worms: Reloaded, Wish Upon a Jackpot, Mighty Tusk, and Irish Riches.
Blueprint Gaming can offer a vast array of slots spanning many different themes. Their games are commonly found at leading UK casinos, and the gaming experience players can have concerning paylines, features, prizes and wagers is varied, suiting players of all tastes.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.