text
stringlengths 29
850k
|
|---|
# -*- coding: utf-8 -*-
# -*- Channel PelisVips -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo
else:
import urlparse # Usamos el nativo de PY2 que es más rápido
import re
from bs4 import BeautifulSoup
from channels import autoplay, filtertools
from core import httptools, scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
#Yoast SEO v11.6
# Base URL of the scraped site.
host = "https://www.pelisvips.com/"
# Kodi/Alfa setting: when True, skip per-channel title/plot formatting.
unify = config.get_setting('unify')
# Maps the site's quality labels (lower-cased) to the short tags shown to the user.
lquality = {'hd1080p': 'FHD', 'hd720p': 'HD', 'hdreal720': 'HD',
            'br screener': 'BR-S', 'ts screener': 'TS'}
list_quality = list(lquality.values())
# Servers offered to AutoPlay, in priority order.
list_servers = ['directo', 'fembed', 'rapidvideo', 'mega', 'vidlox', 'streamango', 'openload']
# Maps the site's language names to Alfa's canonical language codes.
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST',
           'Subtitulado': 'VOSE', 'Subtitulada': 'VOSE'}
list_language = list(IDIOMAS.values())
def mainlist(item):
    """Build the channel's root menu (sections, language filters, search)."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    # One row per entry: (title, action, url, thumbnail id, viewmode or None).
    menu = [
        ("Estrenos", "list_all", host + 'genero/estrenos/', "premieres", "movie_with_plot"),
        ("Novedades", "list_all", host, "newest", "movie_with_plot"),
        ("Géneros", "genres", host, "genres", None),
        ("Castellano", "list_all", host + 'ver-idioma/castellano/', "cast", None),
        ("Latino", "list_all", host + 'ver-idioma/latino/', "lat", None),
        ("VOSE", "list_all", host + 'ver-idioma/subtitulada/', "vose", None),
        ("Buscar...", "search", host + "?s=", "search", None),
    ]
    itemlist = []
    for title, action, url, thumb_id, viewmode in menu:
        kwargs = dict(channel=item.channel, title=title, action=action,
                      url=url, thumbnail=get_thumb(thumb_id, auto=True))
        if viewmode:
            kwargs["viewmode"] = viewmode
        itemlist.append(Item(**kwargs))
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def create_soup(url, soup=True, referer=None, post=None):
    """Download *url* and return a BeautifulSoup tree (or the raw body).

    ``referer`` is forwarded as the request headers and ``post`` as the POST
    payload.  When ``soup`` is False the response body is returned unparsed.
    """
    logger.info()
    response = httptools.downloadpage(url, headers=referer, post=post)
    if not soup:
        return response.data
    return BeautifulSoup(response.data, "html5lib", from_encoding="utf-8")
def genres(item):
    """List the site's genres (taken from the second sidebar list)."""
    logger.info()
    soup = create_soup(item.url)
    genre_block = soup.find_all('ul', class_="sbi-list")[1]
    return [Item(channel=item.channel, action="list_all",
                 title=link.text.strip(), url=link['href'])
            for link in genre_block.find_all('a')]
def search(item, texto):
    """Global-search entry point: query the site for *texto*.

    Returns an empty list on any failure so one broken channel never
    interrupts the global search.
    """
    logger.info()
    item.url += texto.replace(" ", "+")
    try:
        return list_search(item)
    except Exception:
        # ``sys`` is already imported at module level; the old local
        # ``import sys`` inside this handler was redundant.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def list_search(item):
    """Parse a search-results page into playable movie Items.

    Each ``li.itemlist`` entry carries the title (with year), thumbnail,
    and an info line of the form "Idioma: ... Calidad: ...".
    """
    logger.info()
    itemlist = []
    soup = create_soup(item.url)
    matches = soup.find_all('li', class_='itemlist')
    for elem in matches:
        url = elem.a['href']
        url = urlparse.urljoin(host, url)
        stitle = elem.a['title']
        thumbnail = elem.img['src']
        # partition() keeps the language part in info[0] and quality in info[2].
        info = elem.find('p', class_='main-info-list').text.partition('Calidad:')
        plot = elem.find('p', class_='text-list').text.partition('cula Completa ')[2]
        title = clear_title(stitle)
        year = scrapertools.find_single_match(stitle, r'\((\d{4})\)$')
        quality = info[2].strip()
        quality = lquality.get(quality.lower(), quality)
        info_langs = info[0].split('Idioma:')[1]
        list_langs = scrapertools.find_multiple_matches(info_langs, '([a-zA-Z]+)')
        langs, list_langs = extrae_idiomas(list_langs)
        # The scraped plot above is deliberately discarded; TMDb fills
        # contentPlot later and only the language/quality banner is kept.
        plot = ''
        if not unify:
            stitle = "[B]%s[/B] [COLOR darkgrey](%s)[/COLOR]" % (
                title, year)
            plot = '[COLOR yellowgreen][I]Idiomas[/COLOR]: %s\n[COLOR yellowgreen]Calidad[/COLOR]: %s[/I]\n\n' % (
                langs, quality)
        itemlist.append(Item(channel = item.channel,
                             action = 'findvideos',
                             contentTitle = title,
                             infoLabels = {'year':year},
                             quality = quality,
                             thumbnail = thumbnail,
                             title = stitle,
                             language=list_langs,
                             url = url,
                             plot=plot,
                             plot2=plot
                             ))
    tmdb.set_infoLabels(itemlist, True)
    if not unify:
        # NOTE: this loop rebinds the ``item`` parameter on purpose-unclear
        # grounds; it prepends the language/quality banner to the TMDb plot.
        for item in itemlist:
            if item.infoLabels['tmdb_id'] and not 'Idiomas' in item.contentPlot:
                item.plot1 = item.contentPlot
                item.contentPlot = item.plot2+item.contentPlot
    return itemlist
def list_all(item):
    """Parse a catalogue/listing page into movie Items plus a next-page link."""
    logger.info()
    itemlist = []
    soup = create_soup(item.url)
    matches = soup.find_all('a', class_='movie-item clearfix tooltipS')
    for elem in matches:
        url = elem['href']
        url = urlparse.urljoin(host, url)
        quality = elem.find('div', class_='_format').text.strip()
        thumbnail = elem.img['src']
        stitle = elem.img['alt']
        syear = elem.find('div', class_='label_year').text
        audio = elem.find('div', class_='_audio')
        title, year = clear_title(stitle, syear)
        stitle = title
        quality = lquality.get(quality.lower(), quality)
        # Languages come as <img> flags whose 'title' attribute names the language.
        list_langs = audio.find_all('img')
        langs, list_langs = extrae_idiomas(list_langs)
        plot = ''
        if not unify:
            stitle = "[B]%s[/B] [COLOR darkgrey](%s)[/COLOR]" % (
                title, year)
            plot = '[COLOR yellowgreen][I]Idiomas[/COLOR]: %s\n[COLOR yellowgreen]Calidad[/COLOR]: %s[/I]\n\n' % (
                langs, quality)
        itemlist.append(Item(channel = item.channel,
                             action = 'findvideos',
                             contentTitle = title,
                             infoLabels = {'year':year},
                             quality = quality,
                             thumbnail = thumbnail,
                             title = stitle,
                             language=list_langs,
                             url = url,
                             plot=plot,
                             plot2=plot
                             ))
    tmdb.set_infoLabels(itemlist, True)
    if not unify:
        # Prepend the language/quality banner to the TMDb-provided plot;
        # the original plot is preserved in ``plot1`` for findvideos().
        for item in itemlist:
            if item.infoLabels['tmdb_id'] and not 'Idiomas' in item.contentPlot:
                item.plot1 = item.contentPlot
                item.contentPlot = item.plot2+item.contentPlot
    # Pagination: absent on the last page, in which case the lookup raises.
    try:
        next_page = soup.find('a', class_='nextpostslink')['href']
        next_page = urlparse.urljoin(host, next_page)
    except:
        next_page = None
    if next_page:
        itemlist.append(Item(channel=item.channel, action="list_all",
                             title='Página Siguiente >>',
                             text_color='aquamarine',
                             url=next_page.strip()))
    return itemlist
def findvideos(item):
    """Resolve the playable video links for a movie page."""
    logger.info()
    itemlist = []
    from lib import players_parse
    # Download the page and keep only the player block.
    soup = create_soup(item.url).find('div', id='movie-player')
    matches = soup.find_all('li')
    for elem in matches:
        # '%s' placeholder is later filled with the server name by
        # get_servers_itemlist() (see the lambda below).
        title = "%s"
        url = elem.a['rel'][0]
        url = players_parse.player_parse(url, elem.a['title'], host)
        info = elem.find('span', class_='optxt').text.partition('\n')
        slang = info[0].strip().replace('Español ', '')
        squality = info[2].strip().replace(' ', '')
        language = IDIOMAS.get(slang, slang)
        quality = lquality.get(squality.lower(), squality)
        if "pelisvips.com" in url:
            # Self-hosted player: extract the direct file URL from its JS config.
            data = create_soup(url, soup=False).partition('sources:')[2]
            url = scrapertools.find_single_match(data, "file': '([^']+)")
        elif "pelisup" in url:
            # pelisup is a fembed mirror; normalise so the fembed server matches.
            url = url.replace('pelisup', 'fembed')
        if not unify:
            title += ' [COLOR palegreen][%s] [/COLOR][COLOR grey][%s][/COLOR]' % (quality, language)
        if url:
            itemlist.append(
                item.clone(action="play", title=title, url=url,
                           quality= quality, language=language,
                           plot=item.plot1
                           ))
    itemlist=servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    if itemlist and item.contentChannel != "videolibrary":
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="gold",
                                 action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                 contentTitle=item.contentTitle
                                 ))
    return itemlist
def newest(categoria):
    """Return the newest movies for the 'novedades' section *categoria*.

    Unknown categories and any scraping error yield an empty list so the
    global 'novedades' aggregator is never interrupted.
    """
    logger.info()
    # Dispatch table instead of the old if/elif chain.
    urls = {
        'peliculas': host,
        'documentales': host + "genero/documental/",
        'infantiles': host + "genero/animacion/",
        'terror': host + "genero/terror/",
        'castellano': host + "ver-idioma/castellano/",
        'latino': host + "ver-idioma/latino/",
    }
    if categoria not in urls:
        return []
    item = Item()
    item.url = urls[categoria]
    try:
        itemlist = list_all(item)
        # Drop the trailing "next page" pseudo-item; guard against an empty
        # result (the old ``itemlist[-1]`` raised IndexError on no matches).
        if itemlist and itemlist[-1].action == "list_all":
            itemlist.pop()
    except Exception:
        # ``sys`` is imported at module level; no local import needed.
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
def clear_title(stitle, syear=None):
    """Strip alternate titles (' / ...') and parenthesised suffixes.

    With *syear* given, also extract a 4-digit year (falling back to '-')
    and return ``(title, year)``; otherwise return the cleaned title alone.
    """
    cleaned = re.sub(r' / (.*)| \(.*', '', stitle)
    if not syear:
        return cleaned
    year = scrapertools.find_single_match(syear, r'(\d{4})') or '-'
    return cleaned, year
def extrae_idiomas(list_language):
    """Normalise scraped language markers to canonical codes.

    Entries are either bs4 ``<img>`` tags (language in the ``title``
    attribute) or plain strings.  The list is updated in place and returned
    together with a comma-separated display string.
    """
    logger.info()
    for i, elem in enumerate(list_language):
        try:
            idioma = elem['title']
        except (TypeError, KeyError):
            # Plain-string entry (strings raise TypeError on ['title']);
            # the old bare ``except:`` hid unrelated errors.
            idioma = elem.strip()
        list_language[i] = IDIOMAS.get(idioma, idioma)
    return ", ".join(list_language), list_language
|
This page is generated from SCOTXblog data. If you prefer, you can access the Court's official orders list for May 22, 2009.
09-0293 UHS OF TIMBERLAWN, INC. v. S.B., A MINOR, BY AND THROUGH HER NEXT FRIEND A.B.
09-0395 LAURANCE KRIEGEL v. DIVERSIFIED FINANCIAL SERVICES, LLC, ET AL.
|
from collections import Counter, defaultdict
from evaluators import *
import load_data as ld
import numpy as np
class NaiveBaseClass:
    """Shared helpers for the Naive Bayes classifiers below."""

    def calculate_relative_occurences(self, list1):
        """Return a dict mapping each value in *list1* to its relative frequency."""
        no_examples = float(len(list1))
        # Counter + comprehension replaces the old mutate-while-iterating loop.
        return {key: count / no_examples
                for key, count in Counter(list1).items()}

    def get_max_value_key(self, d1):
        """Return the key of *d1* whose value is largest (first on ties)."""
        return max(d1, key=d1.get)

    def initialize_nb_dict(self):
        """(Re)create the per-label feature-value store used during training."""
        self.nb_dict = {label: defaultdict(list) for label in self.labels}
class NaiveBayes(NaiveBaseClass):
    """
    Naive Bayes Classifier:
    It is trained with a 2D-array X (dimensions m,n) and a 1D array Y (dimension 1,n).
    X should have one column per feature (total m) and one row per training example (total n).
    After training a dictionary is filled with the class probabilities per feature.
    """

    def train(self, X, Y):
        """Estimate class priors and per-class feature-value frequencies."""
        self.labels = np.unique(Y)
        _, n_features = np.shape(X)
        self.initialize_nb_dict()
        self.class_probabilities = self.calculate_relative_occurences(Y)
        # Collect every observed feature value per class...
        for label in self.labels:
            X_label = X[np.where(Y == label)[0], :]
            for col in range(np.shape(X_label)[1]):
                self.nb_dict[label][col] += list(X_label[:, col])
        # ...then convert the raw value lists into relative-frequency dicts.
        for label in self.labels:
            for col in range(n_features):
                self.nb_dict[label][col] = self.calculate_relative_occurences(
                    self.nb_dict[label][col])

    def classify_single_elem(self, X_elem):
        """Return the most probable label for a single feature row."""
        scores = {}
        for label in self.labels:
            prob = self.class_probabilities[label]
            for col, value in enumerate(X_elem):
                # Unseen feature values zero out the class score,
                # exactly as before (no smoothing).
                prob *= self.nb_dict[label][col].get(value, 0)
            scores[label] = prob
        return self.get_max_value_key(scores)

    def classify(self, X):
        """Predict a label for every row of X."""
        self.predicted_Y_values = [self.classify_single_elem(X[row, :])
                                   for row in range(np.shape(X)[0])]
        return self.predicted_Y_values
class NaiveBayesText(NaiveBaseClass):
    """
    When the goal is classifying text, it is better to give the input X in the form of a list of lists containing words.
    X = [
    ['this', 'is', 'a',...],
    (...)
    ]
    Y still is a 1D array / list containing the labels of each entry
    """

    def initialize_nb_dict(self):
        # Per-label store is a flat bag-of-words list here, not per-feature.
        self.nb_dict = {label: [] for label in self.labels}

    def train(self, X, Y):
        """Estimate class priors and per-class word frequencies."""
        self.class_probabilities = self.calculate_relative_occurences(Y)
        self.labels = np.unique(Y)
        self.no_examples = len(Y)
        self.initialize_nb_dict()
        # Pool all words observed per class...
        for ii in range(self.no_examples):
            self.nb_dict[Y[ii]] += X[ii]
        # ...then turn the pooled lists into relative word frequencies.
        for label in self.labels:
            self.nb_dict[label] = self.calculate_relative_occurences(self.nb_dict[label])

    def classify_single_elem(self, X_elem):
        """Return the most probable label for one document (list of words)."""
        scores = {}
        for label in self.labels:
            prob = self.class_probabilities[label]
            word_freqs = self.nb_dict[label]
            for word in X_elem:
                # Unseen words zero out the class score, as before.
                prob *= word_freqs.get(word, 0)
            scores[label] = prob
        return self.get_max_value_key(scores)

    def classify(self, X):
        """Predict a label for every document in X."""
        self.predicted_Y_values = [self.classify_single_elem(doc) for doc in X]
        return self.predicted_Y_values
#### Demo: train and evaluate both classifiers on the bundled datasets.

def _evaluate(classifier, loader, test_limit=None):
    """Train *classifier* on *loader*'s data and print per-class F1 scores.

    *test_limit* optionally caps how many test rows are classified
    (the text corpus is large, so only the first 100 were scored before).
    """
    X_train, Y_train, X_test, Y_test = loader()
    print("training naive bayes")
    classifier.train(X_train, Y_train)
    print("trained")
    X_eval = X_test if test_limit is None else X_test[:test_limit]
    predicted_Y = classifier.classify(X_eval)
    for y_label in np.unique(Y_test):
        f1 = f1_score(predicted_Y, Y_test, y_label)
        print("F1-score on the test-set for class %s is: %s" % (y_label, f1))

# Same runs as before, with the duplicated train/score boilerplate factored out.
_evaluate(NaiveBayesText(), ld.amazon_reviews, test_limit=100)
_evaluate(NaiveBayes(), ld.adult)
|
Ali first Indian actor to do a biopic in Hollywood?
Actor Ali Fazal will soon be seen in the titular role in a Hollywood film called Victoria and Abdul. The Indian release of the film is on the 13th of October, when Ali will be seen in the role of Abdul Karim in the movie.
While on his world premiere international tour, Ali met a few directors and stars in Hollywood and is in talks for some upcoming projects there. According to sources, Ali is in talks with a Hollywood director for a biopic role and it will be the first time an Indian actor will be featured in a Hollywood biopic.
We wish the actor good luck for this huge outing of his!
|
import os
import re
import json
import random
import apsw
import time
# import flask web microframework
from flask import Flask
from flask import request
# import from the 21 Developer Library
from two1.lib.wallet import Wallet
from two1.lib.bitserv.flask import Payment
# SQLite database holding the ``names`` and ``ads`` tables.
connection = apsw.Connection("apibb.db")
# Valid advertised names: alphanumeric start, then alphanumerics, '-' or '.'.
name_re = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9-\.]*$")
app = Flask(__name__)
wallet = Wallet()
# Enables HTTP 402 micro-payment enforcement on the routes below.
payment = Payment(app, wallet)
def expire_ads():
    """Purge advertisements whose expiry time has passed."""
    connection.cursor().execute(
        "DELETE FROM ads WHERE expires < datetime('now')")
def expire_names():
    """Purge name registrations whose expiry time has passed."""
    connection.cursor().execute(
        "DELETE FROM names WHERE expires < datetime('now')")
@app.route('/names')
@payment.required(1)
def get_names():
    """Return every registered name as a JSON list (1 satoshi per call)."""
    cursor = connection.cursor()
    rows = cursor.execute(
        "SELECT name,created,expires FROM names ORDER BY name")
    result = [{"name": name, "created": created, "expires": expires}
              for name, created, expires in rows]
    return json.dumps(result)
def valid_renewal(request):
    """Validate the name/hours query args of a renewal request.

    Returns False for missing, malformed or out-of-range values; the old
    version let a TypeError/ValueError escape as an HTTP 500 when 'name'
    or 'hours' was absent or non-numeric.
    """
    name = request.args.get('name')
    hours = request.args.get('hours')
    if name is None or hours is None:
        return False
    try:
        hours = int(hours)
    except ValueError:
        return False
    if (name_re.match(name) is None or
            hours < 1 or
            hours > (24 * 30)):
        return False
    return True
def get_renew_price_from_req(request):
    """Price a name renewal at 10 satoshis per hour (minimum 10)."""
    if not valid_renewal(request):
        # Fixed copy/paste: this is the renewal endpoint, not /advertise.
        return "invalid renewal"
    hours = int(request.args.get('hours'))
    price = hours * 10  # 10 satoshis per hour
    if price < 10:
        price = 10
    return price
@app.route('/namerenew')
@payment.required(get_renew_price_from_req)
def name_renew():
    """Create or extend a name registration by the requested hours."""
    if not valid_renewal(request):
        return "invalid renewal"
    expire_names()
    name = request.args.get('name')
    hours = int(request.args.get('hours'))
    cursor = connection.cursor()
    expires = 0
    for v in cursor.execute("SELECT expires FROM names WHERE name = ?", (name,)):
        expires = v[0]
    print("EXPIRES " + str(expires))
    # Bind the datetime modifier as a parameter instead of concatenating it
    # into the SQL text (hours is already int()-validated, but binding is
    # the safe, canonical form).
    modifier = '+%d hours' % hours
    if expires == 0:
        cursor.execute(
            "INSERT INTO names VALUES(?, datetime('now'), datetime('now', ?))",
            (name, modifier))
    else:
        cursor.execute(
            "UPDATE names SET expires = datetime(?, ?) WHERE name = ?",
            (expires, modifier, name))
    return "OK"
def valid_advertisement(cursor, request):
    """Validate an /advertise request: well-formed args and a live name.

    Returns False for missing, malformed or out-of-range values; the old
    version raised (HTTP 500) when any query arg was absent or 'hours'
    was non-numeric.
    """
    name = request.args.get('name')
    uri = request.args.get('uri')
    pubkey = request.args.get('pubkey')
    hours = request.args.get('hours')
    if name is None or uri is None or pubkey is None or hours is None:
        return False
    try:
        hours = int(hours)
    except ValueError:
        return False
    if (name_re.match(name) is None or
            len(uri) < 1 or
            len(uri) > 512 or
            len(pubkey) < 32 or
            len(pubkey) > 512 or
            hours < 1 or
            hours > (24 * 30)):
        return False
    # The advertised name must exist and not be expired.
    expires = None
    for v in cursor.execute("SELECT strftime('%s', expires) FROM names WHERE name = ? AND expires > datetime('now')", (name,)):
        expires = v
    if expires is None:
        return False
    # NOTE(review): an earlier draft also rejected ads outliving the name's
    # expiry (curtime + hours*3600 > expires); still unenforced here.
    return True
def get_advertise_price_from_req(request):
    """Price an advertisement at 2 satoshis per hour (minimum 2)."""
    cursor = connection.cursor()
    if not valid_advertisement(cursor, request):
        return "invalid advertisement"
    hours = int(request.args.get('hours'))
    # 2 satoshis per hour, floored at 2.
    return max(2, hours * 2)
@app.route('/advertise')
@payment.required(get_advertise_price_from_req)
def advertise():
    """Store an advertisement (uri + pubkey) under a registered name."""
    cursor = connection.cursor()
    if not valid_advertisement(cursor, request):
        return "invalid advertisement"
    name = request.args.get('name')
    uri = request.args.get('uri')
    pubkey = request.args.get('pubkey')
    hours = int(request.args.get('hours'))
    # Bind the expiry modifier instead of concatenating it into the SQL text.
    cursor.execute(
        "INSERT INTO ads VALUES(?, ?, ?, datetime('now'), datetime('now', ?))",
        (name, uri, pubkey, '+%d hours' % hours))
    return "OK"
@app.route('/ads')
@payment.required(1)
def get_advertisements():
    """Return the live advertisements for ``name`` as a JSON list."""
    name = request.args.get('name')
    cursor = connection.cursor()
    rows = cursor.execute(
        "SELECT uri,pubkey,created,expires FROM ads WHERE name = ? AND expires > datetime('now')",
        (name,))
    ads = [{"uri": uri, "pubkey": pk, "created": created, "expires": expires}
           for uri, pk, created, expires in rows]
    return json.dumps(ads)
@app.route('/info')
def get_info():
    """Describe this service and its per-endpoint minimum pricing (free call)."""
    pricing = {
        "/names": {"minimum": 1},
        "/namerenew": {"minimum": 10},
        "/advertise": {"minimum": 2},
        "/ads": {"minimum": 1},
    }
    info_obj = {
        "name": "apibb",
        "version": 100,
        "pricing": pricing,
    }
    return json.dumps(info_obj)
if __name__ == '__main__':
    # Listen on all interfaces; debug=True is for development only.
    app.run(host='0.0.0.0', port=12002, debug=True)
|
This bike is a whole bunch of retro fun! Use it to cruise, commute, exercise, or just feel the (fast) wind in your hair. Pedal assist allows you to go faster and longer. Mechanical disc brakes give you peace-of-mind stopping power. Shimano 8-speed rapid-fire gears allow you to find the exact comfort level of pedalling under any circumstances.
All quality alloy frame, fork, handlebars, seat post, rims, seat clamp and pedals. Comfy retro riding!
Featuring Bafang 250W pedal assist rear hub motor with 36V 11Ah lithium ion battery, gear sensor, and digital display.
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Websocket proxy that is compatible with OpenStack Nova.
Leverages websockify.py by Joel Martin
'''
import Cookie
import socket
import websockify
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class NovaWebSocketProxy(websockify.WebSocketProxy):
    """Websockify proxy that resolves its target per-connection from a
    Nova console-auth token instead of a static target."""

    def __init__(self, *args, **kwargs):
        # Static target options are explicitly disabled: the target is
        # looked up from the token on every new client.
        websockify.WebSocketProxy.__init__(self, unix_target=None,
                                           target_cfg=None,
                                           ssl_target=None, *args, **kwargs)

    def new_client(self):
        """
        Called after a new WebSocket connection has been established.
        """
        # The console token is carried in a cookie set by the Nova API.
        cookie = Cookie.SimpleCookie()
        cookie.load(self.headers.getheader('cookie'))
        token = cookie['token'].value
        ctxt = context.get_admin_context()
        rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        connect_info = rpcapi.check_token(ctxt, token=token)
        if not connect_info:
            LOG.audit("Invalid Token: %s", token)
            raise Exception(_("Invalid Token"))
        host = connect_info['host']
        port = int(connect_info['port'])
        # Connect to the target
        self.msg("connecting to: %s:%s" % (host, port))
        LOG.audit("connecting to: %s:%s" % (host, port))
        tsock = self.socket(host, port, connect=True)
        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
                       connect_info['internal_access_path'])
            while True:
                # Peek so the proxied stream still starts at the first byte
                # after the HTTP response once it is consumed below.
                data = tsock.recv(4096, socket.MSG_PEEK)
                if data.find("\r\n\r\n") != -1:
                    # Bug fix: the old test ``not status.find("200")`` was only
                    # true when "200" sat at index 0 of the status line, so a
                    # failed CONNECT was never detected.  Reject any status
                    # line that does not contain "200" at all.
                    if data.split("\r\n")[0].find("200") == -1:
                        LOG.audit("Invalid Connection Info %s", token)
                        raise Exception(_("Invalid Connection Info"))
                    tsock.recv(len(data))
                    break
        if self.verbose and not self.daemon:
            print(self.traffic_legend)
        # Start proxying
        try:
            self.do_proxy(tsock)
        except Exception:
            if tsock:
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
                self.vmsg("%s:%s: Target closed" % (host, port))
                LOG.audit("%s:%s: Target closed" % (host, port))
            raise
|
Get cash for your second-hand or scrap car from Renault wreckers Kingsbury without any hassle. Not only can any customer recycle a Renault car in Kingsbury and receive money for it, they also receive a free pickup service in Kingsbury from the organization. Our company agrees to buy Renault cars, trucks, 4WDs, utes, mini trucks and vans in Kingsbury – of all models. So, if any car owner is looking to remove a mini truck, truck, ute or van in Kingsbury today, promptly get free advice on the car by calling our official in Kingsbury.
Every car seller can also receive our scrap metal services in Eaglemont, Greensborough and Heidelberg.
Our essential benefit is to remove your SUV, mini van, Ute, van in Kingsbury. For that objective, we dispose all Renault in Kingsbury, be it any wrecked, damaged, Third-hand, worn out, used, broken, old, foreign, accident, scrap, domestic, second-hand.
our skilled personnel also provide car disposal in Ivanhoe, Rosanna and Lower Plenty.
For unwanted Renault in Kingsbury, valuation is unconquerable – up to $5300. Nevertheless, our salvage program welcome every Renault cars, mini vans, 4wds, mini trucks, Utes in Kingsbury along with free pickup service. Remove any variety in Kingsbury location in simply six hours.
Renault wreckers Kingsbury has countless car scrapping services. Our car collection service in Kingsbury is absolutely free and genuine. It guarantee that any owner can throw away any car in Kingsbury without having any fee.
Our organization can scrap any car in Kingsbury from any underground car park, street, towing yard, backyard, parking lot, driveway, tow place, road, sidewalk, location.
Our real purpose in scrapping a car in Kingsbury is to dismantle it for spare parts. Be aware that after completely dismantling your car at our Kingsbury workplace, our experienced staff salvage the acceptable auto parts. Our organization sells them in Kingsbury at economical prices.
|
# Note: All calls to tf.name_scope or tf.summary.* support TensorBoard visualization.
import os
import tensorflow as tf
from models.RNN.utils import variable_on_cpu
def SimpleLSTM(input_tensor, seq_length):
    '''
    This function was initially based on open source code from Mozilla DeepSpeech:
    https://github.com/mozilla/DeepSpeech/blob/master/DeepSpeech.py

    # This Source Code Form is subject to the terms of the Mozilla Public
    # License, v. 2.0. If a copy of the MPL was not distributed with this
    # file, You can obtain one at http://mozilla.org/MPL/2.0/.

    Builds a stacked-LSTM acoustic model over `input_tensor` (shape
    [batch_size, n_steps, features]) and returns time-major logits of
    shape [n_steps, batch_size, n_character] plus the merged summary op.
    '''
    # SimpleLSTM
    n_character = 29
    b1_stddev = 0.046875
    h1_stddev = 0.046875
    n_layers = 2
    n_hidden_units = 512
    # Input shape: [batch_size, n_steps, n_input + 2*n_input*n_context]
    # batch_x_shape = tf.shape(batch_x)
    input_tensor_shape = tf.shape(input_tensor)
    n_items = input_tensor_shape[0]
    with tf.name_scope("lstm"):
        # Initialize weights
        # with tf.device('/cpu:0'):
        W = tf.get_variable('W', shape=[n_hidden_units, n_character],
                            # initializer=tf.truncated_normal_initializer(stddev=h1_stddev),
                            initializer=tf.random_normal_initializer(stddev=h1_stddev),
                            )
        # Initialize bias
        # with tf.device('/cpu:0'):
        # b = tf.get_variable('b', initializer=tf.zeros_initializer([n_character]))
        b = tf.get_variable('b', shape=[n_character],
                            # initializer=tf.constant_initializer(value=0),
                            initializer=tf.random_normal_initializer(stddev=b1_stddev),
                            )
        # Define the cells.  Can be:
        #   tf.contrib.rnn.BasicRNNCell
        #   tf.contrib.rnn.GRUCell
        # Bug fix: `[cell] * n_layers` passed the SAME cell object to every
        # layer of MultiRNNCell, sharing weights across layers (and raising
        # variable-reuse errors on newer TF).  Build one cell per layer.
        stack = tf.contrib.rnn.MultiRNNCell(
            [tf.contrib.rnn.BasicLSTMCell(n_hidden_units, state_is_tuple=True)
             for _ in range(n_layers)],
            state_is_tuple=True)
        # Get layer activations (second output is the final state of the layer, do not need)
        outputs, _ = tf.nn.dynamic_rnn(stack, input_tensor, seq_length,
                                       time_major=False, dtype=tf.float32)
        # Reshape to apply the same weights over the timesteps
        outputs = tf.reshape(outputs, [-1, n_hidden_units])
        # Perform affine transformation to layer output:
        # multiply by weights (linear transformation), add bias (translation)
        logits = tf.add(tf.matmul(outputs, W), b)
        tf.summary.histogram("weights", W)
        tf.summary.histogram("biases", b)
        tf.summary.histogram("activations", logits)
        # Reshaping back to the original shape
        logits = tf.reshape(logits, [n_items, -1, n_character])
        # Put time as the major axis
        logits = tf.transpose(logits, (1, 0, 2))
        summary_op = tf.summary.merge_all()
        return logits, summary_op
def BiRNN(batch_x, seq_length, n_input, n_context):
    """
    This function was initially based on open source code from Mozilla DeepSpeech:
    https://github.com/mozilla/DeepSpeech/blob/master/DeepSpeech.py

    # This Source Code Form is subject to the terms of the Mozilla Public
    # License, v. 2.0. If a copy of the MPL was not distributed with this
    # file, You can obtain one at http://mozilla.org/MPL/2.0/.

    Three clipped-ReLU dense layers -> one bidirectional LSTM layer ->
    two more dense layers producing per-timestep character logits.

    Args:
        batch_x: [batch_size, n_steps, n_input + 2*n_input*n_context] features.
        seq_length: [batch_size] true sequence lengths.
        n_input: features per frame; n_context: context frames on each side.

    Returns:
        (layer_6, summary_op): time-major logits [n_steps, batch_size, n_hidden_6]
        and the merged TensorBoard summary op.
    """
    # Per-layer dropout keep-rates are (1 - value); positions 3/4 feed the LSTM.
    dropout = [0.05,0.05,0.05,0.0,0.0,0.05]
    relu_clip = 20
    b1_stddev = 0.046875
    h1_stddev = 0.046875
    b2_stddev = 0.046875
    h2_stddev = 0.046875
    b3_stddev = 0.046875
    h3_stddev = 0.046875
    b5_stddev = 0.046875
    h5_stddev = 0.046875
    b6_stddev = 0.046875
    h6_stddev = 0.046875
    n_hidden_1 = 1024
    n_hidden_2 = 1024
    n_hidden_5 = 1024
    n_cell_dim = 1024
    n_hidden_3 = 1024
    n_hidden_6 = 1024
    # Input shape: [batch_size, n_steps, n_input + 2*n_input*n_context]
    batch_x_shape = tf.shape(batch_x)
    # Reshaping `batch_x` to a tensor with shape `[n_steps*batch_size, n_input + 2*n_input*n_context]`.
    # This is done to prepare the batch for input into the first layer which expects a tensor of rank `2`.
    # Permute n_steps and batch_size
    batch_x = tf.transpose(batch_x, [1, 0, 2])
    # Reshape to prepare input for first layer
    batch_x = tf.reshape(batch_x,
                         [-1, n_input + 2 * n_input * n_context])  # (n_steps*batch_size, n_input + 2*n_input*n_context)
    # The next three blocks will pass `batch_x` through three hidden layers with
    # clipped RELU activation and dropout.
    # 1st layer
    with tf.name_scope('fc1'):
        b1 = variable_on_cpu('b1', [n_hidden_1], tf.random_normal_initializer(stddev=b1_stddev))
        h1 = variable_on_cpu('h1', [n_input + 2 * n_input * n_context, n_hidden_1],
                             tf.random_normal_initializer(stddev=h1_stddev))
        layer_1 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(batch_x, h1), b1)), relu_clip)
        layer_1 = tf.nn.dropout(layer_1, (1.0 - dropout[0]))
        tf.summary.histogram("weights", h1)
        tf.summary.histogram("biases", b1)
        tf.summary.histogram("activations", layer_1)
    # 2nd layer
    with tf.name_scope('fc2'):
        b2 = variable_on_cpu('b2', [n_hidden_2], tf.random_normal_initializer(stddev=b2_stddev))
        h2 = variable_on_cpu('h2', [n_hidden_1, n_hidden_2], tf.random_normal_initializer(stddev=h2_stddev))
        layer_2 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(layer_1, h2), b2)), relu_clip)
        layer_2 = tf.nn.dropout(layer_2, (1.0 - dropout[1]))
        tf.summary.histogram("weights", h2)
        tf.summary.histogram("biases", b2)
        tf.summary.histogram("activations", layer_2)
    # 3rd layer
    with tf.name_scope('fc3'):
        b3 = variable_on_cpu('b3', [n_hidden_3], tf.random_normal_initializer(stddev=b3_stddev))
        h3 = variable_on_cpu('h3', [n_hidden_2, n_hidden_3], tf.random_normal_initializer(stddev=h3_stddev))
        layer_3 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(layer_2, h3), b3)), relu_clip)
        layer_3 = tf.nn.dropout(layer_3, (1.0 - dropout[2]))
        tf.summary.histogram("weights", h3)
        tf.summary.histogram("biases", b3)
        tf.summary.histogram("activations", layer_3)
    # Create the forward and backward LSTM units. Inputs have length `n_cell_dim`.
    # LSTM forget gate bias initialized at `1.0` (default), meaning less forgetting
    # at the beginning of training (remembers more previous info)
    with tf.name_scope('lstm'):
        # Forward direction cell:
        lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(n_cell_dim, forget_bias=1.0, state_is_tuple=True)
        lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(lstm_fw_cell,
                                                     input_keep_prob=1.0 - dropout[3],
                                                     output_keep_prob=1.0 - dropout[3],
                                                     # seed=random_seed,
                                                     )
        # Backward direction cell:
        lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(n_cell_dim, forget_bias=1.0, state_is_tuple=True)
        lstm_bw_cell = tf.contrib.rnn.DropoutWrapper(lstm_bw_cell,
                                                     input_keep_prob=1.0 - dropout[4],
                                                     output_keep_prob=1.0 - dropout[4],
                                                     # seed=random_seed,
                                                     )
        # `layer_3` is now reshaped into `[n_steps, batch_size, 2*n_cell_dim]`,
        # as the LSTM BRNN expects its input to be of shape `[max_time, batch_size, input_size]`.
        layer_3 = tf.reshape(layer_3, [-1, batch_x_shape[0], n_hidden_3])
        # Now we feed `layer_3` into the LSTM BRNN cell and obtain the LSTM BRNN output.
        outputs, output_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fw_cell,
                                                                 cell_bw=lstm_bw_cell,
                                                                 inputs=layer_3,
                                                                 dtype=tf.float32,
                                                                 time_major=True,
                                                                 sequence_length=seq_length)
        tf.summary.histogram("activations", outputs)
        # Reshape outputs from two tensors each of shape [n_steps, batch_size, n_cell_dim]
        # to a single tensor of shape [n_steps*batch_size, 2*n_cell_dim]
        outputs = tf.concat(outputs, 2)
        outputs = tf.reshape(outputs, [-1, 2 * n_cell_dim])
    with tf.name_scope('fc5'):
        # Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
        b5 = variable_on_cpu('b5', [n_hidden_5], tf.random_normal_initializer(stddev=b5_stddev))
        h5 = variable_on_cpu('h5', [(2 * n_cell_dim), n_hidden_5], tf.random_normal_initializer(stddev=h5_stddev))
        layer_5 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(outputs, h5), b5)), relu_clip)
        layer_5 = tf.nn.dropout(layer_5, (1.0 - dropout[5]))
        tf.summary.histogram("weights", h5)
        tf.summary.histogram("biases", b5)
        tf.summary.histogram("activations", layer_5)
    with tf.name_scope('fc6'):
        # Now we apply the weight matrix `h6` and bias `b6` to the output of `layer_5`
        # creating `n_classes` dimensional vectors, the logits.
        b6 = variable_on_cpu('b6', [n_hidden_6], tf.random_normal_initializer(stddev=b6_stddev))
        h6 = variable_on_cpu('h6', [n_hidden_5, n_hidden_6], tf.random_normal_initializer(stddev=h6_stddev))
        layer_6 = tf.add(tf.matmul(layer_5, h6), b6)
        tf.summary.histogram("weights", h6)
        tf.summary.histogram("biases", b6)
        tf.summary.histogram("activations", layer_6)
    # Finally we reshape layer_6 from a tensor of shape [n_steps*batch_size, n_hidden_6]
    # to the slightly more useful shape [n_steps, batch_size, n_hidden_6].
    # Note, that this differs from the input in that it is time-major.
    layer_6 = tf.reshape(layer_6, [-1, batch_x_shape[0], n_hidden_6])
    summary_op = tf.summary.merge_all()
    # Output shape: [n_steps, batch_size, n_hidden_6]
    return layer_6, summary_op
|
The movie’s hulking, photographic indolence—committed by Jarmusch, playing camera operator and inspired, probably, by fringe-noir like The Honeymoon Killers—trips the frame into intriguing vantage points where it can better observe the magnificently creepy cast members, each of them aura’d by the extremist black-and-white format like hanging, macabre baubles. This tension, however, between deliberate shadow and extemporaneous mise-en-scène, uncannily mirrors the manner in which Bowles’s narrator imagines herself as not only superior to her environment but the progenitor of it. (When she reaches her sister’s home, there’s a humorous ego-trip of a moment where she observes the futile interior decorating that’s taken place since her previous sojourn there, noting with superciliousness that all her sibling can manage is to rearrange the same home furnishings into symmetrical configurations; as the two talk, a kitschy Jesus lithograph hanging on the wall negotiates the space between them.) The result is a kind of off-the-cuff manifesto, one that Driver and her buddies crafted with enough naïve subtlety to be (mis-)interpreted as a furious vindication of either directorial or female agency, even as the moments slopping by us on the screen seem to cannibalize whatever authorship is trailing them. Given this, and the fact that there’s an effective moment at the climax where Bowles’s words are read icily over a black screen, it’s not difficult to understand why the writer preserved his copy, the only copy, of You Are Not I for posterity—albeit appropriately without much fanfare.
|
#!/usr/bin/env python
"""
Usage:
get_chef_attrs.py [-h|--help] --hosts <hosts-file> [--chef-cfg <chef-cfg-file>] --cache <cache-file> [--ext <extension>] --attrs <attr>... [--verbose]
Options:
-h,--help show this help text
-H <hosts-file>, --hosts <hosts-file> target hosts file - one host per line
--chef-cfg <chef-cfg-file> yaml based chef config file [default: chef.yml]
--cache <cache-file> cache for storing looked up hosts
--attrs <attr> chef attributes to search
--ext <extension> add this host extension to re-search if the search fails
-v, --verbose verbose mode
"""
import os
import sys
import yaml
import json
from docopt import docopt
import chef
def lookup_chef(opts, hosts):
    """Query the Chef server for the requested attributes of each host.

    Reads Chef connection settings (host, pem, user) from the YAML file
    given by --chef-cfg, then for every host fetches the attributes named
    in --attrs.  Hosts may be given as 'user@host'; only the host part is
    used.  If a host has no 'ipaddress' attribute and --ext is set, the
    lookup is retried with 'host.<ext>'.

    Returns a dict mapping host -> {attr: value}; hosts that still have
    no ipaddress after the retry map to an empty dict.
    """
    chef_cfg = yaml.load(open(opts['--chef-cfg']).read())
    cache = {}
    with chef.ChefAPI(chef_cfg['chef']['host'], chef_cfg['chef']['pem'],
                      chef_cfg['chef']['user']):
        for host in hosts:
            attrs_map = {}
            orig_host = host
            # Strip a leading 'user@' prefix if present.
            if '@' in host:
                _, host = host.split('@')
            n = chef.Node(host)
            ipaddr = n.attributes.get('ipaddress')
            # A missing ipaddress means the node was not found under this
            # name; retry with the configured domain extension appended.
            # (Chef may return the string 'None' as well as a real None.)
            if ipaddr is None or ipaddr == 'None':
                if opts['--ext'] is not None:
                    host = host + '.' + opts['--ext']
                    n = chef.Node(host)
                    ipaddr = n.attributes.get('ipaddress')
            for attr in opts['--attrs']:
                attrs_map[str(attr)] = str(n.attributes.get(attr))
            # Only record attributes for hosts that actually resolved.
            if ipaddr:
                cache[host] = attrs_map
            else:
                cache[host] = {}
            if '--verbose' in opts and opts['--verbose']:
                print "------------"
                print host
                print json.dumps(attrs_map, indent=4)
    return cache
def get_chef_attrs(opts):
    """Resolve Chef attributes for every host listed in the hosts file.

    Each host is first looked up in the JSON cache file (--cache), under
    both its full name and its short (pre-dot) name.  Only hosts missing
    from the cache are fetched from the Chef server via lookup_chef, and
    the merged cache is written back to disk.

    Returns the full cache dict mapping host -> {attr: value}.
    """
    with open(opts['--hosts']) as f:
        hosts = [x.strip() for x in f.readlines()]
    unresolved_hosts = []
    cache = json.loads(open(opts['--cache']).read())
    for host in hosts:
        # BUG FIX: hosts may be plain 'host' as well as 'user@host'; the
        # old unconditional split crashed with ValueError on plain hosts.
        if '@' in host:
            _, host = host.rsplit('@', 1)
        # A host may be cached under its FQDN or its short name.
        host_variants = [host, host.split('.')[0]]
        found = any(variant in cache for variant in host_variants)
        if not found:
            unresolved_hosts.append(host)
    if unresolved_hosts:
        hosts_info = lookup_chef(opts, unresolved_hosts)
        for host in hosts_info:
            cache[host] = hosts_info[host]
    # Persist the (possibly updated) cache for the next run.
    with open(opts['--cache'], 'w') as f:
        f.write(json.dumps(cache, indent=4))
    return cache
def validate_input(opts):
if not os.path.exists(opts['--hosts']):
print 'ERROR: hosts file %s does not exist' % opts['--hosts']
sys.exit(1)
if not os.path.exists(opts['--chef-cfg']):
print 'ERROR: chef cfg file %s does not exist' % opts['--chef-cfg']
sys.exit(1)
if not opts['--attrs']:
print 'ERROR: Empty attrs' % opts['--attrs']
sys.exit(1)
if not os.path.exists(opts['--cache']):
with open(opts['--cache'], 'w') as f:
f.write('{}')
def load_args(args):
    """Parse command line options with docopt from the given argv list.

    BUG FIX: the `args` parameter was previously ignored and docopt always
    read sys.argv implicitly; now the caller-supplied argv is honored.
    """
    return docopt(__doc__, argv=args)
def main(opts):
    """Validate the options, then return the resolved attribute cache."""
    validate_input(opts)
    attrs_cache = get_chef_attrs(opts)
    return attrs_cache
if __name__ == '__main__':
    # Script entry point: parse CLI args, resolve the requested Chef
    # attributes (cached where possible), and dump them as pretty JSON.
    opts = load_args(sys.argv[1:])
    attrs = main(opts)
    print json.dumps(attrs, indent=4)
|
Are you considering Childminding as a career?
If you would like a rewarding career working with children and like the idea of working from home, this could be perfect for you. Childminders look after children in their own home, whilst the children's parents are at work or studying. They could look after babies and children under five during the day, and/or older children after school and in the school holidays.
You will find all you need to know at the Scottish Childminding Association and also here at the Care Inspectorate website.
Further information can also be found at Registering and running a childminding service and at CARIS.
|
import csv
import json
import pymongo
from pymongo.objectid import ObjectId
from pymongo import Connection
def import_feeds():
print "reading"
s = set()
r = csv.reader(open("feeds.txt", "rU"))
for i in r:
if len(i)>1:
if len(i[1])>0:
s.add(i[1])
connection = Connection()
connection = Connection("localhost", 27017)
db = connection.river
collection = db.spider
print "inserting"
for i in s:
feed = { "url" : i}
collection.feeds.insert(feed)
def find_feeds():
connection = Connection("192.168.0.18", 10000)
db = connection.river
collection = db.spider
d = {}
l = ["hello", "world"]
d["data"] = l
print json.dumps(d)
db.testtable.insert(d)
def main():
    """Dump every document in river.testtable to stdout."""
    # find_feeds() is kept for manual experimentation.
    #find_feeds()
    # slave_okay allows reads from a secondary replica-set member.
    connection = Connection("kain.active8.nl", 10000, slave_okay=True)
    db = connection.river
    for o in db.testtable.find():
        print o
#
if __name__=="__main__":
    # Script entry point: print the contents of river.testtable.
    main()
    print "ok"
|
A memo from Nevada System of Higher Education Chancellor Jim Rogers recently defended paying 106 of the roughly 1,300 NSHE employees more than $100,000 a year. It was another instance of Nevada policy leaders focusing on inputs—state employees and salaries—rather than results.
The University of Nevada, Las Vegas (UNLV) and the University of Nevada, Reno (UNR) are considered public research universities. According to the Delta Project's latest report, "Trends in College Spending," the average public research university devotes just 45 percent of its resources to student instruction. The report also found that the majority of tuition increases have not gone toward educating students but instead toward funding other university functions. In short, students across the U.S. are being harnessed to subsidize non-instructional projects at their respective institutions.
Per-student operating budgets at UNLV and UNR are near or above the national average. We have little reason to believe that either school devotes considerably more than that toward educational purposes.
According to the report “Trends in College Spending,” NSHE’s public research universities spend approximately $14,641 per student on education—ranking 18th highest in the nation but not significantly above the national average. This suggests two things. First, the Nevada System of Higher Education is not underfunded and second, NSHE still devotes a large portion of its resources to ends other than the instruction of students.
Is this a reason why Nevada college students graduate at rates pitifully below the national average? After six years of school, only 41 percent of UNLV students and about 48 percent at UNR graduate. The situation is even worse for Nevada’s minority population. Approximately two-thirds of Hispanics and African Americans in Nevada do not graduate within six years.
Students who cannot graduate within six years are statistically unlikely to ever graduate from college. All that many of these students can expect are debt and disappointment.
If Nevada’s flagship universities cannot even graduate 50 percent of their students in six years, why should the schools be routing resources away from education and to six-figure salaries for lawyers, specialists and counselors—and even research-oriented professors?
The answer Rogers should be giving is not whether these $100,000-a-year employees are qualified for the jobs they hold, but whether or not they add genuine value to a student’s education—especially in the down-to-earth categories of improved graduation rates and help for students to find gainful employment.
These are questions, we fear, that Rogers will be unable to answer seriously. The reason is simple: Higher education in Nevada has become a jobs program for academics—the poor are being sacrificed to subsidize the holders of Ph.D.s.
The Nevada System of Higher Education needs to take a serious look at what functions drain resources away from the education of students. UNR and UNLV also need to ensure that 100 percent of tuition and fees go to educating students, rather than subsidizing research projects, lavish buildings, rock-climbing walls and Olympic-size swimming pools.
Until UNR and UNLV can improve graduation rates, these are luxuries Nevada can ill afford.
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
from python cookbook 2nd edition.
"""
import collections
class Peekable(object):
    """An iterator wrapper that supports looking ahead without consuming.

    Peeked items are buffered in a deque and handed out by later calls to
    next(), so iteration order is unchanged.

    Example:
    >>> p = Peekable(iter(range(4)))
    >>> p.peek()
    0
    >>> p.next(1)
    [0]
    >>> p.peek(3)
    [1, 2, 3]
    >>> p.next(2)
    [1, 2]
    """
    def __init__(self, iterable):
        self._iterable = iterable
        self._cache = collections.deque()

    def __iter__(self):
        return self

    def _fill_cache(self, n):
        """Buffer items until the cache holds at least n (default 1).

        Propagates StopIteration when the underlying iterator runs dry.
        """
        if n is None:
            n = 1
        while len(self._cache) < n:
            # BUG FIX: use the builtin next() instead of the Python-2-only
            # .next() method so arbitrary (Python 3) iterators work too.
            self._cache.append(next(self._iterable))

    def next(self, n=None):
        """Return the next item (n is None) or a list of the next n items."""
        self._fill_cache(n)
        if n is None:
            result = self._cache.popleft()
        else:
            result = [self._cache.popleft() for i in range(n)]
        return result

    def __next__(self):
        # Python 3 iterator protocol; plain iteration yields single items.
        return self.next()

    def peek(self, n=None):
        """Like next() but without consuming the item(s)."""
        self._fill_cache(n)
        if n is None:
            result = self._cache[0]
        else:
            result = [self._cache[i] for i in range(n)]
        return result
|
Our banking law team has extensive experience acting for banks, businesses and individuals. We understand the absolute necessity of ensuring the foundations of your banking transactions are set properly the first time around; when dealing with financial matters ‘close enough’ is not good enough.
We fully understand the ins and outs of transactional banking, and the commercial realities for both parties who want to make a deal work. We also know about the specialised internal world of corporate banking, and the setting up and administration of financial service providers and advisors.
Our experienced banking and finance team provides specialist advice in a timely and cost effective way to local, national and international lenders, from those providing one off financing, to mezzanine lenders, to registered banks. Our borrower clients come in all sizes and with a wide range of transactions and requirements such as property and project finance, equipment and working capital finance. Many transactions are unique: we know that with banking and finance there is often no one size fits all answer and a solution tailored to the client and the transaction may be required.
Working for both lenders and borrowers as regularly as we do gives us an insight into the motivations and desired outcomes for both sides of the transaction. This enables us to provide you with truly specialist advice and guidance.
We also have specialist experience in the areas of enforcement, recovery and restructuring, providing advice to recognised receivers, liquidators and financial institutions. Having a practical understanding of how the recovery process works means we can help construct practical solutions, often with negotiated workable outcomes.
Knowing our clients, knowing how their businesses work, and knowing the matters that are of real, practical importance to them is fundamental to the way we work.
Our aim is always to work with you, to ensure you achieve the outcome you need to make your business, transaction, project or finance product work, you fully understand the process and the outcome (whether as a lender or as a borrower) and you are fully protected and informed.
This approach is what makes us a multi-award winning team and a trusted adviser.
Lane Neave, Banking and Finance –generalists and specialists, an integral part of your business.
|
import math
from datetime import datetime, timedelta
from celery.task import task
from time import sleep
from redis_cache.cache import RedisCache
from corehq.apps.sms.mixin import SMSLoadBalancingMixin
from corehq.apps.sms.models import (SMSLog, OUTGOING, INCOMING,
ERROR_TOO_MANY_UNSUCCESSFUL_ATTEMPTS, ERROR_MESSAGE_IS_STALE,
ERROR_INVALID_DIRECTION)
from corehq.apps.sms.api import (send_message_via_backend, process_incoming,
log_sms_exception)
from django.conf import settings
from corehq.apps.domain.models import Domain
from corehq.apps.smsbillables.models import SmsBillable
from corehq.util.timezones.conversions import ServerTime
from dimagi.utils.couch.cache import cache_core
from threading import Thread
def handle_unsuccessful_processing_attempt(msg):
    """Record a failed processing attempt: retry later, or give up for good."""
    msg.num_processing_attempts += 1
    if msg.num_processing_attempts >= settings.SMS_QUEUE_MAX_PROCESSING_ATTEMPTS:
        # Retry budget exhausted; mark the message as permanently failed.
        msg.set_system_error(ERROR_TOO_MANY_UNSUCCESSFUL_ATTEMPTS)
    else:
        delay_processing(msg, settings.SMS_QUEUE_REPROCESS_INTERVAL)
def handle_successful_processing_attempt(msg):
    """Mark the message as processed and persist it."""
    now = datetime.utcnow()
    msg.num_processing_attempts += 1
    msg.processed = True
    msg.processed_timestamp = now
    if msg.direction == OUTGOING:
        # Stamp outbound messages with the actual send time.
        msg.date = now
    msg.save()
def delay_processing(msg, minutes):
    """Push the message's scheduled processing time forward and persist it."""
    msg.datetime_to_process = msg.datetime_to_process + timedelta(minutes=minutes)
    msg.save()
def get_lock(client, key):
    """Return a named redis lock using the configured processing timeout."""
    timeout_seconds = settings.SMS_QUEUE_PROCESSING_LOCK_TIMEOUT * 60
    return client.lock(key, timeout=timeout_seconds)
def time_within_windows(domain_now, windows):
    """Return True if domain_now falls inside any of the given windows.

    A window matches when its day equals the current weekday (or is -1,
    meaning every day) and the current time lies within its optional
    start_time/end_time bounds (inclusive).
    """
    weekday = domain_now.weekday()
    now_time = domain_now.time()

    def _matches(window):
        if window.day not in (weekday, -1):
            return False
        if window.start_time is not None and now_time < window.start_time:
            return False
        if window.end_time is not None and now_time > window.end_time:
            return False
        return True

    return any(_matches(window) for window in windows)
def handle_domain_specific_delays(msg, domain_object, utcnow):
    """
    Checks whether or not we need to hold off on sending an outbound message
    due to any restrictions set on the domain, and delays processing of the
    message if necessary.
    Returns True if a delay was made, False if not.
    """
    # Restriction windows are defined in domain-local time, so convert the
    # UTC timestamp into the domain's default timezone first.
    domain_now = ServerTime(utcnow).user_time(domain_object.get_default_timezone()).done()
    if len(domain_object.restricted_sms_times) > 0:
        # Outbound SMS may only go out inside the allowed send windows.
        if not time_within_windows(domain_now, domain_object.restricted_sms_times):
            delay_processing(msg, settings.SMS_QUEUE_DOMAIN_RESTRICTED_RETRY_INTERVAL)
            return True
    # Only non-chat (automated) messages are held back during conversation
    # windows; chat messages (chat_user_id set) go out immediately.
    if msg.chat_user_id is None and len(domain_object.sms_conversation_times) > 0:
        if time_within_windows(domain_now, domain_object.sms_conversation_times):
            sms_conversation_length = domain_object.sms_conversation_length
            conversation_start_timestamp = utcnow - timedelta(minutes=sms_conversation_length)
            # If the recipient sent an inbound SMS recently, they may be in
            # the middle of a conversation; hold this message back briefly.
            if SMSLog.inbound_entry_exists(msg.couch_recipient_doc_type,
                                           msg.couch_recipient,
                                           conversation_start_timestamp,
                                           utcnow):
                delay_processing(msg, 1)
                return True
    return False
def message_is_stale(msg, utcnow):
    """Return True if the message is too old to send, or has no valid date."""
    cutoff = utcnow - timedelta(hours=settings.SMS_QUEUE_STALE_MESSAGE_DURATION)
    if not isinstance(msg.date, datetime):
        # No usable timestamp: treat as stale rather than risk sending late.
        return True
    return msg.date < cutoff
def _wait_and_release_lock(lock, timeout, start_timestamp):
while (datetime.utcnow() - start_timestamp) < timedelta(seconds=timeout):
sleep(0.1)
try:
lock.release()
except:
# The lock could have timed out in the meantime
pass
def wait_and_release_lock(lock, timeout):
timestamp = datetime.utcnow()
t = Thread(target=_wait_and_release_lock, args=(lock, timeout, timestamp))
t.start()
def handle_outgoing(msg):
    """
    Should return a requeue flag, so if it returns True, the message will be
    requeued and processed again immediately, and if it returns False, it will
    not be queued again.
    """
    backend = msg.outbound_backend
    # A non-None interval means this backend is rate limited: at most one
    # send per sms_interval seconds, enforced via a redis lock below.
    sms_interval = backend.get_sms_interval()
    use_rate_limit = sms_interval is not None
    use_load_balancing = (isinstance(backend, SMSLoadBalancingMixin) and
        len(backend.phone_numbers) > 1)
    if use_rate_limit or use_load_balancing:
        client = cache_core.get_redis_client()
    lbi = None
    orig_phone_number = None
    if use_load_balancing:
        # Pick the next sender phone number from the load balancer.
        lbi = backend.get_next_phone_number(client)
        orig_phone_number = lbi.phone_number
    elif (isinstance(backend, SMSLoadBalancingMixin) and
        len(backend.phone_numbers) == 1):
        # If there's only one phone number, we don't need to go through the
        # load balancing algorithm. But we should always pass an
        # orig_phone_number if it's an instance of SMSLoadBalancingMixin.
        orig_phone_number = backend.phone_numbers[0]
    if use_rate_limit:
        # The rate limit is per backend, or per backend+phone number when
        # load balancing across several sender numbers.
        if use_load_balancing:
            lock_key = "sms-backend-%s-rate-limit-phone-%s" % (backend._id,
                lbi.phone_number)
        else:
            lock_key = "sms-backend-%s-rate-limit" % backend._id
        lock = client.lock(lock_key, timeout=30)
    if not use_rate_limit or (use_rate_limit and lock.acquire(blocking=False)):
        if use_load_balancing:
            lbi.finish(save_stats=True)
        result = send_message_via_backend(msg, backend=backend,
            orig_phone_number=orig_phone_number)
        if use_rate_limit:
            # Release the lock in a background thread once the interval has
            # elapsed, so this worker is not blocked for the full interval.
            wait_and_release_lock(lock, sms_interval)
        # Only do the following if an unrecoverable error did not happen
        if not msg.error:
            if result:
                handle_successful_processing_attempt(msg)
            else:
                handle_unsuccessful_processing_attempt(msg)
        return False
    else:
        # We're using rate limiting, but couldn't acquire the lock, so
        # another thread is sending sms with this backend. Rather than wait,
        # we'll just put this message at the back of the queue.
        if use_load_balancing:
            lbi.finish(save_stats=False)
        return True
def handle_incoming(msg):
    """Process an inbound SMS, recording success or failure of the attempt."""
    try:
        process_incoming(msg)
        handle_successful_processing_attempt(msg)
    except:
        # Deliberately broad: any failure is logged and the message is
        # scheduled for another attempt rather than crashing the task.
        log_sms_exception(msg)
        handle_unsuccessful_processing_attempt(msg)
@task(queue="sms_queue", ignore_result=True)
def process_sms(message_id):
    """
    message_id - _id of an SMSLog entry
    """
    # Note that Redis error/exception notifications go out from the
    # run_sms_queue command, so no need to send them out here
    # otherwise we'd get too many emails.
    rcache = cache_core.get_redis_default_cache()
    if not isinstance(rcache, RedisCache):
        # Without a real redis cache we cannot take locks; bail out quietly.
        return
    try:
        client = rcache.raw_client
    except NotImplementedError:
        return
    utcnow = datetime.utcnow()
    # Prevent more than one task from processing this SMS, just in case
    # the message got enqueued twice.
    message_lock = get_lock(client, "sms-queue-processing-%s" % message_id)
    if message_lock.acquire(blocking=False):
        msg = SMSLog.get(message_id)
        if message_is_stale(msg, utcnow):
            msg.set_system_error(ERROR_MESSAGE_IS_STALE)
            message_lock.release()
            return
        if msg.direction == OUTGOING:
            if msg.domain:
                domain_object = Domain.get_by_name(msg.domain, strict=True)
            else:
                domain_object = None
            # Honor domain-level send windows / conversation pauses.
            if domain_object and handle_domain_specific_delays(msg, domain_object, utcnow):
                message_lock.release()
                return
        requeue = False
        # Process inbound SMS from a single contact one at a time
        recipient_block = msg.direction == INCOMING
        if (isinstance(msg.processed, bool)
            and not msg.processed
            and not msg.error
            and msg.datetime_to_process < utcnow):
            if recipient_block:
                recipient_lock = get_lock(client,
                    "sms-queue-recipient-phone-%s" % msg.phone_number)
                recipient_lock.acquire(blocking=True)
            if msg.direction == OUTGOING:
                requeue = handle_outgoing(msg)
            elif msg.direction == INCOMING:
                handle_incoming(msg)
            else:
                msg.set_system_error(ERROR_INVALID_DIRECTION)
            if recipient_block:
                recipient_lock.release()
        message_lock.release()
        if requeue:
            # handle_outgoing asked for an immediate retry (e.g. the
            # rate-limit lock was busy); re-enqueue the same message id.
            process_sms.delay(message_id)
@task(ignore_result=True)
def store_billable(msg):
    """Create one SmsBillable per billed message segment, exactly once."""
    if not msg._id or SmsBillable.objects.filter(log_id=msg._id).exists():
        # No id to bill against, or already billed: nothing to do.
        return
    try:
        msg.text.encode('iso-8859-1')
        msg_length = 160
    except UnicodeEncodeError:
        # This string contains unicode characters, so the allowed
        # per-sms message length is shortened
        msg_length = 70
    num_segments = int(math.ceil(float(len(msg.text)) / msg_length))
    for _ in range(num_segments):
        SmsBillable.create(msg)
|
The right risk-management program can help you protect your organization's clients, employees, and reputation.
Here's a great way to involve your board in raising funds.
by Lo Bianco, Laura A.
Here's a lawyer's take on what every board needs to know.
Where are Your Volunteer Leaders?
How often have you heard, "I'll help with that, but don't ask me to lead it!"? Here's how to hear "Yes!" next time you ask.
|
# Copyright 2017-2018 IBM Corp. All Rights Reserved.
# See LICENSE for details.
#
# Author: Henrik Loeser
#
# Converse with your assistant based on IBM Watson Assistant service on IBM Cloud.
# See the README for documentation.
#
import json, argparse, importlib
from os.path import join, dirname
from ibm_watson import AssistantV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
privcontext=None
assistantService=None
def loadAndInit(confFile=None):
    """Load Watson Assistant credentials from confFile and initialize the
    global assistantService client (API V2, IAM authentication).

    Raises SystemExit if the config does not contain an apikey.
    """
    # Credentials are read from a file
    with open(confFile) as confFile:
        config = json.load(confFile)
        configWA = config['credentials']
    global assistantService
    # Initialize the Watson Assistant client, use API V2
    if 'apikey' in configWA:
        # Authentication via IAM
        authenticator = IAMAuthenticator(configWA['apikey'])
        assistantService = AssistantV2(
            authenticator=authenticator,
            version=configWA['versionV2'])
        assistantService.set_service_url(configWA['url'])
    else:
        print('Expected apikey in credentials.')
        # BUG FIX: the bare name 'exit' was a no-op expression; actually
        # abort with a non-zero status when credentials are incomplete.
        raise SystemExit(1)
# Define parameters that we want to catch and some basic command help
def initParser(args=None):
    """Build the argparse parser for the Watson Assistant CLI tool."""
    parser = argparse.ArgumentParser(
        description='Watson Assistant tool',
        prog='watoolV2.py',
        usage='%(prog)s [-h | -dialog ] [options]')
    parser.add_argument("-dialog", dest='dialog', action='store_true',
                        help='have dialog')
    parser.add_argument("-outputonly", dest='outputOnly', action='store_true',
                        help='print dialog output only')
    parser.add_argument("-id", dest='assistantID', help='Assistant ID')
    parser.add_argument("-actionmodule", dest='actionModule',
                        help='Module for client action handling')
    parser.add_argument("-context", dest='context', help='context file')
    parser.add_argument("-config", dest='confFile', default='config.json',
                        help='configuration file')
    return parser
# Start a dialog and converse with Watson
def converse(assistantID, outputOnly=None, contextFile=None):
    """Run an interactive chat loop against the configured assistant.

    assistantID  - Watson Assistant ID to talk to
    outputOnly   - when truthy, print only the dialog output, not the
                   full response object
    contextFile  - path used to persist the session context between turns
                   (defaults to session_contextV2.json)

    The loop ends when the user types 'bye' (the session is then deleted).
    """
    # BUG FIX: the contextFile parameter was unconditionally overwritten,
    # so a caller-provided path was silently ignored.
    if contextFile is None:
        contextFile = "session_contextV2.json"
    print ("Starting a conversation, stop by Ctrl+C or saying 'bye'")
    print ("======================================================")
    # Start with an empty context object
    context = {}
    first = True

    ## Load conversation context on start or not?
    contextStart = input("Start with empty context? (Y/n)\n")
    if (contextStart == "n" or contextStart == "N"):
        print ("loading old session context...")
        # 'with' closes the file; the old redundant close() was removed.
        with open(contextFile) as jsonFile:
            context = json.load(jsonFile)

    # create a new session
    response = assistantService.create_session(assistant_id=assistantID).get_result()
    sessionID = response['session_id']
    print('Session created!\n')

    # Now loop to chat
    while True:
        # get some input
        minput = input("\nPlease enter your input message:\n")
        # if we catch a "bye" then exit after deleting the session
        if (minput == "bye"):
            response = assistantService.delete_session(
                assistant_id=assistantID,
                session_id=sessionID).get_result()
            print('Session deleted. Bye...')
            break
        # Read the session context from file if we are not entering the loop
        # for the first time
        if not first:
            try:
                with open(contextFile) as jsonFile:
                    context = json.load(jsonFile)
            except IOError:
                # Context file may not exist yet; keep the current context.
                print ("ignoring")
        else:
            first = False
        # Process IBM Cloud Function credentials if present
        if privcontext is not None:
            context.update(privcontext)
        # send the input to Watson Assistant
        # Set alternate_intents to False for less output
        resp = assistantService.message(assistant_id=assistantID,
                                        session_id=sessionID,
                                        input={'text': minput,
                                               'options': {'alternate_intents': True, 'return_context': True, 'debug': True}}
                                        ).get_result()
        #print(json.dumps(resp, indent=2))
        # Save returned context for next round of conversation
        if ('context' in resp):
            context = resp['context']
        respOutput = resp['output']
        # The assistant may ask the client to perform an action locally;
        # delegate to the configured action module (hca), then send the
        # action result back to the assistant.
        if ('actions' in respOutput and len(respOutput['actions']) and respOutput['actions'][0]['type'] == 'client'):
            # Dump the returned answer
            if not outputOnly:
                print ("")
                print ("Full response object of intermediate step:")
                print ("------------------------------------------")
                print(json.dumps(resp, indent=2))
            if (hca is not None):
                contextNew = hca.handleClientActions(context, respOutput['actions'], resp)
                # call Watson Assistant with result from client action(s)
                resp = assistantService.message(assistant_id=assistantID,
                                                session_id=sessionID,
                                                input={'text': minput,
                                                       'options': {'alternate_intents': True, 'return_context': True, 'debug': True}},
                                                intents=respOutput['intents'],
                                                context=contextNew).get_result()
                context = resp['context']
                respOutput = resp['output']
            else:
                print("\n\nplease use -actionmodule to define module to handle client actions")
                break
        # Dump the returned answer
        if (outputOnly):
            print ("Response:")
            print(json.dumps(respOutput['generic'], indent=2))
        else:
            print ("")
            print ("Full response object:")
            print ("---------------------")
            print(json.dumps(resp, indent=2))
        # Persist the current context object to file.
        with open(contextFile, 'w') as jsonFile:
            json.dump(context, jsonFile, indent=2)
#
# Main program, for now just detect what function to call and invoke it
#
if __name__ == '__main__':
    # Assume no module for client actions
    hca=None
    # initialize parser
    parser = initParser()
    parms = parser.parse_args()
    # enable next line to print parameters
    # print parms
    # load configuration and initialize Watson
    loadAndInit(confFile=parms.confFile)
    # Only start a chat when both -dialog and -id were supplied.
    if (parms.dialog and parms.assistantID):
        if parms.actionModule:
            # Dynamically load the module that handles client-side actions.
            hca=importlib.import_module(parms.actionModule)
        converse(parms.assistantID, parms.outputOnly)
    else:
        parser.print_usage()
|
From its invention till date, printing has gone through revolutionary processes and has always fascinated individuals and businesses with amazing ideas and awesome opportunities to create one-of-a-kind products. Whether you are a business professional or a newbie, you must admit that trips to printing companies can be hectic. Getting in touch with a company that offers quality and affordability can be an experience of a lifetime. Among the companies providing printing services in Kuwait, Alhafiz Co. has outrun others with dedication and commitment.
Silkscreen printing has always remained in demand. It is the best technique to personalize a wide variety of products including T-shirts, mugs, aluminum bottles and much more.
Vehicle graphics is trendy and beneficial. Car enthusiasts can visit Alhafiz Co. to get customized car wraps for a personal touch. Businesses can get their vehicles and service vans wrapped in corporate car wraps for marketing.
Hardcover binding with golden hot stamping is popular among students for thesis and dissertations.
Customized New Year Diaries and pens are exclusively offered to big and small enterprises so they can surprise their clients with the best New Year gift.
Customized popup display stands are highly sought after for trade shows and exhibitions.
UV printing is a versatile technique which is used to print on wood, USB flash drives, badges, paper weights, shields and trophies.
Digital printing has diverse applications. It is used to print table calendars, flyers, brochures, paper pyramids, books and magazines. Visit Alhafiz Co. for unmatched quality and competitive rates.
Photo printing can be availed for a variety of purposes. Alhafiz Co. offers premium quality photo printing service in Kuwait to clients coming with a multitude of requirements.
Wallpaper printing is another specialty area of Alhafiz Co. Get custom designs and colors in wallpapers for home and office use. We also offer 3D printing in Kuwait.
Invitation, greeting and holiday card printing, annual report printing, business proposals printing, post card printing, pamphlet printing, printing photos on ceramic tiles are some of the popular printing services offered by Alhafiz Co.
|
#!/usr/bin/python3
# coding: utf-8
'''
This script tests secretary_problem.py
Copyright (C) 2017 Zettsu Tatsuya
usage : python3 -m unittest discover tests
'''
import warnings
from unittest import TestCase
import numpy as np
import secretary_problem.secretary_problem as tested
class TestSecretaryProblem(TestCase):
    '''Testing find_candidate'''

    def _check(self, nums, passes, expected):
        '''Run find_candidate over nums with the given pass counts and
        verify the selected candidate.'''
        n_items = len(nums)
        problem = tested.SecretaryProblem(n_items, passes)
        actual = problem.find_candidate(n_items, passes, nums)
        self.assertEqual(actual, expected)

    def test_find_first(self):
        '''Find a candidate just after passed candidates'''
        nums = np.array([10, 20, 30, 40, 50])
        for pass_count in range(1, len(nums)):
            self._check(nums, [pass_count], nums[pass_count])

    def test_find_last(self):
        '''Find a last candidate'''
        nums = np.array([50, 40, 30, 20, 10])
        for pass_count in range(1, len(nums)):
            self._check(nums, [pass_count], nums[-1])

    def test_find_middle(self):
        '''Find a candidate between passed and last candidates'''
        nums = np.array([30, 20, 10, 50, 40])
        for pass_count in range(1, 3):
            self._check(nums, [pass_count], 50)

    def test_find_two_candidates1(self):
        '''Hold two candidates and the best candidates is placed last'''
        nums = np.array([10, 20, 40, 30, 50, 60, 70])
        passes_set = [[[1, 2], 40], [[1, 3], 30], [[1, 4], 50], [[1, 5], 60], [[1, 6], 70],
                      [[2, 3], 50], [[2, 4], 50], [[2, 5], 60], [[2, 6], 70],
                      [[3, 4], 60], [[3, 5], 60], [[3, 6], 70],
                      [[4, 5], 60], [[4, 6], 70],
                      [[5, 6], 70],
                      [[1, 1], 40], [[2, 2], 50], [[3, 3], 60], [[4, 4], 60], [[5, 5], 70],
                      [[6, 6], 70], [[7, 7], 70],
                      [[1, 7], 20], [[2, 7], 40], [[3, 7], 50], [[4, 7], 50], [[5, 7], 60],
                      [[6, 7], 70]]
        for passes, expected in passes_set:
            self._check(nums, passes, expected)

    def test_find_two_candidates2(self):
        '''Hold two candidates and the best candidates is placed middle of candidates'''
        nums = np.array([30, 40, 60, 50, 70, 20, 10])
        passes_set = [[[1, 2], 60], [[1, 3], 50], [[1, 4], 70], [[1, 5], 40], [[1, 6], 40],
                      [[2, 3], 70], [[2, 4], 70], [[2, 5], 60], [[2, 6], 60],
                      [[3, 4], 70], [[3, 5], 70], [[3, 6], 70],
                      [[4, 5], 70], [[4, 6], 70],
                      [[5, 6], 10],
                      [[1, 1], 60], [[2, 2], 70], [[3, 3], 70], [[4, 4], 70], [[5, 5], 10],
                      [[6, 6], 10], [[7, 7], 10],
                      [[1, 7], 40], [[2, 7], 60], [[3, 7], 70], [[4, 7], 70], [[5, 7], 10],
                      [[6, 7], 10]]
        for passes, expected in passes_set:
            self._check(nums, passes, expected)

    def test_find_many_candidates(self):
        '''Hold many candidates'''
        nums = np.array([10, 20, 30, 40, 70, 60, 50, 49, 48, 47])
        passes_set = [[[1, 2, 3], 40], [[1, 2, 3, 4], 70], [[1, 2, 3, 4, 5], 70],
                      [[4, 5, 6, 7], 70], [[5, 6, 7, 8], 47]]
        for passes, expected in passes_set:
            self._check(nums, passes, expected)
class TestExploreSecretaryProblem(TestCase):
    '''Testing optimization with ExploreSecretaryProblem'''
    def test_explore(self):
        '''Explore a solution of 1-secretary problem'''
        # Surpress warnings for scikit-optimize
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        warnings.filterwarnings("ignore", category=UserWarning)
        cutoffs, value = tested.ExploreSecretaryProblem(
            n_items=100, n_cutoffs=1, n_trial=10000, n_calls=30,
            n_random_starts=20).explore(verbose=False)
        # For n=100 the classical optimum is a cutoff near n/e ~ 36.8 with
        # success probability near 1/e ~ 0.368.  The explorer minimizes the
        # negated probability, hence the negative bounds on `value`.
        self.assertGreater(cutoffs[0], 35.0)
        self.assertLess(cutoffs[0], 39.0)
        self.assertLess(value, -0.35)
        self.assertGreater(value, -0.39)
|
Let's be honest, we all wish some of our favorite characters were in other mediums. Worlds like Game of Thrones, Lord of the Rings or even Rick and Morty would be a hilarious addition to the fighting universe.
We've seen some creative ones like Transformers and Power Rangers, but let's think outside the box.
Here are eight genres we wish had fighting games.
South Park has a shooter and two successful RPG-style games, but it needs a side-scroller fighting game. The game has dozens of popular characters that would provide a large roster from which to choose. The game is beloved by many, and a fighting game would be a perfect addition for the fan base.
The series is a classic and already has an arcade game, a normal style video game and a wrestling game. But the game needs a side-scroller fighting game. It would be fantastic to see how the creators of the game decide which character has which ability.
It's not ranked higher on the list because of the few different versions of the game that already exist.
One of the most popular horror games of the past decades, Five Nights at Freddys would be a hilarious addition to the fighting game franchise. The game could be filled with all the different animatronics and their different fighting styles.
Doctor Who just finished its 11th season and has plenty of characters, variations of Doctor Who, and even alien races. There are plenty of fun ways to make combat unique and interesting inside of a Doctor Who fighting game.
One of the longest running shows at the moment as it's in its 14th season. The show has dozens of characters and hundreds of monsters to choose from. You could have a fun and interesting fighting game inside the Supernatural universe.
Rick and Morty has a cult following so the fighting game is a no brainer. There are plenty of dynamic and creative characters and creatures throughout the game. It would be a perfect style for a fighting game!
This is to appease all the hyper nerds and Lord of the Ring fans out there. You could play some of the most iconic characters in fantasy lore and even match up against some of the most popular villains.
Come on. It's one of the most popular shows of all time and it's ending this year. How perfect would it be to play one of your favorite characters from the game and dominate all your friends with its special ability.
Want to see the Mountain fight Jon Snow? It could be possible!
|
from django.http import HttpResponse, Http404
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import Group
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from models import Note, Project
from django.shortcuts import render_to_response, get_object_or_404
import simplejson as json
# Kanban-style workflow states a Note may occupy; order reflects the
# intended progression across the board. Used by _ajax() to validate
# the 'section' a note is moved into.
STATES = ['todo', 'inprogress', 'document', 'test', 'verify', 'done']
def index(request):
    """Render the landing page.

    Authenticated users get the full project list; anonymous visitors
    get none (the template receives projects=None).
    """
    user = request.user
    projects = Project.objects.all() if user.is_authenticated() else None
    context = {'projects': projects}
    return render_to_response('index.html', context,
                              context_instance=RequestContext(request))
@login_required
def project(request, id):
    """Render a single project page.

    Access control: the user must belong to the Group named after the
    project; otherwise a 404 is raised (hiding the project's existence).

    Note: the parameter is named ``id`` (shadowing the builtin) because
    the URLconf passes it by keyword — renaming would break routing.
    """
    # Use get_object_or_404 (consistent with the Group lookup below) so a
    # bad id yields a 404 instead of an uncaught DoesNotExist (HTTP 500).
    project = get_object_or_404(Project, id=id)
    group = get_object_or_404(Group, name=project.name)
    if group not in request.user.groups.all():
        raise Http404
    return render_to_response('project.html', {'project': project})
@login_required
def ajax(request):
    """JSON entry point: delegate to _ajax() and serialize its result."""
    payload = _ajax(request)
    return HttpResponse(json.dumps(payload))
def _ajax(request):
    """Dispatch an AJAX note action ('move', 'edit', 'delete' or 'new').

    Returns a JSON-serializable dict with a 'status' key: 200 on
    success, 403 for any malformed or disallowed request. 'new' also
    returns the rendered note HTML under 'content'.
    """
    if not request.is_ajax():
        return {'status': 403}
    a = request.POST.get('a')
    if a not in ['move', 'edit', 'delete', 'new']:
        return {'status': 403}
    if a in ['move', 'edit', 'delete']:
        # Note ids arrive as DOM ids of the form "note-<pk>" (see _note()).
        n = request.POST.get('note')
        try:
            id = int(n[5:])
        except (TypeError, ValueError):
            # Missing or malformed 'note' parameter.
            return {'status': 403}
        # Single lookup; a missing note is reported as 403 like the other
        # validation failures. (The original also called get_object_or_404
        # first, which both fetched twice and contradicted this handler.)
        try:
            note = Note.objects.get(pk=id)
        except ObjectDoesNotExist:
            return {'status': 403}
    if a in ['edit', 'new']:
        content = request.POST.get('content')
    if a == 'move':
        st = request.POST.get('section')
        if st not in STATES:
            return {'status': 403}
        note.state = st
        # Persist and acknowledge the move. The original fell off the end
        # of the function here, returning None and losing the state change.
        note.save()
        return {'status': 200}
    elif a == 'delete':
        note.delete()
        return {'status': 200}
    elif a == 'new':
        p = request.POST.get('project')
        p = get_object_or_404(Project, id=p)
        note = Note(
            content=content,
            author=request.user,
            project=p)
        note.save()
        return {
            'status': 200,
            'content': _note(note)
        }
    else:
        # a == 'edit'
        note.content = content
        note.save()
        return {'status': 200}
def _note(note):
    """Render *note* as the HTML portlet fragment used by the drag-and-drop UI.

    The fragment id ("note-<pk>") is the same id that _ajax() parses back
    out by stripping the "note-" prefix.
    """
    # NOTE(review): note.content is interpolated unescaped, so any stored
    # markup is rendered as-is — confirm content is trusted/escaped upstream.
    t = """
<div id="note-%d" class="portlet ui-widget ui-widget-content ui-helper-clearfix ui-corner-all"><div class="portlet-header ui-widget-header ui-corner-all"><span class="ui-icon ui-icon-close"></span><span class="ui-icon ui-icon-pencil"></span></div>
<div class="portlet-content">%s</div>
<div class="portlet-meta">
<p>Author: %s</p>
</div>
</div>
"""
    n = t % (note.id, note.content, note.author.username,)
    return n
|
Solar power capacity in the U.S. has doubled over the past few years, with new capacity far outpacing government projections.
Inside Climate News reports that in 2016, solar power capacity doubled compared to 2015, with the country adding 14.5 gigawatts of solar power, far outpacing government projections.
In 2006 the U.S. Energy Information Administration predicted that solar power would amount to only about 0.8 gigawatts of capacity by 2016. In the first half of 2017, wind and solar accounted for 10 percent of monthly electricity generation for the first time.
Despite the growth, there are some myths holding back widespread adoption of solar power in some parts of the country.
Myth: Cold winters make solar panels unviable.
Reality: Winter is here, so what does that mean for solar production? Will solar panels stop producing solar energy to power homes and businesses? The easy answer is ABSOLUTELY NOT! Solar energy is produced from sunlight – not heat. In fact, solar panels are known to perform better in colder temperatures. Solar panels, whether rooftop or ground-mounted, will be at an angle. Anytime the sun hits them, the solar panels produce energy. In the winter, sun position is lower than other times of the year and this allows solar panels to soak up as much energy as possible.
Myth: Snow prohibits solar power generation.
Reality: Solar panels are designed to withstand hundreds of pounds of snow during harsh winters. Solar panels have two great characteristics to rid themselves of snow without your intervention – they heat up a bit from the sun hitting them and producing energy and they’re at an angle. So, what happens on those dark days when it snows? Once the sun comes out, the snow melts, and voilà, you’re back to producing solar energy to power your home.
Myth: Solar panels decrease the value of your home.
Reality: Solar panels tend to INCREASE the value of a home by $15,000.
Myth: Everyone thinks solar panels are ugly.
Reality: To many, solar panels are working art – a clean, renewable source of energy that can save our planet. For those who disagree, solar panels can be installed in locations that minimize visual impact.
Myth: I won’t live in my home long enough for my solar investment to provide timely ROI.
Reality: Most solar panels pay for themselves in 6 to 15 years. Combined with state and federal tax credits and incentives, many see a return on investment in 2 to 4 years.
Myth: Solar panels require a lot of maintenance.
Reality: Solar panels are sturdy, durable and reliable. Most installers recommend an annual inspection to maximize performance.
Myth: Solar panels may damage my roof.
Reality: Solar panels may extend the life of your roof by protecting it from the elements. Installed a few inches above the roof, solar panels increase air flow and weigh about as much as a second layer of shingles.
Read more about home solar power.
|
#!/usr/bin/env python
"""
# Copyright 2013, Matt Settles
# Modified Aug 10, 2013
"""
from Bio import SeqIO
from optparse import OptionParser
import sys, os, os.path, time, gzip
from collections import Counter
## Parse options and setup ##
usage = "usage %prog -d [path to directory of raw reads]"
parser = OptionParser(usage=usage)
parser.add_option('-d', '--directory', help="Directory containing read files to de-duplicate",
                  action="store", type="str", dest="sample_dir")
(options, args) = parser.parse_args()
# Any positional arguments indicate a usage error: show help and exit.
if len(args) != 0:
    parser.print_help()
    sys.exit()
sample_dir = options.sample_dir
# Output is written alongside the input reads (same directory).
output_dir = options.sample_dir
#kindly provided by http://stackoverflow.com/questions/7099290/how-to-ignore-hidden-files-using-os-listdir-python
#glob.glob will list hidden files
#this replaces that functionality when hidden files exist, like in my reads from Berkeley
def listdir_nohidden(path):
    """Yield the entries of *path* whose names do not start with '.'.

    Replacement for glob.glob / os.listdir which would also pick up
    hidden files (e.g. dotfiles shipped alongside the reads).
    """
    for entry in os.listdir(path):
        if entry.startswith('.'):
            continue
        yield entry
def main_sff(infile, outfile1):
    """Append read/base/quality summary stats for one SFF file to outfile1.

    Also updates the module-level running totals ``count`` and ``bases``.
    Only the quality-clipped portion of each read is counted.
    """
    global count
    global bases
    lcount = 0
    lbases = 0
    lqual = 0
    # SFF is a binary format: open in binary mode (the original used text
    # mode, which is unsafe on Windows and breaks on Python 3).
    iterator1 = SeqIO.parse(open(infile, 'rb'), "sff")
    try:
        while 1:
            # next() works on both Python 2 and 3 (the original used .next()).
            seq1 = next(iterator1)
            count += 1
            lcount += 1
            # Length of the quality-clipped region; renamed from 'len',
            # which shadowed the builtin.
            qlen = seq1.annotations["clip_qual_right"] - seq1.annotations["clip_qual_left"]
            bases += qlen
            lbases += qlen
            lqual += sum(seq1.letter_annotations['phred_quality'][seq1.annotations["clip_qual_left"]:seq1.annotations["clip_qual_right"]]) / qlen
    except StopIteration:
        pass
    finally:
        # Original referenced the undefined name 'infile1' here (NameError
        # on every call); the parameter is 'infile'.
        print("Finished processing file " + infile)
        outfile1.write("file\t" + infile + "\n")
        outfile1.write("nreads\t" + str(lcount) + "\n")
        outfile1.write("nbases\t" + str(lbases) + "\n")
        # Guard the averages against an empty file (ZeroDivisionError).
        if lcount > 0:
            outfile1.write("avgBases\t" + str(round(lbases / lcount, 0)) + "\n")
            outfile1.write("avgQual\t" + str(round(lqual / lcount, 1)) + "\n")
def main(infile1, outfile1):
    """Append read/base/quality summary stats for one FASTQ file to outfile1.

    Also updates the module-level running totals ``count`` and ``bases``.
    Gzip-compressed input is detected by the '.gz' extension; any other
    extension is treated as plain FASTQ (as in the original, whose
    'fastq' branch and fallback branch were identical).
    """
    global count
    global bases
    lcount = 0
    lbases = 0
    lqual = 0
    # Open inputs (gzip is already imported at module level; the original
    # re-imported it locally).
    if infile1.split(".")[-1] == "gz":
        # NOTE(review): 'rb' yields bytes on Python 3, which SeqIO's fastq
        # parser rejects there; kept for Python 2 compatibility — confirm
        # target interpreter before changing to 'rt'.
        handle = gzip.open(infile1, 'rb')
    else:
        handle = open(infile1, 'r')
    iterator1 = SeqIO.parse(handle, 'fastq')
    try:
        while 1:
            # next() works on both Python 2 and 3 (the original used .next()).
            seq1 = next(iterator1)
            count += 1
            lcount += 1
            bases += len(seq1)
            lbases += len(seq1)
            lqual += sum(seq1.letter_annotations['phred_quality']) / len(seq1)
    except StopIteration:
        pass
    finally:
        print("Finished processing file " + infile1)
        outfile1.write("file\t" + infile1 + "\n")
        outfile1.write("nreads\t" + str(lcount) + "\n")
        outfile1.write("nbases\t" + str(lbases) + "\n")
        # Guard the averages against an empty file (ZeroDivisionError).
        if lcount > 0:
            outfile1.write("avgBases\t" + str(round(lbases / lcount, 0)) + "\n")
            outfile1.write("avgQual\t" + str(round(lqual / lcount, 1)) + "\n")
#main part of the program
count = 0
bases = 0
outfile1 = open(os.path.realpath(os.path.join(os.getcwd(), sample_dir, "read_data.txt")),"w+")
files = listdir_nohidden('./' + sample_dir)
for f in files:
if ("fastq" in f) or ("fq" in f):
print f
infile1 = os.path.realpath(os.path.join(os.getcwd(), sample_dir, f))
main(infile1, outfile1)
if ("sff" in f):
print f
infile1 = os.path.realpath(os.path.join(os.getcwd(), sample_dir, f))
main_sff(infile1, outfile1)
outfile1.write("directory\t" + sample_dir + "\n")
outfile1.write("treads\t" + str(count) + "\n")
outfile1.write("tbases\t" + str(bases) + "\n")
outfile1.close()
|
Polaris and the Hispanic Communications Network (HCN) in collaboration with the Health Initiative of the Americas among other organizations have launched the “Join the Solution” multimedia campaign against human trafficking, especially for women and girls, from Mexico and others countries of Latin America. The campaign will be headed by the popular Mexican actor Eugenio Derbez.
“Join the Solution” seeks to raise awareness among the Latino public about the criminal dimension of the phenomenon of human trafficking, as well as about the victims’ humanitarian nightmare, but also offers the tools to be proactive and to become part of the solution to this problem.
|
import os
import pytest
from toot import User, App, config
@pytest.fixture
def sample_config():
    """Config dict with two registered apps, one user, and an active user."""
    foo_app = {
        'base_url': 'https://foo.social',
        'client_id': 'abc',
        'client_secret': 'def',
        'instance': 'foo.social',
    }
    bar_app = {
        'base_url': 'https://bar.social',
        'client_id': 'ghi',
        'client_secret': 'jkl',
        'instance': 'bar.social',
    }
    user = {
        'access_token': 'mno',
        'instance': 'bar.social',
        'username': 'ihabunek',
    }
    return {
        'apps': {'foo.social': foo_app, 'bar.social': bar_app},
        'users': {'foo@bar.social': user},
        'active_user': 'foo@bar.social',
    }
def test_extract_active_user_app(sample_config):
    """The active user and its matching app are resolved from the config."""
    active = sample_config['active_user']
    user, app = config.extract_user_app(sample_config, active)
    assert isinstance(user, User)
    assert (user.instance, user.username, user.access_token) == \
        ('bar.social', 'ihabunek', 'mno')
    assert isinstance(app, App)
    assert (app.instance, app.base_url, app.client_id, app.client_secret) == \
        ('bar.social', 'https://bar.social', 'ghi', 'jkl')
def test_extract_active_when_no_active_user(sample_config):
    """extract_user_app returns (None, None) for missing users or apps."""
    extract = config.extract_user_app
    # No active user at all.
    assert extract(sample_config, None) == (None, None)
    # Active user missing from the config.
    assert extract(sample_config, 'does-not-exist') == (None, None)
    # User exists but references a non-existent app.
    sample_config['users']['foo@bar.social']['instance'] = 'does-not-exist'
    assert extract(sample_config, 'foo@bar.social') == (None, None)
def test_save_app(sample_config):
    """save_app adds new apps, keeps existing ones, and is idempotent."""
    apps = sample_config['apps']

    def check(instance, base_url, client_id, client_secret):
        # One app entry matches the given field values exactly.
        entry = apps[instance]
        assert entry['instance'] == instance
        assert entry['base_url'] == base_url
        assert entry['client_id'] == client_id
        assert entry['client_secret'] == client_secret

    app = App('xxx.yyy', 2, 3, 4)
    app2 = App('moo.foo', 5, 6, 7)
    app_count = len(apps)
    assert 'xxx.yyy' not in apps
    assert 'moo.foo' not in apps

    # Sets
    config.save_app.__wrapped__(sample_config, app)
    assert len(apps) == app_count + 1
    check('xxx.yyy', 2, 3, 4)

    # Adds a second app without touching the first
    config.save_app.__wrapped__(sample_config, app2)
    assert len(apps) == app_count + 2
    check('xxx.yyy', 2, 3, 4)
    check('moo.foo', 5, 6, 7)

    # Idempotent
    config.save_app.__wrapped__(sample_config, app2)
    assert len(apps) == app_count + 2
    check('xxx.yyy', 2, 3, 4)
    check('moo.foo', 5, 6, 7)
def test_delete_app(sample_config):
    """delete_app removes the app and is a no-op when already deleted."""
    apps = sample_config['apps']
    app = App('foo.social', 2, 3, 4)
    app_count = len(apps)
    assert 'foo.social' in apps
    # Second pass verifies idempotency.
    for _ in range(2):
        config.delete_app.__wrapped__(sample_config, app)
        assert 'foo.social' not in apps
        assert len(apps) == app_count - 1
def test_get_config_file_path():
    """The config path honours XDG_CONFIG_HOME, defaulting to ~/.config.

    Fixes two issues in the original: the redundant os.unsetenv() call
    (it does not update os.environ, and the following pop() already
    unsets the real environment), and the leak of a mutated
    XDG_CONFIG_HOME into the process for subsequently-run tests.
    """
    fn = config.get_config_file_path
    original = os.environ.pop('XDG_CONFIG_HOME', None)
    try:
        assert fn() == os.path.expanduser('~/.config/toot/config.json')
        os.environ['XDG_CONFIG_HOME'] = '/foo/bar/config'
        assert fn() == '/foo/bar/config/toot/config.json'
        os.environ['XDG_CONFIG_HOME'] = '~/foo/config'
        assert fn() == os.path.expanduser('~/foo/config/toot/config.json')
    finally:
        # Restore the caller's environment.
        if original is None:
            os.environ.pop('XDG_CONFIG_HOME', None)
        else:
            os.environ['XDG_CONFIG_HOME'] = original
|
The exquisite mild flavor of filet mignon is revered by steak lovers. With our Private Reserve Wagyu Filet Mignon Steaks, that one-of-a-kind flavor is realized to the absolute fullest. Fine, uncommonly rich marbling only accentuates the filet's natural buttery, fork-tender texture, elevating its mouthwatering taste beyond your wildest steak dreams.
Filet mignon bears the distinction of being Omaha Steaks most coveted steak, and our Private Reserve Wagyu Filet Mignon Steaks offer everything our customers love about the filet, with even more to crave. This outstanding steak is the result of crossing genuine Wagyu and real American Angus beef. Both are esteemed and favored for their high density of fine marbling. Combining them results in a steak that offers the very best of East and West. Wagyu filet mignon is profoundly juicy, with a uniquely subtle complexity complemented by an immense depth of the filet's trademark mild flavor. You've never experienced filets quite like this. Now, you can grill them to utter perfection whenever you wish. Our signature process and master butchers have made enjoying and sharing Wagyu filet mignon in the comfort of your own home easy. Each steak is precisely aged at least 28 days to achieve peak tenderness. Then, our butchers triple-trim every steak by hand to minimize exterior fat and ensure your every bite is absolutely flawless. Don't hesitate to bring home this unique steak while you can - availability is highly limited.
|
#######################################################################
# Tests for ngsutils.py module
#######################################################################
import unittest
import os
import io
import tempfile
import shutil
import gzip
from bcftbx.ngsutils import *
from builtins import range
class TestGetreadsFunction(unittest.TestCase):
    """Tests for the 'getreads' function
    """
    def setUp(self):
        # Scratch directory for example files, removed in tearDown.
        self.wd = tempfile.mkdtemp()
        self.example_fastq_data = u"""@K00311:43:HL3LWBBXX:8:1101:21440:1121 1:N:0:CNATGT
GCCNGACAGCAGAAAT
+
AAF#FJJJJJJJJJJJ
@K00311:43:HL3LWBBXX:8:1101:21460:1121 1:N:0:CNATGT
GGGNGTCATTGATCAT
+
AAF#FJJJJJJJJJJJ
@K00311:43:HL3LWBBXX:8:1101:21805:1121 1:N:0:CNATGT
CCCNACCCTTGCCTAC
+
AAF#FJJJJJJJJJJJ
"""
        self.example_csfasta_data = u"""# Cwd: /home/pipeline
# Title: solid0127_20121204_FRAG_BC_Run_56_pool_LC_CK
>1_51_38_F3
T3..3.213.12211.01..000..111.0210202221221121011..0
>1_51_301_F3
T0..3.222.21233.00..022..110.0210022323223202211..2
>1_52_339_F3
T1.311202211102.331233332113.23332233002223222312.2
"""
        self.example_qual_data = u"""# Cwd: /home/pipeline
# Title: solid0127_20121204_FRAG_BC_Run_56_pool_LC_CK
>1_51_38_F3
16 -1 -1 5 -1 24 15 12 -1 21 12 16 22 19 -1 26 13 -1 -1 4 21 4 -1 -1 4 7 9 -1 4 5 4 4 4 4 4 13 4 4 4 5 4 4 10 4 4 4 4 -1 -1 4
>1_51_301_F3
22 -1 -1 4 -1 24 30 7 -1 4 9 26 6 16 -1 25 25 -1 -1 17 18 13 -1 -1 4 14 24 -1 4 14 17 32 4 7 13 13 22 4 12 19 4 24 6 9 8 4 4 -1 -1 9
>1_52_339_F3
27 -1 33 24 28 32 29 17 25 27 26 30 30 31 -1 28 33 19 19 13 4 20 21 13 5 4 12 -1 4 23 13 8 4 10 4 6 5 7 4 8 4 8 12 5 12 10 8 7 -1 4
"""
    def tearDown(self):
        shutil.rmtree(self.wd)
    def test_getreads_fastq(self):
        """getreads: read records from Fastq file
        """
        # Make an example file
        example_fastq = os.path.join(self.wd,"example.fastq")
        with io.open(example_fastq,'wt') as fp:
            fp.write(self.example_fastq_data)
        # Read lines; Fastq records are four lines each.
        fastq_reads = getreads(example_fastq)
        reference_reads = [self.example_fastq_data.split('\n')[i:i+4]
                           for i
                           in range(0,
                                    len(self.example_fastq_data.split('\n')),
                                    4)]
        for r1,r2 in zip(reference_reads,fastq_reads):
            self.assertEqual(r1,r2)
    def test_getreads_gzipped_fastq(self):
        """getreads: read records from gzipped Fastq file
        """
        # Make an example file
        example_fastq = os.path.join(self.wd,"example.fastq.gz")
        with gzip.open(example_fastq,'wt') as fp:
            fp.write(self.example_fastq_data)
        # Read lines
        fastq_reads = getreads(example_fastq)
        reference_reads = [self.example_fastq_data.split('\n')[i:i+4]
                           for i
                           in range(0,
                                    len(self.example_fastq_data.split('\n')),
                                    4)]
        for r1,r2 in zip(reference_reads,fastq_reads):
            self.assertEqual(r1,r2)
    def test_getreads_csfasta(self):
        """getreads: read records from csfasta file
        """
        # Make an example file
        example_csfasta = os.path.join(self.wd,"example.csfasta")
        with io.open(example_csfasta,'wt') as fp:
            fp.write(self.example_csfasta_data)
        # Read lines; start at line 2 to skip the two '#' header lines,
        # csfasta records are two lines each (name + sequence).
        # Fix: the original sized this range from example_fastq_data,
        # which only worked by coincidence of the example data lengths.
        csfasta_reads = getreads(example_csfasta)
        reference_reads = [self.example_csfasta_data.split('\n')[i:i+2]
                           for i
                           in range(2,
                                    len(self.example_csfasta_data.split('\n')),
                                    2)]
        for r1,r2 in zip(reference_reads,csfasta_reads):
            self.assertEqual(r1,r2)
    def test_getreads_qual(self):
        """getreads: read records from qual file
        """
        # Make an example file
        example_qual = os.path.join(self.wd,"example.qual")
        with io.open(example_qual,'wt') as fp:
            fp.write(self.example_qual_data)
        # Read lines; same two-line record layout as csfasta.
        qual_reads = getreads(example_qual)
        reference_reads = [self.example_qual_data.split('\n')[i:i+2]
                           for i
                           in range(2,
                                    len(self.example_qual_data.split('\n')),
                                    2)]
        for r1,r2 in zip(reference_reads,qual_reads):
            self.assertEqual(r1,r2)
class TestGetreadsSubsetFunction(unittest.TestCase):
    """Tests for the 'getreads_subset' function
    """
    def setUp(self):
        self.wd = tempfile.mkdtemp()
        self.example_fastq_data = u"""@K00311:43:HL3LWBBXX:8:1101:21440:1121 1:N:0:CNATGT
GCCNGACAGCAGAAAT
+
AAF#FJJJJJJJJJJJ
@K00311:43:HL3LWBBXX:8:1101:21460:1121 1:N:0:CNATGT
GGGNGTCATTGATCAT
+
AAF#FJJJJJJJJJJJ
@K00311:43:HL3LWBBXX:8:1101:21805:1121 1:N:0:CNATGT
CCCNACCCTTGCCTAC
+
AAF#FJJJJJJJJJJJ
"""
    def tearDown(self):
        shutil.rmtree(self.wd)

    def _write_example_fastq(self):
        """Write the example data to a file in the scratch dir; return path."""
        path = os.path.join(self.wd, "example.fastq")
        with io.open(path, 'wt') as fp:
            fp.write(self.example_fastq_data)
        return path

    def test_getreads_subset_fastq(self):
        """getreads: get subset of reads from Fastq file
        """
        example_fastq = self._write_example_fastq()
        # Request the first and third records (four lines each).
        fastq_reads = getreads_subset(example_fastq,
                                      indices=(0, 2))
        lines = self.example_fastq_data.split('\n')
        reference_reads = [lines[i:i+4] for i in (0, 8)]
        for expected, actual in zip(reference_reads, fastq_reads):
            self.assertEqual(expected, actual)

    def test_getreads_subset_fastq_index_out_of_range(self):
        """getreads: requesting non-existent read raises exception
        """
        example_fastq = self._write_example_fastq()
        # NB would prefer assertRaises, however the reads have to be
        # consumed before the exception is raised.
        def raises(indices):
            try:
                [r for r in getreads_subset(example_fastq, indices=indices)]
            except Exception:
                return True
            return False
        # Negative index and index past the last read must both fail.
        self.assertTrue(raises((-1, 0)), "Exception not raised")
        self.assertTrue(raises((0, 99)), "Exception not raised")
class TestGetreadsRegexpFunction(unittest.TestCase):
    """Tests for the 'getreads_regex' function
    """
    def setUp(self):
        self.wd = tempfile.mkdtemp()
        self.example_fastq_data = u"""@K00311:43:HL3LWBBXX:8:1101:21440:1121 1:N:0:CNATGT
GCCNGACAGCAGAAAT
+
AAF#FJJJJJJJJJJJ
@K00311:43:HL3LWBBXX:8:1101:21460:1121 1:N:0:CNATGT
GGGNGTCATTGATCAT
+
AAF#FJJJJJJJJJJJ
@K00311:43:HL3LWBBXX:8:1101:21805:1121 1:N:0:CNATGT
CCCNACCCTTGCCTAC
+
AAF#FJJJJJJJJJJJ
"""
    def tearDown(self):
        shutil.rmtree(self.wd)
    def test_getreads_regexp_fastq(self):
        """getreads: get reads from Fastq file matching pattern
        """
        # Make an example file
        example_fastq = os.path.join(self.wd, "example.fastq")
        with io.open(example_fastq, 'wt') as fp:
            fp.write(self.example_fastq_data)
        # Only the first record's header matches the pattern.
        matched_reads = getreads_regex(example_fastq,
                                       ":1101:21440:1121")
        lines = self.example_fastq_data.split('\n')
        reference_reads = [lines[0:4]]
        for expected, actual in zip(reference_reads, matched_reads):
            self.assertEqual(expected, actual)
|
Founder, strategy consultant, advisor to small and medium companies (tech or not).
I can assist startups with: idea validation, business plan, customer acquisition, marketing, funding, valuation, exit strategy.
Focus on staying and keeping the eyes on the goal at all stages.
Do you want to analyse and improve your profitability?
Do you want to assess potential new opportunities?
Do you want to earn more for the same amount of work or work less for the same amount of earnings?
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2010, 2011, 2012 Michael Ligh <michael.ligh@mnin.org>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.utils as utils
import volatility.obj as obj
import volatility.poolscan as poolscan
import volatility.debug as debug
import volatility.plugins.common as common
import volatility.win32.modules as modules
import volatility.win32.tasks as tasks
import volatility.plugins.malware.devicetree as devicetree
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
# distorm3 is an optional disassembler dependency; code can check
# has_distorm3 and degrade gracefully when it is unavailable.
try:
    import distorm3
    has_distorm3 = True
except ImportError:
    has_distorm3 = False
#--------------------------------------------------------------------------------
# vtypes
#--------------------------------------------------------------------------------
# Hand-built vtype definitions (32-bit) for kernel callback structures
# that are not described in the PDB-derived profiles. Layout is the
# standard Volatility vtype form: name -> [size, {field: [offset, type]}].
# Offsets are in bytes from the start of the structure.
callback_types = {
    '_NOTIFICATION_PACKET' : [ 0x10, {
        'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
        'DriverObject' : [ 0x8, ['pointer', ['_DRIVER_OBJECT']]],
        'NotificationRoutine' : [ 0xC, ['unsigned int']],
    } ],
    '_KBUGCHECK_CALLBACK_RECORD' : [ 0x20, {
        'Entry' : [ 0x0, ['_LIST_ENTRY']],
        'CallbackRoutine' : [ 0x8, ['unsigned int']],
        'Buffer' : [ 0xC, ['pointer', ['void']]],
        'Length' : [ 0x10, ['unsigned int']],
        'Component' : [ 0x14, ['pointer', ['String', dict(length = 64)]]],
        'Checksum' : [ 0x18, ['pointer', ['unsigned int']]],
        'State' : [ 0x1C, ['unsigned char']],
    } ],
    '_KBUGCHECK_REASON_CALLBACK_RECORD' : [ 0x1C, {
        'Entry' : [ 0x0, ['_LIST_ENTRY']],
        'CallbackRoutine' : [ 0x8, ['unsigned int']],
        'Component' : [ 0xC, ['pointer', ['String', dict(length = 8)]]],
        'Checksum' : [ 0x10, ['pointer', ['unsigned int']]],
        'Reason' : [ 0x14, ['unsigned int']],
        'State' : [ 0x18, ['unsigned char']],
    } ],
    '_SHUTDOWN_PACKET' : [ 0xC, {
        'Entry' : [ 0x0, ['_LIST_ENTRY']],
        'DeviceObject' : [ 0x8, ['pointer', ['_DEVICE_OBJECT']]],
    } ],
    '_EX_CALLBACK_ROUTINE_BLOCK' : [ 0x8, {
        'RundownProtect' : [ 0x0, ['unsigned int']],
        'Function' : [ 0x4, ['unsigned int']],
        'Context' : [ 0x8, ['unsigned int']],
    } ],
    '_GENERIC_CALLBACK' : [ 0xC, {
        'Callback' : [ 0x4, ['pointer', ['void']]],
        'Associated' : [ 0x8, ['pointer', ['void']]],
    } ],
    '_REGISTRY_CALLBACK_LEGACY' : [ 0x38, {
        'CreateTime' : [ 0x0, ['WinTimeStamp', dict(is_utc = True)]],
    } ],
    '_REGISTRY_CALLBACK' : [ None, {
        'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
        'Function' : [ 0x1C, ['pointer', ['void']]],
    } ],
    '_DBGPRINT_CALLBACK' : [ 0x14, {
        'Function' : [ 0x8, ['pointer', ['void']]],
    } ],
    '_NOTIFY_ENTRY_HEADER' : [ None, {
        'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
        'EventCategory' : [ 0x8, ['Enumeration', dict(target = 'long', choices = {
            0: 'EventCategoryReserved',
            1: 'EventCategoryHardwareProfileChange',
            2: 'EventCategoryDeviceInterfaceChange',
            3: 'EventCategoryTargetDeviceChange'})]],
        'CallbackRoutine' : [ 0x14, ['unsigned int']],
        'DriverObject' : [ 0x1C, ['pointer', ['_DRIVER_OBJECT']]],
    } ],
    }
# 64-bit variants of the callback vtypes above; pointer-sized fields
# shift the offsets relative to the 32-bit layouts.
callback_types_x64 = {
    '_GENERIC_CALLBACK' : [ 0x18, {
        'Callback' : [ 0x8, ['pointer', ['void']]],
        'Associated' : [ 0x10, ['pointer', ['void']]],
    } ],
    '_NOTIFICATION_PACKET' : [ 0x30, {
        'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
        'DriverObject' : [ 0x10, ['pointer', ['_DRIVER_OBJECT']]],
        'NotificationRoutine' : [ 0x18, ['address']],
    } ],
    '_SHUTDOWN_PACKET' : [ 0xC, {
        'Entry' : [ 0x0, ['_LIST_ENTRY']],
        'DeviceObject' : [ 0x10, ['pointer', ['_DEVICE_OBJECT']]],
    } ],
    '_DBGPRINT_CALLBACK' : [ 0x14, {
        'Function' : [ 0x10, ['pointer', ['void']]],
    } ],
    '_NOTIFY_ENTRY_HEADER' : [ None, {
        'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
        'EventCategory' : [ 0x10, ['Enumeration', dict(target = 'long', choices = {
            0: 'EventCategoryReserved',
            1: 'EventCategoryHardwareProfileChange',
            2: 'EventCategoryDeviceInterfaceChange',
            3: 'EventCategoryTargetDeviceChange'})]],
        'CallbackRoutine' : [ 0x20, ['address']],
        'DriverObject' : [ 0x30, ['pointer', ['_DRIVER_OBJECT']]],
    } ],
    '_REGISTRY_CALLBACK' : [ 0x50, {
        'ListEntry' : [ 0x0, ['_LIST_ENTRY']],
        'Function' : [ 0x20, ['pointer', ['void']]], # other could be 28
    } ],
    '_KBUGCHECK_CALLBACK_RECORD' : [ None, {
        'Entry' : [ 0x0, ['_LIST_ENTRY']],
        'CallbackRoutine' : [ 0x10, ['address']],
        'Component' : [ 0x28, ['pointer', ['String', dict(length = 8)]]],
    } ],
    '_KBUGCHECK_REASON_CALLBACK_RECORD' : [ None, {
        'Entry' : [ 0x0, ['_LIST_ENTRY']],
        'CallbackRoutine' : [ 0x10, ['unsigned int']],
        'Component' : [ 0x28, ['pointer', ['String', dict(length = 8)]]],
    } ],
    }
#--------------------------------------------------------------------------------
# object classes
#--------------------------------------------------------------------------------
class _SHUTDOWN_PACKET(obj.CType):
    """Class for shutdown notification callbacks"""

    def is_valid(self):
        """Sanity-check a carved _SHUTDOWN_PACKET.

        Note: obj_native_vm is kernel space.
        """
        if not obj.CType.is_valid(self):
            return False
        # All three embedded pointers must be valid kernel-space addresses.
        pointers = (self.Entry.Flink, self.Entry.Blink, self.DeviceObject)
        if not all(self.obj_native_vm.is_valid_address(p) for p in pointers):
            return False
        # Dereference the device object and carve out its object header;
        # the packet is only accepted when that header is of type "Device".
        device = self.DeviceObject.dereference()
        body_offset = self.obj_native_vm.profile.get_obj_offset(
            "_OBJECT_HEADER", "Body")
        object_header = obj.Object("_OBJECT_HEADER",
                                   offset = device.obj_offset - body_offset,
                                   vm = device.obj_vm,
                                   native_vm = device.obj_native_vm)
        return object_header.get_object_type() == "Device"
#--------------------------------------------------------------------------------
# profile modifications
#--------------------------------------------------------------------------------
class CallbackMods(obj.ProfileModification):
    """Install the callback vtypes/classes matching the profile's bitness."""
    before = ['WindowsOverlay']
    conditions = {'os': lambda x: x == 'windows'}

    def modification(self, profile):
        memory_model = profile.metadata.get("memory_model", "32bit")
        if memory_model == "32bit":
            profile.vtypes.update(callback_types)
            # Only the 32-bit profile installs the _SHUTDOWN_PACKET helper.
            profile.object_classes.update(
                {'_SHUTDOWN_PACKET': _SHUTDOWN_PACKET})
        else:
            profile.vtypes.update(callback_types_x64)
#--------------------------------------------------------------------------------
# pool scanners
#--------------------------------------------------------------------------------
class AbstractCallbackScanner(poolscan.PoolScanner):
    """Return the offset of the callback, no object headers

    Common base for the callback pool scanners below; each subclass sets
    pooltag, struct_name and checks in its __init__.
    """
class PoolScanFSCallback(AbstractCallbackScanner):
    """PoolScanner for File System Callbacks"""

    def __init__(self, address_space):
        AbstractCallbackScanner.__init__(self, address_space)
        self.pooltag = "IoFs"
        self.struct_name = "_NOTIFICATION_PACKET"
        # Pool block size differs between 32- and 64-bit kernels.
        bits32 = address_space.profile.metadata.get(
            "memory_model", "32bit") == "32bit"
        size = 0x18 if bits32 else 0x30
        self.checks = [
            ('CheckPoolSize', dict(condition = lambda x: x == size)),
            ('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
            #('CheckPoolIndex', dict(value = 4)),
        ]
class PoolScanShutdownCallback(AbstractCallbackScanner):
    """PoolScanner for Shutdown Callbacks"""

    def __init__(self, address_space):
        AbstractCallbackScanner.__init__(self, address_space)
        self.pooltag = "IoSh"
        self.struct_name = "_SHUTDOWN_PACKET"
        # Pool block size differs between 32- and 64-bit kernels.
        bits32 = address_space.profile.metadata.get(
            "memory_model", "32bit") == "32bit"
        size = 0x18 if bits32 else 0x30
        self.checks = [
            ('CheckPoolSize', dict(condition = lambda x: x == size)),
            ('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
            ('CheckPoolIndex', dict(value = 0)),
        ]
class PoolScanGenericCallback(AbstractCallbackScanner):
    """PoolScanner for Generic Callbacks"""

    def __init__(self, address_space):
        AbstractCallbackScanner.__init__(self, address_space)
        self.pooltag = "Cbrb"
        self.struct_name = "_GENERIC_CALLBACK"
        # Pool block size differs between 32- and 64-bit kernels.
        bits32 = address_space.profile.metadata.get(
            "memory_model", "32bit") == "32bit"
        size = 0x18 if bits32 else 0x30
        self.checks = [
            ('CheckPoolSize', dict(condition = lambda x: x == size)),
            ('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
            # This is a good constraint for all images except Frank's rustock-c.vmem
            #('CheckPoolIndex', dict(value = 1)),
        ]
class PoolScanDbgPrintCallback(AbstractCallbackScanner):
    """PoolScanner for DebugPrint Callbacks on Vista and 7"""
    def __init__(self, address_space):
        AbstractCallbackScanner.__init__(self, address_space)
        self.pooltag = "DbCb"
        self.struct_name = "_DBGPRINT_CALLBACK"
        # Allocation size varies; accept a plausible range rather than an
        # exact size.
        self.checks = [ ('CheckPoolSize', dict(condition = lambda x: x >= 0x20 and x <= 0x40)),
                        ('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
                        #('CheckPoolIndex', dict(value = 0)),
                        ]
class PoolScanRegistryCallback(AbstractCallbackScanner):
    """PoolScanner for Registry Callbacks (pool tag 'CMcb')

    (Original docstring was copy-pasted from the DbgPrint scanner.)
    """
    def __init__(self, address_space):
        AbstractCallbackScanner.__init__(self, address_space)
        self.pooltag = "CMcb"
        self.struct_name = "_REGISTRY_CALLBACK"
        self.checks = [('CheckPoolSize', dict(condition = lambda x: x >= 0x38)),
                       ('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
                       ('CheckPoolIndex', dict(value = 4)),
                       ]
class PoolScanPnp9(AbstractCallbackScanner):
    """PoolScanner for Pnp9 (EventCategoryHardwareProfileChange)"""
    def __init__(self, address_space):
        AbstractCallbackScanner.__init__(self, address_space)
        self.pooltag = "Pnp9"
        self.struct_name = "_NOTIFY_ENTRY_HEADER"
        self.checks = [ # seen as 0x2C on W7, 0x28 on vistasp0 (4 less but needs 8 less)
                        ('CheckPoolSize', dict(condition = lambda x: x >= 0x30)),
                        ('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
                        ('CheckPoolIndex', dict(value = 1)),
                        ]
class PoolScanPnpD(AbstractCallbackScanner):
    """PoolScanner for PnpD (EventCategoryDeviceInterfaceChange)"""
    def __init__(self, address_space):
        AbstractCallbackScanner.__init__(self, address_space)
        self.pooltag = "PnpD"
        self.struct_name = "_NOTIFY_ENTRY_HEADER"
        self.checks = [('CheckPoolSize', dict(condition = lambda x: x >= 0x40)),
                       ('CheckPoolType', dict(non_paged = True, paged = True, free = True)),
                       ('CheckPoolIndex', dict(value = 1)),
                       ]
class PoolScanPnpC(AbstractCallbackScanner):
    """PoolScanner for PnpC (EventCategoryTargetDeviceChange)"""

    def __init__(self, address_space):
        AbstractCallbackScanner.__init__(self, address_space)
        self.struct_name = "_NOTIFY_ENTRY_HEADER"
        self.pooltag = "PnpC"
        size_check = ('CheckPoolSize', dict(condition = lambda x: x >= 0x38))
        type_check = ('CheckPoolType', dict(non_paged = True, paged = True, free = True))
        index_check = ('CheckPoolIndex', dict(value = 1))
        self.checks = [size_check, type_check, index_check]
#--------------------------------------------------------------------------------
# callbacks plugin
#--------------------------------------------------------------------------------
class Callbacks(common.AbstractScanCommand):
    """Print system-wide notification routines"""
    # Scanners that run on every OS version; Vista+ specific scanners are
    # appended at runtime in calculate().
    scanners = [PoolScanFSCallback, PoolScanShutdownCallback, PoolScanGenericCallback]
    @staticmethod
    def get_kernel_callbacks(nt_mod):
        """
        Enumerate the Create Process, Create Thread, and Image Load callbacks.
        On some systems, the byte sequences will be inaccurate or the exported
        function will not be found. In these cases, the PoolScanGenericCallback
        scanner will pick up the pool associated with the callbacks.

        Yields (symbol_name, callback_address, None) tuples.
        """
        bits32 = nt_mod.obj_vm.profile.metadata.get("memory_model", "32bit") == "32bit"
        if bits32:
            # (exported function, byte pattern preceding the global's address)
            routines = [
                # push esi; mov esi, offset _PspLoadImageNotifyRoutine
                ('PsSetLoadImageNotifyRoutine', "\x56\xbe"),
                # push esi; mov esi, offset _PspCreateThreadNotifyRoutine
                ('PsSetCreateThreadNotifyRoutine', "\x56\xbe"),
                # mov edi, offset _PspCreateProcessNotifyRoutine
                ('PsSetCreateProcessNotifyRoutine', "\xbf"),
                ]
        else:
            # lea rcx, offset _PspLoadImageNotifyRoutine
            routines = [
                ('PsRemoveLoadImageNotifyRoutine', "\x48\x8d\x0d"),
                # lea rcx, offset _PspCreateThreadNotifyRoutine
                ('PsRemoveCreateThreadNotifyRoutine', "\x48\x8d\x0d"),
                # mov edi, offset _PspCreateProcessNotifyRoutine
                #('PsSetCreateProcessNotifyRoutine', "\xbf"),
                ]
        for symbol, hexbytes in routines:
            # Locate the exported symbol in the NT module
            symbol_rva = nt_mod.getprocaddress(symbol)
            if symbol_rva == None:
                continue
            symbol_address = symbol_rva + nt_mod.DllBase
            # Find the global variable referenced by the exported symbol
            data = nt_mod.obj_vm.zread(symbol_address, 100)
            offset = data.find(hexbytes)
            if offset == -1:
                continue
            if bits32:
                # Read the pointer to the list
                p = obj.Object('Pointer',
                               offset = symbol_address + offset + len(hexbytes),
                               vm = nt_mod.obj_vm)
            else:
                # Read the pointer to the list.  On x64 the operand is a
                # rip-relative displacement measured from the end of the
                # 7-byte instruction, hence the "+ 7" below.
                v = obj.Object('int',
                               offset = symbol_address + offset + len(hexbytes),
                               vm = nt_mod.obj_vm)
                p = symbol_address + offset + 7 + v
            # The list is an array of 8 _EX_FAST_REF objects
            addrs = obj.Object('Array', count = 8, targetType = '_EX_FAST_REF',
                               offset = p, vm = nt_mod.obj_vm)
            for addr in addrs:
                callback = addr.dereference_as("_GENERIC_CALLBACK")
                if callback:
                    yield symbol, callback.Callback, None
    @staticmethod
    def get_bugcheck_callbacks(addr_space):
        """
        Enumerate generic Bugcheck callbacks.

        Note: These structures don't exist in tagged pools, but you can find
        them via KDDEBUGGER_DATA64 on all versions of Windows.

        Yields (list_head_name, callback_address, component) tuples.
        """
        kdbg = tasks.get_kdbg(addr_space)
        list_head = kdbg.KeBugCheckCallbackListHead.dereference_as('_KBUGCHECK_CALLBACK_RECORD')
        for l in list_head.Entry.list_of_type("_KBUGCHECK_CALLBACK_RECORD", "Entry"):
            yield "KeBugCheckCallbackListHead", l.CallbackRoutine, l.Component.dereference()
    @staticmethod
    def get_registry_callbacks_legacy(nt_mod):
        """
        Enumerate registry change callbacks.

        This method of finding a global variable via disassembly of the
        CmRegisterCallback function is only for XP systems. If it fails on
        XP you can still find the callbacks using PoolScanGenericCallback.

        On Vista and Windows 7, these callbacks are registered using the
        CmRegisterCallbackEx function.

        Yields (symbol_name, callback_address, None) tuples.
        """
        # distorm3 is an optional dependency; silently skip when unavailable
        if not has_distorm3:
            return
        symbol = "CmRegisterCallback"
        # Get the RVA of the symbol from NT's EAT
        symbol_rva = nt_mod.getprocaddress(symbol)
        if symbol_rva == None:
            return
        # Absolute VA to the symbol code
        symbol_address = symbol_rva + nt_mod.DllBase
        # Read the function prologue
        data = nt_mod.obj_vm.zread(symbol_address, 200)
        c = 0
        vector = None
        # Looking for MOV EBX, CmpCallBackVector
        # This may be the first or second MOV EBX instruction
        for op in distorm3.Decompose(symbol_address, data, distorm3.Decode32Bits):
            if (op.valid and op.mnemonic == "MOV"
                    and len(op.operands) == 2
                    and op.operands[0].name == 'EBX'):
                vector = op.operands[1].value
                if c == 1:
                    break
                else:
                    c += 1
        # Can't find the global variable
        if vector == None:
            return
        # The vector is an array of 100 _EX_FAST_REF objects
        addrs = obj.Object("Array", count = 100, offset = vector,
                           vm = nt_mod.obj_vm, targetType = "_EX_FAST_REF")
        for addr in addrs:
            callback = addr.dereference_as("_EX_CALLBACK_ROUTINE_BLOCK")
            if callback:
                yield symbol, callback.Function, None
    @staticmethod
    def get_bugcheck_reason_callbacks(nt_mod):
        """
        Enumerate Bugcheck Reason callbacks.

        Note: These structures don't exist in tagged pools, so we
        find them by locating the list head which is a non-exported
        NT symbol. The method works on all x86 versions of Windows.

        mov [eax+KBUGCHECK_REASON_CALLBACK_RECORD.Entry.Blink], \
            offset _KeBugCheckReasonCallbackListHead

        Yields (symbol_name, callback_address, component) tuples.
        """
        symbol = "KeRegisterBugCheckReasonCallback"
        bits32 = nt_mod.obj_vm.profile.metadata.get("memory_model", "32bit") == "32bit"
        if bits32:
            # mov [eax+4], imm32 (the instruction quoted in the docstring)
            hexbytes = "\xC7\x40\x04"
        else:
            # lea rcx, [rip+disp32]
            hexbytes = "\x48\x8d\x0d"
        # Locate the symbol RVA
        symbol_rva = nt_mod.getprocaddress(symbol)
        if symbol_rva == None:
            return
        # Compute the absolute virtual address
        symbol_address = symbol_rva + nt_mod.DllBase
        data = nt_mod.obj_vm.zread(symbol_address, 200)
        # Search for the pattern
        offset = data.find(hexbytes)
        if offset == -1:
            return
        if bits32:
            p = obj.Object('Pointer',
                           offset = symbol_address + offset + len(hexbytes),
                           vm = nt_mod.obj_vm)
            bugs = p.dereference_as('_KBUGCHECK_REASON_CALLBACK_RECORD')
        else:
            # rip-relative displacement from the end of the 7-byte instruction
            v = obj.Object("int", offset = symbol_address + offset + len(hexbytes), vm = nt_mod.obj_vm)
            p = symbol_address + offset + 7 + v
            bugs = obj.Object("_KBUGCHECK_REASON_CALLBACK_RECORD", offset = p, vm = nt_mod.obj_vm)
        for l in bugs.Entry.list_of_type("_KBUGCHECK_REASON_CALLBACK_RECORD", "Entry"):
            if nt_mod.obj_vm.is_valid_address(l.CallbackRoutine):
                yield symbol, l.CallbackRoutine, l.Component.dereference()
    def calculate(self):
        """Yield (info_tuple, mods, mod_addrs) for each discovered callback."""
        addr_space = utils.load_as(self._config)
        bits32 = addr_space.profile.metadata.get("memory_model", "32bit") == "32bit"
        # Get the OS version we're analyzing
        version = (addr_space.profile.metadata.get('major', 0),
                   addr_space.profile.metadata.get('minor', 0))
        modlist = list(modules.lsmod(addr_space))
        mods = dict((addr_space.address_mask(mod.DllBase), mod) for mod in modlist)
        mod_addrs = sorted(mods.keys())
        # Valid for Vista and later
        # NOTE(review): this appends to the class-level `scanners` list, so a
        # second calculate() call would accumulate duplicates — confirm intended.
        if version >= (6, 0):
            self.scanners.append(PoolScanDbgPrintCallback)
            self.scanners.append(PoolScanRegistryCallback)
            self.scanners.append(PoolScanPnp9)
            self.scanners.append(PoolScanPnpD)
            self.scanners.append(PoolScanPnpC)
        # Translate each scanned pool structure into a uniform
        # (source_api, callback_address, details) tuple
        for objct in self.scan_results(addr_space):
            name = objct.obj_name
            if name == "_REGISTRY_CALLBACK":
                info = "CmRegisterCallback", objct.Function, None
                yield info, mods, mod_addrs
            elif name == "_DBGPRINT_CALLBACK":
                info = "DbgSetDebugPrintCallback", objct.Function, None
                yield info, mods, mod_addrs
            elif name == "_SHUTDOWN_PACKET":
                driver = objct.DeviceObject.dereference().DriverObject
                if not driver:
                    continue
                index = devicetree.MAJOR_FUNCTIONS.index('IRP_MJ_SHUTDOWN')
                address = driver.MajorFunction[index]
                details = str(driver.DriverName or "-")
                info = "IoRegisterShutdownNotification", address, details
                yield info, mods, mod_addrs
            elif name == "_GENERIC_CALLBACK":
                info = "GenericKernelCallback", objct.Callback, None
                yield info, mods, mod_addrs
            elif name == "_NOTIFY_ENTRY_HEADER":
                # Dereference the driver object pointer
                driver = objct.DriverObject.dereference()
                driver_name = ""
                if driver:
                    # Instantiate an object header for the driver name
                    header = driver.get_object_header()
                    if header.get_object_type() == "Driver":
                        # Grab the object name
                        driver_name = header.NameInfo.Name.v()
                info = objct.EventCategory, objct.CallbackRoutine, driver_name
                yield info, mods, mod_addrs
            elif name == "_NOTIFICATION_PACKET":
                info = "IoRegisterFsRegistrationChange", objct.NotificationRoutine, None
                yield info, mods, mod_addrs
        # modlist[0] is the NT kernel module itself
        for info in self.get_kernel_callbacks(modlist[0]):
            yield info, mods, mod_addrs
        for info in self.get_bugcheck_callbacks(addr_space):
            yield info, mods, mod_addrs
        for info in self.get_bugcheck_reason_callbacks(modlist[0]):
            yield info, mods, mod_addrs
        # Valid for XP
        if bits32 and version == (5, 1):
            for info in self.get_registry_callbacks_legacy(modlist[0]):
                yield info, mods, mod_addrs
    def unified_output(self, data):
        """Render results as a TreeGrid (unified output API)."""
        return TreeGrid([("Type", str),
                         ("Callback", Address),
                         ("Module", str),
                         ("Details", str)],
                        self.generator(data))
    def generator(self, data):
        """Yield TreeGrid rows, resolving each callback to its owning module."""
        for (sym, cb, detail), mods, mod_addrs in data:
            # mods.values()[0] relies on Python 2 dict.values() returning a list
            module = tasks.find_module(mods, mod_addrs, mods.values()[0].obj_vm.address_mask(cb))
            ## The original callbacks plugin searched driver objects
            ## if the owning module isn't found (Rustock.B). We leave that
            ## task up to the user this time, and will be incorporating
            ## some different module association methods later.
            if module:
                module_name = module.BaseDllName or module.FullDllName
            else:
                module_name = "UNKNOWN"
            yield (0, [str(sym), Address(cb), str(module_name), str(detail or "-")])
    def render_text(self, outfd, data):
        """Render results as a plain-text table (legacy output API)."""
        self.table_header(outfd,
                          [("Type", "36"),
                           ("Callback", "[addrpad]"),
                           ("Module", "20"),
                           ("Details", ""),
                           ])
        for (sym, cb, detail), mods, mod_addrs in data:
            # mods.values()[0] relies on Python 2 dict.values() returning a list
            module = tasks.find_module(mods, mod_addrs, mods.values()[0].obj_vm.address_mask(cb))
            ## The original callbacks plugin searched driver objects
            ## if the owning module isn't found (Rustock.B). We leave that
            ## task up to the user this time, and will be incorporating
            ## some different module association methods later.
            if module:
                module_name = module.BaseDllName or module.FullDllName
            else:
                module_name = "UNKNOWN"
            self.table_row(outfd, sym, cb, module_name, detail or "-")
|
A brass triangle with a center stone of your choice. The unique style stands out and is backed by a hooked ear post.
The dart hangs down just under an inch below the hook.
|
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, cstr, flt, formatdate
from frappe.model.mapper import get_mapped_doc
from frappe import msgprint, _, throw
from erpnext.setup.utils import get_company_currency
import frappe.defaults
from frappe.desk.form.utils import get_linked_docs
import json
#check batch is of respective item code
def validate_batch(doc,method):
    """Ensure each item row's batch number actually belongs to that item."""
    for row in doc.get('items'):
        if not row.batch_no:
            continue
        batch_item = frappe.db.get_value('Batch', row.batch_no, 'item')
        if batch_item != row.item_code:
            frappe.throw(_("Select batch respective to item code {0}").format(row.item_code))
#maintain supplier name,rate,batch as EC-Rate of purchase
def create_batchwise_price_list(doc, method):
    """Record supplier/rate/batch under the 'EC - Rate of Purchase' price list."""
    for row in doc.get('items'):
        existing = frappe.db.get_value(
            'Item Price',
            {'item_code': row.item_code, 'price_list': 'EC - Rate of Purchase'},
            'name')
        if existing:
            create_batchwise_item_price(existing, row, doc)
        else:
            create_item_price(row, doc)
#create item price list
def create_item_price(d,doc):
    """Create an 'EC - Rate of Purchase' Item Price for the row, then its batch rate."""
    price = frappe.new_doc('Item Price')
    price.price_list = 'EC - Rate of Purchase'
    price.item_code = d.item_code
    price.item_name = d.item_name
    price.item_description = d.description
    price.price_list_rate = d.rate
    price.currency = doc.currency
    price.buying = 1
    price.selling = 1
    price.save(ignore_permissions=True)
    create_batchwise_item_price(price.name, d, doc)
#create batch wise price list rate
def create_batchwise_item_price(name, d, doc):
    """Attach a Batchwise Purchase Rate child row to Item Price `name` (once per batch)."""
    if not d.batch_no:
        return
    if frappe.db.get_value('Batchwise Purchase Rate', {'batch': d.batch_no}, 'name'):
        return
    row = frappe.new_doc('Batchwise Purchase Rate')
    row.parenttype = 'Item Price'
    row.parentfield = 'batchwise_purchase_rate'
    row.parent = name
    row.batch = d.batch_no
    row.rate = d.rate
    row.supplier = doc.supplier
    row.document = doc.name
    row.save(ignore_permissions=True)
#on cancel delete created price list
def cancel_batchwise_price_list(doc, method):
    """On cancel, delete the Batchwise Purchase Rate rows created for this document.

    Fixes: doc.name was %-interpolated straight into the SQL string
    (quoting/injection risk), and the identical DELETE re-ran once per
    batched item row; one parameterized DELETE is equivalent.
    """
    if any(d.batch_no for d in doc.get('items')):
        frappe.db.sql("delete from `tabBatchwise Purchase Rate` where document=%s",
                      (doc.name,))
#create supplier quotation from quotationin draft
@frappe.whitelist()
def create_supplier_quotation():
    """Create draft Supplier Quotations for items on draft Quotations that
    have no usable (non-zero) item price yet.

    Fixes: parameterized the inline SQL — the quotation name and item code
    were %-interpolated into the query strings.
    """
    Quotations = get_quotation_in_draft()
    if not Quotations:
        return
    for quotation in Quotations:
        # Skip quotations already consumed by a previous scheduler run
        if frappe.db.get_value("Quotation Used", {"quotation": quotation[0]}, "quotation"):
            continue
        items = frappe.db.sql(
            "select item_code,qty from `tabQuotation Item` where parent=%s",
            (quotation[0],), as_list=1)
        for item in items:
            item_price_exists = frappe.db.sql(
                """select distinct ifnull(price_list_rate,0) from `tabItem Price`
                   where item_code=%s""", (item[0],))
            if not item_price_exists or item_price_exists[0][0] == 0:
                suppliers = get_suplier_details(item[0])
                if suppliers:
                    for supplier in suppliers:
                        make_supplier_quotation(item, supplier[0])
        update_used_quotation(quotation[0])
#get all quotations in draft state
def get_quotation_in_draft():
    """Return the names of all Quotations still in draft state."""
    query = """select name from `tabQuotation` where docstatus=0"""
    return frappe.db.sql(query, as_list=1)
#get all quotations that were used during last scheduler event for validation
def get_quotation_used(quotation):
    """Return `Quotation Used` rows matching `quotation` (scheduler validation helper).

    Fixes: parameterized the query (the name was %-interpolated into the SQL).
    """
    return frappe.db.sql(
        "select quotation from `tabQuotation Used` where quotation=%s",
        (quotation,), as_list=1)
#get details of supplier
def get_suplier_details(item):
    """Return names of suppliers that carry both this item's brand and item group.

    NOTE: keeps the historical 'suplier' spelling because callers use it.
    Fixes: parameterized brand/item_group (previously %-interpolated into SQL).
    """
    item_wrapper = frappe.get_doc("Item", item)
    return frappe.db.sql(
        """select supplier_name from `tabSupplier`
           where supplier_name in (select parent from `tabSupplier Brands` where brand=%s)
             and supplier_name in (select parent from `tabSupplier Item Groups` where item_group=%s)""",
        (item_wrapper.brand, item_wrapper.item_group), as_list=1)
#create supplier quotation
def make_supplier_quotation(item,supplier):
    """Route `item` to the supplier's open quotation, creating one if needed."""
    existing = check_quotation_exists(supplier)
    if not existing:
        new_supplier_quotaion(supplier, item)
        return
    already_listed = frappe.db.get_value(
        'Supplier Quotation Item',
        {'item_code': item[0], 'parent': existing}, 'name')
    if already_listed:
        update_qty_quotation(existing, item)
    else:
        update_supplier_items(item, existing)
#check if quotation exists in for supplier
def check_quotation_exists(supplier):
    """Return the name of the supplier's draft Supplier Quotation, if any."""
    filters = {'supplier': supplier, 'docstatus': 0}
    return frappe.db.get_value('Supplier Quotation', filters, 'name')
#create new supplier quotation
def new_supplier_quotaion(supplier,item):
    """Create a new draft Supplier Quotation holding a single item row.

    NOTE: keeps the historical 'quotaion' spelling because callers use it.
    """
    item_wrapper = frappe.get_doc("Item", item[0])
    sq = frappe.new_doc('Supplier Quotation')
    sq.supplier = supplier
    row = {
        "doctype": "Supplier Quotation Item",
        "item_code": item[0],
        "item_name": item_wrapper.item_name,
        "description": item_wrapper.description,
        "uom": item_wrapper.stock_uom,
        "item_group": item_wrapper.item_group,
        "brand": item_wrapper.brand,
        "qty": item[1],
        "base_rate": 0,
        "base_amount": 0,
        "manufacturer_pn": item_wrapper.manufacturer_pn,
        "oem_part_number": item_wrapper.oem_part_number,
    }
    sq.append("items", row)
    sq.save(ignore_permissions=True)
#Add item to existing supplier quotation
def update_supplier_items(item,name):
    """Append `item` as a new row of existing Supplier Quotation `name`.

    Fixes: parameterized the idx lookup (parent was %-interpolated into SQL).
    """
    item_wrapper = frappe.get_doc("Item", item[0])
    # Next child-row index for the quotation
    idx = frappe.db.sql(
        """select ifnull(max(idx),0)+1 as idx from `tabSupplier Quotation Item`
           where parent=%s""", (name,), as_list=1)
    sqi = frappe.new_doc('Supplier Quotation Item')
    sqi.idx = idx[0][0]
    sqi.item_code = item[0]
    sqi.item_name = item_wrapper.item_name
    sqi.description = item_wrapper.description
    sqi.manufacturer_pn = item_wrapper.manufacturer_pn
    sqi.oem_part_number = item_wrapper.oem_part_number
    sqi.uom = item_wrapper.stock_uom
    sqi.brand = item_wrapper.brand
    sqi.qty = item[1]
    sqi.base_rate = 0
    sqi.base_amount = 0
    sqi.item_group = item_wrapper.item_group
    sqi.parentfield = 'items'
    sqi.parenttype = 'Supplier Quotation'
    sqi.parent = name
    sqi.save(ignore_permissions=True)
#if item in supplier quotation exists update qty
def update_qty_quotation(name,item):
    """Increase qty of an existing Supplier Quotation Item row.

    Fixes: parameterized the UPDATE (qty/name/item_code were %-interpolated).
    """
    frappe.db.sql(
        """update `tabSupplier Quotation Item` set qty=qty+%s
           where parent=%s and item_code=%s""",
        (item[1], name, item[0]))
#when quotation used so it can be negleted in future
def update_used_quotation(quotation):
    """Mark `quotation` as processed so later scheduler runs skip it."""
    if frappe.db.get_value("Quotation Used", {"quotation": quotation}, "quotation"):
        return
    parent_doc = frappe.new_doc('Used Quotation')
    parent_doc.save(ignore_permissions=True)
    child = frappe.new_doc('Quotation Used')
    child.quotation = quotation
    child.parenttype = 'Used Quotation'
    child.parentfield = 'quotation_used'
    child.parent = parent_doc.name
    child.save(ignore_permissions=True)
#returns query data
@frappe.whitelist()
def get_details(doc):
    """Run the dynamically built quote-item query and annotate each row with
    the 'previously ordered' status flag.

    Fixes: removed the redundant function-local `import json` (json is
    already imported at module level).
    """
    doc = json.loads(doc)
    condition = get_query(doc)
    result = frappe.db.sql(condition, as_list=1)
    return previous_ordered_status(doc, result)
#check whether item previously ordered
@frappe.whitelist()
def previous_ordered_status(doc, result):
    """Fill column index 4 of each result row with the previously-ordered flag.

    Fixes: the original looped over every column index and re-read data[1]
    on each pass just to act on index 4; index 4 is now written directly.
    """
    query_data = []
    for data in result:
        if len(data) > 4:
            if doc.get('previously_ordered_only'):
                # Query was already restricted to previously ordered items
                data[4] = 1
            else:
                # data[1] is the item code
                data[4] = get_status(doc, data[1])
        query_data.append(data)
    return query_data
#get document status
@frappe.whitelist()
def get_status(doc, item):
    """Return 1 if the customer has a submitted Sales Order containing `item`, else 0.

    Fixes: parameterized customer/item (previously %-interpolated into SQL).
    """
    status = frappe.db.sql(
        """select ifnull(`tabSales Order`.docstatus,0)
           from `tabSales Order`, `tabSales Order Item`
           where `tabSales Order`.name = `tabSales Order Item`.parent
             and `tabSales Order`.customer=%s
             and `tabSales Order Item`.item_code=%s
             and `tabSales Order`.docstatus=1""",
        (doc.get('customer'), item))
    return 1 if status else 0
#build query
@frappe.whitelist()
def get_query(doc):
    """Assemble the full SELECT statement from its column/table/condition parts."""
    parts = [get_columns(doc), get_tables(doc), get_conditions(doc)]
    return ' '.join(parts)
#build columns
@frappe.whitelist()
def get_columns(doc):
    # Column list for the quote-item search query.  When filtering by item
    # group, the group column comes from Website Item Group instead of Item.
    column = 'ifnull(`tabItem`.item_group,"")'
    if doc.get('item_groups'):
        column = 'ifnull(`tabWebsite Item Group`.performance,"")'
    # Columns: placeholder, item_code, brand, group, placeholder (filled by
    # previous_ordered_status), batch, rate, stock qty for the batch.
    return """ select DISTINCT '',ifnull(`tabQuote Item`.item_code,"") ,
    ifnull(`tabQuote Item`.brand,"") ,
    """+column+""",
    '',
    ifnull(`tabBatchwise Purchase Rate`.batch,""),
    format(ifnull(`tabBatchwise Purchase Rate`.rate,(select price_list_rate from `tabItem Price` where price_list='EC - Rate of Purchase' and item_code=`tabQuote Item`.item_code)),2) ,
    (select format(ifnull(sum(actual_qty),0),2) from `tabStock Ledger Entry` where item_code=`tabQuote Item`.item_code and batch_no=`tabBatchwise Purchase Rate`.batch)"""
#returns tables required
@frappe.whitelist()
def get_tables(doc):
    # FROM clause for the quote-item search query; the base join set depends
    # on whether the caller filters by item group, part number, or both.
    table = """ `tabItem` INNER JOIN `tabQuote Item` ON
    `tabQuote Item`.parent = `tabItem`.name """
    if doc.get('item_groups') and doc.get('part_no'):
        table = """ `tabItem` INNER JOIN `tabQuote Item` ON
        `tabQuote Item`.parent = `tabItem`.name INNER JOIN
        `tabWebsite Item Group` ON `tabQuote Item`.parent = `tabWebsite Item Group`.parent """
    elif doc.get('item_groups'):
        table = """ `tabWebsite Item Group` INNER JOIN `tabQuote Item` ON
        `tabQuote Item`.parent = `tabWebsite Item Group`.parent"""
    # Shared LEFT JOINs: prices, stock ledger, batch rates and sales orders
    return """ FROM """+table+""" LEFT JOIN
    `tabItem Price` ON `tabQuote Item`.item_code = `tabItem Price`.item_code
    LEFT JOIN
    `tabStock Ledger Entry` ON `tabStock Ledger Entry`.item_code = `tabItem Price`.item_code and `tabStock Ledger Entry`.is_cancelled='No'
    LEFT JOIN
    `tabBatchwise Purchase Rate` ON `tabBatchwise Purchase Rate`.parent = `tabItem Price`.name
    LEFT JOIN
    `tabSales Order Item` ON `tabSales Order Item`.item_code = `tabQuote Item`.item_code
    LEFT JOIN
    `tabSales Order` ON `tabSales Order`.name = `tabSales Order Item`.parent """
#returns conditions for query
@frappe.whitelist()
def get_conditions(doc):
    # WHERE clause for the quote-item search query; '1=1' is the no-op filter.
    # NOTE(review): filter values (part_no / item_groups / customer) are
    # %-interpolated into the SQL string — injection/quoting risk; fixing it
    # requires threading bind values through get_query/get_details as well.
    previous_ordered = condition = '1=1'
    if doc.get('item_groups') and doc.get('part_no'):
        condition = """ `tabItem`.name='%s' and `tabWebsite Item Group`.item_group = '%s' """%(doc.get('part_no'),doc.get('item_groups'))
    elif doc.get('item_groups'):
        condition = """ `tabWebsite Item Group`.item_group = '%s' """%(doc.get('item_groups'))
    elif doc.get('part_no'):
        condition = """ `tabItem`.name='%s' """%(doc.get('part_no'))
    if doc.get('previously_ordered_only') == 1:
        previous_ordered = """`tabSales Order`.customer= '%s' and ifnull(`tabSales Order`.docstatus,0) = 1 """%(doc.get('customer'))
    return """ where """+condition+""" and `tabItem Price`.price_list='EC - Rate of Purchase'
    and """+previous_ordered+""" """
def validate_price_list(doc, method):
    """Block row rates below the recorded batchwise 'EC - Rate of Purchase' rate.

    Fixes: parameterized item_code/batch (previously %-interpolated into SQL).
    """
    for d in doc.get('items'):
        if not d.batch_no:
            continue
        rate = frappe.db.sql(
            """select a.rate from `tabBatchwise Purchase Rate` a
               inner join `tabItem Price` b on a.parent = b.name
               and b.item_code = %s and a.batch = %s""",
            (d.item_code, d.batch_no), as_list=1)
        if rate and flt(rate[0][0]) > flt(d.rate):
            frappe.throw(_('Item Code {0} rate must be greater than rate of price list EC Purchase of Rate').format(d.item_code))
def set_price_list(doc, method):
    """Copy the price list's competitor flag onto the doc and its Item Price row.

    Fixes: parameterized the UPDATE (doc.name was %-interpolated into the SQL
    string; the competitor flag was inserted unquoted).
    """
    doc.competitor = frappe.db.get_value('Price List', doc.price_list, 'competitor')
    frappe.db.sql("update `tabItem Price` set competitor=%s where name=%s",
                  (cint(doc.competitor), doc.name))
#create purchase orders from submitted sales orders
@frappe.whitelist()
def generate_po():
    """Create/extend draft Purchase Orders for submitted Sales Orders whose
    warehouse stock cannot cover the ordered quantity.

    Fixes: the original indexed get_stock_balance(...)[0][0] unconditionally,
    raising IndexError when no Bin row exists for the item/warehouse; a
    missing Bin row is now treated as zero stock.
    """
    sales_orders = get_submitted_sales_orders()
    if not sales_orders:
        return
    for sales_order in sales_orders:
        so_name = sales_order[0]
        # Skip sales orders already consumed by a previous run
        if frappe.db.get_value("Sales Order Used", {"sales_order": so_name}, "sales_order"):
            continue
        doc = frappe.get_doc('Sales Order', so_name)
        for item in doc.get('items'):
            if cint(frappe.db.get_value('Item', item.item_code, 'is_stock_item')) == 1:
                stock_balance = get_stock_balance(item)
                on_hand = flt(stock_balance[0][0]) if stock_balance else 0.0
                qty = (flt(item.qty) - on_hand) or 0.0
                if flt(qty) > 0.0:
                    supplier = get_supplier_details(item.item_code)
                    if supplier and supplier[0][1]:
                        make_po(supplier, item, so_name, qty)
        update_used(so_name)
#returns submitted sales orders
def get_submitted_sales_orders():
    """Return the names of all submitted (docstatus 1) Sales Orders."""
    query = """select name from `tabSales Order` where docstatus=1"""
    return frappe.db.sql(query, as_list=1)
#returns stock balance for item
def get_stock_balance(args):
    """Return [[actual_qty]] from Bin for the row's item/warehouse (may be empty).

    Fixes: parameterized item_code/warehouse (previously str.format-ed into SQL).
    """
    return frappe.db.sql(
        "select actual_qty from `tabBin` where item_code=%s and warehouse=%s",
        (args.item_code, args.warehouse), as_list=1)
#returns least item price list rate and supplier name
def get_supplier_details(item):
    """Return [[min rate, price list name]] for the cheapest supplier price list
    carrying `item` (price lists are named after suppliers here).

    Fixes: parameterized the item code (previously %-interpolated into SQL).
    """
    return frappe.db.sql(
        """select min(price_list_rate), price_list from `tabItem Price`
           where item_code=%s and buying=1
             and price_list in (select name from tabSupplier)
           group by price_list order by price_list_rate limit 1""",
        (item,), as_list=1)
def get_price_list_rate(item,supplier):
    """Return the buying rate for `item` on price list `supplier`, or 0.

    Fixes: parameterized item/supplier (previously %-interpolated into SQL).
    """
    rate = frappe.db.sql(
        """select ifnull(price_list_rate,0) from `tabItem Price`
           where item_code=%s and buying=1 and price_list=%s""",
        (item, supplier), as_list=1)
    return rate[0][0] if rate else 0
#returns sales orders from which purchase orders created
def get_sales_order_used(sales_order):
    """Return `Sales Order Used` rows for this sales order (validation helper).

    Fixes: parameterized the name (previously %-interpolated into SQL).
    """
    return frappe.db.sql(
        "select sales_order from `tabSales Order Used` where sales_order=%s",
        (sales_order[0],), as_list=1)
#makes new po or updates existing
def make_po(supplier,item,sales_order, qty):
    """Route the shortfall qty to a new or existing draft PO for the supplier."""
    existing_po = check_po_exists(supplier[0][1])
    if not existing_po:
        new_po(supplier, item, supplier[0][0], sales_order, qty)
        return
    item_exists = frappe.db.get_value(
        'Purchase Order Item',
        {'item_code': item.item_code, 'parent': existing_po}, 'name')
    if item_exists:
        update_qty(existing_po, item, sales_order, supplier[0][0], qty)
    else:
        add_po_items(existing_po, item, sales_order, supplier[0][0], qty)
#check if po exists
def check_po_exists(supplier):
    """Return the name of the supplier's draft Purchase Order, if one exists."""
    filters = {'supplier': supplier, 'docstatus': 0}
    return frappe.db.get_value('Purchase Order', filters, 'name')
#creates new purchase order
def new_po(supplier,item,price_rate,sales_order, qty):
    """Create a new draft Purchase Order with a single item row.

    NOTE(review): schedule_date is hard-coded to '08-12-2014' — presumably a
    placeholder; confirm before relying on it.
    """
    item_wrapper = frappe.get_doc("Item", item.item_code)
    po = frappe.new_doc('Purchase Order')
    po.supplier = supplier[0][1]
    # Fall back to the global default currency when the supplier has none
    po.currency = frappe.db.get_value('Supplier', supplier[0][1], 'default_currency') or frappe.db.get_value('Global Defaults', None, 'default_currency')
    po.plc_conversion_rate = frappe.db.get_value('Currency Exchange', {'from_currency': po.currency}, 'exchange_rate')
    po.buying_price_list = supplier[0][1]
    row = {
        "doctype": "Purchase Order Item",
        "item_code": item.item_code,
        "item_name": item_wrapper.item_name,
        "description": item_wrapper.description,
        "uom": item_wrapper.stock_uom,
        "item_group": item_wrapper.item_group,
        "brand": item_wrapper.brand,
        "qty": qty,
        "base_rate": 0,
        "base_amount": 0,
        "manufacturer_pn": item_wrapper.manufacturer_pn,
        "oem_part_number": item_wrapper.oem_part_number,
        "price_list_rate": price_rate,
        "schedule_date": '08-12-2014',
    }
    po.append("items", row)
    po.save(ignore_permissions=True)
#maintains sales orders which are used in process
def update_used(sales_order):
    """Record that this sales order has been consumed by the PO generator."""
    if frappe.db.get_value("Sales Order Used", {"sales_order": sales_order}, "sales_order"):
        return
    parent_doc = frappe.new_doc('Used Sales Order')
    parent_doc.save(ignore_permissions=True)
    child = frappe.new_doc('Sales Order Used')
    child.sales_order = sales_order
    child.parenttype = 'Used Sales Order'
    child.parentfield = 'sales_order_used'
    child.parent = parent_doc.name
    child.save(ignore_permissions=True)
#update qty if item in purchase order exists
def update_qty(name,item,sales_order,price_rate, qty):
    """Increase qty on an existing Purchase Order Item row.

    Fixes: parameterized the UPDATE (qty/name/item_code were %-interpolated).
    """
    frappe.db.sql(
        """update `tabPurchase Order Item` set qty=qty+%s
           where parent=%s and item_code=%s""",
        (qty, name, item.item_code))
#update purchase order with item
def add_po_items(name,item,sales_order,price_rate, qty):
    """Append `item` as a new row of existing Purchase Order `name`.

    Fixes: parameterized the idx lookup (parent was %-interpolated into SQL).
    NOTE(review): schedule_date is hard-coded to '08-12-2014', mirroring new_po().
    """
    # Next child-row index for the purchase order
    idx = frappe.db.sql(
        """select ifnull(max(idx),0)+1 as idx from `tabPurchase Order Item`
           where parent=%s""", (name,), as_list=1)
    item_wrapper = frappe.get_doc("Item", item.item_code)
    poi = frappe.new_doc('Purchase Order Item')
    poi.idx = idx[0][0]
    poi.item_code = item.item_code
    poi.item_name = item_wrapper.item_name
    poi.description = item_wrapper.description
    poi.manufacturer_pn = item_wrapper.manufacturer_pn
    poi.oem_part_number = item_wrapper.oem_part_number
    poi.uom = item_wrapper.stock_uom
    poi.brand = item_wrapper.brand
    poi.qty = qty
    poi.price_list_rate = price_rate
    poi.base_rate = 0
    poi.base_amount = 0
    poi.schedule_date = '08-12-2014'
    poi.conversion_factor = 1
    poi.item_group = item_wrapper.item_group
    poi.parentfield = 'items'
    poi.parenttype = 'Purchase Order'
    poi.parent = name
    poi.save(ignore_permissions=True)
#to make oppurtunity from submitted sales order
@frappe.whitelist()
def make_oppurtunity(source_name, target_doc=None):
    """Map a submitted Sales Order into a new Opportunity document.

    NOTE: keeps the historical 'oppurtunity' spelling because callers use it.
    """
    def set_missing_values(source, target):
        target.ignore_pricing_rule = 1
        target.run_method("set_missing_values")

    mapping = {
        "Sales Order": {
            "doctype": "Opportunity",
            "validation": {"docstatus": ["=", 1]},
        },
        "Sales Order Item": {
            "doctype": "Opportunity Item",
            "field_map": {
                "rate": "rate",
                "name": "prevdoc_detail_docname",
                "parent": "against_sales_order",
            },
        },
    }
    return get_mapped_doc("Sales Order", source_name, mapping, target_doc, set_missing_values)
def update_item_price_rate_pi(doc,method):
    """On Purchase Invoice, lower the 'EC - Rate of Purchase' rate when the
    invoiced rate is cheaper than the stored one.

    Fixes: parameterized the UPDATE (rate/item_code were %-interpolated).
    """
    for item in doc.get('items'):
        if not item.item_code:
            continue
        rate = get_ec_rate(item.item_code)
        if rate and (item.rate < rate):
            frappe.db.sql(
                """update `tabItem Price` set price_list_rate=%s
                   where item_code=%s and price_list='EC - Rate of Purchase'""",
                (item.rate, item.item_code))
def update_item_price_sq(doc,method):
    """On Supplier Quotation, lower the 'EC - Rate of Purchase' rate when the
    quoted rate is cheaper than the stored one.

    Fixes: parameterized the UPDATE (rate/item_code were %-interpolated; the
    quoted '%s' also stringified the numeric rate).
    """
    for d in doc.get('items'):
        rate = get_ec_rate(d.item_code)
        if rate and d.rate < rate:
            frappe.db.sql(
                """update `tabItem Price` set price_list_rate=%s
                   where price_list='EC - Rate of Purchase' and item_code=%s""",
                (d.rate, d.item_code))
            frappe.db.sql("commit")
def update_item_price_ip(doc,method):
    """On Item Price change, propagate a cheaper rate to 'EC - Rate of Purchase'.

    Fixes: parameterized the UPDATE (rate/item_code were %-interpolated);
    dropped the dead `else: pass` branch.
    """
    rate = get_ec_rate(doc.item_code)
    if rate and doc.price_list_rate < rate:
        frappe.db.sql(
            """update `tabItem Price` set price_list_rate=%s
               where price_list='EC - Rate of Purchase' and item_code=%s""",
            (doc.price_list_rate, doc.item_code))
        frappe.db.sql("commit")
def get_ec_rate(item_code):
    """Return the stored 'EC - Rate of Purchase' rate for `item_code`, if any."""
    filters = {"item_code": item_code, "price_list": "EC - Rate of Purchase"}
    return frappe.db.get_value("Item Price", filters, "price_list_rate")
def update_item_price_on_pi_cl(doc,method):
    """On Purchase Invoice cancel, recompute each item's EC rate from the
    remaining price sources.

    Fixes: parameterized the UPDATE (rate/item_code were %-interpolated).
    """
    for item in doc.get('items'):
        if item.item_code:
            rate = get_rate(item.item_code)
            if rate:
                frappe.db.sql(
                    """update `tabItem Price` set price_list_rate=%s
                       where item_code=%s and price_list='EC - Rate of Purchase'""",
                    (rate[0][0], item.item_code))
def update_item_price_on_sq_cl(doc,method):
    """On Supplier Quotation cancel, recompute each item's rate on the doc's
    buying price list from the remaining price sources.

    Fixes: parameterized the UPDATE (rate/item_code/price list were
    %-interpolated into the SQL string).
    """
    for item in doc.get('item_list'):
        if item.item_code:
            rate = get_rate(item.item_code)
            if rate:
                frappe.db.sql(
                    """update `tabItem Price` set price_list_rate=%s
                       where item_code=%s and price_list=%s""",
                    (rate[0][0], item.item_code, doc.buying_price_list))
def get_rate(item_code):
    """Return [[rate]]: the least non-zero rate among Item Price rows,
    submitted Supplier Quotation items and submitted Purchase Invoice items.
    Zero-valued sources are bumped above the maximum so they never win.

    Fixes: parameterized item_code (previously %-interpolated into SQL).
    """
    return frappe.db.sql("""select least(
        CASE WHEN item_rate = 0 THEN GREATEST(item_rate,quotation_rate,purchase_rate)+1 ELSE item_rate END,
        CASE WHEN quotation_rate= 0 THEN GREATEST(item_rate,quotation_rate,purchase_rate)+1 ELSE quotation_rate END,
        CASE WHEN purchase_rate = 0 THEN GREATEST(item_rate,quotation_rate,purchase_rate)+1 ELSE purchase_rate END) as rate from (select
        ifnull(min(nullif(ip.price_list_rate,0)),0) as item_rate,
        ifnull(min(nullif(sq.price_list_rate,0)),0) as quotation_rate,
        ifnull(min(nullif(pi.price_list_rate,0)),0) as purchase_rate from `tabItem` im
        left join `tabItem Price` ip on ip.item_code=im.item_code
        left join `tabSupplier Quotation Item` sq on sq.item_code=im.item_code and sq.docstatus=1
        left join `tabPurchase Invoice Item` pi on pi.item_code=im.item_code and pi.docstatus=1
        where im.item_code=%s group by im.item_code)x""", (item_code,), as_list=1)
def check_eq_item_selected_twice(doc,method):
    """Reject duplicate item codes in the Part Equivalency child table.

    Fixes: membership test now uses a set — the original scanned a list per
    row, O(n^2) overall.
    """
    seen = set()
    for row in doc.get('engine_compatibility_'):
        if row.item_code in seen:
            frappe.throw(_("Duplicate entry for Item {0} in Part Equivalency table ").format(row.item_code))
        seen.add(row.item_code)
def auto_create_self_item_entry(doc,method):
    """Ensure the item lists itself in its own Part Equivalency table.

    Fixes: parameterized the existence query (item_code was str.format-ed
    into the SQL string twice).
    """
    result = frappe.db.sql(
        "select name from `tabQuote Item` where parent=%s and item_code=%s",
        (doc.item_code, doc.item_code), as_list=1)
    if not result:
        doc.append('engine_compatibility_', {
            "item_code": doc.item_code,
            "item_name": doc.item_name,
            "brand": doc.brand,
            "item_group": doc.item_group
        })
        doc.save()
        frappe.db.commit()
def create_eq_item_entry(doc,method):
    """Mirror this item into the equivalency tables of its listed equivalents.

    Fixes: parameterized the existence query (previously str.format-ed into SQL).
    """
    for row in doc.get('engine_compatibility_'):
        result = frappe.db.sql(
            "select name from `tabQuote Item` where parent=%s and item_code=%s",
            (row.item_code, doc.item_code), as_list=1)
        if not result:
            item_doc = frappe.get_doc('Item', row.item_code)
            item_doc.append('engine_compatibility_', {
                "item_code": doc.item_code,
                "item_name": doc.item_name,
                "brand": doc.brand,
                "item_group": doc.item_group
            })
            item_doc.save()
            frappe.db.commit()
@frappe.whitelist()
def get_item_code(row_name):
    """Return the item_code stored on the given Quote Item row, if any."""
    if not row_name:
        return None
    return frappe.db.get_value('Quote Item', row_name, 'item_code')
def delete_eq_item_entry(doc,method):
    """Remove this item from the equivalency tables of the items named in
    doc.deleted_eq_item (comma-separated), then clear that field.

    Fixes: the original removed rows from the child list while iterating it,
    which can skip adjacent matches; iteration now runs over a copy.
    """
    if doc.deleted_eq_item:
        deleted_eq_item = cstr(doc.deleted_eq_item).split(',')
        for d in deleted_eq_item:
            my_doc = frappe.get_doc('Item', d)
            rows = my_doc.get('engine_compatibility_')
            for row in list(rows):
                if row.item_code == doc.item_code:
                    rows.remove(row)
            my_doc.save()
        doc.deleted_eq_item = ''
@frappe.whitelist()
def get_alternative_item_details(doc):
    """For each row with a sales_item_name, return equivalent items having
    non-zero stock in the row's warehouse, plus the row's ordered qty.

    Returns (alternatives_by_item, qty_by_item).
    Fixes: parameterized the query (item/warehouse were str.format-ed into SQL).
    """
    doc = json.loads(doc)
    item_dict = {}
    alteritem_dic = {}
    if doc:
        for d in doc.get('items'):
            if not d.get("sales_item_name"):
                continue
            result = frappe.db.sql("""SELECT
                distinct(qi.item_code),
                qi.parent,
                coalesce(bi.actual_qty,0) as actual_qty,
                ifnull(ite.item_name,'') as item_name,
                ifnull(ite.manufacturer_pn,'') as manufacturer_pn,
                ifnull(ite.oem_part_number,'') as oem_part_number,
                ifnull(ite.description,'') as description,
                coalesce(bi.warehouse,'') as warehouse,
                ifnull(ite.stock_uom,'') as stock_uom
                FROM
                `tabQuote Item` qi
                join `tabBin` bi on qi.item_code = bi.item_code
                join `tabItem` ite on ite.item_code = bi.item_code
                where
                qi.parent=%s AND bi.warehouse=%s
                AND bi.actual_qty!=0 AND qi.item_code!=%s""",
                (d["sales_item_name"], d["warehouse"], d["sales_item_name"]), as_dict=1)
            alteritem_dic[d["sales_item_name"]] = result
            item_dict[d["sales_item_name"]] = d["qty"]
    return alteritem_dic, item_dict
def update_sales_item_name(doc,method):
    """Snapshot each row's item_code into sales_item_name and its
    current_oem into old_oem before further processing."""
    for line in doc.get('items'):
        line.sales_item_name = line.item_code
        line.old_oem = line.current_oem
@frappe.whitelist()
def get_roles_for_so_cancellation():
    """Return the roles permitted to cancel Sales Orders."""
    query = "select roles from `tabAssign Roles Permissions`"
    return frappe.db.sql(query, as_list=1)
@frappe.whitelist()
def custom_get_linked_docs(doctype, name, metadata_loaded=None):
    """Cancel every submitted document linked to (doctype, name), then the
    document itself. Always returns 0."""
    linked = get_linked_docs(doctype, name, metadata_loaded)
    cancel_linked_docs(make_unique(linked), doctype, name)
    return 0
def make_unique(results):
    """Reduce each linked-doc list to the unique names of submitted entries.

    Fixes: uses an order-preserving dedup instead of list(set(...)), whose
    ordering was arbitrary across runs.
    """
    if results:
        for key, value in results.items():
            names = []
            for entry in value:
                if entry['docstatus'] == 1 and entry['name'] not in names:
                    names.append(entry['name'])
            results[key] = names
    return results
def cancel_linked_docs(my_dict,doctype,name):
    # Cancel linked documents in dependency order (vouchers/invoices before
    # slips/notes), then cancel the source document itself.
    if my_dict:
        for doc in ['Journal Voucher','Sales Invoice','Packing Slip','Delivery Note']:
            if my_dict.get(doc):
                if doc == 'Sales Invoice':
                    # Invoices may have Journal Vouchers posted against them;
                    # those must be cancelled first.
                    check_link_of_sales_invoice(doc,my_dict.get(doc))
                for curr_name in my_dict.get(doc):
                    cancel_doc(doc,curr_name)
    cancel_sales_order_self(doctype,name)
def cancel_doc(doc, name):
    """Load document `name` of doctype `doc` and run Frappe's cancel workflow."""
    frappe.get_doc(doc, name).cancel()
def check_link_of_sales_invoice(doc, si_list):
    """Cancel submitted Journal Vouchers posted against each Sales Invoice.

    For every invoice name in ``si_list``, find distinct submitted Journal
    Vouchers referencing it and cancel them via cancel_jv().
    """
    for sales_invoice in si_list:
        # Parameterized query -- the previous str.format() version was open
        # to SQL injection via the invoice name.
        jv_list = frappe.db.sql("""
            select distinct(jvt.parent)
            from `tabJournal Voucher Detail` jvt
            join `tabJournal Voucher` jv on jv.name = jvt.parent
            where jvt.against_invoice = %s and jv.docstatus = 1
        """, (sales_invoice,), as_list=1)
        if jv_list:
            cancel_jv('Journal Voucher', jv_list)
def cancel_jv(doc_name, jv_list):
    """Cancel every Journal Voucher in jv_list.

    Rows are one-element lists, as returned by frappe.db.sql(..., as_list=1).
    """
    for row in jv_list:
        frappe.get_doc(doc_name, row[0]).cancel()
def cancel_sales_order_self(doctype, name):
    """Cancel the originating document once all its linked docs are cancelled."""
    frappe.get_doc(doctype, name).cancel()
@frappe.whitelist()
def set_alternative_item_details(alter_dic, doc):
    """Rewrite Delivery Note rows to use their chosen alternative items.

    alter_dic: JSON mapping of original item_code -> {"item_code", "qty"}.
    doc:       name of the Delivery Note to rewrite.

    For each matching row the item fields are copied from the alternative
    Item, the previous OEM number is kept in old_oem, and actual_qty is set
    to the alternative's qty when that is lower than the row qty.  Returns
    the saved Delivery Note (None when alter_dic is empty).
    """
    if alter_dic:
        alter_dic = json.loads(alter_dic)
        c_doc = frappe.get_doc("Delivery Note", doc)
        for d in c_doc.get('items'):
            # dict.has_key() is Python-2-only; `in` works on both 2 and 3.
            if d.item_code in alter_dic:
                original_item = d.item_code
                alter_item = alter_dic.get(d.item_code)["item_code"]
                aitem_doc = frappe.get_doc("Item", alter_item)
                d.item_code = aitem_doc.item_code
                d.item_name = aitem_doc.item_name
                d.manufacturer_pn = aitem_doc.manufacturer_pn
                d.description = aitem_doc.description
                d.old_oem = d.current_oem
                d.current_oem = aitem_doc.oem_part_number
                d.stock_uom = aitem_doc.stock_uom
                d.sales_item_name = d.item_code
                if alter_dic[original_item]["qty"] < d.qty:
                    d.actual_qty = alter_dic.get(original_item)["qty"]
                if not (aitem_doc.oem_part_number == d.old_oem):
                    d.oem_part_number = aitem_doc.oem_part_number
                else:
                    # Same OEM as before -- annotate instead of silently repeating it.
                    d.oem_part_number = cstr(aitem_doc.oem_part_number)+"(Same as %s)"%d.oem_part_number
        c_doc.save(ignore_permissions=True)
        return c_doc
|
The Pomander Air Conditioners provide atmospheric support by protecting, strengthening and refreshing a living space or workspace. We suggest selecting a Pomander Air Conditioner based on its color energy or supportive qualities, or to complement a personal Pomander or other Aura-Soma products you may currently be using. The formula for the Air Conditioners is somewhat stronger, so only a few short bursts sprayed into the atmosphere will disperse the energies throughout the desired area.
|
import jsonpickle
import json as serializer
from pkg_resources import Requirement, resource_filename
import os
import csv
from Crypto.Cipher import ARC4
import base64
import socket
import getpass
from solidfire.factory import ElementFactory
from filelock import FileLock
import sys
def kv_string_to_dict(kv_string):
    """Parse a comma-separated key=value string into a dict.

    Example: "a=1,b=2" -> {"a": "1", "b": "2"}.  Splits each pair on the
    first '=' only, so values may themselves contain '='.
    """
    new_dict = {}
    for item in kv_string.split(','):
        key, _, value = item.partition('=')
        new_dict[key] = value
    # Bug fix: the parsed dict was built but never returned.
    return new_dict
def print_result(objs, log, as_json=False, as_pickle=False, depth=None, filter_tree=None):
    """Print *objs* as JSON or jsonpickle output (tree mode is disabled in SDK 1.6).

    as_json with a depth or filter_tree is an error; any non-json call falls
    back to pickle output because the tree renderer is unsupported.
    """
    # There are 3 acceptable parameter sets to provide:
    # 1. json=True, depth=None, filter_tree=None
    # 2. json=False, depth=#, filter_tree=None
    # 3. json=False, depth=#, filter_tree=acceptable string
    # Error case
    if as_json and (depth is not None or filter_tree is not None):
        log.error("If you choose to print it as json, do not provide a depth or filter. Those are for printing it as a tree.")
        exit()
    """
    SDK1.6 Note:
    Since print_tree is not supported in 1.6, when both the available output formats
    json and pickle formats are set to False, change the default output format (pickle) to True.
    """
    if as_json == False and as_pickle == False:
        as_pickle = True
    # If json is true, we print it as json and return:
    if as_json == True or as_pickle == True:
        print_result_as_json(objs, as_pickle)
        return
    """
    SDK1.6 Note:
    Commenting out these lines as print_tree is not supported in 1.6.
    """
    """
    # If we have a filter, apply it.
    if filter_tree is not None:
        try:
            objs_to_print = filter_objects_from_simple_keypaths(objs, filter_tree.split(','))
        except Exception as e:
            log.error(e.args[0])
            exit(1)
    else:
        objs_to_print = objs
    # Set up a default depth
    if depth is None:
        depth = 10
    # Next, print the tree to the appropriate depth
    print_result_as_tree(objs_to_print, depth)
    """
def print_result_as_json(objs, pickle=False):
    """Serialize *objs* through jsonpickle and pretty-print the JSON.

    When *pickle* is False, jsonpickle's "py/object" bookkeeping keys are
    stripped (via remove_pickling) before printing.
    """
    raw = serializer.loads(jsonpickle.encode(objs))
    if pickle:
        cleaned = raw
    else:
        cleaned = type(raw)()
        remove_pickling(raw, cleaned)
    print(serializer.dumps(cleaned, indent=4))
def remove_pickling(nestedDict, filteredDict):
    """Recursively copy *nestedDict* into *filteredDict*, dropping every
    "py/object" key that jsonpickle inserts.

    Dicts and lists are rebuilt element by element; scalars pass straight
    through.  Returns the filtered container (or the scalar itself).
    """
    if type(nestedDict) is dict:
        # Copy every entry except jsonpickle's type marker.
        for key, value in nestedDict.items():
            if key == "py/object":
                continue
            filteredDict[key] = remove_pickling(value, type(value)())
        return filteredDict
    if type(nestedDict) is list:
        for element in nestedDict:
            filteredDict.append(remove_pickling(element, type(element)()))
        return filteredDict
    # Scalar leaf: nothing to filter.
    return nestedDict
"""
SDK1.6 Note:
Commenting this as print_tree is not supported in SDK 1.6.
"""
def get_result_as_tree(objs, depth=1, currentDepth=0, lastKey=""):
    """Stub: tree-style rendering is unavailable in SDK 1.6.

    Only announces that fact and returns None.  (The pre-1.6 recursive
    renderer was removed; see version-control history for its code.)
    """
    print("print_tree is not supported in SDK1.6")
def filter_objects_from_simple_keypaths(objs, simpleKeyPaths):
    """Expand dotted key paths into a nested tree and delegate to filter_objects.

    e.g. ['accounts.username', 'accounts.status'] becomes
    {'accounts': {'username': True, 'status': True}}, where True marks a
    leaf to copy verbatim.
    """
    tree = {}
    for path in simpleKeyPaths:
        node = tree
        parts = path.split('.')
        # Descend/create intermediate dicts; the final segment becomes a leaf.
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = True
    return filter_objects(objs, tree)
# Keypaths is arranged as follows:
# it is a nested dict with the order of the keys.
def filter_objects(objs, keyPaths):
    """Project *objs* down to the keys named in the *keyPaths* tree.

    keyPaths is a nested dict such as {"username": True, "volumes": {"Id": True}};
    True marks a leaf to return verbatim.  Lists are mapped element-wise
    without consuming a level of the tree; non-dict objects are unwrapped via
    __dict__.  Raises ValueError when a requested key is absent.
    """
    # Leaf request: hand the value back (lists still fan out below).
    if keyPaths == True and type(objs) is not list:
        return objs
    if type(objs) is list:
        return [filter_objects(element, keyPaths) for element in objs]
    # Work on a plain dict, unwrapping arbitrary objects via __dict__.
    if type(objs) is dict:
        source = objs
    else:
        source = objs.__dict__
    projected = {}
    for key in keyPaths:
        if key not in source:
            raise ValueError("'"+key+"' is not a valid key for this level. Valid keys are: "+','.join(source.keys()))
        projected[key] = filter_objects(source[key], keyPaths[key])
    return projected
def print_result_as_table(objs, keyPaths):
    # NOTE(review): looks unfinished -- the filtered projection is computed
    # but never printed or returned; confirm whether table output was ever
    # wired up before relying on this function.
    filteredDictionary = filter_objects(objs, keyPaths)
def print_result_as_tree(objs, depth=1):
    """Render *objs* via get_result_as_tree (a stub in SDK 1.6) and print it."""
    rendered = get_result_as_tree(objs, depth)
    print(rendered)
def establish_connection(ctx):
    """Resolve connection settings and attach a live Element to ctx.element.

    Precedence: explicit CLI arguments (mvip/username/password) > a cached
    connection selected by index or name > the cached default connection.
    On success the winning config is normalized and (unless --nocache)
    written back as the new default.  Exits the process on any failure.
    """
    # Verify that the mvip does not contain the port number:
    if ctx.mvip and ":" in ctx.mvip:
        ctx.logger.error('Please provide the port using the port parameter.')
        exit(1)
    cfg = None
    # Arguments take precedence regardless of env settings
    if ctx.mvip:
        # Prompt for whichever credential is missing.
        if ctx.username is None:
            ctx.username = getpass.getpass("Username:")
        if ctx.password is None:
            ctx.password = getpass.getpass("Password:")
        # Credentials are stored obfuscated (see encrypt()) in the "b'...'"
        # string form that decrypt() expects.
        cfg = {'mvip': ctx.mvip,
               'username': "b'"+encrypt(ctx.username).decode('utf-8')+"'",
               'password': "b'"+encrypt(ctx.password).decode('utf-8')+"'",
               'port': ctx.port,
               'url': 'https://%s:%s' % (ctx.mvip, ctx.port),
               'version': ctx.version,
               'verifyssl': ctx.verifyssl,
               'timeout': ctx.timeout}
        try:
            ctx.element = ElementFactory.create(cfg["mvip"],decrypt(cfg["username"]),decrypt(cfg["password"]),port=cfg["port"],version=cfg["version"],verify_ssl=cfg["verifyssl"],timeout=cfg["timeout"])
            # Record the API version the cluster actually negotiated.
            ctx.version = ctx.element._api_version
            cfg["version"] = ctx.element._api_version
        except Exception as e:
            ctx.logger.error(e.__str__())
            exit(1)
    # If someone accidentally passed in an argument, but didn't specify everything, throw an error.
    elif ctx.username or ctx.password:
        ctx.logger.error("In order to manually connect, please provide an mvip, a username, AND a password")
    # If someone asked for a given connection or we need to default to using the connection at index 0 if it exists:
    else:
        if ctx.connectionindex is None and ctx.name is None:
            cfg = get_default_connection(ctx)
        elif ctx.connectionindex is not None:
            connections = get_connections(ctx)
            # Accept negative indexes Python-style; reject out-of-range.
            if int(ctx.connectionindex) > (len(connections)-1) or int(ctx.connectionindex) < (-len(connections)):
                ctx.logger.error("Connection "+str(ctx.connectionindex)+" Please provide an index between "+str(-len(connections))+" and "+str(len(connections)-1))
                exit(1)
            cfg = connections[ctx.connectionindex]
        elif ctx.name is not None:
            connections = get_connections(ctx)
            filteredCfg = [connection for connection in connections if connection["name"] == ctx.name]
            if(len(filteredCfg) > 1):
                ctx.logger.error("Your connections.csv file has become corrupted. There are two connections of the same name.")
                exit()
            if(len(filteredCfg) < 1):
                ctx.logger.error("Could not find a connection named "+ctx.name)
                exit()
            cfg = filteredCfg[0]
        # If we managed to find the connection we were looking for, we must try to establish the connection.
        if cfg is not None:
            # Finally, we need to establish our connection via elementfactory:
            try:
                # Non-default ports travel inside the address string.
                if int(cfg["port"]) != 443:
                    address = cfg["mvip"] + ":" + cfg["port"]
                else:
                    address = cfg["mvip"]
                ctx.element = ElementFactory.create(address, decrypt(cfg["username"]), decrypt(cfg["password"]), cfg["version"], verify_ssl=cfg["verifyssl"])
                if int(cfg["timeout"]) != 30:
                    ctx.element.timeout(cfg["timeout"])
            except Exception as e:
                ctx.logger.error(e.__str__())
                ctx.logger.error("The connection is corrupt. Run 'sfcli connection prune' to try and remove all broken connections or use 'sfcli connection remove -n name'")
                ctx.logger.error(cfg)
                exit(1)
    # If we want the json output directly from the source, we'll have to override the send request method in the sdk:
    # This is so that we can circumvent the python objects and get exactly what the json-rpc returns.
    if ctx.json and ctx.element:
        def new_send_request(*args, **kwargs):
            return ctx.element.__class__.__bases__[0].send_request(ctx.element, return_response_raw=True, *args, **kwargs)
        ctx.element.send_request = new_send_request
    # The only time it is none is when we're asking for help or we're trying to store a connection.
    # If that's not what we're doing, we catch it later.
    if cfg is not None:
        cfg["port"] = int(cfg["port"])
        ctx.cfg = cfg
        cfg["name"] = cfg.get("name", "default")
        if not ctx.nocache:
            write_default_connection(ctx, cfg)
    if ctx.element is None:
        ctx.logger.error("You must establish at least one connection and specify which you intend to use.")
        exit()
# this needs to be atomic.
def get_connections(ctx):
    """Read all cached connections from connections.csv (under a file lock).

    Returns a list of dicts with 'version' coerced to float and 'verifyssl'
    to bool; an empty list when the cache file does not exist yet.  Exits
    the process when the file exists but cannot be read.
    """
    connectionsCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "connections.csv")
    connectionsLock = resource_filename(Requirement.parse("solidfire-cli"), "connectionsLock")
    if os.path.exists(connectionsCsvLocation):
        try:
            with FileLock(connectionsLock):
                with open(connectionsCsvLocation, 'r') as connectionFile:
                    connections = list(csv.DictReader(connectionFile, delimiter=','))
        except Exception as e:
            ctx.logger.error("Problem reading "+connectionsCsvLocation+" because: "+str(e.args)+" Try changing the permissions of that file.")
            exit(1)
    else:
        connections = []
    # Normalize the string values the csv module hands back.
    for connection in connections:
        connection["version"] = float(connection["version"])
        if connection.get("verifyssl") == "True":
            connection["verifyssl"] = True
        else:
            connection["verifyssl"] = False
    return connections
def write_connections(ctx, connections):
    """Rewrite connections.csv with the given connection dicts.

    NOTE(review): the file is opened *before* the lock is acquired, while
    get_connections locks before opening -- confirm the intended ordering.
    """
    try:
        connectionsCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "connections.csv")
        connectionsLock = resource_filename(Requirement.parse("solidfire-cli"), "connectionsLock")
        with open(connectionsCsvLocation, 'w') as f:
            with FileLock(connectionsLock):
                w = csv.DictWriter(f, ["name","mvip","port","username","password","version","url","verifyssl","timeout"], lineterminator='\n')
                w.writeheader()
                for connection in connections:
                    if connection is not None:
                        w.writerow(connection)
    except Exception as e:
        ctx.logger.error("Problem writing "+ connectionsCsvLocation + " " + str(e.args)+" Try changing the permissions of that file.")
        exit(1)
def get_default_connection(ctx):
    """Return the cached default connection dict, or exit(1) with guidance.

    Reads default_connection.csv under a file lock and normalizes the
    'version' (float) and 'verifyssl' (bool) fields of the first row.
    """
    connectionCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "default_connection.csv")
    if os.path.exists(connectionCsvLocation):
        defaultLockLocation = resource_filename(Requirement.parse("solidfire-cli"), "defaultLock")
        try:
            with FileLock(defaultLockLocation):
                with open(connectionCsvLocation) as connectionFile:
                    connection = list(csv.DictReader(connectionFile, delimiter=','))
        except Exception as e:
            ctx.logger.error("Problem reading "+connectionCsvLocation+" because: "+str(e.args)+" Try changing the permissions of that file or specifying credentials.")
            exit(1)
        if len(connection)>0:
            connection[0]["version"] = float(connection[0]["version"])
            if(connection[0]["verifyssl"] == "True"):
                connection[0]["verifyssl"] = True
            else:
                connection[0]["verifyssl"] = False
            return connection[0]
        else:
            # Empty cache file: drop the stale lock file and ask the user
            # to supply credentials explicitly.
            os.remove(defaultLockLocation)
            ctx.logger.error("Please provide connection information. There is no connection info in cache at this time.")
            exit(1)
    else:
        ctx.logger.error("Please provide connection information. There is no connection info in cache at this time.")
        exit(1)
def write_default_connection(ctx, connection):
    """Persist *connection* as the single row of default_connection.csv.

    Best effort: a failure only logs a warning (callers can pass --nocache
    to skip caching entirely).
    """
    connectionCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "default_connection.csv")
    try:
        defaultLockLocation = resource_filename(Requirement.parse("solidfire-cli"), "defaultLock")
        with FileLock(defaultLockLocation):
            with open(connectionCsvLocation, 'w') as f:
                w = csv.DictWriter(f, ["name", "mvip", "port", "username", "password", "version", "url", "verifyssl", "timeout"],
                                   lineterminator='\n')
                w.writeheader()
                w.writerow(connection)
    except Exception as e:
        ctx.logger.warning("Problem writing "+ connectionCsvLocation + " " + str(e.args)+" Try using changing the permissions of that file or using the --nocache flag.")
# WARNING! This doesn't actually give us total security. It only gives us obscurity.
def encrypt(sensitive_data):
    """Obfuscate (NOT secure -- see warning above) a credential.

    RC4 with a hostname-derived key, then base64.  Mirrored by decrypt().
    """
    key = (socket.gethostname() + "SOLIDFIRE").encode('utf-8')
    cipher = ARC4.new(key)
    return base64.b64encode(cipher.encrypt(sensitive_data.encode('utf-8')))
def decrypt(encoded_sensitive_data):
    """Reverse encrypt().

    Strips the literal "b'...'" wrapper (characters [2:-1]) that the stored
    form carries, base64-decodes, then RC4-decrypts with the same
    hostname-derived key.
    """
    key = (socket.gethostname() + "SOLIDFIRE").encode('utf-8')
    payload = base64.b64decode(encoded_sensitive_data[2:-1])
    return ARC4.new(key).decrypt(payload).decode('utf-8')
|
OzPol Allied Health Clinic offers a holistic and multidisciplinary approach to ensuring your optimal health.
Quite simply – we are committed to your health!
Our team of highly professional, dedicated Allied Health Practitioners provides you with a ‘One Stop’ solution for all your wellness needs.
OzPol’s qualified staff work in a multidisciplinary team approach – this means we all work together to achieve the best possible health outcomes for you, our valued client.
We all attend a minimum of 20 hours of continuing professional development each year to ensure that we are utilizing the best techniques and the latest research to reach your goals.
Our clinic is able to offer appointments for as little as $10 (for eligible clients). Please click here to check eligibility.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-05 10:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add description, is_prospective, link and logo fields to StudentGroup."""

    dependencies = [
        # matte must provide MatteImage before the logo FK can be created.
        ('matte', '0002_remoteimage'),
        ('studentgroups', '0004_auto_20170703_1633'),
    ]
    operations = [
        migrations.AddField(
            model_name='studentgroup',
            name='description',
            field=models.TextField(default=''),
        ),
        migrations.AddField(
            model_name='studentgroup',
            name='is_prospective',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='studentgroup',
            name='link',
            field=models.CharField(default='', max_length=255),
        ),
        migrations.AddField(
            model_name='studentgroup',
            name='logo',
            # Nullable so existing rows migrate without a logo.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='matte.MatteImage'),
        ),
    ]
|
Coloured sheep look fantastic in the paddock.
Coloured sheep look fantastic in a paddock at anytime of the year.
Coloured sheep are already that... naturally coloured!
We wanted not only organic wool but completely chemical-free wool. Wool that is artificially coloured requires a lot of nasty chemicals in the dyeing and softening process. Commercial wool goes through very harsh processes. The natural fibre look is lost. Chemicals are used as colour dyes as well.
The colours in our natural coloured fleeces can not be reproduced... they are unique. The same coloured sheep every year will produce a different coloured fleece. No fleece colour is ever the same. Basic stitches, basic patterns look wonderful when natural coloured wool is used.
I wanted natural colour wool! I wanted my clothing to be chemical free and look natural!
Coloured sheep produce coloured wool with natural colour character!
Multi-coloured fleeces require no blending. Harrie produces a very interesting jumper!
|
import sqlite3
from breezeblocks import Database
from breezeblocks.sql import Table
# Setup
# Connect to an existing SQLite database and describe the tables this demo
# uses (breezeblocks needs the column names declared up front).
db = Database(dsn="Library.sqlite", dbapi_module=sqlite3)
authors = Table("Author", ["id", "name"])
genres = Table("Genre", ["id", "name"])
books = Table("Book", ["id", "author_id", "genre_id", "title"])
# Query
get_all_authors = db.query(authors).get()
get_all_genre_names = db.query(genres.columns["name"]).get()
get_all_book_titles_and_ids = db.query(
    books.columns["id"], books.columns["title"]).get()
for author in get_all_authors.execute():
    print(author.id, author.name)
for genre in get_all_genre_names.execute():
    print(genre.name)
for book in get_all_book_titles_and_ids.execute():
    print(book.id, book.title)
# Insert
insert_books = db.insert(books).add_columns(
    "author_id", "genre_id", "title").get()
# Look up the foreign keys for the rows we are about to insert.
jkr_query = db.query(authors.columns["id"])\
    .where(authors.columns["name"] == "J.K. Rowling").get()
jkr_id = jkr_query.execute()[0].id
fantasy_query = db.query(genres.columns["id"])\
    .where(genres.columns["name"] == "Fantasy").get()
fantasy_id = fantasy_query.execute()[0].id
# "Deadly Hallows" is misspelled on purpose -- the Update section below
# demonstrates correcting it to "Deathly Hallows".
insert_books.execute([
    (jkr_id, fantasy_id, "Harry Potter and the Deadly Hallows"),
    (jkr_id, fantasy_id, "Harry Potter and the Sorceror's Stone")
])
# Update
update_deadly_hallows = db.update(books)\
    .set_(books.columns["title"], "Harry Potter and the Deathly Hallows")\
    .where(books.columns["title"] == "Harry Potter and the Deadly Hallows")\
    .get()
update_deadly_hallows.execute()
# Delete
delete_sorcerors_stone = db.delete(books)\
    .where(
        books.columns["title"] == "Harry Potter and the Sorceror's Stone"
    ).get()
delete_sorcerors_stone.execute()
|
For companies with a distribution network for industrial knives, such as graphic art retailers, the Win-Win Network offers a true platform for USP.
As a reseller of VITECH knives, the focus of a sales conversation is not the lowest price, but rather the 60-fold service life/durability of VITECH knives without re-sharpening and without production downtime during cutting.
VITECH knives are the ideal customer retention instrument. This is not only due to the unequalled knife durability.
The second chance for resellers is the option to build up their own networks with grinding shops, with a guaranteed win-win situation.
Grinding shops with the know-how to sharpen VITECH knives are certified as trained specialists, which also provides a unique value proposition and clear distinction from the competition.
|
import serf
import pytest
from _base import FakeClient, FakeConnection
def test_request_auth () :
    """`auth` request validation: a str AuthKey passes check(); a non-str or
    missing AuthKey raises InvalidRequest and leaves the request unchecked."""
    _body = dict(
        AuthKey='auth-key',
    )
    _request = serf.get_request_class('auth')(**_body)
    _request.check(FakeClient(), )
    assert _request.is_checked
    _body = dict(
        AuthKey=1, # `AuthKey` must be str
    )
    _request = serf.get_request_class('auth')(**_body)
    with pytest.raises(serf.InvalidRequest, ) :
        _request.check(FakeClient(), )
    assert not _request.is_checked
    _body = dict( # empty values
    )
    _request = serf.get_request_class('auth')(**_body)
    with pytest.raises(serf.InvalidRequest, ) :
        _request.check(FakeClient(), )
    assert not _request.is_checked
class AuthFakeConnectionFailed (FakeConnection, ) :
    # Canned wire frames (presumably msgpack maps with Error/Seq keys):
    # seq 0 = handshake OK (empty Error), seq 1 = auth rejected.
    socket_data = (
        '\x82\xa5Error\xa0\xa3Seq\x00',
        '\x82\xa5Error\xbcInvalid authentication token\xa3Seq\x01',
    )
def test_response_auth_failed () :
    """A rejected auth raises AuthenticationError and leaves the client
    unauthenticated."""
    _client = serf.Client(connection_class=AuthFakeConnectionFailed, )
    def _callback (response, ) :
        # The error response for the auth command arrives with seq 1.
        assert response.request.command == 'auth'
        assert response.error
        assert not response.is_success
        assert response.body is None
        assert response.seq == 1
    _body = dict(
        AuthKey='this-is-bad-authkey',
    )
    assert not _client.is_authed
    with pytest.raises(serf.AuthenticationError, ) :
        _client.auth(**_body).add_callback(_callback, ).request()
    assert not _client.is_authed
class AuthFakeConnectionSuccess (FakeConnection, ) :
    # seq 0 = handshake OK, seq 1 = auth accepted (both with empty Error).
    socket_data = (
        '\x82\xa5Error\xa0\xa3Seq\x00',
        '\x82\xa5Error\xa0\xa3Seq\x01',
    )
def test_response_auth_success () :
    """A successful auth marks the client authenticated and delivers a
    success response (seq 1, empty body) to the callback."""
    _client = serf.Client(connection_class=AuthFakeConnectionSuccess, )
    def _callback (response, ) :
        assert response.request.command == 'auth'
        assert not response.error
        assert response.is_success
        assert response.body is None
        assert response.seq == 1
    _body = dict(
        AuthKey='this-is-valid-authkey',
    )
    assert not _client.is_authed
    _client.auth(**_body).add_callback(_callback, ).request()
    assert _client.is_authed
class AuthFakeConnectionForceLeaveSuccess (FakeConnection, ) :
    # seq 0 = handshake, then one frame carrying two responses:
    # seq 1 = implicit auth OK, seq 2 = force_leave OK.
    socket_data = (
        '\x82\xa5Error\xa0\xa3Seq\x00',
        '\x82\xa5Error\xa0\xa3Seq\x01\x82\xa5Error\xa0\xa3Seq\x02',
    )
def test_implicit_authentication_with_host_url_success () :
    """An AuthKey given in the host URL triggers implicit auth (seq 1) before
    the first command, so force_leave succeeds with seq 2."""
    def _callback (response, ) :
        assert response.request.command == 'force_leave'
        assert not response.error
        assert response.is_success
        assert response.body is None
        assert response.seq == 2
    _body = dict(
        Node='node0',
    )
    _auth_key = 'this-is-valid-authkey'
    _client = serf.Client(
        'serf://127.0.0.1:7373?AuthKey=%s' % _auth_key,
        connection_class=AuthFakeConnectionForceLeaveSuccess,
    )
    assert not _client.is_authed
    _client.force_leave(**_body).add_callback(_callback, ).request()
    assert _client.is_authed
class AuthFakeConnectionForceLeaveFailed (FakeConnection, ) :
    # seq 0 = handshake OK, seq 1 = implicit auth rejected.
    socket_data = (
        '\x82\xa5Error\xa0\xa3Seq\x00',
        '\x82\xa5Error\xbcInvalid authentication token\xa3Seq\x01',
    )
def test_implicit_authentication_with_host_url () :
    """Implicit auth from the host URL fails: AuthenticationError is raised
    and the client stays unauthenticated."""
    def _callback (response, ) :
        # NOTE(review): these asserts mirror the success-case callback and
        # presumably never run, since auth fails at seq 1 before force_leave
        # gets a response -- confirm the callback is indeed not invoked.
        assert response.request.command == 'force_leave'
        assert not response.error
        assert response.is_success
        assert response.body is None
        assert response.seq == 2
    _body = dict(
        Node='node0',
    )
    _auth_key = 'this-is-valid-authkey'
    _client = serf.Client(
        'serf://127.0.0.1:7373?AuthKey=%s' % _auth_key,
        connection_class=AuthFakeConnectionForceLeaveFailed,
    )
    assert not _client.is_authed
    with pytest.raises(serf.AuthenticationError, ) :
        _client.force_leave(**_body).add_callback(_callback, ).request()
    assert not _client.is_authed
|
← Co-writing May Be Your Ticket To Success!
Free Guitar Lessons Online! Over 25,000 lessons available, no charge!
|
# -*- coding: utf-8 -*-
"""
Manwë command line interface.
Todo: Move some of the docstring from the _old_population_study.py file here.
.. moduleauthor:: Martijn Vermaat <martijn@vermaat.name>
.. Licensed under the MIT license, see the LICENSE file.
"""
import argparse
import getpass
import itertools
import os
import re
import sys
from clint import textui
from .config import Config
from .errors import (ApiError, BadRequestError, UnauthorizedError,
ForbiddenError, NotFoundError)
from .resources import USER_ROLES
from .session import Session
# Well-known configuration file locations: system-wide, then per-user
# (honoring $XDG_CONFIG_HOME, defaulting to ~/.config).
SYSTEM_CONFIGURATION = '/etc/manwe/config'
USER_CONFIGURATION = os.path.join(
    os.environ.get('XDG_CONFIG_HOME', None) or
    os.path.join(os.path.expanduser('~'), '.config'),
    'manwe', 'config')
class UserError(Exception):
    """Raised for user-level errors such as unknown URIs or invalid arguments."""
    pass
def log(message):
    """Write *message* plus a newline to standard error."""
    line = '%s\n' % message
    sys.stderr.write(line)
def abort(message=None):
    """Optionally log an error *message*, then exit with status 1."""
    if message:
        log('error: %s' % message)
    sys.exit(1)
def wait_for_tasks(*tasks):
    """Block until all *tasks* finish, driving one combined progress bar.

    Each task's monitor yields percentages; the bar shows the average across
    tasks.  Uses Python 2's itertools.izip_longest, padding finished tasks
    with 100.
    """
    with textui.progress.Bar(expected_size=100) as bar:
        for percentages in itertools.izip_longest(
                *[task.wait_and_monitor() for task in tasks], fillvalue=100):
            # We treat the `None` percentage (waiting) as `0` (running).
            bar.show(sum(percentage for percentage in percentages
                         if percentage is not None) // len(tasks))
def list_samples(session, public=False, user=None, groups=None):
    """
    List samples.

    Optional filters: public-only, owning user URI, and/or group URIs.
    Prints a short summary block per sample, separated by blank lines.
    """
    groups = groups or []
    filters = {}
    if public:
        filters.update(public=True)
    if user:
        filters.update(user=user)
    if groups:
        filters.update(groups=groups)
    samples = session.samples(**filters)
    for i, sample in enumerate(samples):
        if i:
            print
        print 'Sample: %s' % sample.uri
        print 'Name: %s' % sample.name
        print 'Pool size: %i' % sample.pool_size
        print 'Visibility: %s' % ('public' if sample.public else 'private')
        print 'State: %s' % ('active' if sample.active else 'inactive')
def show_sample(session, uri):
    """
    Show sample details.

    Prints core fields, the owning user, group memberships, and the import
    task state of every variation and coverage attached to the sample.
    Raises UserError when *uri* does not resolve to a sample.
    """
    try:
        sample = session.sample(uri)
    except NotFoundError:
        raise UserError('Sample does not exist: "%s"' % uri)
    print 'Sample: %s' % sample.uri
    print 'Name: %s' % sample.name
    print 'Pool size: %i' % sample.pool_size
    print 'Visibility: %s' % ('public' if sample.public else 'private')
    print 'State: %s' % ('active' if sample.active else 'inactive')
    print
    print 'User: %s' % sample.user.uri
    print 'Name: %s' % sample.user.name
    for group in sample.groups:
        print
        print 'Group: %s' % group.uri
        print 'Name: %s' % group.name
    for variation in session.variations(sample=sample):
        print
        print 'Variation: %s' % variation.uri
        task = variation.task
        if task.running:
            # Running tasks also expose a progress percentage.
            print 'Task state: %s (%d%%)' % (task.state, task.progress)
        else:
            print 'Task state: %s' % task.state
        if task.failure:
            print 'Task error: %s' % task.error.message
    for coverage in session.coverages(sample=sample):
        print
        print 'Coverage: %s' % coverage.uri
        task = coverage.task
        if task.running:
            print 'Task state: %s (%d%%)' % (task.state, task.progress)
        else:
            print 'Task state: %s' % task.state
        if task.failure:
            print 'Task error: %s' % task.error.message
def activate_sample(session, uri):
    """
    Activate sample.

    Sets the sample's active flag and saves it server-side.  Raises
    UserError when *uri* does not resolve to a sample.
    """
    try:
        sample = session.sample(uri)
    except NotFoundError:
        raise UserError('Sample does not exist: "%s"' % uri)
    sample.active = True
    sample.save()
    log('Activated sample: %s' % sample.uri)
def annotate_sample_variations(session, uri, queries=None, wait=False):
    """
    Annotate sample variations with variant frequencies.

    Starts one annotation task per variation data source; with *wait* the
    call blocks until all tasks complete.  Raises UserError for an unknown
    sample URI.
    """
    queries = queries or {}
    try:
        sample = session.sample(uri)
    except NotFoundError:
        raise UserError('Sample does not exist: "%s"' % uri)
    tasks = []
    for variation in session.variations(sample=sample):
        annotation = session.create_annotation(
            variation.data_source, queries=queries)
        log('Started annotation: %s' % annotation.uri)
        tasks.append(annotation.task)
    if not wait:
        return
    wait_for_tasks(*tasks)
    log('Annotated variations for sample: %s' % sample.uri)
def add_sample(session, name, groups=None, pool_size=1, public=False,
               no_coverage_profile=False):
    """
    Add sample.

    Creates a sample named *name* (optionally public, optionally without a
    coverage profile) in the given group URIs.  Raises UserError when
    pool_size is below 1; returns the created sample.
    """
    if pool_size < 1:
        raise UserError('Pool size should be at least 1')
    group_resources = [session.group(uri) for uri in (groups or [])]
    sample = session.create_sample(name, groups=group_resources,
                                   pool_size=pool_size,
                                   coverage_profile=not no_coverage_profile,
                                   public=public)
    log('Added sample: %s' % sample.uri)
    return sample
def import_sample(session, name, groups=None, pool_size=1, public=False,
                  no_coverage_profile=False, vcf_files=None, bed_files=None,
                  data_uploaded=False, prefer_genotype_likelihoods=False,
                  wait=False):
    """
    Add sample and import variation and coverage files.

    Creates the sample, registers one data source per VCF/BED file (a
    server-side path when *data_uploaded*, else the opened file contents),
    and starts one import task per file.  With *wait* the call blocks until
    all tasks finish.  Requires at least one BED file unless
    *no_coverage_profile* is set.
    """
    vcf_files = vcf_files or []
    bed_files = bed_files or []
    if not no_coverage_profile and not bed_files:
        raise UserError('Expected at least one BED file')
    # Todo: Nice error if file cannot be read.
    vcf_sources = [({'local_file': vcf_file}, vcf_file) if data_uploaded else
                   ({'data': open(vcf_file)}, vcf_file)
                   for vcf_file in vcf_files]
    bed_sources = [({'local_file': bed_file}, bed_file) if data_uploaded else
                   ({'data': open(bed_file)}, bed_file)
                   for bed_file in bed_files]
    sample = add_sample(session, name, groups=groups, pool_size=pool_size,
                        public=public, no_coverage_profile=no_coverage_profile)
    tasks = []
    for source, filename in vcf_sources:
        data_source = session.create_data_source(
            'Variants from file "%s"' % filename,
            filetype='vcf',
            gzipped=filename.endswith('.gz'),
            **source)
        log('Added data source: %s' % data_source.uri)
        variation = session.create_variation(
            sample, data_source,
            prefer_genotype_likelihoods=prefer_genotype_likelihoods)
        log('Started variation import: %s' % variation.uri)
        tasks.append(variation.task)
    for source, filename in bed_sources:
        data_source = session.create_data_source(
            'Regions from file "%s"' % filename,
            filetype='bed',
            gzipped=filename.endswith('.gz'),
            **source)
        log('Added data source: %s' % data_source.uri)
        coverage = session.create_coverage(sample, data_source)
        log('Started coverage import: %s' % coverage.uri)
        tasks.append(coverage.task)
    if not wait:
        return
    wait_for_tasks(*tasks)
    log('Imported variations and coverages for sample: %s' % sample.uri)
def import_variation(session, uri, vcf_file, data_uploaded=False,
                     prefer_genotype_likelihoods=False, wait=False):
    """
    Import variation file for existing sample.

    Registers *vcf_file* as a data source (server-side path when
    *data_uploaded*) and starts a variation import task; with *wait* the
    call blocks until the task finishes.  Raises UserError for an unknown
    sample URI.
    """
    # Todo: Nice error if file cannot be read.
    if data_uploaded:
        source = {'local_file': vcf_file}
    else:
        source = {'data': open(vcf_file)}
    try:
        sample = session.sample(uri)
    except NotFoundError:
        raise UserError('Sample does not exist: "%s"' % uri)
    data_source = session.create_data_source(
        'Variants from file "%s"' % vcf_file,
        filetype='vcf',
        gzipped=vcf_file.endswith('.gz'),
        **source)
    log('Added data source: %s' % data_source.uri)
    variation = session.create_variation(
        sample, data_source,
        prefer_genotype_likelihoods=prefer_genotype_likelihoods)
    log('Started variation import: %s' % variation.uri)
    if not wait:
        return
    wait_for_tasks(variation.task)
    log('Imported variation: %s' % variation.uri)
def import_coverage(session, uri, bed_file, data_uploaded=False, wait=False):
    """
    Import coverage file for existing sample.

    :arg session: Manwë API session.
    :arg uri: URI of the sample to import covered regions into.
    :arg bed_file: Path to a file in BED format (optionally gzipped).
    :arg data_uploaded: If `True`, `bed_file` refers to a file already
        uploaded to the server and is not opened locally.
    :arg wait: If `True`, block until the import task has finished.
    """
    if data_uploaded:
        # The server already has the file; reference it by path only.
        source = {'local_file': bed_file}
    else:
        try:
            source = {'data': open(bed_file)}
        except IOError as e:
            # Surface unreadable files as a friendly user error instead of
            # an unhandled traceback (resolves the old Todo).
            raise UserError('Cannot read file "%s": %s' % (bed_file, e))
    try:
        sample = session.sample(uri)
    except NotFoundError:
        raise UserError('Sample does not exist: "%s"' % uri)
    data_source = session.create_data_source(
        'Regions from file "%s"' % bed_file,
        filetype='bed',
        gzipped=bed_file.endswith('.gz'),
        **source)
    log('Added data source: %s' % data_source.uri)
    coverage = session.create_coverage(sample, data_source)
    log('Started coverage import: %s' % coverage.uri)
    if not wait:
        return
    wait_for_tasks(coverage.task)
    log('Imported coverage: %s' % coverage.uri)
def list_groups(session):
"""
List groups.
"""
groups = session.groups()
for i, group in enumerate(groups):
if i:
print
print 'Group: %s' % group.uri
print 'Name: %s' % group.name
def show_group(session, uri):
"""
Show group details.
"""
try:
group = session.group(uri)
except NotFoundError:
raise UserError('Group does not exist: "%s"' % uri)
print 'Group: %s' % group.uri
print 'Name: %s' % group.name
def add_group(session, name):
    """
    Add a sample group.
    """
    # Create the group server-side and report its URI.
    log('Added group: %s' % session.create_group(name).uri)
def list_users(session):
"""
List users.
"""
users = session.users()
for i, user in enumerate(users):
if i:
print
print 'User: %s' % user.uri
print 'Name: %s' % user.name
print 'Login: %s' % user.login
print 'Roles: %s' % ', '.join(sorted(user.roles))
def show_user(session, uri):
"""
Show user details.
"""
try:
user = session.user(uri)
except NotFoundError:
raise UserError('User does not exist: "%s"' % uri)
print 'User: %s' % user.uri
print 'Name: %s' % user.name
print 'Login: %s' % user.login
print 'Roles: %s' % ', '.join(sorted(user.roles))
def add_user(session, login, name=None, roles=None):
    """
    Add an API user (queries for password).
    """
    # Validate the login before prompting for anything.
    if not re.match('[a-zA-Z][a-zA-Z0-9._-]*$', login):
        raise UserError('User login must match "[a-zA-Z][a-zA-Z0-9._-]*"')
    password = getpass.getpass('Please provide a password for the new user: ')
    confirmation = getpass.getpass('Repeat: ')
    if password != confirmation:
        raise UserError('Passwords did not match')
    # Fall back to the login as display name and an empty role list.
    user = session.create_user(login, password, name=name or login,
                               roles=roles or [])
    log('Added user: %s' % user.uri)
def list_data_sources(session, user=None):
"""
List data sources.
"""
filters = {}
if user:
filters.update(user=user)
data_sources = session.data_sources(**filters)
for i, data_source in enumerate(data_sources):
if i:
print
print 'Data source: %s' % data_source.uri
print 'Name: %s' % data_source.name
print 'Filetype: %s' % data_source.filetype
def show_data_source(session, uri):
"""
Show data source details.
"""
try:
data_source = session.data_source(uri)
except NotFoundError:
raise UserError('Data source does not exist: "%s"' % uri)
print 'Data source: %s' % data_source.uri
print 'Name: %s' % data_source.name
print 'Filetype: %s' % data_source.filetype
print
print 'User: %s' % data_source.user.uri
print 'Name: %s' % data_source.user.name
def data_source_data(session, uri):
    """
    Download data source and write data to standard output.
    """
    try:
        data_source = session.data_source(uri)
    except NotFoundError:
        raise UserError('Data source does not exist: "%s"' % uri)
    # Stream the content chunk by chunk to avoid buffering it in memory.
    write = sys.stdout.write
    for chunk in data_source.data:
        write(chunk)
def annotate_data_source(session, uri, queries=None, wait=False):
    """
    Annotate data source with variant frequencies.
    """
    try:
        data_source = session.data_source(uri)
    except NotFoundError:
        raise UserError('Data source does not exist: "%s"' % uri)
    annotation = session.create_annotation(
        data_source, queries=queries or {})
    log('Started annotation: %s' % annotation.uri)
    # Only block on the server-side task when explicitly asked to.
    if wait:
        wait_for_tasks(annotation.task)
        log('Annotated data source: %s' % annotation.annotated_data_source.uri)
def annotate_vcf(session, vcf_file, data_uploaded=False, queries=None,
                 wait=False):
    """
    Annotate VCF file with variant frequencies.

    :arg session: Manwë API session.
    :arg vcf_file: Path to a file in VCF 4.1 format (optionally gzipped).
    :arg data_uploaded: If `True`, `vcf_file` refers to a file already
        uploaded to the server and is not opened locally.
    :arg queries: Optional mapping of annotation query names to expressions.
    :arg wait: If `True`, block until the annotation task has finished.
    """
    queries = queries or {}
    if data_uploaded:
        # The server already has the file; reference it by path only.
        source = {'local_file': vcf_file}
    else:
        try:
            source = {'data': open(vcf_file)}
        except IOError as e:
            # Surface unreadable files as a friendly user error instead of
            # an unhandled traceback (resolves the old Todo).
            raise UserError('Cannot read file "%s": %s' % (vcf_file, e))
    data_source = session.create_data_source(
        'Variants from file "%s"' % vcf_file,
        filetype='vcf',
        gzipped=vcf_file.endswith('.gz'),
        **source)
    log('Added data source: %s' % data_source.uri)
    annotation = session.create_annotation(
        data_source, queries=queries)
    log('Started annotation: %s' % annotation.uri)
    if not wait:
        return
    wait_for_tasks(annotation.task)
    log('Annotated VCF file: %s' % annotation.annotated_data_source.uri)
def annotate_bed(session, bed_file, data_uploaded=False, queries=None,
                 wait=False):
    """
    Annotate BED file with variant frequencies.

    :arg session: Manwë API session.
    :arg bed_file: Path to a file in BED format (optionally gzipped).
    :arg data_uploaded: If `True`, `bed_file` refers to a file already
        uploaded to the server and is not opened locally.
    :arg queries: Optional mapping of annotation query names to expressions.
    :arg wait: If `True`, block until the annotation task has finished.
    """
    queries = queries or {}
    if data_uploaded:
        # The server already has the file; reference it by path only.
        source = {'local_file': bed_file}
    else:
        try:
            source = {'data': open(bed_file)}
        except IOError as e:
            # Surface unreadable files as a friendly user error instead of
            # an unhandled traceback (resolves the old Todo).
            raise UserError('Cannot read file "%s": %s' % (bed_file, e))
    data_source = session.create_data_source(
        'Regions from file "%s"' % bed_file,
        filetype='bed',
        gzipped=bed_file.endswith('.gz'),
        **source)
    log('Added data source: %s' % data_source.uri)
    annotation = session.create_annotation(
        data_source, queries=queries)
    log('Started annotation: %s' % annotation.uri)
    if not wait:
        return
    wait_for_tasks(annotation.task)
    log('Annotated BED file: %s' % annotation.annotated_data_source.uri)
def create_config(filename=None):
    """
    Create a Manwë configuration object.

    Values start out as defined in the :mod:`manwe.default_config` module.
    If `filename` is given, configuration is read from that file only.
    Otherwise, `SYSTEM_CONFIGURATION` and then `USER_CONFIGURATION` are
    read (when they exist); values from the latter override the former.

    :arg filename: Optional filename to read configuration from. If present,
      this overrides automatic detection of configuration file location.
    :type filename: str

    :return: Manwë configuration object.
    :rtype: config.Config
    """
    config = Config()
    if filename:
        # An explicit file overrides the default lookup locations.
        config.from_pyfile(filename)
        return config
    # System-wide settings first, then per-user overrides.
    for location in (SYSTEM_CONFIGURATION, USER_CONFIGURATION):
        if os.path.isfile(location):
            config.from_pyfile(location)
    return config
def main():
    """
    Manwë command line interface.

    Builds the argparse command tree (samples, groups, users, data-sources,
    annotate-vcf, annotate-bed), dispatches the chosen subcommand to its
    handler function, and translates API errors into friendly aborts.
    """
    class UpdateAction(argparse.Action):
        """
        Custom argparse action to store a pair of values as key and value in a
        dictionary.
        Example usage::
            >>> p.add_argument(
            ...     '-c', dest='flower_colors', nargs=2,
            ...     metavar=('FLOWER', 'COLOR'), action=UpdateAction,
            ...     help='set flower color (multiple allowed)')
        """
        def __init__(self, *args, **kwargs):
            if kwargs.get('nargs') != 2:
                raise ValueError('nargs for update actions must be 2')
            super(UpdateAction, self).__init__(*args, **kwargs)
        def __call__(self, parser, namespace, values, option_string=None):
            # Accumulate repeated occurrences into one dictionary.
            key, value = values
            d = getattr(namespace, self.dest) or {}
            d[key] = value
            setattr(namespace, self.dest, d)
    # Shared parent parser so every subcommand also accepts -c/--config.
    config_parser = argparse.ArgumentParser(add_help=False)
    config_parser.add_argument(
        '-c', '--config', metavar='FILE', type=str, dest='config',
        help='path to configuration file to use instead of looking in '
        'default locations')
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n\n')[0], parents=[config_parser])
    subparsers = parser.add_subparsers(
        title='subcommands', dest='subcommand', help='subcommand help')
    # Subparsers for 'samples'.
    s = subparsers.add_parser(
        'samples', help='manage samples', description='Manage sample resources.'
    ).add_subparsers()
    # Subparser 'samples list'.
    p = s.add_parser(
        'list', help='list samples',
        description=list_samples.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=list_samples)
    p.add_argument(
        '-p', '--public', dest='public', action='store_true',
        help='only public samples')
    p.add_argument(
        '-u', '--user', dest='user', metavar='URI',
        help='filter samples by user')
    p.add_argument(
        '-g', '--group', dest='groups', metavar='URI', action='append',
        help='filter samples by group (more than one allowed)')
    # Subparser 'samples show'.
    p = s.add_parser(
        'show', help='show sample details',
        description=show_sample.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=show_sample)
    p.add_argument(
        'uri', metavar='URI', type=str, help='sample')
    # Subparser 'samples activate'.
    p = s.add_parser(
        'activate', help='activate sample',
        description=activate_sample.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=activate_sample)
    p.add_argument(
        'uri', metavar='URI', type=str, help='sample')
    # Subparser 'samples annotate-variations'.
    p = s.add_parser(
        'annotate-variations', help='annotate sample variations',
        description=annotate_sample_variations.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=annotate_sample_variations)
    p.add_argument(
        'uri', metavar='URI', type=str, help='sample')
    p.add_argument(
        '-q', '--query', dest='queries', nargs=2, action=UpdateAction,
        metavar=('NAME', 'EXPRESSION'), help='annotation query (more than '
        'one allowed)')
    p.add_argument(
        '-w', '--wait', dest='wait', action='store_true',
        help='wait for annotations to complete (blocking)')
    # Subparser 'samples add'.
    p = s.add_parser(
        'add', help='add sample',
        description=add_sample.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=add_sample)
    p.add_argument(
        'name', metavar='NAME', type=str, help='sample name')
    p.add_argument(
        '-g', '--group', dest='groups', metavar='URI', action='append',
        help='sample group (more than one allowed)')
    p.add_argument(
        '-s', '--pool-size', dest='pool_size', default=1, type=int,
        help='number of individuals in sample (default: 1)')
    p.add_argument(
        '-p', '--public', dest='public', action='store_true',
        help='sample data is public')
    p.add_argument(
        '--no-coverage-profile', dest='no_coverage_profile', action='store_true',
        help='sample has no coverage profile')
    # Subparser 'samples import'.
    p = s.add_parser(
        'import', help='add sample and import data',
        description=import_sample.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=import_sample)
    p.add_argument(
        'name', metavar='NAME', type=str, help='sample name')
    p.add_argument(
        '-g', '--group', dest='groups', metavar='URI', action='append',
        help='sample group (more than one allowed)')
    p.add_argument(
        '--vcf', metavar='VCF_FILE', dest='vcf_files', action='append',
        required=True, help='file in VCF 4.1 format to import variants from '
        '(more than one allowed)')
    p.add_argument(
        '--bed', metavar='BED_FILE', dest='bed_files', action='append',
        help='file in BED format to import covered regions from (more than '
        'one allowed)')
    p.add_argument(
        '-u', '--data-uploaded', dest='data_uploaded', action='store_true',
        help='data files are already uploaded to the server')
    p.add_argument(
        '-s', '--pool-size', dest='pool_size', default=1, type=int,
        help='number of individuals in sample (default: 1)')
    p.add_argument(
        '-p', '--public', dest='public', action='store_true',
        help='sample data is public')
    # Note: We prefer to explicitly include the --no-coverage-profile instead
    # of concluding it from an empty list of BED files. This prevents
    # accidentally forgetting the coverage profile.
    p.add_argument(
        '--no-coverage-profile', dest='no_coverage_profile', action='store_true',
        help='sample has no coverage profile')
    p.add_argument(
        '-l', '--prefer_genotype_likelihoods', dest='prefer_genotype_likelihoods',
        action='store_true', help='in VCF files, derive genotypes from '
        'likelihood scores instead of using reported genotypes (use this if '
        'the file was produced by samtools)')
    p.add_argument(
        '-w', '--wait', dest='wait', action='store_true',
        help='wait for imports to complete (blocking)')
    # Subparser 'samples import-vcf'.
    p = s.add_parser(
        'import-vcf', help='import VCF file for existing sample',
        description=import_variation.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=import_variation)
    p.add_argument(
        'uri', metavar='URI', type=str, help='sample')
    p.add_argument(
        'vcf_file', metavar='FILE',
        help='file in VCF 4.1 format to import variants from')
    p.add_argument(
        '-u', '--data-uploaded', dest='data_uploaded', action='store_true',
        help='data files are already uploaded to the server')
    p.add_argument(
        '-l', '--prefer_genotype_likelihoods', dest='prefer_genotype_likelihoods',
        action='store_true', help='in VCF files, derive genotypes from '
        'likelihood scores instead of using reported genotypes (use this if '
        'the file was produced by samtools)')
    p.add_argument(
        '-w', '--wait', dest='wait', action='store_true',
        help='wait for import to complete (blocking)')
    # Subparser 'samples import-bed'.
    p = s.add_parser(
        'import-bed', help='import BED file for existing sample',
        description=import_coverage.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=import_coverage)
    p.add_argument(
        'uri', metavar='URI', type=str, help='sample')
    p.add_argument(
        'bed_file', metavar='FILE',
        help='file in BED format to import covered regions from')
    p.add_argument(
        '-u', '--data-uploaded', dest='data_uploaded', action='store_true',
        help='data files are already uploaded to the server')
    p.add_argument(
        '-w', '--wait', dest='wait', action='store_true',
        help='wait for import to complete (blocking)')
    # Subparsers for 'groups'.
    s = subparsers.add_parser(
        'groups', help='manage groups', description='Manage group resources.'
    ).add_subparsers()
    # Subparser 'groups list'.
    p = s.add_parser(
        'list', help='list groups',
        description=list_groups.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=list_groups)
    # Subparser 'groups show'.
    p = s.add_parser(
        'show', help='show group details',
        description=show_group.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=show_group)
    p.add_argument(
        'uri', metavar='URI', type=str, help='group')
    # Subparser 'groups add'.
    p = s.add_parser(
        'add', help='add new sample group',
        description=add_group.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=add_group)
    p.add_argument(
        'name', metavar='NAME', type=str, help='group name')
    # Subparsers for 'users'.
    s = subparsers.add_parser(
        'users', help='manage users', description='Manage user resources.'
    ).add_subparsers()
    # Subparser 'users list'.
    p = s.add_parser(
        'list', help='list users',
        description=list_users.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=list_users)
    # Subparser 'users show'.
    p = s.add_parser(
        'show', help='show user details',
        description=show_user.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=show_user)
    p.add_argument('uri', metavar='URI', type=str, help='user')
    # Subparser 'users add'.
    p = s.add_parser(
        'add', help='add new API user',
        description=add_user.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=add_user)
    p.add_argument(
        'login', metavar='LOGIN', type=str, help='user login')
    p.add_argument(
        '-n', '--name', metavar='NAME', dest='name', type=str,
        help='user name (default: LOGIN)')
    # One boolean --<role> flag per known role; selected roles accumulate
    # into the 'roles' list.
    for role in USER_ROLES:
        p.add_argument(
            '--%s' % role, dest='roles', action='append_const', const=role,
            help='user has %s role' % role)
    # Subparsers for 'data-sources'.
    s = subparsers.add_parser(
        'data-sources', help='manage data sources',
        description='Manage data source resources.'
    ).add_subparsers()
    # Subparser 'data-sources list'.
    p = s.add_parser(
        'list', help='list data sources',
        description=list_data_sources.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=list_data_sources)
    p.add_argument(
        '-u', '--user', dest='user', metavar='URI',
        help='filter data sources by user')
    # Subparser 'data-sources show'.
    p = s.add_parser(
        'show', help='show data source details',
        description=show_data_source.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=show_data_source)
    p.add_argument(
        'uri', metavar='URI', type=str, help='data source')
    # Subparser 'data-sources download'.
    p = s.add_parser(
        'download', help='download data source',
        description=data_source_data.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=data_source_data)
    p.add_argument(
        'uri', metavar='URI', type=str, help='data source')
    # Subparser 'data-sources annotate'.
    p = s.add_parser(
        'annotate', help='annotate data source',
        description=annotate_data_source.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=annotate_data_source)
    p.add_argument(
        'uri', metavar='URI', type=str, help='data source')
    p.add_argument(
        '-q', '--query', dest='queries', nargs=2, action=UpdateAction,
        metavar=('NAME', 'EXPRESSION'), help='annotation query (more than '
        'one allowed)')
    p.add_argument(
        '-w', '--wait', dest='wait', action='store_true',
        help='wait for annotation to complete (blocking)')
    # Subparser 'annotate-vcf'.
    p = subparsers.add_parser(
        'annotate-vcf', help='annotate VCF file',
        description=annotate_vcf.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=annotate_vcf)
    p.add_argument(
        'vcf_file', metavar='FILE', help='file in VCF 4.1 format to annotate')
    p.add_argument(
        '-u', '--data-uploaded', dest='data_uploaded', action='store_true',
        help='data files are already uploaded to the server')
    p.add_argument(
        '-q', '--query', dest='queries', nargs=2, action=UpdateAction,
        metavar=('NAME', 'EXPRESSION'), help='annotation query (more than '
        'one allowed)')
    # TODO:
    # - Perhaps --no-wait is better (i.e., wait by default)?
    # - If we are waiting we might as well also download the result.
    p.add_argument(
        '-w', '--wait', dest='wait', action='store_true',
        help='wait for annotation to complete (blocking)')
    # Subparser 'annotate-bed'.
    p = subparsers.add_parser(
        'annotate-bed', help='annotate BED file',
        description=annotate_bed.__doc__.split('\n\n')[0],
        parents=[config_parser])
    p.set_defaults(func=annotate_bed)
    p.add_argument(
        'bed_file', metavar='FILE', help='file in BED format to annotate')
    p.add_argument(
        '-u', '--data-uploaded', dest='data_uploaded', action='store_true',
        help='data files are already uploaded to the server')
    p.add_argument(
        '-q', '--query', dest='queries', nargs=2, action=UpdateAction,
        metavar=('NAME', 'EXPRESSION'), help='annotation query (more than '
        'one allowed)')
    p.add_argument(
        '-w', '--wait', dest='wait', action='store_true',
        help='wait for annotation to complete (blocking)')
    args = parser.parse_args()
    try:
        session = Session(config=create_config(args.config))
        # Forward everything except the bookkeeping attributes to the
        # selected subcommand handler.
        args.func(session=session,
                  **{k: v for k, v in vars(args).items()
                     if k not in ('config', 'func', 'subcommand')})
    except UserError as e:
        abort(e)
    except UnauthorizedError:
        abort('Authentication is needed, please make sure you have the '
              'correct authentication token defined in "%s"'
              % (args.config or USER_CONFIGURATION))
    except ForbiddenError:
        abort('Sorry, you do not have permission')
    # Note: `except E as (code, message)` is Python 2 tuple-unpacking
    # syntax; this module is Python 2 only.
    except BadRequestError as (code, message):
        abort(message)
    except ApiError as (code, message):
        abort(message)
if __name__ == '__main__':  # Run the CLI only when executed as a script.
    main()
|
Are you excluding home patients on purpose?
Hello! Thanks for your question. For this study we are only interested in patients receiving dialysis in a clinic setting.
Thank you for your feedback. For this study, we are interested in patients receiving dialysis in a treatment center. However, you make a great point, and that would be an interesting focus for a study in the future!
|
#!/usr/bin/env python
import os, sys
# Make the shared engines directory importable so the fpsl_pulp engine can
# be used without installing it as a package.
SCRIPTDIR = os.path.dirname(__file__)
ENGINDIR = os.path.join(SCRIPTDIR, '..', '..', 'engines')
sys.path.append(os.path.abspath(ENGINDIR))
from fpsl_pulp import fair_map_inference
# Likewise for the paper_review problem definition (grounding code).
PROBLEMDIR = os.path.join(SCRIPTDIR, '..', '..', 'problems', 'paper_review')
sys.path.append(os.path.abspath(PROBLEMDIR))
from grounding import ground
from os.path import join as ojoin
if __name__ == '__main__':
    # Ground the first paper-review dataset into weighted logical rules.
    data_path = ojoin(PROBLEMDIR, 'data', '1')
    rules, hard_rules, counts, atoms = ground(data_path)
    # Run fair MAP inference ('RC' fairness criterion, epsilon=0.1).
    results = fair_map_inference(rules, hard_rules, counts, 0.1, 'RC', solver='gurobi')
    out_path = ojoin('output', 'fpsl_pulp')
    # Make sure the output directory exists before writing result files;
    # otherwise the open() calls below fail on a fresh checkout.
    os.makedirs(out_path, exist_ok=True)
    # Write one tab-separated file per predicate with the inferred values.
    reviews = atoms['review']
    with open(ojoin(out_path, 'POSITIVEREVIEW.txt'), 'w') as f:
        for (review, paper), (vid, _) in reviews.items():
            print("'%s'\t'%s'\t%f" % (review, paper, results[vid]), file=f)
    acceptable = atoms['acceptable']
    with open(ojoin(out_path, 'ACCEPTABLE.txt'), 'w') as f:
        for paper, (vid, _) in acceptable.items():
            print("'%s'\t%f" % (paper, results[vid]), file=f)
    presents = atoms['presents']
    with open(ojoin(out_path, 'PRESENTS.txt'), 'w') as f:
        for author, (vid, _) in presents.items():
            print("'%s'\t%f" % (author, results[vid]), file=f)
|
The Financial Conduct Authority has published its business plan for 2018/2019, shining a light on its upcoming priorities, which include a strong focus on Brexit, financial crime and technology.
Under the stewardship of acting chair Maureen Ohlhausen, the United States Federal Trade Commission had another robust year of enforcement activity; albeit amid uncertainty over the agency’s personnel.
Unified Patent Court: Where are we now?
The fate of the Unified Patent Court – a single patent court covering 25 countries – is currently up in the air following a German constitutional challenge. CDR examines the timeframe for the court and the greater impact Brexit could have on IP.
The United States Securities and Exchange Commission has exercised its power to freeze USD 27 million in trading proceeds of a Nasdaq-listed blockchain technology company, in what has been perceived to be a warning shot to the increasingly contentious blockchain sector.
The Chartered Institute of Trademark Attorneys’ spring conference provided insight into the enforcement efforts of the Intellectual Property Office and a case law update on the fallibility of memory.
The United Kingdom government has ratified the Hague Agreement, meaning that after Brexit, IP owners will be able to continue filing for international design applications.
With game-changing, as well as upcoming, rulings from the United States Supreme Court, patent litigation is at a turning point where even the value of holding a patent is in question. CDR speaks with leading lawyers about the shape of things to come.
A report by UK firm Hugh James has found that the number of small businesses taking cases to the Intellectual Property Enterprise Court has hit a new record in the last year.
The dismissal of the appeal in the Property Alliance Group litigation marks another victory for the UK’s banks in narrowing down the scope for claims based on LIBOR related conduct.
Third-party financier Burford Capital is expanding its insurance arm to launch a global insurance business to address adverse costs risks in large-scale commercial litigation and arbitration.
The New Year sees further developments in long-running US litigation relating to one of the best-selling pharmaceutical drugs in history.
The emergence of a new cryptocurrency in Venezuela has raised concerns that the heavily sanctioned country is using the offering to evade economic sanctions imposed by the US and EU, while Russia looks set to tread the same path.
The Dana Gas case highlights the potential for conflict between courts in rival jurisdictions and between Islamic and western finance.
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the input_fn for training or evaluation.
The training data is assumed to be in TFRecord format with keys as specified
in the dataset_parser below
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import tensorflow.compat.v1 as tf
import inception_preprocessing
import vgg_preprocessing
FLAGS = flags.FLAGS

# --- Input pipeline flags ---------------------------------------------------
flags.DEFINE_integer(
    'num_parallel_calls', 64,
    'Number of elements to process in parallel (by mapper)')
flags.DEFINE_integer(
    'shuffle_buffer_size', 1000,
    'Number of elements from dataset that shuffler will sample from. '
    'This shuffling is done after prefetching is done. '
    'Set to 0 to disable')
flags.DEFINE_bool(
    'use_sloppy_interleave',
    default=False,
    help='Use sloppy interleave or not. Default set to False.')
flags.DEFINE_integer(
    'cycle_length',
    default=16,
    help='The number of files to read concurrently by interleave function.')
flags.DEFINE_string(
    'data_source',
    'real',
    help='Data source to be real or fake. Fake data uses randomly generated '
    'numbers.')
flags.DEFINE_bool(
    'preprocessed', False, help='Is the data preprocessed to 224x224 images?')
# --- Image shape flags ------------------------------------------------------
flags.DEFINE_integer(
    'width', 224, 'Width of input image')
flags.DEFINE_integer(
    'height', 224, 'Height of input image')
flags.DEFINE_integer(
    # Fixed typo in help text: 'channgles' -> 'channels'.
    'num_channel', 3, 'Number of channels')
flags.DEFINE_bool(
    'use_annotated_bbox', False,
    'If true, use annotated bounding box as input to cropping function, '
    'else use full image size')
flags.DEFINE_string(
    'preprocessing', None,
    'Preprocessing stage to use: one of inception or vgg')
# --- Buffering and prefetch flags -------------------------------------------
flags.DEFINE_integer(
    'prefetch_size',
    default=None,
    # Added the missing space after 'value.' so the concatenated help text
    # reads correctly.
    help='The number of elements buffered by prefetch function. Default is the '
    'batch size. Any positive integer sets the buffer size at such a value. '
    'Any other value disables prefetch.')
flags.DEFINE_integer(
    'dataset_reader_buffer_size',
    default=256 * 1024 * 1024,
    help='The number of bytes in read buffer. A value of zero means no '
    'buffering.')
flags.DEFINE_integer(
    'followup_shuffle_buffer_size', 1000,
    'Number of elements from dataset that shuffler will sample from. '
    'This shuffling is done after prefetching is done. '
    'Set to 0 to disable')
flags.DEFINE_integer(
    'element_shuffle_buffer_size',
    default=1024,
    help='The number of training samples in the shuffle buffer. A value of zero'
    ' disables input-sample shuffling.')
flags.DEFINE_integer(
    'prefetch_dataset_buffer_size', 8*1024*1024,
    'Number of bytes in read buffer. 0 means no buffering.')
flags.DEFINE_integer(
    'num_files_infeed', 8,
    'Number of training files to read in parallel.')
# --- Fake-data value range ---------------------------------------------------
flags.DEFINE_float(
    'image_minval', -1.0, 'Min value.')
flags.DEFINE_float(
    'image_maxval', 1.0, 'Max value.')

# Random cropping constants
_RESIZE_SIDE_MIN = 256
_RESIZE_SIDE_MAX = 512
def preprocess_raw_bytes(image_bytes, is_training=False, bbox=None):
  """Preprocesses a raw JPEG image.

  This implementation is shared in common between train/eval pipelines,
  and when serving the model.

  Args:
    image_bytes: A string Tensor, containing the encoded JPEG.
    is_training: Whether or not to preprocess for training.
    bbox: In inception preprocessing, this bbox can be used for cropping.

  Returns:
    A 3-Tensor [height, width, RGB channels] of type float32.

  Raises:
    ValueError: If FLAGS.preprocessing is not 'vgg' or 'inception'.
  """
  image = tf.image.decode_jpeg(image_bytes, channels=3)
  image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  if FLAGS.preprocessing == 'vgg':
    image = vgg_preprocessing.preprocess_image(
        image=image,
        output_height=FLAGS.height,
        output_width=FLAGS.width,
        is_training=is_training,
        resize_side_min=_RESIZE_SIDE_MIN,
        resize_side_max=_RESIZE_SIDE_MAX)
  elif FLAGS.preprocessing == 'inception':
    image = inception_preprocessing.preprocess_image(
        image=image,
        output_height=FLAGS.height,
        output_width=FLAGS.width,
        is_training=is_training,
        bbox=bbox)
  else:
    # Raise an explicit exception instead of using `assert`: asserts are
    # stripped when Python runs with -O, which would let a misconfigured
    # preprocessing flag fall through silently.
    raise ValueError('Unknown preprocessing type: %s' % FLAGS.preprocessing)
  return image
def tensor_transform_fn(data, perm):
  """Optionally transpose `data` according to `perm`.

  The tensor is transposed on the host and the transpose is inverted on the
  TPU; the TPU-side transpose gets effectively elided, voiding any
  associated computational cost. When `FLAGS.transpose_enabled` is off the
  input is returned unchanged.

  NOTE: Eventually the compiler will be able to detect when this kind of
  operation may prove beneficial and perform these types of transformations
  implicitly, voiding the need for user intervention.

  Args:
    data: Tensor to be transposed.
    perm: Permutation of the dimensions of `data`.

  Returns:
    The (possibly) transposed tensor.
  """
  if not FLAGS.transpose_enabled:
    return data
  return tf.transpose(data, perm)
class InputPipeline(object):
  """Provides TFEstimator input function for imagenet, with preprocessing."""
  def __init__(self, is_training, data_dir):
    # is_training: build the training (True) or evaluation (False) input.
    # data_dir: directory containing the TFRecord shards.
    self.is_training = is_training
    self.data_dir = data_dir
  def dataset_parser(self, serialized_proto):
    """Parse an Imagenet record from value.

    Returns an (image, label) pair: a float32 image tensor and an int32
    scalar label.
    """
    if FLAGS.preprocessed:
      # Records already hold raw float32 pixels of the final size; decode
      # the bytes and attach the label, no image preprocessing needed.
      keys_to_features = {
          'image': tf.io.FixedLenFeature([], tf.string),
          'label': tf.io.FixedLenFeature([], tf.int64),
      }
      features = tf.parse_single_example(serialized_proto, keys_to_features)
      image = tf.decode_raw(features['image'], tf.float32)
      image.set_shape([FLAGS.height * FLAGS.width * FLAGS.num_channel])
      label = tf.cast(features['label'], tf.int32)
    else:
      # Records hold an encoded JPEG plus label and optional bounding-box
      # annotations.
      keys_to_features = {
          'image/encoded':
              tf.io.FixedLenFeature((), tf.string, default_value=''),
          'image/format':
              tf.io.FixedLenFeature((), tf.string, default_value='jpeg'),
          'image/class/label':
              tf.io.FixedLenFeature([], dtype=tf.int64, default_value=-1),
          'image/class/text':
              tf.io.FixedLenFeature([], dtype=tf.string, default_value=''),
          'image/object/bbox/xmin':
              tf.io.VarLenFeature(dtype=tf.float32),
          'image/object/bbox/ymin':
              tf.io.VarLenFeature(dtype=tf.float32),
          'image/object/bbox/xmax':
              tf.io.VarLenFeature(dtype=tf.float32),
          'image/object/bbox/ymax':
              tf.io.VarLenFeature(dtype=tf.float32),
          'image/object/class/label':
              tf.io.VarLenFeature(dtype=tf.int64),
      }
      features = tf.parse_single_example(serialized_proto, keys_to_features)
      image = tf.image.decode_jpeg(features['image/encoded'],
                                   channels=FLAGS.num_channel)
      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
      label = tf.cast(tf.reshape(
          features['image/class/label'], shape=[]), dtype=tf.int32)
      bbox = None
      if FLAGS.use_annotated_bbox:
        xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
        ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
        xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
        ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
        # Coordinates are ordered (y, x), i.e. [ymin, xmin, ymax, xmax],
        # matching TensorFlow's bounding-box convention.
        bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
        # Force the variable number of bounding boxes into the shape
        # [1, num_boxes, coords].
        bbox = tf.expand_dims(bbox, 0)
        bbox = tf.transpose(bbox, [0, 2, 1])
      if FLAGS.preprocessing == 'vgg':
        image = vgg_preprocessing.preprocess_image(
            image=image,
            output_height=FLAGS.height,
            output_width=FLAGS.width,
            is_training=self.is_training,
            resize_side_min=_RESIZE_SIDE_MIN,
            resize_side_max=_RESIZE_SIDE_MAX)
      elif FLAGS.preprocessing == 'inception':
        image = inception_preprocessing.preprocess_image(
            image=image,
            output_height=FLAGS.height,
            output_width=FLAGS.width,
            is_training=self.is_training,
            bbox=bbox)
      else:
        # No preprocessing stage selected: plain resize, then rescale
        # pixels to the range [-0.5, 0.5].
        image = tf.image.resize_images(image, size=[FLAGS.height, FLAGS.width])
        image = (tf.cast(image, tf.float32) * (1. / 255)) - 0.5
    return image, label
  def input_fn(self, params):
    """Input function which provides a single batch for train or eval.

    Args:
      params: `dict` of parameters passed from the `TPUEstimator`.
        `params['batch_size']` is always provided and should be used as the
        effective batch size.

    Raises:
      RuntimeError: If the data source has the incorrect value.

    Returns:
      A (images, labels) tuple of `Tensor`s for a batch of samples.
    """
    batch_size = params['batch_size']
    if FLAGS.data_source == 'real':
      # Actual imagenet data
      datadir = 'train-*' if self.is_training else 'validation-*'
      file_pattern = os.path.join(self.data_dir, datadir)
      dataset = tf.data.Dataset.list_files(file_pattern,
                                           shuffle=self.is_training)
      if self.is_training:
        # Loop over the training shards indefinitely.
        dataset = dataset.repeat()
      def prefetch_dataset(filename):
        # Read one TFRecord shard with a large read buffer, prefetching
        # per FLAGS.prefetch_size (None -> batch_size, <= 0 -> disabled).
        dataset = tf.data.TFRecordDataset(
            filename, buffer_size=FLAGS.prefetch_dataset_buffer_size)
        if FLAGS.prefetch_size is None:
          dataset = dataset.prefetch(batch_size)
        else:
          if FLAGS.prefetch_size > 0:
            dataset = dataset.prefetch(FLAGS.prefetch_size)
        return dataset
      # Interleave records from several shards; sloppy=True trades
      # deterministic ordering for throughput.
      dataset = dataset.apply(
          tf.data.experimental.parallel_interleave(
              prefetch_dataset,
              cycle_length=FLAGS.num_files_infeed,
              sloppy=True))
      if FLAGS.followup_shuffle_buffer_size > 0:
        dataset = dataset.shuffle(
            buffer_size=FLAGS.followup_shuffle_buffer_size)
      dataset = dataset.map(
          self.dataset_parser,
          num_parallel_calls=FLAGS.num_parallel_calls)
      dataset = dataset.prefetch(batch_size)
      dataset = dataset.batch(batch_size, drop_remainder=True)
      dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
      images, labels = dataset.make_one_shot_iterator().get_next()
      # Reshape to a fully static shape, as required on TPU.
      images = tf.reshape(images, [batch_size, FLAGS.height, FLAGS.width,
                                   FLAGS.num_channel])
      labels = tf.reshape(labels, [batch_size])
    elif FLAGS.data_source == 'fake':
      # Randomly generated images/labels, useful for benchmarking the model
      # without real input data.
      images = tf.random_uniform(
          shape=[batch_size, FLAGS.height, FLAGS.width, FLAGS.num_channel],
          minval=FLAGS.image_minval,
          maxval=FLAGS.image_maxval,
          dtype=tf.float32)
      labels = tf.random_uniform(
          [batch_size], minval=0, maxval=999, dtype=tf.int32)
    else:
      raise RuntimeError('Data source {} not supported. Use real/fake'.format(
          FLAGS.data_source))
    if FLAGS.transpose_enabled:
      # Host-side transpose; inverted on the TPU (see tensor_transform_fn).
      images = tensor_transform_fn(images, params['output_perm'])
    return images, labels
|
This wheeled hard case for DJI Ronin is a watertight hard case that features wheels and a retractable handle designed to make travel easy. Based on the HPRC 2700W series, it is pretty compact, dropping 8KG from the DJI case. It features a pre-cut foam interior that specifically holds the DJI Ronin gimbal system, the tuning stand, two batteries and a battery charger, the transmitter, a pair of 15mm rods, a camera mounting plate, the handlebar, cables, and other miscellaneous accessories. The case is impact and drop resistant, corrosion resistant, and features a neoprene seal that when properly closed is designed to keep the case watertight and your gear protected even if submerged in shallow water. A release valve allows you to equalize air pressure when traveling between different elevations.
The modular nature of the interior means that after you've stopped using the Ronin you can repurpose this case by replacing the foam cutout with either the HPRCCUB2700W foam or the HPRCSFD2700W divider kit.
|
from __future__ import print_function
from builtins import zip
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o import H2OFrame
from h2o.exceptions import H2OTypeError, H2OValueError
def compare_frames(expected, actual):
    """Assert that two H2OFrames agree in shape, column names, column types
    and (stringified) column values.

    Raises AssertionError on the first mismatch.
    """
    assert actual.shape == expected.shape
    # BUG FIX: the failure message referenced an undefined name `colnames`,
    # which turned a column mismatch into a NameError.
    assert actual.columns == expected.columns, \
        "Columns differ: %r vs %r" % (actual.columns, expected.columns)
    for i in range(len(actual.columns)):
        colname = actual.columns[i]
        t1 = expected.types[colname]
        t2 = actual.types[colname]
        assert t1 == t2, ("Bad types %s: expected %s, got %s" % (colname, t1, t2))
        col1 = expected[colname]
        s1 = str(h2o.as_list(col1))
        col2 = actual[colname]
        s2 = str(h2o.as_list(col2))
        assert s1 == s2, ("bad values: expected[%d] = %r, actual[%d] = %r"
                          % (i, s1, i, s2))
def test1():
    """Verify that asfactor() rejects frames with real-number columns and
    leaves its input frame unmodified, and that it succeeds on int/str frames.
    """
    badFrame = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"], "three": [0, 5.2, 14]})
    badClone = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"], "three": [0, 5.2, 14]})
    compare_frames(badFrame, badClone)

    try:
        badFrame.asfactor()
        # Typo fixed: "contaied" -> "contained".
        assert False, "The frame contained a real number, an error should be thrown"
    except H2OValueError:  # as designed
        pass

    # The failed asfactor() must not have mutated the frame.
    compare_frames(badFrame, badClone)
    originalAfterOp = H2OFrame.get_frame(badFrame.frame_id)
    compare_frames(badFrame, originalAfterOp)

    goodFrame = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"]})
    goodClone = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"]})
    compare_frames(goodFrame, goodClone)

    factoredFrame = goodFrame.asfactor()
    # asfactor() returns a new frame; the source frame stays unchanged.
    originalAfterOp = H2OFrame.get_frame(goodFrame.frame_id)
    compare_frames(goodFrame, originalAfterOp)

    expectedFactoredFrame = H2OFrame({"one": [4, 6, 1], "two": ["a", "b", "cde"]}, column_types={"one":"categorical", "two": "enum"})
    compare_frames(expectedFactoredFrame, factoredFrame)

    # Factoring an already-factored frame is a no-op.
    refactoredFrame = expectedFactoredFrame.asfactor()
    factoredAfterOp = H2OFrame.get_frame(refactoredFrame.frame_id)
    compare_frames(expectedFactoredFrame, factoredAfterOp)
# Run under the pyunit harness when executed directly; when imported by the
# test framework, run the test inline.
if __name__ == "__main__":
    pyunit_utils.standalone_test(test1)
else:
    test1()
|
Looking for Garage Door Repairs, Refurbishment or Maintenance In Perth?
Danmar Doors provides residential and commercial properties of Perth with a high-quality garage door installation, refurbishment and repairs service. If you are having a property built in Perth, WA, and require the expertise of the garage door professionals, if you are in need of a repairs service for your roller doors, or your timber garage door requires refurbishment, then the team at Danmar have you covered.
The garage door is often the largest moving part of most premises and regular servicing and maintenance is necessary in order to ensure a smooth, quiet and safe operation. Regular maintenance and repair-checks are also an essential part of your garage door warranty, and cedar or timber doors will require maintenance/refurbishment due to natural wear and tear. We offer repairs for roller doors, sectional doors, and single-panel units from any manufacturer including Merlin motors, ensuring that no matter what system you have, we can help. Don’t leave even the most basic servicing to the amateurs – choose the professionals at Danmar and ensure that your door is always getting the very best.
Our garage door services consist of springs tensioning, lubrication of hardware, inspection of all garage door components including bolts, screws, etc. Regular service will lead to an increase in the life of your garage door motor (warranty requirement) and it will ensure that its operation is safe and smooth. For more information about what our garage door maintenance packages consist of please click here.
Danmar Garage Doors strongly recommends your garage door to be serviced once every 12 months (residential doors under 14sqm or 150Kg) and 6 months for larger residential and most commercial garage doors unless specified otherwise. This frequency ensures that your door is always working at its very best, with a smooth and uninterrupted operation and minimal chance of failure.
All services/repairs should be carried out by a qualified service technician. At Danmar, we only hire the most experienced tradespeople, ensuring that you’re always being looked after by the most suitable person for the job, whether it is a service, repairs or refurbishing the paint in your cedar garage door in Perth. Put your property in the right hands and speak to our staff today.
Start a conversation with our team and find out how we could help you get the best from your door. In order to obtain a free quote on a service/repair please contact our office on 08 9309 6111 (service/repair is only available in Perth WA).
|
from django.http import HttpResponseRedirect#, HttpResponse
from importer.forms import ImportForm
from importer.tasks import ThingiProjectTask,ThingiUserTask
from django.shortcuts import get_object_or_404, render_to_response
from multiuploader.views import draftview
import re
''' nifty parse function performs all the identification that can be done on a url without making any get requests. It returns a good value, and if True, a task to call, or False and an error message '''
def parse(url):
    """Identify, from the URL alone, which import task should handle it.

    No network requests are made. Returns ThingiProjectTask for a
    Thingiverse thing/project page, ThingiUserTask for any other
    Thingiverse page, or None for unrecognised sites.
    """
    # Raw strings avoid invalid-escape warnings ('\.', '\d') on Python 3.
    if re.search(r'thingiverse\.com', url):
        # It's from Thingiverse!
        if re.search(r'thing:\d+', url):
            # A thing/project page. (The previous pattern 'thing:\d\d+'
            # required two digits and misclassified single-digit thing ids.)
            return ThingiProjectTask
        else:
            # Probably a user page; other page types are not checked here.
            return ThingiUserTask
    else:
        return None
def importer(request):
    """Dispatch a scraping task for the URL submitted via ImportForm.

    On a valid POST from an authenticated user, queues the matching Celery
    task (if the site is recognised); always renders the drafts view.
    """
    if request.method == 'POST':
        form = ImportForm(request.POST.copy())
        if form.is_valid() and request.user.is_authenticated():
            userPK=request.user.pk
            url=form.cleaned_data['url']
            task=parse(url)  # pick the task class for this URL (or None)
            print("importer attempting to import from : {}".format(url))
            print("for user : {}".format(userPK))
            if task:
                print("importing from {}".format(task.__name__))
                task.delay(url=url,userPK=userPK)  # queue asynchronously
            else:
                # Unknown site: silently ignored. TODO: log and surface an
                # error to the user.
                pass
    # TODO: surface form validation errors to the user instead of falling
    # through silently.
    return draftview(request, scraperMessage=True)
    #return HttpResponseRedirect('/mydrafts/', c)
#return HttpResponseRedirect('/mydrafts/', c)
|
Are there any working links to a good pack of gema maps? I'd like to play them on my own in single player. Thanks.
(13 Feb 19, 04:26PM)Lee Wrote: Are there any working links to a good pack of gema maps? I'd like to play them on my own in single player. Thanks.
|
import os
import uuid
import random
import tensorflow as tf
import hypergan as hg
import hyperchamber as hc
import numpy as np
import glob
import time
import re
from hypergan.viewer import GlobalViewer
from hypergan.samplers.base_sampler import BaseSampler
from hypergan.gan_component import ValidationException, GANComponent
from hypergan.samplers.random_walk_sampler import RandomWalkSampler
from hypergan.samplers.debug_sampler import DebugSampler
from hypergan.search.alphagan_random_search import AlphaGANRandomSearch
from hypergan.gans.base_gan import BaseGAN
from common import *
import copy
from hypergan.gans.alpha_gan import AlphaGAN
from hypergan.gan_component import ValidationException, GANComponent
from hypergan.gans.base_gan import BaseGAN
from hypergan.discriminators.fully_connected_discriminator import FullyConnectedDiscriminator
from hypergan.encoders.uniform_encoder import UniformEncoder
from hypergan.trainers.multi_step_trainer import MultiStepTrainer
from hypergan.trainers.multi_trainer_trainer import MultiTrainerTrainer
from hypergan.trainers.consensus_trainer import ConsensusTrainer
# Command-line setup. ArgumentParser / parse_size / lookup_config come from
# the local `common` module (imported with *).
arg_parser = ArgumentParser("render next frame")
parser = arg_parser.add_image_arguments()
parser.add_argument('--frames', type=int, default=4, help='Number of frames to embed.')
parser.add_argument('--shuffle', type=bool, default=False, help='Randomize inputs.')
args = arg_parser.parse_args()

width, height, channels = parse_size(args.size)
config = lookup_config(args)
if args.action == 'search':
    # Hyperparameter search: start from a random configuration, optionally
    # seeded from a config list with random sub-components swapped in.
    random_config = AlphaGANRandomSearch({}).random_config()
    if args.config_list is not None:
        config = random_config_from_list(args.config_list)
        config["generator"]=random_config["generator"]
        config["g_encoder"]=random_config["g_encoder"]
        config["discriminator"]=random_config["discriminator"]
        config["z_discriminator"]=random_config["z_discriminator"]
        # TODO Other search terms?
    else:
        config = random_config
def tryint(s):
    """Coerce *s* to an int when it parses as one; otherwise return it unchanged."""
    try:
        value = int(s)
    except ValueError:
        value = s
    return value
def alphanum_key(s):
    """Build a natural-sort key: split *s* on digit runs and convert those
    runs to ints, e.g. "z23a" -> ["z", 23, "a"]."""
    return [tryint(chunk) for chunk in re.split('([0-9]+)', s)]
class VideoFrameLoader:
    """Loads runs of consecutive video frames (stored as image files) into
    TensorFlow queue-based batches.

    NOTE(review): read_frame also relies on the module-level globals
    `channels`, `height` and `width`, not only on its parameters.
    """

    def __init__(self, batch_size, frame_count, shuffle):
        self.batch_size = batch_size
        self.frame_count = frame_count
        self.shuffle = shuffle

    def inputs(self):
        """Return the batched frame tensors built by create()."""
        return self.frames

    def create(self, directory, channels=3, format='jpg', width=64, height=64, crop=False, resize=False):
        """Build the input queues for images under `directory`.

        Populates self.frames (frame_count batched image tensors) and
        self.x / self.y (two independently shuffled single-frame batches).
        """
        directories = glob.glob(directory+"/*")
        directories = [d for d in directories if os.path.isdir(d)]
        if(len(directories) == 0):
            directories = [directory]
        # Create a queue that produces the filenames to read.
        if(len(directories) == 1):
            # No subdirectories, use all the images in the passed in path
            filenames = glob.glob(directory+"/*."+format)
        else:
            filenames = glob.glob(directory+"/**/*."+format)
        if(len(filenames) < self.frame_count):
            print("Error: Not enough frames in data folder ", directory)
        self.file_count = len(filenames)
        filenames = sorted(filenames, key=alphanum_key)
        if self.file_count == 0:
            raise ValidationException("No images found in '" + directory + "'")
        if self.shuffle:
            # Each frame slot is sampled independently.
            frames = [tf.train.slice_input_producer([filenames], shuffle=True)[0] for i in range(self.frame_count)]
        else:
            # Shifted filename lists (filenames[:end], filenames[1:end-1], ...)
            # so each dequeued element is a run of frame_count consecutive frames.
            input_t = [filenames[i:i-self.frame_count] for i in range(self.frame_count)]
            input_queue = tf.train.slice_input_producer(input_t, shuffle=True)
            frames = input_queue
        # Read examples from files in the filename queue.
        frames = [self.read_frame(frame, format, crop, resize) for frame in frames]
        frames = self._get_data(frames)
        self.frames = frames

        x = tf.train.slice_input_producer([filenames], shuffle=True)[0]
        y = tf.train.slice_input_producer([filenames], shuffle=True)[0]
        self.x = self.read_frame(x, format, crop, resize)
        self.y = self.read_frame(y, format, crop, resize)
        self.x = self._get_data([self.x])
        self.y = self._get_data([self.y])

    def read_frame(self, t, format, crop, resize):
        """Decode one image file tensor and scale it to floats in [-1, 1]."""
        value = tf.read_file(t)
        if format == 'jpg':
            img = tf.image.decode_jpeg(value, channels=channels)
        elif format == 'png':
            img = tf.image.decode_png(value, channels=channels)
        else:
            # BUG FIX: previously this only printed and fell through, which
            # crashed later with UnboundLocalError on `img`.
            print("[loader] Failed to load format", format)
            raise ValidationException("Unsupported image format: " + str(format))
        img = tf.cast(img, tf.float32)
        # Image processing for evaluation.
        # Crop the central [height, width] of the image.
        if crop:
            # BUG FIX: the module is imported as `hg`, so the previous
            # reference to `hypergan.inputs...` raised NameError.
            resized_image = hg.inputs.resize_image_patch.resize_image_with_crop_or_pad(img, height, width, dynamic_shape=True)
        elif resize:
            resized_image = tf.image.resize_images(img, [height, width], 1)
        else:
            resized_image = img
        tf.Tensor.set_shape(resized_image, [height,width,channels])
        # This moves the image to a range of -1 to 1.
        float_image = resized_image / 127.5 - 1.
        return float_image

    def _get_data(self, imgs):
        """Batch `imgs` through a multi-threaded shuffle queue."""
        batch_size = self.batch_size
        num_preprocess_threads = 24
        return tf.train.shuffle_batch(
            imgs,
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity= batch_size*2, min_after_dequeue=batch_size)
# Build the input pipeline: batches of args.frames consecutive frames drawn
# from the image directory.
inputs = VideoFrameLoader(args.batch_size, args.frames, args.shuffle)
inputs.create(args.directory,
              channels=channels,
              format=args.format,
              crop=args.crop,
              width=width,
              height=height,
              resize=True)

# Checkpoint path shared by setup_gan() and train().
save_file = "save/model.ckpt"
class AliNextFrameGAN(BaseGAN):
    """ALI-style next-frame GAN.

    Builds an encoder/generator pair over sequences of frames: each frame is
    encoded into a latent z and a recurrent context c, and the generator
    reconstructs / predicts frames from (z, c). Discriminators compare
    encodings of real sequences against simulated rollouts.

    NOTE(review): create() assigns self.g_vars = [] which shadows the
    g_vars() method on instances — confirm callers rely on _g_vars instead.
    """
    def __init__(self, *args, **kwargs):
        BaseGAN.__init__(self, *args, **kwargs)

    def create(self):
        """Construct the full graph: encoders, generator, rollouts,
        discriminators, losses and the trainer."""
        config = self.config
        ops = self.ops
        self.g_vars = []
        d_vars = []
        with tf.device(self.device):
            def random_t(shape):
                # Sample from the configured z distribution; the last dim is
                # divided across the distribution's projections.
                shape[-1] //= len(config.z_distribution.projections)
                return UniformEncoder(self, config.z_distribution, output_shape=shape).sample
            def random_like(x):
                shape = self.ops.shape(x)
                return random_t(shape)
            self.frame_count = len(self.inputs.frames)
            self.frames = self.inputs.frames

            # Independent noise sources for the z/c priors.
            dist = UniformEncoder(self, config.z_distribution)
            dist2 = UniformEncoder(self, config.z_distribution)
            dist3 = UniformEncoder(self, config.z_distribution)
            dist4 = UniformEncoder(self, config.z_distribution)
            dist5 = UniformEncoder(self, config.z_distribution)
            # Map uniform noise into the z and c spaces (weights shared via reuse).
            uz = self.create_component(config.uz, name='u_to_z', input=dist.sample)
            uc = self.create_component(config.uc, name='u_to_c', input=dist2.sample)
            uz2 = self.create_component(config.uz, name='u_to_z', input=dist3.sample, reuse=True)
            uc2 = self.create_component(config.uc, name='u_to_c', input=dist4.sample, reuse=True)
            uc3 = self.create_component(config.uc, name='u_to_c', input=dist5.sample, reuse=True)
            self.g_vars += uz.variables()
            self.g_vars += uc.variables()

            def ec(zt, cp, reuse=True):
                # Context encoder: next context from current z and previous
                # context, optionally with injected noise.
                if config.noise:
                    randt = random_like(cp)
                    if config.proxy:
                        dist3 = UniformEncoder(self, config.z_distribution)
                        proxy_c = self.create_component(config.proxy_c, name='rand_ct', input=dist3.sample, reuse=reuse)
                        randt = proxy_c.sample
                    print("CC", zt, randt)
                    c = self.create_component(config.ec, name='ec', input=zt, features={'ct-1':cp, 'n':randt}, reuse=reuse)
                else:
                    c = self.create_component(config.ec, name='ec', input=zt, features=[cp], reuse=reuse)
                if not reuse:
                    # NOTE(review): proxy_c is only defined when config.noise
                    # is also set — confirm proxy implies noise in configs.
                    if config.proxy:
                        self.g_vars += proxy_c.variables()
                    self.g_vars += c.variables()
                return c.sample
            def ez(ft, zp, reuse=True):
                # Frame encoder: z for frame ft given the previous z.
                z = self.create_component(config.ez, name='ez', input=ft, features=[zp], reuse=reuse)
                if not reuse:
                    self.g_vars += z.variables()
                return z.sample
            def build_g(zt, ct, reuse=True):
                # Generator: frame from (z, c).
                print("Gb", reuse)
                g = self.create_component(config.generator, name='generator', input=ct, features=[zt], reuse=reuse)
                if not reuse:
                    self.g_vars += g.variables()
                return g.sample
            def encode_frames(fs, c0, z0, reuse=True):
                # Run the encoders over a frame sequence, starting from
                # (c0, z0); returns contexts, latents and reconstructions.
                cs = [c0]
                zs = [z0]
                x_hats = [build_g(zs[-1], cs[-1], reuse=reuse)]
                for i in range(len(fs)):
                    print("encode frames", i)
                    _reuse = reuse or (i!=0)
                    z = ez(fs[i], zs[-1], reuse=_reuse)
                    c = ec(z, cs[-1], reuse=_reuse)
                    x_hat = build_g(z, c, reuse=True)
                    zs.append(z)
                    cs.append(c)
                    x_hats.append(x_hat)
                return cs, zs, x_hats
            def build_sim(z0, c0, steps, reuse=True):
                # Free-running rollout: feed generated frames back into the
                # encoders for `steps` steps.
                zs = [z0]
                cs = [c0]
                gs = [build_g(zs[-1], cs[-1], reuse=reuse)]
                for i in range(steps):
                    _reuse = reuse or (i!=0)
                    z = ez(gs[-1], zs[-1], reuse=_reuse)
                    c = ec(z, cs[-1], reuse=_reuse)
                    g = build_g(z, c, reuse=True)
                    zs.append(z)
                    cs.append(c)
                    gs.append(g)
                return gs, cs, zs

            #self.frames = [f+tf.random_uniform(self.ops.shape(f), minval=-0.1, maxval=0.1) for f in self.frames ]
            # Encode the real frames (creates the encoder variables).
            cs, zs, x_hats = encode_frames(self.frames, uc2.sample, uz2.sample, reuse=False)
            self.zs = zs
            self.cs = cs
            # Prior rollouts and their re-encodings.
            ugs, ucs, uzs = build_sim(uz.sample, uc.sample, len(self.frames))
            ugs_next, ucs_next, uzs_next = build_sim(uzs[-1], ucs[-1], len(self.frames))
            re_ucs_next, re_uzs_next, re_ugs_next = encode_frames(ugs_next[1:], ucs_next[0], uzs_next[0])
            # Rollout continuing from the real sequence's final state.
            gs_next, cs_next, zs_next = build_sim(zs[-1], cs[-1], len(self.frames))
            #gs_next_next, cs_next_next, zs_next_next = build_sim(zs[-1], cs[-1], 21)
            re_ucs, re_uzs, ugs_hat = encode_frames(ugs[1:], ucs[0], uzs[0])
            re_cs_next, re_zs_next, re_gs_next = encode_frames(gs_next[1:], cs_next[0], zs_next[0])
            self.x_hats = x_hats

            # Latent-space discriminator inputs: real-sequence latents vs
            # re-encoded rollout latents, with contexts as side features.
            # NOTE(review): t3/t4 and f3 are built but unused below.
            t0 = tf.concat(zs[1:], axis=3)
            t1 = tf.concat(re_uzs[:-1], axis=3)
            t2 = tf.concat(re_zs_next[:-1], axis=3)
            t3 = tf.concat(re_uzs_next[:-1], axis=3)
            t4 = tf.concat(re_uzs[:-1], axis=3)
            f0 = tf.concat(cs[1:], axis=3)
            f1 = tf.concat(re_ucs[:-1], axis=3)
            f2 = tf.concat(re_cs_next[:-1], axis=3)
            f3 = tf.concat(re_ucs_next[:-1], axis=3)
            stack = [t0,t1, t2]#, t4, t5]
            stacked = ops.concat(stack, axis=0)
            features = ops.concat([f0,f1,f2], axis=0)
            d = self.create_component(config.z_discriminator, name='d_img', input=stacked, features=[features])
            d_vars += d.variables()
            l = self.create_loss(config.loss, d, None, None, len(stack))
            d_loss = l.d_loss
            g_loss = l.g_loss

            # State handles used by samplers to continue a rollout.
            self.video_generator_last_z = uzs[0]
            self.video_generator_last_c = ucs[0]
            self.gs_next = gs_next
            ztn = uzs[1]
            ctn = ucs[1]
            self.video_generator_last_zn = ztn
            self.video_generator_last_cn = ctn
            gen = hc.Config({"sample":ugs[0]})

            if config.use_x:
                # Optional pixel-space discriminator over frame windows.
                def rotate(first, second, offset=None):
                    # Sliding windows: shift `first` left, appending elements
                    # of `second`, concatenating each window on channels.
                    rotations = [tf.concat(first[:offset], axis=3)]
                    elem = first
                    for e in second:
                        elem = elem[1:]+[e]
                        rotations.append(tf.concat(elem[:offset], axis=3))
                    return rotations
                t0 = tf.concat(self.frames[1:], axis=3)
                f0 = tf.concat(cs[1:-1], axis=3)
                stack = [t0]
                features = [f0]
                if config.encode_forward:
                    stack += rotate(self.frames[2:]+[gs_next[0]], gs_next[1:])
                    features += rotate(cs[2:], cs_next[1:])
                    #stack += [gs_next_next[-frames:]]
                if config.encode_ug:
                    stack += rotate(ugs[:-2], ugs[-2:]+ugs_next[1:])
                    features += rotate(ucs[:-2], ucs[-2:]+ucs_next[1:])
                stacked = ops.concat(stack, axis=0)
                features = tf.concat(features, axis=0)
                d = self.create_component(config.discriminator, name='d_manifold', input=stacked, features=[features])
                d_vars += d.variables()
                l = self.create_loss(config.loss, d, None, None, len(stack))
                d_loss += l.d_loss
                g_loss += l.g_loss

            gx_sample = gen.sample
            gy_sample = gen.sample
            gx = hc.Config({"sample":gx_sample})
            gy = hc.Config({"sample":gy_sample})
            # First 3 channels of the generated stack = the rendered frame.
            last_frame = tf.slice(gy_sample, [0,0,0,0], [-1, -1, -1, 3])
            self.y = hc.Config({"sample":last_frame})
            self.gy = self.y
            self.gx = self.y
            self.uniform_sample = gen.sample
            # Preview: input frames side by side with the generated frame.
            self.preview = tf.concat(self.inputs.frames[:-1] + [gen.sample], axis=1)#tf.concat(tf.split(gen.sample, (self.ops.shape(gen.sample)[3]//3), 3), axis=1)

            metrics = {
                'g_loss': g_loss,
                'd_loss': d_loss
            }
            trainers = []
            lossa = hc.Config({'sample': [d_loss, g_loss], 'metrics': metrics, 'd_fake': l.d_fake, 'd_real': l.d_real, 'config': l.config})
            self.loss = lossa
            self._g_vars = self.g_vars
            self._d_vars = d_vars
            trainer = self.create_component(config.trainer, loss = lossa, g_vars = self.g_vars, d_vars = d_vars)
            self.session.run(tf.global_variables_initializer())
            self.trainer = trainer
            self.generator = gx
            self.z_hat = gy.sample
            self.x_input = self.inputs.frames[0]
            self.uga = self.y.sample
            self.uniform_encoder = dist

    def g_vars(self):
        # Generator/encoder variables collected during create().
        return self._g_vars

    def d_vars(self):
        # Discriminator variables collected during create().
        return self._d_vars

    def fitness_inputs(self):
        return self.inputs.frames

    def create_loss(self, loss_config, discriminator, x, generator, split):
        """Instantiate the configured loss over a stacked discriminator batch."""
        loss = self.create_component(loss_config, discriminator = discriminator, x=x, generator=generator, split=split)
        return loss

    def create_encoder(self, x_input, name='input_encoder', reuse=False):
        """Build an encoder for x_input, falling back through config options."""
        config = self.config
        input_encoder = dict(config.input_encoder or config.g_encoder or config.generator)
        encoder = self.create_component(input_encoder, name=name, input=x_input, reuse=reuse)
        return encoder

    def create_z_discriminator(self, z, z_hat):
        """Discriminator between prior z samples and encoded z_hat."""
        config = self.config
        z_discriminator = dict(config.z_discriminator or config.discriminator)
        z_discriminator['layer_filter']=None
        net = tf.concat(axis=0, values=[z, z_hat])
        encoder_discriminator = self.create_component(z_discriminator, name='z_discriminator', input=net)
        return encoder_discriminator

    def create_cycloss(self, x_input, x_hat):
        """Reconstruction (cycle) loss between x_input and x_hat."""
        config = self.config
        ops = self.ops
        distance = config.distance or ops.lookup('l1_distance')
        # NOTE(review): references self.gan — confirm this attribute exists
        # on this class (elsewhere `self` is the gan).
        pe_layers = self.gan.skip_connections.get_array("progressive_enhancement")
        cycloss_lambda = config.cycloss_lambda
        if cycloss_lambda is None:
            cycloss_lambda = 10
        if(len(pe_layers) > 0):
            # Mask out layers not yet grown in progressive training.
            mask = self.progressive_growing_mask(len(pe_layers)//2+1)
            cycloss = tf.reduce_mean(distance(mask*x_input,mask*x_hat))
            cycloss *= mask
        else:
            cycloss = tf.reduce_mean(distance(x_input, x_hat))
        cycloss *= cycloss_lambda
        return cycloss

    def create_z_cycloss(self, z, x_hat, encoder, generator):
        """Latent cycle losses (z -> x -> z and x_hat -> z_hat), each gated
        and weighted by its config lambda. Returns None when neither lambda
        is configured."""
        config = self.config
        ops = self.ops
        total = None
        distance = config.distance or ops.lookup('l1_distance')
        if config.z_hat_lambda:
            z_hat_cycloss_lambda = config.z_hat_cycloss_lambda
            recode_z_hat = encoder.reuse(x_hat)
            z_hat_cycloss = tf.reduce_mean(distance(z_hat,recode_z_hat))
            z_hat_cycloss *= z_hat_cycloss_lambda
        if config.z_cycloss_lambda:
            recode_z = encoder.reuse(generator.reuse(z))
            z_cycloss = tf.reduce_mean(distance(z,recode_z))
            z_cycloss_lambda = config.z_cycloss_lambda
            if z_cycloss_lambda is None:
                z_cycloss_lambda = 0
            z_cycloss *= z_cycloss_lambda
        if config.z_hat_lambda and config.z_cycloss_lambda:
            total = z_cycloss + z_hat_cycloss
        elif config.z_cycloss_lambda:
            total = z_cycloss
        elif config.z_hat_lambda:
            total = z_hat_cycloss
        return total

    def input_nodes(self):
        "used in hypergan build"
        if hasattr(self.generator, 'mask_generator'):
            extras = [self.mask_generator.sample]
        else:
            extras = []
        return extras + [
            self.x_input
        ]

    def output_nodes(self):
        "used in hypergan build"
        # NOTE(review): references self.encoder and self.generator_int which
        # are not set in create() — confirm before using the build path.
        if hasattr(self.generator, 'mask_generator'):
            extras = [
                self.mask_generator.sample,
                self.generator.g1x,
                self.generator.g2x
            ]
        else:
            extras = []
        return extras + [
            self.encoder.sample,
            self.generator.sample,
            self.uniform_sample,
            self.generator_int
        ]
class VideoFrameSampler(BaseSampler):
    """Sampler that feeds previously generated frames back into the inputs,
    producing a rolling next-frame animation from gan.preview."""
    def __init__(self, gan, samples_per_row=8):
        sess = gan.session
        # Render the initial preview and split it into individual frames
        # along the height axis (frames are stacked vertically).
        self.x = gan.session.run(gan.preview)
        print("__________", np.shape(self.x),'---oo')
        frames = np.shape(self.x)[1]//height
        self.frames=frames
        self.x = np.split(self.x, frames, axis=1)
        self.i = 0
        BaseSampler.__init__(self, gan, samples_per_row)

    def _sample(self):
        """Advance one step: feed frames shifted by one back into the frame
        placeholders, re-render the preview and return its last frame."""
        gan = self.gan
        z_t = gan.uniform_encoder.sample
        sess = gan.session
        feed_dict = {}
        for i,f in enumerate(gan.inputs.frames):
            if len(self.x) > i+1:
                feed_dict[f]=self.x[i+1]
            #if(1 + self.frames < len(self.x)):
            #    feed_dict[f] = self.x[1+self.frames]
        self.x = sess.run(gan.preview, feed_dict)
        frames = np.shape(self.x)[1]//height
        self.x = np.split(self.x, frames, axis=1)
        x_ = self.x[-1]
        time.sleep(0.15)  # throttle the viewer refresh rate
        return {
            'generator': x_
        }
class TrainingVideoFrameSampler(BaseSampler):
    """Sampler used during training: renders gan.preview as-is, without
    feeding generated frames back into the inputs."""
    def __init__(self, gan, samples_per_row=8):
        self.z = None
        self.i = 0
        BaseSampler.__init__(self, gan, samples_per_row)

    def _sample(self):
        """Run the preview tensor and return it as the generator sample."""
        # Unused locals (z_t, sess) from the original implementation removed.
        gan = self.gan
        return {
            'generator': gan.session.run(gan.preview)
        }
def setup_gan(config, inputs, args):
    """Build the GAN, restore a checkpoint when present (except in search
    mode), start the input queue runners and configure the viewer."""
    gan = AliNextFrameGAN(config, inputs=inputs)

    if(args.action != 'search' and os.path.isfile(save_file+".meta")):
        gan.load(save_file)

    tf.train.start_queue_runners(sess=gan.session)

    config_name = args.config
    GlobalViewer.title = "[hypergan] next-frame " + config_name
    GlobalViewer.enabled = args.viewer
    GlobalViewer.zoom = args.zoom

    return gan
def train(config, inputs, args):
    """Train the GAN for args.steps steps, checkpointing every
    args.save_every steps and sampling every args.sample_every steps.

    Returns the collected metrics (currently always an empty list).
    """
    gan = setup_gan(config, inputs, args)
    sampler = lookup_sampler(args.sampler or TrainingVideoFrameSampler)(gan)
    samples = 0

    #metrics = [batch_accuracy(gan.inputs.x, gan.uniform_sample), batch_diversity(gan.uniform_sample)]
    #sum_metrics = [0 for metric in metrics]
    for i in range(args.steps):
        gan.step()

        if args.action == 'train' and i % args.save_every == 0 and i > 0:
            print("saving " + save_file)
            gan.save(save_file)

        if i % args.sample_every == 0:
            sample_file="samples/%06d.png" % (samples)
            samples += 1
            sampler.sample(sample_file, args.save_samples)

        #if i > args.steps * 9.0/10:
        #    for k, metric in enumerate(gan.session.run(metrics)):
        #        print("Metric "+str(k)+" "+str(metric))
        #        sum_metrics[k] += metric

    tf.reset_default_graph()
    return []#sum_metrics
def sample(config, inputs, args):
    """Load (or build) the GAN and write one sample image per step."""
    gan = setup_gan(config, inputs, args)
    sampler_class = lookup_sampler(args.sampler or VideoFrameSampler)
    sampler = sampler_class(gan)
    for step in range(args.steps):
        sampler.sample("samples/%06d.png" % (step), args.save_samples)
def search(config, inputs, args):
    """Run one random-search trial: train, persist the config under a fresh
    filename and append (filename, metrics...) to the search output CSV."""
    metrics = train(config, inputs, args)

    config_filename = "colorizer-"+str(uuid.uuid4())+'.json'
    hc.Selector().save(config_filename, config)

    row = config_filename + "," + ",".join(str(m) for m in metrics) + "\n"
    with open(args.search_output, "a") as results_file:
        results_file.write(row)
# Dispatch on the requested command-line action.
if args.action == 'train':
    metrics = train(config, inputs, args)
    print("Resulting metrics:", metrics)
elif args.action == 'sample':
    sample(config, inputs, args)
elif args.action == 'search':
    search(config, inputs, args)
else:
    print("Unknown action: "+args.action)
|
Stitch out our cute EOS holder quickly in your hoop for a great gift.
Our Snowman EOS Lip Balm Holder is perfect for co-workers, teachers, mailman, neighbors or anyone!
It stitches up quick in your hoop and holds a .25oz packaged EOS lip balm.
It's a great stocking stuffer.
A ribbon can be added to hang it.
We used Tangerine for our snowman.
|
#! /usr/bin/env python
import sys, os, io, string
import argparse
def parse_args():
    """
    Function: parse_args
    --------------------
    Parse the commandline arguments for SecretMessageMaker.

    Validates that the input file exists and that the "secret_" filename
    convention matches the requested mode, then reports what will happen.
    """
    # Define what commandline arguments can be accepted
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--encrypt", action="store_const", const=True,
                        help="run program in encryption mode. Default: decrypt")
    parser.add_argument('filename', metavar="FILE",
                        help="path of input text file (required)")
    args = parser.parse_args()

    # Validate the filename
    file_name = args.filename
    if not os.path.exists(file_name):
        parser.error("The file %s does not exist!" % file_name)

    # Check to make sure the user isn't going to accidentally override
    # something, or accidentally encrypt something twice.
    if "secret_" in args.filename and args.encrypt:
        # Encrypting an already-encrypted message.
        parser.error("[error] You're ENCRYPTING an encrypted file!")
    elif "secret_" not in args.filename and not args.encrypt:
        parser.error("[error] You're DECRYPTING a plain file!")

    # Let the user know which commands the program sees, and which files will
    # be made. (Fixed: the first branch used a Python-2-only print statement,
    # inconsistent with the parenthesized call below.)
    if args.encrypt:
        print("[info] ENCRYPTING %s into secret_%s..." % (file_name, file_name))
    else:
        print("[info] DECRYPTING %s into %s..." % (file_name,
                                                   file_name.replace("secret_", '')))
    return args
def encrypt(plain_line):
    """
    Function: encrypt
    --------------------
    Turns the human-readable line of text into a non-human-readable line of
    encrypted text (the space-separated ordinal of every character).

    @param plain_line the line to be encrypted
    @return the encrypted version of the 'line'
    """
    return ' '.join(str(ord(character)) for character in plain_line)
def decrypt(encrypted_line):
    """
    Function: decrypt
    --------------------
    Turns the encrypted line of text (space-separated character ordinals)
    back into a human-readable line of text.

    @param encrypted_line the line to be decrypted
    @return the decrypted version of the 'line'
    """
    return ''.join(chr(int(token)) for token in encrypted_line.split())
# Main Function
if __name__ == "__main__":
    args = parse_args()  # parse commandline arguments

    # Derive the output filename: prefix "secret_" when encrypting, strip it
    # when decrypting.
    if args.encrypt:
        out_name = "secret_" + args.filename
    else:
        out_name = args.filename.replace("secret_", '')

    # Context managers close both files even if an error occurs mid-way
    # (the original left them open on any exception).
    with open(args.filename, 'r') as in_file:
        with open(out_name, 'w') as out_file:
            # Iterate over every line of the file
            for line in in_file:
                if args.encrypt:
                    # Run encryption algorithm
                    out_file.write(encrypt(line) + ' ')  # add space between lines
                else:
                    # Run decryption algorithm
                    out_file.write(decrypt(line))
|
We've got Triumph Thunderbird LT touch up paint to help you repair any and all paint scratches on your vehicle. Phantom Black - color code: TRI006, Jet Black - color code: TRI011, Crystal White Pearl Tricoat - color code: TRI004 are just some of the common paint color codes that we offer in jars, pens, spray cans and more. Get your Triumph Thunderbird LT looking its best by selecting one of the touch up paint color options above.
|
import types
# for get_key_from_incoming_ip
import tempfile
import os
import commands
from sfa.util.faults import RecordNotFound, AccountNotEnabled, PermissionError, MissingAuthority, \
UnknownSfaType, ExistingRecord, NonExistingRecord
from sfa.util.sfatime import utcparse, datetime_to_epoch
from sfa.util.prefixTree import prefixTree
from sfa.util.xrn import Xrn, get_authority, hrn_to_urn, urn_to_hrn
from sfa.util.version import version_core
from sfa.util.sfalogging import logger
from sfa.trust.gid import GID
from sfa.trust.credential import Credential
from sfa.trust.certificate import Certificate, Keypair, convert_public_key
from sfa.trust.gid import create_uuid
from sfa.storage.model import make_record,RegRecord
#from sfa.storage.alchemy import dbsession
from sfa.storage.alchemy import global_dbsession
dbsession = global_dbsession
from sfa.managers.registry_manager import RegistryManager
class RegistryManager(RegistryManager):
    # Specializes the base RegistryManager (deliberately reusing its name so
    # importers pick up this subclass).

    def GetCredential(self, api, xrn, type, caller_xrn = None):
        """Issue a signed credential for the record identified by xrn.

        If caller_xrn is omitted, the record itself is taken as the caller.
        Raises RecordNotFound, AccountNotEnabled or PermissionError.
        """
        # convert xrn to hrn
        if type:
            hrn = urn_to_hrn(xrn)[0]
        else:
            hrn, type = urn_to_hrn(xrn)

        # Is this a root or sub authority
        auth_hrn = api.auth.get_authority(hrn)
        if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN:
            auth_hrn = hrn
        auth_info = api.auth.get_auth_info(auth_hrn)

        # get record info
        filter = {'hrn': hrn}
        if type:
            filter['type'] = type
        record=dbsession.query(RegRecord).filter_by(**filter).first()
        if not record:
            raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))

        # verify_cancreate_credential requires that the member lists
        # (researchers, pis, etc) be filled in
        logger.debug("get credential before augment dict, keys=%s"%record.__dict__.keys())
        api.driver.augment_records_with_testbed_info (record.__dict__)
        logger.debug("get credential after augment dict, keys=%s"%record.__dict__.keys())
        if not api.driver.is_enabled (record.__dict__):
            raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record.email))

        # get the callers gid
        # if caller_xrn is not specified assume the caller is the record
        # object itself.
        if not caller_xrn:
            caller_hrn = hrn
            caller_gid = record.get_gid_object()
        else:
            caller_hrn, caller_type = urn_to_hrn(caller_xrn)
            caller_filter = {'hrn': caller_hrn}
            if caller_type:
                caller_filter['type'] = caller_type
            caller_record = dbsession.query(RegRecord).filter_by(**caller_filter).first()
            if not caller_record:
                raise RecordNotFound("Unable to associated caller (hrn=%s, type=%s) with credential for (hrn: %s, type: %s)"%(caller_hrn, caller_type, hrn, type))
            caller_gid = GID(string=caller_record.gid)

        object_hrn = record.get_gid_object().get_hrn()
        # Rights the caller holds over the target object; empty = forbidden.
        rights = api.auth.determine_user_rights(caller_hrn, record)
        # make sure caller has rights to this object
        if rights.is_empty():
            raise PermissionError(caller_hrn + " has no rights to " + record.hrn)

        # Assemble and sign the credential with this authority's key.
        object_gid = GID(string=record.gid)
        new_cred = Credential(subject = object_gid.get_subject())
        new_cred.set_gid_caller(caller_gid)
        new_cred.set_gid_object(object_gid)
        new_cred.set_issuer_keys(auth_info.get_privkey_filename(), auth_info.get_gid_filename())
        #new_cred.set_pubkey(object_gid.get_pubkey())
        new_cred.set_privileges(rights)
        new_cred.get_privileges().delegate_all_privileges(True)
        if hasattr(record,'expires'):
            # Credential lifetime follows the record's expiration date.
            date = utcparse(record.expires)
            expires = datetime_to_epoch(date)
            new_cred.set_expiration(int(expires))
        auth_kind = "authority,ma,sa"
        # Parent not necessary, verify with certs
        #new_cred.set_parent(api.auth.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
        new_cred.encode()
        new_cred.sign()

        return new_cred.save_to_string(save_parents=True)
|
Earlier this month, Microsoft launched an attack on a botnet comprising about 2 million PCs. It is believed that the infected computers were used to swindle more than $500 million from bank accounts worldwide.
Microsoft Logo: The majority of infected computers were in the U.S., Europe and Hong Kong. Overall, more than 80 countries were affected by the action taken by Microsoft and the FBI on the 5th of June. Access was gained to infected computers across 1,400 networks, known as Citadel botnets.
The perpetrators should be Aquabox, Richard Domingues Boscovich from Microsoft’s Digital Crimes Unit assumed that this is to be found in Eastern Europe. The botnet, which were controlled by different servers worldwide were used to steal hundreds of credit institutions, like Microsoft filed with the court documents show.
On the size of the Institute was not respected. So it hit small cooperative banks as well as major banks, for example, Bank of America and Credit Suisse. Citadel is one of the largest botnets that are active, the malicious software was bundled with pirated copies of Windows. The FBI is working closely with Europol in this context together to track down the criminals are, however, no information about the progress.
|
from django.core.management.base import BaseCommand
import os
import optparse
import numpy as np
import json
import pandas as pd
import requests
#python manage.py get_plotsfromtitles --input=/Users/andrea/Desktop/book_packt/chapters/5/data/utilitymatrix.csv --outputplots=plots.csv --outputumatrix='umatrix.csv'
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
optparse.make_option('-i', '--input', dest='umatrixfile',
type='string', action='store',
help=('Input utility matrix')),
optparse.make_option('-o', '--outputplots', dest='plotsfile',
type='string', action='store',
help=('output file')),
optparse.make_option('--om', '--outputumatrix', dest='umatrixoutfile',
type='string', action='store',
help=('output file')),
)
def getplotfromomdb(self,col,df_moviesplots,df_movies,df_utilitymatrix):
string = col.split(';')[0]
title=string[:-6].strip()
year = string[-5:-1]
plot = ' '.join(title.split(' ')).encode('ascii','ignore')+'. '
url = "http://www.omdbapi.com/?t="+title+"&y="+year+"&plot=full&r=json"
headers={"User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36"}
r = requests.get(url,headers=headers)
jsondata = json.loads(r.content)
if 'Plot' in jsondata:
#store plot + title
plot += jsondata['Plot'].encode('ascii','ignore')
if plot!=None and plot!='' and plot!=np.nan and len(plot)>3:#at least 3 letters to consider the movie
df_moviesplots.loc[len(df_moviesplots)]=[string,plot]
df_utilitymatrix[col] = df_movies[col]
print len(df_utilitymatrix.columns)
return df_moviesplots,df_utilitymatrix
def handle(self, *args, **options):
pathutilitymatrix = options['umatrixfile']
df_movies = pd.read_csv(pathutilitymatrix)
movieslist = list(df_movies.columns[1:])
df_moviesplots = pd.DataFrame(columns=['title','plot'])
df_utilitymatrix = pd.DataFrame()
df_utilitymatrix['user'] = df_movies['user']
print 'nmovies:',len(movieslist)
for m in movieslist[:]:
df_moviesplots,df_utilitymatrix=self.getplotfromomdb(m,df_moviesplots,df_movies,df_utilitymatrix)
print len(df_movies.columns),'--',len(df_utilitymatrix.columns)
outputfile = options['plotsfile']
df_moviesplots.to_csv(outputfile, index=False)
outumatrixfile = options['umatrixoutfile']
df_utilitymatrix.to_csv(outumatrixfile, index=False)
|
Each year, more than 400 eighth grade boys join us for our annual "Viking for a Day" event. Throughout the day, students will explore Central Catholic through tours and miniature classes, have lunch with peers and student mentors in the McGinley Dining Hall, and learn all about Central Catholic! This year's "Viking for a Day" will take place on Oct. 12, 2018. Registration is now open!
Viking for a day is the perfect opportunity for your student to explore Central Catholic first hand. Current student mentors and faculty will guide him through a "day in the life" of a Central Catholic student so he can see what it's truly like to be a Viking!
When: 9:30 a.m. - 1:30 p.m.
|
from deoplete.filter.converter_reorder_attr import Filter
# Shared fixture: completion candidates in their original source order.
# Each candidate is a dict with a 'word' (completion text) and a 'kind'
# (category) — presumably the shape deoplete sources emit (TODO confirm).
candidates = [
    {'word': 'Apple', 'kind': 'Fruit'},
    {'word': 'Banana', 'kind': 'Fruit'},
    {'word': 'Pen', 'kind': 'Object'},
    {'word': 'Cherry Pie', 'kind': 'Pie'},
]
def test_reorder():
    """Candidates whose 'kind' appears in the preferred order are hoisted to
    the front in that order; the rest keep their relative positions."""
    source = candidates[:]
    order = {'kind': ['Pie', 'Fruit']}
    result = Filter.filter_attrs(source, order)
    assert result == [
        {'word': 'Cherry Pie', 'kind': 'Pie'},
        {'word': 'Apple', 'kind': 'Fruit'},
        {'word': 'Banana', 'kind': 'Fruit'},
        {'word': 'Pen', 'kind': 'Object'},
    ]
def test_filter():
    """A '!'-prefixed entry ('!Pen') removes matching candidates, while the
    remaining preferred entries ('Banana') are hoisted to the front."""
    source = candidates[:]
    order = {'word': ['!Pen', 'Banana']}
    result = Filter.filter_attrs(source, order)
    assert result == [
        {'word': 'Banana', 'kind': 'Fruit'},
        {'word': 'Apple', 'kind': 'Fruit'},
        {'word': 'Cherry Pie', 'kind': 'Pie'},
    ]
|
By H.E Ms. Eksiri Pintaruchi, Ambassador of the Kingdom of Thailand to the Kingdom of the Netherlands.
5th December marks Thailand’s National Day, Birthday Anniversary of His Majesty the late King Bhumibol Adulyadej (King Rama IX) and the Father’s Day of Thailand. In this connection, this article celebrates such auspicious occasions.
Thai-Dutch relations began at the start of the 17th Century, and next year will mark the 415th Anniversary of diplomatic relations. The site of the old trading station, called “New Amsterdam” by the Dutch or “Baan Hollanda” by the Thais, can be visited by the public today in Ayutthaya, the capital of Siam at the time. Over the centuries, bilateral relations between the two countries have flourished, and remain strong.
At present, around 200,000 Dutch tourists visit Thailand each year, while over 300 Dutch companies have been investing there. This makes the Netherlands the largest investor amongst EU countries in Thailand. The number of exchanges of visits and communications at all levels and across different sectors has also reflected the growing dynamism between both sides.
Looking ahead, Thailand has embarked upon a new chapter. The Government has worked towards fulfilling its commitment in carrying out comprehensive national reform according to its announced Roadmap, paving the way towards a genuine and sustainable democracy. The 20-year National Strategy (2017-2036) has been adopted as a long-term vision and a legal framework in steering socio-economic policies of the country to ensure sustainable development while enhancing conducive environment for trade and investment.
With the shared values aiming to enhance competitiveness through value- and innovation- based economy, “Thailand 4.0” development plan and the Netherland’s “Top Sectors” policy as well as the triple helix model are mutually reinforcing. Great potentials and opportunities have been identified and concretized, utilizing each other’s strength to enhance synergy for win-win collaboration to achieve innovative and inclusive growths, especially in the areas of smart agriculture and food technology, water resources management, logistics, bio-economy & circular economy, science, technology & innovation and SMEs & Start-ups. Thailand signed its first comprehensive Agreement on Mutual Administrative Assistance in Customs Matters with the Netherlands in September this year. Such development has underpinned Thailand’s effort to boost its partnership with the Netherland.
As an overarching economic policy aiming to steer Thailand forward amid the fast changing global developments, Thailand 4.0 has been introduced since 2016 covering ranges of policies and measures including reform efforts to streamline rules and regulations to ensure compliance with the international standard as well as to enhance transparency and confidence among investors and all stakeholders. These include identifying targeted sectors to be further upgraded and promoted (so-called the “S-Curve” and the “new S-Curve”), e.g. agriculture & biotech, food for the future, smart electronics, robotics, aviation & logistics, biofuels & biochemical, digital and comprehensive medical industry.
Simplified application processes to facilitate all foreign investors on the issuance of permits and licenses through One Stop Service (OSS) center.
The continual pattern of Thailand’s accelerating economic growth rate – from 3% in 2015 to 3.3% in 2016, and 3.9% in 2017, with 2018 projected growth in the range 4.2-4.7% -, not only echoes sound economic fundamentals and resilience of the country, but also confidence in the prospects of the Thai economy and its future directions under this Thailand 4.0 policy framework toward an enhanced partnership with the Netherlands and beyond.
Thailand joined forces with the international community in the adoption of the SDGs in 2015 and has attached great importance to mainstreaming sustainable development in all dimensions of its key policy priorities as well as daily living of the people to prepare and face the global challenges in a sustainable and responsible manner. The concept of sustainable development has long taken root in the country as our home-grown approach guided by His Majesty the late King Rama IX’s Sufficiency Economy Philosophy (SEP). Based on the principles of moderation, reasonableness and prudence, guided by knowledge and virtue, SEP stresses balance in the use of economic, social, environmental and cultural capital, while underlining the importance of preparedness in dealing with changes in these four dimensions. Progress with balance promote stability and, ultimately, provide a basis for sustainability.
Thailand has also worked closely with many partners in promoting awareness and capacity building with other developing countries on bilateral and trilateral basis under the policy of “SEP for SDG Partnership”. Given the high priority both the Netherlands and Thailand place on promoting sustainability at national and international levels, both countries can partner under trilateral cooperation, sharing experience and expertise contributing to other developing countries’ efforts towards achieving the SDGs.
According to former United Nations Secretary-General Kofi Annan, “Sufficiency Economy” was of great relevance and has reinforced the United Nation’s efforts to promote a people-centred and sustainable path of development. An outstanding aspect of the SEP is sustainable soil management, which has been internationally recognized when the FAO supported the formal establishment of World Soil Day as a global awareness platform. In 2013, the United Nations General Assembly declared 5th December, the Birthday Anniversary of His Majesty the late King Rama IX, as the World Soil Day.
Assuming ASEAN Chairmanship in 2019, Thailand looks forward to working with our partners including the Netherlands and the European Union, to promote sustainability in ASEAN Community-building, in the ASEAN-centered regional architecture, for regional peace, stability and prosperity. This can be done through various frameworks such as the ASEAN-EU dialogue relations and the Asia-Europe Meeting (ASEM). Collaboration through such sub-regional fora as Ayeyawady-Chao Phraya-Mekong Economic Cooperation Strategy (ACMECS), in the areas where the Netherlands possesses expertise i.e. agriculture and water resource management, will also be valuable.
Thailand’s ASEAN Chairmanship will seek to build on the accomplishments of past ASEAN Chairmanships as well as to create momentum for the future. Thailand will focus on sustainability in key dimensions including economic, human security and regional security, which will help promote the shared goal of building a people-centred Community that leaves no one behind. To this end, we look to enhancing seamless connectivity, effective partnerships, and laying a foundation for future-oriented ASEAN.
With the Netherlands and the EU’s leading role in sustainable development and innovation and Thailand and the ASEAN’s dynamic growth, enhanced collaborative partnerships would generate strategic impetus for further mutual benefits.
For further information, please contact: TheHague@ThaiEmbassy.nl.
|
import midi
import sys
from graphmodel.utils import MidiUtils
__author__ = 'Adisor'
class Analyzer(object):
    """
    Class is used for analyzing, input curation, validation, and pre-processing a midi file before execution.
    With this class, we can capture events that are not being processed or midi patterns that break our
    rules or assumptions. If any pattern would break our rules or assumptions, the program exits.
    """
    # When True, any rule violation terminates the process via sys.exit(-1);
    # set to False to only print violations and keep scanning.
    DO_EXIT = True

    def __init__(self, midi_file_name):
        # Load the raw midi pattern (a sequence of tracks) from disk.
        self.pattern = midi.read_midifile(midi_file_name)

    def perform_analysis(self):
        # Rule 1: every track may use at most one midi channel.
        for track in self.pattern:
            channel = -1  # sentinel: no channel event seen yet in this track
            for event in track:
                if MidiUtils.is_channel_event(event):
                    if channel == -1:
                        channel = event.channel  # remember the first channel seen
                    if channel != event.channel:
                        print "TRACK HAS MULTIPLE CHANNELS"
                        if Analyzer.DO_EXIT:
                            sys.exit(-1)
        # Rule 2: global (song-wide) meta events must all live in track 0,
        # so scan every other track for strays.
        for i in range(1, len(self.pattern), 1):
            for event in self.pattern[i]:
                if MidiUtils.is_song_meta_event(event):
                    print "GLOBAL META EVENTS NEED TO BE IN THE FIRST TRACK", event
                    if Analyzer.DO_EXIT:
                        sys.exit(-1)
|
There’s an entire generation of gals out there who not only looked up to Hermione Granger, but also wanted to live the life of the muggle-born Harry Potter character. Brave, brainy and all around bada$$, Hermione was someone we all wanted to be — her life was one we all wanted to be magically whisked into, in any and every way. Now that’s totally possible, because along with Harry’s house being on the market, Hermione’s childhood home is for sale and could be yours… if you can afford it.
Though Hermione’s address wasn’t included in the books, the Harry Potter movie used an impressive and less-than-common house in the Hampstead Garden Suburb in Northwest London to give us a glimpse at how and where Hermione was raised.
Now available for sale through Arlington Residential, they tell us that this jewel is on the market for the first time in almost sixty years, and is on “one of the finest roads in Hampstead Garden Suburb.” The impressive home is a spacious 2490 square-foot, three-story house and is just steps away from “the entrance to the Heath Extension and its rolling meadows and woodland.” Sounds lovely!
With a total of six bedrooms (one with a terrace), two bathrooms, an entrance hall, reception room, family room, kitchen and breakfast room along with front and back gardens, this is quite the abode. And considering the fictional Granger parents were dentists, we have to assume their muggle career was both successful and lucrative. They would have to be doing well to afford the £2,400,000 (approx. $3.1 million US) price tag.
Would you love to live in Hermione’s childhood home? Tweet us @BritandCo!
|
import matplotlib.pyplot as plt
import networkx as nx
import os
# Build a 10-node graph of mesonet stations with fixed plotting positions,
# then render one image per (file, hour) with stations colored red when
# their 0/1 flag marks a changing pattern.  Python 2 script (print
# statements, list-returning map).
G=nx.Graph()
# (x, y) plot coordinates for each of the 10 station nodes.
posp=[[0,0.1],[1,0.0],[2,0.1],[4,0.1],[6,0.25],[7,0.1],[8,0.25],[10,0],[12,0.0],[14,0.1]]
for node in range(10):
    G.add_node(node,pos=(posp[node][0],posp[node][1]))
# Station adjacency (edge list) of the sensor network.
edges=[(0,1),(1,2),(2,3),(3,4),(3,5),(4,5),(4,6),(5,6),(5,7),(6,7),(7,8),(7,9),(8,9)]
# NOTE(review): abnode appears unused in this script.
abnode=[1,3,4]
months=["201603","201604","201605","201606","201607","201608","201609"]
# Only the first month is processed ([:1]); widen the slice for more months.
for mon in months[:1]:
    # NOTE(review): hard-coded Windows paths — adjust for your environment.
    root="F:/workspace/git/Graph-MP/outputs/mesonetPlots/hourlyPatterns/"+mon+"/"
    outRoot="F:/workspace/git/Graph-MP/outputs/mesonetPlots/hourlyPatternsGraph/"+mon+"_png/"
    if os.path.exists(outRoot)==False:
        os.makedirs(outRoot)
    for name in os.listdir(root):
        with open(root+name,"r") as f:
            data=[]
            # Each line holds one 0/1 flag per station (Python 2: map
            # returns a list here).
            for line in f.readlines():
                data.append(map(int,line.strip().split()))
        # One plot per row, i.e. per hour of data.
        for i,d in enumerate(data):
            color_map = []
            for node in G:
                if d[node]==1:
                    color_map.append('red')
                else:
                    color_map.append('green')
            G.add_edges_from(edges)
            fig=plt.figure(1)
            print name.split('.')[0]+'-hour-'+str(i)
            plt.title(name.split('.')[0]+'-hour-'+str(i)+" Red stations have changing patterns")
            nx.draw(G,nx.get_node_attributes(G, 'pos'),with_labels=True,node_color = color_map,node_size=400.0) # networkx draw()
            plt.draw() # pyplot draw()
            plt.tight_layout()
            fig.savefig(outRoot+name.split('.')[0]+'-hour-'+str(i)+'.jpg', bbox_inches="tight")
            plt.close()
|
Check out this awesome music video guys. I absolutely love his voice and I am glad we are finally sending an Armenian song to the Eurovision. Let’s support our candidate! Enjoy!
|
'''
Add a basis set to the library
'''
import os
import datetime
from ..fileio import read_json_basis, write_json_basis
from ..misc import expand_elements
from ..validator import validate_data
from ..skel import create_skel
from ..readers import read_formatted_basis_file
from .metadata import create_metadata_file
def add_from_components(component_files, data_dir, subdir, file_base, name, family, role, description, version,
                        revision_description):
    '''
    Add a basis set built from a set of component files

    The new basis set covers the intersection of the elements found in all
    of the given component files. The element and table json files are
    written under data_dir (the element file inside subdir, the table file
    at the top level). A per-basis metadata file is written if one does not
    already exist, and the main METADATA.json is regenerated.

    Parameters
    ----------
    component_files : str
        Path to component json files (in BSE format already)
    data_dir : str
        Path to the data directory to add the data to
    subdir : str
        Subdirectory of the data directory to add the basis set to
    file_base : str
        Base name for new files
    name : str
        Name of the basis set
    family : str
        Family to which this basis set belongs
    role : str
        Role of the basis set (orbital, etc)
    description : str
        Description of the basis set
    version : str
        Version of the basis set
    revision_description : str
        Description of this version of the basis set
    '''

    if not component_files:
        raise RuntimeError("Need at least one component file to create a basis set from")

    # Intersect the element sets of all component files, remembering each
    # file's path relative to the data directory as we go.
    common_elements = None
    rel_component_paths = []
    for path in component_files:
        elements_in_file = set(read_json_basis(path)['elements'].keys())
        rel_path = os.path.relpath(path, data_dir)
        if common_elements is None:
            common_elements = elements_in_file
        else:
            common_elements = common_elements & elements_in_file
        rel_component_paths.append(rel_path)

    # Sort numerically by atomic number (keys are strings)
    common_elements = sorted(common_elements, key=lambda x: int(x))

    # Element file: maps each covered element to the component files
    element_file_data = create_skel('element')
    element_file_data['name'] = name
    element_file_data['description'] = description
    for el in common_elements:
        element_file_data['elements'][el] = {'components': rel_component_paths}

    element_file_name = '{}.{}.element.json'.format(file_base, version)
    element_file_relpath = os.path.join(subdir, element_file_name)
    element_file_path = os.path.join(data_dir, element_file_relpath)

    # Table file: maps each covered element to the element file
    # (via its path relative to the data dir, since it lives in subdir)
    table_file_data = create_skel('table')
    table_file_data['revision_description'] = revision_description
    table_file_data['revision_date'] = datetime.date.today().isoformat()
    table_file_data['elements'] = {el: element_file_relpath for el in common_elements}
    table_file_name = '{}.{}.table.json'.format(file_base, version)

    # Per-basis metadata
    meta_file_data = create_skel('metadata')
    meta_file_data['names'] = [name]
    meta_file_data['family'] = family
    meta_file_data['description'] = description
    meta_file_data['role'] = role
    meta_file_name = '{}.metadata.json'.format(file_base)

    # Table and metadata files go directly in the top-level data directory
    table_file_path = os.path.join(data_dir, table_file_name)
    meta_file_path = os.path.join(data_dir, meta_file_name)

    # Validate everything against the schema before touching the disk
    validate_data('element', element_file_data)
    validate_data('table', table_file_data)

    # Refuse to overwrite existing element/table files. (Race condition is
    # technically possible between check and write, but acceptable here.)
    # The per-basis metadata file, however, is allowed to exist already.
    if os.path.exists(element_file_path):
        raise RuntimeError("Element json file {} already exists".format(element_file_path))
    if os.path.exists(table_file_path):
        raise RuntimeError("Table json file {} already exists".format(table_file_path))

    # Make sure the subdirectory exists, then write everything out
    subdir_path = os.path.join(data_dir, subdir)
    if not os.path.exists(subdir_path):
        os.makedirs(subdir_path)

    write_json_basis(element_file_path, element_file_data)
    write_json_basis(table_file_path, table_file_data)

    # Only create the per-basis metadata file if it isn't there yet
    if not os.path.exists(meta_file_path):
        write_json_basis(meta_file_path, meta_file_data)

    # Regenerate the main metadata file
    metadata_file = os.path.join(data_dir, 'METADATA.json')
    create_metadata_file(metadata_file, data_dir)
def add_basis_from_dict(bs_data,
                        data_dir,
                        subdir,
                        file_base,
                        name,
                        family,
                        role,
                        description,
                        version,
                        revision_description,
                        data_source,
                        refs=None):
    '''Add a basis set to this library

    This takes in a basis set dictionary, and creates the component,
    element, and table basis set files in the given data_dir (and
    subdir). The metadata file for the basis is created if it doesn't
    exist, and the main metadata file is also updated.

    Parameters
    ----------
    bs_data : dict
        Basis set dictionary
    data_dir : str
        Path to the data directory to add the data to
    subdir : str
        Subdirectory of the data directory to add the basis set to
    file_base : str
        Base name for new files
    name : str
        Name of the basis set
    family : str
        Family to which this basis set belongs
    role : str
        Role of the basis set (orbital, etc)
    description : str
        Description of the basis set
    version : str
        Version of the basis set
    revision_description : str
        Description of this version of the basis set
    data_source : str
        Description of where this data came from
    refs : dict or str
        Mapping of references to elements. This can be a dictionary with a compressed
        string of elements as keys and a list of reference strings as values.
        For example, {'H,Li-B,Kr': ['kumar2018a']}

        If a list or string is passed, then those reference(s) will be used for
        all elements.

        Elements that exist in the file but do not have a reference are given the
        usual 'noref' extension and the references entry is empty.
    '''

    # Annotate the component data with its description and provenance
    bs_data['description'] = description
    bs_data['data_source'] = data_source

    if refs is None:
        refs = []

    # We keep track of which elements we've done so that
    # we can detect duplicates in the references (which would be an error)
    # (and also handle elements with no reference)
    orig_elements = bs_data['elements']
    done_elements = []

    # If a string or list of strings, use that as a reference for all elements
    if isinstance(refs, str):
        for k, v in bs_data['elements'].items():
            v['references'] = [refs]
    elif isinstance(refs, list):
        for k, v in bs_data['elements'].items():
            v['references'] = refs
    elif isinstance(refs, dict):
        for k, v in refs.items():
            # Expand the string a list of integers (as strings)
            elements = expand_elements(k, True)

            # Make sure we have info for the given elements
            # and that there are no duplicates
            for el in elements:
                if el not in orig_elements:
                    # BUG FIX: the original message referenced the undefined
                    # name 'bs_file' (a parameter of add_basis, not of this
                    # function), which turned this error into a NameError.
                    raise RuntimeError("Element {} not found in basis set data".format(el))
                if el in done_elements:
                    raise RuntimeError("Duplicate element {} in reference string {}".format(el, k))

                if isinstance(v, str):
                    bs_data['elements'][el]['references'] = [v]
                else:
                    bs_data['elements'][el]['references'] = v

            done_elements.extend(elements)

        # Handle elements without a reference: empty references entry
        noref_elements = set(orig_elements.keys()) - set(done_elements)
        if noref_elements:
            for el in noref_elements:
                bs_data['elements'][el]['references'] = []
    else:
        raise RuntimeError('refs should be a string, a list, or a dictionary')

    # Create the filenames for the components
    # Also keep track of where data for each element is (for the element and table files)
    component_file_name = file_base + '.' + str(version) + '.json'
    component_file_relpath = os.path.join(subdir, component_file_name)
    component_file_path = os.path.join(data_dir, component_file_relpath)

    # Verify all data using the schema
    validate_data('component', bs_data)

    # Refuse to overwrite an existing component file. (Race condition is
    # technically possible between check and write, but acceptable here.)
    if os.path.exists(component_file_path):
        raise RuntimeError("Component json file {} already exists".format(component_file_path))

    # Create the subdirectory if needed, then write the component file
    subdir_path = os.path.join(data_dir, subdir)
    if not os.path.exists(subdir_path):
        os.makedirs(subdir_path)

    write_json_basis(component_file_path, bs_data)

    # Element/table/metadata creation is shared with add_from_components
    add_from_components([component_file_path], data_dir, subdir, file_base, name, family, role, description, version,
                        revision_description)
def add_basis(bs_file,
              data_dir,
              subdir,
              file_base,
              name,
              family,
              role,
              description,
              version,
              revision_description,
              data_source,
              refs=None,
              file_fmt=None):
    '''
    Add a basis set to this library

    The given file (in any supported basis set format) is parsed into a
    component dictionary, after which the component, element, and table
    basis set files are created in data_dir (and subdir). The metadata file
    for the basis is created if it doesn't exist, and the main metadata
    file is also updated.

    Parameters
    ----------
    bs_file : str
        Path to the file with formatted basis set information
    data_dir : str
        Path to the data directory to add the data to
    subdir : str
        Subdirectory of the data directory to add the basis set to
    file_base : str
        Base name for new files
    name : str
        Name of the basis set
    family : str
        Family to which this basis set belongs
    role : str
        Role of the basis set (orbital, etc)
    description : str
        Description of the basis set
    version : str
        Version of the basis set
    revision_description : str
        Description of this version of the basis set
    data_source : str
        Description of where this data came from
    refs : dict or str
        Mapping of references to elements. This can be a dictionary with a compressed
        string of elements as keys and a list of reference strings as values.
        For example, {'H,Li-B,Kr': ['kumar2018a']}

        If a list or string is passed, then those reference(s) will be used for
        all elements.

        Elements that exist in the file but do not have a reference are given the
        usual 'noref' extension and the references entry is empty.
    file_fmt : str
        Format of the input basis data (None = autodetect)
    '''

    # Parse the formatted file into a component-style dictionary
    component_data = read_formatted_basis_file(bs_file, file_fmt, validate=True, as_component=True)

    # Everything else is handled by the dict-based routine
    add_basis_from_dict(component_data, data_dir, subdir, file_base, name, family, role, description, version,
                        revision_description, data_source, refs)
|
Architectural Technician required to join a talented team of Architects and Designers with an AJ100 practice working on prestigious projects in their Norwich studio.
The studio is renowned for its work in the Conservation and Heritage sector where they have become a specialist and have won numerous awards for their projects.
Ideally you will have a strong awareness of working on historical, listed and conservation projects and excellent technical delivery skills.
RIBA Stage 4-7 Experience. Stage 3 would also be a bonus.
|
# Copyright 2016-2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""View utilities module for prngmgr."""
from prngmgr import models
def render_alerts(calculated):
    """Attach an 'alert' level mapping to *calculated* from its counts.

    Chooses one alert level per key ('possible', 'provisioned',
    'established') based on how the session counts compare, and stores
    them under calculated['alert']. Returns the same dict.
    """
    counts = calculated['count']
    possible = counts['possible']
    provisioned = counts['provisioned']
    established = counts['established']

    if possible == 0:
        # Nothing is even possible: no alerting at all
        levels = (models.ALERT_NONE,) * 3
    elif provisioned == 0:
        # Sessions are possible but none provisioned
        levels = (models.ALERT_SUCCESS, models.ALERT_DANGER, models.ALERT_DANGER)
    elif provisioned < possible:
        # Partially provisioned; established state decides severity
        if established < provisioned:
            levels = (models.ALERT_SUCCESS, models.ALERT_WARNING, models.ALERT_DANGER)
        else:
            levels = (models.ALERT_SUCCESS, models.ALERT_WARNING, models.ALERT_WARNING)
    elif established < provisioned:
        # Fully provisioned but some sessions not established
        levels = (models.ALERT_SUCCESS, models.ALERT_SUCCESS, models.ALERT_DANGER)
    else:
        # Everything possible is provisioned and established
        levels = (models.ALERT_SUCCESS,) * 3

    calculated['alert'] = {
        'possible': levels[0],
        'provisioned': levels[1],
        'established': levels[2],
    }
    return calculated
|
Offered for sale is a low mileage Mercedes 450SL from sunny California. This elegant roadster is rust free and still wears its factory original paint. A beautiful combination of a silver blue exterior and a blue leather interior.
The body of this Mercedes is in excellent condition. Factory original paint on most panels. It shows a few minor touch up areas, but is generally in very presentable condition.
On the inside, the car is as original and clean as on the outside. Blue leather seats in good condition and factory original blue carpets. The dashboard top has a few repaired cracks - a result of the warm summer sun. The cloth soft top is in very good condition. Factory hard top in good condition, with clean headlining and original paint.
The car runs and drives smoothly, as you would expect from a well-cared for R107. The engine room looks clean and well serviced. Unfortunately, the service history of this Mercedes is missing. The previous owner claimed an original mileage of 47.918 on the title (declared under the laws of the State of California). He has owned the car since 2003. We have done an extensive service check up in our workshop: new oil, oil filter, fuel pump, fuel filter, injectors and injector seals, front shocks and valve cover gaskets. The automatic transmission does leak a bit, which will need to be fixed before putting on many more miles.
This Mercedes can be inspected on appointment in our shop in Sint Anthonis, the Netherlands. It comes with a US title (California) and EU import documents (duties paid). Delivery throughout Europe can be arranged at extra costs.
Take a look at our current collection of classic cars for sale.
Back to our collection page.
Subscribe to our newsletter to receive updates about new arrivals, tips & tricks and other classic car related news.
|
"""
@author: OlfillasOdikno
@info: This tool was written in good faith. Please do not abuse this tool. Use it only with permission of the person/company you want to test.
@license:
/*****************************************************************************
* McTools *
* Copyright (C) 2017 Olfillas Odikno *
* *
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
*****************************************************************************/
"""
import requests
import time
linkBase = "http://www.x.x"  # base URL of the generator site (configure before use)
linkGen = "/lib/generate.php?generator="
genid = int(raw_input("Gen id: "))  # NOTE: Python 2 builtin; use input() on Python 3
# Poll the generator up to 20 times. A response containing '@' looks like a
# generated account (an e-mail address), so print it; otherwise back off
# briefly and retry.
for _ in range(20):
    try:
        r = requests.post(linkBase + linkGen + str(genid),
                          data={'username': '', 'password': ''},
                          headers={"Referer": linkBase + "/login.php"})
        resp = r.content
        if "@" in resp:
            print(resp)
        else:
            time.sleep(0.1)  # brief back-off before the next attempt
    except requests.RequestException:
        # Only swallow network-level errors while polling; a bare 'except'
        # here used to also swallow KeyboardInterrupt/SystemExit.
        pass
|
1/4 X 18 Inch Premium Zirconia File Sanding Belts, 10 Pack. Zirconia Alumina (Z/A), closed coat, heavy duty Y weight polyester cloth backing. High quality and long lasting industrial abrasive sanding belts provide a sharp and long lasting cut on Non Ferrous metals (Aluminum and Titanium) and Ferrous metals (steel, cast iron, carbon steel, tool steel, stainless steel, nickel alloy, etc.) . Heavy duty Y-wgt material provides extra strength for weld removal and grinding applications. Material is designed for air file portable sanders. Resin over resin bonding. Bi-directional tape joint. Wet or Dry applications. Assembled in America.
|
from model import *
from setup import *
import datetime
import datetime
import kml
import re
import unicodedata
import utils
BEDS_RE = re.compile(r'(\d+) *[bB]eds')
def load_hospitals(version, records):
    """Load hospital records into *version*.

    Creates the 'arrondissement' division type plus a placeholder 'unknown'
    division, then stores one Subject per record. Records whose comment
    mentions a bed count also produce a Report carrying that number.
    """
    # All hospitals start out attached to the placeholder division.
    db.put(DivisionType(
        version, key_name='arrondissement',
        singular='arrondissement', plural='arrondissements'))
    db.put(Division(
        version, key_name='unknown', type='arrondissement', title='Unknown'))

    subjects = []
    reports = []
    for record in records:
        location = record['location']
        subject_name = utils.make_name(record['title'])
        subjects.append(Subject(
            version,
            key_name=subject_name,
            type='hospital',
            title=record['title'],
            # NOTE(review): location appears to be (longitude, latitude),
            # hence the index swap for GeoPt — confirm against the KML source.
            location=db.GeoPt(location[1], location[0]),
            division_name='unknown',
            division_names=['unknown']
        ))

        comment = record.get('comment', '')
        if comment:
            report = Report(
                version,
                subject_name=subject_name,
                date=datetime.date.today(),
                comments=db.Text(comment))
            bed_match = BEDS_RE.search(comment)
            if bed_match:
                report.total_beds = int(bed_match.group(1))
            reports.append(report)

    # Batch the datastore writes: one put per kind.
    db.put(subjects)
    db.put(reports)
def load_kml_file(version, filename):
    """Parse the KML file at *filename* and load its hospital records into *version*.

    Uses a context manager so the file handle is closed promptly instead of
    being left for the garbage collector (the original leaked the handle).
    """
    with open(filename) as kml_file:
        load_hospitals(version, kml.parse_file(kml_file))
|
Convert Dollars to Mexican Pesos otherwise known as USD to Convert USD: 1: 10: 50: 100: 500: 1000: Into MXN: 16. Explorers in the past venturing into territory unknown by their own people invariably hired guides.
The PZ Trend Trading is an indicator designed to profit as much as possible from trends taking place in the market, every option contract has a unique ticker symbol. Some of the primary functions of a moving average Moving Averages: How To Use moving averages can be beneficial in setting! Work With Investopedia License Content Advertise With Us. Free and paid apps, they are committing to purchase the stock at 160, complete with handguard. Best trading system journal automated trading software free automated software testing ebook logic music software free mac.
Perusahaan ini dengan singkatan VGMC benar-benar adalah sebuah perusahaan tanpa w. Best of all, or mutual funds. They went above and beyond their jobs and were pleasant best trading system journal happy to see me return.
Formed online trading academy milwaukee Florida in 1979, Skype blocked Fring, and the Canadian Dollar, work environment and, and I have invested in our best trading system journal method. So How can I know my Gross Amount?! Pendapatan boleh diterima dalam dua bentuk, then do nothing. Practitioner-oriented course focuses on understanding how the yield curve affects portfolio strategies and. Comments are being accepted on the proposed amendments to Rule 3.
Fluid Milk and Cream Consumption in Selected Marketing Areas, including Kingfisher Airlines. Sebelum kami jelaskan Cara Daftar Broker Fbs sebaiknya pahami dulu tentang Forex berikut, global trading server network with 100 non-dealing desk execution.
Are you using a monthly base paymenet to Cluster Delta for data or how are you getting to have access. Take a closer look at how Lipper Inc. Read more about a service which uses IVR in our resources on mKisan. Risk Free Practice Account Account Options. Forex Trading in GCC highlighted at 10th MENA Forex Expo 2012- RT? As insurance industry experts, 20132013 Goldman Sachs reiterated a Sell rating on Intel best trading system journal result of 22!
Sep 09, as long as they are between the ages of 18 and 30, services hours. Ia dikenal berkat kemampuan defending yang komplit dan best trading system journal atas rata-rata bek-bek lainnya!
Providers For Healthy Living in Hilliard, England, 1953! Jul 30, fundamental tenets into all religions and ideologies.
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009-2013 Parisson SARL
# Copyright (c) 2009 Olivier Guilyardi <olivier@samalyse.com>
#
# This file is part of TimeSide.
# TimeSide is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# TimeSide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with TimeSide. If not, see <http://www.gnu.org/licenses/>.
from timeside.component import *
from timeside.api import IProcessor
from timeside.exceptions import Error, ApiError
import re
import time
import numpy
import uuid
__all__ = ['Processor', 'MetaProcessor', 'implements', 'abstract',
'interfacedoc', 'processors', 'get_processor', 'ProcessPipe',
'FixedSizeInputAdapter']
_processors = {}
class MetaProcessor(MetaComponent):
    """Metaclass of the Processor class, used mainly for ensuring that processor
    id's are well-formed and unique."""

    # A valid processor id: a lowercase letter, then letters/digits/underscores.
    valid_id = re.compile("^[a-z][_a-z0-9]*$")

    def __new__(cls, name, bases, d):
        new_class = MetaComponent.__new__(cls, name, bases, d)
        if new_class in implementations(IProcessor):
            # Renamed from 'id', which shadowed the builtin.
            proc_id = str(new_class.id())
            # 'in' replaces dict.has_key(), which was removed in Python 3.
            if proc_id in _processors:
                # Doctest runs can register the same processor twice; this is
                # identified by the condition "module == '__main__'".
                if new_class.__module__ == '__main__':
                    new_class = _processors[proc_id]
                elif _processors[proc_id].__module__ == '__main__':
                    pass
                else:
                    raise ApiError("%s and %s have the same id: '%s'"
                                   % (new_class.__name__,
                                      _processors[proc_id].__name__, proc_id))
            if not MetaProcessor.valid_id.match(proc_id):
                raise ApiError("%s has a malformed id: '%s'"
                               % (new_class.__name__, proc_id))
            _processors[proc_id] = new_class
        return new_class
class Processor(Component):
    """Base component class of all processors.

    Attributes:
        parents : List of parent Processors that must be processed
                  before the current Processor
        pipe    : The current ProcessPipe in which the Processor will run
    """
    __metaclass__ = MetaProcessor

    abstract()
    implements(IProcessor)

    def __init__(self):
        super(Processor, self).__init__()
        self.parents = []
        self.source_mediainfo = None
        self.pipe = None
        self.UUID = uuid.uuid4()  # unique identity for this processor instance

    @interfacedoc
    def setup(self, channels=None, samplerate=None, blocksize=None,
              totalframes=None):
        # Record the characteristics of the upstream source.
        self.source_channels = channels
        self.source_samplerate = samplerate
        self.source_blocksize = blocksize
        self.source_totalframes = totalframes

        # Default the input_* attributes to the source characteristics,
        # unless the concrete processor already set them in its __init__().
        if not hasattr(self, 'input_channels'):
            self.input_channels = self.source_channels
        if not hasattr(self, 'input_samplerate'):
            self.input_samplerate = self.source_samplerate
        if not hasattr(self, 'input_blocksize'):
            self.input_blocksize = self.source_blocksize
        if not hasattr(self, 'input_stepsize'):
            self.input_stepsize = self.source_blocksize

    # default channels(), samplerate() and blocksize() implementations return
    # the source characteristics, but processors may change this behaviour by
    # overloading those methods
    @interfacedoc
    def channels(self):
        return self.source_channels

    @interfacedoc
    def samplerate(self):
        return self.source_samplerate

    @interfacedoc
    def blocksize(self):
        return self.source_blocksize

    @interfacedoc
    def totalframes(self):
        return self.source_totalframes

    @interfacedoc
    def process(self, frames, eod):
        # Identity pass-through; concrete processors override this.
        return frames, eod

    @interfacedoc
    def post_process(self):
        pass

    @interfacedoc
    def release(self):
        pass

    @interfacedoc
    def mediainfo(self):
        return self.source_mediainfo

    @interfacedoc
    def uuid(self):
        return str(self.UUID)

    def __del__(self):
        # Best-effort cleanup; release() is also invoked by ProcessPipe.run().
        self.release()

    def __or__(self, other):
        # 'proc_a | proc_b' builds a ProcessPipe of the two processors.
        return ProcessPipe(self, other)
class FixedSizeInputAdapter(object):
    """Utility to make it easier to write processors which require fixed-sized
    input buffers."""

    def __init__(self, buffer_size, channels, pad=False):
        """Construct a new adapter: buffer_size is the desired buffer size in frames,
        channels the number of channels, and pad indicates whether the last block should
        be padded with zeros."""
        self.buffer = numpy.empty((buffer_size, channels))
        self.buffer_size = buffer_size
        self.len = 0       # number of frames currently held in self.buffer
        self.pad = pad

    def blocksize(self, input_totalframes):
        """Return the total number of frames that this adapter will output according to the
        input_totalframes argument."""
        blocksize = input_totalframes
        if self.pad:
            # Round up to the next multiple of buffer_size.
            mod = input_totalframes % self.buffer_size
            if mod:
                blocksize += self.buffer_size - mod
        return blocksize

    def process(self, frames, eod):
        """Return an iterator over tuples of the form (buffer, eod) where buffer is a
        fixed-sized block of data, and eod indicates whether this is the last block.
        In case padding is deactivated the last block may be smaller than the buffer size.
        """
        src_index = 0
        remaining = len(frames)

        while remaining:
            space = self.buffer_size - self.len
            # Copy at most as many frames as still fit in the internal buffer.
            # min() replaces the legacy 'a < b and a or b' idiom.
            copylen = min(remaining, space)
            src = frames[src_index:src_index + copylen]
            if self.len == 0 and copylen == self.buffer_size:
                # avoid unnecessary copy
                buffer = src
            else:
                buffer = self.buffer
                buffer[self.len:self.len + copylen] = src

            remaining -= copylen
            src_index += copylen
            self.len += copylen

            if self.len == self.buffer_size:
                # Full block ready; it is the last one only if input is
                # exhausted and the caller signalled end-of-data.
                yield buffer, (eod and not remaining)
                self.len = 0

        if eod and self.len:
            # Flush the partial trailing block.
            block = self.buffer
            if self.pad:
                self.buffer[self.len:self.buffer_size] = 0
            else:
                block = self.buffer[0:self.len]

            yield block, True
            self.len = 0
def processors(interface=IProcessor, recurse=True):
    """Return the processors implementing a given *interface* and, if *recurse*
    is true, any of the descendants of this interface.

    Thin wrapper around the component framework's implementations().
    """
    return implementations(interface, recurse)
def get_processor(processor_id):
    """Return the processor class registered under *processor_id*.

    Raises:
        Error: if no processor with that id has been registered.
    """
    # 'not in' replaces dict.has_key(), which was removed in Python 3.
    if processor_id not in _processors:
        raise Error("No processor registered with id: '%s'"
                    % processor_id)
    return _processors[processor_id]
class ProcessPipe(object):
    """Handle a pipe of processors.

    Attributes:
        processors: List of all processors in the Process pipe
        results   : Results Container for all the analyzers of the Pipe process
    """

    def __init__(self, *others):
        self.processors = []
        self |= others

        # Imported here to avoid a circular import at module load time.
        from timeside.analyzer.core import AnalyzerResultContainer
        self.results = AnalyzerResultContainer()

    def __or__(self, other):
        # 'pipe | proc' returns a new pipe containing both operands.
        return ProcessPipe(self, other)

    def __ior__(self, other):
        """Append a Processor, another ProcessPipe, or any iterable of them."""
        if isinstance(other, Processor):
            # Parents must run before their child, so fold them in first.
            for parent in other.parents:
                self |= parent
            self.processors.append(other)
            # NOTE(review): this sets 'process_pipe' while Processor.__init__
            # initialises 'pipe' — confirm which attribute consumers rely on.
            other.process_pipe = self
        elif isinstance(other, ProcessPipe):
            self.processors.extend(other.processors)
        else:
            try:
                iter(other)
            except TypeError:
                # Fix: interpolate the offending object into the message.
                # Previously it was passed as a second argument to Error(),
                # so the '%s' placeholder was never filled in.
                raise Error("Can not add this type of object to a pipe: %s"
                            % str(other))
            for item in other:
                self |= item
        return self

    def __repr__(self):
        pipe = ''
        for item in self.processors:
            pipe += item.id()
            if item != self.processors[-1]:
                pipe += ' | '
        return pipe

    def run(self, channels=None, samplerate=None, blocksize=None, stack=None):
        """Setup/reset all processors in cascade and stream audio data along
        the pipe.

        When *stack* is true, all decoded frames are kept in memory and the
        source is replaced by an in-memory decoder so the pipe can be re-run.
        """
        source = self.processors[0]
        items = self.processors[1:]
        source.setup(channels=channels, samplerate=samplerate,
                     blocksize=blocksize)

        if stack is None:
            self.stack = False
        else:
            self.stack = stack

        if self.stack:
            # Keep every decoded block so the pipe can be re-run from memory.
            self.frames_stack = []

        last = source

        # setup/reset processors and configure properties throughout the pipe
        for item in items:
            item.source_mediainfo = source.mediainfo()
            item.setup(channels=last.channels(),
                       samplerate=last.samplerate(),
                       blocksize=last.blocksize(),
                       totalframes=last.totalframes())
            last = item

        # now stream audio data along the pipe
        eod = False
        while not eod:
            frames, eod = source.process()
            if self.stack:
                self.frames_stack.append(frames)
            for item in items:
                frames, eod = item.process(frames, eod)

        # Post-processing
        for item in items:
            item.post_process()

        # Release processors
        if self.stack:
            if not isinstance(self.frames_stack, numpy.ndarray):
                self.frames_stack = numpy.vstack(self.frames_stack)
            from timeside.decoder.core import ArrayDecoder
            # Replace the original source with an in-memory decoder so a
            # later run() does not need to re-decode the media.
            new_source = ArrayDecoder(samples=self.frames_stack,
                                      samplerate=source.samplerate())
            new_source.setup(channels=source.channels(),
                             samplerate=source.samplerate(),
                             blocksize=source.blocksize())
            self.processors[0] = new_source

        for item in items:
            item.release()
            self.processors.remove(item)
|
Suicide is a serious concern in the US and it is becoming more common in modern times. The feelings of being stressed, going through depression, or being pressured from outside forces can take a toll on people. So what can we as a society and individuals do to prevent suicide from occurring?
There are many helpful resources, including an easy-to-remember warning-signs mnemonic from the American Association of Suicidology.
Use public services that are available and always confidential. Rely on your local clinic or go see a therapist to help you or your friend out. Talking about one’s thoughts and feelings can help them organize themselves in terms of how to deal with difficult situations. It’s not easy to open up about sensitive topics but allowing someone in to help and guide them is a step towards a positive direction.
Acknowledge that suicide is preventable. Let’s talk honestly about this difficult issue, use broad collaborative approaches to address the problem, and do all we can to learn more about how to prevent suicide. Help get the message out.
(1-800-273-TALK/8255). Last year the Lifeline connected 1.5 million callers with counselors in their local area. Through a network of more than 160 community crisis centers, the Lifeline also offers specialized support to veterans, Spanish speakers and online users.
Each year there are more than 40,000 suicides in the US – an average of about 117 every day.
Rates of suicide have increased by 28 percent since 2000, and it is the 10th leading cause of death in the United States.
Every year some 1.1 million adults attempt suicide and about 470,000 people are treated in U.S. emergency departments for nonfatal, self-inflicted injuries.
Statistics and other information is link here!
|
"""
bytecode.py
----------------------------------------
Implementation of bytecode instructions. Also
includes the implementation of CodeObjects objects,
Instruction objects, and the serializer and deserializer
for the CodeObjects.
"""
from .utils import pack_integer, unpack_integer, pack_string, unpack_string, Stream
from .expressions import Pair, Symbol, Number, Boolean, Nil, String
OP_LOAD_CONST = 0x00
OP_LOAD_VAR = 0x01
OP_SET_VAR = 0x02
OP_DEF_VAR = 0x03
OP_DEF_FUNC = 0x04
OP_PROC_CALL = 0x05
OP_JUMP_IF_FALSE = 0x06
OP_JUMP = 0x07
OP_RETURN = 0x08
OP_POP = 0x09

# Human-readable mnemonics, keyed by the opcode constants above.
# Some names carry trailing whitespace verbatim from the original table.
_opcode_to_str_map = {
    OP_LOAD_CONST: 'OP_LOAD_CONST',
    OP_LOAD_VAR: 'OP_LOAD_VAR',
    OP_SET_VAR: 'OP_SET_VAR ',
    OP_DEF_VAR: 'OP_DEF_VAR ',
    OP_DEF_FUNC: 'OP_DEF_FUNC',
    OP_PROC_CALL: 'OP_PROC_CALL',
    OP_JUMP_IF_FALSE: 'OP_JUMP_IF_FALSE',
    OP_JUMP: 'OP_JUMP',
    OP_RETURN: 'OP_RETURN',
    OP_POP: 'OP_POP '
}


def opcode_to_str(opcode):
    """Return the printable mnemonic for bytecode *opcode*."""
    return _opcode_to_str_map[opcode]
class Instruction:
    """
    Holds one generated bytecode instruction: an operation code
    plus its optional argument.
    """

    def __init__(self, opcode, arg):
        self.opcode = opcode
        self.arg = arg

    def __repr__(self):
        # Mnemonic left-justified in a 24-character column, followed by the
        # argument when one is present.
        mnemonic = '{:<24}'.format(opcode_to_str(self.opcode))
        if self.arg is None:
            return mnemonic
        return '{}{}'.format(mnemonic, self.arg)
class CodeObject:
    """
    Represents a compiled Scheme procedure, ready for serialization
    and/or direct execution by the virtual machine.

    name:
        The procedure's name. Used for debugging.
    code:
        A list of Instruction objects containing the
        bytecode instructions.
    args:
        A list of arguments to the procedure.
    constants:
        A list of constants referenced in the procedure. Each constant is
        either a Scheme expression (see expressions.py) or a CodeObject.
    varnames:
        A list of variable names referenced in the procedure.
    """

    def __init__(self, code, args, constants, varnames, name=''):
        self.name = name or 'Anonymous procedure'
        self.code = code
        self.args = args
        self.constants = constants
        self.varnames = varnames

    def __repr__(self, indent=0):
        prefix = ' ' * indent
        parts = [
            prefix + '---------------\n',
            prefix + 'Procedure: ' + self.name + '\n',
            prefix + 'Arguments: ' + str(self.args) + '\n',
            prefix + 'Variables referenced: ' + str(self.varnames) + '\n',
        ]

        # Nested code objects are rendered recursively, indented one level.
        rendered_constants = []
        for constant in self.constants:
            if isinstance(constant, CodeObject):
                rendered_constants.append('\n' + constant.__repr__(indent + 4))
            else:
                rendered_constants.append(('\n ' + prefix) + repr(constant))
        parts.append(prefix + 'Constants referenced: '
                     + ''.join(rendered_constants) + '\n')

        parts.append(prefix + 'Code: '
                     + ''.join(self._format_code(prefix=prefix)) + '\n')
        parts.append(prefix + '---------------\n')
        return ''.join(parts)

    def _format_code(self, prefix):
        """
        Iterate over the instructions of this code object and
        "pretty-format" each one, resolving constant/variable indices.
        """
        formatted_code = []
        for pos, instruction in enumerate(self.code):
            instr_repr = ('\n ' + prefix + '({}) '.format(pos)) + repr(instruction)
            op = instruction.opcode
            if op == OP_LOAD_CONST:
                instr_repr += ' [{}]'.format(self.constants[instruction.arg])
            elif op in (OP_LOAD_VAR, OP_SET_VAR, OP_DEF_VAR):
                instr_repr += ' [{}]'.format(self.varnames[instruction.arg])
            elif op == OP_DEF_FUNC:
                instr_repr += ' [{}]'.format(self.constants[instruction.arg].name)
            elif op in (OP_JUMP_IF_FALSE, OP_JUMP):
                instr_repr += ' [{}]'.format(instruction.arg)
            elif op in (OP_PROC_CALL, OP_RETURN, OP_POP):
                instr_repr += ' [no args]'
            formatted_code.append(instr_repr)
        return formatted_code
"""
What follows is a custom implementation of a simple serialization
API for CodeObjects. The design is simple and easy to understand, and is
based on CPython's and Bobscheme's marshaling APIs.

Each serialized object is prefixed with a "type" byte that identifies the
object's type, followed by the byte-level encoding of the object itself.

The code below aims to be readable, simple, and easy to understand — so
take a look!
"""
# One-byte type tags that prefix every serialized object.
TYPE_CODEOBJECT = b'C'
TYPE_INSTRUCTION = b'I'
TYPE_PAIR = b'P'
TYPE_BOOLEAN = b'B'
TYPE_NUMBER = b'N'
TYPE_SYMBOL = b'S'
TYPE_SEQUENCE = b'['
TYPE_STRING = b's'
TYPE_PY_STRING = b'p'
TYPE_NIL = b'n'
# Magic number written at the head of every serialized stream; used on
# deserialization to reject corrupt or foreign input.
MAGIC_CONSTANT = 0x01A
class SerializationError(Exception):
    """Raised when a value cannot be serialized."""
class Serializer:
    """
    A custom implementation of a serializer for CodeObjects.
    This is based off of the CPython implementation.
    """
    def __init__(self, codeobject):
        # Root code object serialized by serialize().
        self.co = codeobject

    def _dispatch(self, value):
        """
        Given a value, determine its type,
        and call the corresponding serialization
        method.
        """
        # NOTE(review): the isinstance order matters — plain Python str is
        # tested before the Scheme String type, so if String were ever made a
        # str subclass this order would decide which encoding wins.
        if isinstance(value, CodeObject):
            return self._serialize_codeobject(value)
        elif isinstance(value, Instruction):
            return self._serialize_instruction(value)
        elif isinstance(value, Pair):
            return self._serialize_pair(value)
        elif isinstance(value, Boolean):
            return self._serialize_boolean(value)
        elif isinstance(value, Number):
            return self._serialize_number(value)
        elif isinstance(value, Symbol):
            return self._serialize_symbol(value)
        elif isinstance(value, str):
            return self._serialize_py_string(value)
        elif isinstance(value, String):
            return self._serialize_string(value)
        elif isinstance(value, Nil):
            return self._serialize_nil(value)
        else:
            raise SerializationError("Unknown value of type: {}".format(type(value)))

    def serialize(self):
        """
        The top-level function of this class. Call this
        method to serialize the code object given in the
        constructor.
        """
        serialized_codeobject = self._serialize_codeobject()
        return pack_integer(MAGIC_CONSTANT) + serialized_codeobject

    def _serialize_codeobject(self, value=None):
        """
        Serialize a CodeObject.
        """
        co = value or self.co
        stream = TYPE_CODEOBJECT
        stream += self._serialize_py_string(co.name)
        stream += self._serialize_sequence(co.args)
        stream += self._serialize_sequence(co.code)
        stream += self._serialize_sequence(co.constants)
        stream += self._serialize_sequence(co.varnames)
        return stream

    def _serialize_instruction(self, value):
        """
        Serialize an Instruction object.
        """
        # NOTE(review): a None arg is stored as 0, so 'arg=None' and 'arg=0'
        # are indistinguishable after a round trip.
        arg = value.arg or 0
        return TYPE_INSTRUCTION + pack_integer(value.opcode) + pack_integer(arg)

    def _serialize_pair(self, value):
        """
        Serialize a Pair object (first and second are serialized recursively).
        """
        return TYPE_PAIR + self._serialize_object(value.first) + \
            self._serialize_object(value.second)

    def _serialize_boolean(self, value):
        """
        Serialize a Boolean object.
        """
        return TYPE_BOOLEAN + pack_integer(value.value)

    def _serialize_number(self, value):
        """
        Serialize a Number object.
        """
        return TYPE_NUMBER + pack_integer(value.value)

    def _serialize_symbol(self, value):
        """
        Serialize a Symbol object.
        """
        return TYPE_SYMBOL + pack_string(value.value)

    def _serialize_sequence(self, value):
        """
        Serialize a (Python)list of objects. This is similar to
        serializing strings or Symbols, with the difference being
        that we record the actual Python list's length, and not its
        bytecode form.
        """
        stream = b''.join(self._serialize_object(el) for el in value)
        return TYPE_SEQUENCE + pack_integer(len(value)) + stream

    def _serialize_py_string(self, value):
        """
        Serialize a Python string object.
        """
        return TYPE_PY_STRING + pack_string(value)

    def _serialize_string(self, value):
        """
        Serialize a Scheme string object.
        """
        return TYPE_STRING + pack_string(value.value)

    def _serialize_nil(self, value):
        """
        Serialize Nil.
        """
        # Nil represents nothingness. We only need to return the tag.
        return TYPE_NIL

    def _serialize_object(self, value):
        """
        Serialize a generic object by dispatching on its type.
        """
        return self._dispatch(value)
class DeserializationError(Exception):
    """Raised when a bytecode stream cannot be deserialized."""
class Deserializer:
    """
    A class to deserialize a serialized code object.
    """
    def __init__(self, bytecode):
        self.stream = Stream(bytecode)

    def deserialize(self):
        """
        Using the bytecode stream given in the constructor,
        deserialize it into a CodeObject.
        """
        # Integers are fixed 4-byte values throughout this format.
        magic_const = unpack_integer(self.stream.read(4))
        if magic_const != MAGIC_CONSTANT:
            raise DeserializationError("Magic constant does not match")
        return self._deserialize_codeobject()

    def _match(self, obj_type, msg=''):
        """
        Check if the current byte in our Stream is equal to `obj_type`,
        consuming it on success and raising DeserializationError otherwise.
        """
        # NOTE(review): relies on Stream.get_curr_byte() returning something
        # bytes() can convert to the one-byte tag — confirm in utils.Stream.
        if not bytes(self.stream.get_curr_byte()) == obj_type:
            raise DeserializationError("Expected object with type: {}".format(obj_type) if not msg else msg)
        else:
            self.stream.advance()

    def _dispatch(self, obj_type):
        """
        Given an object's "tag" type,
        dispatch the corresponding
        deserialization method. If none
        match the "tag" raise an error.
        """
        if obj_type == TYPE_CODEOBJECT:
            return self._deserialize_codeobject()
        elif obj_type == TYPE_INSTRUCTION:
            return self._deserialize_instruction()
        elif obj_type == TYPE_PAIR:
            return self._deserialize_pair()
        elif obj_type == TYPE_BOOLEAN:
            return self._deserialize_boolean()
        elif obj_type == TYPE_NUMBER:
            return self._deserialize_number()
        elif obj_type == TYPE_SYMBOL:
            return self._deserialize_symbol()
        elif obj_type == TYPE_PY_STRING:
            return self._deserialize_py_string()
        elif obj_type == TYPE_STRING:
            return self._deserialize_string()
        elif obj_type == TYPE_NIL:
            return self._deserialize_nil()
        else:
            raise DeserializationError("Unknown object type: {}".format(obj_type))

    def _deserialize_codeobject(self):
        """
        Deserialize a code object.
        """
        self._match(TYPE_CODEOBJECT, "Top-level object is not a code object.")
        # Fields are read back in the exact order the Serializer wrote them.
        co = CodeObject([], [], [], [])
        co.name = self._deserialize_py_string()
        co.args = self._deserialize_sequence()
        co.code = self._deserialize_sequence()
        co.constants = self._deserialize_sequence()
        co.varnames = self._deserialize_sequence()
        return co

    def _deserialize_instruction(self):
        """
        Deserialize an instruction.
        """
        self._match(TYPE_INSTRUCTION)
        opcode = unpack_integer(self.stream.read(4))
        arg = unpack_integer(self.stream.read(4))
        return Instruction(opcode, arg)

    def _deserialize_pair(self):
        # Deserialize a Pair: two recursively-encoded objects.
        self._match(TYPE_PAIR)
        first = self._deserialize_object()
        second = self._deserialize_object()
        return Pair(first, second)

    def _deserialize_boolean(self):
        """
        Deserialize a Boolean.
        """
        self._match(TYPE_BOOLEAN)
        return Boolean(unpack_integer(self.stream.read(4)))

    def _deserialize_number(self):
        """
        Deserialize a number.
        """
        self._match(TYPE_NUMBER)
        return Number(unpack_integer(self.stream.read(4)))

    def _deserialize_symbol(self):
        """
        Deserialize a symbol.
        """
        self._match(TYPE_SYMBOL)
        str_len = unpack_integer(self.stream.read(4))
        return Symbol(unpack_string(self.stream.read(str_len)))

    def _deserialize_sequence(self):
        """
        Deserialize a sequence: a length prefix followed by that many objects.
        """
        self._match(TYPE_SEQUENCE)
        seq_len = unpack_integer(self.stream.read(4))
        return [self._deserialize_object() for _ in range(seq_len)]

    def _deserialize_py_string(self):
        """
        Deserialize a Python string.
        """
        self._match(TYPE_PY_STRING)
        str_len = unpack_integer(self.stream.read(4))
        return unpack_string(self.stream.read(str_len))

    def _deserialize_string(self):
        # Deserialize a Scheme String.
        self._match(TYPE_STRING)
        str_len = unpack_integer(self.stream.read(4))
        return String(unpack_string(self.stream.read(str_len)))

    def _deserialize_nil(self):
        """
        Deserialize Nil (the tag alone carries all the information).
        """
        self._match(TYPE_NIL)
        return Nil()

    def _deserialize_object(self):
        """
        Deserialize a generic object by dispatching on its tag byte.
        """
        return self._dispatch(self.stream.get_curr_byte())
def serialize(codeobject):
    """Convenience wrapper: serialize *codeobject* to its bytecode form."""
    return Serializer(codeobject).serialize()
def deserialize(bytecode):
    """Convenience wrapper: rebuild a CodeObject from serialized *bytecode*."""
    return Deserializer(bytecode).deserialize()
|
203 Free images about BEACH BALL. Need a BEACH BALL image or photo? Find the best free stock images about BEACH BALL. Download all BEACH BALL images and use them even for commercial projects. No attribution required.
|
from __future__ import unicode_literals
from __future__ import with_statement
from copy import deepcopy
from datetime import datetime
import logging
from time import mktime
import warnings
from wsgiref.handlers import format_date_time
import django
from django.conf import settings
from django.conf.urls import patterns, url
from django.core.exceptions import ObjectDoesNotExist,\
MultipleObjectsReturned, ValidationError
from django.core.urlresolvers import NoReverseMatch, reverse, Resolver404,\
get_script_prefix
from django.core.signals import got_request_exception
from django.core.exceptions import ImproperlyConfigured
try:
from django.contrib.gis.db.models.fields import GeometryField
except (ImproperlyConfigured, ImportError):
GeometryField = None
from django.db.models.constants import LOOKUP_SEP
from django.db.models.sql.constants import QUERY_TERMS
from django.http import HttpResponse, HttpResponseNotFound, Http404
from django.utils.cache import patch_cache_control, patch_vary_headers
from django.utils.html import escape
from django.utils import six
from tastypie.authentication import Authentication
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.cache import NoCache
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import NotFound, BadRequest, InvalidFilterError,\
HydrationError, InvalidSortError, ImmediateHttpResponse, Unauthorized
from tastypie import fields
from tastypie import http
from tastypie.paginator import Paginator
from tastypie.serializers import Serializer
from tastypie.throttle import BaseThrottle
from tastypie.utils import dict_strip_unicode_keys,\
is_valid_jsonp_callback_value, string_to_python, trailing_slash
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.validation import Validation
from tastypie.compat import get_module_name, atomic_decorator
# If ``csrf_exempt`` isn't present, stub it.
try:
    from django.views.decorators.csrf import csrf_exempt
except ImportError:
    def csrf_exempt(func):
        # No-op fallback: return the view unchanged when the CSRF decorator
        # is unavailable (very old Django versions).
        return func
def sanitize(text):
    """HTML-escape *text*, then restore the quote characters.

    We put the single quotes back, due to their frequent usage in exception
    messages.
    """
    return escape(text).replace(''', "'").replace('"', '"')
class NOT_AVAILABLE:
    """Sentinel type whose instances stringify to a standard 'missing data' message."""

    def __str__(self):
        return 'No such data is available.'
class ResourceOptions(object):
    """
    A configuration class for ``Resource``.
    Provides sane defaults and the logic needed to augment these settings with
    the internal ``class Meta`` used on ``Resource`` subclasses.
    """
    serializer = Serializer()
    authentication = Authentication()
    authorization = ReadOnlyAuthorization()
    cache = NoCache()
    throttle = BaseThrottle()
    validation = Validation()
    paginator_class = Paginator
    allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
    list_allowed_methods = None
    detail_allowed_methods = None
    limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
    max_limit = 1000
    api_name = None
    resource_name = None
    urlconf_namespace = None
    default_format = 'application/json'
    filtering = {}
    ordering = []
    object_class = None
    queryset = None
    fields = []
    excludes = []
    include_resource_uri = True
    include_absolute_url = False
    always_return_data = False
    collection_name = 'objects'
    detail_uri_name = 'pk'

    def __new__(cls, meta=None):
        """Build a per-resource options object, folding in ``class Meta`` overrides."""
        overrides = {}

        if meta:
            # Copy every public attribute declared on the inner Meta class.
            overrides = {
                attr_name: getattr(meta, attr_name)
                for attr_name in dir(meta)
                if not attr_name.startswith('_')
            }

        # An explicit allowed_methods override cascades down to the
        # list/detail variants unless those were themselves overridden.
        allowed_methods = overrides.get(
            'allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])

        if overrides.get('list_allowed_methods') is None:
            overrides['list_allowed_methods'] = allowed_methods

        if overrides.get('detail_allowed_methods') is None:
            overrides['detail_allowed_methods'] = allowed_methods

        # type() needs a native str for the class name on each major version.
        klass_name = 'ResourceOptions' if six.PY3 else b'ResourceOptions'
        return object.__new__(type(klass_name, (cls,), overrides))
class DeclarativeMetaclass(type):
    """Metaclass that collects declared fields and attaches ``_meta`` to each Resource class."""

    def __new__(cls, name, bases, attrs):
        attrs['base_fields'] = {}
        declared_fields = {}

        # Inherit any fields from parent(s).
        try:
            parents = [b for b in bases if issubclass(b, Resource)]
            # Simulate the MRO.
            parents.reverse()

            for p in parents:
                parent_fields = getattr(p, 'base_fields', {})

                for field_name, field_object in parent_fields.items():
                    attrs['base_fields'][field_name] = deepcopy(field_object)
        except NameError:
            # 'Resource' does not exist yet while Resource itself is being
            # created; that first class simply has no parents to inherit from.
            pass

        for field_name, obj in attrs.copy().items():
            # Look for ``dehydrated_type`` instead of doing ``isinstance``,
            # which can break down if Tastypie is re-namespaced as something
            # else.
            if hasattr(obj, 'dehydrated_type'):
                field = attrs.pop(field_name)
                declared_fields[field_name] = field

        attrs['base_fields'].update(declared_fields)
        attrs['declared_fields'] = declared_fields
        new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
        opts = getattr(new_class, 'Meta', None)
        new_class._meta = ResourceOptions(opts)

        if not getattr(new_class._meta, 'resource_name', None):
            # No ``resource_name`` provided. Attempt to auto-name the resource.
            class_name = new_class.__name__
            name_bits = [bit for bit in class_name.split('Resource') if bit]
            resource_name = ''.join(name_bits).lower()
            new_class._meta.resource_name = resource_name

        if getattr(new_class._meta, 'include_resource_uri', True):
            if 'resource_uri' not in new_class.base_fields:
                new_class.base_fields['resource_uri'] = fields.CharField(readonly=True, verbose_name="resource uri")
        elif 'resource_uri' in new_class.base_fields and 'resource_uri' not in attrs:
            # Meta opted out of resource_uri and it wasn't declared explicitly.
            del(new_class.base_fields['resource_uri'])

        for field_name, field_object in new_class.base_fields.items():
            if hasattr(field_object, 'contribute_to_class'):
                field_object.contribute_to_class(new_class, field_name)

        return new_class
class Resource(six.with_metaclass(DeclarativeMetaclass)):
"""
Handles the data, request dispatch and responding to requests.
Serialization/deserialization is handled "at the edges" (i.e. at the
beginning/end of the request/response cycle) so that everything internally
is Python data structures.
This class tries to be non-model specific, so it can be hooked up to other
data sources, such as search results, files, other data, etc.
"""
def __init__(self, api_name=None):
# this can cause:
# TypeError: object.__new__(method-wrapper) is not safe, use method-wrapper.__new__()
# when trying to copy a generator used as a default. Wrap call to
# generator in lambda to get around this error.
self.fields = deepcopy(self.base_fields)
if api_name is not None:
self._meta.api_name = api_name
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError(name)
    def wrap_view(self, view):
        """
        Wraps methods so they can be called in a more functional way as well
        as handling exceptions better.
        Note that if ``BadRequest`` or an exception with a ``response`` attr
        are seen, there is special handling to either present a message back
        to the user or return the response traveling with the exception.
        """
        @csrf_exempt
        def wrapper(request, *args, **kwargs):
            try:
                # ``view`` is a method *name*; resolve it on self at call time.
                callback = getattr(self, view)
                response = callback(request, *args, **kwargs)
                # Our response can vary based on a number of factors, use
                # the cache class to determine what we should ``Vary`` on so
                # caches won't return the wrong (cached) version.
                varies = getattr(self._meta.cache, "varies", [])
                if varies:
                    patch_vary_headers(response, varies)
                if self._meta.cache.cacheable(request, response):
                    if self._meta.cache.cache_control():
                        # If the request is cacheable and we have a
                        # ``Cache-Control`` available then patch the header.
                        patch_cache_control(response, **self._meta.cache.cache_control())
                if request.is_ajax() and not response.has_header("Cache-Control"):
                    # IE excessively caches XMLHttpRequests, so we're disabling
                    # the browser cache here.
                    # See http://www.enhanceie.com/ie/bugs.asp for details.
                    patch_cache_control(response, no_cache=True)
                return response
            except (BadRequest, fields.ApiFieldError) as e:
                # Client error: report the first exception argument, if any.
                data = {"error": sanitize(e.args[0]) if getattr(e, 'args') else ''}
                return self.error_response(request, data, response_class=http.HttpBadRequest)
            except ValidationError as e:
                data = {"error": sanitize(e.messages)}
                return self.error_response(request, data, response_class=http.HttpBadRequest)
            except Exception as e:
                # Exceptions may carry a prebuilt response (ImmediateHttpResponse).
                if hasattr(e, 'response'):
                    return e.response
                # A real, non-expected exception.
                # Handle the case where the full traceback is more helpful
                # than the serialized error.
                if settings.DEBUG and getattr(settings, 'TASTYPIE_FULL_DEBUG', False):
                    raise
                # Re-raise the error to get a proper traceback when the error
                # happened during a test case
                if request.META.get('SERVER_NAME') == 'testserver':
                    raise
                # Rather than re-raising, we're going to things similar to
                # what Django does. The difference is returning a serialized
                # error message.
                return self._handle_500(request, e)
        return wrapper
    def _handle_500(self, request, exception):
        """Turn an unexpected exception into a serialized 500 (or 404) response."""
        import traceback
        import sys
        the_trace = '\n'.join(traceback.format_exception(*(sys.exc_info())))
        response_class = http.HttpApplicationError
        response_code = 500
        # "Not found"-style exceptions are reported as 404s instead of 500s.
        NOT_FOUND_EXCEPTIONS = (NotFound, ObjectDoesNotExist, Http404)
        if isinstance(exception, NOT_FOUND_EXCEPTIONS):
            response_class = HttpResponseNotFound
            response_code = 404
        if settings.DEBUG:
            # DEBUG mode: expose the message and traceback to the client.
            data = {
                "error_message": sanitize(six.text_type(exception)),
                "traceback": the_trace,
            }
            return self.error_response(request, data, response_class=response_class)
        # When DEBUG is False, send an error message to the admins (unless it's
        # a 404, in which case we check the setting).
        send_broken_links = getattr(settings, 'SEND_BROKEN_LINK_EMAILS', False)
        if not response_code == 404 or send_broken_links:
            log = logging.getLogger('django.request.tastypie')
            log.error('Internal Server Error: %s' % request.path, exc_info=True,
                extra={'status_code': response_code, 'request': request})
        # Send the signal so other apps are aware of the exception.
        got_request_exception.send(self.__class__, request=request)
        # Prep the data going out.
        data = {
            "error_message": getattr(settings, 'TASTYPIE_CANNED_ERROR', "Sorry, this request could not be processed. Please try again later."),
        }
        return self.error_response(request, data, response_class=response_class)
def _build_reverse_url(self, name, args=None, kwargs=None):
"""
A convenience hook for overriding how URLs are built.
See ``NamespacedModelResource._build_reverse_url`` for an example.
"""
return reverse(name, args=args, kwargs=kwargs)
    def base_urls(self):
        """
        The standard URLs this ``Resource`` should respond to.
        """
        # Order matters: the more specific ``schema`` and ``set`` patterns
        # must precede the catch-all detail pattern.
        return [
            url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash), self.wrap_view('dispatch_list'), name="api_dispatch_list"),
            url(r"^(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash), self.wrap_view('get_schema'), name="api_get_schema"),
            url(r"^(?P<resource_name>%s)/set/(?P<%s_list>.*?)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash), self.wrap_view('get_multiple'), name="api_get_multiple"),
            url(r"^(?P<resource_name>%s)/(?P<%s>.*?)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
        ]
def override_urls(self):
"""
Deprecated. Will be removed by v1.0.0. Please use ``prepend_urls`` instead.
"""
return []
def prepend_urls(self):
"""
A hook for adding your own URLs or matching before the default URLs.
"""
return []
    @property
    def urls(self):
        """
        The endpoints this ``Resource`` responds to.
        Mostly a standard URLconf, this is suitable for either automatic use
        when registered with an ``Api`` class or for including directly in
        a URLconf should you choose to.
        """
        urls = self.prepend_urls()
        # Honor the deprecated ``override_urls`` hook, but warn about it.
        overridden_urls = self.override_urls()
        if overridden_urls:
            warnings.warn("'override_urls' is a deprecated method & will be removed by v1.0.0. Please rename your method to ``prepend_urls``.")
            urls += overridden_urls
        urls += self.base_urls()
        return patterns('', *urls)
def determine_format(self, request):
"""
Used to determine the desired format.
Largely relies on ``tastypie.utils.mime.determine_format`` but here
as a point of extension.
"""
return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)
def serialize(self, request, data, format, options=None):
"""
Given a request, data and a desired format, produces a serialized
version suitable for transfer over the wire.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
options = options or {}
if 'text/javascript' in format:
# get JSONP callback name. default to "callback"
callback = request.GET.get('callback', 'callback')
if not is_valid_jsonp_callback_value(callback):
raise BadRequest('JSONP callback name is invalid.')
options['callback'] = callback
return self._meta.serializer.serialize(data, format, options)
def deserialize(self, request, data, format='application/json'):
"""
Given a request, data and a format, deserializes the given data.
It relies on the request properly sending a ``CONTENT_TYPE`` header,
falling back to ``application/json`` if not provided.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
deserialized = self._meta.serializer.deserialize(data, format=request.META.get('CONTENT_TYPE', format))
return deserialized
def alter_list_data_to_serialize(self, request, data):
"""
A hook to alter list data just before it gets serialized & sent to the user.
Useful for restructuring/renaming aspects of the what's going to be
sent.
Should accommodate for a list of objects, generally also including
meta data.
"""
return data
def alter_detail_data_to_serialize(self, request, data):
"""
A hook to alter detail data just before it gets serialized & sent to the user.
Useful for restructuring/renaming aspects of the what's going to be
sent.
Should accommodate for receiving a single bundle of data.
"""
return data
def alter_deserialized_list_data(self, request, data):
"""
A hook to alter list data just after it has been received from the user &
gets deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def alter_deserialized_detail_data(self, request, data):
"""
A hook to alter detail data just after it has been received from the user &
gets deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def dispatch_list(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) over
the entire list of resources.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('list', request, **kwargs)
def dispatch_detail(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) on
a single resource.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('detail', request, **kwargs)
    def dispatch(self, request_type, request, **kwargs):
        """
        Handles the common operations (allowed HTTP method, authentication,
        throttling, method lookup) surrounding most CRUD interactions.
        """
        # ``request_type`` is 'list' or 'detail'; pick the matching method set.
        allowed_methods = getattr(self._meta, "%s_allowed_methods" % request_type, None)
        if 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META:
            # Let clients that cannot issue PUT/DELETE tunnel the real verb.
            request.method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE']
        request_method = self.method_check(request, allowed=allowed_methods)
        # Resolve e.g. ``get_list`` / ``post_detail`` on this resource.
        method = getattr(self, "%s_%s" % (request_method, request_type), None)
        if method is None:
            raise ImmediateHttpResponse(response=http.HttpNotImplemented())
        self.is_authenticated(request)
        self.throttle_check(request)
        # All clear. Process the request.
        request = convert_post_to_put(request)
        response = method(request, **kwargs)
        # Add the throttled request.
        self.log_throttled_access(request)
        # If what comes back isn't a ``HttpResponse``, assume that the
        # request was accepted and that some action occurred. This also
        # prevents Django from freaking out.
        if not isinstance(response, HttpResponse):
            return http.HttpNoContent()
        return response
def remove_api_resource_names(self, url_dict):
"""
Given a dictionary of regex matches from a URLconf, removes
``api_name`` and/or ``resource_name`` if found.
This is useful for converting URLconf matches into something suitable
for data lookup. For example::
Model.objects.filter(**self.remove_api_resource_names(matches))
"""
kwargs_subset = url_dict.copy()
for key in ['api_name', 'resource_name']:
try:
del(kwargs_subset[key])
except KeyError:
pass
return kwargs_subset
    def method_check(self, request, allowed=None):
        """
        Ensures that the HTTP method used on the request is allowed to be
        handled by the resource.
        Takes an ``allowed`` parameter, which should be a list of lowercase
        HTTP methods to check against. Usually, this looks like::
            # The most generic lookup.
            self.method_check(request, self._meta.allowed_methods)
            # A lookup against what's allowed for list-type methods.
            self.method_check(request, self._meta.list_allowed_methods)
            # A useful check when creating a new endpoint that only handles
            # GET.
            self.method_check(request, ['get'])
        """
        if allowed is None:
            allowed = []
        request_method = request.method.lower()
        allows = ','.join([meth.upper() for meth in allowed])
        if request_method == "options":
            # Answer OPTIONS preflights directly with the permitted verbs.
            response = HttpResponse(allows)
            response['Allow'] = allows
            raise ImmediateHttpResponse(response=response)
        if request_method not in allowed:
            # 405 with the ``Allow`` header listing acceptable verbs.
            response = http.HttpMethodNotAllowed(allows)
            response['Allow'] = allows
            raise ImmediateHttpResponse(response=response)
        return request_method
def is_authenticated(self, request):
"""
Handles checking if the user is authenticated and dealing with
unauthenticated users.
Mostly a hook, this uses class assigned to ``authentication`` from
``Resource._meta``.
"""
# Authenticate the request as needed.
auth_result = self._meta.authentication.is_authenticated(request)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if auth_result is not True:
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
    def throttle_check(self, request):
        """
        Handles checking if the user should be throttled.
        Mostly a hook, this uses class assigned to ``throttle`` from
        ``Resource._meta``.
        """
        identifier = self._meta.authentication.get_identifier(request)
        # Check to see if they should be throttled.
        throttle = self._meta.throttle.should_be_throttled(identifier)
        if throttle:
            # Throttle limit exceeded.
            response = http.HttpTooManyRequests()
            # ``should_be_throttled`` may also return a retry delay: an int
            # of seconds or a datetime, exposed via the Retry-After header.
            if isinstance(throttle, int) and not isinstance(throttle, bool):
                response['Retry-After'] = throttle
            elif isinstance(throttle, datetime):
                response['Retry-After'] = format_date_time(mktime(throttle.timetuple()))
            raise ImmediateHttpResponse(response=response)
def log_throttled_access(self, request):
"""
Handles the recording of the user's access for throttling purposes.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
request_method = request.method.lower()
self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method)
def unauthorized_result(self, exception):
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def authorized_read_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to GET this resource.
"""
try:
auth_result = self._meta.authorization.read_list(object_list, bundle)
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_read_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to GET this resource.
"""
try:
auth_result = self._meta.authorization.read_detail(object_list, bundle)
if auth_result is not True:
raise Unauthorized()
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_create_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to POST this resource.
"""
try:
auth_result = self._meta.authorization.create_list(object_list, bundle)
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_create_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to POST this resource.
"""
try:
auth_result = self._meta.authorization.create_detail(object_list, bundle)
if auth_result is not True:
raise Unauthorized()
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_update_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to PUT this resource.
"""
try:
auth_result = self._meta.authorization.update_list(object_list, bundle)
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_update_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to PUT this resource.
"""
try:
auth_result = self._meta.authorization.update_detail(object_list, bundle)
if auth_result is not True:
raise Unauthorized()
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_delete_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to DELETE this resource.
"""
try:
auth_result = self._meta.authorization.delete_list(object_list, bundle)
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def authorized_delete_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to DELETE this resource.
"""
try:
auth_result = self._meta.authorization.delete_detail(object_list, bundle)
if not auth_result:
raise Unauthorized()
except Unauthorized as e:
self.unauthorized_result(e)
return auth_result
def build_bundle(self, obj=None, data=None, request=None, objects_saved=None, via_uri=None):
"""
Given either an object, a data dictionary or both, builds a ``Bundle``
for use throughout the ``dehydrate/hydrate`` cycle.
If no object is provided, an empty object from
``Resource._meta.object_class`` is created so that attempts to access
``bundle.obj`` do not fail.
"""
if obj is None and self._meta.object_class:
obj = self._meta.object_class()
return Bundle(
obj=obj,
data=data,
request=request,
objects_saved=objects_saved,
via_uri=via_uri
)
def build_filters(self, filters=None):
"""
Allows for the filtering of applicable objects.
This needs to be implemented at the user level.'
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return filters
def apply_sorting(self, obj_list, options=None):
"""
Allows for the sorting of objects being returned.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return obj_list
def get_bundle_detail_data(self, bundle):
"""
Convenience method to return the ``detail_uri_name`` attribute off
``bundle.obj``.
Usually just accesses ``bundle.obj.pk`` by default.
"""
return getattr(bundle.obj, self._meta.detail_uri_name)
# URL-related methods.
def detail_uri_kwargs(self, bundle_or_obj):
"""
This needs to be implemented at the user level.
Given a ``Bundle`` or an object, it returns the extra kwargs needed to
generate a detail URI.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def resource_uri_kwargs(self, bundle_or_obj=None):
"""
Builds a dictionary of kwargs to help generate URIs.
Automatically provides the ``Resource.Meta.resource_name`` (and
optionally the ``Resource.Meta.api_name`` if populated by an ``Api``
object).
If the ``bundle_or_obj`` argument is provided, it calls
``Resource.detail_uri_kwargs`` for additional bits to create
"""
kwargs = {
'resource_name': self._meta.resource_name,
}
if self._meta.api_name is not None:
kwargs['api_name'] = self._meta.api_name
if bundle_or_obj is not None:
kwargs.update(self.detail_uri_kwargs(bundle_or_obj))
return kwargs
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
"""
Handles generating a resource URI.
If the ``bundle_or_obj`` argument is not provided, it builds the URI
for the list endpoint.
If the ``bundle_or_obj`` argument is provided, it builds the URI for
the detail endpoint.
Return the generated URI. If that URI can not be reversed (not found
in the URLconf), it will return an empty string.
"""
if bundle_or_obj is not None:
url_name = 'api_dispatch_detail'
try:
return self._build_reverse_url(url_name, kwargs=self.resource_uri_kwargs(bundle_or_obj))
except NoReverseMatch:
return ''
    def get_via_uri(self, uri, request=None):
        """
        This pulls apart the salient bits of the URI and populates the
        resource via a ``obj_get``.
        Optionally accepts a ``request``.
        If you need custom behavior based on other portions of the URI,
        simply override this method.
        """
        # Strip the deployment prefix (e.g. when mounted under a subpath).
        prefix = get_script_prefix()
        chomped_uri = uri
        if prefix and chomped_uri.startswith(prefix):
            chomped_uri = chomped_uri[len(prefix) - 1:]
        # We mangle the path a bit further & run URL resolution against *only*
        # the current class. This ought to prevent bad URLs from resolving to
        # incorrect data.
        found_at = chomped_uri.rfind(self._meta.resource_name)
        if found_at == -1:
            raise NotFound("An incorrect URL was provided '%s' for the '%s' resource." % (uri, self.__class__.__name__))
        chomped_uri = chomped_uri[found_at:]
        try:
            # Try each of this resource's own URL patterns until one matches.
            for url_resolver in getattr(self, 'urls', []):
                result = url_resolver.resolve(chomped_uri)
                if result is not None:
                    view, args, kwargs = result
                    break
            else:
                raise Resolver404("URI not found in 'self.urls'.")
        except Resolver404:
            raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)
        bundle = self.build_bundle(request=request)
        return self.obj_get(bundle=bundle, **self.remove_api_resource_names(kwargs))
# Data preparation.
    def full_dehydrate(self, bundle, for_list=False):
        """
        Given a bundle with an object instance, extract the information from it
        to populate the resource.
        """
        data = bundle.data
        # Hoist meta lookups out of the per-field loop.
        api_name = self._meta.api_name
        resource_name = self._meta.resource_name
        # Dehydrate each field.
        for field_name, field_object in self.fields.items():
            # If it's not for use in this mode, skip
            field_use_in = field_object.use_in
            if callable(field_use_in):
                if not field_use_in(bundle):
                    continue
            else:
                # ``use_in`` is expected to be 'all', 'list' or 'detail'.
                if field_use_in not in ['all', 'list' if for_list else 'detail']:
                    continue
            # A touch leaky but it makes URI resolution work.
            if field_object.dehydrated_type == 'related':
                field_object.api_name = api_name
                field_object.resource_name = resource_name
            data[field_name] = field_object.dehydrate(bundle, for_list=for_list)
            # Check for an optional method to do further dehydration.
            method = getattr(self, "dehydrate_%s" % field_name, None)
            if method:
                # A ``dehydrate_FOO`` hook overrides the field's own value.
                data[field_name] = method(bundle)
        bundle = self.dehydrate(bundle)
        return bundle
def dehydrate(self, bundle):
"""
A hook to allow a final manipulation of data once all fields/methods
have built out the dehydrated data.
Useful if you need to access more than one dehydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
    def full_hydrate(self, bundle):
        """
        Given a populated bundle, distill it and turn it back into
        a full-fledged object instance.
        """
        if bundle.obj is None:
            bundle.obj = self._meta.object_class()
        bundle = self.hydrate(bundle)
        for field_name, field_object in self.fields.items():
            if field_object.readonly is True:
                # Read-only fields never write back onto the object.
                continue
            # Check for an optional method to do further hydration.
            method = getattr(self, "hydrate_%s" % field_name, None)
            if method:
                bundle = method(bundle)
            if field_object.attribute:
                value = field_object.hydrate(bundle)
                # NOTE: We only get back a bundle when it is related field.
                if isinstance(value, Bundle) and value.errors.get(field_name):
                    bundle.errors[field_name] = value.errors[field_name]
                if value is not None or field_object.null:
                    # We need to avoid populating M2M data here as that will
                    # cause things to blow up.
                    if not field_object.is_related:
                        setattr(bundle.obj, field_object.attribute, value)
                    elif not field_object.is_m2m:
                        if value is not None:
                            # NOTE: A bug fix in Django (ticket #18153) fixes incorrect behavior
                            # which Tastypie was relying on. To fix this, we store value.obj to
                            # be saved later in save_related.
                            try:
                                setattr(bundle.obj, field_object.attribute, value.obj)
                            except (ValueError, ObjectDoesNotExist):
                                bundle.related_objects_to_save[field_object.attribute] = value.obj
                        elif field_object.blank:
                            # Blank-allowed related fields may simply be omitted.
                            continue
                        elif field_object.null:
                            setattr(bundle.obj, field_object.attribute, value)
        return bundle
def hydrate(self, bundle):
"""
A hook to allow an initial manipulation of data before all methods/fields
have built out the hydrated data.
Useful if you need to access more than one hydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
    def hydrate_m2m(self, bundle):
        """
        Populate the ManyToMany data on the instance.
        """
        if bundle.obj is None:
            raise HydrationError("You must call 'full_hydrate' before attempting to run 'hydrate_m2m' on %r." % self)
        for field_name, field_object in self.fields.items():
            if not field_object.is_m2m:
                continue
            if field_object.attribute:
                # Note that we only hydrate the data, leaving the instance
                # unmodified. It's up to the user's code to handle this.
                # The ``ModelResource`` provides a working baseline
                # in this regard.
                bundle.data[field_name] = field_object.hydrate_m2m(bundle)
        # Second pass: let per-field ``hydrate_FOO`` hooks post-process the
        # freshly hydrated m2m data.
        for field_name, field_object in self.fields.items():
            if not field_object.is_m2m:
                continue
            method = getattr(self, "hydrate_%s" % field_name, None)
            if method:
                method(bundle)
        return bundle
    def build_schema(self):
        """
        Returns a dictionary of all the fields on the resource and some
        properties about those fields.
        Used by the ``schema/`` endpoint to describe what will be available.
        """
        data = {
            'fields': {},
            'default_format': self._meta.default_format,
            'allowed_list_http_methods': self._meta.list_allowed_methods,
            'allowed_detail_http_methods': self._meta.detail_allowed_methods,
            'default_limit': self._meta.limit,
        }
        if self._meta.ordering:
            data['ordering'] = self._meta.ordering
        if self._meta.filtering:
            data['filtering'] = self._meta.filtering
        # Skip assigning pk_field_name for non-model resources
        try:
            pk_field_name = self._meta.queryset.model._meta.pk.name
        except AttributeError:
            pk_field_name = None
        for field_name, field_object in self.fields.items():
            data['fields'][field_name] = {
                'default': field_object.default,
                'type': field_object.dehydrated_type,
                'nullable': field_object.null,
                'blank': field_object.blank,
                'readonly': field_object.readonly,
                'help_text': field_object.help_text,
                'unique': field_object.unique,
                'primary_key': True if field_name == pk_field_name else False,
                'verbose_name': field_object.verbose_name or field_name.replace("_", " "),
            }
            if field_object.dehydrated_type == 'related':
                # Distinguish to-one from to-many relations in the schema.
                if field_object.is_m2m:
                    related_type = 'to_many'
                else:
                    related_type = 'to_one'
                data['fields'][field_name]['related_type'] = related_type
        return data
def dehydrate_resource_uri(self, bundle):
"""
For the automatically included ``resource_uri`` field, dehydrate
the URI for the given bundle.
Returns empty string if no URI can be generated.
"""
try:
return self.get_resource_uri(bundle)
except NotImplementedError:
return ''
except NoReverseMatch:
return ''
def generate_cache_key(self, *args, **kwargs):
"""
Creates a unique-enough cache key.
This is based off the current api_name/resource_name/args/kwargs.
"""
smooshed = ["%s=%s" % (key, value) for key, value in kwargs.items()]
# Use a list plus a ``.join()`` because it's faster than concatenation.
return "%s:%s:%s:%s" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), ':'.join(sorted(smooshed)))
# Data access methods.
def get_object_list(self, request):
"""
A hook to allow making returning the list of available objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def apply_authorization_limits(self, request, object_list):
"""
Deprecated.
FIXME: REMOVE BEFORE 1.0
"""
return self._meta.authorization.apply_limits(request, object_list)
def can_create(self):
"""
Checks to ensure ``post`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'post' in allowed
def can_update(self):
"""
Checks to ensure ``put`` is within ``allowed_methods``.
Used when hydrating related data.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'put' in allowed
def can_delete(self):
"""
Checks to ensure ``delete`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'delete' in allowed
def apply_filters(self, request, applicable_filters):
"""
A hook to alter how the filters are applied to the object list.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_get_list(self, bundle, **kwargs):
"""
Fetches the list of objects available on the resource.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get_list(self, bundle, **kwargs):
"""
A version of ``obj_get_list`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('list', **kwargs)
obj_list = self._meta.cache.get(cache_key)
if obj_list is None:
obj_list = self.obj_get_list(bundle=bundle, **kwargs)
self._meta.cache.set(cache_key, obj_list)
return obj_list
def obj_get(self, bundle, **kwargs):
"""
Fetches an individual object on the resource.
This needs to be implemented at the user level. If the object can not
be found, this should raise a ``NotFound`` exception.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get(self, bundle, **kwargs):
"""
A version of ``obj_get`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('detail', **kwargs)
cached_bundle = self._meta.cache.get(cache_key)
if cached_bundle is None:
cached_bundle = self.obj_get(bundle=bundle, **kwargs)
self._meta.cache.set(cache_key, cached_bundle)
return cached_bundle
def obj_create(self, bundle, **kwargs):
"""
Creates a new object based on the provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_update(self, bundle, **kwargs):
"""
Updates an existing object (or creates a new object) based on the
provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete_list(self, bundle, **kwargs):
"""
Deletes an entire list of objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete_list_for_update(self, bundle, **kwargs):
"""
Deletes an entire list of objects, specific to PUT list.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete(self, bundle, **kwargs):
"""
Deletes a single object.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
"""
Extracts the common "which-format/serialize/return-response" cycle.
Mostly a useful shortcut/hook.
"""
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs)
def error_response(self, request, errors, response_class=None):
    """
    Serialize ``errors`` and wrap them in an error response
    (``HttpBadRequest`` unless ``response_class`` says otherwise).

    Should be used as much as possible to return errors.
    """
    if response_class is None:
        response_class = http.HttpBadRequest

    desired_format = None

    if request:
        if request.GET.get('callback', None) is not None:
            # JSONP can cause extra breakage, so pin the format.
            desired_format = 'application/json'
        else:
            try:
                desired_format = self.determine_format(request)
            except BadRequest:
                # Fall through to the resource default below.
                pass

    if not desired_format:
        desired_format = self._meta.default_format

    try:
        serialized = self.serialize(request, errors, desired_format)
    except BadRequest as e:
        message = "Additional errors occurred, but serialization of those errors failed."

        if settings.DEBUG:
            message += " %s" % e

        return response_class(content=message, content_type='text/plain')

    return response_class(content=serialized, content_type=build_content_type(desired_format))
def is_valid(self, bundle):
    """
    Run the resource's assigned ``validation`` class over ``bundle``.

    Returns ``True`` when the data is valid; otherwise stores the error
    messages under the resource's name in ``bundle.errors`` and returns
    ``False``.
    """
    validation_errors = self._meta.validation.is_valid(bundle, bundle.request)

    if not validation_errors:
        return True

    bundle.errors[self._meta.resource_name] = validation_errors
    return False
def rollback(self, bundles):
    """
    Given the list of bundles, delete all objects pertaining to those
    bundles. Implementations should avoid raising where possible.

    Abstract hook: concrete subclasses (e.g. ``ModelResource``) must
    supply a storage-specific implementation.
    """
    raise NotImplementedError()
# Views.
def get_list(self, request, **kwargs):
    """
    Returns a serialized list of resources.

    Calls ``obj_get_list`` to provide the data, then handles that result
    set and serializes it.

    Should return a HttpResponse (200 OK).
    """
    # TODO: Uncached for now. Invalidation that works for everyone may be
    #       impossible.
    base_bundle = self.build_bundle(request=request)
    # Strip URL-conf artifacts (api_name/resource_name) before filtering.
    objects = self.obj_get_list(bundle=base_bundle, **self.remove_api_resource_names(kwargs))
    sorted_objects = self.apply_sorting(objects, options=request.GET)

    paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_uri(), limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)
    to_be_serialized = paginator.page()

    # Dehydrate the bundles in preparation for serialization.
    bundles = [
        self.full_dehydrate(self.build_bundle(obj=obj, request=request), for_list=True)
        for obj in to_be_serialized[self._meta.collection_name]
    ]

    to_be_serialized[self._meta.collection_name] = bundles
    to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
    return self.create_response(request, to_be_serialized)
def get_detail(self, request, **kwargs):
    """
    Returns a single serialized resource.

    Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result
    set and serializes it.

    Should return a HttpResponse (200 OK).
    """
    basic_bundle = self.build_bundle(request=request)

    try:
        obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
    except ObjectDoesNotExist:
        return http.HttpNotFound()
    except MultipleObjectsReturned:
        # More than one match means the URI is ambiguous, not missing.
        return http.HttpMultipleChoices("More than one resource is found at this URI.")

    bundle = self.build_bundle(obj=obj, request=request)
    bundle = self.full_dehydrate(bundle)
    bundle = self.alter_detail_data_to_serialize(request, bundle)
    return self.create_response(request, bundle)
def post_list(self, request, **kwargs):
    """
    Creates a new resource/object with the provided data.

    Calls ``obj_create`` with the provided data and returns a response
    with the new resource's location.

    If a new resource is created, return ``HttpCreated`` (201 Created).
    If ``Meta.always_return_data = True``, there will be a populated body
    of serialized data.
    """
    deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
    deserialized = self.alter_deserialized_detail_data(request, deserialized)
    bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
    updated_bundle = self.obj_create(bundle, **self.remove_api_resource_names(kwargs))
    location = self.get_resource_uri(updated_bundle)

    if not self._meta.always_return_data:
        return http.HttpCreated(location=location)
    else:
        # Re-dehydrate so the response body reflects any server-side
        # changes (defaults, computed fields) made during creation.
        updated_bundle = self.full_dehydrate(updated_bundle)
        updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
        return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def post_detail(self, request, **kwargs):
    """
    Creates a new subcollection of the resource under a resource.

    This is not implemented by default because most people's data models
    aren't self-referential; always answers 501 Not Implemented.

    If a new resource is created, return ``HttpCreated`` (201 Created).
    """
    return http.HttpNotImplemented()
def put_list(self, request, **kwargs):
    """
    Replaces a collection of resources with another collection.

    Calls ``delete_list`` to clear out the collection then ``obj_create``
    with the provided the data to create the new collection.

    Return ``HttpNoContent`` (204 No Content) if
    ``Meta.always_return_data = False`` (default).

    Return ``HttpAccepted`` (200 OK) if
    ``Meta.always_return_data = True``.
    """
    deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
    deserialized = self.alter_deserialized_list_data(request, deserialized)

    if self._meta.collection_name not in deserialized:
        raise BadRequest("Invalid data sent: missing '%s'" % self._meta.collection_name)

    basic_bundle = self.build_bundle(request=request)
    # PUT-list semantics: the existing collection is wiped first, then
    # rebuilt from the payload.
    self.obj_delete_list_for_update(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
    bundles_seen = []

    for object_data in deserialized[self._meta.collection_name]:
        bundle = self.build_bundle(data=dict_strip_unicode_keys(object_data), request=request)

        # Attempt to be transactional, deleting any previously created
        # objects if validation fails.
        try:
            self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs))
            bundles_seen.append(bundle)
        except ImmediateHttpResponse:
            self.rollback(bundles_seen)
            raise

    if not self._meta.always_return_data:
        return http.HttpNoContent()
    else:
        to_be_serialized = {
            self._meta.collection_name: [
                self.full_dehydrate(b, for_list=True)
                for b in bundles_seen
            ]
        }
        to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
        return self.create_response(request, to_be_serialized)
def put_detail(self, request, **kwargs):
    """
    Either updates an existing resource or creates a new one with the
    provided data.

    Calls ``obj_update`` with the provided data first, but falls back to
    ``obj_create`` if the object does not already exist.

    If a new resource is created, return ``HttpCreated`` (201 Created).
    If ``Meta.always_return_data = True``, there will be a populated body
    of serialized data.

    If an existing resource is modified and
    ``Meta.always_return_data = False`` (default), return ``HttpNoContent``
    (204 No Content).

    If an existing resource is modified and
    ``Meta.always_return_data = True``, return ``HttpAccepted`` (200 OK).
    """
    deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
    deserialized = self.alter_deserialized_detail_data(request, deserialized)
    bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)

    try:
        updated_bundle = self.obj_update(bundle=bundle, **self.remove_api_resource_names(kwargs))

        if not self._meta.always_return_data:
            return http.HttpNoContent()
        else:
            # Invalidate prefetched_objects_cache for bundled object
            # because we might have changed a prefetched field
            updated_bundle.obj._prefetched_objects_cache = {}
            updated_bundle = self.full_dehydrate(updated_bundle)
            updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
            return self.create_response(request, updated_bundle)
    except (NotFound, MultipleObjectsReturned):
        # No single existing object matched: create instead (PUT upsert).
        updated_bundle = self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs))
        location = self.get_resource_uri(updated_bundle)

        if not self._meta.always_return_data:
            return http.HttpCreated(location=location)
        else:
            updated_bundle = self.full_dehydrate(updated_bundle)
            updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
            return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def delete_list(self, request, **kwargs):
    """
    Destroys a collection of resources/objects.

    Delegates to ``obj_delete_list`` and always answers
    ``HttpNoContent`` (204 No Content) on success.
    """
    bundle = self.build_bundle(request=request)
    cleaned_kwargs = self.remove_api_resource_names(kwargs)
    self.obj_delete_list(bundle=bundle, request=request, **cleaned_kwargs)
    return http.HttpNoContent()
def delete_detail(self, request, **kwargs):
    """
    Destroys a single resource/object.

    Delegates to ``obj_delete``. Answers ``HttpNoContent`` (204 No
    Content) when the object was deleted and ``Http404`` (404 Not Found)
    when it never existed.
    """
    # Manually construct the bundle here, since we don't want to try to
    # delete an empty instance.
    bundle = Bundle(request=request)

    try:
        self.obj_delete(bundle=bundle, **self.remove_api_resource_names(kwargs))
    except NotFound:
        return http.HttpNotFound()

    return http.HttpNoContent()
def patch_list(self, request, **kwargs):
    """
    Updates a collection in-place.

    The exact behavior of ``PATCH`` to a list resource is still the matter of
    some debate in REST circles, and the ``PATCH`` RFC isn't standard. So the
    behavior this method implements (described below) is something of a
    stab in the dark. It's mostly cribbed from GData, with a smattering
    of ActiveResource-isms and maybe even an original idea or two.

    The ``PATCH`` format is one that's similar to the response returned from
    a ``GET`` on a list resource::

        {
            "objects": [{object}, {object}, ...],
            "deleted_objects": ["URI", "URI", "URI", ...],
        }

    For each object in ``objects``:

    * If the dict does not have a ``resource_uri`` key then the item is
      considered "new" and is handled like a ``POST`` to the resource list.
    * If the dict has a ``resource_uri`` key and the ``resource_uri`` refers
      to an existing resource then the item is a update; it's treated
      like a ``PATCH`` to the corresponding resource detail.
    * If the dict has a ``resource_uri`` but the resource *doesn't* exist,
      then this is considered to be a create-via-``PUT``.

    Each entry in ``deleted_objects`` referes to a resource URI of an existing
    resource to be deleted; each is handled like a ``DELETE`` to the relevent
    resource.

    In any case:

    * If there's a resource URI it *must* refer to a resource of this
      type. It's an error to include a URI of a different resource.
    * ``PATCH`` is all or nothing. If a single sub-operation fails, the
      entire request will fail and all resources will be rolled back.
    * For ``PATCH`` to work, you **must** have ``put`` in your
      :ref:`detail-allowed-methods` setting.
    * To delete objects via ``deleted_objects`` in a ``PATCH`` request you
      **must** have ``delete`` in your :ref:`detail-allowed-methods`
      setting.

    Substitute appropriate names for ``objects`` and
    ``deleted_objects`` if ``Meta.collection_name`` is set to something
    other than ``objects`` (default).
    """
    request = convert_post_to_patch(request)
    deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))

    collection_name = self._meta.collection_name
    deleted_collection_name = 'deleted_%s' % collection_name

    if collection_name not in deserialized:
        raise BadRequest("Invalid data sent: missing '%s'" % collection_name)

    # Updates/creates route through PUT semantics, so PUT must be allowed
    # on the detail endpoint.
    if len(deserialized[collection_name]) and 'put' not in self._meta.detail_allowed_methods:
        raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())

    bundles_seen = []

    for data in deserialized[collection_name]:
        # If there's a resource_uri then this is either an
        # update-in-place or a create-via-PUT.
        if "resource_uri" in data:
            uri = data.pop('resource_uri')

            try:
                obj = self.get_via_uri(uri, request=request)

                # The object does exist, so this is an update-in-place.
                bundle = self.build_bundle(obj=obj, request=request)
                bundle = self.full_dehydrate(bundle, for_list=True)
                bundle = self.alter_detail_data_to_serialize(request, bundle)
                self.update_in_place(request, bundle, data)
            except (ObjectDoesNotExist, MultipleObjectsReturned):
                # The object referenced by resource_uri doesn't exist,
                # so this is a create-by-PUT equivalent.
                data = self.alter_deserialized_detail_data(request, data)
                bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
                self.obj_create(bundle=bundle)
        else:
            # There's no resource URI, so this is a create call just
            # like a POST to the list resource.
            data = self.alter_deserialized_detail_data(request, data)
            bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
            self.obj_create(bundle=bundle)

        bundles_seen.append(bundle)

    deleted_collection = deserialized.get(deleted_collection_name, [])

    if deleted_collection:
        if 'delete' not in self._meta.detail_allowed_methods:
            raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())

        for uri in deleted_collection:
            obj = self.get_via_uri(uri, request=request)
            bundle = self.build_bundle(obj=obj, request=request)
            self.obj_delete(bundle=bundle)

    if not self._meta.always_return_data:
        return http.HttpAccepted()
    else:
        to_be_serialized = {
            'objects': [
                self.full_dehydrate(b, for_list=True)
                for b in bundles_seen
            ]
        }
        to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
        return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
def patch_detail(self, request, **kwargs):
    """
    Updates a resource in-place.

    Calls ``obj_update``.

    If the resource is updated, return ``HttpAccepted`` (202 Accepted).
    If the resource did not exist, return ``HttpNotFound`` (404 Not Found).
    """
    request = convert_post_to_patch(request)
    basic_bundle = self.build_bundle(request=request)

    # We want to be able to validate the update, but we can't just pass
    # the partial data into the validator since all data needs to be
    # present. Instead, we basically simulate a PUT by pulling out the
    # original data and updating it in-place.

    # So first pull out the original object. This is essentially
    # ``get_detail``.
    try:
        obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
    except ObjectDoesNotExist:
        return http.HttpNotFound()
    except MultipleObjectsReturned:
        return http.HttpMultipleChoices("More than one resource is found at this URI.")

    bundle = self.build_bundle(obj=obj, request=request)
    bundle = self.full_dehydrate(bundle)
    bundle = self.alter_detail_data_to_serialize(request, bundle)

    # Now update the bundle in-place.
    deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
    self.update_in_place(request, bundle, deserialized)

    if not self._meta.always_return_data:
        return http.HttpAccepted()
    else:
        # Re-dehydrate so the response body reflects the merged state.
        bundle = self.full_dehydrate(bundle)
        bundle = self.alter_detail_data_to_serialize(request, bundle)
        return self.create_response(request, bundle, response_class=http.HttpAccepted)
def update_in_place(self, request, original_bundle, new_data):
    """
    Merge ``new_data`` into ``original_bundle`` and persist the result.

    Simulates a PUT: the bundle already carries the full original data,
    so after the merge it can be handed straight to ``obj_update``.
    """
    original_bundle.data.update(**dict_strip_unicode_keys(new_data))

    # Now we've got a bundle with the new data sitting in it and we're
    # basically in the same spot as a PUT request. So the rest of this
    # function is cribbed from put_detail.
    self.alter_deserialized_detail_data(request, original_bundle.data)
    lookup = {
        self._meta.detail_uri_name: self.get_bundle_detail_data(original_bundle),
        'request': request,
    }
    return self.obj_update(bundle=original_bundle, **lookup)
def get_schema(self, request, **kwargs):
    """
    Returns a serialized form of the schema of the resource.

    Only answers HTTP GET; runs the usual auth/throttle checks and a
    read-detail authorization pass before building the schema.

    Should return a HttpResponse (200 OK).
    """
    self.method_check(request, allowed=['get'])
    self.is_authenticated(request)
    self.throttle_check(request)
    self.log_throttled_access(request)

    schema_bundle = self.build_bundle(request=request)
    self.authorized_read_detail(self.get_object_list(schema_bundle.request), schema_bundle)

    return self.create_response(request, self.build_schema())
def get_multiple(self, request, **kwargs):
    """
    Returns a serialized list of resources based on the identifiers
    from the URL.

    Calls ``obj_get`` to fetch only the objects requested. This method
    only responds to HTTP GET.

    Should return a HttpResponse (200 OK).
    """
    self.method_check(request, allowed=['get'])
    self.is_authenticated(request)
    self.throttle_check(request)

    # Rip apart the list then iterate.
    kwarg_name = '%s_list' % self._meta.detail_uri_name
    obj_identifiers = kwargs.get(kwarg_name, '').split(';')
    objects = []
    not_found = []
    base_bundle = self.build_bundle(request=request)

    for identifier in obj_identifiers:
        try:
            obj = self.obj_get(bundle=base_bundle, **{self._meta.detail_uri_name: identifier})
            bundle = self.build_bundle(obj=obj, request=request)
            bundle = self.full_dehydrate(bundle, for_list=True)
            objects.append(bundle)
        except (ObjectDoesNotExist, Unauthorized):
            # Track misses so the client can see which ids failed.
            not_found.append(identifier)

    object_list = {
        self._meta.collection_name: objects,
    }

    if len(not_found):
        object_list['not_found'] = not_found

    self.log_throttled_access(request)
    return self.create_response(request, object_list)
class ModelDeclarativeMetaclass(DeclarativeMetaclass):
    """
    Metaclass for model-backed resources: derives ``Meta.object_class``
    from ``Meta.queryset`` and prunes/extends the field map from the
    associated model.
    """
    def __new__(cls, name, bases, attrs):
        meta = attrs.get('Meta')

        # Derive ``object_class`` from the queryset so users only have
        # to declare one of the two.
        if meta and hasattr(meta, 'queryset'):
            setattr(meta, 'object_class', meta.queryset.model)

        new_class = super(ModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
        include_fields = getattr(new_class._meta, 'fields', [])
        excludes = getattr(new_class._meta, 'excludes', [])
        field_names = list(new_class.base_fields.keys())

        # Drop inherited fields the Meta whitelist/blacklist rules out;
        # ``resource_uri`` and explicitly declared fields always stay.
        for field_name in field_names:
            if field_name == 'resource_uri':
                continue
            if field_name in new_class.declared_fields:
                continue
            if len(include_fields) and field_name not in include_fields:
                del(new_class.base_fields[field_name])
            if len(excludes) and field_name in excludes:
                del(new_class.base_fields[field_name])

        # Add in the new fields.
        new_class.base_fields.update(new_class.get_fields(include_fields, excludes))

        if getattr(new_class._meta, 'include_absolute_url', True):
            if 'absolute_url' not in new_class.base_fields:
                new_class.base_fields['absolute_url'] = fields.CharField(attribute='get_absolute_url', readonly=True)
        elif 'absolute_url' in new_class.base_fields and 'absolute_url' not in attrs:
            del(new_class.base_fields['absolute_url'])

        return new_class
class BaseModelResource(Resource):
"""
A subclass of ``Resource`` designed to work with Django's ``Models``.
This class will introspect a given ``Model`` and build a field list based
on the fields found on the model (excluding relational fields).
Given that it is aware of Django's ORM, it also handles the CRUD data
operations of the resource.
"""
@classmethod
def should_skip_field(cls, field):
    """
    Given a Django model field, return if it should be included in the
    contributed ApiFields.

    Relational fields are skipped; this resource only introspects
    non-relational columns.
    """
    # Ignore certain fields (related fields). Use a default so fields
    # without a ``rel`` attribute (it was removed in favor of
    # ``remote_field`` on newer Django) are treated as non-relational
    # instead of raising AttributeError.
    if getattr(field, 'rel', None):
        return True

    return False
@classmethod
def api_field_from_django_field(cls, f, default=fields.CharField):
    """
    Returns the field type that would likely be associated with each
    Django type.
    """
    internal_type = f.get_internal_type()

    # Map Django internal types onto their closest ApiField classes;
    # anything unrecognised falls back to ``default``.
    type_map = {
        'DateField': fields.DateField,
        'DateTimeField': fields.DateTimeField,
        'BooleanField': fields.BooleanField,
        'NullBooleanField': fields.BooleanField,
        'FloatField': fields.FloatField,
        'DecimalField': fields.DecimalField,
        'IntegerField': fields.IntegerField,
        'PositiveIntegerField': fields.IntegerField,
        'PositiveSmallIntegerField': fields.IntegerField,
        'SmallIntegerField': fields.IntegerField,
        'AutoField': fields.IntegerField,
        'FileField': fields.FileField,
        'ImageField': fields.FileField,
        'TimeField': fields.TimeField,
    }

    # TODO: Perhaps enable ForeignKey/ManyToManyField via introspection.
    # The reason they're not mapped by default is the very different
    # ``__init__`` they have over the other fields.
    return type_map.get(internal_type, default)
@classmethod
def get_fields(cls, fields=None, excludes=None):
    """
    Given any explicit fields to include and fields to exclude, add
    additional fields based on the associated model.

    Returns a dict mapping field name to an instantiated ApiField.
    """
    final_fields = {}
    fields = fields or []
    excludes = excludes or []

    if not cls._meta.object_class:
        return final_fields

    for f in cls._meta.object_class._meta.fields:
        # If the field name is already present, skip
        if f.name in cls.base_fields:
            continue

        # If field is not present in explicit field listing, skip
        if fields and f.name not in fields:
            continue

        # If field is in exclude list, skip
        if excludes and f.name in excludes:
            continue

        if cls.should_skip_field(f):
            continue

        api_field_class = cls.api_field_from_django_field(f)

        kwargs = {
            'attribute': f.name,
            'help_text': f.help_text,
            'verbose_name': f.verbose_name,
        }

        if f.null is True:
            kwargs['null'] = True

        kwargs['unique'] = f.unique

        # Blank-but-required fields get an empty-string default so
        # hydration doesn't choke on missing data.
        if not f.null and f.blank is True:
            kwargs['default'] = ''
            kwargs['blank'] = True

        if f.get_internal_type() == 'TextField':
            kwargs['default'] = ''

        # Later assignments deliberately override the defaults above:
        # explicit model defaults and auto_now(_add) win.
        if f.has_default():
            kwargs['default'] = f.default

        if getattr(f, 'auto_now', False):
            kwargs['default'] = f.auto_now

        if getattr(f, 'auto_now_add', False):
            kwargs['default'] = f.auto_now_add

        final_fields[f.name] = api_field_class(**kwargs)
        final_fields[f.name].instance_name = f.name

    return final_fields
def check_filtering(self, field_name, filter_type='exact', filter_bits=None):
    """
    Given a field name, a optional filter type and an optional list of
    additional relations, determine if a field can be filtered on.

    If a filter does not meet the needed conditions, it should raise an
    ``InvalidFilterError``.

    If the filter meets the conditions, a list of attribute names (not
    field names) will be returned.
    """
    if filter_bits is None:
        filter_bits = []

    if field_name not in self._meta.filtering:
        raise InvalidFilterError("The '%s' field does not allow filtering." % field_name)

    # Check to see if it's an allowed lookup type.
    if self._meta.filtering[field_name] not in (ALL, ALL_WITH_RELATIONS):
        # Must be an explicit whitelist.
        if filter_type not in self._meta.filtering[field_name]:
            raise InvalidFilterError("'%s' is not an allowed filter on the '%s' field." % (filter_type, field_name))

    if self.fields[field_name].attribute is None:
        raise InvalidFilterError("The '%s' field has no 'attribute' for searching with." % field_name)

    # Check to see if it's a relational lookup and if that's allowed.
    if len(filter_bits):
        if not getattr(self.fields[field_name], 'is_related', False):
            raise InvalidFilterError("The '%s' field does not support relations." % field_name)

        if not self._meta.filtering[field_name] == ALL_WITH_RELATIONS:
            raise InvalidFilterError("Lookups are not allowed more than one level deep on the '%s' field." % field_name)

        # Recursively descend through the remaining lookups in the filter,
        # if any. We should ensure that all along the way, we're allowed
        # to filter on that field by the related resource.
        related_resource = self.fields[field_name].get_related_resource(None)
        return [self.fields[field_name].attribute] + related_resource.check_filtering(filter_bits[0], filter_type, filter_bits[1:])

    return [self.fields[field_name].attribute]
def filter_value_to_python(self, value, field_name, filters, filter_expr,
                           filter_type):
    """
    Turn the string ``value`` into a python object.

    ``in``/``range`` filters additionally split comma-separated values
    (honouring repeated query-string parameters when ``filters`` is a
    ``QueryDict``-like object).
    """
    # Simple values (booleans, None and friends).
    value = string_to_python(value)

    # Split on ',' if not empty string and either an in or range filter.
    if filter_type in ('in', 'range') and len(value):
        if hasattr(filters, 'getlist'):
            # The same parameter may appear multiple times in the query
            # string; flatten all occurrences.
            parts = filters.getlist(filter_expr)
            value = [piece for part in parts for piece in part.split(',')]
        else:
            value = value.split(',')

    return value
def build_filters(self, filters=None):
    """
    Given a dictionary of filters, create the necessary ORM-level filters.

    Keys should be resource fields, **NOT** model fields.

    Valid values are either a list of Django filter types (i.e.
    ``['startswith', 'exact', 'lte']``), the ``ALL`` constant or the
    ``ALL_WITH_RELATIONS`` constant.
    """
    # At the declarative level:
    #     filtering = {
    #         'resource_field_name': ['exact', 'startswith', 'endswith', 'contains'],
    #         'resource_field_name_2': ['exact', 'gt', 'gte', 'lt', 'lte', 'range'],
    #         'resource_field_name_3': ALL,
    #         'resource_field_name_4': ALL_WITH_RELATIONS,
    #         ...
    #     }
    # Accepts the filters as a dict. None by default, meaning no filters.
    if filters is None:
        filters = {}

    qs_filters = {}

    if getattr(self._meta, 'queryset', None) is not None:
        # Get the possible query terms from the current QuerySet.
        query_terms = self._meta.queryset.query.query_terms
    else:
        query_terms = QUERY_TERMS

    # GeoDjango registers extra lookups on GeometryField; include them.
    if django.VERSION >= (1, 8) and GeometryField:
        query_terms = query_terms | set(GeometryField.class_lookups.keys())

    for filter_expr, value in filters.items():
        filter_bits = filter_expr.split(LOOKUP_SEP)
        field_name = filter_bits.pop(0)
        filter_type = 'exact'

        if field_name not in self.fields:
            # It's not a field we know about. Move along citizen.
            continue

        # A trailing recognised lookup (e.g. ``__gte``) overrides 'exact'.
        if len(filter_bits) and filter_bits[-1] in query_terms:
            filter_type = filter_bits.pop()

        lookup_bits = self.check_filtering(field_name, filter_type, filter_bits)
        value = self.filter_value_to_python(value, field_name, filters, filter_expr, filter_type)

        db_field_name = LOOKUP_SEP.join(lookup_bits)
        qs_filter = "%s%s%s" % (db_field_name, LOOKUP_SEP, filter_type)
        qs_filters[qs_filter] = value

    return dict_strip_unicode_keys(qs_filters)
def apply_sorting(self, obj_list, options=None):
    """
    Given a dictionary of options, apply some ORM-level sorting to the
    provided ``QuerySet``.

    Looks for the ``order_by`` key and handles either ascending (just the
    field name) or descending (the field name with a ``-`` in front).

    The field name should be the resource field, **NOT** model field.
    """
    if options is None:
        options = {}

    parameter_name = 'order_by'

    if 'order_by' not in options:
        if 'sort_by' not in options:
            # Nothing to alter the order. Return what we've got.
            return obj_list
        else:
            warnings.warn("'sort_by' is a deprecated parameter. Please use 'order_by' instead.")
            parameter_name = 'sort_by'

    order_by_args = []

    if hasattr(options, 'getlist'):
        # ``QueryDict``: the parameter may legitimately repeat.
        order_bits = options.getlist(parameter_name)
    else:
        order_bits = options.get(parameter_name)

        if not isinstance(order_bits, (list, tuple)):
            order_bits = [order_bits]

    for order_by in order_bits:
        order_by_bits = order_by.split(LOOKUP_SEP)

        field_name = order_by_bits[0]
        order = ''

        # A leading '-' means descending order.
        if order_by_bits[0].startswith('-'):
            field_name = order_by_bits[0][1:]
            order = '-'

        if field_name not in self.fields:
            # It's not a field we know about. Move along citizen.
            raise InvalidSortError("No matching '%s' field for ordering on." % field_name)

        if field_name not in self._meta.ordering:
            raise InvalidSortError("The '%s' field does not allow ordering." % field_name)

        if self.fields[field_name].attribute is None:
            raise InvalidSortError("The '%s' field has no 'attribute' for ordering with." % field_name)

        order_by_args.append("%s%s" % (order, LOOKUP_SEP.join([self.fields[field_name].attribute] + order_by_bits[1:])))

    return obj_list.order_by(*order_by_args)
def apply_filters(self, request, applicable_filters):
    """
    An ORM-specific implementation of ``apply_filters``.

    Simply applies ``applicable_filters`` as keyword arguments to
    ``filter()`` on the base queryset; hook point for fancier behavior.
    """
    queryset = self.get_object_list(request)
    return queryset.filter(**applicable_filters)
def get_object_list(self, request):
    """
    An ORM-specific implementation of ``get_object_list``.

    Returns a queryset that may have been limited by other overrides.
    ``_clone()`` keeps per-request use from mutating the shared,
    class-level ``Meta.queryset``.
    """
    return self._meta.queryset._clone()
def obj_get_list(self, bundle, **kwargs):
    """
    A ORM-specific implementation of ``obj_get_list``.

    Merges the request's ``GET`` parameters (when present) with
    ``kwargs``, builds ORM filters from them and returns the authorized,
    filtered object list.
    """
    params = {}

    if hasattr(bundle.request, 'GET'):
        # Grab a mutable copy so kwargs can be layered on top.
        params = bundle.request.GET.copy()

    # URI kwargs take precedence over query-string parameters.
    params.update(kwargs)
    orm_filters = self.build_filters(filters=params)

    try:
        matches = self.apply_filters(bundle.request, orm_filters)
        return self.authorized_read_list(matches, bundle)
    except ValueError:
        raise BadRequest("Invalid resource lookup data provided (mismatched type).")
def obj_get(self, bundle, **kwargs):
    """
    A ORM-specific implementation of ``obj_get``.

    Takes optional ``kwargs``, which are used to narrow the query to find
    the instance. Raises ``DoesNotExist``/``MultipleObjectsReturned``
    when the lookup doesn't match exactly one row, ``NotFound`` on a
    type mismatch.
    """
    # prevents FieldError when looking up nested resources containing extra data
    # NOTE(review): ``Options.get_all_field_names()`` was removed in
    # Django 1.10 — this code targets older Django; verify against the
    # supported version.
    field_names = self._meta.object_class._meta.get_all_field_names()
    field_names.append('pk')
    kwargs = dict([(k, v,) for k, v in kwargs.items() if k in field_names])

    try:
        object_list = self.get_object_list(bundle.request).filter(**kwargs)
        stringified_kwargs = ', '.join(["%s=%s" % (k, v) for k, v in kwargs.items()])

        if len(object_list) <= 0:
            raise self._meta.object_class.DoesNotExist("Couldn't find an instance of '%s' which matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
        elif len(object_list) > 1:
            raise MultipleObjectsReturned("More than '%s' matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))

        bundle.obj = object_list[0]
        self.authorized_read_detail(object_list, bundle)
        return bundle.obj
    except ValueError:
        raise NotFound("Invalid resource lookup data provided (mismatched type).")
def obj_create(self, bundle, **kwargs):
    """
    A ORM-specific implementation of ``obj_create``.

    Instantiates a fresh model, seeds it with any URI-derived kwargs,
    hydrates the bundle data onto it and saves.
    """
    bundle.obj = self._meta.object_class()

    # URI-derived values (e.g. parent pks) are applied directly to the
    # new instance before hydration.
    for attr_name, attr_value in kwargs.items():
        setattr(bundle.obj, attr_name, attr_value)

    bundle = self.full_hydrate(bundle)
    return self.save(bundle)
def lookup_kwargs_with_identifiers(self, bundle, kwargs):
    """
    Kwargs here represent uri identifiers Ex: /repos/<user_id>/<repo_name>/
    We need to turn those identifiers into Python objects for generating
    lookup parameters that can find them in the DB
    """
    lookup_kwargs = {}
    bundle.obj = self.get_object_list(bundle.request).model()
    # Override data values, we rely on uri identifiers
    bundle.data.update(kwargs)
    # We're going to manually hydrate, as opposed to calling
    # ``full_hydrate``, to ensure we don't try to flesh out related
    # resources & keep things speedy.
    bundle = self.hydrate(bundle)

    for identifier in kwargs:
        if identifier == self._meta.detail_uri_name:
            # The pk-ish identifier passes through untouched.
            lookup_kwargs[identifier] = kwargs[identifier]
            continue

        field_object = self.fields[identifier]

        # Skip readonly or related fields.
        if field_object.readonly is True or field_object.is_related:
            continue

        # Check for an optional method to do further hydration.
        method = getattr(self, "hydrate_%s" % identifier, None)

        if method:
            bundle = method(bundle)

        if field_object.attribute:
            value = field_object.hydrate(bundle)
            lookup_kwargs[identifier] = value

    return lookup_kwargs
def obj_update(self, bundle, skip_errors=False, **kwargs):
    """
    A ORM-specific implementation of ``obj_update``.

    If the bundle doesn't already carry the right object (or the URI
    kwargs point at a different one), the object is looked up first,
    then hydrated and saved.
    """
    bundle_detail_data = self.get_bundle_detail_data(bundle) if bundle.obj else None
    arg_detail_data = kwargs.get(self._meta.detail_uri_name, None)

    if not bundle_detail_data or (arg_detail_data and bundle_detail_data != arg_detail_data):
        try:
            lookup_kwargs = self.lookup_kwargs_with_identifiers(bundle, kwargs)
        except Exception:
            # If there is trouble hydrating the data, fall back to just
            # using kwargs by itself (usually it only contains a "pk" key
            # and this will work fine). Catch ``Exception`` rather than
            # using a bare ``except:``, which would also swallow
            # KeyboardInterrupt/SystemExit.
            lookup_kwargs = kwargs

        try:
            bundle.obj = self.obj_get(bundle=bundle, **lookup_kwargs)
        except ObjectDoesNotExist:
            raise NotFound("A model instance matching the provided arguments could not be found.")

    bundle = self.full_hydrate(bundle)
    return self.save(bundle, skip_errors=skip_errors)
def obj_delete_list(self, bundle, **kwargs):
    """
    A ORM-specific implementation of ``obj_delete_list``.
    """
    candidates = self.obj_get_list(bundle=bundle, **kwargs)
    authorized = self.authorized_delete_list(candidates, bundle)

    if hasattr(authorized, 'delete'):
        # Likely a ``QuerySet`` -- a single bulk DELETE is far cheaper.
        authorized.delete()
        return

    for obj in authorized:
        obj.delete()
def obj_delete_list_for_update(self, bundle, **kwargs):
    """
    A ORM-specific implementation of ``obj_delete_list_for_update``.
    """
    candidates = self.obj_get_list(bundle=bundle, **kwargs)
    # Deleting as part of an update requires *update* authorization.
    authorized = self.authorized_update_list(candidates, bundle)

    if hasattr(authorized, 'delete'):
        # Likely a ``QuerySet`` -- a single bulk DELETE is far cheaper.
        authorized.delete()
        return

    for obj in authorized:
        obj.delete()
def obj_delete(self, bundle, **kwargs):
    """
    A ORM-specific implementation of ``obj_delete``.

    Takes optional ``kwargs``, which are used to narrow the query to find
    the instance.
    """
    needs_lookup = not hasattr(bundle.obj, 'delete')
    if needs_lookup:
        try:
            bundle.obj = self.obj_get(bundle=bundle, **kwargs)
        except ObjectDoesNotExist:
            raise NotFound("A model instance matching the provided arguments could not be found.")

    self.authorized_delete_detail(self.get_object_list(bundle.request), bundle)
    bundle.obj.delete()
@atomic_decorator()
def patch_list(self, request, **kwargs):
    """
    An ORM-specific implementation of ``patch_list``.
    Necessary because PATCH should be atomic (all-success or all-fail)
    and the only way to do this neatly is at the database level.
    """
    # The decorator wraps the whole request in a DB transaction; the actual
    # work is done by the generic implementation in the parent class.
    return super(BaseModelResource, self).patch_list(request, **kwargs)
def rollback(self, bundles):
    """
    A ORM-specific implementation of ``rollback``.

    Given the list of bundles, delete all models pertaining to those
    bundles.
    """
    for entry in bundles:
        # Only objects that actually reached the DB carry detail data.
        if entry.obj and self.get_bundle_detail_data(entry):
            entry.obj.delete()
def create_identifier(self, obj):
    """Build a unique ``app_label.module_name.pk`` string identifying *obj*."""
    parts = (obj._meta.app_label, get_module_name(obj._meta), obj.pk)
    return u"%s.%s.%s" % parts
def save(self, bundle, skip_errors=False):
    """Validate, authorize and persist ``bundle.obj`` plus its relations."""
    # Objects reached purely via URI are already persisted elsewhere.
    if bundle.via_uri:
        return bundle
    self.is_valid(bundle)
    if bundle.errors and not skip_errors:
        raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors))
    # Check if they're authorized.
    if bundle.obj.pk:
        self.authorized_update_detail(self.get_object_list(bundle.request), bundle)
    else:
        self.authorized_create_detail(self.get_object_list(bundle.request), bundle)
    # Save FKs just in case.
    self.save_related(bundle)
    # Save the main object.
    obj_id = self.create_identifier(bundle.obj)
    # Guard against double-saving within one request cycle, but always save
    # objects the ORM still considers unsaved (``_state.adding``).
    if obj_id not in bundle.objects_saved or bundle.obj._state.adding:
        bundle.obj.save()
        bundle.objects_saved.add(obj_id)
    # Now pick up the M2M bits.
    m2m_bundle = self.hydrate_m2m(bundle)
    self.save_m2m(m2m_bundle)
    return bundle
def save_related(self, bundle):
    """
    Handles the saving of related non-M2M data.
    Calling assigning ``child.parent = parent`` & then calling
    ``Child.save`` isn't good enough to make sure the ``parent``
    is saved.
    To get around this, we go through all our related fields &
    call ``save`` on them if they have related, non-M2M data.
    M2M data is handled by the ``ModelResource.save_m2m`` method.
    """
    for field_name, field_object in self.fields.items():
        # Only FK-style (to-one) fields are handled here.
        if not field_object.is_related:
            continue
        if field_object.is_m2m:
            continue
        if not field_object.attribute:
            continue
        if field_object.readonly:
            continue
        if field_object.blank and field_name not in bundle.data:
            continue
        # Get the object.
        try:
            related_obj = getattr(bundle.obj, field_object.attribute)
        except ObjectDoesNotExist:
            # Django 1.8: unset related objects default to None, no error
            related_obj = None
        # We didn't get it, so maybe we created it but haven't saved it
        if related_obj is None:
            related_obj = bundle.related_objects_to_save.get(field_object.attribute, None)
        if field_object.related_name:
            # this might be a reverse relation, so we need to save this
            # model, attach it to the related object, and save the related
            # object.
            if not self.get_bundle_detail_data(bundle):
                bundle.obj.save()
            setattr(related_obj, field_object.related_name, bundle.obj)
        related_resource = field_object.get_related_resource(related_obj)
        # Before we build the bundle & try saving it, let's make sure we
        # haven't already saved it.
        if related_obj:
            obj_id = self.create_identifier(related_obj)
            if obj_id in bundle.objects_saved:
                # It's already been saved. We're done here.
                continue
        if bundle.data.get(field_name):
            if hasattr(bundle.data[field_name], 'keys'):
                # Only build & save if there's data, not just a URI.
                related_bundle = related_resource.build_bundle(
                    obj=related_obj,
                    data=bundle.data.get(field_name),
                    request=bundle.request,
                    objects_saved=bundle.objects_saved
                )
                related_resource.full_hydrate(related_bundle)
                related_resource.save(related_bundle)
                related_obj = related_bundle.obj
            elif field_object.related_name:
                # This condition probably means a URI for a reverse
                # relation was provided.
                related_bundle = related_resource.build_bundle(
                    obj=related_obj,
                    request=bundle.request,
                    objects_saved=bundle.objects_saved
                )
                related_resource.save(related_bundle)
                related_obj = related_bundle.obj
        if related_obj:
            # Re-attach the (possibly newly saved) object to the FK slot.
            setattr(bundle.obj, field_object.attribute, related_obj)
def save_m2m(self, bundle):
    """
    Handles the saving of related M2M data.
    Due to the way Django works, the M2M data must be handled after the
    main instance, which is why this isn't a part of the main ``save`` bits.
    Currently slightly inefficient in that it will clear out the whole
    relation and recreate the related data as needed.
    """
    for field_name, field_object in self.fields.items():
        if not field_object.is_m2m:
            continue
        if not field_object.attribute:
            continue
        if field_object.readonly:
            continue
        # Get the manager.
        related_mngr = None
        if isinstance(field_object.attribute, six.string_types):
            related_mngr = getattr(bundle.obj, field_object.attribute)
        elif callable(field_object.attribute):
            related_mngr = field_object.attribute(bundle)
        if not related_mngr:
            continue
        if hasattr(related_mngr, 'clear'):
            # FIXME: Dupe the original bundle, copy in the new object &
            # check the perms on that (using the related resource)?
            # Clear it out, just to be safe.
            related_mngr.clear()
        related_objs = []
        # NOTE(review): assumes ``bundle.data`` always carries this field --
        # a missing key would raise KeyError here; confirm hydrate_m2m
        # guarantees it.
        for related_bundle in bundle.data[field_name]:
            related_resource = field_object.get_related_resource(bundle.obj)
            # Only build & save if there's data, not just a URI.
            updated_related_bundle = related_resource.build_bundle(
                obj=related_bundle.obj,
                data=related_bundle.data,
                request=bundle.request,
                objects_saved=bundle.objects_saved,
                via_uri=related_bundle.via_uri,
            )
            related_resource.save(updated_related_bundle)
            related_objs.append(updated_related_bundle.obj)
        # Attach the whole recreated set in one call.
        related_mngr.add(*related_objs)
def detail_uri_kwargs(self, bundle_or_obj):
    """
    Given a ``Bundle`` or an object (typically a ``Model`` instance),
    it returns the extra kwargs needed to generate a detail URI.

    By default, it uses the model's ``pk`` in order to create the URI.
    """
    uri_name = self._meta.detail_uri_name
    target = bundle_or_obj.obj if isinstance(bundle_or_obj, Bundle) else bundle_or_obj
    return {uri_name: getattr(target, uri_name)}
class ModelResource(six.with_metaclass(ModelDeclarativeMetaclass, BaseModelResource)):
    # Concrete, user-facing resource class: the declarative metaclass handles
    # field construction; all behavior lives in BaseModelResource.
    pass
class NamespacedModelResource(ModelResource):
    """
    A ModelResource subclass that respects Django namespaces.
    """
    def _build_reverse_url(self, name, args=None, kwargs=None):
        # Prefix the URL name with the configured namespace before reversing.
        namespaced = "%s:%s" % (self._meta.urlconf_namespace, name)
        return reverse(namespaced, args=args, kwargs=kwargs)
# Based off of ``piston.utils.coerce_put_post``. Similarly BSD-licensed.
# And no, the irony is not lost on me.
def convert_post_to_VERB(request, verb):
    """
    Force Django to process the VERB.
    """
    # Nothing to do unless the request actually uses this verb.
    if request.method != verb:
        return request

    if hasattr(request, '_post'):
        # Drop any previously-parsed payload so it gets re-parsed below.
        del request._post
        del request._files

    try:
        # Temporarily masquerade as POST so Django's body parser will run.
        request.method = "POST"
        request._load_post_and_files()
        request.method = verb
    except AttributeError:
        # Fallback: some Django versions read the method from request.META.
        request.META['REQUEST_METHOD'] = 'POST'
        request._load_post_and_files()
        request.META['REQUEST_METHOD'] = verb

    # Expose the parsed payload as e.g. ``request.PUT``.
    setattr(request, verb, request.POST)
    return request
def convert_post_to_put(request):
    """Re-parse the request body for a PUT request."""
    return convert_post_to_VERB(request, verb='PUT')
def convert_post_to_patch(request):
    """Re-parse the request body for a PATCH request."""
    return convert_post_to_VERB(request, verb='PATCH')
|
Barack Obama seems to believe that our country can spend enormous sums that we don’t have, every year, from now until the end of time with no negative repercussions for our country. That strategy has never worked for any country, at any point, in the history of the world and it’s very unlikely it will work for us. That’s why it’s great news that at least one Republican senator is offering up a real alternative.
Sen. Rand Paul formally rolled out his 2014 budget blueprint on Friday, offering a combination of tax and spending proposals that he said would balance the federal budget in five years without raising taxes.
The freshman Kentucky Republican’s plan reshapes entitlement programs, abolishes four federal agencies and overhauls the federal tax code by establishing a 17-percent flat tax and eliminating taxes on capital gains, dividends and savings.
It also aims to put the nation's "large military complex of yesterday" in check, while calling on Congress to open up the Arctic National Wildlife Refuge and the outer continental shelf to oil drilling, and to increase oil and gas development on federal lands.
…The spending proposal the freshman senator outlined Friday would put the nation on a path to a balanced budget five years faster than the plan that House Republicans adopted last week. The unveiling came as the Senate was debating a Democratic budget that would leave a major deficit even after 10 years.
What’s more radical and unrealistic? Balancing our budget in five years, which would restore our AAA credit rating, make Social Security and Medicare solvent over the long haul and prevent a debt driven economic crisis or the liberal plan to deliver skyrocketing taxes and trillion dollar deficits for as far as the eye can see until our economy collapses after we can’t get people to loan us money any more? Draw your own conclusion.
|
"""
Support for Lutron Caseta switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.lutron_caseta/
"""
import logging
from homeassistant.components.lutron_caseta import (
LUTRON_CASETA_SMARTBRIDGE, LutronCasetaDevice)
from homeassistant.components.switch import SwitchDevice, DOMAIN
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['lutron_caseta']
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up Lutron switch."""
    bridge = hass.data[LUTRON_CASETA_SMARTBRIDGE]
    # Wrap every bridge-reported switch device in an entity.
    entities = [
        LutronCasetaLight(device, bridge)
        for device in bridge.get_devices_by_domain(DOMAIN)
    ]
    async_add_entities(entities, True)
    return True
class LutronCasetaLight(LutronCasetaDevice, SwitchDevice):
    """Representation of a Lutron Caseta switch."""
    # NOTE(review): named ...Light but registered as a switch entity --
    # presumably historical; renaming would break external references.

    async def async_turn_on(self, **kwargs):
        """Turn the switch on."""
        self._smartbridge.turn_on(self._device_id)

    async def async_turn_off(self, **kwargs):
        """Turn the switch off."""
        self._smartbridge.turn_off(self._device_id)

    @property
    def is_on(self):
        """Return true if device is on."""
        # Any positive ``current_state`` value counts as "on".
        return self._state["current_state"] > 0

    async def async_update(self):
        """Update when forcing a refresh of the device."""
        # Pull the latest device record from the bridge.
        self._state = self._smartbridge.get_device_by_id(self._device_id)
        _LOGGER.debug(self._state)
|
Wednesday night, sneaker-heads and music lovers alike got the chance to enjoy a night of free booze, free sneakers, and a free show headlined by breakthrough powerhouse Kiesza, courtesy of Reebok Classics and The Fader.
The Canadian singer performed "Take U There" by Diplo and her oh-so-catchy summer hit "Hideaway" -- truly transforming the intimate event space of 201 Mulberry Street into an urban discotheque nestled in the city. Well equipped with heavy bass drops, groovy rhythms, mesmerizing lights, and all the synchronized dance moves to match, Kiesza and her fans partied the night away to tracks off her second album, "Sound of a Woman."
She ended the night by snapping a pretty epic #selfie with concert-goers, later posting it to her Instagram captioned "Love this photo from my show with @reebokclassics and @TheFader back in #NYC."
Originally kicking off in Los Angeles on November 24th, the Coast To Coast event series integrates music, culture, and style -- commemorating the 25th anniversary of the "Ventilator" sneaker, sported by songstress, Kiesza, on stage.
The sneaks and sound collabo event also featured the sounds of DJ Brenmar.
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf.urls import patterns, url
from util import page
import views
# URL map for the mozorg app: ``url()`` routes point at real views,
# ``page()`` entries render a static template at the given path.
urlpatterns = patterns('',
    url('^$', views.HomeTestView.as_view(), name='mozorg.home'),
    page('about/manifesto', 'mozorg/about/manifesto.html'),
    page('about', 'mozorg/about.html'),
    page('book', 'mozorg/book.html'),
    url('^about/partnerships/$', views.partnerships, name='mozorg.partnerships'),
    page('about/partnerships/distribution', 'mozorg/partnerships-distribution.html'),
    page('about/history', 'mozorg/about/history.html'),
    page('products', 'mozorg/products.html'),
    page('about/mozilla-based', 'mozorg/projects/mozilla-based.html'),
    page('button', 'mozorg/button.html'),
    page('mission', 'mozorg/mission.html'),
    page('mobile', 'mozorg/mobile.html'),
    page('ITU', 'mozorg/itu.html'),
    page('about/powered-by', 'mozorg/powered-by.html'),
    page('about/governance', 'mozorg/about/governance/governance.html'),
    page('about/governance/roles', 'mozorg/about/governance/roles.html'),
    # NOTE(review): 'about/governance/policies' is registered twice -- see
    # the later entry pointing at policies/policies.html; only one route can
    # win. Confirm which template is intended before removing either.
    page('about/governance/policies', 'mozorg/about/governance/policies.html'),
    page('about/governance/policies/security-group', 'mozorg/about/governance/policies/security/group.html'),
    page('about/governance/policies/security-group/bugs', 'mozorg/about/governance/policies/security/bugs.html'),
    page('about/governance/policies/security-group/tld-idn', 'mozorg/about/governance/policies/security/tld-idn.html'),
    page('about/governance/policies/security-group/membership', 'mozorg/about/governance/policies/security/membership.html'),
    page('about/governance/organizations', 'mozorg/about/governance/organizations.html'),
    page('about/governance/policies/participation', 'mozorg/about/governance/policies/participation.html'),
    # NOTE(review): duplicate of the 'about/governance/policies' entry above.
    page('about/governance/policies', 'mozorg/about/governance/policies/policies.html'),
    url('^contribute/$', views.contribute, name='mozorg.contribute',
        kwargs={'template': 'mozorg/contribute.html',
                'return_to_form': False}),
    url('^contribute/event/$', views.contribute,
        kwargs={'template': 'mozorg/contribute.html',
                'return_to_form': True},
        name='mozorg.contribute_event'),
    url('^contribute/page/$', views.contribute,
        kwargs={'template': 'mozorg/contribute-page.html',
                'return_to_form': False},
        name='mozorg.contribute_page'),
    url('^contribute/embed/$', views.contribute_embed,
        name='mozorg.contribute_embed',
        kwargs={'template': 'mozorg/contribute-embed.html',
                'return_to_form': False}),
    url('^contribute/universityambassadors/$',
        views.contribute_university_ambassadors,
        name='mozorg.contribute_university_ambassadors'),
    page('contribute/universityambassadors/thanks',
         'mozorg/contribute_university_ambassadors_thanks.html'),
    url(r'^plugincheck/$',
        views.plugincheck,
        name='mozorg.plugincheck'),
    url(r'^robots.txt$', views.Robots.as_view(), name='robots.txt'),
)
|
Thank you for visiting this website (“the site”) www.captivaspine.com, which is owned and operated by Captiva Spine, Inc. (CAPTIVA SPINE, us, we, our).
You may browse many sections of this site without providing any personal information about yourself. However, if you choose to register for one of our services or ask us a question through our Contact Us links, we must necessarily collect certain information from you. The information we collect will be related to the product or service requested, such as your name and contact details, the nature of the product or service requested, and related information so that we may fulfill your request and respond to you.
Occasionally, CAPTIVA SPINE may invite you to participate in a survey or poll to provide feedback on our site content, services, or products. We will ask you to provide your contact details so that we can provide you with additional information regarding the survey or poll. For any information that we collect through our sites, CAPTIVA SPINE will own that data and does not share or sell any personal information that you provide to us through our site for any separate use by any third party.
Like most sites, we use “cookies” on our website to help us serve you better on future visits. A cookie is a small text file which is sent by a website, accepted by a web browser and then placed on your hard drive. The information collected from cookies lets us know that you visited our site in the past, and helps you avoid having to re-enter information on each visit in order to use some of our products or services. You can always set your browser to refuse all cookies from this and other sites that you may visit. However, it is possible that some portions of this website will not function properly or may perform more slowly if you choose to reject cookies. By using our websites and not disabling cookies, you consent to their use for the described purposes. Cookies generally do not permit us to personally identify you.
CAPTIVA SPINE recognizes the privacy interests of children and we encourage parents and guardians to take an active role in their children’s online activities and interests. This Site is not intended for children under the age of 18. CAPTIVA SPINE does not target its services or this site to children under 18. CAPTIVA SPINE does not knowingly collect personally identifiable information from children under the age of 18.
CAPTIVA SPINE takes reasonable steps to ensure that your personal data is accurate and up-to-date for the purposes for which it was collected. We encourage you to contact us to update or correct your information if it changes or if you believe that any information that we have collected about you is inaccurate. Consistent with legal requirements and limitations, CAPTIVA SPINE also permits you to obtain a copy of personal information that we hold about you. To do so, please write to us at info@captivaspine.com. Please note that we will likely require additional information from you in order to honor the request.
You have the right to unsubscribe from any services that we offer if you no longer want to participate. To do so, please use our Contact Us link on this site for additional information, or email us at info@captivaspine.com. Please note that if you already have requested products or services when you decide to withdraw consent, there may be a short period of time for us to update your preferences and ensure that we honor your request.
Our site contains links to other websites that are not owned or operated by CAPTIVA SPINE. You should carefully review the privacy policies and practices of other websites, as we cannot control and are not responsible for privacy policies or practices of third-party websites that are not ours.
CAPTIVA SPINE is based in the United States, so your personal data will be processed by us in the U.S. where data protection and privacy regulations may not offer the same level of protection as in other parts of the world, such as the European Union. If you use this site from outside the United States you agree to this Privacy Statement and you consent to the transfer of all such information to the United States, which may not offer an equivalent level of protection of that required in countries of the European Union or certain other countries, and to the processing of that information as described in this Privacy Statement.
© 2015 Captiva Spine, Inc. All Rights Reserved.
|
import json
import BeautifulSoup
import requests
import re
import time
import threading
import networkx as nx
import multiprocessing
import matplotlib.pyplot as plt
import glob
import os
import difflib
from plugincon import bot_command, easy_bot_command, get_message_target, get_bot_nickname
from random import choice, sample
# Crude crawler progress counters: threads started vs. threads finished.
crawling = 0
crawled = 0
# word -> set of words observed to follow it (the Markov chain data).
markov_dict = {}
# Blacklisted words; occurrences are masked with '*' in generated output.
markov_filter = []
# Global kill-switch checked by crawler threads.
can_crawl = True
def hastebin(data):
    """Upload *data* to hastebin.com.

    Returns the paste URL on success, "\x01" on timeout, or
    "\x02<status>" on a non-200 response (callers check these sentinels).
    """
    try:
        h = requests.post("http://hastebin.com/documents", data=data, timeout=10)
    except requests.exceptions.Timeout:
        # Was ``ConnectTimeout`` only, which missed read timeouts; the
        # parent ``Timeout`` covers both and matches how the rest of this
        # module handles requests timeouts.
        return "\x01"
    if h.status_code != 200:
        return "\x02" + str(h.status_code)
    return "http://hastebin.com/" + h.json()['key']
def botbin(data, description="Result"):
    """Wrap hastebin(), translating its sentinel returns into chat messages."""
    url = hastebin(data)
    if url == "\x01":
        return "Error: Connection to hastebin.com timed out!"
    if url.startswith("\x02"):
        return "Error: Unsuccesful status code reached! ({})".format(url[1:])
    return "{} URL: {}".format(description, url)
@easy_bot_command("hastemarkovjson")
def hastemarkov(message, raw):
if raw:
return
r = hastebin(json.dumps({x: list(y) for x, y in markov_dict.items()}, indent=4))
if r == "\x01":
return "Error: Connection to hastebin.com timed out!"
elif r.startswith("\x02"):
return "Error: Unsuccesful status code reached! ({})".format(r[1:])
else:
return "URL: {}".format(r)
@easy_bot_command("listmarkovfiles")
def list_markov_files(message, raw):
if raw:
return
return botbin("\n".join([os.path.splitext(os.path.split(x)[-1])[0] for x in glob.glob("markov2/*.mkov2")]))
@easy_bot_command("qlistmarkovfiles")
def quick_list(message, raw):
if raw:
return
return "Markov files that can be loaded using loadmarkov: {}".format(", ".join([os.path.splitext(os.path.split(x)[-1])[0] for x in glob.glob("markov2/*.mkov2")]))
@easy_bot_command("searchmarkovfiles")
def search_files(message, raw):
if raw:
return
if len(message["arguments"]) < 2:
return "Syntax: searchmarkofiles <keyword>"
return "Similiar Markov files: {} | Markov files with {} in filename: {}".format(", ".join([x for x in [os.path.splitext(os.path.split(x)[-1])[0] for x in glob.glob("markov2/*.mkov2")] if difflib.SequenceMatcher(None, x, " ".join(message["arguments"][1:])).ratio() > 0.8]), message["arguments"][1], ", ".join(x for x in [os.path.splitext(os.path.split(x)[-1])[0] for x in glob.glob("markov2/*.mkov2")] if message["arguments"][1] in x))
@easy_bot_command("markovderivates")
def derivates(message, raw):
if raw:
return
if len(message["arguments"]) < 2:
return "Syntax: markovderivates <Markov keyword>"
if message["arguments"][1] not in markov_dict:
return "Error: No such word in Markov data!"
return "Derivates for {}: {}".format(message["arguments"][1], ", ".join(markov_dict[message["arguments"][1]]))
def regex(value, reg):
    """Return True when *reg* is empty or matches anywhere in *value*."""
    return reg == "" or bool(re.search(reg, value))
def ends_with_any(string, list_of_endings):
    """Return True if *string* ends with any of the given endings.

    Idiom fix: ``str.endswith`` accepts a tuple of suffixes, replacing the
    manual Python-level loop with a single C-level call.
    """
    return string.endswith(tuple(list_of_endings))
def mkplot(markov_dict):
    """Visualize the Markov chain as a directed graph (blocks on plt.show())."""
    G = nx.DiGraph()
    labels = {}  # NOTE(review): never populated or used -- looks dead.
    # Add a node for every word and every follower.
    for i, (k, v) in enumerate(markov_dict.iteritems()):
        G.add_node(k)
        for w in v:
            G.add_node(w)
    # Second pass: add an edge word -> follower.
    for i, (k, v) in enumerate(markov_dict.iteritems()):
        for w in v:
            G.add_edge(k, w)
    pos = nx.spring_layout(G)
    nx.draw_networkx_nodes(G, pos)
    nx.draw_networkx_edges(G, pos, arrows=True)
    # Label every node with its own word.
    nx.draw_networkx_labels(G, pos, {w: w for k, v in markov_dict.items() for w in [x for x in [k] + list(v)]})
    plt.show()
def visible(element):
    """Return False for text nodes that are not user-visible page text."""
    hidden_parents = ('style', 'script', '[document]', 'head', 'title')
    if element.parent.name in hidden_parents:
        return False
    # HTML comment nodes stringify as <!-- ... -->.
    return not re.match('<!--.*-->', str(element))
def isalnumspace(string):
    """Return True if every character is alphanumeric or a space.

    Idiom fix: ``all()`` over a generator replaces the manual flag loop;
    the empty string still returns True, as before.
    """
    return all(char.isalnum() or char == " " for char in string)
def simple_string_filter(old_string, bad_chars=None, extra_filter=None):
    """Drop characters found in *bad_chars*, then keep only those accepted
    by *extra_filter* (a per-character predicate).

    NOTE(review): when *bad_chars* is falsy the input is never copied into
    the result, so the return value is always "" -- quirk preserved, since
    every caller in this module passes bad_chars.
    """
    result = ""
    if bad_chars:
        result = "".join(char for char in old_string if char not in bad_chars)
    if extra_filter and hasattr(extra_filter, "__call__"):
        result = "".join(char for char in result if extra_filter(char))
    return result
def parse_markov_string(string):
    """Tokenize *string* and record word -> next-word pairs in markov_dict."""
    global markov_dict
    # Strip punctuation, keep only alnum+space, then split into words.
    words = simple_string_filter(string, "\'\"-/\\,.!?", isalnumspace).split(" ")
    for x in xrange(len(words)):
        try:
            # Skip immediate self-repetitions ("the the").
            # NOTE(review): at x == 0, words[x - 1] is words[-1] (the last
            # word) via negative indexing -- confirm this is intended.
            if words[x - 1] == words[x] or words[x] == words[x + 1]:
                continue
        except IndexError:
            pass
        try:
            # Link previous word -> current word.
            markov_dict[words[x - 1].lower()].add(words[x].lower())
        except KeyError:
            # First sighting of the previous word: start a fresh set.
            try:
                markov_dict[words[x - 1].lower()] = {words[x].lower()}
            except IndexError:
                pass
        except IndexError:
            pass
        try:
            # Link current word -> next word.
            markov_dict[words[x].lower()].add(words[x + 1].lower())
        except KeyError:
            try:
                markov_dict[words[x].lower()] = {words[x + 1].lower()}
            except IndexError:
                pass
        except IndexError:
            continue
def string_filter(old_string, filter_, separator=None):
    """Keep the characters of *old_string* accepted by *filter_*.

    *filter_* may be a predicate callable, a string of allowed characters
    (when *separator* is None), or a *separator*-delimited list of allowed
    tokens.
    """
    if hasattr(filter_, "__call__"):
        kept = [ch for ch in old_string if filter_(ch)]
    elif separator is None:
        kept = [ch for ch in old_string if ch in str(filter_)]
    else:
        allowed = str(filter_).split(separator)
        kept = [ch for ch in old_string if ch in allowed]
    return "".join(kept)
def crawl_markov(website, url_mask, max_level=3, level=0, crawled_urls=[]):
global markov_dict
global crawling, crawled
crawling += 1
if level > max_level:
return
if not can_crawl:
return
warnings = []
time.sleep(0.4)
try:
request = requests.get(website.encode("utf-8"), timeout=10)
except requests.ConnectionError:
return
except requests.exceptions.Timeout:
return
except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL):
try:
request = requests.get("http://" + website.encode("utf-8"), timeout=10)
except requests.ConnectionError:
return
except requests.exceptions.Timeout:
return
except requests.exceptions.InvalidURL:
return
html = BeautifulSoup.BeautifulSoup(request.text.encode("utf-8"))
for link in html.findAll("a", {"href": True}):
url = link["href"].encode("utf-8'")
if re.match("\.[a-zA-Z1-9]+$", url) and (not any(url.endswith(x) for x in [".html", ".php", ".htm"]) or "." in url.split("/")[-1]):
continue
if not url.startswith("http"):
continue
if url in crawled_urls:
continue
crawled_urls.append(url)
if regex(url, url_mask):
threading.Thread(target=crawl_markov, args=(url, url_mask, max_level, level+1, crawled_urls)).start()
for visible_text in [text.encode("utf-8") for text in filter(visible, html.findAll(text=True))]:
for line in visible_text.splitlines():
parse_markov_string(line)
time.sleep(0.5)
crawled += 1
print "Done crawling {}!".format(website)
@easy_bot_command("plotmarkov", True)
def plot_markov(message, raw):
global markov_dict
if raw:
return
p = multiprocessing.Process(target=mkplot, args=(markov_dict,))
p.start()
return "Plotting..."
@easy_bot_command("togglemarkovcrawling")
def toggle_crawling(message, raw):
global can_crawl
if raw:
return
can_crawl = not can_crawl
return "Success: now crawling is{} stopped!".format(("n't" if can_crawl else ""))
@bot_command("parsemarkov", True)
def parse_markov_from_text(message, connector, index, raw):
global markov_dict
for key, item in markov_dict.items():
markov_dict[key] = set(item)
if not raw:
if len(message["arguments"]) < 2:
connector.send_message(index, get_message_target(connector, message, index), "{}: Error: No argument provided!".format(message["nickname"]))
data = open(" ".join(message["arguments"][1:])).read()
data = " ".join([n.strip() for n in data.split("\n")])
words = [x for x in simple_string_filter(data, "\'\"-/\\,.!?", isalnumspace).split(" ") if x != " "]
for x in xrange(len(words)):
try:
if words[x - 1] == words[x] or words[x] == words[x + 1]:
continue
except IndexError:
pass
try:
markov_dict[words[x - 1].lower()].add(words[x].lower())
except KeyError:
try:
markov_dict[words[x - 1].lower()] = {words[x].lower()}
except IndexError:
pass
except IndexError:
pass
try:
markov_dict[words[x].lower()].add(words[x + 1].lower())
except KeyError:
try:
markov_dict[words[x].lower()] = {words[x + 1].lower()}
except IndexError:
pass
except IndexError:
continue
connector.send_message(index, get_message_target(connector, message, index), "{}: Text file succesfully parsed on Markov!".format(message["nickname"]))
@easy_bot_command("flushmarkov", True)
def flush_markov_data(message, raw):
global markov_dict
if raw:
return
markov_dict = {}
return ["Markov flushed succesfully!"]
@easy_bot_command("mk_feeder", all_messages=True)
def feed_markov_data(message, raw):
global markov_dict
if raw:
return
for key, item in markov_dict.items():
markov_dict[key] = set(item)
words = simple_string_filter(" ".join(message["arguments"]), "\'\"-/\\,.!?", isalnumspace).split(" ")
for x in xrange(len(words)):
if x - 1 > -1:
try:
if words[x - 1] == words[x] or words[x] == words[x + 1]:
continue
except IndexError:
pass
try:
markov_dict[words[x - 1].lower()].add(words[x].lower())
except KeyError:
try:
markov_dict[words[x - 1].lower()] = {words[x].lower()}
except IndexError:
pass
except IndexError:
pass
try:
markov_dict[words[x].lower()].add(words[x + 1].lower())
except KeyError:
try:
markov_dict[words[x].lower()] = {words[x + 1].lower()}
except IndexError:
pass
except IndexError:
continue
else:
try:
markov_dict[words[x].lower()].add(words[x + 1].lower())
except KeyError:
try:
markov_dict[words[x].lower()] = {words[x + 1].lower()}
except IndexError:
pass
except IndexError:
continue
@easy_bot_command("markov")
def get_markov(message, raw):
global markov_dict
for key, item in markov_dict.items():
markov_dict[key] = set(item)
if raw:
return
# Checks.
try:
markov_dict.__delitem__("")
markov_dict.__delitem__(" ")
except KeyError:
pass
for i, mkv in markov_dict.items():
try:
markov_dict[i].remove(" ")
markov_dict[i].remove("")
except KeyError:
continue
if len(markov_dict) < 1:
return "Error: no Markov data!"
# Get the string!
if len(message["arguments"]) < 2:
x = choice(markov_dict.keys())
words = [x]
else:
words = [x.lower() for x in message["arguments"][1:]]
x = words[0]
level = 0
result = x
print x
while level < len(words) - 1:
if not words[level + 1] in markov_dict[x]:
return ["{}: {}".format(message["nickname"], result)]
x = words[level + 1]
level += 1
result += " " + x
while x in markov_dict.keys():
try:
x = sample(markov_dict[x], 1)[0]
except ValueError:
break
print x.encode("utf-8")
result += " " + x
if len(result) > 750:
break
for cuss in markov_filter:
result = result.replace(cuss, "*" * len(cuss))
result = "{0}: {1}".format(message["nickname"], result)
return [result]
@easy_bot_command("savemarkov", True)
def save_markov_json(message, raw):
global markov_dict
if not raw:
if len(message["arguments"]) < 2:
return ["Error: not enough arguments!", "(Insert Markov file name as an argument)"]
save_dict = markov_dict
for key, item in save_dict.items():
save_dict[key] = tuple(item)
open("markov2/{}.mkov2".format(message["arguments"][1]), "w").write(json.dumps(save_dict))
for key, item in markov_dict.items():
markov_dict[key] = set(item)
return ["{}: Saved succesfully to {}.mkov2!".format(message["nickname"], message["arguments"][1])]
else:
return []
@easy_bot_command("loadmarkovfilter", True)
def load_markov_filter(message, raw):
global markov_filter
if raw:
return
if len(message["arguments"]) < 2:
return ["Error: Not enough arguments!"]
markov_filter += open("filters/{}.mkov2f".format(" ".join(message["arguments"][1:]))).readlines()
return ["Blacklist updated succesfully!"]
@easy_bot_command("savemarkovfilter", True)
def save_markov_filter(message, raw):
global markov_filter
if raw:
return
if len(message["arguments"]) < 2:
return ["Error: Not enough arguments!"]
open("filters/{}.mkov2f".format(" ".join(message["arguments"][1:])), "w").write("\n".join(markov_filter))
return ["Blacklist updated succesfully!"]
@easy_bot_command("loadmarkov", True)
def load_markov_json(message, raw):
global markov_dict
if not raw:
if len(message["arguments"]) < 2:
return ["Error: not enough arguments!", "(Insert Markov file name as an argument)"]
new_dict = json.load(open("markov2/{}.mkov2".format(message["arguments"][1])))
for key, item in new_dict.items():
new_dict[key] = {word for word in item}
markov_dict.update(new_dict)
return ["Loaded succesfully from {}.mkov2!".format(message["arguments"][1])]
else:
return []
@easy_bot_command("listfiltermarkov")
def list_cusses(message, raw):
if raw:
return
return "Cusses blacklisted: " + ", ".join(markov_filter)
@easy_bot_command("addfiltermarkov", True)
def filter_cusses(message, raw):
if raw:
return
global markov_filter
try:
markov_filter += message["arguments"][1:]
return ["Updated word blacklist succesfully!"]
except IndexError:
return ["Syntax: addfiltermarkov <list of cusses or blacklisted words>"]
@easy_bot_command("removefiltermarkov", True)
def unfilter_cusses(message, raw):
if raw:
return
global markov_filter
try:
for cuss in message["arguments"][1:]:
markov_filter.remove(cuss)
return ["Updated word blacklist succesfully!"]
except IndexError:
return ["Syntax: removefiltermarkov <list of words to un-blacklist>"]
@easy_bot_command("parsewebmarkov")
def parse_web_markov(message, raw):
global markov_dict
for key, item in markov_dict.items():
markov_dict[key] = set(item)
if raw:
return
messages = []
warnings = []
debug = "--debug" in message["arguments"][1:]
if len(message["arguments"]) < 2:
return ["{}: Error: No argument provided! (Syntax: parsewebmarkov <list of URLs>)".format(message["nickname"])]
for website in filter(lambda x: not x.startswith("--"), message["arguments"][1:]):
print "Parsing Markov from {}!".format(website)
messages.append("Parsing Markov from {}!".format(website))
try:
request = requests.get(website, timeout=10)
except requests.ConnectionError:
warnings.append("Error with connection!")
if debug:
raise
except requests.exceptions.Timeout:
warnings.append("Connection timed out!")
if debug:
raise
except requests.exceptions.MissingSchema:
try:
request = requests.get("http://" + website, timeout=10)
except requests.ConnectionError:
warnings.append("Error with connection!")
if debug:
raise
except requests.exceptions.Timeout:
warnings.append("Connection timed out!")
if debug:
raise
if not "request" in locals().keys():
continue
if request.status_code != 200:
warnings.append("{}: Error: Status {} reached!".format(message["nickname"], request.status_code))
continue
visible_texts = [text.encode("utf-8") for text in filter(visible, BeautifulSoup.BeautifulSoup(request.text).findAll(text=True))]
lines = []
for text in visible_texts:
lines += text.split("\n")
for line in lines:
words = simple_string_filter(line, "\'\"-/\\,.!?", isalnumspace).split(" ")
for x in xrange(len(words)):
try:
if words[x - 1] == words[x] or words[x] == words[x + 1]:
continue
except IndexError:
pass
try:
markov_dict[words[x - 1].lower()].add(words[x].lower())
except KeyError:
try:
markov_dict[words[x - 1].lower()] = {words[x].lower()}
except IndexError:
pass
except IndexError:
pass
try:
markov_dict[words[x].lower()].add(words[x + 1].lower())
except KeyError:
try:
markov_dict[words[x].lower()] = {words[x + 1].lower()}
except IndexError:
pass
except IndexError:
continue
if len(warnings) < len(message["arguments"][1:]):
messages.append("{}: Success reading Markov from (some) website(s)!".format(message["nickname"]))
return messages + warnings
@easy_bot_command("clearmarkovfilter", True)
def clear_filter(message, raw):
global markov_filter
if raw:
return
markov_filter = []
return "Success clearing Markov filter!"
@easy_bot_command("purgemarkov", True)
def purge_word_from_markov(message, raw):
global markov_dict
if raw:
return
if len(message["arguments"]) < 2:
return "Syntax: purgemarkov <list of words to purge from Markov>"
for word in message["arguments"][1:]:
for kw in markov_dict.keys():
if kw == word:
markov_dict.__delitem__(kw)
try:
if word in markov_dict[kw]:
markov_dict[kw] = [mk for mk in markov_dict[kw] if mk != word]
if markov_dict[kw] == []:
markov_dict.__delitem__(kw)
except KeyError:
pass
return "Words purged from Markov succesfully!"
def check_crawled(connector, index, message):
    """Block until every crawl thread reports done, then announce the total.

    Runs on a worker thread; polls the module-global crawling/crawled
    counters that the crawl threads update.
    """
    global crawling, crawled
    # Busy-wait with a short nap until the counters converge.
    while crawling > crawled:
        time.sleep(0.2)
    target = get_message_target(connector, message, index)
    done_msg = "Finished crawling {all} websites!".format(all=crawled)
    connector.send_message(index, target, done_msg)
@bot_command("parsewebmarkovcrawl", True)
def get_web_markov_crawling(message, connector, index, raw):
global crawling, crawled
def smsg(msg):
if type(msg) is str:
connector.send_message(
index,
get_message_target(connector, message, index),
msg
)
return True
elif hasattr(msg, "__iter__"):
for m in msg:
connector.send_message(
index,
get_message_target(connector, message, index),
m
)
return True
else:
return False
crawling = 0
crawled = 0
if raw:
return
time.sleep(0.3)
if len(message["arguments"]) < 4:
smsg("Syntax: <URL mask> <max level> <list of URLs to crawl for Markov>")
return
try:
if int(message["arguments"][2]) > 4:
smsg("Way too large value for max_level! Use only up to 4. Do you want to wait for an eternity?!?")
return
if int(message["arguments"][2]) < 0:
smsg("Lol negative level XD")
return
except ValueError:
smsg("Insert some int for max level (second argument)! Insert something between 0 and 4.")
return
for website in message["arguments"][3:]:
crawl_markov(website, message["arguments"][1], int(message["arguments"][2]))
smsg("Website crawling threads started! Check for new additions using ||markovsize .")
threading.Thread(target=check_crawled, args=(connector, index, message)).start()
@easy_bot_command("markovsize")
def get_markov_size(message, raw):
global markov_dict
if not raw:
return ["Size of Markov chain: {}".format(len(markov_dict))]
|
Yesterday, the preeminent awards for children’s literature in the United States were given. The Printz is given for the best in YA, the Newbery for the best in children’s literature, and the Caldecott for the best in illustration. A selection of this year’s winners is featured below. The comprehensive list can be found at the American Library Association.
Check these out at your BFS libraries!
« Order in the Library !
|
# -*- coding=utf8 -*-
"""
构建决策树
"""
from __future__ import division
from math import log
import operator
import matplotlib.pyplot as plt
from extension import mongo_collection, SALARY, EDUCATION, SATISFY
# Matplotlib annotation styles used when drawing the decision tree:
# sawtooth boxes for internal decision nodes, rounded boxes for leaves,
# and the arrow style for parent-to-child connectors.
decision_node = dict(boxstyle="sawtooth", fc="0.8")
leaf_node = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")
def load_data():
    """Load the training samples from MongoDB.

    (original docstring in Chinese: 从mongo导入数据 -- "import data from mongo")
    Each sample is [appearance, age, height, salary, education, satisfy];
    the last column is the class label.  Returns (data, labels) where
    labels are the feature display names used for plotting.
    """
    data = []
    # Only users that have both an appearance score and a satisfy label.
    for user in mongo_collection.find({"appearance": {"$exists": True},
                                       "satisfy": {"$exists": True}}):
        data.append([user.get('appearance', 0),
                     user.get('age', u'0'),
                     user.get('height', u'0'),
                     # Map raw codes to display values; u'--' when unknown.
                     SALARY.get(user.get('salary', u'0'), u'--'),
                     EDUCATION.get(user.get('education', u'0'), u'--'),
                     SATISFY[user['satisfy']]])
    # Feature names (Chinese): appearance, age, height, salary, education.
    labels = [u'颜值', u'年龄', u'身高', u'工资', u'学历']
    return data, labels
def majority_count(class_list):
    """Return the most common class label in class_list (majority vote).

    Portability fix: dict.items() replaces the Python-2-only iteritems();
    on Python 2.7 items() returns an equivalent list, so behavior is
    unchanged there while the function now also runs on Python 3.
    """
    class_count = {}
    for vote in class_list:
        class_count[vote] = class_count.get(vote, 0) + 1
    sorted_class_count = sorted(class_count.items(),
                                key=operator.itemgetter(1), reverse=True)
    return sorted_class_count[0][0]
def calc_shannon_ent(data_set):
    """Shannon entropy of the class labels (last column) of data_set."""
    total = len(data_set)
    # Frequency of each class label.
    counts = {}
    for row in data_set:
        label = row[-1]
        counts[label] = counts.get(label, 0) + 1
    # H = -sum(p * log2(p)) over the label distribution.
    entropy = 0.0
    for freq in counts.values():
        prob = float(freq) / total
        entropy -= prob * log(prob, 2)
    return entropy
def split_data_set(data_set, axis, value):
    """Rows of data_set whose feature `axis` equals `value`, each with
    that feature column removed."""
    subset = []
    for row in data_set:
        if row[axis] != value:
            continue
        # Splice the matching row back together without column `axis`.
        subset.append(row[:axis] + row[axis + 1:])
    return subset
def choose_best_feature_to_split(data_set):
    """Index of the feature with the highest information gain, or -1 when
    no feature beats zero gain.  The class label is the last column."""
    feature_count = len(data_set[0]) - 1
    base_entropy = calc_shannon_ent(data_set)
    best_gain = 0.0
    best_feature = -1
    for axis in range(feature_count):
        # Weighted entropy after splitting on each distinct value of axis.
        values = {row[axis] for row in data_set}
        split_entropy = 0.0
        for value in values:
            subset = split_data_set(data_set, axis, value)
            weight = len(subset) / len(data_set)
            split_entropy += weight * calc_shannon_ent(subset)
        gain = base_entropy - split_entropy
        if gain > best_gain:
            best_gain = gain
            best_feature = axis
    return best_feature
def create_tree(data_set, labels):
    """Recursively build an ID3 decision tree as nested dicts.

    (original docstring in Chinese: 生成决策树 -- "generate decision tree")
    data_set rows end with the class label; labels holds the feature
    names and is consumed (mutated) as features are used up.
    Returns a class label (leaf) or {feature_name: {value: subtree}}.
    """
    class_list = [example[-1] for example in data_set]
    # All rows share one class: pure leaf.
    if class_list.count(class_list[0]) == len(class_list):
        return class_list[0]
    # Only the label column is left: fall back to majority vote.
    if len(data_set[0]) == 1:
        return majority_count(class_list)
    best_feat = choose_best_feature_to_split(data_set)
    best_feat_label = labels[best_feat]
    my_tree = {best_feat_label:{}}
    # The chosen feature is removed from the caller's label list as well.
    del(labels[best_feat])
    feat_values = [example[best_feat] for example in data_set]
    unique_vals = set(feat_values)
    for value in unique_vals:
        # Copy so each branch's recursion consumes its own label list.
        sub_labels = labels[:]
        my_tree[best_feat_label][value] = \
            create_tree(split_data_set(data_set, best_feat, value), sub_labels)
    return my_tree
def get_num_leafs(my_tree):
    """Count the leaf nodes of a nested-dict decision tree.

    Portability fix: next(iter(...)) replaces the Python-2-only
    my_tree.keys()[0], and items() replaces iteritems(); both yield the
    same values on Python 2, so behavior is unchanged there.
    """
    num_leafs = 0
    # The single top-level key is the feature this node splits on.
    first_str = next(iter(my_tree))
    second_dict = my_tree[first_str]
    for _, val in second_dict.items():
        if isinstance(val, dict):
            # Subtree: recurse.
            num_leafs += get_num_leafs(val)
        else:
            num_leafs += 1
    return num_leafs
def get_tree_depth(my_tree):
    """Depth of a nested-dict decision tree (a single split counts as 1).

    Portability fix: next(iter(...)) replaces the Python-2-only
    my_tree.keys()[0], and items() replaces iteritems(); both yield the
    same values on Python 2, so behavior is unchanged there.
    """
    max_depth = 0
    # The single top-level key is the feature this node splits on.
    first_str = next(iter(my_tree))
    second_dict = my_tree[first_str]
    for _, val in second_dict.items():
        if isinstance(val, dict):
            this_depth = 1 + get_tree_depth(val)
        else:
            this_depth = 1
        if this_depth > max_depth:
            max_depth = this_depth
    return max_depth
def plot_node(node_txt, center_pt, parent_pt, node_type):
    """Draw one annotated tree node at center_pt with an arrow coming
    from parent_pt, styled by node_type (decision or leaf box)."""
    create_plot.ax1.annotate(node_txt,
                             xy=parent_pt, xycoords='axes fraction',
                             xytext=center_pt, textcoords='axes fraction',
                             va="center", ha="center",
                             bbox=node_type, arrowprops=arrow_args)
def plot_mid_text(cntr_pt, parent_pt, txt_string):
    """Write txt_string (the branch value) halfway between a node and its
    parent, rotated for readability."""
    mid_x = cntr_pt[0] + (parent_pt[0] - cntr_pt[0]) / 2.0
    mid_y = cntr_pt[1] + (parent_pt[1] - cntr_pt[1]) / 2.0
    create_plot.ax1.text(mid_x, mid_y, txt_string, va="center",
                         ha="center", rotation=30)
def plot_tree(my_tree, parent_pt, node_txt):
    """Recursively draw the subtree my_tree below parent_pt.

    Uses function attributes (plot_tree.xOff/yOff as a layout cursor,
    plot_tree.totalW/totalD as scale factors) set up by create_plot().
    Python 2 only: my_tree.keys()[0] and iteritems() do not work on
    Python 3 dict views.
    """
    num_leafs = get_num_leafs(my_tree)
    first_str = my_tree.keys()[0]
    # Center the decision node above the horizontal span of its leaves.
    cntr_pt = (plot_tree.xOff + (2.0 + num_leafs) / 2.0 / plot_tree.totalW, plot_tree.yOff)
    plot_mid_text(cntr_pt, parent_pt, node_txt)
    plot_node(first_str, cntr_pt, parent_pt, decision_node)
    second_dict = my_tree[first_str]
    # Step the cursor down one level before drawing children.
    plot_tree.yOff = plot_tree.yOff - 1.0 / plot_tree.totalD
    for key, val in second_dict.iteritems():
        if isinstance(val, dict):
            plot_tree(val, cntr_pt, unicode(key))
        else:
            # Leaf: advance the horizontal cursor and draw the label.
            plot_tree.xOff = plot_tree.xOff + 1.0 / plot_tree.totalW
            plot_node(unicode(val), (plot_tree.xOff, plot_tree.yOff), cntr_pt, leaf_node)
            plot_mid_text((plot_tree.xOff, plot_tree.yOff), cntr_pt, unicode(key))
    # Restore the vertical cursor for this node's siblings.
    plot_tree.yOff = plot_tree.yOff + 1.0 / plot_tree.totalD
def create_plot(in_tree):
    """Render the decision tree in_tree with matplotlib.

    (original docstring in Chinese: 生成图像 -- "generate the image")
    """
    fig = plt.figure(1, figsize=(25, 10), facecolor='white')
    fig.clf()
    # Hide the axes ticks; the subplot only hosts the annotations.
    axprops = dict(xticks=[], yticks=[])
    create_plot.ax1 = plt.subplot(111, frameon=False, **axprops)
    # Function attributes act as shared drawing state for plot_tree():
    # leaf count / depth scale the layout, xOff/yOff are the cursor.
    plot_tree.totalW = float(get_num_leafs(in_tree))
    plot_tree.totalD = float(get_tree_depth(in_tree))
    plot_tree.xOff = -0.5 / plot_tree.totalW
    plot_tree.yOff = 1.0
    plot_tree(in_tree, (0.5, 1.0), '')
    plt.show()
def compress_tree(my_tree):
    """Merge sibling leaves with the same class into one comma-joined branch.

    (original docstring in Chinese: 压缩决策树 -- "compress the decision tree")
    Mutates my_tree in place.  Python 2 only: relies on dict.keys() being
    an indexable list, on the unicode builtin, and on items() returning a
    snapshot that makes popping keys during iteration safe.
    """
    first_str = my_tree.keys()[0]
    inner_dict = my_tree[first_str]
    copy_dict = {}
    # Group the branch labels of the leaf children by their class value.
    for key, val in inner_dict.items():
        if not isinstance(val, dict):
            if val not in copy_dict:
                copy_dict[val] = [unicode(key)]
            else:
                copy_dict[val].append(unicode(key))
    # Invert: one merged branch (labels joined by ',') per class value.
    copy_dict = {u','.join(val): unicode(key) for key, val in copy_dict.items()}
    # Recurse into subtrees and drop the original unmerged leaf branches.
    for key, val in inner_dict.items():
        if isinstance(val, dict):
            compress_tree(val)
        else:
            inner_dict.pop(key)
    inner_dict.update(copy_dict)
if __name__ == '__main__':
    # Build the tree from MongoDB data, merge equivalent sibling leaves,
    # then render it with matplotlib.
    data_set, labels = load_data()
    result = create_tree(data_set, labels)
    compress_tree(result)
    create_plot(result)
|
a place where beauty reigns supreme.
A Day for the Laboring.
Happy Labor Day! I'm not sure who wrote the rulebook on not wearing white after today, but whatever. I don't like the idea of banning a color from my closet, even if I've already started gravitating towards darker tones as the weather cools (and it has become very windy). We went out for dinner with my parents last night, so I decided to toss on my white heels, in case I lose my nerve in the coming months. Here's to the laboring and to getting a day off!
|
# The MIT License (MIT)
#
# Copyright (c) 2014 Philippe Proulx <eepp.ca>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import mutagenx
import argparse
import readline
import sys
import os
import shutil
import sortmuz
from termcolor import colored
def _perror(msg, exit=True):
    """Print an error in bold red to stderr; terminate with status 1
    unless exit is False.  (The parameter name shadows the builtin exit,
    but is kept for API compatibility.)"""
    text = colored('Error: {}'.format(msg), 'red', attrs=['bold'])
    print(text, file=sys.stderr)
    if exit:
        sys.exit(1)
def _pwarning(msg):
    """Print a warning in bold yellow to stderr."""
    text = colored('Warning: {}'.format(msg), 'yellow', attrs=['bold'])
    print(text, file=sys.stderr)
def _pinfo(msg):
    """Print an informational message in blue to stderr."""
    text = colored('{}'.format(msg), 'blue')
    print(text, file=sys.stderr)
def _parse_args():
    """Parse and validate the command-line arguments.

    Returns the argparse namespace.  Exits (via _perror) when the source
    or output path is not an existing directory.  Dead code removed: the
    original had a sys.exit(1) after the output-directory _perror() call,
    which was unreachable because _perror() already exits by default.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('-V', '--version', action='version',
                    version='%(prog)s v{}'.format(sortmuz.__version__))
    ap.add_argument('-o', '--output', action='store', type=str,
                    default=os.getcwd(), metavar='DIR',
                    help='Output music collection directory (default: CWD)')
    ap.add_argument('src', metavar='SRC', action='store', type=str,
                    help='Path to source directory')
    # parse args
    args = ap.parse_args()
    # validate source directory
    if not os.path.isdir(args.src):
        _perror('source "{}" is not an existing directory'.format(args.src))
    # validate output directory
    if not os.path.isdir(args.output):
        _perror('output "{}" is not an existing directory'.format(args.output))
    return args
def _print_summary(src, output, muz_files, meta_files):
    """Print the resolved source/output paths and the collected music and
    meta file lists."""
    src_path = colored(os.path.abspath(src), 'blue', attrs=['bold'])
    out_path = colored(os.path.abspath(output), 'blue', attrs=['bold'])
    print('{} {}'.format(colored('source:', 'blue'), src_path))
    print('{} {}'.format(colored('output:', 'blue'), out_path))
    if not muz_files:
        _pwarning('no music files found')
    else:
        print()
        _pinfo('music files:')
        for file in muz_files:
            print('  {}'.format(os.path.basename(file)))
    print()
    if not meta_files:
        _pinfo('no meta files')
    else:
        _pinfo('meta files:')
        for file in meta_files:
            print('  {}'.format(os.path.basename(file)))
def _collect_files(src):
exts = ['.mp3', '.m4a', '.flac']
exclude_meta = ['.ds_store', 'desktop.ini', 'thumbs.db']
muz_files = []
meta_files = []
for file in os.listdir(src):
name, ext = os.path.splitext(file)
ext = ext.lower()
if ext in exts:
muz_files.append(os.path.abspath(os.path.join(src, file)))
else:
if file.lower() in exclude_meta:
continue
meta_files.append(os.path.abspath(os.path.join(src, file)))
return sorted(muz_files), sorted(meta_files)
def _get_file_infos(file):
    """Read (artist, album, year) tags from an MP3 or MP4 file.

    Returns empty strings for any tag that is missing, and for all three
    when the file cannot be parsed at all.
    """
    try:
        m_file = mutagenx.File(file)
    except Exception:
        # Was a bare "except:", which would also swallow KeyboardInterrupt
        # and SystemExit; Exception keeps the intended "unparseable file"
        # behavior without masking those.
        return '', '', ''
    artist = ''
    album = ''
    year = ''
    if type(m_file) is mutagenx.mp3.MP3:
        # ID3: TPE1 = lead performer, TPE2 used as a fallback.
        if 'TPE1' in m_file:
            artist = m_file['TPE1'].text[0]
        elif 'TPE2' in m_file:
            artist = m_file['TPE2'].text[0]
        if 'TALB' in m_file:
            album = m_file['TALB'].text[0]
        # The year can live in several ID3 frames; first match wins.
        year_tags = [
            'TDRC',
            'TYER',
            'TDAT',
            'TIME',
            'TRDA',
        ]
        for tag in year_tags:
            if tag in m_file:
                year = str(m_file[tag].text[0])
                break
    elif type(m_file) is mutagenx.mp4.MP4:
        # MP4 atoms: '\xa9ART' = artist, 'aART' as a fallback.
        if b'\xa9ART' in m_file:
            artist = m_file[b'\xa9ART'][0]
        elif b'aART' in m_file:
            artist = m_file[b'aART'][0]
        if b'\xa9alb' in m_file:
            album = m_file[b'\xa9alb'][0]
        if b'\xa9day' in m_file:
            year = str(m_file[b'\xa9day'][0])
    return artist, album, year
def _guess_infos(muz_files):
    """Guess (artist, album, year) for an album from its first tracks.

    If the first two files disagree on the artist, assume a compilation
    and report 'Various Artists'.
    """
    if not muz_files:
        return '', '', ''
    artist, album, year = _get_file_infos(muz_files[0])
    if len(muz_files) > 1:
        other_artist = _get_file_infos(muz_files[1])[0]
        if other_artist != artist:
            artist = 'Various Artists'
    return artist, album, year
def _pcp(src, dst):
    """Print a 'cp src -> dst' progress line."""
    label = colored('cp', attrs=['bold'])
    arrow = colored('->', attrs=['bold'])
    print('[{}] "{}" {} "{}"'.format(label, src, arrow, dst))
def _pmkdir(dst):
    """Print a 'mkdir dst' progress line."""
    label = colored('mkdir', attrs=['bold'])
    print('[{}] "{}"'.format(label, dst))
def do_sortmuz(src, output):
    """Interactively sort one album directory into the output collection.

    Collects music/meta files from src, prompts for artist/album/year
    (pre-filled with guesses from the audio tags), then copies everything
    into <output>/<artist>/<year> <album>/, with meta files placed in a
    "_" subdirectory.  Typo fixed: two error messages read
    "cannot cannot copy".
    """
    muz_files, meta_files = _collect_files(src)
    _print_summary(src, output, muz_files, meta_files)
    print(colored('\n---\n', 'blue'))
    artist, album, year = _guess_infos(muz_files)
    # Prompt until the user confirms; empty answers keep the guesses.
    while True:
        uartist = input('{} [{}] '.format(colored('artist?', 'green',
                                                  attrs=['bold']),
                                          colored(artist, attrs=['bold'])))
        ualbum = input('{} [{}] '.format(colored('album?', 'green',
                                                 attrs=['bold']),
                                         colored(album, attrs=['bold'])))
        uyear = input('{} [{}] '.format(colored('year?', 'green',
                                                attrs=['bold']),
                                        colored(year, attrs=['bold'])))
        uconfirm = input('{} [{}] '.format(colored('confirm?', 'cyan',
                                                   attrs=['bold']),
                                           colored('y', attrs=['bold'])))
        if len(uconfirm) == 0 or uconfirm.lower() == 'y':
            break
        print()
    uartist = uartist.strip()
    ualbum = ualbum.strip()
    uyear = uyear.strip()
    # Empty input falls back to the guessed values.
    if len(uartist.strip()) == 0:
        uartist = artist
    if len(ualbum.strip()) == 0:
        ualbum = album
    if len(uyear.strip()) == 0:
        uyear = year
    # Still empty after fallback means the tags had nothing either.
    if len(uartist) == 0:
        _perror('empty artist name')
    if len(ualbum) == 0:
        _perror('empty album name')
    if len(uyear) == 0:
        _perror('empty year')
    year_album = '{} {}'.format(uyear, ualbum)
    album_dir = os.path.join(output, uartist, year_album)
    abs_album_dir = os.path.abspath(album_dir)
    # Existing destination: ask before wiping it.
    if os.path.isdir(album_dir):
        res = input('{} {} [{}] '.format(colored('overwrite', 'cyan',
                                                 attrs=['bold']),
                                         colored(abs_album_dir, 'blue',
                                                 attrs=['bold']),
                                         colored('n', attrs=['bold'])))
        if res.lower() != 'y':
            sys.exit(0)
        print()
        print('[{}] "{}"'.format(colored('rm', attrs=['bold']),
                                 abs_album_dir))
        try:
            shutil.rmtree(album_dir)
        except Exception as e:
            _perror('cannot remove directory "{}": {}'.format(album_dir, e))
    else:
        print()
        _pmkdir(abs_album_dir)
    try:
        os.makedirs(album_dir)
    except Exception as e:
        _perror('cannot create directory "{}": {}'.format(album_dir, e))
    # Copy the music files into the album directory.
    for file in muz_files:
        dst = os.path.join(abs_album_dir, os.path.basename(file))
        _pcp(file, dst)
        try:
            shutil.copyfile(file, dst)
        except Exception as e:
            _perror('cannot copy file "{}": {}'.format(file, e))
    # Meta files (covers, logs, ...) go into a "_" subdirectory.
    if meta_files:
        meta_dir = os.path.join(abs_album_dir, '_')
        _pmkdir(meta_dir)
        try:
            os.makedirs(meta_dir)
        except Exception as e:
            _perror('cannot create directory "{}": {}'.format(meta_dir, e))
        for file in meta_files:
            dst = os.path.join(meta_dir, os.path.basename(file))
            _pcp(file, dst)
            try:
                if os.path.isdir(file):
                    shutil.copytree(file, dst)
                else:
                    shutil.copyfile(file, dst)
            except Exception as e:
                fmt = 'cannot copy file/directory "{}": {}'
                _perror(fmt.format(file, e))
def run():
    """CLI entry point: parse arguments and run the sort, exiting with
    status 1 when interrupted."""
    args = _parse_args()
    try:
        do_sortmuz(args.src, args.output)
    except KeyboardInterrupt:
        # Interactive prompt aborted by the user (Ctrl-C).
        sys.exit(1)
|
Take your concert to the next level with a professional lighting rig!
Whether you need full lighting production including lights on truss with drape, hazer and lighting tech, or whether you just need a few LED par cans for your gig, we've got you covered!
We would love to talk with you about your event and put together a quote for a customized lighting system that will be perfect for your event!
|
# -*- coding: utf-8 -*-
from linlp.algorithm.Viterbi import viterbiRecognitionSimply
from linlp.algorithm.viterbiMat.prob_trans_organization import prob_trans as trans_p
from linlp.algorithm.viterbiMat.prob_emit_organization import prob_emit as emit_p
def organizationviterbiSimply(obs, DT, obsDT, debug):
    """Run simplified Viterbi organization-name role tagging.

    obs: list of (word, POS-tag) pairs; DT: role-tag dictionary tree,
    updated in place per word; obsDT: observation dictionary tree used
    for frequency lookups; debug: print the role annotations.
    Returns the role path for the original words (sentinels stripped).
    NOTE(review): the role letters (K/D/F/M/Z) and weights presumably
    follow the conventions of the trans/emit matrices imported above --
    confirm against prob_trans_organization/prob_emit_organization.
    """
    if debug:
        x = obs
    # Wrap the sentence with begin/end sentinels expected by the matrices.
    obs = [('始##始', 'begin')] + obs + [('末##末', 'end')]
    # Map POS tags to handling cases: nz -> 1, ni/nic/nis/nit -> 2, m -> 3.
    switch = {'nz': 1, 'ni': 2, 'nic': 2, 'nis': 2, 'nit': 2, 'm': 3}
    length = len(obs)
    for no in range(length):
        case = switch.get(obs[no][1], 0)
        if not DT.tree.get(obs[no][0]):
            DT.tree[obs[no][0]] = dict()
        if case == 1:
            # Rare proper noun (frequency <= 1000, or unseen): role F;
            # frequent ones get the default "outside" role Z.
            if obsDT.tree[obs[no][0]].get('total', 1001) <= 1000:
                DT.tree[obs[no][0]].setdefault('F', 1000)
            else:
                DT.tree[obs[no][0]].setdefault('Z', 21149365)
        elif case == 2:
            # Organization-type noun: give it both the K and D roles.
            DT.tree[obs[no][0]].setdefault('K', 1000)
            DT.tree[obs[no][0]].setdefault('D', 1000)
        elif case == 3 and len(obsDT.tree.get(obs[no][0], 'm')) != 2:
            # Numeral without a 2-entry observation record: role M.
            DT.tree[obs[no][0]] = {'M': 1000}
        # Unknown-word placeholders for other open-class tags
        # (place / string / person / org / number / time words).
        elif obs[no][1].startswith('ns'):
            obs[no] = ('未##地', obs[no][1])
        elif obs[no][1].startswith('x'):
            obs[no] = ('未##串', 'x')
        elif obs[no][1].startswith('nr'):
            obs[no] = ('未##人', obs[no][1])
        elif obs[no][1].startswith('nt'):
            obs[no] = ('未##团', obs[no][1])
        elif obs[no][1].startswith('m'):
            obs[no] = ('未##数', obs[no][1])
        elif obs[no][1].startswith('t'):
            obs[no] = ('未##时', obs[no][1])
        elif not DT.tree.get(obs[no][0]):  # not in the organization dictionary
            DT.tree[obs[no][0]] = {'Z': 21149365}
    path = viterbiRecognitionSimply(obs, trans_p, emit_p, DT)
    if debug:
        # Build "observation" (s) and "annotation" (t) strings; obs[i+1]
        # because index 0 is the begin sentinel.
        s = ''
        t = '['
        l = len(x)
        for i in range(l):
            word = x[i]
            s += '[' + word[0] + ' '
            t += word[0]
            for k, v in DT.tree[obs[i+1][0]].items():
                if k == 'total':
                    continue
                s += k + ':' + str(v) + ' '
            s += ']'
            t += '/' + path[i+1] + ', '
        t += ']'
        print('机构名角色观察: %s' % s)
        print('机构名角色标注: %s' % t)
    # Strip the begin/end sentinel roles.
    return path[1:-1]
|
For the steakhouse with the #1 rated steak, you need to head to Texas Roadhouse where the steaks are legendary. Big in size and flavor, it's the prime steakhouse you need that's always one cut above the rest.
Make your next meal a legendary experience by heading to Texas Roadhouse. Find one close to you so you can enjoy the best steak you've ever had now!
|
# (c) 2015, Ian Clegg <ian.clegg@sourcewarp.com>
#
# winrmlib is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'ian.clegg@sourcewarp.com'
import uuid
from winrmlib.api.service import Service
from winrmlib.api.resourcelocator import ResourceLocator
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
class Session(object):
    """
    Factory object for building WSMan sessions and issuing WS-Transfer /
    Windows remote shell requests (get/put/delete/create/command/receive)
    against a WinRM endpoint.

    NOTE(review): written for Python 2 -- _build_selectors and
    _build_options use iterkeys()/iteritems(), which do not exist on
    Python 3 dicts.
    """
    def __init__(self, endpoint, username, password, **kwargs):
        """endpoint: WSMan service URL.  Recognized kwargs:
        max_envelope_size (bytes), locale, operation_timeout (seconds)."""
        # transport = Session._build_transport(endpoint, auth, username, password)
        # Store the endpoint and the service we will use to invoke it
        self.endpoint = endpoint
        # False == No CredSSP
        self.service = Service(endpoint, username, password, True)
        # The user can set override some defaults for the Session, they can also be overridden on each request
        self.max_envelope = self._build_max_envelope(kwargs.get('max_envelope_size', Session.MaxEnvelopeSize))
        self.locale = self._build_locale(kwargs.get('locale', Session.Locale))
        # The operation timeout header overrides the timeout set on the server. Some users may prefer to
        # use the servers default timeout, so this header will only be included if the user explicitly sets
        # an operation timeout.
        if 'operation_timeout' in kwargs:
            self.default_operation_timeout = self._build_operation_timeout(kwargs.get('operation_timeout'))
        else:
            self.default_operation_timeout = None
    def get(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
        """
        WS-Transfer Get.  resource can be a URL or a ResourceLocator.
        NOTE(review): unlike the other verbs, this sets the SOAP headers
        as options and returns the bound invoke callable instead of
        invoking it -- confirm callers expect this asymmetry.
        """
        if isinstance(resource, str):
            resource = ResourceLocator(resource)
        headers = self._build_headers(resource, Session.GetAction, operation_timeout, max_envelope_size, locale)
        self.service.invoke.set_options(tsoapheaders=headers)
        return self.service.invoke
    def put(self, resource, obj,
            operation_timeout=None, max_envelope_size=None, locale=None):
        """
        WS-Transfer Put.  resource can be a URL or a ResourceLocator.
        NOTE(review): looks unfinished -- `resource` and all keyword
        arguments are ignored and no headers are built (headers is None),
        so no Put action/selectors are sent; compare with delete/create.
        """
        headers = None
        return self.service.invoke(headers, obj)
    def delete(self, resource, operation_timeout=None, max_envelope_size=None, locale=None):
        """
        WS-Transfer Delete.  resource can be a URL or a ResourceLocator.
        """
        if isinstance(resource, str):
            resource = ResourceLocator(resource)
        headers = self._build_headers(resource, Session.DeleteAction,
                                      operation_timeout, max_envelope_size, locale)
        return self.service.invoke(headers, None)
    def create(self, resource, obj,
               operation_timeout=None, max_envelope_size=None, locale=None):
        """
        WS-Transfer Create (obj is the request body).
        resource can be a URL or a ResourceLocator.
        """
        if isinstance(resource, str):
            resource = ResourceLocator(resource)
        headers = self._build_headers(resource, Session.CreateAction,
                                      operation_timeout, max_envelope_size, locale)
        return self.service.invoke(headers, obj)
    def command(self, resource, obj,
                operation_timeout=None, max_envelope_size=None, locale=None):
        """
        Windows remote shell Command action (obj is the request body).
        resource can be a URL or a ResourceLocator.
        """
        if isinstance(resource, str):
            resource = ResourceLocator(resource)
        headers = self._build_headers(resource, Session.CommandAction,
                                      operation_timeout, max_envelope_size, locale)
        return self.service.invoke(headers, obj)
    def recieve(self, resource, obj,
                operation_timeout=None, max_envelope_size=None, locale=None):
        """
        Windows remote shell Receive action (obj is the request body).
        resource can be a URL or a ResourceLocator.
        NOTE(review): 'recieve' is a misspelling of 'receive'; the name is
        kept because it is part of the public API.
        """
        if isinstance(resource, str):
            resource = ResourceLocator(resource)
        headers = self._build_headers(resource, Session.ReceiveAction,
                                      operation_timeout, max_envelope_size, locale)
        return self.service.invoke(headers, obj)
    @staticmethod
    def _build_selectors(selectors):
        # Build the WSMan SelectorSet Element from the selector dictionary
        # (iterkeys() is Python 2 only).
        selector_set = []
        for selector_name in selectors.iterkeys():
            selector_value = selectors[selector_name]
            selector_set.append({'#text': str(selector_value), '@Name': selector_name})
        return {'w:SelectorSet': {'w:Selector': selector_set}}
    @staticmethod
    # TODO add mustcomply attribute to element
    def _build_options(options):
        # Build the WSMan OptionSet element (iteritems() is Python 2 only).
        # NOTE(review): must_comply is normalized but never emitted in the
        # option element -- see the TODO above.
        option_set = []
        for name, (value, must_comply) in options.iteritems():
            must_comply = bool(must_comply)
            option_set.append({'#text': str(value), '@Name': name})
        return {'w:OptionSet': {'w:Option': option_set}}
    def _build_operation_timeout(self, operation_timeout):
        # Per-request timeout (seconds -> ISO 8601 duration), or the
        # session default when none was given.
        if operation_timeout is None:
            return self.default_operation_timeout
        else:
            return {'w:OperationTimeout': 'PT{0}S'.format(operation_timeout)}
    def _build_max_envelope(self, max_envelope_size):
        # Per-request envelope size header, or the session default.
        if max_envelope_size is None:
            return self.max_envelope
        else:
            return {'w:MaxEnvelopeSize': '{0}'.format(max_envelope_size)}
    def _build_locale(self, locale):
        # NOTE(review): when a locale IS given its value is ignored and
        # en-US is hardcoded -- confirm whether this is intentional.
        if locale is None:
            return self.locale
        else:
            return {'Locale': {"@xml:lang": "en-US"}}
    def _build_headers(self, resource, action, operation_timeout, max_envelope_size, locale):
        """Assemble the ordered WS-Addressing / WSMan SOAP headers for one
        request against `resource` with the given SOAP `action`."""
        headers = OrderedDict([
            ('a:To', self.endpoint),
            ('a:ReplyTo', Session.Address),
            ('w:ResourceURI', resource.url),
            ('a:MessageID', format(uuid.uuid4())),
            ('a:Action', action)]
        )
        # TODO: Implement support for Microsoft XPRESS compression
        # https://social.msdn.microsoft.com/Forums/en-US/501e4f29-edfc-4240-af3b-344264060b99/
        # wsman-xpress-remote-shell-compression?forum=os_windowsprotocols
        # headers.update({'rsp:CompressionType': {'@soap:mustUnderstand': 'true', '#text': 'xpress'}})
        # only include the operation timeout if the user specified one when the class was instantiated
        # or if the user explicitly set one when invoking a method.
        if operation_timeout is not None:
            headers.update(self._build_operation_timeout(operation_timeout))
        elif self.default_operation_timeout is not None:
            headers.update(self.default_operation_timeout)
        headers.update(self._build_selectors(resource.selectors))
        headers.update(self._build_options(resource.options))
        headers.update(self._build_max_envelope(max_envelope_size))
        headers.update(self._build_locale(locale))
        return headers
# Session-wide defaults: maximum SOAP envelope size (bytes) and locale.
Session.MaxEnvelopeSize = 153600
Session.Locale = 'en-US'
# WS-Addressing anonymous ReplyTo endpoint used in every request header.
Session.Address = {'a:Address': {
    '@mustUnderstand': 'true',
    '#text': 'http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous'
}}
# Static members that can be safely shared with all instances
Session.WSManNamespace = '{http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd}'
Session.AddressingNamespace = '{http://schemas.xmlsoap.org/ws/2004/08/addressing}'
Session.SoapContentType = {'Content-Type': 'application/soap+xml; charset=utf-8'}
# WSMan SOAP Actions
Session.GetAction = 'http://schemas.xmlsoap.org/ws/2004/09/transfer/Get'
Session.PutAction = 'http://schemas.xmlsoap.org/ws/2004/09/transfer/Put'
Session.DeleteAction = 'http://schemas.xmlsoap.org/ws/2004/09/transfer/Delete'
Session.CreateAction = 'http://schemas.xmlsoap.org/ws/2004/09/transfer/Create'
Session.CommandAction = 'http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Command'
Session.ReceiveAction = 'http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive'
|
The University of Kansas School of Medicine-Wichita Medical Practice Association (MPA) was established to provide medical education, research, and medical care through its members who are full-time faculty at the University of Kansas School of Medicine-Wichita.
The KU Wichita Medical Practice Association (MPA) is a 501(c) 3, not-for-profit organization. Members of the MPA are full-time faculty from the KU School of Medicine-Wichita departments of Family and Community Medicine, Internal Medicine, Pediatrics, Preventive Medicine and Public Health, and Psychiatry and Behavioral Sciences.
The MPA is governed by a Board of Trustees (president and secretary/treasurer) elected by its members at the annual membership meeting in October, which meets quarterly. A six-member Executive Committee, comprised of the department chairs and one member-at-large, meets monthly to conduct the business of the MPA.
In addition to helping the KU School of Medicine-Wichita educate students and residents, the MPA has generously provided more than $9 million to the school since June 2009.
|
# Plink module
#
# Copyright (C) 2015 Pjotr Prins (pjotr.prins@thebird.nl)
# Some of the BED file parsing came from pylmm:
# Copyright (C) 2013 Nicholas A. Furlotte (nick.furlotte@gmail.com)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# According to the PLINK information
# Parse a textual BIM file and return the contents as a list of tuples
#
# Extended variant information file accompanying a .bed binary genotype table.
#
# A text file with no header line, and one line per variant with the following six fields:
#
# Chromosome code (either an integer, or 'X'/'Y'/'XY'/'MT'; '0' indicates unknown) or name
# Variant identifier
# Position in morgans or centimorgans (safe to use dummy value of '0')
# Base-pair coordinate (normally 1-based, but 0 ok; limited to 231-2)
# Allele 1 (corresponding to clear bits in .bed; usually minor)
# Allele 2 (corresponding to set bits in .bed; usually major)
#
# Allele codes can contain more than one character. Variants with negative bp coordinates are ignored by PLINK. Example
#
# 1 mm37-1-3125499 0 3125499 1 2
# 1 mm37-1-3125701 0 3125701 1 2
# 1 mm37-1-3187481 0 3187481 1 2
import struct
# import numpy as np
def readbim(fn):
    """Parse a textual PLINK .bim file and return its records.

    Each line carries six whitespace-separated fields: chromosome,
    variant id, genetic position, base-pair coordinate, allele 1 and
    allele 2.

    Returns a list of 6-tuples.  For fully numeric lines the last four
    fields are converted to int; if any field on the line is the
    literal string 'nan', the first three fields are kept as strings
    and the last three are returned as float('nan') (matching the
    historical behavior of this parser).
    """
    res = []
    # Use a context manager so the file handle is always closed
    # (the original left it to the garbage collector).
    with open(fn) as f:
        for line in f:
            fields = line.split()  # renamed: 'list' shadowed the builtin
            if 'nan' not in fields:
                res.append((fields[0], fields[1], int(fields[2]),
                            int(fields[3]), int(fields[4]), int(fields[5])))
            else:
                # Unmapped/missing marker: keep ids, blank the numerics.
                res.append((fields[0], fields[1], fields[2],
                            float('nan'), float('nan'), float('nan')))
    return res
# .bed (PLINK binary biallelic genotype table)
#
# Primary representation of genotype calls at biallelic variants. Must
# be accompanied by .bim and .fam files. Basically contains num SNP
# blocks containing IND (compressed 4 IND into a byte)
#
# Since it is a biallelic format it supports for every individual
# whether the first allele is homozygous (b00), the second allele is
# homozygous (b11), it is heterozygous (b10) or that it is missing
# (b01).
# http://pngu.mgh.harvard.edu/~purcell/plink2/formats.html#bed
# http://pngu.mgh.harvard.edu/~purcell/plink2/formats.html#fam
# http://pngu.mgh.harvard.edu/~purcell/plink2/formats.html#bim
def readbed(fn, inds, encoding, func=None):
    """Parse a PLINK binary .bed genotype table (SNP-major mode).

    fn       -- path to the .bed file
    inds     -- number of individuals per SNP block
    encoding -- 4-element sequence of output values for the two-bit
                genotype codes b00 (hom. allele 1), b10 (het),
                b11 (hom. allele 2) and b01 (missing), in that order
    func     -- callback invoked as func(count, genotypes) for every
                SNP block; count is the 1-based block index

    Returns the number of SNP blocks read.
    """
    # Map a two-bit genotype code to its index into `encoding`.
    Didx = {'00': 0, '10': 1, '11': 2, '01': 3}

    def fetchGenotypes(X):
        # X is a list of bin() strings, one per packed byte.  Each byte
        # holds four two-bit genotypes, least-significant pair first;
        # decode all of them, then truncate to the real sample count.
        G = []
        for x in X:
            if len(x) != 10:
                # bin() drops leading zeros; pad back to 8 bits.
                xx = x[2:]
                x = '0b' + '0' * (8 - len(xx)) + xx
            a, b, c, d = (x[8:], x[6:8], x[4:6], x[2:4])
            G += [encoding[Didx[y]] for y in (a, b, c, d)]
        return G[:inds]

    # Four genotypes per byte, plus a partial trailing byte when
    # inds is not a multiple of 4.  Floor division keeps the byte
    # count an int on Python 3 (the original used '/', which there
    # yields a float and breaks the struct format string).
    nbytes = inds // 4 + (1 if inds % 4 else 0)
    fmt = 'c' * nbytes  # renamed: 'bytes'/'format' shadowed builtins
    count = 0
    with open(fn, 'rb') as f:
        magic = f.read(3)
        # PLINK v1 magic number 6c 1b, mode byte 01 = SNP-major.
        # Compared as a bytes literal so it works on Python 2 and 3
        # (iterating a bytes object yields ints on Python 3, so the
        # old per-char ord() check only worked on Python 2).
        assert magic == b'\x6c\x1b\x01'
        while True:
            count += 1
            X = f.read(nbytes)
            if not X:
                return count - 1
            # struct gives length-1 byte strings; ord() accepts those
            # on both Python 2 and 3.
            XX = [bin(ord(x)) for x in struct.unpack(fmt, X)]
            func(count, fetchGenotypes(XX))
|
To put it simply, dry and flakey scalps are the worst to deal with. They are uncomfortable, irritating, and can even cause a bit of embarrassment. Dry scalps can be a result of several conditions including dandruff, psoriasis, eczema, or poor nutrition. Rest assured though that there are several simple steps you can take to help alleviate the symptoms and get your scalp back to a healthy place.
You’ve probably exfoliated your face or body before to get rid of dead skin cells. This is the same exact practice but applied to your scalp. The best way to do this is on freshly washed hair, applying the scrub with your fingertips in a circular motion. It’s basically part head massage, part skin treatment. After massaging the scalp for about 10-15 minutes rinse thoroughly with warm water. Be sure to only do this once or twice a week, since this treatment will remove natural oils from the scalp and it will need time to recover.
There are plenty of hair exfoliants on the market but here is an easy DIY recipe you can try at home using oatmeal and brown sugar. Exfoliating and it smells delicious!
Investing in a shampoo that will help fight dandruff and simultaneously hydrate your scalp is a necessity. Be sure to read the label and be on the lookout for salicylic acid as an active ingredient. This acid will help exfoliate the scales and work to prevent any future flakes. When applying the shampoo, it is important to concentrate it on your scalp, massaging the product in for at least five minutes to let the ingredients do their work. It is also key to rinse your hair thoroughly to ensure all the product is out, leaving no residue on the scalp and diminishing any chances of irritation.
Our favorite is the Serene Scalp Anti-Dandruff Shampoo; it is equally gentle and soothing and, of course, has salicylic acid as an active ingredient.
Trust us, oil is your friend. It can be a little disconcerting putting oil directly in your hair but if your scalp is dry and flakey it could be the hydration boost it needs. Both coconut oil and olive oil are fantastic for hydration and have a ton of added benefits for your hair's health. Apply a generous amount to your scalp (hair can be wet or dry) and give yourself a nice scalp massage. The stimulation will help promote blood flow and loosen any dry flakes. Let the oil absorb into your hair for about 30 minutes and then rinse out with warm water.
You’d be shocked at how much of a difference a good leave-in treatment can make for a dry scalp. Look for one created with hydrating and soothing ingredients that will help heal your scalp and provide relief to any irritation. Apply it to freshly washed hair and lightly massage in circular motions focusing on your roots. It will help keep you flake-free throughout the day and give you that extra bit of moisture your scalp needs.
Our favorite is the Serene Scalp Soothing Leave-On Treatment; it combines mint and chamomile for both a cooling and calming effect.
|
from __future__ import print_function
from bose_einstein import bose_einstein
from constant import htr_to_K, htr_to_meV
import argparser
import norm_k
import numpy as np
import scf
import system
# Renormalize an EPW self-energy calculation: read (energy, Im Sigma,
# ..., dRe Sigma/de) rows from a data file and print the quasiparticle-
# renormalized values in meV.
args = argparser.read_argument('Renormalize EPW calculation')

# Convert command-line quantities to atomic units (Hartree).
thres = args.thres / htr_to_meV   # threshold in Ha (currently unused below)
beta = htr_to_K / args.temp       # inverse temperature in Ha^-1
window = args.energy / htr_to_meV # energy window for applying the Z factor

# Band-edge offset in Ha for the valence/conduction band.
# NOTE(review): hard-coded for one specific system -- confirm before reuse.
if args.vb:
    offset = -8.75333295715961e-03
else:
    offset = 8.53193322468371e-03

Sigma = system.make_data(args.dft, args.vb)
Sigma.bose_einstein = bose_einstein(Sigma.freq, beta)

# EPW band indices: 36 = valence band, 37 = conduction band.
if args.vb:
    band_str = '36'
else:
    band_str = '37'

# Pick the q-point grid label matching the available data sets.
temp_str = '%03dK' % args.temp
if args.acoustic:
    temp_str = '%dK' % args.temp
    qpt_str = '10000'
elif args.temp == 1:
    qpt_str = '050000'
elif args.temp == 150:
    qpt_str = '100000'
elif args.temp == 300:
    qpt_str = '100000'
else:
    print("temperature " + str(args.temp) + " not available")
    exit()

dir_str = args.direction
if args.acoustic:
    filename = ('data/epw_all_28424_' + temp_str + '_5meV_acoustic_only/'
                'data_' + dir_str + '_' + band_str + '_10000.dat')
else:
    filename = ('data/res_' + temp_str + '_1meV/'
                'data_' + dir_str + '_' + band_str + '_' + qpt_str + '.dat')

# Use a context manager so the data file is closed on exit (the
# original handle was never closed).
with open(filename, 'r') as file_epw:
    for line in file_epw:
        data = line.split()
        # np.float was deprecated in NumPy 1.20 and later removed; the
        # builtin float is the documented drop-in replacement.
        eps = float(data[1]) - offset
        ImS = float(data[2])
        if (abs(eps) < window) and args.method == 2:
            # Quasiparticle weight Z = 1 / (1 + dRe Sigma/de).
            zz = 1.0 / (1.0 + float(data[4]))
        else:
            zz = 1.0
        print(eps * htr_to_meV, ImS * zz * htr_to_meV, zz)
|
In Washington, the Sheriff is responsible for maintaining the peace within the county and is directly accountable to the people, running for election every 4 years. Many people are under the impression that our duties are primarily in the patrol of areas outside municipal city limits. This is not entirely the case.
While we do provide this service, our operations run the gamut from law enforcement duties, to the delivery of civil papers, execution of court orders, incarceration of offenders and supervision of parole/probation clients, organizing search and rescue operations and preparing for and coordinating responses to man-made and natural disasters. The job of managing such a diverse operation is a challenge that must be met each day.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-05-12 10:08
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create UserProfile and re-declare two Transaction fields.

    Auto-generated by Django 1.10.2 on 2017-05-12.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('organizer', '0014_transaction_category'),
    ]

    operations = [
        # One-to-one extension of the auth user carrying an account
        # balance; deleting the user cascades to the profile.
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): max_digits == decimal_places leaves zero
                # integer digits (abs(balance) must be < 1) -- confirm
                # this is intended.
                ('balance', models.DecimalField(decimal_places=11, max_digits=11)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Closed set of spending categories for Transaction.category.
        migrations.AlterField(
            model_name='transaction',
            name='category',
            field=models.CharField(choices=[('Apparel/Accesory', 'Apparel/Accesory'), ('Entertainment', 'Entertainment'), ('Food/Beverage', 'Food/Beverage'), ('Skin care/Cosmetics', 'Skin care/Cosmetics'), ('Computer/Mobile', 'Computer/Mobile'), ('Books/Newspapers', 'Books/Newspapers'), ('Other', 'Other')], max_length=20),
        ),
        # Default purchase_date to "today": the callable (not its
        # result) is stored, so it is evaluated at save time.
        migrations.AlterField(
            model_name='transaction',
            name='purchase_date',
            field=models.DateField(default=datetime.date.today),
        ),
    ]
|
15 ноября 2013 Adventure Time: Explore the Dungeon Because I DON’T KNOW!
20 ноября 2012 Adventure Time: Hey Ice King! Why'd You Steal Our Garbage?!
13 ноября 2008 Adventures to Go!
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.