text
stringlengths 29
850k
|
|---|
# -*- coding: utf-8 -*-
# сортировка массива методом подсчета сравнений
import random
# [65, 59, 46, 62, 14, 25, 78, 22, 47, 79]
# [7, 5, 3, 6, 0, 2, 8, 1, 4, 9]
def ccs(array):
    """Comparison-counting sort.

    For every unordered pair of elements exactly one counter is
    incremented, so the counters end up being a permutation of
    0..len-1: each element's final rank.  Duplicates receive distinct
    ranks (the earlier occurrence ranks higher), so no two elements
    ever target the same output slot.

    The original version interleaved Python-2-only debug ``print``
    statements throughout; they have been removed so the function is
    quiet and runs on both Python 2 and 3.

    :param array: list of mutually comparable items; not modified.
    :returns: a new list with the items in ascending order.
    """
    n = len(array)
    # One rank counter per element, all starting at zero.
    cnt = [0] * n
    # Compare every pair (i, j) with i < j exactly once.
    for i in range(n - 1):
        for j in range(i + 1, n):
            if array[i] < array[j]:
                # Later element is strictly larger: raise its rank.
                cnt[j] += 1
            else:
                # Earlier element is larger or equal: raise its rank
                # instead (this is what keeps duplicate ranks distinct).
                cnt[i] += 1
    # Place each element at the index given by its rank.
    s = [None] * n
    for i in range(n):
        s[cnt[i]] = array[i]
    return s
def test_ccs():
    """Check ccs() against the built-in sorted() on a fixed sample."""
    # generated_array = [random.randint(1, 100) for i in range(10)]
    generated_array = [60, 35, 81, 98, 14, 47]
    # The original only printed the input; actually verify the result.
    assert ccs(generated_array) == sorted(generated_array)


if __name__ == "__main__":
    # Guarded so importing this module no longer runs the test as a
    # side effect.
    test_ccs()
|
Serial Monogamy is a relationship style where a person engages in a committed romantic/sexual relationship with only one person at a time. Before engaging in a new romantic/sexual relationship, the previous relationship is terminated.
Choosing monogamy allows compliance with ideals set forth by various systems of belief (i.e. religion or cultural norms).
Learning creative ways to negotiate and fulfill romantic/sexual needs over the course of the partnership.
The ability to leave a partnership when it is no longer functional, healthy, or fulfilling.
If monogamy is a part of a value system, one may feel pressured to choose monogamy even if it doesn’t fit their personal sexual/relational style.
Misunderstanding over monogamy by those outside of the relationship or those choosing other relationship styles.
Fear or anxiety that a partner will leave and/or find a new partner.
|
"""
Author: RedFantom
License: GNU GPLv3
Source: This repository
"""
# The following sites were used for reference in the creation of this file:
# http://code.activestate.com/recipes/578894-mousewheel-binding-to-scrolling-area-tkinter-multi/
# http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
import tkinter as tk
from tkinter import ttk
from ttkwidgets import AutoHideScrollbar
class ScrolledFrame(ttk.Frame):
    """
    A frame that sports a vertically oriented scrollbar for scrolling.

    :ivar interior: :class:`ttk.Frame` in which to put the widgets to be scrolled with any geometry manager.
    """

    def __init__(self, master=None, compound=tk.RIGHT, canvasheight=400,
                 canvaswidth=400, canvasborder=0, autohidescrollbar=True, **kwargs):
        """
        Create a ScrolledFrame.

        :param master: master widget
        :type master: widget
        :param compound: "right" or "left": side the scrollbar should be on
        :type compound: str
        :param canvasheight: height of the internal canvas
        :type canvasheight: int
        :param canvaswidth: width of the internal canvas
        :type canvaswidth: int
        :param canvasborder: border width of the internal canvas
        :type canvasborder: int
        :param autohidescrollbar: whether to use an :class:`~ttkwidgets.AutoHideScrollbar` or a :class:`ttk.Scrollbar`
        :type autohidescrollbar: bool
        :param kwargs: keyword arguments passed on to the :class:`ttk.Frame` initializer
        """
        ttk.Frame.__init__(self, master, **kwargs)
        self.rowconfigure(0, weight=1)
        self.columnconfigure(1, weight=1)
        if autohidescrollbar:
            self._scrollbar = AutoHideScrollbar(self, orient=tk.VERTICAL)
        else:
            self._scrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
        self._canvas = tk.Canvas(self, borderwidth=canvasborder, highlightthickness=0,
                                 yscrollcommand=self._scrollbar.set, width=canvaswidth, height=canvasheight)
        self.__compound = compound
        self._scrollbar.config(command=self._canvas.yview)
        self._canvas.yview_moveto(0)
        self.interior = ttk.Frame(self._canvas)
        self._interior_id = self._canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
        self.interior.bind("<Configure>", self.__configure_interior)
        self._canvas.bind("<Configure>", self.__configure_canvas)
        self.__grid_widgets()

    def __grid_widgets(self):
        """Places all the child widgets in the appropriate positions."""
        # BUGFIX: compare by equality, not identity -- `compound` may be
        # any str equal to "left", not necessarily the tk.LEFT object.
        scrollbar_column = 0 if self.__compound == tk.LEFT else 2
        self._canvas.grid(row=0, column=1, sticky="nswe")
        self._scrollbar.grid(row=0, column=scrollbar_column, sticky="ns")

    def __configure_interior(self, *args):
        """
        Private function to configure the interior Frame.

        :param args: Tkinter event
        """
        # Resize the canvas scrollregion to fit the entire frame
        (size_x, size_y) = (self.interior.winfo_reqwidth(), self.interior.winfo_reqheight())
        self._canvas.config(scrollregion="0 0 {0} {1}".format(size_x, size_y))
        # BUGFIX: `is not` compared int identity, which is unreliable for
        # ints outside CPython's small-int cache; use != for a value test.
        if self.interior.winfo_reqwidth() != self._canvas.winfo_width():
            # If the interior Frame is wider than the canvas, automatically resize the canvas to fit the frame
            self._canvas.config(width=self.interior.winfo_reqwidth())

    def __configure_canvas(self, *args):
        """
        Private function to configure the internal Canvas.

        Changes the width of the canvas to fit the interior Frame

        :param args: Tkinter event
        """
        # BUGFIX: value comparison instead of identity comparison (see
        # __configure_interior).
        if self.interior.winfo_reqwidth() != self._canvas.winfo_width():
            self._canvas.configure(width=self.interior.winfo_reqwidth())

    def __mouse_wheel(self, event):
        """
        Private function to scroll the canvas view.

        :param event: Tkinter event
        """
        # event.delta arrives in multiples of 120 on Windows; dividing by
        # 100 yields at least one scroll unit per wheel notch.
        self._canvas.yview_scroll(-1 * (event.delta // 100), "units")

    def resize_canvas(self, height=400, width=400):
        """
        Function for the user to resize the internal Canvas widget if desired.

        :param height: new height in pixels
        :type height: int
        :param width: new width in pixels
        :type width: int
        """
        self._canvas.configure(width=width, height=height)
|
Nine countries initially placed bids to host the FIFA World Cup, although Mexico later withdrew from proceedings, and Indonesia's bid was rejected by FIFA in February after the Indonesian government failed to submit a letter of support. The bidding procedure to host the 2018 and 2022 FIFA World Cup tournaments began in January 2009; national associations had until 2 February to register their interest.
|
############################################################################
# Copyright (C) 2007 by Dennis Schwerdel, Thomas Schmidt #
# #
# #
# This program is free software; you can redistribute it and or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
import struct, collections
# Link-flags byte: which optional sections are present in the .lnk file.
# Field order matches the bit order produced by getBits(...)[1:] below.
LnkFlags = collections.namedtuple("LnkFlags", ["customIcon", "commandLineArgs", "workingDirectory", "relativePath", "description", "pointsToFileDir", "shellIdList"])
# File attribute bits of the link target, decoded from two bytes.
FileAttributes = collections.namedtuple("FileAttributes", ["offline", "compressed", "reparse", "sparse", "temporary", "normal", "ntfsEfs", "archive", "directory", "volumeLabel", "system", "hidden", "readOnly"])
# ShowWindow constants: how the window of the launched target should be
# displayed (stored in the header's showWindow field).
SW_HIDE = 0
SW_NORMAL = 1
SW_SHOWMINIMIZED = 2
SW_SHOWMAXIMIZED = 3
SW_SHOWNOACTIVE = 4
SW_SHOW = 5
SW_MINIMIZE = 6
SW_SHOWMINNOACTIVE = 7
SW_SHOWNA = 8
SW_RESTORE = 9
SW_SHOWDEFAULT = 10
# Fully parsed shortcut, as returned by readLnkFromFp()/readLnk().
LnkFile = collections.namedtuple("LnkFile", ["lnkFlags", "timeCreate", "timeAccess", "timeModify", "fileLength", "iconIndex", "showWindow", "hotkey", "fileAttributes", "target", "description", "relativePath", "workingDirectory", "commandLineArgs", "customIcon"])
def getBits(byte):
    """Return the 8 bits of *byte* as a list of bools, MSB first.

    Wrapped in list() so the result is subscriptable and sliceable on
    Python 3 as well (where map() returns a lazy iterator); on Python 2
    map() already returned a list, so behavior there is unchanged.
    """
    return list(map(bool, (byte & 0x80, byte & 0x40, byte & 0x20, byte & 0x10,
                           byte & 0x08, byte & 0x04, byte & 0x02, byte & 0x01)))
def winTimeToUnix(time):
    """Convert a Windows FILETIME value (100-nanosecond ticks since
    1601-01-01) to a Unix timestamp (seconds since 1970-01-01)."""
    # Scale ticks to seconds, then shift by the 11644473600 s between
    # the Windows and Unix epochs (same arithmetic as before).
    seconds_since_1601 = time * 0.0000001
    return int(seconds_since_1601 - 11644473600)
def readLnkFromFp(fp):
    """Parse a Windows .lnk (shell link) file from an open file object.

    Returns a LnkFile namedtuple describing the shortcut.
    NOTE(review): written for Python 2 -- the parsed byte strings are
    rstrip()'ed/split() with str arguments, so on Python 3 (where
    fp.read() yields bytes) the str/bytes handling would need rework.
    """
    # Header begins with the magic byte 0x4c ("L"); "3x" skips padding.
    (magic,) = struct.unpack("B3x", fp.read(4))
    if magic != 0x4c:
        raise Exception("Not valid LNK format")
    # 16-byte shell link GUID followed by one byte of link flags.
    (guid, lnkFlags) = struct.unpack("<16sB3x", fp.read(20))
    # The top bit of the flags byte is unused here, hence the [1:] slice.
    lnkFlags = LnkFlags(*(getBits(lnkFlags)[1:]))
    if lnkFlags.pointsToFileDir:
        # Two bytes carrying the 13 file-attribute bits of the target.
        (byte1, byte2) = struct.unpack("<2B2x", fp.read(4))
        fileAttributes = FileAttributes(*(getBits(byte1)[3:]+getBits(byte2)))
    # Timestamps sit at fixed offset 0x1c: creation, access, modification.
    fp.seek(0x1c)
    # NOTE(review): FILETIMEs are 64-bit integers on disk; "<3d" reads
    # them as doubles -- confirm against real files before relying on
    # the decoded timestamps.
    (timeCreate, timeAccess, timeModify) = map(winTimeToUnix, struct.unpack("<3d", fp.read(24)))
    (fileLength, iconIndex, showWindow, hotkey) = struct.unpack("<IIBI", fp.read(13))
    # The optional shell item ID list starts at fixed offset 0x4c.
    fp.seek(0x4c)
    if lnkFlags.shellIdList:
        # Skip over the variable-length item ID list; only its length
        # prefix is needed to advance past it.
        (itemIdLen,) = struct.unpack("<H", fp.read(2))
        itemId = fp.read(itemIdLen)
    # File location info block; the offsets below are relative to its
    # start, so remember where it begins.
    start = fp.tell()
    (structLength, firstOffset, volumeFlags, localVolumeTableOffset, basePathOffset, networkVolumeTableOffset, remainingPathOffset) = struct.unpack("<2IB3x4I", fp.read(28))
    # A non-zero volume flag means the target lives on a local volume.
    onLocalVolume = bool(volumeFlags)
    assert firstOffset == 0x1c
    if onLocalVolume:
        fp.seek(start+localVolumeTableOffset)
        (volLength, volType, volSerial, volOffset) = struct.unpack("<IIII", fp.read(16))
        assert volOffset == 0x10
        fp.seek(start+localVolumeTableOffset+volOffset)
        # Volume label and base path are NUL-separated, NUL-terminated.
        (volumeName, basePathName) = fp.read(remainingPathOffset-(localVolumeTableOffset+volOffset)).rstrip("\x00").split("\x00")
        target = basePathName
    else:
        # Network volume table: only the length-prefixed share name is
        # read; the 16 padding bytes are skipped.
        fp.seek(start+networkVolumeTableOffset)
        (length,) = struct.unpack("<I16x", fp.read(20))
        volumeName = fp.read(length)
        target = volumeName
    # The remaining path is appended to whichever base was chosen above.
    fp.seek(start+remainingPathOffset)
    remainingPath = fp.read(structLength-remainingPathOffset).rstrip("\x00")
    target += remainingPath
    # Each optional trailing string is UTF-16 with a 16-bit char count.
    description = None
    if lnkFlags.description:
        (length,) = struct.unpack("<H", fp.read(2))
        description = fp.read(length*2).decode("UTF-16").rstrip("\x00")
    relativePath = None
    if lnkFlags.relativePath:
        (length,) = struct.unpack("<H", fp.read(2))
        relativePath = fp.read(length*2).decode("UTF-16").rstrip("\x00")
    workingDirectory = None
    if lnkFlags.workingDirectory:
        (length,) = struct.unpack("<H", fp.read(2))
        workingDirectory = fp.read(length*2).decode("UTF-16").rstrip("\x00")
    commandLineArgs = None
    if lnkFlags.commandLineArgs:
        (length,) = struct.unpack("<H", fp.read(2))
        commandLineArgs = fp.read(length*2).decode("UTF-16").rstrip("\x00")
    customIcon = None
    if lnkFlags.customIcon:
        (length,) = struct.unpack("<H", fp.read(2))
        customIcon = fp.read(length*2).decode("UTF-16").rstrip("\x00")
    return LnkFile(lnkFlags=lnkFlags, timeCreate=timeCreate, timeAccess=timeAccess, timeModify=timeModify, fileLength=fileLength, iconIndex=iconIndex, showWindow=showWindow, hotkey=hotkey, fileAttributes=fileAttributes, target=target, description=description, relativePath=relativePath, workingDirectory=workingDirectory, commandLineArgs=commandLineArgs, customIcon=customIcon)
def readLnk(filename):
    """Open *filename* in binary mode and parse it as a .lnk file."""
    with open(filename, "rb") as link_fp:
        parsed = readLnkFromFp(link_fp)
    return parsed
if __name__ == "__main__":
    import sys
    # print() with a single argument behaves identically on Python 2
    # and 3, unlike the original "print expr" statement, which is a
    # SyntaxError on Python 3.
    print(readLnk(sys.argv[1]))
|
angelliqui VickyJason jasmin. dolly4anal. GwenDaisyx. MilaBeautiful.
JolieCruzMerryandFabioGiaCarreraBrittanyWhite .YourPsychologisPrettyNicolleXNiceKiiissbbalexinwonderland .KYLExxxNikiJaneEalasaywhitesnow1995 .juicywatermelonSupermymanBritneyBeautifulMaryRedRose .whitesnow1995GiliSkyladiosamalditaHUGEpassionLOVE .mmcutealinaAlexaPayneladiosamalditaWhitePanther19 .NiceKiiissbbWhitePanther19mmcutealinaBrookSunshine .kimASStattoxBaileyHazeSAMHAMTAOneTruePassion .stevenshotlXXNonnaPopTATTOOEDnSWEETchadtanner .whitesnow1995TravisPoole1Veronika69xOneTruePassion .SamMaxxSamMaxxYourPsychologisladiosamaldita .
HotHorizonsbadboyhotnolimitEllasKissesxoDAVIDANDDANIEL .ladykeishaxrobethcumboyDalmaAffableTestie .AmericanBoy4EvarDalmaAffableamalfejaJolieCruz .JolieCruzCutexsherylDalmaAffableNixia .BadBoy90xxxLouiseYahaMerryandFabioNiceKiiissbb .SupermymanstevenshotlXXHotStoriesbadboyhotnolimit .whitesnow1995AmericanBoy4EvarmisstresbabyAuroraClark .TestieBadBoy90xxxAlexaPayneAlexaPayne .juicywatermelonHUGEpassionLOVEEllasKissesxoHUGEpassionLOVE .TheBoyNextDoor24NogatoMariSweetiEleanoraNatalieAllen .AnalXSquirt22AuroraClarkNogatoMariSamMaxx .
|
# -*- coding: utf-8 -*-
'''
:copyright: (c) 2014 by Carlos Abalde, see AUTHORS.txt for more details.
'''
from __future__ import absolute_import
import datetime
import curses
import threading
class UI(threading.Thread):
    """Curses front end that renders counter tables in its own thread.

    run() owns the curses screen and drives the event loop; update() is
    called from a producer thread with fresh counter data, hence the
    mutex protecting the shared pad.
    """
    # Key code of the escape key (curses defines no constant for it).
    KEY_ESC = 27
    # Minimum rendered width of a value column, in characters.
    MIN_WIDTH = 12
    INTEGER_FORMAT = '%d'
    DECIMAL_FORMAT = '%.5f'

    def __init__(self, options):
        super(UI, self).__init__()
        self.daemon = True
        self.__options = options
        # Guards self.__pad, which update() swaps from another thread.
        self.__mutex = threading.Lock()
        self.__stopping = threading.Event()
        self.__pad = None

    def run(self):
        """Thread body: run the curses event loop until stopped."""
        # General initializations.
        pminrow = 0
        pmincolumn = 0
        wmessage = ' Waiting for data '
        lheader = ' Varnish Custom Counters (name=%s, wsize=%d, nwindows=%d)' % (
            self.__options.name if self.__options.name is not None else '-',
            self.__options.wsize,
            self.__options.nwindows,
        )
        # Screen initializations.
        screen = curses.initscr()
        curses.cbreak()
        curses.noecho()
        screen.keypad(1)
        # getch() returns -1 after 250 ms with no input, keeping the
        # loop responsive without busy-waiting.
        screen.timeout(250)
        # Event loop.
        while not self.stopping:
            # Wait (up to 250 ms) for some user input.
            ch = screen.getch()
            # Extract current screen dimensions (excluding top bar).
            srows, scolumns = screen.getmaxyx()
            srows -= 1
            # Safely render pad contents.
            with self.__mutex:
                # Select pad to be rendered.
                if self.__pad is None:
                    # No data yet: rotate the waiting message by one
                    # character per iteration (simple marquee effect)
                    # and center it on a fresh pad.
                    wmessage = wmessage[1:] + wmessage[0]
                    pad = curses.newpad(srows, scolumns)
                    pad.addstr(
                        int(srows / 2),
                        max(int(scolumns / 2 - len(wmessage) / 2), 0),
                        wmessage, curses.A_REVERSE | curses.A_BOLD)
                else:
                    pad = self.__pad
                # Extract pad dimensions, expand & update dimensions.
                prows, pcolumns = pad.getmaxyx()
                pad.resize(max(srows, prows), max(scolumns, pcolumns))
                prows, pcolumns = pad.getmaxyx()
                # Check requested action, if any.
                if ch == ord('q') or ch == ord('Q') or ch == self.KEY_ESC:
                    self.stop()
                elif ch == curses.KEY_RESIZE:
                    # Terminal resized: reset the scroll position.
                    pminrow = 0
                    pmincolumn = 0
                elif ch == curses.KEY_UP or ch == curses.KEY_PPAGE:
                    pminrow = max(pminrow - srows, 0)
                elif ch == curses.KEY_DOWN or ch == curses.KEY_NPAGE:
                    pminrow = min(pminrow + srows, prows - srows)
                elif ch == curses.KEY_LEFT:
                    pmincolumn = max(pmincolumn - scolumns, 0)
                elif ch == curses.KEY_RIGHT:
                    pmincolumn = min(pmincolumn + scolumns, pcolumns - scolumns)
                elif ch != -1:
                    # Any other key: audible feedback only.
                    curses.beep()
                # Update top bar (left: static header, right: clock);
                # each part is drawn only if it fits.
                screen.addstr(0, 0, ' ' * scolumns, curses.A_REVERSE)
                if len(lheader) < scolumns:
                    screen.addstr(0, 0, lheader, curses.A_REVERSE | curses.A_BOLD)
                rheader = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S') + ' '
                if len(lheader) + len(rheader) < scolumns:
                    screen.addstr(0, scolumns - len(rheader), rheader, curses.A_REVERSE | curses.A_BOLD)
                # Render pad.
                pad.refresh(pminrow, pmincolumn, 1, 0, srows, scolumns - 1)
        # Destroy screen.
        curses.nocbreak()
        curses.echo()
        screen.keypad(0)
        curses.endwin()

    def update(self, counters):
        """Rebuild the pad from *counters*, a list of (name, values)
        pairs, and swap it in under the mutex.

        Called from a producer thread. An empty list resets the pad to
        None, which makes run() show the waiting message again.
        """
        pad = None
        if len(counters) > 0:
            # Sort incoming counters (incrementally by counter name).
            counters.sort(key=lambda item: item[0])
            # Create new pad.
            prows = 2 + len(counters)
            # Initial width: the longest counter name plus margins.
            pcolumns = 1 + len(max([name for (name, values) in counters], key=len)) + 1
            pad = curses.newpad(prows, pcolumns)
            # Add first column (counter names).
            pad.addstr(1, 0, '-' * pcolumns)
            for (i, (name, values)) in enumerate(counters):
                pad.addstr(2 + i, 1, name)
            # Add rest of columns (counter values).
            for offset in range(self.__options.nwindows):
                # Render column.
                column = ['N-%d ' % offset if offset > 0 else 'N ', '']
                for (i, (name, values)) in enumerate(counters):
                    if offset < len(values) and values[offset] is not None:
                        value = values[offset]
                        # NOTE(review): `long` makes this Python 2 only.
                        if isinstance(value, (int, long)):
                            value = self.INTEGER_FORMAT % value
                        elif isinstance(value, float):
                            value = self.DECIMAL_FORMAT % value
                        column.append(value + ' ')
                    else:
                        # Missing sample: placeholder dash.
                        column.append('- ')
                # Column width: widest cell + 1, but at least MIN_WIDTH.
                width = max(len(max(column, key=len)) + 1, self.MIN_WIDTH)
                column[1] = '-' * width
                # Add column (grow the pad, then right-align each cell).
                pcolumns += width
                pad.resize(prows, pcolumns)
                for (i, value) in enumerate(column):
                    pad.addstr(i, pcolumns - len(value) - 1, value)
        # Safely update pad.
        with self.__mutex:
            self.__pad = pad

    def stop(self):
        """Request the event loop in run() to terminate."""
        self.__stopping.set()

    @property
    def stopping(self):
        # True once stop() has been called.
        return self.__stopping.isSet()
|
In its early years, the business ran exclusively in the European market; however, over the past years, GetResponse has made a concentrated effort to expand its reach to the American market.
GetResponse is a popular e-mail marketing service that just gets the job done. Servicing companies in more than 180 countries with over 1 billion customers monthly, GetResponse projects itself as the world’s simplest e-mail marketing system. The platform makes it easy and problem-free to develop professional-looking emails and landing pages with its editor.
What makes GetResponse such an excellent business software application solution? To start, you do not require a technology background to use its marketing and e-mail automation tools to boost your business. The vendor supplies a useful 30-day free trial and flexible pricing bundles with a lot of additional features. You do not need a credit card to sign up for the free trial, which allows up to 1,000 contacts.
The GetResponse website hosts a detailed Help Center where you can quickly look at answers to usual inquiries. The vendor likewise offers 24/7 assistance through live chat and email. The user experience is amazing and the third-party integrations allow you to easily link the application with your existing business software application platforms. We give a thumbs up to GetResponse and welcome you to have a look at its leading rate e-mail marketing functionalities.
Marketing Automation – GetResponse’s marketing automation feature lets users build scalable workflows based on client journeys. Action-based autoresponders allow for the creation of messages that are set off by relevant recipient actions – with customized one-to-one responses. In addition, GetResponse supplies users with sophisticated division tools that enable them to divide their contacts into subgroups and tailor emails appropriately.
Landing Page Builder – GetResponse has an intuitive drag and drop landing page builder that permits users to create 100% responsive landing pages and web forms. Company online marketers can construct websites for sales, webinars, thank yous, opt-ins, about-me and downloads in simply a few minutes. Moreover, GetResponse lets subscribers test, analyze and optimize their pages to increase conversion rates.
Comprehensive Reporting – GetReponse has robust reporting abilities. A few basic reports appear straight in the dashboard, offering a quick summary of project success by means of pie chart and raw numbers. The E-mail Analytics section supplies more detail, with line and bar graphs for clicks, unsubscribes, opens, grievances and bounces. Additionally, for each report, users can see which customers within their email list carried out any offered action.
Webinar Integration – The GetResponse webinar platform effortlessly incorporates with GetResponse email marketing, enabling users to host product statements, demos and training sessions. Features consist of presentation sharing, chat moderation, surveys, desktop sharing, guest management and VoIP abilities. Additionally, GetResponse lets users decide whether their webinar will be password-protected or available to everyone.
On the whole, GetResponse is quite uncomplicated to utilize. It’s definitely easy enough to do all the basics: import contacts, make campaigns, set up autoresponders and check statistics and the interface is quite clean and intuitive.
In terms of how it stacks up against its competitors in this regard, I would argue that Campaign Monitor is a little bit more easy to use although not as complete, and Mailchimp has a slicker interface although one that makes finding specific functionality a bit difficult at times.
Whilst its drag-and-drop technique performs in theory offer a really versatile way to produce blocks of material and move them around an e-newsletter, in practice it is a bit cumbersome to utilize and can cause unexpected deletion of material, or placement of it in the incorrect part of the e-newsletter.
If you can get your head around it, and practice using it a bit, it does make for an extremely beneficial tool – it’s simply that the execution of it might be rather much better which I was able to do rather fast!
GetResponse offers a 30 day totally free trial for a list of up to 250 subscribers, no credit card required. Understand, however, that for the trial (and only the trial), if you add, delete, and then re-add a contact, it counts as two contacts. Once the trial has ended, there are various pricing plans readily available. The pricing format is rather complex, with more advanced plans appearing as your business’s list size grows. Pre-paying for one year’s worth of service will save 18 percent. If you think you will be using GetResponse for the foreseeable future, you can save 30 percent by pre-paying 2 years.
The standard e-mail plan gives you unlimited e-mail marketing, autoresponders, and a landing page that can accommodate as much as 1,000 visitors a month.
Pro removes the constraints on landing pages and permits you to make as many of them as you desire. You’ll also have access to webinars (up to 100 attendees). A Pro account can accommodate up to three users.
The Max plan ups the amount of users to five and the optimum webinar guests to 500. You will likewise get a custom-made domain and an account supervisor.
Enterprise offers an entire host of new functions for bigger businesses trying to find highly personalized features. You’ll require to schedule a demonstration before signing up.
GetResponse gives an excellent 1-GB of image storage with each account. All users likewise have access to the company’s image library, which contains over 1000 images.
Webinars –this functionality is not offered at all on the ‘Email’ plan and the number of webinar attendees is capped for the ‘Pro’ and ‘Max’ plans at 100, 500 respectively (it’s uncertain what the limit is on the ‘Enterprise’ plan).
E-commerce –you can only avail of the complete set of GetResponse e-commerce tools (which include integrations with a couple of popular e-commerce platforms) if you are on a ‘Pro’ plan or higher.
The 30-day completely free trial that Getresponse supplies is totally functional (approximately 1,000 subscribers) and it is not contingent upon offering credit card information.
This helps you avoid that annoying “oops, I forgot I registered for that trial”.
We understand that when you make a decision to buy Marketing Softwares it’s important not only to see how experts evaluate it in their evaluations, but likewise to discover if the genuine people and businesses that buy it are really pleased with the item. That is why we have actually found a behavior-based Customer Satisfaction Algorithm that gathers customer evaluations, remarks and GetResponse examines across a wide range of social networks websites. The data is then provided in an easy to digest type demonstrating how many individuals had positive and unfavorable experience with GetResponse. With that details at hand you should be equipped to make a notified buying decision that you will not be sorry for.
GetResponse has an award-winning customer care group, winning gold, silver and bronze Stevie Awards in 2013 and 2014. The business provides e-mail support in 7 languages and is the first e-mail service provider (ESP) to offer 24/7 live chat, also on weekends.
Consumers can reference the Help Center and Learning Center, which both feature resources to help solve any questions or concerns. These knowledge bases include Frequently Asked Questions, video tutorials, webinars, and downloadable files like handbooks, reports and whitepapers.
It’s also one of the most interesting products of its kind – in that it gives e-mail marketing, automation, landing pages, some CRM performance and webinars all under one roofing.
It is difficult to think of any other product that uses this ‘all round’ proposal, and it is what continues to encourage us to utilize it for our businesses e-mail marketing.
|
import django
from django.db.models import Aggregate, Q
from django.db.models.sql.aggregates import Aggregate as SQLAggregate
# Unpack the (major, minor, micro, releaselevel, serial) version tuple so
# the 1.6 vs 1.7 ORM differences can be handled below.
DJANGO_MAJOR, DJANGO_MINOR, _, _, _ = django.VERSION
def transform_q(q, query):
    """
    Replaces (lookup, value) children of Q with equivalent WhereNode objects.

    This is a pre-prep of our Q object, ready for later rendering into SQL.
    Modifies in place, no need to return.

    (We could do this in render_q, but then we'd have to pass the Query
    object from ConditionalAggregate down into SQLConditionalAggregate,
    which Django avoids to do in their API so we try and follow their
    lead here)
    """
    for position, node in enumerate(q.children):
        if not isinstance(node, Q):
            # Plain (lookup, value) tuple: compile it to a WhereNode via
            # the query's own filter machinery.
            q.children[position] = query.build_filter(node)
        else:
            # Nested Q object: recurse; its child list is rewritten in
            # place as well.
            transform_q(node, query)
def render_q(q, qn, connection):
    """
    Renders the Q object into SQL for the WHEN clause.

    Uses as much as possible the Django ORM machinery for SQL generation,
    handling table aliases, field quoting, parameter escaping etc.

    :param q: Q object representing the filter condition
    :param qn: db specific 'quote names' function that was passed into
        SQLAggregate.as_sql method by Django
    :param connection: Django db connection object that was passed into
        SQLAggregate.as_sql method by Django
    :returns: (SQL template str, params list) tuple
    """
    # Children are joined with the Q's own connector ("AND" / "OR").
    joinstr = u' {} '.format(q.connector)
    conditions = []
    params = []
    if DJANGO_MAJOR == 1 and DJANGO_MINOR == 7:
        # in Django 1.7 WhereNode.as_sql expects `qn` to have a `compile`
        # method (i.e not really expecting a quote names function any more
        # they are expecting a django.db.models.sql.compiler.SQLCompiler)
        try:
            # `qn` is a bound method; its __self__ is the SQLCompiler.
            qn = qn.__self__
        except AttributeError:
            pass
    for child in q.children:
        if isinstance(child, Q):
            # recurse
            condition, child_params = render_q(child, qn, connection)
            conditions.append(u'({})'.format(condition))
            params.extend(child_params)
        else:
            try:
                # Django 1.7: build_filter returned a (node, joins) pair.
                child, joins_used = child
            except TypeError:
                # Django 1.6
                pass
            # we expect child to be a WhereNode (see transform_q)
            condition, child_params = child.as_sql(qn, connection)
            params.extend(child_params)
            conditions.append(condition)
    rendered = u'({})'.format(joinstr.join(conditions))
    if q.negated:
        # ~Q(...) becomes a NOT wrapped around the whole rendered group.
        rendered = u'NOT {}'.format(rendered)
    return rendered, params
class SQLConditionalAggregate(SQLAggregate):
    """
    An aggregate like Count, Sum, but whose content is a CASE conditional

    Like Django Count() and Sum() it can be used in annotate() and aggregate()
    """
    is_ordinal = False
    is_computed = False
    # Only rows matching the WHEN clause feed the aggregated value; all
    # other rows contribute the subclass's `default`.
    sql_template = (
        '%(function)s('
        'CASE WHEN %(when_clause)s THEN %(value)s ELSE %(default)s END'
        ')'
    )

    def __init__(self, col, when, source=None,
                 is_summary=False, **extra):
        # `when` is a Q whose children are WhereNodes (see transform_q).
        self.when = when
        super(SQLConditionalAggregate, self).__init__(col, source=source,
                                                      **extra)

    def get_value(self, **kwargs):
        """Return the SQL fragment used as the THEN value.

        Receives the whole local namespace of as_sql as kwargs (see the
        locals() call there); the base implementation just uses the
        rendered column reference.
        """
        return kwargs['field_name']

    def as_sql(self, qn, connection):
        """Render this aggregate to a (sql, params) pair."""
        params = []
        # Resolve the column reference, covering the forms Django may
        # hand us: an expression object, a (table, column) sequence, or
        # a plain string.
        if hasattr(self.col, 'as_sql'):
            field_name, params = self.col.as_sql(qn, connection)
        elif isinstance(self.col, (list, tuple)):
            field_name = '.'.join([qn(c) for c in self.col])
        else:
            field_name = self.col
        when_clause, when_params = render_q(
            q=self.when,
            qn=qn,
            connection=connection,
        )
        params.extend(when_params)
        # HACK: forward every local (field_name etc.) to get_value so
        # subclasses can pick out whatever they need.
        get_val_kwargs = locals()
        get_val_kwargs.pop('self')
        substitutions = {
            'function': self.sql_function,
            'when_clause': when_clause,
            'value': self.get_value(**get_val_kwargs),
            'default': self.default,
        }
        substitutions.update(self.extra)
        return self.sql_template % substitutions, params
class ConditionalAggregate(Aggregate):
    """
    Base class for concrete aggregate types

    e.g.
        ConditionalSum('count', when=Q(stat_type='a', event_type='v'))

    First argument is field lookup path, then we expect `when` kwarg
    to be a Django Q object representing the filter condition.
    """
    SQLClass = None  # define on concrete sub-class

    def __init__(self, lookup, when, **extra):
        # Keep the raw Q; it is transformed lazily in add_to_query.
        self.when = when
        super(ConditionalAggregate, self).__init__(lookup, **extra)

    def add_to_query(self, query, alias, col, source, is_summary):
        """Hook called by Django's ORM to attach this aggregate to `query`."""
        # transform simple lookups to WhereNodes; clone first so the
        # caller's Q object is not mutated in place:
        when = self.when.clone()
        transform_q(when, query)
        aggregate = self.SQLClass(
            col=col,
            when=when,
            source=source,
            is_summary=is_summary,
            **self.extra
        )
        query.aggregates[alias] = aggregate
class ConditionalSum(ConditionalAggregate):
    """
    Works like Sum() except only sums rows that match the Q filter.

    :param lookup: (as arg) Django __ lookup path to field to sum on
    :param when: (as kwarg) a Q object specifying filter condition

    Usage:
        report = (
            Stat.objects
            .extra(select={'month': "date_format(time_period, '%%Y-%%m')"})
            .values('campaign_id', 'month')  # values + annotate = GROUP BY
            .annotate(
                impressions=ConditionalSum(
                    'count',
                    when=Q(stat_type='a', event_type='v')
                ),
                clicks=ConditionalSum(
                    'count',
                    when=Q(stat_type='a', event_type='c') & ~Q(detail='e')
                )
            )
        )
    """
    name = 'ConditionalSum'

    class SQLClass(SQLConditionalAggregate):
        sql_function = 'SUM'
        is_computed = True
        # Non-matching rows contribute 0 to the running sum.
        default = 0
class ConditionalCount(ConditionalAggregate):
    """
    Works like Count() except only counts rows that match the Q filter.

    :param when: (as kwarg) a Q object specifying filter condition

    Usage:
        report = (
            Stat.objects
            .extra(select={'month': "date_format(time_period, '%%Y-%%m')"})
            .values('campaign_id', 'month')  # values + annotate = GROUP BY
            .annotate(
                impressions=ConditionalCount(
                    when=Q(stat_type='a', event_type='v')
                )
            )
        )
    """
    name = 'ConditionalCount'

    def __init__(self, when, **extra):
        self.when = when
        # NOTE: passing 'id' as the lookup is a bit hacky but Django is
        # rigidly expecting a field name here, even though not needed.
        # Deliberately skips ConditionalAggregate.__init__ (which takes
        # `when` positionally) and calls Aggregate.__init__ directly.
        super(ConditionalAggregate, self).__init__('id', **extra)

    class SQLClass(SQLConditionalAggregate):
        sql_template = (
            '%(function)s('
            'CASE WHEN %(when_clause)s THEN %(value)s ELSE %(default)s END'
            ')'
        )
        sql_function = 'COUNT'
        is_ordinal = True
        # COUNT ignores NULLs, so non-matching rows are not counted.
        default = 'NULL'

        def get_value(self, **kwargs):
            """THEN value: a constant 1, so COUNT tallies matching rows."""
            return '1'
|
The Motorbike Geometry Analyzer is a simulation environment of the motorcycle dynamics developed to meet the needs of analysis and development of racing professionals.
The environment elaborates experimentally acquired data and geometrical parameters of the specific motorcycle to determine the temporal evolution of the reciprocal position between the moving parts of the frame.
The creation and management of different motorcycle databases (technical parameters in .SPC format).
The analysis of the parameters of interest at each time of acquisition through a double representation (graphic and numeric).
|
from __future__ import absolute_import
import os
import sys
import traceback
from docker import errors as d_errors
from requests import exceptions as requests_exceptions
from . import compat, docker, push, tar
def pull(client, *args, **kwargs):
    """Yield decoded JSON events from client.pull, turning a docker
    NotFound error into an error event instead of letting it raise."""
    try:
        events = client.pull(*args, **kwargs)
        for raw_event in events:
            yield compat.json_loads(raw_event)
    except d_errors.NotFound as exc:
        yield docker.error(exc.explanation)
class PullFailedException(Exception):
    """Raised when pulling an image from the registry fails."""
    pass
class CacheMissException(Exception):
    """Raised by _pull_cache when an image is not available as a cache hit."""
    pass
# Sentinel stored in NoCache._pulled_images to remember a failed pull.
_FAILED = object()
class NoCache(object):
    """Image build strategy that never reuses a remote cache.

    Subclasses override _pull_cache() to implement cache lookup; this
    base class always misses, forcing a local docker build.
    """

    def __init__(self, docker_client):
        self.docker_client = docker_client
        # (repo, tag) -> True on success, or the _FAILED sentinel when a
        # previous pull attempt failed.
        self._pulled_images = {}

    def _pull_cache(self, image):
        """Generator yielding pull events for a cached image.

        The base implementation always signals a cache miss.  The
        unreachable ``yield`` marks this as a generator so callers can
        iterate it uniformly.
        """
        raise CacheMissException()
        yield

    def tag(self, targets, tags):
        """Apply each of *tags* to every image in *targets*, yielding the
        docker tag events as they are produced."""
        for image in targets:
            for tag in tags:
                yield docker.tag_image(
                    self.docker_client,
                    image,
                    tag,
                )

    def push(self, targets, tags):
        """Push every target image under its own ref and each extra tag.

        The (name, tag) pairs are de-duplicated and pushed once, in
        sorted order.  BUGFIX: the original repeated the push of the
        last target's tags a second time after the loop (a copy/paste
        artifact), causing redundant pushes; that duplicate block has
        been removed.
        """
        names_and_tags = set()
        for image in targets:
            names_and_tags.add((image.name, image.ref))
            for tag in tags:
                names_and_tags.add((image.name, tag))
        for evt in push.do_push(self.docker_client, sorted(names_and_tags)):
            yield evt

    def build(self, parent_ref, image):
        """Build *image*, yielding docker build events.

        The pull cache is consulted first; on a hit the pulled events
        are yielded and the build is skipped entirely.
        """
        repo = image.name
        tag = image.ref
        client = self.docker_client
        try:
            for evt in self._pull_cache(image):
                yield evt
        except CacheMissException:
            pass
        else:
            # Cache hit: the image is already available, nothing to do.
            return
        # pull the parent if it has not been built because Docker-py fails
        # to send the correct credentials in the build command.
        if parent_ref:
            try:
                for evt in self._pull(image.parent, parent_ref):
                    yield evt
            except PullFailedException:
                # Best effort only: the build may still succeed with a
                # locally available parent.
                pass
        build_evts = client.build(
            fileobj=tar.mkcontext(parent_ref, image.path),
            rm=True,
            custom_context=True,
            stream=True,
            tag='{0}:{1}'.format(image.name, image.ref),
            dockerfile=os.path.basename(image.path),
        )
        for evt in build_evts:
            yield compat.json_loads(evt)
        # A freshly built image never needs pulling later in this run.
        self._pulled_images[(repo, tag)] = True

    def _pull(self, repo, tag):
        """Pull repo:tag once, yielding events, and remember the outcome.

        Raises PullFailedException on failure -- including a failure
        remembered from an earlier attempt.  Pull errors in the event
        stream are downgraded to warnings so consumers keep streaming;
        the failure itself is signalled via the exception.
        """
        already_pulled = self._pulled_images.get((repo, tag), False)
        if already_pulled is _FAILED:
            raise PullFailedException()
        if already_pulled:
            return
        client = self.docker_client
        failed = False
        pull_evts = pull(
            client,
            repository=repo,
            tag=tag,
            stream=True,
        )
        for event in pull_evts:
            if 'error' in event:
                # Downgrade to a warning in the streamed event.
                event['warn'] = event['error']
                del event['error']
                failed = True
            yield event
        if failed:
            self._pulled_images[(repo, tag)] = _FAILED
            raise PullFailedException()
        self._pulled_images[(repo, tag)] = True
class Cache(NoCache):
    """Cache strategy that treats a pullable image as a cache hit."""

    def _pull_cache(self, image):
        """Yield pull events for ``image``; a failed pull is a cache miss."""
        try:
            for event in self._pull(repo=image.name, tag=image.ref):
                yield event
        except PullFailedException:
            raise CacheMissException()
class DirectRegistry(NoCache):
    """Cache strategy that talks to a Docker registry directly.

    Rather than pulling/pushing full images through the daemon, it reads and
    writes image *manifests* on the registry, so aliasing a tag is a
    metadata-only operation.
    """

    def __init__(self, docker_client, docker_registry):
        super(DirectRegistry, self).__init__(docker_client)
        self.drc = docker_registry
        # Manifest memo: (name, ref) -> manifest returned by the registry.
        self._cache = {}

    def _get_manifest(self, tag):
        """Return the manifest for ``tag`` or None if the registry errors.

        Successful lookups are memoized in ``self._cache``.
        """
        name, ref = tag
        try:
            return self._cache[tag]
        except KeyError:
            try:
                m = self.drc.get_manifest(name, ref)
            except requests_exceptions.HTTPError:
                # Treat any HTTP failure (404 included) as "not present".
                return None
            else:
                self._cache[tag] = m
                return m

    def _put_manifest(self, tag, manifest):
        """Yield a single event describing the outcome of writing ``manifest``.

        Yields an error event when ``manifest`` is None or the write fails;
        an empty dict on success.
        """
        name, ref = tag
        if manifest is None:
            msg = 'manifest does not exist, did the image fail to build?'
            yield docker.error(msg)
            return
        try:
            self.drc.put_manifest(name, ref, manifest)
        except requests_exceptions.HTTPError:
            msg = traceback.format_exception(*sys.exc_info())
            yield docker.error(msg)
        else:
            yield {}

    def _pull_cache(self, image):
        """Cache hit iff the manifest already exists on the registry.

        Yields nothing; the trailing ``yield`` only makes this a generator
        so callers can iterate it uniformly.
        """
        tag = (image.name, image.ref)
        if self._get_manifest(tag) is None:
            raise CacheMissException()
        return
        yield

    def tag(self, targets, tags):
        """
        A noop operation because we can't tag locally, if we don't have the
        built images
        """
        return
        yield

    def push(self, targets, tags):
        """Yield events while pushing missing images and aliasing all tags.

        Images whose manifest already exists on the registry skip the push
        and are only re-aliased; the rest are pushed first, then aliased by
        writing their manifest under each requested tag.
        """
        to_push = set()
        to_alias = []
        for image in targets:
            tag = (image.name, image.ref)
            manifest = self._get_manifest(tag)
            if manifest is not None:
                to_alias.append((tag, manifest))
            else:
                to_push.add(tag)
        sorted_to_push = sorted(to_push)
        for evt in push.do_push(self.docker_client, sorted_to_push):
            yield evt
        # The pushes above created manifests; fetch them so the freshly
        # pushed images can be aliased too.
        for tag in sorted_to_push:
            manifest = self._get_manifest(tag)
            to_alias.append((tag, manifest))
        for (name, ref), manifest in to_alias:
            for tag in tags:
                dest = (name, tag)
                extra = {
                    'event': 'alias',
                    'old_image': name + ':' + ref,
                    'repository': name,
                    'tag': tag,
                }
                for evt in self._put_manifest(dest, manifest):
                    evt.update(extra)
                    yield evt
|
Quick-start Videos: Choose the video that best describes your desired setup.
Here are videos that cover the three most commonly used PBX configs. Have a look and see which setup best suits your business requirements.
1. I want each phone to have its own DID.
This video will walk you through giving each of your extensions its own DID for direct PSTN access.
2. I want to organize my phones into ring groups.
3. I need a main business number with an IVR.
Create a ‘virtual receptionist’ for your incoming callers that will route callers based on selections.
Here are more video tutorials covering common features, apps and more.
This video will walk you through creating user extension one-by-one and in bulk.
This video will show you how to setup your email-based fax server, allowing you to send and receive faxes online.
This video will show you how to setup your conference center for multiple party conference calls.
This video will provide an example of how to register SIP phones and other devices to your new PBX.
|
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import random
import logging
import simplejson as json
import re
import glob
import os
import gzip
import struct
import array
import pickle
def load_pgm(filename, byteorder=">"):
    """Return image data from a raw PGM file as numpy array.

    Format specification: http://netpbm.sourceforge.net/doc/pgm.html

    Raises ValueError when the file is not a raw (P5) PGM; returns None when
    the header parses but the pixel payload cannot be decoded.
    """
    with open(filename, "rb") as handle:
        raw = handle.read()
    header_regex = (
        b"(^P5\s(?:\s*#.*[\r\n])*"
        b"(\d+)\s(?:\s*#.*[\r\n])*"
        b"(\d+)\s(?:\s*#.*[\r\n])*"
        b"(\d+)\s(?:\s*#.*[\r\n]\s)*)"
    )
    match = re.search(header_regex, raw)
    try:
        header, width, height, maxval = match.groups()
    except AttributeError:
        raise ValueError(u"Not a raw PGM file: '%s'" % filename)
    try:
        # 8-bit samples when maxval < 256, otherwise 16-bit in the requested
        # byte order; pixels start right after the textual header.
        dtype = u"u1" if int(maxval) < 256 else byteorder + u"u2"
        pixels = np.frombuffer(raw, dtype=dtype,
                               count=int(width) * int(height),
                               offset=len(header))
        return pixels.reshape((int(height), int(width)))
    except Exception as e:
        logging.warning("Ignoring image in %s for reason %s", filename, str(e))
        return None
def load_mnsit(filename):
    """Load a gzipped IDX image file (MNIST format).

    Returns (images, rows, cols, normalizer) where ``images`` is a list of
    flat numpy arrays and ``normalizer`` is the maximum pixel value.

    Raises IOError when the IDX magic number is not 2051 (image file).
    """
    logging.info("Loading MNIST data from %s", filename)
    with gzip.open(filename) as gf:
        magic, size, rows, cols = struct.unpack(">IIII", gf.read(16))
        if magic != 2051:
            # Fix: the original message claimed the expected magic was 2049
            # (the *label* file magic) while the code checks for 2051.
            raise IOError("Magic number was expected to be <2051> but was <%d>" % magic)
        data = array.array("B", gf.read())
    pixels_per_image = rows * cols
    data = [np.array(data[i * pixels_per_image:(i + 1) * pixels_per_image])
            for i in range(size)]
    logging.info("Loaded %d images from %s", len(data), filename)
    return data, rows, cols, 255.0
def load_cropped_yale(folder):
    """Load every PGM image in ``folder`` (Cropped Yale face database).

    Returns (images, rows, cols, normalizer); images that fail to decode
    (or are entirely zero) are skipped.
    """
    paths = glob.glob(os.path.join(folder, "*.pgm"))
    logging.info("Loading %d images in %s", len(paths), folder)
    # Fix: reuse the path list instead of running the same glob a second
    # time (the original globbed twice, which wastes work and can race with
    # concurrent filesystem changes).
    loaded = [load_pgm(path) for path in paths]
    # load_pgm returns None on failure; np.any also drops all-zero images,
    # matching the original np.any(x, None) filter.
    loaded = [img for img in loaded if img is not None and np.any(img)]
    logging.info("Successfully loaded %d images out of %d", len(loaded), len(paths))
    n_rows, n_cols = loaded[0].shape
    logging.info("Images dimensions: %d by %d pixels", n_rows, n_cols)
    return loaded, n_rows, n_cols, 255.0
def load_cbcl(filename):
    """Load the CBCL face database from its textual format.

    Each data line holds 361 (19x19) pixel values followed by a class label,
    which is dropped. Returns (images, rows, cols, normalizer).
    """
    logging.info("Loading data from %s", filename)
    with open(filename, "r") as f:
        n_examples = int(f.readline())  # declared example count (header only)
        n_features = int(f.readline())
        assert n_features == 361, "Expected number of features to be <361> but was <%d>" % n_features
        # Drop the trailing label with [:-1].
        data = [np.array([float(x) for x in line.strip().split()[:-1]]) for line in f]
    # Fix: the original logged n_features (always 361) as the number of
    # loaded images; report the actual number of examples instead.
    logging.info("Loaded %d images from %s", len(data), filename)
    return data, 19, 19, 1.0
def load_cifar10(folder):
    """Load all CIFAR-10 batch files (``data_batch_*.npy``) in ``folder``.

    Returns (images, rows, cols, normalizer).
    """
    paths = glob.glob(os.path.join(folder, "data_batch_*.npy"))
    logging.info("Loading %d batches from %s (and converting images to grayscale)", len(paths), folder)
    data = []
    for p in paths:
        logging.info("Loading batch: %s", p)
        d = np.load(p)
        # Fix: the original rebound ``data`` on every iteration, so only the
        # last batch survived (and zero batches raised NameError); accumulate
        # images across all batches instead.
        data.extend(x for x in d)
    logging.info("Loaded %d images from %s", len(data), folder)
    return data, 32, 32, 255.0
def load_data(conf):
    """Load the dataset described by ``conf`` and return (matrix, rows, cols).

    ``conf`` must provide "type", "path" and "number". The returned matrix
    holds one normalized, flattened image per column.

    Raises ValueError for an unknown dataset type.
    """
    t = conf["type"]
    if t == "Cropped Yale":
        data, n_rows, n_cols, norm = load_cropped_yale(conf["path"])
    elif t == "MNIST":
        data, n_rows, n_cols, norm = load_mnsit(conf["path"])
    elif t == "CBCL":
        data, n_rows, n_cols, norm = load_cbcl(conf["path"])
    elif t == "CIFAR-10":
        data, n_rows, n_cols, norm = load_cifar10(conf["path"])
    else:
        raise ValueError("Invalid type of data: %s (expecting 'Cropped Yale', 'MNIST', 'CBCL' or 'CIFAR-10')" % t)
    logging.info("Shuffling images...")
    random.shuffle(data)
    n_images = min(conf["number"], len(data))
    logging.info("Converting to flat vectors, keeping %d images...", n_images)
    # Fixes: slice with the clamped n_images (the original sliced with the
    # raw config value) and pass a list to np.vstack -- modern numpy rejects
    # generator arguments.
    data = np.vstack([x.flatten() for x in data[:n_images]]).transpose() / norm
    return data, n_rows, n_cols
class NonnegativeMatrixFactorization:
    """
    "Abstract" non-negative matrix factorization.

    Stores the problem dimensions and randomly initializes the nonnegative
    factors W (n_features x components) and H (components x n_examples).
    """
    def __init__(self, n_features, n_examples, components, iterations, loss_name, random_seed=0):
        self.n_features = n_features
        self.n_examples = n_examples
        self.components = components
        self.iterations = iterations
        self.loss_name = loss_name
        shape_w = (n_features, components)
        shape_h = (components, n_examples)
        # Seed numpy so the factor initialization is reproducible.
        np.random.seed(random_seed)
        self.W = np.random.random(shape_w)
        self.H = np.random.random(shape_h)
class EuclideanLeeSeungNonnegativeMatrixFactorization(NonnegativeMatrixFactorization):
    """
    Implementation of the update rules for Mean Squared Error loss as in the paper from Lee & Seung:
    Algorithms for non-negative matrix factorization (NIPS 2001)
    """
    def __init__(self, n_features, n_examples, components, iterations):
        NonnegativeMatrixFactorization.__init__(self, n_features, n_examples, components, iterations, "euclidean")

    def update_factors(self, V):
        """Apply one multiplicative update to H, then to W (using updated H)."""
        Wt = np.transpose(self.W)
        self.H *= np.dot(Wt, V) / np.dot(np.dot(Wt, self.W), self.H)
        Ht = np.transpose(self.H)
        self.W *= np.dot(V, Ht) / np.dot(self.W, np.dot(self.H, Ht))

    def compute_loss(self, V):
        """Mean squared reconstruction error per example."""
        return np.linalg.norm(V - np.dot(self.W, self.H)) ** 2 / self.n_examples
class DivergenceLeeSeungNonnegativeMatrixFactorization(NonnegativeMatrixFactorization):
    """
    Implementation of the update rules for divergence loss (linked to Kullback-Leibler divergence) as in the paper from
    Lee & Seung: Algorithms for non-negative matrix factorization (NIPS 2001)
    """
    def __init__(self, n_features, n_examples, components, iterations):
        NonnegativeMatrixFactorization.__init__(self, n_features, n_examples, components, iterations, "divergence")

    def update_factors(self, V):
        # Multiplicative updates: H first, then W using the already-updated H.
        # The [:, None] is a trick to force correct broadcasting for np.divide
        self.H *= np.dot(np.transpose(self.W), V / np.dot(self.W, self.H)) / np.sum(self.W, axis=0)[:, None]
        self.W *= np.dot(V / np.dot(self.W, self.H), np.transpose(self.H)) / np.sum(self.H, axis=1)

    def compute_loss(self, V):
        # Compute WH only once.
        WH = np.dot(self.W, self.H)
        # Generalized KL divergence averaged over examples; the 1e-10 term
        # guards against log(0).
        return np.sum(V * np.log(1e-10 + V / WH) - V + WH) / self.n_examples
class SparseHoyerNonnegativeMatrixFactorization(NonnegativeMatrixFactorization):
    """
    Implementation of a sparse nonnegative matrix factorization as in the paper from Patrik O. Hoyer:
    Non-negative sparse coding (arXiv)
    """
    def __init__(self, n_features, n_examples, components, iterations, sparseness, learning_rate, decay):
        NonnegativeMatrixFactorization.__init__(self, n_features, n_examples, components, iterations, "sparse")
        self.sparseness = sparseness
        self.learning_rate = learning_rate
        self.decay = decay
        # Sparsify the random initialization: zero out entries below 0.5.
        self.W = np.where(self.W < 0.5, 0, self.W)
        self.H = np.where(self.H < 0.5, 0, self.H)

    def update_factors(self, V):
        """Sparsity-penalized multiplicative update of H; gradient step on W."""
        Wt = np.transpose(self.W)
        denominator = np.dot(np.dot(Wt, self.W), self.H) + self.sparseness
        self.H *= np.dot(Wt, V) / denominator
        gradient = np.dot(V - np.dot(self.W, self.H), self.H.transpose())
        # Projected gradient: step, then clip at zero to stay nonnegative.
        self.W = np.maximum(0, self.W + self.learning_rate * gradient)
        self.learning_rate *= self.decay

    def compute_loss(self, V):
        """Mean squared reconstruction error per example."""
        return np.linalg.norm(V - np.dot(self.W, self.H)) ** 2 / self.n_examples
class SparseL2NonnegativeMatrixFactorization(NonnegativeMatrixFactorization):
    """
    Own implementation: sparse on H and L2 on W.
    """
    def __init__(self, n_features, n_examples, components, iterations, sparseness, l2, learning_rate, decay):
        NonnegativeMatrixFactorization.__init__(self, n_features, n_examples, components, iterations, "sparse L2")
        self.sparseness = sparseness  # L1 penalty weight on H's update
        self.learning_rate = learning_rate  # step size for W's gradient update
        self.decay = decay  # per-iteration multiplicative decay of the learning rate
        self.l2 = l2  # L2 penalty weight on W
        # Sparsify the random initialization: zero out entries below 0.5.
        self.W = np.where(self.W < 0.5, 0, self.W)
        self.H = np.where(self.H < 0.5, 0, self.H)

    def update_factors(self, V):
        # Sparsity-penalized multiplicative update of H.
        self.H *= np.dot(np.transpose(self.W), V) / (np.dot(np.dot(np.transpose(self.W), self.W), self.H)
                                                     + self.sparseness)
        # Gradient step on W with L2 shrinkage, then clip at zero.
        self.W += self.learning_rate * (np.dot(V - np.dot(self.W, self.H), self.H.transpose()) - self.l2 * self.W)
        self.W = np.maximum(0, self.W)
        self.learning_rate *= self.decay

    def compute_loss(self, V):
        # Mean squared reconstruction error per example (penalty terms are
        # not included in the reported loss).
        return np.linalg.norm(V - np.dot(self.W, self.H)) ** 2 / self.n_examples
def get_model(n_features, n_examples, conf):
    """Instantiate the NMF variant selected by ``conf["type"]``.

    Raises ValueError for an unknown type.
    """
    t = conf["type"]
    k = conf["components"]
    i = conf["iterations"]
    if t == "euclidean":
        logging.info("Creating nonnegative matrix factorization using Euclidean loss")
        return EuclideanLeeSeungNonnegativeMatrixFactorization(n_features, n_examples, k, i)
    elif t == "divergence":
        logging.info("Creating nonnegative matrix factorization using KL-Divergence loss")
        return DivergenceLeeSeungNonnegativeMatrixFactorization(n_features, n_examples, k, i)
    elif t == "sparse":
        logging.info("Creating nonnegative matrix factorization using Hoyer's sparse loss")
        s = conf["sparseness"]
        # Fix: the original line ended with a stray comma, which made the
        # learning rate a one-element tuple and broke the W update.
        l = conf["learning rate"]
        d = conf["learning rate decay"]
        return SparseHoyerNonnegativeMatrixFactorization(n_features, n_examples, k, i, s, l, d)
    elif t == "sparse-l2":
        logging.info("Creating nonnegative matrix factorization using own sparse + L2 loss")
        s = conf["sparseness"]
        l = conf["learning rate"]
        d = conf["learning rate decay"]
        l2 = conf["l2"]
        return SparseL2NonnegativeMatrixFactorization(n_features, n_examples, k, i, s, l2, l, d)
    else:
        raise ValueError("Invalid NMF type: {0}".format(conf["type"]))
class ProgressViz:
    """Live matplotlib view: a loss curve plus one image per column of W.

    NOTE(review): the subplot index arithmetic below relies on integer
    division with ``/`` -- this looks like Python 2 code; under Python 3 the
    result would be a float and break array indexing. Confirm the target
    interpreter version.
    """
    def __init__(self, model, n_rows, n_cols):
        plt.ion()  # interactive mode so the figure refreshes while training
        self.n_rows, self.n_cols = n_rows, n_cols
        self.n_comp = model.W.shape[1]
        self.sub_rows, self.sub_columns = self.determine_subplots()
        self.figure, self.axes = plt.subplots(self.sub_rows, self.sub_columns)
        self.figure.suptitle(u"Loss and components -- NMF w/ {0}".format(model.loss_name), size=10)
        # Top-left subplot shows the loss curve; the rest show components.
        self.ax_loss = self.axes[0, 0]
        self.ax_loss.set_title(u"Loss", size=8)
        self.lines, = self.ax_loss.plot([], [], u'o')
        self.images = []
        for i in range(self.sub_rows * self.sub_columns - 1):
            # Fill remaining subplots column-major, skipping the loss plot.
            sub_i, sub_j = (1 + i) % self.sub_rows, (1 + i) / self.sub_rows
            subplot = self.axes[sub_i, sub_j]
            if i < self.n_comp:
                self.images.append(subplot.imshow(self.prepare_image(model.W[:, i]), cmap=u"Greys"))
                subplot.set_title(u"W[:, %d]" % i, size=8)
                subplot.set_axis_off()
            else:
                # Disable empty subplots
                subplot.set_visible(False)
        self.ax_loss.set_autoscaley_on(True)
        self.ax_loss.set_xlim(0, model.iterations)
        self.ax_loss.grid()
        self.ax_loss.get_xaxis().set_visible(False)
        self.ax_loss.get_yaxis().set_visible(False)

    def determine_subplots(self):
        # Near-square grid large enough for the loss plot plus all components.
        nb_plots = self.n_comp + 1
        int_squared_root = int(np.sqrt(nb_plots))
        return int_squared_root, 1 + int(nb_plots / int_squared_root)

    def update_draw(self, iterations, losses, W):
        """Redraw the loss curve and component images with current values."""
        # Update loss
        self.lines.set_xdata(iterations)
        self.lines.set_ydata(losses)
        self.ax_loss.relim()
        self.ax_loss.autoscale_view()
        # Update mat' fact
        for i in range(self.n_comp):
            self.images[i].set_data(self.prepare_image(W[:, i]))
        self.figure.canvas.draw()
        self.figure.canvas.flush_events()

    def prepare_image(self, vec):
        # Reshape a flat column back to image dimensions; invert so high
        # activation renders dark under the "Greys" colormap.
        return 1. - vec.reshape((self.n_rows, self.n_cols))

    def wait_end(self):
        """Block until the user closes the figure window."""
        plt.ioff()
        plt.show()
def main(configuration):
    """Run NMF on the configured dataset with live progress visualization.

    ``configuration`` needs "seed", "data" and "nmf" sections.
    """
    # Fix: seed the RNG *before* loading the data; the original logged and
    # set the seed only after load_data had already shuffled the images, so
    # runs were not reproducible despite the configured seed.
    logging.info("Setting seed for random generator to %d", configuration["seed"])
    random.seed(configuration["seed"])
    data_matrix, n_rows, n_cols = load_data(configuration["data"])
    n_features, n_examples = data_matrix.shape
    logging.info("Data matrix dimensions: %d (features) by %d (examples)", n_features, n_examples)
    model = get_model(n_features, n_examples, configuration["nmf"])
    p_viz = ProgressViz(model, n_rows, n_cols)
    iterations, losses = [], []
    for i in range(model.iterations):
        model.update_factors(data_matrix)
        loss = model.compute_loss(data_matrix)
        logging.info("Iteration % 4d => loss: %f", i, loss)
        losses.append(loss)
        iterations.append(i + 1)
        p_viz.update_draw(iterations, losses, model.W)
    logging.info(u"Final loss: %f", model.compute_loss(data_matrix))
    p_viz.wait_end()
if __name__ == u"__main__":
    # Configure root logging, then run with the JSON configuration file
    # expected in the current working directory.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    with open("conf.json", "r") as cf:
        main(json.load(cf))
|
The Vintec V40SG2E 40 Bottles Single Zone Freestanding Wine Cooler maintains your wine collection at a stable climate with adjustable temperature and humidity control to preserve their flavour and shelf life.
Secures your wine at a stable climate with temperatures set between 6° - 18° and humidity control to preserve their shelf life and flavour.
Maintain a medium sized wine collection with a capacity to hold up to 40 standard 750ml Bordeaux bottles.
Enjoys an energy efficient design using only 1kWh per day so you can keep it running year round.
|
import datetime
import isodate
from collections import defaultdict
from django import http
from django.conf import settings
from django.shortcuts import render, redirect
from django.utils import timezone
from django.utils.http import urlquote
from session_csrf import anonymous_csrf
from crashstats.crashstats import models
from crashstats.crashstats.decorators import (
check_days_parameter,
pass_default_context,
)
from crashstats.supersearch.form_fields import split_on_operator
from crashstats.supersearch.models import (
SuperSearchUnredacted,
)
def get_date_boundaries(parameters):
    """Return the date boundaries in a set of parameters.

    Return a tuple with 2 datetime objects, the first one is the lower bound
    date and the second one is the upper bound date.
    """
    default_date_range = datetime.timedelta(days=7)
    greater_than = None  # lower bound, taken from '>' filters
    lower_than = None  # upper bound, taken from '<' filters
    if not parameters.get('date'):
        lower_than = timezone.now()
        greater_than = lower_than - default_date_range
    else:
        for param in parameters['date']:
            value = isodate.parse_datetime(split_on_operator(param)[1])
            # Keep the smallest upper bound and the largest lower bound.
            if '<' in param:
                if lower_than is None or value < lower_than:
                    lower_than = value
            if '>' in param:
                if greater_than is None or value > greater_than:
                    greater_than = value
        if not lower_than:
            # add a lower than that is now
            lower_than = timezone.now()
        if not greater_than:
            # add a greater than that is lower_than minus the date range
            greater_than = lower_than - default_date_range
    return (greater_than, lower_than)
def get_topcrashers_results(**kwargs):
    '''Return the raw SuperSearch response for a top-crashers query.

    The signature facets are annotated in place with rank, percentage,
    per-platform/GC/plugin/hang counts, and rank deltas against the
    immediately preceding date range of the same length.
    '''
    results = []
    params = kwargs
    # Aggregate crashes by signature, faceted on the fields we report on.
    params['_aggs.signature'] = [
        'platform',
        'is_garbage_collecting',
        'hang_type',
        'process_type',
        '_histogram.uptime',
    ]
    params['_histogram_interval.uptime'] = 60
    # We don't care about no results, only facets.
    params['_results_number'] = 0
    if params.get('process_type') in ('any', 'all'):
        params['process_type'] = None
    api = SuperSearchUnredacted()
    search_results = api.get(**params)
    if search_results['total'] > 0:
        results = search_results['facets']['signature']
        platforms = models.Platforms().get_all()['hits']
        platform_codes = [
            x['code'] for x in platforms if x['code'] != 'unknown'
        ]
        for i, hit in enumerate(results):
            hit['signature'] = hit['term']
            hit['rank'] = i + 1
            hit['percent'] = 100.0 * hit['count'] / search_results['total']
            # Number of crash per platform.
            for platform in platform_codes:
                hit[platform + '_count'] = 0
            sig_platforms = hit['facets']['platform']
            for platform in sig_platforms:
                # Match facet terms to platform codes by their 3-letter prefix.
                code = platform['term'][:3].lower()
                if code in platform_codes:
                    hit[code + '_count'] = platform['count']
            # Number of crashes happening during garbage collection.
            hit['is_gc_count'] = 0
            sig_gc = hit['facets']['is_garbage_collecting']
            for row in sig_gc:
                if row['term'].lower() == 't':
                    hit['is_gc_count'] = row['count']
            # Number of plugin crashes.
            hit['plugin_count'] = 0
            sig_process = hit['facets']['process_type']
            for row in sig_process:
                if row['term'].lower() == 'plugin':
                    hit['plugin_count'] = row['count']
            # Number of hang crashes.
            hit['hang_count'] = 0
            sig_hang = hit['facets']['hang_type']
            for row in sig_hang:
                # Hangs have weird values in the database: a value of 1 or -1
                # means it is a hang, a value of 0 or missing means it is not.
                if row['term'] in (1, -1):
                    hit['hang_count'] += row['count']
            # Number of startup crashes.
            # NOTE(review): 'startup_percent' is initialized but never
            # updated; only the boolean 'startup_crash' is set below. Looks
            # like the ratio was meant to be stored -- confirm before
            # relying on this field.
            hit['startup_percent'] = 0
            sig_startup = hit['facets']['histogram_uptime']
            for row in sig_startup:
                if row['term'] == 0:
                    ratio = 1.0 * row['count'] / hit['count']
                    hit['startup_crash'] = ratio > 0.5
        # Run the same query but for the previous date range, so we can
        # compare the rankings and show rank changes.
        dates = get_date_boundaries(params)
        delta = (dates[1] - dates[0]) * 2
        params['date'] = [
            '>=' + (dates[1] - delta).isoformat(),
            '<' + dates[0].isoformat()
        ]
        params['_aggs.signature'] = [
            'platform',
        ]
        previous_range_results = api.get(**params)
        total = previous_range_results['total']
        compare_signatures = {}
        if total > 0 and 'signature' in previous_range_results['facets']:
            signatures = previous_range_results['facets']['signature']
            for i, hit in enumerate(signatures):
                compare_signatures[hit['term']] = {
                    'count': hit['count'],
                    'rank': i + 1,
                    'percent': 100.0 * hit['count'] / total
                }
        for hit in results:
            sig = compare_signatures.get(hit['term'])
            if sig:
                hit['diff'] = hit['percent'] - sig['percent']
                hit['rank_diff'] = hit['rank'] - sig['rank']
                hit['previous_percent'] = sig['percent']
            else:
                # Signature did not appear in the previous range.
                hit['diff'] = 'new'
                hit['rank_diff'] = 0
                hit['previous_percent'] = 0
    return search_results
@pass_default_context
@anonymous_csrf
@check_days_parameter([1, 3, 7, 14, 28], default=7)
def topcrashers(request, days=None, possible_days=None, default_context=None):
    """Render the Top Crashers report for a product/version.

    Query-string parameters: product, version, process_type, platform,
    _facets_size and _tcbs_mode; invalid values fall back to defaults.
    Raises Http404 for an unknown product or version.
    """
    context = default_context or {}
    product = request.GET.get('product')
    versions = request.GET.get('version')
    crash_type = request.GET.get('process_type')
    os_name = request.GET.get('platform')
    result_count = request.GET.get('_facets_size')
    tcbs_mode = request.GET.get('_tcbs_mode')
    if not tcbs_mode or tcbs_mode not in ('realtime', 'byday'):
        tcbs_mode = 'realtime'
    if product not in context['releases']:
        raise http.Http404('Unrecognized product')
    if not versions:
        # :(
        # simulate what the nav.js does which is to take the latest version
        # for this product.
        # NOTE(review): assumes a featured release exists for the product;
        # if none is found, execution falls through with versions unset.
        for release in context['currentversions']:
            if release['product'] == product and release['featured']:
                url = '%s&version=%s' % (
                    request.build_absolute_uri(), urlquote(release['version'])
                )
                return redirect(url)
    else:
        versions = versions.split(';')
    if len(versions) == 1:
        context['version'] = versions[0]
    release_versions = [x['version'] for x in context['releases'][product]]
    if context['version'] not in release_versions:
        raise http.Http404('Unrecognized version')
    if tcbs_mode == 'realtime':
        end_date = datetime.datetime.utcnow().replace(microsecond=0)
    elif tcbs_mode == 'byday':
        # Midnight UTC so whole days are compared.
        end_date = datetime.datetime.utcnow().replace(
            hour=0, minute=0, second=0, microsecond=0
        )
    if crash_type not in settings.PROCESS_TYPES:
        crash_type = 'browser'
    context['crash_type'] = crash_type
    os_api = models.Platforms()
    operating_systems = os_api.get()
    if os_name not in (os_['name'] for os_ in operating_systems):
        os_name = None
    context['os_name'] = os_name
    # set the result counts filter in the context to use in
    # the template. This way we avoid hardcoding it twice and
    # have it defined in one common location.
    context['result_counts'] = settings.TCBS_RESULT_COUNTS
    if result_count not in context['result_counts']:
        result_count = settings.TCBS_RESULT_COUNTS[0]
    context['result_count'] = result_count
    context['query'] = {
        'mode': tcbs_mode,
        'end_date': end_date,
        'start_date': end_date - datetime.timedelta(days=days),
    }
    api_results = get_topcrashers_results(
        product=product,
        version=context['version'],
        platform=os_name,
        process_type=crash_type,
        date=[
            '<' + end_date.isoformat(),
            '>=' + context['query']['start_date'].isoformat()
        ],
        _facets_size=result_count,
    )
    tcbs = api_results['facets']['signature']
    count_of_included_crashes = 0
    signatures = []
    # Only the first result_count signatures are summed/shown.
    for crash in tcbs[:int(result_count)]:
        signatures.append(crash['signature'])
        count_of_included_crashes += crash['count']
    context['number_of_crashes'] = count_of_included_crashes
    context['total_percentage'] = api_results['total'] and (
        100.0 * count_of_included_crashes / api_results['total']
    )
    bugs = defaultdict(list)
    api = models.Bugs()
    if signatures:
        for b in api.get(signatures=signatures)['hits']:
            bugs[b['signature']].append(b['id'])
    for crash in tcbs:
        crash_counts = []
        # Due to the inconsistencies of OS usage and naming of
        # codes and props for operating systems the hacky bit below
        # is required. Socorro and the world will be a better place
        # once https://bugzilla.mozilla.org/show_bug.cgi?id=790642 lands.
        for operating_system in operating_systems:
            if operating_system['name'] == 'Unknown':
                # not applicable in this context
                continue
            os_code = operating_system['code'][0:3].lower()
            key = '%s_count' % os_code
            crash_counts.append([crash[key], operating_system['name']])
        # OS with the highest crash count for this signature.
        crash['correlation_os'] = max(crash_counts)[1]
        sig = crash['signature']
        if sig in bugs:
            if 'bugs' in crash:
                crash['bugs'].extend(bugs[sig])
            else:
                crash['bugs'] = bugs[sig]
        if 'bugs' in crash:
            # Newest bug IDs first.
            crash['bugs'].sort(reverse=True)
    context['tcbs'] = tcbs
    context['days'] = days
    context['possible_days'] = possible_days
    context['total_crashing_signatures'] = len(signatures)
    context['total_number_of_crashes'] = api_results['total']
    context['process_type_values'] = (
        x for x in settings.PROCESS_TYPES if x != 'all'
    )
    context['platform_values'] = settings.DISPLAY_OS_NAMES
    return render(request, 'topcrashers/topcrashers.html', context)
|
Anushka Sharma and Virat Kohli are one of the most popular and loved couples of today’s time. The duo stands for fearless love and made quite a strong statement by getting married at the peak of their careers paving way for many young couples in both, the cricket world and Bollywood to take the big leap of faith. Recently, Anushka was in Jaipur to promote Sui Dhaaga: Made in India with Varun Dhawan and the actress was received with the crowd cheering Virat Kohli’s name.
The actress couldn’t stop blushing as the chants just got louder with time and she replied with a big smile on her face, “Ji, ji. Haan, haan. Sabko unse prem hai, mujhe bhi hai... Sabko unki yaad aa rahi hai, mujhe bhi aa rahi hai...” Sweet, isn’t it? Take a look at the video.
Anushka and Varun are currently touring around different cities promoting their upcoming film Sui Dhaaga: Made in India ,which will hit the theatres on September 28 this year. While Anushka is busy promoting her film, Virat is currently in England playing a series of matches.
|
# Copyright 2020 Richard Koshak
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains RPI GPIO sensors, actuators, and connections.
Classes:
- RpiGpioSensor: Reports on the state of a GPIO Pin.
- RpiGpioActuator: Sets a pin to HIGH or LOW on command.
"""
from time import sleep
from configparser import NoOptionError
import RPi.GPIO as GPIO
from core.sensor import Sensor
from core.actuator import Actuator
from core.utils import parse_values
# Set to use BCM numbering.
# Import-time side effect: every pin number used by the classes below is BCM.
GPIO.setmode(GPIO.BCM)
class RpiGpioSensor(Sensor):
    """Publishes the current state of a configured GPIO pin."""

    def __init__(self, publishers, params):
        """Initializes the connection to the GPIO pin and, if "EventDetection"
        is defined and valid, subscribes for events. If missing, the "Poll"
        parameter must be defined and > 0. By default it will publish
        CLOSED/OPEN for 0/1, which can be overridden by "Values", a comma
        separated list of two parameters: the first one is CLOSED and the
        second one is OPEN.

        Parameters:
            - "Pin": the GPIO pin in BCM numbering
            - "Values": Alternative values to publish for 0 and 1, defaults to
              CLOSED and OPEN for 0 and 1 respectively.
            - "PUD": Pull up or down setting, if "UP" uses PULL_UP, all other
              values result in PULL_DOWN.
            - "EventDetection": when set instead of depending on sensor_reporter
              to poll it will rely on the event detection built into the GPIO
              library. Valid values are "RISING", "FALLING" and "BOTH". When not
              defined "Poll" must be set to a positive value.

        Raises ValueError when polling and event detection are configured
        inconsistently (both off, or both on).
        """
        super().__init__(publishers, params)
        # params is a lookup callable (configparser-backed) returning strings.
        self.pin = int(params("Pin"))
        # Allow users to override the 0/1 pin values.
        self.values = parse_values(params, ["CLOSED", "OPEN"])
        self.log.debug("Configured %s for CLOSED and %s for OPEN", self.values[0], self.values[1])
        pud = GPIO.PUD_UP if params("PUD") == "UP" else GPIO.PUD_DOWN
        GPIO.setup(self.pin, GPIO.IN, pull_up_down=pud)
        # Set up event detection.
        try:
            event_detection = params("EventDetection")
            event_map = {"RISING": GPIO.RISING, "FALLING": GPIO.FALLING, "BOTH": GPIO.BOTH}
            if event_detection not in event_map:
                self.log.error("Invalid event detection specified: %s, one of RISING,"
                               " FALLING, BOTH or NONE are the only allowed values. "
                               "Defaulting to NONE",
                               event_detection)
                event_detection = "NONE"
        except NoOptionError:
            # event_map is only bound when "EventDetection" was present;
            # with "NONE" it is never used below.
            self.log.info("No event detection specified, falling back to polling")
            event_detection = "NONE"
        if event_detection != "NONE":
            GPIO.add_event_detect(self.pin, event_map[event_detection],
                                  callback=lambda channel: self.check_state())
        self.state = GPIO.input(self.pin)
        self.destination = params("Destination")
        # self.poll is set by the Sensor base class -- presumably from a
        # "Poll" parameter; confirm against core.sensor.
        if self.poll < 0 and event_detection == "NONE":
            raise ValueError("Event detection is NONE but polling is OFF")
        if self.poll > 0 and event_detection != "NONE":
            raise ValueError("Event detection is {} but polling is {}"
                             .format(event_detection, self.poll))
        self.log.info("Configured RpiGpioSensor: pin %d on destination %s with PUD %s"
                      " and event detection %s", self.pin, self.destination, pud,
                      event_detection)
        # We've a first reading so publish it.
        self.publish_state()

    def check_state(self):
        """Checks the current state of the pin and if it's different from the
        last state publishes it. With event detection this method gets called
        when the GPIO pin changed states. When polling this method gets called
        on each poll.
        """
        value = GPIO.input(self.pin)
        if value != self.state:
            self.log.info("Pin %s changed from %s to %s", self.pin, self.state, value)
            self.state = value
            self.publish_state()

    def publish_state(self):
        """Publishes the current state of the pin."""
        # Map LOW to the first configured value (CLOSED by default).
        msg = self.values[0] if self.state == GPIO.LOW else self.values[1]
        self._send(msg, self.destination)

    def cleanup(self):
        """Disconnects from the GPIO subsystem."""
        GPIO.cleanup()
class RpiGpioActuator(Actuator):
    """Allows for setting a GPIO pin to high or low on command. Also supports
    toggling.
    """

    def __init__(self, connections, params):
        """Initializes the GPIO subsystem and sets the pin to the InitialState.
        If InitialState is not provided in params it defaults to GPIO.LOW
        (the original docstring incorrectly said HIGH). If "Toggle" is "True"
        any message will result in the pin being pulsed.

        Parameters:
            - "Pin": The GPIO pin in BCM numbering
            - "InitialState": The pin state to set when coming online, defaults
              to "OFF".
            - "Toggle": Optional parameter that when set to "True" causes any
              message received to result in pulsing the pin; defaults to False.
        """
        super().__init__(connections, params)
        self.pin = int(params("Pin"))
        GPIO.setup(self.pin, GPIO.OUT)
        out = GPIO.LOW
        try:
            out = GPIO.HIGH if params("InitialState") == "ON" else GPIO.LOW
        except NoOptionError:
            pass
        GPIO.output(self.pin, out)
        # Fix: params() returns a string and bool("False") is True; compare
        # the text instead. Also treat a missing "Toggle" option as False
        # (the original raised NoOptionError), matching InitialState handling.
        try:
            self.toggle = params("Toggle").lower() == "true"
        except NoOptionError:
            self.toggle = False
        self.log.info("Configured RpiGpioActuator: pin %d on destination %s with "
                      "toggle %s", self.pin, self.cmd_src, self.toggle)

    def on_message(self, msg):
        """Called when the actuator receives a message. If Toggle is not enabled
        sets the pin to HIGH if the message is ON and LOW if the message is OFF.
        With Toggle enabled any message pulses the pin LOW then HIGH.
        """
        self.log.info("Received command on %s: %s Toggle = %s Pin = %d",
                      self.cmd_src, msg, self.toggle, self.pin)
        # Toggle on then off.
        if self.toggle:
            self.log.info("Toggling pin %s HIGH to LOW", self.pin)
            GPIO.output(self.pin, GPIO.LOW)
            sleep(.5)
            self.log.info("Toggling pin %s LOW to HIGH", self.pin)
            GPIO.output(self.pin, GPIO.HIGH)
        # Turn ON/OFF based on the message.
        else:
            out = None
            if msg == "ON":
                out = GPIO.HIGH
            elif msg == "OFF":
                out = GPIO.LOW
            # Fix: use identity comparison with None (PEP 8) instead of ==.
            if out is None:
                self.log.error("Bad command %s", msg)
            else:
                self.log.info("Setting pin %d to %s", self.pin,
                              "HIGH" if out == GPIO.HIGH else "LOW")
                GPIO.output(self.pin, out)
|
with. Lets hope you will want it.
with graphics gallery make sure you abide by this specific hyperlink.
We hope you can find what you need here. We always try to show a picture in HD resolution, or at least with clear images. Mlm Business Plan Template Agriculture Business Plan Template Free – Farm can be helpful inspiration for those who seek images in specific categories; you can find them on this site. Finally, we hope all the pictures displayed on this site will inspire you.
|
"""
Overview
========
Show errors in html files by running Tidy.
Extern dependencies:
Html Tidy
Key-Commands
============
Namespace: tidy
Mode: HTML
Event: <Key-h>
Description: Run Tidy on the current html file and display
a dialog window with all encountered errors. When the dialog
window is shown with the errors it is possible to jump to the
error line by pressing <Return>.
Commands
========
Command: html_errors()
Description: Same as the keycommand <Key-h>.
"""
from subprocess import Popen, STDOUT, PIPE
from vyapp.widgets import LinePicker
from vyapp.areavi import AreaVi
from vyapp.plugins import ENV
from vyapp.app import root
from re import findall
import sys
class HtmlChecker(object):
    """Run Html Tidy over the file loaded in an AreaVi and report errors."""

    PATH = 'tidy'

    def __init__(self, area):
        self.area = area

    def check(self):
        """Run tidy on the current file and show any reported problems."""
        process = Popen([self.PATH, '--show-body-only', '1', '-e', '-quiet',
                         self.area.filename], stdout=PIPE, stderr=STDOUT,
                        encoding=self.area.charset)
        output = process.communicate()[0]

        # Each tidy diagnostic looks like: "line N column M - message".
        matches = findall('line ([0-9]+) column ([0-9]+) - (.+)', output)
        ranges = [(self.area.filename, lnum, msg) for lnum, _, msg in matches]

        sys.stdout.write('Errors:\n%s\n' % output)
        self.area.chmode('NORMAL')

        if not process.returncode:
            root.status.set_msg('No errors!')
        else:
            self.display(ranges)

    def display(self, ranges):
        """Pop up a line picker listing the error locations."""
        root.status.set_msg('Errors were found!')
        options = LinePicker()
        options(ranges)
def install(area):
    """Bind the tidy checker to <Key-h> in HTML mode for the given area."""
    checker = HtmlChecker(area)
    area.install('tidy', ('HTML', '<Key-h>', lambda event: checker.check()))
def html_errors():
    """Run Tidy on the active area; same behavior as the <Key-h> keycommand."""
    HtmlChecker(AreaVi.ACTIVE).check()

# Expose the command in the vyapp environment.
ENV['html_errors'] = html_errors
|
Posted on 4th April 2018 at 7:10 am.
Cape Town is a city close to my heart and after spending time there year after year, there are certain things to do that we’ve either discovered and love, or are old favourites that you just can’t miss if you ever get the chance to travel to the beautiful cape.
Stunning panoramic views over Cape Town.
Although taking the cable car up table mountain is an experience in itself, the short hour walk to the top of Lion’s head is filled with 360 degree views over the whole city, along with exciting step ladders and ropes to help with the climb and once at the top, an absolutely stunning view over the whole city, including the beautiful table mountain, standing strong. If you are visiting over a ‘full moon’ then climbing Lions Head on this night is an absolutely must. Hundreds if not thousands of people scramble their way to the top to catch a glimpse of Cape Town under the light of a the full moon. An adventure you have to experience.
Or better still walk up Lions head and paraglide down!
This world-renowned botanical garden is nestled at the eastern foot of Table Mountain. It lives up to its reputation as the most beautiful garden in Africa, and not many others around the world can match the sheer grandeur of the national reserve against the slopes of the mountain.
Take a picnic and relax to Kirstenbosch Summer Sunset concert surrounded by the stunning scenery of the Cape Town Mountains.
This really is a place for all the family, with wooden board-walks built up across the penguins home, you can watch them living their lives, building nests, integrating with mates, you can even see the eggs hatch if you’re there in the right season. Then at Boulder’s Beach, where you can sunbath, rock climb and swim in the sea, the penguins are free to roam around so you might even get the luck chance to swim with the penguins.
Food, shops, restaurants and market.
Located in Woodstock in buzzy downtown Cape Town, The Old Biscuit Mill really is the place to be on any evening of the week as it is filled with both local and Michelin star restaurants, ready to tickle your fancy. Along with this, is the Saturday morning buzz of all the local produce and restaurants arriving to set up their stands and fill the area with mini cafes, hot and cold food stalls, along with biltong making, chocolate tasting, soap trying, vegetable markets, you name it, it’s there on a Saturday morning.
For the Nightlife – clubs and bars.
Long Street is the backpacker central of Cape Town, it is buzzy and loud and full of exciting atmosphere. With famous hostels all along the street and crazy pubs and bars on every corner, it is the young soul of Cape Town and the place to be when travelling.
Windy walks and kite flying.
flying. We have huge 3 meter trick kites that lift you off the ground and fill up a whole day with fun-filled activity.
Camps Bay, along with being an the ideal beach location, with the sun staying shining on the sand until late, is also a great evening destination. Camps Bay high street runs along side the beach with bars and restaurants looking over the vast ocean, absolutely perfect for watching the sunset in the evening with a glass of wine. All the restaurants are always full so the atmosphere is always on point and the seafood couldn’t be fresher.
So many places. So many choices.
Over the many years of visiting Cape Town we have been able to experience a wide range of restaurants.
We would like to share below our all time favourites, Cape Town wouldn’t be the same without them!
The Pot Luck Club, situated on the top floor of the Silo of the Old Biscuit Mill on Albert Road, Woodstock opened February 14th 2013. It has been dubbed the coolest place to be in Cape Town and its success is based on innovative cuisine, served in a relaxed yet edgy environment in a part of Cape Town that has never attracted an evening dining crowd.
Luke collaborated with renowned Cape Town based architect Greg Scot along with some of the city’s foremost art and design personalities.
Luke Dale-Roberts is a name synonymous with the finest, most creative and innovative food in South Africa. Since coming to Cape Town, Luke has consistently proven his mettle with a string of awards and accolades.
A luxury Winelands experience at Delaire Graff Estate, a world-leading destination for wine, art and hospitality in an unrivalled setting. Designed with the discerning global traveller in mind, savour breathtaking, uninterrupted views of the Stellenbosch Valley.
Nestled between majestic mountains and overlooking the vineyards of Stellenbosch, the incredible views provide the perfect backdrop to our outstanding restaurants, state-of-the-art winery, exclusive Lodges, exuberant landscaping, destination Spa and luxury boutiques.
Scot Kirton, executive chef of La Colombe in Constantia, Cape Town, has earned the title S.Pellegrino Chef of the Year.
There’s a playfulness to the menu, especially in terms of presentation. Take their signature dish, the tuna tataki served in a can, which also made an appearance on the menu at the Sunday lunch awards ceremony. It’s clear that a lot of thought, hard work, and more than a touch of fun goes into designing the menu.
Greenhouse is the flagship restaurant of The Collection by Liz McGrath, and a celebration of local flavours fashioned into an exquisite experience by the genius of the culinary team working here.
Greenhouse is a place where local ingredients and great African fables come together in a tasting menu that takes guests on a unique culinary experience. It’s why we were ranked number four in the 2015 Eat Out Mercedes-Benz Restaurant Awards’ list of South Africa’s top 10 restaurants, along with the Bsochendal Style Award.
Menus change with every season, and guests are guaranteed fresh, seasonal, local ingredients wherever possible, and often from the Cellars garden. Let the expert sommelier assist you with pairing your meal with the perfect wine for the ultimate dining experience.
To get a sense of the real culture.
In order to get a true representation of the history of such a country, you have to explore back to its roots and for South Africa, this Island located in Table Bay, is hugely significant both to the nation and it’s people. Robben Island is where Nelson Mandela spent 18 of his 27 years in prison, and visiting this tiny cell and the rest of the prison island will allow you a true insight into South Africa’s struggles.
Its on everyone’s list of ‘things to do in Cape Town’ but it really is an opportunity that you can not miss out on. Take the 360 degree cable car up the side of the mountain to reach the table top. Leisurely walk around and explore the wildlife and take in the views of the surrounding Cape. The best time to go is just before sunset. Take a picnic and watch the sun go from the top of the mountain and Cape Town transform from day to night.
|
# encoding: utf-8
"""
Admin site configurations for verify_student.
"""
from config_models.admin import ConfigurationModelAdmin
from ratelimitbackend import admin
from lms.djangoapps.verify_student.models import (
IcrvStatusEmailsConfiguration,
SkippedReverification,
SoftwareSecurePhotoVerification,
VerificationStatus,
)
class SoftwareSecurePhotoVerificationAdmin(admin.ModelAdmin):
    """
    Admin for the SoftwareSecurePhotoVerification table.
    """
    # Columns shown on the change-list page.
    list_display = ('id', 'user', 'status', 'receipt_id', 'submitted_at', 'updated_at')
    # Raw-id widgets avoid rendering a select box over the whole User table.
    raw_id_fields = ('user', 'reviewing_user')
    search_fields = (
        'receipt_id', 'user__username'
    )
class VerificationStatusAdmin(admin.ModelAdmin):
    """
    Admin for the VerificationStatus table.
    """
    list_display = ('timestamp', 'user', 'status', 'checkpoint')
    readonly_fields = ()
    search_fields = ('checkpoint__checkpoint_location', 'user__username')
    raw_id_fields = ('user',)

    def get_readonly_fields(self, request, obj=None):
        """When editing an existing record, all fields should be read-only.

        VerificationStatus records should be immutable; to change the user's
        status, create a new record with the updated status and a more
        recent timestamp.
        """
        # Add form (no obj yet): keep the default read-only set.
        if not obj:
            return self.readonly_fields
        # Change form: lock down every field.
        return self.readonly_fields + ('status', 'checkpoint', 'user', 'response', 'error')
class SkippedReverificationAdmin(admin.ModelAdmin):
    """Admin for the SkippedReverification table. """
    # Columns shown on the change-list page.
    list_display = ('created_at', 'user', 'course_id', 'checkpoint')
    # Raw-id widget avoids rendering a select box over the whole User table.
    raw_id_fields = ('user',)
    # These identify the skip record and must not be edited after creation.
    readonly_fields = ('user', 'course_id')
    search_fields = ('user__username', 'course_id', 'checkpoint__checkpoint_location')

    def has_add_permission(self, request):
        """Skipped verifications can't be created in Django admin. """
        return False
# Register the verify_student models with their customized admin classes.
admin.site.register(SoftwareSecurePhotoVerification, SoftwareSecurePhotoVerificationAdmin)
admin.site.register(SkippedReverification, SkippedReverificationAdmin)
admin.site.register(VerificationStatus, VerificationStatusAdmin)
admin.site.register(IcrvStatusEmailsConfiguration, ConfigurationModelAdmin)
|
Passing gas in a store aisle and walking away. Then watching people linger through it.
My husband's favorite pastime is "aisle dusting" strangers in the local Wal-Mart.
Get a aisle dusting mug for your cat Bob.
|
#!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# IMPORTANT NOTE: If you make local mods to this file, you must run:
# % pnacl/build.sh driver
# in order for them to take effect in the scons build. This command
# updates the copy in the toolchain/ tree.
#
from driver_env import env
import driver_tools
import filetype
import pathtools
EXTRA_ENV = {
'ARGS' : '',
}
PATTERNS = [
('(.*)', "env.append('ARGS', $0)"),
]
def main(argv):
    """Run the PNaCl ABI checker over the given driver arguments.

    The last argument is taken to be the input file; if it is PNaCl
    bitcode, the checker is told to expect the pnacl bitcode format.
    Returns 0 on completion of the driver run.
    """
    env.update(EXTRA_ENV)
    driver_tools.ParseArgs(argv, PATTERNS)
    args = env.get('ARGS')
    # Renamed from "input" so the builtin of the same name is not shadowed.
    input_file = pathtools.normalize(args[-1])
    if filetype.IsPNaClBitcode(input_file):
        env.append('ARGS', '--bitcode-format=pnacl')
    driver_tools.Run('"${PNACL_ABICHECK}" ${ARGS}')
    return 0
# Don't just call the binary with -help because most of those options are
# completely useless for this tool.
def get_help(unused_argv):
    """Return the static usage text for pnacl-abicheck."""
    usage = """
USAGE: pnacl-abicheck <input bitcode>
If <input bitcode> is -, then standard input will be read.
"""
    return usage
|
Treatments are specifically formulated to go beyond cleansing, toning and moisturizing. They address particular areas of concern such as puffy eyes, blemishes, rosacea, chronic redness, and inflamed capillaries. From firming the skin to fine lines at the eye, throat and neck areas they are a must for smooth, radiant skin.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Convert GWpy example python files into rst files for sphinx documentation
"""
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org'
import sys
import os
import argparse
import re
# Maps python dunder metadata names (e.g. __author__) to the sphinx
# directive they should be emitted as in the generated rst.
METADATA = {
    'author': 'sectionauthor',
    'currentmodule': 'currentmodule',
}

# -----------------------------------------------------------------------------
# parse command line

parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('infile', metavar='example.py',
                    help='python file to convert',)
# outfile is optional; without it the rst is printed to stdout.
parser.add_argument('outfile', metavar='example.rst', nargs='?',
                    help='rst file to write, default: print to screen')
args = parser.parse_args()
# -----------------------------------------------------------------------------
# parse python file

# Cross-reference label built from the last two path components,
# e.g. docs/examples/foo.py -> 'examples-foo'.
ref = '-'.join(os.path.splitext(args.infile)[0].split(os.path.sep)[-2:])

# Fix: open in text mode ('r', not 'rb') -- the lines are compared against
# str literals below, which raises TypeError on bytes under python 3.  The
# context manager also guarantees the handle is closed.
with open(args.infile, 'r') as fileobj:
    lines = fileobj.read().splitlines()

output = []
header = ['.. _example-%s:\n' % ref]
indoc = False    # inside a """ block quote
incode = False   # inside a code block
reset = True     # first plot directive must reset the plot context

# Fix: plain iteration -- the enumerate() index was never used.
for line in lines:
    # skip file header
    if len(output) == 0 and line.startswith('#'):
        continue
    # hide lines explicitly marked '# hide'
    if line.endswith('# hide'):
        continue
    # find block docs
    if line.startswith('"""'):
        indoc = not indoc
        line = line.strip('"')
    # skip empty lines not in a block quote
    if not line and not indoc:
        if output:
            output.append('')
        continue
    # a doc/comment/metadata line ends any open code block
    if incode and line.startswith(('"', '#', '__')):
        incode = False
        output.append('')
    # comments
    if line.startswith('#'):
        output.append(line[2:])
    # metadata, e.g. __author__ = '...' -> .. sectionauthor:: ...
    elif line.startswith('__'):
        key, value = map(lambda x: x.strip(' _="\'').rstrip(' _="\''),
                         line.split('=', 1))
        if key in METADATA:
            header.append('.. %s:: %s\n' % (METADATA[key], value))
    # block quote
    elif indoc:
        output.append(line.strip('"').rstrip('"'))
    # code
    else:
        if not incode:
            # Fix: directive options and code must be indented three spaces
            # to sit inside the '.. plot::' directive body.
            output.extend(('', '.. plot::', '   :include-source:'))
            if reset:
                output.append('   :context: reset')
                reset = False
            else:
                output.append('   :context:')
            output.append('')
        output.append('   %s' % line)
        incode = True
    # end block quote
    if line.endswith('"""') and indoc:
        output.append('')
        indoc = False

# underline the title when it is the only line emitted so far
if len(output) == 1:
    output.append('#'*len(output[0]))

output = header + output

if args.outfile:
    with open(args.outfile, 'w') as fileobj:
        fileobj.write('\n'.join(output))
else:
    print('\n'.join(output))
|
Quick Links: Aluminum oil filter housing part #? Adding backup sensors Touch screen protector... Sun Shade Power Outlet Installation Plastic bedliner part missing?
equin, BTBAKER, NewImprovedRon and 1 other person like this.
equin and NewImprovedRon like this.
Line-X and bed mat in my Crewmax. The bed mat keeps stuff from sliding.
This seems like one of the best set-ups going. I might have to trash the plastic liner and step up to the bedrug plate.
The BedRug is soo much easier on the knees than the plastic liner I had.
jtwags and NewImprovedRon like this.
|
import os
from apidoc.object.config import Config as ConfigObject
from apidoc.service.parser import Parser
from apidoc.service.merger import Merger
from apidoc.service.validator import Validator
from apidoc.lib.util.decorator import add_property
@add_property("validator", Validator)
@add_property("parser", Parser)
@add_property("merger", Merger)
class Config():
    """Create config objects
    """

    def load_from_file(self, config_file):
        """Load a config object from a file
        """
        raw = self.parser.load_from_file(config_file)
        self.validator.validate_config(raw)

        if raw is None or raw == {}:
            config = ConfigObject()
        else:
            config = self.merger.merge_configs(ConfigObject(), [raw])

        self.fix_all_path(config, os.path.dirname(config_file))
        return config

    def fix_all_path(self, config, root_path):
        """Fix config's content's relative path by injecting config location
        """
        locations = config["input"]["locations"]
        if locations is not None:
            config["input"]["locations"] = [
                self.fix_path(location, root_path) for location in locations
            ]

        output = config["output"]
        if output["location"] not in ("stdout", ):
            output["location"] = self.fix_path(output["location"], root_path)
        if output["template"] not in ("default", ):
            output["template"] = self.fix_path(output["template"], root_path)

    def fix_path(self, path, root_path):
        """Fix a relative path
        """
        # Leave None and already-existing paths untouched.
        if path is None or os.path.exists(path):
            return path
        return os.path.realpath(os.path.join(root_path, path))
|
The threshold question in a tort action is whether the alleged tortfeasor owed a duty of care to the injured party. Addressing the proposition that maintaining the bus shelters and bus stops is still a City responsibility, the court notes that, prior to the enactment of NYCAC § 7-210, it was the City that usually had the responsibility for maintaining the bus shelters.
We are constrained to agree with the courts below that section 7-210 does not impose civil liability on property owners for injuries that occur in City-owned tree wells. In reaching this result, we are guided by the principle that legislative enactments in derogation of common law, and especially those creating liability where none previously existed, must be strictly construed.
M Realty and A Realty have raised the question of whether or not the enactment of NYCAC § 7-210 has also imposed the duty on them to keep bus shelters or bus stops and the immediate area around them clear of ice and snow, or does this duty still remain with the City.
NYCAC § 7-210 does not specifically state that abutting landowners are responsible for the maintenance of bus stops and bus shelters on sidewalks. As a matter of law, based on the determination of the Court of Appeals in Vucetovic (case law), this court finds that NYCAC § 7-210 does not impose the duty upon abutting landowners the responsibility for maintenance of snow and ice at a bus stop or bus shelter. However, the record does indicate that some sort of maintenance of snow and ice occurred at and near the bus stop where plaintiff was injured. A triable question of fact has been raised as to what entity cleared the three foot path at the bus stop, that being the City or one of the abutting landowners. Though an abutting landowner may not have the duty to remove snow and ice, where the attempted removal makes the condition more hazardous, liability could be imposed.
MABSTOA is a subsidiary of co-defendant NYCTA and is not a City agency or an entity synonymous with the City. Counsel for MABSTOA and NYCTA cites to numerous cases for the proposition that, while it is their duty to deliver a bus passenger safely to his/her destination, it is not their duty to maintain the bus stop or the area surrounding the bus stop. Their duty ceases once the passenger steps off the bus. None of the other co-defendants oppose MABSTOA’s and NYCTA’s motion.
The complaint states that plaintiff’s injury was caused by the negligent maintenance of a “sidewalk/curb.” To establish a prima facie case of negligence, the plaintiff must establish that a defendant owed a duty of reasonable care, that there was a breach of that duty and that she suffered an injury which proximately resulted from the breach of a duty. At this point there is a duty to maintain the bus stop sidewalk area but the question exists whether that duty was the City’s, M Realty’s, or A Realty’s.
|
"""hug/validate.py
Defines hugs built-in validation methods
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
def all(*validators):
    """Validation only succeeds if all passed in validators return no errors"""
    def validate_all(fields):
        # Stop at the first validator that reports a problem.
        for check in validators:
            failures = check(fields)
            if failures:
                return failures
        return None
    validate_all.__doc__ = " and ".join(check.__doc__ for check in validators)
    return validate_all
def any(*validators):
    """If any of the specified validators pass the validation succeeds"""
    def validate_any(fields):
        collected = {}
        for check in validators:
            failures = check(fields)
            # One passing validator is enough.
            if not failures:
                return None
            collected.update(failures)
        return collected
    validate_any.__doc__ = " or ".join(check.__doc__ for check in validators)
    return validate_any
def contains_one_of(*fields):
    """Enables ensuring that one of multiple optional fields is set"""
    message = "Must contain any one of the following fields: {0}".format(", ".join(fields))
    def check_contains(endpoint_fields):
        # NOTE: an explicit loop, not builtin any() -- this module defines
        # its own `any` above, which shadows the builtin.
        for candidate in fields:
            if candidate in endpoint_fields:
                return None
        return {candidate: "one of these must have a value" for candidate in fields}
    check_contains.__doc__ = message
    return check_contains
|
Asia's geography, climate, logistics and infrastructure present its own unique challenges and, with this in mind, the Raja Laut was designed with the specific requirements for yacht charter in South East Asia.
Raja Laut means 'King of the Sea' and stepping on board this 'schooner style' inspired yacht, built from tropical hardwood, you will experience a truly regal sense of sailing the Indonesian seas; freedom, style and comfort. Itineraries include the areas of Komodo, Raja Ampat and Banda.
She comfortably accommodates 12 guests in 6 cabins (3 double bed cabins, 3 twin bed cabins), each with ensuite, mini safe, individually controlled air-conditioning and fan.
Tanks: 12 ltr aluminium tanks with INT & DIN adapters. 15 ltr aluminium tanks available at extra charge.
Camera facilities: Power sockets for battery chargers and flash in cabins only. Camera rinsing tank and camera preparation area on upper deck.
|
from django.db.models.signals import pre_save, pre_delete, post_save, post_delete
from django.dispatch import receiver
from django.conf import settings
from django.contrib.auth.models import User, Group
import datetime
from usermanage.models import Customer
from customermanage.models import Coupon
from storemanage.models import Ticket
from social_django.models import UserSocialAuth
from django.db import models
@receiver(post_save, sender=Customer)
def test_handler(sender, **kwargs):
    """Debug receiver: trace every Customer post_save signal."""
    print(sender)
    print("user created")
@receiver(post_save, sender=Coupon)
def coupon_handler(sender, instance, **kwargs):
    """Maintain aggregate counters on the coupon's ticket.

    An active coupon counts as a purchase; an inactive one counts as a use.
    Both a lifetime total ('<prefix>_all') and a per-day breakdown
    ('<prefix>_by_date', keyed by YYYY-MM-DD) are kept in ``ticket.stat``.
    """
    ticket = instance.ticket
    # The purchase and use branches were duplicates; a key prefix folds them.
    prefix = 'purchase' if instance.active else 'use'
    stats = ticket.stat

    stats[prefix + '_all'] = stats.get(prefix + '_all', 0) + 1

    by_date = stats.setdefault(prefix + '_by_date', dict())
    today = datetime.date.today().strftime("%Y-%m-%d")
    by_date[today] = by_date.get(today, 0) + 1

    ticket.save()
@receiver(post_save, sender=UserSocialAuth)
def test_social(sender, instance, **kwargs):
    """Ensure a social-auth user belongs to the 'customer' group.

    When a UserSocialAuth record is saved for a user who is not yet a
    customer, add the user to the 'customer' group and create a matching
    Customer profile from the user's first/last name.

    Cleanup: removed leftover debug prints, the unused ``check`` local and
    a large block of commented-out dead code.
    """
    user = instance.user
    groups = list(user.groups.values_list('name', flat=True))
    if 'customer' not in groups:
        g = Group.objects.get(name='customer')
        g.user_set.add(user)
        user.save()
        g.save()
        customerprofile = Customer(user=user, first_name=user.first_name,
                                   last_name=user.last_name)
        customerprofile.save()
|
Break out the salami and provolone to make this hearty, deli-style Salami Italiano sandwich for one.
Spread cut surfaces of sandwich roll with mayo; sprinkle with Italian seasoning.
Fill roll with salami, cheese, onion and bell pepper.
To make a Super Salami Italiano Sub, add OSCAR MAYER Sliced Ham or Turkey, lettuce, tomato and a sprinkling of KRAFT Grated Parmesan Cheese to the sandwich.
Serve with fresh crisp CLAUSSEN Pickles, baby carrots and celery sticks.
Substitute KRAFT Light Mayo Reduced Fat Mayonnaise, KRAFT Mayo Fat Free Mayonnaise Dressing, MIRACLE WHIP Salad Dressing, MIRACLE WHIP Light Dressing or MIRACLE WHIP FREE Dressing for the mayo.
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v6.common.types import ad_type_infos
from google.ads.googleads.v6.common.types import custom_parameter
from google.ads.googleads.v6.common.types import final_app_url
from google.ads.googleads.v6.common.types import url_collection
from google.ads.googleads.v6.enums.types import ad_type
from google.ads.googleads.v6.enums.types import device
from google.ads.googleads.v6.enums.types import system_managed_entity_source
from google.ads.googleads.v6.resources.types import ad
from google.ads.googleads.v6.services.types import ad_service
from .transports.base import AdServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AdServiceGrpcTransport
class AdServiceClientMeta(type):
    """Metaclass for the AdService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    # Maps transport label -> transport class.  Insertion order matters:
    # the first entry is the default transport.
    _transport_registry = OrderedDict()  # type: Dict[str, Type[AdServiceTransport]]
    _transport_registry['grpc'] = AdServiceGrpcTransport

    def get_transport_class(cls,
            label: str = None,
            ) -> Type[AdServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.

        Raises:
            KeyError: if ``label`` is not a registered transport name.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class AdServiceClient(metaclass=AdServiceClientMeta):
"""Service to manage ads."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> AdServiceTransport:
        """Return the transport used by the client instance.

        Returns:
            AdServiceTransport: The transport used by the client instance.
        """
        # Set during client construction; shared by all calls on this client.
        return self._transport
@staticmethod
def ad_path(customer_id: str, ad_id: str, ) -> str:
    """Build the fully-qualified resource name for an ad."""
    return f"customers/{customer_id}/ads/{ad_id}"

@staticmethod
def parse_ad_path(path: str) -> Dict[str, str]:
    """Split an ad resource name into its component segments.

    Returns an empty dict when *path* is not a valid ad resource name.
    """
    match = re.match(r"^customers/(?P<customer_id>.+?)/ads/(?P<ad_id>.+?)$", path)
    return match.groupdict() if match else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
    """Build a fully-qualified billing_account resource name."""
    return f"billingAccounts/{billing_account}"

@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
    """Split a billing_account resource name into its segments."""
    match = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
    return match.groupdict() if match else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
    """Build a fully-qualified folder resource name."""
    return f"folders/{folder}"

@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
    """Split a folder resource name into its segments."""
    match = re.match(r"^folders/(?P<folder>.+?)$", path)
    return match.groupdict() if match else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
    """Build a fully-qualified organization resource name."""
    return f"organizations/{organization}"

@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
    """Split an organization resource name into its segments."""
    match = re.match(r"^organizations/(?P<organization>.+?)$", path)
    return match.groupdict() if match else {}
@staticmethod
def common_project_path(project: str, ) -> str:
    """Build a fully-qualified project resource name."""
    return f"projects/{project}"

@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
    """Split a project resource name into its segments."""
    match = re.match(r"^projects/(?P<project>.+?)$", path)
    return match.groupdict() if match else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
    """Build a fully-qualified location resource name."""
    return f"projects/{project}/locations/{location}"

@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
    """Split a location resource name into its segments."""
    match = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
    return match.groupdict() if match else {}
def __init__(self, *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, AdServiceTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        ) -> None:
    """Instantiate the ad service client.

    Args:
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
        transport (Union[str, ~.AdServiceTransport]): The
            transport to use. If set to None, a transport is chosen
            automatically.
        client_options (google.api_core.client_options.ClientOptions): Custom options for the
            client. It won't take effect if a ``transport`` instance is provided.
            (1) The ``api_endpoint`` property can be used to override the
            default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
            environment variable can also be used to override the endpoint:
            "always" (always use the default mTLS endpoint), "never" (always
            use the default regular endpoint) and "auto" (auto switch to the
            default mTLS endpoint if client certificate is present, this is
            the default value). However, the ``api_endpoint`` property takes
            precedence if provided.
            (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
            is "true", then the ``client_cert_source`` property can be used
            to provide client certificate for mutual TLS transport. If
            not provided, the default SSL client certificate will be used if
            present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
            set, no client certificate will be used.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
            creation failed for any reason.
    """
    # Accept a plain dict in place of a ClientOptions instance.
    if isinstance(client_options, dict):
        client_options = client_options_lib.from_dict(client_options)
    if client_options is None:
        client_options = client_options_lib.ClientOptions()
    # Create SSL credentials for mutual TLS if needed.
    use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
    ssl_credentials = None
    is_mtls = False
    if use_client_cert:
        if client_options.client_cert_source:
            # A caller-provided certificate source wins over the default one.
            import grpc  # type: ignore
            cert, key = client_options.client_cert_source()
            ssl_credentials = grpc.ssl_channel_credentials(
                certificate_chain=cert, private_key=key
            )
            is_mtls = True
        else:
            # Fall back to the default client certificate, if one is present.
            creds = SslCredentials()
            is_mtls = creds.is_mtls
            ssl_credentials = creds.ssl_credentials if is_mtls else None
    # Figure out which api endpoint to use: an explicit option takes
    # precedence, otherwise GOOGLE_API_USE_MTLS_ENDPOINT decides.
    if client_options.api_endpoint is not None:
        api_endpoint = client_options.api_endpoint
    else:
        use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_mtls_env == "never":
            api_endpoint = self.DEFAULT_ENDPOINT
        elif use_mtls_env == "always":
            api_endpoint = self.DEFAULT_MTLS_ENDPOINT
        elif use_mtls_env == "auto":
            # "auto" switches to mTLS only when a client certificate exists.
            api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
        else:
            raise MutualTLSChannelError(
                "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
            )
    # Save or instantiate the transport.
    # Ordinarily, we provide the transport, but allowing a custom transport
    # instance provides an extensibility point for unusual situations.
    if isinstance(transport, AdServiceTransport):
        # transport is a AdServiceTransport instance.
        if credentials:
            # A ready-made transport already carries its own credentials.
            raise ValueError('When providing a transport instance, '
                             'provide its credentials directly.')
        self._transport = transport
    elif isinstance(transport, str):
        # A transport name: look up its class and build it with defaults.
        Transport = type(self).get_transport_class(transport)
        self._transport = Transport(
            credentials=credentials, host=self.DEFAULT_ENDPOINT
        )
    else:
        # No transport given: build the default gRPC transport against the
        # endpoint and SSL settings computed above.
        self._transport = AdServiceGrpcTransport(
            credentials=credentials,
            host=api_endpoint,
            ssl_channel_credentials=ssl_credentials,
            client_info=client_info,
        )
def get_ad(self,
        request: ad_service.GetAdRequest = None,
        *,
        resource_name: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> ad.Ad:
    r"""Fetch the requested ad in full detail.

    Args:
        request (:class:`google.ads.googleads.v6.services.types.GetAdRequest`):
            The request object. Request message for
            [AdService.GetAd][google.ads.googleads.v6.services.AdService.GetAd].
        resource_name (:class:`str`):
            Required. The resource name of the ad to fetch. This
            corresponds to the ``resource_name`` field on the
            ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.ads.googleads.v6.resources.types.Ad:
            An ad.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive ways of specifying the call.
    flattened_given = any([resource_name])
    if request is not None and flattened_given:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')
    # Coerce into a GetAdRequest unless one was passed directly (avoids an
    # unnecessary copy; no flattened fields can clash at this point).
    if not isinstance(request, ad_service.GetAdRequest):
        request = ad_service.GetAdRequest(request)
    # Apply any flattened keyword arguments onto the request.
    if resource_name is not None:
        request.resource_name = resource_name
    # The wrapped method carries retry/timeout defaults and error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.get_ad]
    # Certain fields must travel in the routing metadata header.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (('resource_name', request.resource_name),)
    )
    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=tuple(metadata) + (routing_header,),
    )
def mutate_ads(self,
        request: ad_service.MutateAdsRequest = None,
        *,
        customer_id: str = None,
        operations: Sequence[ad_service.AdOperation] = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> ad_service.MutateAdsResponse:
    r"""Update ads, returning per-operation statuses.

    Updating ads is not supported for TextAd, ExpandedDynamicSearchAd,
    GmailAd and ImageAd.

    Args:
        request (:class:`google.ads.googleads.v6.services.types.MutateAdsRequest`):
            The request object. Request message for
            [AdService.MutateAds][google.ads.googleads.v6.services.AdService.MutateAds].
        customer_id (:class:`str`):
            Required. The ID of the customer whose ads are being
            modified. This corresponds to the ``customer_id`` field on
            the ``request`` instance; if ``request`` is provided, this
            should not be set.
        operations (:class:`Sequence[google.ads.googleads.v6.services.types.AdOperation]`):
            Required. The list of operations to perform on individual
            ads. This corresponds to the ``operations`` field on the
            ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.ads.googleads.v6.services.types.MutateAdsResponse:
            Response message for an ad mutate.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive ways of specifying the call.
    flattened_given = any([customer_id, operations])
    if request is not None and flattened_given:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')
    # Coerce into a MutateAdsRequest unless one was passed directly (avoids
    # an unnecessary copy; no flattened fields can clash at this point).
    if not isinstance(request, ad_service.MutateAdsRequest):
        request = ad_service.MutateAdsRequest(request)
    # Apply any flattened keyword arguments onto the request.
    if customer_id is not None:
        request.customer_id = customer_id
    if operations is not None:
        request.operations = operations
    # The wrapped method carries retry/timeout defaults and error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.mutate_ads]
    # Certain fields must travel in the routing metadata header.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (('customer_id', request.customer_id),)
    )
    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=tuple(metadata) + (routing_header,),
    )
# Public API of this module.
__all__ = (
    'AdServiceClient',
)
|
The crew at Boulder Motor Sports took one of their 'customer owned' Pierobon X60R bikes down to Barber and then on to Daytona over the last couple of weekends and piled up a heap of wins in all classes in the hands of Shane Turpin.
This very special bike is the only 2 Valve Pierobon Ducati in the world fitted with full Magneti Marelli SRT electronics, drive-by-wire and clutch-less 'auto blip' shifting.
Electronics designed, built and programmed by us here at Bike Sport Developments.
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 3 10:26:23 2017
@author: chosenone
CNN for DUC Doc Summary
Structure:
An embedding layer, followed by a convolutional layer and a max-pooling layer,
and finally a regression layer.
"""
import tensorflow as tf
import numpy as np
class CNN4DUCSummary(object):
    """CNN regression model for scoring DUC document-summary sentences.

    Structure: an embedding layer, followed by parallel convolution +
    max-pooling branches (one per filter size); the element-wise maximum
    over the branches is concatenated with hand-crafted sentence features
    and fed to a linear output layer trained with a mean-squared-error
    loss (plus optional L2 regularization).
    """

    def __init__(self, sequence_length, num_classes, vocab_size,
                 embedding_size, filter_sizes, feature_size, num_filters,
                 word_embedding=None, fine_tune=False, l2_reg_lambda=0.0):
        # Fine-tuning needs an initial embedding matrix to start from.
        # (The original code did ``raise("...")``, which raises a plain
        # string and therefore fails with a TypeError instead.)
        if fine_tune and word_embedding is None:
            raise ValueError(
                "there must be a copy of initial value of word embedding")

        # Placeholders for input, output, dropout keep-probability and the
        # hand-crafted sentence features.
        with tf.name_scope(name="input"):
            self._input_x = tf.placeholder(tf.int32, [None, sequence_length], name='x')
            self._input_y = tf.placeholder(tf.float32, [None, num_classes], name='y')
            self._keep_prob = tf.placeholder(tf.float32, name='keep_prop')
            self._features = tf.placeholder(tf.float32, [None, feature_size], name="feature")

        # Accumulator for the (optional) L2 regularization term.
        l2_loss = tf.constant(0.0, tf.float32)

        # Embedding layer; pinned to CPU as embedding lookups are
        # CPU-bound in this TF version.
        with tf.device('/cpu:0'), tf.name_scope('embedding'):
            if not fine_tune:
                # Train an embedding matrix from random initialization.
                self._embeddings = tf.Variable(
                    tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                    name="embedding", trainable=True)
            else:
                # Start from the supplied pre-trained matrix and fine-tune.
                self._embeddings = tf.Variable(
                    word_embedding, name="embedding-fine-tuned", trainable=True)
            self._embedded_words = tf.nn.embedding_lookup(self._embeddings, self._input_x)
            # Add a channel dimension for conv2d: [batch, seq, embed, 1].
            self._embedded_words_expanded = tf.expand_dims(self._embedded_words, -1)

        # One convolution + max-pooling branch per filter size.
        pooled_output = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]))
                # Output: [batch, sequence_length - filter_size + 1, 1, num_filters].
                conv = tf.nn.conv2d(self._embedded_words_expanded,
                                    W,
                                    strides=[1, 1, 1, 1],
                                    padding="VALID",
                                    name="conv")
                h = tf.nn.tanh(tf.nn.bias_add(conv, b), name="tanh")
                # Max-pool over the whole sequence: [batch, 1, 1, num_filters].
                pooled = tf.nn.max_pool(h,
                                        ksize=[1, sequence_length - filter_size + 1, 1, 1],
                                        strides=[1, 1, 1, 1],
                                        padding="VALID",
                                        name="pool")
                pooled_output.append(pooled)

        # Combine the branches and take the element-wise maximum across
        # filter sizes to get one num_filters-wide latent feature vector.
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_output, axis=3)
        filters = tf.reshape(self.h_pool, [-1, len(filter_sizes), num_filters])
        filters_max = tf.reshape(tf.reduce_max(filters, axis=1),
                                 [-1, num_filters], name="latent_feature")

        # Concatenate learned representation with hand-crafted features.
        self.h_pool_flatened = tf.concat([filters_max, self._features],
                                         axis=1, name="sentences")

        # Final linear layer producing the regression scores.
        with tf.name_scope("output"):
            W = tf.get_variable(name="W",
                                shape=[num_filters + feature_size, num_classes],
                                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, tf.float32, shape=[num_classes], name="b"))
            l2_loss += tf.nn.l2_loss(W)
            # l2_loss += tf.nn.l2_loss(b)  # bias regularization deliberately off
            self._scores = tf.nn.xw_plus_b(self.h_pool_flatened, W, b, name="scores")

        # Mean-squared-error loss plus the L2 penalty.
        with tf.name_scope("loss"):
            losses = 1.0 / 2 * tf.reduce_mean(tf.pow((self._scores - self._input_y), 2))
            self._loss = losses + l2_reg_lambda * l2_loss
|
HTP FC JohnDrees is taking out an RvB Ganked Fleet this Saturday, 19:00 EVE Time, August 1st, 2015. Here is a video to familiarize you with the fleet doctrine. Fits are below.
|
"""
General utilities for django.
"""
###############################################################################
## Imports
###############################################################################
from django.conf import settings
from django.contrib.auth.models import Group, SiteProfileNotAvailable
from django.core.cache import cache
from django.db.models import get_model
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver, Signal
from django.shortcuts import _get_queryset
###############################################################################
## Utils
###############################################################################
def get_or_none(klass, *args, **kwargs):
    """Return a single matching object, or ``None`` when nothing matches.

    ``klass`` may be a Model, Manager, or QuerySet object; all remaining
    positional and keyword arguments are passed straight to ``get()``.

    Note: As with ``get()``, a ``MultipleObjectsReturned`` is raised when
    more than one object is found.

    From django-annoying: https://bitbucket.org/offline/django-annoying
    """
    qs = _get_queryset(klass)
    try:
        return qs.get(*args, **kwargs)
    except qs.model.DoesNotExist:
        return None
def get_profile_model():
    """Return the model class for the currently-active user profile model.

    The model is looked up via the ``AUTH_PROFILE_MODULE`` setting
    (``"app_label.ModelName"``).

    :return: The model that is used as profile.
    :raises SiteProfileNotAvailable: when the setting is missing, empty,
        or does not resolve to a model.
    """
    module_path = getattr(settings, 'AUTH_PROFILE_MODULE', None)
    if not module_path:
        raise SiteProfileNotAvailable
    profile_model = get_model(*module_path.split('.'))
    if profile_model is None:
        raise SiteProfileNotAvailable
    return profile_model
def get_group(name):
    """Return the group with the given name, if it exists.

    The cache is consulted first; on a miss the group is fetched from the
    database and cached for one year.
    """
    cache_key = 'bits.general.group_%s' % name
    group = cache.get(cache_key)
    if not group:
        group = Group.objects.get(name=name)
        cache.set(cache_key, group, 365 * 24 * 60 * 60)
    return group
@receiver(post_delete, sender=Group,
          dispatch_uid='bits.general.group_post_delete')
@receiver(post_save, sender=Group,
          dispatch_uid='bits.general.group_post_save')
def group_post_save_delete(sender, instance, **kwargs):
    """Invalidate the cached entry for a Group on save or delete.

    ``post_delete`` does not send the ``created`` argument that
    ``post_save`` sends, so all optional signal arguments are absorbed
    into ``**kwargs`` (the original explicit ``created`` parameter made
    every delete-signal dispatch fail with a TypeError).
    """
    cache.delete('bits.general.group_%s' % instance.name)
###############################################################################
## Signals
###############################################################################
# Custom signal fired once post_syncdb handling has completed. It is emitted
# by a custom management command rather than by Django itself.
after_syncdb = Signal()
"""To send after post_syncdb is done. Currently via custom command"""
|
Why do people love Contract Bridge? Because it’s so logical, elegant and strategic. And why do people love Poker? Because its surprising, risky and unpredictable. Now imagine there’s a card game that has features and benefits of Bridge and Poker. This game is called Oh Hell and you just have to give it a try.
When you hear “chess”, you think about the intellectual duel of two people. Therefore you may be surprised by the fact that one of the Chess ancestors is a game for 4 players and with an element of luck. The goal of the game was not to checkmate but to score points; however, this old game is still “chessy”. The name of this game is Chaturaji.
Matador is a domino game, typical and unique at the same time. Typical because players use tiles to build a line of play and the goal of the game is to get rid of all bones from hand. But matador is unique thanks to the unusual rule of combining tiles and thanks to the existence of special tiles that can be added to the line of play at any time.
I love trick-taking card games in which you have to win certain cards to earn points. They are popular in my part of Europe – Thousand in Poland and Ukraine, and 66 (or Schnapsen) in Germany and Austria. Both games have a common ancestor, an old game called Mariage (in Polish: Mariasz).
It is believed that the popular game of Ludo is very old, but is it? It was patented in 1896, but it was a modern variant of a really old game called Pachisi. And I must tell you this older game is more strategic and has a unique, ancient nature.
|
import numpy as np
import matplotlib.pyplot as plt


def _plot_series(x, y, ylabel):
    """Plot *y* against *x* (years) with a grid and the given y-axis label."""
    plt.figure()
    plt.plot(x, y)
    plt.xlabel('Year')
    plt.ylabel(ylabel)
    plt.grid(True)
    plt.show()


years2 = np.array([1962,1963,1964,1965,1966,1967,1968,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014])
usaGNI = np.array([612178550047.646,646233886826.65,692328219512.945,753294530375.941,824183577234.192,868295290971.962,952033980993.251,1027990251284.03,1098553055567.61,1183038457083.86,1320921418184.74,1548458249174.67,1711839855738.22,1842214711486.27,1958767403397.59,2117456144199.84,2401109359261.26,2751769589536.9,3048093901726.34,3303883972259.98,3297652203866.24,3411202239818.87,3828479505092.12,4164905103485.73,4601500378186.56,5200354088055.45,5765196251790.1,5888830786924.1,6029529322891.06,6164277951121.71,6612706041742.15,6883086506452.91,7302781827892.38,7760854970064.45,8184808773787.28,8558708987900.82,8869581532268.98,9425292191447.05,10178500697503.7,10498594829042.2,10776200783181,11589035965657.3,12790914724399.8,13693955258225.3,14345564947204.5,14651211130474,15002428215985,14740580035992.9,15143137264678.1,15727290871234.6,16501015978642.4,17001290051112.6,17611490812741.3])
# GNI data atlas method from the worldbank http://databank.worldbank.org/data/reports.aspx?source=2&country=USA&series=&period=#
# CPI data from bureau of labor statistics http://data.bls.gov/pdq/SurveyOutputServlet
usaCPI2 = np.array([30.2, 30.6, 31.0, 31.5, 32.4, 33.4, 34.8, 36.7, 38.8, 40.5, 41.8, 44.4, 49.3, 53.8, 56.9, 60.6, 65.2, 72.6, 82.4, 90.9, 96.5, 99.6, 103.9, 107.6, 109.6, 113.6, 118.3, 124.0, 130.7, 136.2, 140.3, 144.5, 148.2, 152.4, 156.9, 160.5, 163.0, 166.6, 172.2, 177.1, 179.9, 184.0, 188.9, 195.3, 201.6, 207.342, 215.303, 214.537, 218.056, 224.939, 229.594, 232.957, 236.736])

# Nominal GNI.
_plot_series(years2, usaGNI, 'GNI in Current USD')

# Adjust GNI for 1962 USD (deflate by CPI relative to the 1962 index).
usaGNI1962 = usaGNI / (usaCPI2 / usaCPI2[0])
_plot_series(years2, usaGNI1962, 'GNI adjusted for inflation to 1962 USD')

# Adjust GNI for 2014 USD (inflate by CPI relative to the 2014 index).
usaGNI2014 = usaGNI / (usaCPI2 / usaCPI2[-1])
_plot_series(years2, usaGNI2014, 'GNI adjusted for inflation to 2014 USD')

# population from world bank
usaPop = np.array([186538000,189242000,191889000,194303000,196560000,198712000,200706000,202677000,205052000,207661000,209896000,211909000,213854000,215973000,218035000,220239000,222585000,225055000,227225000,229466000,231664000,233792000,235825000,237924000,240133000,242289000,244499000,246819000,249623000,252981000,256514000,259919000,263126000,266278000,269394000,272657000,275854000,279040000,282162411,284968955,287625193,290107933,292805298,295516599,298379912,301231207,304093966,306771529,309347057,311721632,314112078,316497531,318857056])
usaGNIpercapita = usaGNI / usaPop
_plot_series(years2, usaGNIpercapita, 'GNI per capita in Current USD')

# adjust GNI per Capita to 1962s numbers
usaGNIpercapita1962 = usaGNIpercapita / (usaCPI2 / usaCPI2[0])
_plot_series(years2, usaGNIpercapita1962, 'GNI per capita adjusted for inflation to 1962 USD')

# adjust GNI per Capita to 2014s numbers
usaGNIpercapita2014 = usaGNIpercapita / (usaCPI2 / usaCPI2[-1])
_plot_series(years2, usaGNIpercapita2014, 'GNI per capita adjusted for inflation to 2014 USD')
|
The Jewish Film Institute plays a vital role in the Jewish cultural landscape. But it can only do so with help from forward-thinking, committed supporters - people who want to help our programs reflect the diversity and vitality of Jewish life, for generations to come.
Our goal is to help ensure the continuity of our role in Jewish cultural life by encouraging our most loyal supporters and friends to include the organization in their plans for legacy gifts.
Loyal supporters and friends who make a planned gift to SFJFF have the satisfaction of knowing that what they care about endures, and have the confidence that their legacy gift will go far.
A legacy gift can solve a funding shortfall, assure financial stability, upgrade systems and significantly enhance programming. Audiences are the beneficiaries of the support of the Future Focus Fund donors. Your support of the Future Focus Fund will ensure our impact for decades to come.
|
from ..broker import Broker
class DeviceEnvironmentMonitorBroker(Broker):
controller = "device_environment_monitors"
def index(self, **kwargs):
    """List the available device environment monitors.

    Any of the documented inputs may be used to narrow the list; other
    inputs are ignored. Of the various ways to query lists, this method
    is the most efficient.

    Optional inputs (all passed via ``kwargs``) include:
    ``DevEnvMonID`` and ``DeviceID`` (record/device identifiers),
    ``DeviceGroupID`` (limit results to device groups), ``timestamp``
    (return data as of this date and time), ``methods`` and ``include``
    (extra method results / associated objects; available: device),
    paging controls ``start`` (default 0) and ``limit`` (default 1000,
    max 10000), ordering controls ``sort`` (default DevEnvMonID) and
    ``dir`` (default asc), ``select`` (attributes to return), and NIOS
    GOTO positioning via ``goto_field``/``goto_value``.

    Returns:
        Array of DeviceEnvironmentMonitor: the monitors matching the
        specified input criteria.
    """
    # Delegate to the generic list endpoint for this controller.
    return self.api_list_request(self._get_method_fullname("index"), kwargs)
def show(self, **kwargs):
    """Show the details for the specified device environment monitor.

    Inputs (passed via ``kwargs``):
    ``DevEnvMonID`` (Integer, required): the internal NetMRI identifier
    of the Device Environment record. ``methods`` and ``include``
    (Arrays of String, optional): extra method results / associated
    objects to return (available: device).

    Returns:
        DeviceEnvironmentMonitor: the device environment monitor
        identified by the specified DevEnvMonID.
    """
    # Delegate to the generic single-record endpoint for this controller.
    return self.api_request(self._get_method_fullname("show"), kwargs)
def search(self, **kwargs):
    """Lists the available device environment monitors matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.

    **Inputs**

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
    :type DataSourceID: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
    :type DataSourceID: Array of Integer

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonChangedCols: The fields that changed between this revision of the record and the previous revision.
    :type DevEnvMonChangedCols: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonChangedCols: The fields that changed between this revision of the record and the previous revision.
    :type DevEnvMonChangedCols: Array of String

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonDescr: The NetMRI-determined description of the device environment monitor.
    :type DevEnvMonDescr: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonDescr: The NetMRI-determined description of the device environment monitor.
    :type DevEnvMonDescr: Array of String

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonEndTime: The ending effective time of this record, or empty if still in effect.
    :type DevEnvMonEndTime: DateTime

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonEndTime: The ending effective time of this record, or empty if still in effect.
    :type DevEnvMonEndTime: Array of DateTime

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonHighShutdown: The high value of the shut down process in the device environment monitor.
    :type DevEnvMonHighShutdown: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonHighShutdown: The high value of the shut down process in the device environment monitor.
    :type DevEnvMonHighShutdown: Array of String

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonHighWarnVal: The high value of the warning message in the device environment monitor.
    :type DevEnvMonHighWarnVal: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonHighWarnVal: The high value of the warning message in the device environment monitor.
    :type DevEnvMonHighWarnVal: Array of String

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonID: The internal NetMRI identifier of Device Environment.
    :type DevEnvMonID: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonID: The internal NetMRI identifier of Device Environment.
    :type DevEnvMonID: Array of Integer

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonIndex: The index of the device in the device environment.
    :type DevEnvMonIndex: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonIndex: The index of the device in the device environment.
    :type DevEnvMonIndex: Array of String

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonLowShutdown: The low value of the shut down process in the device environment monitor.
    :type DevEnvMonLowShutdown: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonLowShutdown: The low value of the shut down process in the device environment monitor.
    :type DevEnvMonLowShutdown: Array of String

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonLowWarnVal: The low value of the warning message in the device environment monitor.
    :type DevEnvMonLowWarnVal: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonLowWarnVal: The low value of the warning message in the device environment monitor.
    :type DevEnvMonLowWarnVal: Array of String

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonMeasure: The measure of the device environment monitor.
    :type DevEnvMonMeasure: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonMeasure: The measure of the device environment monitor.
    :type DevEnvMonMeasure: Array of String

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonStartTime: The starting effective time of this record.
    :type DevEnvMonStartTime: DateTime

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonStartTime: The starting effective time of this record.
    :type DevEnvMonStartTime: Array of DateTime

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonState: The current state of the device in the device environment monitor.
    :type DevEnvMonState: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonState: The current state of the device in the device environment monitor.
    :type DevEnvMonState: Array of String

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonStatus: The status of the device in the Device Environment Monitor.
    :type DevEnvMonStatus: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonStatus: The status of the device in the Device Environment Monitor.
    :type DevEnvMonStatus: Array of String

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonStatusAlert: The alert status of the device environment monitor.
    :type DevEnvMonStatusAlert: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonStatusAlert: The alert status of the device environment monitor.
    :type DevEnvMonStatusAlert: Array of String

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonStatusMessage: The status message of the device environment monitor.
    :type DevEnvMonStatusMessage: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonStatusMessage: The status message of the device environment monitor.
    :type DevEnvMonStatusMessage: Array of String

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonTimestamp: The date and time this record was collected or calculated.
    :type DevEnvMonTimestamp: DateTime

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonTimestamp: The date and time this record was collected or calculated.
    :type DevEnvMonTimestamp: Array of DateTime

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonType: The NetMRI-determined monitor type of Device Environment.
    :type DevEnvMonType: String

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DevEnvMonType: The NetMRI-determined monitor type of Device Environment.
    :type DevEnvMonType: Array of String

    | ``api version min:`` 2.4
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param DeviceID: The internal NetMRI identifier for the device from which device environment information was collected.
    :type DeviceID: Integer

    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DeviceID: The internal NetMRI identifier for the device from which device environment information was collected.
    :type DeviceID: Array of Integer

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
    :type DeviceGroupID: Array of Integer

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param timestamp: The data returned will represent the device environment monitors as of this date and time. If omitted, the result will indicate the most recently collected data.
    :type timestamp: DateTime

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param methods: A list of device environment monitor methods. The listed methods will be called on each device environment monitor returned and included in the output. Available methods are: device.
    :type methods: Array of String

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
    :type include: Array of String

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 0
    :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
    :type start: Integer

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 1000
    :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
    :type limit: Integer

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` DevEnvMonID
    :param sort: The data field(s) to use for sorting the output. Default is DevEnvMonID. Valid values are DevEnvMonID, DeviceID, DataSourceID, DevEnvMonStartTime, DevEnvMonEndTime, DevEnvMonTimestamp, DevEnvMonChangedCols, DevEnvMonIndex, DevEnvMonType, DevEnvMonDescr, DevEnvMonState, DevEnvMonStatus, DevEnvMonMeasure, DevEnvMonLowWarnVal, DevEnvMonLowShutdown, DevEnvMonHighWarnVal, DevEnvMonHighShutdown, DevEnvMonStatusMessage, DevEnvMonStatusAlert.
    :type sort: Array of String

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` asc
    :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
    :type dir: Array of String

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param select: The list of attributes to return for each DeviceEnvironmentMonitor. Valid values are DevEnvMonID, DeviceID, DataSourceID, DevEnvMonStartTime, DevEnvMonEndTime, DevEnvMonTimestamp, DevEnvMonChangedCols, DevEnvMonIndex, DevEnvMonType, DevEnvMonDescr, DevEnvMonState, DevEnvMonStatus, DevEnvMonMeasure, DevEnvMonLowWarnVal, DevEnvMonLowShutdown, DevEnvMonHighWarnVal, DevEnvMonHighShutdown, DevEnvMonStatusMessage, DevEnvMonStatusAlert. If empty or omitted, all attributes will be returned.
    :type select: Array

    | ``api version min:`` 2.8
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
    :type goto_field: String

    | ``api version min:`` 2.8
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
    :type goto_value: String

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param query: This value will be matched against device environment monitors, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DevEnvMonChangedCols, DevEnvMonDescr, DevEnvMonEndTime, DevEnvMonHighShutdown, DevEnvMonHighWarnVal, DevEnvMonID, DevEnvMonIndex, DevEnvMonLowShutdown, DevEnvMonLowWarnVal, DevEnvMonMeasure, DevEnvMonStartTime, DevEnvMonState, DevEnvMonStatus, DevEnvMonStatusAlert, DevEnvMonStatusMessage, DevEnvMonTimestamp, DevEnvMonType, DeviceID.
    :type query: String

    | ``api version min:`` 2.3
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
    :type xml_filter: String

    **Outputs**

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :return device_environment_monitors: An array of the DeviceEnvironmentMonitor objects that match the specified input criteria.
    :rtype device_environment_monitors: Array of DeviceEnvironmentMonitor
    """
    # Resolve the broker-qualified API method name first, then hand the
    # caller-supplied filter keywords to the list-request layer unchanged.
    full_name = self._get_method_fullname("search")
    return self.api_list_request(full_name, kwargs)
def find(self, **kwargs):
"""Lists the available device environment monitors matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DevEnvMonChangedCols, DevEnvMonDescr, DevEnvMonEndTime, DevEnvMonHighShutdown, DevEnvMonHighWarnVal, DevEnvMonID, DevEnvMonIndex, DevEnvMonLowShutdown, DevEnvMonLowWarnVal, DevEnvMonMeasure, DevEnvMonStartTime, DevEnvMonState, DevEnvMonStatus, DevEnvMonStatusAlert, DevEnvMonStatusMessage, DevEnvMonTimestamp, DevEnvMonType, DeviceID.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonChangedCols: The operator to apply to the field DevEnvMonChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonChangedCols: If op_DevEnvMonChangedCols is specified, the field named in this input will be compared to the value in DevEnvMonChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonChangedCols must be specified if op_DevEnvMonChangedCols is specified.
:type val_f_DevEnvMonChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonChangedCols: If op_DevEnvMonChangedCols is specified, this value will be compared to the value in DevEnvMonChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonChangedCols must be specified if op_DevEnvMonChangedCols is specified.
:type val_c_DevEnvMonChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonDescr: The operator to apply to the field DevEnvMonDescr. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonDescr: The NetMRI-determined description of the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonDescr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonDescr: If op_DevEnvMonDescr is specified, the field named in this input will be compared to the value in DevEnvMonDescr using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonDescr must be specified if op_DevEnvMonDescr is specified.
:type val_f_DevEnvMonDescr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonDescr: If op_DevEnvMonDescr is specified, this value will be compared to the value in DevEnvMonDescr using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonDescr must be specified if op_DevEnvMonDescr is specified.
:type val_c_DevEnvMonDescr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonEndTime: The operator to apply to the field DevEnvMonEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonEndTime: The ending effective time of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonEndTime: If op_DevEnvMonEndTime is specified, the field named in this input will be compared to the value in DevEnvMonEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonEndTime must be specified if op_DevEnvMonEndTime is specified.
:type val_f_DevEnvMonEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonEndTime: If op_DevEnvMonEndTime is specified, this value will be compared to the value in DevEnvMonEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonEndTime must be specified if op_DevEnvMonEndTime is specified.
:type val_c_DevEnvMonEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonHighShutdown: The operator to apply to the field DevEnvMonHighShutdown. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonHighShutdown: The high value of the shut down process in the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonHighShutdown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonHighShutdown: If op_DevEnvMonHighShutdown is specified, the field named in this input will be compared to the value in DevEnvMonHighShutdown using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonHighShutdown must be specified if op_DevEnvMonHighShutdown is specified.
:type val_f_DevEnvMonHighShutdown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonHighShutdown: If op_DevEnvMonHighShutdown is specified, this value will be compared to the value in DevEnvMonHighShutdown using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonHighShutdown must be specified if op_DevEnvMonHighShutdown is specified.
:type val_c_DevEnvMonHighShutdown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonHighWarnVal: The operator to apply to the field DevEnvMonHighWarnVal. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonHighWarnVal: The high value of the warning message in the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonHighWarnVal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonHighWarnVal: If op_DevEnvMonHighWarnVal is specified, the field named in this input will be compared to the value in DevEnvMonHighWarnVal using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonHighWarnVal must be specified if op_DevEnvMonHighWarnVal is specified.
:type val_f_DevEnvMonHighWarnVal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonHighWarnVal: If op_DevEnvMonHighWarnVal is specified, this value will be compared to the value in DevEnvMonHighWarnVal using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonHighWarnVal must be specified if op_DevEnvMonHighWarnVal is specified.
:type val_c_DevEnvMonHighWarnVal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonID: The operator to apply to the field DevEnvMonID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonID: The internal NetMRI identifier of Device Environment. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonID: If op_DevEnvMonID is specified, the field named in this input will be compared to the value in DevEnvMonID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonID must be specified if op_DevEnvMonID is specified.
:type val_f_DevEnvMonID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonID: If op_DevEnvMonID is specified, this value will be compared to the value in DevEnvMonID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonID must be specified if op_DevEnvMonID is specified.
:type val_c_DevEnvMonID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonIndex: The operator to apply to the field DevEnvMonIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonIndex: The index of the device in the device environment. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonIndex: If op_DevEnvMonIndex is specified, the field named in this input will be compared to the value in DevEnvMonIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonIndex must be specified if op_DevEnvMonIndex is specified.
:type val_f_DevEnvMonIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonIndex: If op_DevEnvMonIndex is specified, this value will be compared to the value in DevEnvMonIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonIndex must be specified if op_DevEnvMonIndex is specified.
:type val_c_DevEnvMonIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonLowShutdown: The operator to apply to the field DevEnvMonLowShutdown. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonLowShutdown: The low value of the shut down process in the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonLowShutdown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonLowShutdown: If op_DevEnvMonLowShutdown is specified, the field named in this input will be compared to the value in DevEnvMonLowShutdown using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonLowShutdown must be specified if op_DevEnvMonLowShutdown is specified.
:type val_f_DevEnvMonLowShutdown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonLowShutdown: If op_DevEnvMonLowShutdown is specified, this value will be compared to the value in DevEnvMonLowShutdown using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonLowShutdown must be specified if op_DevEnvMonLowShutdown is specified.
:type val_c_DevEnvMonLowShutdown: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonLowWarnVal: The operator to apply to the field DevEnvMonLowWarnVal. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonLowWarnVal: The low value of the warning message in the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonLowWarnVal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonLowWarnVal: If op_DevEnvMonLowWarnVal is specified, the field named in this input will be compared to the value in DevEnvMonLowWarnVal using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonLowWarnVal must be specified if op_DevEnvMonLowWarnVal is specified.
:type val_f_DevEnvMonLowWarnVal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonLowWarnVal: If op_DevEnvMonLowWarnVal is specified, this value will be compared to the value in DevEnvMonLowWarnVal using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonLowWarnVal must be specified if op_DevEnvMonLowWarnVal is specified.
:type val_c_DevEnvMonLowWarnVal: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonMeasure: The operator to apply to the field DevEnvMonMeasure. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonMeasure: The measure of the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonMeasure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonMeasure: If op_DevEnvMonMeasure is specified, the field named in this input will be compared to the value in DevEnvMonMeasure using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonMeasure must be specified if op_DevEnvMonMeasure is specified.
:type val_f_DevEnvMonMeasure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonMeasure: If op_DevEnvMonMeasure is specified, this value will be compared to the value in DevEnvMonMeasure using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonMeasure must be specified if op_DevEnvMonMeasure is specified.
:type val_c_DevEnvMonMeasure: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonStartTime: The operator to apply to the field DevEnvMonStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonStartTime: The starting effective time of this record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonStartTime: If op_DevEnvMonStartTime is specified, the field named in this input will be compared to the value in DevEnvMonStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonStartTime must be specified if op_DevEnvMonStartTime is specified.
:type val_f_DevEnvMonStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonStartTime: If op_DevEnvMonStartTime is specified, this value will be compared to the value in DevEnvMonStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonStartTime must be specified if op_DevEnvMonStartTime is specified.
:type val_c_DevEnvMonStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonState: The operator to apply to the field DevEnvMonState. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonState: The current state of the device in the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonState: If op_DevEnvMonState is specified, the field named in this input will be compared to the value in DevEnvMonState using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonState must be specified if op_DevEnvMonState is specified.
:type val_f_DevEnvMonState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonState: If op_DevEnvMonState is specified, this value will be compared to the value in DevEnvMonState using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonState must be specified if op_DevEnvMonState is specified.
:type val_c_DevEnvMonState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonStatus: The operator to apply to the field DevEnvMonStatus. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonStatus: The status of the device in the Device Environment Monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonStatus: If op_DevEnvMonStatus is specified, the field named in this input will be compared to the value in DevEnvMonStatus using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonStatus must be specified if op_DevEnvMonStatus is specified.
:type val_f_DevEnvMonStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonStatus: If op_DevEnvMonStatus is specified, this value will be compared to the value in DevEnvMonStatus using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonStatus must be specified if op_DevEnvMonStatus is specified.
:type val_c_DevEnvMonStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonStatusAlert: The operator to apply to the field DevEnvMonStatusAlert. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonStatusAlert: The alert status of the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonStatusAlert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonStatusAlert: If op_DevEnvMonStatusAlert is specified, the field named in this input will be compared to the value in DevEnvMonStatusAlert using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonStatusAlert must be specified if op_DevEnvMonStatusAlert is specified.
:type val_f_DevEnvMonStatusAlert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonStatusAlert: If op_DevEnvMonStatusAlert is specified, this value will be compared to the value in DevEnvMonStatusAlert using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonStatusAlert must be specified if op_DevEnvMonStatusAlert is specified.
:type val_c_DevEnvMonStatusAlert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonStatusMessage: The operator to apply to the field DevEnvMonStatusMessage. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonStatusMessage: The status message of the device environment monitor. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonStatusMessage: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonStatusMessage: If op_DevEnvMonStatusMessage is specified, the field named in this input will be compared to the value in DevEnvMonStatusMessage using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonStatusMessage must be specified if op_DevEnvMonStatusMessage is specified.
:type val_f_DevEnvMonStatusMessage: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonStatusMessage: If op_DevEnvMonStatusMessage is specified, this value will be compared to the value in DevEnvMonStatusMessage using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonStatusMessage must be specified if op_DevEnvMonStatusMessage is specified.
:type val_c_DevEnvMonStatusMessage: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonTimestamp: The operator to apply to the field DevEnvMonTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonTimestamp: If op_DevEnvMonTimestamp is specified, the field named in this input will be compared to the value in DevEnvMonTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonTimestamp must be specified if op_DevEnvMonTimestamp is specified.
:type val_f_DevEnvMonTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonTimestamp: If op_DevEnvMonTimestamp is specified, this value will be compared to the value in DevEnvMonTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonTimestamp must be specified if op_DevEnvMonTimestamp is specified.
:type val_c_DevEnvMonTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevEnvMonType: The operator to apply to the field DevEnvMonType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevEnvMonType: The NetMRI-determined monitor type of Device Environment. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevEnvMonType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevEnvMonType: If op_DevEnvMonType is specified, the field named in this input will be compared to the value in DevEnvMonType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevEnvMonType must be specified if op_DevEnvMonType is specified.
:type val_f_DevEnvMonType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevEnvMonType: If op_DevEnvMonType is specified, this value will be compared to the value in DevEnvMonType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevEnvMonType must be specified if op_DevEnvMonType is specified.
:type val_c_DevEnvMonType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device from which device environment information was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device environment monitors as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device environment monitor methods. The listed methods will be called on each device environment monitor returned and included in the output. Available methods are: device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DevEnvMonID
:param sort: The data field(s) to use for sorting the output. Default is DevEnvMonID. Valid values are DevEnvMonID, DeviceID, DataSourceID, DevEnvMonStartTime, DevEnvMonEndTime, DevEnvMonTimestamp, DevEnvMonChangedCols, DevEnvMonIndex, DevEnvMonType, DevEnvMonDescr, DevEnvMonState, DevEnvMonStatus, DevEnvMonMeasure, DevEnvMonLowWarnVal, DevEnvMonLowShutdown, DevEnvMonHighWarnVal, DevEnvMonHighShutdown, DevEnvMonStatusMessage, DevEnvMonStatusAlert.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceEnvironmentMonitor. Valid values are DevEnvMonID, DeviceID, DataSourceID, DevEnvMonStartTime, DevEnvMonEndTime, DevEnvMonTimestamp, DevEnvMonChangedCols, DevEnvMonIndex, DevEnvMonType, DevEnvMonDescr, DevEnvMonState, DevEnvMonStatus, DevEnvMonMeasure, DevEnvMonLowWarnVal, DevEnvMonLowShutdown, DevEnvMonHighWarnVal, DevEnvMonHighShutdown, DevEnvMonStatusMessage, DevEnvMonStatusAlert. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_environment_monitors: An array of the DeviceEnvironmentMonitor objects that match the specified input criteria.
:rtype device_environment_monitors: Array of DeviceEnvironmentMonitor
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def data_source(self, **kwargs):
    """Return the collector NetMRI that gathered this data record.

    **Inputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` True
    |  ``default:`` None

    :param DevEnvMonID: The internal NetMRI identifier of Device Environment.
    :type DevEnvMonID: Integer

    **Outputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` None

    :return : The collector NetMRI that collected this data record.
    :rtype : DataSource

    """
    # Resolve the broker-qualified endpoint name, then dispatch the request.
    endpoint = self._get_method_fullname("data_source")
    return self.api_request(endpoint, kwargs)
def device(self, **kwargs):
    """Return the device from which this data was collected.

    **Inputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` True
    |  ``default:`` None

    :param DevEnvMonID: The internal NetMRI identifier of Device Environment.
    :type DevEnvMonID: Integer

    **Outputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` None

    :return : The device from which this data was collected.
    :rtype : Device

    """
    # Resolve the broker-qualified endpoint name, then dispatch the request.
    endpoint = self._get_method_fullname("device")
    return self.api_request(endpoint, kwargs)
def infradevice(self, **kwargs):
    """Return the infrastructure device from which this data was collected.

    **Inputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` True
    |  ``default:`` None

    :param DevEnvMonID: The internal NetMRI identifier of Device Environment.
    :type DevEnvMonID: Integer

    **Outputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` None

    :return : The device from which this data was collected.
    :rtype : InfraDevice

    """
    # Resolve the broker-qualified endpoint name, then dispatch the request.
    endpoint = self._get_method_fullname("infradevice")
    return self.api_request(endpoint, kwargs)
|
Here at savetube our function is to inform the hungry youtube video downloader community about the news on what’s happening in the “scene”. And the most recent news is that keepvid.com is no more a place to download online videos.
As you may know, I am into numbers, so my number crunching here says the following: something or someone, some institution perhaps, has caused keepvid to abandon their earlier strategies and abandon the whole video downloading process. They also no longer promote the software they previously advertised for 1080p HD downloads.
Above is the Alexa rating for keepvid. One can see the site is among the top 1,200 biggest in the world; while it has lost some traffic, it is still strongly in the business and probably hoping to stay there. By providing us with information? Come on — if this were a big secret, my blog would have been a heavily visited knowledge center covering the many sites that offer video downloads and mp3 conversion.
Anyways, I can rant for many paragraphs to come about how i disapprove of this.. But i maybe am forgetting about the juridical side of the matters.. Maybe because their site has gotten so popular, they were targeted by RIAA or other institution full-of-itself & stupid.. And decided to cut losses sooner, because it’s hard to withstand big dawgs’ onslaught..
KeepVid doesn’t work anymore. I think it’s better to use Acethinker’s Video Keeper for you to download videos from all websites.
i think you never read my article and just commented and left.. cause your comment summarizes my whole article… well, except for the software recommendation part.. i recommended other websites.
|
"""Xbox friends binary sensors."""
from functools import partial
from typing import Dict, List
from homeassistant.core import callback
from homeassistant.helpers.entity_registry import (
async_get_registry as async_get_entity_registry,
)
from homeassistant.helpers.typing import HomeAssistantType
from . import XboxUpdateCoordinator
from .base_sensor import XboxBaseSensorEntity
from .const import DOMAIN
# Presence attributes exposed as one sensor entity each, per tracked friend.
SENSOR_ATTRIBUTES = ["status", "gamer_score", "account_tier", "gold_tenure"]
async def async_setup_entry(hass: HomeAssistantType, config_entry, async_add_entities):
    """Set up Xbox Live friend sensors for a config entry."""
    entry_data = hass.data[DOMAIN][config_entry.entry_id]
    coordinator: XboxUpdateCoordinator = entry_data["coordinator"]

    # Bind a fresh per-entry tracking dict so entities are managed per entry.
    refresh = partial(async_update_friends, coordinator, {}, async_add_entities)

    # Keep the unsubscribe callback so the entry can detach the listener on unload.
    entry_data["sensor_unsub"] = coordinator.async_add_listener(refresh)
    refresh()
class XboxSensorEntity(XboxBaseSensorEntity):
    """Sensor exposing one presence attribute of an Xbox Live friend."""

    @property
    def state(self):
        """Return the tracked attribute's value, or None while data is stale."""
        if self.coordinator.last_update_success:
            return getattr(self.data, self.attribute, None)
        return None
@callback
def async_update_friends(
    coordinator: XboxUpdateCoordinator,
    current: Dict[str, List[XboxSensorEntity]],
    async_add_entities,
) -> None:
    """Reconcile tracked friend entities with the coordinator's presence data."""
    seen = set(coordinator.data.presence)
    known = set(current)

    # New friends: create one entity per tracked attribute and register them.
    added: List[XboxSensorEntity] = []
    for xuid in seen - known:
        entities = [
            XboxSensorEntity(coordinator, xuid, attr) for attr in SENSOR_ATTRIBUTES
        ]
        current[xuid] = entities
        added.extend(entities)

    if added:
        async_add_entities(added)

    # Vanished friends: schedule removal of their entities from Home Assistant.
    for xuid in known - seen:
        coordinator.hass.async_create_task(
            async_remove_entities(xuid, coordinator, current)
        )
async def async_remove_entities(
    xuid: str,
    coordinator: XboxUpdateCoordinator,
    current: Dict[str, List[XboxSensorEntity]],
) -> None:
    """Remove a friend's sensor entities from Home Assistant.

    Deletes every sensor entity created for the friend (one per attribute in
    SENSOR_ATTRIBUTES) from the entity registry, then drops the friend from
    the tracking dict.

    Fix: the ``current`` annotation previously claimed ``Dict[str,
    XboxSensorEntity]``, but the values are lists of entities (see how
    ``async_update_friends`` populates and this function iterates them).

    :param xuid: Xbox user id of the friend being removed.
    :param coordinator: Update coordinator owning the hass instance.
    :param current: Mapping of xuid -> list of sensor entities; mutated in place.
    """
    registry = await async_get_entity_registry(coordinator.hass)
    entities = current[xuid]
    for entity in entities:
        # Only entities that made it into the registry need explicit removal.
        if entity.entity_id in registry.entities:
            registry.async_remove(entity.entity_id)
    del current[xuid]
|
I decided to write this article while I was researching Italian mid-century ceramics for my book Alla Moda: Italian Ceramics of the 1950s-70s. I was surprised that no history of Raymor existed given its clear importance to mid-century decorative arts and interiors in America, and factories and designers in Europe. I was also surprised at the amount of plainly incorrect and conflicting information available in books and on the internet. This article is based on my personal findings through handling objects over the past ten years, and on interviews and a handful of documents which are listed at the end of the article. As such, this article is a work in progress, and is not intended to be the definitive answer or history of the company. If you can contribute more, please contact me.
If you would like to use some of the information in this article in a book, on a website, or in an eBay listing, please play fair and credit me and my website, preferably with a link to this page. Please don’t copy my work and reproduce it anywhere else without my permission as it’s my copyright and I didn’t get paid for researching or writing it. Thank you very much.
Raymor was the brand name of an import company and distributor based in America and active from 1941 until 1980. Although it imported most of its stock, it also oversaw the manufacture of a comparatively small quantity of goods in the US. During its lifetime, Raymor worked with many leading American, Italian, and Scandinavian designers and the companies they worked for. The company entity was incorporated as Russel Wright, Inc in 1936 and was liquidated on April 8th 1980.
Raymor was built by the American entrepreneur Irving Richards (left, 18th May 1907- 19th November 2003), who was born Irving Rappaport in Manhattan, New York. He became one of the most influential figures in mid-late 20thC design, and was at the cutting edge of popular American tastes during the period. He grew to understand the US buyer, their tastes and their homes, so knew what they would buy.
Raymor’s catchphrase, printed on nearly all of its paper labels (right), was ‘Modern in the Tradition of Good Taste’. Richards himself was described as a ‘merchandising tastemaker‘ by Donald Albrecht, co-curator of the Russel Wright exhibition at the Cooper Hewitt National Design Museum in 2001. He forged strong relationships between designers, manufacturers, retailers, and advertising and PR agencies which resulted in the best of modern design at all levels being brought to the wider American public.
Richards’ first business, started when he was 19 in 1926, was a bookshop on Broadway that specialised in selling rare and first edition books, and possibly a small range of desk accessories. This was clearly successful, as he was able to travel to Paris in 1928 to buy stock and to visit the famed Salon d’Automne art and design fair. This was his ‘Damascus moment’, and he recalled “…I was mesmerised by what I saw… It was like ‘seeing the light’.“. This was an apt phrase as, upon his return to New York, Richards closed his shop in 1930 and joined Lightolier for whom he travelled around Europe in search of new lighting designs, and undoubtedly sales contacts. The Depression had caused a drop in book sales, meaning the rent on his shop became too high to bear, and Richards needed a job to earn a living.
Richards’ first, and arguably most critical, relationship was with designer Russel Wright (1904-76). The two were introduced by Richards’ mentor Andy Rouge, a buyer for Stern Brothers and Ovington’s, in 1935. Richards was impressed by Wright’s spun aluminium lamps and kitchen and table wares (below), which were being sold by Rouge and were produced in a small Midtown workshop by Wright, his wife Mary and a couple of other assistants.
This began a fruitful relationship with Wright, and led to the formation of Russel Wright Inc. on March 10th 1936. Richards had presumably left Lightolier by this point. On April 17th 1937, the company’s name was changed to Wright Accessories Inc. The pair’s most celebrated and successful collaboration was over Wright’s landmark ‘American Modern’ ceramic tableware range (advertisement below, and jug at end of article), which was first produced in 1938 by Wright Accessories, but ended up being mass-produced by Steubenville later that year.
In 1941, the Wrights sold their share of the Wright Accessories business to Richards, with the proviso that Richards help them with their ‘American Way Program’ of supporting new American designers. He did this, but it was ultimately unsuccessful. Following the sale, Wright Accessories Inc. was renamed Raymor Mfg Division Inc. on February 19th 1941, and Raymor was born.
In 1941, the Raymor Mfg Division made an announcement about the release of new shapes for the growing ‘American Modern’ range. In the same year, Richards Morgenthau & Co. issued a release saying that ‘American Modern’ was made exclusively for them by Steubenville and sold exclusively by them. This relationship seemed to also include Steubenville’s other dinner ware and lasted for ten years, until 1951. This deal was obviously lucrative for Raymor, even if their commission was very small, as sales of ‘American Modern’ had exceeded $150m by 1959. Richards Morgenthau may have simply lost exclusivity in 1951, as they still offered the range for sale in 1954.
The relationship with Wright clearly blossomed into something enormously profitable for Richards, and led to a number of companies being founded under the Raymor and Richards Morgenthau names. Morgenthau sold one to Simmons (the mattress company) in 1968. Little is known about Richards’ business partner Eugene Morgenthau (1st January 1904 – 11th January 2004), but according to a member of his family, he began his career by selling jewellery. The Great Depression caused him to lose his job and, later, he founded his own company importing Danish furniture and also met and had a business relationship with Russel Wright. It is highly possible that this was the same company that he went on to run together with Richards. The details about how the two men met, their individual responsibilities, and their exact business arrangement are not yet known. According to ‘A Dictionary of Modern Design’ published by the Oxford University Press, a factory was founded under the Richards Morgenthau name in New Jersey to produce lighting, ceramic and glass designs in 1947, but currently there is no other source that supports this information.
The Wright relationship would have also given him a huge range of contacts, and contracts, with distributors, manufacturers and retailers across America and probably beyond. All of these would have combined to make Richards’ company immensely strong and powerful. By 1952, Raymor had an office on the prestigious 5th Avenue in Manhattan.
In the early 1950s, following the loss of exclusivity over American Modern, and the loss of Wright to other achievements, Richards looked for other options to continue and develop his business. He began to work with other designers and, over the subsequent three decades, these would grow to include George Nelson, Gilbert Rhode, Donald Deskey, Walter Dorwin Teague, Ray & Charles Eames, Ben Seibel (teapot, above left, for Roseville), Glidden Parker, David Gil, Michael Lax, Eva Zeisel, Hans Wegner and, in the later years, Peter Max. Richards had also gained experience designing his own ranges. In 1946, he tried unsuccessfully to sell a range of Mexican produced glassware into Gimbel’s as they stocked the far more successful ‘American Modern’ range.
He also resumed the travels in Europe that he had begun when working for Lightolier, and travelled to Denmark which led to him importing Danish furniture and lighting. He also established relationships with many factories in Scandinavia, Mexico, Italy and West Germany from the early to mid 1950s, including Italy’s Bitossi (right) and West Germany’s Carstens. He made at least two trips a year to Europe, and presumably visited many countries and different companies on each trip due to the expense of travel at the time. The 1950s can thus be seen as the start of the importation and distribution side of his company, which grew to include furniture, lighting, glass, wood, and ceramics.
In 1963, Raymor was sold to furniture company Simmons, which itself was later sold to Gulf & Western. The company name was changed to Raymor Richards, Morgenthau Inc. on December 15th 1969. During the mid-late 1960s and early 1970s, Richards was placed in charge of furniture. In interviews he hinted that money was no object as he was no longer in charge of the company’s finances – the new owners seemed keen to finance his decisions based on the success he had enjoyed in previous years. One of his great successes was the ‘Omnibus’ range of Scandinavian-style wall units.
Raymor also imported large amounts of Italian glass and pottery, mainly from factories in and around Florence and Murano. These included Bitossi ceramics designed by Aldo Londi (right), Bagni ceramics designed by Alvino Bagni, ceramics by Mancioli and Ceramica Pozzi, and glass from factories around Empoli. Richards also submitted designs by himself or by other designers Raymor worked with, such as Ben Seibel. He strictly controlled the final outcome by taking part in every part of the decision process, but always took advice from Bagni, Londi, or their equivalents. This focus on importing and promoting Italian products reputedly led to him being awarded a knighthood by the Italian government, who were rebuilding following the devastation of war. This award has never been confirmed, however. Richards was also instrumental in introducing Ettore Sottsass to ceramics around 1956, placing him firmly under the wing of Aldo Londi at Bitossi.
By the late 1960s, Richards must have been able to enjoy a comfortable lifestyle. He collected Latin American art, and took up sculpture (left, by Richards) and painting, presumably to further exercise his creative spirit. In the 1990s, his apartment was filled with design classics by great names in postwar design from across the world, as well as his own artworks. The 1960s & most of the 1970s must have been fruitful years for Raymor and Richards who could reap the rewards of the seeds he had sewn in the 1950s. It is likely that these two decades saw the golden age of the company in terms of its size and levels of sales.
It is currently not known when Richards left Raymor. It is possible that when he retired, the firm lost its driving force, spirit and soul, and no replacement as skilled or foresighted as Richards could be found. It may also be that tastes began to change during the late 1970s and Richards failed to change with them, or could not compete against the competitors he had no doubt inspired. It is also likely that, from the late 1970s onwards, many of his regular suppliers began to suffer from competition from factories in the Far East who could produce similar goods for much less than European makers, even if the quality and longevity suffered.
Further, many previously independent department stores merged into fewer, larger chains who had their own buyers with targets to reach that were set by accountants. When buying, the public also perhaps became a little less discerning, and more willing to change their interior styles on a more frequent basis as goods became less expensive. It is likely that this combination of changes also caused his largest competitor, Rosenthal Netter (founded 1959), to close during the same period, or in the 1980s.
What is known is that on October 4th 1978, the company was merged with a Californian company called Moreddi, who were based in Long Beach. Founded by Maury and Ed Frank, hence the name, Moreddi was one of the first companies to import Danish furniture into the US. It is likely that Raymor’s owners decided to merge two companies that had similar activities in a declining market in an attempt to make them viable concerns again.
Whatever happened at the end, Irving Richards and Raymor left an enormous and diverse legacy across Europe and the US that deserves to be properly recorded and explored. Without Raymor’s valuable and important input, the work of many important 20thC designers may not have been as important or recognised as it is today.
If you know more about Raymor, Richards Morgenthau, Irving Richards or Rosenthal Netter, or have spotted inaccuracies or errors in this article, please contact me. This article is a work in progress and any additional accurate, correct information would be gratefully accepted. Credits for any information submitted and used will be credited to the donor/owner. Although there is no intention to do so, if you feel that any copyright has been infringed in this article, please contact me. Thank you.
If you would like to use some of the information in this article in a book, on a website, or in an eBay listing, please play fair and credit me and my website, preferably with a link to this page.
Verbal interviews with Gianfranco Ghiretti (Nuoveforme) and Elisabetta Daini (Bitossi) conducted by Mark Hill in Summer 2011.
With thanks to Forrest Poston of ginforsodditiques, Jonathan Goldstein and Kevin & Alison of Bit of Butter for leads to extra information from American state departments.
Text and image of Raymor label are copyright Mark Hill Publishing Ltd, other images with thanks to and courtesy of David Rago and Miller’s. Photo of Irving Richards by Adam Anik and scanned from the reproduction in Echoes magazine (listed above).
|
from __future__ import print_function
from array import array
from collections import defaultdict
import networkx as nx
import six
from ..nodes.types import is_sum_node, is_var_node
from ..nodes.attributes import NodeAttr
def dbg_info(dag, optional_callable=None):
    """Print a quick structural summary of the dag (debug helper)."""
    separator = '-------------------------------------------------------------------'
    print(separator)
    if optional_callable:
        optional_callable()
    print('Nodes: %d, edges: %d' % (dag.number_of_nodes(), dag.number_of_edges()))
    print('Is DAG?', nx.is_directed_acyclic_graph(dag))
    n_components = nx.number_weakly_connected_components(dag)
    print('Weakly connected components:', n_components)
    dbg_pprint_source_sink_types(dag)
    print(separator)
def dbg_pprint_source_sink_types(dag):
    """Pretty-print the node kinds of the dag's sources, then its sinks."""
    for label, kind_nodeid_pairs in (('Sources:', itr_sourcetype_nodeid(dag)),
                                     ('Sinks:',   itr_sinktype_nodeid(dag))):
        print(label)
        dbg_pprint_kind_nodeids(group_node_ids_by_kind(kind_nodeid_pairs))
def dbg_pprint_kind_nodeids(kind_nodeids):
    """Print each kind with its node ids; long id lists are suppressed."""
    for kind, node_ids in kind_nodeids.items():
        n = len(node_ids)
        shown = node_ids if n <= 20 else ''
        print('  ', kind, shown, '(count=%d)' % n)
def group_node_ids_by_kind(itr_kind_nodeid_pairs):
    """Bucket node ids under their kind label; returns kind -> [node id]."""
    groups = defaultdict(list)
    for kind, node_id in itr_kind_nodeid_pairs:
        groups[kind].append(node_id)
    return groups
def itr_sourcetype_nodeid(dag):
    """Yield (pretty type string, node id) for every source node of dag."""
    for n in dag:
        if is_source(dag, n):
            yield get_pretty_type_str(dag, n), n
def itr_sinktype_nodeid(dag):
    """Yield (pretty type string, node id) for every sink node of dag."""
    for n in dag:
        if is_sink(dag, n):
            yield get_pretty_type_str(dag, n), n
def itr_sink_con_num_nodeid(dag):
    '(con_num, node_id) for sinks only; assumes that the problem has been setup'
    for n in dag:
        if is_sink(dag, n):
            yield dag.node[n][NodeAttr.con_num], n
def is_source(dag, node_id):
    # A source node has no incoming edges.
    return not dag.pred[node_id]
def is_sink(dag, node_id):
    # A sink node has no outgoing edges.
    return not dag.succ[node_id]
# FIXME Part of the dispatching mechanism, revise!
def get_pretty_type_str(dag, n):
    """Human-readable type label for node n, e.g. 'sum_node'."""
    node_type = dag.node[n][NodeAttr.type]
    return node_type + '_node'
# TODO Would be a nice addition to nx
def iter_attr(G, nbunch, name):
    """Yield (node, attribute value) pairs for the nodes in nbunch."""
    return ((node, G.node[node][name]) for node in nbunch)
def itr_var_num(G, var_node_ids):
    """Yield (node id, var_num attribute) pairs for the given var nodes."""
    return ((n, G.node[n][NodeAttr.var_num]) for n in var_node_ids)
def itr_sinks(dag, nbunch):
    """Filter nbunch down to the sink nodes of dag."""
    return (node for node in nbunch if is_sink(dag, node))
def itr_sum_nodes(dag):
    """Iterate over the ids of all sum nodes in dag."""
    for node_id in dag:
        if is_sum_node(dag.node[node_id]):
            yield node_id
def itr_siso_sum_nodes(dag):
    # Sum nodes with a single input and a single output (SISO).
    for n in itr_sum_nodes(dag):
        if len(dag.pred[n]) == 1 and len(dag.succ[n]) == 1:
            yield n
def itr_single_input_nodes(dag, node_ids):
    """Yield those node_ids that have exactly one predecessor."""
    for node_id in node_ids:
        if len(dag.pred[node_id]) == 1:
            yield node_id
def get_single_pred(dag, n):
    """Return the (assumed unique) predecessor of node n."""
    predecessors = iter(dag.pred[n])
    return next(predecessors)
def get_single_succ(dag, n):
    """Return the (assumed unique) successor of node n."""
    successors = iter(dag.succ[n])
    return next(successors)
def deterministic_topological_sort(dag):
    """Topologically sort dag, with ties broken by ascending node id.

    This function is stolen from networkx.algorithms.dag.topological_sort,
    made the returned order deterministic by pre-sorting the nodes by their ID.

    Returns a list of node ids; raises nx.NetworkXUnfeasible if the graph
    contains a cycle.
    """
    # 'seen' marks nodes on the current DFS path (cycle detection);
    # 'explored' marks nodes whose whole subtree has been finished.
    seen = set()
    order = []
    explored = set()

    nbunch = sorted(dag.nodes_iter()) # <-- SORTED
    for v in nbunch:     # process all vertices in G
        if v in explored:
            continue
        fringe = [v]   # nodes yet to look at
        while fringe:
            w = fringe[-1]  # depth first search
            if w in explored: # already looked down this branch
                fringe.pop()
                continue
            seen.add(w)     # mark as seen
            # Check successors for cycles and for new nodes
            new_nodes = []
            for n in sorted(six.iterkeys(dag[w])): # <-- SORTED
                if n not in explored:
                    if n in seen: #CYCLE !!
                        raise nx.NetworkXUnfeasible("Graph contains a cycle.")
                    new_nodes.append(n)
            if new_nodes:   # Add new_nodes to fringe
                fringe.extend(new_nodes)
            else:           # No new nodes so w is fully explored
                explored.add(w)
                order.append(w)
                fringe.pop()    # done considering this node
    # Nodes were appended post-order, so reverse for topological order.
    return list(reversed(order))
def plot(dag):
    """Draw the dag with matplotlib, using graphviz 'dot' for the layout.

    Node labels come from the NodeAttr.display attribute and edge labels
    from the 'weight' edge attribute.  Blocks until the window is closed.
    """
    from matplotlib import pyplot as plt
    node_labels = nx.get_node_attributes(dag, NodeAttr.display)
    edge_labels = nx.get_edge_attributes(dag, 'weight')
    # Layout is computed on an attribute-stripped copy of the graph.
    dag_copy = dag.to_directed()
    for _, d in dag_copy.nodes_iter(data=True):
        d.clear()
    # FIXME Why does this try to copy attributes that it cannot?
    positions = nx.graphviz_layout(dag_copy, prog='dot')
    nx.draw_networkx_edge_labels(dag, positions, edge_labels, rotate=False)
    nx.draw_networkx(dag, pos=positions, labels=node_labels, node_size=800)
    mng = plt.get_current_fig_manager()
    # TODO Post a wrapper to Code Review?
    #mng.full_screen_toggle()
    # Hard-coded window size; presumably tuned for the author's screen.
    mng.resize(1865,1025)
    plt.show()
################################################################################
# Respect children order: add_edge, remove_node, remove_edge, reverse_edge
################################################################################
def add_edge(dag, src, dest, attr_dict):
    """Add the edge and append src to dest's input-order array, so that
    children (input) ordering is preserved."""
    dag.add_edge(src, dest, attr_dict)
    input_order = dag.node[dest].setdefault(NodeAttr.input_ord, array('l'))
    input_order.append(src)
def reparent(dag, new_parent, node_to_del):
    # delete node_to_del and connect all children to new_parent, with edge dict;
    # update each child's input order array to contain the new parent
    out_edges = dag.edge[node_to_del]
    # In case we already deleted the new parent in a previous round; reparent
    # would insert it again and that node would have an empty dict
    assert new_parent in dag, '{}, {}'.format(new_parent, node_to_del)
    # Only source nodes may be reparented away (no incoming edges to fix up).
    assert_source(dag, node_to_del)
    remove_node(dag, node_to_del)
    # Re-attach every former child to new_parent, keeping the original edge
    # attribute dict, and patch the child's NodeAttr.input_ord array so the
    # recorded input ordering stays consistent.
    for child_id, edge_dict in six.iteritems(out_edges):
        dag.add_edge(new_parent, child_id, edge_dict)
        replace(dag.node[child_id][NodeAttr.input_ord], node_to_del, new_parent)
def remove_node(dag, n):
    """Remove node n; refuses nodes that still carry bounds information."""
    attrs = dag.node[n]
    assert NodeAttr.bounds not in attrs, attrs
    dag.remove_node(n)
def reverse_edge_to_get_def_var(dag, sum_node_id, var_node_id):
    """Rewrite the constraint so the var node becomes a defined variable.

    lambda * <var node> + <lin. comb.> + d = bounds
    node id:    n+1                     n
    becomes
    <var node>  = (-1/lambda) * ( <lin. comb.> + d - bounds)
    """
    # add the new reversed edge, with the weight inverted and negated
    e = dag[var_node_id][sum_node_id]
    e['weight'] = -1.0/e['weight']
    add_edge(dag, sum_node_id, var_node_id, e)
    # drop the old edge
    dag.remove_edge(var_node_id, sum_node_id)
    # update the sum node
    d = dag.node[sum_node_id]
    # d_term -= rhs; the caller guarantees l == u == rhs (see the assert in
    # assert_CSE_defining_constraints)
    d_term = d.get(NodeAttr.d_term, 0.0) - d[NodeAttr.bounds].l# l == u == rhs
    d[NodeAttr.d_term] = d_term #already asserted
    # The node is no longer a constraint, so its bounds are removed, and the
    # reversed var node is no longer an input of the sum node.
    del d[NodeAttr.bounds]
    d[NodeAttr.input_ord].remove(var_node_id)
def replace(arr, old_value, new_value):
    """In place: overwrite each occurrence of old_value with new_value."""
    for position, current in enumerate(arr):
        if current == old_value:
            arr[position] = new_value
def add_keep_smaller_value(mapping, key, value):
    """mapping[key] = value, BUT an already-present smaller value wins."""
    if key in mapping:
        mapping[key] = min(mapping[key], value)
    else:
        mapping[key] = value
def assert_source(dag, node_id):
    """Assert that node_id is a source node, with a helpful message."""
    message = 'node %d %s' % (node_id, dag.node[node_id])
    assert is_source(dag, node_id), message
def assert_CSE_defining_constraints(dag, con_ends, base_vars):
    """Validate that each node id in con_ends heads a CSE-defining constraint.

    A constraint (sum) node immediately followed by a defined var node; with
    an edge from the var node to the sum node; and the rhs of the constraint
    is a real number, not an interval.
    Basically: lambda * <var node> + <some sum> + d = bounds
      <N> c 20 145
      <145> b [0,0]: +
      <146> V 20
      <E> 145 146 -1

    Relies on the convention that the defined var node's id is exactly
    the sum node's id + 1.
    """
    for n in con_ends:
        # check the sum_node
        d = dag.node[n]
        assert is_sum_node(d), 'expected a sum_node, found: %s' % d
        assert NodeAttr.bounds in d,'Should have bounds, node: %s' % d
        lb, ub = d[NodeAttr.bounds]
        assert lb==ub,'rhs expected to be a constant, node: %s' % d
        # check the var_node (defined variable, at id n+1 by convention)
        assert n+1 in dag,'expected a var_node; not CSE defining constraint: %s'%d
        def_var = dag.node[n+1]
        assert is_var_node(def_var), 'expected a var_node, found: %s' % def_var
        assert n+1 not in base_vars,'expected a defined var, found %s' % def_var
        assert NodeAttr.bounds not in def_var, \
                                 'CSEs must not have bounds, found\n %s' % def_var
        # the defining edge var_node -> sum_node must exist
        assert n in dag.edge[n+1],'Nodes not connected:\n  %s  \n  %s'%(d,def_var)
def assert_vars_are_CSEs(dag, var_node_ids, var_num_def_node):
    """Assert every var node's var_num appears in the definition map."""
    for node_id in var_node_ids:
        num = dag.node[node_id][NodeAttr.var_num]
        assert num in var_num_def_node,'var_num: %d' % num
|
We offer a comprehensive service of sales process - from developing project to implementation. We also provide some sales functions separately - call center services, creating and updating the client base, processing incoming calls and messages, searching for customers, partners, dealers. We form sales departments and provide trainings.
Sales in Estonia, Latvia, Lithuania.
Sales and marketing support in Estonia.
By clicking on the button, you consent to the processing of personal data.Personal data provided by the Customer is processed in accordance with the GDPR. Data can be transferred to third parties only with the consent of the Client or in cases provided by law.
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Johann Prieur <johann.prieur@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from abstract import AbstractProxy
from papyon.gnet.io import TCPClient
from papyon.gnet.constants import *
from papyon.gnet.errors import *
from papyon.gnet.parser import DelimiterParser
import gobject
import logging
import struct
# Public API of this module.
__all__ = ['SOCKS4Proxy']

logger = logging.getLogger('papyon.proxy.SOCKS4')
class SOCKS4Proxy(AbstractProxy):
    """Proxy class used to communicate with SOCKS4 proxies.

    Tunnels the wrapped client connection through the SOCKS4 proxy
    described by ``proxy_infos`` using the CONNECT command.
    """
    # Note: the docstring above used to sit *after* the class attributes,
    # where it was a no-op string statement instead of a real docstring.

    # Protocol constants from the SOCKS4 specification.
    PROTOCOL_VERSION = 4
    CONNECT_COMMAND = 1

    def __init__(self, client, proxy_infos):
        assert(proxy_infos.type == 'socks4'), \
                "SOCKS4Proxy expects a socks4 proxy description"
        # TODO : implement version 4a of the protocol to allow proxy-side name resolution
        assert(client.domain == AF_INET), \
                "SOCKS4 CONNECT only handles INET address family"
        assert(client.type == SOCK_STREAM), \
                "SOCKS4 CONNECT only handles SOCK_STREAM"
        assert(client.status == IoStatus.CLOSED), \
                "SOCKS4Proxy expects a closed client"
        AbstractProxy.__init__(self, client, proxy_infos)

        self._transport = TCPClient(self._proxy.host, self._proxy.port)
        self._transport.connect("notify::status", self._on_transport_status)
        self._transport.connect("error", self._on_transport_error)

        # SOCKS4 replies are fixed-size 8 byte packets, hence the
        # numeric "delimiter".
        self._delimiter_parser = DelimiterParser(self._transport)
        self._delimiter_parser.delimiter = 8
        self._delimiter_parser.connect("received", self._on_proxy_response)

    # Opening state methods
    def _pre_open(self, io_object=None):
        AbstractProxy._pre_open(self)

    def _post_open(self):
        """Send the SOCKS4 CONNECT request once the transport is open."""
        AbstractProxy._post_open(self)
        user = self._proxy.user
        # VN, CD, DSTPORT
        proxy_protocol = struct.pack('!BBH', SOCKS4Proxy.PROTOCOL_VERSION,
                SOCKS4Proxy.CONNECT_COMMAND, self.port)
        # DSTIP: the four raw octets of the dotted-quad destination address
        # (assumes self.host is already an IPv4 literal -- see the
        # version-4a TODO in __init__).
        for part in self.host.split('.'):
            proxy_protocol += struct.pack('B', int(part))
        # USERID, NUL-terminated
        proxy_protocol += user
        proxy_protocol += struct.pack('B', 0)
        self._delimiter_parser.enable()
        self._transport.send(proxy_protocol)

    # Public API
    @property
    def protocol(self):
        return "SOCKS4"

    def open(self):
        """Open the connection."""
        if not self._configure():
            return
        self._pre_open()
        try:
            self._transport.open()
        except Exception:
            # Failures are reported through the transport's "error" signal
            # (see _on_transport_error), so opening errors are deliberately
            # swallowed here.  Narrowed from a bare ``except:``.
            pass

    def close(self):
        """Close the connection."""
        self._delimiter_parser.disable()
        self._client._proxy_closed()
        self._transport.close()

    def send(self, buffer, callback=None, errback=None):
        # BUG FIX: errback used to be hard-coded to None, silently dropping
        # the caller's error callback; forward it instead.
        self._client.send(buffer, callback, errback=errback)

    # Callbacks
    def _on_transport_status(self, transport, param):
        if transport.status == IoStatus.OPEN:
            # Transport is up: send the CONNECT request.
            self._post_open()
        elif transport.status == IoStatus.OPENING:
            self._client._proxy_opening(self._transport._transport)
            self._status = transport.status
        else:
            self._status = transport.status

    def _on_transport_error(self, transport, error):
        self.close()
        self.emit("error", error)

    def _on_proxy_response(self, parser, response):
        """Handle the 8-byte SOCKS4 reply packet."""
        version, response_code = struct.unpack('BB', response[0:2])
        # Per the SOCKS4 spec the reply version byte (VN) is 0.
        assert(version == 0)
        if self.status == IoStatus.OPENING:
            if response_code == 90:  # 90 = request granted
                self._delimiter_parser.disable()
                self._transport.disable()
                self._client._proxy_open()
            else:
                logger.error("Connection failed (%s)" % response_code)
                self.close()
                self.emit("error", SOCKS4Error(self, response_code))
            return False
gobject.type_register(SOCKS4Proxy)
|
The eight pleaded not guilty at arraignments Friday. Preliminary hearings were scheduled for 8:30 a.m. July 19.
The bond was lowered from $35,000 to $20,000 for all eight except Michael Johnson. Johnson refused to comply with the judge's request to state his real name.
The Hyatt hotel on West High Street could not be reached for comment.
|
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2021 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Receivers for Zenodo Communities."""
from __future__ import absolute_import, print_function
from .tasks import dispatch_webhook
def send_inclusion_request_webhook(sender, request=None, **kwargs):
    """Signal receiver to send webhooks after a community inclusion request."""
    # Fire the webhook asynchronously through the Celery task.
    payload = dict(
        community_id=str(request.id_community),
        record_id=str(request.id_record),
        event_type='community.records.inclusion',
    )
    dispatch_webhook.delay(**payload)
def send_record_accepted_webhook(
        sender, record=None, community=None, **kwargs):
    """Signal receiver to send webhooks on a record accepted in a community."""
    # Fire the webhook asynchronously through the Celery task.
    payload = dict(
        community_id=str(community.id),
        record_id=str(record.id),
        event_type='community.records.addition',
    )
    dispatch_webhook.delay(**payload)
|
In my artwork there is no judgment, no denunciation, only the picture itself. If I could sum up the common theme of my photos, it would be about emptiness, about silence. My pictures try to extract from the mundane urban landscape a form of estheticism. Where most people only pass through, I stop and look for some form of poetic beauty. I like repetition, I like series, and I like driving around.
Emmanuel Monzon, French photographer and plastic artist .Living in Seattle.
He has exhibited his works mainly in France (Paris), Singapore, USA (Los Angeles, New York, Seattle, Portland), China (Hong Kong), and has also participated in several arts fairs and collective exhibitions. He graduated from the Beaux Arts in Paris, with honors (Vladimir Velikovick), and also holds a degree in Visual Arts.
|
import random
import tkinter
from tkinter import ttk
from tkinter import messagebox
class App(object):
    """Demo window that starts with a random ttk theme and lets the user
    pick another one from a combobox (apply via Enter or the OK button)."""

    def __init__(self):
        self.root = tkinter.Tk()
        self.style = ttk.Style()
        available_themes = self.style.theme_names()
        # Start with a random theme and show its name in the title bar.
        random_theme = random.choice(available_themes)
        self.style.theme_use(random_theme)
        self.root.title(random_theme)

        frm = ttk.Frame(self.root)
        frm.pack(expand=True, fill='both')
        # create a Combobox with themes to choose from
        self.combo = ttk.Combobox(frm, values=available_themes)
        self.combo.pack(padx=32, pady=8)
        # make the Enter key change the style
        self.combo.bind('<Return>', self.change_style)
        # make a Button to change the style
        button = ttk.Button(frm, text='OK')
        button['command'] = self.change_style
        button.pack(pady=8)
        button2 = ttk.Button(frm, text='Test')
        button2['command'] = self.open_dialog
        button2.pack(pady=8)

    def change_style(self, event=None):
        """set the Style to the content of the Combobox"""
        content = self.combo.get()
        try:
            self.style.theme_use(content)
        except tkinter.TclError as err:
            messagebox.showerror('Error', err)
        else:
            self.root.title(content)

    def open_dialog(self, event=None):
        """Show a file-open dialog and print the chosen filename."""
        # BUG FIX: ``tkinter.filedialog`` is a submodule and is not
        # available as an attribute of ``tkinter`` until it is explicitly
        # imported; the original call raised AttributeError at runtime.
        from tkinter import filedialog
        print(filedialog.askopenfilename())
# NOTE(review): runs at import time; consider wrapping in an
# ``if __name__ == '__main__':`` guard.
app = App()
app.root.mainloop()
|
Junior A’s started off with a strong 14-0 win over Trinity. This was followed by a nail biting draw against Merbein. Our next match against St Josephs was a disappointing 1 goal loss but our last game was the best of the day, defeating the eventual winners Red Cliffs 12 goals to 8. Overall we came second for the day, which was an excellent effort. Best players on the day were Brooke Hards who intercepted countless balls and was the main driving force in the mid court, Shakara Delcastegne was very accurate in goals and the defence combo of Louisa Stephens and Paige McLean made it difficult for any of the opposing teams to penetrate. Well done girls and hope we go one better next year.
The girls played well and were willing to play in any position allowing for stronger combinations, however were beaten by all, except for Merbein, where the girls won by 2 goals. Thanks to the girls who offered to score on their game breaks, much appreciated. Claudia Fangaloka, Eve Kellet and Gemma Moiler were most consistent across the day.
The Inter A’s played four games coming away with one win, a draw and two losses. All girls showed good determination and sportsmanship throughout the entire day and put up a good fight against tough oppositions. Kendall McLean was very strong in defence, with Courtney Ransome driving it through the mid court. Consistent goaling by Courtney Orwell and Megan Hammond ensured the girls were competitive in all matches.
We started off on the back foot, losing a couple of players leading up to the day due to injuries; however, we were able to put 8 players on to the court on the day. Unfortunately we didn't come away with any wins on the day, but the girls did enjoy themselves. Thank you to Amelia Hill, who was my assistant and official scorer for the day. Best Players: Maddison Sparrow and Tarsha Hawley.
|
import numpy as np
from keras.preprocessing import sequence
from keras.optimizers import SGD, RMSprop, Adagrad
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Embedding
from keras.layers.recurrent import LSTM, GRU
from keras.datasets import imdb
'''
Train a LSTM on the IMDB sentiment classification task.
The dataset is actually too small for LSTM to be of any advantage
compared to simpler, much faster methods such as TF-IDF+LogReg.
Notes:
- RNNs are tricky. Choice of batch size is important,
choice of loss and optimizer is critical, etc.
Most configurations won't converge.
- LSTM loss decrease during training can be quite different
from what you see with CNNs/MLPs/etc. It's more or less a sigmoid
instead of an inverse exponential.
'''
# NOTE(review): Python 2 print statements and an early Keras API (layers
# take explicit input/output sizes, nb_epoch/nb_words keyword names) --
# this script targets Python 2 with an old Keras release.
# Vocabulary cap: only the max_features most frequent words are kept.
max_features=20000
maxlen = 100 # cut texts after this number of words (among top max_features most common words)
batch_size = 16
print "Loading data..."
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features, test_split=0.2)
print len(X_train), 'train sequences'
print len(X_test), 'test sequences'
# Pad/truncate every review to exactly maxlen word indices.
print "Pad sequences (samples x time)"
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print 'X_train shape:', X_train.shape
print 'X_test shape:', X_test.shape
print 'Build model...'
model = Sequential()
# 256-dimensional embeddings for the max_features-word vocabulary.
model.add(Embedding(max_features, 256))
model.add(LSTM(256, 128)) # try using a GRU instead, for fun
model.add(Dropout(0.5))
# Single sigmoid output unit for binary sentiment classification.
model.add(Dense(128, 1))
model.add(Activation('sigmoid'))
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
print "Train..."
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=10, verbose=1)
score = model.evaluate(X_test, y_test, batch_size=batch_size)
print 'Test score:', score
# Accuracy computed from hard class predictions.
classes = model.predict_classes(X_test, batch_size=batch_size)
acc = np_utils.accuracy(classes, y_test)
print 'Test accuracy:', acc
|
Have you ever wanted to bet on the horses but have been put off because you didn’t understand the betting terms? Or do you simply enjoy watching horse racing, but find it difficult to understand due to the terminology? Well, if so, your prayers have been answered, with this brand new, up to date guide which explains all the jargon used in modern day horse racing and betting.
Abandoned – A race meeting is known as being ‘abandoned’ when it is called off as a result of bad weather, or any other reason which would mean racing cannot take place. In the event of an abandonment, all bets are refunded.
Accumulator – A type of bet which requires four or more separate outcomes to occur for a return. If any of the selections fail then the bet loses. Also known as a combination bet.
Allowance – Novice jockeys are often allowed a weight allocation in order to make up for their inexperience in comparison to their opposition. This additional weight is known as an allowance.
Also-ran – A horse that doesn’t place and finishes much further back than those in the prize money.
Antepost Betting – For high profile races, such as the Grand National, punters are allowed to place bets well in advance of the race, often several months before. Odds available at this time are often much higher than they would normally be, as the final line up of the race has not yet been confirmed, so there is a chance that the horse may not compete in the race. That said, Non-Runner No Bet offers at the Cheltenham Festival can help guard against that.
Back Straight – Also known as the backstretch, the back straight is often parallel to the home straight and is the straight on the far side of the course, away from the grandstand.
Banker – Similar to a favourite, although a banker is a horse that is heavily expected to win.
Bismarck – A horse that is priced as a favourite but is widely expected to lose.
Bookmaker – Known as a bookie or a layer, a bookmaker is somebody licenced to accept bets on horse races. Take a look at the best racing betting sites around!
Boxed In – A horse that is surrounded by other horses in a race, making it impossible for it to overtake.
Broke Down – Also known as ‘pulled up’. When a horse is injured or is unable to finish a race that they have started.
Bumper – Otherwise referred to as a National Hunt Flat Race, bumpers are flat races run in the style of jumps races to prepare inexperienced jumpers before they are ready to jump over fences.
Checked – A term used to describe the event of another horse being temporarily blocked in progression by another horse.
Chute – A part of the racecourse that has been lengthened in order to create a straight start for races. Often used for shorter, sprint races.
Classic – A group of five major flat races in the UK, exclusively for three year olds. The five races are: the 2,000 Guineas, the 1,000 Guineas, the Oaks, the Derby and the St Leger and we’ve got betting tips and offers for them all!
Colours – The colours of the silks worn by the jockey, which is referenced alongside the name of the horse in the race card.
Colt – A male horse that is younger than five years old.
Course Specialist – A horse that has experience of a particular track and often has history of winning important races there.
Dam – The mother of a horse.
Dark Horse – A horse that has a lot of potential to be successful in a race, but has not yet shown this level of performance. Selections known as ‘dark horses’ are often tipped to win, despite having long odds.
Dead-heat – When two horses finish at exactly the same time and cannot be separated by a photo finish, either for first place or another finishing position. If a winning bet is placed on one of the selections involved, the stake will be halved and the return will be determined based on the new stake.
Decimal Betting Odds – Odds for a horse expressed in terms of a whole number, which represents the total return. For example, 6.0 represents a return of £6 from a £1 bet and is the equivalent of 5/1 in fractional odds.
Double – An accumulative bet that requires two separate selections to win for the bet to be successful.
Draw – A term used to describe the starting position of a horse in the stalls during flat racing. Different courses are more favourable to particular draws, with an advantageous position described as being ‘well-drawn’.
Drifter – A horse whose odds increase steadily prior to the start of a race as a result of not being backed by punters.
Each-way – A popular ‘insurance bet’ in horse racing. Half the stake placed goes towards the horse winning the race, whilst the other half of the stake goes on the horse to place. If the horse wins the race, the bet is settled with half the stake being returned at the full odds, with the other half returned at a predetermined fraction of the full odds. If the horse does not win the race, but manages to place, half the stake will be returned at this fraction, whilst the other half of the stake will be lost.
Evens – A horse with odds of 1/1. A successful winner at evens is essentially doubling the stake. Otherwise known as even money.
Filly – A young female horse, generally aged four years old and below.
Flat Racing – Horse racing run over a flat surface without jumps. Traditionally run during the summer months, but more recently run throughout the year due to the introduction of all weather courses.
Forecast Bet – A bet which requires the punter to predict both the winner and the runner-up in a particular race. There are two separate types of forecast bet. A straight forecast is predicting the winner and the runner-up in the precise order, whereas a dual forecast is correctly predicting the winner and the runner-up in any order.
Form – The recent racing record of a horse listed on the race card, which is identifiable by a series of numbers. A ‘1’ denotes first place, ‘2’ equals second place and so on.
Fractional Betting Odds – Odds for a horse expressed in the terms of a fraction. For example, odds of 5/1 indicate a stake of £1 will have potential winnings of £5, generating a £6 return.
Furlong – A unit of measurement used regularly throughout horse racing. There are eight furlongs in one mile and a furlong is just over 201 metres.
Going – The condition of the racecourse on the day of a meeting, as determined by an official steward prior to racing. Going can range from ‘hard’ to ‘heavy’, although hard surfaces are rarely used as it is dangerous to both the horses and the jockeys.
Grade 1 – The most prestigious category of jumps racing in the UK, which includes all championship races such as the Cheltenham Gold Cup.
Group 1 – The most prestigious category of flat racing in the UK, which includes all the Classics, among other traditional races.
Guineas – The currency that horses were traditionally sold in. In modern day currency a guinea is £1.05 and some companies still use it to this day. Two of the Classics – the 1,000 Guineas and the 2,000 Guineas – are named after their original prize fund.
Handicap – In order to make a race as even as possible, each horse is allocated a specific amount of weight to carry, based on a predetermined handicap rating.
Home Straight – The straight on the course in front of the grandstand, leading into the winning post.
Hurdles – A branch of National Hunt racing that requires horses to jump over fences. The fences are smaller than those used in steeplechase race and hurdles races are generally aimed at more inexperienced horses, to get them used to jumping over obstacles.
In-play Betting – Betting on a horse to win or place whilst the race is taking place. Odds change dramatically as horses change position on the field. Also known as in-running betting or live betting.
Jackpot – A type of tote bet which requires the punter to predict the winner of the first six races at a particular race meeting.
Left-handed Track – A racecourse that is run in an anti-clockwise motion, with the rails on the left hand side of the jockeys.
Length – A measurement used to determine the distance between horses in a race. A length is the distance from a horses head to its tail.
Level Weights – A race which requires all horses to carry the same amount of weight.
Listed Race – A prestigious class of race but one which is not as highly regarded as those of Grade or Group standing.
Long Shot – A horse with very high odds that has an outside chance of success.
Lucky 15 – A type of accumulator that consists of 15 separate bets based on four horses. The 15 bets are made up of four singles, six doubles, four trebles and a four-fold accumulator. This bet has less risk than a regular accumulator as there is still a chance of success even if one or more selections lose. The Lucky 15 is said to have been invented by Betfred. Take a look at our weekly Lucky 15 betting tips.
Maiden – A horse that has so far been unsuccessful in efforts to win a race. Specific races cater for these horses, known as maiden races.
NAP – The best tip of a day from a tipster and an almost certain winner, similar to a banker. NAP is an abbreviation from a board game called Napoleon.
National Hunt – Horse racing which involves horses having to jump over fences, ditches and obstacles. Split into two sections in the UK: Hurdles and Steeplechase.
Non-runner – A horse that has been scheduled to take part in a race but withdraws before the race takes place, potentially affecting the odds of the other horses.
Novice – A young horse that has already won a race.
Odds-on – Odds that are very likely to be successful. The potential winnings are not as high as the amount staked, that is to say a horse whose odds are lower than evens.
Pacemaker – A horse that is owned or trained by the same people as another horse in the race, that is entered into a particular race purely to set the pace of the other horse.
Patent – A type of accumulator that consists of seven separate bets, based on three different selections. The bets involve three singles, three doubles and one triple. Only one horse needs to be successful in order to generate any sort of return.
Penalty – Additional weight added to a horse based on its handicap. A penalty often occurs when a horse has won a different race after being entered so there has not been an opportunity to re-evaluate the handicap of the horse.
Photo Finish – If the winner of a race is too close to be determined at pace by the naked eye, a judge will analyse a photograph of the finish line to work out the final result of a race.
Placepot – Similar to the jackpot, a placepot is a type of tote bet that requires a punter to predict a horse to place in the first six races of a particular meeting.
Price – The odds offered on the chances of a horse winning.
Pulling – The term used to describe the stage early on in a race during which a horse is distracted and unfocused, so subsequently pulls against the bridle.
Punter – A member of the public that places a bet.
Race Card – A programme issued for each race meeting, with a list of each race outlining each horse, the jockey and their form.
Return – The total amount of money given to the punter in the result of a successful bet. The amount of money returned is the stake and winnings combined.
Right-handed Track – A racecourse that is run in a clockwise motion, with the rails on the right hand side of the jockey.
Short-price – Very low odds for a horse, meaning any return will generate little profit.
Single – One bet that is settled simply on the odds provided.
Sire – The father of a horse.
Sprint Race – Shorter flat races run at a faster pace, generally over distances of between 5f and 8f.
Starting Price – The last price available for a horse before a race starts and the odds that bets are settled at, unless a punter takes specific odds when backing a horse. Generally shortened to SP. Best Odds Guaranteed is a great offer that helps you avoid deciding which odds to plump for. Some bookies now even offer BOG Plus!
Steeplechase – A branch of National Hunt racing that requires horses to jump over fences and water jumps, as well as various other obstacles.
Steward’s Enquiry – Often shortened to just ‘enquiry’, a steward’s enquiry is a hearing which takes place after a race has finished to make sure no racing rules have been broken during the race.
Stud Farm – Sometimes located at racecourses themselves, a stud farm is a centre where horses are bred.
Tic-tac – A branch of sign language used to communicate changing odds to the bookmakers at a racecourse. The use of the sign language is seldom used at modern day racing due to the prominence of modern day technology.
Tote Bet – Tote bets are pool bets where the return is not fixed by predetermined odds but rather by the total amount of money bet on the race or races in question and the number of people who backed the winner, with a cut taken to cover expenses, taxes and the operator’s profit margin.
Treble – An accumulative bet that requires three separate selections to win in order to generate a return.
Triple Crown – In the UK, winning the 2,000 Guineas, the Derby and the St Leger as a colt in the same season is known as the Triple Crown. The Triple Crown for fillies consists of 1,000 Guineas, the Oaks and the St Leger. Winning all three is a rare and remarkable achievement, with Nijinsky the last horse to achieve the feat in 1970, though Camelot was on for the treble in 2012 but finished second in the St Leger.
Trixie – A type of accumulator consisting of four separate bets based on three horses. The bet involves three doubles and one treble. At least two selections must win in order to generate any return.
Unfancied – A horse with long odds that has not been backed well and is not expected to win.
Walkover – A race that, for whatever reason, only consists of one horse.
Yankee – A type of accumulator involving 11 bets, based on four separate selections. The bet involves six doubles, four trebles and one four-fold accumulator. A minimum of two selections must win in order to achieve any return.
|
import r2pipe
import triton
import struct
class R2Plugin(object):
    """Thin wrapper around an r2pipe session.

    Reads the command line injected by pimp_wrapper.py, exposes register /
    memory / flag helpers, and keeps a registry of subcommand handlers
    populated through the ``r2cmd`` decorator.
    """

    def __init__(self, name, privdata):
        self.privdata = privdata
        self.name = name
        # _r2_plugin_args is injected by pimp_wrapper.py before this module
        # is executed; element 0 is the subcommand, the rest its arguments.
        self.command = _r2_plugin_args[0]
        self.args = _r2_plugin_args[1:]
        self.r2 = r2pipe.open()
        bininfo = self.r2.cmdj("ij")["bin"]
        self.arch = bininfo["arch"]
        self.bits = bininfo["bits"]
        self.regs = self.r2.cmd("drl").split()
        self.switch_flagspace(name)
        self.commands = {}

    def get_reg(self, reg):
        """Return the current value of register *reg* as an int (r2 `dr`)."""
        res = int(self.r2.cmd("dr {}".format(reg)), 16)
        return res

    def get_regs(self):
        """Return a dict of every register name -> concrete value."""
        regs = {}
        for reg in self.regs:
            regs[reg] = self.get_reg(reg)
        return regs

    def get_maps(self):
        """Return the debuggee's memory maps as parsed JSON (r2 `dmj`)."""
        return self.r2.cmdj("dmj")

    def read_mem(self, address, size):
        """Read *size* bytes at *address* from the debuggee (r2 `p8`)."""
        hexdata = self.r2.cmd("p8 {} @ 0x{:X}".format(size, address))
        return hexdata.decode('hex')

    def write_mem(self, address, data):
        """Write the byte string *data* at *address* (r2 `wx`)."""
        self.r2.cmd("wx {} @ 0x{:X}".format(data.encode("hex"), address))

    def seek(self, addr=None):
        """Seek r2 to *addr* if given, then return the current seek.

        NOTE(review): ``if addr:`` means seeking to address 0 is a no-op.
        """
        if addr:
            self.r2.cmd("s 0x{:x}".format(addr))
        return int(self.r2.cmd("s"), 16)

    def switch_flagspace(self, name):
        """Select the r2 flagspace *name* so our flags stay namespaced."""
        self.r2.cmd("fs {}".format(name))

    def set_flag(self, section, name, size, address):
        """Create flag ``<plugin>.<section>.<name>`` of *size* at *address*."""
        name = "{}.{}.{}".format(self.name, section, name)
        self.r2.cmd("f {} {} @ {}".format(name, size, address))

    def set_comment(self, comment, address=None):
        """Attach *comment* at *address* (or at the current seek)."""
        if address:
            self.r2.cmd("CC {} @ 0x{:x}".format(comment, address))
        else:
            self.r2.cmd("CC {}".format(comment))

    def r2cmd(self, name):
        """Decorator: register the wrapped function as handler for *name*.

        Fixes two defects of the original implementation: the inner
        decorator now returns the function (previously decorated
        module-level names were rebound to None), and it no longer
        re-reads the module-global _r2_plugin_args at decoration time
        (those attributes are already set in __init__).
        """
        def dec(func):
            func.command = name
            self.commands[name] = func
            return func
        return dec

    def handle(self):
        """Dispatch the parsed subcommand to its registered handler."""
        if self.command in self.commands:
            return self.commands[self.command](self.privdata, self.args)
        print("[*] Unknown command {}".format(self.command))

    def integer(self, s):
        """Parse *s* as a register name, hex literal, or decimal int."""
        regs = self.get_regs()
        if s in regs:
            v = regs[s]
        elif s.startswith("0x"):
            v = int(s, 16)
        else:
            v = int(s)
        return v
# Map r2's (arch name, bit width) pair to the Triton architecture constant.
# Only x86/x86-64 are supported; Pimp.__init__ raises for anything else.
tritonarch = {
    "x86": {
        32: triton.ARCH.X86,
        64: triton.ARCH.X86_64
    }
}
class Pimp(object):
    """Bridge between radare2 (via R2Plugin) and the Triton symbolic engine.

    Keeps a snapshot of r2's registers, a lazily-populated memory cache
    (module-level global ``cache``), and a map of symbolized input bytes,
    so execution can be replayed and constrained repeatedly.
    """
    def __init__(self, context=None):
        # `context` is currently unused -- kept for interface compatibility.
        self.r2p = R2Plugin("pimp", self)
        arch = self.r2p.arch
        bits = self.r2p.bits
        self.comments = {}  # address -> comment text pushed back into r2
        self.arch = tritonarch[arch][bits]
        self.inputs = {}    # address -> Triton symbolic variable (one per byte)
        self.regs = {}      # register name -> concrete value snapshot from r2
        triton.setArchitecture(self.arch)
        triton.setAstRepresentationMode(triton.AST_REPRESENTATION.PYTHON)
        # Hack in order to be able to get triton register ids by name
        self.triton_regs = {}
        for r in triton.getAllRegisters():
            self.triton_regs[r.getName()] = r
        if self.arch == triton.ARCH.X86:
            self.pcreg = triton.REG.EIP
        elif self.arch == triton.ARCH.X86_64:
            self.pcreg = triton.REG.RIP
        else:
            raise(ValueError("Architecture not implemented"))
        # memoryCaching is a staticmethod, so this attaches the attribute to
        # the underlying function object; the callback reads it back as
        # pimp.memoryCaching.memsolver to fetch memory from r2.
        setattr(self.memoryCaching, "memsolver", self.r2p)
    def reset(self):
        """Re-arm Triton: wipe engines, re-install callbacks, then replay
        the register snapshot, the memory cache, and the symbolized inputs."""
        triton.resetEngines()
        triton.clearPathConstraints()
        triton.setArchitecture(self.arch)
        triton.enableMode(triton.MODE.ALIGNED_MEMORY, True)
        triton.enableMode(triton.MODE.ONLY_ON_SYMBOLIZED, True)
        triton.addCallback(self.memoryCaching,
                           triton.CALLBACK.GET_CONCRETE_MEMORY_VALUE)
        triton.addCallback(self.constantFolding,
                           triton.CALLBACK.SYMBOLIC_SIMPLIFICATION)
        # Restore the register values captured from r2 by `pimp init`.
        for r in self.regs:
            if r in self.triton_regs:
                triton.setConcreteRegisterValue(
                    triton.Register(self.triton_regs[r], self.regs[r])
                )
        # Replay every cached memory region into Triton's concrete memory.
        for m in cache:
            triton.setConcreteMemoryAreaValue(m['start'], bytearray(m["data"]))
        # Re-create a fresh symbolic variable for every declared input byte.
        for address in self.inputs:
            self.inputs[address] = triton.convertMemoryToSymbolicVariable(
                triton.MemoryAccess(
                    address,
                    triton.CPUSIZE.BYTE
                )
            )
    # Triton does not handle class method callbacks, use staticmethod.
    @staticmethod
    def memoryCaching(mem):
        """Triton callback: on a concrete read of unmapped memory, pull the
        bytes from r2 (via the `memsolver` attribute attached in __init__)
        and mirror them into both Triton and the global cache."""
        addr = mem.getAddress()
        size = mem.getSize()
        mapped = triton.isMemoryMapped(addr)
        if not mapped:
            dump = pimp.memoryCaching.memsolver.read_mem(addr, size)
            triton.setConcreteMemoryAreaValue(addr, bytearray(dump))
            cache.append({"start": addr, "data": bytearray(dump)})
        return
    @staticmethod
    def constantFolding(node):
        """Triton callback: collapse fully-concrete AST nodes to a constant
        bitvector of the same width; symbolized nodes pass through."""
        if node.isSymbolized():
            return node
        return triton.ast.bv(node.evaluate(), node.getBitvectorSize())
    def get_current_pc(self):
        """Return the concrete value of the program counter register."""
        return triton.getConcreteRegisterValue(self.pcreg)
    def disassemble_inst(self, pc=None):
        """Disassemble (without executing) the instruction at *pc*, or at
        the current program counter when *pc* is not given.

        NOTE(review): ``if pc:`` means pc=0 falls back to the current pc.
        """
        _pc = self.get_current_pc()
        if pc:
            _pc = pc
        # 16 bytes is enough for the longest x86 instruction (15 bytes).
        opcodes = triton.getConcreteMemoryAreaValue(_pc, 16)
        # Create the Triton instruction
        inst = triton.Instruction()
        inst.setOpcodes(opcodes)
        inst.setAddress(_pc)
        # disassemble instruction
        triton.disassembly(inst)
        return inst
    def inst_iter(self, pc=None):
        """Generator: execute one instruction at a time until HLT.

        NOTE(review): the *pc* parameter is accepted but never used.
        """
        while True:
            inst = self.process_inst()
            if inst.getType() == triton.OPCODE.HLT:
                break
            yield inst
    def process_inst(self, pc=None):
        """Execute (symbolically + concretely) the instruction at *pc*, or
        at the current program counter when *pc* is not given."""
        _pc = self.get_current_pc()
        if pc:
            _pc = pc
        opcodes = triton.getConcreteMemoryAreaValue(_pc, 16)
        # Create the Triton instruction
        inst = triton.Instruction()
        inst.setOpcodes(opcodes)
        inst.setAddress(_pc)
        # execute instruction
        triton.processing(inst)
        return inst
    def add_input(self, addr, size):
        """Declare *size* bytes starting at *addr* as symbolic input,
        one symbolic variable per byte."""
        for offset in xrange(size):
            self.inputs[addr + offset] = triton.convertMemoryToSymbolicVariable(
                triton.MemoryAccess(
                    addr + offset,
                    triton.CPUSIZE.BYTE
                )
            )
    def is_conditional(self, inst):
        """True when *inst* is a conditional jump (unconditional JMP excluded)."""
        return inst.getType() in (triton.OPCODE.JAE, triton.OPCODE.JA, triton.OPCODE.JBE, triton.OPCODE.JB, triton.OPCODE.JCXZ, triton.OPCODE.JECXZ, triton.OPCODE.JE, triton.OPCODE.JGE, triton.OPCODE.JG, triton.OPCODE.JLE, triton.OPCODE.JL, triton.OPCODE.JNE, triton.OPCODE.JNO, triton.OPCODE.JNP, triton.OPCODE.JNS, triton.OPCODE.JO, triton.OPCODE.JP, triton.OPCODE.JS)
    def symulate(self, stop=None, stop_on_sj=False):
        """Symbolically execute from the current pc.

        Stops at address *stop*, at HLT, or (when *stop_on_sj* is set) at
        any symbolized non-JMP control-flow instruction; returns the
        address where execution stopped. Also records human-readable
        notes about symbolized memory/register reads in self.comments.
        """
        while True:
            inst = self.disassemble_inst()
            print inst
            if inst.getAddress() == stop or inst.getType() == triton.OPCODE.HLT:
                return inst.getAddress()
            inst = self.process_inst()
            isSymbolized = inst.isSymbolized()
            if isSymbolized:
                # Note which symbolized input bytes this instruction loaded.
                for access, ast in inst.getLoadAccess():
                    if(access.getAddress() in self.inputs):
                        self.comments[inst.getAddress()] = "symbolized memory: 0x{:x}".format(access.getAddress())
                # Note which symbolized registers it read (overwrites the
                # memory note above for the same address, if both apply).
                rr = inst.getReadRegisters()
                if rr:
                    reglist = []
                    for r, ast in rr:
                        if ast.isSymbolized():
                            reglist.append(r.getName())
                    self.comments[inst.getAddress()] = "symbolized regs: {}".format(" ,".join(reglist))
            if (stop_on_sj == True and isSymbolized and inst.isControlFlow() and (inst.getType() != triton.OPCODE.JMP)):
                return inst.getAddress()
    def process_constraint(self, cstr):
        """Ask the solver for a model satisfying *cstr* and patch the
        model's input bytes into the global memory cache.

        Returns False when the constraint is unsatisfiable.
        """
        global cache
        # request a model verifying cstr
        model = triton.getModel(cstr)
        if not model:
            return False
        # apply model to memory cache
        for m in model:
            for address in self.inputs:
                if model[m].getId() == self.inputs[address].getId():
                    nCache = []
                    for c in cache:
                        if c["start"] <= address < c["start"] + len(c["data"]):
                            c["data"][address-c["start"]] = model[m].getValue()
                        nCache.append(c)
                    cache = nCache
        return True
    def build_jmp_constraint(self, pc=None, take=True):
        """Build an AST constraint that forces the conditional jump at *pc*
        to be taken (take=True) or fall through (take=False), conjoined
        with every previously-taken branch constraint on the path."""
        _pc = self.get_current_pc()
        if pc:
            _pc = pc
        inst = self.disassemble_inst(_pc)
        if take:
            target = inst.getFirstOperand().getValue()
        else:
            target = _pc + inst.getSize()
        pco = triton.getPathConstraints()
        # Start from a trivially-true constraint and AND the branches in.
        cstr = triton.ast.equal(triton.ast.bvtrue(), triton.ast.bvtrue())
        # NOTE(review): the loop variable `pc` shadows the parameter; the
        # parameter's value was already copied into _pc above.
        for pc in pco:
            if pc.isMultipleBranches():
                branches = pc.getBranchConstraints()
                for branch in branches:
                    taken = branch["isTaken"]
                    src = branch["srcAddr"]
                    dst = branch["dstAddr"]
                    bcstr = branch["constraint"]
                    isPreviousBranchConstraint = (src != _pc) and taken
                    isBranchToTake = src == _pc and dst == target
                    if isPreviousBranchConstraint or isBranchToTake:
                        cstr = triton.ast.land(cstr, bcstr)
        cstr = triton.ast.assert_(cstr)
        return cstr
    @staticmethod
    def isMapped(addr):
        """True if *addr* falls inside any region of the global cache."""
        for m in cache:
            if m["start"] <= addr < m["start"] + len(m["data"]):
                return True
        return False
# _r2_plugin_args is injected as a single raw string by pimp_wrapper.py;
# split it into [command, arg, ...]. A NameError means the file was run
# directly rather than through the wrapper.
try:
    _r2_plugin_args = _r2_plugin_args.split()
except NameError as e:
    print "[*] pimp.py cannot be called directly, use pimp_wrapper.py"
    exit()
# The wrapper re-executes this file for every command, so the memory cache
# and the Pimp instance are persisted across invocations through globals().
if "cache" not in globals():
    cache = []
if "pimp" not in globals():
    pimp = Pimp()
def get_byte(address):
    """Return the cached byte at *address* packed as a 1-byte string.

    Falls through (implicitly returning None) when no cached memory
    region covers the address.
    """
    for region in cache:
        start = region["start"]
        data = region["data"]
        if start <= address < start + len(data):
            return struct.pack("B", data[address - start])
# initialise the Triton context with current r2 state (registers)
@pimp.r2p.r2cmd("init")
def cmd_init(p, a):
    """Snapshot r2's current register values, then reset the Triton engines
    so emulation starts from that state."""
    p.regs = p.r2p.get_regs()
    p.reset()
# continue until address
@pimp.r2p.r2cmd("dcu")
def cmd_until(p, a):
    """Emulate until the address given as the first argument is reached,
    then move r2's seek there."""
    target = p.r2p.integer(a[0])
    addr = p.symulate(stop=target, stop_on_sj=True)
    # symulate() can legitimately stop before the target (at a symbolized
    # jump or a HLT). The original `assert(addr==target)` would die with a
    # bare AssertionError -- and vanish entirely under `python -O` -- so
    # report the early stop explicitly instead, matching the error style
    # of the other commands.
    if addr != target:
        print("[*] stopped at 0x{:x} before reaching 0x{:x}".format(addr, target))
        return
    p.r2p.seek(addr)
    return
# continue until symbolized jump
@pimp.r2p.r2cmd("dcusj")
def cmd_until_symjump(p, a):
    """Emulate until a conditional jump that depends on symbolized input,
    then seek r2 there and flag the stop address."""
    addr = p.symulate(stop_on_sj=True)
    # Push the symbolization notes collected during emulation back into r2.
    for caddr in p.comments:
        p.r2p.set_comment(p.comments[caddr], caddr)
    p.r2p.seek(addr)
    p.r2p.set_flag("regs", p.pcreg.getName(), 1, addr)
# go to current jump target
@pimp.r2p.r2cmd("take")
def cmd_take_symjump(p, a):
    """Solve the path constraints so the conditional jump at the current
    seek is TAKEN, patch the solved input into the cache, and re-execute
    from scratch until the jump target is reached."""
    addr = p.r2p.seek()
    inst = p.disassemble_inst(addr)
    if not p.is_conditional(inst):
        print "error: invalid instruction type"
        return
    target = inst.getFirstOperand().getValue()
    cstr = p.build_jmp_constraint(pc=addr)
    if not p.process_constraint(cstr):
        print "error: could not resolve constraint"
        return
    # reset and execute until target is reached
    p.reset()
    for inst in p.inst_iter():
        if inst.getAddress() == target:
            p.r2p.seek(target)
            p.r2p.set_flag("regs", p.pcreg.getName(), 1, target)
            return
    print "error: end of execution"
# avoid current jump target
@pimp.r2p.r2cmd("avoid")
def cmd_avoid_symjump(p, a):
    """Solve the path constraints so the conditional jump at the current
    seek FALLS THROUGH, patch the solved input into the cache, and
    re-execute from scratch until the fall-through address is reached."""
    addr = p.r2p.seek()
    inst = p.disassemble_inst(addr)
    if not p.is_conditional(inst):
        print "error: invalid instruction type"
        return
    # Fall-through target is the next instruction.
    target = inst.getAddress() + inst.getSize()
    cstr = p.build_jmp_constraint(pc=addr, take=False)
    if not p.process_constraint(cstr):
        print "error: could not resolve constraint"
        return
    # reset and execute until target is reached
    p.reset()
    for inst in p.inst_iter():
        if inst.getAddress() == target:
            p.r2p.seek(target)
            p.r2p.set_flag("regs", p.pcreg.getName(), 1, target)
            return
    print "error: end of execution"
@pimp.r2p.r2cmd("symulate")
def cmd_symulate(p, a):
    # TODO: not implemented -- registered so the subcommand is recognised
    # by the dispatcher instead of printing "Unknown command".
    pass
# define symbolized memory
@pimp.r2p.r2cmd("input")
def cmd_symbolize(p, a):
    """Mark a range of bytes as symbolic input and reset the engines.

    NOTE(review): the argument order is <size> <address> -- confirm this
    matches the wrapper's documented usage.
    """
    size = p.r2p.integer(a[0])
    addr = p.r2p.integer(a[1])
    p.add_input(addr, size)
    p.reset()
# sync r2 with input generated by triton
@pimp.r2p.r2cmd("sync")
def cmd_sync_input(p, a):
    """Write the solver-generated input bytes from the cache back into
    r2's memory."""
    for address in p.inputs:
        # NOTE(review): get_byte() returns None for an uncached address,
        # which would make write_mem fail -- assumes every declared input
        # byte has been pulled into the cache by emulation.
        p.r2p.write_mem(address, get_byte(address))
# reset memory with r2 current state
@pimp.r2p.r2cmd("reset")
def cmd_reset(p, a):
    """Re-read every cached memory region from r2, push the fresh bytes
    into Triton, and rebuild the global cache from them."""
    global cache
    refreshed = []
    for region in cache:
        start = region["start"]
        data = p.r2p.read_mem(start, len(region["data"]))
        triton.setConcreteMemoryAreaValue(start, bytearray(data))
        refreshed.append({"start": start, "data": data})
    cache = refreshed
# Dispatch the subcommand passed in by pimp_wrapper.py to its handler.
pimp.r2p.handle()
|
Castle is a defining figure in the American studio furniture movement. After studying sculpture and industrial design at the University of Kansas, Castle taught at the Rochester Institute of Technology and at the State University of New York in Brockport. In the late 1950s, he developed his Organic Series, whose title refers to both the wood of his furniture and biomorphic form. In this series Castle abandoned traditional furniture methods. Instead, he glued layers of wood into a large block, then shaped the stack with hand and power tools, a subtractive technique more akin to sculpture than to furniture production. Castle made this piece, a rare three-seat settee, using this stack-lamination method. The resulting settee is strong enough to be both functional and sculptural, with a carved base resembling a tree's root system. Castle's later furniture series are equally iconoclastic, including his trompe l'oeil pieces, sculptures of everyday objects made entirely of wood.
|
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.models import ContentType
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import models
from django.forms.formsets import BaseFormSet
from django.forms.widgets import flatatt
from django.utils.encoding import smart_unicode
from django.utils.html import escape
from django.utils.safestring import mark_safe
from what_apps.people.models import GenericParty
from what_apps.utility.functions import daily_crypt
class JqueryDatePicker(forms.DateField):
    """Date field whose widget renders as a read-only jQuery datepicker
    (class ``datePicker``) using the US ``%m/%d/%Y`` format."""

    def __init__(self, *args, **kwargs):
        super(JqueryDatePicker, self).__init__(*args, **kwargs)
        widget = self.widget
        widget.format = '%m/%d/%Y'
        widget.attrs.update({'readonly': 'true', 'class': 'datePicker'})
class RequiredFormSet(BaseFormSet):
    """Formset in which every member form must be filled in.

    BaseFormSet marks extra forms as "empty permitted"; clearing that flag
    forces each form to be validated as required.
    """

    def __init__(self, *args, **kwargs):
        super(RequiredFormSet, self).__init__(*args, **kwargs)
        for member in self.forms:
            member.empty_permitted = False
class AutoCompleteWidget(forms.TextInput):
    '''
    Autocomplete widget rendered as TWO <input> elements: a hidden field
    (the widget proper) whose value is populated by ajax with
    "<encoded model/id>___<label>" data, and a visible "lookup" field
    into which the user types the autocomplete terms.
    '''
    def __init__(self, new_buttons = False, models=None, with_description=False, name_visible_field=False, *args, **kwargs):
        super(AutoCompleteWidget, self).__init__(*args, **kwargs)
        self.name_visible_field = name_visible_field #We want the visible field to be named (ie, make its way into POST) iff we have set the field in the form as such.
        # Build the comma-separated model spec handed to the client side,
        # e.g. "auth.user__first_name&last_name,auth.group".
        list_of_models = ""
        for model_info in models:
            try:
                counter = 0
                for counter, property in enumerate(model_info): #model_info might be a list (a model plus lookup-property names) or a bare ModelBase. EAFP: assume an iterable here and handle the bare-model case in the except block below.
                    if counter == 0: #The first item is always the model itself.
                        model = property
                        meta = str(model._meta) #Get the model meta, ie people.member
                        list_of_models += (meta)
                    else: #Subsequent items are property names to autocomplete against.
                        if counter == 1: #The second item begins the property list, joined to the model by "__".
                            list_of_models += "__" + property
                        else: #Further properties are separated by ampersands.
                            list_of_models += "&" + property
            except TypeError:
                # model_info is a bare model, not an iterable of model + properties.
                model = model_info
                meta = str(model._meta) #Get the model meta, ie people.member
                list_of_models += (meta)
            list_of_models += ","
        list_of_models = list_of_models[:-1] #Kill the trailing comma
        # NOTE(review): despite the attribute name, nothing is encrypted
        # here -- confirm whether a daily_crypt() call was meant.
        self.encrypted_models = list_of_models
        if new_buttons:
            #They have asked for the little buttons to add new instances.
            if not models:
                #...but no models were supplied to build the links from.
                raise RuntimeError('The llamas did not march. You must either specify models for this widget or set new_buttons to False.')
            #OK, they gave us the models. Let's give them the plus signs.
            self.add_html = ''
            for model in models:
                #Go through the models and add little plus signs, plus the model name.
                try: #Maybe the object in question is in fact a model...
                    app_name = str(model._meta).split('.')[0]
                    model_name = str(model._meta).split('.')[1]
                except AttributeError: #Or maybe it's a list...
                    #In which case we want model[0]
                    app_name = str(model[0]._meta).split('.')[0]
                    model_name = str(model[0]._meta).split('.')[1]
                #Special case: for auth.user, link to the contact form instead of the auth.user admin page, which doesn't really do much for us.
                if app_name == 'auth' and model_name == 'user':
                    add_url = '/contact/new_contact'
                    model_name = 'contact'
                else:
                    add_url = '/admin/' + app_name + '/' + model_name + '/add'
                self.add_html += '<a target="_blank" href="'+ add_url + '" class="addButton"><span class="underText">' + model_name + '</span></a>'
        else: #They didn't ask for the buttons.
            self.add_html = False
    def render(self, name, value=None, attrs=None):
        '''
        Render the hidden value field plus the visible lookup field (and,
        when enabled, the "add new" buttons built in __init__).
        '''
        final_attrs = self.build_attrs(attrs, name=name)
        lookup_attrs = self.build_attrs(attrs)
        if value:
            # Stored value looks like "<encoded>___<display label>"; the
            # label half seeds the visible lookup field.
            final_attrs['value'] = escape(smart_unicode(value))
            lookup_attrs['value'] = final_attrs['value'].split('___')[1]
        if not self.attrs.has_key('id'):
            final_attrs['id'] = 'id_%s' % name
            lookup_attrs['id'] = final_attrs['id'] + '_lookup'
        lookup_attrs['class'] = 'autocompleteField autocompleteFieldIncomplete'
        if self.name_visible_field:
            lookup_attrs['name'] = 'lookup_%s' % name
        # The client-side autocompleter reads the model spec from this attribute.
        lookup_attrs['elephant_data'] = str(self.encrypted_models)
        final_attrs['type'] = 'hidden'
        input_html = mark_safe(u'<input%s />' % flatatt(final_attrs))
        widget_html = input_html #So far, just the hidden input
        lookup_html = mark_safe(u'<input%s />' % flatatt(lookup_attrs))
        widget_html += lookup_html #Now add the visible lookup input
        if self.add_html:
            widget_html += self.add_html #Now add the plus signs
        return widget_html
class AutoCompleteField(forms.Field):
    '''
    Takes a tuple of models and autocompletes against their haystack entries.
    '''
    def __init__(self, models=None, new_buttons=False, with_description=True, name_visible_field=False, *args, **kwargs):
        super(AutoCompleteField, self).__init__(*args, **kwargs)
        self.widget = AutoCompleteWidget(new_buttons=new_buttons, models=models, name_visible_field=name_visible_field, with_description=with_description)
    def to_python(self, value):
        '''
        Resolve the posted "<app.model>_<id>___<label>" value to the actual
        model instance it names.

        NOTE(review): decryption of the hidden-field payload is currently
        disabled (see the commented-out daily_crypt call) -- the value is
        parsed as plain text.
        '''
        if value in validators.EMPTY_VALUES:
            return u''
        encrypted_result = value.split('___')[0]
        #result = daily_crypt(encrypted_result, decrypt=True)
        result = encrypted_result
        #This is now copied in utilities.function.get_object_from_string - please deprecate
        result_meta = result.split('_')[0] #Now just app and model.
        result_id = result.split('_')[1] #....and the ID of the object. NOTE(review): breaks if the app or model name itself contains an underscore.
        result_app = result_meta.split('.')[0] #Everything before the . is the app name
        result_model = result_meta.split('.')[1] #Everything after is the model name
        #Get model of object in question
        model = ContentType.objects.get(app_label = result_app, model = result_model).model_class()
        # Look the concrete instance up by primary key.
        result_object = model.objects.get(id=result_id)
        return result_object
class GenericPartyField(AutoCompleteField):
    """Autocomplete field resolving to a GenericParty wrapping a User or Group."""
    def __init__(self, new_buttons=True, *args, **kwargs):
        # NOTE(review): *args/**kwargs are accepted but not forwarded to the
        # superclass -- any extra Field options passed in are dropped.
        super(GenericPartyField, self).__init__(models=([User,'first_name', 'last_name', 'username', 'email'], Group), new_buttons=new_buttons)
    def to_python(self, value):
        try:
            object = super(GenericPartyField, self).to_python(value)
            generic_party = GenericParty.objects.get(party=object)
        except AttributeError: #Catch Justin's Attribute Error on Generic Party, ensure that the form can't validate
            raise ValidationError('GenericPartyField must be a User or a Group')
        return generic_party
    def validate(self, value):
        '''
        Intentionally a no-op (formerly "holy cow"): all validation happens
        in to_python, including the required/empty check a Field.validate
        would normally perform.
        '''
        pass
class SimplePartyLookup(forms.Form):
    """Minimal form exposing a single party autocomplete, without add-buttons."""
    party_lookup = GenericPartyField(new_buttons=False)
class ManyGenericPartyField(AutoCompleteField):
    """Autocomplete field returning a single-element list of GenericParty,
    suitable for assignment to a many-to-many relation."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): as with GenericPartyField, *args/**kwargs are not
        # forwarded to the superclass.
        super(ManyGenericPartyField, self).__init__(models=(User, Group), new_buttons=True)
    def to_python(self, value):
        object = super(ManyGenericPartyField, self).to_python(value)
        if object:
            generic_party = GenericParty.objects.get(party=object)
        else:
            raise ValidationError("Gotta pass me somethin'")
        # Wrapped in a list so callers can hand it straight to a M2M setter.
        return [generic_party,]
class MustBeUniqueField(forms.Field):
    '''
    Takes a string in the form of appname.model__field and ensures that the
    value entered is unique for that field (checked client-side via the
    ``mustBeUniqueField`` CSS hook).
    '''
    def __init__(self, field=None, *args, **kwargs):
        super(MustBeUniqueField, self).__init__(*args, **kwargs)
        # Encrypt the field path with today's salt before exposing it in HTML.
        token = daily_crypt(field)
        widget_attrs = self.widget.attrs
        widget_attrs['class'] = 'mustBeUniqueField'
        widget_attrs['elephant_data'] = str(token)
def get_bool_from_html(piece):
    """Interpret a value posted from HTML as a boolean.

    The recognised falsy spellings are 'False', 'false', '0' and the
    integer 0 (which also matches Python's False, since False == 0);
    anything else counts as True.
    """
    return piece not in ('False', 'false', '0', 0)
|
Teachers recommend Fiskars® brand scissors because they are designed to meet the specific needs of each age level, and they cut all classroom material every time. But it’s not just teachers who love our products — moms love them too! By designing our scissors to meet the needs of growing hands, kids of any age can make frustration-free cuts safely and successfully. Every little success helps kids build self-confidence, and this can-do attitude becomes the foundation for learning new skills beyond just cutting and crafting. Count on Fiskars® to help your kids down the path to a successful educational experience, no matter what their age.
|
import sys
import os
from cx_Freeze import setup, Executable
# Make the package directory importable by the freezer, ahead of sys.path.
path = ["pyjtt"] + sys.path
icon_path = os.path.join("resources", "icons", "clock.ico")
# include_msvcr bundles the MS VC runtime DLLs into the build.
build_exe_options = {'path': path, 'include_msvcr': True}
# MSI Shortcut table column layout:
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa371847(v=vs.85).aspx
shortcut_table = [
    ("DesktopShortcut",        # Shortcut
     "DesktopFolder",          # Directory_
     "pyJTT",                  # Name
     "TARGETDIR",              # Component_
     "[TARGETDIR]pyjtt.exe",   # Target
     None,                     # Arguments
     None,                     # Description
     None,                     # Hotkey
     None,                     # Icon
     None,                     # IconIndex
     None,                     # ShowCmd
     '%APPDATA%\pyjtt'         # WkDir
     ),
    ("ProgramMenuShortcut",    # Shortcut
     "ProgramMenuFolder",      # Directory_
     "pyJTT",                  # Name
     "TARGETDIR",              # Component_
     "[TARGETDIR]pyjtt.exe",   # Target
     None,                     # Arguments
     None,                     # Description
     None,                     # Hotkey
     None,                     # Icon
     None,                     # IconIndex
     None,                     # ShowCmd
     '%APPDATA%\pyjtt'         # WkDir
     )]
# Now create the table dictionary
msi_data = {"Shortcut": shortcut_table}
# Change some default MSI options and specify the
# use of the above defined tables
bdist_msi_options = {'data': msi_data}
# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
if sys.platform == "win32":
    base = "Win32GUI"
target_app = os.path.join("pyjtt", "app.py")
# NOTE(review): shortcutName/shortcutDir below duplicate the desktop entry
# already declared in shortcut_table -- confirm both are intended.
setup(name="pyjtt",
      version="1.2.3",
      description="Jira Time Tracker",
      maintainer="Nikolay Golub",
      maintainer_email="nikolay.v.golub@gmail.com",
      long_description="Allows track time in JIRA online and manage worklogs",
      license="GNU GENERAL PUBLIC LICENSE Version 3",
      options={"build_exe": build_exe_options,
               "bdist_msi": bdist_msi_options, },
      executables=[Executable(target_app,
                              base=base,
                              targetName="pyjtt.exe",
                              icon=icon_path,
                              shortcutName="pyJTT",
                              shortcutDir="DesktopFolder")])
|
—Microsoft has received more attention in recent weeks for two incidents involving teen girls, a state of affairs that has executives in Redmond asking hard questions and making public apologies.
Maybe Microsoft should have more heavily filtered Tay’s responses, or at least devised a way to indicate they were assimilated comments of humans interacting with it, rather than its own inventions, as the University of Washington’s Ryan Calo, an expert in AI law, points out in Wired.
Meanwhile, the very human failure in Microsoft’s games division in booking exotic dancers dressed like school girls to mingle with attendees at its Game Developers Conference party in San Francisco earlier this month was arguably worse for a company—and an industry—trying to create a climate more welcoming to women.
—Google is making available some of its machine learning technologies to developers through Cloud Machine Learning. The company will allow developers to use APIs for speech recognition, translation, and computer vision, accessing the same technology at play in the company’s voice search and voice typing products. This underscores the tough competition from technology giants faced by startups aspiring to deliver similar machine learning or AI capabilities as a service.
—Here’s a new resource for those of us trying to keep tabs on machine learning and AI: From Seattle startup Algorithmia comes Emergent Future, a “weekly, hand-curated dispatch exploring technology through the lens of artificial intelligence, data science, and the shape of things to come.” You can sign up for it here.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the basicviz app: Document and Experiment models,
    with each Document belonging to one Experiment."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=32)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Experiment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=128)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # FK added as a separate operation so both tables exist first.
        migrations.AddField(
            model_name='document',
            name='experiment',
            field=models.ForeignKey(to='basicviz.Experiment', on_delete=models.CASCADE),
            preserve_default=True,
        ),
    ]
|
This entry was posted on Friday, July 10th, 2009 at 1:21 pm and is filed under Fish Hook, Fishing Equipments. You can follow any responses to this entry through the RSS 2.0 feed. Both comments and pings are currently closed.
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the todo_todolist and todo_todoitem tables."""
        # Adding model 'TodoList'
        db.create_table(u'todo_todolist', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            # NOTE(review): unique=True makes this FK effectively one-to-one
            # with the parent TodoItem -- confirm that is intended.
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='parent_todo', unique=True, null=True, to=orm['todo.TodoItem'])),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal(u'todo', ['TodoList'])
        # Adding model 'TodoItem'
        db.create_table(u'todo_todoitem', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('todo_list', self.gf('django.db.models.fields.related.ForeignKey')(related_name='todo_list', to=orm['todo.TodoList'])),
            ('value', self.gf('django.db.models.fields.TextField')()),
            ('status', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal(u'todo', ['TodoItem'])
def backwards(self, orm):
# Deleting model 'TodoList'
db.delete_table(u'todo_todolist')
# Deleting model 'TodoItem'
db.delete_table(u'todo_todoitem')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'todo.todoitem': {
'Meta': {'object_name': 'TodoItem'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'todo_list': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'todo_list'", 'to': u"orm['todo.TodoList']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'todo.todolist': {
'Meta': {'object_name': 'TodoList'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_todo'", 'unique': 'True', 'null': 'True', 'to': u"orm['todo.TodoItem']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['todo']
|
There is 1 United States Postal Service post box available to the public in Brownsville, MN. You can use this location to mail your letter or small package. For additional collection boxes in neighboring communities, see the list of post boxes in Minnesota.
Are you looking for a post office? See the list of post offices in Brownsville, MN.
|
"""
desispec.magnitude
========================
Broadband flux and magnitudes
"""
import numpy as np
def compute_broadband_flux(spectrum_wave, spectrum_flux, transmission_wave, transmission_value):
    """Integrate a spectrum through a broadband transmission curve.

    Args:
        spectrum_wave: 1D numpy array, wavelength (Angstrom)
        spectrum_flux: 1D numpy array, flux density in some input unit,
            same size as spectrum_wave
        transmission_wave: 1D numpy array, wavelength (Angstrom)
        transmission_value: 1D numpy array, dimensionless transmission,
            same size as transmission_wave

    Returns:
        scalar integrated flux, in units of Angstrom x (input flux unit)
    """
    # arrays must come in matching pairs
    assert spectrum_wave.size == spectrum_flux.size
    assert transmission_wave.size == transmission_value.size
    # sort both grids defensively (np.interp requires increasing x)
    spec_order = np.argsort(spectrum_wave)
    trans_order = np.argsort(transmission_wave)
    band_lo = transmission_wave[trans_order[0]]
    band_hi = transmission_wave[trans_order[-1]]
    # the transmission band must be fully contained in the spectrum coverage
    assert spectrum_wave[spec_order[0]] <= band_lo
    assert spectrum_wave[spec_order[-1]] >= band_hi
    in_band = (spectrum_wave >= band_lo) & (spectrum_wave <= band_hi)
    # merged, sorted, de-duplicated wavelength grid over the band
    wave = np.unique(np.hstack([spectrum_wave[in_band], transmission_wave]))
    # integrand = interpolated flux times interpolated transmission
    integrand = (np.interp(wave, spectrum_wave[spec_order], spectrum_flux[spec_order])
                 * np.interp(wave, transmission_wave[trans_order], transmission_value[trans_order]))
    # trapezoidal rule on the merged grid
    return np.trapz(integrand, wave)
def ab_flux_in_ergs_s_cm2_A(wave):
    """Return the AB reference flux density at the given wavelength(s).

    Args:
        wave: scalar or 1D numpy array, wavelength (Angstrom)

    Returns:
        AB flux in units of ergs/s/cm2/A, same shape as ``wave``
    """
    # Precomputed (3631 Jansky * c) expressed in erg/(cm2 s) * Angstrom,
    # so astropy is not needed at runtime; with astropy this would be:
    #   (3631 * astropy.units.Jansky * astropy.constants.c).to(
    #       flux_unit * astropy.units.Angstrom**2)
    ab_constant = 0.10885464  # Angstrom erg / (cm2 s)
    return ab_constant / wave**2
def compute_ab_mag(spectrum_wave, spectrum_flux, transmission_wave, transmission_value):
    """Compute the AB magnitude of a spectrum through a transmission curve.

    Args:
        spectrum_wave: 1D numpy array, wavelength (Angstrom)
        spectrum_flux: 1D numpy array, flux in units of 1e-17 ergs/s/cm2/A,
            same size as spectrum_wave
        transmission_wave: 1D numpy array, wavelength (Angstrom)
        transmission_value: 1D numpy array, dimensionless transmission,
            same size as transmission_wave

    Returns:
        AB magnitude (float scalar); may be NaN if the band flux is <= 0
    """
    # spectrum flux is stored in units of 1e-17 ergs/s/cm2/A, hence the scaling
    band_flux = 1e-17 * compute_broadband_flux(
        spectrum_wave, spectrum_flux, transmission_wave, transmission_value)
    # integrate the AB reference on the same wavelength grid to limit
    # interpolation biases in the ratio
    reference_flux = compute_broadband_flux(
        spectrum_wave, ab_flux_in_ergs_s_cm2_A(spectrum_wave),
        transmission_wave, transmission_value)
    return -2.5 * np.log10(band_flux / reference_flux)
|
My whole professional career has been involved with horses. After graduating with a degree in zootechnics from the National Agronomical Institute of Paris in 1974, I spent 2 years teaching at the Mostaganem School of Agriculture in Algeria, this was followed by 3 years as the Deputy Director of the National Stud of Saint Lô in France. From 1979 to 2004 I was Director of SIRE, the French National Equine Registry and database covering 3 million registered horses in 50 stud books for different breeds – including of course the French Arabian Stud Book. Since 1999 I was also a Member of the Executive Committee of WBFSH (World Breeding Federation for Sport Horses). From 2004 until 2010 I was Director of International Relations of the Haras Nationaux and Coordinator of the WHIRDEC (World Horse Identification, Registration And Data Exchange Committee, which promotes UELN and data exchanges projects). From 2010 until my retirement in 2015, I was the Director of International Relations of the IFCE (French Institute for Horse Riding and Breeding, merging Haras Nationaux, Ministry of Agriculture, and Cadre Noir, Ministry of Sports).
In terms of my personal and more direct interests with horses, I started riding at an early age, and in 1968 I was the French Junior Showjumping Champion. Since 1980 I have been a National Judge for Show Jumping and Eventing for the French National Equestrian Federation, since 1999 I have been an International FEI steward in Eventing, and since 1988 I have been an ECAHO judge for Arabian shows, with many judging appointments all over the world, mainly since my professional retirement.
Concerning purebred Arabian horses and WAHO, my connection with the Organization dates back to 1974, when I produced the report on the French Arab Stud Book for the WAHO inspection team of Dr. Pesi Gazder and Mr. Ronald Kydd. In the same year I participated at the Malmö WAHO General Assembly as a member of the French delegation and also produced a final study report on “the Pure Bred Arab in the World”. In 1988 I made a report to the General Assembly in London on Purebred Arabian stud book management around the world. Since 1986 I have attended almost all WAHO and ECAHO General Assemblies as the delegate for France, and have also been the delegate to the EAHRIC Committee of ECAHO, and to the WAHO World Registrars meetings.
I have been a member of the WAHO Executive Committee since 2006, and Chairman of the WAHO World Registrars Meetings since 2014. For many years I have also been WAHO’s liaison person to WHIRDEC meetings and for all dealings with the EU Commission on their directives concerning equines. I have always felt very strongly that international collaboration is of the utmost importance, and I will continue to devote my time to working for WAHO and to further improve and strengthen the communication and cooperation between all the member registries.
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .robottypes import is_string
# Words that escape() prefixes with a single backslash when the whole item
# equals one of them.
_CONTROL_WORDS_TO_BE_ESCAPED = ('ELSE', 'ELSE IF', 'AND')
# Character sequences that escape() backslash-escapes wherever they occur.
_SEQUENCES_TO_BE_ESCAPED = ('\\', '${', '@{', '%{', '&{', '*{', '=')
def escape(item):
    r"""Return ``item`` with special Robot Framework syntax backslash-escaped.

    Non-string values are returned unchanged. A string that is exactly one
    of the control words (ELSE, ELSE IF, AND) gets a single leading
    backslash; otherwise every occurrence of the special sequences
    ('\', '${', '@{', '%{', '&{', '*{', '=') is prefixed with a backslash.
    """
    if not is_string(item):
        return item
    if item in _CONTROL_WORDS_TO_BE_ESCAPED:
        return '\\' + item
    escaped = item
    for sequence in _SEQUENCES_TO_BE_ESCAPED:
        escaped = escaped.replace(sequence, '\\' + sequence)
    return escaped
def unescape(item):
    """Resolve backslash escape sequences in ``item``.

    Values that are not strings, or strings containing no backslash, are
    returned unchanged.
    """
    if is_string(item) and '\\' in item:
        return Unescaper().unescape(item)
    return item
class Unescaper(object):
    r"""Resolves backslash escape sequences (\n, \r, \t, \xhh, \uhhhh,
    \Uhhhhhhhh) in a string into the characters they denote.

    Unrecognized escapes simply lose their backslash and keep the
    following text (see ``_unescape``).
    """
    # NOTE(review): this compiled pattern is not referenced inside this
    # class; EscapeFinder below compiles its own identical copy.
    _escaped = re.compile(r'(\\+)([^\\]*)')
    def unescape(self, string):
        """Return ``string`` with all escape sequences resolved."""
        return ''.join(self._yield_unescaped(string))
    def _yield_unescaped(self, string):
        # Process chunk by chunk: each EscapeFinder locates the next run of
        # backslashes and splits the string around it.
        while '\\' in string:
            finder = EscapeFinder(string)
            yield finder.before + finder.backslashes
            # Only an odd-length backslash run escapes the following text.
            if finder.escaped and finder.text:
                yield self._unescape(finder.text)
            else:
                yield finder.text
            string = finder.after
        yield string
    def _unescape(self, text):
        try:
            escape = str(text[0])
        except UnicodeError:
            # Non-ASCII first character: str() raises on Python 2, and such
            # a character cannot name an escape; drop the backslash only.
            return text
        try:
            # Dispatch to _unescaper_for_<char> if a handler exists.
            unescaper = getattr(self, '_unescaper_for_' + escape)
        except AttributeError:
            # Unknown escape: the backslash is simply removed.
            return text
        else:
            return unescaper(text[1:])
    def _unescaper_for_n(self, text):
        # A single space right after '\n' is consumed together with it.
        if text.startswith(' '):
            text = text[1:]
        return '\n' + text
    def _unescaper_for_r(self, text):
        return '\r' + text
    def _unescaper_for_t(self, text):
        return '\t' + text
    def _unescaper_for_x(self, text):
        # \xhh: character from two hex digits.
        return self._unescape_character(text, 2, 'x')
    def _unescaper_for_u(self, text):
        # \uhhhh: character from four hex digits.
        return self._unescape_character(text, 4, 'u')
    def _unescaper_for_U(self, text):
        # \Uhhhhhhhh: character from eight hex digits.
        return self._unescape_character(text, 8, 'U')
    def _unescape_character(self, text, length, escape):
        try:
            char = self._get_character(text[:length], length)
        except ValueError:
            # Invalid hex/ordinal: keep the original escape text verbatim.
            return escape + text
        else:
            return char + text[length:]
    def _get_character(self, text, length):
        # Require exactly `length` alphanumeric (hex) characters.
        if len(text) < length or not text.isalnum():
            raise ValueError
        ordinal = int(text, 16)
        # No Unicode code points above 0x10FFFF
        if ordinal > 0x10FFFF:
            raise ValueError
        # unichr only supports ordinals up to 0xFFFF with narrow Python builds
        if ordinal > 0xFFFF:
            return eval("u'\\U%08x'" % ordinal)
        # NOTE(review): unichr is a Python 2 builtin (NameError on Python 3).
        return unichr(ordinal)
class EscapeFinder(object):
    r"""Locates the first run of backslashes in a string and splits the
    string around it.

    Attributes set by the constructor:
      before      -- text preceding the backslash run
      backslashes -- one literal backslash per doubled backslash in the run
      escaped     -- True if the run length is odd (the text is escaped)
      text        -- non-backslash text immediately after the run
      after       -- remainder of the string
    """
    _escaped = re.compile(r'(\\+)([^\\]*)')
    def __init__(self, string):
        match = self._escaped.search(string)
        self.before = string[:match.start()]
        run_length = len(match.group(1))
        # Every pair of backslashes collapses to one literal backslash;
        # an odd count leaves one backslash escaping what follows.
        self.backslashes = '\\' * (run_length // 2)
        self.escaped = bool(run_length % 2)
        self.text = match.group(2)
        self.after = string[match.end():]
def split_from_equals(string):
    """Split ``string`` at the first '=' that is not backslash-escaped.

    Returns a ``(before, after)`` tuple without the separator, or
    ``(string, None)`` when no unescaped '=' exists.
    """
    position = _get_split_index(string)
    if position < 0:
        return string, None
    return string[:position], string[position + 1:]
def _get_split_index(string):
    """Return the index of the first '=' in ``string`` that is not preceded
    by an odd number of backslashes, or -1 if there is none.
    """
    # Use str.find with a start offset instead of repeatedly slicing
    # string[index:], which copied the tail on every iteration (O(n^2)).
    index = string.find('=')
    while index != -1:
        if _not_escaping(string[:index]):
            return index
        index = string.find('=', index + 1)
    return -1
def _not_escaping(name):
backslashes = len(name) - len(name.rstrip('\\'))
return backslashes % 2 == 0
|
This Southwest Pasta One Pot Wonder is a vegetarian meal that takes about 25 minutes to prepare from start to finish. What I love about this dish is that it uses one pot to do everything, including cooking the pasta right with your other ingredients.
If you are unfamiliar with the one pot method of cooking, you simply put your uncooked pasta, veggies, seasoning, and broth into a large pot, cover it, and let it cook for about 12-15 minutes. The pasta absorbs the cooking liquid and creates a nice sauce. That’s it!
I really love utilizing this method of cooking for those busy nights when you just don’t have time to whip up a huge complex meal yet still want something that has a lot of flavors.
Although this dish does utilize pasta, which I recommend limiting, it is still a healthier meal with tons of fresh veggies. I do recommend switching your pasta recipes to whole wheat pasta if you haven’t already.
And as long as you limit this meal to once in a while it can be a great addition to your menu plan! Plus, it is a great way to get the kids to eat more veggies, especially ones they may not usually eat, like onions. You can also make this same recipe utilizing quinoa pasta, but you may have to adjust the cooking time a bit.
The main protein source in this Southwest Pasta One Pot Wonder is black beans. While we made ours from a bag of dried beans, you can certainly use a can of black beans as well. If you are a vegetarian or someone who likes to go meatless a few days a week, this is a great option for you!
This southwest dish is not overly spicy but does have a little kick due to a can of fire roasted tomatoes with chilies that I used (feel free to omit). The addition of sweet corn and some sweet peppers give a great contrast of flavors.
Just look at those awesome little peppers! If you don’t have any sweet peppers feel free to substitute bell peppers or any other of your choice.
You can top this finished dish with some additional cheese, or to cool it down a bit, a dollop of fresh sour cream.
1 box pasta (I chose whole wheat pasta, but if wheat/carbs are an issue you can try quinoa pasta with similar results; you may need to adjust the cooking time a bit).
Add all but the beans and cheese to a large pot (my preference is a large cast iron dutch oven). Stir, cover, and bring to a rolling boil.
Reduce heat to medium-low and cook, covered, for 12-15 minutes, stirring occasionally. There should still be 1/4 to 1/2 inch of liquid in the bottom of the pot when it is done.
Remove from heat and stir in the black beans and cheese (optional).
Let rest for 5 minutes to warm the beans, melt the cheese, and absorb the excess liquid.
Serve and top with additional shredded cheese and sour cream if desired.
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function to build box predictor from configuration."""
import collections
import tensorflow as tf
from object_detection.predictors import convolutional_box_predictor
from object_detection.predictors import convolutional_keras_box_predictor
from object_detection.predictors import mask_rcnn_box_predictor
from object_detection.predictors import mask_rcnn_keras_box_predictor
from object_detection.predictors import rfcn_box_predictor
from object_detection.predictors import rfcn_keras_box_predictor
from object_detection.predictors.heads import box_head
from object_detection.predictors.heads import class_head
from object_detection.predictors.heads import keras_box_head
from object_detection.predictors.heads import keras_class_head
from object_detection.predictors.heads import keras_mask_head
from object_detection.predictors.heads import mask_head
from object_detection.protos import box_predictor_pb2
def build_convolutional_box_predictor(is_training,
                                      num_classes,
                                      conv_hyperparams_fn,
                                      min_depth,
                                      max_depth,
                                      num_layers_before_predictor,
                                      use_dropout,
                                      dropout_keep_prob,
                                      kernel_size,
                                      box_code_size,
                                      apply_sigmoid_to_scores=False,
                                      add_background_class=True,
                                      class_prediction_bias_init=0.0,
                                      use_depthwise=False,
                                      box_encodings_clip_range=None):
  """Builds a slim ConvolutionalBoxPredictor from the given arguments.

  Args:
    is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: Number of classes, *excluding* the background category
      (groundtruth labels in {0, ..., K-1} mean num_classes=K).
    conv_hyperparams_fn: A function generating a tf-slim arg_scope with
      hyperparameters for convolution ops.
    min_depth: Minimum feature depth prior to predicting box encodings
      and class predictions.
    max_depth: Maximum feature depth prior to the prediction heads; if 0,
      no additional feature map is inserted before them.
    num_layers_before_predictor: Number of additional conv layers placed
      before the predictor.
    use_dropout: Whether to apply a single dropout op prior to both the
      box and class predictions.
    dropout_keep_prob: Keep probability for dropout; only used when
      use_dropout is True.
    kernel_size: Size of the final convolution kernel; if the feature map
      is smaller, the kernel size is reduced to
      min(feature_width, feature_height).
    box_code_size: Size of the encoding for each box.
    apply_sigmoid_to_scores: If True, a sigmoid is applied to the output
      class predictions.
    add_background_class: Whether to add an implicit background class.
    class_prediction_bias_init: Constant used to initialize the bias of
      the last conv2d layer before class prediction.
    use_depthwise: Whether prediction steps use depthwise convolutions.
      Default is False.
    box_encodings_clip_range: Min and max values for clipping the
      box_encodings.

  Returns:
    A ConvolutionalBoxPredictor instance.
  """
  # The class head predicts one extra slot when an implicit background
  # class is requested.
  num_class_slots = num_classes + 1 if add_background_class else num_classes
  localization_head = box_head.ConvolutionalBoxHead(
      is_training=is_training,
      box_code_size=box_code_size,
      kernel_size=kernel_size,
      use_depthwise=use_depthwise,
      box_encodings_clip_range=box_encodings_clip_range)
  classification_head = class_head.ConvolutionalClassHead(
      is_training=is_training,
      num_class_slots=num_class_slots,
      use_dropout=use_dropout,
      dropout_keep_prob=dropout_keep_prob,
      kernel_size=kernel_size,
      apply_sigmoid_to_scores=apply_sigmoid_to_scores,
      class_prediction_bias_init=class_prediction_bias_init,
      use_depthwise=use_depthwise)
  # No auxiliary prediction heads for this predictor type.
  return convolutional_box_predictor.ConvolutionalBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      box_prediction_head=localization_head,
      class_prediction_head=classification_head,
      other_heads={},
      conv_hyperparams_fn=conv_hyperparams_fn,
      num_layers_before_predictor=num_layers_before_predictor,
      min_depth=min_depth,
      max_depth=max_depth)
def build_convolutional_keras_box_predictor(is_training,
                                            num_classes,
                                            conv_hyperparams,
                                            freeze_batchnorm,
                                            inplace_batchnorm_update,
                                            num_predictions_per_location_list,
                                            min_depth,
                                            max_depth,
                                            num_layers_before_predictor,
                                            use_dropout,
                                            dropout_keep_prob,
                                            kernel_size,
                                            box_code_size,
                                            add_background_class=True,
                                            class_prediction_bias_init=0.0,
                                            use_depthwise=False,
                                            box_encodings_clip_range=None,
                                            name='BoxPredictor'):
  """Builds a Keras-based ConvolutionalBoxPredictor from the arguments.

  One box head and one class head are created per feature map, i.e. per
  entry of ``num_predictions_per_location_list``.

  Args:
    is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: Number of classes, *excluding* the background category
      (groundtruth labels in {0, ..., K-1} mean num_classes=K).
    conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
      containing hyperparameters for convolution ops.
    freeze_batchnorm: Whether to freeze batch norm parameters during
      training. Useful with small batch sizes (e.g. 1) to keep pretrained
      batch norm params.
    inplace_batchnorm_update: Whether to update batch norm moving averages
      inplace; when False the train op must depend on
      tf.graphkeys.UPDATE_OPS to update batch norm statistics.
    num_predictions_per_location_list: A list of integers giving the number
      of box predictions per spatial location for each feature map.
    min_depth: Minimum feature depth prior to predicting box encodings
      and class predictions.
    max_depth: Maximum feature depth prior to the prediction heads; if 0,
      no additional feature map is inserted before them.
    num_layers_before_predictor: Number of additional conv layers placed
      before the predictor.
    use_dropout: Whether to apply a single dropout op prior to both the
      box and class predictions.
    dropout_keep_prob: Keep probability for dropout; only used when
      use_dropout is True.
    kernel_size: Size of the final convolution kernel; if the feature map
      is smaller, the kernel size is reduced to
      min(feature_width, feature_height).
    box_code_size: Size of the encoding for each box.
    add_background_class: Whether to add an implicit background class.
    class_prediction_bias_init: Constant used to initialize the bias of
      the last conv2d layer before class prediction.
    use_depthwise: Whether prediction steps use depthwise convolutions.
      Default is False.
    box_encodings_clip_range: Min and max values for clipping the
      box_encodings.
    name: A string name scope for the box predictor; if `None`, Keras
      auto-generates one from the class name.

  Returns:
    A Keras ConvolutionalBoxPredictor instance.
  """
  # The class heads predict one extra slot when an implicit background
  # class is requested.
  num_class_slots = num_classes + 1 if add_background_class else num_classes
  box_heads = []
  class_heads = []
  for head_index, predictions_per_location in enumerate(
      num_predictions_per_location_list):
    box_heads.append(
        keras_box_head.ConvolutionalBoxHead(
            is_training=is_training,
            box_code_size=box_code_size,
            kernel_size=kernel_size,
            conv_hyperparams=conv_hyperparams,
            freeze_batchnorm=freeze_batchnorm,
            num_predictions_per_location=predictions_per_location,
            use_depthwise=use_depthwise,
            box_encodings_clip_range=box_encodings_clip_range,
            name='ConvolutionalBoxHead_%d' % head_index))
    class_heads.append(
        keras_class_head.ConvolutionalClassHead(
            is_training=is_training,
            num_class_slots=num_class_slots,
            use_dropout=use_dropout,
            dropout_keep_prob=dropout_keep_prob,
            kernel_size=kernel_size,
            conv_hyperparams=conv_hyperparams,
            freeze_batchnorm=freeze_batchnorm,
            num_predictions_per_location=predictions_per_location,
            class_prediction_bias_init=class_prediction_bias_init,
            use_depthwise=use_depthwise,
            name='ConvolutionalClassHead_%d' % head_index))
  # No auxiliary prediction heads for this predictor type.
  return convolutional_keras_box_predictor.ConvolutionalBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      box_prediction_heads=box_heads,
      class_prediction_heads=class_heads,
      other_heads={},
      conv_hyperparams=conv_hyperparams,
      num_layers_before_predictor=num_layers_before_predictor,
      min_depth=min_depth,
      max_depth=max_depth,
      freeze_batchnorm=freeze_batchnorm,
      inplace_batchnorm_update=inplace_batchnorm_update,
      name=name)
def build_weight_shared_convolutional_box_predictor(
    is_training,
    num_classes,
    conv_hyperparams_fn,
    depth,
    num_layers_before_predictor,
    box_code_size,
    kernel_size=3,
    add_background_class=True,
    class_prediction_bias_init=0.0,
    use_dropout=False,
    dropout_keep_prob=0.8,
    share_prediction_tower=False,
    apply_batch_norm=True,
    use_depthwise=False,
    score_converter_fn=tf.identity,
    box_encodings_clip_range=None,
    keyword_args=None):
  """Builds a WeightSharedConvolutionalBoxPredictor from the arguments.

  Args:
    is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: Number of classes, *excluding* the background category
      (groundtruth labels in {0, ..., K-1} mean num_classes=K).
    conv_hyperparams_fn: A function generating a tf-slim arg_scope with
      hyperparameters for convolution ops.
    depth: Depth of conv layers.
    num_layers_before_predictor: Number of additional conv layers placed
      before the predictor.
    box_code_size: Size of the encoding for each box.
    kernel_size: Size of the final convolution kernel.
    add_background_class: Whether to add an implicit background class.
    class_prediction_bias_init: Constant used to initialize the bias of
      the last conv2d layer before class prediction.
    use_dropout: Whether to apply dropout to the class prediction head.
    dropout_keep_prob: Probability of keeping activations.
    share_prediction_tower: Whether the multi-layer tower is shared
      between the box and class prediction heads.
    apply_batch_norm: Whether conv layers in this predictor use batch
      normalization.
    use_depthwise: Whether to use depthwise separable conv2d instead of
      conv2d.
    score_converter_fn: Callable applied elementwise to class scores.
    box_encodings_clip_range: Min and max values for clipping the
      box_encodings.
    keyword_args: A dictionary with additional args (unused here).

  Returns:
    A WeightSharedConvolutionalBoxPredictor instance.
  """
  # The class head predicts one extra slot when an implicit background
  # class is requested.
  num_class_slots = num_classes + 1 if add_background_class else num_classes
  localization_head = box_head.WeightSharedConvolutionalBoxHead(
      box_code_size=box_code_size,
      kernel_size=kernel_size,
      use_depthwise=use_depthwise,
      box_encodings_clip_range=box_encodings_clip_range)
  classification_head = class_head.WeightSharedConvolutionalClassHead(
      num_class_slots=num_class_slots,
      kernel_size=kernel_size,
      class_prediction_bias_init=class_prediction_bias_init,
      use_dropout=use_dropout,
      dropout_keep_prob=dropout_keep_prob,
      use_depthwise=use_depthwise,
      score_converter_fn=score_converter_fn)
  # No auxiliary prediction heads for this predictor type.
  return convolutional_box_predictor.WeightSharedConvolutionalBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      box_prediction_head=localization_head,
      class_prediction_head=classification_head,
      other_heads={},
      conv_hyperparams_fn=conv_hyperparams_fn,
      depth=depth,
      num_layers_before_predictor=num_layers_before_predictor,
      kernel_size=kernel_size,
      apply_batch_norm=apply_batch_norm,
      share_prediction_tower=share_prediction_tower,
      use_depthwise=use_depthwise)
def build_weight_shared_convolutional_keras_box_predictor(
is_training,
num_classes,
conv_hyperparams,
freeze_batchnorm,
inplace_batchnorm_update,
num_predictions_per_location_list,
depth,
num_layers_before_predictor,
box_code_size,
kernel_size=3,
add_background_class=True,
class_prediction_bias_init=0.0,
use_dropout=False,
dropout_keep_prob=0.8,
share_prediction_tower=False,
apply_batch_norm=True,
use_depthwise=False,
score_converter_fn=tf.identity,
box_encodings_clip_range=None,
name='WeightSharedConvolutionalBoxPredictor',
keyword_args=None):
"""Builds the Keras WeightSharedConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
inplace_batchnorm_update: Whether to update batch norm moving average
values inplace. When this is false train op must add a control
dependency on tf.graphkeys.UPDATE_OPS collection in order to update
batch norm statistics.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
depth: depth of conv layers.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
box_code_size: Size of encoding for each box.
kernel_size: Size of final convolution kernel.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
use_dropout: Whether to apply dropout to class prediction head.
dropout_keep_prob: Probability of keeping activiations.
share_prediction_tower: Whether to share the multi-layer tower between box
prediction and class prediction heads.
apply_batch_norm: Whether to apply batch normalization to conv layers in
this predictor.
use_depthwise: Whether to use depthwise separable conv2d instead of conv2d.
score_converter_fn: Callable score converter to perform elementwise op on
class scores.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
name: A string name scope to assign to the box predictor. If `None`, Keras
will auto-generate one from the class name.
keyword_args: A dictionary with additional args.
Returns:
A Keras WeightSharedConvolutionalBoxPredictor class.
"""
if len(set(num_predictions_per_location_list)) > 1:
raise ValueError('num predictions per location must be same for all'
'feature maps, found: {}'.format(
num_predictions_per_location_list))
num_predictions_per_location = num_predictions_per_location_list[0]
box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(
box_code_size=box_code_size,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=num_predictions_per_location,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range,
name='WeightSharedConvolutionalBoxHead')
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=(
num_classes + 1 if add_background_class else num_classes),
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=num_predictions_per_location,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise,
score_converter_fn=score_converter_fn,
name='WeightSharedConvolutionalClassHead')
other_heads = {}
return (
convolutional_keras_box_predictor.WeightSharedConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams=conv_hyperparams,
depth=depth,
num_layers_before_predictor=num_layers_before_predictor,
freeze_batchnorm=freeze_batchnorm,
inplace_batchnorm_update=inplace_batchnorm_update,
kernel_size=kernel_size,
apply_batch_norm=apply_batch_norm,
share_prediction_tower=share_prediction_tower,
use_depthwise=use_depthwise,
name=name))
def build_mask_rcnn_keras_box_predictor(is_training,
                                        num_classes,
                                        fc_hyperparams,
                                        freeze_batchnorm,
                                        use_dropout,
                                        dropout_keep_prob,
                                        box_code_size,
                                        add_background_class=True,
                                        share_box_across_classes=False,
                                        predict_instance_masks=False,
                                        conv_hyperparams=None,
                                        mask_height=14,
                                        mask_width=14,
                                        mask_prediction_num_conv_layers=2,
                                        mask_prediction_conv_depth=256,
                                        masks_are_class_agnostic=False,
                                        convolve_then_upsample_masks=False):
  """Constructs and returns a Keras-based MaskRCNNKerasBoxPredictor.

  Args:
    is_training: Whether the predictor is built in training mode.
    num_classes: Number of foreground classes. The implicit background
      class is *not* counted here: with groundtruth labels in
      {0, 1, ..., K-1}, num_classes is K (not K+1), even though assigned
      classification targets can range over {0, ..., K}.
    fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object with
      hyperparameters for the fully connected ops.
    freeze_batchnorm: Whether batch norm parameters are frozen during
      training. Freezing is desirable with very small batch sizes (e.g. 1)
      so pretrained batch norm statistics are reused instead of updated.
    use_dropout: Whether to apply dropout. A single dropout op is applied
      before both the box and the class predictions (in contrast to the
      ConvolutionalBoxPredictor).
    dropout_keep_prob: Dropout keep probability; only used when use_dropout
      is True.
    box_code_size: Size of the encoding for each box.
    add_background_class: Whether an implicit background class slot is added
      to the class head.
    share_box_across_classes: Whether a single box is shared across classes
      instead of predicting one box per class.
    predict_instance_masks: If True, a third-stage mask prediction head is
      attached to the returned predictor.
    conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
      with hyperparameters for convolution ops (used by the mask head).
    mask_height: Desired output mask height (default 14).
    mask_width: Desired output mask width (default 14).
    mask_prediction_num_conv_layers: Number of convolution layers applied to
      the image features in the mask prediction branch.
    mask_prediction_conv_depth: Depth of the first conv2d_transpose applied
      to the image features in the mask branch. If 0, the depth is chosen
      automatically from the number of classes and feature channels.
    masks_are_class_agnostic: Whether the mask head is class-agnostic.
    convolve_then_upsample_masks: Whether convolutions are applied to mask
      features *before* nearest-neighbor upsampling; otherwise mask features
      are bilinearly resized to [`mask_height`, `mask_width`] before the
      convolutions.

  Returns:
    A MaskRCNNKerasBoxPredictor instance.
  """
  # The class head gets one extra slot for the implicit background class.
  num_class_slots = num_classes + 1 if add_background_class else num_classes
  box_head_instance = keras_box_head.MaskRCNNBoxHead(
      is_training=is_training,
      num_classes=num_classes,
      fc_hyperparams=fc_hyperparams,
      freeze_batchnorm=freeze_batchnorm,
      use_dropout=use_dropout,
      dropout_keep_prob=dropout_keep_prob,
      box_code_size=box_code_size,
      share_box_across_classes=share_box_across_classes)
  class_head_instance = keras_class_head.MaskRCNNClassHead(
      is_training=is_training,
      num_class_slots=num_class_slots,
      fc_hyperparams=fc_hyperparams,
      freeze_batchnorm=freeze_batchnorm,
      use_dropout=use_dropout,
      dropout_keep_prob=dropout_keep_prob)
  # Optional third-stage head: instance mask prediction.
  third_stage_heads = {}
  if predict_instance_masks:
    mask_head_instance = keras_mask_head.MaskRCNNMaskHead(
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams=conv_hyperparams,
        freeze_batchnorm=freeze_batchnorm,
        mask_height=mask_height,
        mask_width=mask_width,
        mask_prediction_num_conv_layers=mask_prediction_num_conv_layers,
        mask_prediction_conv_depth=mask_prediction_conv_depth,
        masks_are_class_agnostic=masks_are_class_agnostic,
        convolve_then_upsample=convolve_then_upsample_masks)
    third_stage_heads[
        mask_rcnn_box_predictor.MASK_PREDICTIONS] = mask_head_instance
  return mask_rcnn_keras_box_predictor.MaskRCNNKerasBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      freeze_batchnorm=freeze_batchnorm,
      box_prediction_head=box_head_instance,
      class_prediction_head=class_head_instance,
      third_stage_heads=third_stage_heads)
def build_mask_rcnn_box_predictor(is_training,
                                  num_classes,
                                  fc_hyperparams_fn,
                                  use_dropout,
                                  dropout_keep_prob,
                                  box_code_size,
                                  add_background_class=True,
                                  share_box_across_classes=False,
                                  predict_instance_masks=False,
                                  conv_hyperparams_fn=None,
                                  mask_height=14,
                                  mask_width=14,
                                  mask_prediction_num_conv_layers=2,
                                  mask_prediction_conv_depth=256,
                                  masks_are_class_agnostic=False,
                                  convolve_then_upsample_masks=False):
  """Constructs and returns a slim-based MaskRCNNBoxPredictor.

  Args:
    is_training: Whether the predictor is built in training mode.
    num_classes: Number of foreground classes. The implicit background
      class is *not* counted here: with groundtruth labels in
      {0, 1, ..., K-1}, num_classes is K (not K+1), even though assigned
      classification targets can range over {0, ..., K}.
    fc_hyperparams_fn: A function generating a tf-slim arg_scope with
      hyperparameters for fully connected ops.
    use_dropout: Whether to apply dropout. A single dropout op is applied
      before both the box and the class predictions (in contrast to the
      ConvolutionalBoxPredictor).
    dropout_keep_prob: Dropout keep probability; only used when use_dropout
      is True.
    box_code_size: Size of the encoding for each box.
    add_background_class: Whether an implicit background class slot is added
      to the class head.
    share_box_across_classes: Whether a single box is shared across classes
      instead of predicting one box per class.
    predict_instance_masks: If True, a third-stage mask prediction head is
      attached to the returned predictor.
    conv_hyperparams_fn: A function generating a tf-slim arg_scope with
      hyperparameters for convolution ops (used by the mask head).
    mask_height: Desired output mask height (default 14).
    mask_width: Desired output mask width (default 14).
    mask_prediction_num_conv_layers: Number of convolution layers applied to
      the image features in the mask prediction branch.
    mask_prediction_conv_depth: Depth of the first conv2d_transpose applied
      to the image features in the mask branch. If 0, the depth is chosen
      automatically from the number of classes and feature channels.
    masks_are_class_agnostic: Whether the mask head is class-agnostic.
    convolve_then_upsample_masks: Whether convolutions are applied to mask
      features *before* nearest-neighbor upsampling; otherwise mask features
      are bilinearly resized to [`mask_height`, `mask_width`] before the
      convolutions.

  Returns:
    A MaskRCNNBoxPredictor instance.
  """
  # The class head gets one extra slot for the implicit background class.
  num_class_slots = num_classes + 1 if add_background_class else num_classes
  box_head_instance = box_head.MaskRCNNBoxHead(
      is_training=is_training,
      num_classes=num_classes,
      fc_hyperparams_fn=fc_hyperparams_fn,
      use_dropout=use_dropout,
      dropout_keep_prob=dropout_keep_prob,
      box_code_size=box_code_size,
      share_box_across_classes=share_box_across_classes)
  class_head_instance = class_head.MaskRCNNClassHead(
      is_training=is_training,
      num_class_slots=num_class_slots,
      fc_hyperparams_fn=fc_hyperparams_fn,
      use_dropout=use_dropout,
      dropout_keep_prob=dropout_keep_prob)
  # Optional third-stage head: instance mask prediction.
  third_stage_heads = {}
  if predict_instance_masks:
    mask_head_instance = mask_head.MaskRCNNMaskHead(
        num_classes=num_classes,
        conv_hyperparams_fn=conv_hyperparams_fn,
        mask_height=mask_height,
        mask_width=mask_width,
        mask_prediction_num_conv_layers=mask_prediction_num_conv_layers,
        mask_prediction_conv_depth=mask_prediction_conv_depth,
        masks_are_class_agnostic=masks_are_class_agnostic,
        convolve_then_upsample=convolve_then_upsample_masks)
    third_stage_heads[
        mask_rcnn_box_predictor.MASK_PREDICTIONS] = mask_head_instance
  return mask_rcnn_box_predictor.MaskRCNNBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      box_prediction_head=box_head_instance,
      class_prediction_head=class_head_instance,
      third_stage_heads=third_stage_heads)
def build_score_converter(score_converter_config, is_training):
  """Builds a score converter callable from the config.

  Maps the proto enum onto one of [tf.identity, tf.sigmoid]. During the
  training phase logits are needed to compute the loss, so even for the
  SIGMOID setting the identity op is returned when is_training is True;
  sigmoid is only applied at inference (which also keeps the inference
  graph TPU friendly).

  Args:
    score_converter_config:
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.score_converter.
    is_training: Whether the BoxPredictor is in training mode.

  Returns:
    Callable score converter op.

  Raises:
    ValueError: On unknown score converter.
  """
  score_converter_enum = box_predictor_pb2.WeightSharedConvolutionalBoxPredictor
  converters = {
      score_converter_enum.IDENTITY: tf.identity,
      score_converter_enum.SIGMOID:
          tf.identity if is_training else tf.sigmoid,
  }
  if score_converter_config not in converters:
    raise ValueError('Unknown score converter.')
  return converters[score_converter_config]
# Closed interval [min, max] used to clip predicted box encodings when the
# box predictor config provides a `box_encodings_clip_range` field.
BoxEncodingsClipRange = collections.namedtuple('BoxEncodingsClipRange',
                                               ['min', 'max'])
def build(argscope_fn, box_predictor_config, is_training, num_classes,
          add_background_class=True):
  """Builds box predictor based on the configuration.

  Builds box predictor based on the configuration. See box_predictor.proto for
  configurable options. Also, see box_predictor.py for more details.

  Args:
    argscope_fn: A function that takes the following inputs:
      * hyperparams_pb2.Hyperparams proto
      * a boolean indicating if the model is in training mode.
      and returns a tf slim argscope for Conv and FC hyperparameters.
    box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
      configuration.
    is_training: Whether the model is in training mode.
    num_classes: Number of classes to predict.
    add_background_class: Whether to add an implicit background class.

  Returns:
    box_predictor: box_predictor.BoxPredictor object.

  Raises:
    ValueError: On unknown box predictor.
  """
  if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
    raise ValueError('box_predictor_config not of type '
                     'box_predictor_pb2.BoxPredictor.')
  # Dispatch on which oneof field is set in the proto.
  box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
  # SSD-style convolutional predictor with per-feature-map weights.
  if box_predictor_oneof == 'convolutional_box_predictor':
    config_box_predictor = box_predictor_config.convolutional_box_predictor
    conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
                                      is_training)
    # Optionally apply clipping to box encodings, when box_encodings_clip_range
    # is set.
    box_encodings_clip_range = None
    if config_box_predictor.HasField('box_encodings_clip_range'):
      box_encodings_clip_range = BoxEncodingsClipRange(
          min=config_box_predictor.box_encodings_clip_range.min,
          max=config_box_predictor.box_encodings_clip_range.max)
    return build_convolutional_box_predictor(
        is_training=is_training,
        num_classes=num_classes,
        add_background_class=add_background_class,
        conv_hyperparams_fn=conv_hyperparams_fn,
        use_dropout=config_box_predictor.use_dropout,
        dropout_keep_prob=config_box_predictor.dropout_keep_probability,
        box_code_size=config_box_predictor.box_code_size,
        kernel_size=config_box_predictor.kernel_size,
        num_layers_before_predictor=(
            config_box_predictor.num_layers_before_predictor),
        min_depth=config_box_predictor.min_depth,
        max_depth=config_box_predictor.max_depth,
        apply_sigmoid_to_scores=config_box_predictor.apply_sigmoid_to_scores,
        class_prediction_bias_init=(
            config_box_predictor.class_prediction_bias_init),
        use_depthwise=config_box_predictor.use_depthwise,
        box_encodings_clip_range=box_encodings_clip_range)
  # RetinaNet-style predictor sharing weights across feature maps.
  if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
    config_box_predictor = (
        box_predictor_config.weight_shared_convolutional_box_predictor)
    conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
                                      is_training)
    # Batch norm is applied iff the conv hyperparams configure it.
    apply_batch_norm = config_box_predictor.conv_hyperparams.HasField(
        'batch_norm')
    # During training phase, logits are used to compute the loss. Only apply
    # sigmoid at inference to make the inference graph TPU friendly.
    score_converter_fn = build_score_converter(
        config_box_predictor.score_converter, is_training)
    # Optionally apply clipping to box encodings, when box_encodings_clip_range
    # is set.
    box_encodings_clip_range = None
    if config_box_predictor.HasField('box_encodings_clip_range'):
      box_encodings_clip_range = BoxEncodingsClipRange(
          min=config_box_predictor.box_encodings_clip_range.min,
          max=config_box_predictor.box_encodings_clip_range.max)
    keyword_args = None
    return build_weight_shared_convolutional_box_predictor(
        is_training=is_training,
        num_classes=num_classes,
        add_background_class=add_background_class,
        conv_hyperparams_fn=conv_hyperparams_fn,
        depth=config_box_predictor.depth,
        num_layers_before_predictor=(
            config_box_predictor.num_layers_before_predictor),
        box_code_size=config_box_predictor.box_code_size,
        kernel_size=config_box_predictor.kernel_size,
        class_prediction_bias_init=(
            config_box_predictor.class_prediction_bias_init),
        use_dropout=config_box_predictor.use_dropout,
        dropout_keep_prob=config_box_predictor.dropout_keep_probability,
        share_prediction_tower=config_box_predictor.share_prediction_tower,
        apply_batch_norm=apply_batch_norm,
        use_depthwise=config_box_predictor.use_depthwise,
        score_converter_fn=score_converter_fn,
        box_encodings_clip_range=box_encodings_clip_range,
        keyword_args=keyword_args)
  # Second-stage Mask R-CNN predictor (optionally with instance masks).
  if box_predictor_oneof == 'mask_rcnn_box_predictor':
    config_box_predictor = box_predictor_config.mask_rcnn_box_predictor
    fc_hyperparams_fn = argscope_fn(config_box_predictor.fc_hyperparams,
                                    is_training)
    # conv hyperparams are only needed when a mask head is configured.
    conv_hyperparams_fn = None
    if config_box_predictor.HasField('conv_hyperparams'):
      conv_hyperparams_fn = argscope_fn(
          config_box_predictor.conv_hyperparams, is_training)
    return build_mask_rcnn_box_predictor(
        is_training=is_training,
        num_classes=num_classes,
        add_background_class=add_background_class,
        fc_hyperparams_fn=fc_hyperparams_fn,
        use_dropout=config_box_predictor.use_dropout,
        dropout_keep_prob=config_box_predictor.dropout_keep_probability,
        box_code_size=config_box_predictor.box_code_size,
        share_box_across_classes=(
            config_box_predictor.share_box_across_classes),
        predict_instance_masks=config_box_predictor.predict_instance_masks,
        conv_hyperparams_fn=conv_hyperparams_fn,
        mask_height=config_box_predictor.mask_height,
        mask_width=config_box_predictor.mask_width,
        mask_prediction_num_conv_layers=(
            config_box_predictor.mask_prediction_num_conv_layers),
        mask_prediction_conv_depth=(
            config_box_predictor.mask_prediction_conv_depth),
        masks_are_class_agnostic=(
            config_box_predictor.masks_are_class_agnostic),
        convolve_then_upsample_masks=(
            config_box_predictor.convolve_then_upsample_masks))
  # R-FCN position-sensitive predictor.
  if box_predictor_oneof == 'rfcn_box_predictor':
    config_box_predictor = box_predictor_config.rfcn_box_predictor
    conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams,
                                      is_training)
    box_predictor_object = rfcn_box_predictor.RfcnBoxPredictor(
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams_fn=conv_hyperparams_fn,
        crop_size=[config_box_predictor.crop_height,
                   config_box_predictor.crop_width],
        num_spatial_bins=[config_box_predictor.num_spatial_bins_height,
                          config_box_predictor.num_spatial_bins_width],
        depth=config_box_predictor.depth,
        box_code_size=config_box_predictor.box_code_size)
    return box_predictor_object
  raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof))
def build_keras(hyperparams_fn, freeze_batchnorm, inplace_batchnorm_update,
                num_predictions_per_location_list, box_predictor_config,
                is_training, num_classes, add_background_class=True):
  """Builds a Keras-based box predictor based on the configuration.

  Builds Keras-based box predictor based on the configuration.
  See box_predictor.proto for configurable options. Also, see box_predictor.py
  for more details.

  Args:
    hyperparams_fn: A function that takes a hyperparams_pb2.Hyperparams
      proto and returns a `hyperparams_builder.KerasLayerHyperparams`
      for Conv or FC hyperparameters.
    freeze_batchnorm: Whether to freeze batch norm parameters during
      training or not. When training with a small batch size (e.g. 1), it is
      desirable to freeze batch norm update and use pretrained batch norm
      params.
    inplace_batchnorm_update: Whether to update batch norm moving average
      values inplace. When this is false train op must add a control
      dependency on tf.graphkeys.UPDATE_OPS collection in order to update
      batch norm statistics.
    num_predictions_per_location_list: A list of integers representing the
      number of box predictions to be made per spatial location for each
      feature map.
    box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
      configuration.
    is_training: Whether the model is in training mode.
    num_classes: Number of classes to predict.
    add_background_class: Whether to add an implicit background class.

  Returns:
    box_predictor: box_predictor.KerasBoxPredictor object.

  Raises:
    ValueError: On unknown box predictor, or one with no Keras box predictor.
  """
  if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
    raise ValueError('box_predictor_config not of type '
                     'box_predictor_pb2.BoxPredictor.')
  # Dispatch on which oneof field is set in the proto.
  box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')
  # SSD-style convolutional predictor with per-feature-map weights.
  if box_predictor_oneof == 'convolutional_box_predictor':
    config_box_predictor = box_predictor_config.convolutional_box_predictor
    conv_hyperparams = hyperparams_fn(
        config_box_predictor.conv_hyperparams)
    # Optionally apply clipping to box encodings, when box_encodings_clip_range
    # is set.
    box_encodings_clip_range = None
    if config_box_predictor.HasField('box_encodings_clip_range'):
      box_encodings_clip_range = BoxEncodingsClipRange(
          min=config_box_predictor.box_encodings_clip_range.min,
          max=config_box_predictor.box_encodings_clip_range.max)
    return build_convolutional_keras_box_predictor(
        is_training=is_training,
        num_classes=num_classes,
        add_background_class=add_background_class,
        conv_hyperparams=conv_hyperparams,
        freeze_batchnorm=freeze_batchnorm,
        inplace_batchnorm_update=inplace_batchnorm_update,
        num_predictions_per_location_list=num_predictions_per_location_list,
        use_dropout=config_box_predictor.use_dropout,
        dropout_keep_prob=config_box_predictor.dropout_keep_probability,
        box_code_size=config_box_predictor.box_code_size,
        kernel_size=config_box_predictor.kernel_size,
        num_layers_before_predictor=(
            config_box_predictor.num_layers_before_predictor),
        min_depth=config_box_predictor.min_depth,
        max_depth=config_box_predictor.max_depth,
        class_prediction_bias_init=(
            config_box_predictor.class_prediction_bias_init),
        use_depthwise=config_box_predictor.use_depthwise,
        box_encodings_clip_range=box_encodings_clip_range)
  # RetinaNet-style predictor sharing weights across feature maps.
  if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
    config_box_predictor = (
        box_predictor_config.weight_shared_convolutional_box_predictor)
    conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams)
    # Batch norm is applied iff the conv hyperparams configure it.
    apply_batch_norm = config_box_predictor.conv_hyperparams.HasField(
        'batch_norm')
    # During training phase, logits are used to compute the loss. Only apply
    # sigmoid at inference to make the inference graph TPU friendly. This is
    # required because during TPU inference, model.postprocess is not called.
    score_converter_fn = build_score_converter(
        config_box_predictor.score_converter, is_training)
    # Optionally apply clipping to box encodings, when box_encodings_clip_range
    # is set.
    box_encodings_clip_range = None
    if config_box_predictor.HasField('box_encodings_clip_range'):
      box_encodings_clip_range = BoxEncodingsClipRange(
          min=config_box_predictor.box_encodings_clip_range.min,
          max=config_box_predictor.box_encodings_clip_range.max)
    keyword_args = None
    return build_weight_shared_convolutional_keras_box_predictor(
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams=conv_hyperparams,
        freeze_batchnorm=freeze_batchnorm,
        inplace_batchnorm_update=inplace_batchnorm_update,
        num_predictions_per_location_list=num_predictions_per_location_list,
        depth=config_box_predictor.depth,
        num_layers_before_predictor=(
            config_box_predictor.num_layers_before_predictor),
        box_code_size=config_box_predictor.box_code_size,
        kernel_size=config_box_predictor.kernel_size,
        add_background_class=add_background_class,
        class_prediction_bias_init=(
            config_box_predictor.class_prediction_bias_init),
        use_dropout=config_box_predictor.use_dropout,
        dropout_keep_prob=config_box_predictor.dropout_keep_probability,
        share_prediction_tower=config_box_predictor.share_prediction_tower,
        apply_batch_norm=apply_batch_norm,
        use_depthwise=config_box_predictor.use_depthwise,
        score_converter_fn=score_converter_fn,
        box_encodings_clip_range=box_encodings_clip_range,
        keyword_args=keyword_args)
  # Second-stage Mask R-CNN predictor (optionally with instance masks).
  if box_predictor_oneof == 'mask_rcnn_box_predictor':
    config_box_predictor = box_predictor_config.mask_rcnn_box_predictor
    fc_hyperparams = hyperparams_fn(config_box_predictor.fc_hyperparams)
    # conv hyperparams are only needed when a mask head is configured.
    conv_hyperparams = None
    if config_box_predictor.HasField('conv_hyperparams'):
      conv_hyperparams = hyperparams_fn(
          config_box_predictor.conv_hyperparams)
    return build_mask_rcnn_keras_box_predictor(
        is_training=is_training,
        num_classes=num_classes,
        add_background_class=add_background_class,
        fc_hyperparams=fc_hyperparams,
        freeze_batchnorm=freeze_batchnorm,
        use_dropout=config_box_predictor.use_dropout,
        dropout_keep_prob=config_box_predictor.dropout_keep_probability,
        box_code_size=config_box_predictor.box_code_size,
        share_box_across_classes=(
            config_box_predictor.share_box_across_classes),
        predict_instance_masks=config_box_predictor.predict_instance_masks,
        conv_hyperparams=conv_hyperparams,
        mask_height=config_box_predictor.mask_height,
        mask_width=config_box_predictor.mask_width,
        mask_prediction_num_conv_layers=(
            config_box_predictor.mask_prediction_num_conv_layers),
        mask_prediction_conv_depth=(
            config_box_predictor.mask_prediction_conv_depth),
        masks_are_class_agnostic=(
            config_box_predictor.masks_are_class_agnostic),
        convolve_then_upsample_masks=(
            config_box_predictor.convolve_then_upsample_masks))
  # R-FCN position-sensitive predictor.
  if box_predictor_oneof == 'rfcn_box_predictor':
    config_box_predictor = box_predictor_config.rfcn_box_predictor
    conv_hyperparams = hyperparams_fn(config_box_predictor.conv_hyperparams)
    box_predictor_object = rfcn_keras_box_predictor.RfcnKerasBoxPredictor(
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams=conv_hyperparams,
        freeze_batchnorm=freeze_batchnorm,
        crop_size=[config_box_predictor.crop_height,
                   config_box_predictor.crop_width],
        num_spatial_bins=[config_box_predictor.num_spatial_bins_height,
                          config_box_predictor.num_spatial_bins_width],
        depth=config_box_predictor.depth,
        box_code_size=config_box_predictor.box_code_size)
    return box_predictor_object
  raise ValueError(
      'Unknown box predictor for Keras: {}'.format(box_predictor_oneof))
|
A senior banking strategist, who’s held director-level roles at UOB and Standard Chartered in Singapore, has joined a fintech firm based in…India. UOB executive director Karthik Raghupathy moved to PhonePe, a Bengaluru-headquartered start-up that runs a popular payments app, last month as head of strategy and planning.
Raghupathy adds his name to a growing list of banking professionals in Singapore who have left for the tech sector over the past year. But the vast majority of them have stayed in the Republic, joining tech companies (Google has taken on several recent recruits from banks) or setting up their own local businesses. Last year Singapore was rated the world’s best fintech hub by research institute IFZ.
While Raghupathy is currently an outlier, his move could be a sign of things to come as India’s fintech market begins to boom and start-ups there increase their hiring. More than 95% of financial services institutions in the country are exploring fintech partnerships and India offers the highest expected return on fintech investments globally, according to a 2017 PwC report.
Singapore, with its high concentration of Indian tech and banking professionals, is likely to become a prime hunting ground for Indian start-ups over the next 12 months, says an IT recruiter in the city-state. Raghupathy’s new sector, online payments, is in particular need of new talent. Morgan Stanley expects India’s digital payments penetration to increase from 5% in 2017 to 20% in 2027.
In a more advanced trend in the Asian job market, Chinese tech companies have been recruiting from banks in Hong Kong for the past three years. Like Raghupathy, many of the new hires have gone into strategy and corporate development roles. In October, for example, Jeff Chen, head of technology investment banking for Asia Pacific at HSBC, moved to online healthcare firm WeDoctor as chief strategy officer. Alibaba has a long history of hiring senior bankers for strategy jobs.
Raghupathy cut his banking-strategy teeth during a seven-year stint consulting for financial services clients at McKinsey & Co in New York, starting in 2005. He moved to Singapore in 2012 as a corporate development director at computer company Dell and then worked for Stan Chart between 2013 and 2015, latterly as head of planning and projects in the COO office. UOB recruited him in 2016 as an executive director in process excellence and he led projects that delivered “improvements in productivity and/or customer experience”.
The talent flow between tech firms and banks like UOB isn’t all one-way in Asia, however. Last month UOB hired Peter Vicente from the Singapore office of Indian IT giant Wipro BPS as a managing director in group operations. UOB is increasingly open to candidates from Amazon, Google, Facebook and Uber, Susan Hwee, the bank’s head of group technology and operations, told us previously.
UOB has been among the more aggressive recruiters in the Singapore banking sector of late. As we reported in November, its headcount rose by 219 year-on-year in the third quarter. This was partly fuelled by hiring of engineers, analysts, digital designers, architects, project managers and data scientists.
|
# -*- coding: utf-8 -*-
import urllib
import re
import urllib2
import sys
import HTMLParser
from youtube_dl.extractor.common import InfoExtractor
# Shared parser instance used to unescape HTML entities in scraped titles.
html_parser = HTMLParser.HTMLParser()
class WatchCartoonOnlineIE(InfoExtractor):
    """youtube-dl info extractor for watchcartoononline.com episode pages."""
    #IE_NAME = u'WatchCartoonOnline'
    # Matches e.g. http://www.watchcartoononline.com/<episode-slug>;
    # group(1) captures the episode slug used as the video id.
    _VALID_URL = r'(?:http://)?(?:www\.)?watchcartoononline\.com/([^/]+)'
    def _real_extract(self,url):
        """Scrape the episode page and return its stream URL, title and id.

        Returns a dict with keys 'url' (final stream URL after redirects),
        'title' (unescaped page title) and 'id' (the episode slug).
        """
        o=urllib2.build_opener(urllib2.HTTPHandler).open
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        webpage = o('http://www.watchcartoononline.com/{0}'.format(video_id)).read()
        # The page title is HTML-escaped inside the first <h1> element.
        title_escaped = re.search(r'<h1.*?>(.+?)</h1>',webpage).group(1)
        title = html_parser.unescape(title_escaped)
        # The player is embedded in an <iframe id="...0">; pull out its src.
        video_url = re.search(r'<iframe id="(.+?)0" (.+?)>', webpage).group()
        video_url = re.search('src="(.+?)"', video_url).group(1).replace(' ','%20')
        # The embed page expects a POSTed confirmation form before it reveals
        # the stream; these field names/values come from the site itself.
        params = urllib.urlencode({'fuck_you':'','confirm':'Click Here to Watch Free!!'})
        request = urllib2.Request(video_url,params)
        video_webpage = o(request).read()
        # The last 'file:' entry in the player config points at the video.
        final_url = re.findall(r'file: "(.+?)"', video_webpage)
        redirect_url=urllib.unquote(final_url[-1]).replace(' ','%20')
        # Follow any HTTP redirects to get the final stream location.
        flv_url = o(redirect_url).geturl()
        return {'url':flv_url, 'title':title, 'id': video_id}
def downloader(fileurl, file_name):
    """Download *fileurl* to *file_name*, showing an in-place progress line.

    The transfer is streamed in 8 KiB chunks so arbitrarily large files are
    never held fully in memory. Fix over the previous version: the output
    file and the URL handle are now closed via try/finally even if the
    transfer fails part-way through, so partial data is flushed and the
    descriptors are not leaked.
    """
    u = urllib2.urlopen(fileurl)
    meta = u.info()
    file_size = int(meta.getheaders("Content-Length")[0])
    sys.stdout.write("[watchcartoononline-dl] Downloading %s (%s bytes)\n"
                     % (file_name, file_size))
    file_size_dl = 0
    block_size = 8192
    f = open(file_name, 'wb')
    try:
        # Download loop: an empty read signals EOF from the server.
        while True:
            buffer = u.read(block_size)
            if not buffer:
                break
            file_size_dl += len(buffer)
            f.write(buffer)
            status = r"%s [%3.2f%%]" % (convertSize(file_size_dl),
                                        file_size_dl * 100. / file_size)
            status = status + chr(8) * (len(status) + 1)
            sys.stdout.write("\r %s" % status)
            sys.stdout.flush()
    finally:
        # Always release both ends, even on error mid-transfer.
        f.close()
        u.close()
def convertSize(n, format='%(value).1f %(symbol)s', symbols='customary'):
    """Render the byte count *n* as a human readable string.

    *symbols* selects the unit table: "customary", "customary_ext", "iec"
    or "iec_ext" (see: http://goo.gl/kTQMs). *format* is an old-style
    %-format string interpolated with the names ``value`` and ``symbol``.
    Raises ValueError for negative *n*.
    """
    SYMBOLS = {
        'customary': ('B', 'K', 'Mb', 'G', 'T', 'P', 'E', 'Z', 'Y'),
        'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta',
                          'exa', 'zetta', 'iotta'),
        'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
        'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi',
                    'zebi', 'yobi'),
    }
    n = int(n)
    if n < 0:
        raise ValueError("n < 0")
    symbols = SYMBOLS[symbols]
    # Each unit beyond the base one represents a further factor of 1024.
    prefix = dict((s, 1 << (i + 1) * 10) for i, s in enumerate(symbols[1:]))
    # Walk from the largest unit down and use the first one that fits.
    for symbol in reversed(symbols[1:]):
        if n >= prefix[symbol]:
            value = float(n) / prefix[symbol]
            return format % locals()
    # Below the first threshold: report the raw count with the base symbol.
    return format % dict(symbol=symbols[0], value=n)
|
As the US Presidential race heats up, check out the top interactive maps and data visualizations already produced.
In just a few hours, the 2016 Presidential election in the United States enters a new level, with the first debate between the Democrat candidate, Hillary Clinton and her Republican opponent, Donald Trump. And in this particular election, to “get to another level” is saying a lot, because, let’s face it, the bar is already pretty high – or pretty low, depending on your point of view.
An unprecedented election in so many ways, none of which as significant as the fact that it’s the first time a woman gets nominated by a major american party to run for President. Hillary Clinton will hopefully make history on November 8, and today she has a unique opportunity to take a major step towards that end. The first of three debates will be held at Hofstra University in Hempstead, N.Y., moderated by Lester Holt, the anchor of NBC Nightly News. It’s expected that a record-breaking audience will follow the event on television, Internet live streams and social media.
We thought this would be a good time to take a look at how the media has been using interactive visualization to tell the stories of these elections. We gathered 50 online visualizations and interactive specials that pretty much cover the uniqueness of this race in all its multiple forms. As expected, all the major news outlets in the US are present in this selection, as well as several international ones, and we close with projects from other websites, companies and independent designers.
And we’ll definitely make a follow-up post closer to November, so feel free to let us know of other interactive visualizations out there about this topic – we’ll probably miss some good ones, so just drop us a message on Twitter or Facebook.
That’s it for this special round up, but for an even bigger selection of US Elections-related graphics and visualizations, check out our Pinterest board.
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
fiberassign.test.fiberassign_test_suite
===================================================
Used to initialize the unit test framework via ``python setup.py test``.
"""
from __future__ import absolute_import, division, print_function
import sys
import unittest
def fiberassign_test_suite():
    """Return a unittest.TestSuite containing the fiberassign tests.

    Factored out of runtests() so that it can also be invoked by
    ``python setup.py test``.
    """
    from os.path import dirname
    package_dir = dirname(dirname(__file__))
    loader = unittest.defaultTestLoader
    return loader.discover(package_dir, top_level_dir=dirname(package_dir))
def runtests():
    """Run all tests in fiberassign.test.test_* and exit non-zero on failure.

    Raises
    ------
    SystemExit
        With status 1 if any test fails or errors.
    """
    # Load all TestCase classes from fiberassign/test/test_*.py
    # (the original comment referred to "desispec", a copy-paste leftover).
    tests = fiberassign_test_suite()
    # Run them and force a non-zero process return value if they fail.
    ret = unittest.TextTestRunner(verbosity=2).run(tests)
    if not ret.wasSuccessful():
        # sys.exit() expects an integer exit status; the original passed the
        # TestResult object, which prints its repr to stderr and exits with
        # status 1 as a side effect.  An explicit integer makes the exit code
        # intentional and keeps the output clean.
        sys.exit(1)


if __name__ == "__main__":
    runtests()
|
The last 10 years have seen enormous progress in both the understanding of cancer as a disease, as well as the development of targeted therapies, with an unprecedented number of new drugs approved for cancer treatment by the FDA and EMA. The availability of a large set of targeted therapies has created the need for efficient molecular profiling of patients.
A decade ago, the term “liquid biopsy” was coined to describe diagnostic procedures carried out to detect molecular biomarkers found in the blood or in other bodily fluids (e.g., urine or cerebrospinal fluid) of patients.
Blood, and potentially any other bodily fluid, contains cancer-derived elements that allow the molecular characterization of the disease, contributing to early diagnosis, accurate prognosis, personalized therapeutics, and disease monitoring. Materials derived from tumors include circulating tumor cells (CTCs), circulating tumor DNA (ctDNA), as well as RNA, protein markers, and extracellular vesicles, such as exosomes.
When a tumor cell dies, it releases DNA molecules from its fragmented genome into the bloodstream. These nucleic acids in the blood can be purified and analysed using PCR-based methods, next generation sequencing (NGS), or array technologies. Data generated from analysing liquid biopsies have shown the enormous potential in this approach that could have a revolutionary impact on clinical practice. During the last few years, several studies have shown that key cancer mutations can be detected in liquid biopsies, mirroring those detected in traditional tumor biopsies.
Liquid biopsies may be even superior to standard tissue biopsies, as all parts of a tumor and all metastases are potentially sampled. Recent data indicate that, in most cases, analysis of circulating tumor DNA is faithfully reflecting mutations found in all known metastases of a cancer, or is even superior to such an approach (e.g., detecting mutations if standard biopsies fail, or showing more mutations than the standard tissue biopsies), suggesting that sequencing circulating tumor DNA can give a much more complete molecular picture of the systemic cancer disease than standard biopsies. Moreover, access to the blood of a patient is unproblematic, since the procedure presents minimal risk and sample collection is low cost.
Serial liquid biopsies could be easily taken to monitor cancer therapy effects, or to screen for reoccurrence of cancer, as long as the volume of blood needed for the respective analysis is small (i.e., a few mL). Sensitivity of the method may be superior for detecting cancer at a very early stage, such as in cases of reoccurrence of cancer after curative surgery, or in a population-based screening program. If liquid biopsies can improve early detection of tumors in preventive screening programs, this will contribute to higher survival rates, especially for tumor types where means of early detection and preventive screenings are otherwise limited or non-existent.
On the other hand, several studies of liquid biopsy approaches in cancer patients have revealed that the success rate of this approach is related to the tumor mass burden as well as the tumor stage of a patient at the time of liquid biopsy. The approach has been shown to be minimally successful in instances when tumor mass is low, due to the limited number of tumor cells dying and thereby releasing DNA into the blood. Furthermore, it appears that tumor types behave differently, with some tumors, such as colon carcinoma, shedding DNA more abundantly in the blood stream compared to, for instance, glioblastoma. The success rate of liquid biopsy is therefore tumor type, stage, and mass dependent.
Techniques and methodologies for cell-free DNA analysis can be broadly divided into targeted and untargeted approaches. Targeted approaches examine specific and pre-known genetic alterations, while untargeted approaches do not require prior knowledge, and allow for the discovery of novel disease genetic markers. Therefore, untargeted approaches may have more utility for early detection of cancer with high sensitivity, but also for serial analyses of the changing clonal landscape of a tumor after treatment.
One of the main limitations that affect both targeted and untargeted techniques is the limited amount of cell-free DNA that is obtained from liquid biopsies. This is even more relevant in cases where cell-free DNA is analysed by whole exome or whole genome sequencing, as opposed to more sensitive PCR-based targeted approaches such as BEAMing. For example, standard library prep systems for whole-exome sequencing require cell-free DNA in amounts of around 100 ng, although exome sequencing has been performed with more specialized approaches from only a few nanograms. Targeted-sequencing approaches require lower but still significant amounts of cell-free DNA to prepare the library (e.g., between 10 to 40 ng of cell-free DNA in the case of QIAseq panels for Illumina sequencers). Therefore, having an adequate amount of cell-free DNA for sequencing purposes can be challenging, and remains a substantial hurdle that needs to be overcome.
A second issue diminishing the detection capabilities of liquid biopsy approaches is the contamination of tumor cell-derived DNA, with DNA coming from unrelated processes naturally occurring in the body. Cell-free DNA present in the blood plasma or other bodily fluids (CSF, urine, ascites) can be broadly divided into three categories. First, there are the smaller size fragments (160-170 and multiples thereof) that originate from apoptotic breakdown of genomic DNA between nucleosomes inside a cell. Then, there are the larger size fragments that originate mainly from necrotic cell death (necrosis). Finally, one also has DNA from exosome shedding, and a few other less well understood processes. DNA fragments of apoptotic origin can also be detected in healthy people, and may increase naturally, for instance following sports activity or due to a cold. However, the larger size fragments do not normally occur in healthy people, and thus appear to be the more diagnostically relevant species in the oncology field, for example, as the proportion of larger DNA fragments increases significantly with progressing tumor stage, underscoring this view.
Other clinical applications that look particularly promising for the liquid biopsy approach are the diagnosis of chromosomal abnormalities in the fetus (in particular, trisomy) by analysing the blood from the mother (also called noninvasive prenatal testing or NIPT; based on cell-free DNA), and the diagnosis and monitoring of graft rejection in transplantation patients (DNA from donor tissue attacked by immune cells of the host can be detected in the blood of the patient).
As recently pointed out by ASCO and the College of American Pathologists, new research studies and technology developments are required to fully demonstrate the analytical and clinical validity as well as the clinical utility of liquid biopsies*, in order to implement liquid biopsies as part of the future precision medicine. However, once the limitations within this field are overcome, there is great potential for new technologies to have a profound impact on early-stage diagnosis within oncology. Already the liquid biopsy market is expected to reach $2,047.9M by 2022, growing at a CAGR of 23.4%, with kits and consumables leading the sub-segment to the global value of this market.
Angel Picher, Ph.D., is the director of genomics research at Expedeon.
*Analytical validity refers to the ability of a test to accurately and reliably detect the variant(s) of interest and includes measures of accuracy, sensitivity, specificity, and robustness. Clinical validity implies that the test may accurately detect the presence or absence of a pathologic state or predict outcomes for groups of patients whose test results differ. Clinical utility is documented when high levels of evidence exist to demonstrate that the use of the test improves patient outcomes compared with not using it.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2017 Snowflake Computing Inc. All right reserved.
#
import collections
import contextlib
import copy
import gzip
import itertools
import json
import logging
import platform
import sys
import time
import uuid
from io import StringIO, BytesIO
from threading import Thread
import OpenSSL
from botocore.vendored import requests
from botocore.vendored.requests.adapters import HTTPAdapter
from botocore.vendored.requests.auth import AuthBase
from botocore.vendored.requests.exceptions import (ConnectionError, SSLError)
from botocore.vendored.requests.packages.urllib3.exceptions import (
ProtocolError)
from . import ssl_wrap_socket
from .compat import (
BAD_REQUEST, SERVICE_UNAVAILABLE, GATEWAY_TIMEOUT,
FORBIDDEN, BAD_GATEWAY,
UNAUTHORIZED, INTERNAL_SERVER_ERROR, OK, BadStatusLine,
urlsplit, unescape)
from .compat import (Queue, EmptyQueue)
from .compat import (TO_UNICODE, urlencode)
from .compat import proxy_bypass
from .errorcode import (ER_FAILED_TO_CONNECT_TO_DB, ER_CONNECTION_IS_CLOSED,
ER_FAILED_TO_REQUEST, ER_FAILED_TO_RENEW_SESSION,
ER_FAILED_TO_SERVER, ER_IDP_CONNECTION_ERROR,
ER_INCORRECT_DESTINATION)
from .errors import (Error, OperationalError, DatabaseError, ProgrammingError,
GatewayTimeoutError, ServiceUnavailableError,
InterfaceError, InternalServerError, ForbiddenError,
BadGatewayError, BadRequest)
from .gzip_decoder import decompress_raw_data
from .sqlstate import (SQLSTATE_CONNECTION_NOT_EXISTS,
SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
SQLSTATE_CONNECTION_REJECTED)
from .ssl_wrap_socket import set_proxies
from .util_text import split_rows_from_stream
from .version import VERSION
logger = logging.getLogger(__name__)
"""
Monkey patch for PyOpenSSL Socket wrapper
"""
ssl_wrap_socket.inject_into_urllib3()
import errno
# --- Retry / GS status codes -------------------------------------------------
REQUESTS_RETRY = 5  # requests retry
QUERY_IN_PROGRESS_CODE = u'333333'  # GS code: the query is in progress
QUERY_IN_PROGRESS_ASYNC_CODE = u'333334'  # GS code: the query is detached
SESSION_EXPIRED_GS_CODE = u'390112'  # GS code: session expired. need to renew
# --- Default timeouts (seconds) ----------------------------------------------
DEFAULT_CONNECT_TIMEOUT = 1 * 60  # 60 seconds
DEFAULT_REQUEST_TIMEOUT = 2 * 60  # 120 seconds
# --- HTTP content types and Snowflake request types --------------------------
CONTENT_TYPE_APPLICATION_JSON = u'application/json'
ACCEPT_TYPE_APPLICATION_SNOWFLAKE = u'application/snowflake'
REQUEST_TYPE_RENEW = u'RENEW'
REQUEST_TYPE_CLONE = u'CLONE'
REQUEST_TYPE_ISSUE = u'ISSUE'
# Authorization header name and value template used by SnowflakeAuth.
HEADER_AUTHORIZATION_KEY = u"Authorization"
HEADER_SNOWFLAKE_TOKEN = u'Snowflake Token="{token}"'
# --- Client/platform identification strings ----------------------------------
SNOWFLAKE_CONNECTOR_VERSION = u'.'.join(TO_UNICODE(v) for v in VERSION[0:3])
PYTHON_VERSION = u'.'.join(TO_UNICODE(v) for v in sys.version_info[:3])
PLATFORM = platform.platform()
IMPLEMENTATION = platform.python_implementation()
COMPILER = platform.python_compiler()
CLIENT_NAME = u"PythonConnector"
CLIENT_VERSION = u'.'.join([TO_UNICODE(v) for v in VERSION[:3]])
# User-Agent sent with every request, e.g. "PythonConnector/1.x/3.x/Linux...".
PYTHON_CONNECTOR_USER_AGENT = \
    u'{name}/{version}/{python_version}/{platform}'.format(
        name=CLIENT_NAME,
        version=SNOWFLAKE_CONNECTOR_VERSION,
        python_version=PYTHON_VERSION,
        platform=PLATFORM)
DEFAULT_AUTHENTICATOR = u'SNOWFLAKE'  # default authenticator name
# Sentinel meaning "no Authorization header should be attached".
NO_TOKEN = u'no-token'
# Maps retryable HTTP status codes to the exception raised for each.
STATUS_TO_EXCEPTION = {
    INTERNAL_SERVER_ERROR: InternalServerError,
    FORBIDDEN: ForbiddenError,
    SERVICE_UNAVAILABLE: ServiceUnavailableError,
    GATEWAY_TIMEOUT: GatewayTimeoutError,
    BAD_REQUEST: BadRequest,
    BAD_GATEWAY: BadGatewayError,
}
def _is_prefix_equal(url1, url2):
"""
Checks if URL prefixes are identical. The scheme, hostname and port number
are compared. If the port number is not specified and the scheme is https,
the port number is assumed to be 443.
"""
parsed_url1 = urlsplit(url1)
parsed_url2 = urlsplit(url2)
port1 = parsed_url1.port
if not port1 and parsed_url1.scheme == 'https':
port1 = '443'
port2 = parsed_url1.port
if not port2 and parsed_url2.scheme == 'https':
port2 = '443'
return parsed_url1.hostname == parsed_url2.hostname and \
port1 == port2 and \
parsed_url1.scheme == parsed_url2.scheme
def _get_post_back_url_from_html(html):
"""
Gets the post back URL.
Since the HTML is not well formed, minidom cannot be used to convert to
DOM. The first discovered form is assumed to be the form to post back
and the URL is taken from action attributes.
"""
logger.debug(html)
idx = html.find('<form')
start_idx = html.find('action="', idx)
end_idx = html.find('"', start_idx + 8)
return unescape(html[start_idx + 8:end_idx])
class RequestRetry(Exception):
    """Internal marker exception signalling that a request should be retried."""
    pass
class SnowflakeAuth(AuthBase):
    """
    Requests auth plugin that attaches the Snowflake Authorization header.
    """

    def __init__(self, token):
        # Token issued by the Snowflake login endpoint, or NO_TOKEN when the
        # request must go out unauthenticated.
        self.token = token

    def __call__(self, r):
        # Drop any stale Authorization header before attaching ours, then
        # return the (possibly modified) prepared request.
        if HEADER_AUTHORIZATION_KEY in r.headers:
            del r.headers[HEADER_AUTHORIZATION_KEY]
        if self.token != NO_TOKEN:
            r.headers[HEADER_AUTHORIZATION_KEY] = \
                HEADER_SNOWFLAKE_TOKEN.format(token=self.token)
        return r
class SnowflakeRestful(object):
"""
Snowflake Restful class
"""
    def __init__(self, host=u'127.0.0.1', port=8080,
                 proxy_host=None,
                 proxy_port=None,
                 proxy_user=None,
                 proxy_password=None,
                 protocol=u'http',
                 connect_timeout=DEFAULT_CONNECT_TIMEOUT,
                 request_timeout=DEFAULT_REQUEST_TIMEOUT,
                 injectClientPause=0,
                 connection=None):
        """Initialize the REST transport for a Snowflake connection.

        :param host: Snowflake endpoint host name.
        :param port: endpoint port.
        :param proxy_host: optional HTTP proxy host.
        :param proxy_port: optional HTTP proxy port.
        :param proxy_user: optional proxy user name.
        :param proxy_password: optional proxy password.
        :param protocol: u'http' or u'https'.
        :param connect_timeout: connect timeout in seconds; falsy values fall
            back to DEFAULT_CONNECT_TIMEOUT.
        :param request_timeout: total request timeout in seconds; falsy values
            fall back to DEFAULT_REQUEST_TIMEOUT.
        :param injectClientPause: test hook; seconds to sleep between
            query-status polls in _post_request.
        :param connection: owning SnowflakeConnection, used to read
            insecure-mode and OCSP cache settings (may be None).
        """
        self._host = host
        self._port = port
        self._proxy_host = proxy_host
        self._proxy_port = proxy_port
        self._proxy_user = proxy_user
        self._proxy_password = proxy_password
        self._protocol = protocol
        self._connect_timeout = connect_timeout or DEFAULT_CONNECT_TIMEOUT
        self._request_timeout = request_timeout or DEFAULT_REQUEST_TIMEOUT
        self._injectClientPause = injectClientPause
        self._connection = connection
        # Simple HTTP session pool: idle sessions are reused by
        # _use_requests_session; active ones are tracked for close().
        self._idle_sessions = collections.deque()
        self._active_sessions = set()
        self._request_count = itertools.count()
        # NOTE: the assignments below mutate module-level state of
        # ssl_wrap_socket, so the last-constructed SnowflakeRestful wins
        # process-wide.
        # insecure mode (disabled by default)
        ssl_wrap_socket.FEATURE_INSECURE_MODE = \
            self._connection and self._connection._insecure_mode
        # cache file name (enabled by default)
        ssl_wrap_socket.FEATURE_OCSP_RESPONSE_CACHE_FILE_NAME = \
            self._connection and self._connection._ocsp_response_cache_filename
        #
        ssl_wrap_socket.PROXY_HOST = self._proxy_host
        ssl_wrap_socket.PROXY_PORT = self._proxy_port
        ssl_wrap_socket.PROXY_USER = self._proxy_user
        ssl_wrap_socket.PROXY_PASSWORD = self._proxy_password

        # This is to address the issue where requests hangs
        _ = 'dummy'.encode('idna').decode('utf-8')
        proxy_bypass('www.snowflake.net:443')
@property
def token(self):
return self._token if hasattr(self, u'_token') else None
@property
def master_token(self):
return self._master_token if hasattr(self, u'_master_token') else None
def close(self):
if hasattr(self, u'_token'):
del self._token
if hasattr(self, u'_master_token'):
del self._master_token
sessions = list(self._active_sessions)
if sessions:
logger.warn("Closing %s active sessions", len(sessions))
sessions.extend(self._idle_sessions)
self._active_sessions.clear()
self._idle_sessions.clear()
for s in sessions:
try:
s.close()
except Exception as e:
logger.warn("Session cleanup failed: %s", e)
def authenticate(self, account, user, password, master_token=None,
token=None, database=None, schema=None,
warehouse=None, role=None, passcode=None,
passcode_in_password=False, saml_response=None,
mfa_callback=None, password_callback=None,
session_parameters=None):
logger.debug(u'authenticate')
if token and master_token:
self._token = token
self._master_token = token
logger.debug(u'token is given. no authentication was done')
return
application = self._connection.application if \
self._connection else CLIENT_NAME
internal_application_name = \
self._connection._internal_application_name if \
self._connection else CLIENT_NAME
internal_application_version = \
self._connection._internal_application_version if \
self._connection else CLIENT_VERSION
request_id = TO_UNICODE(uuid.uuid4())
headers = {
u'Content-Type': CONTENT_TYPE_APPLICATION_JSON,
u"accept": ACCEPT_TYPE_APPLICATION_SNOWFLAKE,
u"User-Agent": PYTHON_CONNECTOR_USER_AGENT,
}
url = u"/session/v1/login-request"
body_template = {
u'data': {
u"CLIENT_APP_ID": internal_application_name,
u"CLIENT_APP_VERSION": internal_application_version,
u"SVN_REVISION": VERSION[3],
u"ACCOUNT_NAME": account,
u"CLIENT_ENVIRONMENT": {
u"APPLICATION": application,
u"OS_VERSION": PLATFORM,
u"PYTHON_VERSION": PYTHON_VERSION,
u"PYTHON_RUNTIME": IMPLEMENTATION,
u"PYTHON_COMPILER": COMPILER,
}
},
}
body = copy.deepcopy(body_template)
logger.debug(u'saml: %s', saml_response is not None)
if saml_response:
body[u'data'][u'RAW_SAML_RESPONSE'] = saml_response
else:
body[u'data'][u"LOGIN_NAME"] = user
body[u'data'][u"PASSWORD"] = password
logger.debug(
u'account=%s, user=%s, database=%s, schema=%s, '
u'warehouse=%s, role=%s, request_id=%s',
account,
user,
database,
schema,
warehouse,
role,
request_id,
)
url_parameters = {}
url_parameters[u'request_id'] = request_id
if database is not None:
url_parameters[u'databaseName'] = database
if schema is not None:
url_parameters[u'schemaName'] = schema
if warehouse is not None:
url_parameters[u'warehouse'] = warehouse
if role is not None:
url_parameters[u'roleName'] = role
if len(url_parameters) > 0:
url = url + u'?' + urlencode(url_parameters)
# first auth request
if passcode_in_password:
body[u'data'][u'EXT_AUTHN_DUO_METHOD'] = u'passcode'
elif passcode:
body[u'data'][u'EXT_AUTHN_DUO_METHOD'] = u'passcode'
body[u'data'][u'PASSCODE'] = passcode
if session_parameters:
body[u'data'][u'SESSION_PARAMETERS'] = session_parameters
logger.debug(
"body['data']: %s",
{k: v for (k, v) in body[u'data'].items() if k != u'PASSWORD'})
try:
ret = self._post_request(
url, headers, json.dumps(body),
timeout=self._connection._login_timeout)
except ForbiddenError as err:
# HTTP 403
raise err.__class__(
msg=(u"Failed to connect to DB. "
u"Verify the account name is correct: {host}:{port}, "
u"proxies={proxy_host}:{proxy_port}, "
u"proxy_user={proxy_user}. {message}").format(
host=self._host,
port=self._port,
proxy_host=self._proxy_host,
proxy_port=self._proxy_port,
proxy_user=self._proxy_user,
message=TO_UNICODE(err)
),
errno=ER_FAILED_TO_CONNECT_TO_DB,
sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED)
except (ServiceUnavailableError, BadGatewayError) as err:
# HTTP 502/504
raise err.__class__(
msg=(u"Failed to connect to DB. "
u"Service is unavailable: {host}:{port}, "
u"proxies={proxy_host}:{proxy_port}, "
u"proxy_user={proxy_user}. {message}").format(
host=self._host,
port=self._port,
proxy_host=self._proxy_host,
proxy_port=self._proxy_port,
proxy_user=self._proxy_user,
message=TO_UNICODE(err)
),
errno=ER_FAILED_TO_CONNECT_TO_DB,
sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED)
# this means we are waiting for MFA authentication
if ret[u'data'].get(u'nextAction') and ret[u'data'][
u'nextAction'] == u'EXT_AUTHN_DUO_ALL':
body[u'inFlightCtx'] = ret[u'data'][u'inFlightCtx']
body[u'data'][u'EXT_AUTHN_DUO_METHOD'] = u'push'
self.ret = None
def post_request_wrapper(self, url, headers, body):
# get the MFA response
self.ret = self._post_request(
url, headers, body,
timeout=self._connection._login_timeout)
# send new request to wait until MFA is approved
t = Thread(target=post_request_wrapper,
args=[self, url, headers, json.dumps(body)])
t.daemon = True
t.start()
if callable(mfa_callback):
c = mfa_callback()
while not self.ret:
next(c)
else:
t.join(timeout=120)
ret = self.ret
if ret[u'data'].get(u'nextAction') and ret[u'data'][
u'nextAction'] == u'EXT_AUTHN_SUCCESS':
body = copy.deepcopy(body_template)
body[u'inFlightCtx'] = ret[u'data'][u'inFlightCtx']
# final request to get tokens
ret = self._post_request(
url, headers, json.dumps(body),
timeout=self._connection._login_timeout)
elif ret[u'data'].get(u'nextAction') and ret[u'data'][
u'nextAction'] == u'PWD_CHANGE':
if callable(password_callback):
body = copy.deepcopy(body_template)
body[u'inFlightCtx'] = ret[u'data'][u'inFlightCtx']
body[u'data'][u"LOGIN_NAME"] = user
body[u'data'][u"PASSWORD"] = password
body[u'data'][u'CHOSEN_NEW_PASSWORD'] = password_callback()
# New Password input
ret = self._post_request(
url, headers, json.dumps(body),
timeout=self._connection._login_timeout)
logger.debug(u'completed authentication')
if not ret[u'success']:
Error.errorhandler_wrapper(
self._connection, None, DatabaseError,
{
u'msg': (u"failed to connect to DB: {host}:{port}, "
u"proxies={proxy_host}:{proxy_port}, "
u"proxy_user={proxy_user}, "
u"{message}").format(
host=self._host,
port=self._port,
proxy_host=self._proxy_host,
proxy_port=self._proxy_port,
proxy_user=self._proxy_user,
message=ret[u'message'],
),
u'errno': ER_FAILED_TO_CONNECT_TO_DB,
u'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
})
else:
self._token = ret[u'data'][u'token']
self._master_token = ret[u'data'][u'masterToken']
logger.debug(u'token = %s', self._token)
logger.debug(u'master_token = %s', self._master_token)
if u'sessionId' in ret[u'data']:
self._connection._session_id = ret[u'data'][u'sessionId']
if u'sessionInfo' in ret[u'data']:
session_info = ret[u'data'][u'sessionInfo']
if u'databaseName' in session_info:
self._connection._database = session_info[u'databaseName']
if u'schemaName' in session_info:
self._connection.schema = session_info[u'schemaName']
if u'roleName' in session_info:
self._connection._role = session_info[u'roleName']
if u'warehouseName' in session_info:
self._connection._warehouse = session_info[u'warehouseName']
def request(self, url, body=None, method=u'post', client=u'sfsql',
_no_results=False):
if body is None:
body = {}
if not hasattr(self, u'_master_token'):
Error.errorhandler_wrapper(
self._connection, None, DatabaseError,
{
u'msg': u"Connection is closed",
u'errno': ER_CONNECTION_IS_CLOSED,
u'sqlstate': SQLSTATE_CONNECTION_NOT_EXISTS,
})
if client == u'sfsql':
accept_type = ACCEPT_TYPE_APPLICATION_SNOWFLAKE
else:
accept_type = CONTENT_TYPE_APPLICATION_JSON
headers = {
u'Content-Type': CONTENT_TYPE_APPLICATION_JSON,
u"accept": accept_type,
u"User-Agent": PYTHON_CONNECTOR_USER_AGENT,
}
if method == u'post':
return self._post_request(
url, headers, json.dumps(body),
token=self._token, _no_results=_no_results,
timeout=self._connection._network_timeout)
else:
return self._get_request(
url, headers, token=self._token,
timeout=self._connection._network_timeout)
    def _renew_session(self):
        """Renew the expired session token using the master token.

        Issues a token-request with REQUEST_TYPE_RENEW authenticated by the
        master token.  On success, replaces both ``self._token`` and
        ``self._master_token`` and returns the server response; on failure,
        reports a ProgrammingError through the connection error handler.
        """
        if not hasattr(self, u'_master_token'):
            Error.errorhandler_wrapper(
                self._connection, None, DatabaseError,
                {
                    u'msg': u"Connection is closed",
                    u'errno': ER_CONNECTION_IS_CLOSED,
                    u'sqlstate': SQLSTATE_CONNECTION_NOT_EXISTS,
                })
        logger.debug(u'updating session')
        # NOTE(review): this logs the raw master token, which is sensitive --
        # consider redacting.
        logger.debug(u'master_token: %s', self._master_token)
        headers = {
            u'Content-Type': CONTENT_TYPE_APPLICATION_JSON,
            u"accept": CONTENT_TYPE_APPLICATION_JSON,
            u"User-Agent": PYTHON_CONNECTOR_USER_AGENT,
        }
        request_id = TO_UNICODE(uuid.uuid4())
        logger.debug(u'request_id: %s', request_id)
        url = u'/session/token-request?' + urlencode({
            u'requestId': request_id})

        body = {
            u"oldSessionToken": self._token,
            u"requestType": REQUEST_TYPE_RENEW,
        }
        self._session = None  # invalidate session object
        # Authenticate this request with the master token, not the (expired)
        # session token.
        ret = self._post_request(
            url, headers, json.dumps(body),
            token=self._master_token,
            timeout=self._connection._network_timeout)
        if ret[u'success'] and u'data' in ret \
                and u'sessionToken' in ret[u'data']:
            logger.debug(u'success: %s', ret)
            self._token = ret[u'data'][u'sessionToken']
            self._master_token = ret[u'data'][u'masterToken']
            logger.debug(u'updating session completed')
            return ret
        else:
            logger.debug(u'failed: %s', ret)
            err = ret[u'message']
            if u'data' in ret and u'errorMessage' in ret[u'data']:
                err += ret[u'data'][u'errorMessage']
            Error.errorhandler_wrapper(
                self._connection, None, ProgrammingError,
                {
                    u'msg': err,
                    u'errno': ER_FAILED_TO_RENEW_SESSION,
                    u'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
                })
    def _delete_session(self):
        """Best-effort server-side session deletion.

        Posts ``/session?delete=true`` with a short (5s) timeout.  Any
        failure is logged and swallowed: tearing down the connection must not
        raise just because the server could not be reached.
        """
        if not hasattr(self, u'_master_token'):
            Error.errorhandler_wrapper(
                self._connection, None, DatabaseError,
                {
                    u'msg': u"Connection is closed",
                    u'errno': ER_CONNECTION_IS_CLOSED,
                    u'sqlstate': SQLSTATE_CONNECTION_NOT_EXISTS,
                })
        url = u'/session?' + urlencode({u'delete': u'true'})
        headers = {
            u'Content-Type': CONTENT_TYPE_APPLICATION_JSON,
            u"accept": CONTENT_TYPE_APPLICATION_JSON,
            u"User-Agent": PYTHON_CONNECTOR_USER_AGENT,
        }
        body = {}
        try:
            ret = self._post_request(
                url, headers, json.dumps(body),
                token=self._token, timeout=5, is_single_thread=True)
            if not ret or ret.get(u'success'):
                return
            # NOTE(review): ``err`` is assembled but never logged or raised;
            # the failure is intentionally ignored below.
            err = ret[u'message']
            if ret.get(u'data') and ret[u'data'].get(u'errorMessage'):
                err += ret[u'data'][u'errorMessage']
                # no exception is raised
        except Exception as e:
            logger.debug('error in deleting session. ignoring...: %s', e)
def _get_request(self, url, headers, token=None, timeout=None):
if 'Content-Encoding' in headers:
del headers['Content-Encoding']
if 'Content-Length' in headers:
del headers['Content-Length']
full_url = u'{protocol}://{host}:{port}{url}'.format(
protocol=self._protocol,
host=self._host,
port=self._port,
url=url,
)
ret = self.fetch(u'get', full_url, headers, timeout=timeout,
token=token)
if u'code' in ret and ret[u'code'] == SESSION_EXPIRED_GS_CODE:
ret = self._renew_session()
logger.debug(
u'ret[code] = {code} after renew_session'.format(
code=(ret[u'code'] if u'code' in ret else u'N/A')))
if u'success' in ret and ret[u'success']:
return self._get_request(url, headers, token=self._token)
return ret
    def _post_request(self, url, headers, body, token=None,
                      timeout=None, _no_results=False, is_single_thread=False):
        """Issue an HTTP POST and poll until the query result is ready.

        Handles two server-driven flows:

        * session expiry (GS code 390112): renew via the master token and
          replay the POST once with the new session token;
        * query-in-progress (GS codes 333333/333334): repeatedly GET the
          server-provided result URL ("ping pong") until a final response
          arrives, renewing the session along the way if needed.

        :param url: path portion of the URL.
        :param headers: HTTP header dict.
        :param body: JSON-encoded request body string.
        :param token: session token to authenticate with.
        :param timeout: total request timeout in seconds.
        :param _no_results: when True, return immediately on the async
            in-progress code (333334) instead of polling.
        :param is_single_thread: passed through to fetch() for single-threaded
            request execution.
        :return: decoded JSON response dict.
        """
        full_url = u'{protocol}://{host}:{port}{url}'.format(
            protocol=self._protocol,
            host=self._host,
            port=self._port,
            url=url,
        )
        ret = self.fetch(u'post', full_url, headers, data=body,
                         timeout=timeout, token=token,
                         is_single_thread=is_single_thread)
        logger.debug(
            u'ret[code] = {code}, after post request'.format(
                code=(ret.get(u'code', u'N/A'))))

        if u'code' in ret and ret[u'code'] == SESSION_EXPIRED_GS_CODE:
            ret = self._renew_session()
            logger.debug(
                u'ret[code] = {code} after renew_session'.format(
                    code=(ret[u'code'] if u'code' in ret else u'N/A')))
            if u'success' in ret and ret[u'success']:
                # Replay the original POST with the renewed session token.
                return self._post_request(
                    url, headers, body, token=self._token, timeout=timeout)

        is_session_renewed = False
        result_url = None
        if u'code' in ret and ret[
            u'code'] == QUERY_IN_PROGRESS_ASYNC_CODE and _no_results:
            return ret

        # Poll loop: note that ``and`` binds tighter than ``or``, so this
        # reads (is_session_renewed) or ('code' in ret and code in (...)).
        while is_session_renewed or u'code' in ret and ret[u'code'] in \
                (QUERY_IN_PROGRESS_CODE, QUERY_IN_PROGRESS_ASYNC_CODE):
            if self._injectClientPause > 0:
                # Test hook: artificial delay between polls.
                logger.debug(
                    u'waiting for {inject_client_pause}...'.format(
                        inject_client_pause=self._injectClientPause))
                time.sleep(self._injectClientPause)
            # ping pong
            # Keep the previous result URL when the iteration was triggered
            # by a session renewal (the renewal response has no URL).
            result_url = ret[u'data'][
                u'getResultUrl'] if not is_session_renewed else result_url
            logger.debug(u'ping pong starting...')
            ret = self._get_request(
                result_url, headers, token=self._token, timeout=timeout)
            logger.debug(
                u'ret[code] = %s',
                ret[u'code'] if u'code' in ret else u'N/A')
            logger.debug(u'ping pong done')
            if u'code' in ret and ret[u'code'] == SESSION_EXPIRED_GS_CODE:
                ret = self._renew_session()
                logger.debug(
                    u'ret[code] = %s after renew_session',
                    ret[u'code'] if u'code' in ret else u'N/A')
                if u'success' in ret and ret[u'success']:
                    is_session_renewed = True
            else:
                is_session_renewed = False

        return ret
def fetch(self, method, full_url, headers, data=None, timeout=None,
**kwargs):
""" Curried API request with session management. """
if timeout is not None and 'timeouts' in kwargs:
raise TypeError("Mutually exclusive args: timeout, timeouts")
if timeout is None:
timeout = self._request_timeout
timeouts = kwargs.pop('timeouts', (self._connect_timeout,
self._connect_timeout, timeout))
proxies = set_proxies(self._proxy_host, self._proxy_port,
self._proxy_user, self._proxy_password)
with self._use_requests_session() as session:
return self._fetch(session, method, full_url, headers, data,
proxies, timeouts, **kwargs)
    def _fetch(self, session, method, full_url, headers, data, proxies,
               timeouts=(DEFAULT_CONNECT_TIMEOUT, DEFAULT_CONNECT_TIMEOUT,
                         DEFAULT_REQUEST_TIMEOUT),
               token=NO_TOKEN,
               is_raw_text=False,
               catch_okta_unauthorized_error=False,
               is_raw_binary=False,
               is_raw_binary_iterator=True,
               use_ijson=False, is_single_thread=False):
        """ This is the lowest level of HTTP handling. All arguments culminate
        here and the `requests.request` is issued and monitored from this
        call using an inline thread for timeout monitoring.

        The worker (``request_exec``) performs the HTTP round trip and puts
        ``(result_or_exception, retryable)`` onto a queue; this method joins
        the worker with a per-attempt timeout, retries retryable failures
        with exponential backoff (capped at 16s), and funnels terminal
        failures through ``Error.errorhandler_wrapper``.

        ``timeouts`` is ``(connect, read, total)``: the first two are handed
        to ``requests`` as the per-attempt timeout; the third bounds the
        overall retry budget.
        """
        connection_timeout = timeouts[0:2]
        request_timeout = timeouts[2]  # total request timeout
        request_exec_timeout = 60  # one request thread timeout

        conn = self._connection
        # NOTE(review): this recomputes proxies from conn.rest and overwrites
        # the `proxies` argument passed in by fetch(); confirm whether the
        # parameter is still needed.
        proxies = set_proxies(conn.rest._proxy_host, conn.rest._proxy_port,
                              conn.rest._proxy_user, conn.rest._proxy_password)

        def request_exec(result_queue):
            # Runs in a worker thread. Every outcome -- success, HTTP error,
            # or exception -- is reported as (object, retryable) on the queue.
            try:
                if not catch_okta_unauthorized_error and data and len(data) > 0:
                    # gzip-compress the request body (skipped for OKTA calls,
                    # which are sent to the external IDP as-is)
                    gzdata = BytesIO()
                    gzip.GzipFile(fileobj=gzdata, mode=u'wb').write(
                        data.encode(u'utf-8'))
                    gzdata.seek(0, 0)
                    headers['Content-Encoding'] = 'gzip'
                    input_data = gzdata
                else:
                    input_data = data

                raw_ret = session.request(
                    method=method,
                    url=full_url,
                    proxies=proxies,
                    headers=headers,
                    data=input_data,
                    timeout=connection_timeout,
                    verify=True,
                    stream=is_raw_binary,
                    auth=SnowflakeAuth(token),
                )
                if raw_ret.status_code == OK:
                    logger.debug(u'SUCCESS')
                    if is_raw_text:
                        ret = raw_ret.text
                    elif is_raw_binary:
                        # Streamed result set: decompress, then hand back
                        # either the parsed object, an iterator, or an
                        # ijson-based row splitter depending on the flags.
                        raw_data = decompress_raw_data(
                            raw_ret.raw, add_bracket=True
                        ).decode('utf-8', 'replace')
                        if not is_raw_binary_iterator:
                            ret = json.loads(raw_data)
                        elif not use_ijson:
                            ret = iter(json.loads(raw_data))
                        else:
                            ret = split_rows_from_stream(StringIO(raw_data))
                    else:
                        ret = raw_ret.json()
                    result_queue.put((ret, False))
                elif raw_ret.status_code in STATUS_TO_EXCEPTION:
                    # retryable exceptions
                    result_queue.put(
                        (STATUS_TO_EXCEPTION[raw_ret.status_code](), True))
                elif raw_ret.status_code == UNAUTHORIZED and \
                        catch_okta_unauthorized_error:
                    # OKTA Unauthorized errors
                    result_queue.put(
                        (DatabaseError(
                            msg=(u'Failed to get '
                                 u'authentication by OKTA: '
                                 u'{status}: {reason}'.format(
                                     status=raw_ret.status_code,
                                     reason=raw_ret.reason,
                                 )),
                            errno=ER_FAILED_TO_CONNECT_TO_DB,
                            sqlstate=SQLSTATE_CONNECTION_REJECTED),
                         False))
                else:
                    # any other HTTP status is surfaced as a non-retryable
                    # InterfaceError
                    result_queue.put(
                        (InterfaceError(
                            msg=(u"{status} {reason}: "
                                 u"{method} {url}").format(
                                status=raw_ret.status_code,
                                reason=raw_ret.reason,
                                method=method,
                                url=full_url),
                            errno=ER_FAILED_TO_REQUEST,
                            sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
                        ), False))
            except (BadStatusLine,
                    SSLError,
                    ProtocolError,
                    OpenSSL.SSL.SysCallError,
                    ValueError,
                    RuntimeError) as err:
                logger.exception('who is hitting error?')
                logger.debug(err)
                # Transport-level failures are retryable, except OpenSSL
                # syscall errors with errnos other than the listed ones.
                if not isinstance(err, OpenSSL.SSL.SysCallError) or \
                        err.args[0] in (
                            errno.ECONNRESET,
                            errno.ETIMEDOUT,
                            errno.EPIPE,
                            -1):
                    result_queue.put((err, True))
                else:
                    # all other OpenSSL errors are not retryable
                    result_queue.put((err, False))
            except ConnectionError as err:
                logger.exception(u'ConnectionError: %s', err)
                result_queue.put((OperationalError(
                    # no full_url is required in the message
                    # as err includes all information
                    msg=u'Failed to connect: {0}'.format(err),
                    errno=ER_FAILED_TO_SERVER,
                    sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
                ), False))

        if is_single_thread:
            # This is dedicated code for DELETE SESSION when Python exits:
            # run inline (no worker thread) and swallow any failure.
            request_result_queue = Queue()
            request_exec(request_result_queue)
            try:
                # don't care about the return value, because no retry and
                # no error will show up
                _, _ = request_result_queue.get(timeout=request_timeout)
            except:  # noqa: E722 -- best effort at interpreter shutdown
                pass
            return {}

        retry_cnt = 0
        while True:
            return_object = None
            request_result_queue = Queue()
            th = Thread(name='RequestExec-%d' % next(self._request_count),
                        target=request_exec, args=(request_result_queue,))
            th.daemon = True
            th.start()
            try:
                logger.debug('request thread timeout: %s, '
                             'rest of request timeout: %s, '
                             'retry cnt: %s',
                             request_exec_timeout,
                             request_timeout,
                             retry_cnt + 1)
                start_request_thread = time.time()
                th.join(timeout=request_exec_timeout)
                logger.debug('request thread joined')
                # Deduct elapsed time from the remaining overall budget.
                if request_timeout is not None:
                    request_timeout -= min(
                        int(time.time() - start_request_thread),
                        request_timeout)
                start_get_queue = time.time()
                return_object, retryable = request_result_queue.get(
                    timeout=int(request_exec_timeout / 4))
                if request_timeout is not None:
                    request_timeout -= min(
                        int(time.time() - start_get_queue), request_timeout)
                logger.debug('request thread returned object')
                if retryable:
                    raise RequestRetry()
                elif isinstance(return_object, Error):
                    Error.errorhandler_wrapper(conn, None, return_object)
                elif isinstance(return_object, Exception):
                    Error.errorhandler_wrapper(
                        conn, None, OperationalError,
                        {
                            u'msg': u'Failed to execute request: {0}'.format(
                                return_object),
                            u'errno': ER_FAILED_TO_REQUEST,
                        })
                break
            except (RequestRetry, AttributeError, EmptyQueue) as e:
                # RequestRetry is raised in case of retryable error
                # Empty is raised if the result queue is empty
                # (AttributeError can surface from a half-torn-down thread)
                if request_timeout is not None:
                    sleeping_time = min(2 ** retry_cnt,
                                        min(request_timeout, 16))
                else:
                    sleeping_time = min(2 ** retry_cnt, 16)
                if sleeping_time <= 0:
                    # no more sleeping time
                    break
                if request_timeout is not None:
                    request_timeout -= sleeping_time
                logger.info(
                    u'retrying: errorclass=%s, '
                    u'error=%s, '
                    u'return_object=%s, '
                    u'counter=%s, '
                    u'sleeping=%s(s)',
                    type(e),
                    e,
                    return_object,
                    retry_cnt + 1,
                    sleeping_time)
                time.sleep(sleeping_time)
                retry_cnt += 1

        if return_object is None:
            # The retry budget was exhausted without ever getting a result.
            if data:
                try:
                    decoded_data = json.loads(data)
                    if decoded_data.get(
                            'data') and decoded_data['data'].get('PASSWORD'):
                        # masking the password before it hits the log
                        decoded_data['data']['PASSWORD'] = '********'
                        data = json.dumps(decoded_data)
                except:  # noqa: E722 -- body may not be JSON; log and go on
                    logger.info("data is not JSON")
            logger.error(
                u'Failed to get the response. Hanging? '
                u'method: {method}, url: {url}, headers:{headers}, '
                u'data: {data}, proxies: {proxies}'.format(
                    method=method,
                    url=full_url,
                    headers=headers,
                    data=data,
                    proxies=proxies
                )
            )
            Error.errorhandler_wrapper(
                conn, None, OperationalError,
                {
                    u'msg': u'Failed to get the response. Hanging? '
                            u'method: {method}, url: {url}, '
                            u'proxies: {proxies}'.format(
                        method=method,
                        url=full_url,
                        proxies=proxies
                    ),
                    u'errno': ER_FAILED_TO_REQUEST,
                })
        elif isinstance(return_object, Error):
            Error.errorhandler_wrapper(conn, None, return_object)
        elif isinstance(return_object, Exception):
            Error.errorhandler_wrapper(
                conn, None, OperationalError,
                {
                    u'msg': u'Failed to execute request: {0}'.format(
                        return_object),
                    u'errno': ER_FAILED_TO_REQUEST,
                })
        return return_object
def make_requests_session(self):
s = requests.Session()
s.mount(u'http://', HTTPAdapter(max_retries=REQUESTS_RETRY))
s.mount(u'https://', HTTPAdapter(max_retries=REQUESTS_RETRY))
s._reuse_count = itertools.count()
return s
    def authenticate_by_saml(self, authenticator, account, user, password):
        u"""
        SAML Authentication

        Args:
            authenticator: base URL of the external IDP.
            account: Snowflake account name.
            user: login name sent to the IDP token endpoint.
            password: password sent to the IDP token endpoint.

        Returns:
            The raw SAML response HTML on success.

        1. query GS to obtain IDP token and SSO url
        2. IMPORTANT Client side validation:
        validate both token url and sso url contains same prefix
        (protocol + host + port) as the given authenticator url.
        Explanation:
        This provides a way for the user to 'authenticate' the IDP it is
        sending his/her credentials to.  Without such a check, the user could
        be coerced to provide credentials to an IDP impersonator.
        3. query IDP token url to authenticate and retrieve access token
        4. given access token, query IDP URL snowflake app to get SAML
        response
        5. IMPORTANT Client side validation:
        validate the post back url come back with the SAML response
        contains the same prefix as the Snowflake's server url, which is the
        intended destination url to Snowflake.
        Explanation:
        This emulates the behavior of IDP initiated login flow in the user
        browser where the IDP instructs the browser to POST the SAML
        assertion to the specific SP endpoint.  This is critical in
        preventing a SAML assertion issued to one SP from being sent to
        another SP.
        """
        logger.info(u'authenticating by SAML')

        logger.debug(u'step 1: query GS to obtain IDP token and SSO url')
        headers = {
            u'Content-Type': CONTENT_TYPE_APPLICATION_JSON,
            u"accept": CONTENT_TYPE_APPLICATION_JSON,
            u"User-Agent": PYTHON_CONNECTOR_USER_AGENT,
        }
        url = u"/session/authenticator-request"
        body = {
            u'data': {
                u"CLIENT_APP_ID": CLIENT_NAME,
                u"CLIENT_APP_VERSION": CLIENT_VERSION,
                u"SVN_REVISION": VERSION[3],
                u"ACCOUNT_NAME": account,
                u"AUTHENTICATOR": authenticator,
            },
        }

        logger.debug(
            u'account=%s, authenticator=%s',
            account, authenticator,
        )

        ret = self._post_request(
            url, headers, json.dumps(body),
            timeout=self._connection._login_timeout)

        if not ret[u'success']:
            # GS rejected the authenticator request; abort with full context.
            Error.errorhandler_wrapper(
                self._connection, None, DatabaseError,
                {
                    u'msg': (u"Failed to connect to DB: {host}:{port}, "
                             u"proxies={proxy_host}:{proxy_port}, "
                             u"proxy_user={proxy_user}, "
                             u"{message}").format(
                        host=self._host,
                        port=self._port,
                        proxy_host=self._proxy_host,
                        proxy_port=self._proxy_port,
                        proxy_user=self._proxy_user,
                        message=ret[u'message'],
                    ),
                    u'errno': ER_FAILED_TO_CONNECT_TO_DB,
                    u'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
                })

        data = ret[u'data']
        token_url = data[u'tokenUrl']
        sso_url = data[u'ssoUrl']

        logger.debug(u'step 2: validate Token and SSO URL has the same prefix '
                     u'as authenticator')
        # Security check: both URLs must share protocol/host/port with the
        # user-supplied authenticator, otherwise credentials could go to an
        # IDP impersonator (see docstring).
        if not _is_prefix_equal(authenticator, token_url) or \
                not _is_prefix_equal(authenticator, sso_url):
            Error.errorhandler_wrapper(
                self._connection, None, DatabaseError,
                {
                    u'msg': (u"The specified authenticator is not supported: "
                             u"{authenticator}, token_url: {token_url}, "
                             u"sso_url: {sso_url}".format(
                                 authenticator=authenticator,
                                 token_url=token_url,
                                 sso_url=sso_url,
                             )),
                    u'errno': ER_IDP_CONNECTION_ERROR,
                    u'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
                }
            )

        logger.debug(u'step 3: query IDP token url to authenticate and '
                     u'retrieve access token')
        data = {
            u'username': user,
            u'password': password,
        }
        ret = self.fetch(u'post', token_url, headers, data=json.dumps(data),
                         timeout=self._connection._login_timeout,
                         catch_okta_unauthorized_error=True)
        one_time_token = ret[u'cookieToken']

        logger.debug(u'step 4: query IDP URL snowflake app to get SAML '
                     u'response')
        url_parameters = {
            u'RelayState': u"/some/deep/link",
            u'onetimetoken': one_time_token,
        }
        sso_url = sso_url + u'?' + urlencode(url_parameters)
        headers = {
            u"Accept": u'*/*',
        }
        response_html = self.fetch(u'get', sso_url, headers,
                                   timeout=self._connection._login_timeout,
                                   is_raw_text=True)

        logger.debug(u'step 5: validate post_back_url matches Snowflake URL')
        # Security check: the SAML assertion must be destined for THIS
        # Snowflake deployment, not some other service provider.
        post_back_url = _get_post_back_url_from_html(response_html)
        full_url = u'{protocol}://{host}:{port}'.format(
            protocol=self._protocol,
            host=self._host,
            port=self._port,
        )
        if not _is_prefix_equal(post_back_url, full_url):
            Error.errorhandler_wrapper(
                self._connection, None, DatabaseError,
                {
                    u'msg': (u"The specified authenticator and destination "
                             u"URL in the SAML assertion do not match: "
                             u"expected: {url}, "
                             u"post back: {post_back_url}".format(
                                 url=full_url,
                                 post_back_url=post_back_url,
                             )),
                    u'errno': ER_INCORRECT_DESTINATION,
                    u'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
                }
            )
        return response_html
@contextlib.contextmanager
def _use_requests_session(self):
""" Session caching context manager. Note that the session is not
closed until close() is called so each session may be used multiple
times. """
try:
session = self._idle_sessions.pop()
except IndexError:
session = self.make_requests_session()
self._active_sessions.add(session)
logger.info("Active requests sessions: %s, idle: %s",
len(self._active_sessions), len(self._idle_sessions))
try:
yield session
finally:
self._idle_sessions.appendleft(session)
try:
self._active_sessions.remove(session)
except KeyError:
logger.info(
"session doesn't exist in the active session pool. "
"Ignored...")
logger.info("Active requests sessions: %s, idle: %s",
len(self._active_sessions), len(self._idle_sessions))
|
There is nothing as bad as losing the money you had and nothing as good as making it back after you lost. Thus, on Wednesday [2009/09/30], the market rose 2% from low and you made it back. The contentment, the joy! And the almanaktarian's favorite day coming up, with a beaten favorite play to boot! But the cronies knew. Disaster. One lost what one had on Thursday [2009/10/01]. You see, we're in crisis mode. Unless the health bill is passed, those jobs will not come back; what we need is to prosecute Secretary Mellon or his modern counterpart the way the magnetic radio person did in the 1930s. On Friday, bonds at a one-year high, and the cronies took profits on the number. Someone has to pay. Let us hope that the seasonalists, almanaktarians, and followers of all stripes will fare better in another world where we will not meet them.
What if markets slowly begin to realize "hey, we have come out of the worst crisis in 100 years with many scars, but we are still on our feet" — couldn't that justify a much higher valuation level (a lower risk premium) for equity markets?
Wednesday shows how desperate bulls were to let the market regain the opening level. Their failure and frustration are quite evident in Thursday's action. On Friday they tried to close the down gap of the open, but they were much less aggressive. Maybe they are realizing that the market needs to move to much lower levels before attracting new buyers, who are now also concerned with negative news and W-, U-, and square-root-shaped recession scenarios… If this is true, we should sell any rebound next week.
It's not over till it's over. The retail traders who have seen the minor dips since March and have jumped onto the July bandwagon (even if a bit late) would be seeing this as an opportunity to buy on dips. All lagging indicators, as well as price, are showing breaks of resistance trendlines and key levels. On the heels of bad-to-worse news, we still have not seen a correction in the above-10% range. This is why I think a hint of good news will see us break above 10k on the Dow. Just when none of the retail traders expect it, the commercial loan situation will come to fruition, coupled with a lack of topline growth, and the long-awaited bear run will start.
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
import copy
import os
import hashlib
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.error import ServiceError
from svtplay_dl.log import log
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.dash import dashparse
from svtplay_dl.utils import ensure_unicode, filenamify, is_py2, decode_html_entities
from svtplay_dl.subtitle import subtitle
from svtplay_dl.utils.urllib import urlparse, parse_qs
from svtplay_dl.info import info
class OppetArkiv(Service, OpenGraphThumbMixin):
    """svtplay-dl service for oppetarkiv.se (SVT's open archive).

    Resolves the video id from the page, queries SVT's videoplayer API and
    yields stream candidates (HLS/HDS/DASH), subtitles and episode metadata.
    """
    supported_domains = ['oppetarkiv.se']

    def get(self):
        """Yield streams, subtitles, info objects and errors for self.url."""
        vid = self.find_video_id()
        if vid is None:
            yield ServiceError("Cant find video id for this video")
            return

        url = "http://api.svt.se/videoplayer-api/video/%s" % vid
        data = self.http.request("get", url)
        if data.status_code == 404:
            yield ServiceError("Can't get the json file for %s" % url)
            return

        data = data.json()
        if "live" in data:
            self.options.live = data["live"]
        if self.options.output_auto:
            self.options.service = "svtplay"
            self.options.output = self.outputfilename(
                data, self.options.output, ensure_unicode(self.get_urldata()))

        if self.exclude():
            yield ServiceError("Excluding video")
            return

        parsed_info = self._parse_info(data)
        if self.options.get_info:
            if parsed_info:
                yield info(copy.copy(self.options), parsed_info)
                log.info("Collected info")
            else:
                log.info("Couldn't collect info for this episode")

        if "subtitleReferences" in data:
            for i in data["subtitleReferences"]:
                if i["format"] == "websrt":
                    yield subtitle(copy.copy(self.options), "wrst", i["url"])

        if len(data["videoReferences"]) == 0:
            yield ServiceError("Media doesn't have any associated videos (yet?)")
            return

        for i in data["videoReferences"]:
            parse = urlparse(i["url"])
            query = parse_qs(parse.query)
            if i["format"] == "hls" or i["format"] == "ios":
                streams = hlsparse(self.options, self.http.request("get", i["url"]), i["url"])
                if streams:
                    for n in list(streams.keys()):
                        yield streams[n]
                # Some references carry an alternative manifest in ?alt=
                if "alt" in query and len(query["alt"]) > 0:
                    alt = self.http.get(query["alt"][0])
                    if alt:
                        streams = hlsparse(self.options, self.http.request("get", alt.request.url), alt.request.url)
                        if streams:
                            for n in list(streams.keys()):
                                yield streams[n]
            if i["format"] == "hds" or i["format"] == "flash":
                # DRM-protected HDS manifests live under /se/secure/; skip.
                match = re.search(r"\/se\/secure\/", i["url"])
                if not match:
                    streams = hdsparse(self.options, self.http.request("get", i["url"], params={"hdcore": "3.7.0"}),
                                       i["url"])
                    if streams:
                        for n in list(streams.keys()):
                            yield streams[n]
                    if "alt" in query and len(query["alt"]) > 0:
                        alt = self.http.get(query["alt"][0])
                        if alt:
                            streams = hdsparse(self.options,
                                               self.http.request("get", alt.request.url, params={"hdcore": "3.7.0"}),
                                               alt.request.url)
                            if streams:
                                for n in list(streams.keys()):
                                    yield streams[n]
            if i["format"] == "dash264" or i["format"] == "dashhbbtv":
                streams = dashparse(self.options, self.http.request("get", i["url"]), i["url"])
                if streams:
                    for n in list(streams.keys()):
                        yield streams[n]
                if "alt" in query and len(query["alt"]) > 0:
                    alt = self.http.get(query["alt"][0])
                    if alt:
                        streams = dashparse(self.options, self.http.request("get", alt.request.url), alt.request.url)
                        if streams:
                            for n in list(streams.keys()):
                                yield streams[n]

    def find_video_id(self):
        """Return the page's data-video-id attribute, or None if missing."""
        match = re.search('data-video-id="([^"]+)"', self.get_urldata())
        if match:
            return match.group(1)
        return None

    def find_all_episodes(self, options):
        """Return the URLs of all episodes of the program at self.url.

        The ``options`` parameter is unused (self.options is consulted
        instead) but kept for interface compatibility with other services.
        Respects ``--all-last N`` by sorting newest-first and stopping at N.
        """
        page = 1
        data = self.get_urldata()
        match = re.search(r'"/etikett/titel/([^"/]+)', data)
        if match is None:
            match = re.search(r'"http://www.oppetarkiv.se/etikett/titel/([^/]+)/', self.url)
            if match is None:
                log.error("Couldn't find title")
                return
        program = match.group(1)
        episodes = []
        n = 0
        if self.options.all_last > 0:
            sort = "tid_fallande"   # newest first
        else:
            sort = "tid_stigande"   # oldest first
        while True:
            url = "http://www.oppetarkiv.se/etikett/titel/%s/?sida=%s&sort=%s&embed=true" % (program, page, sort)
            data = self.http.request("get", url)
            if data.status_code == 404:
                # past the last page of results
                break
            data = data.text
            regex = re.compile(r'href="(/video/[^"]+)"')
            for match in regex.finditer(data):
                if n == self.options.all_last:
                    break
                episodes.append("http://www.oppetarkiv.se%s" % match.group(1))
                n += 1
            page += 1
        return episodes

    def outputfilename(self, data, filename, raw):
        """Build the output basename '<name>[.sXXeYY]-<hash>-svtplay'.

        The hash is the first 7 hex digits of the SHA-256 of the program
        version id, keeping filenames unique per program version. Returns
        None when the page carries no data-title attribute.
        """
        directory = os.path.dirname(filename)
        # renamed from `id` to avoid shadowing the builtin
        if is_py2:
            vhash = hashlib.sha256(data["programVersionId"]).hexdigest()[:7]
        else:
            vhash = hashlib.sha256(data["programVersionId"].encode("utf-8")).hexdigest()[:7]
        datatitle = re.search('data-title="([^"]+)"', self.get_urldata())
        if not datatitle:
            return None
        datat = decode_html_entities(datatitle.group(1))
        name = self.name(datat)
        episode = self.seasoninfo(datat)
        if is_py2:
            name = name.encode("utf8")
        if episode:
            title = "{0}.{1}-{2}-svtplay".format(name, episode, vhash)
        else:
            title = "{0}-{1}-svtplay".format(name, vhash)
        title = filenamify(title)
        if len(directory):
            output = os.path.join(directory, title)
        else:
            output = title
        return output

    def seasoninfo(self, data):
        """Return an 'sXXeYY' or 'eYY' tag parsed from the title, or None.

        The '.' in "S.song" matches the non-ascii 'ä' in "Säsong" so the
        pattern works regardless of source encoding.
        """
        match = re.search(r"S.song (\d+) - Avsnitt (\d+)", data)
        if match:
            return "s{0:02d}e{1:02d}".format(int(match.group(1)), int(match.group(2)))
        match = re.search(r"Avsnitt (\d+)", data)
        if match:
            return "e{0:02d}".format(int(match.group(1)))
        return None

    def name(self, data):
        """Strip a trailing ' - Säsong X - Avsnitt Y' style suffix.

        A regex is used because the season marker contains a non-ascii
        character ('ä'); the previous literal str.find(" - S.song") could
        never match it, so season suffixes were left in the title.
        """
        match = re.search(r"^(.+?) - (?:S.song|Avsnitt)", data)
        if match:
            return match.group(1)
        return data

    def _parse_info(self, json_data):
        """Collect episode metadata (title, season/episode, duration,
        broadcast date, subtitle availability, description) or None when
        the page has no data-title attribute."""
        parsed_info = {}
        data = self.get_urldata()
        datatitle = re.search('data-title="([^"]+)"', data)
        if not datatitle:
            return None
        datat = decode_html_entities(datatitle.group(1))
        parsed_info["title"] = self.name(datat)
        # '.' matches the 'ä' in "Säsong" (see seasoninfo)
        match = re.search(r"S.song (\d+) - Avsnitt (\d+)", datat)
        if match:
            parsed_info["season"] = match.group(1)
            parsed_info["episode"] = match.group(2)
        else:
            match = re.search(r"Avsnitt (\d+)", datat)
            if match:
                parsed_info["episode"] = match.group(1)
        meta = re.search(r'<span class="svt-video-meta">([\w\W]+)</span>', data)
        if meta:
            broadcast_date = re.search(r"<time .+>(.+)</time>", meta.group(1))
            duration = re.search(r"Längd <strong>(.+)</strong>", meta.group(1))
            if duration:
                parsed_info["duration"] = duration.group(1)
            if broadcast_date:
                parsed_info["broadcastDate"] = broadcast_date.group(1)
        if "subtitleReferences" in json_data:
            for ref in json_data["subtitleReferences"]:
                if ref["format"] == "websrt":
                    parsed_info["subtitle"] = "True"
                    break
        description = re.search(r'<div class="svt-text-bread">([\w\W]+?)</div>', data)
        if description:
            text = decode_html_entities(description.group(1))
            text = text.replace("<br />", "\n").replace("<br>", "\n").replace("\t", "")
            text = re.sub('<[^<]+?>', '', text)
            parsed_info["description"] = text
        return parsed_info
|
This collection of intricate images and empowering phrases will encourage you to keep persisting and keep the fire lit inside of you. Rebel Coloring is the perfect coloring book for any fierce female to unwind. With 47 empowering prints to color and frame, this coloring book will have you ready to take on the world and color like a girl!
|
# Static analyzer import helpers: (STATIC_IMPORT_MARK)
# This block is never executed at runtime ("if 0:" is always false). It only
# exists so that IDEs and static analyzers can resolve the global names that
# web2py injects into the environment of models/controllers/views when it
# executes them (request, response, session, db, auth, T, the HTML helpers,
# the validators, etc.). Do not remove it; the code will still run without
# it, but editor autocompletion and lint results will degrade.
if 0:
    import gluon
    # framework globals injected by web2py at runtime
    global cache; cache = gluon.cache.Cache()
    global LOAD; LOAD = gluon.compileapp.LoadFactory()
    import gluon.compileapp.local_import_aux as local_import #@UnusedImport
    from gluon.contrib.gql import GQLDB #@UnusedImport
    from gluon.dal import Field #@UnusedImport
    global request; request = gluon.globals.Request()
    global response; response = gluon.globals.Response()
    global session; session = gluon.globals.Session()
    # HTML helper classes and functions (gluon.html)
    from gluon.html import A #@UnusedImport
    from gluon.html import B #@UnusedImport
    from gluon.html import BEAUTIFY #@UnusedImport
    from gluon.html import BODY #@UnusedImport
    from gluon.html import BR #@UnusedImport
    from gluon.html import CENTER #@UnusedImport
    from gluon.html import CODE #@UnusedImport
    from gluon.html import DIV #@UnusedImport
    from gluon.html import EM #@UnusedImport
    from gluon.html import EMBED #@UnusedImport
    from gluon.html import embed64 #@UnusedImport
    from gluon.html import FIELDSET #@UnusedImport
    from gluon.html import FORM #@UnusedImport
    from gluon.html import H1 #@UnusedImport
    from gluon.html import H2 #@UnusedImport
    from gluon.html import H3 #@UnusedImport
    from gluon.html import H4 #@UnusedImport
    from gluon.html import H5 #@UnusedImport
    from gluon.html import H6 #@UnusedImport
    from gluon.html import HEAD #@UnusedImport
    from gluon.html import HR #@UnusedImport
    from gluon.html import HTML #@UnusedImport
    from gluon.html import I #@UnusedImport
    from gluon.html import IFRAME #@UnusedImport
    from gluon.html import IMG #@UnusedImport
    from gluon.html import INPUT #@UnusedImport
    from gluon.html import LABEL #@UnusedImport
    from gluon.html import LEGEND #@UnusedImport
    from gluon.html import LI #@UnusedImport
    from gluon.html import LINK #@UnusedImport
    from gluon.html import MARKMIN #@UnusedImport
    from gluon.html import MENU #@UnusedImport
    from gluon.html import META #@UnusedImport
    from gluon.html import OBJECT #@UnusedImport
    from gluon.html import OL #@UnusedImport
    from gluon.html import ON #@UnusedImport
    from gluon.html import OPTGROUP #@UnusedImport
    from gluon.html import OPTION #@UnusedImport
    from gluon.html import P #@UnusedImport
    from gluon.html import PRE #@UnusedImport
    from gluon.html import STYLE #@UnusedImport
    from gluon.html import SCRIPT #@UnusedImport
    from gluon.html import SELECT #@UnusedImport
    from gluon.html import SPAN #@UnusedImport
    from gluon.html import TABLE #@UnusedImport
    from gluon.html import TAG #@UnusedImport
    from gluon.html import TBODY #@UnusedImport
    from gluon.html import TD #@UnusedImport
    from gluon.html import TEXTAREA #@UnusedImport
    from gluon.html import TFOOT #@UnusedImport
    from gluon.html import TH #@UnusedImport
    from gluon.html import THEAD #@UnusedImport
    from gluon.html import TITLE #@UnusedImport
    from gluon.html import TR #@UnusedImport
    from gluon.html import TT #@UnusedImport
    from gluon.html import UL #@UnusedImport
    from gluon.html import URL #@UnusedImport
    from gluon.html import XHTML #@UnusedImport
    from gluon.html import XML #@UnusedImport
    from gluon.html import xmlescape #@UnusedImport
    from gluon.http import HTTP #@UnusedImport
    from gluon.http import redirect #@UnusedImport
    import gluon.languages.translator as T #@UnusedImport
    # database layer
    from gluon.sql import DAL
    global db; db = DAL()
    from gluon.sql import SQLDB #@UnusedImport
    from gluon.sql import SQLField #@UnusedImport
    from gluon.sqlhtml import SQLFORM #@UnusedImport
    from gluon.sqlhtml import SQLTABLE #@UnusedImport
    # tools (auth, crud, mail, plugins, services, ...)
    from gluon.tools import Auth
    global auth; auth = Auth()
    from gluon.tools import Crud
    global crud; crud = Crud()
    from gluon.tools import fetch #@UnusedImport
    from gluon.tools import geocode #@UnusedImport
    from gluon.tools import Mail
    global mail; mail = Mail()
    from gluon.tools import PluginManager
    global plugins; plugins = PluginManager()
    from gluon.tools import prettydate #@UnusedImport
    from gluon.tools import Recaptcha #@UnusedImport
    from gluon.tools import Service
    global service; service = Service()
    # form field validators (gluon.validators)
    from gluon.validators import CLEANUP #@UnusedImport
    from gluon.validators import CRYPT #@UnusedImport
    from gluon.validators import IS_ALPHANUMERIC #@UnusedImport
    from gluon.validators import IS_DATE #@UnusedImport
    from gluon.validators import IS_DATE_IN_RANGE #@UnusedImport
    from gluon.validators import IS_DATETIME #@UnusedImport
    from gluon.validators import IS_DATETIME_IN_RANGE #@UnusedImport
    from gluon.validators import IS_DECIMAL_IN_RANGE #@UnusedImport
    from gluon.validators import IS_EMAIL #@UnusedImport
    from gluon.validators import IS_EMPTY_OR #@UnusedImport
    from gluon.validators import IS_EQUAL_TO #@UnusedImport
    from gluon.validators import IS_EXPR #@UnusedImport
    from gluon.validators import IS_FLOAT_IN_RANGE #@UnusedImport
    from gluon.validators import IS_IMAGE #@UnusedImport
    from gluon.validators import IS_IN_DB #@UnusedImport
    from gluon.validators import IS_IN_SET #@UnusedImport
    from gluon.validators import IS_INT_IN_RANGE #@UnusedImport
    from gluon.validators import IS_IPV4 #@UnusedImport
    from gluon.validators import IS_LENGTH #@UnusedImport
    from gluon.validators import IS_LIST_OF #@UnusedImport
    from gluon.validators import IS_LOWER #@UnusedImport
    from gluon.validators import IS_MATCH #@UnusedImport
    from gluon.validators import IS_NOT_EMPTY #@UnusedImport
    from gluon.validators import IS_NOT_IN_DB #@UnusedImport
    from gluon.validators import IS_NULL_OR #@UnusedImport
    from gluon.validators import IS_SLUG #@UnusedImport
    from gluon.validators import IS_STRONG #@UnusedImport
    from gluon.validators import IS_TIME #@UnusedImport
    from gluon.validators import IS_UPLOAD_FILENAME #@UnusedImport
    from gluon.validators import IS_UPPER #@UnusedImport
    from gluon.validators import IS_URL #@UnusedImport
|
Over time, the moving parts and powered pieces that move your magnetic disks around at lightning speed will wear, age, and get noisy. In a desktop computer (a Windows PC, generally), you can quiet the drive with rubber shock absorbers or elastic suspension. Toting a laptop? NotebookReview has a good starter guide to cleaning your laptop, which reduces noise, removes dust, lowers temperatures, and gives your drive a bit more life—never a bad thing. (Original posts: Rubber shocks, elastic).
There's a huge range of tools that offer spy-agency-level data wiping—some of them are complete overkill. What software actually wipes the slate clean? Jason ran through them and picked out the good stuff, along with the physical, take-no-prisoners means of data destruction, in his advice on how to properly erase your physical media. If you need to pass on or reuse a disk, those apps and boot CDs will get you there. When you just need to make sure your credit card numbers are hidden forever, there's always a hammer. No, seriously. Photo by scragz.
Backups keep your data safe, but a complete image of your system on an external drive ensures that everything—applications, data, settings, wallpaper choice, the whole shot—make it back onto your system if things go wrong. Windows users can image their hard drives with DriveImage XML, a great free tool. Another free tool, SuperDuper, makes it easy, if not exactly quick, to mirror your entire Mac onto an external drive.
Whether it's an old desktop with a surprisingly large drive, or a laptop that's getting an upgrade, you can save that once built-in storage and turn it into an external drive, one you can just plug in with a USB cable and use for backup, media storage, or whatever you need.
The strangest stuff ends up clogging up your hard drive unnecessarily. Leftover files from CD rips, huge data folders from games, backup files for apps you don't have installed—the list goes on. We've previously shown how to visualize your usage, but we updated with a new look at more simple tools for analyzing and freeing up space on your hard drive. Once you know what's there, and how big it is, you can start toward getting rid of some of it.
When things go wrong with your hard drive, they usually go really wrong—lost files, no booting, and general panic ensue. Adam's rundown on recovering deleted files with free software focuses on Windows utilities, with a few cross-platform goodies sprinkled in. When you can't get into your system, we heartily recommend a live Ubuntu thumb drive to grab files and fix things up, though a system rescue CD session can work wonders, too. When we put the call out, the answer that came back for the best recovery tool was Recuva, a Windows utility that can save files from hard drives, SD cards, iPods, and much more.
Tech shops and laptop sellers will charge you a good bit over the parts cost to install a new, likely larger hard drive in a computer. Whether it's a desktop, a MacBook, or an SSD drive, you can likely take an hour and tackle it yourself. Adam explained the desktop hard drive installation, while tech blogger Dwight Silverman has explained a MacBook hard drive upgrade. Each laptop is built differently, but if a solid-state drive is in your future, this tutorial on MacBook installation should give you some general guidance on the job. It's a good skill to have, in general, because as Silverman writes, "Whatever you have now, it's not enough. And when you add more, that won't be enough, either." (Original post: MacBook hard drive).
Just because your computer only came with one operating system doesn't mean it has to stay that way. If you're a Mac owner who'd like a little Windows time now and again, read up on Gina's guide to setting up Boot Camp for Mac and Windows. If Windows 7 looks appealing, and a virtualized XP Mode isn't quite enough oomph, you can still boot Windows 7 with XP or Vista, and just choose your Windows flavor at start-up. And if you're keen on giving Linux a real go, why not dual boot it with Windows 7 in a way that makes it easy on both systems? It's so nice when everybody at the (partition) table just gets along.
Maybe you've settled on a spacious new hard drive for your laptop, a solid-state drive for durability, or just need to rescue your stuff before your drive goes. Time to re-install Windows, right? Not so much. Using the Clonezilla Live CD, it's possible to upgrade to a spacious new hard drive without having to completely re-arrange your operating system and applications, and tediously transfer all your media over. It's a step-by-step process, it's fairly straightforward, and you get to trick Windows, somewhat, into believing that nothing ever happened, which can be its own reward.
Don't sell your computer data short—even if your documents don't seem all that important, the time you spent installing applications and putting everything in order is definitely worth saving. If you've got a good spare hard drive or a web space you can FTP into, you can set up automatic hard drive backup through Windows. Most folks, though, will want to go with one or both of two routes: online backup, for the entire-house-burns-down security, and/or offline backup, for speedy backups and convenient restoring. The best tools for doing so, when we asked, were Dropbox for online syncing—even though it's not exactly proper backup—and Time Machine for external hard drive backups. For two solutions that make the process mostly painless and care-free, try web-based, automated systems like Mozy or Carbonite. Photo by miss karen.
What's the hard drive tool you'd rather not live without? What blog posts or tutorials showed you something new about your hard drive? Share the good stuff in the comments.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-20 10:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.9) for the 'eguard' app.
    # It replaces the app's custom ``User``/``Team`` models with Django's
    # swappable ``AUTH_USER_MODEL``, re-keys ``Entrance`` by a fixed door
    # ``code``, and adds an ``Event`` model that records which user passed
    # through which entrance at what time.
    # NOTE(review): migration operations are order-sensitive generated code;
    # do not re-order or edit the operation list by hand.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('eguard', '0001_initial'),
    ]

    operations = [
        # New access-log model. Its foreign keys to Entrance and to the user
        # model are attached at the end of this migration, after the old
        # custom User model has been deleted.
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # verbose_name is Chinese for "time"
                ('datetime', models.DateTimeField(verbose_name='\u65f6\u95f4')),
            ],
        ),
        # Detach the old custom User model from Entrance/Team before dropping it.
        migrations.RemoveField(
            model_name='user',
            name='entrance',
        ),
        migrations.RemoveField(
            model_name='user',
            name='group',
        ),
        # Strip the old free-form Entrance fields; entrances are now
        # identified by a fixed door code instead.
        migrations.RemoveField(
            model_name='entrance',
            name='name',
        ),
        migrations.RemoveField(
            model_name='entrance',
            name='position',
        ),
        migrations.RemoveField(
            model_name='entrance',
            name='webId',
        ),
        # Door code choices; the labels are Chinese door descriptions.
        migrations.AddField(
            model_name='entrance',
            name='code',
            field=models.CharField(choices=[(b'D500', '\u9760500M\u6838\u78c1\u5ba4\u95e8'), (b'D102', '\u503c\u73ed\u5ba4\u5927\u95e8'), (b'D103', '\u503c\u73ed\u5ba4\u91cc\u95e8'), (b'D600', '\u9760600M\u6838\u78c1\u5ba4\u95e8')], default=b'D102', max_length=4, verbose_name='\u95e8\u7981'),
        ),
        # Entrances are now linked to the project-wide user model.
        migrations.AddField(
            model_name='entrance',
            name='users',
            field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='\u51fa\u5165\u8005'),
        ),
        migrations.DeleteModel(
            name='Team',
        ),
        migrations.DeleteModel(
            name='User',
        ),
        # Attach Event's foreign keys last, once Entrance and the swapped
        # user model are in their final shape.
        migrations.AddField(
            model_name='event',
            name='entrace',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='eguard.Entrance', verbose_name='\u95e8'),
        ),
        migrations.AddField(
            model_name='event',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u4eba'),
        ),
    ]
|
You earned your BSW from a CSWE-accredited institution within the past five years.
A 3.25 GPA or above in social work courses.
A grade of B or higher (not B-) in all social work courses.
Students who do not meet the criteria above may still apply to the MSW Traditional program and be considered for transfer credit for any social work courses that earned a B grade or higher.
May 1* is the final deadline for MSW Advanced Standing program; the program begins in the Fall term.
*March 1 - Advanced standing students interested in specializing in School Social Work to obtain the Professional Educator License must apply to the MSW program by March 1. If admitted, students must also submit the declaration of specialization form along with official test scores for the Test of Academic Proficiency (TAP) by March 15. For more information, see the Schools specialization page.
You may submit your application form online. Students who have already applied to a graduate or professional program at Loyola University Chicago should fill out the reapplication form.
Topic 2: Discuss your career interests and goals. In what population or area of social work practice are you interested? How will a Social Work education from Loyola University Chicago School of Social Work prepare you for work in your area of interest? What do you hope to accomplish with an MSW?
Topic 3: Discuss your timing to pursue a graduate program now. What personal and/or professional factors have led you to want to pursue an MSW now? How do you intend to complete the program? How do you plan to balance the academic and field responsibilities of the program with outside responsibilities and obligations? How will you finance your studies?
If you are a transfer student currently or previously enrolled in an MSW program elsewhere, in addition to addressing the questions above in your personal statement, please address your interest in transferring to Loyola University Chicago School of Social Work from your current or past institution.
|
import numpy as np
def lagmat(x, maxlag, trim='forward'):
    '''create 2d array of lags

    Parameters
    ----------
    x : array_like, 1d or 2d
        data; if 2d, observation in rows and variables in columns
    maxlag : int
        all lags from zero to maxlag are included
    trim : str {'forward', 'backward', 'both', 'none'} or None
        * 'forward' : trim invalid observations in front
        * 'backward' : trim invalid initial observations
        * 'both' : trim invalid observations on both sides
        * 'none', None : no trimming of observations

    Returns
    -------
    lagmat : 2d array
        array with lagged observations

    Raises
    ------
    ValueError
        if maxlag is not smaller than the number of observations, or if
        trim is not one of the recognized options

    Examples
    --------
    >>> from scikits.statsmodels.sandbox.tsa.tsatools import lagmat
    >>> import numpy as np
    >>> X = np.arange(1,7).reshape(-1,2)
    >>> lagmat(X, maxlag=2, trim="forward")
    array([[ 1.,  2.,  0.,  0.,  0.,  0.],
       [ 3.,  4.,  1.,  2.,  0.,  0.],
       [ 5.,  6.,  3.,  4.,  1.,  2.]])
    >>> lagmat(X, maxlag=2, trim="backward")
    array([[ 5.,  6.,  3.,  4.,  1.,  2.],
       [ 0.,  0.,  5.,  6.,  3.,  4.],
       [ 0.,  0.,  0.,  0.,  5.,  6.]])
    >>> lagmat(X, maxlag=2, trim="both")
    array([[ 5.,  6.,  3.,  4.,  1.,  2.]])
    >>> lagmat(X, maxlag=2, trim="none")
    array([[ 1.,  2.,  0.,  0.,  0.,  0.],
       [ 3.,  4.,  1.,  2.,  0.,  0.],
       [ 5.,  6.,  3.,  4.,  1.,  2.],
       [ 0.,  0.,  5.,  6.,  3.,  4.],
       [ 0.,  0.,  0.,  0.,  5.,  6.]])

    Notes
    -----
    TODO:
    * allow list of lags additional to maxlag
    * create varnames for columns
    '''
    x = np.asarray(x)
    if x.ndim == 1:
        x = x[:, None]
    nobs, nvar = x.shape
    if maxlag >= nobs:
        raise ValueError("maxlag should be < nobs")
    # Full (nobs+maxlag, nvar*(maxlag+1)) matrix of shifted copies of x.
    # Columns are grouped per lag: lag 0 in the leftmost nvar columns,
    # then lag 1, ..., up to maxlag.  Rows outside the valid range are
    # zero padding and are trimmed below.
    lm = np.zeros((nobs + maxlag, nvar * (maxlag + 1)))
    for k in range(0, int(maxlag + 1)):
        lm[maxlag - k:nobs + maxlag - k,
           nvar * (maxlag - k):nvar * (maxlag - k + 1)] = x
    # The original code reused the leaked loop variable ``k`` below; it
    # always equals maxlag after the loop, so use maxlag explicitly.
    trimlower = trim.lower() if trim else trim
    if trimlower == 'none' or not trimlower:
        return lm
    elif trimlower == 'forward':
        # drop the zero-padded rows at the bottom
        return lm[:nobs, :]
    elif trimlower == 'both':
        # drop invalid rows at both ends
        return lm[maxlag:nobs, :]
    elif trimlower == 'backward':
        # drop the initial rows with incomplete lag information
        return lm[maxlag:, :]
    else:
        # function-call raise form: valid in both Python 2 and 3
        raise ValueError('trim option not valid')
def lagmat2ds(x, maxlag0, maxlagex=None, dropex=0, trim='forward'):
    '''generate lagmatrix for 2d array, columns arranged by variables

    Parameters
    ----------
    x : array_like, 2d
        2d data, observation in rows and variables in columns
    maxlag0 : int
        for first variable all lags from zero to maxlag are included
    maxlagex : None or int
        max lag for all other variables all lags from zero to maxlag are included
    dropex : int (default is 0)
        exclude first dropex lags from other variables
        for all variables, except the first, lags from dropex to maxlagex are
        included
    trim : string
        * 'forward' : trim invalid observations in front
        * 'backward' : trim invalid initial observations
        * 'both' : trim invalid observations on both sides
        * 'none' : no trimming of observations

    Returns
    -------
    lagmat : 2d array
        array with lagged observations, columns ordered by variable

    Notes
    -----
    very inefficient for unequal lags, just done for convenience
    '''
    if maxlagex is None:
        maxlagex = maxlag0
    # every column is lagged with the same maximum, then sliced per variable
    common_maxlag = max(maxlag0, maxlagex)
    nobs, nvar = x.shape
    # first variable keeps lags 0..maxlag0-1
    blocks = [lagmat(x[:, 0], common_maxlag, trim=trim)[:, :maxlag0]]
    # remaining variables keep lags dropex..maxlagex-1
    blocks.extend(
        lagmat(x[:, col], common_maxlag, trim=trim)[:, dropex:maxlagex]
        for col in range(1, nvar))
    return np.column_stack(blocks)
# Public API of this module.
__all__ = ['lagmat', 'lagmat2ds']

if __name__ == '__main__':
    # sanity check, mainly for imports
    x = np.random.normal(size=(100,2))
    tmp = lagmat(x,2)
    tmp = lagmat2ds(x,2)
    # grangercausalitytests(x, 2)
|
In some ways it seems like I've only been here a few days, things move so fast in Tari – but in other ways it seems like I've been here forever, it is so easy to understand the challenges in Tari they are often so similar to those in Lae.
I have actually been in Tari for about 2½ weeks. Since then I've been woken by VHF radio most nights when the hospital staff need the assistance of the expat nurses, anesthetist or surgeon. Last week the entire team responded at midnight to a lady who had been stabbed, I dealt with the extended family, tried to persuade them to donate blood, ran errands for the medics, fetching oxygen or passing messages to the laboratory technician who was collecting the blood.
Twice since I've been here I've had to stop the vehicle to unexpectedly bring people to the hospital. One lady was unmissable: she was lying motionless, bleeding in the middle of the road. My new boss and I jumped out (fortunately she is also a nurse), picked her up, put her in the back of the vehicle and drove back to the hospital. By way of an explanation of her injuries she said only "Niupella Meri" (new woman) and immediately it was clear, I had heard it all before so many times in Lae. Her husband had a new wife and this was his way of letting her know. She had been stabbed in the hip and punched in the face so hard she was unconscious.
Stabbings and choppings seem to be the most common problems that I see. I see a lot more here than I did in the clinic in Lae. There is a lot going on and I'm rushing about between building projects, helping a nurse move a patient or trying to get a generator started and so I see a lot of our patients who all love to chat. I was given some wonderful presents when I left Lae, most items of jewelry or bilums (man bags) so a group of patients call me "Morobe Mangi" (Morobe being the province that Lae is within, Mangi meaning "boy") as they can tell where my bag comes from. Being able to speak Pidgin makes my life easy. Although Huli is the first language of Tari, Tok Pisin is the second language and most people understand it. And everyone likes to talk and knows who MSF are, they all remember when the hospital didn't function and are very glad that it does now. I'm very glad to be here.
|
# coding=utf-8
"""
Collect statistics from Nginx
#### Dependencies
* urllib2
#### Usage
To enable the nginx status page to work with defaults,
add a file to /etc/nginx/sites-enabled/ (on Ubuntu) with the
following content:
<pre>
server {
listen 127.0.0.1:8080;
server_name localhost;
location /nginx_status {
stub_status on;
access_log /data/server/shared/log/access.log;
allow 127.0.0.1;
deny all;
}
}
</pre>
"""
import urllib2
import re
import diamond.collector
class NginxCollector(diamond.collector.Collector):
    """Collect connection and request statistics from nginx's stub_status page."""

    def get_default_config_help(self):
        """Return the help text for this collector's configuration options."""
        config_help = super(NginxCollector, self).get_default_config_help()
        config_help.update({
            'req_host': 'Hostname',
            'req_port': 'Port',
            'req_path': 'Path',
        })
        return config_help

    def get_default_config(self):
        """Return the default configuration (localhost:8080/nginx_status)."""
        default_config = super(NginxCollector, self).get_default_config()
        default_config['req_host'] = 'localhost'
        default_config['req_port'] = 8080
        default_config['req_path'] = '/nginx_status'
        default_config['path'] = 'nginx'
        return default_config

    def collect(self):
        """Fetch the status page and publish the parsed metrics."""
        url = 'http://%s:%i%s' % (self.config['req_host'],
                                  int(self.config['req_port']),
                                  self.config['req_path'])
        # Raw strings so \d and \s are real regex tokens rather than relying
        # on Python passing unknown string escapes through unchanged.
        activeConnectionsRE = re.compile(r'Active connections: (?P<conn>\d+)')
        totalConnectionsRE = re.compile(r'^\s+(?P<conn>\d+)\s+'
                                        r'(?P<acc>\d+)\s+(?P<req>\d+)')
        connectionStatusRE = re.compile(r'Reading: (?P<reading>\d+) '
                                        r'Writing: (?P<writing>\d+) '
                                        r'Waiting: (?P<waiting>\d+)')
        req = urllib2.Request(url)
        try:
            handle = urllib2.urlopen(req)
            try:
                for l in handle.readlines():
                    l = l.rstrip('\r\n')
                    # match each regex once per line (the original matched
                    # every successful pattern twice)
                    m = activeConnectionsRE.match(l)
                    if m:
                        self.publish_gauge('active_connections',
                                           int(m.group('conn')))
                        continue
                    m = totalConnectionsRE.match(l)
                    if m:
                        req_per_conn = float(m.group('req')) / \
                            float(m.group('acc'))
                        self.publish_counter('conn_accepted',
                                             int(m.group('conn')))
                        self.publish_counter('conn_handled',
                                             int(m.group('acc')))
                        self.publish_counter('req_handled',
                                             int(m.group('req')))
                        self.publish_gauge('req_per_conn',
                                           float(req_per_conn))
                        continue
                    m = connectionStatusRE.match(l)
                    if m:
                        self.publish_gauge('act_reads',
                                           int(m.group('reading')))
                        self.publish_gauge('act_writes',
                                           int(m.group('writing')))
                        self.publish_gauge('act_waits',
                                           int(m.group('waiting')))
            finally:
                # close the HTTP response so sockets are not leaked
                handle.close()
        except IOError:
            self.log.error("Unable to open %s" % url)
        except Exception as e:
            self.log.error("Unknown error opening url: %s", e)
|
Here you are at our website. At this time we are pleased to announce that we have discovered a very interesting topic to be reviewed, that is Long Casual Summer Dresses. Some people attempting to find specifics ofLong Casual Summer Dresses and certainly one of them is you, is not it?
Long Casual Summer Dresses is one of raised content at this time. We know it from google search engine data such as adwords or google trends. In an effort to give helpful info to our visitors, we have tried to find the closest relevance photo about Long Casual Summer Dresses. And here you can see now, this image have been extracted from reliable resource.
Long Casual Summer Dresses – is a free Complete Home Decoration Ideas Gallery posted at . This Long Casual Summer Dresses was posted in hope that we can give you an inspiration to Remodel your Home. This article can be your reference when you are confused to choose the right decoration for your home. This Long Casual Summer Dresses This maybe your best option to decor , because having a home with our own design is everyone’s dream.
We hope that , by posting this Long Casual Summer Dresses ideas , we can fulfill your needs of inspiration for designing your home. If you need more ideas to Design a Home , you can check at our collection right below this post. Also , don’t forget to always visit Best Long Casual Summer Dresses to find some new and fresh posts about Kitchen Remodeling , Bathroom Remodel , Bedroom Theme Ideas , Living Room Style and other Home Design Inspiration everyday. They are available for obtain, if you love and want to own it, simply click save badge on the article, and it’ll be directly down loaded to your home computer. Lastly if you’d like to obtain unique and the latest graphic related with Long Casual Summer Dresses, please follow us on google plus or save this site, we try our best to present you regular update with all new and fresh pics. Hope you enjoy staying right here.
Long Casual Summer Dresses is a high-definition wallpaper, and the size of this wallpaper is 512x871. You can use Long Casual Summer Dresses as your Desktop Background on a Tablet, Android or iPhone, or another smartphone device, for free. To download and obtain the Long Casual Summer Dresses images, click the download button below to get multiple high-res versions.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
savReaderWriter: A cross-platform Python interface to the IBM SPSS
Statistics Input Output Module. Read or Write SPSS system files (.sav, .zsav)
.. moduleauthor:: Albert-Jan Roskam <fomcl "at" yahoo "dot" com>
"""
# change this to 'True' in case you experience segmentation
# faults related to freeing memory.
segfaults = False

import os
import sys

# Optional speedups: the module works without them, but is faster when
# they are importable.
try:
    import psyco
    psycoOk = True # reading 66 % faster
except ImportError:
    psycoOk = False
try:
    import numpy
    numpyOk = True
except ImportError:
    numpyOk = False
try:
    from cWriterow import cWriterow # writing 66 % faster
    cWriterowOK = True
except ImportError:
    cWriterowOK = False

__author__ = "Albert-Jan Roskam" + " " + "@".join(["fomcl", "yahoo.com"])

# Read the package version from the bundled VERSION file.  A context
# manager guarantees the handle is closed (the previous
# open(...).read() relied on garbage collection to close it).
with open(os.path.join(os.path.dirname(__file__),
                       "VERSION")) as _version_file:
    __version__ = _version_file.read().strip()

# SPSS print/write format codes -> (symbolic name, description).
allFormats = {
    1: (b"SPSS_FMT_A", b"Alphanumeric"),
    2: (b"SPSS_FMT_AHEX", b"Alphanumeric hexadecimal"),
    3: (b"SPSS_FMT_COMMA", b"F Format with commas"),
    4: (b"SPSS_FMT_DOLLAR", b"Commas and floating dollar sign"),
    5: (b"SPSS_FMT_F", b"Default Numeric Format"),
    6: (b"SPSS_FMT_IB", b"Integer binary"),
    7: (b"SPSS_FMT_PIBHEX", b"Positive integer binary - hex"),
    8: (b"SPSS_FMT_P", b"Packed decimal"),
    9: (b"SPSS_FMT_PIB", b"Positive integer binary unsigned"),
    10: (b"SPSS_FMT_PK", b"Positive integer binary unsigned"),
    11: (b"SPSS_FMT_RB", b"Floating point binary"),
    12: (b"SPSS_FMT_RBHEX", b"Floating point binary hex"),
    15: (b"SPSS_FMT_Z", b"Zoned decimal"),
    16: (b"SPSS_FMT_N", b"N Format- unsigned with leading 0s"),
    17: (b"SPSS_FMT_E", b"E Format- with explicit power of 10"),
    20: (b"SPSS_FMT_DATE", b"Date format dd-mmm-yyyy"),
    21: (b"SPSS_FMT_TIME", b"Time format hh:mm:ss.s"),
    22: (b"SPSS_FMT_DATETIME", b"Date and Time"),
    23: (b"SPSS_FMT_ADATE", b"Date format dd-mmm-yyyy"),
    24: (b"SPSS_FMT_JDATE", b"Julian date - yyyyddd"),
    25: (b"SPSS_FMT_DTIME", b"Date-time dd hh:mm:ss.s"),
    26: (b"SPSS_FMT_WKDAY", b"Day of the week"),
    27: (b"SPSS_FMT_MONTH", b"Month"),
    28: (b"SPSS_FMT_MOYR", b"mmm yyyy"),
    29: (b"SPSS_FMT_QYR", b"q Q yyyy"),
    30: (b"SPSS_FMT_WKYR", b"ww WK yyyy"),
    31: (b"SPSS_FMT_PCT", b"Percent - F followed by %"),
    32: (b"SPSS_FMT_DOT", b"Like COMMA, switching dot for comma"),
    33: (b"SPSS_FMT_CCA", b"User Programmable currency format"),
    34: (b"SPSS_FMT_CCB", b"User Programmable currency format"),
    35: (b"SPSS_FMT_CCC", b"User Programmable currency format"),
    36: (b"SPSS_FMT_CCD", b"User Programmable currency format"),
    37: (b"SPSS_FMT_CCE", b"User Programmable currency format"),
    38: (b"SPSS_FMT_EDATE", b"Date in dd/mm/yyyy style"),
    39: (b"SPSS_FMT_SDATE", b"Date in yyyy/mm/dd style")}

# I/O-module length limits: symbolic name -> (max length, description).
MAXLENGTHS = {
    "SPSS_MAX_VARNAME": (64, "Variable name"),
    "SPSS_MAX_SHORTVARNAME": (8, "Short (compatibility) variable name"),
    "SPSS_MAX_SHORTSTRING": (8, "Short string variable"),
    "SPSS_MAX_IDSTRING": (64, "File label string"),
    "SPSS_MAX_LONGSTRING": (32767, "Long string variable"),
    "SPSS_MAX_VALLABEL": (120, "Value label"),
    "SPSS_MAX_VARLABEL": (256, "Variable label"),
    "SPSS_MAX_7SUBTYPE": (40, "Maximum record 7 subtype"),
    "SPSS_MAX_ENCODING": (64, "Maximum encoding text")}

# strftime patterns used to convert SPSS date formats on read.
supportedDates = { # uses ISO dates wherever applicable.
    b"DATE": "%Y-%m-%d",
    b"JDATE": "%Y-%m-%d",
    b"EDATE": "%Y-%m-%d",
    b"SDATE": "%Y-%m-%d",
    b"DATETIME": "%Y-%m-%d %H:%M:%S",
    b"ADATE": "%Y-%m-%d",
    b"WKDAY": "%A",
    b"MONTH": "%B",
    b"MOYR": "%B %Y",
    b"WKYR": "%W WK %Y",
    b"QYR": "%m Q %Y", # %m (month) is converted to quarter, see next dict.
    b"TIME": "%H:%M:%S.%f",
    b"DTIME": "%d %H:%M:%S"}

# month number -> quarter number, used for the QYR format above.
QUARTERS = {b'01': b'1', b'02': b'1', b'03': b'1',
            b'04': b'2', b'05': b'2', b'06': b'2',
            b'07': b'3', b'08': b'3', b'09': b'3',
            b'10': b'4', b'11': b'4', b'12': b'4'}

# Missing-value type codes as used by the SPSS I/O module.
userMissingValues = {
    "SPSS_NO_MISSVAL": 0,
    "SPSS_ONE_MISSVAL": 1,
    "SPSS_TWO_MISSVAL": 2,
    "SPSS_THREE_MISSVAL": 3,
    "SPSS_MISS_RANGE": -2,
    "SPSS_MISS_RANGEANDVAL": -3}

version = __version__

# Make the package directory itself importable, then pull the public
# classes into the package namespace.
sys.path.insert(0, os.path.dirname(__file__))

from py3k import *
from error import *
from generic import *
from header import *
from savReader import *
from savWriter import *
from savHeaderReader import *

__all__ = ["SavReader", "SavWriter", "SavHeaderReader"]
|
60-inch wide clearing path with a 20-inch intake height, 420cc gas powered engine with 13 HP. Electric start with recoil start for your easy starting. Easy to attach and remove from your UTV or ATV. Mounting plate is included.
|
# coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
"""
Plotter module:
This module contains the Plotter class, which is used to plot various data
from the genetic algorithm structure search.
"""
from pymatgen.core.composition import Composition
from pymatgen.phasediagram.entries import PDEntry
from pymatgen.phasediagram.maker import CompoundPhaseDiagram
from pymatgen.phasediagram.plotter import PDPlotter
import matplotlib.pyplot as plt
import os
class Plotter(object):
    """
    Used to plot various data from a structure search.
    """

    def __init__(self, data_file_path):
        """
        Makes a Plotter.

        Args:
            data_file_path: the path to file (called run_data) containing the
                data for the search
        """
        # get the input file contents
        input_file = os.path.abspath(data_file_path)
        try:
            with open(input_file) as input_data:
                self.lines = input_data.readlines()
        # narrowed from a bare except: only file-access problems should
        # abort the run, not unrelated programming errors
        except (IOError, OSError):
            print('Error reading data file.')
            print('Quitting...')
            quit()

    def get_progress_plot(self):
        """
        Returns a plot of the best value versus the number of energy
        calculations, as a matplotlib plot object.
        """
        # set the font to Times, rendered with Latex
        plt.rc('font', **{'family': 'serif', 'serif': ['Times']})
        plt.rc('text', usetex=True)

        # parse the number of composition space endpoints (only the count is
        # used here; the words are collected in reverse file order)
        endpoints_line = self.lines[0].split()
        endpoints = []
        for word in endpoints_line[::-1]:
            if word == 'endpoints:':
                break
            else:
                endpoints.append(word)
        num_endpoints = len(endpoints)

        if num_endpoints == 1:
            y_label = r'Best value (eV/atom)'
        elif num_endpoints == 2:
            y_label = r'Area of convex hull'
        else:
            y_label = r'Volume of convex hull'

        # parse the best values and numbers of energy calculations, skipping
        # rows whose best value is still 'None'.  The previous implementation
        # collected indices and deleted them afterwards, which mishandled
        # duplicate values (list.index returns the first match) and shifted
        # positions after each deletion; it also plotted the values as
        # strings.  Filter while parsing and convert to float instead.
        best_values = []
        num_calcs = []
        for i in range(4, len(self.lines)):
            line = self.lines[i].split()
            if line[5] == 'None':
                continue
            num_calcs.append(int(line[4]))
            best_values.append(float(line[5]))

        # make the plot
        plt.plot(num_calcs, best_values, color='blue', linewidth=2)
        plt.xlabel(r'Number of energy calculations', fontsize=22)
        plt.ylabel(y_label, fontsize=22)
        plt.tick_params(which='both', width=1, labelsize=18)
        plt.tick_params(which='major', length=8)
        plt.tick_params(which='minor', length=4)
        plt.xlim(xmin=0)
        plt.tight_layout()
        return plt

    def plot_progress(self):
        """
        Plots the best value versus the number of energy calculations.
        """
        self.get_progress_plot().show()

    def get_system_size_plot(self):
        """
        Returns a plot of the system size versus the number of energy
        calculations, as a matplotlib plot object.
        """
        # set the font to Times, rendered with Latex
        plt.rc('font', **{'family': 'serif', 'serif': ['Times']})
        plt.rc('text', usetex=True)

        # parse the compositions and numbers of energy calculations
        compositions = []
        num_calcs = []
        for i in range(4, len(self.lines)):
            line = self.lines[i].split()
            compositions.append(line[1])
            num_calcs.append(int(line[4]))

        # get the numbers of atoms from the compositions
        nums_atoms = []
        for composition in compositions:
            comp = Composition(composition)
            nums_atoms.append(comp.num_atoms)

        # make the plot
        plt.plot(num_calcs, nums_atoms, 'D', markersize=5,
                 markeredgecolor='blue', markerfacecolor='blue')
        plt.xlabel(r'Number of energy calculations', fontsize=22)
        plt.ylabel(r'Number of atoms in the cell', fontsize=22)
        plt.tick_params(which='both', width=1, labelsize=18)
        plt.tick_params(which='major', length=8)
        plt.tick_params(which='minor', length=4)
        plt.xlim(xmin=0)
        plt.ylim(ymin=0)
        plt.tight_layout()
        return plt

    def plot_system_size(self):
        """
        Plots the system size versus the number of energy calculations.
        """
        self.get_system_size_plot().show()

    def get_phase_diagram_plot(self):
        """
        Returns a phase diagram plot, as a matplotlib plot object.
        """
        # set the font to Times, rendered with Latex
        plt.rc('font', **{'family': 'serif', 'serif': ['Times']})
        plt.rc('text', usetex=True)

        # parse the composition space endpoints
        endpoints_line = self.lines[0].split()
        endpoints = []
        for word in endpoints_line[::-1]:
            if word == 'endpoints:':
                break
            else:
                endpoints.append(Composition(word))

        if len(endpoints) < 2:
            print('There must be at least 2 endpoint compositions to make a '
                  'phase diagram.')
            quit()

        # parse the compositions and total energies of all the structures
        compositions = []
        total_energies = []
        for i in range(4, len(self.lines)):
            line = self.lines[i].split()
            compositions.append(Composition(line[1]))
            total_energies.append(float(line[2]))

        # make a list of PDEntries
        pdentries = []
        for i in range(len(compositions)):
            pdentries.append(PDEntry(compositions[i], total_energies[i]))

        # make a CompoundPhaseDiagram
        compound_pd = CompoundPhaseDiagram(pdentries, endpoints)

        # make a PhaseDiagramPlotter
        pd_plotter = PDPlotter(compound_pd, show_unstable=100)
        return pd_plotter.get_plot(label_unstable=False)

    def plot_phase_diagram(self):
        """
        Plots the phase diagram.
        """
        self.get_phase_diagram_plot().show()
|
The natural world, the world in which we humans conduct our lives, often goes unnoticed as we move about our city streets. The change of seasons, birdsong, the necessary work of worms, seems irrelevant to our everyday urban lives. By bringing children and ourselves into the environs of Brower Park’s natural ecosystems we can rediscover our own and encourage our children’s natural curiosity about the world around us.
Use this reading list to select books to read with your children or by yourself under the shade of a 100 year old tree in Brower Park!
|
#!/usr/bin/env python
"""
NAME
iodp_jr6_magic.py
DESCRIPTION
converts shipboard .jr6 format files to measurements format files
This program assumes that you have created the specimens, samples, sites and location
files using convert_2_magic.iodp_samples_csv from files downloaded from the LIMS online
repository and that all samples are in that file. (See iodp_samples_magic.py).
SYNTAX
iodp_jr6_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-ID: directory for input file if not included in -f flag
-f FILE: specify input .csv file, default is all in directory
-WD: directory to output files to (default : current directory)
-F FILE: specify output measurements file, default is measurements.txt
-Fsp FILE: specify output specimens.txt file, default is specimens.txt
-lat LAT: latitude of site (also used as bounding latitude for location)
-lon LON: longitude of site (also used as bounding longitude for location)
-A: don't average replicate measurements
-v NUM: volume in cc, will be used if there is no volume in the input data (default : 12cc (rounded one inch diameter core, one inch length))
-dc FLOAT: if ARM measurements are in the file, this was the DC bias field applied
INPUT
JR6 .jr6 format file
"""
import sys
from pmagpy import convert_2_magic as convert
from pmagpy import pmag
def do_help():
    """Return the usage/help text for this script (the module docstring)."""
    usage_message = __doc__
    return usage_message
def main():
    """Collect command-line options into keyword arguments and run the
    IODP JR6 -> MagIC conversion.

    See the module docstring for the recognized options.
    """
    # Handle -h first so help works regardless of other flags.  The
    # original called help(__name__), which hands pydoc the module *name*
    # string instead of printing this script's usage message.
    if "-h" in sys.argv:
        print(__doc__)
        sys.exit()
    kwargs = {}
    # simple "-flag value" options, mapped to their keyword-argument names
    flag_to_kwarg = [('-WD', 'dir_path'),
                     ('-ID', 'input_dir_path'),
                     ('-F', 'meas_file'),
                     ('-Fsp', 'spec_file'),
                     ('-f', 'jr6_file'),
                     ('-lat', 'lat'),
                     ('-lon', 'lon'),
                     ('-v', 'volume')]
    for flag, kwarg in flag_to_kwarg:
        if flag in sys.argv:
            ind = sys.argv.index(flag)
            kwargs[kwarg] = sys.argv[ind + 1]
    # boolean flag: don't average replicate measurements
    if "-A" in sys.argv:
        kwargs['noave'] = True
    # DC bias field applied for ARM measurements; default 50 microtesla
    kwargs['dc_field'] = pmag.get_named_arg('-dc', default_val=50e-6)
    # do conversion
    convert.iodp_jr6_lore(**kwargs)
if __name__ == '__main__':
    # run the command-line conversion when executed as a script
    main()
|
Resort Kaftan – one size fits most, over – body kaftan with a drawstring that allows you to style it your way. Beautiful sky blue tones. Stylish, glamorous and practical. Silk Cotton with digital print of an original artwork by Phillip Ayers. Designed in Paris and hand finished in Australia. Free shipping and import taxes are included.
|
#
# This script is intentionally a mess. This is not meant to be used by you, folks.
#
# Copyright George Notaras
REL_FILES = [
'add-meta-tags.pot',
'add-meta-tags.php',
'amt-cli.php',
'amt-admin-panel.php',
'amt-settings.php',
'amt-template-tags.php',
'amt-utils.php',
'amt-embed.php',
'index.php',
'AUTHORS',
#'CONTRIBUTORS',
'LICENSE',
'NOTICE',
'README.rst',
'readme.txt',
# 'screenshot-1.png',
# 'screenshot-2.png',
# 'screenshot-3.png',
# 'screenshot-4.png',
'uninstall.php',
'wpml-config.xml',
]
REL_DIRS = [
'templates',
'metadata',
# 'languages',
# 'languages-contrib',
'css',
'js',
]
PLUGIN_METADATA_FILE = 'add-meta-tags.php'
POT_HEADER = """# POT (Portable Object Template)
#
# This file is part of the Add-Meta-Tags plugin for WordPress.
#
# Read more information about the Add-Meta-Tags translations at:
#
# http://www.codetrax.org/projects/wp-add-meta-tags/wiki/Translations
#
# Copyright (C) 2006-2016 George Notaras <gnot@g-loaded.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
# ==============================================================================
import sys
import os
import glob
import zipfile
import shutil
import subprocess
import polib
def get_name_release():
    """Parse the plugin name and version from PLUGIN_METADATA_FILE.

    Returns a (name, release) tuple, where name is lowercased with
    spaces replaced by dashes.  Raises Exception if either field cannot
    be found in the file's header comment.
    """
    def get_data(cur_line):
        # the value is everything after the first colon
        return cur_line.split(':')[1].strip()
    name = ''
    release = ''
    # 'with' guarantees the file is closed even if parsing raises (the
    # original used a manual f.close() that was skipped on error)
    with open(PLUGIN_METADATA_FILE) as f:
        for line in f:
            if line.lower().startswith('plugin name:'):
                name = get_data(line)
            elif line.lower().startswith('version:'):
                release = get_data(line)
            if name and release:
                break
    if not name:
        raise Exception('Cannot determine plugin name')
    elif not release:
        raise Exception('Cannot determine plugin version')
    else:
        # Replace spaces in name and convert it to lowercase
        name = name.replace(' ', '-')
        name = name.lower()
        return name, release
# ---------------------------------------------------------------------------
# Top-level release procedure (Python 2 script, runs on import):
# 1) regenerate the POT translation template, 2) compile PO -> MO,
# 3) stage the release files and zip them up.  Steps are order-dependent.
# ---------------------------------------------------------------------------
name, release = get_name_release()

print 'Generating POT file...'

# Translation
pot_domain = os.path.splitext(PLUGIN_METADATA_FILE)[0]

# Generate POT file
args = ['xgettext', '--default-domain=%s' % pot_domain, '--output=%s.pot' % pot_domain, '--language=PHP', '--from-code=UTF-8', '--keyword=__', '--keyword=_e', '--no-wrap', '--package-name=%s' % pot_domain, '--package-version=%s' % release, '--copyright-holder', 'George Notaras <gnot@g-loaded.eu>']
# Add php files as arguments
for rf in REL_FILES:
    if rf.endswith('.php'):
        args.append(rf)
for rf in os.listdir('metadata'):
    if rf.endswith('.php'):
        args.append( os.path.join( 'metadata', rf ) )
for rf in os.listdir('templates'):
    if rf.endswith('.php'):
        args.append( os.path.join( 'templates', rf ) )
print (' ').join(args)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()

# Replace POT Header
# (drops the first 4 lines xgettext generated and substitutes POT_HEADER)
f = open('%s.pot' % pot_domain, 'r')
pot_lines = f.readlines()
f.close()
f = open('%s.pot' % pot_domain, 'w')
f.write(POT_HEADER)
for n, line in enumerate(pot_lines):
    if n < 4:
        continue
    f.write(line)
f.close()
print 'Complete'

# Compile language .po files to .mo
print 'Compiling PO files to MO...'
for po_file in os.listdir('languages'):
    if not po_file.endswith('.po'):
        continue
    po_path = os.path.join('languages', po_file)
    print 'Converting', po_path
    po = polib.pofile(po_path, encoding='utf-8')
    mo_path = po_path[:-3] + '.mo'
    po.save_as_mofile(mo_path)
print 'Complete'
print

print 'Creating distribution package...'

# Create release dir and move release files inside it
os.mkdir(name)
# Copy files
for p_file in REL_FILES:
    shutil.copy(p_file, os.path.join(name, p_file))
# Copy dirs
for p_dir in REL_DIRS:
    shutil.copytree(p_dir, os.path.join(name, p_dir))

# Create distribution package
d_package_path = '%s-%s.zip' % (name, release)
d_package = zipfile.ZipFile(d_package_path, 'w', zipfile.ZIP_DEFLATED)
# Append root files
for p_file in REL_FILES:
    d_package.write(os.path.join(name, p_file))
# Append language directory
for p_dir in REL_DIRS:
    d_package.write(os.path.join(name, p_dir))
    # Append files in that directory
    for p_file in os.listdir(os.path.join(name, p_dir)):
        d_package.write(os.path.join(name, p_dir, p_file))
d_package.testzip()
d_package.comment = 'Official packaging by CodeTRAX'
d_package.printdir()
d_package.close()

# Remove the release dir
shutil.rmtree(name)

print 'Complete'
print
|
Trusted Tree Services is here to serve your needs for tree service in North NJ, whatever they may happen to be. Your property can receive the substantial benefits that trees can provide whether it’s a home or business. However, to receive these benefits, your trees must receive the care they need. When you’d like to ensure your trees remain in the best possible health, routine tree service is essential. One of, if not the most obvious advantage trees which are properly maintained can bring to your property is the fact that they make any property seem more attractive. This additional attractiveness is going to result in the second benefit, which is your property value increasing. This is an incredibly significant concern in the Northern NJ region where property values are incredibly high. Well positioned trees are capable of reducing the energy required for heating by 20-50 percent and decrease the need for air conditioning by 30 percent, according to the USDA Forest Service. When you consider the rising price of energy, this advantage becomes clear. Throughout the year, your trees need continuing care, and these are all reasons why you should ensure that they receive it. For top quality tree care, make the choice that local residents have been counting on for years. When you’re interested in obtaining reliable tree service in North NJ or the rest of the local region, get in touch with Trusted Tree Services today.
We offer a broad variety of kinds of tree service in North NJ, and tree trimming is one of the most essential. With tree trimming, you’re capable of making sure your trees are consistently looking the best they can. Trees become un-attractive if they are overgrown, and to enhance their appearance considerably, count on pro trimming services. Together with purely visual factors, this is capable of helping the tree grow as time goes on. The spread of disease is capable of being avoided by removing branches that are dead or diseased. Ensuring that a full tree removal is not needed down the line is going to be the result. Improvement of the air flow around the leaves and enabling light to reach the tree roots much easier are all advantages of thinning the branches in the tree’s crown. If your tree is a fruit or nut tree, production is capable of being made more effective by tree trimming. On the other hand, no one other than a professional should attempt tree trimming. It requires expert precision and knowledge of trees, regardless of how easy it might sound. This is why anytime you need tree service or tree removal in North NJ or the rest of the local region, it’s essential to call the specialists at Trusted Tree Services.
Do you need tree removal in North NJ? Trusted Tree Services is here to help. Sometimes it doesn’t matter how much you invest in quality routine tree maintenance. There are circumstances where a tree is going to be incapable of being saved. Substantial damage following a storm or disease are reasons why this might be the case. Though these are the most typical, residents find themselves requiring our services for a broad array of additional reasons. Trees encroaching on utility lines or structures is one. Trees that are leaning in a direction you don’t want, like towards your home or your driveway, is another issue which is capable of developing over time. Considering you generally see your trees on a daily basis, you might not even notice something like this right away. Structural problems with your tree are going to go unnoticed by anyone but a trained professional. Thanks to our incredible experience with the trees in the local region, any of these indications of a possible issue can be discovered by our skilled professionals. We utilize cutting edge equipment to get the job done in regards to tree removal in North NJ. We will get the job done effectively and quickly and for a reasonable cost. Residential and commercial clients alike count on our services and make us their top choice for this reason.
When a tree removal in North NJ is going to be something you need, catching the problem right away is essential. This is why you want to rely on an experienced tree service company. We’re able to identify many problems with your trees thanks to our substantial degree of experience. Following an inspection our professionals are going to be able to determine what the right course of action will be. It is always a wise idea to call us sooner as opposed to later. This way, you aren’t going to need to be worried when it comes to being liable for any injury or damage and your safety is going to be ensured. This is the reason relying on us for tree care on a routine basis is a choice that a lot of residential and commercial clients are making. We’re able to better determine what your trees’ needs are in this way as we can check up periodically on your property. Because of this, we are capable of identifying and correcting a large number of potential problems. We’re capable of supplying a lot of services which make it possible for trees who might have required a complete removal down the line to be saved if caught early enough. This makes hiring us on an ongoing basis a good investment. At a regular, but low cost to you as a customer, regular tree care is typically quite simple and manageable. When a full removal is necessary, it is a complex procedure that is going to incur a larger cost, regardless of how affordable our services are compared to other contractors out there. If you’re looking to ensure the greatest protection for you and your family with expert tree care and tree removal in North NJ, there isn’t anyone else you’ve got to contact.
It doesn’t matter whether you require ongoing tree care or full tree removal in North NJ. Don’t hesitate to contact Trusted Tree Service’s experts right away. The care you need to make certain that your trees truly flourish is what we’re standing by to provide you with. When you’re in a situation where you’re faced with a hazardous tree which is leaning or a large limb that’s about to fall, you’re about to depend on us to be there for emergency service. In terms of finding a contractor which has a trusted local reputation for both quality of craftsmanship and customer service, you’d be hard pressed to find a better choice. Only tree service experts that will be certain to get the job done properly the first time are fit to be in our employ.
|
intro = """
blake.py
version 4
BLAKE is a SHA3 round-3 finalist designed and submitted by
Jean-Philippe Aumasson et al.
At the core of BLAKE is a ChaCha-like mixer, very similar
to that found in the stream cipher, ChaCha8. Besides being
a very good mixer, ChaCha is fast.
References:
http://www.131002.net/blake/
http://csrc.nist.gov/groups/ST/hash/sha-3/index.html
http://en.wikipedia.org/wiki/BLAKE_(hash_function)
This implementation assumes all data is in increments of
whole bytes. (The formal definition of BLAKE allows for
hashing individual bits.) Note too that this implementation
does include the round-3 tweaks where the number of rounds
was increased to 14/16 from 10/14.
This version can be imported into both Python2 and Python3
programs.
Here are some comparative run times for different versions
of Python:
64-bit:
2.6 6.28s
2.7 6.34s
3.2 7.62s
pypy (2.7) 2.08s
32-bit:
2.7 13.65s
3.2 12.57s
Another test on a 2.0GHz Core 2 Duo of 10,000 iterations of
BLAKE-256 on a short message produced a time of 5.7 seconds.
Not bad, but if raw speed is what you want, look to the t
he C version. It is 40x faster and did the same thing in
0.13 seconds.
Copyright (c) 2009-2012 by Larry Bugbee, Kent, WA
ALL RIGHTS RESERVED.
blake.py IS EXPERIMENTAL SOFTWARE FOR EDUCATIONAL
PURPOSES ONLY. IT IS MADE AVAILABLE "AS-IS" WITHOUT
WARRANTY OR GUARANTEE OF ANY KIND. USE SIGNIFIES
ACCEPTANCE OF ALL RISK.
To make your learning and experimentation less cumbersome,
blake.py is free for any use.
Enjoy,
Larry Bugbee
March 2011
rev May 2011 - fixed Python version check (tx JP)
rev Apr 2012 - fixed an out-of-order bit set in final()
- moved self-test to a separate test pgm
- this now works with Python2 and Python3
"""
import struct

# Optional psyco JIT acceleration (available on some 32-bit Python 2
# builds only).  Catch only ImportError: the original bare "except:"
# would also have hidden unrelated failures.
try:
    import psyco
    have_psyco = True
    print('psyco enabled')
except ImportError:
    have_psyco = False
#---------------------------------------------------------------
class BLAKE(object):
    """BLAKE hash function (SHA-3 round-3 finalist by J-P Aumasson et al.).

    Supports digest sizes of 224, 256, 384 and 512 bits.  All data is
    assumed to be in increments of whole bytes.

    NOTE: the 32-bit-word variants (224/256) are configured here with
    ROUNDS = 8 ("BLAKE-8", as used by blakecoin) rather than the 14
    rounds of standard round-3 BLAKE-256, so standard BLAKE-256 test
    vectors will NOT match.
    """

    # - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # initial values, constants and padding

    # IVx for BLAKE-x
    IV64 = [
        0x6A09E667F3BCC908, 0xBB67AE8584CAA73B,
        0x3C6EF372FE94F82B, 0xA54FF53A5F1D36F1,
        0x510E527FADE682D1, 0x9B05688C2B3E6C1F,
        0x1F83D9ABFB41BD6B, 0x5BE0CD19137E2179,
    ]

    IV48 = [
        0xCBBB9D5DC1059ED8, 0x629A292A367CD507,
        0x9159015A3070DD17, 0x152FECD8F70E5939,
        0x67332667FFC00B31, 0x8EB44A8768581511,
        0xDB0C2E0D64F98FA7, 0x47B5481DBEFA4FA4,
    ]

    # note: the values here are the same as the high-order
    # half-words of IV64
    IV32 = [
        0x6A09E667, 0xBB67AE85,
        0x3C6EF372, 0xA54FF53A,
        0x510E527F, 0x9B05688C,
        0x1F83D9AB, 0x5BE0CD19,
    ]

    # note: the values here are the same as the low-order
    # half-words of IV48
    IV28 = [
        0xC1059ED8, 0x367CD507,
        0x3070DD17, 0xF70E5939,
        0xFFC00B31, 0x68581511,
        0x64F98FA7, 0xBEFA4FA4,
    ]

    # constants for BLAKE-64 and BLAKE-48
    C64 = [
        0x243F6A8885A308D3, 0x13198A2E03707344,
        0xA4093822299F31D0, 0x082EFA98EC4E6C89,
        0x452821E638D01377, 0xBE5466CF34E90C6C,
        0xC0AC29B7C97C50DD, 0x3F84D5B5B5470917,
        0x9216D5D98979FB1B, 0xD1310BA698DFB5AC,
        0x2FFD72DBD01ADFB7, 0xB8E1AFED6A267E96,
        0xBA7C9045F12C7F99, 0x24A19947B3916CF7,
        0x0801F2E2858EFC16, 0x636920D871574E69,
    ]

    # constants for BLAKE-32 and BLAKE-28
    # note: when concatenated pairwise these are the same values as
    # the 1st half of C64
    C32 = [
        0x243F6A88, 0x85A308D3,
        0x13198A2E, 0x03707344,
        0xA4093822, 0x299F31D0,
        0x082EFA98, 0xEC4E6C89,
        0x452821E6, 0x38D01377,
        0xBE5466CF, 0x34E90C6C,
        0xC0AC29B7, 0xC97C50DD,
        0x3F84D5B5, 0xB5470917,
    ]

    # the 10 permutations of {0,...,15}, repeated so that up to 20
    # rounds can index SIGMA[round] directly
    SIGMA = [
        [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15],
        [14,10, 4, 8, 9,15,13, 6, 1,12, 0, 2,11, 7, 5, 3],
        [11, 8,12, 0, 5, 2,15,13,10,14, 3, 6, 7, 1, 9, 4],
        [ 7, 9, 3, 1,13,12,11,14, 2, 6, 5,10, 4, 0,15, 8],
        [ 9, 0, 5, 7, 2, 4,10,15,14, 1,11,12, 6, 8, 3,13],
        [ 2,12, 6,10, 0,11, 8, 3, 4,13, 7, 5,15,14, 1, 9],
        [12, 5, 1,15,14,13, 4,10, 0, 7, 6, 3, 9, 2, 8,11],
        [13,11, 7,14,12, 1, 3, 9, 5, 0,15, 4, 8, 6, 2,10],
        [ 6,15,14, 9,11, 3, 0, 8,12, 2,13, 7, 1, 4,10, 5],
        [10, 2, 8, 4, 7, 6, 1, 5,15,11, 9,14, 3,12,13, 0],
        [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15],
        [14,10, 4, 8, 9,15,13, 6, 1,12, 0, 2,11, 7, 5, 3],
        [11, 8,12, 0, 5, 2,15,13,10,14, 3, 6, 7, 1, 9, 4],
        [ 7, 9, 3, 1,13,12,11,14, 2, 6, 5,10, 4, 0,15, 8],
        [ 9, 0, 5, 7, 2, 4,10,15,14, 1,11,12, 6, 8, 3,13],
        [ 2,12, 6,10, 0,11, 8, 3, 4,13, 7, 5,15,14, 1, 9],
        [12, 5, 1,15,14,13, 4,10, 0, 7, 6, 3, 9, 2, 8,11],
        [13,11, 7,14,12, 1, 3, 9, 5, 0,15, 4, 8, 6, 2,10],
        [ 6,15,14, 9,11, 3, 0, 8,12, 2,13, 7, 1, 4,10, 5],
        [10, 2, 8, 4, 7, 6, 1, 5,15,11, 9,14, 3,12,13, 0],
    ]

    MASK32BITS = 0xFFFFFFFF
    MASK64BITS = 0xFFFFFFFFFFFFFFFF

    # - - - - - - - - - - - - - - - - - - - - - - - - - - -

    def __init__(self, hashbitlen):
        """
        load the hashState structure (copy hashbitlen...)
        hashbitlen: length of the hash output (224, 256, 384 or 512)
        """
        if hashbitlen not in [224, 256, 384, 512]:
            raise Exception('hash length not 224, 256, 384 or 512')
        self.hashbitlen = hashbitlen
        self.h = [0]*8  # current chain value (initialized to the IV)
        self.t = 0      # number of *BITS* hashed so far
        self.cache = b''    # cached leftover data not yet compressed
        self.salt = [0]*4   # salt (null by default)
        self.init = 1       # set to 2 by update and 3 by final
        self.nullt = 0      # Boolean value for special case \ell_i=0

        # The algorithm is the same for both the 32- and 64- versions
        # of BLAKE.  The difference is in word size (4 vs 8 bytes),
        # blocksize (64 vs 128 bytes), number of rounds (14 vs 16)
        # and a few very specific constants.
        if (hashbitlen == 224) or (hashbitlen == 256):
            # setup for 32-bit words and 64-byte block
            self.byte2int = self._fourByte2int
            self.int2byte = self._int2fourByte
            self.MASK = self.MASK32BITS
            self.WORDBYTES = 4
            self.WORDBITS = 32
            self.BLKBYTES = 64
            self.BLKBITS = 512
            # self.ROUNDS = 14    # was 10 before round 3
            self.ROUNDS = 8       # BLAKE 8 for blakecoin
            self.cxx = self.C32
            self.rot1 = 16        # num bits to shift in G
            self.rot2 = 12        # num bits to shift in G
            self.rot3 = 8         # num bits to shift in G
            self.rot4 = 7         # num bits to shift in G
            self.mul = 0   # for 32-bit words, 32<<self.mul where self.mul = 0

            # 224- and 256-bit versions (32-bit words)
            if hashbitlen == 224:
                self.h = self.IV28[:]
            else:
                self.h = self.IV32[:]

        elif (hashbitlen == 384) or (hashbitlen == 512):
            # setup for 64-bit words and 128-byte block
            self.byte2int = self._eightByte2int
            self.int2byte = self._int2eightByte
            self.MASK = self.MASK64BITS
            self.WORDBYTES = 8
            self.WORDBITS = 64
            self.BLKBYTES = 128
            self.BLKBITS = 1024
            self.ROUNDS = 16      # was 14 before round 3
            self.cxx = self.C64
            self.rot1 = 32        # num bits to shift in G
            self.rot2 = 25        # num bits to shift in G
            self.rot3 = 16        # num bits to shift in G
            self.rot4 = 11        # num bits to shift in G
            self.mul = 1   # for 64-bit words, 32<<self.mul where self.mul = 1

            # 384- and 512-bit versions (64-bit words)
            if hashbitlen == 384:
                self.h = self.IV48[:]
            else:
                self.h = self.IV64[:]

    # - - - - - - - - - - - - - - - - - - - - - - - - - - -

    def _compress(self, block):
        """Compress one full message block into the chain value self.h."""
        byte2int = self.byte2int
        mul = self.mul      # de-reference these for ...speed?  ;-)
        cxx = self.cxx
        rot1 = self.rot1
        rot2 = self.rot2
        rot3 = self.rot3
        rot4 = self.rot4
        MASK = self.MASK
        WORDBITS = self.WORDBITS
        SIGMA = self.SIGMA

        # get message  (<<2 is the same as *4 but faster)
        m = [byte2int(block[i<<2<<mul:(i<<2<<mul)+(4<<mul)]) for i in range(16)]

        # initialization
        v = [0]*16
        v[ 0: 8] = [self.h[i] for i in range(8)]
        v[ 8:16] = [self.cxx[i] for i in range(8)]
        v[ 8:12] = [v[8+i] ^ self.salt[i] for i in range(4)]
        if self.nullt == 0:     # skip the counter injection when \ell_i = 0
            v[12] = v[12] ^ (self.t & MASK)
            v[13] = v[13] ^ (self.t & MASK)
            v[14] = v[14] ^ (self.t >> self.WORDBITS)
            v[15] = v[15] ^ (self.t >> self.WORDBITS)

        # - - - - - - - - - - - - - - - - -
        # ready?  let's ChaCha!!!

        def G(a, b, c, d, i):
            # One quarter-round of the ChaCha-like mixer; reads the
            # enclosing 'round' variable for the message permutation.
            va = v[a]   # it's faster to deref and reref later
            vb = v[b]
            vc = v[c]
            vd = v[d]
            sri  = SIGMA[round][i]
            sri1 = SIGMA[round][i+1]
            va = ((va + vb) + (m[sri] ^ cxx[sri1]) ) & MASK
            x  = vd ^ va
            vd = (x >> rot1) | ((x << (WORDBITS-rot1)) & MASK)
            vc = (vc + vd) & MASK
            x  = vb ^ vc
            vb = (x >> rot2) | ((x << (WORDBITS-rot2)) & MASK)
            va = ((va + vb) + (m[sri1] ^ cxx[sri]) ) & MASK
            x  = vd ^ va
            vd = (x >> rot3) | ((x << (WORDBITS-rot3)) & MASK)
            vc = (vc + vd) & MASK
            x  = vb ^ vc
            vb = (x >> rot4) | ((x << (WORDBITS-rot4)) & MASK)
            v[a] = va
            v[b] = vb
            v[c] = vc
            v[d] = vd

        for round in range(self.ROUNDS):
            # column step
            G( 0, 4, 8,12, 0)
            G( 1, 5, 9,13, 2)
            G( 2, 6,10,14, 4)
            G( 3, 7,11,15, 6)
            # diagonal step
            G( 0, 5,10,15, 8)
            G( 1, 6,11,12,10)
            G( 2, 7, 8,13,12)
            G( 3, 4, 9,14,14)

        # - - - - - - - - - - - - - - - - -

        # save current hash value  (use i&0x3 to get 0,1,2,3,0,1,2,3)
        self.h = [self.h[i]^v[i]^v[i+8]^self.salt[i&0x3]
                  for i in range(8)]

    # - - - - - - - - - - - - - - - - - - - - - - - - - - -

    def addsalt(self, salt):
        """ adds a salt to the hash function (OPTIONAL)
            should be called AFTER Init, and BEFORE update
            salt:  a bytestring, length determined by hashbitlen.
                   if not of sufficient length, the bytestring
                   will be assumed to be a big endian number and
                   prefixed with an appropriate number of null
                   bytes, and if too large, only the low order
                   bytes will be used.

              if hashbitlen=224 or 256, then salt will be 16 bytes
              if hashbitlen=384 or 512, then salt will be 32 bytes
        """
        # fail if addsalt() was not called at the right time
        if self.init != 1:
            raise Exception('addsalt() not called after init() and before update()')
        # salt size is to be 4x word size
        saltsize = self.WORDBYTES * 4
        # if too short, prefix with null bytes.  if too long,
        # truncate high order bytes
        if len(salt) < saltsize:
            # bug fix: pad with b'\x00' (bytes), not chr(0) (str) --
            # str + bytes raises TypeError on Python 3.  On Python 2,
            # b'\x00' == '\x00', so behavior there is unchanged.
            salt = (b'\x00' * (saltsize-len(salt)) + salt)
        else:
            salt = salt[-saltsize:]
        # prep the salt array
        self.salt[0] = self.byte2int(salt[            : 4<<self.mul])
        self.salt[1] = self.byte2int(salt[ 4<<self.mul: 8<<self.mul])
        self.salt[2] = self.byte2int(salt[ 8<<self.mul:12<<self.mul])
        self.salt[3] = self.byte2int(salt[12<<self.mul:            ])

    # - - - - - - - - - - - - - - - - - - - - - - - - - - -

    def update(self, data):
        """ update the state with new data, storing excess data
            as necessary.  may be called multiple times and if a
            call sends less than a full block in size, the leftover
            is cached and will be consumed in the next call
            data:  data to be hashed (bytestring)
        """
        self.init = 2

        BLKBYTES = self.BLKBYTES    # de-referenced for improved readability
        BLKBITS  = self.BLKBITS

        datalen = len(data)
        if not datalen:  return

        left = len(self.cache)
        fill = BLKBYTES - left

        # if any cached data and any added new data will fill a
        # full block, fill and compress
        if left and datalen >= fill:
            self.cache = self.cache + data[:fill]
            self.t += BLKBITS           # update counter
            self._compress(self.cache)
            self.cache = b''
            data = data[fill:]
            datalen -= fill

        # compress new data until not enough for a full block
        while datalen >= BLKBYTES:
            self.t += BLKBITS           # update counter
            self._compress(data[:BLKBYTES])
            data = data[BLKBYTES:]
            datalen -= BLKBYTES

        # cache all leftover bytes until next call to update()
        if datalen > 0:
            self.cache = self.cache + data[:datalen]

    # - - - - - - - - - - - - - - - - - - - - - - - - - - -

    def final(self, data=''):
        """ finalize the hash -- pad and hash remaining data
            returns hashval, the digest (a bytestring)
        """
        ZZ = b'\x00'
        ZO = b'\x01'
        OZ = b'\x80'
        OO = b'\x81'
        PADDING = OZ + ZZ*128   # pre-formatted padding data

        if data:
            self.update(data)

        # copy nb. bits hashed in total as a 64-bit BE word (32-bit BLAKE)
        # or as a 128-bit BE word (64-bit BLAKE)
        tt = self.t + (len(self.cache) << 3)
        if self.BLKBYTES == 64:
            msglen = self._int2eightByte(tt)
        else:
            low  = tt & self.MASK
            high = tt >> self.WORDBITS
            msglen = self._int2eightByte(high) + self._int2eightByte(low)

        # size of block without the words at the end that count
        # the number of bits, 55 or 111.
        # Note: (((self.WORDBITS/8)*2)+1) equals ((self.WORDBITS>>2)+1)
        sizewithout = self.BLKBYTES -  ((self.WORDBITS>>2)+1)

        if len(self.cache) == sizewithout:
            # special case of one padding byte
            self.t -= 8
            if self.hashbitlen in [224, 384]:
                self.update(OZ)
            else:
                self.update(OO)
        else:
            if len(self.cache) < sizewithout:
                # enough space to fill the block
                # use t=0 if no remaining data
                if len(self.cache) == 0:
                    self.nullt=1
                self.t -= (sizewithout - len(self.cache)) << 3
                self.update(PADDING[:sizewithout - len(self.cache)])
            else:
                # NOT enough space, need 2 compressions
                #   ...add marker, pad with nulls and compress
                self.t -= (self.BLKBYTES - len(self.cache)) << 3
                self.update(PADDING[:self.BLKBYTES - len(self.cache)])
                #   ...now pad w/nulls leaving space for marker & bit count
                self.t -= (sizewithout+1) << 3
                self.update(PADDING[1:sizewithout+1])  # pad with zeroes

                self.nullt = 1  # raise flag to set t=0 at the next _compress

            # append a marker byte
            if self.hashbitlen in [224, 384]:
                self.update(ZZ)
            else:
                self.update(ZO)
            self.t -= 8

        # append the number of bits (long long)
        self.t -= self.BLKBYTES
        self.update(msglen)

        hashval = []
        if self.BLKBYTES == 64:
            for h in self.h:
                hashval.append(self._int2fourByte(h))
        else:
            for h in self.h:
                hashval.append(self._int2eightByte(h))
        return b''.join(hashval)[:self.hashbitlen >> 3]

    digest = final      # may use digest() as a synonym for final()

    def midstate(self, data=''):
        """Return the current (unfinalized) chain value as a bytestring."""
        if data:
            self.update(data)
        hashval = []
        if self.BLKBYTES == 64:
            for h in self.h:
                hashval.append(self._int2fourByte(h))
        else:
            for h in self.h:
                hashval.append(self._int2eightByte(h))
        return b''.join(hashval)[:self.hashbitlen >> 3]

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # utility functions

    def _fourByte2int(self, bytestr):      # see also long2byt() below
        """ convert a 4-byte string to an int (long) """
        return struct.unpack('!L', bytestr)[0]

    def _eightByte2int(self, bytestr):
        """ convert a 8-byte string to an int (long long) """
        return struct.unpack('!Q', bytestr)[0]

    def _int2fourByte(self, x):            # see also long2byt() below
        """ convert a number to a 4-byte string, high order
            truncation possible (in Python x could be a BIGNUM)
        """
        return struct.pack('!L', x)

    def _int2eightByte(self, x):
        """ convert a number to a 8-byte string, high order
            truncation possible (in Python x could be a BIGNUM)
        """
        return struct.pack('!Q', x)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # bug fix: the original wrote psyco.proxy(self._compress), but 'self'
    # does not exist at class-body scope (NameError whenever psyco was
    # present).  Bind the class-namespace _compress instead.  The
    # globals().get() lookup also keeps the class importable when the
    # module-level have_psyco flag is absent.
    if globals().get('have_psyco', False):
        _compress = psyco.proxy(_compress)
#---------------------------------------------------------------
#---------------------------------------------------------------
#---------------------------------------------------------------
|
You can have confidence in Self Storage Crew to give you the most suitable professional services for Self Storage in Owen, WI. Our company has a team of experienced contractors and the most advanced technology in the industry to deliver everything that you're looking for. We make certain that you get the very best products and services, the most suitable price, and the finest quality materials. Call us today by dialing 888-739-5110 to begin.
At Self Storage Crew, we understand that you want to remain in your price range and reduce costs wherever you are able to. Yet, lowering costs shouldn't ever suggest that you sacrifice excellent quality on Self Storage in Owen, WI. We provide you with the highest quality while still costing you less. Our mission is to be sure that you have the best products and a end result that holds up throughout the years. That is attainable because we recognize how to save you time and funds on supplies and work. If you wish to save cash, Self Storage Crew is the company to connect with. You can reach our business at 888-739-5110 to learn more.
You have to be well informed on the subject of Self Storage in Owen, WI. We ensure you know what to expect. We will take the unexpected surprises out of the scenario by supplying precise and detailed advice. Start by talking about your project with our client service reps once you dial 888-739-5110. We will respond to your concerns and questions and arrange your preliminary meeting. Our staff will arrive at the arranged time with the necessary resources, and will work together with you during the entire undertaking.
You have got many good reasons to look to Self Storage Crew to suit your needs regarding Self Storage in Owen, WI. We'll be the best option when you need the best cash saving methods, the very best equipment, and the highest rate of client satisfaction. We are aware of your expectations and objectives, and we are here to help you using our practical experience. Dial 888-739-5110 when you require Self Storage services in Owen, and we'll work with you to successfully complete your task.
|
# Decompiled copy of the CPython 2 standard-library "abc" module.
import types
from _weakrefset import WeakSet
# Helper used only to obtain the type of an old-style (classic) class
# instance, i.e. types.InstanceType.
class _C:
    pass
# All classic-class instances share this one type; __instancecheck__ uses
# it to map such instances back to their __class__.
_InstanceType = type(_C())
def abstractmethod(funcobj):
    """Decorator marking *funcobj* as an abstract method.

    Classes built with ABCMeta collect names carrying the
    __isabstractmethod__ flag into __abstractmethods__.  The function
    object itself is returned unchanged apart from the flag.
    """
    setattr(funcobj, '__isabstractmethod__', True)
    return funcobj
class abstractproperty(property):
    """A property whose presence marks the enclosing class as abstract.

    Works exactly like the builtin property except that it carries the
    __isabstractmethod__ flag that ABCMeta.__new__ looks for when
    collecting abstract names.
    """
    __isabstractmethod__ = True
class ABCMeta(type):
    """Metaclass for defining Abstract Base Classes (ABCs), Python 2 version.

    Supports virtual subclassing via register(): registered classes (and
    their descendants) answer True to issubclass()/isinstance() checks
    against the ABC without actually inheriting from it.  Results are
    memoized in per-class WeakSet caches; a global invalidation counter
    detects when negative caches have been made stale by a register() call.
    """

    # Global counter bumped by every register() call.  Negative caches
    # remember the counter value they were valid for, so stale ones can
    # be discarded cheaply.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace):
        cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
        # Abstract names defined directly in this class body.
        abstracts = set((name for name, value in namespace.items() if getattr(value, '__isabstractmethod__', False)))
        # Plus inherited abstract names that this class did not override
        # with a concrete implementation.
        for base in bases:
            for name in getattr(base, '__abstractmethods__', set()):
                value = getattr(cls, name, None)
                if getattr(value, '__isabstractmethod__', False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # WeakSets so caching/registration does not keep classes alive.
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def register(cls, subclass):
        """Register *subclass* as a "virtual subclass" of this ABC."""
        if not isinstance(subclass, (type, types.ClassType)):
            raise TypeError('Can only register classes')
        if issubclass(subclass, cls):
            # Already a real subclass -- nothing to record.
            return
        # Registration in the other direction would make subclass-ness
        # ambiguous, so refuse it.
        if issubclass(cls, subclass):
            raise RuntimeError('Refusing to create an inheritance cycle')
        cls._abc_registry.add(subclass)
        # Invalidate every ABC's negative cache.
        ABCMeta._abc_invalidation_counter += 1

    def _dump_registry(cls, file = None):
        """Debug helper: print the registry and caches to *file*."""
        print >> file, 'Class: %s.%s' % (cls.__module__, cls.__name__)
        print >> file, 'Inv.counter: %s' % ABCMeta._abc_invalidation_counter
        for name in sorted(cls.__dict__.keys()):
            if name.startswith('_abc_'):
                value = getattr(cls, name)
                print >> file, '%s: %r' % (name, value)

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls): consults the caches."""
        subclass = getattr(instance, '__class__', None)
        if subclass is not None and subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        # Old-style instances all share one type; fall back to __class__.
        if subtype is _InstanceType:
            subtype = subclass
        if subtype is subclass or subclass is None:
            # Negative cache only trusted if still current.
            if cls._abc_negative_cache_version == ABCMeta._abc_invalidation_counter and subtype in cls._abc_negative_cache:
                return False
            return cls.__subclasscheck__(subtype)
        else:
            # __class__ and type() disagree (e.g. proxies): accept either.
            return cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype)

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls): consults the caches."""
        if subclass in cls._abc_cache:
            return True
        # Discard a negative cache that predates the last register() call.
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Give __subclasshook__ (if defined) the first say.
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Real inheritance.
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Virtual subclass via a registered class...
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # ...or via a real subclass of this ABC.
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        cls._abc_negative_cache.add(subclass)
        return False
|
All women deserve to be appreciated, respected, and admired. When a woman improves her appearance, an amazing thing happens. People begin to pay attention to her, listen to her, seek her company, ask her opinion. She becomes more confident, and this makes her look even better! As her confidence grows, so does the Respect she receives – at work, at home, everywhere. As her insecurity falls away, she becomes more daring, self-assured, courageous; she finds she can do anything. Her life improves in every way.
A woman becomes more alluring through the creation of her choice of scent. The appeal of perfume is that it is at once ephemeral and empowering. It creates a shimmering invisible armor that lingers in a room long after its wearer has gone and infuses our imagination with a subtle power, hinting at a hidden identity. The message is, a woman must always smell good.
The fact is, women receive more respect not just from men but from other women too when they make themselves look good and show their self confidence. When they dress well, take care of their hair, and moderately/skillfully apply makeup, it affects the behavior of those around them in a positive way. And that makes them feel better and increases their confidence and self-esteem. And that gains them more respect, which makes them feel and look even better, and the cycle repeats. You’ve heard of the vicious circle? This is the virtuous circle…The Cycle of Beauty!
Beauty is a state of mind. It’s not a question of whether you’re “pretty” or “plain”, young or old, thin or not thin, literate or illiterate… it’s a matter of your self-image; when you look good, you feel good. When you feel good, you look even better. And then you can do ANYTHING!
A good appearance shows that you respect yourself. When people see that, they feel respect for you too. Of course it is possible to respect yourself without making yourself look good, but when you take the trouble to fix your hair and your nails, put on makeup, and dress well, that also shows your respect for others. And when you give respect, you get respect.
Note however, whatever message you’re trying to send to the world, never forget the clothes you put that message in will determine the way it’s received. So, dress it carefully.
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import unittest
from marionette import MarionetteTestCase
from marionette_driver.addons import Addons, AddonInstallException
# Absolute path of this test module's directory; the .xpi add-on fixtures
# referenced by the tests live alongside this file.
here = os.path.abspath(os.path.dirname(__file__))
class TestAddons(MarionetteTestCase):
    """Integration tests for installing/uninstalling add-ons via the
    marionette_driver.addons.Addons wrapper."""

    def setUp(self):
        MarionetteTestCase.setUp(self)
        # Addons drives the browser's AddonManager through Marionette.
        self.addons = Addons(self.marionette)

    @property
    def all_addon_ids(self):
        """Return the ids of every add-on the AddonManager currently knows."""
        with self.marionette.using_context('chrome'):
            addons = self.marionette.execute_async_script("""
                Components.utils.import("resource://gre/modules/AddonManager.jsm");
                AddonManager.getAllAddons(function(addons){
                  let ids = addons.map(function(x) {
                    return x.id;
                  });
                  marionetteScriptFinished(ids);
                });
            """)
        return addons

    def test_install_and_remove_temporary_unsigned_addon(self):
        # temp=True installs bypass the signing requirement.
        addon_path = os.path.join(here, 'mn-restartless-unsigned.xpi')
        addon_id = self.addons.install(addon_path, temp=True)
        self.assertIn(addon_id, self.all_addon_ids)

        self.addons.uninstall(addon_id)
        self.assertNotIn(addon_id, self.all_addon_ids)

    def test_install_unsigned_addon(self):
        # A permanent install of an unsigned add-on must be rejected.
        addon_path = os.path.join(here, 'mn-restartless-unsigned.xpi')
        with self.assertRaises(AddonInstallException):
            self.addons.install(addon_path)

    @unittest.skip("need to get the test extension signed")
    def test_install_and_remove_signed_addon(self):
        addon_path = os.path.join(here, 'mn-restartless-signed.xpi')
        addon_id = self.addons.install(addon_path)
        self.assertIn(addon_id, self.all_addon_ids)

        self.addons.uninstall(addon_id)
        self.assertNotIn(addon_id, self.all_addon_ids)
|
Home > Technology & Electronics > Microfiber Cloths > Wesleyan Microfiber Cloth - 7" x 9"
Wesleyan Microfiber Cloth - 7" x 9"
Don't use a small microfiber cloth for large electronics. The custom imprinted Wesleyan Microfiber Cloth - 7" x 9" is the perfect item for cleaning tablets and computers but also for displaying your logo and text on a large product. Your clients will wipe their items and keep their things clean - thanks to your imprint.
Multiple packaging options available, call 1-800-525-9600 for pricing.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.