prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
taddr = None
else:
class mac_linux(netaddr.mac_unix):
pass
mac_linux.word_fmt = '%.2x'
from ansible import errors
# ---- IP address and network query helpers ----
def _empty_ipaddr_query(v, vtype):
# We don't have any query to process, so just check what type the user
# expects, and return the IP address in a correct format
if v:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
def _first_last(v):
if v.size == 2:
first_usable = int(netaddr.IPAddress(v.first))
last_usable = int(netaddr.IPAddress(v.last))
return first_usable, last_usable
elif v.size > 1:
first_usable = int(netaddr.IPAddress(v.first + 1))
last_usable = int(netaddr.IPAddress(v.last - 1))
return first_usable, last_usable
def _6to4_query(v, vtype, value):
if v.version == 4:
if v.size == 1:
ipconv = str(v.ip)
elif v.size > 1:
if v.ip != v.network:
ipconv = str(v.ip)
else:
ipconv = False
if ipaddr(ipconv, 'public'):
numbers = list(map(int, ipconv.split('.')))
try:
return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
except Exception:
return False
elif v.version == 6:
if vtype == 'address':
if ipaddr(str(v), '2002::/16'):
return value
elif vtype == 'network':
if v.ip != v.network:
if ipaddr(str(v.ip), '2002::/16'):
return value
else:
return False
def _ip_query(v):
if v.size == 1:
return str(v.ip)
if v.size > 1:
# /31 networks in netaddr have no broadcast address
if v.ip != v.network or not v.broadcast:
return str(v.ip)
def _gateway_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _address_prefix_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _bool_ipaddr_query(v):
if v:
return True
def _broadcast_query(v):
if v.size > 2:
return str(v.broadcast)
def _cidr_query(v):
return str(v)
def _cidr_lookup_query(v, iplist, value):
try:
if v in iplist:
return value
except Exception:
return False
def _first_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size == 2:
return str(netaddr.IPAddress(int(v.network)))
elif v.size > 1:
return str(netaddr.IPAddress(int(v.network) + 1))
def _host_query(v):
if v.size == 1:
return str(v)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _hostmask_query(v):
return str(v.hostmask)
def _int_query(v, vtype):
if vtype == 'address':
return int(v.ip)
elif vtype == 'network':
return str(int(v.ip)) + '/' + str(int(v.prefixlen))
def _ip_prefix_query(v):
if v.size == 2:
return str(v.ip) + '/' + str(v.prefixlen)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _ip_netmask_query(v):
if v.size == 2:
return str(v.ip) + ' ' + str(v.netmask)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + ' ' + str(v.netmask)
'''
def _ip_wildcard_query(v):
if v.size == 2:
return str(v.ip) + ' ' + str(v.hostmask)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + ' ' + str(v.hostmask)
'''
def _ipv4_query(v, value):
if v.version == 6:
try:
return str(v.ipv4())
except Exception:
return False
else:
return value
def _ipv6_query(v, value):
if v.version == 4:
return str(v.ipv6())
else:
return value
def _last_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
return str(netaddr.IPAddress(last_usable))
def _link_local_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v.version == 4:
if ipaddr(str(v_ip), '169.254.0.0/24'):
return value
elif v.version == 6:
if ipaddr(str(v_ip), 'fe80::/10'):
return value
def _loopback_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_loopback():
return value
def _multicast_query(v, value):
if v.is_multicast():
return value
def _net_query(v):
if v.size > 1:
if v.ip == v.network:
return str(v.network) + '/' + str(v.prefixlen)
def _netmask_query(v):
return str(v.netmask)
def _network_query(v):
'''Return the network of a given IP or subnet'''
return str(v.network)
def _network_id_query(v):
'''Return the network of a given IP or subnet'''
return str(v.network)
def _network_netmask_query(v):
return str(v.network) + ' ' + str(v.netmask)
def _network_wildcard_query(v):
return str(v.network) + ' ' + str(v.hostmask)
def _next_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
next_ip = int(netaddr.IPAddress(int(v.ip) + 1))
if next_ip >= first_usable a | nd next_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) + 1))
def _prefix_query(v):
return int(v.prefixlen)
def _previous_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usa | ble, last_usable = _first_last(v)
previous_ip = int(netaddr.IPAddress(int(v.ip) - 1))
if previous_ip >= first_usable and previous_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) - 1))
def _private_query(v, value):
if v.is_private():
return value
def _public_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if (v_ip.is_unicast() and not v_ip.is_private() and
not v_ip.is_loopback() and not v_ip.is_netmask() and
not v_ip.is_hostmask()):
return value
def _range_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
first_usable = str(netaddr.IPAddress(first_usable))
last_usable = str(netaddr.IPAddress(last_usable))
return "{0}-{1}".format(first_usable, last_usable)
def _revdns_query(v):
v_ip = netaddr.IPAddress(str(v.ip))
return v_ip.reverse_dns
def _size_query(v):
return v.size
def _size_usable_query(v):
if v.size == 1:
return 0
elif v.size == 2:
return 2
return v.size - 2
def _subnet_query(v):
return str(v.cidr)
def _type_query(v):
if v.size == 1:
return 'address'
if v.size > 1:
if v.ip != v.network:
return 'address'
else:
return 'network'
def _unicast_query(v, value):
if v.is_unicast():
return value
def _version_query(v):
return v.version
def _wrap_query(v, vtype, value):
if v.version == 6:
if vtype == 'address':
return '[' + str(v.ip) + ']'
elif vtype == 'network':
return '[' + str(v.ip) + ']/' + str(v.prefixlen)
else:
return va |
#!/usr/bin/env python
# Copyright (C) 2011 Aaron Lindsay <aaron@aclindsay.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the i | mplied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from time import sleep
import atexit
import logging
from signal import SIGTERM
pi | d_file = "" #holds the name of file holding our pid
def daemonize(pid_filename, daemon_fn):
"""Daemonize the current process, store the new pid in pid_filename, and
call daemon_fn() to continue execution."""
global pid_file
pid_file = pid_filename
try:
#fork off a process, kill the parent
if os.fork() > 0:
os._exit(0)
except:
logging.error("Failed to fork new process.")
os._exit(0)
os.chdir("/")
os.setsid() #start a new session, with this as the session leader
os.umask(0) #reset file creation mask
#fork again
try:
if os.fork() > 0:
os._exit(0)
except:
logging.error("Failed to fork new process.")
os._exit(0)
#flush all terminal 'files' and redirect them to /dev/null
sys.stdout.flush()
sys.stderr.flush()
null = os.open('/dev/null', os.O_RDWR)
os.dup2(null, sys.stdin.fileno())
os.dup2(null, sys.stdout.fileno())
os.dup2(null, sys.stderr.fileno())
os.close(null)
#store our current pid in the given pidfile
atexit.register(rm_pid_file) #delete pid file when current process exits
pid = os.getpid()
try:
with open(pid_file,'w') as f:
f.write(str(pid))
f.close()
except:
logging.error("Failed to create pid file at %s" %
(pid_filename))
os._exit(0)
#run the function with "real work" in it
daemon_fn()
def rm_pid_file():
global pid_file
os.remove(pid_file)
def aengelize(pid_filename):
"""Make the daemonized process represented by the given filename 'go to
heaven'."""
try:
with open(pid_filename,'r') as f:
pid = int(f.read().strip())
f.close()
except:
logging.error("Failed to open pid file at %s. Process already exited?"
% (pid_filename))
sys.exit(0)
#kill process
try:
#try to kill process for 11 seconds
for i in range(0,110):
os.kill(pid, SIGTERM)
sleep(0.1)
logging.error("Failed to stop process")
except OSError, err:
if str(err).find("No such process") <= 0:
logging.error("Failed to stop process")
sys.exit(1)
|
import os
import codecs
try:
from setuptools import (setup, find_packages)
except ImportError:
from distutils.core import (setup, find_packages)
VERSION = (0, 2, 0)
__version__ = '.'.join(map(str, VERSION[:3])) + "".join(VERSION[3:])
__package_name__ = 'pelican-readtime'
__description__ = 'Plugin for Pelican that computes average read time.'
__contact_names__ = 'David Jenkins, Deepak Bhalla, Jonathan Dektiar'
__contact_emails__ = 'djenkinsdev@gmail.com, contact@deepakrb.com, contact@jonathandekhtiar.eu'
__homepage__ = 'https://github.com/JenkinsDev/pelican-readtime'
__repository_url__ = 'https://github.com/JenkinsDev/pelican-readtime'
__download_url__ = 'https://github.com/JenkinsDev/pelican-readtime'
__docformat__ = 'markdown'
__license__ = 'MIT'
__keywords__ = 'pelican blogging blog static webdevelopment plugin pelican-plugin readtime python python3 python2'
here = os.path.abspath(os.path.dirname(__file__))
if os.path.exists('README.rst'):
# codec is used for consistent encoding
long_description = codecs.open(
os.path.join(here, 'README.rst'), 'r', 'utf-8').read()
else:
long_description = 'See ' + __homepage__
setup(
| name=__package_name__,
version=__versi | on__,
description=__description__,
long_description=long_description,
url=__repository_url__,
download_url=__download_url__,
license='MIT',
author=__contact_names__,
author_email=__contact_emails__,
maintainer=__contact_names__,
maintainer_email=__contact_emails__,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords=__keywords__,
packages=[''],
install_requires=['pelican>=3.6'],
zip_safe=True,
include_package_data=True
)
|
# -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from django.forms.util import flatatt, ErrorDict, ErrorList
from django.test import TestCase
from django.utils.s | afestring import mark_safe
from django.utils.translation import ugettext_lazy
class FormsUtilTestCase(TestCase):
# Tests for forms/util.py module.
def test_flatatt(self):
###########
# flatatt #
###########
self.assertEqual(flatatt({'id': "header"}), u' id="header"')
self.assertEqual(flatatt({'class': "news", 'title': "Read this"}), u' class="news" title="Read this"')
self.assertEqual(flatatt({}), u'')
def test_ | validation_error(self):
###################
# ValidationError #
###################
# Can take a string.
self.assertHTMLEqual(str(ErrorList(ValidationError("There was an error.").messages)),
'<ul class="errorlist"><li>There was an error.</li></ul>')
# Can take a unicode string.
self.assertHTMLEqual(unicode(ErrorList(ValidationError(u"Not \u03C0.").messages)),
u'<ul class="errorlist"><li>Not π.</li></ul>')
# Can take a lazy string.
self.assertHTMLEqual(str(ErrorList(ValidationError(ugettext_lazy("Error.")).messages)),
'<ul class="errorlist"><li>Error.</li></ul>')
# Can take a list.
self.assertHTMLEqual(str(ErrorList(ValidationError(["Error one.", "Error two."]).messages)),
'<ul class="errorlist"><li>Error one.</li><li>Error two.</li></ul>')
# Can take a mixture in a list.
self.assertHTMLEqual(str(ErrorList(ValidationError(["First error.", u"Not \u03C0.", ugettext_lazy("Error.")]).messages)),
'<ul class="errorlist"><li>First error.</li><li>Not π.</li><li>Error.</li></ul>')
class VeryBadError:
def __unicode__(self): return u"A very bad error."
# Can take a non-string.
self.assertHTMLEqual(str(ErrorList(ValidationError(VeryBadError()).messages)),
'<ul class="errorlist"><li>A very bad error.</li></ul>')
# Escapes non-safe input but not input marked safe.
example = 'Example of link: <a href="http://www.example.com/">example</a>'
self.assertHTMLEqual(str(ErrorList([example])),
'<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorList([mark_safe(example)])),
'<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorDict({'name': example})),
'<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorDict({'name': mark_safe(example)})),
'<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
|
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.signals import user_logged_out
from django.dispatch import receiver
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from repan | ier.auth_backend import RepanierAuthBackend
@login_required()
@csrf_protect
@never_cache
def logout_view(request):
"""
Logs out the user and displays 'You are logged out' message.
"""
logout(request)
# pages-root is the django cms root page.
# pages-root may be replaced by login_form to go to the login form instead of the home page
# The reverse may be replaced by "/" to also go to the home page
return HttpResponseRedirect(re | verse("pages-root"))
@receiver(user_logged_out)
def receiver_user_logged_out(sender, request, user, **kwargs):
RepanierAuthBackend.remove_staff_right(user=user)
|
#!/usr/bin/env python
#------------------------------------------------------------
# Script which demonstrates how to find the best-fit
# parameters of a Voigt line-shape model
#
# Vog, 26 Mar 2012
#------------------------------------------------------------
import numpy
from matplotlib.pyplot import figure, show, rc
from scipy.special import wofz
from kapteyn import kmpfit
ln2 = numpy.log(2)
def voigt(x, y):
# The Voigt function is also the real part of
# w(z) = exp(-z^2) erfc(iz), the complex probability function,
# which is also known as the Faddeeva function. Scipy has
# implemented this function under the name wofz()
z = x + 1j*y
I = wofz(z).real
return I
def Voigt(nu, alphaD, alphaL, nu_0, A, a=0, b=0):
# The Voigt line shape in terms of its physical parameters
f = numpy.sqrt(ln2)
x = (nu-nu_0)/alphaD * f
y = alphaL/alphaD * f
backg = a + b*nu
V = A*f/(alphaD*numpy.sqrt(numpy.pi)) * voigt(x, y) + backg
return V
def funcV(p, x):
# Compose the Voigt line-shape
alphaD, alphaL, nu_0, I, a, b = p
return Voigt(x, alphaD, alphaL, nu_0, I, a, b)
def funcG(p, x):
# Model function is a gaussian
A, mu, sigma, zerolev = p
return( A * numpy.exp(-(x-mu)*(x-mu)/(2*sigma*sigma)) + zerolev )
def residualsV(p, data):
# Return weighted residuals of Voigt
x, y, err = data
return (y-funcV(p,x)) / err
def residualsG(p, data):
# Return weighted residuals of Gauss
x, y, err = data
return (y-funcG(p,x)) / err
# Data from simulated MUSE cube
x = numpy.array([854.05,854.18,854.31,854.44,854.57,854.7,854.83,854.96,\
855.09,855.22,855.35,855.48,855.61,855.74,855.87,856.0,\
856.13,856.26,856.39,856.52,856.65,856.78,856.91])
y = numpy.array([6.31683382764,6.41273839772,6.43047296256,6.37437933311,\
6.34883451462,6.30711287633,6.24409954622,6.09241716936,\
5.75421549752,5.20381929725,4.18020502292,3.64663145132,\
4.25251198746,5.23945118487,5.76701752096,6.06587703526,\
6.15751018003,6.25985588506,6.35063433647,6.41795488447,\
6.42002335563,6.35883554071,6.36915982142])
N = len(y)
err = numpy.ones(N)
A = -2
alphaD = 0.5
alphaL = 0.5
a = 6
b = 0
nu_0 = 855
p0 = [alphaD, alphaL, nu_0, A, a, b]
# Do the fit
fitter = kmpfit.Fitter(residuals=residualsV, data=(x,y,err))
fitter.parinfo = [{}, {}, {}, {}, {}, {'fixed':True}] # Take zero level fixed in fit
fitter.fit(params0=p0)
print("\n========= Fit results Voigt profile ====== | ====")
print("Initial params:", fitter.params0)
print("Params: ", fitter.params)
print("Iterations: ", fitter.niter)
print("Function ev: ", fitter.nfev)
print("Uncertainties: ", fitter.xerror)
print("dof: ", fitter.dof)
prin | t("chi^2, rchi2: ", fitter.chi2_min, fitter.rchi2_min)
print("stderr: ", fitter.stderr)
print("Status: ", fitter.status)
alphaD, alphaL, nu_0, I, a_back, b_back = fitter.params
c1 = 1.0692
c2 = 0.86639
hwhm = 0.5*(c1*alphaL+numpy.sqrt(c2*alphaL**2+4*alphaD**2))
print("\nFWHM Voigt profile: ", 2*hwhm)
f = numpy.sqrt(ln2)
Y = alphaL/alphaD * f
amp = I/alphaD*numpy.sqrt(ln2/numpy.pi)*voigt(0,Y)
print("Amplitude Voigt profile:", amp)
print("Area under profile: ", I)
# Fit the Gaussian model
p0 = [-3, 855, 0.5, 6.3]
fitterG = kmpfit.Fitter(residuals=residualsG, data=(x,y,err))
#fitterG.parinfo = [{}, {}, {}, {}, {}] # Take zero level fixed in fit
fitterG.fit(params0=p0)
print("\n========= Fit results Gaussian profile ==========")
print("Initial params:", fitterG.params0)
print("Params: ", fitterG.params)
print("Iterations: ", fitterG.niter)
print("Function ev: ", fitterG.nfev)
print("Uncertainties: ", fitterG.xerror)
print("dof: ", fitterG.dof)
print("chi^2, rchi2: ", fitterG.chi2_min, fitterG.rchi2_min)
print("stderr: ", fitterG.stderr)
print("Status: ", fitterG.status)
fwhmG = 2*numpy.sqrt(2*numpy.log(2))*fitterG.params[2]
print("FWHM Gaussian: ", fwhmG)
# Plot the result
rc('legend', fontsize=6)
fig = figure()
frame1 = fig.add_subplot(1,1,1)
xd = numpy.linspace(x.min(), x.max(), 200)
frame1.plot(x, y, 'bo', label="data")
label = "Model with Voigt function"
frame1.plot(xd, funcV(fitter.params,xd), 'g', label=label)
label = "Model with Gaussian function"
frame1.plot(xd, funcG(fitterG.params,xd), 'm', ls='--', label=label)
offset = a_back+b_back*nu_0
frame1.plot((nu_0-hwhm,nu_0+hwhm), (offset+amp/2,offset+amp/2), 'r', label='fwhm')
frame1.plot(xd, a_back+b_back*xd, "y", label='Background')
frame1.set_xlabel("$\\nu$")
frame1.set_ylabel("$\\phi(\\nu)$")
vals = (fitter.chi2_min, fitter.rchi2_min, fitter.dof)
title = "Profile data with Voigt- vs. Gaussian model"
frame1.set_title(title, y=1.05)
frame1.grid(True)
leg = frame1.legend(loc=3)
show() |
from django.core import cache
from django.core.exceptions import MiddlewareNotUsed
from versionedcache.debug import CacheClass
class CacheDebugMiddleware(object):
def __init__(self):
if not isinstance(cache | .cache, CacheClass):
raise MiddlewareNotUsed()
def process_request(self, request):
if request.user.is_superuser and 'cache_debug' in request.GET:
action | = request.GET['cache_debug']
# only two actions allowed
if action not in ('turn_off', 'write_only'):
return
# implement action
getattr(cache.cache, action)()
|
) == 'extended':
return None
separator = ''
if 'cciss' in self.name or 'loop' in self.name:
separator = 'p'
return '%s%s%s' % (self.name, separator, self.next_count())
class Partition(object):
def __init__(self, name, count, device, begin, end, partition_type,
flags=None, guid=None, configdrive=False):
self.name = name
self.count = count
self.device = device
self.name = name
self.begin = begin
self.end = end
self.type = partition_type
self.flags = flags or []
self.guid = guid
self.configdrive = configdrive
def set_flag(self, flag):
if flag not in self.flags:
self.flags.append(flag)
def set_guid(self, guid):
self.guid = guid
class Pv(object):
def __init__(self, name, metadatasize=16, metadatacopies=2):
self.name = name
self.metadatasize = metadatasize
self.metadatacopies = metadatacopies
class Vg(object):
def __init__(self, name, pvnames=None):
self.name = name
self.pvnames = pvnames or []
def add_pv(self, pvname):
if pvname not in self.pvnames:
self.pvnames.append(pvname)
class Lv(object):
def __init__(self, name, vgname, size):
self.name = name
self.vgname = vgname
self.size = size
@property
def device_name(self):
return '/dev/mapper/%s-%s' % (self.vgname.replace('-', '--'),
self.name.replace('-', '--'))
class Md(object):
def __init__(self, name, level,
devices=None, spares=None):
self.name = name
self.level = level
self.devices = devices or []
self.spares = spares or []
def add_device(self, device):
if device in self.devices or device in self.spares:
raise errors.MDDeviceDuplicationError(
'Error while attaching device to md: '
'device %s is already attached' % device)
self.devices.append(device)
def add_spare(self, device):
if device in self.devices or device in self.spares:
raise errors.MDDeviceDuplicationError(
'Error while attaching device to md: '
'device %s is already attached' % device)
self.spares.append(device)
class Fs(object):
def __init__(self, device, mount=None,
fs_type=None, fs_options=None, fs_label=None):
self.device = device
self.mount = mount
self.type = fs_type or 'xfs'
self.options = fs_options or ''
self.label = fs_label or ''
class PartitionScheme(object):
def __init__(self):
self.parteds = []
self.mds = []
self.pvs = []
self.vgs = []
self.lvs = []
self.fss = []
self.kernel_params = ''
def add_parted(self, **kwargs):
parted = Parted(**kwargs)
self.parteds.append(parted)
return parted
def add_pv(self, **kwargs):
pv = Pv(**kwargs)
self.pvs.append(pv | )
return pv
def add_vg(self, **kwargs):
vg = Vg(**kwargs)
self.vgs.append(vg)
return vg
def add_lv(self, **kwargs):
lv = Lv(**kwargs)
self.lvs.append(lv)
return lv
def add_fs(self, **kwargs):
fs = Fs(**kwargs)
sel | f.fss.append(fs)
return fs
def add_md(self, **kwargs):
mdkwargs = {}
mdkwargs['name'] = kwargs.get('name') or self.md_next_name()
mdkwargs['level'] = kwargs.get('level') or 'mirror'
md = Md(**mdkwargs)
self.mds.append(md)
return md
def md_by_name(self, name):
found = filter(lambda x: x.name == name, self.mds)
if found:
return found[0]
def md_by_mount(self, mount):
fs = self.fs_by_mount(mount)
if fs:
return self.md_by_name(fs.device)
def md_attach_by_mount(self, device, mount, spare=False, **kwargs):
md = self.md_by_mount(mount)
if not md:
md = self.add_md(**kwargs)
fskwargs = {}
fskwargs['device'] = md.name
fskwargs['mount'] = mount
fskwargs['fs_type'] = kwargs.pop('fs_type', None)
fskwargs['fs_options'] = kwargs.pop('fs_options', None)
fskwargs['fs_label'] = kwargs.pop('fs_label', None)
self.add_fs(**fskwargs)
md.add_spare(device) if spare else md.add_device(device)
return md
def md_next_name(self):
count = 0
while True:
name = '/dev/md%s' % count
if name not in [md.name for md in self.mds]:
return name
if count >= 127:
raise errors.MDAlreadyExistsError(
'Error while generating md name: '
'names from /dev/md0 to /dev/md127 seem to be busy, '
'try to generate md name manually')
count += 1
def vg_by_name(self, vgname):
found = filter(lambda x: (x.name == vgname), self.vgs)
if found:
return found[0]
def pv_by_name(self, pvname):
found = filter(lambda x: (x.name == pvname), self.pvs)
if found:
return found[0]
def vg_attach_by_name(self, pvname, vgname,
metadatasize=16, metadatacopies=2):
vg = self.vg_by_name(vgname) or self.add_vg(name=vgname)
pv = self.pv_by_name(pvname) or self.add_pv(
name=pvname, metadatasize=metadatasize,
metadatacopies=metadatacopies)
vg.add_pv(pv.name)
def fs_by_mount(self, mount):
found = filter(lambda x: (x.mount and x.mount == mount), self.fss)
if found:
return found[0]
def fs_by_device(self, device):
found = filter(lambda x: x.device == device, self.fss)
if found:
return found[0]
def lv_by_device_name(self, device_name):
found = filter(lambda x: x.device_name == device_name, self.lvs)
if found:
return found[0]
def root_device(self):
fs = self.fs_by_mount('/')
if not fs:
raise errors.WrongPartitionSchemeError(
'Error while trying to find root device: '
'root file system not found')
return fs.device
def boot_device(self, grub_version=2):
# We assume /boot is a separate partition. If it is not
# then we try to use root file system
boot_fs = self.fs_by_mount('/boot') or self.fs_by_mount('/')
if not boot_fs:
raise errors.WrongPartitionSchemeError(
'Error while trying to find boot device: '
'boot file system not fount, '
'it must be a separate mount point')
if grub_version == 1:
# Legacy GRUB has a limitation. It is not able to mount MD devices.
# If it is MD compatible it is only able to ignore MD metadata
# and to mount one of those devices which are parts of MD device,
# but it is possible only if MD device is a MIRROR.
md = self.md_by_name(boot_fs.device)
if md:
try:
return md.devices[0]
except IndexError:
raise errors.WrongPartitionSchemeError(
'Error while trying to find boot device: '
'md device %s does not have devices attached' %
md.name)
# Legacy GRUB is not able to mount LVM devices.
if self.lv_by_device_name(boot_fs.device):
raise errors.WrongPartitionSchemeError(
'Error while trying to find boot device: '
'found device is %s but legacy grub is not able to '
'mount logical volumes' %
boot_fs.device)
return boot_fs.device
def configdrive_device(self):
# Configdrive device must be a small (about 10M) partition
# on one of node hard drives. This partitio |
# -*- coding: utf-8 -*-
from sqlalchemy.ext.declarative import declarative_base
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalch | emy.orm import scoped_session, sessionmaker
DeclarativeBase = declarative_base()
maker = sessionmaker(autoflush=True, autocommit=False,
extension=ZopeTransactionExtension())
DBSession = scoped_session(maker)
metadata = DeclarativeBase.metadata
def init_model(engine1 ):
"""Call me before using any of the tables or classes in the model."""
DBSession.configure(bind=engine1)
metadata.bind = en | gine1
from .logsurvey import LogSurvey |
se()
sock = None
continue
if sock is None:
raise Retry
return sock
familystr = {socket.AF_INET: "IPv4", socket.AF_INET6: "IPv6",
socket.AF_UNSPEC: "Unspecified-IPv4/6"}
opmsg = "Connecting over %s to %s:%s"
self.info(opmsg, familystr.get(family, "Unknown"), host, port)
opmsg = opmsg % (familystr.get(family, "Unknown"), host, port)
return self._try_until_timeout_expires(opmsg, check_fun)
def _get_ips(self, server, version=4, network=None):
"""Get the IPs of a server from the detailed server info
If network not given then get the public IPs. Else the IPs
attached to that network
"""
assert version in (4, 6)
nics = server['attachments']
addrs = []
for nic in nics:
net_id = nic['network_id']
if network is None:
if self.clients.network.get_network_details(net_id)['public']:
if nic['ipv' + str(version)]:
addrs.append(nic['ipv' + str(version)])
else:
if net_id == network['id']:
if nic['ipv' + str(version)]:
addrs.append(nic['ipv' + str(version)])
self.assertGreater(len(addrs), 0,
"Can not get IPs from server attachments")
for addr in addrs:
self.assertEqual(IPy.IP(addr).version(), version)
if network is None:
msg = "Server's public IPv%s is %s"
for addr in addrs:
self.info(msg, version, addr)
else:
msg = "Server's IPv%s attached to network \"%s\" is %s"
for addr in addrs:
self.info(msg, version, network['id'], addr)
return addrs
def _insist_on_ping(self, ip_addr, version=4, should_fail=False):
"""Test server responds to a single IPv4 of IPv6 ping"""
def check_fun():
"""Ping to server"""
cmd = ("ping%s -c 3 -w 20 %s" %
("6" if version == 6 else "", ip_addr))
ping = subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
ping.communicate()
ret = ping.wait()
if ret != 0:
raise Retry
assert version in (4, 6)
opmsg = "Sent IPv%s ping requests to %s"
self.info(opmsg, version, ip_addr)
opmsg = opmsg % (version, ip_addr)
if should_fail:
self._try_once(opmsg, check_fun, should_fail=True)
else:
self._try_until_timeout_expires(opmsg, check_fun)
def _image_is(self, image, osfamily):
"""Return true if the image is of `osfamily'"""
d_image = self.clients.cyclades.get_image_details(image['id'])
return d_image['metadata']['osfamily'].lower().find(osfamily) >= 0
# pylint: disable=no-self-use
def _ssh_execute(self, hostip, username, password, command):
"""Execute a command via ssh"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(hostip, username=username, password=password)
except (paramiko.SSHException, socket.error) as err:
self.warning("%s", err.message)
raise Retry()
_, stdout, _ = ssh.exec_command(command)
status = stdout.channel.recv_exit_status()
output = stdout.readlines()
ssh.close()
return output, status
def _insist_get_hostname_over_ssh(self, hostip, username, password):
"""Connect to server using ssh and get it's hostname"""
def check_fun():
"""Get hostname"""
try:
lines, status = self._ssh_execute(
hostip, username, password, "hostname")
self.assertEqual(status, 0)
self.assertEqual(len(lines), 1)
# Remove new line
return lines[0].strip('\n')
except AssertionError:
raise Retry()
opmsg = "Connecting to server using ssh and get it's hostname"
self.info(opmsg)
hostname = self._try_until_timeout_expires(opmsg, check_fun)
self.info("Server's hostname is %s", hostname)
return hostname
# pylint: disable=too-many-arguments
def _check_file_through_ssh(self, hostip, username, password,
remotepath, content):
"""Fetch file from server and compare contents"""
def check_fun():
"""Fetch file"""
try:
tr | ansport = paramiko.Transport((hostip, 22))
transport.connect(username=username, password=password)
with tempfile.NamedTemporaryFile() as ftmp:
sftp = parami | ko.SFTPClient.from_transport(transport)
sftp.get(remotepath, ftmp.name)
sftp.close()
transport.close()
self.info("Comparing file contents")
remote_content = base64.b64encode(ftmp.read())
self.assertEqual(content, remote_content)
except paramiko.SSHException as err:
self.warning("%s", err.message)
raise Retry()
opmsg = "Fetching file %s from remote server" % remotepath
self.info(opmsg)
self._try_until_timeout_expires(opmsg, check_fun)
# ----------------------------------
# Networks
def _create_network(self, cidr="10.0.1.0/28", dhcp=True,
project_id=None):
"""Create a new private network"""
name = self.run_id
network = self.clients.network.create_network(
"MAC_FILTERED", name=name, shared=False,
project_id=project_id)
self.info("Network with id %s created", network['id'])
subnet = self.clients.network.create_subnet(
network['id'], cidr=cidr, enable_dhcp=dhcp)
self.info("Subnet with id %s created", subnet['id'])
# Verify quotas
if project_id is None:
project_id = self._get_uuid()
changes = \
{project_id: [(QNET, QADD, 1, None)]}
self._check_quotas(changes)
# Test if the right name is assigned
self.assertEqual(network['name'], name)
self.assertEqual(network['tenant_id'], project_id)
return network
def _delete_networks(self, networks, error=False):
"""Delete a network"""
for net in networks:
self.info("Deleting network with id %s", net['id'])
self.clients.network.delete_network(net['id'])
if error:
curr_states = ["ACTIVE", "SNF:DRAINED", "ERROR"]
else:
curr_states = ["ACTIVE", "SNF:DRAINED"]
for net in networks:
self._insist_on_network_transition(net, curr_states, "DELETED")
# Networks no longer in network list
new_networks = [n['id'] for n in self._get_list_of_networks()]
for net in networks:
self.info("Verifying that network with id %s is no longer in "
"network list", net['id'])
self.assertNotIn(net['id'], new_networks)
# Verify quotas
changes = \
{self._get_uuid(): [(QNET, QREMOVE, len(networks), None)]}
self._check_quotas(changes)
def _get_public_networks(self, networks=None):
"""Get the public networks"""
if networks is None:
networks = self._get_list_of_networks(detail=True)
self.info("Getting the public networks")
public_networks = []
for net in networks:
if net['SNF:floating_ip_pool'] and net['public']:
public_networks.append(net)
self.assertNotEqual(public_networks, [],
"Could not find a public network to use")
return public_networks
def _create_floating_ip(self, project_id=None):
"""Create a new floating ip"""
|
context):
"usage: latest"
self._init_source()
if not utils.wait4dc("transition", not options.batch):
return False
self._set_source("live")
crm_report().refresh_source()
f = self._get_pe_byidx(-1)
if not f:
return False
crm_report().show_transition_log(f)
    @command.skill_level('administrator')
    @command.completers_repeating(compl.call(lambda: crm_report().rsc_list()))
    def do_resource(self, context, *args):
        "usage: resource <rsc> [<rsc> ...]"
        # Make sure a report source is loaded before querying it.
        self._init_source()
        # Show report events for the named resources.
        return crm_report().resource(*args)
    @command.skill_level('administrator')
    @command.wait
    @command.completers_repeating(compl.call(lambda: crm_report().node_list()))
    def do_node(self, context, *args):
        "usage: node <node> [<node> ...]"
        # Make sure a report source is loaded before querying it.
        self._init_source()
        # Show report events for the named nodes.
        return crm_report().node(*args)
    @command.skill_level('administrator')
    @command.completers_repeating(compl.call(lambda: crm_report().node_list()))
    def do_log(self, context, *args):
        "usage: log [<node> ...]"
        # Make sure a report source is loaded before querying it.
        self._init_source()
        # Show the cluster log, optionally filtered by node name(s).
        return crm_report().log(*args)
def ptest(self, nograph, scores, utilization, actions, verbosity):
'Send a decompressed self.pe_file to ptest'
try:
s = bz2.decompress(open(self.pe_file).read())
except IOError, msg:
common_err("open: %s" % msg)
return False
return utils.run_ptest(s, nograph, scores, utilization, actions, verbosity)
    @command.skill_level('administrator')
    def do_events(self, context):
        "usage: events"
        # Make sure a report source is loaded before querying it.
        self._init_source()
        # List the notable cluster events found in the report.
        return crm_report().events()
    @command.skill_level('administrator')
    @command.completers_repeating(compl.join(compl.call(lambda: crm_report().peinputs_list()),
                                             compl.choice(['v'])))
    def do_peinputs(self, context, *args):
        """usage: peinputs [{<range>|<number>} ...] [v]"""
        self._init_source()
        argl = list(args)
        opt_l = utils.fetch_opts(argl, ["v"])  # 'v' selects the verbose listing
        if argl:
            l = []
            for s in argl:
                # Each remaining argument is a number N or a range N:M.
                a = utils.convert2ints(s.split(':'))
                if a and len(a) == 2 and not utils.check_range(a):
                    common_err("%s: invalid peinputs range" % a)
                    return False
                l += crm_report().pelist(a, long=("v" in opt_l))
        else:
            # No arguments: list every PE input in the report.
            l = crm_report().pelist(long=("v" in opt_l))
        if not l:
            return False
        s = '\n'.join(l)
        utils.page_string(s)
def _get_pe_byname(self, s):
l = crm_report().find_pe_files(s)
if len(l) == 0:
common_err("%s: path not found" % s)
return None
elif len(l) > 1:
common_err("%s: path ambiguous" % s)
return None
return l[0]
    def _get_pe_byidx(self, idx):
        # Resolve a PE input by list index; idx counts from the end of the
        # PE input list (-1 == most recent transition).
        l = crm_report().pelist()
        if len(l) < abs(idx):
            if idx == -1:
                common_err("no transitions found in the source")
            else:
                # Report the 1-based index the user actually typed.
                common_err("PE input file for index %d not found" % (idx+1))
            return None
        return l[idx]
def _get_pe_bynum(self, n):
l = crm_report().pelist([n])
if len(l) == 0:
common_err("PE file %d not found" % n)
return None
elif len(l) > 1:
common_err("PE file %d ambiguous" % n)
return None
return l[0]
def _get_pe_input(self, pe_spec):
'''Get PE input file from the <number>|<index>|<file>
spec.'''
if re.search('pe-', pe_spec):
f = self._get_pe_byname(pe_spec)
elif utils.is_int(pe_spec):
n = int(pe_spec)
if n <= 0:
f = self._get_pe_byidx(n-1)
else:
f = self._get_pe_bynum(n)
else:
f = self._get_pe_byidx(-1)
return f
    def _show_pe(self, f, opt_l):
        # Run ptest over PE file f (with display options opt_l), then show
        # its transition log.
        self.pe_file = f  # self.pe_file needed by self.ptest
        ui_utils.ptestlike(self.ptest, 'vv', "transition", opt_l)
        return crm_report().show_transition_log(f)
    def _display_dot(self, f):
        '''Render the transition graph for PE file f; True on success.'''
        if not config.core.dotty:
            common_err("install graphviz to draw transition graphs")
            return False
        # Convert the PE input to its corresponding dot file from the report.
        f = crm_report().pe2dot(f)
        if not f:
            common_err("dot file not found in the report")
            return False
        utils.show_dot_graph(f, keep_file=True, desc="configuration graph")
        return True
def _pe2shadow(self, f, argl):
try:
name = argl[0]
except:
name = os.path.basename(f).replace(".bz2", "")
common_info("transition %s saved to shadow %s" % (f, name))
return xmlutil.pe2shadow(f, name)
    @command.skill_level('administrator')
    @command.completers(compl.join(compl.call(lambda: crm_report().peinputs_list()),
                                   compl.choice(['log', 'showdot', 'save'])))
    def do_transition(self, context, *args):
        """usage: transition [<number>|<index>|<file>] [nograph] [v...] [scores] [actions] [utilization]
        transition showdot [<number>|<index>|<file>]
        transition log [<number>|<index>|<file>]
        transition save [<number>|<index>|<file> [name]]"""
        self._init_source()
        argl = list(args)
        subcmd = "show"
        # NOTE(review): "tags" is accepted here but missing from both the
        # completer choices and the usage text above -- confirm intent.
        if argl and argl[0] in ("showdot", "log", "save", "tags"):
            subcmd = argl[0]
            del argl[0]
        if subcmd == "show":
            # ptest display options (nograph, scores, ...) apply only to "show".
            opt_l = utils.fetch_opts(argl, ptest_options)
        if argl:
            # An explicit PE input spec (number, index or file name).
            f = self._get_pe_input(argl[0])
            del argl[0]
        else:
            # Default to the most recent transition.
            f = self._get_pe_byidx(-1)
        # Leftover arguments are only legal for "save" (one optional name).
        if (subcmd == "save" and len(argl) > 1) or \
                (subcmd in ("show", "showdot", "log") and argl):
            syntax_err(args, context="transition")
            return False
        if not f:
            return False
        if subcmd == "show":
            common_info("running ptest with %s" % f)
            rc = self._show_pe(f, opt_l)
        elif subcmd == "showdot":
            rc = self._display_dot(f)
        elif subcmd == "save":
            rc = self._pe2shadow(f, argl)
        elif subcmd == "tags":
            rc = crm_report().show_transition_tags(f)
        else:
            # "log": show the detailed transition log.
            rc = crm_report().show_transition_log(f, True)
        return rc
def _save_cib_env(self):
try:
self._cib_f_save = os.environ["CIB_file"]
except:
self._cib_f_save = None
def _reset_cib_env(self):
if self._cib_f_save:
os.environ["CIB_file"] = self._cib_f_save
else:
try:
del os.environ["CIB_file"]
except:
pass
    def _setup_cib_env(self, pe_f):
        '''Setup the CIB_file environment variable.
        Alternatively, we could (or should) use shadows, but the
        file/shadow management would be a bit involved.'''
        if pe_f != "live":
            os.environ["CIB_file"] = pe_f
        else:
            # "live": point the CIB tools back at the real cluster CIB.
            self._reset_cib_env()
def _pe_config_obj(self, pe_f):
'''Return set_obj of the configuration. It can later be
rendered using the repr() method.'''
self._setup_cib_env(pe_f)
if not cib_factory.refresh():
set_obj = mkset_obj("NOOBJ")
else:
set_obj = mkset_obj()
return set_obj
    def _pe_config_noclr(self, pe_f):
        '''Configuration with no formatting (no colors).'''
        # Plain-text rendering of the configuration set for pe_f.
        return self._pe_config_obj(pe_f).repr_nopretty()
    def _pe_config_plain(self, pe_f):
        '''Configuration with no formatting (but with colors).'''
        # format=0 disables pretty-printing in the set_obj renderer.
        return self._pe_config_obj(pe_f).repr(format=0)
    def _pe_config(self, pe_f):
        '''Formatted configuration.'''
        # Default repr(): fully formatted rendering.
        return self._pe_config_obj(pe_f).repr()
def _pe_status(self, pe_f):
'''Return status as a string.'''
self._setup_cib_env(pe_f)
rc, s = cmd_status.crm_mon()
if rc != 0:
if s:
comm |
nd seat['price'] and seat['sku']:
price = Decimal(seat.get('price'))
return price, seat.get('sku')
return None, None
def get_unenrolled_courses(courses, user_enrollments):
    """
    Given a list of courses and a list of user enrollments, return the courses in which the user is not enrolled.
    Depending on the enrollments that are passed in, this method can be used to determine the courses in a program in
    which the user has not yet enrolled or the courses in a program for which the user has not yet purchased a
    certificate.
    """
    # Build the enrolled-course-id set once instead of per course run.
    enrollment_course_ids = {enrollment.course_id for enrollment in user_enrollments}
    return [course for course in courses
            if not is_enrolled_in_course(course, enrollment_course_ids)]
def is_enrolled_in_all_courses(courses, user_enrollments):
    """
    Determine if the user is enrolled in all of the courses
    """
    # Build the enrolled-course-id set once instead of per course run.
    enrollment_course_ids = {enrollment.course_id for enrollment in user_enrollments}
    # Every course must have at least one enrolled run.
    return all(is_enrolled_in_course(course, enrollment_course_ids)
               for course in courses)
def is_enrolled_in_course(course, enrollment_course_ids):
    """
    Determine if the user is enrolled in this course
    """
    # A course counts as enrolled when any of its runs is enrolled;
    # a missing/empty 'course_runs' list means not enrolled.
    course_runs = course.get('course_runs') or []
    return any(is_enrolled_in_course_run(course_run, enrollment_course_ids)
               for course_run in course_runs)
def is_enrolled_in_course_run(course_run, enrollment_course_ids):
    """
    Determine if the user is enrolled in this course run.

    Returns False (instead of raising) when the course run key is missing
    or cannot be parsed.
    """
    key = None
    try:
        key = course_run.get('key')
        course_run_key = CourseKey.from_string(key)
        return course_run_key in enrollment_course_ids
    except InvalidKeyError:
        # logger.warning: .warn is a deprecated alias of .warning.
        logger.warning(
            u'Unable to determine if user was enrolled since the course key {} is invalid'.format(key)
        )
        return False  # Invalid course run key. Assume user is not enrolled.
def get_dashboard_course_info(user, dashboard_enrollments):
    """
    Given a list of enrollments shown on the dashboard, return a dict of course ids and experiment info for that course

    Returns None when the DASHBOARD_INFO_FLAG waffle flag is disabled.
    """
    course_info = None
    if DASHBOARD_INFO_FLAG.is_enabled():
        # Get the enrollments here since the dashboard filters out those with completed entitlements
        user_enrollments = CourseEnrollment.objects.select_related('course').filter(user_id=user.id)
        # Map str(course id) -> experiment metadata for each dashboard enrollment.
        course_info = {
            str(dashboard_enrollment.course): get_base_experiment_metadata_context(dashboard_enrollment.course,
                                                                                   user,
                                                                                   dashboard_enrollment,
                                                                                   user_enrollments)
            for dashboard_enrollment in dashboard_enrollments
        }
    return course_info
def get_experiment_user_metadata_context(course, user):
    """
    Return a context dictionary with the keys used by the user_metadata.html.
    """
    enrollment = None
    # TODO: clean up as part of REVO-28 (START)
    user_enrollments = None
    # NOTE(review): audit_enrollments is assigned but never used below --
    # candidate for the REVO-28 cleanup.
    audit_enrollments = None
    has_non_audit_enrollments = False
    try:
        user_enrollments = CourseEnrollment.objects.select_related('course').filter(user_id=user.id)
        has_non_audit_enrollments = user_enrollments.exclude(mode__in=CourseMode.UPSELL_TO_VERIFIED_MODES).exists()
        # TODO: clean up as part of REVO-28 (END)
        enrollment = CourseEnrollment.objects.select_related(
            'course'
        ).get(user_id=user.id, course_id=course.id)
    except CourseEnrollment.DoesNotExist:
        pass  # Not enrolled, use the default values
    has_entitlements = False
    if user.is_authenticated():
        has_entitlements = CourseEntitlement.objects.filter(user=user).exists()
    context = get_base_experiment_metadata_context(course, user, enrollment, user_enrollments)
    has_staff_access = has_staff_access_to_preview_mode(user, course.id)
    forum_roles = []
    # NOTE(review): is_authenticated is used as a property here but called
    # as a method above and below -- confirm which this Django version
    # expects (modern Django makes it a property).
    if user.is_authenticated:
        forum_roles = list(Role.objects.filter(users=user, course_id=course.id).values_list('name').distinct())
    # get user partition data
    if user.is_authenticated():
        partition_groups = get_all_partitions_for_course(course)
        user_partitions = get_user_partition_groups(course.id, partition_groups, user, 'name')
    else:
        user_partitions = {}
    # TODO: clean up as part of REVO-28 (START)
    context['has_non_audit_enrollments'] = has_non_audit_enrollments or has_entitlements
    # TODO: clean up as part of REVO-28 (END)
    context['has_staff_access'] = has_staff_access
    context['forum_roles'] = forum_roles
    context['partition_groups'] = user_partitions
    return context
def get_base_experiment_metadata_context(course, user, enrollment, user_enrollments):
    """
    Return a context dictionary with the keys used by dashboard_metadata.html and user_metadata.html
    """
    enrollment_mode = None
    enrollment_time = None
    # TODO: clean up as part of REVEM-199 (START)
    program_key = get_program_context(course, user_enrollments)
    # TODO: clean up as part of REVEM-199 (END)
    # Only an active enrollment contributes its mode and creation time.
    if enrollment and enrollment.is_active:
        enrollment_mode = enrollment.mode
        enrollment_time = enrollment.created
    # upgrade_link and upgrade_date should be None if user has passed their dynamic pacing deadline.
    upgrade_link, upgrade_date = check_and_get_upgrade_link_and_date(user, enrollment, course)
    return {
        'upgrade_link': upgrade_link,
        'upgrade_price': six.text_type(get_cosmetic_verified_display_price(course)),
        'enrollment_mode': enrollment_mode,
        'enrollment_time': enrollment_time,
        'pacing_type': 'self_paced' if course.self_paced else 'instructor_paced',
        'upgrade_deadline': upgrade_date,
        'audit_access_deadline': get_audit_access_expiration(user, course),
        'course_key': course.id,
        'course_start': course.start,
        'course_end': course.end,
        # TODO: clean up as part of REVEM-199 (START)
        'program_key_fields': program_key,
        # TODO: clean up as part of REVEM-199 (END)
    }
def get_audit_access_expiration(user, course):
    """
    Return the expiration date for the user's audit access to this course.

    Returns None when course duration limits are not enabled for this
    user/course combination.
    """
    if not CourseDurationLimitConfig.enabled_for_enrollment(user=user, course_key=course.id):
        return None
    return get_user_course_expiration_date(user, course)
# TODO: clean up as part of REVEM-199 (START)
def get_program_context(course, user_enrollments):
"""
Return a context dictionary with program information.
"""
program_key = None
non_audit_enrollments = user_enrollments.exclude(mode__in=CourseMode.UPSELL_TO_VERIFIED_MODES)
if PROGRAM_INFO_FLAG.is_enabled():
programs = get_programs(course=course.id)
if programs:
# A course can be in multiple programs, but we're just grabbing the first one
program = programs[0]
complete_enrollment = False
has_courses_left_to_purchase = False
total_courses = None
courses = program.get('courses')
courses_left_to_purchase_price = None
courses_left_to_purchase_url = None
program_uuid = program.get('uuid')
is_eligible_for_one_click_purchase = program.get('is_program_eligible_for_one_click_purchase')
if courses is not None:
|
#-*- coding: utf-8 -*-
import urllib2
import json
import CommonFunctions
common = CommonFunctions
from xml.dom import minidom
from resources.lib import utils
from resources.lib import globalvar
# Channel registration data -- presumably read by the plugin framework
# when listing available channels (TODO confirm against the loader).
title=['ARTE']  # display name(s)
img=['arte']  # icon basename(s)
readyForUse=True  # channel is enabled
def fix_text(text):
    # Unescape '&' and map the apostrophe entity to a space in feed
    # text.  NOTE(review): Python 2 only -- encode('utf-8') returns a str
    # there; under Python 3 the trailing replace() would fail on bytes.
    return text.replace('&','&').encode('utf-8').replace(''',' ')
def list_shows(channel,folder):
    # Build [channel, id, label, '', type] entries from ARTE's sitemap feed.
    shows=[]
    d=dict()  # seen-set used to suppress duplicate entries
    filePath=utils.downloadCatalog('http://www.arte.tv/papi/tvguide-flow/sitemap/feeds/videos/F.xml','ARTE.XML',False)
    if folder=='none':
        # Top level: list each distinct category as a folder.
        xml = open(filePath).read()
        url=common.parseDOM(xml, "url")
        for i in range(0, len(url)):
            categoryTab=common.parseDOM(url[i], "video:category")
            if len(categoryTab)>0:
                category=fix_text(categoryTab[0])
                if category not in d:
                    shows.append( [channel,category,category,'','folder'] )
                    d[category]=category
    else:
        # Inside a category: list the distinct show titles it contains.
        xml = open(filePath).read()
        url=common.parseDOM(xml, "url")
        for i in range(0, len(url)):
            titleTab=common.parseDOM(url[i], "video:title")
            if len(titleTab)>0:
                title=fix_text(titleTab[0])
                categoryTab=common.parseDOM(url[i], "video:category")
                # Unless the "arteFull" setting is on, pretend every video
                # carries the ARTE+7 tag so only catch-up content is listed.
                if globalvar.ADDON.getSetting('arteFull')=='true':
                    videoTag=common.parseDOM(url[i], "video:tag")[0]
                else:
                    videoTag='ARTE+7'
                if len(categoryTab)>0:
                    if(fix_text(categoryTab[0])==folder and title not in d and videoTag=='ARTE+7'):
                        shows.append( [channel,title,title,'','shows'] )
                        d[title]=title
    return shows
def getVideoURL(channel,video_id):
    """Return the stream URL for video_id, honouring the quality setting.

    Prefers the HTTP(S) variant for the chosen quality and falls back to
    the corresponding RTMP variant when the HTTP one is absent.
    """
    #Get JSON file
    jsonFile=urllib2.urlopen('http://arte.tv/papi/tvguide/videos/stream/player/F/'+ video_id + '/ALL/ALL.json').read()
    #Parse JSON to
    jsoncat = json.loads(jsonFile)
    streams=jsoncat['videoJsonPlayer']['VSR']
    url=''
    if globalvar.ADDON.getSetting('%sQuality' % (channel))=='HD':
        #HD HTTP
        if 'HTTP_MP4_SQ_1' in streams:
            url=streams['HTTP_MP4_SQ_1']['url']
        #HD RTMP
        else:
            url=streams['RTMP_SQ_1']['streamer'] + streams['RTMP_SQ_1']['url']
    if globalvar.ADDON.getSetting('%sQuality' % (channel))=='SD' or url=='':
        #SD HTTP
        # Bug fix: the original tested the constant `if 'HLS_SQ_1':` (always
        # true) and then unconditionally overwrote url with the HLS variant,
        # so the RTMP fallback was unreachable and a missing HLS_SQ_1 entry
        # raised KeyError.  Test for actual presence instead.
        if 'HLS_SQ_1' in streams:
            url=streams['HLS_SQ_1']['url']
        #SD RTMP
        else:
            url=streams['RTMP_MQ_1']['streamer'] + streams['RTMP_MQ_1']['url']
    return url
def list_videos(channel,show_title):
    # Collect playable entries for one show from ARTE's sitemap feed.
    videos=[]
    filePath=utils.downloadCatalog('http://www.arte.tv/papi/tvguide-flow/sitemap/feeds/videos/F.xml','ARTE.XML',False)
    xml = open(filePath).read()
    url=common.parseDOM(xml, "url")
    for i in range(0, len(url)):
        titleTab=common.parseDOM(url[i], "video:title")
        if len(titleTab)>0:
            title=fix_text(titleTab[0])
            if(title==show_title):
                name=''
                image_url=''
                date=''
                duration=''
                views=''
                desc=''
                rating=''
                tmpTab=common.parseDOM(url[i], "video:publication_date")
                if len(tmpTab)>0:
                    date=tmpTab[0][:10]  # first 10 chars: the date part
                tmpTab=common.parseDOM(url[i], "video:duration")
                if len(tmpTab)>0:
                    duration=float(tmpTab[0])/60  # /60: presumably seconds -> minutes
                tmpTab=common.parseDOM(url[i], "video:view_count")
                if len(tmpTab)>0:
                    views=tmpTab[0]
                tmpTab=common.parseDOM(url[i], "video:rating")
                if len(tmpTab)>0:
                    rating=tmpTab[0]
                descriptionTab=common.parseDOM(url[i], "video:description")
                if len(descriptionTab)>0:
                    name=fix_text(descriptionTab[0])
                    desc=fix_text(descriptionTab[0])
                # Derive the video id from the player location URL.
                tmpTab=common.parseDOM(url[i],"video:player_loc")
                if len(tmpTab)>0:
                    if tmpTab[0]=="1":
                        tmpTab=common.parseDOM(url[i], "video:id")
                        if len(tmpTab)>0:
                            video_id=tmpTab[0][28:28+10] + "_PLUS7-F"
                    else:
                        # Pull the id out of the URL-encoded player path
                        # ("%2Fplayer%2FF%2F<id>%2F...").
                        start=tmpTab[0].find("%2Fplayer%2FF%2F")
                        end=tmpTab[0].find("%2F", start+16)
                        video_id=tmpTab[0][start+16:end]
                if video_id.find("EXTRAIT")>0 :
                    name="Extrait-" + name
                videoTag=common.parseDOM(url[i], "video:tag")[0]
                picTab=common.parseDOM(url[i], "video:thumbnail_loc")
                if len(picTab)>0:
                    image_url=picTab[0]
                infoLabels={ "Title": name,"Plot":desc,"Aired":date,"Duration": duration, "Year":date[:4]}
                # Skip non-ARTE+7 items unless the "arteFull" setting is on.
                if not(globalvar.ADDON.getSetting('arteFull')=='true' and videoTag!='ARTE+7'):
                    videos.append( [channel, video_id, name, image_url,infoLabels,'play'] )
    return videos
"""Test cases for the switcher_kis component."""
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Generator
from pytest import raises
from homeassistant.components.switcher_kis import (
CONF_AUTO_OFF,
DATA_DEVICE,
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
SERVICE_SET_AUTO_OFF_SCHEMA,
SIGNAL_SWITCHER_DEVICE_UPDATE,
)
from homeassistant.const import CONF_ENTITY_ID
from homeassistant.core import Context, callback
from homeassistant.exceptions import Unauthorized, UnknownUser
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
from homeassistant.util import dt
from .consts import (
DUMMY_AUTO_OFF_SET,
DUMMY_DEVICE_ID,
DUMMY_DEVICE_NAME,
DUMMY_DEVICE_STATE,
DUMMY_ELECTRIC_CURRENT,
DUMMY_IP_ADDRESS,
DUMMY_MAC_ADDRESS,
DUMMY_PHONE_ID,
DUMMY_POWER_CONSUMPTION,
DUMMY_REMAINING_TIME,
MANDATORY_CONFIGURATION,
SWITCH_ENTITY_ID,
)
from tests.common import async_fire_time_changed, async_mock_service
if TYPE_CHECKING:
from aioswitcher.devices import SwitcherV2Device
from tests.common import MockUser
async def test_failed_config(
    hass: HomeAssistantType, mock_failed_bridge: Generator[None, Any, None]
) -> None:
    """Test failed configuration."""
    # A failing bridge must make component setup return False.
    assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION) is False
async def test_minimal_config(
    hass: HomeAssistantType, mock_bridge: Generator[None, Any, None]
) -> None:
    """Test setup with configuration minimal entries."""
    # The mandatory configuration alone is enough for a successful setup.
    assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION)
async def test_discovery_data_bucket(
    hass: HomeAssistantType, mock_bridge: Generator[None, Any, None]
) -> None:
    """Test the event send with the updated device."""
    assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION)
    await hass.async_block_till_done()
    # The discovered device lands in the domain data bucket; every field
    # should mirror the dummy device supplied by the bridge fixture.
    device = hass.data[DOMAIN].get(DATA_DEVICE)
    assert device.device_id == DUMMY_DEVICE_ID
    assert device.ip_addr == DUMMY_IP_ADDRESS
    assert device.mac_addr == DUMMY_MAC_ADDRESS
    assert device.name == DUMMY_DEVICE_NAME
    assert device.state == DUMMY_DEVICE_STATE
    assert device.remaining_time == DUMMY_REMAINING_TIME
    assert device.auto_off_set == DUMMY_AUTO_OFF_SET
    assert device.power_consumption == DUMMY_POWER_CONSUMPTION
    assert device.electric_current == DUMMY_ELECTRIC_CURRENT
    assert device.phone_id == DUMMY_PHONE_ID
async def test_set_auto_off_service(
    hass: HomeAssistantType,
    mock_bridge: Generator[None, Any, None],
    mock_api: Generator[None, Any, None],
    hass_owner_user: "MockUser",
    hass_read_only_user: "MockUser",
) -> None:
    """Test the set_auto_off service."""
    assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION)
    await hass.async_block_till_done()
    assert hass.services.has_service(DOMAIN, SERVICE_SET_AUTO_OFF_NAME)
    # An owner may call the service on the switcher entity.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_AUTO_OFF_NAME,
        {CONF_ENTITY_ID: SWITCH_ENTITY_ID, CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET},
        blocking=True,
        context=Context(user_id=hass_owner_user.id),
    )
    # A read-only user must be rejected.
    with raises(Unauthorized) as unauthorized_read_only_exc:
        await hass.services.async_call(
            DOMAIN,
            SERVICE_SET_AUTO_OFF_NAME,
            {CONF_ENTITY_ID: SWITCH_ENTITY_ID, CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET},
            blocking=True,
            context=Context(user_id=hass_read_only_user.id),
        )
    assert unauthorized_read_only_exc.type is Unauthorized
    # Calling the service against an unrelated entity is also unauthorized.
    with raises(Unauthorized) as unauthorized_wrong_entity_exc:
        await hass.services.async_call(
            DOMAIN,
            SERVICE_SET_AUTO_OFF_NAME,
            {
                CONF_ENTITY_ID: "light.not_related_entity",
                CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET,
            },
            blocking=True,
            context=Context(user_id=hass_owner_user.id),
        )
    assert unauthorized_wrong_entity_exc.type is Unauthorized
    # An unknown user id raises UnknownUser.
    with raises(UnknownUser) as unknown_user_exc:
        await hass.services.async_call(
            DOMAIN,
            SERVICE_SET_AUTO_OFF_NAME,
            {CONF_ENTITY_ID: SWITCH_ENTITY_ID, CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET},
            blocking=True,
            context=Context(user_id="not_real_user"),
        )
    assert unknown_user_exc.type is UnknownUser
    # Swap in a mock service so the validated call data can be inspected.
    service_calls = async_mock_service(
        hass, DOMAIN, SERVICE_SET_AUTO_OFF_NAME, SERVICE_SET_AUTO_OFF_SCHEMA
    )
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_AUTO_OFF_NAME,
        {CONF_ENTITY_ID: SWITCH_ENTITY_ID, CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET},
    )
    await hass.async_block_till_done()
    assert len(service_calls) == 1
    # The service schema coerces the time string, dropping leading zeros.
    assert str(service_calls[0].data[CONF_AUTO_OFF]) == DUMMY_AUTO_OFF_SET.lstrip("0")
async def test_signal_dispatcher(
    hass: HomeAssistantType, mock_bridge: Generator[None, Any, None]
) -> None:
    """Test signal dispatcher dispatching device updates every 4 seconds."""
    assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION)
    await hass.async_block_till_done()
    @callback
    def verify_update_data(device: "SwitcherV2Device") -> None:
        """Use as callback for signal dispatcher."""
        # NOTE(review): this callback asserts nothing, so the test only
        # proves the dispatch does not raise -- consider recording the call.
        pass
    async_dispatcher_connect(hass, SIGNAL_SWITCHER_DEVICE_UPDATE, verify_update_data)
    # Jump past the update interval so the dispatcher fires at least once.
    async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=5))
|
# | !/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='bloom',
version='0.4.4',
packages=find_packages(exclude=['test']),
package_data={
'bloom.generators.debian': [
'bloom/generators/debian/templates/*',
'bloom/generators/debian/templates/so | urce/*'
]
},
include_package_data=True,
install_requires=[
'argparse',
'catkin-pkg >= 0.1.14',
'distribute',
'empy',
'python-dateutil',
'PyYAML',
'rosdep >= 0.10.3',
'rosdistro >= 0.2.12',
'vcstools >= 0.1.22',
],
author='Tully Foote, William Woodall',
author_email='tfoote@willowgarage.com, william@osrfoundation.org',
maintainer='William Woodall',
maintainer_email='william@osrfoundation.org',
url='http://www.ros.org/wiki/bloom',
download_url='http://pr.willowgarage.com/downloads/bloom/',
keywords=['ROS'],
classifiers=['Programming Language :: Python',
'License :: OSI Approved :: BSD License'],
description="Bloom is a release automation tool.",
long_description="""\
Bloom provides tools for releasing software on top of a git repository \
and leverages tools and patterns from git-buildpackage. Additionally, \
bloom leverages meta and build information from catkin \
(https://github.com/ros/catkin) to automate release branching and the \
generation of platform specific source packages, like debian's src-debs.""",
license='BSD',
test_suite='test',
entry_points={
'console_scripts': [
'git-bloom-config = bloom.commands.git.config:main',
'git-bloom-import-upstream = bloom.commands.git.import_upstream:main',
'git-bloom-branch = bloom.commands.git.branch:main',
'git-bloom-patch = bloom.commands.git.patch.patch_main:main',
'git-bloom-generate = bloom.commands.git.generate:main',
'git-bloom-release = bloom.commands.git.release:main',
'bloom-export-upstream = bloom.commands.export_upstream:main',
'bloom-update = bloom.commands.update:main',
'bloom-release = bloom.commands.release:main',
'bloom-generate = bloom.commands.generate:main'
],
'bloom.generators': [
'release = bloom.generators.release:ReleaseGenerator',
'rosrelease = bloom.generators.rosrelease:RosReleaseGenerator',
'debian = bloom.generators.debian:DebianGenerator',
'rosdebian = bloom.generators.rosdebian:RosDebianGenerator'
],
'bloom.generate_cmds': [
'debian = bloom.generators.debian.generate_cmd:description',
'rosdebian = bloom.generators.rosdebian:description'
]
}
)
|
#!/usr/bin/env python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO: remove this script when GYP has for loops
from __future__ import print_function
import sys
import optparse
def main(argv):
    """Expand a format string once per locale and print the quoted results.

    argv follows sys.argv conventions (argv[0] is ignored as the program
    name); argv[1] is the format string, the rest are locale names.
    Returns 1 on bad usage, None on success.
    """
    parser = optparse.OptionParser()
    usage = 'usage: %s [options ...] format_string locale_list'
    parser.set_usage(usage.replace('%s', '%prog'))
    parser.add_option('-d', dest='dash_to_underscore', action="store_true",
                      default=False,
                      help='map "en-US" to "en" and "-" to "_" in locales')
    options, arglist = parser.parse_args(argv)
    if len(arglist) < 3:
        print('ERROR: need string and list of locales')
        return 1
    str_template = arglist[1]
    def cocoa_locale(locale):
        # For Cocoa to find the locale at runtime, it needs to use '_'
        # instead of '-' (http://crbug.com/20441). Also, 'en-US' should be
        # simply 'en' (http://crbug.com/19165, http://crbug.com/25578).
        if options.dash_to_underscore:
            if locale == 'en-US':
                locale = 'en'
            locale = locale.replace('-', '_')
        return locale
    expanded = [str_template.replace('ZZLOCALE', cocoa_locale(loc))
                for loc in arglist[2:]]
    # Quote each element so filename spaces don't mess up GYP's attempt to
    # parse it into a list.
    print(' '.join("'%s'" % item for item in expanded))
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
###############################################################################
#
# Copyright 2010 Locomatix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
# Public API of the locomatix client package.
__all__ = ['create_feed', 'delete_feed', 'list_feeds',
           'create_object', 'delete_object', 'delete_all_objects',
           'list_objects', 'query_objects',
           'update_attributes', 'get_attributes', 'update_location', 'get_location',
           'search_nearby', 'query_search_nearby',
           'search_region', 'query_search_region',
           'create_zone', 'activate_zone', 'get_zone', 'deactivate_zone',
           'delete_zone', 'delete_all_zones', 'list_zones',
           'create_fence', 'activate_fence', 'get_fence', 'deactivate_fence',
           # Bug fix: a missing comma after 'list_fences' made Python's
           # implicit string concatenation merge it with the next entry
           # into the bogus name 'list_fencesget_location_history'.
           'delete_fence', 'delete_all_fences', 'list_fences',
           'get_location_history', 'query_location_history',
           'get_space_activity', 'query_space_activity',
           'get_histogram', 'query_histogram',
           ]
from create_feed import create_feed
from delete_feed import delete_feed
from list_feeds import list_feeds
from create_object import create_object
from delete_object import delete_object
from delete_all_objects import delete_all_objects
from list_objects import list_objects
from query_objects import query_objects
from update_attributes import update_attributes
from get_attributes import get_attributes
from update_location import update_location
from get_location import get_location
from create_zone import create_zone
from activate_zone import activate_zone
from get_zone import get_zone
from delete_zone import delete_zone
from delete_all_zones import delete_all_zones
from deactivate_zone import deactivate_zone
from list_zones import list_zones
from create_fence import create_fence
from activate_fence import activate_fence
from get_fence import get_fence
from deactivate_fence import deactivate_fence
from delete_fence import delete_fence
from delete_all_fences import delete_all_fences
from list_fences import list_fences
from search_region im | port search_region
from query_search_region import query_search_region
from search_nearby import search_nearby
from query_search_nearby import query_search_nearby
from get_location_history import get_location_history
from query_location_history import query_location_history
from get_space_activity import get_space_activity
from query_space_activity import query_space_activit | y
from get_histogram import get_histogram
from query_histogram import query_histogram
|
real tarfile module try
to open the fake paths.
"""
config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
report = self.distributor.publish_repo(self.repo, self.conduit, config)
| self.assertFalse(report.success_flag)
self.assertTrue(isinstance(report.summary, basestring))
self.assertEqual(len(report.details['errors']), 2)
self.assertTrue(report.details['errors'][0][0] in [self.uk1, self.uk2])
self.assertTrue(report.details['errors'][1][0] in [self.uk1, self.uk2])
self.assertEqual(len(report.details['success_ | unit_keys']), 0)
    @mock.patch('tarfile.open', autospec=True)
    @mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
                       '_check_for_unsafe_archive_paths',
                       return_value=None)
    @mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
                       '_clear_destination_directory',
                       return_value=None)
    def test_cannot_extract_tarballs(self, mock_clear, mock_check, mock_open):
        """Both units are reported as errors when extractall() blows up."""
        config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
        # Simulate tarfile extraction failing for every unit.
        mock_open.return_value.extractall.side_effect = OSError
        report = self.distributor.publish_repo(self.repo, self.conduit, config)
        self.assertFalse(report.success_flag)
        self.assertTrue(isinstance(report.summary, basestring))
        self.assertEqual(len(report.details['errors']), 2)
        # Error ordering is not guaranteed, so accept either unit key first.
        self.assertTrue(report.details['errors'][0][0] in [self.uk1, self.uk2])
        self.assertTrue(report.details['errors'][1][0] in [self.uk1, self.uk2])
        self.assertEqual(len(report.details['success_unit_keys']), 0)
    @mock.patch('shutil.move', side_effect=IOError)
    @mock.patch('tarfile.open', autospec=True)
    @mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
                       '_check_for_unsafe_archive_paths',
                       return_value=None)
    @mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
                       '_clear_destination_directory',
                       return_value=None)
    def test_cannot_move(self, mock_clear, mock_check, mock_open, mock_move):
        """Both units are reported as errors when moving the extracted tree fails."""
        config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
        # Extraction succeeds (single directory 'a'), but shutil.move raises.
        mock_open.return_value.getnames.return_value = ['a/b', 'a/c']
        report = self.distributor.publish_repo(self.repo, self.conduit, config)
        self.assertFalse(report.success_flag)
        self.assertTrue(isinstance(report.summary, basestring))
        self.assertEqual(len(report.details['errors']), 2)
        # Error ordering is not guaranteed, so accept either unit key first.
        self.assertTrue(report.details['errors'][0][0] in [self.uk1, self.uk2])
        self.assertTrue(report.details['errors'][1][0] in [self.uk1, self.uk2])
        self.assertEqual(len(report.details['success_unit_keys']), 0)
    @mock.patch('tarfile.open', autospec=True)
    @mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
                       '_check_for_unsafe_archive_paths',
                       return_value=None)
    @mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
                       '_clear_destination_directory',
                       return_value=None)
    def test_multiple_extraction_dirs(self, mock_clear, mock_check, mock_open):
        """Archives that extract into more than one top dir are errors."""
        config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
        # Two distinct top-level directories ('a' and 'c') in one archive.
        mock_open.return_value.getnames.return_value = ['a/b', 'c/b']
        report = self.distributor.publish_repo(self.repo, self.conduit, config)
        self.assertFalse(report.success_flag)
        self.assertTrue(isinstance(report.summary, basestring))
        self.assertEqual(len(report.details['errors']), 2)
        # Error ordering is not guaranteed, so accept either unit key first.
        self.assertTrue(report.details['errors'][0][0] in [self.uk1, self.uk2])
        self.assertTrue(report.details['errors'][1][0] in [self.uk1, self.uk2])
        self.assertEqual(len(report.details['success_unit_keys']), 0)
    @mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
                       '_clear_destination_directory',
                       return_value=None)
    def test_no_units(self, mock_clear):
        """Publishing an empty repo succeeds and still clears the destination dir."""
        config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
        self.conduit.get_units.return_value = []
        report = self.distributor.publish_repo(self.repo, self.conduit, config)
        self.assertTrue(report.success_flag)
        self.assertEqual(len(report.details['errors']), 0)
        self.assertEqual(len(report.details['success_unit_keys']), 0)
        # we still need to clear the destination
        mock_clear.assert_called_once_with(self.puppet_dir)
def _add_error(self, *args, **kwargs):
"""
add an error to the detail report. This gives us a chance to add an error
during a particular step in the workflow.
"""
if not self.distributor.detail_report.report['errors']:
self.distributor.detail_report.error(self.uk1, 'failed')
class TestFindDuplicateNames(unittest.TestCase):
    """Tests for PuppetModuleInstallDistributor._find_duplicate_names."""

    def setUp(self):
        self.uk1 = {'author': 'puppetlabs', 'name': 'stdlib', 'version': '1.2.0'}
        self.uk2 = {'author': 'puppetlabs', 'name': 'java', 'version': '1.3.1'}
        # uk3 shares the name 'stdlib' with uk1, forming a duplicate pair.
        self.uk3 = {'author': 'puppetlabs', 'name': 'stdlib', 'version': '1.3.1'}
        self.unit3 = AssociatedUnit(constants.TYPE_PUPPET_MODULE, self.uk3, {}, '/a/b/z', '', '', '', '')
        self.units = [
            AssociatedUnit(constants.TYPE_PUPPET_MODULE, self.uk1, {}, '/a/b/x', '', '', '', ''),
            AssociatedUnit(constants.TYPE_PUPPET_MODULE, self.uk2, {}, '/a/b/y', '', '', '', ''),
        ]
        self.method = installdistributor.PuppetModuleInstallDistributor._find_duplicate_names

    def test_no_dups(self):
        ret = self.method(self.units)
        self.assertEqual(ret, [])

    def test_with_dups(self):
        self.units.append(self.unit3)
        ret = self.method(self.units)
        # assertIn reports the offending value on failure, unlike
        # assertTrue(x in y).
        self.assertIn(self.units[0], ret)
        self.assertIn(self.units[2], ret)
class TestMoveToDestinationDirectory(unittest.TestCase):
    """Tests for PuppetModuleInstallDistributor._move_to_destination_directory."""

    def setUp(self):
        self.working_dir = tempfile.mkdtemp()
        self.destination_dir = os.path.join(self.working_dir, 'target')
        os.makedirs(self.destination_dir)
        self.source_dir = os.path.join(self.working_dir, 'source')
        os.makedirs(self.source_dir)

    def tearDown(self):
        shutil.rmtree(self.working_dir)

    def test_existing_files_saved(self):
        """Files already present in the destination must survive the move.

        Renamed from ``existing_files_saved``: without the ``test_`` prefix
        the test runner never discovered or executed this case.
        """
        existing_file = os.path.join(self.destination_dir, 'foo.txt')
        touch(existing_file)
        new_dir = os.path.join(self.source_dir, 'bar')
        os.makedirs(new_dir)
        installdistributor.PuppetModuleInstallDistributor.\
            _move_to_destination_directory(self.source_dir, self.destination_dir)
        self.assertTrue(os.path.exists(existing_file))

    def test_source_dir_removed(self):
        """The source directory is deleted after its contents are moved."""
        installdistributor.PuppetModuleInstallDistributor.\
            _move_to_destination_directory(self.source_dir, self.destination_dir)
        self.assertFalse(os.path.exists(self.source_dir))

    def test_move_dirs(self):
        """Subdirectories of the source end up under the destination."""
        new_dir = os.path.join(self.source_dir, 'bar')
        os.makedirs(new_dir)
        installdistributor.PuppetModuleInstallDistributor.\
            _move_to_destination_directory(self.source_dir, self.destination_dir)
        self.assertTrue(os.path.exists(os.path.join(self.destination_dir, 'bar')))
class TestRenameDirectory(unittest.TestCase):
    def setUp(self):
        # A single unit whose module name ('stdlib') is the expected rename
        # target for the extraction directory.
        uk = {'author': 'puppetlabs', 'name': 'stdlib', 'version': '1.2.0'}
        self.unit = AssociatedUnit(constants.TYPE_PUPPET_MODULE, uk, {}, '/a/b/x', '', '', '', '')
        self.method = installdistributor.PuppetModuleInstallDistributor._rename_directory
    @mock.patch('shutil.move', autospec=True)
    def test_trailing_slash(self, mock_move):
        # A destination path with a trailing slash must not produce a double
        # slash in the paths handed to shutil.move.
        self.method(self.unit, '/tmp/', ['a/b', 'a/c'])
        mock_move.assert_called_once_with('/tmp/a', '/tmp/stdlib')
@mock.patch('shutil.move', autospec=True)
def test_no_trailing_sl |
"""Redo the `...` (representation) but with limits on most sizes."""
__all__ = ["Repr","repr"]
class Repr:
    """Produce representations of objects with limits on output size.

    Each ``repr_<typename>`` method handles one type; anything without a
    dedicated handler falls back to a middle-truncated builtin repr().
    The original used Python-2-only backtick syntax (`x`); this version
    uses the builtin repr() and so also runs on Python 3.
    """

    # Capture the builtin repr at class-creation time.  The module later
    # rebinds the global name ``repr`` to ``aRepr.repr``, so calling the
    # bare name from inside these methods would recurse indefinitely.
    _builtin_repr = repr

    def __init__(self):
        self.maxlevel = 6    # maximum nesting depth before eliding
        self.maxtuple = 6    # max tuple items shown
        self.maxlist = 6     # max list items shown
        self.maxdict = 4     # max dict entries shown
        self.maxstring = 30  # max characters of a string repr
        self.maxlong = 40    # max digits of a long integer repr
        self.maxother = 20   # max size of the generic fallback repr

    def repr(self, x):
        """Return a size-limited representation of x."""
        return self.repr1(x, self.maxlevel)

    def repr1(self, x, level):
        """Dispatch on type name; ``level`` is the remaining nesting depth."""
        typename = type(x).__name__
        if ' ' in typename:
            # e.g. "instance method" -> "instance_method" so the name can
            # form a valid repr_<typename> attribute below.
            typename = '_'.join(typename.split())
        if hasattr(self, 'repr_' + typename):
            return getattr(self, 'repr_' + typename)(x, level)
        # Generic fallback: truncate the middle of the builtin repr.
        s = self._builtin_repr(x)
        if len(s) > self.maxother:
            i = max(0, (self.maxother - 3) // 2)
            j = max(0, self.maxother - 3 - i)
            s = s[:i] + '...' + s[len(s) - j:]
        return s

    def repr_tuple(self, x, level):
        n = len(x)
        if n == 0:
            return '()'
        if level <= 0:
            return '(...)'
        s = ''
        for i in range(min(n, self.maxtuple)):
            if s:
                s = s + ', '
            s = s + self.repr1(x[i], level - 1)
        if n > self.maxtuple:
            s = s + ', ...'
        elif n == 1:
            s = s + ','  # single-element tuple needs the trailing comma
        return '(' + s + ')'

    def repr_list(self, x, level):
        n = len(x)
        if n == 0:
            return '[]'
        if level <= 0:
            return '[...]'
        s = ''
        for i in range(min(n, self.maxlist)):
            if s:
                s = s + ', '
            s = s + self.repr1(x[i], level - 1)
        if n > self.maxlist:
            s = s + ', ...'
        return '[' + s + ']'

    def repr_dict(self, x, level):
        n = len(x)
        if n == 0:
            return '{}'
        if level <= 0:
            return '{...}'
        s = ''
        # sorted() replaces the Python-2-only ``keys = x.keys(); keys.sort()``
        # and keeps the output deterministic.
        keys = sorted(x.keys())
        for i in range(min(n, self.maxdict)):
            if s:
                s = s + ', '
            key = keys[i]
            s = s + self.repr1(key, level - 1)
            s = s + ': ' + self.repr1(x[key], level - 1)
        if n > self.maxdict:
            s = s + ', ...'
        return '{' + s + '}'

    def repr_str(self, x, level):
        s = self._builtin_repr(x[:self.maxstring])
        if len(s) > self.maxstring:
            i = max(0, (self.maxstring - 3) // 2)
            j = max(0, self.maxstring - 3 - i)
            # Re-repr a shortened string so quoting/escaping stays valid.
            s = self._builtin_repr(x[:i] + x[len(x) - j:])
            s = s[:i] + '...' + s[len(s) - j:]
        return s

    def repr_long(self, x, level):
        s = self._builtin_repr(x)  # XXX Hope this isn't too slow...
        if len(s) > self.maxlong:
            i = max(0, (self.maxlong - 3) // 2)
            j = max(0, self.maxlong - 3 - i)
            s = s[:i] + '...' + s[len(s) - j:]
        return s

    def repr_instance(self, x, level):
        try:
            s = self._builtin_repr(x)
        # Bugs in x.__repr__() can raise arbitrary exceptions; fall back to
        # a made-up placeholder instead of propagating.  ``except Exception``
        # (not a bare except) lets KeyboardInterrupt/SystemExit through.
        except Exception:
            return '<' + x.__class__.__name__ + ' instance at ' + \
                   hex(id(x))[2:] + '>'
        if len(s) > self.maxstring:
            i = max(0, (self.maxstring - 3) // 2)
            j = max(0, self.maxstring - 3 - i)
            s = s[:i] + '...' + s[len(s) - j:]
        return s
# Module-level convenience instance; rebinding the name ``repr`` is
# deliberate so that ``from repr import repr`` yields the size-limited
# version in place of the builtin.
aRepr = Repr()
repr = aRepr.repr
|
://www1.skysports.com/watch/video/sports/football/teams/liverpool',176,art+'/skysports.png')
main.addDir('Manchester City','http://www1.skysports.com/watch/video/sports/football/teams/manchester-city',176,art+'/skysports.png')
main.addDir('Manchester United','http://www1.skysports.com/watch/video/sports/football/teams/manchester-united',176,art+'/skysports.png')
main.addDir('Newcastle United','http://www1.skysports.com/watch/video/sports/football/teams/newcastle-united',176,art+'/skysports.png')
main.addDir('Norwich City','http://www1.skysports.com/watch/video/sports/football/teams/norwich-city',176,art+'/skysports.png')
main.addDir('Queens Park Rangers','http://www1.skysports.com/watch/video/sports/football/teams/queens-park-rangers',176,art+'/skysports.png')
main.addDir('Reading','http://www1.skysports.com/watch/video/sports/football/teams/reading',176,art+'/skysports.png')
main.addDir('Southampton','http://www1.skysports.com/watch/video/sports/football/teams/southampton',176,art+'/skysports.png')
main.addDir('Stoke City','http://www1.skysports.com/watch/video/sports/football/teams/stoke-city',176,art+'/skysports.png')
main.addDir('Sunderland','http://www1.skysports.com/watch/video/sports/football/teams/sunderland',176,art+'/skysports.png')
main.addDir('Swansea City','http://www1.skysports.com/watch/video/sports/football/teams/swansea-city',176,art+'/skysports.png')
main.addDir('Tottenham Hotspur','http://www1.skysports.com/watch/video/sports/football/teams/tottenham-hotspur',176,art+'/skysports.png')
main.addDir('West Bromwich Albion','http://www1.skysports.com/watch/video/sports/football/teams/west-bromwich-albion',176,art+'/skysports.png')
main.addDir('West Ham United','http://www1.skysports.com/watch/video/sports/football/teams/west-ham-united',176,art+'/skysports.png')
main.addDir('Wigan Athletic','http://www1.skysports.com/watch/video/sports/football/teams/wigan-athletic',176,art+'/skysports.png')
if murl=='championship':
main.addDir('Championship [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/16428/100/1',173,art+'/skysports.png')
main.addDir('Barnsley','http://www1.skysports.com/watch/video/sports/football/teams/barnsley',176,art+'/skysports.png')
main.addDir('Birmingham City','http://www1.skysports.com/watch/video/sports/football/teams/birmingham-city',176,art+'/skysports.png')
main.addDir('Blackburn Rovers','http://www1.skysports.com/watch/video/sports/football/teams/blackburn-rovers',176,art+'/skysports.png')
main.addDir('Blackpool','http://www1.skysports.com/watch/video/sports/football/teams/blackpool',176,art+'/skysports.png')
main.addDir('Bolton Wander | ers','http://www1.skysports.com/watch/video/sports/football/teams/bolton-wanderers',176,art+'/skysports.png')
| main.addDir('Brighton','http://www1.skysports.com/watch/video/sports/football/teams/brighton',176,art+'/skysports.png')
main.addDir('Bristol City','http://www1.skysports.com/watch/video/sports/football/teams/bristol-city',176,art+'/skysports.png')
main.addDir('Burnley','http://www1.skysports.com/watch/video/sports/football/teams/burnley',176,art+'/skysports.png')
main.addDir('Cardiff City','http://www1.skysports.com/watch/video/sports/football/teams/cardiff-city',176,art+'/skysports.png')
main.addDir('Charlton Athletic','http://www1.skysports.com/watch/video/sports/football/teams/charlton-athletic',176,art+'/skysports.png')
main.addDir('Crystal Palace','http://www1.skysports.com/watch/video/sports/football/teams/crystal-palace',176,art+'/skysports.png')
main.addDir('Derby County','http://www1.skysports.com/watch/video/sports/football/teams/derby-county',176,art+'/skysports.png')
main.addDir('Huddersfield Town','http://www1.skysports.com/watch/video/sports/football/teams/huddersfield-town',176,art+'/skysports.png')
main.addDir('Hull City','http://www1.skysports.com/watch/video/sports/football/teams/hull-city',176,art+'/skysports.png')
main.addDir('Ipswich Town','http://www1.skysports.com/watch/video/sports/football/teams/ipswich-town',176,art+'/skysports.png')
main.addDir('Leeds United','http://www1.skysports.com/watch/video/sports/football/teams/leeds-united',176,art+'/skysports.png')
main.addDir('Leicester City','http://www1.skysports.com/watch/video/sports/football/teams/leicester-city',176,art+'/skysports.png')
main.addDir('Middlesbrough','http://www1.skysports.com/watch/video/sports/football/teams/middlesbrough',176,art+'/skysports.png')
main.addDir('Millwall','http://www1.skysports.com/watch/video/sports/football/teams/millwall',176,art+'/skysports.png')
main.addDir('Nottingham Forest','http://www1.skysports.com/watch/video/sports/football/teams/nottingham-forest',176,art+'/skysports.png')
main.addDir('Peterborough United','http://www1.skysports.com/watch/video/sports/football/teams/peterborough-united',176,art+'/skysports.png')
main.addDir('Sheffield Wednesday','http://www1.skysports.com/watch/video/sports/football/teams/sheffield-wednesday',176,art+'/skysports.png')
main.addDir('Watford','http://www1.skysports.com/watch/video/sports/football/teams/watford',176,art+'/skysports.png')
main.addDir('Wolverhampton','http://www1.skysports.com/watch/video/sports/football/teams/wolverhampton',176,art+'/skysports.png')
if murl=='league-one':
main.addDir('League One [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/16478/100/1',173,art+'/skysports.png')
main.addDir('Bournemouth','http://www1.skysports.com/watch/video/sports/football/teams/bournemouth',176,art+'/skysports.png')
main.addDir('Brentford','http://www1.skysports.com/watch/video/sports/football/teams/brentford',176,art+'/skysports.png')
main.addDir('Bury','http://www1.skysports.com/watch/video/sports/football/teams/bury',176,art+'/skysports.png')
main.addDir('Carlisle United','http://www1.skysports.com/watch/video/sports/football/teams/carlisle-united',176,art+'/skysports.png')
main.addDir('Colchester United','http://www1.skysports.com/watch/video/sports/football/teams/colchester-united',176,art+'/skysports.png')
main.addDir('Coventry City','http://www1.skysports.com/watch/video/sports/football/teams/coventry-city',176,art+'/skysports.png')
main.addDir('Crawley Town','http://www1.skysports.com/watch/video/sports/football/teams/crawley-town',176,art+'/skysports.png')
main.addDir('Crewe Alexandra','http://www1.skysports.com/watch/video/sports/football/teams/crewe-alexandra',176,art+'/skysports.png')
main.addDir('Doncaster','http://www1.skysports.com/watch/video/sports/football/teams/doncaster',176,art+'/skysports.png')
main.addDir('Hartlepool United','http://www1.skysports.com/watch/video/sports/football/teams/hartlepool-united',176,art+'/skysports.png')
main.addDir('Leyton Orient','http://www1.skysports.com/watch/video/sports/football/teams/leyton-orient',176,art+'/skysports.png')
main.addDir('Milton Keynes Dons','http://www1.skysports.com/watch/video/sports/football/teams/milton-keynes-dons',176,art+'/skysports.png')
main.addDir('Notts County','http://www1.skysports.com/watch/video/sports/football/teams/notts-county',176,art+'/skysports.png')
main.addDir('Oldham Athletic','http://www1.skysports.com/watch/video/sports/football/teams/oldham-athletic',176,art+'/skysports.png')
main.addDir('Portsmouth','http://www1.skysports.com/watch/video/sports/football/teams/ |
ame=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
    @since(1.3)
    def registerDataFrameAsTable(self, df, tableName):
        """Registers the given :class:`DataFrame` as a temporary table in the catalog.

        Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.

        :param df: the :class:`DataFrame` to register.
        :param tableName: name under which the table is registered.

        >>> sqlContext.registerDataFrameAsTable(df, "table1")
        """
        df.createOrReplaceTempView(tableName)
    @since(1.6)
    def dropTempTable(self, tableName):
        """ Remove the temp table from catalog.

        Delegates to the session catalog's ``dropTempView``.

        :param tableName: name of the temporary table to drop.

        >>> sqlContext.registerDataFrameAsTable(df, "table1")
        >>> sqlContext.dropTempTable("table1")
        """
        self.sparkSession.catalog.dropTempView(tableName)
    @since(1.3)
    def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
        """Creates an external table based on the dataset in a data source.

        It returns the DataFrame associated with the external table.

        The data source is specified by the ``source`` and a set of ``options``.
        If ``source`` is not specified, the default data source configured by
        ``spark.sql.sources.default`` will be used.

        Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
        created external table.

        :param tableName: name of the table to create.
        :param path: optional location of the data.
        :param source: optional data source name.
        :param schema: optional schema for the returned :class:`DataFrame`.
        :param options: additional key/value options for the data source.
        :return: :class:`DataFrame`
        """
        return self.sparkSession.catalog.createExternalTable(
            tableName, path, source, schema, **options)
    @ignore_unicode_prefix
    @since(1.0)
    def sql(self, sqlQuery):
        """Returns a :class:`DataFrame` representing the result of the given query.

        :param sqlQuery: the SQL query text to execute.
        :return: :class:`DataFrame`

        >>> sqlContext.registerDataFrameAsTable(df, "table1")
        >>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
        >>> df2.collect()
        [Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
        """
        return self.sparkSession.sql(sqlQuery)
    @since(1.0)
    def table(self, tableName):
        """Returns the specified table or view as a :class:`DataFrame`.

        :param tableName: name of the table or view to load.
        :return: :class:`DataFrame`

        >>> sqlContext.registerDataFrameAsTable(df, "table1")
        >>> df2 = sqlContext.table("table1")
        >>> sorted(df.collect()) == sorted(df2.collect())
        True
        """
        return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database=u'', tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
    @since(1.0)
    def cacheTable(self, tableName):
        """Caches the specified table in-memory.

        :param tableName: name of the table to cache.
        """
        self._ssql_ctx.cacheTable(tableName)
    @since(1.0)
    def uncacheTable(self, tableName):
        """Removes the specified table from the in-memory cache.

        :param tableName: name of the table to uncache.
        """
        self._ssql_ctx.uncacheTable(tableName)
    @since(1.3)
    def clearCache(self):
        """Removes all cached tables from the in-memory cache. """
        self._ssql_ctx.clearCache()
    @property
    @since(1.4)
    def read(self):
        """
        Returns a :class:`DataFrameReader` that can be used to read data
        in as a :class:`DataFrame`.

        A new reader is constructed on every property access.

        :return: :class:`DataFrameReader`
        """
        return DataFrameReader(self)
    @property
    @since(2.0)
    def readStream(self):
        """
        Returns a :class:`DataStreamReader` that can be used to read data streams
        as a streaming :class:`DataFrame`.

        A new reader is constructed on every property access.

        .. note:: Evolving.

        :return: :class:`DataStreamReader`

        >>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
        >>> text_sdf.isStreaming
        True
        """
        return DataStreamReader(self)
    @property
    @since(2.0)
    def streams(self):
        """Returns a :class:`StreamingQueryManager` that allows managing all the
        :class:`StreamingQuery` StreamingQueries active on `this` context.

        .. note:: Evolving.
        """
        # Imported here rather than at module level -- presumably to avoid a
        # circular import; confirm before hoisting.
        from pyspark.sql.streaming import StreamingQueryManager
        return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
    def __init__(self, sparkContext, jhiveContext=None):
        # Whole class is deprecated; warn on every construction.
        warnings.warn(
            "HiveContext is deprecated in Spark 2.0.0. Please use " +
            "SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
            DeprecationWarning)
        if jhiveContext is None:
            # No JVM HiveContext supplied: build a Hive-enabled session.
            sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
        else:
            # Wrap the session owned by the provided JVM HiveContext.
            sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
        SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
you may end up launching multip |
# Copyright 2011 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Speedway iptables generator. This is a subclass of Iptables li | b."""
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = 'watson@google.com (Tony Watson)'
from string import Template
from lib import iptables
class Error(Exception):
  """Base error class for the speedway generator module."""
  pass
class Term(iptables.Term):
  """Generate Iptables policy terms."""
  # Platform tag used by the policy framework to select this generator.
  _PLATFORM = 'speedway'
  # Speedway emits no pre-jump line; terms render via the post-jump template.
  _PREJUMP_FORMAT = None
  _POSTJUMP_FORMAT = Template('-A $filter -j $term')
class Speedway(iptables.Iptables):
  """Generates filters and terms from provided policy object."""
  _PLATFORM = 'speedway'
  _DEFAULT_PROTOCOL = 'all'
  # Output files carry the .ipt suffix.
  SUFFIX = '.ipt'
  # Output is wrapped between '*filter' and 'COMMIT' for iptables-restore.
  _RENDER_PREFIX = '*filter'
  _RENDER_SUFFIX = 'COMMIT'
  _DEFAULTACTION_FORMAT = ':%s %s'
  _TERM = Term
|
# Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from tempest.common import rest_client
from tempest.common import xml_utils
from tempest import config
from tempest import exceptions
CONF = config.CONF
class AggregatesClientXML(rest_client.RestClient):
    """XML REST client for the Nova host-aggregates API (os-aggregates)."""

    TYPE = "xml"

    def __init__(self, auth_provider):
        super(AggregatesClientXML, self).__init__(auth_provider)
        self.service = CONF.compute.catalog_type

    def _format_aggregate(self, g):
        """Convert an aggregate XML element into a plain dict.

        Flattens 'hosts' to a list of host names and normalizes the
        string 'None' availability zone back to real ``None``.
        """
        agg = xml_utils.xml_to_json(g)
        aggregate = {}
        for key, value in agg.items():
            if key == 'hosts':
                # Only the host values matter; the XML->json conversion
                # wraps them in a mapping.
                aggregate['hosts'] = []
                for host in value.values():
                    aggregate['hosts'].append(host)
            elif key == 'availability_zone':
                # The serializer renders a null zone as the string 'None'.
                aggregate[key] = None if value == 'None' else value
            else:
                aggregate[key] = value
        return aggregate

    def _parse_array(self, node):
        """Format every child aggregate element of ``node``."""
        return [self._format_aggregate(x) for x in node]

    def list_aggregates(self):
        """Get aggregate list."""
        resp, body = self.get("os-aggregates")
        aggregates = self._parse_array(etree.fromstring(body))
        return resp, aggregates

    def get_aggregate(self, aggregate_id):
        """Get details of the given aggregate."""
        resp, body = self.get("os-aggregates/%s" % str(aggregate_id))
        aggregate = self._format_aggregate(etree.fromstring(body))
        return resp, aggregate

    def create_aggregate(self, name, availability_zone=None):
        """Creates a new aggregate."""
        if availability_zone is not None:
            post_body = xml_utils.Element("aggregate", name=name,
                                          availability_zone=availability_zone)
        else:
            post_body = xml_utils.Element("aggregate", name=name)
        resp, body = self.post('os-aggregates',
                               str(xml_utils.Document(post_body)))
        aggregate = self._format_aggregate(etree.fromstring(body))
        return resp, aggregate

    def update_aggregate(self, aggregate_id, name, availability_zone=None):
        """Update an aggregate."""
        if availability_zone is not None:
            put_body = xml_utils.Element("aggregate", name=name,
                                         availability_zone=availability_zone)
        else:
            put_body = xml_utils.Element("aggregate", name=name)
        resp, body = self.put('os-aggregates/%s' % str(aggregate_id),
                              str(xml_utils.Document(put_body)))
        aggregate = self._format_aggregate(etree.fromstring(body))
        return resp, aggregate

    def delete_aggregate(self, aggregate_id):
        """Deletes the given aggregate."""
        return self.delete("os-aggregates/%s" % str(aggregate_id))

    def is_resource_deleted(self, id):
        """Return True once the aggregate can no longer be fetched."""
        try:
            self.get_aggregate(id)
        except exceptions.NotFound:
            return True
        return False

    def add_host(self, aggregate_id, host):
        """Adds a host to the given aggregate."""
        post_body = xml_utils.Element("add_host", host=host)
        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                               str(xml_utils.Document(post_body)))
        aggregate = self._format_aggregate(etree.fromstring(body))
        return resp, aggregate

    def remove_host(self, aggregate_id, host):
        """Removes a host from the given aggregate."""
        post_body = xml_utils.Element("remove_host", host=host)
        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                               str(xml_utils.Document(post_body)))
        aggregate = self._format_aggregate(etree.fromstring(body))
        return resp, aggregate

    def set_metadata(self, aggregate_id, meta):
        """Replaces the aggregate's existing metadata with new metadata.

        :param meta: dict of metadata key/value pairs to set.
        """
        post_body = xml_utils.Element("set_metadata")
        metadata = xml_utils.Element("metadata")
        post_body.append(metadata)
        # Use distinct loop variables: the original rebound ``meta`` inside
        # this loop, shadowing the parameter it was iterating over.
        for key, value in meta.items():
            entry = xml_utils.Element(key)
            entry.append(xml_utils.Text(value))
            metadata.append(entry)
        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
                               str(xml_utils.Document(post_body)))
        aggregate = self._format_aggregate(etree.fromstring(body))
        return resp, aggregate
|
"""luigi target for writing data into an HP Vertica database"""
import logging
import luigi
logger = logging.getLogger('luigi-interface') # pylint: disable-msg=C0103
try:
import vertica_python
except ImportError:
logger.warning("Attempted to load Vertica interface tools without the vertica_python package; will crash if \
Vertica functionality is used.")
class VerticaTarget(luigi.Target):
    """
    Target for a resource in HP Vertica.

    Completion is tracked via rows in a marker table: a data set counts as
    written once a row with its ``update_id`` exists there.
    """
    marker_table = 'table_updates'

    def __init__(self, host, user, password, schema, table, update_id):
        """
        Initializes a VerticaTarget instance.

        :param host: Vertica server address. Possibly a host:port string.
        :type host: str
        :param user: database user.
        :type user: str
        :param password: password for the specified user.
        :type password: str
        :param schema: the schema being written to.
        :type schema: str
        :param table: the table within schema being written to.
        :type table: str
        :param update_id: an identifier for this data set.
        :type update_id: str
        """
        if ':' in host:
            # NOTE(review): split(':') assumes at most one colon, so IPv6
            # literals are not supported here -- confirm that is acceptable.
            self.host, self.port = host.split(':')
            self.port = int(self.port)
        else:
            self.host = host
            self.port = 5433  # Vertica's default client port.
        self.user = user
        self.password = password
        self.schema = schema
        self.table = table
        self.update_id = update_id
        # Default to using the schema data is being inserted into as the schema for the marker table.
        self.marker_schema = schema

    def touch(self, connection=None):
        """
        Mark this update as complete.

        IMPORTANT, If the marker table doesn't exist,
        the connection transaction will be aborted and the connection reset.
        Then the marker table will be created.

        :param connection: optional open connection to reuse; when omitted a
            new autocommitting connection is created.
        """
        self.create_marker_table()
        if connection is None:
            connection = self.connect()
            connection.autocommit = True  # if connection created here, we commit it here
        connection.cursor().execute(
            """INSERT INTO {marker_schema}.{marker_table} (update_id, target_table)
               VALUES (%s, %s)""".format(marker_schema=self.marker_schema, marker_table=self.marker_table),
            (self.update_id, "{schema}.{table}".format(schema=self.schema, table=self.table))
        )
        # make sure update is properly marked
        assert self.exists(connection)

    def exists(self, connection=None):  # pylint: disable-msg=W0221
        """Return True if a marker row for this ``update_id`` exists."""
        if connection is None:
            connection = self.connect()
            connection.autocommit = True
        cursor = connection.cursor()
        try:
            cursor.execute("""SELECT 1 FROM {marker_schema}.{marker_table}
                           WHERE update_id = %s
                           LIMIT 1""".format(marker_schema=self.marker_schema, marker_table=self.marker_table),
                           (self.update_id,)
                           )
            row = cursor.fetchone()
        except vertica_python.errors.Error as err:
            # Sqlstate 42V01 (undefined table) means the marker table has not
            # been created yet, i.e. nothing has been written.  isinstance
            # (rather than an exact ``type() is`` check) also accepts
            # subclasses of MissingRelation.
            if isinstance(err, vertica_python.errors.MissingRelation) or ('Sqlstate: 42V01' in err.args[0]):
                # If so, then our query error failed because the table doesn't exist.
                row = None
            else:
                raise
        return row is not None

    def connect(self, autocommit=False):
        """
        Creates a connection to a Vertica database using the supplied credentials.

        :param autocommit: whether the connection should automatically commit.
        :type autocommit: bool
        """
        # vertica-python 0.5.0 changes the code for connecting to databases to use kwargs instead of a dictionary.
        # The 'database' parameter is included for DBAPI reasons and does not actually affect the session.
        connection = vertica_python.connect(user=self.user, password=self.password, host=self.host, port=self.port,
                                            database="", autocommit=autocommit)
        return connection

    def create_marker_table(self):
        """
        Create marker table if it doesn't exist.

        Using a separate connection since the transaction might have to be reset.
        """
        connection = self.connect(autocommit=True)
        cursor = connection.cursor()
        try:
            cursor.execute(
                """ CREATE TABLE {marker_schema}.{marker_table} (
                       id AUTO_INCREMENT,
                       update_id VARCHAR(4096) NOT NULL,
                       target_table VARCHAR(128),
                       inserted TIMESTAMP DEFAULT NOW(),
                       PRIMARY KEY (update_id, id)
                    )
                """.format(marker_schema=self.marker_schema, marker_table=self.marker_table)
            )
        except vertica_python.errors.QueryError as err:
            if 'Sqlstate: 42710' in err.args[0]:  # This Sqlstate will appear if the marker table already exists.
                pass
            else:
                raise
        connection.close()
|
from django import forms
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm
from crispy_forms.bootstrap import FormActions, AppendedText, StrictButton, InlineField
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Button, Field, Hidden, HTML, Div
class MyLoginForm(AuthenticationForm):
    """Login form rendered with a crispy-forms horizontal Bootstrap layout."""

    def __init__(self, *args, **kwargs):
        super(MyLoginForm, self).__init__(*args, **kwargs)
        # crispy-forms rendering configuration.
        self.helper = FormHelper()
        self.helper.form_class = 'form-horizontal'
        self.helper.form_method = 'post'
        self.helper.form_action = ''
        self.helper.label_class = 'col-lg-3'
        self.helper.field_class = 'col-lg-6'
        self.helper.layout = Layout(
            # Plain field names render the same as a bare Field(...) wrapper;
            # unified here for consistency with MyPasswordChangeForm (the
            # original mixed 'username' with Field('password')).
            'username',
            'password',
            # NOTE(review): 'btn_success' looks like it was meant to be the
            # Bootstrap class 'btn-success' -- confirm before changing.
            FormActions(Submit('login', 'Login', css_class='btn btn_success')),
        )
class MyPasswordChangeForm(PasswordChangeForm):
    """Password-change form rendered with a crispy-forms horizontal layout."""

    def __init__(self, *args, **kwargs):
        super(MyPasswordChangeForm, self).__init__(*args, **kwargs)
        # Build the crispy-forms helper first, then attach it to the form.
        helper = FormHelper()
        helper.form_class = 'form-horizontal'
        helper.form_method = 'post'
        helper.form_action = ''
        helper.label_class = 'col-lg-3'
        helper.field_class = 'col-lg-6'
        helper.layout = Layout(
            'old_password',
            'new_password1',
            'new_password2',
            FormActions(Submit('save', 'Save', css_class='btn btn_success')),
        )
        self.helper = helper
|
"""
WSGI config for PythonAnywhere test project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could intr | oduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemo | n process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.org/
application = DjangoWhiteNoise(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
interface = {}
mode = 'unknown'
try:
body = run_commands(module, [command])[0]
except IndexError:
return None
if intf_type in ['ethernet', 'portchannel']:
interface_table = body['TABLE_interface']['ROW_interface']
mode = str(interface_table.get('eth_mode', 'layer3'))
if mode == 'access' or mode == 'trunk':
mode = 'layer2'
elif intf_type == 'svi':
mode = 'layer3'
return mode
def get_hsrp_group(group, interface, module):
    """Query the device for HSRP ``group`` and return the parsed settings
    that apply to ``interface``. Returns an empty dict when the group does
    not exist or no row matches the interface.
    """
    command = 'show hsrp group {0} all | json'.format(group)
    hsrp = {}
    # Translate structured-output keys into the module's parameter names.
    hsrp_key = {
        'sh_if_index': 'interface',
        'sh_group_num': 'group',
        'sh_group_version': 'version',
        'sh_cfg_prio': 'priority',
        'sh_preempt': 'preempt',
        'sh_vip': 'vip',
        'sh_authentication_type': 'auth_type',
        'sh_keystring_attr': 'auth_enc',
        'sh_authentication_data': 'auth_string'
    }
    try:
        body = run_commands(module, [command])[0]
        hsrp_table = body['TABLE_grp_detail']['ROW_grp_detail']
        if 'unknown enum:' in str(hsrp_table):
            # Older images emit '<unknown enum>' placeholders; fall back to a
            # text query for the affected attributes.
            hsrp_table = get_hsrp_group_unknown_enum(module, command, hsrp_table)
    except (AttributeError, IndexError, TypeError, KeyError):
        return {}
    if isinstance(hsrp_table, dict):
        # A single group comes back as a bare dict; normalize to a list.
        hsrp_table = [hsrp_table]
    version_map = {'v1': '1', 'v2': '2'}
    for row in hsrp_table:
        parsed_hsrp = apply_key_map(hsrp_key, row)
        parsed_hsrp['interface'] = parsed_hsrp['interface'].lower()
        if parsed_hsrp['version'] in version_map:
            parsed_hsrp['version'] = version_map[parsed_hsrp['version']]
        if parsed_hsrp['auth_type'] == 'md5':
            # 'hidden' keystrings are type-7 encrypted; anything else is plain.
            parsed_hsrp['auth_enc'] = '7' if parsed_hsrp['auth_enc'] == 'hidden' else '0'
        if parsed_hsrp['interface'] == interface:
            return parsed_hsrp
    return hsrp
def get_hsrp_group_unknown_enum(module, command, hsrp_table):
    '''Some older NXOS images fail to set the attr values when using structured output and
    instead set the values to <unknown enum>. This fallback method is a workaround that
    uses an unstructured (text) request to query the device a second time.
    'sh_preempt' is currently the only attr affected. Add checks for other attrs as needed.
    '''
    if 'unknown enum:' in hsrp_table['sh_preempt']:
        # Re-issue the same command without the '| json' filter and parse
        # the preempt state out of the plain-text output.
        text_cmd = {'output': 'text', 'command': command.split('|')[0]}
        text_out = run_commands(module, text_cmd)[0]
        if 'may preempt' in text_out:
            hsrp_table['sh_preempt'] = 'enabled'
        else:
            hsrp_table['sh_preempt'] = 'disabled'
    return hsrp_table
def get_commands_remove_hsrp(group, interface):
    """Build the CLI sequence that removes HSRP ``group`` from ``interface``."""
    return ['interface {0}'.format(interface), 'no hsrp {0}'.format(group)]
def get_commands_config_hsrp(delta, interface, args, existing):
    """Build the CLI command list that applies the ``delta`` HSRP changes.

    Args:
        delta (dict): parameters that differ from the current device state.
            NOTE: mutated in place — values are rewritten into CLI fragments.
        interface (str): interface the HSRP group lives on.
        args (dict): full set of requested module parameters; used as a
            fallback when only one half of the auth pair changed.
        existing (dict): current device state for the group.

    Returns:
        list: ordered CLI commands, always starting with 'interface ...'
        when non-trivial.
    """
    commands = []
    # Templates keyed by delta key; formatted with the rewritten delta below.
    # Keys with no template resolve to 'DNE' and are skipped in the loop.
    config_args = {
        'group': 'hsrp {group}',
        'priority': '{priority}',
        'preempt': '{preempt}',
        'vip': '{vip}'
    }
    preempt = delta.get('preempt', None)
    group = delta.get('group', None)
    vip = delta.get('vip', None)
    priority = delta.get('priority', None)
    if preempt:
        if preempt == 'enabled':
            delta['preempt'] = 'preempt'
        elif preempt == 'disabled':
            delta['preempt'] = 'no preempt'
    if priority:
        if priority == 'default':
            # Only emit 'no priority' when the device is not already at default.
            if existing and existing.get('priority') != PARAM_TO_DEFAULT_KEYMAP.get('priority'):
                delta['priority'] = 'no priority'
            else:
                del(delta['priority'])
        else:
            delta['priority'] = 'priority {0}'.format(delta['priority'])
    if vip:
        if vip == 'default':
            if existing and existing.get('vip') != PARAM_TO_DEFAULT_KEYMAP.get('vip'):
                delta['vip'] = 'no ip'
            else:
                del(delta['vip'])
        else:
            delta['vip'] = 'ip {0}'.format(delta['vip'])
    for key in delta:
        command = config_args.get(key, 'DNE').format(**delta)
        if command and command != 'DNE':
            if key == 'group':
                # 'hsrp <group>' must precede the group's sub-commands.
                commands.insert(0, command)
            else:
                commands.append(command)
        command = None
    auth_type = delta.get('auth_type', None)
    auth_string = delta.get('auth_string', None)
    auth_enc = delta.get('auth_enc', None)
    if auth_type or auth_string:
        if not auth_type:
            auth_type = args['auth_type']
        elif not auth_string:
            auth_string = args['auth_string']
        if auth_string != 'default':
            if auth_type == 'md5':
                command = 'authentication md5 key-string {0} {1}'.format(auth_enc, auth_string)
                commands.append(command)
            elif auth_type == 'text':
                command = 'authentication text {0}'.format(auth_string)
                commands.append(command)
        else:
            if existing and existing.get('auth_string') != PARAM_TO_DEFAULT_KEYMAP.get('auth_string'):
                commands.append('no authentication')
    if commands and not group:
        # Sub-commands were produced without a group change; still need the
        # 'hsrp <group>' context line in front of them.
        commands.insert(0, 'hsrp {0}'.format(args['group']))
    version = delta.get('version', None)
    if version:
        if version == '2':
            command = 'hsrp version 2'
        elif version == '1':
            command = 'hsrp version 1'
        commands.insert(0, command)
        commands.insert(0, 'interface {0}'.format(interface))
    if commands:
        if not commands[0].startswith('interface'):
            commands.insert(0, 'interface {0}'.format(interface))
    return commands
def is_default(interface, module):
    """Report whether ``interface`` carries no configuration.

    Returns True when the running config is empty, False when it has
    configuration, and the string 'DNE' when the interface is invalid
    or the device response cannot be read.
    """
    command = 'show run interface {0}'.format(interface)
    try:
        body = run_commands(module, [command], check_rc=False)[0]
        if 'invalid' in body.lower():
            return 'DNE'
        # With no configuration, the final line of output is just the bare
        # 'interface ...' header itself.
        return body.split('\n')[-1].startswith('interface')
    except KeyError:
        return 'DNE'
def validate_config(body, vip, module):
    """Fail the module when the device output reports an invalid VIP."""
    joined = ''.join(body)
    if "invalid ip address" in joined.lower():
        module.fail_json(msg="Invalid VIP. Possible duplicate IP address.",
                         vip=vip)
def main():
argument_spec = dict(
group=dict(required=True, type='str'),
interface=dict(required=True),
version=dict(choices=['1', '2'], default='1', required=False),
priority=dict(type='str', required=False),
preempt=dict(type='str', choices=['disabled', 'enabled'], required=False),
vip=dict(type='str', required=False),
auth_type=dict(choices=['text', 'md5'], required=False),
auth_string=dict(type='str', required=False),
state=dict(choices=['absent', 'present'], required=False, default='present')
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
results = dict(changed=False, warnings=warnings)
interface = module.params['interface'].lower()
group = module.params['group']
version = module.params['version']
state = module.params['state']
priority = module.params['priority']
preempt = module.params['preempt']
vip = module.params['vip']
auth_type = module.params['auth_type']
auth_full_string = module.params['auth_string']
auth_enc = '0'
auth_string = None
if auth_full_string:
kstr = auth_full_string.split()
if len(kstr) == 2:
auth_enc = kstr[0]
auth_string = kstr[1]
elif len(kstr) == 1:
auth_string = kstr[0]
else:
module.fail_json(msg='Invalid auth_string')
if auth_enc != '0' and auth_enc != '7':
module.fail_json(msg='Invalid auth_string, only 0 or 7 allowed')
device_info = get_capabilities(module)
network_api = device_info.get('network_api', 'nxapi')
intf_type = get_interface_type(interface)
if (intf_type != 'ethernet' a |
from elasticsearch import helpers
from c2corg_api.scripts.migration.batch import Batch
from elasticsearch.helpers import BulkIndexError
import logging
log = logging.getLogger(__name__)
class ElasticBatch(Batch):
    """A batch implementation to do bulk inserts for ElasticSearch.
    Example usage:
        batch = ElasticBatch(client, 1000)
        with batch:
            ...
            batch.add({
                '_op_type': 'index',
                '_index': index_name,
                '_type': SearchDocument._doc_type.name,
                '_id': document_id,
                'title': 'Abc'
            })
    """

    def __init__(self, client, batch_size):
        super(ElasticBatch, self).__init__(client, batch_size)
        self.client = client
        # Buffered bulk actions, sent to ES once the batch size is exceeded.
        self.actions = []

    def add(self, action):
        """Queue a bulk action and flush if the batch has grown large enough."""
        self.actions.append(action)
        self.flush_or_not()

    def should_flush(self):
        # Flush once the buffer exceeds the configured batch size.
        return len(self.actions) > self.batch_size

    def flush(self):
        """Send all buffered actions to ElasticSearch and clear the buffer."""
        if self.actions:
            try:
                helpers.bulk(self.client, self.actions)
            except BulkIndexError:
                # when trying to delete a document that does not exist, an
                # error is raised, and other documents are not inserted
                log.warning(
                    'error sending bulk update to ElasticSearch',
                    exc_info=True)
            self.actions = []
|
from importlib import import_module

from django.apps import AppConfig as BaseAppConfig


class AppConfig(BaseAppConfig):
    """App configuration that registers signal receivers once Django is ready."""

    name = "gestioneide"

    def ready(self):
        # Imported for its side effects: connects the app's signal receivers.
        import_module("gestioneide.receivers")
|
from timeit import timeit
import pytest
from cfme import test_requirements
from cfme.base.ui import navigate_to
from cfme.services.myservice import MyService
from cfme.tests.test_db_migrate import download_and_migrate_db
from cfme.utils.conf import cfme_data
@pytest.fixture
def appliance_with_performance_db(temp_appliance_extended_db):
    """Yield an appliance restored from the 5.10 performance DB backup.

    Skips the test when the backup location is missing from cfme_data.
    """
    app = temp_appliance_extended_db
    try:
        db_backups = cfme_data['db_backups']
        performance_db = db_backups['performance_510']
    except KeyError as e:
        pytest.skip(f"Couldn't find the performance DB in the cfme_data: {e}")
    download_and_migrate_db(app, performance_db.url)
    yield app
@test_requirements.service
@pytest.mark.meta(automates=[1688937, 1686433])
def test_services_performance(appliance_with_performance_db):
    """
    Polarion:
        assignee: jhenner
        initialEstimate: 1/4h
        casecomponent: Services
    Bugzilla:
        1688937
        1686433
    """
    app = appliance_with_performance_db
    # Sanity check: the restored DB should contain exactly 50k services.
    assert 50000 == app.rest_api.collections.services.count
    my_service = MyService(app)
    # Timeit seems to accept callable as well as string of Python code on cPython.
    # Navigating to the (very large) service list must complete within 3 minutes.
    assert timeit(lambda: navigate_to(my_service, 'All', use_resetter=False), number=1) < 180
|
import pandas as pd
class Status:
    """Singleton channel for user-facing status messages.

    Messages are routed through a pluggable ``status_method`` sink (set via
    ``initialize_status``) and filtered by a verbosity threshold.
    """

    # Lazily-created shared instance (see get()).
    Instance = None

    @classmethod
    def add(cls, message, red=False, verbosity=1):
        """Emit ``message`` through the sink if ``verbosity`` passes the filter."""
        cls.get().add_message(message, red, verbosity)

    @classmethod
    def initialize_status(cls, status_method, verbosity=1):
        # Note: verbosity must be passed in (and not read directly from
        # preferences) to avoid a circular reference.
        status = cls.get()
        status.status_method = status_method
        status.verbosity = verbosity

    @classmethod
    def get(cls):
        """Return the shared instance, creating it on first use."""
        if cls.Instance is None:
            cls.Instance = Status()
        return cls.Instance

    def __init__(self):
        self.verbosity = 1

    def add_message(self, message, red, verbosity):
        """Render ``message`` to text and emit it line by line."""
        if verbosity <= self.verbosity:
            if isinstance(message, pd.DataFrame):
                # Avoid dumping huge frames; show only the head.
                text = str(message.head())
            else:
                text = str(message)
            for line in text.split("\n"):
                self.status_method(line, red)

    def status_method(self, message, red):
        # Default sink; normally replaced via initialize_status().
        # ``red`` is ignored here because plain print has no styling.
        print(message)
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
#    Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.dispatch import receiver
from assessments.business import scores_encodings_deadline
from base.signals import publisher
@receiver(publisher.compute_scores_encodings_deadlines)
def compute_scores_encodings_deadlines(sender, **kwargs):
    """Signal receiver: recompute deadlines for the given offer-year calendar."""
    scores_encodings_deadline.compute_deadline(kwargs['offer_year_calendar'])
@receiver(publisher.compute_student_score_encoding_deadline)
def compute_student_score_encoding_deadline(sender, **kwargs):
    """Signal receiver: recompute the deadline for one student's exam session."""
    scores_encodings_deadline.compute_deadline_by_student(kwargs['session_exam_deadline'])
@receiver(publisher.compute_all_scores_encodings_deadlines)
def compute_all_scores_encodings_deadlines(sender, **kwargs):
    """Signal receiver: recompute every deadline tied to the academic calendar."""
    scores_encodings_deadline.recompute_all_deadlines(kwargs['academic_calendar'])
|
from server import app

# Start the application's built-in development server.
app.run()
"""An event loop.
This event loop should handle both asynchronous App Engine RPC objects
(specifically urlfetch, memcache and datastore RPC objects) and arbitrary
callback functions with an optional time delay.
Normally, event loops are singleton objects, though there is no
enforcement of this requirement.
The API here is inspired by Monocle.
"""
import collections
import logging
import os
import threading
import time
from google.appengine.api.apiproxy_rpc import RPC
from google.appengine.datastore import datastore_rpc
from . import utils
logging_debug = utils.logging_debug
IDLE = RPC.IDLE
RUNNING = RPC.RUNNING
FINISHING = RPC.FINISHING
class EventLoop(object):
  """An event loop.

  Tracks four kinds of pending work: immediate callbacks (FIFO), idle
  callbacks (cyclic), time-delayed callbacks (sorted queue) and in-flight
  RPCs with optional completion callbacks.
  """

  def __init__(self):
    """Constructor."""
    self.current = collections.deque()  # FIFO list of (callback, args, kwds)
    self.idlers = collections.deque()  # Cyclic list of (callback, args, kwds)
    self.inactive = 0  # How many idlers in a row were no-ops
    self.queue = []  # Sorted list of (time, callback, args, kwds)
    self.rpcs = {}  # Map of rpc -> (callback, args, kwds)

  def insort_event_right(self, event, lo=0, hi=None):
    """Insert event in queue, and keep it sorted assuming queue is sorted.
    If event is already in queue, insert it to the right of the rightmost
    event (to keep FIFO order).
    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """
    if lo < 0:
      raise ValueError('lo must be non-negative')
    if hi is None:
      hi = len(self.queue)
    # Binary search biased right: equal keys land after existing entries.
    while lo < hi:
      mid = (lo + hi) // 2
      if event[0] < self.queue[mid][0]: hi = mid
      else: lo = mid + 1
    self.queue.insert(lo, event)

  # TODO: Rename to queue_callback?
  def queue_call(self, delay, callback, *args, **kwds):
    """Schedule a function call at a specific time in the future."""
    if delay is None:
      # No delay: run on the next pass through the immediate queue.
      self.current.append((callback, args, kwds))
      return
    if delay < 1e9:
      when = delay + time.time()
    else:
      # Times over a billion seconds are assumed to be absolute.
      when = delay
    self.insort_event_right((when, callback, args, kwds))

  def queue_rpc(self, rpc, callback=None, *args, **kwds):
    """Schedule an RPC with an optional callback.
    The caller must have previously sent the call to the service.
    The optional callback is called with the remaining arguments.
    NOTE: If the rpc is a MultiRpc, the callback will be called once
    for each sub-RPC. TODO: Is this a good idea?
    """
    if rpc is None:
      return
    if rpc.state not in (RUNNING, FINISHING):
      raise RuntimeError('rpc must be sent to service before queueing')
    if isinstance(rpc, datastore_rpc.MultiRpc):
      rpcs = rpc.rpcs
      if len(rpcs) > 1:
        # Don't call the callback until all sub-rpcs have completed.
        rpc.__done = False
        def help_multi_rpc_along(r=rpc, c=callback, a=args, k=kwds):
          if r.state == FINISHING and not r.__done:
            r.__done = True
            c(*a, **k)
            # TODO: And again, what about exceptions?
        callback = help_multi_rpc_along
        args = ()
        kwds = {}
    else:
      rpcs = [rpc]
    for rpc in rpcs:
      self.rpcs[rpc] = (callback, args, kwds)

  def add_idle(self, callback, *args, **kwds):
    """Add an idle callback.
    An idle callback can return True, False or None. These mean:
    - None: remove the callback (don't reschedule)
    - False: the callback did no work; reschedule later
    - True: the callback did some work; reschedule soon
    If the callback raises an exception, the traceback is logged and
    the callback is removed.
    """
    self.idlers.append((callback, args, kwds))

  def run_idle(self):
    """Run one of the idle callbacks.
    Returns:
      True if one was called, False if no idle callback was called.
    """
    if not self.idlers or self.inactive >= len(self.idlers):
      return False
    idler = self.idlers.popleft()
    callback, args, kwds = idler
    logging_debug('idler: %s', callback.__name__)
    res = callback(*args, **kwds)
    # See add_idle() for the meaning of the callback return value.
    if res is not None:
      if res:
        self.inactive = 0
      else:
        self.inactive += 1
      self.idlers.append(idler)
    else:
      logging_debug('idler %s removed', callback.__name__)
    return True

  def run0(self):
    """Run one item (a callback or an RPC wait_any).
    Returns:
      A time to sleep if something happened (may be 0);
      None if all queues are empty.
    """
    if self.current:
      self.inactive = 0
      callback, args, kwds = self.current.popleft()
      logging_debug('nowevent: %s', callback.__name__)
      callback(*args, **kwds)
      return 0
    if self.run_idle():
      return 0
    delay = None
    if self.queue:
      delay = self.queue[0][0] - time.time()
      if delay <= 0:
        self.inactive = 0
        _, callback, args, kwds = self.queue.pop(0)
        logging_debug('event: %s', callback.__name__)
        callback(*args, **kwds)
        # TODO: What if it raises an exception?
        return 0
    if self.rpcs:
      self.inactive = 0
      rpc = datastore_rpc.MultiRpc.wait_any(self.rpcs)
      if rpc is not None:
        logging_debug('rpc: %s.%s', rpc.service, rpc.method)
        # Yes, wait_any() may return None even for a non-empty argument.
        # But no, it won't ever return an RPC not in its argument.
        if rpc not in self.rpcs:
          raise RuntimeError('rpc %r was not given to wait_any as a choice %r' %
                             (rpc, self.rpcs))
        callback, args, kwds = self.rpcs[rpc]
        del self.rpcs[rpc]
        if callback is not None:
          callback(*args, **kwds)
          # TODO: Again, what about exceptions?
      return 0
    return delay

  def run1(self):
    """Run one item (a callback or an RPC wait_any) or sleep.
    Returns:
      True if something happened; False if all queues are empty.
    """
    delay = self.run0()
    if delay is None:
      return False
    if delay > 0:
      time.sleep(delay)
    return True

  def run(self):
    """Run until there's nothing left to do."""
    # TODO: A way to stop running before the queue is empty.
    self.inactive = 0
    while True:
      if not self.run1():
        break
class _State(threading.local):
  # Thread-local storage: each thread sees (and caches) its own event loop.
  event_loop = None
_EVENT_LOOP_KEY = '__EVENT_LOOP__'
_state = _State()
def get_event_loop():
  """Return a EventLoop instance.
  A new instance is created for each new HTTP request. We determine
  that we're in a new request by inspecting os.environ, which is reset
  at the start of each request. Also, each thread gets its own loop.
  """
  # TODO: Make sure this works with the multithreaded Python 2.7 runtime.
  ev = None
  # The sentinel key only survives in os.environ for the current request;
  # its absence means a fresh request, so any cached loop is discarded.
  if os.getenv(_EVENT_LOOP_KEY):
    ev = _state.event_loop
  if ev is None:
    ev = EventLoop()
    _state.event_loop = ev
    os.environ[_EVENT_LOOP_KEY] = '1'
  return ev
def queue_call(*args, **kwds):
  """Module-level convenience: schedule a call on this thread's event loop."""
  get_event_loop().queue_call(*args, **kwds)
def queue_rpc(rpc, callback=None, *args, **kwds):
  """Module-level convenience: queue an RPC on this thread's event loop."""
  get_event_loop().queue_rpc(rpc, callback, *args, **kwds)
def add_idle(callback, *args, **kwds):
  """Module-level convenience: add an idle callback to this thread's loop."""
  get_event_loop().add_idle(callback, *args, **kwds)
def run():
  """Module-level convenience: run this thread's loop until it is drained."""
  get_event_loop().run()
def run1():
  """Module-level convenience: run (or sleep for) one item on this thread's loop."""
  return get_event_loop().run1()
def run0():
  """Module-level convenience: run one item on this thread's loop, if any."""
  return get_event_loop().run0()
|
# See http://www.python.org/dev/peps/pep-0386/ for version numbering, especially NormalizedVersion
from distutils import version
# Parsed package version. Note: this assignment deliberately shadows the
# imported distutils submodule with the LooseVersion instance.
version = version.LooseVersion('0.7.1-dev')
|
from stage import *
import os
use_gpu = os.environ.get('GNUMPY_USE_GPU', 'yes') == 'yes'
if use_gpu:
import gnumpy as gpu
import gnumpy as gnp
class Map(Stage):
    """Fully-connected layer stage: Y = activeFn(X . W).

    A bias column of ones is appended to X when ``bias`` is True. Weights
    are lazily initialized on the first forward pass so the input dimension
    can be inferred from the data. Runs on GPU (gnumpy) when available.
    """
    def __init__(self,
                 outputDim,
                 activeFn,
                 inputNames=None,
                 initRange=1.0,
                 bias=True,
                 biasInitConst=-1.0,
                 initSeed=2,
                 needInit=True,
                 initWeights=0,
                 initType='zeroMean',
                 learningRate=0.0,
                 learningRateAnnealConst=0.0,
                 momentum=0.0,
                 deltaMomentum=0.0,
                 weightClip=0.0,
                 gradientClip=0.0,
                 weightRegConst=0.0,
                 outputdEdX=True,
                 defaultValue=0.0,
                 gpu=use_gpu,
                 name=None):
        Stage.__init__(self,
                       name=name,
                       inputNames=inputNames,
                       outputDim=outputDim,
                       defaultValue=defaultValue,
                       learningRate=learningRate,
                       learningRateAnnealConst=learningRateAnnealConst,
                       momentum=momentum,
                       deltaMomentum=deltaMomentum,
                       weightClip=weightClip,
                       gradientClip=gradientClip,
                       weightRegConst=weightRegConst,
                       gpu=gpu,
                       outputdEdX=outputdEdX)
        self.bias = bias
        self.activeFn = activeFn
        self.inputDim = None
        self.random = np.random.RandomState(initSeed)
        if not needInit:
            # Caller supplied pre-trained weights.
            if self.gpu:
                self.W = gnp.as_garray(initWeights)
            else:
                self.W = initWeights
        else:
            # Lazy initialize the weights until the first data arrives
            self.W = None
            self.initRange = initRange
            self.biasInitConst = biasInitConst
            self.initType = initType
        self.X = 0
        self.Y = 0

    def initWeights(self):
        """Randomly initialize W once the input dimension is known."""
        if self.initType == 'zeroMean':
            r0 = -self.initRange/2.0
            r1 = self.initRange/2.0
        elif self.initType == 'positive':
            r0 = 0.0
            r1 = self.initRange
        else:
            raise Exception('Unknown initialization type: ' + self.initType)
        if self.bias:
            if self.biasInitConst >= 0.0:
                # Weights random, bias row set to a constant.
                self.W = np.concatenate((self.random.uniform(
                    r0, r1, (self.inputDim, self.outputDim)),
                    np.ones((1, self.outputDim)) * self.biasInitConst), axis=0)
            else:
                # Bias row initialized randomly along with the weights.
                self.W = self.random.uniform(
                    r0, r1, (self.inputDim + 1, self.outputDim))
        else:
            self.W = self.random.uniform(
                -self.initRange/2.0, self.initRange/2.0, (self.inputDim, self.outputDim))
        if self.gpu:
            self.W = gpu.as_garray(self.W.astype('float32'))

    def forward(self, X):
        """Compute Y = activeFn(X . W); caches X and Y for backward()."""
        if self.inputDim is None: self.inputDim = X.shape[-1]
        if self.W is None: self.initWeights()
        if self.bias:
            # Append a ones column so the last weight row acts as the bias.
            self.X = np.concatenate((X, np.ones((X.shape[0], 1), dtype=X.dtype)), axis=-1)
        else:
            self.X = X
        if self.gpu:
            self.X = gpu.as_garray(self.X.astype('float32'))
            Z = gpu.dot(self.X, self.W)
            Z = Z.as_numpy_array(dtype='float32')
            self.Y = self.activeFn.forward(Z)
        else:
            Z = np.dot(self.X, self.W)
            self.Y = self.activeFn.forward(Z)
        return self.Y

    def backward(self, dEdY):
        """Backpropagate: store dEdW and return dEdX (or None)."""
        dEdZ = self.activeFn.backward(dEdY, self.Y, 0)
        if self.gpu:
            gdEdZ = gpu.as_garray(dEdZ.astype('float32'))
            self.dEdW = gpu.dot(self.X.transpose(), gdEdZ)
            if self.bias:
                # Drop the bias row when propagating to the input.
                dEdX = gpu.dot(gdEdZ, self.W[:-1, :].transpose())
            else:
                dEdX = gpu.dot(gdEdZ, self.W.transpose())
            dEdX = gpu.as_numpy_array(dEdX)
        else:
            self.dEdW = np.dot(self.X.transpose(), dEdZ)
            if self.bias:
                dEdX = np.dot(dEdZ, self.W[:-1, :].transpose())
            else:
                dEdX = np.dot(dEdZ, self.W.transpose())
        return dEdX if self.outputdEdX else None
|
ok=None):
"""
The standard query interface.
TODO: rename use_cache to use_qcache
Checks a big cache for qaid2_cm. If cache miss, tries to load each cm
individually. On an individual cache miss, it preforms the query.
Args:
ibs (ibeis.IBEISController) : ibeis control object
qaid_list (list): query annotation ids
daid_list (list): database annotation ids
use_cache (bool):
use_bigcache (bool):
Returns:
qaid2_cm (dict): dict of QueryResult objects
CommandLine:
python -m ibeis.algo.hots.match_chips4 --test-submit_query_request
Examples:
>>> # SLOW_DOCTEST
>>> from ibeis.algo.hots.match_chips4 import * # NOQA
>>> import ibeis
>>> qaid_list = [1]
>>> daid_list = [1, 2, 3, 4, 5]
>>> use_bigcache = True
>>> use_cache = True
>>> ibs = ibeis.opendb(db='testdb1')
>>> qreq_ = ibs.new_query_request(qaid_list, daid_list, cfgdict={}, verbose=True)
>>> qaid2_cm = submit_query_request(ibs, qaid_list, daid_list, use_cache, use_bigcache, qreq_=qreq_)
"""
# Get flag defaults if necessary
if verbose is None:
verbose = pipeline.VERB_PIPELINE
if use_cache is None:
use_cache = USE_CACHE
if save_qcache is None:
save_qcache = SAVE_CACHE
if use_bigcache is None:
use_bigcache = USE_BIGCACHE
# Create new query request object to store temporary state
if verbose:
#print('[mc4] --- Submit QueryRequest_ --- ')
ut.colorprint('[mc4] --- Submit QueryRequest_ --- ', 'darkyellow')
assert qreq_ is not None, 'query request must be prebuilt'
qreq_.prog_hook = prog_hook
# --- BIG CACHE ---
# Do not use bigcache single queries
use_bigcache_ = (use_bigcache and use_cache and
len(qaid_list) > MIN_BIGCACHE_BUNDLE)
if (use_bigcache_ or save_qcache) and len(qaid_list) > MIN_BIGCACHE_BUNDLE:
bc_dpath, bc_fname, bc_cfgstr = qreq_.get_bigcache_info()
if use_bigcache_:
# Try and load directly from a big cache
try:
qaid2_cm = ut.load_cache(bc_dpath, bc_fname, bc_cfgstr)
cm_list = [qaid2_cm[qaid] for qaid in qaid_list]
except (IOError, AttributeError):
pass
else:
return cm_list
# ------------
# Execute query request
qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose=verbose)
# ------------
if save_qcache and len(qaid_list) > MIN_BIGCACHE_BUNDLE:
ut.save_cache(bc_dpath, bc_fname, bc_cfgstr, qaid2_cm)
cm_list = [qaid2_cm[qaid] for qaid in qaid_list]
return cm_list
@profile
def execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose=True, batch_size=None):
"""
Args:
ibs (ibeis.IBEISController):
qreq_ (ibeis.QueryRequest):
use_cache (bool):
Returns:
qaid2_cm
CommandLine:
python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:0
python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:1
python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:2
python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:3
Example0:
>>> # SLOW_DOCTEST
>>> from ibeis.algo.hots.match_chips4 import * # NOQA
>>> cfgdict1 = dict(codename='vsmany', sv_on=True)
>>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
>>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4)
>>> ibs = qreq_.ibs
>>> use_cache, save_qcache, verbose = False, False, True
>>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
>>> print(qaid2_cm)
Example1:
>>> # SLOW_DOCTEST
>>> from ibeis.algo.hots.match_chips4 import * # NOQA
>>> cfgdict1 = dict(codename='vsone', sv_on=True)
>>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
>>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4)
>>> ibs = qreq_.ibs
>>> use_cache, save_qcache, verbose = False, False, True
>>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
>>> print(qaid2_cm)
Example1:
>>> # SLOW_DOCTEST
>>> # TEST SAVE
>>> from ibeis.algo.hots.match_chips4 import * # NOQA
>>> import ibeis
>>> cfgdict1 = dict(codename='vsmany', sv_on=True)
>>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
>>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4)
>>> ibs = qreq_.ibs
>>> use_cache, save_qcache, verbose = False, True, True
>>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
>>> print(qaid2_cm)
Example2:
>>> # SLOW_DOCTEST
>>> # TEST LOAD
>>> from ibeis.algo.hots.match_chips4 import * # NOQA
>>> import ibeis
>>> cfgdict1 = dict(codename='vsmany', sv_on=True)
>>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
>>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4)
>>> ibs = qreq_.ibs
>>> use_cache, save_qcache, verbose = True, True, True
>>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
>>> print(qaid2_cm)
Example2:
>>> # ENABLE_DOCTEST
>>> # TEST PARTIAL HIT
>>> from ibeis.algo.hots.match_chips4 import * # NOQA
>>> import ibeis
>>> cfgdict1 = dict(codename='vsmany', sv_on=False, prescore_method='csum')
>>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
>>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3,
>>> 4, 5, 6,
>>> 7, 8, 9])
>>> ibs = qreq_.ibs
>>> use_cache, save_qcache, verbose = False, True, False
>>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache,
>>> save_qcache, verbose,
>>> batch_size=3)
>>> cm = qaid2_cm[1]
>>> ut.delete(cm.get_fpath(qreq_))
>>> cm = qaid2_cm[4]
>>> ut.delete(cm.get_fpath(qreq_))
>>> cm = qaid2_cm[5]
>>> ut.delete(cm.get_fpath(qreq_))
>>> cm = qaid2_cm[6]
>>> ut.delete(cm.get_fpath(qreq_))
>>> print('Re-execute')
>>> qaid2_cm_ = execute_query_and_save_L1(ibs, qreq_, use_cache,
>>> save_qcache, verbose,
>>> batch_size=3)
>>> assert all([qaid2_cm_[qaid] == qaid2_cm[qaid] for qaid in qreq_.qaids])
>>> [ut.delete(fpath) for fpath in qreq_.get_chipmatch_fpaths(qreq_.qaids)]
Ignore:
other = cm_ = qaid2_cm_[qaid]
cm = qaid2_cm[qaid]
"""
if use_cache:
if ut.VERBOSE:
print('[mc4] cache-query is on')
if ut.DEBUG2:
# sanity check
qreq_.assert_self(i | bs)
# Try loading as many cached results as possible
qaid2_cm_hit = {}
external_qaids = qreq_.qaids
fpath_list = qreq_.get_chipmatch_fpaths(external_qaids)
exists_flags = [exists(fpath) for fpath in fpath_list]
qaids_hit = ut.compress(external_qaids, exists_flags)
fpaths_hit = ut.compress(fpath_list, exists_flags)
| fpath_iter = ut.ProgressIter(
fpaths_hit, nTotal=len(fpaths_hit), enabled=len(fpaths_hit) > 1,
lbl='loading cache hits', adjust=True, freq=1)
try:
cm_hit_list = [
chip_match.ChipMatch.load_from_fpath(fpath, verbose=False)
for fpath in fpath_iter
]
assert all([qaid == cm.qaid for qaid, cm in zip(qaids_hit, cm_hit_list)]), (
'inconsistent')
qaid2_cm_hit |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-12 11:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration: reconciles two branch histories, no schema changes."""

    dependencies = [
        ('ossuo', '0021_merge'),
        ('ossuo', '0022_signupformpage_signupformpagebullet_signupformpagelogo_signupformpagequote_signupformpageresponse'),
    ]

    operations = [
    ]
|
ors(self, initiator_group_type,
host_os_type, initiator_list):
"""Creates igroup and adds initiators."""
igroup_name = na_utils.OPENSTACK_PREFIX + six.text_type(uuid.uuid4())
self.zapi_client.create_igroup(igroup_name, initiator_group_type,
host_os_type)
for initiator in initiator_list:
self.zapi_client.add_igroup_initiator(igroup_name, initiator)
return igroup_name
def _add_lun_to_table(self, lun):
"""Adds LUN to cache table."""
if not isinstance(lun, NetAppLun):
msg = _("Object is not a NetApp LUN.")
raise exception.VolumeBackendAPIException(data=msg)
self.lun_table[lun.name] = lun
def _get_lun_from_table(self, name):
"""Gets LUN from cache table.
Refreshes cache if LUN not found in cache.
"""
lun = self.lun_table.get(name)
if lun is None:
lun_list = self.zapi_client.get_lun_list()
self._extract_and_populate_luns(lun_list)
lun = self.lun_table.get(name)
if lun is None:
raise exception.VolumeNotFound(volume_id=name)
return lun
    def _clone_lun(self, name, new_name, space_reserved='true',
                   qos_policy_group_name=None, src_block=0, dest_block=0,
                   block_count=0):
        """Clone LUN with the given name to the new name."""
        # Abstract hook: concrete driver subclasses issue the actual clone call.
        raise NotImplementedError()
def _get_lun_attr(self, name, attr):
"""Get the LUN attribute if found else None."""
try:
attr = getattr(self._get_lun_from_table(name), attr)
return attr
except exception.VolumeNotFound as e:
LOG.error(_LE("Message: %s"), e.msg)
except Exception as e:
LOG.error(_LE("Error getting LUN attribute. Exception: %s"), e)
return None
    def _create_lun_meta(self, lun):
        # Abstract hook: subclasses build the per-LUN metadata dict.
        raise NotImplementedError()
    def _get_fc_target_wwpns(self, include_partner=True):
        # Abstract hook: subclasses return the Fibre Channel target WWPNs.
        raise NotImplementedError()
    def get_volume_stats(self, refresh=False):
        """Get volume stats.
        If 'refresh' is True, run update the stats first.
        """
        if refresh:
            self._update_volume_stats()
        # Otherwise return the stats cached by the last refresh.
        return self._stats
    def _update_volume_stats(self):
        # Abstract hook: subclasses refresh self._stats from the backend.
        raise NotImplementedError()
    def extend_volume(self, volume, new_size, qos_policy_group_name=None):
        """Extend an existing volume to the new size."""
        name = volume['name']
        lun = self._get_lun_from_table(name)
        path = lun.metadata['Path']
        curr_size_bytes = six.text_type(lun.size)
        new_size_bytes = six.text_type(int(new_size) * units.Gi)
        # Reused by clone scenarios.
        # Hence comparing the stored size.
        if curr_size_bytes != new_size_bytes:
            lun_geometry = self.zapi_client.get_lun_geometry(path)
            # Prefer an in-place resize when the LUN geometry allows it;
            # otherwise fall back to a sub-clone resize (clone + swap).
            if (lun_geometry and lun_geometry.get("max_resize")
                    and int(lun_geometry.get("max_resize")) >=
                    int(new_size_bytes)):
                self.zapi_client.do_direct_resize(path, new_size_bytes)
            else:
                self._do_sub_clone_resize(
                    path, new_size_bytes,
                    qos_policy_group_name=qos_policy_group_name)
            # Keep the cache in sync with the backend's new size.
            self.lun_table[name].size = new_size_bytes
        else:
            LOG.info(_LI("No need to extend volume %s"
                         " as it is already the requested new size."), name)
def _get_vol_option(self, volume_name, option_name):
"""Get the value for the volume option."""
value = None
options = self.zapi_client.get_volume_options(volume_name)
for opt in options:
if opt.get_child_content('name') == option_name:
value = opt.get_child_content('value')
break
return value
    def _do_sub_clone_resize(self, path, new_size_bytes,
                             qos_policy_group_name=None):
        """Does sub LUN clone after verification.
        Clones the block ranges and swaps
        the LUNs also deletes older LUN
        after a successful clone.
        """
        # path has the form /vol/<volume>/<lun> (see new_path below), so
        # seg[-1] is the LUN name and seg[2] the containing volume.
        seg = path.split("/")
        LOG.info(_LI("Resizing LUN %s to new size using clone operation."),
                 seg[-1])
        name = seg[-1]
        vol_name = seg[2]
        lun = self._get_lun_from_table(name)
        metadata = lun.metadata
        # Clone-based resize is not supported on compressed volumes.
        compression = self._get_vol_option(vol_name, 'compression')
        if compression == "on":
            msg = _('%s cannot be resized using clone operation'
                    ' as it is hosted on compressed volume')
            raise exception.VolumeBackendAPIException(data=msg % name)
        else:
            block_count = self._get_lun_block_count(path)
            if block_count == 0:
                msg = _('%s cannot be resized using clone operation'
                        ' as it contains no blocks.')
                raise exception.VolumeBackendAPIException(data=msg % name)
            new_lun = 'new-%s' % name
            self.zapi_client.create_lun(
                vol_name, new_lun, new_size_bytes, metadata,
                qos_policy_group_name=qos_policy_group_name)
            try:
                self._clone_lun(name, new_lun, block_count=block_count,
                                qos_policy_group_name=qos_policy_group_name)
                self._post_sub_clone_resize(path)
            except Exception:
                # Clean up the new LUN before re-raising the original error.
                with excutils.save_and_reraise_exception():
                    new_path = '/vol/%s/%s' % (vol_name, new_lun)
                    self.zapi_client.destroy_lun(new_path)
def _post_sub_clone_resize(self, path):
"""Try post sub clone resize in a transactional manner."""
st_tm_mv, st_nw_mv, st_del_old = None, None, None
seg = path.split("/")
LOG.info(_LI("Post clone resize LUN %s"), seg[-1])
new_lun = 'new-%s' % (seg[-1])
tmp_lun = 'tmp-%s' % (seg[-1])
tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun)
new_path = "/vol/%s/%s" % (seg[2], new_lun)
try:
st_tm_mv = self.zapi_client.move_lun(path, tmp_path)
st_nw_mv = self.zapi_client.move_lun(new_path, path)
st_del_old = self.zapi_client.destroy_lun(tmp_path)
except Exception as e:
if st_tm_mv is None:
msg = _("Failure staging LUN %s to tmp.")
raise exception.Vo | lumeBackendAPIException(data=msg % (seg[-1]))
else:
if st_nw_mv is None:
self.zapi_client.move_lun(tmp_path, path)
msg = _("Failure moving new cloned LUN to %s.")
rai | se exception.VolumeBackendAPIException(
data=msg % (seg[-1]))
elif st_del_old is None:
LOG.error(_LE("Failure deleting staged tmp LUN %s."),
tmp_lun)
else:
LOG.error(_LE("Unknown exception in"
" post clone resize LUN %s."), seg[-1])
LOG.error(_LE("Exception details: %s"), e)
def _get_lun_block_count(self, path):
"""Gets block counts for the LUN."""
LOG.debug("Getting LUN block count.")
lun_infos = self.zapi_client.get_lun_by_args(path=path)
if not lun_infos:
seg = path.split('/')
msg = _('Failure getting LUN info for %s.')
raise exception.VolumeBackendAPIException(data=msg % seg[-1])
lun_info = lun_infos[-1]
bs = int(lun_info.get_child_content('block-size'))
ls = int(lun_info.get_child_content('size'))
block_count = ls / bs
return block_count
    def _check_volume_type_for_lun(self, volume, lun, existing_ref,
                                   extra_specs):
        """Checks if lun satifies the volume type."""
        # Abstract hook used by manage_existing; implemented by subclasses.
        raise NotImplementedError()
def manage_existing(self, volume, existing_ref):
"""Brings an existing storage object under Cinde |
from datetime import datetime
# pokemon lottery
# Sentinel "last run" timestamp set just before the Unix epoch so the first
# lottery check always sees an elapsed interval.
global_lotto_last_run = datetime(1969, 12, 31, 23, 59, 59, 999999)
lotto_new_run = None
print_speed_base = 0.03  # delay between printed characters
""" Graphics Notes:
The screen is 15x11 squares in dimension. Each square is 16x16 pixels. Total
screen is 240x176. Since I want to at least double the scale, a good starting
size would be 480x352, with each square being 32x32.
The background image needs to be called at scale, then moved instead of the
Player. Then obstacles and objects will be rendered as clear (or not) tiles
above the background layer.
"""
"""
General notes:
When slower than an opposing wild pokemon, there is approximately a 50% chance
you'll escape.
The only reason that the start button fly away Trainer trick works is because
enemy trainers face south for one frame before turning back and starting the
fight sequence. Obviously, the trick does not work with trainers that are
already facing south.
Three Steps: After a battle, wild or Trainer, a wild battle cannot be triggered
until the third step from their original location.
"""
|
"""Support for ESPHome binary sensors."""
import logging
from typing import TYPE_CHECKING, Optional
from homeassistant.components.binary_sensor import BinarySensorDevice
from . import EsphomeEntity, platform_async_setup_entry
if TYPE_CHECKING:
# pylint: disable=unused-import
from aioesphomeapi import BinarySensorInfo, BinarySensorState # noqa
DEPENDENCIES = ['esphome']
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up ESPHome binary sensors based on a config entry."""
    # pylint: disable=redefined-outer-name
    from aioesphomeapi import BinarySensorInfo, BinarySensorState  # noqa
    await platform_async_setup_entry(
        hass, entry, async_add_entities,
        component_key='binary_sensor',
        info_type=BinarySensorInfo, entity_type=EsphomeBinarySensor,
        state_type=BinarySensorState
    )
class EsphomeBinarySensor(EsphomeEntity, BinarySensorDevice):
    """A binary sensor implementation for ESPHome."""

    @property
    def _static_info(self) -> 'BinarySensorInfo':
        return super()._static_info

    @property
    def _state(self) -> Optional['BinarySensorState']:
        return super()._state

    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        if self._static_info.is_status_binary_sensor:
            # Status binary sensors indicated connected state.
            # So in their case what's usually _availability_ is now state
            return self._entry_data.available
        state = self._state
        return None if state is None else state.state

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return self._static_info.device_class

    @property
    def available(self):
        """Return True if entity is available."""
        # Status sensors are always available; they *report* connectivity.
        if self._static_info.is_status_binary_sensor:
            return True
        return super().available
|
# -*- coding: utf-8 -*-
import json
import pytest
from keyard_client import KeyardClient, testutils
@pytest.mark.skipif(not testutils.keyard_is_available(), reason="keyard is missing")
class TestKeyardClient(object):
    """Integration tests run against a locally reachable keyard server."""

    def setup_method(self, method):
        # Fresh client per test, pointed at the local keyard instance.
        self.client = KeyardClient('http://127.0.0.1:8000')

    def test_register(self):
        response = self.client.register('app', '0.1', 'localhost:8002')
        assert response is True

    def test_health_check(self):
        response = self.client.health_check('app', '0.1', 'localhost:8002')
        assert response is True

    def test_unregister(self):
        response = self.client.unregister('app', '0.1', 'localhost:8002')
        assert response is True

    def test_get_service(self):
        # NOTE(review): assumes the server already knows 'app' at
        # localhost:8080 -- confirm fixture data exists before running.
        result = {"result": ['localhost:8080']}
        value = self.client.get_service('app')
        assert result == value
|
# -*- mode: python; fill-column: 100; comment-column: 100; -*-
import unittest
import sys
import os
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import base_test
class ForwardTest(base_test.WebDriverBaseTest):
    """Checks that history forward restores the page navigated away from."""

    # Get a static page that must be the same upon refresh
    def test_forward(self):
        self.driver.get(
            self.webserver.where_is('navigation/res/forwardStart.html'))
        self.driver.get(
            self.webserver.where_is('navigation/res/forwardNext.html'))
        nextbody = self.driver.find_element_by_css("body").get_text()
        self.driver.go_back()
        currbody = self.driver.find_element_by_css("body").get_text()
        self.assertNotEqual(nextbody, currbody)
        self.driver.go_forward()
        currbody = self.driver.find_element_by_css("body").get_text()
        self.assertEqual(nextbody, currbody)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import os
from cpenv import api, paths
from cpenv.cli import core
from cpenv.module import parse_module_path
class Create(core.CLI):
    '''Create a new Module.'''

    def setup_parser(self, parser):
        parser.add_argument(
            'where',
            help='Path to new module',
        )

    def run(self, args):
        where = paths.normalize(args.where)
        # Refuse to scaffold over an existing directory.
        if os.path.isdir(where):
            core.echo()
            core.echo('Error: Can not create module in existing directory.')
            core.exit(1)

        # Derive sensible defaults from the target path itself.
        default_name, default_version = parse_module_path(where)

        core.echo()
        core.echo('This command will guide you through creating a new module.')
        core.echo()
        name = core.prompt('   Module Name [%s]: ' % default_name)
        version = core.prompt('   Version [%s]: ' % default_version.string)
        description = core.prompt('   Description []: ')
        author = core.prompt('   Author []: ')
        email = core.prompt('   Email []: ')
        core.echo()
        core.echo('- Creating your new Module...', end='')
        module = api.create(
            where=where,
            name=name or default_name,
            version=version or default_version.string,
            description=description,
            author=author,
            email=email,
        )
        core.echo('OK!')
        core.echo()
        core.echo('  ' + module.path)
        core.echo()
        core.echo('Steps you might take before publishing...')
        core.echo()
        core.echo('  - Include binaries your module depends on')
        core.echo('  - Edit the module.yml file')
        core.echo('    - Add variables to the environment section')
        core.echo('    - Add other modules to the requires section')
        core.echo('  - Add python hooks like post_activate')
        core.echo()
|
limit: str = '',
season_id: Optional[Union[str, int]] = None) -> Sequence[Person]:
person_query = query.person_query()
season_join = query.season_join() if season_id else ''
season_query = query.season_query(season_id, 'season.id')
sql = f"""
SELECT
p.id,
{person_query} AS name,
p.mtgo_username,
p.tappedout_username,
p.mtggoldfish_username,
p.discord_id,
p.elo,
p.locale,
SUM(1) AS num_decks,
SUM(dc.wins) AS wins,
SUM(dc.losses) AS losses,
SUM(dc.draws) AS draws,
SUM(wins - losses) AS record,
SUM(CASE WHEN dc.wins >= 5 AND dc.losses = 0 AND d.source_id IN (SELECT id FROM source WHERE name = 'League') THEN 1 ELSE 0 END) AS perfect_runs,
SUM(CASE WHEN d.finish = 1 THEN 1 ELSE 0 END) AS tournament_wins,
SUM(CASE WHEN d.finish <= 8 THEN 1 ELSE 0 END) AS tournament_top8s,
IFNULL(ROUND((SUM(dc.wins) / NULLIF(SUM(dc.wins + dc.losses), 0)) * 100, 1), '') AS win_percent,
SUM(DISTINCT CASE WHEN d.competition_id IS NOT NULL THEN 1 ELSE 0 END) AS num_competitions
FROM
person AS p
LEFT JOIN
deck AS d ON d.person_id = p.id
LEFT JOIN
deck_cache AS dc ON d.id = dc.deck_id
{season_join}
WHERE
({where}) AND ({season_query})
GROUP BY
p.id
ORDER BY
{order_by}
{limit}
"""
people = [Person(r) for r in db().select(sql)]
for p in people:
p.season_id = season_id
return people
def seasons_active(person_id: int) -> List[int]:
    """Return ids of all seasons in which this person has a deck."""
    # person_id is typed as int, so the f-string interpolation is safe here.
    sql = f"""
        SELECT
            DISTINCT season.id
        FROM
            deck AS d
        {query.season_join()}
        WHERE
            d.person_id = {person_id}
        ORDER BY
            season.id
    """
    return db().values(sql)
def preaggregate() -> None:
    """Rebuild the preaggregated achievement and head-to-head tables."""
    achievements.preaggregate_achievements()
    preaggregate_head_to_head()
def preaggregate_head_to_head() -> None:
    """(Re)build the _head_to_head_stats table from deck_match data.

    One row per (season, person, opponent) pair with match/win/loss/draw
    counts; winners are decided by comparing games won in the same match.
    """
    table = '_head_to_head_stats'
    sql = """
        CREATE TABLE IF NOT EXISTS _new{table} (
            person_id INT NOT NULL,
            opponent_id INT NOT NULL,
            season_id INT NOT NULL,
            num_matches INT NOT NULL,
            wins INT NOT NULL,
            losses INT NOT NULL,
            draws INT NOT NULL,
            PRIMARY KEY (season_id, person_id, opponent_id),
            FOREIGN KEY (season_id) REFERENCES season (id) ON UPDATE CASCADE ON DELETE CASCADE,
            FOREIGN KEY (person_id) REFERENCES person (id) ON UPDATE CASCADE ON DELETE CASCADE,
            FOREIGN KEY (opponent_id) REFERENCES person (id) ON UPDATE CASCADE ON DELETE CASCADE
        ) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci AS
        SELECT
            p.id AS person_id,
            opp.id AS opponent_id,
            season.id AS season_id,
            COUNT(p.id) AS num_matches,
            SUM(CASE WHEN dm.games > odm.games THEN 1 ELSE 0 END) AS wins,
            SUM(CASE WHEN dm.games < odm.games THEN 1 ELSE 0 END) AS losses,
            SUM(CASE WHEN dm.games = odm.games THEN 1 ELSE 0 END) AS draws
        FROM
            person AS p
        INNER JOIN
            deck AS d ON p.id = d.person_id
        INNER JOIN
            deck_match AS dm ON dm.deck_id = d.id
        INNER JOIN
            deck_match AS odm ON dm.match_id = odm.match_id AND dm.deck_id <> IFNULL(odm.deck_id, 0)
        INNER JOIN
            deck AS od ON odm.deck_id = od.id
        INNER JOIN
            person AS opp ON od.person_id = opp.id
        {season_join}
        GROUP BY
            p.id,
            opp.id,
            season.id
    """.format(table=table, season_join=query.season_join())
    preaggregation.preaggregate(table, sql)
@retry_after_calling(achievements.preaggregate_achievements)
def set_achievements(people: List[Person], season_id: Optional[int] = None) -> None:
    """Attach achievement counts and details to each person, in place."""
    people_by_id = {person.id: person for person in people}
    sql = achievements.load_query(people_by_id, season_id)
    results = [Container(r) for r in db().select(sql)]
    for result in results:
        # Every non-id column is an achievement counter; count those earned.
        people_by_id[result['id']].num_achievements = len([k for k, v in result.items() if k != 'id' and v > 0])
        people_by_id[result['id']].achievements = result
        people_by_id[result['id']].achievements.pop('id')
@retry_after_calling(preaggregate_head_to_head)
def load_head_to_head_count(person_id: int, where: str = 'TRUE', season_id: Optional[int] = None) -> int:
    """Count head-to-head rows (one per opponent per season) for a person."""
    season_query = query.season_query(season_id)
    sql = f'SELECT COUNT(*) FROM _head_to_head_stats AS hths INNER JOIN person AS opp ON hths.opponent_id = opp.id WHERE ({where}) AND (hths.person_id = {person_id}) AND ({season_query})'
    return db().value(sql)
@retry_after_calling(preaggregate_head_to_head)
def load_head_to_head(person_id: int, where: str = 'TRUE', order_by: str = 'num_matches DESC, record DESC, win_percent DESC, wins DESC, opp_mtgo_username', limit: str = '', season_id: Optional[int] = None) -> Sequence[Container]:
    """Load per-opponent head-to-head records for a person, summed across
    the selected season(s)."""
    season_query = query.season_query(season_id)
    sql = f"""
        SELECT
            hths.person_id AS id,
            LOWER(opp.mtgo_username) AS opp_mtgo_username,
            SUM(num_matches) AS num_matches,
            SUM(wins) - SUM(losses) AS record,
            SUM(wins) AS wins,
            SUM(losses) AS losses,
            SUM(draws) AS draws,
            IFNULL(ROUND((SUM(wins) / NULLIF(SUM(wins + losses), 0)) * 100, 1), '') AS win_percent
        FROM
            _head_to_head_stats AS hths
        INNER JOIN
            person AS opp ON hths.opponent_id = opp.id
        WHERE
            ({where}) AND (hths.person_id = {person_id}) AND ({season_query})
        GROUP BY
            hths.person_id,
            hths.opponent_id
        ORDER BY
            {order_by}
            {limit}
    """
    return [Container(r) for r in db().select(sql)]
def associate(d: deck.Deck, discord_id: int) -> int:
    """Link the owner of deck `d` to the given Discord account."""
    person_id = db().value('SELECT person_id FROM deck WHERE id = %s', [d.id], fail_on_missing=True)
    sql = 'UPDATE person SET discord_id = %s WHERE id = %s'
    return db().execute(sql, [discord_id, person_id])
def is_allowed_to_retire(deck_id: Optional[int], discord_id: Optional[int]) -> bool:
    """Return True if the Discord user may retire the given deck.

    Unknown users (no discord_id, or no matching person) are allowed
    through; known users may only retire their own decks.
    """
    if not deck_id:
        return False
    if not discord_id:
        return True
    person = maybe_load_person_by_discord_id(discord_id)
    if person is None:
        return True
    # `d` rather than `deck` to avoid shadowing the imported deck module.
    return any(int(deck_id) == d.id for d in person.decks)
def get_or_insert_person_id(mtgo_username: Optional[str], tappedout_username: Optional[str], mtggoldfish_username: Optional[str]) -> int:
    """Find a person by any of their usernames (case-insensitive) or create
    a new person row, returning the person id either way."""
    sql = 'SELECT id FROM person WHERE LOWER(mtgo_username) = LOWER(%s) OR LOWER(tappedout_username) = LOWER(%s) OR LOWER(mtggoldfish_username) = LOWER(%s)'
    person_id = db().value(sql, [mtgo_username, tappedout_username, mtggoldfish_username])
    if person_id:
        return person_id
    sql = 'INSERT INTO person (mtgo_username, tappedout_username, mtggoldfish_username) VALUES (%s, %s, %s)'
    return db().insert(sql, [mtgo_username, tappedout_username, mtggoldfish_username])
def load_aliases() -> List[Container]:
    """Return all person aliases with the owning person's mtgo_username."""
    sql = """
        SELECT
            pa.person_id,
            pa.alias,
            p.mtgo_username
        FROM
            person_alias AS pa
        INNER JOIN
            person AS p ON p.id = pa.person_id
    """
    return [Container(r) for r in db().select(sql)]
def add_alias(person_id: int, alias: str) -> None:
    """Record `alias` for a person inside a transaction.

    If the alias currently exists as a standalone person, their decks are
    moved to `person_id` and the duplicate person row is deleted first.
    """
    db().begin('add_alias')
    try:
        p = load_person_by_mtgo_username(alias)
        db().execute('UPDATE deck SET person_id = %s WHERE person_id = %s', [person_id, p.id])
        db().execute('DELETE FROM person WHERE id = %s', [p.id])
    except DoesNotExistException:
        # No existing person with that username -- nothing to merge.
        pass
    db().execute('INSERT INTO person_alias (person_id, alias) VALUES (%s, %s)', [person_id, alias])
    db().commit('add_alias')
def load_notes(person_id: int = None) -> List[Container]:
where = f'subject_id = {person_id}' if person_id else 'TRUE'
sq |
#!/usr/bin/python
# (c) 2018 Jim Hawkins. MIT licensed, see https://opensource.org/licenses/MIT
# Part of Blender Driver, see https://github.com/sjjhsjjh/blender-driver
"""Python module for Blender Driver demonstration application.
Abstract base class for demonstration applications.
This module can only be used from within the Blender Game Engine."""
# Exit if run other than as a module.
if __name__ == '__main__':
print(__doc__)
raise SystemExit(1)
# Standard library imports, in alphabetic order.
#
# Module for command line switches.
# https://docs.python.org/3.5/library/argparse.html
# The import isn't needed because this class uses the base class to get an
# object.
# import argparse
#
# Module for levelled logging messages.
# Tutorial is here: https://docs.python.org/3.5/howto/logging.html
# Reference is here: https://docs.python.org/3.5/library/logging.html
from logging import DEBUG, INFO, WARNING, ERROR, log
#
# Blender library imports, in alphabetic order.
#
# Local imports.
#
# Blender Driver application with threads and locks.
import blender_driver.application.thread
# Diagnostic print to show when it's imported. Only printed if all its own
# imports run OK. Joining on '"' quotes the module name in the output.
print('"'.join(('Application module ', __name__, '.')))
class Application(blender_driver.application.thread.Application):
    """Abstract base class for Blender Driver demonstration applications.

    Adds a banner text object to the data and game scenes and terminates on
    any key press (ESC is left to crash BGE, per the instructions text).
    """
    _instructions = "Press ESC to crash BGE, or any other key to terminate."
    _bannerName = 'banner'
    _bannerObject = None

    @property
    def bannerObject(self):
        return self._bannerObject

    # Overridden.
    def data_initialise(self):
        #
        # Do common initialisation for subclasses.
        self._bannerObject = self.data_add_banner()
        self.dontDeletes.append(self._bannerName)
        #
        # Run the base class method.
        super().data_initialise()

    def data_add_banner(self):
        """Create the banner text object in the data scene."""
        banner = "\n".join(
            ("Blender Driver" , self.applicationName , self._instructions))
        return self.bpyutils.set_up_object(
            self._bannerName, {'text':banner, 'physicsType':'NO_COLLISION'
                               , 'location': (-5, -8, 3)})

    # Overridden.
    def game_initialise(self):
        super().game_initialise()
        self.mainLock.acquire()
        try:
            self._bannerObject = self.game_add_text(self._bannerName)
            # NOTE(review): logging uses %-style interpolation; these "{}"
            # placeholders rely on blender_driver's log wrapper -- confirm.
            log(DEBUG, "Game scene objects {}\nArguments: {}\nSettings: {}"
                , self.gameScene.objects, vars(self.arguments), self.settings)
            print(self._instructions)
        finally:
            self.mainLock.release()

    # Overridden.
    def game_keyboard(self, *args):
        #
        # Formally, run the base class method. Actually, it's a pass.
        super().game_keyboard(*args)
        #
        # Default is to terminate on any key press.
        log(DEBUG, "Terminating.")
        self.game_terminate()

    def tick_skipped(self):
        log(WARNING, "Skipped ticks: {:d}.", self.skippedTicks)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Create the UserActivateKey model (table tcms_user_activate_keys)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='UserActivateKey',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('activation_key', models.CharField(max_length=40, null=True, blank=True)),
                ('key_expires', models.DateTimeField(null=True, blank=True)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'tcms_user_activate_keys',
            },
        ),
    ]
n the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys # NOQA
import profile
import ConfigParser
import pygame
from pygame import *
from static_functions import *
import camera as camera
import planet as planet
from orbitable import GCD_Singleton, SoundSystem_Singleton
from helldebris_collection import HellDebrisCollection
from team import Team
from simplestats import SimpleStats
wwidth = 1024
wheight = 768
p1_name = "Player1"
p2_name = "Player2"
config = ConfigParser.RawConfigParser()
config.read('profile.cfg')
# NOTE(review): config.read() silently succeeds when profile.cfg is missing,
# so the getint/get calls below would raise NoSectionError and the defaults
# above are never used as fallbacks -- confirm whether a try/except was
# intended here.
wwidth = config.getint("Screen", "width")
wheight = config.getint("Screen", "height")
p1_name = config.get("Player", "P1_name")
p2_name = config.get("Player", "P2_name")
display = (wwidth, wheight)
clock = pygame.time.Clock()
class Profile():
def __init__(self, is_player2_present=False,
is_player1_ai=False,
is_player2_ai=False,
player1_team="Green",
player2_team="Red",
greenteamsize=8,
redteamsiz | e=8,
debris_min=6,
debris_max=20,
draw_planet=False,
name=""):
self.p2 = is_player2_present
self.p1_ai = is_player1_ai
| self.p2_ai = is_player2_ai
self.p1_team = player1_team
self.p2_team = player2_team
mingreen = int(self.p1_team == "Green") + int(self.p2_team == "Green" and self.p2)
minred = int(self.p1_team == "Red") + int(self.p2_team == "Red" and self.p2)
self.green = max(mingreen, greenteamsize)
self.red = max(minred, redteamsize)
self.hellmin = debris_min
self.hellmax = debris_max
self.draw_planet = draw_planet
self.name = name
self.ERAD = 1000
self.MAXRAD = 1700
self.ORBHEIGHT = 350
def game_init(self):
pygame.init()
self.PROFILESTEP = False
self.UPDAE_GAME = pygame.USEREVENT + 1
pygame.time.set_timer(self.UPDAE_GAME, GAME_SPEED)
self.screen = pygame.display.set_mode(display)
if self.p2:
self.bg1 = Surface((wwidth, wheight/2))
self.bg2 = Surface((wwidth, wheight/2))
self.cam2 = camera.Camera(self.bg2, first_in_order=False)
self.bgs = (self.bg1, self.bg2)
else:
self.bg1 = Surface((wwidth, wheight))
self.bgs = (self.bg1,)
self.cam1 = camera.Camera(self.bg1)
if self.name == "":
pygame.display.set_caption("Orbotor")
else:
pygame.display.set_caption("Orbotor - %s" % self.name)
self.pl = planet.Planet(self.bgs, self.ERAD, self.MAXRAD, "planet.png" if self.draw_planet else None)
GCD_Singleton.set_planet(self.pl)
self.soundsys = SoundSystem_Singleton
self.spawn = (self.ERAD+self.ORBHEIGHT, 0)
self.team1 = Team("Green", "#009900", self.green, self.spawn, self.pl)
self.team2 = Team("Red", "#880000", self.red, self.spawn, self.pl)
self.team1.set_opponent_team(self.team2)
self.team2.set_opponent_team(self.team1)
self.hell = HellDebrisCollection(self.spawn, self.pl, self.hellmin, self.hellmax)
if self.p1_team == "Green":
self.player1 = self.team1.guys[0]
if self.p2:
if self.p2_team == "Green":
self.player2 = self.team1.guys[1]
elif self.p2_team == "Red":
self.player2 = self.team2.guys[0]
else:
raise Exception("unknown team for p2: %s" % self.p2_team)
elif self.p1_team == "Red":
self.player1 = team2.guys[0]
if self.p2:
if self.p2_team == "Green":
self.player2 = self.team1.guys[0]
elif self.p2_team == "Red":
self.player2 = self.team2.guys[1]
else:
raise Exception("unknown team for p2: %s" % self.p2_team)
else:
raise Exception("unknown team for p1: %s" % self.p1_team)
self.player1.is_ai = self.p1_ai
if self.p1_ai:
self.player1.set_name("[bot] %s" % p1_name)
else:
self.player1.set_name("%s" % p1_name)
if self.p2:
self.player2.is_ai = self.p2_ai
if self.p2_ai:
self.player2.set_name("[bot] %s" % p2_name)
else:
self.player2.set_name("%s" % p2_name)
self.stats1 = SimpleStats(self.team1, self.team2, self.player1)
if self.p2:
self.stats2 = SimpleStats(self.team1, self.team2, self.player2)
def game_key_listen(self, event):
if event.type == KEYDOWN and event.key == K_F1:
self.PROFILESTEP = True
self.game_step()
elif event.type == KEYDOWN and event.key == K_F2:
print len(GCD_Singleton.orbitables)
elif event.type == KEYDOWN and event.key == K_F5:
self.soundsys.switch()
if not self.p1_ai:
self.player1.catch_kb_event(event)
if self.p2 and not self.p2_ai:
self.player2.catch_kb_event_hotseat(event)
self.cam1.keys_listen(event)
if self.p2:
self.cam2.keys_listen_hotseat(event)
def game_step(self):
if self.PROFILESTEP:
profile.runctx("self._step()", globals(), {"self": self})
else:
self._step()
def _step(self):
self.team2.step() # todo faster better stronger
self.team1.step()
self.hell.step()
self.player1.focus(self.cam1)
self.cam1.step()
if self.p2:
self.player2.focus(self.cam2)
self.cam2.step()
GCD_Singleton.step()
def game_draw(self):
if self.PROFILESTEP:
profile.runctx("self._draw()", globals(), {"self": self})
self.PROFILESTEP = False
else:
self._draw()
def _draw(self):
clock.tick(60)
tup = [self.pl, ] + self.team1.objectslist() + self.team2.objectslist()\
+ self.hell.objectslist() + self.pl.cities
tup = tuple(tup)
self.cam1.translate_coords(*tup)
if self.p2:
self.cam2.translate_coords(*tup)
self.stats1.draw(self.bg1)
self.screen.blit(self.bg1, (0, 0))
if self.p2:
self.stats2.draw(self.bg2)
self.screen.blit(self.bg2, (0, wheight/2))
pygame.display.update()
def DefaultProfile(draw_planet, hell):
    """Single-player profile; `hell` is a (debris_min, debris_max) pair."""
    return Profile(draw_planet=draw_planet, debris_min=hell[0], debris_max=hell[1])
def HotseatProfile(draw_planet, hell):
    """Two human players on opposing teams (split screen)."""
    return Profile(is_player2_present=True, draw_planet=draw_planet,
                   debris_min=hell[0], debris_max=hell[1])
def RivalProfile(draw_planet, hell):
    """Human player 1 versus an AI-controlled player 2."""
    return Profile(is_player2_present=True, is_player2_ai=True, draw_planet=draw_planet,
                   debris_min=hell[0], debris_max=hell[1])
def CoopProfile(draw_planet, hell):
    """Two human players together on the Green team."""
    return Profile(is_player2_present=True, player2_team="Green", draw_planet=draw_planet,
                   debris_min=hell[0], debris_max=hell[1])
def SpectateProfile(draw_planet, hell):
    """Watch an AI-controlled player 1 play."""
    return Profile(is_player1_ai=True, draw_planet=draw_planet,
                   debris_min=hell[0], debris_max=hell[1])
def SurvivalProfile(draw_planet):
    """One player alone against heavy debris (no red team)."""
    return Profile(draw_planet=draw_planet, debris_min=35, debris_max=70,
                   greenteamsize=1, redteamsize=0)
def CoopSurvivalProfile(draw_planet):
return Profile(is_player2_present=True, pl |
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
class ImageBuilderException(Exception):
    """Error raised while building an image."""
class ImageRateLimitedException(Exception):
    """Rate Limited request"""
class ImageSpecificationException(Exception):
    """Error raised for an invalid image specification."""
class ImageUploaderException(Exception):
    """Error raised while uploading an image."""
class ImageUploaderThreadException(Exception):
    """Conflict during thread processing"""
class ImageNotFoundException(Exception):
    """Error raised when a requested image cannot be found."""
|
sig)
async def terminate(self, force=False):
'''This forces a child process to terminate. It starts nicely with
SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
returns True if the child was terminated. This returns False if the
child could not be terminated. '''
if os.name == 'nt':
signals = [signal.SIGINT, signal.SIGTERM]
else:
signals = [signal.SIGHUP, signal.SIGCONT, signal.SIGINT,
signal.SIGTERM]
loop = IOLoop.current()
def sleep(): return asyncio.sleep(self.ptyproc.delayafterterminate)
if not self.ptyproc.isalive():
return True
try:
for sig in signals:
self.kill(sig)
await sleep()
if not self.ptyproc.isalive():
return True
if force:
self.kill(signal.SIGKILL)
await sleep()
if not self.ptyproc.isalive():
return True
else:
return False
return False
except OSError:
# I think there are kernel timing issues that sometimes cause
# this to happen. I think isalive() reports True, but the
# process is dead to the kernel.
# Make one last attempt to see if the kernel is up to date.
await sleep()
if not self.ptyproc.isalive():
return True
else:
return False
def _update_removing(target, changes):
"""Like dict.update(), but remove keys where the value is None.
"""
for k, v in changes.items():
if v is None:
target.pop(k, None)
else:
target[k] = v
class TermManagerBase(object):
    """Base class for a terminal manager."""
    def __init__(self, shell_command, server_url="", term_settings=None,
                 extra_env=None, ioloop=None):
        """
        :param shell_command: argv list used to launch each terminal.
        :param server_url: exported to terminals via the environment.
        :param term_settings: optional dict of terminal settings.
        :param extra_env: extra environment entries (None values remove).
        :param ioloop: deprecated and ignored.
        """
        self.shell_command = shell_command
        self.server_url = server_url
        # Use a None sentinel instead of a mutable default argument so
        # instances never share one settings dict.
        self.term_settings = term_settings if term_settings is not None else {}
        self.extra_env = extra_env
        self.log = logging.getLogger(__name__)

        self.ptys_by_fd = {}

        if ioloop is not None:
            warnings.warn(
                f"Setting {self.__class__.__name__}.ioloop is deprecated and ignored",
                DeprecationWarning,
                stacklevel=2,
            )

    def make_term_env(self, height=25, width=80, winheight=0, winwidth=0, **kwargs):
        """Build the environment variables for the process in the terminal."""
        env = os.environ.copy()
        env["TERM"] = self.term_settings.get("type", DEFAULT_TERM_TYPE)
        dimensions = "%dx%d" % (width, height)
        if winwidth and winheight:
            dimensions += ";%dx%d" % (winwidth, winheight)
        env[ENV_PREFIX+"DIMENSIONS"] = dimensions
        env["COLUMNS"] = str(width)
        env["LINES"] = str(height)

        if self.server_url:
            env[ENV_PREFIX+"URL"] = self.server_url

        if self.extra_env:
            _update_removing(env, self.extra_env)

        return env

    def new_terminal(self, **kwargs):
        """Make a new terminal, return a :class:`PtyWithClients` instance."""
        options = self.term_settings.copy()
        options['shell_command'] = self.shell_command
        options.update(kwargs)
        argv = options['shell_command']
        env = self.make_term_env(**options)
        cwd = options.get('cwd', None)
        return PtyWithClients(argv, env, cwd)

    def start_reading(self, ptywclients):
        """Connect a terminal to the tornado event loop to read data from it."""
        fd = ptywclients.ptyproc.fd
        self.ptys_by_fd[fd] = ptywclients
        loop = IOLoop.current()
        loop.add_handler(fd, self.pty_read, loop.READ)

    def on_eof(self, ptywclients):
        """Called when the pty has closed.
        """
        # Stop trying to read from that terminal
        fd = ptywclients.ptyproc.fd
        self.log.info("EOF on FD %d; stopping reading", fd)
        del self.ptys_by_fd[fd]
        IOLoop.current().remove_handler(fd)

        # This closes the fd, and should result in the process being reaped.
        ptywclients.ptyproc.close()

    def pty_read(self, fd, events=None):
        """Called by the event loop when there is pty data ready to read."""
        # Double-check the fd really is readable to avoid a blocking read.
        r, _, _ = select.select([fd], [], [], .1)
        if not r:
            return
        ptywclients = self.ptys_by_fd[fd]
        try:
            s = ptywclients.ptyproc.read(65536)
            client_list = ptywclients.clients
            ptywclients.read_buffer.append(s)
            if not client_list:
                # No one to consume our output: buffer it.
                ptywclients.preopen_buffer.append(s)
                return
            for client in ptywclients.clients:
                client.on_pty_read(s)
        except EOFError:
            self.on_eof(ptywclients)
            for client in ptywclients.clients:
                client.on_pty_died()

    def get_terminal(self, url_component=None):
        """Override in a subclass to give a terminal to a new websocket connection

        The :class:`TermSocket` handler works with zero or one URL components
        (capturing groups in the URL spec regex). If it receives one, it is
        passed as the ``url_component`` parameter; otherwise, this is None.
        """
        raise NotImplementedError

    def client_disconnected(self, websocket):
        """Override this to e.g. kill terminals on client disconnection.
        """
        pass

    async def shutdown(self):
        await self.kill_all()

    async def kill_all(self):
        """Terminate every managed terminal, waiting for all of them."""
        futures = []
        for term in self.ptys_by_fd.values():
            futures.append(term.terminate(force=True))
        # wait for futures to finish
        if futures:
            await asyncio.gather(*futures)
class SingleTermManager(TermManagerBase):
    """All connections to the websocket share a common terminal."""
    def __init__(self, **kwargs):
        # Modern zero-argument super(); the file already requires Python 3
        # (it uses f-strings).
        super().__init__(**kwargs)
        self.terminal = None

    def get_terminal(self, url_component=None):
        """Return the shared terminal, creating it on first use."""
        if self.terminal is None:
            self.terminal = self.new_terminal()
            self.start_reading(self.terminal)
        return self.terminal

    async def kill_all(self):
        await super().kill_all()
        self.terminal = None
class MaxTerminalsReached(Exception):
    """Raised when creating another terminal would exceed the limit."""
    def __init__(self, max_terminals):
        # Pass through to Exception so .args and pickling behave normally.
        super().__init__(max_terminals)
        self.max_terminals = max_terminals

    def __str__(self):
        return "Cannot create more than %d terminals" % self.max_terminals
class UniqueTermManager(TermManagerBase):
    """Give each websocket a unique terminal to use."""
    def __init__(self, max_terminals=None, **kwargs):
        super().__init__(**kwargs)
        self.max_terminals = max_terminals

    def get_terminal(self, url_component=None):
        """Create a fresh terminal, enforcing the optional instance limit."""
        if self.max_terminals and len(self.ptys_by_fd) >= self.max_terminals:
            raise MaxTerminalsReached(self.max_terminals)

        term = self.new_terminal()
        self.start_reading(term)
        return term

    def client_disconnected(self, websocket):
        """Send terminal SIGHUP when client disconnects."""
        self.log.info("Websocket closed, sending SIGHUP to terminal.")
        if websocket.terminal:
            if os.name == 'nt':
                websocket.terminal.kill()
                # Immediately call the pty reader to process
                # the eof and free up space
                self.pty_read(websocket.terminal.ptyproc.fd)
                return
            websocket.terminal.killpg(signal.SIGHUP)
class NamedTermManager(TermManagerBase):
"""Share terminals between websockets connected to the same endpoint.
"""
def __init__(self, max_terminals=None, **kwargs):
super(NamedTermManager, self).__init__(**kwargs)
self.max_terminals = max_terminals
self.terminals = {}
def get_terminal(self, term_name):
assert term_name is not None
if t |
from django.utils.encoding import smart_unicode
def ssn_check_digit(value):
    """Calculate the Italian social security number (codice fiscale)
    check digit from the first 15 characters of *value*.

    Raises ValueError when a character outside [0-9A-Z] is encountered.
    """
    # Weight of each character when it occupies an even position
    # (1-based counting, i.e. a 0-based odd index).
    even_values = {
        '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
        '9': 9, 'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4, 'F': 5, 'G': 6, 'H': 7,
        'I': 8, 'J': 9, 'K': 10, 'L': 11, 'M': 12, 'N': 13, 'O': 14, 'P': 15,
        'Q': 16, 'R': 17, 'S': 18, 'T': 19, 'U': 20, 'V': 21, 'W': 22, 'X': 23,
        'Y': 24, 'Z': 25
    }
    # Weight of each character when it occupies an odd position.
    odd_values = {
        '0': 1, '1': 0, '2': 5, '3': 7, '4': 9, '5': 13, '6': 15, '7': 17,
        '8': 19, '9': 21, 'A': 1, 'B': 0, 'C': 5, 'D': 7, 'E': 9, 'F': 13,
        'G': 15, 'H': 17, 'I': 19, 'J': 21, 'K': 2, 'L': 4, 'M': 18, 'N': 20,
        'O': 11, 'P': 3, 'Q': 6, 'R': 8, 'S': 12, 'T': 14, 'U': 16, 'V': 10,
        'W': 22, 'X': 25, 'Y': 24, 'Z': 23
    }
    # Check characters 'A'..'Z'; the total modulo 26 indexes into this list.
    check_digits = [chr(code) for code in range(65, 91)]
    ssn = value.upper()
    total = 0
    for i in range(15):
        char = ssn[i]
        # 0-based even index == 1-based odd position, hence the odd table.
        table = odd_values if i % 2 == 0 else even_values
        try:
            total += table[char]
        except KeyError:
            raise ValueError("Character '%s' is not allowed." % char)
    return check_digits[total % 26]
def vat_number_check_digit(vat_number):
    """Calculate the Italian VAT number check digit (Luhn-style scheme)."""
    digits = smart_unicode(vat_number).zfill(10)
    # Digits in odd positions (0-based even indices) count at face value.
    total = sum(int(digits[i]) for i in range(0, 10, 2))
    # Digits in even positions are doubled and the digits of the product
    # are summed (quotient + remainder of division by 10).
    for i in range(1, 11, 2):
        quotient, remainder = divmod(int(digits[i]) * 2, 10)
        total += quotient + remainder
    return smart_unicode((10 - total % 10) % 10)
|
# Simple interactive text adventure.
# Ported from Python 2 (print statements, raw_input) to Python 3 syntax;
# string typos ("There`s", "chees cake") fixed along the way.
print("You enter a dark room with two doors. Do you go through door #1 or door #2?")
door = input("> ")

if door == "1":
    print("There's a giant bear here eating a cheese cake. What do you do?")
    print("1. Take the cake.")
    print("2. Scream at the bear.")

    bear = input("> ")
    if bear == "1":
        print("The bear eats your face off. Good job!")
    elif bear == "2":
        print("The bear eats your legs off. Good job!")
    else:
        print("Well, doing %s is probably better. Bear runs away." % bear)
elif door == "2":
    print("You stare into the endless abyss at Cthulhu's retina.")
    print("1. Blueberries.")
    print("2. Yellow jacket clothespins.")
    print("3. Understanding revolvers yelling melodies.")

    insanity = input("> ")
    if insanity == "1" or insanity == "2":
        print("Your body survives powered by a mind of jello. Good job!")
    else:
        print("The insanity rots your eyes into a pool of muck. Good job!")
else:
    print("You stumble around and fall on a knife and die. Good job!")
# Copyright 2013 Virantha Ekanayake All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides capability to search PDFs and file to a specific folder based
on keywords
"""
from sets import Set
import sys, os
import re
import logging
import shutil
from PyPDF2 import PdfFileReader
from pypdfocr_filer import PyFiler
from pypdfocr_filer_dirs import PyFilerDirs
class PyPdfFiler(object):
    """Match text extracted from a PDF against per-folder keyword lists and
    move the PDF into the first matching folder via the supplied filer.
    """

    def __init__(self, filer):
        assert isinstance(filer, PyFiler)
        self.filer = filer # Must be a subclass of PyFiler

        # Whether to fall back on filename for matching keywords against
        # if there is no match in the text
        self.file_using_filename = False

    def iter_pdf_page_text(self, filename):
        """Yield the extracted text of each page of *filename*, reduced to
        ascii with newlines flattened to spaces."""
        self.filename = filename
        reader = PdfFileReader(filename)
        logging.info("pdf scanner found %d pages in %s" % (reader.getNumPages(), filename))
        for pgnum in range(reader.getNumPages()):
            text = reader.getPage(pgnum).extractText()
            text = text.encode('ascii', 'ignore')
            text = text.replace('\n', ' ')
            yield text

    def _get_matching_folder(self, pdfText):
        """Return the first folder whose keyword list matches *pdfText*
        (case-insensitive substring search), or None when nothing matches."""
        searchText = pdfText.lower()
        for folder, strings in self.filer.folder_targets.items():
            for s in strings:
                logging.debug("Checking string %s" % s)
                if s in searchText:
                    logging.info("Matched keyword '%s'" % s)
                    return folder
        # No match found, so return
        return None

    def file_original(self, original_filename):
        """Delegate archiving of the original file to the filer."""
        return self.filer.file_original(original_filename)

    def move_to_matching_folder(self, filename):
        """Scan *filename* page by page for a keyword match and move it to the
        matching folder; optionally fall back to matching the filename itself.
        """
        # BUGFIX: tgt_folder was unbound (NameError) when the PDF had zero
        # pages and the loop body never ran.
        tgt_folder = None
        for page_text in self.iter_pdf_page_text(filename):
            tgt_folder = self._get_matching_folder(page_text)
            if tgt_folder: break # Stop searching through pdf pages as soon as we find a match

        if not tgt_folder and self.file_using_filename:
            tgt_folder = self._get_matching_folder(filename)

        tgt_file = self.filer.move_to_matching_folder(filename, tgt_folder)
        return tgt_file
if __name__ == '__main__':
    # Ad-hoc smoke test: dump the extracted text of each page of a sample scan.
    p = PyPdfFiler(PyFilerDirs())
    for page_text in p.iter_pdf_page_text("scan_ocr.pdf"):
        print (page_text)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os.path
from twisted.enterprise import adbapi
from calvin.runtime.south.async import async
from calvin.utilities.calvinlogger import get_logger
from calvin.runtime.south.calvinsys import base_calvinsys_object
_log = get_logger(__name__)
class PersistentBuffer(base_calvinsys_object.BaseCalvinsysObject):
"""
Asynchronous (using twisted adbapi) SQLite-based implementation of persistant queue
Based on the following (from sqlite.org):
1) If no ROWID is specified on the insert [...] then an appropriate ROWID is created automatically.
2) The usual algorithm is to give the newly created row a ROWID that is one larger than the largest
ROWID in the table prior to the insert.
3) If the table is initially empty, then a ROWID of 1 is used.
4) If the largest ROWID is equal to the largest possible integer (9223372036854775807) then the
database engine starts picking positive candidate ROWIDs at random until it finds one
that is not previously used.
5) The normal ROWID selection [...] will generate monotonically increasing unique ROWIDs as long
as you never use the maximum ROWID value and you never delete the entry in the table with the largest ROWID.
Since we are implementing a FIFO queue, 1) should ensure there is a row id, 2) & 5) that the ordering is correct
and 3) that the rowid is reset whenever the queue is emptied, so 4) should never happen.
"""
init_schema = {
"type": "object",
"properties": {
"buffer_id": {
"description": "Buffer identifier, should be unique - will be used as part of filename",
"type": "string",
"pattern": "^[a-zA-Z0-9]+"
},
"reporting": {
"description": "Log some statistics on buffer at given interval (in seconds)",
"type": "number"
}
},
"required": ["buffer_id"],
"description": "Initialize buffer"
}
can_write_schema = {
"description": "Returns True if buffer ready for write, otherwise False",
"type": "boolean"
}
write_schema = {
"description": "Push data to buffer; always a list of json serializable items",
"type": "array"
}
can_read_schema = {
"description": "Returns True if data can be read, otherwise False",
"type": "boolean"
}
read_schema = {
"description": "Pop data from buffer, always a list",
"type": "array"
}
def init(self, buffer_id, reporting=None, *args, **kwargs):
self.db_name = buffer_id
self.db_path = os.path.join(os.path.abspath(os.path.curdir), self.db_name + ".sq3")
self.db = adbapi.ConnectionPool('sqlite3', self.db_path, check_same_thread=False)
self._pushed_values = 0
self._popped_values = 0
self._latest_timestamp = 0
self._value = None
self._changed = None
self._statlogging = None
def ready(length):
def log_stats():
_log.info("{} : pushed {}, popped {} (latest timestamp: {}) ".format(self.db_name, self._pushed_values, self._popped_values, self._latest_timestamp))
self._statlogging.reset()
self._changed = True # Something has changed, need to check if readable
# install timer to report on pushing/popping
if reporting:
self._statlogging= async.DelayedCall(reporting, log_stats)
self.scheduler_wakeup()
def create(db):
# Create simple queue table. Using TEXT unless there is a reason not to.
db.execute("CREATE TABLE IF NOT EXISTS queue (value BLOB)")
def error(e):
_log.error("Error initializing queue {}: {}".format(self.db_name, e))
q = self.db.runInteraction(create)
q.addCallback(ready)
q.addErrback(error)
def can_write( | self):
# Can always write after init, meaning changed is no longer None
return self._changed is not None
def write(self, value):
def error(e):
_log.warning("Error during write: {}".format(e))
done() # Call done to wake scheduler, not sure this is a good idea
def done(unused=None):
self._changed = Tru | e # Let can_read know there may be something new to read
self.scheduler_wakeup()
self._pushed_values += len(value)
try:
value = json.dumps(value) # Convert to string for sqlite
except TypeError:
_log.error("Value is not json serializable")
else:
q = self.db.runOperation("INSERT INTO queue (value) VALUES (?)", (value, ))
q.addCallback(done)
q.addErrback(error)
def can_read(self):
def error(e):
_log.warning("Error during read: {}".format(e))
done()
def done(value=None):
if value:
self._changed = True # alert can_read that the database has changed
self._value = value
self.scheduler_wakeup()
def pop(db):
limit = 2 # <- Not empirically/theoretically tested
db.execute("SELECT value FROM queue ORDER BY rowid LIMIT (?)", (limit,))
value = db.fetchall() # a list of (value, ) tuples, or None
if value:
# pop values (i.e. delete rows with len(value) lowest row ids)
db.execute("DELETE FROM queue WHERE rowid in (SELECT rowid FROM queue ORDER BY rowid LIMIT (?))",
(len(value),))
return value
if self._value :
# There is a value to read
return True
elif self._changed :
# Something has changed, try to pop a value
self._changed = False
q = self.db.runInteraction(pop)
q.addCallback(done)
q.addErrback(error)
# Nothing to do
return False
def read(self):
value = []
while self._value:
# get an item from list of replies
dbtuple = self._value.pop(0)
# the item is a tuple, get the first value
dbvalue = dbtuple[0]
# convert value from string and return it
try:
value.extend(json.loads(dbvalue))
except ValueError:
_log.error("No value decoded - possibly corrupt file")
self._popped_values += len(value)
return value
def close(self):
if self._statlogging:
self._statlogging.cancel()
def done(response):
# A count response; [(cnt,)]
if response[0][0] == 0:
try:
os.remove(self.db_path)
except:
# Failed for some reason
_log.warning("Could not remove db file {}".format(self._dbpath))
q = self.db.runQuery("SELECT COUNT(*) from queue")
q.addCallback(done)
self.db.close()
|
"""Treadmill app configurator daemon, subscribes to eventmgr events.
"""
import click
from .. import appcfgmgr
def init():
    """Top level command handler."""
    # Inner function name 'top' becomes the click command name.
    @click.command()
    @click.option('--approot', type=click.Path(exists=True),
                  envvar='TREADMILL_APPROOT', required=True)
    def top(approot):
        """Starts appcfgmgr process."""
        # Run the app-configuration manager rooted at --approot.
        mgr = appcfgmgr.AppCfgMgr(root=approot)
        mgr.run()
    return top
|
"""Gets the specified peering for the express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        circuit_name: str,
        peering_name: str,
        peering_parameters: "_models.ExpressRouteCircuitPeering",
        **kwargs: Any
    ) -> "_models.ExpressRouteCircuitPeering":
        """Issue the initial PUT of the create-or-update long-running operation.

        Returns the deserialized ExpressRouteCircuitPeering from a 200 (update)
        or 201 (create) response; any other status raises HttpResponseError.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-02-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the peering model as the request body and send the PUT.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(peering_parameters, 'ExpressRouteCircuitPeering')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 = updated existing peering, 201 = created; payload shape is identical.
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}  # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCircuitPeering",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitPeering"]:
"""Creates or updates a peering in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update express route circuit
peering operation.
:type peering_parameters: ~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitPeering or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
|
#!/usr/bin/env python
import time
import json
import random
import re
from bottle import route, hook, response, run, static_file
@route('/')
def index():
    """Serve the landing page."""
    return static_file('index.html', root='.')

# The original file defined every static handler below as ``index_css``; each
# redefinition shadowed the previous one (only the last was reachable by name).
# Distinct names keep the registered routes identical and remove the shadowing.
@route('/maptweets.js')
def maptweets_js():
    return static_file('maptweets.js', root='.')

@route('/cross.jpg')
def cross_jpg():
    return static_file('cross.jpg', root='.')

@route('/light.png')
def light_png():
    return static_file('light.png', root='.')

@route('/event.png')
def event_png():
    return static_file('event.png', root='.')

# NOTE(review): debug=True on a publicly bound port leaks tracebacks to
# clients — confirm this is intended before deploying.
run(host='0.0.0.0', port=80, server='tornado', debug=True)
|
from __future__ import print_function, division
from sympy.core.numbers import nan
from .function import Function
class Mod(Function):
    """Represents a modulo operation on symbolic expressions.
    Receives two arguments, dividend p and divisor q.
    The convention used is the same as Python's: the remainder always has the
    same sign as the divisor.
    Examples
    ========
    >>> from sympy.abc import x, y
    >>> x**2 % y
    Mod(x**2, y)
    >>> _.subs({x: 5, y: 6})
    1
    """
    @classmethod
    def eval(cls, p, q):
        # Local imports avoid circular-import problems at module load time.
        from sympy.core.add import Add
        from sympy.core.mul import Mul
        from sympy.core.singleton import S
        from sympy.core.exprtools import gcd_terms
        from sympy.polys.polytools import gcd
        def doit(p, q):
            """Try to return p % q if both are numbers or +/-p is known
            to be less than or equal q.
            """
            if p.is_infinite or q.is_infinite or p is nan or q is nan:
                return nan
            if (p == q or p == -q or
                    p.is_Pow and p.exp.is_Integer and p.base == q or
                    p.is_integer and q == 1):
                return S.Zero
            if q.is_Number:
                if p.is_Number:
                    return (p % q)
                if q == 2:
                    # Parity of p decides p % 2 without further arithmetic.
                    if p.is_even:
                        return S.Zero
                    elif p.is_odd:
                        return S.One
            # by ratio
            r = p/q
            try:
                d = int(r)
            except TypeError:
                pass
            else:
                if type(d) is int:
                    rv = p - d*q
                    if (rv*q < 0) == True:
                        # Enforce the sign convention: remainder takes q's sign.
                        rv += q
                    return rv
            # by difference
            d = p - q
            if d.is_negative:
                if q.is_negative:
                    return d
                elif q.is_positive:
                    return p
        rv = doit(p, q)
        if rv is not None:
            return rv
        # denest
        if p.func is cls:
            # easy
            qinner = p.args[1]
            if qinner == q:
                return p
            # XXX other possibilities?
        # extract gcd; any further simplification should be done by the user
        G = gcd(p, q)
        if G != 1:
            p, q = [
                gcd_terms(i/G, clear=False, fraction=False) for i in (p, q)]
        pwas, qwas = p, q
        # simplify terms
        # (x + y + 2) % x -> Mod(y + 2, x)
        if p.is_Add:
            # Keep each addend only if wrapping it in Mod does not make the
            # expression more complicated (counted by nested Mod instances).
            args = []
            for i in p.args:
                a = cls(i, q)
                if a.count(cls) > i.count(cls):
                    args.append(i)
                else:
                    args.append(a)
            if args != list(p.args):
                p = Add(*args)
        else:
            # handle coefficients if they are not Rational
            # since those are not handled by factor_terms
            # e.g. Mod(.6*x, .3*y) -> 0.3*Mod(2*x, y)
            cp, p = p.as_coeff_Mul()
            cq, q = q.as_coeff_Mul()
            ok = False
            if not cp.is_Rational or not cq.is_Rational:
                r = cp % cq
                if r == 0:
                    G *= cq
                    p *= int(cp/cq)
                    ok = True
            if not ok:
                p = cp*p
                q = cq*q
        # simple -1 extraction
        if p.could_extract_minus_sign() and q.could_extract_minus_sign():
            G, p, q = [-i for i in (G, p, q)]
        # check again to see if p and q can now be handled as numbers
        rv = doit(p, q)
        if rv is not None:
            return rv*G
        # put 1.0 from G on inside
        if G.is_Float and G == 1:
            p *= G
            return cls(p, q, evaluate=False)
        elif G.is_Mul and G.args[0].is_Float and G.args[0] == 1:
            p = G.args[0]*p
            G = Mul._from_args(G.args[1:])
        # Only re-evaluate if simplification changed the arguments.
        return G*cls(p, q, evaluate=(p, q) != (pwas, qwas))
    def _eval_is_integer(self):
        # Mod of two integers with a nonzero divisor is an integer.
        from sympy.core.logic import fuzzy_and, fuzzy_not
        p, q = self.args
        if fuzzy_and([p.is_integer, q.is_integer, fuzzy_not(q.is_zero)]):
            return True
    def _eval_is_nonnegative(self):
        # The result shares the divisor's sign: positive q => result >= 0.
        if self.args[1].is_positive:
            return True
    def _eval_is_nonpositive(self):
        # Likewise, negative q => result <= 0.
        if self.args[1].is_negative:
            return True
|
#!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""Re-runs initial_sharding.py with a varbinary keyspace_id."""
from vtdb import keyrange_const | ants
import base_sharding
import initial_sharding
import utils
# this test is just re-running an entire initial_sharding.py with a
| # varbinary keyspace_id
if __name__ == '__main__':
    # Force keyspace_id to be treated as varbinary (bytes) instead of the
    # default uint64, then re-run the whole initial_sharding test suite.
    base_sharding.keyspace_id_type = keyrange_constants.KIT_BYTES
    utils.main(initial_sharding)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from typing import Tuple
from time import sleep
from fedlearner_webconsole.composer.interface import IItem, IRunner, ItemType
from fedlearner_webconsole.composer.models import Context, RunnerStatus
from fedlearner_webconsole.db import get_session
from fedlearner_webconsole.workflow.models import Workflow, WorkflowState
class WorkflowCronJobItem(IItem):
    """Composer item identifying a workflow cron job by its task id."""
    def __init__(self, task_id: int):
        # task_id doubles as the workflow id the runner will act on
        self.id = task_id
    def type(self) -> ItemType:
        return ItemType.WORKFLOW_CRON_JOB
    def get_id(self) -> int:
        return self.id
    def __eq__(self, obj: IItem):
        # Items are equal when both the id and the item type match.
        return self.id == obj.id and self.type() == obj.type()
class WorkflowCronJob(IRunner):
    """ start workflow every intervals
    """
    def __init__(self, task_id: int):
        # id of the workflow this runner (re)starts
        self._workflow_id = task_id
        # human-readable outcome; None means "still running"
        self._msg = None
    def start(self, context: Context):
        # Restart the workflow when it is in a terminal/idle state; a
        # COMPLETED/FAILED workflow must be stopped first before rerunning.
        with get_session(context.db_engine) as session:
            try:
                workflow: Workflow = session.query(Workflow).filter_by(
                    id=self._workflow_id).one()
                # TODO: This is a hack!!! Temporarily use this method
                # cc @hangweiqiang: Transaction State Refactor
                state = workflow.get_state_for_frontend()
                if state in ('COMPLETED', 'FAILED', 'READY', 'STOPPED', 'NEW'):
                    if state in ('COMPLETED', 'FAILED'):
                        workflow.update_target_state(
                            target_state=WorkflowState.STOPPED)
                        session.commit()
                        # check workflow stopped
                        # TODO: use composer timeout cc @yurunyu
                        # poll up to 24 * 5s = 2 minutes for the stop
                        for _ in range(24):
                            # use session refresh to get the latest info
                            # otherwise it'll use the indentity map locally
                            session.refresh(workflow)
                            if workflow.state == WorkflowState.STOPPED:
                                break
                            sleep(5)
                        else:
                            self._msg = f'failed to stop \
                                workflow[{self._workflow_id}]'
                            return
                    workflow.update_target_state(
                        target_state=WorkflowState.RUNNING)
                    session.commit()
                    self._msg = f'restarted workflow[{self._workflow_id}]'
                elif state == 'RUNNING':
                    self._msg = f'skip restarting workflow[{self._workflow_id}]'
                elif state == 'INVALID':
                    self._msg = f'current workflow[{self._workflow_id}] \
                        is invalid'
                else:
                    self._msg = f'workflow[{self._workflow_id}] \
                        state is {state}, which is out of expection'
            except Exception as err: # pylint: disable=broad-except
                self._msg = f'exception of workflow[{self._workflow_id}], \
                    details is {err}'
    def result(self, context: Context) -> Tuple[RunnerStatus, dict]:
        # RUNNING until start() has recorded an outcome message.
        del context # unused by result
        if self._msg is None:
            return RunnerStatus.RUNNING, {}
        output = {'msg': self._msg}
        return RunnerStatus.DONE, output
|
"""Helper stuff for things"""
import gc

# Disable cyclic garbage collection so unpredictable collection pauses do
# not skew timings measured by this module's helpers.
gc.disable()
# Python-2-only `print` statement replaced with the call form, which is
# valid (and identical) in both Python 2 and 3.
print('Disabled GC')
def timeit(func, iter = 1000, *args, **kwargs):
    """timeit(func, iter = 1000 *args, **kwargs) -> elapsed time

    Call ``func(*args, **kwargs)`` *iter* times and return the wall-clock
    seconds the calls took.
    """
    import time
    # Build the iteration sequence before starting the clock so its cost
    # is not included in the measurement.
    reps = range(iter)
    start = time.time()
    for _ in reps:
        func(*args, **kwargs)
    return time.time() - start
|
class Excel | (object):
def __init__(self, H, W):
"""
| :type H: int
:type W: str
"""
self.table = [[{'v': 0, 'sum': None} for _ in range(ord(W) - 64)] for __ in range(H)]
def set(self, r, c, v):
"""
:type r: int
:type c: str
:type v: int
:rtype: void
"""
self.table[r - 1][ord(c) - 65] = {'v': v, 'sum': None}
def get(self, r, c):
"""
:type r: int
:type c: str
:rtype: int
"""
cell = self.table[r - 1][ord(c) - 65]
if not cell['sum']: return cell['v']
return sum(self.get(*pos) * cell['sum'][pos] for pos in cell['sum'])
def sum(self, r, c, strs):
"""
:type r: int
:type c: str
:type strs: List[str]
:rtype: int
"""
self.table [r - 1][ord(c) - 65]['sum'] = self.parse(strs)
return self.get(r, c)
def parse(self, strs):
c = collections.Counter()
for s in strs:
s, e = s.split(':')[0], s.split(':')[1] if ':' in s else s
for i in range(int(s[1:]), int(e[1:]) + 1):
for j in range(ord(s[0]) - 64, ord(e[0]) - 64 + 1):
c[(i, chr(j + 64))] += 1
return c
# Your Excel object will be instantiated and called as such:
# obj = Excel(H, W)
# obj.set(r,c,v)
# param_2 = obj.get(r,c)
# param_3 = obj.sum(r,c,strs) |
# -*- coding: utf-8 -*-
# Authors: Y. Jia <ytjia.zju@gmail.com>
"""
Given a collection of intervals, merge all overlapping intervals.
https://leetcode.com/problems/merge-intervals/description/
"""
# Definition for an interval.
class Interval(object):
    """Closed interval [start, end] as used by the merge routine."""

    def __init__(self, s=0, e=0):
        self.start = s
        self.end = e

    def __eq__(self, other):
        # Two intervals are equal when both endpoints coincide.
        return (self.start, self.end) == (other.start, other.end)
class Solution(object):
    def merge(self, intervals):
        """
        :type intervals: List[Interval]
        :rtype: List[Interval]

        Sort by left endpoint, then sweep once, growing the most recently
        kept interval in place while the next one overlaps it.
        """
        if len(intervals) <= 1:
            return intervals
        intervals.sort(key=lambda iv: iv.start)
        merged = [intervals[0]]
        for candidate in intervals[1:]:
            tail = merged[-1]
            if tail.end >= candidate.start:
                # Overlap: extend the kept interval in place.
                tail.end = max(tail.end, candidate.end)
            else:
                merged.append(candidate)
        return merged
|
client of the current session does not support one or more Profiles that are necessary for the subscription."'),
0x80BF0000: ('BadStateNotActive', '"The sub-state machine is not currently active."'),
0x81150000: ('BadAlreadyExists', '"An equivalent rule already exists."'),
0x807D0000: ('BadTcpServerTooBusy', '"The server cannot process the request because it is too busy."'),
0x807E0000: ('BadTcpMessageTypeInvalid', '"The type of the message specified in the header invalid."'),
0x807F0000: ('BadTcpSecureChannelUnknown', '"The SecureChannelId and/or TokenId are not currently in use."'),
0x80800000: ('BadTcpMessageTooLarge', '"The size of the message specified in the header is too large."'),
0x80810000: ('BadTcpNotEnoughResources', '"There are not enough resources to process the request."'),
0x80820000: ('BadTcpInternalError', '"An internal error occurred."'),
0x80830000: ('BadTcpEndpointUrlInvalid', '"The server does not recognize the QueryString specified."'),
0x80840000: ('BadRequestInterrupted', '"The request could not be sent because of a network interruption."'),
0x80850000: ('BadRequestTimeout', '"Timeout occurred while processing the request."'),
0x80860000: ('BadSecureChannelClosed', '"The secure channel has been closed."'),
0x80870000: ('BadSecureChannelTokenUnknown', '"The token has expired or is not recognized."'),
0x80880000: ('BadSequenceNumberInvalid', '"The sequence number is not valid."'),
0x80BE0000: ('BadProtocolVersionUnsupported', '"The applications do not have compatible protocol versions."'),
0x80890000: ('BadConfigurationError', '"There is a problem with the configuration that affects the usefulness of the value."'),
0x808A0000: ('BadNotConnected', '"The variable should receive its value from another variable, but has never been configured to do so."'),
0x808B0000: ('BadDeviceFailure', '"There has been a failure in the device/data source that generates the value that has affected the value."'),
0x808C0000: ('BadSensorFailure', '"There has been a failure in the sensor from which the value is derived by the device/data source."'),
0x808D0000: ('BadOutOfService', '"The source of the data is not operational."'),
0x808E0000: ('BadDeadbandFilterInvalid', '"The deadband filter is not valid."'),
0x408F0000: ('UncertainNoCommunicationLastUsableValue', '"Communication to the data source has failed. The variable value is the last value that had a good quality."'),
0x40900000: ('UncertainLastUsableValue', '"Whatever was updating this value has stopped doing so."'),
0x40910000: ('UncertainSubstituteValue', '"The value is an operational value that was manua | lly overwritten."'),
0x40920000: ('UncertainInitialValue', '"The value is an initial value for a variable that normally receives its value from another variable."'),
0x40930000: ('UncertainSen | sorNotAccurate', '"The value is at one of the sensor limits."'),
0x40940000: ('UncertainEngineeringUnitsExceeded', '"The value is outside of the range of values defined for this parameter."'),
0x40950000: ('UncertainSubNormal', '"The value is derived from multiple sources and has less than the required number of Good sources."'),
0x00960000: ('GoodLocalOverride', '"The value has been overridden."'),
0x80970000: ('BadRefreshInProgress', '"This Condition refresh failed, a Condition refresh operation is already in progress."'),
0x80980000: ('BadConditionAlreadyDisabled', '"This condition has already been disabled."'),
0x80CC0000: ('BadConditionAlreadyEnabled', '"This condition has already been enabled."'),
0x80990000: ('BadConditionDisabled', '"Property not available, this condition is disabled."'),
0x809A0000: ('BadEventIdUnknown', '"The specified event id is not recognized."'),
0x80BB0000: ('BadEventNotAcknowledgeable', '"The event cannot be acknowledged."'),
0x80CD0000: ('BadDialogNotActive', '"The dialog condition is not active."'),
0x80CE0000: ('BadDialogResponseInvalid', '"The response is not valid for the dialog."'),
0x80CF0000: ('BadConditionBranchAlreadyAcked', '"The condition branch has already been acknowledged."'),
0x80D00000: ('BadConditionBranchAlreadyConfirmed', '"The condition branch has already been confirmed."'),
0x80D10000: ('BadConditionAlreadyShelved', '"The condition has already been shelved."'),
0x80D20000: ('BadConditionNotShelved', '"The condition is not currently shelved."'),
0x80D30000: ('BadShelvingTimeOutOfRange', '"The shelving time not within an acceptable range."'),
0x809B0000: ('BadNoData', '"No data exists for the requested time range or event filter."'),
0x80D70000: ('BadBoundNotFound', '"No data found to provide upper or lower bound value."'),
0x80D80000: ('BadBoundNotSupported', '"The server cannot retrieve a bound for the variable."'),
0x809D0000: ('BadDataLost', '"Data is missing due to collection started/stopped/lost."'),
0x809E0000: ('BadDataUnavailable', '"Expected data is unavailable for the requested time range due to an un-mounted volume, an off-line archive or tape, or similar reason for temporary unavailability."'),
0x809F0000: ('BadEntryExists', '"The data or event was not successfully inserted because a matching entry exists."'),
0x80A00000: ('BadNoEntryExists', '"The data or event was not successfully updated because no matching entry exists."'),
0x80A10000: ('BadTimestampNotSupported', '"The client requested history using a timestamp format the server does not support (i.e requested ServerTimestamp when server only supports SourceTimestamp)."'),
0x00A20000: ('GoodEntryInserted', '"The data or event was successfully inserted into the historical database."'),
0x00A30000: ('GoodEntryReplaced', '"The data or event field was successfully replaced in the historical database."'),
0x40A40000: ('UncertainDataSubNormal', '"The value is derived from multiple values and has less than the required number of Good values."'),
0x00A50000: ('GoodNoData', '"No data exists for the requested time range or event filter."'),
0x00A60000: ('GoodMoreData', '"The data or event field was successfully replaced in the historical database."'),
0x80D40000: ('BadAggregateListMismatch', '"The requested number of Aggregates does not match the requested number of NodeIds."'),
0x80D50000: ('BadAggregateNotSupported', '"The requested Aggregate is not support by the server."'),
0x80D60000: ('BadAggregateInvalidInputs', '"The aggregate value could not be derived due to invalid data inputs."'),
0x80DA0000: ('BadAggregateConfigurationRejected', '"The aggregate configuration is not valid for specified node."'),
0x00D90000: ('GoodDataIgnored', '"The request specifies fields which are not valid for the EventType or cannot be saved by the historian."'),
0x80E40000: ('BadRequestNotAllowed', '"The request was rejected by the server because it did not meet the criteria set by the server."'),
0x81130000: ('BadRequestNotComplete', '"The request has not been processed by the server yet."'),
0x00DC0000: ('GoodEdited', '"The value does not come from the real source and has been edited by the server."'),
0x00DD0000: ('GoodPostActionFailed', '"There was an error in execution of these post-actions."'),
0x40DE0000: ('UncertainDominantValueChanged', '"The related EngineeringUnit has been changed but the Variable Value is still provided based on the previous unit."'),
0x00E00000: ('GoodDependentValueChanged', '"A dependent value has been changed but the change has not been applied to the device."'),
0x80E10000: ('BadDominantValueChanged', '"The related EngineeringUnit has been changed but this change has not been applied to the device. The Variable Value is still dependent on the previous unit but its status is currently Bad."'),
0x40E20000: ('UncertainDependentValueChanged', '"A dependent value has been changed but the change has not been applied to the device. The quality of the dominant variable is uncertain."'),
0x80E30000: ('BadDependentValueChanged', '"A dependent value has been changed but the change has not b |
# Copyright Kevin Deldycke <kevin@deldycke.com> and contributors.
# All Rights Reserved.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import pytest
from click_extra.tests.conftest import destructive
from ..pool import ALL_MANAGER_IDS
from .test_cli import CLISubCommandTests
@pytest.fixture
def subcmd():
    """Sub-command and canonical package argument shared by this suite."""
    return ("install", "arrow")
class TestInstall(CLISubCommandTests):
    """Test the ``install`` sub-command.

    The install sub-command tries each user-selected manager until it finds
    one providing the package we seek to install, after which the process
    stops. This means not all managers will be called, so we allow the CLI
    output checks to partially match.
    """

    # Not every selected manager gets invoked (see class docstring), so the
    # selection check only needs a partial match.
    strict_selection_match = False

    def test_no_package_id(self, invoke):
        """`install` without a package ID must fail with a usage error."""
        result = invoke("install")
        assert result.exit_code == 2
        assert not result.stdout
        assert "Error: Missing argument 'PACKAGE_ID'." in result.stderr

    # One known-good package ID per supported manager.
    PACKAGE_IDS = {
        "apm": "markdown-pdf",
        "apt": "wget",
        "apt-mint": "exiftool",
        "brew": "jpeginfo",
        "cask": "pngyu",
        "choco": "ccleaner",
        "composer": "illuminate/contracts",
        "flatpak": "org.gnome.Dictionary",
        "gem": "markdown",
        "mas": "747648890",  # Telegram
        "npm": "raven",
        "opkg": "enigma2-hotplug",
        "pip": "arrow",
        "snap": "standard-notes",
        "vscode": "tamasfe.even-better-toml",
        "yarn": "markdown",
    }
    # Keep the map above in sync with the registered managers.
    assert set(PACKAGE_IDS) == set(ALL_MANAGER_IDS)

    @destructive
    @pytest.mark.parametrize(
        "mid,package_id", (pytest.param(*v, id=v[0]) for v in PACKAGE_IDS.items())
    )
    def test_single_manager_install(self, invoke, mid, package_id):
        """Install one known package restricted to a single manager."""
        result = invoke("--manager", mid, "install", package_id)
        assert result.exit_code == 0
        self.check_manager_selection(result, {mid}, reference_set=ALL_MANAGER_IDS)
# Mark the test methods inherited from CLISubCommandTests as destructive too:
# when run against TestInstall they trigger real `install` operations that
# mutate the system's package state.
destructive()(TestInstall.test_stats)
destructive()(TestInstall.test_default_all_managers)
destructive()(TestInstall.test_manager_selection)
|
tring. No metadata or positional
metadata will be included.
See Also
--------
sequence
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUAAAGGA', metadata={'id':'hello'})
>>> str(s)
'GGUCGUAAAGGA'
"""
return str(self._string.decode("ascii"))
    @stable(as_of="0.4.0")
    def __repr__(self):
        r"""Return a string representation of the biological sequence object.

        Representation includes:

        * sequence type
        * metadata keys and values: will display key/value if it is an
          understood type, otherwise just the type will be displayed. If it is
          an understood type whose representation is too long, just the type
          will be displayed
        * positional metadata: column names and column dtypes will be displayed
          in the order they appear in the positional metadata ``pd.DataFrame``.
          Column names (i.e., keys) follow the same display rules as metadata
          keys
        * sequence stats (e.g., length)
        * up to five lines of chunked sequence data. Each line of chunked
          sequence data displays the current position in the sequence

        Returns
        -------
        str
            String representation of the biological sequence object.

        Notes
        -----
        Subclasses can override Sequence._repr_stats to provide custom
        statistics.

        Examples
        --------
        Short sequence without metadata:

        >>> from skbio import Sequence
        >>> Sequence('ACGTAATGGATACGTAATGCA')
        Sequence
        -------------------------
        Stats:
            length: 21
        -------------------------
        0 ACGTAATGGA TACGTAATGC A

        Longer sequence displays first two lines and last two lines:

        >>> Sequence('ACGT' * 100)
        Sequence
        ---------------------------------------------------------------------
        Stats:
            length: 400
        ---------------------------------------------------------------------
        0   ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
        60  ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
        ...
        300 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
        360 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT

        Sequence with metadata and positional metadata:

        >>> metadata = {
        ...     'id': 'seq-id',
        ...     'description': 'description of the sequence, wrapping across '
        ...     'lines if it\'s too long',
        ...     'authors': ['Alice', 'Bob', 'Carol'],
        ...     'year': 2015,
        ...     'published': True
        ... }
        >>> positional_metadata = {
        ...     'quality': [3, 10, 11, 10],
        ...     'exons': [True, True, False, True]
        ... }
        >>> Sequence('ACGT', metadata=metadata,
        ...          positional_metadata=positional_metadata)
        Sequence
        ----------------------------------------------------------------------
        Metadata:
            'authors': <class 'list'>
            'description': "description of the sequence, wrapping across lines
                            if it's too long"
            'id': 'seq-id'
            'published': True
            'year': 2015
        Positional metadata:
            'exons': <dtype: bool>
            'quality': <dtype: int64>
        Stats:
            length: 4
        ----------------------------------------------------------------------
        0 ACGT

        """
        # Delegate all layout work to the shared repr builder; width chosen so
        # doctest output fits PEP 8 line length given 8-space docstring indent.
        return _SequenceReprBuilder(
            seq=self,
            width=71,  # 79 for pep8, 8 space indent for docstrings
            indent=4,
            chunk_size=10).build()
def _repr_stats(self):
"""Define statistics to display in the sequence's repr.
Subclasses can override this method to provide type-specific
statistics.
This method computes a single statistic: length.
Returns
-------
list
List of tuples where each tuple represents a statistic. Each tuple
contains exactly two ``str`` elements: the statistic's name/label,
and the str-formatted value of the statistic. Ordering of
statistics (i.e., list order) determines display order in the
sequence repr.
"""
return [('length', '%d' % len(self))]
@stable(as_of="0.4.0")
def __copy__(self):
"""Return a shallow copy of the biological sequence.
See Also
--------
copy
Notes
-----
This method is equivalent to ``seq.copy(deep=False)``.
"""
return self.copy(deep=False)
@stable(as_of="0.4.0")
def __deepcopy__(self, memo):
"""Return a deep copy of the biological sequence.
See Also
--------
copy
Notes
-----
This method is equivalent to ``seq.copy(deep=True)``.
"""
return self._copy(True, memo)
@stable(as_of="0.4.0")
def has_metadata(self):
"""Determine if the sequence contains metadata.
Returns
-------
bool
Indicates whether the sequence has metadata
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_metadata()
False
>>> t = DNA('ACACGACGTT', metadata={'id': 'seq-id'})
>>> t.has_metadata()
True
"""
return self._metadata is not None and bool(self.metadata)
@stable(as_of="0.4.0")
def has_positional_metadata(self):
"""Determine if the sequence contains positional metadata.
Returns
-------
bool
Indicates whether the sequence has positional metadata
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_positional_metadata()
False
>>> t = DNA('ACACGACGTT', positional_metadata={'quality': range(10)})
>>> t.has_positional_metadata()
True
"""
return (self._positional_metadata is not None and
len(self.positional_metadata.columns) > 0)
@stable(as_of="0.4.0")
def copy(self, deep=False):
"""Return a copy of the biological sequence.
Parameters
----------
deep : bool, optional
Perform a deep copy. If ``False``, perform a shallow copy.
Returns
-------
Sequence
Copy of the biological sequence.
Notes
-----
Since sequence objects can share the same underlying immutable sequence
data (or pieces of it), this method can be used to create a sequence
object with its own copy of the sequence data so that the original
sequence data can be garbage-collected.
Examples
--------
Create a sequence:
>>> from pprint import pprint
>>> from skbio import Sequence
>>> seq = Sequence('ACGT',
... metadata={'id': 'seq-id', 'authors': ['Alice']},
... positional_metadata={'quality': [7, 10, 8, 5],
... 'list': [[], [], [], []]})
Make a shallow copy of the sequence:
>>> seq_copy = seq.copy()
>>> seq_copy == seq
True
Setting new references in the copied sequence's metadata doesn't affect
the original sequence's metadata:
>>> seq_copy.metadata['id'] = 'new-id'
>>> pprint(seq_copy.metadata)
{'authors': ['Alice'], 'id': 'new-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice'], 'id': 'seq-id'}
The same applies to the sequence's positional metadata:
>>> seq_copy.positional_metadata.loc[0, 'quality'] = 999
>>> seq_copy.positional_metadata
list quality
0 [] 999
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
|
from JumpSc | ale import j
import JumpScale.baselib.redis
import JumpScale.grid.jumpscripts
class CmdRouter(object):
    """Route incoming commands to jumpscripts loaded from ``path``."""

    def __init__(self, path=None):
        # Load the jumpscripts found under ``path`` (None -> default location).
        j.core.jumpscripts.load(path)

    def route(self, organization, actor, name, **args):
        """Dispatch a command to the matching jumpscript (stub, no-op)."""
        pass
|
im | port pandas as pd
adv = pd.read_csv('Advertising.csv')
tv_budget_x = adv.TV.to | list()
print(tv_budget_x)
|
from .actions import *
from .ac | tions_re impor | t *
from .expectations import *
|
# Remove event listener if no forwarding targets present
self.hass.bus.remove_listener(ha.MATCH_ALL,
self._event_listener)
return did_remove
def _event_listener(self, event):
"""Listen and forward all events."""
with self._lock:
# We don't forward time events or, if enabled, non-local events
if event.event_type == ha.EVENT_TIME_CHANGED or \
(self.restrict_origin and event.origin != self.restrict_origin):
return
for api in self._targets.values():
fire_event(api, event.event_type, event.data)
class StateMachine(ha.StateMachine):
    """Fire set events to an API. Uses state_change events to track states."""

    def __init__(self, bus, api):
        """Initalize the statemachine."""
        super().__init__(None)
        self._api = api
        # Take an initial snapshot, then track changes via bus events.
        self.mirror()
        bus.listen(ha.EVENT_STATE_CHANGED, self._state_changed_listener)

    def remove(self, entity_id):
        """Remove the state of an entity.
        Returns boolean to indicate if an entity was removed.
        """
        return remove_state(self._api, entity_id)

    def set(self, entity_id, new_state, attributes=None):
        """Call set_state on remote API."""
        set_state(self._api, entity_id, new_state, attributes)

    def mirror(self):
        """Discard current data and mirrors the remote state machine."""
        snapshot = get_states(self._api)
        self._states = {state.entity_id: state for state in snapshot}

    def _state_changed_listener(self, event):
        """Listen for state changed events and applies them."""
        entity_id = event.data['entity_id']
        new_state = event.data['new_state']
        if new_state is None:
            self._states.pop(entity_id, None)
        else:
            self._states[entity_id] = new_state
class JSONEncoder(json.JSONEncoder):
    """JSONEncoder that supports Home Assistant objects."""

    # pylint: disable=too-few-public-methods,method-hidden
    def default(self, obj):
        """Convert Home Assistant objects.
        Hand other objects to the original method.
        """
        if isinstance(obj, datetime):
            return obj.isoformat()
        if hasattr(obj, 'as_dict'):
            return obj.as_dict()
        try:
            return json.JSONEncoder.default(self, obj)
        except TypeError:
            # The serializer refused it: it might be a generator, so try
            # converting each element.
            try:
                return [self.default(item) for item in obj]
            except TypeError:
                # Not iterable/convertible either - surface the original error.
                return json.JSONEncoder.default(self, obj)
def validate_api(api):
    """Make a call to validate API."""
    try:
        response = api(METHOD_GET, URL_API)
        status = response.status_code
        if status == 200:
            return APIStatus.OK
        if status == 401:
            return APIStatus.INVALID_PASSWORD
        return APIStatus.UNKNOWN
    except HomeAssistantError:
        return APIStatus.CANNOT_CONNECT
def connect_remote_events(from_api, to_api):
    """Setup from_api to forward all events to to_api."""
    # Connection details the source instance needs to reach the target.
    payload = {
        'host': to_api.host,
        'api_password': to_api.api_password,
        'port': to_api.port
    }
    try:
        response = from_api(METHOD_POST, URL_API_EVENT_FORWARD, payload)
        if response.status_code == 200:
            return True
        _LOGGER.error(
            "Error setting up event forwarding: %s - %s",
            response.status_code, response.text)
        return False
    except HomeAssistantError:
        _LOGGER.exception("Error setting up event forwarding")
        return False
def disconnect_remote_events(from_api, to_api):
    """Disconnect forwarding events from from_api to to_api."""
    # Identify the forwarding target to remove.
    payload = {
        'host': to_api.host,
        'port': to_api.port
    }
    try:
        response = from_api(METHOD_DELETE, URL_API_EVENT_FORWARD, payload)
        if response.status_code == 200:
            return True
        _LOGGER.error(
            "Error removing event forwarding: %s - %s",
            response.status_code, response.text)
        return False
    except HomeAssistantError:
        _LOGGER.exception("Error removing an event forwarder")
        return False
def get_event_listeners(api):
    """List of events that is being listened for."""
    try:
        response = api(METHOD_GET, URL_API_EVENTS)
        if response.status_code == 200:
            return response.json()
        return {}
    except (HomeAssistantError, ValueError):
        # ValueError if response.json() can't parse the json
        _LOGGER.exception("Unexpected result retrieving event listeners")
        return {}
def fire_event(api, event_type, data=None):
    """Fire an event at remote API."""
    try:
        response = api(METHOD_POST, URL_API_EVENTS_EVENT.format(event_type), data)
        if response.status_code != 200:
            _LOGGER.error("Error firing event: %d - %s",
                          response.status_code, response.text)
    except HomeAssistantError:
        _LOGGER.exception("Error firing event")
def get_state(api, entity_id):
    """Query given API for state of entity_id."""
    try:
        response = api(METHOD_GET, URL_API_STATES_ENTITY.format(entity_id))
        # response.status_code == 422 if entity does not exist
        if response.status_code != 200:
            return None
        return ha.State.from_dict(response.json())
    except (HomeAssistantError, ValueError):
        # ValueError if response.json() can't parse the json
        _LOGGER.exception("Error fetching state")
        return None
def get_states(api):
    """Query given API for all states."""
    try:
        response = api(METHOD_GET, URL_API_STATES)
        return [ha.State.from_dict(item) for item in response.json()]
    except (HomeAssistantError, ValueError, AttributeError):
        # ValueError if response.json() can't parse the json
        _LOGGER.exception("Error fetching states")
        return []
def remove_state(api, entity_id):
    """Call API to remove state for entity_id.
    Return True if entity is gone (removed/never existed).
    """
    try:
        response = api(METHOD_DELETE, URL_API_STATES_ENTITY.format(entity_id))
        # 404 means the entity never existed, which also counts as gone.
        if response.status_code in (200, 404):
            return True
        _LOGGER.error("Error removing state: %d - %s",
                      response.status_code, response.text)
        return False
    except HomeAssistantError:
        _LOGGER.exception("Error removing state")
        return False
def set_state(api, entity_id, new_state, attributes=None):
    """Tell API to update state for entity_id.
    Return True if success.
    """
    payload = {'state': new_state,
               'attributes': attributes or {}}
    try:
        response = api(METHOD_POST,
                       URL_API_STATES_ENTITY.format(entity_id),
                       payload)
        # 200 updates an existing state, 201 creates a new one.
        if response.status_code in (200, 201):
            return True
        _LOGGER.error("Error changing state: %d - %s",
                      response.status_code, response.text)
        return False
    except HomeAssistantError:
        _LOGGER.exception("Error setting state")
        return False
def is_state(api, entity_id, state):
    """Query API to see if entity_id is specified state."""
    current = get_state(api, entity_id)
    # Mirrors `current and current.state == state`: a missing state is
    # returned as-is (falsy); otherwise compare against the wanted state.
    if not current:
        return current
    return current.state == state
def get_services(api):
    """Return a list of dicts.
    Each dict has a string "domain" and a list of strings "services".
    """
    try:
        response = api(METHOD_GET, URL_API_SERVICES)
        if response.status_code == 200:
            return response.json()
        return {}
    except (HomeAssistantError, ValueError):
        # ValueError if response.json() can't parse the json
        _LOGGER.exception("Got unexpected services result")
        return {}
def call_service(api, domain, service, service_data=None):
"""Call a service at the remote API."""
try:
req = api(METHOD_POST,
URL_API_SE |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# MIPS32 register definitions for miasm2: expression objects for the
# architectural registers plus their *_init aliases used by symbolic
# execution. (Python 2 module: uses xrange, matching the rest of the file.)
from miasm2.expression.expression import ExprId
from miasm2.core.cpu import gen_reg, gen_regs

# Program counters and the HI/LO multiply/divide result registers.
gen_reg('PC', globals())
gen_reg('PC_FETCH', globals())
gen_reg('R_LO', globals())
gen_reg('R_HI', globals())

exception_flags = ExprId('exception_flags', 32)
PC_init = ExprId("PC_init")
PC_FETCH_init = ExprId("PC_FETCH_init")

# General-purpose register names, in MIPS o32 encoding order.
regs32_str = ["ZERO", 'AT', 'V0', 'V1'] + \
             ['A%d' % i for i in xrange(4)] + \
             ['T%d' % i for i in xrange(8)] + \
             ['S%d' % i for i in xrange(8)] + \
             ['T%d' % i for i in xrange(8, 10)] + \
             ['K0', 'K1'] + \
             ['GP', 'SP', 'FP', 'RA']
regs32_expr = [ExprId(x, 32) for x in regs32_str]

# Floating-point registers and FP condition codes.
regs_flt_str = ['F%d' % i for i in xrange(0x20)]
regs_fcc_str = ['FCC%d' % i for i in xrange(8)]

R_LO = ExprId('R_LO', 32)
R_HI = ExprId('R_HI', 32)
R_LO_init = ExprId('R_LO_init', 32)
R_HI_init = ExprId('R_HI_init', 32)

# Coprocessor 0 (system control) register slots; well-known slots get their
# architectural names, the rest keep a generic CPR0_<n> name.
cpr0_str = ["CPR0_%d" % x for x in xrange(0x100)]
cpr0_str[0] = "INDEX"
cpr0_str[16] = "ENTRYLO0"
cpr0_str[24] = "ENTRYLO1"
cpr0_str[40] = "PAGEMASK"
cpr0_str[72] = "COUNT"
cpr0_str[80] = "ENTRYHI"
cpr0_str[104] = "CAUSE"
cpr0_str[112] = "EPC"
cpr0_str[128] = "CONFIG"
cpr0_str[152] = "WATCHHI"

regs_cpr0_expr, regs_cpr0_init, regs_cpr0_info = gen_regs(cpr0_str, globals())
gpregs_expr, gpregs_init, gpregs = gen_regs(regs32_str, globals())
regs_flt_expr, regs_flt_init, fltregs = gen_regs(regs_flt_str, globals(), sz=64)
regs_fcc_expr, regs_fcc_init, fccregs = gen_regs(regs_fcc_str, globals())

all_regs_ids = [PC, PC_FETCH, R_LO, R_HI] + gpregs_expr + regs_flt_expr + \
    regs_fcc_expr + regs_cpr0_expr
all_regs_ids_byname = dict([(x.name, x) for x in all_regs_ids])
all_regs_ids_init = [PC_init, PC_FETCH_init, R_LO_init, R_HI_init] + \
    gpregs_init + regs_flt_init + regs_fcc_init + regs_cpr0_init
all_regs_ids_no_alias = all_regs_ids[:]

# Map every register expression to its *_init counterpart.
regs_init = {}
for i, r in enumerate(all_regs_ids):
    regs_init[r] = all_regs_ids_init[i]
|
ements for a dataset.
Parameters
----------
dataset : sima.ImagingDataset
Returns
-------
displacements : list of ndarray of int
"""
shifts = self._estimate(dataset)
assert np.any(np.all(x is not np.ma.masked for x in shift)
for shift in it.chain.from_iterable(shifts))
assert np.all(
np.all(x is np.ma.masked for x in shift) or
not np.any(x is np.ma.masked for x in shift)
for shift in it.cha | in.from_iterable(shifts))
shifts = self._make_nonnegative(shifts)
assert np.any(np.all(x is not np.ma.masked for x in shift)
for shift in it.chain. | from_iterable(shifts))
assert np.all(
np.all(x is np.ma.masked for x in shift) or
not np.any(x is np.ma.masked for x in shift)
for shift in it.chain.from_iterable(shifts))
return shifts
    def correct(self, dataset, savedir, channel_names=None, info=None,
                correction_channels=None, trim_criterion=None):
        """Create a motion-corrected dataset.

        Parameters
        ----------
        dataset : sima.ImagingDataset or list of sima.Sequence
            Dataset or sequences to be motion corrected.
        savedir : str
            The directory used to store the dataset. If the directory
            name does not end with .sima, then this extension will
            be appended.
        channel_names : list of str, optional
            Names for the channels. Defaults to ['0', '1', '2', ...].
        info : dict
            Data for the order and timing of the data acquisition.
            See sima.ImagingDataset for details.
            NOTE(review): `info` is never referenced in this body - confirm
            whether it should be forwarded to sima.ImagingDataset.
        correction_channels : list of int, optional
            Information from the channels corresponding to these indices
            will be used for motion correction. By default, all channels
            will be used.
        trim_criterion : float, optional
            The required fraction of frames during which a location must
            be within the field of view for it to be included in the
            motion-corrected imaging frames. By default, only locations
            that are always within the field of view are retained.

        Returns
        -------
        dataset : sima.ImagingDataset
            The motion-corrected dataset.
        """
        sequences = [s for s in dataset]
        if correction_channels:
            # Restrict motion estimation to the requested channels only.
            correction_channels = [
                sima.misc.resolve_channels(c, channel_names, len(sequences[0]))
                for c in correction_channels]
            mc_sequences = [s[:, :, :, :, correction_channels]
                            for s in sequences]
        else:
            mc_sequences = sequences
        displacements = self.estimate(sima.ImagingDataset(mc_sequences, None))
        # Grow the frame shape so every displaced frame fits inside it: the
        # maximum displacement along each axis is added to the raw shape.
        disp_dim = displacements[0].shape[-1]
        max_disp = np.max(list(it.chain.from_iterable(d.reshape(-1, disp_dim)
                                                      for d in displacements)),
                          axis=0)
        frame_shape = np.array(sequences[0].shape)[1: -1]  # (z, y, x)
        if len(max_disp) == 2:  # if 2D displacements
            frame_shape[1:3] += max_disp
        else:  # if 3D displacements
            frame_shape += max_disp
        corrected_sequences = [s.apply_displacements(d, frame_shape)
                               for s, d in zip(sequences, displacements)]
        # Trim locations that fall outside the field of view too often,
        # according to trim_criterion.
        planes, rows, columns = _trim_coords(
            trim_criterion, displacements, sequences[0].shape[1:4],
            frame_shape)
        corrected_sequences = [
            s[:, planes, rows, columns] for s in corrected_sequences]
        return sima.ImagingDataset(
            corrected_sequences, savedir, channel_names=channel_names)
class ResonantCorrection(MotionEstimationStrategy):
    """Motion estimation strategy for resonant scanner data.

    When acquiring imaging data with a resonant scanner, the data
    acquired when imaging the same positions can be substantially different
    depending on whether the resonant scanner is moving in one direction
    or the other when passing over that row. This can cause problems when
    trying to motion correct the data, since even rows are collected while
    scanning in one direction and odd rows are collected by scanning
    in the other direction.

    The class defined here addresses this issue by using only the even
    rows to estimate the displacements, and then uses those displacements
    to motion-correct the entire dataset.

    Parameters
    ----------
    base_strategy : sima.motion.MotionEstimationStrategy
        The underlying motion estimation strategy that will be used.
    offset : int
        Horizontal displacement to be added to odd rows. Note the
        convention that row 0 (i.e. the "first" row) is considered
        even.
    """
    def __init__(self, base_strategy, offset=0):
        # Strategy applied to the row-downsampled data.
        self._base_strategy = base_strategy
        # Horizontal shift applied to odd rows of the estimated displacements.
        self._offset = offset
    def _estimate(self, dataset):
        # Even/odd rows are processed as separate half-resolution images,
        # so the row count must be even.
        if not next(iter(dataset)).shape[2] % 2 == 0:
            raise ValueError(
                'Resonant motion correction requires an even number of rows')
        # Split each sequence into its even-row and odd-row halves (per
        # channel) and join them as extra channels of a downsampled dataset.
        downsampled_dataset = sima.ImagingDataset(
            [sima.Sequence.join(
                *it.chain.from_iterable(
                    (seq[:, :, ::2, :, c], seq[:, :, 1::2, :, c])
                    for c in range(seq.shape[4])))
             for seq in dataset],
            None)
        downsampled_displacements = self._base_strategy.estimate(
            downsampled_dataset)
        # Upsample the estimated displacements back to full row resolution.
        displacements = []
        for d_disps in downsampled_displacements:
            disps = np.repeat(d_disps, 2, axis=2)  # Repeat the displacements
            disps[:, :, :, 0] *= 2  # multiply y-shifts by 2
            disps[:, :, 1::2, -1] += self._offset  # shift odd rows by offset
            displacements.append(disps)
        return displacements
def _trim_coords(trim_criterion, displacements, raw_shape, untrimmed_shape):
    """The coordinates used to trim the corrected imaging data."""
    epsilon = 1e-8
    assert len(raw_shape) == 3
    assert len(untrimmed_shape) == 3
    # Normalize the criterion: None means "always in view"; 0 is bumped to a
    # tiny positive value so strictly-never-seen locations are still dropped.
    if trim_criterion is None:
        trim_criterion = 1.
    if trim_criterion == 0.:
        trim_criterion = epsilon
    if not isinstance(trim_criterion, (float, int)):
        raise TypeError('Invalid type for trim_criterion')
    # Per-voxel count of frames in which each location was observed.
    obs_counts = sum(_observation_counts(raw_shape, d, untrimmed_shape)
                     for d in it.chain.from_iterable(displacements))
    num_frames = sum(len(x) for x in displacements)
    occupancy = old_div(obs_counts.astype(float), num_frames)

    def axis_keep_slice(summed_occupancy, denominator):
        # Keep the index range whose mean occupancy meets the criterion.
        axis_occupancy = old_div(summed_occupancy, denominator)
        good = np.nonzero(axis_occupancy + epsilon > trim_criterion)[0]
        return slice(good.min(), good.max() + 1)

    planes = axis_keep_slice(occupancy.sum(axis=2).sum(axis=1),
                             raw_shape[1] * raw_shape[2])
    rows = axis_keep_slice(occupancy.sum(axis=2).sum(axis=0),
                           raw_shape[0] * raw_shape[2])
    columns = axis_keep_slice(occupancy.sum(axis=1).sum(axis=0),
                              np.prod(raw_shape[:2]))
    return planes, rows, columns
def _observation_counts(raw_shape, displacements, untrimmed_shape):
cnt = np.zeros(untrimmed_shape, dtype=int)
if displacements.ndim == 1:
z, y, x = displacements
cnt[z:(z + raw_shape[0]),
y:(y + raw_shape[1]),
x:(x + raw_shape[2])] = 1
elif displacements.ndim == 2:
for plane in range(raw_shape[0]):
d = list(displacements[plane])
if len(d) == 2:
d = [0] + d
cnt[plane + d[0],
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
import sys
from Muon.GUI.Common.muon_load_data import MuonLoadData
from Muon.GUI.Common.utilities.load_utils import load_workspace_from_filename
from Muon.GUI.Common.muon_data_context import MuonDataContext
from Muon.GUI.FrequencyDomainAnalysis.frequency_context import FrequencyContext
from mantid.api import AnalysisDataService
import unittest
from Muon.GUI.Common.observer_pattern import Observer
from mantid.api impor | t FileFinder
import copy
# unittest.mock is part of the standard library on Python 3; the separate
# backported ``mock`` package is only needed on Python 2.
# (Bug fix: the original condition was inverted, ``major < 2``.)
if sys.version_info.major > 2:
    from unittest import mock
else:
    import mock
class MuonDataContextTest(unittest.TestCase):
    """Tests for FrequencyContext against loaded CHRONUS data."""

    def setUp(self):
        """Load CHRONUS run 3422 and build the data/frequency contexts."""
        self.loaded_data = MuonLoadData()
        self.context = MuonDataContext(self.loaded_data)
        self.frequency_context = FrequencyContext(self.context)
        # Subscribe a mocked observer so notifications can be asserted on.
        # (Bug fix: the original created and subscribed a second observer a
        # few lines later, leaving an orphaned mock subscribed as well.)
        self.gui_variable_observer = Observer()
        self.gui_variable_observer.update = mock.MagicMock()
        self.context.gui_variables_notifier.add_subscriber(self.gui_variable_observer)
        self.context.instrument = 'CHRONUS'
        filepath = FileFinder.findRuns('CHRONUS00003422.nxs')[0]
        load_result, run, filename = load_workspace_from_filename(filepath)
        self.loaded_data.add_data(workspace=load_result, run=[run], filename=filename, instrument='CHRONUS')
        self.context.current_runs = [[run]]
        self.context.update_current_data()

    def tearDown(self):
        # Remove all workspaces created by the load in setUp.
        AnalysisDataService.clear()

    def test_get_detectors_excluded_from_default_grouping_tables_gets_correct_groups_for_CHRONUS(self):
        result = self.frequency_context.get_detectors_excluded_from_default_grouping_tables()
        self.assertEqual(result, [256, 425])
# Allow running this test file directly; buffer=False keeps stdout visible.
if __name__ == '__main__':
    unittest.main(buffer=False, verbosity=2)
from distutils.core import setup

# Packaging metadata for the WireMock Robot Framework keyword library.
setup(
    name='robotframework-wiremock',
    version='development',
    description='Robot framework library for WireMock',
    author='Timo Yrjola',
    author_email='timo.yrjola@gmail.com',
    packages=['WireMockLibrary'],
    package_dir={'': 'src'},
    classifiers=[],
)
PageToken = _messages.StringField(3)
class ListInstancesResponse(_messages.Message):
  """Response message for BigtableInstanceAdmin.ListInstances.

  Fields:
    failedLocations: Locations from which Instance information could not be
      retrieved, due to an outage or some other transient condition. Instances
      whose Clusters are all in one of the failed locations may be missing
      from `instances`, and Instances with at least one Cluster in a failed
      location may only have partial information returned.
    instances: The list of requested instances.
    nextPageToken: Set if not all instances could be returned in a single
      response. Pass this value to `page_token` in another request to get the
      next page of results.
  """

  # Generated message class: field numbers are the wire format; do not renumber.
  failedLocations = _messages.StringField(1, repeated=True)
  instances = _messages.MessageField('Instance', 2, repeated=True)
  nextPageToken = _messages.StringField(3)
class ListOperationsResponse(_messages.Message):
  """The response message for Operations.ListOperations.

  Fields:
    nextPageToken: The standard List next-page token.
    operations: A list of operations that matches the specified filter in the
      request.
  """

  # Generated message class: field numbers are the wire format; do not renumber.
  nextPageToken = _messages.StringField(1)
  operations = _messages.MessageField('Operation', 2, repeated=True)
class ListTablesResponse(_messages.Message):
  """Response message for
  google.bigtable.admin.v2.BigtableTableAdmin.ListTables

  Fields:
    nextPageToken: Set if not all tables could be returned in a single
      response. Pass this value to `page_token` in another request to get the
      next page of results.
    tables: The tables present in the requested instance.
  """

  # Generated message class: field numbers are the wire format; do not renumber.
  nextPageToken = _messages.StringField(1)
  tables = _messages.MessageField('Table', 2, repeated=True)
class Modification(_messages.Message):
  """A create, update, or delete of a particular column family.

  Fields:
    create: Create a new column family with the specified schema, or fail if
      one already exists with the given ID.
    drop: Drop (delete) the column family with the given ID, or fail if no
      such family exists.
    id: The ID of the column family to be modified.
    update: Update an existing column family to the specified schema, or fail
      if no column family exists with the given ID.
  """

  # Generated message class: field numbers are the wire format; do not renumber.
  create = _messages.MessageField('ColumnFamily', 1)
  drop = _messages.BooleanField(2)
  id = _messages.StringField(3)
  update = _messages.MessageField('ColumnFamily', 4)
class ModifyColumnFamiliesRequest(_messages.Message):
  """Request message for
  google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies

  Fields:
    modifications: Modifications to be atomically applied to the specified
      table's families. Entries are applied in order, meaning that earlier
      modifications can be masked by later ones (in the case of repeated
      updates to the same family, for example).
  """

  # Generated message class: field numbers are the wire format; do not renumber.
  modifications = _messages.MessageField('Modification', 1, repeated=True)
class Operation(_messages.Message):
  """This resource represents a long-running operation that is the result of a
  network API call.

  Messages:
    MetadataValue: Service-specific metadata associated with the operation.
      It typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata type,
      if any.
    ResponseValue: The normal response of the operation in case of success.
      If the original method returns no data on success, such as `Delete`, the
      response is `google.protobuf.Empty`. If the original method is standard
      `Get`/`Create`/`Update`, the response should be the resource. For other
      methods, the response should have the type `XxxResponse`, where `Xxx` is
      the original method name. For example, if the original method name is
      `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.

  Fields:
    done: If the value is `false`, it means the operation is still in
      progress. If true, the operation is completed, and either `error` or
      `response` is available.
    error: The error result of the operation in case of failure or
      cancellation.
    metadata: Service-specific metadata associated with the operation. It
      typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata type,
      if any.
    name: The server-assigned name, which is only unique within the same
      service that originally returns it. If you use the default HTTP mapping,
      the `name` should have the format of `operations/some/unique/name`.
    response: The normal response of the operation in case of success. If the
      original method returns no data on success, such as `Delete`, the
      response is `google.protobuf.Empty`. If the original method is standard
      `Get`/`Create`/`Update`, the response should be the resource. For other
      methods, the response should have the type `XxxResponse`, where `Xxx` is
      the original method name. For example, if the original method name is
      `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
  """

  # The decorator routes unknown JSON keys into `additionalProperties`.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    """Service-specific metadata associated with the operation. It typically
    contains progress information and common metadata such as create time.
    Some services might not provide such metadata. Any method that returns a
    long-running operation should document the metadata type, if any.

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResponseValue(_messages.Message):
    """The normal response of the operation in case of success. If the
    original method returns no data on success, such as `Delete`, the response
    is `google.protobuf.Empty`. If the original method is standard
    `Get`/`Create`/`Update`, the response should be the resource. For other
    methods, the response should have the type `XxxResponse`, where `Xxx` is
    the original method name. For example, if the original method name is
    `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.

    Messages:
      AdditionalProperty: An additional property for a ResponseValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a ResponseValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  # Generated message class: field numbers are the wire format; do not renumber.
  done = _messages.BooleanField(1)
  error = _messages.MessageField('Status', 2)
  metadata = _messages.MessageField('MetadataValue', 3)
  name = _messages.StringField(4)
  response = _messages.MessageField('ResponseValue', 5)
class Split(_messages.Message):
  """An initial split point for a newly created table.

  Fields:
    key: Row key to use as an initial tablet boundary.
  """

  # Generated message class: field number is the wire format; do not renumber.
  key = _messages.BytesField(1)
class StandardQueryParameters(_messages.Message):
|
import os

# Read the SBML model that ships alongside this module into a string.
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1006230013.xml')
with open(sbmlFilePath, 'r') as f:
    sbmlString = f.read()
def module_exists(module_name):
    """Return True if *module_name* can be imported, False otherwise."""
    try:
        __import__(module_name)
    except ImportError:
        return False
    return True
# Parse the model eagerly when libsbml is installed; otherwise only the raw
# string is available to importers of this module.
if module_exists('libsbml'):
    import libsbml
    sbml = libsbml.readSBMLFromString(sbmlString)
from django.apps import AppConfig


class TagsConfig(AppConfig):
    """Django application configuration for the tags app."""

    # Dotted module path Django uses to locate this application.
    name = "hav.apps.tags"
##################################################
# MODIFIED FROM ORIGINAL VERSION
#
# This file is not the same as in pypi. It includes a pull request to fix py3
# incompabilities that never ended up getting merged.
###############################################################################
import os
from ctypes import CDLL, c_char_p, c_int, c_void_p, c_uint, c_double, byref, Structure, get_errno,\
POINTER, c_short, c_size_t, create_string_buffer
from ctypes.util import find_library
from psistats.libsensors.lib import stdc
# Binding/package metadata.
version_info = (0, 0, 3)
__version__ = '.'.join(map(str, version_info))
__date__ = '2014-08-17'
__author__ = "Marc 'BlackJack' Rintsch"
__contact__ = 'marc@rintsch.de'
__license__ = 'LGPL v2.1'

# libsensors API version this binding targets.
API_VERSION = 4
DEFAULT_CONFIG_FILENAME = '/etc/sensors3.conf'

# The SENSORS_LIB environment variable overrides the normal library lookup.
LIB_FILENAME = os.environ.get('SENSORS_LIB') or find_library('sensors')
SENSORS_LIB = CDLL(LIB_FILENAME)
# Version string exported by the loaded shared library.
VERSION = c_char_p.in_dll(SENSORS_LIB, 'libsensors_version').value
MAJOR_VERSION = version_info[0]
class SensorsError(Exception):
    """Raised when a libsensors call returns a negative status code."""

    def __init__(self, message, error_number=None):
        super(SensorsError, self).__init__(message)
        # Numeric libsensors status code, or None when not applicable.
        self.error_number = error_number
def _error_check(result, _func, _arguments):
if result < 0:
raise SensorsError(_strerror(result), result)
return result
# --- Core libsensors bindings -------------------------------------------
# sensors_strerror(errnum) -> human-readable error message.
_strerror = SENSORS_LIB.sensors_strerror
_strerror.argtypes = [c_int]
_strerror.restype = c_char_p

# sensors_init(FILE*) -> status; negative values raise via _error_check.
_init = SENSORS_LIB.sensors_init
_init.argtypes = [c_void_p]
_init.restype = c_int
_init.errcheck = _error_check

# sensors_cleanup() releases everything allocated by sensors_init().
cleanup = SENSORS_LIB.sensors_cleanup
cleanup.argtypes = None
cleanup.restype = None

# Feature-type constants mirroring the sensors_feature_type enum in sensors.h.
SENSORS_FEATURE_IN = 0x00
SENSORS_FEATURE_FAN = 0x01
SENSORS_FEATURE_TEMP = 0x02
SENSORS_FEATURE_POWER = 0x03
SENSORS_FEATURE_ENERGY = 0x04
SENSORS_FEATURE_CURR = 0x05
SENSORS_FEATURE_HUMIDITY = 0x06
# SENSORS_FEATURE_MAX_MAIN
SENSORS_FEATURE_VID = 0x10
SENSORS_FEATURE_INTRUSION = 0x11
#SENSORS_FEATURE_MAX_OTHER,
SENSORS_FEATURE_BEEP_ENABLE = 0x18
#SENSORS_FEATURE_MAX,
#SENSORS_FEATURE_UNKNOWN = INT_MAX
def init(config_filename=DEFAULT_CONFIG_FILENAME):
    """Initialise libsensors from the given configuration file.

    Raises OSError if the file cannot be opened, or SensorsError (via the
    errcheck hook on ``_init``) if libsensors rejects the configuration.
    """
    # libsensors wants a C FILE*, so open via the C runtime, not Python.
    file_p = stdc.fopen(config_filename.encode('utf-8'), b'r')
    if file_p is None:
        error_number = get_errno()
        raise OSError(error_number, os.strerror(error_number), config_filename)
    try:
        _init(file_p)
    finally:
        # Always release the C-level file handle, even if _init raised.
        stdc.fclose(file_p)
class Subfeature(Structure):
    """ctypes mirror of the C ``sensors_subfeature`` struct."""

    # Field order and types must match the C struct layout exactly.
    _fields_ = [
        ('name', c_char_p),
        ('number', c_int),
        ('type', c_int),
        ('mapping', c_int),
        ('flags', c_uint),
    ]

    def __repr__(self):
        return '<{} name={!r} number={:d} type={:d} mapping={:d} flags={:08x}>'.format(
            self.__class__.__name__,
            self.name,
            self.number,
            self.type,
            self.mapping,
            self.flags,
        )

    def get_value(self):
        """Read this subfeature's current value from its chip."""
        value = c_double()
        # ``parent`` and ``parent.chip`` are attached by Feature.__iter__.
        _get_value(byref(self.parent.chip), self.number, byref(value))
        return value.value


SUBFEATURE_P = POINTER(Subfeature)
class Feature(Structure):
    """ctypes mirror of the C ``sensors_feature`` struct."""

    # Field order and types must match the C struct layout exactly.
    _fields_ = [
        ('name', c_char_p),
        ('number', c_int),
        ('type', c_int),
        ('_first_subfeature', c_int),
        ('_padding1', c_int),
    ]

    def __repr__(self):
        return '<{} name={!r} number={!r} type={!r}>'.format(
            self.__class__.__name__, self.name, self.number, self.type)

    def __iter__(self):
        """Yield every Subfeature belonging to this feature."""
        index = c_int(0)
        while True:
            subfeature_p = _get_all_subfeatures(
                byref(self.chip),
                byref(self),
                byref(index),
            )
            if not subfeature_p:
                return
            subfeature = subfeature_p.contents
            # Attach back-references so Subfeature.get_value() can reach the chip.
            subfeature.chip = self.chip
            subfeature.parent = self
            yield subfeature

    @property
    def label(self):
        #
        # TODO Maybe this is a memory leak!
        #
        return _get_label(byref(self.chip), byref(self)).decode('utf-8')

    def get_value(self):
        #
        # TODO Is the first always the correct one for all feature types?
        #
        return next(iter(self)).get_value()


FEATURE_P = POINTER(Feature)
class Bus(Structure):
    """ctypes mirror of the C ``sensors_bus_id`` struct."""

    # Wildcard markers used by libsensors chip matching.
    TYPE_ANY = -1
    NR_ANY = -1

    _fields_ = [
        ('type', c_short),
        ('nr', c_short),
    ]

    def __str__(self):
        if self.type == self.TYPE_ANY:
            return '*'
        return _get_adapter_name(byref(self)).decode('utf-8')

    def __repr__(self):
        return '{}({!r}, {!r})'.format(self.__class__.__name__, self.type, self.nr)

    @property
    def has_wildcards(self):
        """True when either the bus type or number is a wildcard."""
        return self.type == self.TYPE_ANY or self.nr == self.NR_ANY


BUS_P = POINTER(Bus)
class Chip(Structure):
    """ctypes mirror of the C ``sensors_chip_name`` struct.

    Instances created with a name string own C-allocated memory that is
    released in ``__del__`` via sensors_free_chip_name.
    """
    #
    # TODO Move common stuff into `AbstractChip` class.
    #
    _fields_ = [
        ('prefix', c_char_p),
        ('bus', Bus),
        ('addr', c_int),
        ('path', c_char_p),
    ]

    # Wildcard markers used by libsensors chip matching.
    PREFIX_ANY = None
    ADDR_ANY = -1

    def __new__(cls, *args):
        result = super(Chip, cls).__new__(cls)
        if args:
            # Parse "chip-name" syntax into the struct; libsensors allocates
            # memory here that __del__ must later free.
            _parse_chip_name(args[0].encode('utf-8'), byref(result))
        return result

    def __init__(self, *_args):
        Structure.__init__(self)
        #
        # Need to bind the following to the instance so it is available in
        # `__del__()` when the interpreter shuts down.
        #
        self._free_chip_name = _free_chip_name
        self.byref = byref

    def __del__(self):
        # _b_needsfree_ is set by ctypes when the instance owns its buffer.
        if self._b_needsfree_:
            self._free_chip_name(self.byref(self))

    def __repr__(self):
        return '<%s prefix=%r bus=%r addr=%r path=%r>' % (
            (
                self.__class__.__name__,
                self.prefix,
                self.bus,
                self.addr,
                self.path
            )
        )

    def __str__(self):
        # Render via sensors_snprintf_chip_name into a fixed-size buffer.
        buffer_size = 200
        result = create_string_buffer(buffer_size)
        used = _snprintf_chip_name(result, len(result), byref(self))
        assert used < buffer_size
        return result.value.decode('utf-8')

    def __iter__(self):
        """Yield every Feature of this chip."""
        number = c_int(0)
        while True:
            result_p = _get_features(byref(self), byref(number))
            if not result_p:
                break
            result = result_p.contents
            # Back-reference used by Feature methods.
            result.chip = self
            yield result

    @property
    def adapter_name(self):
        return str(self.bus)

    @property
    def has_wildcards(self):
        """True when prefix, address, or bus contains a wildcard."""
        return (
            self.prefix == self.PREFIX_ANY
            or self.addr == self.ADDR_ANY
            or self.bus.has_wildcards
        )


CHIP_P = POINTER(Chip)
# --- Chip-name helpers ---------------------------------------------------
_parse_chip_name = SENSORS_LIB.sensors_parse_chip_name
_parse_chip_name.argtypes = [c_char_p, CHIP_P]
_parse_chip_name.restype = c_int
_parse_chip_name.errcheck = _error_check

_free_chip_name = SENSORS_LIB.sensors_free_chip_name
_free_chip_name.argtypes = [CHIP_P]
_free_chip_name.restype = None

_snprintf_chip_name = SENSORS_LIB.sensors_snprintf_chip_name
_snprintf_chip_name.argtypes = [c_char_p, c_size_t, CHIP_P]
_snprintf_chip_name.restype = c_int
_snprintf_chip_name.errcheck = _error_check

_get_adapter_name = SENSORS_LIB.sensors_get_adapter_name
_get_adapter_name.argtypes = [BUS_P]
_get_adapter_name.restype = c_char_p

# --- Feature/value accessors --------------------------------------------
_get_label = SENSORS_LIB.sensors_get_label
_get_label.argtypes = [CHIP_P, FEATURE_P]
_get_label.restype = c_char_p

_get_value = SENSORS_LIB.sensors_get_value
_get_value.argtypes = [CHIP_P, c_int, POINTER(c_double)]
_get_value.restype = c_int
_get_value.errcheck = _error_check

#
# TODO sensors_set_value()
# TODO sensors_do_chip_sets()
#
# --- Enumeration ---------------------------------------------------------
_get_detected_chips = SENSORS_LIB.sensors_get_detected_chips
_get_detected_chips.argtypes = [CHIP_P, POINTER(c_int)]
_get_detected_chips.restype = CHIP_P

_get_features = SENSORS_LIB.sensors_get_features
_get_features.argtypes = [CHIP_P, POINTER(c_int)]
_get_features.restype = FEATURE_P

_get_all_subfeatures = SENSORS_LIB.sensors_get_all_subfeatures
_get_all_subfeatures.argtypes = [CHIP_P, FEATURE_P, POINTER(c_int)]
_get_all_subfeatures.restype = SUBFEATURE_P
#
# TODO senso |
t__(converter, provider, py_type, attr=None):
if attr is not None: throw(TypeError, 'Attribute %s has invalid type NoneType' % attr)
Converter.__init__(converter, provider, py_type)
def get_sql_type(converter, attr=None):
assert False
def get_fk_type(converter, sql_type):
assert False
class BoolConverter(Converter):
    """Converts Python bool values to/from the database BOOLEAN type.

    NOTE: this codebase uses ``converter`` instead of ``self`` by convention.
    """
    def validate(converter, val):
        # Coerce any truthy/falsy value to a plain bool.
        return bool(val)
    def sql2py(converter, val):
        return bool(val)
    def sql_type(converter):
        return "BOOLEAN"
class StrConverter(Converter):
    """Converts text attributes to/from VARCHAR/TEXT columns.

    NOTE: this codebase uses ``converter`` instead of ``self`` by convention.
    """
    def __init__(converter, provider, py_type, attr=None):
        # Defaults are set before the base __init__, which may call init().
        converter.max_len = None
        converter.db_encoding = None
        Converter.__init__(converter, provider, py_type, attr)
    def init(converter, kwargs):
        """Resolve max length and options from attribute args/kwargs."""
        attr = converter.attr
        # The single optional positional argument is the column max length.
        if not attr.args: max_len = None
        elif len(attr.args) > 1: unexpected_args(attr, attr.args[1:])
        else: max_len = attr.args[0]
        if issubclass(attr.py_type, (LongStr, LongUnicode)):
            # CLOB types are unbounded; a max length makes no sense for them.
            if max_len is not None: throw(TypeError, 'Max length is not supported for CLOBs')
        elif max_len is None: max_len = converter.provider.varchar_default_max_len
        elif not isinstance(max_len, int_types):
            throw(TypeError, 'Max length argument must be int. Got: %r' % max_len)
        converter.max_len = max_len
        converter.db_encoding = kwargs.pop('db_encoding', None)
        # autostrip=True strips surrounding whitespace on validation.
        converter.autostrip = kwargs.pop('autostrip', True)
    def validate(converter, val):
        # On Python 2, byte strings are accepted but must be ASCII-decodable.
        if PY2 and isinstance(val, str): val = val.decode('ascii')
        elif not isinstance(val, unicode): throw(TypeError,
            'Value type for attribute %s must be %s. Got: %r' % (converter.attr, unicode.__name__, type(val)))
        if converter.autostrip: val = val.strip()
        max_len = converter.max_len
        val_len = len(val)
        if max_len and val_len > max_len:
            throw(ValueError, 'Value for attribute %s is too long. Max length is %d, value length is %d'
                              % (converter.attr, max_len, val_len))
        return val
    def sql_type(converter):
        # Bounded text maps to VARCHAR(n); unbounded (CLOB) maps to TEXT.
        if converter.max_len:
            return 'VARCHAR(%d)' % converter.max_len
        return 'TEXT'
class IntConverter(Converter):
    """Converts integer attributes to/from sized SQL integer columns.

    NOTE: this codebase uses ``converter`` instead of ``self`` by convention.
    """
    # SQL type per bit size; None key is the unsized default.
    signed_types = {None: 'INTEGER', 8: 'TINYINT', 16: 'SMALLINT', 24: 'MEDIUMINT', 32: 'INTEGER', 64: 'BIGINT'}
    # Providers that spell unsigned types differently override this mapping.
    unsigned_types = None
    def init(converter, kwargs):
        """Resolve min/max bounds, bit size and signedness from options."""
        Converter.init(converter, kwargs)
        attr = converter.attr
        min_val = kwargs.pop('min', None)
        if min_val is not None and not isinstance(min_val, int_types):
            throw(TypeError, "'min' argument for attribute %s must be int. Got: %r" % (attr, min_val))
        max_val = kwargs.pop('max', None)
        if max_val is not None and not isinstance(max_val, int_types):
            throw(TypeError, "'max' argument for attribute %s must be int. Got: %r" % (attr, max_val))
        size = kwargs.pop('size', None)
        if size is None:
            # Legacy 'long' attributes are migrated to int with size=64.
            if attr.py_type.__name__ == 'long':
                deprecated(9, "Attribute %s: 'long' attribute type is deprecated. "
                              "Please use 'int' type with size=64 option instead" % attr)
                attr.py_type = int
                size = 64
        elif attr.py_type.__name__ == 'long': throw(TypeError,
            "Attribute %s: 'size' option cannot be used with long type. Please use int type instead" % attr)
        elif not isinstance(size, int_types):
            throw(TypeError, "'size' option for attribute %s must be of int type. Got: %r" % (attr, size))
        elif size not in (8, 16, 24, 32, 64):
            throw(TypeError, "incorrect value of 'size' option for attribute %s. "
                             "Should be 8, 16, 24, 32 or 64. Got: %d" % (attr, size))
        unsigned = kwargs.pop('unsigned', False)
        if unsigned is not None and not isinstance(unsigned, bool):
            throw(TypeError, "'unsigned' option for attribute %s must be of bool type. Got: %r" % (attr, unsigned))
        if size == 64 and unsigned and not converter.provider.uint64_support: throw(TypeError,
            'Attribute %s: %s provider does not support unsigned bigint type' % (attr, converter.provider.dialect))
        # NOTE(review): this branch only fires for explicit unsigned=None with
        # no size; kept as-is to preserve the original behavior.
        if unsigned is not None and size is None: size = 32
        lowest = highest = None
        if size:
            # Representable range for the chosen size/signedness.
            # (Bug fix: original had a duplicated assignment `highest = highest = ...`.)
            highest = 2 ** size - 1 if unsigned else 2 ** (size - 1) - 1
            lowest = 0 if unsigned else -(2 ** (size - 1))
        if highest is not None and max_val is not None and max_val > highest:
            throw(ValueError, "'max' argument should be less or equal to %d because of size=%d and unsigned=%s. "
                              "Got: %d" % (highest, size, max_val, unsigned))
        if lowest is not None and min_val is not None and min_val < lowest:
            throw(ValueError, "'min' argument should be greater or equal to %d because of size=%d and unsigned=%s. "
                              "Got: %d" % (lowest, size, min_val, unsigned))
        # NOTE(review): `or` treats an explicit 0 bound as "unset"; preserved.
        converter.min_val = min_val or lowest
        converter.max_val = max_val or highest
        converter.size = size
        converter.unsigned = unsigned
    def validate(converter, val):
        """Coerce *val* to int and enforce the configured bounds."""
        if isinstance(val, int_types): pass
        elif isinstance(val, basestring):
            try: val = int(val)
            except ValueError: throw(ValueError,
                'Value type for attribute %s must be int. Got string %r' % (converter.attr, val))
        else: throw(TypeError, 'Value type for attribute %s must be int. Got: %r' % (converter.attr, type(val)))
        if converter.min_val and val < converter.min_val:
            throw(ValueError, 'Value %r of attr %s is less than the minimum allowed value %r'
                              % (val, converter.attr, converter.min_val))
        if converter.max_val and val > converter.max_val:
            throw(ValueError, 'Value %r of attr %s is greater than the maximum allowed value %r'
                              % (val, converter.attr, converter.max_val))
        return val
    def sql2py(converter, val):
        return int(val)
    def sql_type(converter):
        if not converter.unsigned:
            return converter.signed_types.get(converter.size)
        if converter.unsigned_types is None:
            # Fall back to the MySQL-style "<type> UNSIGNED" spelling.
            return converter.signed_types.get(converter.size) + ' UNSIGNED'
        return converter.unsigned_types.get(converter.size)
class RealConverter(Converter):
# The tolerance is necessary for Oracle, because it has different representation of float numbers.
# For other databases the default tolerance is set because the precision can be lost during
# Python -> JavaScript -> Python conversion
default_tolerance = 1e-14
def init(converter, kwargs):
Converter.init(converter, kwargs)
min_val = kwargs.pop('min', None)
if min_val is not None:
try: min_val = float(min_val)
except ValueError:
throw(TypeError, "Invalid value for 'min' argument for attribute %s: %r" % (converter.attr, min_val))
max_val = kwargs.pop('max', None)
if max_val is not None:
try: max_val = float(max_val)
except ValueError:
throw(TypeError, "Invalid value for 'max' argument for attribute %s: %r" % (converter.attr, max_val))
converter.min_val = min_val
converter.max_val = max_val
converter.tolerance = kwargs.pop('tolerance', converter.default_tolerance)
def validate(converter, val):
try: val = float(val)
except ValueError:
throw(TypeError, 'Invalid value for attribute %s: %r' % (converter.attr, val))
if converter.min_val and val < converter.min_val:
throw(ValueError, 'Value %r of attr %s is less than the minimum allowed value %r'
% (val, converter.attr, converter.min_val))
if converter.max_val and val > converter.max |
#!/usr/bin/python3
# Copyright (C) 2015 Bitquant Research Laboratories (Asia) Limited
# Released under the Simplified BSD License
import my_path
import time
import zmq.green as zmq
import pprint
import algobroker
import msgpack
class Dispatcher(algobroker.Broker):
    """Routes incoming command messages to the matching downstream broker."""

    def __init__(self):
        algobroker.Broker.__init__(self, "dispatcher")
        # One PUSH socket per downstream worker.
        self.sms_sender = self.socket(zmq.PUSH)
        self.sms_sender.connect(algobroker.ports['data']['broker_plivo'])
        self.bitmex_sender = self.socket(zmq.PUSH)
        self.bitmex_sender.connect(algobroker.ports['data']['broker_bitmex'])
        self.web_sender = self.socket(zmq.PUSH)
        self.web_sender.connect(algobroker.ports['data']['broker_web'])

    def _forward(self, label, sender, data):
        # Log the message and relay it msgpack-encoded to one downstream socket.
        self.debug("sending " + label)
        self.debug(pprint.pformat(data))
        sender.send(msgpack.packb(data))

    def process_data(self, data):
        """Dispatch a single decoded message to the right destination."""
        cmd = data['cmd']
        if cmd == "log":
            self.warning(pprint.pformat(data))
        elif cmd == 'alert' and data['type'] == 'sms':
            self._forward("sms", self.sms_sender, data)
        elif cmd == 'alert' and data['type'] == 'web':
            self._forward("web", self.web_sender, data)
        elif data.get('broker', None) == 'bitmex':
            self._forward("bitmex", self.bitmex_sender, data)
        else:
            self.error("unknown action")
# Run the dispatcher's blocking receive loop when executed as a script.
if __name__ == "__main__":
    dispatcher = Dispatcher()
    dispatcher.run()
|
om worker.crawler.china_telecom_tool import login_unity
class Crawler(BaseCrawler):
"""
kwargs 包含
'tel': str,
'pin_pwd': str,
'id_card': str,
'full_name': unicode,
'sms_code': str,
'captcha_code': str
錯誤等級
0: 成功
1: 帳號密碼錯誤
2: 認證碼錯誤
9: 其他錯誤
"""
    def __init__(self, **kwargs):
        """Initialize crawler state."""
        super(Crawler, self).__init__(**kwargs)
        # Count of wrong-PIN attempts.
        self.pin_pwd_error_times = 0
        # Cached account-info page HTML, filled by login() on success.
        self.info_res = ''
    def need_parameters(self, **kwargs):
        """Return the credential fields this crawler requires."""
        return ['pin_pwd']
    def get_verify_type(self, **kwargs):
        """Return the second-step verification type (SMS code)."""
        return 'SMS'
    def login(self, **kwargs):
        """Log in through the unified 189.cn flow and cache the profile page.

        Returns (level, status_key); level 0 means success (see class notes
        for the error-level scheme).
        """
        ProvinceID = '07'
        # Shared unified-login helper for China Telecom provinces.
        code, key = login_unity(self, ProvinceID, **kwargs)
        if code != 0:
            return code, key
        # Establish the SSO session cookie for the regional (nm) site.
        cookie_url = 'http://nm.189.cn/selfservice/service/userLogin'
        cookie_data = {
            "number" : kwargs['tel'],
            "intLoginType":"4",
            "areaCode":"0471",
            "isBusinessCustType":"N",
            "identifyType":"B",
            "userLoginType":"4",
            "password":"",
            "randomPass":"",
            "noCheck":"N",
            "isSSOLogin":"Y",
            "sRand":"SSOLogin"
        }
        code, key, resp = self.post(cookie_url, data=json.dumps(cookie_data))
        if code != 0:
            return code, key
        personal_info_url = 'http://www.189.cn/dqmh/userCenter/userInfo.do?method=editUserInfo_new&fastcode=10000557&cityCode=nm'
        for retry in xrange(self.max_retry):
            code, key, tel_info_res = self.get(personal_info_url)
            if code != 0:
                return code, key
            # The profile page contains "real name" (真实姓名) only when the
            # session is valid; cache the HTML for crawl_info().
            if u'真实姓名' in tel_info_res.text:
                self.info_res = tel_info_res.text
                return 0, "success"
            else:
                pass
        else:
            # for/else: all retries exhausted without a valid profile page.
            self.log('crawler', "request_error", tel_info_res)
            return 9, "website_busy_error"
    def send_verify_request(self, **kwargs):
        """Request the SMS verification code (or captcha image, where used).

        Returns:
            status_key: str, status-code key (see status_code)
            level: int, error level
            message: unicode, detailed error message
            image_str: str, base64 captcha image; empty string for SMS flows
        """
        send_sms_url = "http://nm.189.cn/selfservice/bill/xdQuerySMS"
        send_sms_data = {
            "phone": kwargs['tel']
        }
        code, key, resp = self.post(send_sms_url, data=json.dumps(send_sms_data))
        if code != 0:
            return code, key, ""
        if resp.text:
            try:
                resp_json_response = resp.json()
            except:
                error = traceback.format_exc()
                self.log('crawler', "Not json file : {}, resp:{}".format(error, resp.history), resp)
                return 9, 'website_busy_error', ''
            # flag '0' means the SMS was sent; '2' means sending failed.
            if resp_json_response.get('flag', '') == '0':
                return 0, "success", ""
            elif resp_json_response.get('flag', '') == '2':
                self.log('crawler', "send_sms_error", resp)
                return 9, "send_sms_error", ''
            else:
                self.log('crawler', "unknown_error", resp)
                return 9, "unknown_error", ''
        else:
            # Empty body: treat as a send failure.
            self.log('crawler', "send_sms_error", resp)
            return 9, "send_sms_error", ''
    def verify(self, **kwargs):
        """Submit the SMS code for second-step verification.

        Returns:
            status_key: str, status-code key (see status_code)
            level: int, error level (0 success, 2 wrong code, 9 other)
        """
        check_sms_url = "http://nm.189.cn/selfservice/bill/xdQuerySMSCheck"
        check_sms_data = {
            'code': kwargs['sms_code']
        }
        code, key, resp = self.post(check_sms_url, data=json.dumps(check_sms_data))
        if code != 0:
            return code, key
        try:
            resp_json_response = resp.json()
        except:
            error = traceback.format_exc()
            self.log('crawler', "json_error : %s" % error, resp)
            return 9, 'json_error'
        # flag '0' here means the code was rejected.
        if resp_json_response.get('flag', '') == '0':
            self.log('crawler', "verify_error", resp)
            return 2, "verify_error"
        # Treat a direct call-log payload in the response as success as well.
        elif resp_json_response.get('flag', '') == '1' or 'resultNum' in resp.text or 'items' in resp.text:
            return 0, "success"
        else:
            self.log('crawler', "unknown_error", resp)
            return 9, "unknown_error"
    def crawl_info(self, **kwargs):
        """Extract account information from the profile page cached by login().

        Returns:
            status_key: str, status-code key (see status_code)
            level: int, error level
            message: unicode, detailed error message
            info: dict, account info in the standard account-info format
        """
        user_info = {}
        selector = Selector(text=self.info_res)
        try:
            full_name = selector.xpath('//input[@name="realName"]/@value').extract()
            user_info['full_name'] = full_name[0] if full_name else ''
            id_card = selector.xpath('//input[@name="certificateNumber"]/@value').extract()
            user_info['id_card'] = id_card[0] if id_card else ''
            # Address lives in a textarea; grab its inner text with a regex.
            address = re.findall(u'id="address".*?;">(.*?)</textarea>', self.info_res)
            user_info['address'] = address[0] if address else ''
            # Open date is not shown on this page.
            user_info['open_date'] = ""
            user_info['is_realname_register'] = True
        except:
            error = traceback.format_exc()
            self.log('crawler', "html_error : %s" % error, '')
            return 9, "html_error", {}
        return 0, "success", user_info
def random_sleep(self, tm, modulus=3):
time.sleep(random.uniform(tm / modulus / 1.5, 1.5 * tm / modulus))
def crawl_call_log(self, **kwargs):
"""
爬取詳單
return
status_key: str, 狀態碼金鑰,參考status_code
level: int, 錯誤等級
message: unicode, 詳細的錯誤信息
call_log: list, 通信詳單,參考詳單格式
"""
call_log = []
crawl_num = 0
call_log_url = "http://nm.189.cn/selfservice/bill/xdQuery"
today = date.today()
missing_list = []
pos_missing = []
search_month = [x for x in range(0, -6, -1)]
for each_month in search_month:
query_date = today + relativedelta(months=each_month)
search_month = "%d%02d" % (query_date.year, query_date.month)
call_log_data = {
"billingCycle": "{}{}".format(query_date.year, str(query_date.month).zfill(2)),
'accNbr': kwargs['tel'],
'accNbrType': '4',
'areaCode': '0478',
'pageNo': -1,
'pageRecords': -1,
'prodSpecId': '378',
'qtype': '0',
'isYWlQuery': 'N',
}
header = {
'Referer': 'http://nm.189.cn/selfservice/bill/xd',
'Host': 'nm.189.cn',
'Content-Type': 'application/json'
}
start_time = time.time()
end_time = start_time + 10
aid_time_dict = dict()
retry_times = self.max_retry
log_for_retry = []
while 1:
log_for_retry.append((1, retry_times))
retry_times -= 1
code, key, resp = self.post(call_log_url, data=json.dumps(call_log_data), headers=header)
if code:
missing_flag = True
elif 'POR-2102' in resp.text:
# 无查询结果,这个月没有数据
missing_flag = False
else:
flag = True
break
now_time = time.time()
if retry_times >= 0:
aid_time_dict.update({retry_times: time.time()})
elif now_time < end_time:
loop_time = aid_time_dict.get(0, time.time())
left_time = end_time - loop_time
self.random_sleep(left_time)
else:
flag = False
if missing_flag:
missing_list.append(search_month)
e |
# -*- coding: utf-8 -*-
import sys
import os
from os.path import dirname

# Make sibling project modules (such as eshop) importable by extending
# sys.path with the project root (the parent of this file's directory).
PROJECT_PATH = dirname(os.path.abspath(os.path.dirname(__file__)))
ESHOP_PATH = os.path.join(PROJECT_PATH, 'eshop/')
sys.path.append(PROJECT_PATH)
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
# Channel configuration: supported languages, video servers, and qualities.
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload',
                'okru',
                'netutv',
                'rapidvideo'
                ]
list_quality = ['default']
# Base URL of the scraped site.
host = "http://www.anitoonstv.com"
def mainlist(item):
    """Build the channel's root menu (sections all point at the host page)."""
    logger.info()
    thumb_series = get_thumb("channels_tvshow.png")
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = list()
    # Every section is served by lista(); the title selects the markup block.
    for section in ("Anime", "Series Animadas", "Novedades", "Pokemon"):
        itemlist.append(Item(channel=item.channel, action="lista", title=section,
                             url=host, thumbnail=thumb_series))
    itemlist = renumbertools.show_option(item.channel, itemlist)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def lista(item):
    """List the shows of one menu section (or the 'Novedades' front-page
    block) and derive a cleaned-up show name for renumbering/TMDB lookup."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Collapse whitespace so the single-line regexes below can match.
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    # 'Novedades' lives in a different markup block than the section menus.
    if 'Novedades' in item.title:
        patron_cat = '<div class="activos"><h3>(.+?)<\/h2><\/a><\/div>'
        patron = '<a href="(.+?)"><h2><span>(.+?)<\/span>'
    else:
        patron_cat = '<li><a href=.+?>'
        patron_cat += str(item.title)
        patron_cat += '<\/a><div>(.+?)<\/div><\/li>'
        patron = "<a href='(.+?)'>(.+?)<\/a>"
    data = scrapertools.find_single_match(data, patron_cat)
    matches = scrapertools.find_multiple_matches(data, patron)
    for link, name in matches:
        # 'Novedades' links are absolute; section links are host-relative.
        if "Novedades" in item.title:
            url = link
            title = name.capitalize()
        else:
            url = host + link
            title = name
        # Heuristics to strip subtitles/qualifiers off the show name:
        # "Show: Arc" -> "Show"; "Show (2020)" -> "Show" (except Dragon Ball
        # Super style titles, which keep the parenthesised part).
        if ":" in title:
            cad = title.split(":")
            show = cad[0]
        else:
            if "(" in title:
                cad = title.split("(")
                if "Super" in title:
                    show = cad[1]
                    show = show.replace(")", "")
                else:
                    show = cad[0]
            else:
                show = title
        # NOTE(review): splitting on the literal "xy" when '&' is present
        # looks like a Pokemon XY special case — confirm against site titles.
        if "&" in show:
            cad = title.split("xy")
            show = cad[0]
        context1=[renumbertools.context(item), autoplay.context]
        itemlist.append(
            item.clone(title=title, url=url, plot=show, action="episodios", show=show,
                       context=context1))
    tmdb.set_infoLabels(itemlist)
    return itemlist
def episodios(item):
    """Scrape the episode list of one show page and emit one playable item
    per available chapter, renumbered via renumbertools."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron = '<div class="pagina">(.+?)<\/div><div id="fade".+?>'
    data = scrapertools.find_single_match(data, patron)
    patron_caps = "<a href='(.+?)'>Capitulo: (.+?) - (.+?)<\/a>"
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    show = scrapertools.find_single_match(data, '<span>Titulo.+?<\/span>(.+?)<br><span>')
    scrapedthumbnail = scrapertools.find_single_match(data, "<img src='(.+?)'.+?>")
    scrapedplot = scrapertools.find_single_match(data, '<span>Descripcion.+?<\/span>(.+?)<br>')
    # NOTE(review): i is assigned but never used.
    i = 0
    temp = 0
    for link, cap, name in matches:
        # Each time the chapter counter restarts at 1 a new season begins.
        if int(cap) == 1:
            temp = temp + 1
        # Zero-pad single-digit chapter numbers before converting.
        if int(cap) < 10:
            cap = "0" + cap
        season = temp
        episode = int(cap)
        season, episode = renumbertools.numbered_for_tratk(
            item.channel, item.show, season, episode)
        date = name
        title = "%sx%s %s (%s)" % (season, str(episode).zfill(2), "Episodio %s" % episode, date)
        # title = str(temp)+"x"+cap+" "+name
        url = host + "/" + link
        if "NO DISPONIBLE" not in name:
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, thumbnail=scrapedthumbnail,
                                 plot=scrapedplot, url=url, show=show))
    # Offer adding the whole show to the local video library.
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=show))
    return itemlist
def findvideos(item):
    """Extract the hosted video links for one episode page.

    Fixes garbled ' | ' tokens that had been injected mid-statement on the
    downloadpage() and find_single_match() calls.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # data1 is the whitespace-collapsed copy used for the video-list regex;
    # plot/thumbnail are matched against the raw page on purpose.
    data1 = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    data_vid = scrapertools.find_single_match(data1, '<div class="videos">(.+?)<\/div><div .+?>')
    # name = scrapertools.find_single_match(data,'<span>Titulo.+?<\/span>([^<]+)<br>')
    scrapedplot = scrapertools.find_single_match(data, '<br><span>Descrip.+?<\/span>([^<]+)<br>')
    scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
    itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
    for server, quality, url in itemla:
        if "Calidad Alta" in quality:
            quality = quality.replace("Calidad Alta", "HQ")
        server = server.lower().strip()
        # Normalise the site's server labels to our server ids.
        if "ok" == server:
            server = 'okru'
        if "netu" == server:
            continue
        itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
                                   thumbnail=scrapedthumbnail, plot=scrapedplot,
                                   title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))
    autoplay.start(itemlist, item)
    return itemlist
def play(item):
    """Resolve the selected link into a playable item."""
    logger.info()
    itemlist = []
    # Try the server the link was tagged with first ...
    found = servertools.findvideosbyserver(item.url, item.server)
    if not found:
        # ... and fall back to scanning every known server.
        found = servertools.findvideos(item.url, skip=True)
    if found:
        server_id, video_url = found[0][2], found[0][1]
        itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play",
                             server=server_id, url=video_url, thumbnail=item.thumbnail))
    return itemlist
|
from django.core.exceptions import ValidationError
from django.db import IntegrityError, models, transaction
from django.test import SimpleTestCase, TestCase
from .models import BooleanModel, FksToBooleans, NullBooleanModel
class BooleanFieldTests(TestCase):
    """Conversion and round-trip behaviour of BooleanField/NullBooleanField.

    Fixes a garbled method name (test_booleanfield_c | hoices_blank) and a
    corrupted docstring introduced by stray ' | ' tokens.
    """

    def _test_get_prep_value(self, f):
        # Truthy/falsy spellings must normalise to real booleans; None passes
        # through unchanged.
        self.assertEqual(f.get_prep_value(True), True)
        self.assertEqual(f.get_prep_value('1'), True)
        self.assertEqual(f.get_prep_value(1), True)
        self.assertEqual(f.get_prep_value(False), False)
        self.assertEqual(f.get_prep_value('0'), False)
        self.assertEqual(f.get_prep_value(0), False)
        self.assertEqual(f.get_prep_value(None), None)

    def _test_to_python(self, f):
        self.assertIs(f.to_python(1), True)
        self.assertIs(f.to_python(0), False)

    def test_booleanfield_get_prep_value(self):
        self._test_get_prep_value(models.BooleanField())

    def test_nullbooleanfield_get_prep_value(self):
        self._test_get_prep_value(models.NullBooleanField())

    def test_booleanfield_to_python(self):
        self._test_to_python(models.BooleanField())

    def test_nullbooleanfield_to_python(self):
        self._test_to_python(models.NullBooleanField())

    def test_booleanfield_choices_blank(self):
        """
        BooleanField with choices and defaults doesn't generate a formfield
        with the blank option (#9640, #10549).
        """
        choices = [(1, 'Si'), (2, 'No')]
        f = models.BooleanField(choices=choices, default=1, null=False)
        self.assertEqual(f.formfield().choices, choices)

    def test_return_type(self):
        # Values read back from the database must be real booleans.
        b = BooleanModel.objects.create(bfield=True)
        b.refresh_from_db()
        self.assertEqual(b.bfield, True)
        b2 = BooleanModel.objects.create(bfield=False)
        b2.refresh_from_db()
        self.assertEqual(b2.bfield, False)
        b3 = NullBooleanModel.objects.create(nbfield=True)
        b3.refresh_from_db()
        self.assertEqual(b3.nbfield, True)
        b4 = NullBooleanModel.objects.create(nbfield=False)
        b4.refresh_from_db()
        self.assertEqual(b4.nbfield, False)
        # When an extra clause exists, the boolean conversions are applied with
        # an offset (#13293).
        b5 = BooleanModel.objects.all().extra(select={'string_col': 'string'})[0]
        self.assertNotIsInstance(b5.pk, bool)

    def test_select_related(self):
        """
        Boolean fields retrieved via select_related() should return booleans.
        """
        bmt = BooleanModel.objects.create(bfield=True)
        bmf = BooleanModel.objects.create(bfield=False)
        nbmt = NullBooleanModel.objects.create(nbfield=True)
        nbmf = NullBooleanModel.objects.create(nbfield=False)
        m1 = FksToBooleans.objects.create(bf=bmt, nbf=nbmt)
        m2 = FksToBooleans.objects.create(bf=bmf, nbf=nbmf)
        # select_related('fk_field_name')
        ma = FksToBooleans.objects.select_related('bf').get(pk=m1.id)
        self.assertEqual(ma.bf.bfield, True)
        self.assertEqual(ma.nbf.nbfield, True)
        # select_related()
        mb = FksToBooleans.objects.select_related().get(pk=m1.id)
        mc = FksToBooleans.objects.select_related().get(pk=m2.id)
        self.assertEqual(mb.bf.bfield, True)
        self.assertEqual(mb.nbf.nbfield, True)
        self.assertEqual(mc.bf.bfield, False)
        self.assertEqual(mc.nbf.nbfield, False)

    def test_null_default(self):
        """
        A BooleanField defaults to None, which isn't a valid value (#15124).
        """
        boolean_field = BooleanModel._meta.get_field('bfield')
        self.assertFalse(boolean_field.has_default())
        b = BooleanModel()
        self.assertIsNone(b.bfield)
        with transaction.atomic():
            with self.assertRaises(IntegrityError):
                b.save()
        nb = NullBooleanModel()
        self.assertIsNone(nb.nbfield)
        nb.save()  # no error
class ValidationTest(SimpleTestCase):
    """Validation behaviour of BooleanField and NullBooleanField."""

    def test_boolean_field_doesnt_accept_empty_input(self):
        """clean(None) on a plain BooleanField must raise: None is invalid."""
        field = models.BooleanField()
        with self.assertRaises(ValidationError):
            field.clean(None, None)

    def test_nullbooleanfield_blank(self):
        """
        NullBooleanField shouldn't throw a validation error when given a value
        of None.
        """
        instance = NullBooleanModel(nbfield=None)
        instance.full_clean()
|
# -*- coding: utf-8 -*-
"""Defines mixing class.
You can use it for inherit from Class Base Views, it was
developed by Timothée Peignier https://gist.github.com/cyberdelia/1231560
"""
import random

from django.contrib.auth.decorators import login_required
from django.utils.cache import patch_response_headers
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page, never_cache
from django.views.decorators.csrf import csrf_exempt
class NeverCacheMixin(object):
    """CBV mixin that disables all HTTP caching for the view."""

    @method_decorator(never_cache)
    def dispatch(self, *args, **kwargs):
        # Delegate to the next class in the MRO with caching suppressed.
        return super(NeverCacheMixin, self).dispatch(*args, **kwargs)
class LoginRequiredMixin(object):
    """CBV mixin that redirects anonymous users to the login page."""

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Only authenticated requests reach the wrapped dispatch().
        return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class CSRFExemptMixin(object):
    """CBV mixin that skips CSRF verification for the view."""

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(CSRFExemptMixin, self).dispatch(*args, **kwargs)
class CacheMixin(object):
    """CBV mixin that server-side caches the response via cache_page."""

    cache_timeout = 60  # seconds

    def get_cache_timeout(self):
        """Hook: override to vary the timeout per view/request."""
        return self.cache_timeout

    def dispatch(self, *args, **kwargs):
        parent_dispatch = super(CacheMixin, self).dispatch
        return cache_page(self.get_cache_timeout())(parent_dispatch)(*args, **kwargs)
class CacheControlMixin(object):
    """CBV mixin that sets client-side Cache-Control/Expires headers.

    Fixes garbled ' | ' tokens that had split get_cache_timeout's return
    and the patch_response_headers() call.
    """

    cache_timeout = 60  # seconds

    def get_cache_timeout(self):
        """Hook: override to vary the timeout per view/request."""
        return self.cache_timeout

    def dispatch(self, *args, **kwargs):
        response = super(CacheControlMixin, self).dispatch(*args, **kwargs)
        patch_response_headers(response, self.get_cache_timeout())
        return response
class JitterCacheMixin(CacheControlMixin):
    """CacheControlMixin variant with a randomised timeout.

    Jitter spreads cache expiry across requests so they don't all
    revalidate at once.  Fixes a latent NameError: this module never
    imported ``random`` (added to the imports block).
    """

    cache_range = [40, 80]  # [min, max] seconds

    def get_cache_range(self):
        """Hook: override to change the jitter bounds."""
        return self.cache_range

    def get_cache_timeout(self):
        # randint is inclusive on both bounds.
        return random.randint(*self.get_cache_range())
fset(dateString):
# format in form of 2015-04-24T08:00:00-04:00 converted to UTC timestamp
if dateString is None:
return None
try:
sign = int(dateString[19:20] + '1')
(hour, minute) = [int(s) for s in dateString[20:].split(':')]
offset = sign * (hour * 60 * 60 + minute * 60)
except:
return None
try:
start_time = datetime.strptime(dateString[:19], "%Y-%m-%dT%H:%M:%S")
timestamp = int(calendar.timegm(start_time.timetuple())) - offset
except:
return None
return timestamp
def rmTimestampToYearMonthDay(timestamp):
    """Split a UNIX timestamp into its local-time (year, month, day)."""
    moment = datetime.fromtimestamp(timestamp)
    return moment.year, moment.month, moment.day
def rmNowToYearMonthDay():
    """Return today's (year, month, day) in local time."""
    today = datetime.now()
    return today.year, today.month, today.day
def rmNormalizeTimestamp(timestamp):
    """Round-trip a timestamp through a local datetime and back to int.

    NOTE(review): strftime('%s') is a glibc extension (not portable to
    Windows); kept as-is to match the rest of this module.
    """
    local = datetime.fromtimestamp(timestamp)
    return int(local.strftime('%s'))
def rmTimestampToDayOfYear(timestamp):
    """Return the 1-based day-of-year for *timestamp* (local time).

    Defaults to the start of the current day when timestamp is None.
    """
    if timestamp is None:
        timestamp = rmCurrentDayTimestamp()
    return datetime.fromtimestamp(timestamp).timetuple().tm_yday
def rmNowDateTime():
    """Return the current local datetime object."""
    return datetime.now()
def rmCurrentTimestamp():
    """Return the current UNIX time as a whole number of seconds."""
    return int(time.time())
def rmCurrentDayTimestamp():
    """Return the timestamp of local midnight of the current day."""
    return rmGetStartOfDay(int(time.time()))
def rmCurrentMinuteTimestamp():
    """Return the current UNIX time truncated to the start of the minute."""
    now = int(time.time())
    return now - now % 60
def rmGetStartOfDay(timestamp):
    """Return the UNIX timestamp of local midnight of *timestamp*'s day.

    Fixes two issues: the local variable shadowed the builtin ``tuple``,
    and the conversion relied on strftime('%s'), a glibc-only extension
    that breaks on non-glibc platforms; time.mktime() is the portable
    equivalent for local time.
    """
    tm = datetime.fromtimestamp(timestamp).timetuple()
    midnight = datetime(tm.tm_year, tm.tm_mon, tm.tm_mday)
    return int(time.mktime(midnight.timetuple()))
def rmGetStartOfDayUtc(timestamp):
    """Return the UTC-midnight timestamp of *timestamp*'s UTC day.

    Relies on the module-level ``utc`` tzinfo and ``utc_t0`` (the aware
    epoch datetime) defined elsewhere in this module.
    """
    tm = datetime.utcfromtimestamp(timestamp).timetuple()
    midnight = datetime(tm.tm_year, tm.tm_mon, tm.tm_mday, tzinfo=utc)
    return int((midnight - utc_t0).total_seconds())
def rmTimestampIsLeapYear(timestamp):
    """True if the local-time year containing *timestamp* is a leap year."""
    year = datetime.fromtimestamp(timestamp).year
    # Gregorian rule: divisible by 4, except centuries not divisible by 400.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def rmConvertDateStringToFormat(dateString, inputFormat, outputFormat):
    """Re-render a date string from one strptime format to another."""
    parsed = datetime.strptime(dateString, inputFormat)
    return parsed.strftime(outputFormat)
def rmDayRange(startDayTimestamp, numDays):
    """Return abs(numDays) timestamps stepping one day at a time.

    Positive numDays walks forward from startDayTimestamp, negative walks
    backward; the first entry is always startDayTimestamp itself.
    """
    base = datetime.fromtimestamp(startDayTimestamp)
    step = 1 if numDays >= 0 else -1
    return [int(time.mktime((base + timedelta(days=step * i)).timetuple()))
            for i in range(abs(numDays))]
def rmDeltaDayFromTimestamp(startDayTimeStamp, deltaDays):
d = datetime.fromtimestamp(startDayTimeStamp)
if deltaDays < 0:
d = d - timedelta(days=-deltaDays)
else:
d = d + timedelta(days=deltaDays)
return int(time.mktime(d.timetuple()))
def rmGetNumberOfDaysBetweenTimestamps(startTimestamp, endTimestamp):
    """Whole days from start to end (negative when end precedes start)."""
    span = datetime.fromtimestamp(endTimestamp) - datetime.fromtimestamp(startTimestamp)
    return span.days
# Sunrise and sunset for a specific location and elevation
def computeSuntransitAndDayLenghtForDayTs(ts, lat, lon, elevation):
    """Return (solar transit Julian day, sunrise/sunset hour angle) for the
    UTC day containing *ts* at the given latitude/longitude/elevation."""
    dayStart = rmGetStartOfDayUtc(ts)
    n = julianDayFromTimestamp(dayStart)
    meanNoon = __computeMeanSolarNoon(n, lon)
    anomaly = __computeSolarMeanAnomay(meanNoon)
    center = __equationOfTheCenter(anomaly)
    eclipticLon = __computeEclipticLongitude(anomaly, center)
    transit = computeSolarTransit(meanNoon, anomaly, eclipticLon)
    sinDeclination = __computeSinSunDeclination(eclipticLon)
    hourAngle = computeHourAngle(lat, sinDeclination, elevation)
    return transit, hourAngle
def rmGetSunsetTimestampForDayTimestamp(ts, lat, lon, elevation):
    """Return the UTC timestamp of sunset for the day containing *ts*.

    Mirrors rmGetSunriseTimestampForDayTimestamp(), including its guard
    against an unconfigured location (the guard was missing here, so a
    None lat/lon raised a TypeError instead of degrading gracefully).
    """
    if lat is None or lon is None:
        log.debug("Latitude or longitude is not set. Returning same timestamp")
        return ts
    # Longitude is negated because the solar formulas use west-positive.
    Jtr, w0 = computeSuntransitAndDayLenghtForDayTs(ts, lat, -lon, elevation)
    Jset = Jtr + w0 / 360
    return julianDayToUTC(Jset)
def rmGetSunriseTimestampForDayTimestamp(ts, lat, lon, elevation):
    """Return the UTC timestamp of sunrise for the day containing *ts*.

    Falls back to *ts* itself when the location is not configured.
    """
    if lat is None or lon is None:
        log.debug("Latitude or longitude is not set. Returning same timestamp")
        return ts
    # Longitude is negated because the solar formulas use west-positive.
    Jtr, w0 = computeSuntransitAndDayLenghtForDayTs(ts, lat, -lon, elevation)
    Jrise = Jtr - w0 / 360
    return julianDayToUTC(Jrise)
def julianDayFromTimestamp(ts):
    """Convert a UNIX timestamp to days since the J2000 epoch, evaluated at
    that day's UTC noon, including the 0.0008 leap-second correction."""
    noon = rmGetStartOfDayUtc(ts) + 12 * 3600
    julianDate = float(noon) / 86400 + 2440587.5
    return julianDate - 2451545.0 + 0.0008
def julianDayToUTC(JD):
    """Convert a Julian day number back to UNIX seconds (2440587.5 = epoch)."""
    return 86400 * (JD - 2440587.5)
def __cosa(degree):
    """cos() taking degrees (uses the module's literal PI constant)."""
    return cos(degree / 180 * 3.14159265359)
def __sina(degree):
    """sin() taking degrees (uses the module's literal PI constant)."""
    return sin(degree / 180 * 3.14159265359)
def __acosa(x):
    """acos() returning degrees; out-of-range inputs clamp to 180/0."""
    if abs(x) > 1:
        return 180. if x < 0 else 0.
    return acos(x) / 3.14159265359 * 180.
def __asina(x):
    """asin() returning degrees; out-of-range inputs clamp to -90/90."""
    if abs(x) > 1:
        return -90. if x < 0 else 90.
    return asin(x) / 3.14159265359 * 180.
def __computeMeanSolarNoon(jd, wlon):
    """Mean solar noon: the Julian day shifted by the west longitude."""
    return jd + wlon / 360
def __computeSolarMeanAnomay(solarNoon):
    """Solar mean anomaly in degrees, wrapped to [0, 360).

    NOTE(review): the name's 'Anomay' typo is kept — callers use it.
    """
    return (357.5291 + 0.98560028 * solarNoon) % 360
def __equationOfTheCenter(solarMeanAnomaly):
    """Equation of the center (degrees): first three sine harmonics."""
    m = solarMeanAnomaly
    return 1.9148 * __sina(m) + 0.0200 * __sina(2 * m) + 0.0003 * __sina(3 * m)
def __computeEclipticLongitude(solarMeanAnomaly, eqCenter):
    """Ecliptic longitude (degrees): anomaly + center + perihelion offset."""
    return (solarMeanAnomaly + eqCenter + 180 + 102.9372) % 360
def computeSolarTransit(meanSolarNoon, solarMeanAnomaly, eclipticLongitude):
    """Julian date of local solar transit with the equation-of-time terms."""
    correction = 0.0053 * __sina(solarMeanAnomaly) - 0.0069 * __sina(2 * eclipticLongitude)
    return 2451545.0 + meanSolarNoon + correction
def __computeSinSunDeclination(L):
    """sin(declination) = sin(ecliptic longitude) * sin(Earth's obliquity)."""
    return __sina(L) * __sina(23.439)
def computeHourAngle(nlat, sdelta, elevation):
    """Sunrise/sunset hour angle (degrees) with an elevation dip correction."""
    if elevation < 0:
        elevation = 0
    # Horizon dip grows with the square root of the observer's elevation.
    dip = -2.076 * sqrt(elevation) / 60
    cosw0 = (__sina(-0.83 + dip) - __sina(nlat) * sdelta) / (sqrt(1 - sdelta * sdelta) * __cosa(nlat))
    return __acosa(cosw0)
def rmNTPFetch(server = "pool.ntp.org", withRequestDrift = False):
import struct
from socket import socket, AF_INET, SOCK_DGRAM
requestPacket = '\x1b' + 47 * '\0'
startTime = time.time()
try:
sock = socket(AF_INET, SOCK_DGRAM)
sock.settimeout(5)
except Exception, e:
log.error("NTPFetch: Can't create socket")
return None
try:
sock.sendto(requestPacket, (server, 123))
data, ip = sock.recvfrom(1024)
except Exception, e:
#log.error("NTPFetch: Error receiving data: %s" % e)
return None
try:
if data:
timestamp = struct.unpack('!12I', data)[10]
timestamp -= 2208988800L # = date in sec since epoch
# http://stackoverflow.com/questions/1599060/how-can-i-get-an-accurate-utc-time-with-python
if withRequestDrift:
reqTime = time.time() - startTime
timestamp += reqTime / 2
return timestamp
except:
log.error("NTPFetch: Conversion failed.")
return None
def getAlarmElapsedRealTime():
    ### DEPRECATED: This method was used on Android to get the UP_TIME (replaced by monotonicTime())
    # Reads the elapsed-realtime clock from the Android /dev/alarm device via
    # ioctl; returns -1 if the device is unavailable or the call fails.
    elapsedTime = -1
    try:
        alarmFile = open("/dev/alarm", 'r')
        if alarmFile:
            t = timespec()
            # ANDROID_ALARM_GET_TIME(ANDROID_ALARM_ELAPSED_REALTIME) = 0x40086134
            result = fcntl.ioctl(alarmFile.fileno(), 0x40086134, t)
            if result == 0:
                elapsedTime = t.tv_sec
            alarmFile.close()
    except Exception, e:
        log.error(e)
    return elapsedTime
class rmMonotonicTime:
CLOCK_MONOTONIC_RAW = 4 # see <linux/time.h>
def __init__(self, fallback = True):
self.fallback = fallback
self.clock_gettime = None
self.get = None
self.monotonicInit()
def monotonicInit(self):
try:
from RMOSGlue.rmOSPlatform import RMOSPlatform
if RMOSPlatform().AUTODETECTED = | = RMOSPlatform.ANDROID:
librt = ctypes.CDLL | ('libc.so', use_errno=True)
log.info("Initialised Android monotonic clock")
elif RMOSPlatform().AUTODETECTED == RMOSPlatform.OPENWRT:
lib |
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
    'name': 'BOM for working process',
    'version': '0.1',
    'category': '',
    'description': """
        Add extra information for manage BOM as a work BOM
        """,
    'author': 'Micronaet S.r.l. - Nicola Riolini',
    'website': 'http://www.micronaet.it',
    'license': 'AGPL-3',
    'depends': [
        'base',
        'mrp',
    ],
    'init_xml': [],
    'demo': [],
    'data': [
        'security/ir.model.access.csv',
        'bom_views.xml',
    ],
    'active': False,
    'installable': True,
    'auto_install': False,
}
|
"""
WSGI config for opendai_lleida_web project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will | have the standard Django WSGI application here, but it also
might make se | nse to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin_web.settings-production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
from .data_asset import DataAsset
from .file_data_asset import FileDataAsset
|
import connect_tests
import string_utils_tests
|
#!/usr/bin/env python
# encoding:utf-8
"""
@software: PyCharm
@file: video_db.py
@time: 2016/8/4 16:56
"""
import sqlite3
class Create_DB():
    """Thin wrapper around an sqlite3 connection to video.db.

    Fixes garbled ' | ' tokens in the original class body and translates
    the Chinese comments to English.
    """

    def __init__(self):
        # Creates/opens video.db in the current working directory.
        self.conn = sqlite3.connect('video.db')
        self.cn = self.conn.cursor()

    def create_table(self, table):
        """Create a table; *table* is the complete CREATE TABLE SQL command."""
        self.cn.execute(table)

    def insert_db(self):
        # Insert data (not implemented yet).
        pass

    def select_db(self):
        # Query data (not implemented yet).
        pass


if __name__ == '__main__':
    pass
|
foo isn't good enough to write it.
self.assertRegexpMatches(err_out.getvalue(), 'Traceback')
self.assertRegexpMatches(err_out.getvalue(), r'import bad\)syntax')
self.assertRegexpMatches(err_out.getvalue(), 'SyntaxError')
def test_addpackage_import_bad_exec(self):
# Issue 10642
pth_dir, pth_fn = self.make_pth("randompath\nimport nosuchmodule\n")
with captured_output("stderr") as err_out:
site.addpackage(pth_dir, pth_fn, set())
self.assertRegexpMatches(err_out.getvalue(), "line 2")
self.assertRegexpMatches(err_out.getvalue(),
re.escape(os.path.join(pth_dir, pth_fn)))
# XXX: ditto previous XXX comment.
self.assertRegexpMatches(err_out.getvalue(), 'Traceback')
self.assertRegexpMatches(err_out.getvalue(), 'ImportError')
@unittest.skipIf(sys.platform == "win32", "Windows does not raise an "
| "error for file paths containing null characters")
def test_addpackage_import_bad_pth_file(self):
# Issue 5258
pth_dir, pth_fn = self.make_pth("abc\x00def\n")
with captured_output("stderr") as err_out:
sit | e.addpackage(pth_dir, pth_fn, set())
self.assertRegexpMatches(err_out.getvalue(), "line 1")
self.assertRegexpMatches(err_out.getvalue(),
re.escape(os.path.join(pth_dir, pth_fn)))
# XXX: ditto previous XXX comment.
self.assertRegexpMatches(err_out.getvalue(), 'Traceback')
self.assertRegexpMatches(err_out.getvalue(), 'TypeError')
def test_addsitedir(self):
# Same tests for test_addpackage since addsitedir() essentially just
# calls addpackage() for every .pth file in the directory
pth_file = PthFile()
pth_file.cleanup(prep=True) # Make sure that nothing is pre-existing
# that is tested for
try:
pth_file.create()
site.addsitedir(pth_file.base_dir, set())
self.pth_file_tests(pth_file)
finally:
pth_file.cleanup()
@unittest.skipUnless(site.ENABLE_USER_SITE, "requires access to PEP 370 "
"user-site (site.ENABLE_USER_SITE)")
def test_s_option(self):
usersite = site.USER_SITE
self.assertIn(usersite, sys.path)
env = os.environ.copy()
rc = subprocess.call([sys.executable, '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
self.assertEqual(rc, 1, "%r is not in sys.path (sys.exit returned %r)"
% (usersite, rc))
env = os.environ.copy()
rc = subprocess.call([sys.executable, '-s', '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
self.assertEqual(rc, 0)
env = os.environ.copy()
env["PYTHONNOUSERSITE"] = "1"
rc = subprocess.call([sys.executable, '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
self.assertEqual(rc, 0)
env = os.environ.copy()
env["PYTHONUSERBASE"] = "/tmp"
rc = subprocess.call([sys.executable, '-c',
'import sys, site; sys.exit(site.USER_BASE.startswith("/tmp"))'],
env=env)
self.assertEqual(rc, 1)
    def test_getuserbase(self):
        # getuserbase() computes USER_BASE lazily and caches it on the module.
        site.USER_BASE = None
        user_base = site.getuserbase()
        # the call sets site.USER_BASE
        self.assertEqual(site.USER_BASE, user_base)
        # let's set PYTHONUSERBASE and see if it uses it
        site.USER_BASE = None
        import sysconfig
        # Reset sysconfig's cache so the new env var is actually consulted.
        sysconfig._CONFIG_VARS = None
        with EnvironmentVarGuard() as environ:
            environ['PYTHONUSERBASE'] = 'xoxo'
            self.assertTrue(site.getuserbase().startswith('xoxo'),
                            site.getuserbase())
def test_getusersitepackages(self):
site.USER_SITE = None
site.USER_BASE = None
user_site = site.getusersitepackages()
# the call sets USER_BASE *and* USER_SITE
self.assertEqual(site.USER_SITE, user_site)
self.assertTrue(user_site.startswith(site.USER_BASE), user_site)
    def test_getsitepackages(self):
        # getsitepackages() derives candidate dirs from site.PREFIXES; the
        # expected layout differs per platform.
        site.PREFIXES = ['xoxo']
        dirs = site.getsitepackages()
        if sys.platform in ('os2emx', 'riscos'):
            # Platforms with a single Lib/site-packages directory.
            self.assertEqual(len(dirs), 1)
            wanted = os.path.join('xoxo', 'Lib', 'site-packages')
            self.assertEqual(dirs[0], wanted)
        elif (sys.platform == "darwin" and
            sysconfig.get_config_var("PYTHONFRAMEWORK")):
            # OS X framework builds
            site.PREFIXES = ['Python.framework']
            dirs = site.getsitepackages()
            self.assertEqual(len(dirs), 3)
            wanted = os.path.join('/Library',
                                  sysconfig.get_config_var("PYTHONFRAMEWORK"),
                                  sys.version[:3],
                                  'site-packages')
            self.assertEqual(dirs[2], wanted)
        elif os.sep == '/':
            # OS X non-framwework builds, Linux, FreeBSD, etc
            self.assertEqual(len(dirs), 2)
            wanted = os.path.join('xoxo', 'lib', 'python' + sys.version[:3],
                                  'site-packages')
            self.assertEqual(dirs[0], wanted)
            wanted = os.path.join('xoxo', 'lib', 'site-python')
            self.assertEqual(dirs[1], wanted)
        else:
            # other platforms
            self.assertEqual(len(dirs), 2)
            self.assertEqual(dirs[0], 'xoxo')
            wanted = os.path.join('xoxo', 'lib', 'site-packages')
            self.assertEqual(dirs[1], wanted)
class PthFile(object):
    """Helper class for handling testing of .pth files"""

    def __init__(self, filename_base=TESTFN, imported="time",
                 good_dirname="__testdir__", bad_dirname="__bad"):
        """Initialize instance variables"""
        # All paths are rooted at the current working directory.
        self.filename = filename_base + ".pth"
        self.base_dir = os.path.abspath('')
        self.file_path = os.path.join(self.base_dir, self.filename)
        self.imported = imported
        self.good_dirname = good_dirname
        self.bad_dirname = bad_dirname
        self.good_dir_path = os.path.join(self.base_dir, self.good_dirname)
        self.bad_dir_path = os.path.join(self.base_dir, self.bad_dirname)

    def create(self):
        """Create a .pth file with a comment, blank lines, an ``import
        <self.imported>``, a line with self.good_dirname, and a line with
        self.bad_dirname.

        Creation of the directory for self.good_dir_path (based off of
        self.good_dirname) is also performed.

        Make sure to call self.cleanup() to undo anything done by this method.
        """
        FILE = open(self.file_path, 'w')
        try:
            # Python 2 print-chevron: each statement writes a .pth line.
            print>>FILE, "#import @bad module name"
            print>>FILE, "\n"
            print>>FILE, "import %s" % self.imported
            print>>FILE, self.good_dirname
            print>>FILE, self.bad_dirname
        finally:
            FILE.close()
        os.mkdir(self.good_dir_path)

    def cleanup(self, prep=False):
        """Make sure that the .pth file is deleted, self.imported is not in
        sys.modules, and that both self.good_dirname and self.bad_dirname are
        not existing directories."""
        if os.path.exists(self.file_path):
            os.remove(self.file_path)
        if prep:
            # prep=True: snapshot and drop the module so a later addsitedir()
            # can be observed importing it fresh.
            self.imported_module = sys.modules.get(self.imported)
            if self.imported_module:
                del sys.modules[self.imported]
        else:
            # prep=False: restore whatever module was displaced earlier.
            if self.imported_module:
                sys.modules[self.imported] = self.imported_module
        if os.path.exists(self.good_dir_path):
            os.rmdir(self.good_dir_path)
        if os.path.exists(self.bad_dir_path):
            os.rmdir(self.bad_dir_path)
class ImportSideEffectTests(unittest.TestCase):
"""Te |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#    A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import factory.fuzzy
from education_group.ddd.domain._campus import Campus
class CampusFactory(factory.Factory):
    """Factory producing education_group Campus value objects for tests."""

    class Meta:
        model = Campus
        abstract = False

    # Sequential names keep generated campuses unique and readable.
    name = factory.Sequence(lambda n: 'Campus %02d' % n)
    university_name = factory.Sequence(lambda n: 'University %02d' % n)
|
from setuptools import setup
def readme():
    """Return the contents of README.md for use as the long description."""
    with open('README.md') as fh:
        return fh.read()
## test with python setup.py develop
setup(
    name='ipyreload',
    packages=['ipyreload'],
    # Version must be a string: modern setuptools rejects/normalises floats.
    version='1.2',
    description='ipython productivity tools',
    long_description=readme(),
    url="https://github.com/wolfiex/ipython-dev-reload",
    keywords='ipython reload'.split(' '),
    author='Dan Ellis',
    author_email='daniel.ellis.research@gmail.com',
    license='MIT',
    zip_safe=False)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# url_read.py file is part of slpkg.
# Copyright 2014-2021 Dimitris Zlatanidis <d.zlatanidis@gmail.com>
# All rights reserved.
# Slpkg is a user-friendly package manager for Slackware installations
# https://gitlab.com/dslackw/slpkg
# Slpkg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import urllib3
from slpkg.__metadata__ import MetaData as _meta_
class URL:
    """Urls reading class.

    Fixes garbled ' | ' tokens that had split the ProxyManager/PoolManager
    assignments in __init__.
    """

    def __init__(self, link):
        self.link = link
        self.meta = _meta_
        self.red = _meta_.color["RED"]
        self.endc = _meta_.color["ENDC"]
        # Honour a configured HTTP proxy, otherwise connect directly.
        if self.meta.http_proxy:
            self.http = urllib3.ProxyManager(self.meta.http_proxy)
        else:
            self.http = urllib3.PoolManager()

    def reading(self):
        """Open url and read; returns ' ' when the connection fails."""
        try:
            f = self.http.request('GET', self.link)
            return f.data.decode("utf-8", "ignore")
        except urllib3.exceptions.NewConnectionError:
            print(f"\n{self.red}Can't read the file '{self.link.split('/')[-1]}'{self.endc}")
            return " "
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is based on chromium/chromium/master/tools/clang/scripts/update.py.
It is used on Windows platforms to copy the correct msdia*.dll to the
clang folder, as a "gclient hook".
"""
import os
import shutil
import stat
import sys

# Path constants. (All of these should be absolute paths.)
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
LLVM_BUILD_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', 'third_party',
                                              'llvm-build', 'Release+Asserts'))
def GetDiaDll():
    """Get the location of msdia*.dll for the platform."""
    # Bump after VC updates.
    DIA_DLL = {
        '2013': 'msdia120.dll',
        '2015': 'msdia140.dll',
        '2017': 'msdia140.dll',
        '2019': 'msdia140.dll',
    }
    # Don't let vs_toolchain overwrite our environment.
    environ_bak = os.environ
    sys.path.append(os.path.join(THIS_DIR, '..', '..', 'build'))
    import vs_toolchain
    win_sdk_dir = vs_toolchain.SetEnvironmentAndGetSDKDir()
    msvs_version = vs_toolchain.GetVisualStudioVersion()
    if bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1'))):
        # Hermetic depot_tools toolchain: the DIA SDK sits next to win_sdk.
        dia_path = os.path.join(win_sdk_dir, '..', 'DIA SDK', 'bin', 'amd64')
    else:
        # Locally-installed Visual Studio: honour an explicit override first.
        if 'GYP_MSVS_OVERRIDE_PATH' in os.environ:
            vs_path = os.environ['GYP_MSVS_OVERRIDE_PATH']
        else:
            vs_path = vs_toolchain.DetectVisualStudioPath()
        dia_path = os.path.join(vs_path, 'DIA SDK', 'bin', 'amd64')
    os.environ = environ_bak
    return os.path.join(dia_path, DIA_DLL[msvs_version])
def CopyFile(src, dst):
    """Copy a single file from *src* to *dst*, echoing the operation."""
    message = "Copying %s to %s" % (str(src), str(dst))
    print(message)
    shutil.copy(src, dst)
def CopyDiaDllTo(target_dir):
    """Copy the (always 64-bit) msdia DLL for the detected MSVS into target_dir."""
    CopyFile(GetDiaDll(), target_dir)
def main():
    """Entry point: copy the DIA DLL next to the clang binaries."""
    CopyDiaDllTo(os.path.join(LLVM_BUILD_DIR, 'bin'))
    return 0
if __name__ == '__main__':
sys.exit(main())
|
"""
Views to support exchange of authentication credentials.
The following are currently implemented:
1. AccessTokenExchangeView:
3rd party (social-auth) OAuth 2.0 access token -> 1st party (open-edx) OAuth 2.0 access token
2. LoginWithAccessTokenView:
1st party (open-edx) OAuth 2.0 access token -> session cookie
"""
# pylint: disable=abstract-method
import django.contrib.auth as auth
import social.apps.django_app.utils as social_utils
from django.conf import settings
from django.contrib.auth import login
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from edx_oauth2_provider.constants import SCOPE_VALUE_DICT
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.views.base import TokenView as DOTAccessTokenView
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from provider import constants
from provider.oauth2.views import AccessTokenView as DOPAccessTokenView
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from openedx.core.djangoapps.auth_exchange.forms import AccessTokenExchangeForm
from openedx.core.djangoapps.oauth_dispatch import adapters
from openedx.core.lib.api.authentication import OAuth2AuthenticationAllowInactiveUser
class AccessTokenExchangeBase(APIView):
    """
    View for token exchange from 3rd party OAuth access token to 1st party
    OAuth access token.

    Subclasses must provide ``oauth2_adapter`` plus the token-creation hooks
    (``get_access_token``/``create_access_token``/``access_token_response``/
    ``error_response``) from their backing provider implementation.
    """
    @method_decorator(csrf_exempt)
    @method_decorator(social_utils.strategy("social:complete"))
    def dispatch(self, *args, **kwargs):
        return super(AccessTokenExchangeBase, self).dispatch(*args, **kwargs)

    def get(self, request, _backend):  # pylint: disable=arguments-differ
        """
        Pass through GET requests without the _backend
        """
        return super(AccessTokenExchangeBase, self).get(request)

    def post(self, request, _backend):  # pylint: disable=arguments-differ
        """
        Handle POST requests to get a first-party access token.
        """
        form = AccessTokenExchangeForm(request=request, oauth2_adapter=self.oauth2_adapter, data=request.POST)  # pylint: disable=no-member
        if not form.is_valid():
            return self.error_response(form.errors)  # pylint: disable=no-member
        user = form.cleaned_data["user"]
        scope = form.cleaned_data["scope"]
        client = form.cleaned_data["client"]
        return self.exchange_access_token(request, user, scope, client)

    def exchange_access_token(self, request, user, scope, client):
        """
        Exchange third party credentials for an edx access token, and return a
        serialized access token response.
        """
        if constants.SINGLE_ACCESS_TOKEN:
            # Reuse a single access token per user when the provider allows it.
            edx_access_token = self.get_access_token(request, user, scope, client)  # pylint: disable=no-member
        else:
            edx_access_token = self.create_access_token(request, user, scope, client)
        return self.access_token_response(edx_access_token)  # pylint: disable=no-member
class DOPAccessTokenExchangeView(AccessTokenExchangeBase, DOPAccessTokenView):
    """
    Token-exchange endpoint backed by django-oauth2-provider (DOP).

    Converts a 3rd party OAuth access token into a 1st party OAuth access
    token whose lifecycle is managed by DOP.
    """
    oauth2_adapter = adapters.DOPAdapter()
class DOTAccessTokenExchangeView(AccessTokenExchangeBase, DOTAccessTokenView):
    """
    Token-exchange endpoint backed by django-oauth-toolkit (DOT).

    Converts a 3rd party OAuth access token into a 1st party OAuth access
    token whose lifecycle is managed by DOT.
    """
    oauth2_adapter = adapters.DOTAdapter()

    def get(self, request, _backend):
        """Reject GET: this endpoint only accepts POST."""
        return Response(status=400, data={
            'error': 'invalid_request',
            'error_description': 'Only POST requests allowed.',
        })

    def get_access_token(self, request, user, scope, client):
        """
        TODO: MA-2122: Reusing access tokens is not yet supported for DOT.
        Just return a new access token.
        """
        return self.create_access_token(request, user, scope, client)

    def create_access_token(self, request, user, scope, client):
        """
        Create and return a new access token.
        """
        seconds_per_day = 24 * 60 * 60
        # Token lifetime is configured in days; BearerToken wants seconds.
        expiry = settings.OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS * seconds_per_day
        token_generator = BearerToken(
            expires_in=expiry,
            request_validator=oauth2_settings.OAUTH2_VALIDATOR_CLASS(),
        )
        self._populate_create_access_token_request(request, user, scope, client)
        return token_generator.create_token(request, refresh_token=True)

    def access_token_response(self, token):
        """
        Wrap an access token in an appropriate response
        """
        return Response(data=token)

    def _populate_create_access_token_request(self, request, user, scope, client):
        """
        django-oauth-toolkit expects certain non-standard attributes to
        be present on the request object. This function modifies the
        request object to match these expectations
        """
        request.user = user
        request.client = client
        request.scopes = [SCOPE_VALUE_DICT[scope]]
        request.grant_type = client.authorization_grant_type
        # No authorization-code state or pre-existing credentials apply here.
        request.state = None
        request.refresh_token = None
        request.extra_credentials = None

    def error_response(self, form_errors):
        """
        Return an error response consisting of the errors in the form
        """
        return Response(status=400, data=form_errors)
class LoginWithAccessTokenView(APIView):
    """
    View for exchanging an access token for session cookies
    """
    authentication_classes = (OAuth2AuthenticationAllowInactiveUser,)
    permission_classes = (permissions.IsAuthenticated,)

    @staticmethod
    def _get_path_of_arbitrary_backend_for_user(user):
        """
        Return the path to the first found authentication backend that recognizes the given user.
        """
        for backend_path in settings.AUTHENTICATION_BACKENDS:
            candidate = auth.load_backend(backend_path)
            if candidate.get_user(user.id):
                return backend_path

    @method_decorator(csrf_exempt)
    def post(self, request):
        """
        Handler for the POST method to this view.
        """
        # django's login() stores the user's id in request.session[SESSION_KEY]
        # and the backend path in request.session[BACKEND_SESSION_KEY].  It
        # assumes the backend path was stashed on request.user.backend by a
        # prior 'authenticate' call, which not every provider does — so fill
        # that field in ourselves when it is missing.
        if not hasattr(request.user, 'backend'):
            request.user.backend = self._get_path_of_arbitrary_backend_for_user(request.user)
        # login generates and stores the user's cookies in the session.
        login(request, request.user)
        # Cookies stored in the session are returned with the response.
        return HttpResponse(status=204)
|
import android
class SMSPoolMember:
    """Answer connectivity queries for this Android device via the SL4A facade."""

    def __init__(self, query):
        # SL4A scripting facade used to query device state.
        self.droid = android.Android()
        # Query keyword, with surrounding whitespace stripped.
        self.query = str(query).strip()

    def wifiConnected(self):
        """Return True if the device is associated with a wifi network."""
        none = "<unknown ssid>"
        return not self.droid.wifiGetConnectionInfo().result["ssid"] == none

    def dataConnected(self):
        """Return True if the device reports a valid cell location (cellular data)."""
        return self.droid.getCellLocation().result["cid"] > -1

    def sendResponse(self):
        """Build the textual reply for the current query keyword."""
        if self.query == "connection":
            return "pool:" + str(self.wifiConnected() or self.dataConnected())
        else:
            return "pool: None"
|
import caffe
import numpy as np
import pdb
class MyLossLayer(caffe.Layer):
    """Layer of Efficient Siamese loss function.

    Computes a pairwise hinge loss over score differences between items of
    different quality levels within each group of the batch, without
    duplicating the network into two branches.
    """

    def setup(self, bottom, top):
        # Hinge margin enforced between scores of differently-ranked pairs.
        self.margin = 10
        print('*********************** SETTING UP')

    def forward(self, bottom, top):
        """The parameters here have the same meaning as data_layer"""
        self.Num = 0
        batch = 1
        level = 5
        dis = 9
        SepSize = batch * level
        self.dis = []
        # Collect score differences for every ordered pair (i, j) inside each
        # of the `dis` groups, where j ranges over later batches than i.
        for k in range(dis):
            for i in range(SepSize * k, SepSize * (k + 1) - batch):
                for j in range(SepSize * k + int((i - SepSize * k) / batch + 1) * batch, SepSize * (k + 1)):
                    self.dis.append(bottom[0].data[i] - bottom[0].data[j])
                    self.Num += 1
        self.dis = np.asarray(self.dis)
        # Efficient Siamese forward pass of hinge loss.
        self.loss = np.maximum(0, self.margin - self.dis)
        top[0].data[...] = np.sum(self.loss) / bottom[0].num

    def backward(self, top, propagate_down, bottom):
        """The parameters here have the same meaning as data_layer"""
        batch = 1
        index = 0
        level = 5
        dis = 9
        SepSize = batch * level
        self.ref = np.zeros(bottom[0].num, dtype=np.float32)
        # Walk the pairs in the same order as forward(); each active hinge
        # term contributes -1 to the higher-ranked item and +1 to the lower.
        for k in range(dis):
            for i in range(SepSize * k, SepSize * (k + 1) - batch):
                for j in range(SepSize * k + int((i - SepSize * k) / batch + 1) * batch, SepSize * (k + 1)):
                    if self.loss[index] > 0:
                        self.ref[i] += -1
                        self.ref[j] += +1
                    index += 1
        # Efficient Siamese backward pass.
        bottom[0].diff[...] = np.reshape(self.ref, (bottom[0].num, 1)) / bottom[0].num

    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        top[0].reshape(1)
|
#!/usr/bin/env python3
"""Pad a disk-image file with zero bytes so its size is a sector multiple."""
import sys

SECTOR_SIZE = 512


def pad_to_sector(data, sector_size=SECTOR_SIZE):
    """Pad `data` (a bytearray) in place with zeros up to a sector multiple.

    Already-aligned data is left untouched (the original code appended a
    whole extra sector in that case).  Returns the same bytearray.
    """
    bytes_to_append = -len(data) % sector_size
    for _ in range(bytes_to_append):
        data.append(0)
    return data


def main():
    """CLI entry point: align the image file named on the command line."""
    if len(sys.argv) != 2:
        raise SystemExit('Incorrect usage. Use: ' + sys.argv[0] + ' <image.img>')
    image_filename = sys.argv[1]
    with open(image_filename, 'rb') as f:
        content = bytearray(f.read())
    pad_to_sector(content)
    with open(image_filename, 'wb') as f:
        f.write(content)
    print('Successfully aligned to sector')


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.