prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os

import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp, working_dir

import spack
from spack.util.executable import ProcessError, which

# Upstream repository used as a fallback when no git origin can be found.
_SPACK_UPSTREAM = 'https://github.com/spack/spack'

# Metadata consumed by spack's command framework.
description = "create a new installation of spack in another prefix"
section = "admin"
level = "long"
def setup_parser(subparser):
    """Register the clone command's arguments on *subparser*."""
    subparser.add_argument(
        '-r', '--remote', action='store', dest='remote', default='origin',
        help="name of the remote to clone from")
    subparser.add_argument(
        'prefix',
        help="names of prefix where we should install spack")
def get_origin_info(remote):
    """Return ``(origin_url, branch)`` describing where this spack came from.

    Falls back to the official upstream URL and the 'develop' branch when
    the current prefix is not a usable git checkout.
    """
    git_dir = os.path.join(spack.prefix, '.git')
    git = which('git', required=True)
    try:
        branch = git('symbolic-ref', '--short', 'HEAD', output=str)
    except ProcessError:
        # Detached HEAD or not a git checkout: assume the default branch.
        branch = 'develop'
        tty.warn('No branch found; using default branch: %s' % branch)
    if remote == 'origin' and \
       branch not in ('master', 'develop'):
        # Only canonical branches are cloned from the official remote.
        branch = 'develop'
        tty.warn('Unknown branch found; using default branch: %s' % branch)
    try:
        origin_url = git(
            '--git-dir=%s' % git_dir,
            'config', '--get', 'remote.%s.url' % remote,
            output=str)
    except ProcessError:
        # No configured remote: clone from the public upstream instead.
        origin_url = _SPACK_UPSTREAM
        tty.warn('No git repository found; '
                 'using default upstream URL: %s' % origin_url)
    return (origin_url.strip(), branch.strip())
def clone(parser, args):
    """Clone this spack's origin into ``args.prefix`` as a fresh installation.

    Dies (via tty.die) if the prefix is a file, already a git repository,
    or contains any files.
    """
    origin_url, branch = get_origin_info(args.remote)
    prefix = args.prefix
    tty.msg("Fetching spack from '%s': %s" % (args.remote, origin_url))
    if os.path.isfile(prefix):
        tty.die("There is already a file at %s" % prefix)
    mkdirp(prefix)
    if os.path.exists(os.path.join(prefix, '.git')):
        tty.die("There already seems to be a git repository in %s" % prefix)
    files_in_the_way = os.listdir(prefix)
    if files_in_the_way:
        tty.die("There are already files there! "
                "Delete these files before boostrapping spack.",
                *files_in_the_way)
    tty.msg("Installing:",
            "%s/bin/spack" % prefix,
            "%s/lib/spack/..." % prefix)
    with working_dir(prefix):
        git = which('git', required=True)
        # Init then fetch only the wanted branch, rather than a full clone.
        git('init', '--shared', '-q')
        git('remote', 'add', 'origin', origin_url)
        git('fetch', 'origin', '%s:refs/remotes/origin/%s' % (branch, branch),
            '-n', '-q')
        git('reset', '--hard', 'origin/%s' % branch, '-q')
        git('checkout', '-B', branch, 'origin/%s' % branch, '-q')
    tty.msg("Successfully created a new spack in %s" % prefix,
            "Run %s/bin/spack to use this installation." % prefix)
|
import squeezeboxserver
import time

# Connect to the Squeezebox server's control interface.
squeezebox = squeezeboxserver.SqueezeboxServer("192.168.1.65:9000")

# List every attached player by its MAC address.
players = squeezebox.players()
for p in players:
    print("MAC: %s" % p['playerid'])

# Drive one specific player: power on, play, stop, power off.
time.sleep(10)
squeezebox.power("00:04:20:06:8c:55", "on")
squeezebox.playlist("00:04:20:06:8c:55", "play")
time.sleep(10)
squeezebox.playlist("00:04:20:06:8c:55", "stop")
time.sleep(3)
squeezebox.power("00:04:20:06:8c:55", "off")
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Restored the revision string (it had been split by stray characters).
__revision__ = "src/engine/SCons/Tool/MSCommon/__init__.py 5357 2011/09/09 21:31:03 bdeegan"

__doc__ = """
Common functions for Microsoft Visual Studio and Visual C/C++.
"""
import copy
import os
import re
import subprocess

import SCons.Errors
import SCons.Platform.win32
import SCons.Util

from SCons.Tool.MSCommon.sdk import mssdk_exists, \
    mssdk_setup_env
from SCons.Tool.MSCommon.vc import msvc_exists, \
    msvc_setup_env, \
    msvc_setup_env_once
from SCons.Tool.MSCommon.vs import get_default_version, \
    get_vs_by_version, \
    merge_default_version, \
    msvs_exists, \
    query_versions
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
import re
def fun(s):
    """Return True if *s* is a valid email of the form user@site.ext.

    The username must start with a letter and may contain word characters
    and dashes; the site name is alphanumeric; the extension is 1-3 letters.
    """
    # Raw string avoids the invalid "\w" escape in a plain string literal.
    pattern = r"^[a-zA-Z][\w-]*@[a-zA-Z0-9]+\.[a-zA-Z]{1,3}$"
    if not re.match(pattern, s):
        return False
    # The split-based checks below are redundant given the anchored pattern,
    # but they are kept to preserve the original control flow.
    username, after = re.split(r'[@]', s)
    websitename, extension = re.split(r'[.]', after)
    if len(extension) > 3:
        return False
    return True
def filter_mail(emails):
    """Return only the addresses that pass the ``fun`` validator."""
    return [address for address in emails if fun(address)]
if __name__ == '__main__':
    # Read the number of candidate addresses, then one address per line.
    n = int(input())
    emails = []
    for _ in range(n):
        emails.append(input())
    # Keep only valid addresses and print them in sorted order.
    filtered_emails = filter_mail(emails)
    filtered_emails.sort()
    print(filtered_emails)
|
from django.views.generic import DetailView, ListView

from models import Event
class EventListView(ListView):
    """Paginated list of upcoming events."""
    template_name = 'agenda/event_list.html'
    # NOTE(review): queryset is evaluated when the class body runs -- confirm
    # Event.objects.upcoming() returns a lazy queryset.
    queryset = Event.objects.upcoming()
    paginate_by = 20
class EventArchiveview(EventListView):
    """Same presentation as EventListView, but over past events."""
    queryset = Event.objects.past()
class EventDetailView(DetailView):
    """Detail page for a single Event."""
    model = Event
    template_name = 'agenda/event_detail.html'
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer that computes the maximum (element-wise) of several inputs."""
# pylint: disable=g-direct-tensorflow-import
from keras.layers.merging.base_merge import _Merge
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Maximum')
class Maximum(_Merge):
    """Layer that computes the element-wise maximum of a list of inputs.

    It takes as input a list of tensors, all of the same shape, and returns
    a single tensor (also of the same shape).

    >>> tf.keras.layers.Maximum()([np.arange(5).reshape(5, 1),
    ...                            np.arange(5, 10).reshape(5, 1)])
    <tf.Tensor: shape=(5, 1), dtype=int64, numpy=
    array([[5],
         [6],
         [7],
         [8],
         [9]])>

    >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
    >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
    >>> maxed = tf.keras.layers.Maximum()([x1, x2])
    >>> maxed.shape
    TensorShape([5, 8])
    """

    def _merge_function(self, inputs):
        # Fold tf.maximum across the input list, left to right.
        result = inputs[0]
        for tensor in inputs[1:]:
            result = tf.maximum(result, tensor)
        return result
@keras_export('keras.layers.maximum')
def maximum(inputs, **kwargs):
    """Functional interface to compute maximum (element-wise) list of `inputs`.

    This is equivalent to the `tf.keras.layers.Maximum` layer.

    For example:

    ```python
    input1 = tf.keras.layers.Input(shape=(16,))
    x1 = tf.keras.layers.Dense(8, activation='relu')(input1)  # shape=(None, 8)
    input2 = tf.keras.layers.Input(shape=(32,))
    x2 = tf.keras.layers.Dense(8, activation='relu')(input2)  # shape=(None, 8)
    max_inp = tf.keras.layers.maximum([x1, x2])  # shape=(None, 8)
    out = tf.keras.layers.Dense(4)(max_inp)
    model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)
    ```

    Args:
      inputs: A list of input tensors (at least 2) of same shape.
      **kwargs: Standard layer keyword arguments.

    Returns:
      A tensor (of same shape as input tensor) with the element-wise
      maximum of the inputs.

    Raises:
      ValueError: If input tensors are of different shape.
    """
    layer = Maximum(**kwargs)
    return layer(inputs)
|
#!/usr/bin/env python
"""
demonstrate adding a FigureCanvasGTK3Agg widget to a Gtk.ScrolledWindow
using GTK3 accessed via pygobject
"""
from gi.repository import Gtk

from matplotlib.figure import Figure
from numpy import arange, sin, pi
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas

win = Gtk.Window()
win.connect("delete-event", Gtk.main_quit)
win.set_default_size(400, 300)
win.set_title("Embedding in GTK")

# A simple sine curve to display.
f = Figure(figsize=(5, 4), dpi=100)
a = f.add_subplot(111)
t = arange(0.0, 3.0, 0.01)
s = sin(2 * pi * t)
a.plot(t, s)

sw = Gtk.ScrolledWindow()
win.add(sw)
# A scrolled window border goes outside the scrollbars and viewport
sw.set_border_width(10)

canvas = FigureCanvas(f)  # a Gtk.DrawingArea
canvas.set_size_request(800, 600)
sw.add_with_viewport(canvas)

win.show_all()
Gtk.main()
|
import os
import sys
from Bio import Entrez
import wx
from xml.dom import minidom
import re
class etPlugin():
    """ESummary plugin: runs NCBI ESummary queries and shows the results
    in the host application's result pane."""

    def GetName(self):
        '''
        Method to return name of tool
        '''
        return "ESummary"

    def GetBMP(self, dirH):
        '''
        Method to return identifying image
        '''
        return dirH + r"\Utils\Icons\ncbi_logoESum.bmp"

    def GetOutFile(self):
        # NOTE(review): `dirH` is not defined in this scope, so this looks
        # like a latent NameError -- confirm how this method is invoked.
        self.outfile = dirH + r"\plugins\clustal.aln"
        return self.outfile

    def GetExec(self, parent, dbName, query):
        """Run the ESummary query and write each <Item> value to the pane."""
        self.parent = parent
        erl = GetExec(dbName, query)
        for line in erl.split('\n'):
            if not re.search('.*<Item Name="?', line) == None:
                if not re.search('.*<.*>.*<.*>', line) == None:
                    # Strip the surrounding XML, leaving just the value text.
                    e = re.sub('.*<Item Name="?', '', line)
                    alpha = re.sub('" Type=".*">?', '\n', e)
                    beta = re.sub('<.*', '\n', alpha)
                    parent.text2.write(str(beta))
        parent.text2.write('\n')

    def helpEXE(self, parent):
        """Build the result text control and (re)populate the database menu,
        preserving the previously selected database when possible."""
        parent.l1.Show(True)
        parent.text1.Show(True)
        parent.l2.Show(True)
        parent.l3.Show(True)
        parent.text2 = wx.TextCtrl(parent.panelRSLT, -1, "", size=(892, 370),
                                   style=wx.TE_MULTILINE | wx.TE_PROCESS_ENTER,
                                   pos=(75, 10))
        wx.CallAfter(parent.text2.SetInsertionPoint, 0)
        tempVal = parent.dbCB.GetValue()
        parent.dbList = ['pubmed', 'protein', 'nuccore', 'nucleotide', 'nucgss', 'nucest',
                         'structure', 'genome', 'genomeprj', 'bioproject', 'biosample',
                         'biosystems', 'blastdbinfo', 'books', 'cancerchromosomes', 'cdd',
                         'gap', 'dbvar', 'epigenomics', 'gene', 'gensat', 'gds', 'geo',
                         'geoprofiles', 'homologene', 'journals', 'mesh', 'ncbisearch',
                         'nlmcatalog', 'omia', 'omim', 'pmc', 'popset', 'probe',
                         'proteinclusters', 'pcassay', 'pccompound', 'pcsubstance',
                         'seqannot', 'snp', 'sra', 'taxonomy', 'toolkit', 'toolkitall',
                         'unigene', 'unists', 'gencoll', 'gcassembly']
        parent.dbGoMenu = []  # Database menu options.
        for dummy in parent.dbList:  # Iterate through all available databases
            parent.dbGoMenu.append(wx.NewId())
        tempIdNum = len(parent.menuHandlers)
        for itnum, tool in enumerate(parent.dbList):
            parent.menuHandlers.append((parent.dbGoMenu[itnum], parent.helpDB))
        parent.lowDB, dummy = parent.menuHandlers[tempIdNum]
        # Setup the database menu options
        parent.dbaseMenu = wx.Menu()
        for itnum, tool in enumerate(parent.dbList):
            parent.dbaseMenu.Append(parent.dbGoMenu[itnum], tool, kind=wx.ITEM_RADIO)
        # Update the menu bar
        parent.menuBar.Replace(2, parent.dbaseMenu, "Database")
        parent.menuBar.UpdateMenus()
        parent.dbCB = wx.ComboBox(parent=parent.panelSQ, id=-1, pos=(256, 6),
                                  choices=parent.dbList, style=wx.CB_READONLY)
        # Re-select the previously chosen database, defaulting to the first.
        tempChk = 0
        while tempChk < len(parent.dbList):
            if tempVal == parent.dbList[tempChk]:
                parent.dbCB.SetSelection(tempChk)
                tempChk = len(parent.dbList)
            tempChk += 1
        if tempChk == len(parent.dbList):
            parent.dbCB.SetSelection(0)
        parent.Bind(wx.EVT_COMBOBOX, parent.helpDB, parent.dbCB)
def GetExec(dbName, idName):
    """Run an NCBI ESummary query against *dbName* for record *idName*.

    Returns the raw XML response as a string.
    """
    handle = Entrez.esummary(db=dbName, id=idName, rettype='xml')
    erl = handle.read()
    return erl
|
#!/usr/bin/env
import subprocess, os, random, copy
# Output LilyPond source for a sheet of random twelve-tone rows.
output_filename = "twelvetone_ex.ly"
output_file = open(output_filename, "w")
# Chromatic scale in LilyPond (Dutch) note names.
notes = ['c', 'cis', 'd', 'dis', 'e', 'f', 'fis', 'g', 'gis', 'a', 'ais', 'b']
# Scratch list holding the row currently being generated.
temp_tt_array = []
def twelvetone_gen():
    """Fill the module-level ``temp_tt_array`` with a random twelve-tone row.

    Draws the 12 chromatic pitches from ``notes`` in random order without
    repetition, and returns the (module-level) row list.
    """
    notes_svd = copy.copy(notes)  # work on a copy so `notes` stays intact
    a = 11  # highest valid random index; shrinks as notes are consumed
    while len(temp_tt_array) < 12:
        r = random.randint(0, a)
        temp_tt_array.append(notes_svd[r])
        notes_svd.remove(notes_svd[r])
        a = a - 1
    return temp_tt_array
# LilyPond preamble: stemless, time-signature-free staff layout.
output_file.write(r'''\version "2.16.0"\header{tagline=""}\paper{indent=0 line-width=130 top-margin=13}\layout{\context{\Staff \remove "Stem_engraver" \remove "Time_signature_engraver" \override Stem #'transparent = ##t}}\score{\transpose c c' << \new Staff''')
temp_tt_string = ''
# Emit 16 rows, one per line of music.
for x in range(0, 16):
    output_file.write('\n' + r'{ \time 12/4 ')
    twelvetone_gen()
    for element in temp_tt_array:
        temp_tt_string += element + ' '
    output_file.write(temp_tt_string)
    # Reset the scratch buffers so the next row starts clean.
    temp_tt_string = ''
    temp_tt_array = []
    output_file.write(r'\bar "||" }')
output_file.write('\n>>}')
output_file.close()
# Open the result with the OS default handler; os.startfile exists only on
# Windows, so fall back to the `open` command elsewhere.
try:
    os.startfile(output_filename)
except AttributeError:
    subprocess.call(['open', output_filename])
|
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.type import univ
from pyasn1_modules import pem
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc4043
class PermIdCertTestCase(unittest.TestCase):
    """Tests that a certificate's subjectAltName carries an RFC 4043
    permanentIdentifier otherName with the expected assigner OID."""

    # Test certificate (stray separator characters in the base64 removed).
    cert_pem_text = """\
MIIDDTCCApOgAwIBAgIJAKWzVCgbsG5HMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
dXMgQ0EwHhcNMTkxMTEwMDA0MDIyWhcNMjAxMTA5MDA0MDIyWjBNMQswCQYDVQQG
EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
YW1wbGUxDTALBgNVBAMTBEdhaWwwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQBoktg
/68xL+uEQaWBoHyOjw8EMLeMEng3R2H7yiEzTGoaMJgPOKvSfzB2P0paHYPL+B5y
Gc0CK5EHRujMl9ljH+Wydpk57rKBLo1ZzpWUS6anLGIkWs1sOakcgGGr7hGjggFL
MIIBRzAdBgNVHQ4EFgQU1pCNZuMzfEaJ9GGhH7RKy6Mvz+cwbwYDVR0jBGgwZoAU
8jXbNATapVXyvWkDmbBi7OIVCMGhQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQI
DAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0GCCQDokdYG
kU/O8jAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBhjBCBglghkgBhvhCAQ0E
NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
cnBvc2UuMFMGA1UdEQRMMEqgNgYIKwYBBQUHCAOgKjAoDBs4MjYyMDgtNDE3MDI4
LTU0ODE5NS0yMTUyMzMGCSsGAQQBgaxgMIEQZ2FpbEBleGFtcGxlLmNvbTAKBggq
hkjOPQQDAwNoADBlAjBT+36Y/LPaGSu+61P7kR97M8jAjtH5DtUwrWR02ChshvYJ
x0bpZq3PJaO0WlBgFicCMQCf+67wSvjxxtjI/OAg4t8NQIJW1LcehSXizlPDc772
/FC5OiUAxO+iFaSVMeDFsCo=
"""

    def setUp(self):
        self.asn1Spec = rfc5280.Certificate()

    def testDerCodec(self):
        # Round-trip the certificate, then verify the permanentIdentifier
        # otherName appears in subjectAltName with the expected assigner OID.
        substrate = pem.readBase64fromText(self.cert_pem_text)
        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))
        perm_id_oid = rfc4043.id_on_permanentIdentifier
        assigner_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48')
        permanent_identifier_found = False
        for extn in asn1Object['tbsCertificate']['extensions']:
            if extn['extnID'] == rfc5280.id_ce_subjectAltName:
                extnValue, rest = der_decoder(
                    extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())
                self.assertFalse(rest)
                self.assertTrue(extnValue.prettyPrint())
                self.assertEqual(extn['extnValue'], der_encoder(extnValue))
                for gn in extnValue:
                    if gn['otherName'].hasValue():
                        self.assertEqual(perm_id_oid, gn['otherName']['type-id'])
                        onValue, rest = der_decoder(
                            gn['otherName']['value'],
                            asn1Spec=rfc4043.PermanentIdentifier())
                        self.assertFalse(rest)
                        self.assertTrue(onValue.prettyPrint())
                        self.assertEqual(gn['otherName']['value'], der_encoder(onValue))
                        self.assertEqual(assigner_oid, onValue['assigner'])
                        permanent_identifier_found = True
        self.assertTrue(permanent_identifier_found)

    def testOpenTypes(self):
        # Same check as testDerCodec, but letting the decoder resolve the
        # otherName open type automatically (decodeOpenTypes=True).
        substrate = pem.readBase64fromText(self.cert_pem_text)
        asn1Object, rest = der_decoder(
            substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))
        perm_id_oid = rfc4043.id_on_permanentIdentifier
        assigner_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48')
        permanent_identifier_found = False
        for extn in asn1Object['tbsCertificate']['extensions']:
            if extn['extnID'] == rfc5280.id_ce_subjectAltName:
                extnValue, rest = der_decoder(
                    extn['extnValue'], asn1Spec=rfc5280.SubjectAltName(),
                    decodeOpenTypes=True)
                self.assertFalse(rest)
                self.assertTrue(extnValue.prettyPrint())
                self.assertEqual(extn['extnValue'], der_encoder(extnValue))
                for gn in extnValue:
                    if gn['otherName'].hasValue():
                        on = gn['otherName']
                        self.assertEqual(perm_id_oid, on['type-id'])
                        self.assertEqual(assigner_oid, on['value']['assigner'])
                        permanent_identifier_found = True
        self.assertTrue(permanent_identifier_found)
# Collect every TestCase defined in this module into one suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    # Exit status 0 on success, 1 on any failure or error.
    sys.exit(not result.wasSuccessful())
|
"""
Filenames of the tests
"""
import os
from os.path import join as pjoin
from .. import LiF_g4
# This is a 2x2x2 q-point grid. The weights can be obtained from abinit.
# This is a 2x2x2 q-point grid. The weights can be obtained from abinit.
nqpt = 3
wtq = [0.125, 0.5, 0.375]

# Indices of the q-points in the 4x4x4 grid.
iqpt_subset = [0, 2, 6]

dirname = os.path.dirname(__file__)

# File names for the selected subset of q-points, taken from the full
# LiF_g4 calculation.
fnames = dict(
    eigk_fname=LiF_g4.fnames['eigk_fname'],
    eigq_fnames=list(),
    ddb_fnames=list(),
    eigr2d_fnames=list(),
    gkk_fnames=list(),
    )

for key in ('eigq_fnames', 'ddb_fnames', 'eigr2d_fnames', 'gkk_fnames'):
    for i in iqpt_subset:
        fnames[key].append(LiF_g4.fnames[key][i])

refdir = pjoin(dirname, 'epc_outputs')
|
"""
"""
from Tkinter import *
from areavi import AreaVi
from ttk import Notebook
class PanedHorizontalWindow(PanedWindow):
    """A horizontal paned window holding AreaVi editor areas."""

    def __init__(self, *args, **kwargs):
        """Force horizontal orientation for all panes."""
        PanedWindow.__init__(self, orient=HORIZONTAL, *args, **kwargs)

    def create_area(self):
        """Build a frame containing a scrollbar-backed AreaVi, install the
        configured plugins and handlers on it, and add it as a pane.
        Returns the new AreaVi."""
        frame = Frame(master=self)
        scrollbar = Scrollbar(master=frame)
        area = AreaVi('none', frame, border=3, relief=RAISED,
                      yscrollcommand=scrollbar.set)
        scrollbar.config(command=area.yview)
        scrollbar.pack(side='right', fill=Y)
        # NOTE(review): imported locally -- presumably to defer plugin
        # registration until an area actually exists; confirm.
        from vyapp.plugins import INSTALL, HANDLE
        for plugin, args, kwargs in INSTALL:
            plugin.install(area, *args, **kwargs)
        for handle, args, kwargs in HANDLE:
            handle(area, *args, **kwargs)
        area.pack(expand=True, side='left', fill=BOTH)
        area.focus_set()
        self.add(frame)
        return area

    def create(self):
        """Create a new empty editor area and return it."""
        area = self.create_area()
        self.add(area.master)
        return area

    def load(self, filename):
        """Create a new editor area, load *filename* into it, and return it."""
        area = self.create_area()
        self.add(area.master)
        area.load_data(filename)
        return area
class PanedVerticalWindow(PanedWindow):
    """A vertical paned window whose panes are PanedHorizontalWindow rows."""

    def __init__(self, *args, **kwargs):
        """Force vertical orientation for all panes."""
        PanedWindow.__init__(self, orient=VERTICAL, *args, **kwargs)

    def create(self):
        """Add a new horizontal row containing one empty editor area."""
        base = PanedHorizontalWindow(master=self)
        self.add(base)
        base.create()
        return base

    def load(self, *args):
        """Add a new horizontal row and load each given filename into it."""
        base = PanedHorizontalWindow(master=self)
        self.add(base)
        for ind in args:
            base.load(ind)
        return base
class NoteVi(Notebook):
    """A ttk Notebook whose tabs hold vertical paned windows of editors."""

    def __init__(self, *args, **kwargs):
        """Plain Notebook initialization."""
        Notebook.__init__(self, *args, **kwargs)

    def create(self, filename):
        """Open a new tab labelled *filename* with one empty editor area."""
        base = PanedVerticalWindow(master=self)
        base.create()
        self.add(base, text=filename)
        return base

    def load(self, *args):
        """For each group in *args*, open a tab and load its file groups.

        Each element of *args* is an iterable of argument tuples passed on
        to PanedVerticalWindow.load.
        """
        for indi in args:
            base = PanedVerticalWindow(master=self)
            base.pack(side='left', expand=True, fill=BOTH)
            self.add(base)
            for indj in indi:
                base.load(*indj)
|
ort json_encoding
from ..xmlrpc import client as xmlrpcclient
from ...sfa.trust.credential import Credential
class Framework_Base():
"""
Framework_Base is an abstract class that identifies the minimal set of functions
that must be implemented in order to add a control framework to omni.
Instructions for adding a new framework:
Create "framework_X" in the frameworks directory, where X is your control framework.
Create a Framework class in the file that inherits "Framework_Base" and fill out each of the functions.
Edit the sample "omni_config" file and add a section for your framework, giving the section
the same name as X used in framework_X. For instance, 'sfa' or 'gcf'. Your framework's section
of the omni config *MUST* have a cert and key entry, which omni will use when talking to
the GENI Aggregate managers.
"""
    def __init__(self, config):
        # Certificate and key paths come from the framework's section of the
        # omni config; both files must exist and be non-empty or we bail out.
        self.cert = getAbsPath(config['cert'])
        if not os.path.exists(self.cert):
            sys.exit("Frameworks certfile %s doesn't exist" % self.cert)
        if not os.path.getsize(self.cert) > 0:
            sys.exit("Frameworks certfile %s is empty" % self.cert)
        self.key = getAbsPath(config['key'])
        if not os.path.exists(self.key):
            sys.exit("Frameworks keyfile %s doesn't exist" % self.key)
        if not os.path.getsize(self.key) > 0:
            sys.exit("Frameworks keyfile %s is empty" % self.key)
        # SSL context is built lazily (see ssl_context).
        self.sslctx = None
    def init_user_cred(self, opts):
        """Initialize user credential either from file (if
        --usercredfile) or else to None.
        Must call this method in framework's __init__ in order for
        --usercredfile to be handled properly.
        Returns the usercred - in XML string format.
        """
        # Ensure the attribute exists even if a subclass never set it.
        try:
            if self.user_cred_struct is not None:
                pass
        except:
            self.user_cred_struct = None
        # read the usercred from supplied file
        cred = None
        if opts.usercredfile and os.path.exists(opts.usercredfile) and os.path.isfile(opts.usercredfile) and os.path.getsize(opts.usercredfile) > 0:
            # read the user cred from the given file
            if hasattr(self, 'logger'):
                logger = self.logger
            else:
                logger = logging.getLogger("omni.framework")
            logger.info("Getting user credential from file %s", opts.usercredfile)
            # cred = _load_cred(logger, opts.usercredfile)
            with open(opts.usercredfile, 'r') as f:
                cred = f.read()
            # First try to interpret the file as a JSON credential struct;
            # on failure fall back to treating it as a raw XML string.
            try:
                cred = json.loads(cred, encoding='ascii', cls=json_encoding.DateTimeAwareJSONDecoder)
                if cred and isinstance(cred, dict) and \
                   cred.has_key('geni_type') and \
                   cred.has_key('geni_value') and \
                   cred['geni_type'] == Credential.SFA_CREDENTIAL_TYPE and \
                   cred['geni_value'] is not None:
                    self.user_cred_struct = cred
            except Exception, e:
                logger.debug("Failed to get a JSON struct from cred in file %s. Treat as a string: %s", opts.usercredfile, e)
            cred2 = credutils.get_cred_xml(cred)
            if cred2 is None or cred2 == "":
                logger.info("Did NOT get valid user cred from %s", opts.usercredfile)
                if opts.devmode:
                    logger.info(" ... but using it anyhow")
                else:
                    cred = None
            else:
                # This would force a saved user cred in struct to be XML. Is that correct?
                #cred = cred2
                target = ""
                try:
                    target = credutils.get_cred_target_urn(logger, cred)
                    if "+authority+sa" in target:
                        self.logger.debug("Got target %s - PG user creds list the user as the owner only", target)
                        target = credutils.get_cred_owner_urn(logger, cred)
                except:
                    if not opts.devmode:
                        logger.warn("Failed to parse target URN from user cred?")
                logger.info("Read user %s credential from file %s", target, opts.usercredfile)
        elif opts.usercredfile:
            # A file was named but is missing or empty: log and return None.
            if hasattr(self, 'logger'):
                logger = self.logger
            else:
                logger = logging.getLogger("omni.framework")
            logger.info("NOT getting user credential from file %s - file doesn't exist or is empty", opts.usercredfile)
        return cred
    # ----- Abstract control-framework API: each concrete framework_X
    # ----- subclass must override the methods below.

    def get_version(self):
        """
        Returns a dict of the GetVersion return from the control framework. And an error message if any.
        """
        raise NotImplementedError('get_version')

    def get_user_cred(self):
        """
        Returns a user credential from the control framework as a string. And an error message if any.
        """
        raise NotImplementedError('get_user_cred')

    def get_slice_cred(self, urn):
        """
        Retrieve a slice with the given urn and returns the signed credential as a string.
        """
        raise NotImplementedError('get_slice_cred')

    def create_slice(self, urn):
        """
        If the slice already exists in the framework, it returns that. Otherwise it creates the slice
        and returns the new slice as a string.
        """
        raise NotImplementedError('create_slice')

    def delete_slice(self, urn):
        """
        Removes the slice from the control framework.
        """
        raise NotImplementedError('delete_slice')

    def list_aggregates(self):
        """
        Get a list of available GENI Aggregates from the control framework.
        Returns: a dictionary where keys are urns and values are aggregate urls
        """
        raise NotImplementedError('list_aggregates')

    def list_my_slices(self, username):
        """
        Get a list of slices for this user.
        Returns: a list of slice URNs
        """
        raise NotImplementedError('list_my_slices')

    def list_my_projects(self, username):
        """
        '''List projects owned by the user (name or URN) provided, returning a list of structs, containing
        PROJECT_URN, PROJECT_UID, EXPIRED, and PROJECT_ROLE. EXPIRED is a boolean.'''
        """
        raise NotImplementedError('list_my_projects')

    def list_ssh_keys(self, username=None):
        """
        Get a list of SSH key pairs for the given user or the configured current user if not specified.
        Private key will be omitted if not known or found.
        Returns: a list of structs containing SSH key pairs ('public_key', 'private_key' (may be omitted))
        """
        raise NotImplementedError('list_ssh_keys')

    def slice_name_to_urn(self, name):
        """Convert a slice name to a slice urn."""
        # Default implementation just converts to generic URN.
        raise NotImplementedError('slice_name_to_urn')

    def renew_slice(self, urn, requested_expiration):
        """Renew a slice.
        urn is framework urn, already converted via slice_name_to_urn.
        requested_expiration is a datetime object.
        Returns the expiration date as a datetime. If there is an error,
        print it and return None.
        """
        raise NotImplementedError('renew_slice')
    def make_client(self, url, keyfile, certfile, verbose=False, timeout=None,
                    allow_none=False):
        """Create an API client. This is currently an XML-RPC client
        over SSL with a client side certificate."""
        return xmlrpcclient.make_client(url, keyfile, certfile,
                                        verbose=verbose,
                                        timeout=timeout,
                                        allow_none=allow_none)
def ssl_context(self, retries=2):
"""Returns an SSL Context or an exception is raised."""
if hasattr(self, 'logger'):
| logger = self.logger
else:
|
"""
WSGI config for elixirapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os, sys
from django.core.wsgi | import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "elixirapp.settings")
os. | environ["CELERY_LOADER"] = "django"
sys.path.insert(0,'/elixir/application/backend')
application = get_wsgi_application()
|
# -*- coding: utf-8 -*-
"""
database.use_tornado_database
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Using tornado.database with MySQL
http://flask.pocoo.org/snippets/11/
"""
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from tornado.database import Connection
from flask import g, render_template
from app import app
import config
@app.before_request
def connect_db():
    """Open a MySQL connection before each request and stash it on ``g``."""
    g.db = Connection(config.DB_HOST,
                      config.DB_NAME,
                      config.DB_USER,
                      config.DB_PASSWD)
@app.after_request
def close_connection(response):
    """Close the per-request DB connection and pass the response through."""
    g.db.close()
    return response
@app.route("/")
def index():
newsitems = g.db.iter("select * from newsitems")
return render_template("index.html", newsitems=newsitems)
"""
{% for item in newsitems %}
<h3>{{ item.title }}</h3>
{% endfor %}
You can get much of the same functionality in SQLAlchemy 0.6 using NamedTuples, without using the ORM:
from sqlalchemy import create_engine
@app.before_request
def connect_db():
g.db = create_engine(config.DB_URI)
@app.route("/")
def index():
newsitems = g.db.execute("select * from newsitems")
# now you can do newsitem.title...
"""
|
from biokbase.workspace.client import Workspace
from biokbase.narrative_method_store.client import NarrativeMethodStore
from biokbase.userandjobstate.client import UserAndJobState
from biokbase.catalog.Client import Catalog
from biokbase.service.Client import Client as ServiceClient
from biokbase.execution_engine2.execution_engine2Client import execution_engine2
from biokbase.narrative.common.url_config import URLS
def get(client_name, token=None):
    """Instantiate and return the KBase client registered as *client_name*."""
    client = __init_client(client_name, token=token)
    return client
def reset():
    # NOTE(review): this assigns a *local* name and therefore has no effect
    # on any module-level state -- looks like a latent bug; confirm whether
    # a module-level client cache was intended here.
    __clients = dict()
def __init_client(client_name, token=None):
    """Instantiate the KBase client registered under *client_name*.

    Raises ValueError for unknown client names.
    """
    if client_name == 'workspace':
        c = Workspace(URLS.workspace, token=token)
    elif client_name == 'narrative_method_store':
        c = NarrativeMethodStore(URLS.narrative_method_store, token=token)
    elif client_name == 'user_and_job_state':
        c = UserAndJobState(URLS.user_and_job_state, token=token)
    elif client_name == 'catalog':
        c = Catalog(URLS.catalog, token=token)
    elif client_name in ('service', 'service_wizard'):
        c = ServiceClient(URLS.service_wizard, use_url_lookup=True, token=token)
    elif client_name in ('execution_engine2', 'execution_engine', 'job_service'):
        c = execution_engine2(URLS.execution_engine2, token=token)
    elif client_name == 'job_service_mock':
        c = JobServiceMock()
    else:
        raise ValueError('Unknown client name "%s"' % client_name)
    return c
class JobServiceMock():
    """Mock job service that proxies check_job(s) through the service client."""

    def __init__(self):
        self.client = get('service')

    def check_job(self, job_id):
        """Return the status struct for a single job id."""
        return self.client.sync_call('narrative_job_mock.check_job', [job_id])[0]

    def check_jobs(self, params):
        """Return status structs for a batch of jobs described by *params*."""
        return self.client.sync_call('narrative_job_mock.check_jobs', [params])[0]
|
# _*_ coding: utf-8 _*_
import os
try:
from cStringIO import StringIO # python 2
except ImportError:
from io import StringIO # python 3
from collections import OrderedDict
import unittest
from tornado.escape import to_unicode
from tortik.util import make_qs, update_url, real_ip
from tortik.util.xml_etree import parse, tostring
class Request(object):
    # Minimal stand-in for a tornado request object used by the tests below.
    # NOTE: `headers` is a class-level (shared) dict; tests must not mutate it.
    headers = {}
    remote_ip = None
class BaseTest(unittest.TestCase):
    """Shared helpers comparing query strings and URLs order-insensitively."""

    def assertQueriesEqual(self, qs1, qs2):
        """Assert two query strings hold the same parameters, ignoring order."""
        self.assertEqual(sorted(qs1.split('&')), sorted(qs2.split('&')))

    def assertUrlsEqual(self, url1, url2):
        """Assert two URLs share a base and an order-insensitive query."""
        parts1 = url1.split('?')
        parts2 = url2.split('?')
        self.assertEqual(len(parts1), len(parts2))
        self.assertEqual(parts1[0], parts2[0])
        if len(parts1) > 1:
            self.assertQueriesEqual(parts1[1], parts2[1])
class TestMakeQs(BaseTest):
    """This is copy of Frontik's make_qs test: https://github.com/hhru/frontik/blob/master/tests/test_util.py
    """

    def test_make_qs_simple(self):
        query_args = {'a': '1', 'b': '2'}
        self.assertQueriesEqual(make_qs(query_args), 'a=1&b=2')

    def test_make_qs_not_str(self):
        query_args = {'a': 1, 'b': 2.0, 'c': True}
        self.assertQueriesEqual(make_qs(query_args), 'a=1&b=2.0&c=True')

    def test_make_qs_iterables(self):
        query_args = {'a': [1, 2], 'b': {1, 2}, 'c': (1, 2), 'd': frozenset((1, 2))}
        self.assertQueriesEqual(make_qs(query_args), 'a=1&a=2&b=1&b=2&c=1&c=2&d=1&d=2')

    def test_make_qs_none(self):
        # None values are dropped entirely.
        query_args = {'a': None, 'b': None}
        self.assertQueriesEqual(make_qs(query_args), '')

    def test_make_qs_encode(self):
        query_args = {'a': u'тест', 'b': 'тест'}
        qs = make_qs(query_args)
        self.assertIsInstance(qs, str)
        self.assertQueriesEqual(qs, 'a=%D1%82%D0%B5%D1%81%D1%82&b=%D1%82%D0%B5%D1%81%D1%82')

    def test_from_ordered_dict(self):
        # OrderedDict input must preserve key order in the output.
        qs = make_qs(OrderedDict([('z', 'я'), ('г', 'd'), ('b', ['2', '1'])]))
        self.assertIsInstance(qs, str)
        self.assertEqual(qs, 'z=%D1%8F&%D0%B3=d&b=2&b=1')

    def test_unicode_params(self):
        self.assertQueriesEqual(
            make_qs({'при': 'вет', u'по': u'ка'}),
            '%D0%BF%D1%80%D0%B8=%D0%B2%D0%B5%D1%82&%D0%BF%D0%BE=%D0%BA%D0%B0'
        )

    def test_make_qs_comma(self):
        query_args = {'a': '1,2,3', 'b': 'asd'}
        self.assertQueriesEqual(make_qs(query_args, '/,'), 'a=1,2,3&b=asd')

    def test_make_qs_comma_quoted(self):
        # default value for `safe` parameter of make_qs is '/' so commas
        # should be encoded
        query_args = {'a': '1,2,3', 'b': 'asd'}
        self.assertQueriesEqual(make_qs(query_args), 'a=1%2C2%2C3&b=asd')
class TestUpdateUrl(BaseTest):
    """Tests for ``update_url`` query-string manipulation."""
    def test_simple(self):
        # Without update/remove arguments the URL passes through;
        # non-ASCII query parameters get percent-encoded on the way.
        self.assertUrlsEqual(update_url('http://google.com'), 'http://google.com')
        self.assertUrlsEqual(update_url('https://google.com'), 'https://google.com')
        self.assertUrlsEqual(update_url('google.com'), 'google.com')
        self.assertUrlsEqual(update_url('//google.com'), '//google.com')
        self.assertUrlsEqual(update_url('http://google.com?a=1'), 'http://google.com?a=1')
        self.assertUrlsEqual(update_url('http://google.com?a=1&b=2'), 'http://google.com?a=1&b=2')
        self.assertUrlsEqual(update_url('http://google.com?привет=1'),
                             'http://google.com?%D0%BF%D1%80%D0%B8%D0%B2%D0%B5%D1%82=1')
        self.assertUrlsEqual(update_url(u'http://google.com?привет=1'),
                             'http://google.com?%D0%BF%D1%80%D0%B8%D0%B2%D0%B5%D1%82=1')
    def test_update_args(self):
        # update_args adds parameters and overrides existing values;
        # str, unicode and int values are all accepted.
        self.assertUrlsEqual(update_url('http://google.com', update_args={'a': 1}), 'http://google.com?a=1')
        self.assertUrlsEqual(update_url('http://google.com', update_args={'a': '1'}), 'http://google.com?a=1')
        self.assertUrlsEqual(update_url('http://google.com', update_args={'a': u'1'}), 'http://google.com?a=1')
        self.assertUrlsEqual(update_url('http://google.com', update_args={u'a': u'1'}), 'http://google.com?a=1')
        self.assertUrlsEqual(update_url('http://google.com?a=2', update_args={'a': 1}), 'http://google.com?a=1')
        self.assertUrlsEqual(update_url('http://google.com?a=2&b=1', update_args={'a': 1}), 'http://google.com?a=1&b=1')
    def test_remove_args(self):
        # remove_args drops every occurrence of the named parameters,
        # including repeated and non-ASCII keys.
        self.assertUrlsEqual(update_url('http://google.com?a=2', remove_args=['a']), 'http://google.com')
        self.assertUrlsEqual(update_url('http://google.com?a=2', remove_args=[u'a']), 'http://google.com')
        self.assertUrlsEqual(update_url('http://google.com?привет=2', remove_args=['привет']), 'http://google.com')
        self.assertUrlsEqual(update_url(u'http://google.com?привет=2', remove_args=[u'привет']), 'http://google.com')
        self.assertUrlsEqual(update_url('http://google.com?a=2&a=1', remove_args=['a']), 'http://google.com')
        self.assertUrlsEqual(update_url('http://google.com?a=2&a=1&b=3', remove_args=['a']), 'http://google.com?b=3')
        self.assertUrlsEqual(update_url('http://google.com?a=2&a=1&b=3', remove_args=['b']),
                             'http://google.com?a=2&a=1')
    def test_both(self):
        # Removal and update may be combined in a single call.
        self.assertUrlsEqual(update_url('http://google.com?b=3', update_args={'a': 1}, remove_args=['b']),
                             'http://google.com?a=1')
        self.assertUrlsEqual(update_url('http://google.com?a=2&b=3&c=4', update_args={'a': 1}, remove_args=['b']),
                             'http://google.com?a=1&c=4')
class TestParse(BaseTest):
    """Tests for ``tortik.util.xml_etree`` parse/tostring round-tripping."""
    def test_parse_xml(self):
        """Parsing a file and serializing it back should be lossless."""
        path = os.path.join(os.path.dirname(__file__), 'data', 'simple.xml')
        # Bug fix: the original opened the file without a context manager,
        # so the handle leaked whenever an assertion below failed.
        with open(path, 'r') as fd:
            tree = parse(fd)
            self.assertEqual(tree.getroot().tag, 'data')
            convert = tostring(tree.getroot(), pretty_print=True, xml_declaration=True, encoding='UTF-8')
            # replace any possible conversion differences that are ok
            # Python 3+ native etree does not include xml declaration so we should remove it everywhere
            converted = to_unicode(convert).replace('\n', '').replace(' ', '').replace('\'', '"').\
                replace('<?xmlversion="1.0"encoding="UTF-8"?>', '').strip()
            fd.seek(0)
            base = to_unicode(fd.read()).replace('\n', '').replace(' ', '').\
                replace('<?xmlversion="1.0"encoding="UTF-8"?>', '').strip()
            self.assertEqual(converted, base)
class TestRealIp(BaseTest):
    """Tests for ``real_ip`` header precedence."""
    def test_real_ip(self):
        # default: no headers at all falls back to 127.0.0.1
        request = Request()
        self.assertEqual('127.0.0.1', real_ip(request))
        # X-Real-Ip wins when both headers are present
        request = Request()
        request.headers = {'X-Real-Ip': '8.8.8.8', 'X-Forwarded-For': '10.0.0.1'}
        self.assertEqual('8.8.8.8', real_ip(request))
        # the first address of the X-Forwarded-For chain is used
        request = Request()
        request.headers = {'X-Forwarded-For': '10.0.0.1, 127.0.0.1'}
        self.assertEqual('10.0.0.1', real_ip(request))
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'voir' de la commande 'chemin'."""
from primaires.format.fonctions import oui_ou_non
from primaires.interpreteur.masque.parametre import Parametre
from primaires.pnj.chemin import FLAGS
class PrmVoir(Parametre):
    """Command 'chemin voir' (show the details of a path).
    """
    def __init__(self):
        """Parameter constructor."""
        Parametre.__init__(self, "voir", "view")
        self.schema = "<cle>"
        self.aide_courte = "affiche le détail d'un chemin"
        self.aide_longue = \
            "Cette commande permet d'obtenir plus d'informations sur " \
            "un chemin (ses flags actifs, ses salles et sorties...)."
    def ajouter(self):
        """Hook called when the command is added to the interpreter."""
        cle = self.noeud.get_masque("cle")
        # Path keys are quoted, lowercase identifiers of at least 3 chars.
        cle.proprietes["regex"] = r"'[a-z0-9_:]{3,}'"
    def interpreter(self, personnage, dic_masques):
        """Interpret the parameter: display one path's flags and rooms."""
        cle = dic_masques["cle"].cle
        if cle not in importeur.pnj.chemins:
            personnage << "|err|Ce chemin n'existe pas.|ff|"
            return
        chemin = importeur.pnj.chemins[cle]
        msg = "Détail sur le chemin {} :".format(chemin.cle)
        # List every known flag with a yes/no marker.
        msg += "\n Flags :"
        for nom_flag in FLAGS.keys():
            msg += "\n {}".format(nom_flag.capitalize())
            msg += " : " + oui_ou_non(chemin.a_flag(nom_flag))
        # Then the rooms of the path with their outgoing direction.
        msg += "\n Salles du chemin :"
        if len(chemin.salles) == 0:
            msg += "\n Aucune"
        else:
            for salle, direction in chemin.salles.items():
                msg += "\n " + salle.ident.ljust(20) + " "
                msg += direction.ljust(10)
                # Show the return direction when one is defined.
                if salle in chemin.salles_retour and \
                        chemin.salles_retour[salle]:
                    msg += " (retour " + chemin.salles_retour[salle] + ")"
        personnage << msg
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from collections import MutableMapping
from .robottypes import is_dict_like
# Bug fix: use a raw string -- '\s' is an invalid string escape
# (DeprecationWarning on modern Pythons, SyntaxError in the future).
_WHITESPACE_REGEXP = re.compile(r'\s+')


def normalize(string, ignore=(), caseless=True, spaceless=True):
    """Normalizes given string according to given spec.

    By default string is turned to lower case and all whitespace is removed.
    Additional characters can be removed by giving them in `ignore` list.
    """
    if spaceless:
        string = _WHITESPACE_REGEXP.sub('', string)
    if caseless:
        string = lower(string)
        # Normalize the ignore list the same way so matching is caseless.
        ignore = [lower(i) for i in ignore]
    for ign in ignore:
        if ign in string:  # performance optimization
            string = string.replace(ign, '')
    return string


# Workaround for broken str.lower on old IronPython:
# http://ironpython.codeplex.com/workitem/33133
if sys.platform == 'cli' and sys.version_info < (2, 7, 5):
    def lower(string):
        return ('A' + string).lower()[1:]
else:
    def lower(string):
        return string.lower()
class | NormalizedDict(MutableMapping):
"""Custom dictionary implementation automatically normalizing keys."""
def __init__(self, initial=None, ignore=(), caseless=True, spaceless=True):
"""Initializes with possible initial value and normalizing spec.
Initial values can be either a dictionary or an iterable of name/value
pairs. In the latter case items are added in the given order.
Normalizing spec has exact same semantics a | s with `normalize` method.
"""
self._data = {}
self._keys = {}
self._normalize = lambda s: normalize(s, ignore, caseless, spaceless)
if initial:
self._add_initial(initial)
def _add_initial(self, initial):
items = initial.items() if hasattr(initial, 'items') else initial
for key, value in items:
self[key] = value
def __getitem__(self, key):
return self._data[self._normalize(key)]
def __setitem__(self, key, value):
norm_key = self._normalize(key)
self._data[norm_key] = value
self._keys.setdefault(norm_key, key)
def __delitem__(self, key):
norm_key = self._normalize(key)
del self._data[norm_key]
del self._keys[norm_key]
def __iter__(self):
return (self._keys[norm_key] for norm_key in sorted(self._keys))
def __len__(self):
return len(self._data)
def __str__(self):
return '{%s}' % ', '.join('%r: %r' % (key, self[key]) for key in self)
def __eq__(self, other):
if not is_dict_like(other):
return False
if not isinstance(other, NormalizedDict):
other = NormalizedDict(other)
return self._data == other._data
def __ne__(self, other):
return not self == other
def copy(self):
copy = NormalizedDict()
copy._data = self._data.copy()
copy._keys = self._keys.copy()
copy._normalize = self._normalize
return copy
# Speed-ups. Following methods are faster than default implementations.
def __contains__(self, key):
return self._normalize(key) in self._data
def clear(self):
self._data.clear()
self._keys.clear()
|
import os
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import scoped_session, sessionmaker
metadata = MetaData()
def get_sa_db_uri(driver='', username='', password='', host='', port='', database=''):
    """Build a SQLAlchemy DB URI: driver://username:password@host:port/database.

    For ``sqlite`` the other connection fields are ignored and a relative
    ``database`` path is made absolute before being embedded in the URI.

    Raises:
        ValueError: if ``driver`` is empty.
    """
    if not driver:
        # Explicit exception instead of ``assert`` so the validation
        # survives running under ``python -O`` (asserts are stripped).
        raise ValueError('driver must be specified')
    if driver == 'sqlite':
        # sqlite URIs take the form sqlite:///<absolute file path>.
        # os.path.isabs instead of startswith('/') keeps this portable
        # beyond POSIX paths.
        if not os.path.isabs(database):
            db_file = os.path.abspath(database)
        else:
            db_file = database
        return '%s:///%s' % (driver, db_file)
    return ('%s://%s:%s@%s:%s/%s' %
            (driver, username, password, host, port, database))
class DB(object):
    """Small holder tying a SQLAlchemy engine to a scoped session."""
    def __init__(self, db_uri):
        # NOTE(review): convert_unicode is deprecated/removed in newer
        # SQLAlchemy releases -- confirm the pinned version supports it.
        self.engine = create_engine(db_uri, convert_unicode=True)
        # Thread-local session factory bound to this engine; autocommit
        # and autoflush are both disabled, so callers commit explicitly.
        self.session = scoped_session(
            sessionmaker(autocommit=False,
                         autoflush=False,
                         bind=self.engine))
    def init_database(self):
        """Create all tables registered on the module-level metadata."""
        metadata.create_all(bind=self.engine)
|
[-6., -5.], self.evaluate(var1))
  def testPrecomputedGradient(self):
    """minimize() with grad_loss scales each gradient by the given factor."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        cost = 5 * var0 + 3 * var1
        # grad_loss pre-multiplies the computed gradients element-wise.
        grad_loss = constant_op.constant([42, -42], dtype=dtype)
        global_step = variables.Variable(
            array_ops.zeros([], dtypes.int64), name='global_step')
        sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
        opt_op = sgd_op.minimize(
            cost, global_step, [var0, var1], grad_loss=grad_loss)
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Run 1 step of sgd through optimizer
        opt_op.run()
        # Validate updated params: var -= lr * d(cost)/d(var) * grad_loss
        self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)],
                            self.evaluate(var0))
        self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
                            self.evaluate(var1))
  @test_util.run_in_graph_and_eager_modes
  def testNoVariables(self):
    """minimize() raises when the loss uses no trainable variables."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      # pylint: disable=cell-var-from-loop
      def loss():
        # Both variables are created with trainable=False, so the
        # optimizer has nothing it is allowed to update.
        var0 = resource_variable_ops.ResourceVariable(
            [1.0, 2.0], dtype=dtype, trainable=False, name='a')
        var1 = resource_variable_ops.ResourceVariable(
            [3.0, 4.0], dtype=dtype, trainable=False, name='b')
        return 5 * var0 + var1
      # pylint: enable=cell-var-from-loop
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
      with self.assertRaisesRegexp(ValueError, 'No.*variables'):
        sgd_op.minimize(loss)
  @test_util.run_in_graph_and_eager_modes
  def testNoGradients(self):
    """minimize() raises when the listed variable has no gradient."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      # Note that we name the variables uniquely here since the variables don't
      # seem to be getting deleted at the end of the loop.
      var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
                                                    name='a%d' % i)
      var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
                                                    name='b%d' % i)
      # pylint: disable=cell-var-from-loop
      def loss():
        # The loss depends only on var0.
        return 5 * var0
      # pylint: enable=cell-var-from-loop
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
      with self.assertRaisesRegexp(ValueError, 'No gradients'):
        # var1 has no gradient
        sgd_op.minimize(loss, var_list=[var1])
  @test_util.run_in_graph_and_eager_modes
  def testNoGradientsForAnyVariables_Minimize(self):
    """minimize() raises when no listed variable has a gradient at all."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      # Note that we name the variables uniquely here since the variables don't
      # seem to be getting deleted at the end of the loop.
      var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
                                                    name='a_%d' % i)
      var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
                                                    name='b_%d' % i)
      def loss():
        # A constant loss depends on neither variable.
        return constant_op.constant(5.0)
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
      with self.assertRaisesRegexp(ValueError,
                                   'No gradients provided for any variable'):
        sgd_op.minimize(loss, var_list=[var0, var1])
  @test_util.run_in_graph_and_eager_modes
  def testNoGradientsForAnyVariables_ApplyGradients(self):
    """apply_gradients() raises when every gradient entry is None."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      # Note that we name the variables uniquely here since the variables don't
      # seem to be getting deleted at the end of the loop.
      var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
                                                    name='a_%d' % i)
      var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
                                                    name='b_%d' % i)
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
      with self.assertRaisesRegexp(ValueError,
                                   'No gradients provided for any variable'):
        sgd_op.apply_gradients([(None, var0), (None, var1)])
  @test_util.run_in_graph_and_eager_modes
  def testGradientsAsVariables(self):
    """apply_gradients() accepts gradients stored in variables."""
    for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      # Note that we name the variables uniquely here since the variables don't
      # seem to be getting deleted at the end of the loop.
      var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
                                                    name='a%d' % i)
      var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
                                                    name='b%d' % i)
      def loss():
        return 5 * var0 + 3 * var1  # pylint: disable=cell-var-from-loop
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
      grads_and_vars = sgd_op.compute_gradients(loss, [var0, var1])
      # Convert gradients to tf.Variables
      converted_grads = [
          resource_variable_ops.ResourceVariable(array_ops.zeros([2], dtype),
                                                 name='c_%d_%d' % (i, j))
          for j, gv in enumerate(grads_and_vars)
      ]
      convert_ops = [
          state_ops.assign(converted_grads[j], gv[0])
          for j, gv in enumerate(grads_and_vars)
      ]
      self.evaluate(variables.global_variables_initializer())
      # Run convert_ops to achieve the gradients converting
      self.evaluate(convert_ops)
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      # Run 1 step of sgd through optimizer
      converted_grads_and_vars = list(zip(converted_grads, [var0, var1]))
      opt_op = sgd_op.apply_gradients(converted_grads_and_vars)
      self.evaluate(opt_op)
      # Validate updated params
      self.assertAllClose([-14., -13.], self.evaluate(var0))
      self.assertAllClose([-6., -5.], self.evaluate(var1))
  @test_util.run_in_graph_and_eager_modes
  def testComputeGradientsWithTensors(self):
    """compute_gradients() works on plain tensors; apply_gradients() does not."""
    x = ops.convert_to_tensor(1.0)
    def f():
      return x * x
    sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
    grads_and_vars = sgd_op.compute_gradients(f, [x])
    self.assertEqual(1, len(grads_and_vars))
    grad, x_as_var = grads_and_vars[0]
    # The tensor itself is returned in the variable slot.
    self.assertIs(x, x_as_var)
    # d(x*x)/dx at x=1.0 is 2.0.
    self.assertEqual(2.0, self.evaluate(grad))
    with self.assertRaises(NotImplementedError):
      sgd_op.apply_gradients(grads_and_vars)
  def testTrainOp(self):
    """minimize() registers the returned op in the TRAIN_OP collection."""
    with self.cached_session():
      var0 = variables.Variable([1.0, 2.0])
      var1 = variables.Variable([3.0, 4.0])
      cost = 5 * var0 + 3 * var1
      global_step = variables.Variable(
          array_ops.zeros([], dtypes.int64), name='global_step')
      sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
      opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
      self.assertTrue(opt_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
def testConstraint(self):
constraint_01 = lambda x: clip_ops.clip_by_value(x, -0.1, 0.)
constraint_0 = lambda x: clip_ops.clip_by_value(x, 0., 1.)
with self.cached_session():
var0 = variables.Variable([1.0, 2.0],
constraint=constraint_01)
var1 = variables.Variable([3.0, 4.0],
constraint=constraint_0)
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
variables.global_variables_i |
# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Constants defined in netdevice(7)
# Interface flags
# from net/if.h
IFF_UP = 1 << 0  # Interface is running.
IFF_BROADCAST = 1 << 1  # Valid broadcast address set.
IFF_DEBUG = 1 << 2  # Internal debugging flag.
IFF_LOOPBACK = 1 << 3  # Interface is a loopback interface.
IFF_POINTOPOINT = 1 << 4  # Interface is a point-to-point link.
IFF_NOTRAILERS = 1 << 5  # Avoid use of trailers.
IFF_RUNNING = 1 << 6  # Resources allocated.
IFF_NOARP = 1 << 7  # No arp protocol, L2 destination address not set.
IFF_PROMISC = 1 << 8  # Interface is in promiscuous mode.
IFF_ALLMULTI = 1 << 9  # Receive all multicast packets.
IFF_MASTER = 1 << 10  # Master of a load balancing bundle.
IFF_SLAVE = 1 << 11  # Slave of a load balancing bundle.
IFF_MULTICAST = 1 << 12  # Supports multicast.
IFF_PORTSEL = 1 << 13  # Is able to select media type via ifmap.
IFF_AUTOMEDIA = 1 << 14  # Auto media selection active.
IFF_DYNAMIC = 1 << 15  # The addresses are lost when the interface goes down.
# from linux/if.h
IFF_LOWER_UP = 1 << 16  # Driver signals L1 up. (since Linux 2.6.17)
IFF_DORMANT = 1 << 17  # Driver signals dormant. (since Linux 2.6.17)
IFF_ECHO = 1 << 18  # Echo sent packets. (since Linux 2.6.25)
# Private interface flags
# from linux/netdevice.h
# NOTE: this is a separate flag namespace from the public IFF_* flags
# above -- the bit numbering intentionally restarts at bit 0.
IFF_802_1Q_VLAN = 1 << 0  # 802.1Q VLAN device.
IFF_EBRIDGE = 1 << 1  # Ethernet bridging device.
IFF_BONDING = 1 << 2  # bonding master or slave.
IFF_ISATAP = 1 << 3  # ISATAP interface (RFC4214).
IFF_WAN_HDLC = 1 << 4  # WAN HDLC device.
IFF_XMIT_DST_RELEASE = 1 << 5  # dev_hard_start_xmit() is allowed to release skb->dst.
IFF_DONT_BRIDGE = 1 << 6  # disallow bridging this ether dev.
IFF_DISABLE_NETPOLL = 1 << 7  # disable netpoll at run-time.
IFF_MACVLAN_PORT = 1 << 8  # device used as macvlan port.
IFF_BRIDGE_PORT = 1 << 9  # device used as bridge port.
IFF_OVS_DATAPATH = 1 << 10  # device used as Open vSwitch datapath port.
IFF_TX_SKB_SHARING = 1 << 11  # The interface supports sharing skbs on transmit.
IFF_UNICAST_FLT = 1 << 12  # Supports unicast filtering.
IFF_TEAM_PORT = 1 << 13  # device used as team port.
IFF_SUPP_NOFCS = 1 << 14  # device supports sending custom FCS.
IFF_LIVE_ADDR_CHANGE = 1 << 15  # device supports hardware address change when it's running.
IFF_MACVLAN = 1 << 16  # Macvlan device.
IFF_XMIT_DST_RELEASE_PERM = 1 << 17  # IFF_XMIT_DST_RELEASE not taking into account underlying stacked devices.
IFF_IPVLAN_MASTER = 1 << 18  # IPvlan master device.
IFF_IPVLAN_SLAVE = 1 << 19  # IPvlan slave device.
IFF_L3MDEV_MASTER = 1 << 20  # device is an L3 master device.
IFF_NO_QUEUE = 1 << 21  # device can run without qdisc attached.
IFF_OPENVSWITCH = 1 << 22  # device is a Open vSwitch master.
IFF_L3MDEV_SLAVE = 1 << 23  # device is enslaved to an L3 master device.
IFF_TEAM = 1 << 24  # device is a team device.
IFF_RXFH_CONFIGURED = 1 << 25  # device has had Rx Flow indirection table configured.
IFF_PHONY_HEADROOM = 1 << 26  # the headroom value is controlled by an external entity. (i.e. the master device for bridged veth)
IFF_MACSEC = 1 << 27  # device is a MACsec device.
|
"""
keyring_demo.py
This demo shows how to create a new keyring and enable it in keyring lib.
Created by Kang Zhang on 2009-07-12
"""
import os
KEYRINGRC = "keyringrc.cfg"
def load_keyring_by_config():
    """This function shows how to enable a keyring using a config file.
    """
    # create the config file (legacy Python 2 demo; note the print
    # statements below are Python 2 syntax)
    config_file = open(KEYRINGRC,'w')
    config_file.writelines(["[backend]\n",
        # the path for the user created keyring
        # NOTE(review): [:-16] strips the trailing '/keyring_demo.py'
        # (16 chars) to get the containing directory; os.path.dirname
        # would be more robust if the file were ever renamed.
        "keyring-path= %s\n" % str(os.path.abspath(__file__))[:-16],
        # the name of the keyring class
        "default-keyring=simplekeyring.SimpleKeyring\n" ])
    config_file.close()
    # import the keyring lib, the lib will automatically load the
    # config file and load the user defined module
    import keyring
    # invoke the keyring to store and fetch the password
    try:
        keyring.set_password("demo-service", "tarek", "passexample")
        print "password stored sucessfully"
    except keyring.backend.PasswordSetError:
        print "failed to store password"
    print "password", keyring.get_password("demo-service", "tarek")
    # clean up the generated config file
    os.remove(KEYRINGRC)
def set_keyring_in_runtime():
    """This function shows how to create a keyring manually and use it
    in runtime
    """
    # define a new keyring class which extends the KeyringBackend
    import keyring.backend
    class TestKeyring(keyring.backend.KeyringBackend):
        """A test keyring which always outputs the same password
        """
        # supported() == 0 marks the backend as "suitable" in the old
        # keyring backend-priority API.
        def supported(self): return 0
        def set_password(self, servicename, username, password): return 0
        def get_password(self, servicename, username):
            return "password from TestKeyring"
    # set the keyring for keyring lib
    import keyring
    keyring.set_keyring(TestKeyring())
    # invoke the keyring lib (Python 2 print syntax, as in the rest of
    # this demo)
    try:
        keyring.set_password("demo-service", "tarek", "passexample")
        print "password stored sucessfully"
    except keyring.backend.PasswordSetError:
        print "failed to store password"
    print "password", keyring.get_password("demo-service", "tarek")
def main():
    """This script shows how to enable the keyring using the config
    file and in runtime.
    """
    load_keyring_by_config()
    set_keyring_in_runtime()
# standard script entry point
if __name__ == '__main__':
    main()
|
#!/usr/bin/e | nv python
from algorithms.sorting.selection_sort import *
from __prototype__ import *
if __name__ == '__main__':
    # Run the shared test harness against this sorting implementation.
    test_all(selection_sort)
# Data fitting wrappers with optimized parameters.
#
# Copyright (C) 2010-2011 Huang Xin
#
# See LICENSE.TXT that came with this file.
from __future__ import division
import math
import numpy as np
import scipy.ndimage as nd
from sinusoidfitter import onedsinusoidfit,onedsinusoid
from gaussfitter import gaussfit,onedgaussfit,onedgaussian,onedloggaussfit,onedloggaussian
from gaborfitter import gaborfit,onedgaborfit,onedgabor
class SinusoidFit(object):
    """Sinusoid fitting wrapper with pre-tuned starting parameters."""
    def sinusoid1d(self, xax, data, modelx=None, return_models=True,
                   return_all=False, **kwargs):
        """Fit a 1d sinusoid to ``data`` sampled at ``xax``.

        1d sinusoidal params: (height, amplitude, frequency, phase)

        Returns (params, model_xdata, model_fitting, errs, chi2) when
        ``return_all`` is true, (model_xdata, model_fitting) when only
        ``return_models`` is true, and None otherwise.
        """
        # Exactly one period over the sampled range; the frequency is held
        # fixed during the fit (fixed[2] is True).
        frequency = 2*np.pi/(xax.max()-xax.min())
        amplitude = (data.max()-data.min())/2
        params = [(data.max()+data.min())/2, amplitude, frequency, 0]
        fixed = [False, False, True, False]
        limitedmin = [True, True, True, True]
        limitedmax = [True, True, True, True]
        # Amplitude/frequency bounded within 20% of the initial guess;
        # phase may range from -180 to 540 degrees.
        minpars = [data.min(), 0.8*amplitude, 0.8*frequency, -180]
        maxpars = [data.max(), 1.2*amplitude, 1.2*frequency, 540]
        params, _model, errs, chi2 = onedsinusoidfit(
            xax, data, params=params, fixed=fixed,
            limitedmin=limitedmin, limitedmax=limitedmax,
            minpars=minpars, maxpars=maxpars, **kwargs)
        # Bug fix: ``modelx == None`` compares element-wise when modelx is
        # a numpy array; ``is None`` is the correct identity test.
        if modelx is None:
            modelx = xax
        model_xdata = onedsinusoid(xax, *params)
        model_fitting = onedsinusoid(modelx, *params)
        if return_all:
            return params, model_xdata, model_fitting, errs, chi2
        elif return_models:
            return (model_xdata, model_fitting)
class GaussFit(object):
    """Gaussian fitting wrappers (1d, log-1d and 2d) with tuned bounds."""
    def gaussfit1d(self, xax, data, modelx=None, return_models=True,
                   return_all=False, **kwargs):
        """Fit a 1d gaussian to ``data`` sampled at ``xax``.

        1d gaussian params: (height, amplitude, shift, width)
        """
        width = xax.max()-xax.min()
        # Height upper bound derived from the three smallest samples.
        lower_bound = np.sort(data)[:3].mean()
        params = [0, (data.max()-data.min())*0.5, 0, width*0.2]
        fixed = [False, False, False, False]
        limitedmin = [False, True, True, True]
        limitedmax = [True, True, True, True]
        minpars = [0, (data.max()-data.min())*0.5, xax.min()-width, width*0.05]
        maxpars = [lower_bound*1.5, data.max()-data.min(), xax.max(), width*3.0]
        params, _model, errs, chi2 = onedgaussfit(
            xax, data, params=params, fixed=fixed,
            limitedmin=limitedmin, limitedmax=limitedmax,
            minpars=minpars, maxpars=maxpars, **kwargs)
        # Bug fix: ``is None`` instead of ``== None`` (element-wise for
        # numpy arrays).
        if modelx is None:
            modelx = xax
        model_xdata = onedgaussian(xax, *params)
        model_fitting = onedgaussian(modelx, *params)
        if return_all:
            return params, model_xdata, model_fitting, errs, chi2
        elif return_models:
            return (model_xdata, model_fitting)
    def loggaussfit1d(self, xax, data, modelx=None, return_models=True,
                      return_all=False, **kwargs):
        """Fit a 1d log-gaussian to ``data`` sampled at ``xax``.

        1d gaussian params: (height, amplitude, shift, width)
        """
        width = xax.max()-xax.min()
        lower_bound = np.sort(data)[:3].mean()
        params = [0, (data.max()-data.min())*0.5, 0, width*0.2]
        fixed = [False, False, False, False]
        limitedmin = [False, True, True, True]
        limitedmax = [True, True, True, True]
        minpars = [0, (data.max()-data.min())*0.5, xax.min()-width, width*0.05]
        maxpars = [lower_bound*1.5, data.max()-data.min(), xax.max(), width*3.0]
        params, _model, errs, chi2 = onedloggaussfit(
            xax, data, params=params, fixed=fixed,
            limitedmin=limitedmin, limitedmax=limitedmax,
            minpars=minpars, maxpars=maxpars, **kwargs)
        # Bug fix: identity test instead of ``== None``.
        if modelx is None:
            modelx = xax
        model_xdata = onedloggaussian(xax, *params)
        model_fitting = onedloggaussian(modelx, *params)
        if return_all:
            return params, model_xdata, model_fitting, errs, chi2
        elif return_models:
            return (model_xdata, model_fitting)
    def gaussfit2d(self, img, returnfitimage=True, return_all=False):
        """Fit a 2d gaussian to ``img``.

        2d gaussian params: (height, amplitude, center_x, center_y, width_x, width_y, theta)
        """
        x_dim, y_dim = img.shape
        limitedmin = [False, False, True, True, True, True, True]
        limitedmax = [False, False, True, True, True, True, True]
        minpars = [0.0, 0.0, 0, 0, x_dim*0.1, y_dim*0.1, 0.0]
        maxpars = [0.0, 0.0, x_dim, y_dim, x_dim*0.8, y_dim*0.8, 360.0]
        usemoment = np.array([True, True, False, False, False, False, True], dtype='bool')
        params = [0.0, 0.0, x_dim/2, y_dim/2, x_dim/3, y_dim/3, 0.0]
        # Light gaussian smoothing stabilizes the fit against pixel noise.
        img = nd.filters.gaussian_filter(img, 0.2)
        if returnfitimage:
            params, img = gaussfit(img, params=params, returnfitimage=True,
                                   limitedmin=limitedmin, limitedmax=limitedmax,
                                   minpars=minpars, maxpars=maxpars,
                                   usemoment=usemoment)
            return params, img
        elif return_all:
            params, errors = gaussfit(img, params=params, return_all=True,
                                      limitedmin=limitedmin, limitedmax=limitedmax,
                                      minpars=minpars, maxpars=maxpars,
                                      usemoment=usemoment)
            return params, errors
class GaborFit(object):
    """Gabor fitting wrappers (1d and 2d) with tuned parameter bounds."""
    def gaborfit1d(self, xax, data, modelx=None, return_models=True,
                   return_all=False, **kwargs):
        """Fit a 1d gabor to ``data`` sampled at ``xax``.

        1d gabor params: (height,amplitude,shift,width,wavelength,phase)
        """
        # Wavelength fixed to the sampled range (fixed[4] is True).
        wavelength = xax.max()-xax.min()
        width = xax.max()-xax.min()
        params = [(data.max()+data.min())/2, (data.max()-data.min())/2,
                  width*0.5, width*0.2, wavelength, 0]
        fixed = [False, False, False, False, True, False]
        limitedmin = [True, False, True, True, False, True]
        limitedmax = [True, False, True, True, False, True]
        minpars = [data.min(), 0, xax.min()-3*width, width*0.05, 0, 0]
        maxpars = [data.max(), 0, xax.max()+3*width, width*3.00, 0, 360]
        params, _model, errs, chi2 = onedgaborfit(
            xax, data, params=params, fixed=fixed,
            limitedmin=limitedmin, limitedmax=limitedmax,
            minpars=minpars, maxpars=maxpars, **kwargs)
        # Bug fix: ``modelx == None`` compares element-wise when modelx is
        # a numpy array; ``is None`` is the correct identity test.
        if modelx is None:
            modelx = xax
        model_xdata = onedgabor(xax, *params)
        model_fitting = onedgabor(modelx, *params)
        if return_all:
            return params, model_xdata, model_fitting, errs, chi2
        elif return_models:
            return (model_xdata, model_fitting)
    def gaborfit2d(self, img, returnfitimage=True, return_all=False):
        """Fit a 2d gabor to ``img``.

        2d gabor params: (height,amplitude,center_x,center_y,width_x,width_y,theta,frequency,phase)
        These parameters determine the properties of the spatial receptive field. see Dayan etc., 2002
        """
        x_dim, y_dim = img.shape
        diag = math.sqrt(x_dim**2+y_dim**2)
        limitedmin = [False, False, True, True, True, True, True, True, True]
        limitedmax = [False, False, True, True, True, True, True, True, True]
        minpars = [0.0, 0.0, 0.0, 0.0, x_dim*0.2, y_dim*0.2, 0.0, diag, 0.0]
        maxpars = [0.0, 0.0, x_dim, y_dim, x_dim*0.5, y_dim*0.5, 360.0, diag*2, 180.0]
        params = [0.0, 0.0, x_dim/2, y_dim/2, x_dim/3, y_dim/3, 0.0, diag, 0.0]
        # Light gaussian smoothing stabilizes the fit against pixel noise.
        img = nd.filters.gaussian_filter(img, 0.2)
        if returnfitimage:
            params, img = gaborfit(img, params=params, returnfitimage=True,
                                   limitedmin=limitedmin, limitedmax=limitedmax,
                                   minpars=minpars, maxpars=maxpars)
            return params, img
        elif return_all:
            params, errors = gaborfit(img, params=params, return_all=True,
                                      limitedmin=limitedmin, limitedmax=limitedmax,
                                      minpars=minpars, maxpars=maxpars)
            return params, errors
|
.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for collectives."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import enum
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# TODO(b/170340570): print deprecation warning for CollectiveCommunication.
@tf_export("distribute.experimental.CommunicationImplementation",
           "distribute.experimental.CollectiveCommunication")
class CommunicationImplementation(enum.Enum):
  """Cross device communication implementation.

  Warning: The alias `tf.distribute.experimental.CollectiveCommunication` is
  deprecated and will be removed in a future version. Use
  `tf.distribute.experimental.CommunicationImplementation` instead.

  * `AUTO`: Automatically chosen by Tensorflow.
  * `RING`: TensorFlow's ring algorithms for all-reduce and
    all-gather.
  * `NCCL`: NVIDIA®'s NCCL library. This is now only used for all-reduce on
    GPUs; all-reduce on CPU, all-gather and broadcast fallbacks to RING.
  """
  AUTO = "AUTO"
  RING = "RING"
  NCCL = "NCCL"
  # TODO(ayushd): add ncclAllGather implementation.
# Deprecated alias kept for backward compatibility (see class docstring).
CollectiveCommunication = CommunicationImplementation
@tf_export("distribute.experimental.CommunicationOptions")
class _OptionsExported(object):
  """Options for cross device communications like All-reduce.

  This can be passed to methods like
  `tf.distribute.get_replica_context().all_reduce()` to optimize collective
  operation performance. Note that these are only hints, which may or may not
  change the actual behavior. Some options only apply to certain strategy and
  are ignored by others.

  One common optimization is to break gradients all-reduce into multiple packs
  so that weight updates can overlap with gradient all-reduce.

  Examples:

  ```python
  options = tf.distribute.experimental.CommunicationOptions(
      bytes_per_pack=50 * 1024 * 1024,
      timeout_seconds=120,
      implementation=tf.distribute.experimental.CommunicationImplementation.NCCL
  )
  grads = tf.distribute.get_replica_context().all_reduce(
      'sum', grads, options=options)
  optimizer.apply_gradients(zip(grads, vars),
      experimental_aggregate_gradients=False)
  ```
  """

  def __new__(cls, *args, **kwargs):
    # We expose a dummy class so that we can separate internal and public APIs.
    # Note that __init__ won't be called on the returned object if it's a
    # different class [1].
    # [1] https://docs.python.org/3/reference/datamodel.html#object.__new__
    return Options(*args, **kwargs)

  def __init__(self,
               bytes_per_pack=0,
               timeout_seconds=None,
               implementation=CommunicationImplementation.AUTO):
    """Creates a CommunicationOptions.

    Args:
      bytes_per_pack: a non-negative integer. Breaks collective operations into
        packs of certain size. If it's zero, the value is determined
        automatically. This only applies to all-reduce with
        `MultiWorkerMirroredStrategy` currently.
      timeout_seconds: a float or None, timeout in seconds. If not None, the
        collective raises `tf.errors.DeadlineExceededError` if it takes longer
        than this timeout. Zero disables timeout. This can be useful when
        debugging hanging issues. This should only be used for debugging since
        it creates a new thread for each collective, i.e. an overhead of
        `timeout_seconds * num_collectives_per_second` more threads. This only
        works for `tf.distribute.experimental.MultiWorkerMirroredStrategy`.
      implementation: a
        `tf.distribute.experimental.CommunicationImplementation`. This is a hint
        on the preferred communication implementation. Possible values include
        `AUTO`, `RING`, and `NCCL`. NCCL is generally more performant for GPU,
        but doesn't work for CPU. This only works for
        `tf.distribute.experimental.MultiWorkerMirroredStrategy`.

    Raises:
      ValueError: When arguments have invalid value.
    """
    # Intentionally empty: __new__ returns an `Options` instance, so this
    # __init__ never runs; its signature and docstring define the public API.
    pass
class Options(object):
  """Implementation of OptionsInterface.

  Concrete options object returned by the public
  `tf.distribute.experimental.CommunicationOptions` wrapper above.
  """

  def __init__(self,
               bytes_per_pack=0,
               timeout_seconds=None,
               implementation=CommunicationImplementation.AUTO):
    if bytes_per_pack < 0:
      raise ValueError(
          f"Argument `bytes_per_pack` must be >=0, Received {bytes_per_pack}.")
    # Accept the enum member's name (case-insensitively) as a convenience.
    if isinstance(implementation, str):
      implementation = CommunicationImplementation(implementation.upper())
    if not isinstance(implementation, CommunicationImplementation):
      raise ValueError(
          "Argument `implementation` must be instance of "
          "`tf.distribute.experimental.CommunicationImplementation`.")
    self.bytes_per_pack = bytes_per_pack
    self.timeout_seconds = timeout_seconds
    self.implementation = implementation

  __init__.__doc__ = _OptionsExported.__init__.__doc__

  def merge(self, options):
    """Merges with another options and returns a new one.

    Non-default values in `options` take precedence over this instance's
    values.

    Args:
      options: a `tf.distribute.experimental.CommunicationOptions`, or None.

    Returns:
      A new `tf.distribute.experimental.CommunicationOptions`.
    """
    merged = copy.deepcopy(self)
    if options is not None:
      if options.bytes_per_pack != 0:
        merged.bytes_per_pack = options.bytes_per_pack
      if options.timeout_seconds is not None:
        merged.timeout_seconds = options.timeout_seconds
      if options.implementation != CommunicationImplementation.AUTO:
        merged.implementation = options.implementation
    return merged
@tf_export("distribute.experimental.CollectiveHints")
class Hints(object):
"""Hints for collective operations like AllReduce.
This can be passed to methods like
`tf.di | stribute.get_replica_context().all_reduce()` to optimize collective
operation performance. Note that these are only hints, which may or may not
change the actual behavior. Some options only apply to certain strategy and
are ignored by others.
One common optimization is to break gradients all-reduce into multiple packs
so that weight updates can overlap with gradient all-reduce.
Examples:
- bytes_per_pack
```python
hints = tf.dis | tribute.experimental.CollectiveHints(
bytes_per_pack=50 * 1024 * 1024)
grads = tf.distribute.get_replica_context().all_reduce(
'sum', grads, experimental_hints=hints)
optimizer.apply_gradients(zip(grads, vars),
experimental_aggregate_gradients=False)
```
- timeout_seconds
```python
strategy = tf.distribute.MirroredStrategy()
hints = tf.distribute.experimental.CollectiveHints(
timeout_seconds=120)
try:
strategy.reduce("sum", v, axis=None, experimental_hints=hints)
except tf.errors.DeadlineExceededError:
do_something()
```
"""
@deprecation.deprecated(
None, "use distribute.experimental.CommunicationOptions instead")
def __new__(cls, bytes_per_pack=0, timeout_seconds=None):
return Options(
bytes_per_pack=bytes_per_pack, timeout_seconds=timeout_seconds)
def __init__(self, bytes_per_pack=0, timeout_seconds=None):
"""Creates a CollectiveHints.
Args:
bytes_per_pack: a non-negative integer. Breaks collective operations into
packs of certain size. If it's zero, the value is determined
automatically. This only applies to all-reduce with
`MultiWorkerMirroredStrategy` currently.
timeout_seconds: a float or None, timeout in seconds. If not None, the
collective raises `tf.errors.DeadlineExceededError` if it takes longer
than this timeout. This can be useful when debugging hanging issues.
This should only be used for debugging si |
from __future__ import print_function
import numpy as np
from stompy.grid import exact_delaunay, unstructured_grid
# Shorthand for the exact-arithmetic Delaunay triangulation under test.
Triangulation=exact_delaunay.Triangulation
from stompy.spatial import robust_predicates
def test_gen_intersected_elements():
    """Traversal between two nodes must be symmetric: walking A->B crosses
    the same number of elements as walking B->A."""
    dt = Triangulation()
    seed_pnts = [[0, 0],
                 [5, 0],
                 [10, 0],
                 [5, 5]]
    dt.add_node(x=seed_pnts[0])  # This tests insert into empty
    dt.add_node(x=seed_pnts[1])  # adjacent_vertex
    dt.add_node(x=seed_pnts[2])  # adjacent_vertex
    dt.add_node(x=seed_pnts[3])  # adjacent_edge
    dt.add_node(x=[3, 0])        # colinear
    dt.add_node(x=[6, 2])        # into cell interior
    dt.add_node(x=[12, 4])       # collinear cell interior
    nodes = list(dt.valid_node_iter())
    for idx, node_a in enumerate(nodes):
        for node_b in nodes[idx + 1:]:
            print("test_gen_intersected_elements: %s to %s" % (dt.nodes['x'][node_a],
                                                               dt.nodes['x'][node_b]))
            fwd_elts = list(dt.gen_intersected_elements(nA=node_a, nB=node_b))
            rev_elts = list(dt.gen_intersected_elements(nA=node_b, nB=node_a))
            assert len(fwd_elts) == len(rev_elts)
def test_gen_int_elts_dim1():
    """Intersections while the triangulation is still one-dimensional."""
    dt = Triangulation()
    for pnt in ([0, 0], [5, 0], [10, 0]):
        dt.add_node(x=pnt)
    # node/edge/node alternation between existing nodes:
    assert len(list(dt.gen_intersected_elements(0, 1))) == 3
    assert len(list(dt.gen_intersected_elements(0, 2))) == 5
    assert len(list(dt.gen_intersected_elements(1, 2))) == 3
    # and with some free points:
    assert len(list(dt.gen_intersected_elements(pA=[-1, -1], pB=[-1, 1]))) == 0
    crossings = list(dt.gen_intersected_elements(pA=[0, -1], pB=[0, 1]))
    assert len(crossings) == 1
    assert crossings[0][0] == 'node'
    crossings = list(dt.gen_intersected_elements(pA=[0, -1], pB=[1, 1]))
    assert len(crossings) == 1
    assert crossings[0][0] == 'edge'
def test_gen_int_elts_dim0():
    """Intersections against an empty, then single-node, triangulation."""
    dt = Triangulation()
    assert list(dt.gen_intersected_elements(pA=[-1, 0], pB=[1, 0])) == []
    dt.add_node(x=[0, 0])
    assert len(list(dt.gen_intersected_elements(pA=[-1, 0], pB=[1, 0]))) == 1
    assert len(list(dt.gen_intersected_elements(pA=[-1, 0], pB=[1, 1]))) == 0
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.http import JsonResponse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from shuup.admin.utils.picotable import ChoicesFilter, Column, TextFilter
from shuup.admin.utils.views import PicotableListView
from shuup.core.models import Product, StockBehavior, Supplier
from shuup.simple_supplier.forms import AlertLimitForm, StockAdjustmentForm
from shuup.simple_supplier.models import StockCount
from shuup.simple_supplier.utils import (
get_stock_adjustment_div, get_stock_information_div_id,
get_stock_information_html
)
class StocksListView(PicotableListView):
    """Admin list of stocked products with inline stock-adjustment widgets.

    Rows come from `StockCount` (see `get_queryset`); the picotable columns
    read through the related product/supplier.
    """
    template_name = "shuup/simple_supplier/admin/base_picotable.jinja"
    model = Product
    default_columns = [
        Column(
            "sku", _("SKU"), sort_field="product__sku", display="product__sku", linked=True,
            filter_config=TextFilter(filter_field="product__sku", placeholder=_("Filter by SKU..."))
        ),
        Column(
            "name", _("Name"), sort_field="product__translations__name", display="product__name", linked=True,
            filter_config=TextFilter(filter_field="product__translations__name", placeholder=_("Filter by name..."))
        ),
        Column(
            "supplier", _("Supplier"), display="supplier", linked=False,
            filter_config=ChoicesFilter(Supplier.objects.filter(module_identifier="simple_supplier"))
        ),
        # The last two columns render raw HTML widgets built by the
        # get_stock_information / get_stock_adjustment_form methods below.
        Column(
            "stock_information", _("Stock information"), display="get_stock_information",
            linked=False, sortable=False, raw=True
        ),
        Column(
            "adjust_stock", _("Adjust stock"), display="get_stock_adjustment_form",
            sortable=False, linked=False, raw=True
        )
    ]
    def __init__(self):
        super(StocksListView, self).__init__()
        # Picotable reads self.columns; expose the defaults directly.
        self.columns = self.default_columns
    def get_object_abstract(self, instance, item):
        # Compact (mobile) rendering: disable the row link, keep raw widgets.
        item.update({"_linked_in_mobile": False, "_url": self.get_object_url(instance.product)})
        return [
            {"text": item.get("name"), "class": "header"},
            {"title": "", "text": item.get("sku")},
            {"title": "", "text": " ", "raw": item.get("stock_information")},
            {"title": "", "text": " ", "raw": item.get("adjust_stock")},
        ]
    def get_queryset(self):
        # Only simple_supplier-managed, stocked, non-deleted products.
        return StockCount.objects.filter(
            supplier__module_identifier="simple_supplier",
            product__stock_behavior=StockBehavior.STOCKED,
            product__deleted=False
        ).order_by("product__id")
    def get_context_data(self, **kwargs):
        # NOTE(review): super() is invoked with PicotableListView, which skips
        # PicotableListView's own get_context_data — presumably deliberate
        # since the toolbar is cleared right below; confirm.
        context = super(PicotableListView, self).get_context_data(**kwargs)
        context["toolbar"] = None
        context["title"] = _("Stock management")
        return context
    def get_stock_information(self, instance):
        # instance is a StockCount row (see get_queryset).
        return get_stock_information_html(instance.supplier, instance.product)
    def get_stock_adjustment_form(self, instance):
        return get_stock_adjustment_div(self.request, instance.supplier, instance.product)
def get_adjustment_success_message(stock_adjustment):
    """Return a localized message describing a completed stock adjustment."""
    arguments = {
        "delta": stock_adjustment.delta,
        "unit_short_name": stock_adjustment.product.sales_unit.short_name,
        "product_name": stock_adjustment.product.name,
        "supplier_name": stock_adjustment.supplier.name
    }
    # Positive deltas add stock, anything else is treated as a removal.
    if stock_adjustment.delta > 0:
        template = _(
            "Added %(delta)s %(unit_short_name)s for product %(product_name)s stock (%(supplier_name)s)"
        )
    else:
        template = _(
            "Removed %(delta)s %(unit_short_name)s from product %(product_name)s stock (%(supplier_name)s)"
        )
    return template % arguments
def _process_stock_adjustment(form, request, supplier_id, product_id):
    # Apply a validated stock adjustment through the supplier's module and
    # return the refreshed stock-information widget as JSON.
    data = form.cleaned_data
    supplier = Supplier.objects.get(id=supplier_id)
    stock_adjustment = supplier.module.adjust_stock(
        product_id,
        delta=data.get("delta"),
        purchase_price=data.get("purchase_price"),
        created_by=request.user
    )
    success_message = {
        # CSS selector of the DOM element the client should refresh.
        "stockInformationDiv": "#%s" % get_stock_information_div_id(
            stock_adjustment.supplier, stock_adjustment.product),
        "updatedStockInformation": get_stock_information_html(
            stock_adjustment.supplier, stock_adjustment.product),
        "message": get_adjustment_success_message(stock_adjustment)
    }
    return JsonResponse(success_message, status=200)
def process_stock_adjustment(request, supplier_id, product_id):
    # View entry point: wraps _process_stock_adjustment with the shared
    # POST/form validation and error handling.
    return _process_and_catch_errors(
        _process_stock_adjustment, StockAdjustmentForm, request, supplier_id, product_id)
def _process_alert_limit(form, request, supplier_id, product_id):
    """Persist the validated alert limit for (supplier, product) and return
    the refreshed stock-information widget as a JSON payload.

    Fix: removed a redundant second ``Supplier.objects.get(id=supplier_id)``
    query — the supplier fetched at the top is reused for the response.
    """
    supplier = Supplier.objects.get(id=supplier_id)
    product = Product.objects.get(id=product_id)
    sc = StockCount.objects.get(supplier=supplier, product=product)
    data = form.cleaned_data
    sc.alert_limit = data.get("alert_limit")
    sc.save()
    success_message = {
        # CSS selector of the DOM element the client should refresh.
        "stockInformationDiv": "#%s" % get_stock_information_div_id(supplier, product),
        "updatedStockInformation": get_stock_information_html(supplier, product),
        "message": _("Alert limit for product %(product_name)s set to %(value)s.") % {
            "product_name": product.name, "value": sc.alert_limit},
    }
    return JsonResponse(success_message, status=200)
def process_alert_limit(request, supplier_id, product_id):
    # View entry point: wraps _process_alert_limit with the shared
    # POST/form validation and error handling.
    return _process_and_catch_errors(
        _process_alert_limit, AlertLimitForm, request, supplier_id, product_id)
def _process_and_catch_errors(process, form_class, request, supplier_id, product_id):
    # Shared wrapper: validate `request` with `form_class`, delegate to
    # `process`, and translate any failure into a JSON 400 response.
    try:
        if request.method != "POST":
            # NOTE(review): raising a plain Exception funnels this into the
            # generic handler below as HTTP 400 — confirm 405 wasn't intended.
            raise Exception(_("Not allowed"))
        form = form_class(request.POST)
        if form.is_valid():
            return process(form, request, supplier_id, product_id)
        error_message = ugettext("Error, please check submitted values and try again.")
        return JsonResponse({"message": error_message}, status=400)
    except Exception as exc:
        # Broad catch is deliberate: any error becomes a JSON error payload
        # instead of an HTML 500 page.
        error_message = ugettext(
            "Error, please check submitted values and try again (%(error)s).") % {"error": exc}
        return JsonResponse({"message": error_message}, status=400)
|
import os
import psutil
import sys
__all__ = [
    'BeerProgress'
]
# Full set of metrics BeerProgress.progress_string() knows how to render;
# callers may pass their own subset via the `display` argument.
_default_display = {
    'cpu': True,
    'mem': True,
    'progressbar': True,
    'percent': True,
    'tasks_ratio': True,
    'skipped_tasks': True,
    'fd_count': True,
    'context_switches': True
}
class BeerProgress(object):
    """Console progress indicator that renders a beer-mug progress bar plus
    process metrics (CPU, memory, fds, context switches) sampled via psutil.
    """

    def __init__(self, indicator='#', total_tasks=0, display=None,
                 progress_character='\U0001F37A'):
        """Create a progress tracker.

        Args:
            indicator: legacy bar character (kept for compatibility; the bar
                itself is drawn with `progress_character`).
            total_tasks: total task count; must be non-zero before tick().
            display: mapping selecting which metrics to show; defaults to a
                copy of `_default_display`. Unknown keys raise ValueError.
            progress_character: character used to draw the bar (beer mug).
        """
        self.indicator = indicator
        self._total_tasks = total_tasks
        self._completed_tasks = 0
        self._skipped_tasks = 0
        self.progress_character = progress_character
        if display is None:
            # Fix: the default used to be the module-level dict itself (a
            # shared mutable default argument); copy it so instances are
            # isolated from each other and from the module default.
            display = dict(_default_display)
        for s in display:
            if s not in _default_display:
                # Fix: actually %-format the message; previously the format
                # string and the item were passed as two ValueError args, so
                # the offending key was never interpolated.
                raise ValueError("Unsupported display item: %s" % s)
        self.display = display
        self.proc = psutil.Process(os.getpid())
        self.metrics = {
            'cpu': 0,
            'mem': 0,
            'percent': 0,
            'fds': 0,
            'ctxv': 0,
            'ctxi': 0
        }

    @property
    def completed_tasks(self):
        return self._completed_tasks

    @completed_tasks.setter
    def completed_tasks(self, completed_tasks):
        self._completed_tasks = completed_tasks

    @property
    def total_tasks(self):
        return self._total_tasks

    @total_tasks.setter
    def total_tasks(self, total_tasks):
        self._total_tasks = total_tasks

    @property
    def skipped_tasks(self):
        return self._skipped_tasks

    @skipped_tasks.setter
    def skipped_tasks(self, skipped_tasks):
        self._skipped_tasks = skipped_tasks

    def print_progress(self, same_line=True, stream=sys.stderr):
        """Write the current progress line to `stream` (stderr by default)."""
        if same_line:
            stream.write('\r' + self.progress_string())
        else:
            stream.write(self.progress_string() + '\n')

    def tick(self):
        """Refresh self.metrics from the task counters and the process."""
        if self._total_tasks == 0:
            raise ValueError("Cannot tick without total tasks set")
        self.metrics['percent'] = float((self.completed_tasks + self.skipped_tasks) * 100.0 / self.total_tasks)
        # NOTE(review): the get_* spellings are the pre-psutil-2.0 API; modern
        # psutil renamed them (cpu_percent, memory_info, num_fds,
        # num_ctx_switches) — confirm which psutil version this targets.
        self.metrics['cpu'] = float(self.proc.get_cpu_percent(interval=0))
        self.metrics['mem'] = float(self.proc.get_memory_info()[0]/1024.0/1024.0)
        self.metrics['fds'] = self.proc.get_num_fds()
        self.metrics['ctxv'] = self.proc.get_num_ctx_switches()[0]
        self.metrics['ctxi'] = self.proc.get_num_ctx_switches()[1]

    def progress_string(self, length=20):
        """Build the status line; `length` is the bar width.

        Measured in characters, not bytes.
        """
        pb_symbol_length = int(self.metrics['percent'] * length / 100)
        pb_spaces_length = length - pb_symbol_length
        full_pb = self.progress_character * length
        pb_string = full_pb[:pb_symbol_length*len(self.progress_character)] + pb_spaces_length * ' '
        status = ""
        if "cpu" in self.display:
            status += "CPU %6.2f%% " % self.metrics['cpu']
        if "mem" in self.display:
            status += "Mem %6.2fMB " % self.metrics['mem']
        if "progressbar" in self.display:
            status += pb_string + " "
        if "percent" in self.display:
            status += "[%6.2f%%] " % self.metrics['percent']
        if "tasks_ratio" in self.display:
            status += "Complete: %d/%d " % (self.completed_tasks+self.skipped_tasks, self.total_tasks)
        if "skipped_tasks" in self.display:
            status += "Skipped: %d " % self.skipped_tasks
        if "fd_count" in self.display:
            # Fix: this branch used to print the context-switch counters; the
            # collected fd metric was never displayed at all.
            status += "FDs: %d " % self.metrics['fds']
        if "context_switches" in self.display:
            # Fix: this key existed in _default_display but was never checked.
            status += "CTXvol: %d CTXinvol: %d" % (self.metrics['ctxv'], self.metrics['ctxi'])
        return status
|
import random
import sys
# merge_sort recurses once per element; raise the limit so lists of a few
# thousand entries don't hit Python's default 1000-frame cap.
sys.setrecursionlimit(7000)
def selection_sort(array, counter):
    """Selection sort, in place.

    Returns [array, counter] where counter is incremented once per element
    scanned plus twice per swap (matching the original accounting).

    Fix: the minimum's index is tracked during the scan instead of being
    re-located afterwards with an extra O(n) slice + .index() pass (the
    original also built two throwaway slice copies per outer iteration).
    """
    for i in range(0, len(array)):
        min_idx = i
        for j in range(i, len(array)):
            counter += 1
            if array[j] < array[min_idx]:
                min_idx = j
        counter += 2
        array[min_idx], array[i] = array[i], array[min_idx]
    return [array, counter]
def insertion_sort(array, counter):
    """Insertion sort, in place; returns [array, counter].

    counter is incremented once per outer iteration and once per shift,
    matching the original accounting exactly.
    """
    for pos in range(1, len(array)):
        key = array[pos]
        j = pos - 1
        counter += 1
        while j >= 0 and key < array[j]:
            counter += 1
            array[j + 1] = array[j]
            j -= 1
        array[j + 1] = key
    return [array, counter]
def quick_sort_random(array, counter):
    """Quick sort with a random pivot; returns [sorted_list, counter].

    Fix: the recursive case used to concatenate the [list, counter] pairs
    returned by the recursion (and by the insertion_sort base case), which
    produced a malformed, non-sorted result for inputs longer than 50 and
    silently dropped the comparison counts of the sub-calls. The counter is
    now threaded through both recursive calls.
    """
    if len(array) <= 50:
        # Small inputs: fall back to insertion sort (already [list, counter]).
        return insertion_sort(array, counter)
    pivot = array[random.randrange(len(array))]
    less = []
    greater = []
    equal = []
    for value in array:
        counter += 1
        if value < pivot:
            less.append(value)
        elif value > pivot:
            greater.append(value)
        else:
            equal.append(value)
    sorted_less, counter = quick_sort_random(less, counter)
    sorted_greater, counter = quick_sort_random(greater, counter)
    return [sorted_less + equal + sorted_greater, counter]
def quick_sort_first(array, counter):
    """Quick sort pivoting on the first element; returns [sorted_list, counter].

    Fix: same defect as quick_sort_random — the recursive case concatenated
    the [list, counter] pairs returned by the recursion, yielding a malformed
    result for inputs longer than 50 and losing sub-call comparison counts.
    """
    if len(array) <= 50:
        # Small inputs: fall back to insertion sort (already [list, counter]).
        return insertion_sort(array, counter)
    pivot = array[0]
    less = []
    greater = []
    equal = []
    for value in array:
        counter += 1
        if value < pivot:
            less.append(value)
        elif value > pivot:
            greater.append(value)
        else:
            equal.append(value)
    sorted_less, counter = quick_sort_first(less, counter)
    sorted_greater, counter = quick_sort_first(greater, counter)
    return [sorted_less + equal + sorted_greater, counter]
def merge_sort(array, counter):
    """Recursive merge sort; returns the sorted list (counter is unused by
    the caller for this algorithm — merge()'s increments are local).

    Fix: use floor division for the midpoint. Under Python 2, n//2 is
    identical to n/2 for ints; under Python 3, n/2 is a float and breaks
    the slice indices.
    """
    n = len(array)
    if n > 1:
        mid = n // 2
        return merge(merge_sort(array[:mid], counter),
                     merge_sort(array[mid:], counter), counter)
    else:
        return array
def merge(arr1, arr2, counter):
    """Recursively merge two sorted lists into one sorted list.

    The counter increment is local to each call and does not affect the
    return value.
    """
    counter += 1
    if not arr1:
        return arr2
    if not arr2:
        return arr1
    if arr1[0] <= arr2[0]:
        return [arr1[0]] + merge(arr1[1:], arr2, counter)
    return [arr2[0]] + merge(arr1, arr2[1:], counter)
def print_menu():
print "\n>Press A to run Selection Sort"
print ">Press B to run Insertion Sort"
print ">Press C to run Quick Sort with random pivot selection"
print ">Press D to run Quick Sort with first pivot selection"
print ">Press E to run Merge Sort"
print ">Press F to change the initial list to sort"
print ">Press Q to Quit"
print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
def print_list(array):
    # array is a [sorted_list, comparison_count] pair as returned by the
    # sorting functions above; the trailing commas keep Python 2's print
    # on one line, space-separated.
    print "[",
    for i in array[0]:
        print i,
    print "] --> comparisons: {}".format(array[1])
def user_input_list():
    # Collect integers until the user enters anything non-numeric: the
    # ValueError raised by int() is the loop's (deliberate) exit condition.
    user_lst = []
    try:
        while True:
            sort_input = int(raw_input("Please enter a number: "))
            user_lst.append(sort_input)
    except ValueError:
        pass
    return user_lst
if __name__ == "__main__":
    print "~~~~ WELCOME TO THE SORTER ~~~~"
    opt_input = ""
    # og_list keeps the pristine input; each run sorts a fresh copy so the
    # same list can be re-sorted with different algorithms.
    input_list = user_input_list()
    og_list = list(input_list)
    # The 'Q' branch below exits before this condition is ever re-checked;
    # the condition is a belt-and-braces guard.
    while opt_input.upper() != 'Q':
        print_menu()
        opt_input = raw_input("Enter an option from the menu above: ")
        input_list = list(og_list)
        counter = 0
        if opt_input.upper() == 'A':
            array = selection_sort(input_list, counter)
            print_list(array)
        elif opt_input.upper() == 'B':
            array = insertion_sort(input_list, counter)
            print_list(array)
        elif opt_input.upper() == 'C':
            array = quick_sort_random(input_list, counter)
            print_list(array)
        elif opt_input.upper() == 'D':
            array = quick_sort_first(input_list, counter)
            print_list(array)
        elif opt_input.upper() == 'E':
            # merge_sort returns the plain sorted list (no counter pair),
            # so it is printed without print_list.
            array = merge_sort(input_list, counter)
            print "[",
            for i in array:
                print i,
            print "]"
        elif opt_input.upper() == 'F':
            og_list = user_input_list()
        elif opt_input.upper() == 'Q':
            exit()
        else:
            print "Your input is invalid. Try again."
|
# Copyright Amazon | .com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Contains common test fixtures used to run unit tests.
"""
import sys
# This is needed so Python can find test_tools on the path | .
sys.path.append('../../..')
from test_tools.fixtures.common import *
|
ing the
# output of the standard diff tool, parsing it into separate changes, then
# ignoring changes that are known to be safe, such as adding or removing
# blank lines, etc...
#
# Command-line options for this diff-verification tool, registered with the
# Option/GetOption helpers provided by the surrounding build scripts.
Option('gen', 'IDL generated files', default='hdir')
Option('src', 'Original ".h" files', default='../c')
Option('halt', 'Stop if a difference is found')
Option('diff', 'Directory holding acceptable diffs', default='diff')
Option('ok', 'Write out the diff file.')
# Change
#
# A Change object contains the previous lines, new news and change type.
#
class Change(object):
  """A single diff hunk: the change descriptor plus removed/added lines."""
  def __init__(self, mode, was, now):
    self.mode = mode  # diff change descriptor line (e.g. '5c5')
    self.was = was    # lines removed (source side, '<')
    self.now = now    # lines added (generated side, '>')
  def Dump(self):
    """Print a human-readable description of this change."""
    if not self.was:
      print('Adding %s' % self.mode)
    elif not self.now:
      print('Missing %s' % self.mode)
    else:
      print('Modifying %s' % self.mode)
    for line in self.was:
      print('src: >>%s<<' % line)
    for line in self.now:
      print('gen: >>%s<<' % line)
    # Fix: a bare `print` is a no-op expression under Python 3 (it only
    # printed a blank line under Python 2); print('') emits the intended
    # blank line under both.
    print('')
#
# IsCopyright
#
# Return True if this change is only a one line change in the copyright notice
# such as non-matching years.
#
def IsCopyright(change):
  """True when the hunk is a single-line copyright-notice difference."""
  single_line = len(change.now) == 1 and len(change.was) == 1
  return (single_line and
          'Copyright (c)' in change.now[0] and
          'Copyright (c)' in change.was[0])
#
# IsBlankComment
#
# Return True if this change only removes a blank line from a comment
#
def IsBlankComment(change):
  """True when the hunk only removes a lone '*' line from a block comment."""
  if change.now or len(change.was) != 1:
    return False
  return change.was[0].strip() == '*'
#
# IsBlank
#
# Return True if this change only adds or removes blank lines
#
def IsBlank(change):
  """True when every added and removed line is empty."""
  return not any(change.now) and not any(change.was)
#
# IsToCppComment
#
# Return True if this change only converts comments from C++ style to C style
#
def IsToCppComment(change):
  """Return True when the hunk only converts '//' C++ comments into
  '/* */' C comments carrying identical text.

  Fix: removed an unreachable duplicate `return True` that followed the
  first return statement.
  """
  if not len(change.now) or len(change.now) != len(change.was):
    return False
  for index in range(len(change.now)):
    was = change.was[index].strip()
    if was[:2] != '//':
      return False
    was = was[2:].strip()
    now = change.now[index].strip()
    if now[:2] != '/*':
      return False
    # Compare the comment text with the '/*' and '*/' delimiters stripped.
    now = now[2:-2].strip()
    if now != was:
      return False
  return True
def IsMergeComment(change):
  """True when a lone '*' comment line was expanded into comment lines."""
  if len(change.was) != 1 or change.was[0].strip() != '*':
    return False
  for line in change.now:
    stripped = line.strip()
    is_comment_line = (stripped == '*' or
                       stripped[:2] == '/*' or
                       stripped[-2:] == '*/')
    if not is_comment_line:
      return False
  return True
#
# IsSpacing
#
# Return True if this change is only different in the way 'words' are spaced
# such as in an enum:
# ENUM_XXX = 1,
# ENUM_XYY_Y = 2,
# vs
# ENUM_XXX = 1,
# ENUM_XYY_Y = 2,
#
def IsSpacing(change):
  """True when paired lines differ only in 'word' spacing; a trailing
  // or /* comment on the source side is ignored."""
  if len(change.now) != len(change.was):
    return False
  for now_line, was_line in zip(change.now, change.was):
    # Strip a right-hand comment (and the character just before it,
    # matching the original [:offs-1] behavior) from the source line.
    offs = was_line.find('//')
    if offs == -1:
      offs = was_line.find('/*')
    if offs > -1:
      was_line = was_line[:offs - 1]
    if now_line.split() != was_line.split():
      return False
  return True
#
# IsInclude
#
# Return True if change has extra includes
#
def IsInclude(change):
  # NOTE(review): str.find() returns -1 when 'struct' is absent, and -1 is
  # truthy — so this rejects the change unless every removed line STARTS
  # with 'struct' (find() == 0). That may be intentional (forward
  # declarations replaced by #includes), but it reads like a find()/'in'
  # mix-up; confirm before relying on it.
  for line in change.was:
    if line.strip().find('struct'): return False
  # Added lines must all be blank or #include directives.
  for line in change.now:
    if line and '#include' not in line: return False
  return True
#
# IsCppComment
#
# Return True if the change is only missing C++ comments
#
def IsCppComment(change):
  """True when the hunk only drops '//' comment lines (nothing added)."""
  if change.now:
    return False
  return all(line.strip()[:2] == '//' for line in change.was)
#
# ValidChange
#
# Return True if the change does not match any of the known-benign ("bogus") patterns above.
#
def ValidChange(change):
  """True when the hunk matches none of the known-benign patterns above."""
  benign_checks = (IsToCppComment, IsCopyright, IsBlankComment,
                   IsMergeComment, IsBlank, IsSpacing, IsInclude,
                   IsCppComment)
  for check in benign_checks:
    if check(change):
      return False
  return True
#
# Swapped
#
# Check if the combination of last + next change signals they are both
# invalid such as swap of line around an invalid block.
#
def Swapped(last, next):
  """Return True when |last| and |next| are the two halves of a rotated
  line move: one hunk removes a run of lines and the other re-adds the same
  run (possibly rotated) elsewhere.

  Fix: the second rotation test indexed `last.now[i]` (the outer rotation
  offset) where it must index `last.now[j]`, mirroring the first loop —
  swaps in that direction were effectively never detected.
  """
  if not last.now and not next.was and len(last.was) == len(next.now):
    cnt = len(last.was)
    for i in range(cnt):
      match = True
      for j in range(cnt):
        if last.was[j] != next.now[(i + j) % cnt]:
          match = False
          break
      if match:
        return True
  if not last.was and not next.now and len(last.now) == len(next.was):
    cnt = len(last.now)
    for i in range(cnt):
      match = True
      for j in range(cnt):
        if last.now[j] != next.was[(i + j) % cnt]:
          match = False
          break
      if match:
        return True
  return False
def FilterLinesIn(output):
  """Drop matching removed/added line pairs (lines that merely moved)."""
  matched = [False] * len(output)
  removed = []  # (index, text) for '<' lines
  added = []    # (index, text) for '>' lines
  for index, line in enumerate(output):
    if len(line) < 2:
      continue
    content = line[2:]
    if line[0] == '<' and content.strip():
      removed.append((index, content))
    elif line[0] == '>' and content.strip():
      added.append((index, content))
  # Greedily pair identical removed/added lines and mark both for removal.
  for windex, wline in removed:
    for nindex, nline in added:
      if matched[nindex] or matched[windex]:
        continue
      if wline == nline:
        matched[nindex] = True
        matched[windex] = True
        if GetOption('verbose'):
          print("Found %d, %d >>%s<<" % (windex + 1, nindex + 1, wline))
  return [line for index, line in enumerate(output) if not matched[index]]
#
# GetChanges
#
# Parse the output into discrete change blocks.
#
def GetChanges(output):
  """Parse raw `diff` output into a list of benign-filtered Change objects."""
  # Split on lines, adding an END marker to simply add logic
  lines = output.split('\n')
  lines = FilterLinesIn(lines)
  lines.append('END')
  changes = []
  was = []
  now = []
  # NOTE(review): `mode` and `last` are never used below — dead locals.
  mode = ''
  last = None
  for line in lines:
    #print("LINE=%s" % line)
    if not line: continue
    elif line[0] == '<':
      # Removed (source-side) line; skip blanks.
      if line[2:].strip() == '': continue
      # Ignore prototypes
      if len(line) > 10:
        words = line[2:].split()
        if len(words) == 2 and words[1][-1] == ';':
          if words[0] == 'struct' or words[0] == 'union':
            continue
      was.append(line[2:])
    elif line[0] == '>':
      # Added (generated-side) line; skip blanks and #include directives.
      if line[2:].strip() == '': continue
      if line[2:10] == '#include': continue
      now.append(line[2:])
    elif line[0] == '-':
      # diff hunk separator line; ignore.
      continue
    else:
      # Any other line (a change descriptor or the END sentinel) closes the
      # accumulated hunk.
      change = Change(line, was, now)
      was = []
      now = []
      if ValidChange(change):
        changes.append(change)
      if line == 'END':
        break
  return FilterChanges(changes)
def FilterChanges(changes):
  """Remove pairs of changes that Swapped() identifies as mutual moves."""
  if len(changes) < 2:
    return changes
  drop = [False] * len(changes)
  for cur in range(len(changes)):
    for other in range(cur + 1, len(changes)):
      if drop[other]:
        continue
      if Swapped(changes[cur], changes[other]):
        drop[cur] = True
        drop[other] = True
  return [change for index, change in enumerate(changes) if not drop[index]]
def Main(args):
filenames = ParseOptions(args)
if not filenames:
gendir = os.path.join(GetOption('gen'), '*.h')
filenames = sorted(glob.glob(gendir))
srcdir = os.path.join(GetOption('src'), '*.h')
srcs = sorted(glob.glob(srcdir))
for name in srcs:
name = os.path.split(name)[1]
name = os.path.join(GetOption('gen'), name)
if name not in filenames:
print('Missing: %s' % name)
for filename in filenames:
gen = filename
filename = filename[len(GetOption('gen')) + 1:]
src = os.path.join(GetOption('src'), filename)
diff = os.path.join(GetOption('diff'), filename)
p = subprocess.Popen(['diff', src, gen], stdout=subprocess.PIPE)
output, errors = p.communicate()
try:
input = open(diff, 'rt').read()
except:
input = ''
if input != output:
changes = GetChanges(output)
else:
changes = []
if changes:
print("\n\nDelta between:\n src=%s |
# Problem B
# Small dataset
# O = G = V = 0. (Each unicorn has only one hair color in its mane.)
def solve(N, R, Y, B):
    # Arrange R 'R's, Y 'Y's and B 'B's in a cycle of length N so that no two
    # adjacent positions (including the wrap-around) share a color; returns
    # "IMPOSSIBLE " when no arrangement exists.
    res = ""
    # NOTE(review): trailing space in the sentinel looks accidental — confirm
    # against the expected judge output format.
    imp = "IMPOSSIBLE "
    if sum((R,Y,B)) == 0:
        return ""
    # A color occupying more than half the cycle must neighbor itself.
    if max(R,Y,B) > N//2 :
        return imp
    # Fully symmetric / two-color special cases have fixed periodic answers.
    if (R==Y) and (Y==B):
        return "RYB"*R
    elif (R==Y) and (B == 0):
        return "RY"*R
    elif (R==B) and (Y == 0) :
        return "RB"*R
    elif (Y==B) and (R == 0) :
        return "YB"*Y
    # Greedy: repeatedly emit the one or two most plentiful colors.
    arr = [["R",R],["Y",Y],["B",B]]
    arr.sort(key=lambda x:x[1], reverse=True)
    sum_arr = lambda x : x[0][1] + x[1][1] + x[2][1]
    while(sum_arr(arr) > 0 ):
        if (arr[0][1] == arr[1][1]) and (arr[1][1] == arr[2][1]):
            # All three counts equal: finish with a repeating 3-color block
            # chosen so it fits between res[-1] and res[0] on the cycle.
            # NOTE(review): res[-1] assumes res is non-empty here; the
            # all-equal initial case returns earlier, so the loop has always
            # appended at least one character by this point — verify.
            m = arr[0][1]
            s = set(["B","R","Y"])
            s.remove(res[-1])
            first = min(s)
            s.add(res[-1])
            s.remove(first)
            s.difference_update(set([res[0]]))
            last = min(s)
            s = set(["B","R","Y"])
            s.remove(first)
            s.remove(last)
            mid = list(s)[0]
            r0 = first+mid+last
            r = r0*m
            res += r
            break
        if arr[0][1] > 0:
            res += arr[0][0]
            arr[0][1] -= 1
        if arr[1][1] > 0 :
            res += arr[1][0]
            arr[1][1] -= 1
        # Re-sort so the next iteration again consumes the most plentiful.
        arr.sort(key=lambda x:x[1], reverse=True)
    return res
if __name__ == "__main__":
    # One test case per line: N plus counts for all six colors; only R, Y, B
    # are used here (O = G = V = 0 in the small dataset, per the header).
    tc = int(input())
    for ti in range(tc):
        N, R, O, Y, G, B, V = map(int,input().strip().split())
        r = solve(N, R, Y, B)
        print("Case #{0}: {1}".format(ti + 1, r))
|
# -*- coding: utf-8 -*-
from __fu | ture__ import (
absolute_import, division, print_function, | unicode_literals
)
|
class InvalidUsage(Exception):
    """Raised for malformed client requests; rendered as HTTP 400."""

    status_code = 400

    def __init__(self, message, status_code=None, payload=None):
        super(InvalidUsage, self).__init__()
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Serialize the payload plus message for a JSON error body."""
        body = dict(self.payload or ())
        body['message'] = self.message
        return body
class MethodNotAllowed(Exception):
    """Raised when the HTTP method is unsupported; rendered as HTTP 405."""

    status_code = 405

    def __init__(self, message, status_code=None, payload=None):
        super(MethodNotAllowed, self).__init__()
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Serialize the payload plus message for a JSON error body."""
        body = dict(self.payload or ())
        body['message'] = self.message
        return body
class Conflict(Exception):
    """Raised on state conflicts (e.g. duplicates); rendered as HTTP 409."""

    status_code = 409

    def __init__(self, message, status_code=None, payload=None):
        super(Conflict, self).__init__()
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Serialize the payload plus message for a JSON error body."""
        body = dict(self.payload or ())
        body['message'] = self.message
        return body
|
"""Tests for the frequency module in analysis"""
import pytest
from lantern.analysis import frequency
def test_frequency_analyze():
    """Unigram frequency analysis counts each character occurrence."""
    counts = frequency.frequency_analyze("abb")
    assert counts == {'b': 2, 'a': 1}
def test_frequency_analyze_bigram():
    """Bigram frequency analysis counts overlapping character pairs."""
    counts = frequency.frequency_analyze("abb", 2)
    assert counts == {'bb': 1, 'ab': 1}
def test_frequency_analyze_empty_string():
    """An empty string produces an empty frequency map."""
    counts = frequency.frequency_analyze("")
    assert counts == {}
def test_frequency_to_probability():
    """A frequency map converts to a probability distribution successfully."""
    distribution = frequency.frequency_to_probability({'a': 1, 'b': 2})
    assert distribution == {'a': 1.0 / 3, 'b': 2.0 / 3}
def test_frequency_to_probability_empty():
    """An empty frequency map converts to an empty distribution."""
    distribution = frequency.frequency_to_probability({})
    assert distribution == {}
d | ef test_index_of_coincidence():
"""T | esting index of coincidence for a piece of text"""
assert frequency.index_of_coincidence("aabbc") == 0.2
def test_index_of_coincidence_multiple_texts():
    """Index of coincidence computed over multiple texts."""
    ioc = frequency.index_of_coincidence("aabbc", "abbcc")
    assert ioc == 0.2
def test_index_of_coincidence_none():
    """Testing index of coincidence raises ValueError when called with no texts"""
    with pytest.raises(ValueError):
        frequency.index_of_coincidence()
def test_index_of_coincidence_empty():
    """An empty string has an index of coincidence of 0."""
    ioc = frequency.index_of_coincidence("")
    assert ioc == 0
def test_chi_squared():
    """Identical frequency distributions have a chi-squared of 0."""
    observed = {'a': 2, 'b': 3}
    assert frequency.chi_squared(observed, {'a': 2, 'b': 3}) == 0
def test_chi_squared_similar():
    """Nearby frequency distributions have a chi-squared of 0.1."""
    observed = {'a': 2, 'b': 3}
    expected = {'a': 1, 'b': 2}
    assert frequency.chi_squared(observed, expected) == 0.1
def test_chi_squared_different_symbols():
    """Symbols missing from the target distribution are handled."""
    observed = {'a': 1, 'd': 3}
    assert frequency.chi_squared(observed, {'a': 1}) == 0
def test_languagefrequency_attribute_access():
    """Testing correct attributes are found, incorrect attributes raise AttributeErrors"""
    # Accessing a known attribute must not raise.
    frequency.english.unigrams
    with pytest.raises(AttributeError):
        frequency.english.invalid
|
ep 03 12:00:00 2016
Aims
----
- normalization
Notes
-----
This is migrated from **SLAM** package
"""
from __future__ import division
import numpy as np
from joblib import Parallel, delayed
from .extern.interpolate import SmoothSpline
def normalize_spectrum(wave, flux, norm_range, dwave,
                       p=(1E-6, 1E-6), q=0.5, ivar=None, eps=1e-10,
                       rsv_frac=1.):
    """ A double smooth normalization of a spectrum

    Converted from Chao Liu's normSpectrum.m
    Updated by Bo Zhang

    Parameters
    ----------
    wave: ndarray (n_pix, )
        wavelength array
    flux: ndarray (n_pix, )
        flux array
    norm_range: tuple
        a tuple consisting (wave_start, wave_stop)
    dwave: float
        binning width
    p: tuple of 2 ps
        smoothing parameter between 0 and 1:
        0 -> LS-straight line
        1 -> cubic spline interpolant
    q: float in range of (0, 1)
        percentile used when picking continuum pixels in each bin
    ivar: ndarray (n_pix, ) | None
        inverse-variance array, default is None (even weights)
    eps: float
        the ivar threshold; smaller values are clipped so 1/ivar is finite
    rsv_frac: float
        the fraction of pixels reserved in terms of std. default is 1.

    Returns
    -------
    flux_norm: ndarray
        normalized flux
    flux_cont: ndarray
        continuum flux

    Example
    -------
    >>> flux_norm, flux_cont = normalize_spectrum(
    >>>     wave, flux, (4000., 8000.), 100., p=(1E-8, 1E-7), q=0.5,
    >>>     rsv_frac=2.0)
    """
    import warnings

    if ivar is not None:
        # zero the weight outside the normalization window, then clip tiny
        # ivar values to eps so the variance 1/ivar stays finite
        ivar = np.where(np.logical_or(wave < norm_range[0],
                                      wave > norm_range[1]), 0, ivar)
        ivar = np.where(ivar <= eps, eps, ivar)
        var = 1. / ivar
    else:
        # default config is even weight
        var = np.ones_like(flux)

    # q is a fraction here, not a percentage
    assert 0. < q < 1.

    # number of bins used to collect continuum pixels
    # (np.int / np.bool were removed in NumPy 1.24 -- use builtins)
    n_bin = int(np.fix(np.diff(norm_range) / dwave) + 1)
    wave1 = norm_range[0]

    # SMOOTH 1: initial spline through all positive-flux (and, when ivar is
    # given, positive-ivar) pixels
    if ivar is not None:
        ind_good_init = 1. * (ivar > 0.) * (flux > 0.)
    else:
        ind_good_init = 1. * (flux > 0.)
    ind_good_init = ind_good_init.astype(bool)
    flux_smoothed1 = SmoothSpline(wave[ind_good_init], flux[ind_good_init],
                                  p=p[0], var=var[ind_good_init])(wave)
    dflux = flux - flux_smoothed1

    # collecting continuum pixels --> ITERATION 1
    ind_good = np.zeros(wave.shape, dtype=bool)
    for i_bin in range(n_bin):
        ind_bin = np.logical_and(wave > wave1 + (i_bin - 0.5) * dwave,
                                 wave <= wave1 + (i_bin + 0.5) * dwave)
        if np.sum(ind_bin) > 0:
            # median & MAD-style sigma of the residuals
            bin_median = np.median(dflux[ind_bin])
            # NOTE(review): the MAD is taken over the FULL residual array,
            # not just this bin -- preserved as-is; confirm intended
            bin_std = np.median(np.abs(dflux - bin_median))
            # keep pixels within rsv_frac*sigma of the q-percentile residual
            ind_good_ = ind_bin * (
                np.abs(dflux - np.nanpercentile(dflux[ind_bin], q * 100.)) < (
                    rsv_frac * bin_std))
            ind_good = np.logical_or(ind_good, ind_good_)

    ind_good = np.logical_and(ind_good, ind_good_init)

    # fall back to all pixels if no continuum pixel was found; the original
    # code built a Warning instance without raising/emitting it (a no-op)
    if np.sum(ind_good) == 0:
        warnings.warn("@Keenan.normalize_spectrum(): unable to find continuum! ")
        ind_good = np.ones(wave.shape, dtype=bool)

    # SMOOTH 2: spline through continuum pixels only -> continuum flux
    flux_smoothed2 = SmoothSpline(
        wave[ind_good], flux[ind_good], p=p[1], var=var[ind_good])(wave)

    # normalized flux
    flux_norm = flux / flux_smoothed2
    return flux_norm, flux_smoothed2
def normalize_spectra_block(wave, flux_block, norm_range, dwave,
                            p=(1E-6, 1E-6), q=0.5, ivar_block=None, eps=1e-10,
                            rsv_frac=3., n_jobs=1, verbose=10):
    """ normalize multiple spectra using the same configuration

    This is specially designed for TheKeenan

    Parameters
    ----------
    wave: ndarray (n_pix, )
        wavelength array
    flux_block: ndarray (n_obs, n_pix)
        flux array; a single 1-D spectrum is promoted to shape (1, n_pix)
    norm_range: tuple
        a tuple consisting (wave_start, wave_stop)
    dwave: float
        binning width
    p: tuple of 2 ps
        smoothing parameter between 0 and 1:
        0 -> LS-straight line
        1 -> cubic spline interpolant
    q: float in range of (0, 1)
        percentile, passed through to normalize_spectrum
    ivar_block: ndarray (n_obs, n_pix) | None
        ivar array, default is None
    eps: float
        the ivar threshold
    rsv_frac: float
        the fraction of pixels reserved in terms of std. default is 3.
    n_jobs: int
        number of processes launched by joblib
    verbose: int / bool
        verbose level

    Returns
    -------
    flux_norm_block: ndarray (n_obs, n_pix)
        normalized flux
    flux_cont_block: ndarray (n_obs, n_pix)
        continuum flux
    """
    # Promote a single spectrum to a 1-row block. The original code called
    # flux_block.reshape(1, -1) and discarded the result, which was a no-op.
    flux_block = np.atleast_2d(flux_block)
    if ivar_block is None:
        ivar_block = np.ones_like(flux_block)
    else:
        ivar_block = np.atleast_2d(ivar_block)

    n_spec = flux_block.shape[0]
    results = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(normalize_spectrum)(
            wave, flux_block[i], norm_range, dwave, p=p, q=q,
            ivar=ivar_block[i], eps=eps, rsv_frac=rsv_frac)
        for i in range(n_spec))

    # unpack (flux_norm, flux_cont) pairs into two blocks
    flux_norm_block = []
    flux_cont_block = []
    for result in results:
        flux_norm_block.append(result[0])
        flux_cont_block.append(result[1])
    return np.array(flux_norm_block), np.array(flux_cont_block)
def get_stable_pixels(pixel_disp, wave_arm=100, frac=0.20):
    """ Flag pixels whose dispersion is low relative to their neighborhood.

    A pixel is "stable" when its dispersion is at or below the frac-quantile
    of the dispersions inside a window of +/- wave_arm pixels around it.

    Parameters
    ----------
    pixel_disp: np.ndarray
        dispersion array
    wave_arm: int
        the arm length (half window) in terms of pixels
    frac: float
        the reserved fraction, between 0.00 and 1.00

    Returns
    -------
    ind_stable: np.ndarray of bool
        True where the pixel is stable
    """
    pixel_disp = np.asarray(pixel_disp)
    n_pix = len(pixel_disp)
    # np.bool was removed in NumPy 1.24; use the builtin bool dtype
    ind_stable = np.zeros_like(pixel_disp, dtype=bool)
    for i in range(n_pix):
        # clip the window to the array boundaries
        edge_l = max(i - wave_arm, 0)
        edge_r = min(i + wave_arm, n_pix)
        local_thresh = np.percentile(pixel_disp[edge_l:edge_r], frac * 100.)
        if pixel_disp[i] <= local_thresh:
            ind_stable[i] = True
    return ind_stable
# TODO: this is a generalized version
def normalize_spectra(wave_flux_tuple_list, norm_range, dwave,
                      p=(1E-6, 1E-6), q=50, n_jobs=1, verbose=False):
    """ normalize multiple spectra using the same configuration

    NOTE: this is an unimplemented placeholder; it currently does nothing
    and returns None.

    Parameters
    ----------
    wave_flux_tuple_list: list[n_obs]
        a list of (wave, flux) tuple
    norm_range: tuple
        a tuple consisting (wave_start, wave_stop)
    dwave: float
        binning width
    p: tuple of 2 ps
        smoothing parameter between 0 and 1:
        0 -> LS-straight line
        1 -> cubic spline interpolant
    q: float in range of [0, 100]
        percentile, between 0 and 1
    n_jobs: int
        number of processes launched by joblib
    verbose: int / bool
        verbose level

    Returns
    -------
    flux_norm: ndarray
        normalized flux
    """
    # intentionally left unimplemented (stub kept for API planning)
    pass
def test_normaliza_spectra_block():
    """Manual smoke test for normalize_spectra_block on APOGEE DR10 data.

    NOTE(review): depends on a hard-coded local data directory and on
    TheCannon / matplotlib being installed; it is a developer script, not an
    automated unit test.
    """
    import os
    # hard-coded developer machine path -- will fail elsewhere
    os.chdir('/pool/projects/TheKeenan/data/TheCannonData')
    from TheCannon import apogee
    import matplotlib.pyplot as plt
    tr_ID, wl, tr_flux, tr_ivar = apogee.load_spectra("example_DR10/Data")
    tr_label = apogee.load_labels("example_DR10/reference_labels.csv")
    test_ID = tr_ID
    test_flux = tr_flux
    test_ivar = tr_ivar
    r = normalize_spectra_block(wl, tr_flux, (15200., 16900.), 30., q=0.9,
                                rsv_frac=0.5,
                                p=(1E-10, 1E-10), ivar_block=tr_ivar,
                                n_jobs=10, verbose=10)
    flux_norm, flux_cont = r
    flux_norm = np.array(flux_norm)
    flux_cont = np.array(flux_cont)
    flux_ivar = tr_ivar * flux_cont ** 2
    # overlay raw flux (blue) and fitted continuum (red) for spectra 10-19
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for i in range(10, 20):
        ofst = i * 0.5
        ax.plot(wl, tr_flux[i] + ofst, 'b')
        ax.plot(wl, flux_cont[i] + ofst, 'r')
    fig.tight_layout()
    fig.savefi
    # NOTE(review): the line above is truncated in this copy (likely
    # fig.savefig(...)); recover the original call before running.
#!/usr/bin/env python
import json
import optparse
import os
import subprocess
import sys
import tempfile
# Read bowtie-build's stderr in 1 MiB chunks when reporting failures.
CHUNK_SIZE = 2**20
# Galaxy data table updated by this data manager unless overridden via -n.
DEFAULT_DATA_TABLE_NAME = "bowtie_indexes"
def get_id_name( params, dbkey, fasta_description=None):
    """Resolve the sequence id and display name for a new index entry.

    Each value falls back in turn: explicit parameter -> FASTA description
    (name only) -> dbkey.
    """
    # TODO: ensure sequence_id is unique and does not already appear in location file
    param_dict = params['param_dict']
    sequence_id = param_dict['sequence_id'] or dbkey
    sequence_name = param_dict['sequence_name'] or fasta_description or dbkey
    return sequence_id, sequence_name
def build_bowtie_index( data_manager_dict, fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=DEFAULT_DATA_TABLE_NAME, color_space=False ):
    """Build a bowtie index for fasta_filename inside target_directory and
    register the resulting entry in data_manager_dict[data_table_name].

    Exits the process with bowtie-build's return code (after echoing its
    stderr) if index building fails.
    """
    # TODO: allow multiple FASTA input files
    fasta_base_name = os.path.split( fasta_filename )[-1]
    sym_linked_fasta_filename = os.path.join( target_directory, fasta_base_name )
    os.symlink( fasta_filename, sym_linked_fasta_filename )
    # bowtie-build usage: bowtie-build [-C] <reference_in> <ebwt_base>.
    # The original code appended sym_linked_fasta_filename a second time after
    # the index base name, passing a spurious extra argument; removed.
    args = [ 'bowtie-build' ]
    if color_space:
        args.append( '-C' )
    args.append( sym_linked_fasta_filename )
    args.append( fasta_base_name )
    tmp_stderr = tempfile.NamedTemporaryFile( prefix="tmp-data-manager-bowtie-index-builder-stderr" )
    proc = subprocess.Popen( args=args, shell=False, cwd=target_directory, stderr=tmp_stderr.fileno() )
    return_code = proc.wait()
    if return_code:
        # echo the captured stderr, then propagate the failure code
        tmp_stderr.flush()
        tmp_stderr.seek(0)
        print >> sys.stderr, "Error building index:"
        while True:
            chunk = tmp_stderr.read( CHUNK_SIZE )
            if not chunk:
                break
            sys.stderr.write( chunk )
        sys.exit( return_code )
    tmp_stderr.close()
    data_table_entry = dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_name )
    _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
return data_manager_dict
def main():
    """Entry point: parse CLI options and the Galaxy JSON params file, build
    the bowtie index, and write the resulting data-table entries back to the
    params file (Python 2 script)."""
    # Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename' )
    parser.add_option( '-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey' )
    parser.add_option( '-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description' )
    parser.add_option( '-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name' )
    parser.add_option( '-c', '--color_space', dest='color_space', action='store_true', default=False, help='color_space' )
    (options, args) = parser.parse_args()
    # first positional argument is the Galaxy-supplied JSON params file
    filename = args[0]
    params = json.loads( open( filename ).read() )
    target_directory = params[ 'output_data' ][0]['extra_files_path']
    os.mkdir( target_directory )
    data_manager_dict = {}
    dbkey = options.fasta_dbkey
    if dbkey in [ None, '', '?' ]:
        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )
    sequence_id, sequence_name = get_id_name( params, dbkey=dbkey, fasta_description=options.fasta_description )
    # build the index
    build_bowtie_index( data_manager_dict, options.fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME, color_space=options.color_space )
    # save info to json file
    open( filename, 'wb' ).write( json.dumps( data_manager_dict ) )
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from psycopg2 import IntegrityError
from openerp.addons.mail.tests.common import TestMail
class TestMailFollowers(TestMail):
    """Tests for mail.followers: follower-creation commands, default and
    explicit subtype handling, and the partner/channel integrity constraint."""
    def setUp(self):
        # Create subtypes covering each (default, res_model) combination used
        # by the tests below.
        super(TestMailFollowers, self).setUp()
        Subtype = self.env['mail.message.subtype']
        self.mt_mg_def = Subtype.create({'name': 'mt_mg_def', 'default': True, 'res_model': 'mail.channel'})
        self.mt_cl_def = Subtype.create({'name': 'mt_cl_def', 'default': True, 'res_model': 'crm.lead'})
        self.mt_al_def = Subtype.create({'name': 'mt_al_def', 'default': True, 'res_model': False})
        self.mt_mg_nodef = Subtype.create({'name': 'mt_mg_nodef', 'default': False, 'res_model': 'mail.channel'})
        self.mt_al_nodef = Subtype.create({'name': 'mt_al_nodef', 'default': False, 'res_model': False})
        # default subtypes that apply to mail.channel (model-specific + global)
        self.default_group_subtypes = Subtype.search([('default', '=', True), '|', ('res_model', '=', 'mail.channel'), ('res_model', '=', False)])
    def test_m2o_command_new(self):
        """_add_follower_command with explicit subtypes for both a partner and
        a channel yields only generic create commands."""
        test_channel = self.env['mail.channel'].create({'name': 'Test'})
        groups = self.group_pigs | self.group_public
        generic, specific = self.env['mail.followers']._add_follower_command(
            'mail.channel', groups.ids,
            {self.user_employee.partner_id.id: [self.mt_mg_nodef.id]},
            {test_channel.id: [self.mt_al_nodef.id]})
        self.assertFalse(specific)
        self.assertEqual(len(generic), 2)
        self.assertEqual(set([generic[0][2]['res_model'], generic[1][2]['res_model']]),
                         set(['mail.channel']))
        self.assertEqual(set(filter(None, [generic[0][2].get('channel_id'), generic[1][2].get('channel_id')])),
                         set([test_channel.id]))
        self.assertEqual(set(filter(None, [generic[0][2].get('partner_id'), generic[1][2].get('partner_id')])),
                         set([self.user_employee.partner_id.id]))
        self.assertEqual(set(generic[0][2]['subtype_ids'][0][2] + generic[1][2]['subtype_ids'][0][2]),
                         set([self.mt_mg_nodef.id, self.mt_al_nodef.id]))
    def test_m2o_command_update_selective(self):
        """With force=False, an already-following partner produces a
        record-specific command instead of a generic one."""
        test_channel = self.env['mail.channel'].create({'name': 'Test'})
        groups = self.group_pigs | self.group_public
        self.env['mail.followers'].create({'partner_id': self.user_employee.partner_id.id, 'res_model': 'mail.channel', 'res_id': self.group_pigs.id})
        generic, specific = self.env['mail.followers']._add_follower_command(
            'mail.channel', groups.ids,
            {self.user_employee.partner_id.id: [self.mt_mg_nodef.id]},
            {test_channel.id: False},
            force=False)
        self.assertEqual(len(generic), 1)
        self.assertEqual(len(specific), 1)
        self.assertEqual(generic[0][2]['res_model'], 'mail.channel')
        self.assertEqual(generic[0][2]['channel_id'], test_channel.id)
        self.assertEqual(set(generic[0][2]['subtype_ids'][0][2]), set(self.default_group_subtypes.ids))
        self.assertEqual(specific.keys(), [self.group_public.id])
        self.assertEqual(specific[self.group_public.id][0][2]['res_model'], 'mail.channel')
        self.assertEqual(specific[self.group_public.id][0][2]['partner_id'], self.user_employee.partner_id.id)
        self.assertEqual(set(specific[self.group_public.id][0][2]['subtype_ids'][0][2]), set([self.mt_mg_nodef.id]))
    def test_message_is_follower(self):
        """message_is_follower flips once the user subscribes."""
        self.assertFalse(self.group_pigs.sudo(self.user_employee).message_is_follower)
        self.group_pigs.message_subscribe_users(user_ids=[self.user_employee.id])
        self.assertTrue(self.group_pigs.sudo(self.user_employee).message_is_follower)
    def test_followers_subtypes_default(self):
        """Subscribing without explicit subtypes assigns the default ones."""
        self.group_pigs.message_subscribe_users(user_ids=[self.user_employee.id])
        self.assertEqual(self.group_pigs.message_follower_ids.mapped('partner_id'), self.user_employee.partner_id)
        self.assertEqual(self.group_pigs.message_follower_ids.mapped('channel_id'), self.env['mail.channel'])
        follower = self.env['mail.followers'].search([
            ('res_model', '=', 'mail.channel'),
            ('res_id', '=', self.group_pigs.id),
            ('partner_id', '=', self.user_employee.partner_id.id)])
        self.assertEqual(len(follower), 1)
        self.assertEqual(follower.subtype_ids, self.default_group_subtypes)
    def test_followers_subtypes_specified(self):
        """Subscribing with explicit subtype_ids stores exactly those."""
        self.group_pigs.sudo(self.user_employee).message_subscribe_users(subtype_ids=[self.mt_mg_nodef.id])
        self.assertEqual(self.group_pigs.message_follower_ids.mapped('partner_id'), self.user_employee.partner_id)
        self.assertEqual(self.group_pigs.message_follower_ids.mapped('channel_id'), self.env['mail.channel'])
        follower = self.env['mail.followers'].search([
            ('res_model', '=', 'mail.channel'),
            ('res_id', '=', self.group_pigs.id),
            ('partner_id', '=', self.user_employee.partner_id.id)])
        self.assertEqual(len(follower), 1)
        self.assertEqual(follower.subtype_ids, self.mt_mg_nodef)
    def test_followers_multiple_subscription(self):
        """Re-subscribing updates subtypes on the existing follower record
        instead of creating a duplicate."""
        self.group_pigs.sudo(self.user_employee).message_subscribe_users(subtype_ids=[self.mt_mg_nodef.id])
        self.assertEqual(self.group_pigs.message_follower_ids.mapped('partner_id'), self.user_employee.partner_id)
        self.assertEqual(self.group_pigs.message_follower_ids.mapped('channel_id'), self.env['mail.channel'])
        follower = self.env['mail.followers'].search([
            ('res_model', '=', 'mail.channel'),
            ('res_id', '=', self.group_pigs.id),
            ('partner_id', '=', self.user_employee.partner_id.id)])
        self.assertEqual(len(follower), 1)
        self.assertEqual(follower.subtype_ids, self.mt_mg_nodef)
        self.group_pigs.sudo(self.user_employee).message_subscribe_users(subtype_ids=[self.mt_mg_nodef.id, self.mt_al_nodef.id])
        self.assertEqual(self.group_pigs.message_follower_ids.mapped('partner_id'), self.user_employee.partner_id)
        self.assertEqual(self.group_pigs.message_follower_ids.mapped('channel_id'), self.env['mail.channel'])
        follower = self.env['mail.followers'].search([
            ('res_model', '=', 'mail.channel'),
            ('res_id', '=', self.group_pigs.id),
            ('partner_id', '=', self.user_employee.partner_id.id)])
        self.assertEqual(len(follower), 1)
        self.assertEqual(follower.subtype_ids, self.mt_mg_nodef | self.mt_al_nodef)
    def test_no_DID(self):
        """Test that a follower cannot suffer from dissociative identity disorder.
        It cannot be both a partner and a channel.
        """
        test_channel = self.env['mail.channel'].create({
            'name': 'I used to be schizo, but now we are alright.'
        })
        with self.assertRaises(IntegrityError):
            self.env['mail.followers'].create({
                'res_model': 'mail.channel',
                'res_id': test_channel.id,
                'partner_id': self.user_employee.partner_id.id,
                'channel_id': self.group_pigs.id,
            })
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimizer that attempts to use specialized minimizers on different parts of
an HTML test case."""
import functools
from clusterfuzz._internal.bot.tokenizer.antlr_tokenizer import AntlrTokenizer
from clusterfuzz._internal.bot.tokenizer.grammars.HTMLLexer import HTMLLexer
from clusterfuzz._internal.bot.tokenizer.grammars.JavaScriptLexer import \
JavaScriptLexer
from . import chunk_minimizer
from . import delta_minimizer
from . import js_minimizer
from . import minimizer
from . import utils
# Byte markers used to split an HTML test case into HTML and script tokens.
SCRIPT_START_STRING = b'<script'
SCRIPT_END_STRING = b'</script>'
class HTMLMinimizer(minimizer.Minimizer):  # pylint:disable=abstract-method
  """Specialized HTML minimizer.
  Note that this will not work properly with normal tokenizers. It simply
  acts as a wrapper around other minimizers and passes pieces of the HTML
  file to those."""

  class Token(object):
    """Helper class to represent a single token."""

    # token_type values: plain HTML vs. the body of a <script> element
    TYPE_HTML = 0
    TYPE_SCRIPT = 1

    def __init__(self, data, token_type):
      # data: raw bytes of this token; token_type: one of the TYPE_* values
      self.data = data
      self.token_type = token_type

  class TokenizerState(object):
    """Enum for tokenizer states."""
    SEARCHING_FOR_SCRIPT = 0
    SEARCHING_FOR_TAG_END = 1
    SEARCHING_FOR_CLOSE_SCRIPT = 2

  HTMLTOKENIZER = AntlrTokenizer(HTMLLexer).tokenize
  JSTOKENIZER = AntlrTokenizer(JavaScriptLexer).tokenize

  # One tokenizer per minimization pass for each token type; the list length
  # must match the number of CHUNK_SIZES levels used for that type.
  TOKENIZER_MAP = {
      Token.TYPE_HTML: [HTMLTOKENIZER, HTMLTOKENIZER, HTMLTOKENIZER],
      Token.TYPE_SCRIPT: [JSTOKENIZER, JSTOKENIZER],
  }

  # Chunk-size schedules for the chunk minimizer, one list per pass level.
  CHUNK_SIZES = [
      [400, 100, 20, 5],
      [400, 100, 20, 5, 2],
      [400, 100, 20, 5, 1],
  ]

  def __init__(self, test_function, *args, **kwargs):
    # The HTML minimizer will not be used directly. Instead, preserve its
    # arguments and pass them along when creating subminimizers.
    super(HTMLMinimizer, self).__init__(lambda: False)
    assert not args, 'Positional arguments not supported.'
    assert 'tokenizer' not in kwargs, 'Custom tokenizers not supported.'
    assert 'token_combiner' not in kwargs, 'Custom tokenizers not supported.'
    self.test_function = test_function
    self.kwargs = kwargs

  def minimize(self, data):
    """Wrapper to perform common tasks and call |_execute|."""
    # Do an initial line-by-line minimization to filter out noise.
    line_minimizer = delta_minimizer.DeltaMinimizer(self.test_function,
                                                    **self.kwargs)
    # Do two line minimizations to make up for the fact that minimzations on
    # bots don't always minimize as much as they can.
    for _ in range(2):
      data = line_minimizer.minimize(data)
    tokens = self.get_tokens_and_metadata(data)
    for index, token in enumerate(tokens):
      current_tokenizers = self.TOKENIZER_MAP[token.token_type]
      prefix = self.combine_tokens(tokens[:index])
      suffix = self.combine_tokens(tokens[index + 1:])
      token_combiner = functools.partial(
          self.combine_worker_tokens, prefix=prefix, suffix=suffix)
      for level, current_tokenizer in enumerate(current_tokenizers):
        # We need to preserve the parts of the test case that are not currently
        # being minimized. Create a special token combiner that adds these
        # portions of the test to the combined tokens.
        if token.token_type == HTMLMinimizer.Token.TYPE_HTML:
          current_minimizer = chunk_minimizer.ChunkMinimizer(
              self.test_function,
              chunk_sizes=HTMLMinimizer.CHUNK_SIZES[level],
              token_combiner=token_combiner,
              tokenizer=current_tokenizer,
              **self.kwargs)
        else:
          current_minimizer = js_minimizer.JSMinimizer(
              self.test_function,
              token_combiner=token_combiner,
              tokenizer=current_tokenizer,
              **self.kwargs)
        # strip the unchanged prefix/suffix back off the minimized result
        result_data = current_minimizer.minimize(token.data)
        start = len(prefix)
        end = len(result_data) - len(suffix)
        token.data = result_data[start:end]
    # TODO(mbarbella): Remove this once other minimizers are improved.
    # Do a final line-by-line minimization pass.
    data = self.combine_tokens(tokens)
    return line_minimizer.minimize(data)

  @staticmethod
  def get_tokens_and_metadata(data):
    """Get the token list with associated metadata."""
    # Simple state machine: scan for "<script", confirm the tag closes on the
    # same line, then scan for "</script>"; emit alternating HTML/script
    # tokens along the way.
    tokens = []
    state = HTMLMinimizer.TokenizerState.SEARCHING_FOR_SCRIPT
    current_token_start = 0
    current_token_type = HTMLMinimizer.Token.TYPE_HTML
    index = 0
    while 0 <= index < len(data):
      if state == HTMLMinimizer.TokenizerState.SEARCHING_FOR_SCRIPT:
        # In this case, we are simply searching for the next script tag.
        index = data.find(SCRIPT_START_STRING, index)
        state = HTMLMinimizer.TokenizerState.SEARCHING_FOR_TAG_END
      elif state == HTMLMinimizer.TokenizerState.SEARCHING_FOR_TAG_END:
        # Make sure that this really looks like a script tag.
        next_newline = data.find(b'\n', index)
        tag_end = data.find(b'>', index)
        if 0 <= tag_end < next_newline or next_newline < 0 <= tag_end:
          # The end of the script tag is before the next newline, so it should
          # be safe to attempt to split this.
          index = tag_end + 1
          token = HTMLMinimizer.Token(data[current_token_start:index],
                                      current_token_type)
          tokens.append(token)
          # Update state.
          current_token_type = HTMLMinimizer.Token.TYPE_SCRIPT
          current_token_start = index
          state = HTMLMinimizer.TokenizerState.SEARCHING_FOR_CLOSE_SCRIPT
        else:
          # We found a newline before the end of tag or did not find the end
          # of the tag, so something seems wrong. Skip this one.
          index += len(SCRIPT_START_STRING)
      elif state == HTMLMinimizer.TokenizerState.SEARCHING_FOR_CLOSE_SCRIPT:
        # Simply look for the end of this script.
        index = data.find(SCRIPT_END_STRING, index)
        if index < 0:
          break
        # TODO(mbarbella): Optimize for empty script case (e.g. for "src=").
        token = HTMLMinimizer.Token(data[current_token_start:index],
                                    current_token_type)
        tokens.append(token)
        current_token_start = index
        current_token_type = HTMLMinimizer.Token.TYPE_HTML
        state = HTMLMinimizer.TokenizerState.SEARCHING_FOR_SCRIPT
    # trailing token: whatever remains after the last boundary
    token = HTMLMinimizer.Token(data[current_token_start:], current_token_type)
    tokens.append(token)
    return tokens

  @staticmethod
  def combine_worker_tokens(tokens, prefix=b'', suffix=b''):
    """Combine tokens for a worker minimizer."""
    # The Antlr tokenizer decodes the bytes objects we originally pass to it.
    encoded_tokens = [
        t if isinstance(t, bytes) else t.encode('utf-8') for t in tokens
    ]
    return prefix + b''.join(encoded_tokens) + suffix

  @staticmethod
  def combine_tokens(tokens):
    """Combine tokens into a usable format, stripping metadata."""
    return b''.join([t.data for t in tokens])

  @staticmethod
  def run(data,
          thread_count=minimizer.DEFAULT_THREAD_COUNT,
          file_extension='.html'):
    """Attempt to minimize an html test case."""
    html_minimizer = HTMLMinimizer(
        utils.test, max_threads=thread_count, file_extension=file_extension)
    return html_minimizer.minimize(data)
|
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]],
[[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]]
self.assertArrayAlmostEqual(cg_oct.faces(sites=sites, permutation=[0, 3, 2, 4, 5, 1]), faces)
faces = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]]]
self.assertArrayAlmostEqual(cg_oct.faces(sites=sites), faces)
edges = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 0.0, -1.0]],
[[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]]
self.assertArrayAlmostEqual(cg_oct.edges(sites=sites, permutation=[0, 3, 2, 4, 5, 1]), edges)
edges = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, -1.0], [1.0, 0.0, 0.0]],
[[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]],
[[0.0, 0.0, -1.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, -1.0], [0.0, -1.0, 0.0]],
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]]]
self.assertArrayAlmostEqual(cg_oct.edges(sites=sites), edges)
self.assertArrayAlmostEqual(cg_oct.solid_angles(),
[2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.0943951])
pmeshes = cg_oct.get_pmeshes(sites=sites)
self.assertEqual(pmeshes[0]['pmesh_string'],
'14\n 0.00000000 0.00000000 1.00000000\n'
' 0.00000000 0.00000000 -1.00000000\n'
' 1.00000000 0.00000000 0.00000000\n'
' -1.00000000 0.00000000 0.00000000\n'
' 0.00000000 1.00000000 0.00000000\n'
' 0.00000000 -1.00000000 0.00000000\n'
' 0.33333333 0.33333333 0.33333333\n'
' 0.33333333 -0.33333333 0.33333333\n'
' -0.33333333 0.33333333 0.33333333\n'
' -0.33333333 -0.33333333 0.33333333\n'
' 0.33333333 0.33333333 -0.33333333\n'
' 0.33333333 -0.33333333 -0.33333333\n'
' | -0.33333333 0.33333333 -0.33333333\n'
' -0.33333333 -0.33333333 -0.33333333\n'
'8\n4\n0\n2\n4\n0\n4\n0\n2\n5\n0\n4\n0\ | n3\n4\n0\n'
'4\n0\n3\n5\n0\n4\n1\n2\n4\n1\n4\n1\n2\n5\n1\n4\n'
'1\n3\n4\n1\n4\n1\n3\n5\n1\n')
allcg_str = allcg.__str__()
self.assertTrue('\n#=======================================================#\n'
'# List of coordination geometries currently implemented #\n'
'#=======================================================#\n'
'\nCoordination geometry type : Single neighbor (IUCr: [1l])\n\n'
' - coordination number : 1\n'
' - list of points :\n'
' - [0.0, 0.0, 1.0]\n'
'------------------------------------------------------------\n\n' in allcg_str)
self.assertTrue('Coordination geometry type : Trigonal plane (IUPAC: TP-3 || IUCr: [3l])\n\n'
' - coordination number : 3\n'
' - list of points :\n' in allcg_str)
all_symbols = [u'S:1', u'L:2', u'A:2', u'TL:3', u'TY:3', u'TS:3', u'T:4', u'S:4', u'SY:4', u'SS:4',
u'PP:5', u'S:5', u'T:5', u'O:6', u'T:6', u'PP:6', u'PB:7', u'ST:7', u'ET:7', u'FO:7',
u'C:8', u'SA:8', u'SBT:8', u'TBT:8', u'DD:8', u'DDPN:8', u'HB:8', u'BO_1:8', u'BO_2:8',
u'BO_3:8', u'TC:9', u'TT_1:9', u'TT_2:9', u'TT_3:9', u'HD:9', u'TI:9', u'SMA:9', u'SS:9',
u'TO_1:9', u'TO_2:9', u'TO_3:9', u'PP:10', u'PA:10', u'SBSA:10', u'MI:10', u'S:10',
u'H:10', u'BS_1:10', u'BS_2:10', u'TBSA:10', u'PCPA:11', u'H:11', u'SH:11', u'CO:11',
u'DI:11', u'I:12', u'PBP:12', u'TT:12', u'C:12', u'AC:12', u'SC:12', u'S:12', u'HP:12',
u'HA:12', u'SH:13', u'DD:20', u'UNKNOWN', u'UNCLEAR']
self.assertEqual(len(allcg.get_geometries()), 68)
self.assertEqual(len(allcg.get_geometries(coordination=3)), 3)
self.assertEqual(sorted(allcg.get_geometries(returned='mp_symbol')), sorted(all_symbols))
self.assertEqual(sorted(allcg.get_geometries(returned='mp_symbol', coordination=3)),
['TL:3', 'TS:3', 'TY:3'])
self.assertEqual(allcg.get_symbol_name_mapping(coordination=3),
{u'TY:3': u'Triangular non-coplanar', u'TL:3': u'Trigonal plane', u'TS:3': u'T-shaped'})
self.assertEqual(allcg.get_symbol_cn_mapping(coordination=3),
{u'TY:3': 3, u'TL:3': 3, u'TS:3': 3})
self.assertEqual(sorted(allcg.get_implemented_geometries(coordination=4, returned='mp_symbol')),
[u'S:4', u'SS:4', u'SY:4', u'T:4'])
self.assertEqual(sorted(allcg.get_not_implemented_geometries(returned='mp_symbol')),
[u'CO:11', u'DD:20', u'H:10', u'S:10', u'S:12', u'UNCLEAR', u'UNKNOWN'])
self.assertEqual(allcg.get_geometry_from_name('Octahedron').mp_symbol, cg_oct.mp_symbol)
with self.assertRaises(LookupError) as cm:
allcg.get_geometry_from_name('Octahedran')
self.assertEqual(str(cm.exception), 'No coordination geometry found with name "Octahedran"')
self.assertEqual(allcg.get_geometry_from_IUPAC_symbol('OC-6').mp_symbol, cg_oct.mp_symbol)
with self.assertRaises(LookupError) as cm:
allcg.get_geometry_from_IUPAC_symbol('OC-7')
self.assertEqual(str(cm.exception), 'No coordination geometry found with IUPAC symbol "OC-7"')
self.assertEqual(allcg.get_geometry_from_IUCr_symbol('[6o]').mp_symbol, cg_oct.mp_symbol)
with self.assertRaises(LookupError) as cm:
allcg.get_geometry_from_IUCr_symbol('[6oct]')
self.assertEqual(str(cm.exception), 'No coordination geometry found with IUCr symbol "[6oct]"')
with self.assertRaises(LookupError) as cm:
allcg.get_geometry_from_mp_symbol('O:7')
self.assertEqual(str(cm.exception), 'No coordination geometry found with mp_symbol "O:7"')
self.assertEqual(allcg.pretty_print(maxcn=4),
'+--------------- |
f | rom AsynMongo import Collection | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_osm_find_first
----------------------------------
Tests for `osm-find-first` module.
"""
import unittest
import tempfile
import re
import httpretty
import osm_find_first
def v(string):
    """Interpolate the installed osm_find_first version into *string*."""
    version = osm_find_first.__version__
    return string.format(version=version)
class TestOsmFindFirst(unittest.TestCase):
    """Unit tests for osm_find_first: validators, CSV round-trips, and the
    HTTP lookup of first object versions (network mocked with httpretty)."""
    def testInt(self):
        """is_int accepts int-like values and rejects non-numeric strings."""
        for good in [1, '1', 1.0]:
            self.assertTrue(osm_find_first.is_int(good), msg=repr(good))
        for bad in ['foo']:
            self.assertFalse(osm_find_first.is_int(bad), msg=repr(bad))
    def testOSMType(self):
        """is_osm_type accepts only node/way/relation."""
        for good in ['node', 'way', 'relation']:
            self.assertTrue(osm_find_first.is_osm_type(good), msg=repr(good))
        for bad in ['foo']:
            self.assertFalse(osm_find_first.is_osm_type(bad), msg=repr(bad))
    def testWriteResultsToCSV(self):
        """write_to_csv emits a header row followed by one row per result."""
        outputfile = tempfile.NamedTemporaryFile()
        outputfilename = outputfile.name
        results = [{'osm_type': 'relation', 'osm_id': 2227344, 'osm_user':
                    'brianh', 'osm_uid': 19612, 'osm_timestamp': '2012-06-12 15:24:49+01'}]
        osm_find_first.write_to_csv(outputfilename, results)
        outputfile.seek(0, 0)
        self.assertEqual(outputfile.read().decode("utf8"),
                         'osm_type,osm_id,osm_user,osm_uid,osm_timestamp\nrelation,2227344,brianh,19612,2012-06-12 15:24:49+01\n')
        outputfile.close()
    def testReadMissingFromCSV(self):
        """read_missing_from_csv parses rows back into dicts (values as str)."""
        csv_content = 'osm_type,osm_id\nrelation,123\n'
        outputfile = tempfile.NamedTemporaryFile()
        outputfilename = outputfile.name
        outputfile.write(csv_content.encode("utf8"))
        outputfile.seek(0)
        missing = osm_find_first.read_missing_from_csv(outputfilename)
        self.assertEqual(missing, [{'osm_type': 'relation', 'osm_id': '123'}])
        outputfile.close()
    @httpretty.activate
    def testGettingResult(self):
        """find_first fetches version 1 of each object from the OSM API and
        sends the expected User-Agent header."""
        httpretty.register_uri(httpretty.GET,
                               re.compile(
                                   "http://api.openstreetmap.org/api/0.6/(node|way|relation)/[0-9]+/1"),
                               body='<osm><relation id="1" uid="123" user="testuser" timestamp="2000-01-01 115:24:02"></relation></osm>',
                               content_type="text/xml")
        result = osm_find_first.find_first(
            [], [{'osm_type': 'relation', 'osm_id': '1'}])
        self.assertEqual(
            result, [{'osm_timestamp': '2000-01-01 115:24:02', 'osm_type': 'relation', 'osm_uid': '123', 'osm_user': 'testuser', 'osm_id': '1'}])
        self.assertEqual(httpretty.last_request().method, "GET")
        self.assertEqual(
            httpretty.last_request().path, "/api/0.6/relation/1/1")
        self.assertEqual(httpretty.last_request().headers[
            'user-agent'], v("osm-find-first/{version}"))
    @httpretty.activate
    def testEmptyDoesNothing(self):
        """find_first with no missing objects returns [] and makes no request."""
        httpretty.register_uri(httpretty.GET,
                               re.compile(
                                   "http://api.openstreetmap.org/api/0.6/(node|way|relation)/[0-9]+/1"),
                               body='<osm><relation id="1" uid="123" user="testuser" timestamp="2000-01-01 115:24:02"></relation></osm>',
                               content_type="text/xml")
        result = osm_find_first.find_first([], [])
        self.assertEqual(result, [])
        self.assertEqual(
            httpretty.last_request().__class__, httpretty.core.HTTPrettyRequestEmpty)
# Allow running this test module directly: python <thisfile>.py
if __name__ == '__main__':
    unittest.main()
|
# Python - 3.6.0
# Codewars-style checks for cube_checker(volume, side). From the expectations
# below, the contract is: True only when side > 0 and side ** 3 == volume
# (zero and negative sides are always rejected).
Test.assert_equals(cube_checker(-12,2), False)
Test.assert_equals(cube_checker(8, 3), False)
Test.assert_equals(cube_checker(8, 2), True)
Test.assert_equals(cube_checker(-8,-2), False)
Test.assert_equals(cube_checker(0, 0), False)
Test.assert_equals(cube_checker(27, 3), True)
Test.assert_equals(cube_checker(1, 5), False)
Test.assert_equals(cube_checker(125, 5),True)
Test.assert_equals(cube_checker(125,-5),False)
Test.assert_equals(cube_checker(0, 12), False)
Test.assert_equals(cube_checker(12, -1),False)
Test.assert_equals(cube_checker(1, 1), True)
|
print nodes[node]
result.append((node, node))
return result
def do_disable_host(self, host):
    """Ask the service to disable *host* via the 'execute' endpoint."""
    body = {'command': 'disable_host', 'args': {'host': host}}
    status, data = self._request("POST", "execute", body=body)
    return data
def _load_filters(self):
    # Default scheduler filters come from config; kept as a method so
    # subclasses/tests can override the source.
    return CONF.scheduler_default_filters
def _init_aggregates(self):
    """Populate the aggregate caches from the current DB state."""
    admin_ctxt = context_module.get_admin_context()
    for aggregate in objects.AggregateList.get_all(admin_ctxt):
        self.aggs_by_id[aggregate.id] = aggregate
        for hostname in aggregate.hosts:
            self.host_aggregates_map[hostname].add(aggregate.id)
def update_aggregates(self, aggregates):
    """Updates internal HostManager information about aggregates."""
    # Accept either a single aggregate or a collection of them.
    if not isinstance(aggregates, (list, objects.AggregateList)):
        aggregates = [aggregates]
    for aggregate in aggregates:
        self._update_aggregate(aggregate)
def _update_aggregate(self, aggregate):
    """Insert/refresh one aggregate and drop stale host links."""
    self.aggs_by_id[aggregate.id] = aggregate
    for hostname in aggregate.hosts:
        self.host_aggregates_map[hostname].add(aggregate.id)
    # Refreshing the mapping dict to remove all hosts that are no longer
    # part of the aggregate.
    for hostname, agg_ids in self.host_aggregates_map.items():
        if hostname not in aggregate.hosts and aggregate.id in agg_ids:
            agg_ids.remove(aggregate.id)
def delete_aggregate(self, aggregate):
    """Deletes internal HostManager information about a specific aggregate.
    """
    # Drop the aggregate itself, then every host -> aggregate link.
    self.aggs_by_id.pop(aggregate.id, None)
    for hostname in aggregate.hosts:
        linked = self.host_aggregates_map[hostname]
        if aggregate.id in linked:
            linked.remove(aggregate.id)
def _init_instance_info(self):
    """Creates the initial view of instances for all hosts.

    As this initial population of instance information may take some time,
    we don't wish to block the scheduler's startup while this completes.
    The async method allows us to simply mock out the _init_instance_info()
    method in tests.
    """
    def _async_init_instance_info():
        context = context_module.get_admin_context()
        LOG.debug("START:_async_init_instance_info")
        self._instance_info = {}
        compute_nodes = objects.ComputeNodeList.get_all(context).objects
        LOG.debug("Total number of compute nodes: %s", len(compute_nodes))
        # Break the queries into batches of 10 to reduce the total number
        # of calls to the DB.
        batch_size = 10
        start_node = 0
        end_node = batch_size
        while start_node <= len(compute_nodes):
            curr_nodes = compute_nodes[start_node:end_node]
            start_node += batch_size
            end_node += batch_size
            # One InstanceList query covers every host in this batch.
            filters = {"host": [curr_node.host
                                for curr_node in curr_nodes],
                       "deleted": False}
            result = objects.InstanceList.get_by_filters(context,
                                                         filters)
            instances = result.objects
            LOG.debug("Adding %s instances for hosts %s-%s",
                      len(instances), start_node, end_node)
            for instance in instances:
                host = instance.host
                if host not in self._instance_info:
                    self._instance_info[host] = {"instances": {},
                                                 "updated": False}
                inst_dict = self._instance_info[host]
                inst_dict["instances"][instance.uuid] = instance
            # Call sleep() to cooperatively yield
            time.sleep(0)
        LOG.debug("END:_async_init_instance_info")
    # Run this async so that we don't block the scheduler start-up
    utils.spawn_n(_async_init_instance_info)
def _choose_host_filters(self, filter_cls_names):
    """Since the caller may specify which filters to use we need
    to have an authoritative list of what is permissible. This
    function checks the filter names against a predefined set
    of acceptable filters.
    """
    if not isinstance(filter_cls_names, (list, tuple)):
        filter_cls_names = [filter_cls_names]
    chosen = []
    unknown = []
    for name in filter_cls_names:
        if name not in self.filter_obj_map:
            if name not in self.filter_cls_map:
                unknown.append(name)
                continue
            # Lazily instantiate and cache the filter object.
            self.filter_obj_map[name] = self.filter_cls_map[name]()
        chosen.append(self.filter_obj_map[name])
    if unknown:
        raise exception.SchedulerHostFilterNotFound(
            filter_name=", ".join(unknown))
    return chosen
def get_filtered_hosts(self, hosts, filter_properties,
filter_class_names=None, index=0):
"""Filter hosts and return only ones passing all filters."""
def _strip_ignore_hosts(host_map, hosts_to_ignore):
ignored_hosts = []
for host in hosts_to_ignore:
for (hostname, nodename) in list(host_map.keys()):
if host == hostname:
del host_map[(hostname, nodename)]
ignored_hosts.append(host)
ignored_hosts_str = ', '.join(ignored_hosts)
LOG.info(_LI('Host filter ignoring hosts: %s'), ignored_hosts_str)
def _match_forced_hosts(host_map, hosts_to_force):
forced_hosts = []
for (hostname, nodename) in list(host_map.keys()):
if hostname not in hosts_to_force:
del host_map[(hostname, nodename)]
else:
forced_hosts.append(hostname)
if host_map:
fo | rced_hosts_str = ', '.join(forced_hosts)
msg = _LI('Host filter forcing available hosts to %s')
else:
forced_hosts_str = ', '.join(hosts_to_force)
msg = _LI("No hosts matched due to not matching "
| "'force_hosts' value of '%s'")
LOG.info(msg % forced_hosts_str)
def _match_forced_nodes(host_map, nodes_to_force):
forced_nodes = []
for (hostname, nodename) in list(host_map.keys()):
if nodename not in nodes_to_force:
del host_map[(hostname, nodename)]
else:
forced_nodes.append(nodename)
if host_map:
forced_nodes_str = ', '.join(forced_nodes)
msg = _LI('Host filter forcing available nodes to %s')
else:
forced_nodes_str = ', '.join(nodes_to_force)
msg = _LI("No nodes matched due to not matching "
"'force_nodes' value of '%s'")
LOG.info(msg % forced_nodes_str)
if filter_class_names is None:
filters = self.default_filters
else:
filters = self._choose_host_filters(filter_class_names)
ignore_hosts = filter_properties.get('ignore_hosts', [])
force_hosts = filter_properties.get('force_hosts', [])
force_nodes = filter_properties.get('force_nodes', [])
if ignore_hosts or force_hosts or force_nodes:
# NOTE(deva): we can't assume "host" is unique because
# one host may have many nodes.
name_to_cls_map = {(x.host, x.nodename): x for x in hosts}
if ignore_hosts:
_strip_ignore_hosts(name_to_cls_map, ignore_hosts)
if not name_to_cls_map:
return []
# NOTE(deva): allow force_hosts and force |
odule. (Ripped from coursera-dl)
TODO: Convert to Hy and make generic.
"""
import logging
import os
import ssl
import tempfile
import getpass
import requests
from requests.adapters import HTTPAdapter
try: # Workaround for broken Debian/Ubuntu packages? (See issue #331)
from requests.packages.urllib3.poolmanager import PoolManager
except ImportError:
from urllib3.poolmanager import PoolManager
from six.moves import StringIO
from six.moves import http_cookiejar as cookielib
#from .define import CLASS_URL, AUTH_REDIRECT_URL, PATH_COOKIES, AUTH_URL_V3
#from .utils import mkdir_p
# Monkey patch cookielib.Cookie.__init__.
# Reason: The expires value may be a decimal string,
# but the Cookie class uses int() ...
__orginal_init__ = cookielib.Cookie.__init__  # NOTE: name is a typo of "original"
if os.name == "posix":  # pragma: no cover
    import pwd
    # Login name of the current user; pwd works even without a controlling tty.
    _USER = pwd.getpwuid(os.getuid())[0]
else:
    _USER = getpass.getuser()
# Per-user cache directory under the system temp dir (from define.py).
PATH_CACHE = os.path.join(tempfile.gettempdir(), _USER + "_populi_dl_cache")  # from define.py
PATH_COOKIES = os.path.join(PATH_CACHE, 'cookies')  # ^
def __fixed_init__(self, version, name, value,
                   port, port_specified,
                   domain, domain_specified, domain_initial_dot,
                   path, path_specified,
                   secure,
                   expires,
                   discard,
                   comment,
                   comment_url,
                   rest,
                   rfc2109=False):
    """Replacement for cookielib.Cookie.__init__ that tolerates decimal
    'expires' strings by coercing them through float() first."""
    if expires is not None:
        expires = float(expires)
    # Bug fix: forward the caller's rfc2109 flag instead of hard-coding
    # rfc2109=False, which silently discarded the argument.
    __orginal_init__(self, version, name, value,
                     port, port_specified,
                     domain, domain_specified, domain_initial_dot,
                     path, path_specified,
                     secure,
                     expires,
                     discard,
                     comment,
                     comment_url,
                     rest,
                     rfc2109=rfc2109)
cookielib.Cookie.__init__ = __fixed_init__
class ClassNotFound(BaseException):
    """
    Raised if a course is not found in Coursera's site.

    NOTE(review): derives from BaseException, so `except Exception` will NOT
    catch it — confirm this is intentional.
    """
class AuthenticationFailed(BaseException):
    """
    Raised if we cannot authenticate on Coursera's site.

    NOTE(review): derives from BaseException, so `except Exception` will NOT
    catch it — confirm this is intentional.
    """
def login(session, username, password, class_name=None):
    """
    Login on coursera.org with the given credentials.

    This adds the following cookies to the session:
    sessionid, maestro_login, maestro_login_flag
    """
    logging.debug('Initiating login.')
    try:
        session.cookies.clear('.coursera.org')
        logging.debug('Cleared .coursera.org cookies.')
    except KeyError:
        logging.debug('There were no .coursera.org cookies to be cleared.')
    # The triple-quoted block below is disabled code (the old CSRF
    # cookie-forging flow) kept as a bare string literal.
    """
    # Hit class url
    if class_name is not None:
        class_url = CLASS_URL.format(class_name=class_name)
        r = requests.get(class_url, allow_redirects=False)
        try:
            r.raise_for_status()
        except requests.exceptions.HTTPError as e:
            logging.error(e)
            raise ClassNotFound(class_name)
    # csrftoken is simply a 20 char random string.
    csrftoken = random_string(20)
    # Now make a call to the authenticator url.
    csrf2cookie = 'csrf2_token_%s' % random_string(8)
    csrf2token = random_string(24)
    cookie = "csrftoken=%s; %s=%s" % (csrftoken, csrf2cookie, csrf2token)
    logging.debug('Forging cookie header: %s.', cookie)
    headers = {
        'Cookie': cookie,
        'X-CSRFToken': csrftoken,
        'X-CSRF2-Cookie': csrf2cookie,
        'X-CSRF2-Token': csrf2token,
    }
    """
    data = {
        'email': username,
        'password': password,
        'webrequest': 'true'
    }
    # Auth API V3
    # NOTE(review): `headers` is only defined inside the disabled block above,
    # and the AUTH_URL_V3 import is commented out at module top — as written
    # this call raises NameError. Confirm intent before fixing.
    r = session.post(AUTH_URL_V3, data=data,
                     headers=headers, allow_redirects=False)
    try:
        r.raise_for_status()
        # Some how the order of cookies parameters are important
        # for coursera!!!
        v = session.cookies.pop('CAUTH')
        session.cookies.set('CAUTH', v)
    except requests.exceptions.HTTPError:
        raise AuthenticationFailed('Cannot login on coursera.org.')
    logging.info('Logged in on coursera.org.')
def down_the_wabbit_hole(session, class_name):
    """
    Authenticate on class.coursera.org
    """
    redirector = AUTH_REDIRECT_URL.format(class_name=class_name)
    response = session.get(redirector)
    logging.debug('Following %s to authenticate on class.coursera.org.',
                  redirector)
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        raise AuthenticationFailed('Cannot login on class.coursera.org.')
    logging.debug('Exiting "deep" authentication.')
def get_authentication_cookies(session, class_name, username, password):
    """
    Get the necessary cookies to authenticate on class.coursera.org.
    To access the class pages we need two cookies on class.coursera.org:
    csrf_token, session
    """
    # Reuse an existing .coursera.org login when one is present.
    if session.cookies.get('CAUTH', domain=".coursera.org"):
        logging.debug('Already logged in on accounts.coursera.org.')
    else:
        login(session, username, password, class_name=class_name)
    try:
        session.cookies.clear('class.coursera.org', '/' + class_name)
    except KeyError:
        pass
    down_the_wabbit_hole(session, class_name)
    if not do_we_have_enough_cookies(session.cookies, class_name):
        raise AuthenticationFailed('Did not find necessary cookies.')
    logging.info('Found authentication cookies.')
def do_we_have_enough_cookies(cj, class_name):
    """
    Check whether we have all the required cookies
    to authenticate on class.coursera.org.
    """
    return cj.get('csrf_token',
                  domain='class.coursera.org',
                  path='/' + class_name) is not None
def validate_cookies(session, class_name):
    """
    Checks whether we have all the required cookies
    to authenticate on class.coursera.org. Also check for and remove
    stale session.
    """
    if not do_we_have_enough_cookies(session.cookies, class_name):
        return False
    probe_url = CLASS_URL.format(class_name=class_name) + '/class'
    response = session.head(probe_url, allow_redirects=False)
    if response.status_code == 200:
        return True
    # Anything other than 200 means the session went stale: drop our cookies.
    logging.debug('Stale session.')
    try:
        session.cookies.clear('.coursera.org')
    except KeyError:
        pass
    return False
def make_cookie_values(cj, class_name):
    """
    Makes a string of cookie keys and values.
    Can be used to set a Cookie header.

    Only cookies whose domain equals *class_name* are included; the result
    is '; '-joined "name=value" pairs.
    """
    # Cleanup: the original computed a "path" local that was never used.
    return '; '.join(c.name + '=' + c.value
                     for c in cj
                     if c.domain == class_name)
def find_cookies_for_class(cookies_file, class_name):
    """
    Return a RequestsCookieJar containing the cookies for
    .coursera.org and class.coursera.org found in the given cookies_file.
    TODO: Purge coursera specific code.
    """
    path = "/" + class_name

    def matches(cookie):
        # return c.domain == ".coursera.org" \
        #     or (c.domain == "class.coursera.org" and c.path == path)
        return cookie.domain == class_name

    jar = requests.cookies.RequestsCookieJar()
    for cookie in get_cookie_jar(cookies_file):
        if matches(cookie):
            jar.set_cookie(cookie)
    return jar
def load_cookies_file(cookies_file):
    """
    Load cookies file.
    We pre-pend the file with the special Netscape header because the cookie
    loader is very particular about this string.

    Returns an in-memory text stream rewound to offset 0.
    """
    import io
    logging.debug('Loading cookie file %s into memory.', cookies_file)
    cookies = io.StringIO()
    # Bug fix: terminate the header with a newline. Without it the header
    # fused with the file's first line, corrupting the first cookie entry.
    cookies.write('# Netscape HTTP Cookie File\n')
    # Bug fix: close the source file, and drop the 'rU' mode, which was
    # removed in Python 3.11 ('r' already does universal newlines).
    with open(cookies_file, 'r') as source:
        cookies.write(source.read())
    cookies.flush()
    cookies.seek(0)
    return cookies
def get_cookie_jar(cookies_file):
cj = cookielib.MozillaCookieJar()
cookies = load_cookies_file(cookies_file)
# nasty hack: cj.load() requires a filename not a file, but if I use
# stringio, that file doesn't exist. I use |
from .settings import info
class get_settings:
    """Accessor wrapper around the repo/slack configuration from settings.info().

    Bug fix: the original stored each setting under the same name as its
    accessor method (self.repo vs. def repo, self.slack vs. def slack, ...),
    so the instance attributes shadowed the methods and every accessor was
    uncallable (calling s.repo() raised TypeError on the dict). Settings now
    live in underscore-prefixed attributes.
    """

    def __init__(self):
        self._settings = info()
        self._repo = self._settings['repo']
        self._repo_path = self._repo['repo_path']
        self._repo_url = self._repo['repo_url']
        self._key = self._repo['ssh_key']
        self._slack = self._settings['slack']
        self._slack_token = self._slack['token']

    def repo(self):
        """Return the repo settings dict."""
        return(self._repo)

    def repo_url(self):
        """Return the repository URL."""
        return(self._repo_url)

    def repo_path(self):
        """Return the local repository path."""
        return(self._repo_path)

    def ssh_key(self):
        """Return the SSH key configured for the repo."""
        return(self._key)

    def slack(self):
        """Return the slack settings dict."""
        return(self._slack)

    def slack_token(self):
        """Return the slack API token."""
        return(self._slack_token)
|
"""
* Copyright (c) 2012-2017, Nic McDonald and Adriana Flores
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the name of prim nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABIL | ITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING I | N ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
"""
import codecs
import re
import os
import sys
# Fail fast with an actionable message if setuptools is unavailable.
try:
    from setuptools import setup
except ImportError:
    # Bug fix: catch ImportError only; the original bare "except:" also
    # swallowed SystemExit/KeyboardInterrupt and unrelated errors.
    print('please install setuptools via pip:')
    print('  pip3 install setuptools')
    sys.exit(-1)
def find_version(*file_paths):
    """Extract the __version__ string from the file at *file_paths*,
    joined relative to this script's directory.

    Returns:
        The version string (e.g. "1.2.3").

    Raises:
        RuntimeError: if no __version__ assignment is found.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    # Bug fix: close the file handle (the original leaked it).
    with codecs.open(os.path.join(here, *file_paths), 'r') as fh:
        version_file = fh.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
# Package metadata. The version is read from sssweep/__init__.py so it is
# declared in exactly one place.
setup(
    name='sssweep',
    version=find_version('sssweep', '__init__.py'),
    description='Automatic task generation for SuperSim sweeps and plot web viewer',
    author='Nic McDonald and Adriana Flores',
    author_email='nicci02@hotmail.com and adrifloresm@gmail.com',
    license='BSD',
    url='http://github.com/nicmcd/sssweep',
    packages=['sssweep'],
    install_requires=['taskrun >= 3.0.0',
                      'ssplot >= 0.1.0'],
)
|
maj | or = | 0
minor = 37
patch = 0
|
import json
from requests import Request, Session
class Requester:
    """Thin JSON-over-HTTP helper built on requests' Request/Session."""

    @staticmethod
    def verifyconnection(url="http://google.com"):
        """Return the raw response for a GET to *url*, or False on failure."""
        return Requester.request(url, method='GET', decode=False)

    @staticmethod
    def request(url, method=None, data=None, decode=True):
        """Send *data* (JSON-encoded) to *url* with the given HTTP method.

        Returns the JSON-decoded body when *decode* is true, the raw
        response object otherwise, or False when the request failed.
        """
        # Default to plain http:// when no scheme was supplied.
        if not url.startswith('http://') and not url.startswith('https://'):
            url = 'http://' + url
        request = Request(method, url)
        if data:
            request.data = json.dumps(data)
        with Session() as session:
            prepped = session.prepare_request(request)
            try:
                response = session.send(prepped)
            except Exception:
                # Bug fix: narrowed from a bare "except:", which also caught
                # SystemExit/KeyboardInterrupt; request failures are Exceptions.
                return False
            if decode:
                return json.loads(response.text)
            return response
|
#
# Copyright (C) 2000 Stephen Davies
# Copyright (C) 2000 Stefan Seefeld
# All rights reserved.
# Licensed to the public under the terms of the GNU LGPL (>= 2),
# see the file COPYING for details.
#
from Synopsis.Processor import Parameter
from Synopsis import FileTree
from Synopsis.Formatters.HTML.View import View
from Synopsis.Formatters.HTML.Tags import *
import os
class FileListing(View):
    """A view that creates an index of files, and an index for each file.

    First the index of files is created, intended for the top-left frame.
    Second a view is created for each file, listing the major declarations for
    that file, eg: classes, global functions, namespaces, etc."""

    def filename(self):
        # The main view doubles as the layout's index page.
        if self.main:
            return self.directory_layout.index()
        else:
            return self.directory_layout.special('FileListing')

    def title(self):
        return 'Files'

    def root(self):
        return self.filename(), self.title()

    def register_filenames(self):
        """Registers a view for each file indexed."""
        self.processor.register_filename(self.filename(), self, None)

    def process(self):
        """Creates the listing using the recursive process_file_tree_node method"""
        # Start the file
        self.start_file()
        self.write_navigation_bar()
        self.write('<ul class="tree">')
        # recursively visit all nodes
        self.process_file_tree_node(self.processor.file_tree.root())
        self.write('</ul>')
        self.end_file()

    def _node_sorter(self, a, b):
        """Compares file nodes a and b depending on whether they are leaves
        or not (files sort before directories, then case-insensitively by path)"""
        a_leaf = isinstance(a, FileTree.File)
        b_leaf = isinstance(b, FileTree.File)
        if a_leaf != b_leaf:
            return cmp(b_leaf, a_leaf)
        return cmp(a.path.upper(), b.path.upper())

    def process_file_tree_node(self, node):
        """Creates a portion of the tree for the given file node. This method
        assumes that the file is already in progress, and just appends to
        it. This method is recursive, calling itself for each child of node
        (file or directory)."""
        if isinstance(node, FileTree.File):
            # Leaf node: link to the per-file index view.
            ref = rel(self.filename(), self.directory_layout.file_index(node.path))
            text = href(ref, node.filename, target='detail')
            self.write('<li>%s</li>'%text)
            return
        # Non-leaf node
        children = node.children
        children.sort(self._node_sorter)
        if len(node.path):
            # Bug fix: os.sep belongs to the directory label inside the <li>.
            # The original wrote '...%s...' % node.filename + os.sep, which
            # (because % binds tighter than +) appended the separator after
            # the '<ul>' tag instead of after the directory name.
            self.write('<li>%s<ul class="tree">' % (node.filename + os.sep))
        if len(children):
            for child in children:
                self.process_file_tree_node(child)
        if len(node.path):
            self.write('</ul></li>')
|
=['GET'])
@login_required
def past():
# render past events
if not g.user.is_editor():
flash(mustBeStudentCoord[0],mustBeStudentCoord[1])
return redirect(url_for('index'))
allusers = Users.query.order_by(Users.lname.asc())
events = Opps.query.filter(Opps.date < datetime.now()).order_by(desc(Opps.date)).all()
def render_past():
for event in events:
#time.sleep(.01)
yield event
g.user = current_user
return Response(stream_with_context(stream_template('past_event.html', eventiter = render_past(), events=events, allusers=allusers)))
@app.route('/events', methods=['GET', 'POST'])
@login_required
def events():
    """List upcoming events, soonest first."""
    upcoming = (db.session.query(Opps)
                .filter(Opps.date > datetime.now())
                .order_by(asc(Opps.date))
                .all())
    return render_template('events.html', allEvents=upcoming)
@app.route('/pastevents', methods=['GET'])
@login_required
def pastevents():
    """Render the past-events page.

    NOTE(review): the server-side query is commented out, so the template
    presumably loads its own data — confirm before removing the dead line.
    """
    #allEvents = db.session.query(Opps).filter(Opps.date < datetime.now()).order_by(desc(Opps.date)).all()
    return render_template('pastevents.html')
# NEW EVENT PAGE #
@app.route('/new', methods=['GET', 'POST'])
@login_required
def new():
    """Create a new event (editors and web admins only).

    GET renders the blank form; POST validates it field by field, flashing
    the first error, and only creates the event when everything passes.
    """
    if not g.user.is_editor() and not g.user.is_webadmin():
        flash(mustBeStudentCoord[0],mustBeStudentCoord[1])
        return redirect(url_for('events'))
    if request.method == 'POST': # form was submitted
        # do a whole bunch of form verification (is there a better way to do this?)
        if not request.form['title']:
            flash('Title is required', 'danger')
        elif not request.form['location']:
            flash('Location is required', 'danger')
        elif not request.form['time']:
            flash('Start time is requried', 'danger')
        elif not request.form['endtime']:
            flash('End time is required', 'danger')
        elif not request.form['ntechs']:
            flash('Number of techs is required', 'danger')
        else: # finally, if we pass inspection, add the event to the database
            title = request.form['title']
            todo = Opps(title, request.form['location'])
            # Date and times arrive as separate fields; concatenate and parse.
            todo.date = datetime.strptime(request.form['date']+request.form['time'],'%m/%d/%Y%I:%M %p')
            todo.enddate = datetime.strptime(request.form['date']+request.form['endtime'],'%m/%d/%Y%I:%M %p')
            todo.user = g.user
            todo.techsneeded = int(request.form['ntechs'])
            todo.info = request.form['info']
            db.session.add(todo)
            db.session.commit()
            flash('"' + title + '" was successfully created', 'success')
            #flash(todo.uuid,'info')
            return redirect(url_for('admin'))
    # GET, or a POST that failed validation, falls through to the form.
    return render_template('new.html') # page was loaded
# EDIT EVENT PAGE #
@app.route('/events/<int:eventID>', methods = ['GET' , 'POST'])
@login_required
def show_or_update(eventID):
    """Display an event (GET) or update/delete it (POST).

    A POST with submit == 'submit' updates the event from the form; any
    other POST deletes it. Editors/web admins only.
    """
    if not g.user.is_editor() and not g.user.is_webadmin():
        flash(mustBeStudentCoord[0],mustBeStudentCoord[1])
        return redirect(url_for('index'))
    todo_item = Opps.query.get(eventID)
    if request.method == 'GET':
        return render_template('view.html',todo=todo_item)
    if request.form['submit'] == 'submit':
        todo_item.name = request.form['title']
        todo_item.desc = request.form['location']
        # Date and times arrive as separate fields; concatenate and parse.
        todo_item.date = datetime.strptime(request.form['date']+request.form['time'],'%m/%d/%Y%I:%M %p')
        todo_item.enddate = datetime.strptime(request.form['date']+request.form['endtime'],'%m/%d/%Y%I:%M %p')
        todo_item.techsneeded = request.form['ntechs']
        todo_item.info = request.form['info']
        flash('Event updated.', 'info')
    else:
        db.session.delete(todo_item)
        flash('Event deleted.', 'info')
    db.session.commit()
    return redirect(url_for('admin'))
    # Bug fix: removed two unreachable trailing lines (a flash and a redirect
    # that referenced the undefined name `todo_id`).
# CLEAR PAST EVENTS METHOD #
@app.route('/clear')
def clear():
    """Soft-delete every past event (get_timeline() == 2).

    NOTE(review): unlike the neighboring editor actions this route has no
    @login_required decorator, so g.user may be unset for anonymous
    visitors — confirm whether that is intentional.
    """
    if not g.user.is_editor() and not g.user.is_webadmin():
        flash(mustBeStudentCoord[0],mustBeStudentCoord[1])
        return redirect(url_for('index'))
    opps = Opps.query.all()
    for opp in opps:
        if opp.get_timeline() == 2:
            opp.deleted = True
    db.session.commit()
    flash('Cleared Past Events.','info')
    return redirect(url_for('index'))
@app.route('/signup')
@login_required
def signup():
    """Show the sign-up sheet, if sign-ups are enabled for this user."""
    if g.user.cansignup:
        return render_template('signup.html', user=g.user)
    flash('Sign-ups not available.', 'danger')
    return redirect(url_for('index'))
@app.route('/feedback')
@login_required
def feedback():
    # Render the feedback landing page.
    return render_template('feedback.html')
@app.route('/feedback/<int:eventID>', methods = ['GET' , 'POST'])
@login_required
def feedbackFor(eventID):
    """Show the feedback form for one event (GET); POST returns to /feedback."""
    if request.method != 'GET':
        return redirect(url_for('feedback'))
    return render_template('feedbackform.html', event = Opps.query.get(int(eventID)))
@app.route('/upload', methods = ['POST'])
def upload():
    """Save an uploaded file via the flask-uploads 'photos' set.

    NOTE(review): no @login_required here, and the saved filename is only
    echoed to the console — confirm whether this endpoint is finished.
    """
    filename = photos.save(request.files['file'])
    print(filename)
    return redirect(url_for('index'))
# DOWNLOAD CSV RECEIPT METHOD #
@app.route('/download')
@login_required
def download():
    """Serve a CSV of every event with its signed-up techs as an attachment."""
    rows = [['Event','Date','Techs']]
    for opp in Opps.query.all():
        row = [opp.name, opp.date]
        row.extend(usr.fname +' '+ usr.lname for usr in opp.users)
        rows.append(row)
    payload = excel.make_response_from_array(rows, 'csv', status=200, file_name='opps')
    response = make_response(payload)
    response.headers["Content-Disposition"] = "attachment; filename=opps.csv"
    return response
@app.route('/mailreceipt')
@login_required
def mailreceipt():
    """Compose a mailto: link listing non-past events and their techs.

    NOTE(review): the body is not URL-encoded, so spaces/newlines may not
    survive every mail client; '%-d' in strftime is also platform-specific
    (glibc only) — confirm target platforms.
    """
    mailstring = ""
    for opp in Opps.query.all():
        # get_timeline() == 2 marks past events elsewhere in this app; skip those.
        if opp.get_timeline() != 2:
            mailstring += opp.name + " - " + str(opp.date.strftime('%a, %b %-d, %Y')) + "\n"
            for tech in opp.users:
                mailstring += tech.fname + " " + tech.lname + ", "
            mailstring += "\n\n"
    return redirect('mailto:' + g.user.email + '?subject=Work Opps&body=' + mailstring)
# USER PROFILE PAGE #
@app.route('/profile/<gccid>', methods=['GET','POST'])
@login_required
def profile(gccid):
    """Show a user's profile; POST handles password change and bcc preference.

    Password changes apply to the logged-in user (g.user), not necessarily
    the profile being viewed.
    """
    if request.method == 'POST':
        if 'changepassword' in request.form:
            # Require the old password before accepting a new one.
            if g.user.check_password(request.form['oldpwd']):
                if request.form['newpwd1'] == request.form['newpwd2']:
                    g.user.set_password(request.form['newpwd1'])
                    db.session.commit()
                    flash('Password successfully changed.','success')
                else:
                    flash('New passwords do not match.','danger')
            else:
                flash('Old password incorrect','danger')
        if 'bcc' in request.form:
            # The checkbox arrives as the string "on" when checked.
            if request.form['bcc'] == "on":
                changesetting(g.user, 'bcc', 1)
            else:
                changesetting(g.user, 'bcc', 0)
    users = Users.query.filter(Users.gccid != gccid).order_by(asc(Users.lname)).all()
    staff = Staff.query.all()
    usr = Users.query.filter(Users.gccid == gccid).first()
    if not usr:
        flash('User '+ gccid +' not found.','danger')
        return redirect(url_for('index'))
    return render_template('profile.html', users = users, usr = usr, staff = staff)
# REGISTER NEW USER PAGE #
@app.route('/register' , methods=['GET','POST'])
def register():
if request.method == 'GET':
return render_template('register.html')
if not request.form['fname'] or not request.form['lname']:
flash ('Please enter your name.','danger')
elif not request.form[ | 'email']:
flash('Please enter an email address.','danger')
elif not re.match(r"(.*@gcc\.edu)", request.form['email']):
flash('Please enter a valid GCC email address.','danger')
| elif not re.match(r"(\d{6})", request.form['gccid']):
flash('Please enter a valid GCC ID number','danger')
elif request.form['password'] != request.form |
#!/usr/bin/python
###############################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://www.vauxoo.com>).
# All Rights Reserved
###############################################################################
# Credits:
# Coded by: Katherine Zaoral <kathy@vauxoo.com>
# Planified by: Katherine Zaoral <kathy@vauxoo.com>
# Audited by: Katherine Zaoral <kathy@vauxoo.com>
###############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GN | U Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
| {
"name": "Costing Method Settings",
"summary": "Set product default cost method",
"version": "1.6",
"author": "Vauxoo",
"website": "http://www.vauxoo.com/",
"category": "Settings",
"depends": [
"purchase",
],
"data": [
"wizard/res_config_view.xml"
],
"demo": [],
"test": [],
"qweb": [],
"js": [],
"css": [],
"installable": True,
}
|
com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
    info.

    Args:
        info (dict): The service account private key info.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        ErrorGroupServiceClient: The constructed client.
    """
    creds = service_account.Credentials.from_service_account_info(info)
    # The merged dict lets the derived credentials win over any caller value.
    return cls(*args, **{**kwargs, "credentials": creds})
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
    file.

    Args:
        filename (str): The path to the service account private key json
            file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        ErrorGroupServiceClient: The constructed client.
    """
    creds = service_account.Credentials.from_service_account_file(filename)
    # The merged dict lets the derived credentials win over any caller value.
    return cls(*args, **{**kwargs, "credentials": creds})

from_service_account_json = from_service_account_file
@property
def transport(self) -> ErrorGroupServiceTransport:
    """Returns the transport used by the client instance.

    Returns:
        ErrorGroupServiceTransport: The transport used by the client
            instance.
    """
    # Set once at construction; exposed read-only here.
    return self._transport
@staticmethod
def error_group_path(project: str, group: str,) -> str:
"""Returns a fully-qualified error_group string."""
return "projects/{project}/groups/{group}".format(project=project, group=group,)
@staticmethod
def parse_error_group_path(path: str) -> Dict[str, str]:
"""Parses a error_group path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/groups/(?P<group>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client c | ert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always" | , use the
default mTLS endpoint; if the environment variabel is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ErrorGroupServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
|
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (C) 2015 Daniel Standage <daniel.standage@gmail.com>
#
# This file is part of tag (http://github.com/standage/tag) and is licensed
# under the BSD 3-clause license: see LICENSE.
# -----------------------------------------------------------------------------
"""Package-wide configuration"""
try:
import __builtin__ as builtins
except ImportError: # pragma: no cover
import builtins
from tag.comment import Comment
from tag.directive import Directive
from tag.feature import Feature
from tag.sequence import Sequence
from tag.range import Range
from tag.reader import GFF3Reader
from tag.writer import GFF3Writer
from tag.score import Score
from tag import bae
from tag import cli
from tag import index
from tag import locus
fro | m tag import select
from tag import transcript
from gzip import open as gzopen
import sys
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def open(filename, mode):
    """Open a file for reading or writing, with gzip and stdin/stdout support.

    `mode` must be 'r' or 'w'. A filename of '-' or None maps to
    stdin/stdout, and names ending in '.gz' are opened transparently with
    gzip in text mode. Deliberately shadows the builtin `open` as the
    package-level API.
    """
    if mode not in ('r', 'w'):
        raise ValueError('invalid mode "{}"'.format(mode))
    if filename in ['-', None]:  # pragma: no cover
        return sys.stdin if mode == 'r' else sys.stdout
    if filename.endswith('.gz'):
        return gzopen(filename, mode + 't')
    return builtins.open(filename, mode)
|
# Minimal Django settings for the django-auth-adfs test suite.

# Test-only dummy value; never hard-code a real SECRET_KEY in production.
SECRET_KEY = 'secret'

# In-memory SQLite keeps the test database ephemeral and fast.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
        # BUG FIX: DIRS must be a list/tuple of directories. The previous
        # bare string 'templates' would be iterated character-by-character
        # by the template loaders.
        'DIRS': ['templates'],
    },
]

MIDDLEWARE = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django_auth_adfs.middleware.LoginRequiredMiddleware',
)

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_auth_adfs',
    'tests',
)

# ModelBackend first, then the ADFS code/token backends under test.
AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    'django_auth_adfs.backend.AdfsAuthCodeBackend',
    'django_auth_adfs.backend.AdfsAccessTokenBackend',
)

ROOT_URLCONF = 'tests.urls'
STATIC_ROOT = '/tmp/'  # Dummy
STATIC_URL = '/static/'

# django-auth-adfs configuration; values are placeholders for the tests.
AUTH_ADFS = {
    "SERVER": "adfs.example.com",
    "CLIENT_ID": "your-configured-client-id",
    "RELYING_PARTY_ID": "your-adfs-RPT-name",
    "AUDIENCE": "microsoft:identityserver:your-RelyingPartyTrust-identifier",
    "CA_BUNDLE": "/path/to/ca-bundle.pem",
    "CLAIM_MAPPING": {"first_name": "given_name",
                      "last_name": "family_name",
                      "email": "email"},
    "BOOLEAN_CLAIM_MAPPING": {"is_staff": "user_is_staff",
                              "is_superuser": "user_is_superuser"},
    "CONFIG_RELOAD_INTERVAL": 0,  # Always reload settings
}

LOGIN_URL = "django_auth_adfs:login"
LOGIN_REDIRECT_URL = "/"
|
# -*- coding:utf-8 -*-
from django.db import models
# Create your models here.
# Lookup table of account types; rows are referenced by Admin.user_type.
class UserType(models.Model):
    # Human-readable label for this type.
    display = models.CharField(max_length=50)
    def __unicode__(self):
        # Python 2 text representation (this code base predates __str__).
        return self.display
# A registered user/administrator account.
class Admin(models.Model):
    username = models.CharField(max_length=50)
    # NOTE(review): stored as a plain CharField -- confirm the password is
    # hashed before being saved; nothing here enforces that.
    password = models.CharField(max_length=50)
    email = models.EmailField()
    # Set once at row creation (camelCase name is inconsistent with the
    # create_date used by the other models).
    CreateDate = models.DateTimeField(auto_now_add = True)
    # Positional ForeignKey without on_delete: Django < 2.0 style.
    user_type = models.ForeignKey('UserType')
    def __unicode__(self):
        return self.username
# A chat message posted by an Admin.
class Chat(models.Model):
    content = models.TextField()
    # Author of the message.
    user = models.ForeignKey('Admin')
    create_date = models.DateTimeField(auto_now_add = True)
    def __unicode__(self):
        return self.content
# Lookup table of news categories; referenced by News.news_type.
class NewType(models.Model):
    # Human-readable label for the category.
    display = models.CharField(max_length=50)
    def __unicode__(self):
        return self.display
# An aggregated news item submitted by a user.
class News(models.Model):
    title = models.CharField(max_length=30)
    summary = models.CharField(max_length=256)
    url = models.URLField()
    favor_count = models.IntegerField(default=0)
    # like count
    replay_count = models.IntegerField(default=0)
    # reply/comment count (NOTE(review): "replay" looks like a typo for
    # "reply", but renaming the column would need a migration)
    news_type = models.ForeignKey('NewType')
    user = models.ForeignKey('Admin')
    create_date = models.DateTimeField(auto_now_add = True)
    def __unicode__(self):
        return self.title
# A user's comment on a News item.
class Reply(models.Model):
    content = models.TextField()
    # Comment author.
    user = models.ForeignKey('Admin')
    # The News item being commented on.
    new = models.ForeignKey('News')
    create_date = models.DateTimeField(auto_now_add = True)
    def __unicode__(self):
        return self.content
|
import serial,time,socket
import hart_protocol
import sys
port = 3
if len(sys.argv) < 4:
print "Error, usage " + sys.argv[0] + " port long_address new_longtag"
| print "Usage hex string (5 hex digits) as address and LATIN-1 string as new long tag"
quit()
address = sys.argv[2].decode('hex')
if len(address) != 5:
print "Error, address should be 5 bytes long!"
longtag = sys.argv[3]
if len(longtag) != 32:
print "Error, long tag should be 32 bytes long!"
port = int(sys.ar | gv[1]) - 1
print "Opening COM" + str(port + 1) + "..."
preambles = 10
delimiter = '\x82' # master command with long address
command = '\x16' # write long tag
ln = 32 # long address exact length
pack = delimiter + address + command + chr(ln) + longtag
packet = '\xff' * preambles + pack + hart_protocol.get_checksum(pack)
ser = serial.Serial(port, 1200)
print "writing: " + hart_protocol.dump_hex(packet)
ser.write(packet)
print "packet sent succesfully!" |
# Linktastic Module
# - A python2/3 compatible module that can create hardlinks/symlinks on windows-based systems
#
# Linktastic is distributed under the MIT License. The follow are the terms and conditions of using Linktastic.
#
# The MIT License (MIT)
# Copyright (c) 2012 Solipsis Development
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import subprocess
from subprocess import CalledProcessError
import os
# On Windows, pre-build a STARTUPINFO so the cmd.exe invocations below do
# not flash a console window (STARTF_USESHOWWINDOW hides it). `info` is only
# defined -- and only used -- when os.name == 'nt'.
if os.name == 'nt':
	info = subprocess.STARTUPINFO()
	info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
# Prevent spaces from messing with us!
def _escape_param(param):
return '"%s"' % param
# Private function to create link on nt-based systems
def _link_windows(src, dest):
	"""Create a hard link named *dest* for *src* via `mklink /H`.

	Raises IOError with mklink's output on failure.
	"""
	cmd = 'cmd /C mklink /H %s %s' % (_escape_param(dest), _escape_param(src))
	try:
		subprocess.check_output(cmd, stderr=subprocess.STDOUT, startupinfo=info)
	except CalledProcessError as err:
		raise IOError(err.output.decode('utf-8'))

	# TODO, find out what kind of messages Windows sends us from mklink
	# print(stdout)
	# assume if they ret-coded 0 we're good
def _symlink_windows(src, dest):
	"""Create a file symlink named *dest* for *src* via plain `mklink`.

	Raises IOError with mklink's output on failure.
	"""
	cmd = 'cmd /C mklink %s %s' % (_escape_param(dest), _escape_param(src))
	try:
		subprocess.check_output(cmd, stderr=subprocess.STDOUT, startupinfo=info)
	except CalledProcessError as err:
		raise IOError(err.output.decode('utf-8'))

	# TODO, find out what kind of messages Windows sends us from mklink
	# print(stdout)
	# assume if they ret-coded 0 we're good
def _dirlink_windows(src, dest):
	"""Create a directory link named *dest* for *src* via `mklink /J`.

	NOTE(review): /J creates a junction, while _junctionlink_windows uses /D
	(directory symlink) -- the flags look swapped relative to the function
	names. Behavior kept as-is; confirm against callers before changing.
	Raises IOError with mklink's output on failure.
	"""
	cmd = 'cmd /C mklink /J %s %s' % (_escape_param(dest), _escape_param(src))
	try:
		subprocess.check_output(cmd, stderr=subprocess.STDOUT, startupinfo=info)
	except CalledProcessError as err:
		raise IOError(err.output.decode('utf-8'))

	# TODO, find out what kind of messages Windows sends us from mklink
	# print(stdout)
	# assume if they ret-coded 0 we're good
def _junctionlink_windows(src, dest):
	"""Create a directory link named *dest* for *src* via `mklink /D`.

	NOTE(review): /D creates a directory symlink, not a junction (/J does) --
	the flag looks swapped with _dirlink_windows. Behavior kept as-is.
	Raises IOError with mklink's output on failure.
	"""
	cmd = 'cmd /C mklink /D %s %s' % (_escape_param(dest), _escape_param(src))
	try:
		subprocess.check_output(cmd, stderr=subprocess.STDOUT, startupinfo=info)
	except CalledProcessError as err:
		raise IOError(err.output.decode('utf-8'))

	# TODO, find out what kind of messages Windows sends us from mklink
	# print(stdout)
	# assume if they ret-coded 0 we're good
# Create a hard link to src named as dest
# This version of link, unlike os.link, supports nt systems as well
def link(src, dest):
	"""Create a hard link named *dest* pointing at *src*, portably."""
	(_link_windows if os.name == 'nt' else os.link)(src, dest)
# Create a symlink to src named as dest, but don't fail if you're on nt
def symlink(src, dest):
	"""Create a symbolic link named *dest* pointing at *src*, portably."""
	(_symlink_windows if os.name == 'nt' else os.symlink)(src, dest)
# Create a symlink to src named as dest, but don't fail if you're on nt
def dirlink(src, dest):
	"""Create a directory link named *dest* pointing at *src*, portably.

	On POSIX this is an ordinary symlink; on Windows a junction (/J).
	"""
	(_dirlink_windows if os.name == 'nt' else os.symlink)(src, dest)
# Create a symlink to src named as dest, but don't fail if you're on nt
def junctionlink(src, dest):
	"""Create a junction-style directory link named *dest* for *src*, portably.

	On POSIX this is an ordinary symlink; on Windows `mklink /D`.
	"""
	(_junctionlink_windows if os.name == 'nt' else os.symlink)(src, dest)
from ailment.expression import UnaryOp
from .base import PeepholeOptimizationExprBase
class RemoveRedundantNots(PeepholeOptimizationExprBase):
    """Peephole pass that rewrites double negation Not(Not(x)) into x."""

    __slots__ = ()

    name = "Remove redundant Nots"
    expr_classes = (UnaryOp, )  # all expressions are allowed

    def optimize(self, expr: UnaryOp):
        # Not(Not(expr)) ==> expr
        if expr.op != "Not":
            return None
        inner = expr.operand
        if isinstance(inner, UnaryOp) and inner.op == "Not":
            return inner.operand
        return None
|
rom_request()
command = json_get_str(self.body, NKeys.COMMAND, required=True)
log.debug(f"NLPRP request received from {self.request.remote_addr}: "
f"username={self.username}, command={command}")
if DEBUG_SHOW_REQUESTS:
log.debug(f"Request: {self.body!r}")
return self.parse_command(command)
def parse_command(self, command: str) -> JsonObjectType:
"""
Parse the NLPRP command.
"""
if command == NlprpCommands.LIST_PROCESSORS:
return self.list_processors()
elif command == NlprpCommands.PROCESS:
process_request = NlprpProcessRequest(self.body)
if process_request.queue:
return self.put_in_queue(process_request)
else:
return self.process_now(process_request)
elif command == NlprpCommands.SHOW_QUEUE:
return self.show_queue()
elif command == NlprpCommands.FETCH_FROM_QUEUE:
return self.fetch_from_queue()
elif command == NlprpCommands.DELETE_FROM_QUEUE:
return self.delete_from_queue()
# -------------------------------------------------------------------------
# NLPRP command handlers
# -------------------------------------------------------------------------
def list_processors(self) -> JsonObjectType:
"""
Returns an HTTP response listing the available NLP processors.
"""
return self.create_response(
status=HttpStatus.OK,
extra_info={
NKeys.PROCESSORS: [
proc.infodict
for proc in ServerProcessor.processors.values()
]
}
)
    def process_now(self, process_request: NlprpProcessRequest) \
            -> JsonObjectType:
        """
        Processes the text supplied by the user immediately, without putting
        it in the queue.

        Args:
            process_request: a :class:`NlprpProcessRequest`

        Returns:
            the NLPRP response dict (HTTP 200) with one result per document,
            each containing the per-processor outputs.
        """
        results = []  # type: JsonArrayType
        for text, metadata in process_request.gen_text_metadataobj():
            processor_data = []  # type: JsonArrayType
            for processor in process_request.processors:
                # Send the text off for processing
                procresult = process_nlp_text_immediate(
                    text=text,
                    processor=processor,
                    username=self.username,
                    password=self.password
                )
                # proc_dict = procresult.nlprp_processor_dict(processor)
                # Fill in the processor identity in place if the result came
                # back without a name.
                if procresult[NKeys.NAME] is None:
                    procresult[NKeys.NAME] = processor.name
                    procresult[NKeys.TITLE] = processor.title
                    procresult[NKeys.VERSION] = processor.version
                processor_data.append(procresult)
            doc_result = {
                NKeys.METADATA: metadata,
                NKeys.PROCESSORS: processor_data
            }
            # Echo the source text back only when the client asked for it.
            if process_request.include_text:
                doc_result[NKeys.TEXT] = text
            results.append(doc_result)
        response_info = {
            NKeys.CLIENT_JOB_ID: process_request.client_job_id,
            NKeys.RESULTS: results,
        }
        return self.create_response(status=HttpStatus.OK,
                                    extra_info=response_info)
    def put_in_queue(self,
                     process_request: NlprpProcessRequest) -> JsonObjectType:
        """
        Puts the document-processor pairs specified by the user into a celery
        queue to be processed.

        Args:
            process_request: a :class:`NlprpProcessRequest`

        Returns:
            the NLPRP response dict (HTTP 202 Accepted) carrying the queue_id
            that the client later uses to fetch results.
        """
        # Generate unique queue_id for whole client request
        queue_id = make_unique_id()
        # Encrypt password using reversible encryption for passing to the
        # processors.
        # We must pass the password as a string to the task because it won't
        # let us pass a bytes object
        crypt_pass = encrypt_password(self.password).decode()
        docprocrequest_ids = []  # type: List[str]
        with transaction.manager:  # one COMMIT for everything inside this
            # Iterate through documents...
            for doctext, metadata in process_request.gen_text_metadatastr():
                doc_id = make_unique_id()
                # PyCharm doesn't like the "deferred" columns, so:
                # noinspection PyArgumentList
                doc = Document(
                    document_id=doc_id,
                    doctext=doctext,
                    client_job_id=process_request.client_job_id,
                    queue_id=queue_id,
                    username=self.username,
                    client_metadata=metadata,
                    include_text=process_request.include_text
                )
                dbsession.add(doc)  # add to database
                # Iterate through processors...
                for processor in process_request.processors:
                    # The combination of a document and a processor gives us
                    # a docproc.
                    docprocreq_id = make_unique_id()
                    docprocreq = DocProcRequest(
                        docprocrequest_id=docprocreq_id,
                        document_id=doc_id,
                        processor_id=processor.processor_id
                    )
                    dbsession.add(docprocreq)  # add to database
                    docprocrequest_ids.append(docprocreq_id)
        # Now everything's in the database and committed, we can fire off
        # back-end jobs:
        # (Enqueuing only after the COMMIT avoids workers racing to read rows
        # that are not yet visible.)
        for dpr_id in docprocrequest_ids:
            process_nlp_text.apply_async(
                # unlike delay(), this allows us to specify task_id, and
                # then the Celery task ID is the same as the DocProcRequest
                # ID.
                args=(dpr_id, ),  # docprocrequest_id
                kwargs=dict(
                    username=self.username,
                    crypt_pass=crypt_pass,
                ),
                task_id=dpr_id,  # for Celery
            )
        response_info = {NKeys.QUEUE_ID: queue_id}
        return self.create_response(status=HttpStatus.ACCEPTED,
                                    extra_info=response_info)
def fetch_from_queue(self) -> JsonObjectType:
"""
Fetches requests for all document-processor pairs for the queue_id
supplied by the user (if complete).
"""
# ---------------------------------------------------------------------
# Args
# ---------------------------------------------------------------------
args = json_get_toplevel_args(self.body)
queue_id = json_get_str(args, NKeys.QUEUE_ID, required=True)
# ---------------------------------------------------------------------
# Start with the DocProcRequests, because if some are still busy,
# we will return a "busy" response.
# ---------------------------------------------------------------------
q_dpr = (
dbsession.query(DocProcRequest)
.join(Document)
.filter(Document.username == self.username)
.filter(Document.queue_id == queue_id)
)
q_doc = (
dbsession.query(Document)
.filter(Document.username == self.username)
.filter(Document.queue_id == queue_id)
)
dprs = list(q_dpr.all()) # type: List[DocProcRequest]
if not dprs:
raise mkerror(NOT_FOUND, "The queue_id given was not found")
busy = not all([dpr.done for dpr in dprs])
if busy:
response = self.create_response(HttpStatus.PROCESSING, {})
# todo: is it correct (from previous comments) that we can't
# return JSON via Pyramid with a status of HttpStatus.PROCESSING?
# If that's true, we have to force as below, but then we need to
# alter the NLPRP docs (as these state the JSON code and HTTP code
# should always be the same).
self.s |
import urllib.parse
from wptserve.utils import isomorphic_encode
def main(request, response):
    """Handler that causes multiple redirections. Redirect chain is as follows:
    1. Initial URL containing multi-redirect.py
    2. Redirect to cross-origin URL
    3. Redirect to same-origin URL
    4. Final URL containing the final same-origin resource.
    Mandatory parameters:
    page_origin - The page origin, used for redirection and to set TAO. This is a mandatory parameter.
    cross_origin - The cross origin used to make this a cross-origin redirect. This is a mandatory parameter.
    final_resource - Path of the final resource, without origin. This is a mandatory parameter.
    Optional parameters:
    tao_steps - Number of redirects for which the TAO header will be present (a number 0 - 3 makes the most sense). Default value is 0.
    tao_value - The value of the TAO header, when present. Default value is "*".
    Note that |step| is a parameter used internally for the multi-redirect. It's the step we're at in the redirect chain.
    """
    query = request.GET

    # Which hop of the chain we are on; malformed values fall back to 1.
    step = 1
    if b"step" in query:
        try:
            step = int(query.first(b"step"))
        except ValueError:
            pass

    page_origin = query.first(b"page_origin")
    cross_origin = query.first(b"cross_origin")
    final_resource = query.first(b"final_resource")
    tao_value = query.first(b"tao_value") if b"tao_value" in query else b"*"
    # Deliberately unguarded int(): a malformed tao_steps propagates an error.
    tao_steps = int(query.first(b"tao_steps")) if b"tao_steps" in query else 0

    # Rebuild this handler's URL path with tao_steps decremented for the
    # next hop; the step number is appended by each branch below.
    next_path = b"&".join([
        b"/resource-timing/resources/multi_redirect.py?page_origin=" + page_origin,
        b"cross_origin=" + cross_origin,
        b"final_resource=" + urllib.parse.quote(final_resource).encode('ascii'),
        b"tao_value=" + tao_value,
        b"tao_steps=" + isomorphic_encode(str(tao_steps - 1)),
        b"step=",
    ])

    # Emit the TAO header only while tao_steps hops remain.
    if tao_steps > 0:
        response.headers.set(b"timing-allow-origin", tao_value)

    if step == 1:
        # First hop: bounce to the cross-origin copy of this handler.
        redirect_url = cross_origin + next_path + b"2"
    elif step == 2:
        # Second hop: back to a same-origin copy.
        redirect_url = page_origin + next_path + b"3"
    else:
        # Final hop: the actual same-origin resource.
        redirect_url = page_origin + final_resource

    response.status = 302
    response.headers.set(b"Location", redirect_url)
|
from flask_login import LoginManager
from server.users.models import User
# Session manager for Flask-Login; attached to the app elsewhere.
login_manager = LoginManager()


@login_manager.user_loader
def load_user(user_id):
    """Resolve the session's user id to a User object for Flask-Login."""
    return User.get(user_id=user_id)
|
that type, then return True
# this is used in db_query to check if permission on DocType
if verbose: print "Has a shared document"
return True
return False
role_permissions = get_role_permissions(meta, user=user, verbose=verbose)
if not role_permissions.get(ptype):
return false_if_not_shared()
if doc:
if isinstance(doc, basestring):
doc = frappe.get_doc(meta.name, doc)
# if owner match, then return True
if doc.owner == frappe.session.user and role_permissions["if_owner"].get(ptype) and ptype!="create":
return True
# check if user permission
if role_permissions["apply_user_permissions"].get(ptype):
if not user_has_permission(doc, verbose=verbose, user=user,
user_permission_doctypes=role_permissions.get("user_permission_doctypes", {}).get(ptype) or []):
if verbose: print "No user permission"
return false_if_not_shared()
if not has_controller_permissions(doc, ptype, user=user):
if verbose: print "No controller permission"
return false_if_not_shared()
if verbose:
print "Has Role"
return True
def get_doc_permissions(doc, verbose=False, user=None):
	"""Returns a dict of evaluated permissions for given `doc` like `{"read":1, "write":1}`"""
	if not user: user = frappe.session.user

	# Child tables inherit access from their parent: grant read/write outright.
	if frappe.is_table(doc.doctype):
		return {"read":1, "write":1}

	meta = frappe.get_meta(doc.doctype)

	# deepcopy so the per-request cached role permissions are not mutated below
	role_permissions = copy.deepcopy(get_role_permissions(meta, user=user, verbose=verbose))

	# submit/import only apply if the doctype supports them
	if not cint(meta.is_submittable):
		role_permissions["submit"] = 0

	if not cint(meta.allow_import):
		role_permissions["import"] = 0

	if role_permissions.get("apply_user_permissions"):
		# no user permissions, switch off all user-level permissions
		for ptype in role_permissions:
			if role_permissions["apply_user_permissions"].get(ptype) and not user_has_permission(doc, verbose=verbose, user=user,
				user_permission_doctypes=role_permissions.get("user_permission_doctypes", {}).get(ptype) or []):
				role_permissions[ptype] = 0

	# apply owner permissions on top of existing permissions
	# NOTE(review): this compares against frappe.session.user rather than the
	# `user` argument used everywhere else in this function -- confirm intent.
	if doc.owner == frappe.session.user:
		role_permissions.update(role_permissions.if_owner)

	update_share_permissions(role_permissions, doc, user)

	return role_permissions
def update_share_permissions(role_permissions, doc, user):
	"""Updates share permissions on `role_permissions` for given doc, if shared.

	Args:
		role_permissions (dict): evaluated permissions; mutated in place.
		doc: document whose DocShare record (if any) grants extra rights.
		user (str): the user to look up in DocShare.
	"""
	share_ptypes = ("read", "write", "share")
	permissions_by_share = frappe.db.get_value("DocShare",
		{"share_doctype": doc.doctype, "share_name": doc.name, "user": user},
		share_ptypes, as_dict=True)
	if permissions_by_share:
		for ptype in share_ptypes:
			# BUG FIX: the original tested `if ptype:` which is always true for
			# these non-empty strings, so ANY DocShare row granted all three
			# rights. Only grant the rights the share actually sets.
			if permissions_by_share.get(ptype):
				role_permissions[ptype] = 1
def get_role_permissions(meta, user=None, verbose=False):
	"""Returns dict of evaluated role permissions like `{"read": True, "write":False}`

	If user permissions are applicable, it adds a dict of user permissions like

	{
		// user permissions will apply on these rights
		"apply_user_permissions": {"read": 1, "write": 1},

		// doctypes that will be applicable for each right
		"user_permission_doctypes": {
			"read": [
				// AND between "DocType 1" and "DocType 2"
				["DocType 1", "DocType 2"],

				// OR
				["DocType 3"]
			]
		}

		"if_owner": {"read": 1, "write": 1}
	}
	"""
	if not user: user = frappe.session.user
	# Results are cached per (doctype, user) on frappe.local for this request.
	cache_key = (meta.name, user)

	if not frappe.local.role_permissions.get(cache_key):
		perms = frappe._dict({ "apply_user_permissions": {}, "user_permission_doctypes": {}, "if_owner": {} })
		user_roles = frappe.get_roles(user)
		dont_match = []

		# `rights` is a module-level list of permission types (read, write, ...).
		# OR-accumulate each right over every level-0 permission row that
		# matches one of the user's roles.
		for p in meta.permissions:
			if cint(p.permlevel)==0 and (p.role in user_roles):
				# apply only for level 0

				for ptype in rights:
					# build if_owner dict if applicable for this right
					perms[ptype] = perms.get(ptype, 0) or cint(p.get(ptype))

					if ptype != "set_user_permissions" and p.get(ptype):
						perms["apply_user_permissions"][ptype] = (perms["apply_user_permissions"].get(ptype, 1)
							and p.get("apply_user_permissions"))

					if p.if_owner and p.get(ptype):
						perms["if_owner"][ptype] = 1

					if p.get(ptype) and not p.if_owner and not p.get("apply_user_permissions"):
						dont_match.append(ptype)

				if p.apply_user_permissions:
					if p.user_permission_doctypes:
						# set user_permission_doctypes in perms
						user_permission_doctypes = json.loads(p.user_permission_doctypes)

						if user_permission_doctypes:
							# perms["user_permission_doctypes"][ptype] would be a list of list like [["User", "Blog Post"], ["User"]]
							for ptype in rights:
								if p.get(ptype):
									perms["user_permission_doctypes"].setdefault(ptype, []).append(user_permission_doctypes)

					else:
						user_permission_doctypes = get_linked_doctypes(meta.name)

		# if atleast one record having both Apply User Permission and If Owner unchecked is found,
		# don't match for those rights
		for ptype in rights:
			if ptype in dont_match:
				if perms["apply_user_permissions"].get(ptype):
					del perms["apply_user_permissions"][ptype]

				if perms["if_owner"].get(ptype):
					del perms["if_owner"][ptype]

		# if one row has only "Apply User Permissions" checked and another has only "If Owner" checked,
		# set Apply User Permissions as checked
		for ptype in rights:
			if perms["if_owner"].get(ptype) and perms["apply_user_permissions"].get(ptype)==0:
				perms["apply_user_permissions"][ptype] = 1

		# delete 0 values
		# NOTE(review): deleting keys while iterating .items() is safe on
		# Python 2 (returns a list) but would raise RuntimeError on Python 3.
		for key, value in perms.get("apply_user_permissions").items():
			if not value:
				del perms["apply_user_permissions"][key]

		frappe.local.role_permissions[cache_key] = perms

	return frappe.local.role_permissions[cache_key]
def user_has_permission(doc, verbose=True, user=None, user_permission_doctypes=None):
	"""Check `doc` (and all its child rows) against the user's User Permissions.

	Each entry of `user_permission_doctypes` is a set of doctypes whose link
	fields must all pass (AND); the sets themselves are OR-ed together.
	Returns True only if the parent doc and every child row pass.
	"""
	from frappe.defaults import get_user_permissions
	user_permissions = get_user_permissions(user)
	user_permission_doctypes = get_user_permission_doctypes(user_permission_doctypes, user_permissions)

	def check_user_permission(d):
		# True if document/row `d` passes at least one doctype-set.
		meta = frappe.get_meta(d.get("doctype"))
		end_result = False

		messages = {}

		# check multiple sets of user_permission_doctypes using OR condition
		for doctypes in user_permission_doctypes:
			result = True

			for df in meta.get_fields_to_check_permissions(doctypes):
				if (df.options in user_permissions and d.get(df.fieldname)
					and d.get(df.fieldname) not in user_permissions[df.options]):
					result = False

					if verbose:
						msg = _("Not allowed to access {0} with {1} = {2}").format(df.options, _(df.label), d.get(df.fieldname))
						if d.parentfield:
							msg = "{doctype}, {row} #{idx}, ".format(doctype=_(d.doctype),
								row=_("Row"), idx=d.idx) + msg

						messages[df.fieldname] = msg

			end_result = end_result or result

		# Only surface the collected messages when every set failed.
		if not end_result and messages:
			for fieldname, msg in messages.items():
				msgprint(msg)

		return end_result

	_user_has_permission = check_user_permission(doc)

	# A parent passes only if all of its child rows also pass.
	for d in doc.get_all_children():
		_user_has_permission = check_user_permission(d) and _user_has_permission

	return _user_has_permission
def has_controller_permissions(doc, ptype, user=None):
	"""Run every `has_permission` hook registered for doc's doctype.

	Returns False as soon as any hook denies; True if all allow (or none exist).
	"""
	user = user or frappe.session.user
	hooks = frappe.get_hooks("has_permission").get(doc.doctype, [])
	for method in hooks:
		allowed = frappe.call(frappe.get_attr(method), doc=doc, ptype=ptype, user=user)
		if not allowed:
			return False
	return True
def can_set_user_permissions(doctype, docname=None):
	"""Whether the current session user may set user permissions for doctype
	(optionally for a specific docname)."""
	# System Manager can always set user permissions
	if "System Manager" in frappe.get_roles():
		return True

	meta = frappe.get_meta(doctype)

	# must be able to read the specific document, when one is given
	if docname and not has_permission(doctype, "read", docname):
		return False

	# and hold a role granting "Set User Permissions"
	return get_role_permissions(meta).set_user_permissions == 1
def set_user_permission_if_allowed(doctype, name, user, with_message=False):
	# NOTE(review): the `!=1` test looks inverted given the function's name --
	# it adds the permission only when the user's role can NOT set user
	# permissions. Confirm against upstream behavior before changing.
	if get_role_permissions(frappe.get_meta(doctype), user).set_user_permissions!=1:
		add_user_permission(doctype, name, user, with_message)
def add_user_permission(doctype, name, user, with_message=False):
if name not in frappe.defaults.get_user_permissions(user).get(doctype, []):
if not f |
from | playerGst im | port *
|
or showing a rendered image derived from a Legend. """
class RenderThread(Thread):
def __init__(self, plugin, size, handler):
super().__init__()
self.plugin = plugin
self.size = size
self.handler = handler
def run(self):
wx.PostEvent(self.handler, LegendRenderEvent(image=self.plugin.get_legend(*self.size)))
RESET_LEGEND = 0
    def __init__(self, parent, id):
        # Build the floating legend window and wire it to the GL canvas.
        super().__init__(parent, id, size=wx.Size(140, 300), style=wx.FRAME_NO_TASKBAR | wx.FRAME_FLOAT_ON_PARENT)
        # Preferred (maximum) size; RepaintLegend shrinks to fit the canvas.
        self.max_size = self.GetSize()
        make_window_transparent(self)
        self.canvas = parent.gl_canvas
        self.mouse_pos = wx.DefaultPosition
        self.start_pos = wx.DefaultPosition
        self.visualization = None
        # Relative (0..1) anchor of the legend centre on the canvas.
        self.width = 1
        self.height = 1
        self.dragging = False
        # Separate frame drawn behind the legend as a dimmed backdrop.
        self.translucent_background = wx.Frame(
            parent, wx.ID_ANY, pos=self.GetScreenPosition(), size=self.GetSize(),
            style=wx.FRAME_NO_TASKBAR | wx.FRAME_FLOAT_ON_PARENT
        )
        self.translucent_background.SetTransparent(150)
        self.translucent_background.SetBackgroundColour(wx.BLACK)
        self.legend_image = StaticImage(self, wx.ID_ANY, Image.new("RGBA", self.GetSize().Get()))
        self.legend_image.SetSize(self.GetSize())
        self.legend_image.fit = False
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(main_sizer)
        main_sizer.Add(self.legend_image, 0, wx.EXPAND | wx.BOTTOM, 0)
        # Mouse interaction happens on the image; backdrop clicks are
        # forwarded to it via OnBackgroundFocus.
        self.legend_image.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.legend_image.Bind(wx.EVT_MOTION, self.OnMotion)
        self.legend_image.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        self.legend_image.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self.OnCaptureLost)
        self.legend_image.Bind(wx.EVT_RIGHT_DOWN, self.OnRightClick)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)
        self.Bind(EVT_LEGEND_RENDERED, self.OnLegendRendered)
        self.translucent_background.Bind(wx.EVT_LEFT_DOWN, self.OnBackgroundFocus)
        self.translucent_background.Bind(wx.EVT_RIGHT_DOWN, self.OnBackgroundFocus)
        # Track every ancestor so the legend follows moves/repaints of the
        # whole window chain (unbound again in OnDestroy).
        parent = self.GetParent()
        while parent is not None:
            parent.Bind(wx.EVT_MOVE, self.OnMove)
            parent.Bind(wx.EVT_PAINT, self.OnPaintParent)
            parent = parent.GetParent()
        # Make RepaintLegend re-anchor to the bottom-left on its next pass.
        self.reset = True
def OnBackgroundFocus(self, event: wx.MouseEvent):
self.legend_image.SetFocus()
wx.PostEvent(self.legend_image, event)
def OnDestroy(self, event):
parent = self.GetParent()
while parent is not None:
parent.Unbind(wx.EVT_MOVE)
parent.Unbind(wx.EVT_PAINT)
parent = parent.GetParent()
event.Skip()
def OnLegendRendered(self, event: LegendRenderEvent):
self.legend_image.image = event.image
self.Refresh()
def CalculateProportions(self):
canvas_size = self.canvas.GetSize()
size = self.GetSize()
center = wx.Point(self.start_pos.x + size. x / 2, self.start_pos.y + size.y / 2)
min_x = (size.x / 2) / canvas_size.x
min_y = (size.y / 2) / canvas_size.y
max_x = (canvas_size.x - size.x / 2) / canvas_size.x
max_y = (canvas_size.y - size.y / 2) / canvas_size.y
self.width = center.x / canvas_size.x
if self.width <= min_x:
self.width = 0.0
elif self.width >= max_x:
self.width = 1.0
self.height = center.y / canvas_size.y
if self.height <= min_y:
self.height = 0.0
elif self.height >= max_y:
self.height = 1.0
    def RepaintLegend(self):
        """Re-anchor, clamp and resize the legend relative to the canvas."""
        canvas_pos = self.canvas.GetScreenPosition()
        canvas_size = self.canvas.GetSize()
        size = self.GetSize()
        if self.reset and self.IsShown():
            # Default anchor: bottom-left corner of the canvas.
            self.start_pos = wx.Point(0, canvas_size.y - size.y)
            self.CalculateProportions()
            self.reset = False
        # Place the legend so its centre sits at the stored fractional spot.
        x = canvas_pos.x + canvas_size.x * self.width - size.x / 2
        y = canvas_pos.y + canvas_size.y * self.height - size.y / 2
        # Clamp the legend fully inside the canvas rectangle.
        if x < canvas_pos.x:
            x = canvas_pos.x
        elif x + size.x > canvas_pos.x + canvas_size.x:
            x = canvas_pos.x + canvas_size.x - size.x
        if y < canvas_pos.y:
            y = canvas_pos.y
        elif y + size.y > canvas_pos.y + canvas_size.y:
            y = canvas_pos.y + canvas_size.y - size.y
        new_pos = wx.Point(x, y)
        self.SetPosition(new_pos)
        self.translucent_background.SetPosition(new_pos)
        # Shrink (never grow) to fit a canvas smaller than the nominal size.
        new_size = wx.Size(self.max_size)
        if canvas_size.x < self.max_size.x:
            new_size.SetWidth(canvas_size.x)
        if canvas_size.y < self.max_size.y:
            new_size.SetHeight(canvas_size.y)
        self.legend_image.SetSize(new_size)
        self.SetSize(new_size)
        self.translucent_background.SetSize(new_size)
        self.translucent_background.Refresh()
        self.legend_image.Refresh()
    def OnMove(self, event):
        # Keep the legend glued to the canvas while an ancestor window moves.
        self.RepaintLegend()
        event.Skip()
    def OnPaint(self, event):
        """Paint the (transparent) legend frame and its black backdrop."""
        dc = wx.BufferedPaintDC(self)
        # Transparent brush: this frame shows only the legend image itself.
        dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0), wx.BRUSHSTYLE_TRANSPARENT))
        dc.Clear()
        # NOTE(review): creating a BufferedPaintDC for a *different* window
        # inside this handler is unusual -- confirm it is intended.
        trans_dc = wx.BufferedPaintDC(self.translucent_background)
        trans_dc.Clear()
        trans_dc.SetBrush(wx.BLACK_BRUSH)
        trans_dc.DrawRectangle(0, 0, *self.GetSize().Get())
        self.RepaintLegend()
        event.Skip()
    def OnPaintParent(self, event):
        # Repaint whenever an ancestor repaints so the overlay stays in sync.
        self.Refresh()
        event.Skip()
    def OnLeftDown(self, event):
        """Begin dragging; capture the mouse so motion events keep arriving
        even when the cursor leaves the legend image."""
        self.dragging = True
        self.legend_image.CaptureMouse()
    def OnMotion(self, event: wx.MouseEvent):
        """Drag the legend with the mouse, clamped to the canvas bounds."""
        canvas_pos = self.canvas.GetScreenPosition()
        canvas_size = self.canvas.GetSize()
        if self.dragging and event.LeftIsDown():
            if self.mouse_pos.x != -1 and self.mouse_pos.y != -1:
                # Event coordinates are window-relative, so offsetting by the
                # stored grab point keeps that point under the cursor.
                pos = self.GetPosition()
                new_pos = wx.Point(pos.x + event.GetX() - self.mouse_pos.x, pos.y + event.GetY() - self.mouse_pos.y)
                size = self.GetSize()
                # Clamp so the legend never leaves the canvas rectangle.
                if new_pos.x < canvas_pos.x:
                    new_pos.x = canvas_pos.x
                if new_pos.y < canvas_pos.y:
                    new_pos.y = canvas_pos.y
                if new_pos.x + size.x > canvas_pos.x + canvas_size.x:
                    new_pos.x = canvas_pos.x + canvas_size.x - size.x
                if new_pos.y + size.y > canvas_pos.y + canvas_size.y:
                    new_pos.y = canvas_pos.y + canvas_size.y - size.y
                self.SetPosition(new_pos)
                self.translucent_background.SetPosition(new_pos)
            else:
                # First motion of a drag: record the grab point
                # (wx.DefaultPosition is (-1, -1) == "no anchor yet").
                self.mouse_pos = event.GetPosition()
        else:
            self.mouse_pos = wx.DefaultPosition
def OnLeftUp(self, event):
if self.legend_image.HasCapture():
self.dragging = False
self.legend_image.ReleaseMouse()
current_pos = self.GetPosition()
canvas_pos = self.canvas.GetScreenPosition()
self.start_pos = wx.Point(current_pos.x - canvas_pos.x, current_pos.y - canvas_pos.y)
self.CalculateProportions()
self.RepaintLegend()
    def OnCaptureLost(self, event):
        # The system can revoke mouse capture at any time; abandon the drag.
        self.dragging = False
    def OnRightClick(self, event):
        """Show the context menu at the click position."""
        menu = wx.Menu()
        # RESET_LEGEND is a menu id attribute defined outside this chunk.
        menu.Append(self.RESET_LEGEND, "Reset Legend")
        menu.Bind(wx.EVT_MENU, self.OnPopupMenu)
        self.PopupMenu(menu, event.GetPosition())
def OnPopupMenu(self, event: wx.MenuEvent):
id = event.GetId()
if id == self.RESET_LEGEND:
self.reset = True
self.RepaintLegend()
def ShowWindow(self):
self.mouse_pos = wx.DefaultPosition
self.translucent_background.Show()
self.Show()
self.RepaintLegend()
    def HideWindow(self):
        """Hide the legend together with its translucent backdrop."""
        self.translucent_background.Hide()
        self.Hide()
def RefreshLegend(self):
size = self.GetClientSize().Get()
if self.visualization is not N |
import random
def prio(lim=1000):
    """Return a list of *lim* random action ids.

    Each id is drawn uniformly from the inclusive range [1, 201]
    (``random.randint`` includes both endpoints).

    :param lim: number of ids to generate (default 1000, matching the
        original hard-coded count).
    """
    return [random.randint(1, 201) for _ in range(lim)]
|
__author__ = 'Tom'
import pickle
import urllib2
import os
import pymel.core as pm
import project_data as prj
reload(prj)
class updater():
def __init__(self):
self.master_url = 'https://raw.githubusercontent.com/tb-animator/tbtools/master/'
self.realPath = os.path.realpath(__file__)
self.basename = os.path.basename(__file__)
self.base_dir = os.path.normpath(os.path.dirname(__file__))
self.data_file = "prj_files.poo"
self.out_files = []
self.local_project_info = self.load_project_data_from_local()
self.version = pm.optionVar.get('tb_version', self.local_project_info.version )
self.project_info = self.load_project_data_from_git()
def check_version(self):
if self.project_info.version > self.version:
updaterWindow().showUI()
print "where's the window"
def get_url_dir(self, dir):
print "in", dir
print self.base_dir
out = dir.replace(self.base_dir,self.master_url).replace("\\","/")
return out
def load_project_data(self):
data = pickle.load(open(os.path.join(self.base_dir,self.data_file), "rb" ))
return data
def load_project_data_from_git(self):
url = self.master_url + self.data_file
print url
data = pickle.load(urllib2.urlopen(url, "rb"))
return data
def load_project_data_from_local(self):
file_location = os.path.join(self.base_dir+"\\",self.data_file)
print file_location
data = pickle.load(open(file_location, "rb"))
return data
def create_url(self, item):
url = (self.master_url + item).replace("\\","/")
return url
def read_from_url(self, url):
lines = []
data = urllib2.urlopen(url)
for line in data:
lines.append(line)
return lines
def copy_from_url(self, url, fileName):
if fileName:
dirName = os.path.split(fileName)[0]
if not os.path.isdir(dirName):
print "making folder", dirName
os.mkdir(dirName)
'''
# read the target script from git
01file_data = self.read_from_url(url)
print "downloading:: ", fileName
if file_data:
# nukes the current file
f = open(fileName,"w")
# writes into the file from the url
f.writelines(file_data)
f.close()
'''
print "dowloading file:", fileName
remote_file = urllib2.urlopen(url)
localFile = open(fileName, 'wb')
localFile.write(remote_file.read())
localFile.close()
else:
print "no fileName"
def download_project_files(self, win, *args):
files = self.project_info.scripts
print "downloading module to ", self.base_dir
for fileName in files:
# print self.base_dir, fileName
local_dir = '%s\%s' % (self.base_dir,fileName)
url = self.create_url(fileName)
# increment progress bar
win.step_bar()
# set current downloading label
| win.set_label(fileName)
try:
self.copy_from_url(url, local_dir)
except:
print "skipping", url
win.finish_bar()
pm.optionVar(floatValue=('tb_version', self.project_info.version) )
class updaterWindow():
    """Simple Maya UI prompting the user to install a pending update."""
    def __init__(self):
        self.project_data = updater().project_info

    def set_label(self, text=""):
        """Show the name of the file currently being downloaded."""
        pm.text(self.file_text, edit=True, label=text)

    def step_bar(self):
        """Advance the download progress bar by one file."""
        pm.progressBar(self.progress_bar, edit=True, step=1)

    def finish_bar(self):
        """Fill the progress bar and mark the download complete."""
        max_value = pm.progressBar(self.progress_bar, query=True, maxValue=True)
        # BUG FIX: the original wrote maxValue back to itself (a no-op);
        # the intent is to jump the bar's *progress* to its end.
        pm.progressBar(self.progress_bar, edit=True, progress=max_value)
        pm.text(self.file_text, edit=True, label="Complete")

    def showUI(self):
        """Build and display the update window."""
        if pm.window("update", exists=True):
            pm.deleteUI("update")
        window = pm.window("update", title="tb tools update")
        layout = pm.columnLayout(adjustableColumn=True)
        pm.text(font="boldLabelFont", label="There's a new version")
        pm.text(label=self.project_data.version)
        pm.text(label="release notes")
        # NOTE(review): 'relaseNotes' is the (misspelled) attribute name in
        # the pickled project data -- renaming it here would break loading;
        # fix it at the data source first.
        pm.scrollField(editable=True, wordWrap=True, text=self.project_data.relaseNotes)
        self.file_text = pm.text(label="")
        self.progress_bar = pm.progressBar(maxValue=len(self.project_data.scripts) - 1)
        pm.button(label='Update',
                  command=lambda *args: updater().download_project_files(self),
                  parent=layout)
        pm.button(label='Ignore this version', command=('cmds.deleteUI(\"' + window + '\", window=True)'), parent=layout)
        pm.button(label='Close', command=('cmds.deleteUI(\"' + window + '\", window=True)'), parent=layout)
        pm.setParent('..')
        pm.showWindow(window)
def update_hotkeys():
try:
import tb_keyCommands as tb_hotKeys
reload(tb_hotKeys)
tb_hotKeys.hotkey_tool().update_commands()
except:
print "warning, hotkey update failed, please restart maya"
|
import pymongo
import getpass
import os
import base64
import ConfigParser
import sys
from database import *
from pymongo.errors import DuplicateKeyError
db, logger = None, None
def setup_config(args):
'''Saves MongoDB settings to a configuration file'''
config = ConfigParser.SafeConfigParser()
config.add_section('MongoDB')
print 'Please | enter the settings for your MongoDB server:'
config.set('MongoDB', 'host', args.host or raw_i | nput('Host [localhost]: ') or 'localhost')
config.set('MongoDB', 'port', args.port or raw_input('Port [27017]: ') or '27017')
config.set('MongoDB', 'database', args.database or raw_input('Database [cssscl]: ') or 'cssscl')
#config.set('MongoDB', 'username', args.username or raw_input('Username [none]: '))
#config.set('MongoDB', 'password', args.password or getpass.getpass('Password [none]: '))
# Writing our configuration file
with open(os.path.expanduser('~/.cssscl/cssscl.cfg'), 'wb') as configfile:
config.write(configfile)
def main(args):
    '''Setup MongoDB for use by cssscl'''
    global db, logger
    logger = args.logging.getLogger(__name__)
    # Setup config files
    setup_config(args)
    # connect() comes from the database module (star import at file top).
    db = connect(args)
    logger.info('Done!')
# Entry-point guard: this module is meant to be run via the cssscl CLI,
# not directly.
if __name__ == '__main__':
    print 'This program should be run as part of the cssscl package:\n\t$ cssscl configure -h\n\tor\n\t$ /path/to/cssscl/bin/cssscl configure -h'
|
# Copyright 2011-2018 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later ve | rsion.
import os
from urllib.parse import quote_plus
from gi.repository import Gtk
import quodlibet
from quodlibet import _
from quodlibet import qltk
from quodlibet.formats import AudioFile
from quodlibet.pattern import Pattern
from quodlibet.plugins.songsmenu import So | ngsMenuPlugin
from quodlibet.qltk import Icons
from quodlibet.qltk.cbes import StandaloneEditor
from quodlibet.qltk.x import SeparatorMenuItem
from quodlibet.util import connect_obj, print_w, print_d
from quodlibet.util import website
from quodlibet.util.path import uri_is_valid
from quodlibet.util.tags import USER_TAGS, MACHINE_TAGS
class WebsiteSearch(SongsMenuPlugin):
    """Loads a browser with a URL designed to search on tags of the song.
    This may include a standard web search engine, eg Google, or a more
    specific site look-up. The URLs are customisable using tag patterns.
    """
    PLUGIN_ICON = Icons.APPLICATION_INTERNET
    PLUGIN_ID = "Website Search"
    PLUGIN_NAME = _("Website Search")
    PLUGIN_DESC_MARKUP = (_(
        "Searches your choice of website using any song tags.\n"
        "Supports patterns e.g. <tt>%(pattern-example)s</tt>.")
        % {"pattern-example": "https://duckduckgo.com?q=<~artist~title>"}
    )
    # Here are some starters...
    DEFAULT_URL_PATS = [
        ("Google song search",
         "https://google.com/search?q=<artist~title>"),
        ("Wikipedia (en) artist entry",
         "https://wikipedia.org/wiki/<albumartist|<albumartist>|<artist>>"),
        ("Musicbrainz album listing",
         "https://musicbrainz.org/<musicbrainz_albumid|release/"
         "<musicbrainz_albumid>|search?query=<album>&type=release>"),
        ("Discogs album search",
         "https://www.discogs.com/search?type=release&artist="
         "<albumartist|<albumartist>|<artist>>&title=<album>"),
        ("Youtube video search",
         "https://www.youtube.com/results?search_query=<artist~title>"),
        ("Go to ~website", "<website>"),
    ]
    # User-edited (name, URL pattern) pairs live under the user dir.
    PATTERNS_FILE = os.path.join(
        quodlibet.get_user_dir(), 'lists', 'searchsites')
    # Test hook: when True, URLs are built but no browser is launched.
    _no_launch = False
    def __set_site(self, name):
        # Remember which site entry the user picked from the submenu;
        # plugin_songs() uses it to choose the URL pattern.
        self.chosen_site = name
    def get_url_pattern(self, key):
        """Gets the pattern for a given key"""
        # Falls back to the first default pattern for unknown keys.
        return dict(self._url_pats).get(key, self.DEFAULT_URL_PATS[0][1])
    @classmethod
    def edit_patterns(cls, button):
        """Open the standalone editor for the saved URL patterns."""
        def valid_uri(s):
            # TODO: some pattern validation too (that isn't slow)
            try:
                p = Pattern(s)
                return (p and uri_is_valid(s))
            except ValueError:
                return False
        win = StandaloneEditor(filename=cls.PATTERNS_FILE,
                               title=_("Search URL patterns"),
                               initial=cls.DEFAULT_URL_PATS,
                               validator=valid_uri)
        win.show()
    @classmethod
    def PluginPreferences(cls, parent):
        """Build the preferences widget: a single edit button."""
        hb = Gtk.HBox(spacing=3)
        hb.set_border_width(0)
        button = qltk.Button(_("Edit search URLs"), Icons.EDIT)
        button.connect("clicked", cls.edit_patterns)
        hb.pack_start(button, True, True, 0)
        hb.show_all()
        return hb
    def _get_saved_searches(self):
        # Load user-saved patterns; fall back to the defaults when none exist.
        filename = self.PATTERNS_FILE + ".saved"
        self._url_pats = StandaloneEditor.load_values(filename)
        # Failing all else...
        if not len(self._url_pats):
            print_d("No saved searches found in %s. Using defaults." %
                    filename)
            self._url_pats = self.DEFAULT_URL_PATS
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.chosen_site = None
        self._url_pats = []
        # One submenu item per saved search, plus a link to the editor.
        submenu = Gtk.Menu()
        self._get_saved_searches()
        for name, url_pat in self._url_pats:
            item = Gtk.MenuItem(label=name)
            connect_obj(item, 'activate', self.__set_site, name)
            submenu.append(item)
        # Add link to editor
        configure = Gtk.MenuItem(label=_(u"Configure Searches…"))
        connect_obj(configure, 'activate', self.edit_patterns, configure)
        submenu.append(SeparatorMenuItem())
        submenu.append(configure)
        if submenu.get_children():
            self.set_submenu(submenu)
        else:
            self.set_sensitive(False)
    def plugin_songs(self, songs):
        """Open one browser tab per unique URL built from *songs*."""
        # Check this is a launch, not a configure
        if self.chosen_site:
            url_pat = self.get_url_pattern(self.chosen_site)
            pat = Pattern(url_pat)
            # Remove Nones, and de-duplicate collection
            urls = set(filter(None, (website_for(pat, s) for s in songs)))
            if not urls:
                print_w("Couldn't build URLs using \"%s\"."
                        "Check your pattern?" % url_pat)
                return False
            print_d("Got %d websites from %d songs" % (len(urls), len(songs)))
            if not self._no_launch:
                for url in urls:
                    website(url)
        return True
def website_for(pat, song):
    """Gets a utf-8 encoded string for a website from the given pattern"""
    # Build a sanitised stand-in for the song: copy every user/machine tag
    # plus ~filename, URL-escaping all values except the raw-passthrough
    # keys (escaping ~filename would break ~dirname/~basename; a raw
    # website tag is already a URL).
    subs = AudioFile()
    passthrough = ('website', '~filename')
    for tag in USER_TAGS + MACHINE_TAGS + ['~filename']:
        value = song.comma(tag)
        if not value:
            continue
        try:
            subs[tag] = value if tag in passthrough else quote_plus(value)
        except KeyError:
            print_d("Problem with %s tag values: %r" % (tag, value))
    return pat.format(subs) or None
|
# coding=utf-8
# Copyright 2017 Christopher Bartz <bartz@dkrz.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with th | e License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s | oftware
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .server import app
|
import django_filters
from django.contrib.auth import get_user_model
from .models import Sprint, Task
User = get_user_model()
class NullFilter(django_filters.BooleanFilter):
    """Filter on a field set as null or not."""
    def filter(self, qs, value):
        # No value supplied -> leave the queryset untouched.
        if value is None:
            return qs
        lookup = '%s__isnull' % self.name
        return qs.filter(**{lookup: value})
class SprintFilter(django_filters.FilterSet):
    """Filter sprints by an inclusive end-date range."""
    # NOTE: ``name``/``lookup_type`` are the older django-filter keyword
    # arguments (newer releases use field_name/lookup_expr).
    end_min = django_filters.DateFilter(name='end', lookup_type='gte')
    end_max = django_filters.DateFilter(name='end', lookup_type='lte')
    class Meta:
        model = Sprint
        fields = ('end_min', 'end_max', )
class TaskFilter(django_filters.FilterSet):
    """Filter tasks; ``backlog=True`` selects tasks with no sprint set."""
    backlog = NullFilter(name='sprint')
    class Meta:
        model = Task
        fields = ('sprint', 'status', 'assigned', 'backlog', )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Look up assigned users by username instead of primary key.
        self.filters['assigned'].extra.update(
            {'to_field_name': User.USERNAME_FIELD})
|
from lxml import etree
import sys
REQUEST_BODY_PART_1 = '<![CDATA[actualEvent='
REQUEST_BO | DY_PART_2 = '&queryEmail='
REQUEST_BODY_PART_3 = ']]>'
CONTENT_TYPE = 'Content-type: application/x-www-form-urlencoded'
def usage():
    # Print the command-line synopsis (Python 2 print statement).
    print "python create_test_case [URL]"\
          " [EVENT_NAME] [AMOUNT_CASES] [TEST_CASE_FILENAME]"
def main():
    # Generates an XML file of HTTP test cases, one POST per synthetic
    # guest email, from the command-line arguments (see usage()).
    if len(sys.argv) != 5:
        usage()
        # BUG FIX: the original fell through after printing usage and
        # crashed with IndexError on sys.argv below.
        sys.exit(1)
    root = etree.Element('testcases')
    url = sys.argv[1]
    event = sys.argv[2]
    amount_cases = int(sys.argv[3])
    test_case_filename = sys.argv[4]
    # NOTE(review): range(1, amount_cases) emits amount_cases - 1 cases;
    # presumably intentional (1-based case ids) -- confirm with the author.
    for case in range(1, amount_cases):
        case_node = etree.Element('case')
        etree.SubElement(case_node, 'url').text = url + "/query_guest"
        etree.SubElement(case_node, 'method').text = 'POST'
        body = REQUEST_BODY_PART_1 + event + REQUEST_BODY_PART_2 + "Email" + str(case) + REQUEST_BODY_PART_3
        etree.SubElement(case_node, 'body').text = body
        etree.SubElement(case_node, 'add_header').text = CONTENT_TYPE
        root.append(case_node)
    etree.ElementTree(root).write(test_case_filename,
                                  pretty_print=True,
                                  encoding='iso-8859-1')
# Standard entry-point guard.
if __name__ == "__main__":
    main()
|
from dja | ngo. | apps import AppConfig
class ReqsConfig(AppConfig):
    """Django application configuration for the ``reqs`` app."""
    name = 'reqs'
|
import sys
import os
import importlib
import glob
# Import and instantiate each Cmd object from the modules in this package.
_this_dir = os.path.dirname(__file__)      # directory containing this package
_this_mod = os.path.basename(_this_dir)    # package name, used for imports
|
def build_cmds(sub_parser):
    """Discover every command module in this package, instantiate its Cmd
    class against *sub_parser*, and return the instances keyed by name."""
    commands = {}
    paths = glob.glob(os.path.join(_this_dir, "*.py"))
    # The package marker and the shared base class are not commands.
    paths.remove(os.path.join(_this_dir, "__init__.py"))
    paths.remove(os.path.join(_this_dir, "base.py"))
    module_names = [os.path.splitext(os.path.basename(p))[0] for p in paths]
    for name in module_names:
        mod = importlib.import_module("pcm." + _this_mod + '.' + name)
        if hasattr(mod, 'Cmd'):
            cmd = mod.Cmd(sub_parser)
            commands[mod.Cmd.name] = cmd
            cmd.build()
    return commands
#build_cmds()
|
""
if protocol is None:
protocol = ssl.PROTOCOL_SSLv23
ctx = _SSLContext(protocol)
ctx.load_cert_chain(cert_file, pkey_file)
return ctx
class _SSLContext(object):
'''A dummy class with a small subset of Python3's ``ssl.SSLContext``, only
intended to be used with and by Werkzeug.'''
def __init__(self, protocol):
self._protocol = protocol
self._certfile = None
self._keyfile = None
self._password = None
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._certfile = certfile
self._keyfile = keyfile or certfile
self._password = password
def wrap_socket(self, sock, **kwargs):
return ssl.wrap_socket(sock, keyfile=self._keyfile,
certfile=self._certfile,
ssl_version=self._protocol, **kwargs)
def is_ssl_error(error=None):
    """Checks if the given error (or the current one) is an SSL error."""
    if error is None:
        error = sys.exc_info()[1]
    exc_types = (ssl.SSLError,)
    try:
        # pyOpenSSL raises its own error type; include it when available.
        from OpenSSL.SSL import Error
    except ImportError:
        pass
    else:
        exc_types += (Error,)
    return isinstance(error, exc_types)
def select_ip_version(host, port):
    """Returns AF_INET4 or AF_INET6 depending on where to connect to."""
    # getaddrinfo-based detection is deliberately disabled: it misbehaves
    # with current IPv6 implementations on various operating systems, so we
    # fall back to a literal-address check (a colon implies IPv6).
    use_v6 = ':' in host and hasattr(socket, 'AF_INET6')
    return socket.AF_INET6 if use_v6 else socket.AF_INET
class BaseWSGIServer(HTTPServer, object):
    """Simple single-threaded, single-process WSGI server."""
    multithread = False
    multiprocess = False
    request_queue_size = 128
    def __init__(self, host, port, app, handler=None,
                 passthrough_errors=False, ssl_context=None):
        # handler defaults to the module's WSGIRequestHandler
        # (defined elsewhere in this file).
        if handler is None:
            handler = WSGIRequestHandler
        self.address_family = select_ip_version(host, port)
        HTTPServer.__init__(self, (host, int(port)), handler)
        self.app = app
        self.passthrough_errors = passthrough_errors
        self.shutdown_signal = False
        self.host = host
        self.port = port
        if ssl_context is not None:
            # Accept (certfile, keyfile) tuples and the 'adhoc' shorthand
            # for a freshly generated self-signed context.
            if isinstance(ssl_context, tuple):
                ssl_context = load_ssl_context(*ssl_context)
            if ssl_context == 'adhoc':
                ssl_context = generate_adhoc_ssl_context()
            self.socket = ssl_context.wrap_socket(self.socket,
                                                  server_side=True)
            self.ssl_context = ssl_context
        else:
            self.ssl_context = None
    def log(self, type, message, *args):
        # Delegate to the module-level logging helper.
        _log(type, message, *args)
    def serve_forever(self):
        """Serve until KeyboardInterrupt, printing the address banner once."""
        self.shutdown_signal = False
        try:
            # Only the original process (not the reloader child, which sets
            # WERKZEUG_RUN_MAIN) prints the startup banner.
            if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
                display_hostname = self.host != '*' and self.host or 'localhost'
                if ':' in display_hostname:
                    display_hostname = '[%s]' % display_hostname
                quit_msg = '(Press CTRL+C to quit)'
                _log('info', ' * Running on %s://%s:%d/ %s', self.ssl_context is None
                     and 'http' or 'https', display_hostname, self.port, quit_msg)
            HTTPServer.serve_forever(self)
        except KeyboardInterrupt:
            pass
        finally:
            self.server_close()
    def handle_error(self, request, client_address):
        # passthrough_errors lets an outer debugger see the raw exception
        # instead of the default traceback-print behaviour.
        if self.passthrough_errors:
            raise
        else:
            return HTTPServer.handle_error(self, request, client_address)
    def get_request(self):
        con, info = self.socket.accept()
        return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
    """A WSGI server that does threading."""
    # ThreadingMixIn handles each request in its own thread.
    multithread = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
    """A WSGI server that does forking."""
    multiprocess = True
    def __init__(self, host, port, app, processes=40, handler=None,
                 passthrough_errors=False, ssl_context=None):
        # processes caps the number of concurrent child processes
        # (ForkingMixIn's max_children).
        BaseWSGIServer.__init__(self, host, port, app, handler,
                                passthrough_errors, ssl_context)
        self.max_children = processes
def make_server(host, port, app=None, threaded=False, processes=1,
                request_handler=None, passthrough_errors=False,
                ssl_context=None):
    """Create a new server instance that is either threaded, or forks
    or just processes one request after another.
    """
    if threaded and processes > 1:
        raise ValueError("cannot have a multithreaded and "
                         "multi process server.")
    common = (host, port, app, request_handler, passthrough_errors, ssl_context)
    if threaded:
        return ThreadedWSGIServer(*common)
    if processes > 1:
        # The forking server additionally takes the process count.
        return ForkingWSGIServer(host, port, app, processes, request_handler,
                                 passthrough_errors, ssl_context)
    return BaseWSGIServer(*common)
def is_running_from_reloader():
    """Checks if the application is running from within the Werkzeug
    reloader subprocess.
    .. versionadded:: 0.10
    """
    # The reloader marks its child process via this environment variable.
    flag = os.environ.get('WERKZEUG_RUN_MAIN')
    return flag == 'true'
def run_simple(hostname, port, application, use_reloader=False,
use_debugger=False, use_evalex=True,
extra_files=None, reloader_interval=1,
reload | er_type='auto', threaded=False,
processes=1, request_handler=None, static_files=None,
passthrough_errors=False, ssl_context=None):
"""Start a WSGI application. Optional features include a reloader,
multithreading and fork support.
This function has a command-line interface too::
python -m werkzeug.serving --help
.. versionadded:: 0.5
`static_files` was added to simplify serving of static files as well
as `passthrough_errors`.
.. versionadded:: 0.6
support for SSL was added.
.. versionadded:: 0.8
Added support for automatically loading a SSL context from certificate
file and private key.
.. versionadded:: 0.9
Added command-line interface.
.. versionadded:: 0.10
Improved the reloader and added support for changing the backend
through the `reloader_type` parameter. See :ref:`reloader`
for more information.
:param hostname: The host for the application. eg: ``'localhost'``
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param reloader_type: the type of reloader to use. The default is
auto detection. Valid values are ``'stat'`` and
``'watchdog'``. See :ref:`reloader` for more
information.
:param threaded: should the process handle each request in a separate
thread?
:param processes: if greater than 1 then handle each request in a new process
up to this maximum number of c |
r),
KeyValue(key = 'Output', value = stdout) ]
return diag_vals, diag_msgs, diag_level
tmp = stdout.strip()
if unicode(tmp).isnumeric():
temp = float(tmp) / 1000
diag_vals.append(KeyValue(key = 'Core %d Temp' % index, value = str(temp)))
if temp >= warm_threshold and temp < hot_threshold:
diag_level = max(diag_level, DiagnosticStatus.WARN)
diag_msgs.append('Warm')
if temp >= hot_threshold:
diag_level = max(diag_level, DiagnosticStatus.ERROR)
diag_msgs.append('Hot')
else:
diag_level = max(diag_level, DiagnosticStatus.ERROR) # Error if not numeric value
diag_vals.append(KeyValue(key = 'Core %s Temp' % index, value = tmp))
return diag_vals, diag_msgs, diag_level
## Checks clock speed from reading from CPU info
def check_clock_speed(enforce_speed):
    # Reads per-core clock speeds from /proc/cpuinfo; WARN when a core sits
    # between 2150 and 2240 MHz and ERROR at or below 2150 MHz (thresholds
    # are hard-coded for this machine). When enforce_speed is False the
    # level is forced back to OK but per-core values are still reported.
    # Returns (vals, msgs, lvl).
    vals = []
    msgs = []
    lvl = DiagnosticStatus.OK
    try:
        p = subprocess.Popen('cat /proc/cpuinfo | grep MHz',
                             stdout = subprocess.PIPE,
                             stderr = subprocess.PIPE, shell = True)
        stdout, stderr = p.communicate()
        retcode = p.returncode
        if retcode != 0:
            lvl = DiagnosticStatus.ERROR
            msgs = [ 'Clock speed error' ]
            vals = [ KeyValue(key = 'Clock speed error', value = stderr),
                     KeyValue(key = 'Output', value = stdout) ]
            return (vals, msgs, lvl)
        for index, ln in enumerate(stdout.split('\n')):
            words = ln.split(':')
            if len(words) < 2:
                continue
            speed = words[1].strip().split('.')[0] # Conversion to float doesn't work with decimal
            vals.append(KeyValue(key = 'Core %d MHz' % index, value = speed))
            if unicode(speed).isnumeric():
                mhz = float(speed)
                if mhz < 2240 and mhz > 2150:
                    lvl = max(lvl, DiagnosticStatus.WARN)
                if mhz <= 2150:
                    lvl = max(lvl, DiagnosticStatus.ERROR)
            else:
                # Automatically give error if speed isn't a number
                lvl = max(lvl, DiagnosticStatus.ERROR)
        # When not enforcing, downgrade whatever was detected back to OK.
        if not enforce_speed:
            lvl = DiagnosticStatus.OK
        if lvl == DiagnosticStatus.WARN and enforce_speed:
            msgs = [ 'Core slowing' ]
        elif lvl == DiagnosticStatus.ERROR and enforce_speed:
            msgs = [ 'Core throttled' ]
    except Exception, e:
        rospy.logerr(traceback.format_exc())
        lvl = DiagnosticStatus.ERROR
        msgs.append('Exception')
        vals.append(KeyValue(key = 'Exception', value = traceback.format_exc()))
    return vals, msgs, lvl
# Add msgs output, too
##\brief Uses 'uptime' to see load average
def check_uptime(load1_threshold, load5_threshold):
level = DiagnosticStatus.OK
vals = []
load_dict = { 0: 'OK', 1: 'High Load', 2: 'Very High Load' }
try:
p = subprocess.Popen('uptime', stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
vals.append(KeyValue(key = 'uptime Failed', value = stderr))
return DiagnosticStatus.ERROR, vals
upvals = stdout.split()
load1 = upvals[-3].rstrip(',')
load1 = load1.replace(',','.') #Correct locale for conversion
load5 = upvals[-2].rstrip(',')
load5 = load5.replace(',','.') #Correct locale for conversion
load15 = upvals[-1]
num_users = upvals[-7]
# Give warning if we go over load limit
if float(load1) > load1_threshold or float(load5) > load5_threshold:
level = DiagnosticStatus.WARN
vals.append(KeyValue(key = 'Load Average Status', value = load_dict[level]))
vals.append(KeyValue(key = '1 min Load Average', value = load1))
vals.append(KeyValue(key = '1 min Load Average Threshold', value = str(load1_threshold)))
vals.append(KeyValue(key = '5 min Load Average', value = load5))
vals.append(KeyValue(key = '5 min Load Average Threshold', value = str(load5_threshold)))
vals.append(KeyValue(key = '15 min Load Average', value = load15))
vals.append(KeyValue(key = 'Number of Users', value = num_users))
except Exception, e:
rospy.logerr(traceback.format_exc())
level = DiagnosticStatus.ERROR
vals.append(KeyValue(key = 'Load Average Status', value = traceba | ck.format_exc()))
return level, load_dict[level], vals
# Add msgs output
##\brief Uses 'free -m' to check free memory
def check_memory():
values = []
level = DiagnosticStatus.OK
msg = ''
mem_dict = { 0: 'OK', 1: 'Low Memory', 2: 'Very Low Memory' }
try:
p = subprocess.Popen('free -m',
| stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
values.append(KeyValue(key = "\"free -m\" Call Error", value = str(retcode)))
return DiagnosticStatus.ERROR, values
rows = stdout.split('\n')
data = rows[1].split()
total_mem = data[1]
used_mem = data[2]
free_mem = data[3]
level = DiagnosticStatus.OK
if float(free_mem) < 25:
level = DiagnosticStatus.WARN
if float(free_mem) < 1:
level = DiagnosticStatus.ERROR
values.append(KeyValue(key = 'Memory Status', value = mem_dict[level]))
values.append(KeyValue(key = 'Total Memory', value = total_mem))
values.append(KeyValue(key = 'Used Memory', value = used_mem))
values.append(KeyValue(key = 'Free Memory', value = free_mem))
msg = mem_dict[level]
except Exception, e:
rospy.logerr(traceback.format_exc())
msg = 'Memory Usage Check Error'
values.append(KeyValue(key = msg, value = str(e)))
level = DiagnosticStatus.ERROR
return level, mem_dict[level], values
##\brief Use mpstat to find CPU usage
##
# Module state shared across check_mpstat() calls.
usage_old = 0  # presumably the previous usage sample -- confirm in check_mpstat
has_warned_mpstat = False  # ensures the mpstat-failure error is logged only once
has_error_core_count = False  # presumably set on core-count mismatch -- confirm
def check_mpstat(core_count = -1):
vals = []
mp_level = DiagnosticStatus.OK
load_dict = { 0: 'OK', 1: 'High Load', 2: 'Error' }
try:
p = subprocess.Popen('mpstat -P ALL 1 1',
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
global has_warned_mpstat
if not has_warned_mpstat:
rospy.logerr("mpstat failed to run for cpu_monitor. Return code %d.", retcode)
has_warned_mpstat = True
mp_level = DiagnosticStatus.ERROR
vals.append(KeyValue(key = '\"mpstat\" Call Error', value = str(retcode)))
return mp_level, 'Unable to Check CPU Usage', vals
# Check which column '%idle' is, #4539
# mpstat output changed between 8.06 and 8.1
rows = stdout.split('\n')
col_names = rows[2].split()
idle_col = -1 if (len(col_names) > 2 and col_names[-1] == '%idle') else -2
num_cores = 0
cores_loaded = 0
for index, row in enumerate(stdout.split('\n')):
if index < 3:
continue
# Skip row containing 'all' data
if row.find('all') > -1:
continue
lst = row.split()
if len(lst) < 8:
continue
## Ignore 'Average: ...' data
if lst[0].startswith('Average'):
continue
cpu_name = '%d' % (num_cores)
idle = lst[idle_col].replace(',', '.')
user = lst[3].replace(',', '.')
nice = lst[4].replace(',', '.')
system = lst[5].replace(',', '.')
core_level = 0
|
des the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg. NOTE: You should use *default* instead of subclassing
whenever possible.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not iterable_as_array
and not bigint_as_string | and not sort_keys
and not item_sort_key and not for_json
and not ignore_nan and int_as_stri | ng_bitcount is None
and not kw
):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
iterable_as_array=iterable_as_array,
bigint_as_string=bigint_as_string,
sort_keys=sort_keys,
item_sort_key=item_sort_key,
for_json=for_json,
ignore_nan=ignore_nan,
int_as_string_bitcount=int_as_string_bitcount,
**kw).encode(obj)
# Shared default decoder (counterpart to _default_encoder above), built once
# with no customisations so it can be reused across calls.
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
                               object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
        **kw):
    """Deserialize a JSON document read from the file-like object ``fp``.

    This is a thin convenience wrapper: the whole stream is read into
    memory and handed to :func:`loads`, so each keyword below has the same
    meaning as there.

    *encoding* -- encoding used to interpret :class:`str` input
    (``'utf-8'`` by default); ignored for :class:`unicode` input. Only
    encodings that are a superset of ASCII work.
    *object_hook* -- called with every decoded JSON object; its return
    value replaces the plain :class:`dict` (useful e.g. for JSON-RPC class
    hinting).
    *object_pairs_hook* -- like *object_hook*, but receives an ordered list
    of key/value pairs (e.g. :func:`collections.OrderedDict` keeps
    insertion order); takes priority over *object_hook* when both are set.
    *parse_float* / *parse_int* -- alternative parsers for JSON numbers
    (defaults behave like ``float(num_str)`` / ``int(num_str)``).
    *parse_constant* -- called with ``'-Infinity'``, ``'Infinity'`` or
    ``'NaN'``; may raise to reject invalid JSON numbers.
    *use_decimal* -- if true (default ``False``), implies
    ``parse_float=decimal.Decimal`` for parity with ``dump``.
    *cls* -- a custom ``JSONDecoder`` subclass; prefer the hooks over
    subclassing whenever possible.

    NOTE: *namedtuple_as_object* and *tuple_as_array* are accepted here but
    not forwarded to the decoder (kept for signature symmetry with
    ``dump``).
    """
    document = fp.read()
    return loads(document,
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
        use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize the JSON document in ``s`` (``str`` or ``unicode``).

    *encoding* -- encoding used to interpret :class:`str` input
    (``'utf-8'`` by default); ignored for :class:`unicode` input. Only
    encodings that are a superset of ASCII work.
    *object_hook* -- called with every decoded JSON object; its return
    value replaces the plain :class:`dict` (useful e.g. for JSON-RPC class
    hinting).
    *object_pairs_hook* -- like *object_hook*, but receives an ordered list
    of key/value pairs (e.g. :func:`collections.OrderedDict` keeps
    insertion order); takes priority over *object_hook* when both are set.
    *parse_float* / *parse_int* -- alternative parsers for JSON numbers
    (defaults behave like ``float(num_str)`` / ``int(num_str)``).
    *parse_constant* -- called with ``'-Infinity'``, ``'Infinity'`` or
    ``'NaN'``; may raise to reject invalid JSON numbers.
    *use_decimal* -- if true (default ``False``), implies
    ``parse_float=decimal.Decimal`` for parity with ``dump``.
    *cls* -- a custom ``JSONDecoder`` subclass; prefer the hooks over
    subclassing whenever possible.
    """
    # Fast path: every knob at its default means the shared module-level
    # decoder can be reused directly.
    customized = (cls, encoding, object_hook, object_pairs_hook,
                  parse_float, parse_int, parse_constant)
    if (not use_decimal and not kw
            and all(option is None for option in customized)):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Forward only the hooks/parsers the caller actually supplied.
    overrides = (
        ('object_hook', object_hook),
        ('object_pairs_hook', object_pairs_hook),
        ('parse_float', parse_float),
        ('parse_int', parse_int),
        ('parse_constant', parse_constant),
    )
    for keyword, value in overrides:
        if value is not None:
            kw[keyword] = value
    if use_decimal:
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
from . import decoder as dec
from . import encoder as enc
from . import scanner as scan
c_make_encoder = _import_c_make_encoder()
if enabled:
dec.scanstring = dec.c_scanstring or dec.py_scanstring
enc.c_make_encoder = c_make_encoder
enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
|
ware: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This project is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along with this project. If not, see
# <http://www.gnu.org/licenses/>.
import os
import re
from contextlib import contextmanager
from unittest import mock
from celery import task
from pyvirtualdisplay import Display
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.support.wait import WebDriverWait
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import TestCase as DjangoTestCase
VIRTUAL_DISPLAY = os.environ.get('VIRTUAL_DISPLAY', 'y').lower().strip() == 'y'
class HomepageTestCaseMixin(object):
    """Assertion helpers for Celery task dispatch in homepage tests."""

    def assertIsTask(self, t, expected):
        """Check that *t* is the task obtained by decorating *expected*."""
        self.assertEqual(t, task(expected))

    def assertTaskCount(self, mocked, count):
        """Check that exactly *count* tasks were dispatched via *mocked*."""
        self.assertEqual(mocked.call_count, count)

    def assertNoTasks(self, mocked):
        """Check that no task at all was dispatched."""
        self.assertTaskCount(mocked, 0)

    def assertTaskCall(self, mocked, task, *args, **kwargs):
        """Check that *mocked* dispatched *task* with the given args/kwargs."""
        self.assertTrue(mocked.called)
        call_args, call_kwargs = mocked.call_args
        # apply_async receives task args/kwargs as tuple/dict arg
        self.assertEqual(call_kwargs, {})
        instance, called_args, called_kwargs = call_args
        self.assertIsTask(instance, task)
        self.assertEqual(args, called_args)
        self.assertEqual(kwargs, called_kwargs)

    @contextmanager
    def mock_celery(self):
        """Patch Celery so tasks run synchronously; yields the patch mock."""
        def run_synchronously(self, args, kwargs):
            return self.run(*args, **kwargs)
        with mock.patch('celery.app.task.Task.apply_async',
                        side_effect=run_synchronously, autospec=True) as mocked:
            yield mocked
class SeleniumMixin(object):
    """Drive a real Firefox (geckodriver) browser for live-server tests.

    When the VIRTUAL_DISPLAY environment switch is on (the default), the
    browser runs inside a pyvirtualdisplay X display so that tests also
    work on headless machines.
    """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        if VIRTUAL_DISPLAY:
            # Start the invisible X display before launching the browser.
            cls.vdisplay = Display(visible=0, size=(1024, 768))
            cls.vdisplay.start()
        cls.selenium = WebDriver(executable_path=settings.GECKODRIVER_PATH)
    @classmethod
    def tearDownClass(cls):
        # Quit the browser first, then tear down the display it rendered to.
        cls.selenium.quit()
        if VIRTUAL_DISPLAY:
            cls.vdisplay.stop()
        super().tearDownClass()
    class wait_for_css_property(object):
        """Expected-condition callable: element's CSS *prop* equals *value*.

        Instances are handed to WebDriverWait.until(). Note: accessing this
        nested class through ``self`` yields the class itself, so
        ``self.wait_for_css_property(elem, prop, value)`` constructs one.
        """
        def __init__(self, elem, prop, value):
            self.elem = elem
            self.prop = prop
            self.value = value
        def __call__(self, driver):
            # WebDriverWait protocol: falsy -> keep polling, truthy -> done.
            if self.elem.value_of_css_property(self.prop) == self.value:
                return self.elem
            else:
                return False
    def wait_for_display(self, elem, wait=2):
        """Block until *elem* reports is_displayed() (up to *wait* seconds)."""
        WebDriverWait(self.selenium, wait).until(lambda d: elem.is_displayed())
    def wait_for_page_load(self, wait=2):
        """Block until the current page has a <body> element."""
        WebDriverWait(self.selenium, wait).until(lambda driver: driver.find_element_by_tag_name('body'))
    def wait_for_valid_form(self, form=None, wait=2):
        """Wait until a form becomes valid according to HTML5 form validation.
        The registration form becomes valid only after a split second, for some reason.
        """
        if form is None:
            form = self.find('form')
        WebDriverWait(self.selenium, wait).until(
            lambda driver: self.selenium.execute_script('return arguments[0].checkValidity() === true', form))
    def wait_for_focus(self, elem):
        # when an element gets focus, it turns blue:
        wait = WebDriverWait(self.selenium, 10)
        wait.until(self.wait_for_css_property(elem, 'border-top-color', 'rgb(128, 189, 255)'))
    def wait_for_invalid(self, elem):
        """Wait for the red "invalid" top border on *elem*."""
        wait = WebDriverWait(self.selenium, 10)
        wait.until(self.wait_for_css_property(elem, 'border-top-color', 'rgb(220, 53, 69)'))
    def wait_for_valid(self, elem):
        """Wait for the green "valid" top border on *elem*."""
        wait = WebDriverWait(self.selenium, 10)
        wait.until(self.wait_for_css_property(elem, 'border-top-color', 'rgb(40, 167, 69)'))
    def find(self, selector):
        """Find an element by CSS selector."""
        return self.selenium.find_element_by_css_selector(selector)
    def get_classes(self, elem):
        """Get CSS classes from the passed Element."""
        return re.split(r'\s+', elem.get_attribute('class').strip())
    def get_validity(self, elem):
        """Get validity object from a HTML5 form field."""
        return self.selenium.execute_script('return arguments[0].validity', elem)
    def get_valid(self, elem):
        """Return the HTML5 ``validity.valid`` flag of *elem*."""
        val = self.get_validity(elem)
        return val['valid']
    def assertNoElementExists(self, selector, wait=0):
        """Assert that no element with the passed selector is present on the page."""
        if wait:
            # Positive wait: the element must not appear within the window.
            with self.assertRaises(TimeoutException):
                WebDriverWait(self.selenium, wait).until(lambda d: self.find(selector))
        else:
            with self.assertRaises(NoSuchElementException):
                self.find(selector)
    def assertDisplayed(self, elem):
        """Assert the element (or CSS selector) is currently displayed."""
        if isinstance(elem, str):
            elem = self.find(elem)
        self.assertTrue(elem.is_displayed())
    def assertNotDisplayed(self, elem):
        """Assert the element (or CSS selector) is currently hidden."""
        if isinstance(elem, str):
            elem = self.find(elem)
        self.assertFalse(elem.is_displayed())
    def assertClass(self, elem, cls):
        """Assert that an element has a CSS class."""
        self.assertIn(cls, self.get_classes(elem))
    def assertNotClass(self, elem, cls):
        """Assert that an element does **not** have a CSS class."""
        self.assertNotIn(cls, self.get_classes(elem))
    def assertCSSBorderColor(self, elem, color):
        """Assert that an element has a given border color (all four sides)."""
        self.assertEqual(elem.value_of_css_property('border-right-color'), color)
        self.assertEqual(elem.value_of_css_property('border-left-color'), color)
        self.assertEqual(elem.value_of_css_property('border-top-color'), color)
        self.assertEqual(elem.value_of_css_property('border-bottom-color'), color)
    def assertNotValidated(self, fg, elem):
        """Assert that a Bootstrap input element is not validated."""
        self.assertNotClass(fg, 'was-validated')
        for feedback in fg.find_elements_by_css_selector('.invalid-feedback'):
            self.assertFalse(feedback.is_displayed())
        if self.selenium.switch_to.active_element != elem: # passed element is not currently active
            self.assertCSSBorderColor(elem, 'rgb(206, 212, 218)')
        else:
            # focused elements show the blue focus border instead
            self.assertCSSBorderColor(elem, 'rgb(128, 189, 255)')
    def assertInvalid(self, fg, elem, *errors):
        """Assert that a Bootstrap input element validates as invalid.

        *errors* are suffixes of expected ``invalid-<error>`` feedback
        classes; matching feedback must be visible, all other hidden.
        """
        self.assertClass(fg, 'was-validated')
        errors = set(['invalid-%s' % e for e in errors])
        for feedback in fg.find_elements_by_css_selector('.invalid-feedback'):
            classes = set(self.get_classes(feedback))
            if errors & classes:
                self.assertTrue(feedback.is_displayed(), '.%s is not displayed' % ('.'.join(classes)))
            else:
                self.assertFalse(feedback.is_displayed(), '.%s is displayed' % ('.'.join(classes)))
        self.wait_for_invalid(elem)
        self.assertFalse(self.get_valid(elem))
    def assertValid(self, fg, elem):
        """Assert that a Bootstrap input element validates as valid."""
        self.assertClass(fg, 'was-validated')
        for feedback in fg.find_elements_by_css_selector('.invalid-feedback'):
            self.assertFalse(feedback.is_displayed())
        self.wait_for_valid(elem)
        self.assertTrue(self.get_valid(elem))
class TestCase(HomepageTestCaseMixin, DjangoTestCase):
    """Plain (non-browser) Django test case with the Celery helpers mixed in."""
    pass
class SeleniumTestCase(Seleniu |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.integrated.v3 import test_servers
class LockServerSamplesJsonTest(test_servers.ServersSampleBase):
    """API samples for the os-lock-server server actions."""
    extension_name = "os-lock-server"

    def setUp(self):
        """Create the server used by each lock/unlock sample test."""
        super(LockServerSamplesJsonTest, self).setUp()
        self.uuid = self._post_server()

    def test_post_lock_server(self):
        """The lock-server action sample must return HTTP 202."""
        resp = self._do_post('servers/%s/action' % self.uuid,
                             'lock-server', {})
        self.assertEqual(resp.status, 202)

    def test_post_unlock_server(self):
        """Lock first, then the unlock-server sample must return HTTP 202."""
        self.test_post_lock_server()
        resp = self._do_post('servers/%s/action' % self.uuid,
                             'unlock-server', {})
        self.assertEqual(resp.status, 202)
|
# -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LI | ABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Python Cryptography Toolkit
A collection of cryptographic modules implementing various algorithms
and protocols.
Subpackages:
Crypto.Cipher
Secret- | key (AES, TDES, Salsa20, ChaCha20, CAST, Blowfish, ARC4) and public-key encryption (RSA PKCS#1) algorithms
Crypto.Hash
Hashing algorithms (SHA-1, SHA-2, SHA-3, BLAKE2, HMAC, MD5)
Crypto.IO
Encodings useful for cryptographic data (PEM, PKCS#8)
Crypto.Protocol
Cryptographic protocols (key derivation functions, Shamir's Secret Sharing scheme)
Crypto.PublicKey
Public-key generation, import, export (RSA, DSA, ECC)
Crypto.Signature
Public-key signature algorithms (RSA PKCS#1, DSA, ECDSA)
Crypto.Util
Various useful modules and functions (padding, ASN.1, XOR)
"""
# Subpackages exported by ``from Crypto import *``.
__all__ = ['Cipher', 'Hash', 'Protocol', 'PublicKey', 'Util', 'Signature',
    'IO', 'Math']
# Package version as a (major, minor, micro) tuple.
version_info = (3, 4, 5)
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import _, api, fields, models
class Job(models.Model):
    """Recruitment extension of the generic ``hr.job`` position model.

    Adds application tracking, related documents, an optional interview
    survey and a mail alias that turns incoming email into applicants.
    """
    _inherit = "hr.job"
    _name = "hr.job"
    _inherits = {'mail.alias': 'alias_id'}

    @api.model
    def _default_address_id(self):
        """Default job location: the current user's company partner."""
        return self.env.user.company_id.partner_id

    address_id = fields.Many2one(
        'res.partner', "Job Location", default=_default_address_id,
        help="Address where employees are working")
    application_ids = fields.One2many('hr.applicant', 'job_id', "Applications")
    application_count = fields.Integer(compute='_compute_application_count', string="Applications")
    manager_id = fields.Many2one(
        'hr.employee', related='department_id.manager_id', string="Department Manager",
        readonly=True, store=True)
    user_id = fields.Many2one('res.users', "Recruitment Responsible", track_visibility='onchange')
    stage_ids = fields.Many2many(
        'hr.recruitment.stage', 'job_stage_rel', 'job_id', 'stage_id',
        'Job Stages',
        default=[(0, 0, {'name': _('New')})])
    document_ids = fields.One2many('ir.attachment', compute='_compute_document_ids', string="Applications")
    documents_count = fields.Integer(compute='_compute_document_ids', string="Documents")
    survey_id = fields.Many2one(
        'survey.survey', "Interview Form",
        help="Choose an interview form for this job position and you will be able to print/answer this interview from all applicants who apply for this job")
    alias_id = fields.Many2one(
        'mail.alias', "Alias", ondelete="restrict", required=True,
        help="Email alias for this job position. New emails will automatically create new applicants for this job position.")
    color = fields.Integer("Color Index")

    def _compute_document_ids(self):
        """Compute attachments linked to each job.

        An attachment counts for a job when it is linked either to the job
        record itself or to one of its not-yet-hired applicants.
        """
        # Use an explicit lambda argument instead of shadowing ``self``.
        applicants = self.mapped('application_ids').filtered(lambda applicant: not applicant.emp_id)
        app_to_job = dict((applicant.id, applicant.job_id.id) for applicant in applicants)
        attachments = self.env['ir.attachment'].search([
            '|',
            '&', ('res_model', '=', 'hr.job'), ('res_id', 'in', self.ids),
            '&', ('res_model', '=', 'hr.applicant'), ('res_id', 'in', applicants.ids)])
        result = dict.fromkeys(self.ids, self.env['ir.attachment'])
        for attachment in attachments:
            if attachment.res_model == 'hr.applicant':
                # Applicant attachments are credited to the applicant's job.
                result[app_to_job[attachment.res_id]] |= attachment
            else:
                result[attachment.res_id] |= attachment
        for job in self:
            job.document_ids = result[job.id]
            job.documents_count = len(job.document_ids)

    @api.multi
    def _compute_application_count(self):
        """Compute the number of applications per job in one grouped query.

        Fix: the previous domain used ``('job_id', '=', self.id)`` which
        breaks ("expected singleton") as soon as this compute runs on a
        multi-record set; group over ``self.ids`` instead.
        """
        read_group_result = self.env['hr.applicant'].read_group(
            [('job_id', 'in', self.ids)], ['job_id'], ['job_id'])
        result = dict((data['job_id'][0], data['job_id_count']) for data in read_group_result)
        for job in self:
            job.application_count = result.get(job.id, 0)

    @api.model
    def create(self, vals):
        """Create the job and configure its mail alias defaults."""
        job = super(Job, self.with_context(alias_model_name='hr.applicant',
                                           mail_create_nolog=True,
                                           alias_parent_model_name=self._name)).create(vals)
        # Incoming mail on the alias must create applicants linked to this job.
        job.alias_id.write({'alias_parent_thread_id': job.id, "alias_defaults": {'job_id': job.id}})
        return job

    @api.multi
    def unlink(self):
        # Cascade-delete mail aliases as well, as they should not exist without the job position.
        aliases = self.mapped('alias_id')
        res = super(Job, self).unlink()
        aliases.unlink()
        return res

    def _auto_init(self, cr, context=None):
        """Installation hook to create aliases for all jobs and avoid constraint errors."""
        return self.pool.get('mail.alias').migrate_to_alias(
            cr, self._name, self._table, super(Job, self)._auto_init,
            'hr.applicant', self._columns['alias_id'], 'name',
            alias_prefix='job+', alias_defaults={'job_id': 'id'}, context=context)

    @api.multi
    def _track_subtype(self, init_values):
        """Use the dedicated mail subtype when a job position is opened."""
        if 'state' in init_values and self.state == 'open':
            return 'hr_recruitment.mt_job_new'
        return super(Job, self)._track_subtype(init_values)

    @api.multi
    def action_print_survey(self):
        """Print the interview form linked to this job position."""
        return self.survey_id.action_print_survey()

    @api.multi
    def action_get_attachment_tree_view(self):
        """Open the attachment list filtered on this job and its applicants."""
        action = self.env.ref('base.action_attachment').read()[0]
        action['context'] = {
            'default_res_model': self._name,
            'default_res_id': self.ids[0]
        }
        action['domain'] = ['|', '&', ('res_model', '=', 'hr.job'), ('res_id', 'in', self.ids), '&', ('res_model', '=', 'hr.applicant'), ('res_id', 'in', self.mapped('application_ids').ids)]
        return action

    @api.multi
    def action_set_no_of_recruitment(self, value):
        """Write the expected number of new employees for this position."""
        return self.write({'no_of_recruitment': value})
|
"""
Wire-level packet handling
"""
from mysqlproxy.types import FixedLengthInteger, \
FixedLengthString, LengthEncodedInteger, \
RestOfPacketString
from mysqlproxy import capabilities
from StringIO import StringIO
__all__ = [
'PacketMeta', 'IncomingPacketChain', 'OutgoingPacketChain',
'Packet', 'OKPacket', 'ERRPacket', 'EOFPacket'
]
class PacketMeta(object):
    """
    Per-packet bookkeeping (payload length and sequence id) kept by the
    packet chains.
    """
    def __init__(self, length, seq_id):
        self.length = length
        self.seq_id = seq_id
class IncomingPacketChain(object):
    """
    List of packets containing one payload
    """
    def __init__(self):
        # Per-packet (length, seq_id) metadata, in arrival order.
        self.packet_meta = []
        # Sequence id of the last packet read in.
        self.seq_id = 0
        # StringIO holding the reassembled payload (set by read_in).
        self.payload = None
    def read_in(self, fde):
        """
        Read in full payload
        """
        total_read = 0
        # Initialized to the continuation sentinel 0xffffff so the loop
        # below always runs at least once (do-while emulation).
        packet_length = FixedLengthInteger(3, 0xffffff)
        seq_id = FixedLengthInteger(1)
        self.payload = StringIO()
        # A payload spans several packets while each one is exactly
        # 0xffffff bytes long; any shorter packet terminates the chain.
        while packet_length.val == 0xffffff:
            packet_length.read_in(fde, label=None)
            seq_id.read_in(fde, label=None)
            cur_payload = FixedLengthString(packet_length.val)
            cur_payload.read_in(fde, label=None)
            self.payload.write(cur_payload.val)
            self.packet_meta.append(PacketMeta(packet_length.val, seq_id.val))
            total_read += packet_length.val
        self.seq_id = seq_id.val
        # Rewind so callers can read the payload from the start.
        self.payload.seek(0)
        return total_read
    @property
    def chain_length(self):
        """
        Amount of packets needed to be read to retrieve
        the entire payload
        """
        return len(self.packet_meta)
    @property
    def total_length(self):
        """
        Total payload length
        """
        return sum([x.length for x in self.packet_meta])
class OutgoingPacketChain(object):
    """Serialize a list of labeled fields into one or more wire packets."""
    def __init__(self, start_seq_id=0):
        # Ordered (label, field) pairs making up the payload.
        self.fields = []
        # Sequence id assigned to the first emitted packet.
        self.start_seq_id = start_seq_id
    def add_field(self, field, label='<unlabeled>'):
        """
        Add field to payload
        """
        self.fields.append((label, field))
    def _write_packet_header(self, length, seq, fde):
        """
        Write out packet header with given length
        and sequence id to file-like fde
        """
        length_field = FixedLengthInteger(3, length)
        seq_field = FixedLengthInteger(1, seq)
        length_field.write_out(fde, label=None)
        seq_field.write_out(fde, label=None)
    def write_out(self, fde):
        """
        Write out full packet chain
        """
        # TODO: impl is just outright terrible.
        # Fix it in any way shape or form i don't care
        sio = StringIO()
        seq_id = self.start_seq_id
        net_total_written = 0
        total_written = 0
        # Sentinel: stays 0xffffff when no 16MB-boundary packet was emitted,
        # which makes the final short-packet branch below run.
        last_total_written = 0xffffff
        for label, field in self.fields:
            written = field.write_out(sio, label='\t%s' % label)
            total_written += written
            net_total_written += written
            if total_written >= 0xffffff:
                # Payload crossed the 16MB packet limit: flush a full packet.
                self._write_packet_header(0xffffff, seq_id, fde)
                # NOTE(review): sio's file position is at the end of the
                # buffer here (nothing seeks back to 0 first), so this read()
                # appears to return '' — the multi-packet (>16MB) path looks
                # broken; confirm before relying on it.
                fde.write(sio.read(0xffffff))
                remaining_bytes = sio.read()
                sio.close()
                sio = StringIO(remaining_bytes)
                last_total_written = total_written
                total_written -= 0xffffff
                seq_id += 1
        if last_total_written == 0xffffff:
            # Final short packet (or the only packet for small payloads).
            self._write_packet_header(total_written, seq_id, fde)
            sio.seek(0)
            fde.write(sio.read(total_written))
            net_total_written += total_written
        return (net_total_written, seq_id)
class Packet(object):
    """
    Base class for wire packets: keeps an ordered list of (label, field)
    pairs and knows how to read or write them as one packet chain.
    """
    def __init__(self, capabilities, **kwargs):
        self.capabilities = capabilities
        self.fields = []
        self.seq_id = kwargs.pop('seq_id', 0)

    def read_in(self, fde):
        """Consume a full packet chain from *fde* and parse its payload."""
        chain = IncomingPacketChain()
        chain.read_in(fde)
        self.seq_id = chain.seq_id
        return self.read_in_internal(chain.payload, chain.total_length)

    def read_in_internal(self, pl_fd, packet_size):
        """Default payload parser; subclasses override for custom reading."""
        consumed = 0
        for label, field in self.fields:
            consumed += field.read_in(pl_fd, label='\t%s' % label)
        return consumed

    def write_out(self, fde):
        """Serialize every field through an OutgoingPacketChain."""
        chain = OutgoingPacketChain(start_seq_id=self.seq_id)
        for label, field in self.fields:
            chain.add_field(field, label=label)
        return chain.write_out(fde)

    def get_field(self, field_of_interest):
        """Return the first field whose label equals *field_of_interest*."""
        for name, field in self.fields:
            if name == field_of_interest:
                return field
        raise ValueError('field name %s does not exist' % field_of_interest)
class OKPacket(Packet):
    """
    Generic OK packet, will most likely not be read in
    """
    def __init__(self, capability_flags, affected_rows, last_insert_id, **kwargs):
        # Optional kwargs: seq_id (consumed by Packet), status_flags,
        # warnings, info (human-readable trailing message).
        super(OKPacket, self).__init__(capability_flags, **kwargs)
        self.affected_rows = affected_rows
        self.last_insert_id = last_insert_id
        use_41 = capability_flags & capabilities.PROTOCOL_41
        transactions = capability_flags & capabilities.TRANSACTIONS
        if use_41 or transactions:
            # Both capability variants carry status flags; warnings are only
            # written for 4.1+, but both default here.
            self.status_flags = kwargs.pop('status_flags', 0)
            self.warnings = kwargs.pop('warnings', 0)
        self.fields = [
            ('ok_header', FixedLengthInteger(1, 0)), # OK header
            ('affected_rows', LengthEncodedInteger(affected_rows)),
            ('last_insert_id', LengthEncodedInteger(last_insert_id))
        ]
        if use_41:
            # Protocol 4.1 clients expect status flags, then warning count.
            self.fields += [
                ('status_flags', FixedLengthInteger(2, self.status_flags)),
                ('warnings', FixedLengthInteger(2, self.warnings))
            ]
        elif transactions:
            # Pre-4.1 with transactions: status flags only, no warnings.
            self.fields.append(('status_flags', FixedLengthInteger(2, self.status_flags)))
        ok_message = kwargs.pop('info', 'k thanks')
        self.fields.append(('ok_message', RestOfPacketString(ok_message)))
class ERRPacket(Packet):
    """
    Error packet: 0xff header, error code, optional SQL state (4.1+)
    and a trailing human-readable message.
    """
    def __init__(self, capability_flags, error_code, error_msg, **kwargs):
        super(ERRPacket, self).__init__(capability_flags, **kwargs)
        self.error_code = error_code
        self.error_msg = error_msg
        field_list = [
            ('err_header', FixedLengthInteger(1, 0xff)), # ERR header
            ('error_code', FixedLengthInteger(2, error_code)),
        ]
        if capability_flags & capabilities.PROTOCOL_41:
            # 4.1+ clients also expect the '#' marker plus a 5-char SQL state.
            field_list.append(('sql_state_flag', FixedLengthString(1, '#')))
            field_list.append(
                ('sql_state', FixedLengthString(5, kwargs.pop('sql_state', 'HY000'))))
        field_list.append(('error_msg', RestOfPacketString(self.error_msg)))
        self.fields = field_list
class EOFPacket(Packet):
    """
    EOF packet: 0xfe header, plus warning count and status flags for
    protocol 4.1+ clients.
    """
    def __init__(self, capability_flags, **kwargs):
        super(EOFPacket, self).__init__(capability_flags, **kwargs)
        self.fields = [('eof_header', FixedLengthInteger(1, 0xfe))] # EOF header
        if capability_flags & capabilities.PROTOCOL_41:
            # 4.1 appends the warning count and then the server status flags.
            self.fields.append(
                ('warnings', FixedLengthInteger(2, kwargs.pop('warnings', 0))))
            self.fields.append(
                ('status_flags', FixedLengthInteger(2, kwargs.pop('status_flags', 0))))
|
import time
import requests
from .exceptions import HttpError
from .json import json_loads
def check_rep(rep):
    """Raise HttpError unless *rep* carries a 2xx status code."""
    if rep.status_code // 100 == 2:
        return
    raise HttpError(rep.text, rep.status_code)
def rep_to_json(rep):
    """Validate *rep* (2xx) and decode its body with the project JSON loader."""
    check_rep(rep)
    body = rep.text
    # our json_loads adds date parsing on top of plain JSON decoding
    return json_loads(body)
class RestClient:
    """Small REST client over requests.Session with HTTP basic auth.

    URLs are built as ``{base_url}/{path}/[{resource_id}/]`` and responses
    are decoded through rep_to_json (project JSON loader with date parsing).
    """
    # Safety bound for client-side polling/pagination loops.
    MAX_ITERATIONS = 100
    def __init__(
            self,
            url,
            login,
            password,
            verify_ssl=True
    ):
        # Strip leading/trailing slashes so URL f-strings stay canonical.
        self.base_url = url.strip("/")
        self.session = requests.Session()
        # Basic-auth credentials applied to every request of this session.
        self.session.auth = (login, password)
        self.verify_ssl = verify_ssl
    def list(self, path, params=None):
        """GET the collection endpoint and return the decoded JSON."""
        rep = self.session.get(
            f"{self.base_url}/{path}/",
            params=params,
            verify=self.verify_ssl)
        return rep_to_json(rep)
    def retrieve(self, path, resource_id):
        """GET a single resource by id and return the decoded JSON."""
        rep = self.session.get(
            f"{self.base_url}/{path}/{resource_id}/",
            verify=self.verify_ssl)
        return rep_to_json(rep)
    def create(self, path, data):
        """POST *data* as JSON to the collection endpoint."""
        rep = self.session.post(
            f"{self.base_url}/{path}/",
            json=data,
            verify=self.verify_ssl)
        return rep_to_json(rep)
    def partial_update(self, path, resource_id, data):
        """PATCH a resource with the given partial *data*."""
        rep = self.session.patch(
            f"{self.base_url}/{path}/{resource_id}/",
            json=data,
            verify=self.verify_ssl)
        return rep_to_json(rep)
    def update(self, path, resource_id, data):
        """PUT a full resource representation."""
        rep = self.session.put(
            f"{self.base_url}/{path}/{resource_id}/",
            json=data,
            verify=self.verify_ssl)
        return rep_to_json(rep)
    def detail_action(
            self,
            path,
            resource_id,
            http_method,
            action_name,
            params=None,
            data=None,
            return_json=True,
            send_json=True):
        """Call a custom action on one resource.

        *http_method* selects the session method (e.g. 'get', 'post');
        *send_json* switches the payload between JSON body and form data.
        Returns None on 204, decoded JSON when *return_json*, else raw bytes.
        """
        rep = getattr(self.session, http_method.lower())(
            f"{self.base_url}/{path}/{resource_id}/{action_name}/",
            params=params,
            json=data if send_json else None,
            data=None if send_json else data,
            verify=self.verify_ssl
        )
        if rep.status_code == 204:
            return
        if return_json:
            return rep_to_json(rep)
        # Non-JSON result: still verify the status before handing bytes back.
        check_rep(rep)
        return rep.content
    def list_action(
            self,
            path,
            http_method,
            action_name,
            params=None,
            data=None,
            return_json=True,
            send_json=True):
        """Call a custom action on a collection (same contract as detail_action)."""
        rep = getattr(self.session, http_method.lower())(
            f"{self.base_url}/{path}/{action_name}/",
            params=params,
            json=data if send_json else None,
            data=None if send_json else data,
            verify=self.verify_ssl
        )
        if rep.status_code == 204:
            return
        if return_json:
            return rep_to_json(rep)
        check_rep(rep)
        return rep.content
    def destroy(self, path, resource_id, params=None):
        """DELETE a resource; returns None on 204, decoded JSON otherwise."""
        rep = self.session.delete(
            f"{self.base_url}/{path}/{resource_id}/",
            params=params,
            verify=self.verify_ssl)
        if rep.status_code == 204:
            return
        return rep_to_json(rep)
    def wait_for_on(self, timeout=10, freq=1):
        """Poll the API until it answers, retrying every *freq* seconds.

        Raises ValueError for a non-positive *timeout*; raises TimeoutError
        when the server is still unreachable (or answering 503) after
        *timeout* seconds.
        """
        start = time.time()
        if timeout <= 0:
            raise ValueError
        while True:
            if (time.time() - start) > timeout:
                raise TimeoutError
            try:
                rep = self.session.get(
                    f"{self.base_url}/oteams/projects/",
                    params=dict(empty=True),
                    verify=self.verify_ssl)
                # 503 = still starting up: raise into the except clause below
                # so the loop sleeps and retries instead of succeeding.
                if rep.status_code == 503:
                    raise TimeoutError
                break
            except (requests.exceptions.ConnectionError, TimeoutError):
                pass
            time.sleep(freq)
|
'
Walk the new directories list and make sure that permissions are as we would expect
'''
if new_directory_list:
working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0))
directory_args['path'] = working_dir
changed = module.set_fs_attributes_if_different(directory_args, changed)
changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
return changed
class Artifact(object):
    """Coordinates of a Maven artifact: GAV plus classifier and extension."""

    def __init__(self, group_id, artifact_id, version, classifier='', extension='jar'):
        if not group_id:
            raise ValueError("group_id must be set")
        if not artifact_id:
            raise ValueError("artifact_id must be set")
        self.group_id = group_id
        self.artifact_id = artifact_id
        self.version = version
        self.classifier = classifier
        # Fall back to "jar" whenever the caller passes a falsy extension.
        self.extension = extension if extension else "jar"

    def is_snapshot(self):
        """Truthy when the version string marks a Maven snapshot build."""
        return self.version and self.version.endswith("SNAPSHOT")

    def path(self, with_version=True):
        """Repository-relative directory for this artifact."""
        parts = [self.group_id.replace(".", "/"), self.artifact_id]
        if with_version and self.version:
            parts.append(self.version)
        return posixpath.join(*parts)

    def _generate_filename(self):
        """Default on-disk filename (no version, optional classifier)."""
        if self.classifier:
            return self.artifact_id + "-" + self.classifier + "." + self.extension
        return self.artifact_id + "." + self.extension

    def get_filename(self, filename=None):
        """Resolve the destination filename; directories get a default name."""
        if not filename:
            return self._generate_filename()
        if os.path.isdir(filename):
            return os.path.join(filename, self._generate_filename())
        return filename

    def __str__(self):
        # Longest coordinate form first: g:a:ext:classifier:v, then
        # g:a:ext:v for non-jar extensions, else the plain g:a:v.
        if self.classifier:
            return "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
        if self.extension != "jar":
            return "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
        return "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)

    @staticmethod
    def parse(input):
        """Parse a colon-separated coordinate string; None when too short."""
        parts = input.split(":")
        if len(parts) < 3:
            return None
        group, artifact, version = parts[0], parts[1], parts[-1]
        extension = None
        classifier = None
        if len(parts) == 4:
            extension = parts[2]
        elif len(parts) == 5:
            extension = parts[2]
            classifier = parts[3]
        return Artifact(group, artifact, version, classifier, extension)
class MavenDownloader:
def __init__(self, module, base="http://repo1.maven.org/maven2"):
self.module = module
if base.endswith("/"):
base = base.rstrip("/")
self.base = base
self.user_agent = "Maven Artifact Downloader/1.0"
self.latest_version_found = None
def find_latest_version_available(self, artifact):
    """Return (and cache) the newest version listed by the artifact's
    maven-metadata.xml; None when the metadata lists no versions."""
    if self.latest_version_found:
        return self.latest_version_found
    url = self.base + "/%s/maven-metadata.xml" % (artifact.path(False))
    metadata = self._request(url, "Failed to download maven-metadata.xml", etree.parse)
    versions = metadata.xpath("/metadata/versioning/versions/version[last()]/text()")
    if versions:
        self.latest_version_found = versions[0]
        return versions[0]
def find_uri_for_artifact(self, artifact):
    """Resolve the concrete download URI for `artifact`.

    A "latest" version is first resolved via repository metadata (note:
    this mutates `artifact.version` in place). SNAPSHOT versions are
    resolved to the timestamped build advertised by the repository.
    """
    if artifact.version == "latest":
        artifact.version = self.find_latest_version_available(artifact)
    if artifact.is_snapshot():
        # Fetch the version-level metadata to learn the current snapshot
        # timestamp and build number.
        path = "/%s/maven-metadata.xml" % (artifact.path())
        xml = self._request(self.base + path, "Failed to download maven-metadata.xml", etree.parse)
        timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0]
        buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
        # Prefer the <snapshotVersion> entry that matches our classifier
        # and extension, if the repository lists one.
        for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
            classifier = snapshotArtifact.xpath("classifier/text()")
            artifact_classifier = classifier[0] if classifier else ''
            extension = snapshotArtifact.xpath("extension/text()")
            artifact_extension = extension[0] if extension else ''
            if artifact_classifier == artifact.classifier and artifact_extension == artifact.extension:
                return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
        # Fallback: synthesize the timestamped version from the metadata.
        return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + buildNumber))
    return self._uri_for_artifact(artifact, artifact.version)
def _uri_for_artifact(self, artifact, version=None):
    """Build the full download URI for `artifact`.

    For snapshots the caller must pass the resolved, timestamped
    `version`; for releases the artifact's own version is used.

    :raises ValueError: for a snapshot artifact without a resolved version.
    """
    if artifact.is_snapshot() and not version:
        # Bug fix: the message previously read "uniqueversion" (missing space).
        raise ValueError("Expected unique version for snapshot artifact " + str(artifact))
    elif not artifact.is_snapshot():
        version = artifact.version
    if artifact.classifier:
        return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)
    return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)
def _request(self, url, failmsg, f):
    """Fetch `url` via Ansible's fetch_url and return `f(response)`.

    s3:// URLs are first converted to a short-lived presigned HTTPS URL
    using the module's username/password as AWS credentials.

    :raises ValueError: when the HTTP status is not 200.
    """
    url_to_use = url
    parsed_url = urlparse(url)
    if parsed_url.scheme == 's3':
        # Fix: the original re-parsed the same URL a second time here.
        bucket_name = parsed_url.netloc
        key_name = parsed_url.path[1:]  # drop the leading '/'
        client = boto3.client('s3',
                              aws_access_key_id=self.module.params.get('username', ''),
                              aws_secret_access_key=self.module.params.get('password', ''))
        url_to_use = client.generate_presigned_url('get_object',
                                                   Params={'Bucket': bucket_name, 'Key': key_name},
                                                   ExpiresIn=10)
    req_timeout = self.module.params.get('timeout')
    # Hack to add parameters in the way that fetch_url expects
    self.module.params['url_username'] = self.module.params.get('username', '')
    self.module.params['url_password'] = self.module.params.get('password', '')
    self.module.params['http_agent'] = self.module.params.get('user_agent', None)
    response, info = fetch_url(self.module, url_to_use, timeout=req_timeout)
    if info['status'] != 200:
        # Bug fix: insert the missing space before "for URL" in the message.
        raise ValueError(failmsg + " because of " + info['msg'] + " for URL " + url_to_use)
    return f(response)
def download(self, artifact, filename=None):
    """Download `artifact` to `filename` unless an up-to-date copy exists.

    Returns True on success (or when the existing file already matches the
    remote md5), False when the request yielded no response.
    """
    filename = artifact.get_filename(filename)
    if not artifact.version or artifact.version == "latest":
        artifact = Artifact(artifact.group_id, artifact.artifact_id,
                            self.find_latest_version_available(artifact),
                            artifact.classifier, artifact.extension)
    url = self.find_uri_for_artifact(artifact)
    if self.verify_md5(filename, url + ".md5"):
        # Local copy already matches the remote checksum.
        return True
    response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r)
    if not response:
        return False
    # Bug fix: write in binary mode (artifacts are binary; text mode would
    # corrupt them on Windows) and close the handle even if the chunked
    # copy raises -- the original leaked the file object on error.
    with open(filename, 'wb') as f:
        self._write_chunks(response, f, report_hook=self.chunk_report)
    return True
def chunk_report(self, bytes_so_far, chunk_size, total_size):
    """Progress hook: write a carriage-return style progress line to stdout."""
    percent = round(float(bytes_so_far) / total_size * 100, 2)
    sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
                     (bytes_so_far, total_size, percent))
    # Finish the progress line once the download is complete.
    if bytes_so_far >= total_size:
        sys.stdout.write('\n')
def _write_chunks(self, response, file, chunk_size=8192, report_hook=None):
total_size = response.info().getheader('Content-Length').strip()
total_size = int(total_size)
bytes_so_far = 0
while 1:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not |
from sqlalchemy import Column, Integer, String, Text, Boolean, ForeignKey
from sqlalchemy.orm import relationship
from database.main import Base
class CreatureTemplateSchema(Base):
    """
    This table holds the information about each creature in the game
    entry - the unique ID of this creature
    creature_name - the name of this creature
    type - the type of creature. Currently supported:
    monster - hostile creature
    fnpc - friendly npc
    vendor - vendor npc
    level - the level of this creature
    hp - the health points of this creature
    mana - the mana points of this creature
    armor - the armor points of this creature NOTE: If not specified, the creature will take the default armor for
    his level from the creature_defaults table
    min_dmg - the minimum damage this creature does per swing
    max_dmg - the maximum damage this creature does per swing
    quest_relation_id - the id of the quest this creature is related with (if applicable)
    loot_table_id - the id of the loot this creature drops (in the loot_table table)
    gossip - the text this creature says when he is talked to (if applicable)
    respawnable - whether this creature will respawn on different game starts. Ex: Some special creatures should not be
    killed more than once
    Example:
    entry, creature name, type, level, hp, mana, armor, min_dmg, max_dmg, quest_relation_id, loot_table_id, gossip, respawnable
    1, Zimbab, "monster", 1, 10, 10, 50, 2, 4, 1, 1, "Hey there", False
    type is "monster" meaning this is a hostile NPC
    Creature Level: 1 Zimbab, HP: 10, MANA: 10, Damage: 2-4.
    He is needed to complete quest with ID 1 and the loot he drops is from the row in the loot_table DB table with
    entry 1. If talking to him is enabled, he would say "Hey there".

    NOTE(review): the docstring names "creature_name" and "hp", but the
    columns below are actually called "name" and "health".
    """
    __tablename__ = 'creature_template'
    entry = Column(Integer, primary_key=True)
    name = Column(String(60))
    type = Column(String(60))
    level = Column(Integer)
    health = Column(Integer)
    mana = Column(Integer)
    armor = Column(Integer)
    min_dmg = Column(Integer)
    max_dmg = Column(Integer)
    quest_relation_id = Column(Integer, ForeignKey('quest_template.entry'))
    loot_table_id = Column(Integer, ForeignKey('loot_table.entry'))
    loot_table = relationship('LootTableSchema', foreign_keys=[loot_table_id])
    # One row per item this creature sells (only meaningful for vendors).
    vendor_inventory = relationship('NpcVendorSchema', uselist=True)
    gossip = Column(Text)
    respawnable = Column(Boolean)

    def build_vendor_inventory(self):
        """
        This function loads all the items that a certain vendor should sell.
        We take them from the self.vendor_inventory list, which holds NpcVendor objects
        :return: A dictionary of Key: "Item Name", Value: Tuple(1,2)
        1 - Item object of class Item from items.py
        2 - The count of the item
        """
        vendor_inventory: {str: ('Item', int)} = {}
        for product in self.vendor_inventory:
            item: 'Item' = product.item.convert_to_item_object()
            item_count: int = product.item_count
            if product.price:  # check if there is anything set to price that'll make us override
                item.buy_price = product.price
            vendor_inventory[item.name] = (item, item_count)
        return vendor_inventory
|
import sys, datetime
import elasticsearch
INDEX = 'cli'
DOC_TYPE = 'cli'


def create_elasticsearch_index(es):
    """Create the 'cli' index with its mapping.

    es should be an elasticsearch.Elasticsearch instance.
    """
    # ignore=400 tolerates an already-existing index.
    es.indices.create(index=INDEX, ignore=400)
    # contributor/authors/license are exact-match (not_analyzed) string fields.
    keyword = {"type": "string", "index": "not_analyzed"}
    properties = {field: dict(keyword) for field in ("contributor", "authors", "license")}
    es.indices.put_mapping(index=INDEX, doc_type=DOC_TYPE, body={
        DOC_TYPE: {
            "_timestamp": {"enabled": True},
            "properties": properties,
        }})
# es.indices.delete('cli')
def update_elasticsearch_index(es, docs, source):
    """Sync `docs` into the index for `source`.

    `docs` is an iterable of (timestamp, doc) pairs. New documents are
    added, changed ones re-indexed, and documents previously indexed for
    `source` but absent from `docs` are deleted.
    """
    # retrieve existing documents (ids only) previously indexed for this source
    # NOTE(review): size=10000 caps the result; sources with more documents
    # would be synced incompletely -- confirm this bound is acceptable.
    try:
        existing = [doc['_id'] for doc in
                    es.search(INDEX, DOC_TYPE, body = dict(
                        query = dict(
                            term = dict(
                                source = source)
                        )),
                        fields = ['_id'],
                        size = 10000)['hits']['hits']]
    except elasticsearch.exceptions.NotFoundError:
        existing = []
    # now update changed / add new documents:
    for timestamp, doc in docs:
        doc['source'] = source
        # Document ids are "<source>:<name>" -- names must be unique per source.
        doc_id = '%s:%s' % (source, doc['name'])
        timestamp = datetime.datetime.fromtimestamp(timestamp)
        try:
            old = es.get(INDEX, doc_id, DOC_TYPE) # FIXME: with elasticsearch-2.1.1, this produces 404 warnings
        except elasticsearch.exceptions.NotFoundError:
            es.index(INDEX, DOC_TYPE, body = doc, id = doc_id, timestamp = timestamp)
            sys.stdout.write("added new document '%s'.\n" % doc_id)
        else:
            # Seen in `docs`: remove from `existing` so it survives cleanup below.
            existing.remove(old['_id'])
            if old['_source'] != doc:
                es.index(INDEX, DOC_TYPE, body = doc, id = doc_id, timestamp = timestamp)
                sys.stdout.write("changed document '%s'.\n" % doc_id)
            else:
                sys.stdout.write("leaving '%s' alone, no change...\n" % doc_id)
    # finally, remove existing documents that were not contained in `docs`:
    for doc_id in existing:
        sys.stdout.write("removing '%s', which is no longer in the '%s' JSON...\n" % (doc_id, source))
        es.delete(INDEX, DOC_TYPE, doc_id)
|
from cobra.model.aaa import User, UserDomain
from createLocalUser import input_key_args as input_local_user
from createMo import *
def input_key_args(msg='\nPlease Specify User Domain:'):
    """Interactively prompt for a user-domain name and return it (Python 2)."""
    print msg
    return input_raw_input("User Domain Name", required=True)
def add_user_domain(parent_mo, user_domain):
    """Create and return the AAA UserDomain object under `parent_mo`."""
    return UserDomain(parent_mo, user_domain)
class AddSecurityDomain(CreateMo):
    """CLI/wizard command that attaches a security (user) domain to a
    locally-authenticated APIC user."""

    def __init__(self):
        self.description = 'The AAA domain to which the user belongs. '
        self.local_user = None
        self.user_domain = None
        # NOTE(review): CreateMo.__init__ appears to drive the whole
        # parse/execute flow -- confirm against createMo.
        super(AddSecurityDomain, self).__init__()

    def set_cli_mode(self):
        # Extend the base parser with this command's two positional arguments.
        super(AddSecurityDomain, self).set_cli_mode()
        self.parser_cli.add_argument('local_user', help='The name of a locally-authenticated user account.')
        self.parser_cli.add_argument('user_domain', help='The name of the user domain')

    def read_key_args(self):
        # Pull the key arguments out of the parsed-args dict.
        self.local_user = self.args.pop('local_user')
        self.user_domain = self.args.pop('user_domain')

    def wizard_mode_input_args(self):
        # Interactive prompts for the same two values.
        self.args['local_user'] = input_local_user('\nPlease Specify User Domain:', user_only=True, delete_function=self.delete)[0]
        self.args['user_domain'] = input_key_args('')

    def delete_mo(self):
        # Verify the target user domain exists, then delete it via the base class.
        self.check_if_mo_exist('uni/userext/user-' + self.local_user + '/userdomain-', self.user_domain, UserDomain, description='User Domain')
        super(AddSecurityDomain, self).delete_mo()

    def main_function(self):
        # Verify the user exists, then attach the user domain to it.
        self.check_if_mo_exist('uni/userext/user-', self.local_user, User, 'User')
        add_user_domain(self.mo, self.user_domain)
if __name__ == '__main__':
    # Constructing the command runs it (see AddSecurityDomain.__init__).
    user_domain = AddSecurityDomain()
#! /usr/bin/python
import base64
import sys
f = open(sys.argv[1], "r")
items = []
for line in f.readlines():
if len(line.strip()) == 0:
continue
if line | [0] == "{":
items.append(base64.b64encode(line.strip()))
else:
items.append(line.strip())
pri | nt ".".join(items)
|
#!/usr/bin/env python
#
# test_doctests.py - test runner for COT doctests
#
# July 2016, Glenn F. Matthews
# Copyright (c) 2016-2017 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/bl | ob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) proje | ct.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Test runner for COT doctest tests."""
import logging
from logging import NullHandler
from doctest import DocTestSuite
from unittest import TestSuite
# Silence COT's logger during doctest runs.
logging.getLogger('COT').addHandler(NullHandler())


def load_tests(*_):
    """Load doctests as unittest test suite.

    For the parameters, see :mod:`unittest`. The parameters are unused here.
    """
    doctest_modules = ('COT.data_validation', 'COT.utilities')
    suite = TestSuite()
    for module_name in doctest_modules:
        suite.addTests(DocTestSuite(module_name))
    return suite
|
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought util package component>
#------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#
# Provides a simple function for scheduling some code to run at some time in
# the future (assumes application is wxPython based).
#
# Written by: David C. Morrill
#
# Date: 05/18/2005
#
# (c) Copyright 2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
# Import the toolkit specific version.
from enthought.pyface.toolkit import toolkit_object
DoLaterTimer = toolkit_object('timer.do_later:DoLaterTimer')
#-------------------------------------------------------------------------------
# Does something 50 milliseconds from now:
#-------------------------------------------------------------------------------
def do_later(callable, *args, **kw_args):
    """Schedule `callable(*args, **kw_args)` to run 50 milliseconds from now.

    Fire-and-forget: the underlying DoLaterTimer is not returned.
    """
    DoLaterTimer(50, callable, args, kw_args)
#----------------------------------------------------------- | --------------------
# Does something after some specified time interval:
#-------------------------------------------------------------------------------
def do_after(interval, callable, *args, **kw_args):
    """Schedule `callable(*args, **kw_args)` to run after `interval` ms.

    Fire-and-forget: the underlying DoLaterTimer is not returned.
    """
    DoLaterTimer(interval, callable, args, kw_args)
|
#
# ident/geoio.py
#
# Copyright (C) 2011 Damien Churchill <damoxc@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
import GeoIP
import logging
from vsmtpd.hooks import hook
from vsmtpd.plugins.plugin import PluginBase
log = logging.getLogger(__name__)
class Plugin(PluginBase):
    """GeoIP plugin: tags each connection with its originating country code."""

    def __init__(self):
        # Memory-cached GeoIP database for fast per-connection lookups.
        self.gi = GeoIP.new(GeoIP.GEOIP_MEMORY_CACHE)
        # NOTE(review): PluginBase.__init__ is never invoked here -- confirm
        # the base class requires no initialization.

    @hook
    def connect(self, connection):
        # Resolve the remote IP's country and stash it in the connection
        # notes for later hooks to consume.
        country = self.gi.country_code_by_addr(connection.remote_ip)
        connection.notes['geoip_country'] = country
|
version.
#
# Arx Libertatis is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Arx Libertatis. If not, see <http://www.gnu.org/licenses/>.
from ctypes import (
LittleEndianStructure,
c_char,
c_uint32,
c_int16,
c_int32,
c_float
)
from .dataCommon import SavedVec3, PolyTypeFlag
class UNIQUE_HEADER(LittleEndianStructure):
    """Packed little-endian header: source path, entry count, format
    version and uncompressed payload size, plus padding."""
    _pack_ = 1
    _fields_ = [
        ("path", c_char * 256),
        ("count", c_int32),
        ("version", c_float),
        ("uncompressedsize", c_int32),
        ("pad", c_int32 * 3)
    ]
class UNIQUE_HEADER3(LittleEndianStructure):
    """Secondary header record: a path plus a 512-byte check blob."""
    _pack_ = 1
    _fields_ = [
        ("path", c_char * 256),  # In the c code this is currently in a separate struct
        ("check", c_char * 512)
    ]
class FAST_SCENE_HEADER(LittleEndianStructure):
    """Header of the FTS scene payload: format version, cell-grid size
    (sizex by sizez), counts of textures/polygons/anchors/portals/rooms,
    and the player/scene position vectors."""
    _pack_ = 1
    _fields_ = [
        ("version", c_float),
        ("sizex", c_int32),
        ("sizez", c_int32),
        ("nb_textures", c_int32),
        ("nb_polys", c_int32),
        ("nb_anchors", c_int32),
        ("playerpos", SavedVec3),
        ("Mscenepos", SavedVec3),
        ("nb_portals", c_int32),
        ("nb_rooms", c_int32)
    ]
class FAST_TEXTURE_CONTAINER(LittleEndianStructure):
    """Texture entry: two ids and the texture file name (`fic`)."""
    _pack_ = 1
    _fields_ = [
        ("tc", c_int32),
        ("temp", c_int32),
        ("fic", c_char * 256)
    ]
class FAST_SCENE_INFO(LittleEndianStructure):
    """Per-cell record (one per sizex*sizez grid cell): polygon count and
    anchor count; the polygon/anchor data follows in the stream."""
    _pack_ = 1
    _fields_ = [
        ("nbpoly", c_int32),
        ("nbianchors", c_int32),
    ]
class FAST_VERTEX(LittleEndianStructure):
    """Polygon vertex: three coordinate floats (note the sy/ssx/ssz order)
    and a texture-coordinate pair (stu/stv)."""
    _pack_ = 1
    _fields_ = [
        ("sy", c_float),
        ("ssx", c_float),
        ("ssz", c_float),
        ("stu", c_float),
        ("stv", c_float)
    ]
class FAST_EERIEPOLY(LittleEndianStructure):
    """Scene polygon: four vertices, a texture id, face/vertex normals,
    transparency and area values, type flags, and the owning room index."""
    _pack_ = 1
    _fields_ = [
        ("v", FAST_VERTEX * 4),
        ("tex", c_int32),
        ("norm", SavedVec3),
        ("norm2", SavedVec3),
        ("nrml", SavedVec3 * 4),
        ("transval", c_float),
        ("area", c_float),
        ("type", PolyTypeFlag),
        ("room", c_int16),
        ("paddy", c_int16)  # padding
    ]
class FAST_ANCHOR_DATA(LittleEndianStructure):
    """Anchor node: position, radius/height, the number of linked anchors
    (that many int32 indices follow in the stream) and flags."""
    _pack_ = 1
    _fields_ = [
        ("pos", SavedVec3),
        ("radius", c_float),
        ("height", c_float),
        ("nb_linked", c_int16),
        ("flags", c_int16)
    ]
class SavedTextureVertex(LittleEndianStructure):
    """Saved textured vertex: position, rhw, packed color/specular values
    and texture coordinates."""
    _pack_ = 1
    _fields_ = [
        ("pos", SavedVec3),
        ("rhw", c_float),
        ("color", c_uint32),
        ("specular", c_uint32),
        ("tu", c_float),
        ("tv", c_float)
    ]
class SAVE_EERIEPOLY(LittleEndianStructure):
    """Saved polygon: type, bounding min/max, normals, two sets of four
    textured vertices, texture id, center, transparency/area values and
    room/misc fields."""
    _pack_ = 1
    _fields_ = [
        ("type", c_int32),
        ("min", SavedVec3),
        ("max", SavedVec3),
        ("norm", SavedVec3),
        ("norm2", SavedVec3),
        ("v", SavedTextureVertex * 4),
        ("tv", SavedTextureVertex * 4),
        ("nrml", SavedVec3 * 4),
        ("tex", c_int32),
        ("center", SavedVec3),
        ("transval", c_float),
        ("area", c_float),
        ("room", c_int16),
        ("misc", c_int16)
    ]
class EERIE_SAVE_PORTALS(LittleEndianStructure):
    """Portal record: the portal polygon plus the two room indices it
    connects and a use flag."""
    _pack_ = 1
    _fields_ = [
        ("poly", SAVE_EERIEPOLY),
        ("room_1", c_int32),
        ("room_2", c_int32),
        ("useportal", c_int16),
        ("paddy", c_int16)  # padding
    ]
class EERIE_SAVE_ROOM_DATA(LittleEndianStructure):
    """Room record: portal and polygon counts (the corresponding index
    arrays follow in the stream), plus padding."""
    _pack_ = 1
    _fields_ = [
        ("nb_portals", c_int32),
        ("nb_polys", c_int32),
        ("padd", c_int32 * 6)
    ]
class FAST_EP_DATA(LittleEndianStructure):
    """Compact polygon reference: cell coordinates (px/py) plus a polygon
    index, padded to 8 bytes."""
    _pack_ = 1
    _fields_ = [
        ("px", c_int16),
        ("py", c_int16),
        ("idx", c_int16),
        ("padd", c_int16)  # padding
    ]
class ROOM_DIST_DATA_SAVE(LittleEndianStructure):
    """Room-to-room distance entry: the distance plus its start and end
    position vectors."""
    _pack_ = 1
    _fields_ = [
        ("distance", c_float),
        ("startpos", SavedVec3),
        ("endpos", SavedVec3),
    ]
from collections import namedtuple

# Parsed contents of an FTS file: scene offset vector, texture containers,
# per-cell polygon arrays, anchor graph and portal records.
FtsData = namedtuple('FtsData', ['sceneOffset', 'textures', 'cells', 'anchors', 'portals'])
import logging
from ctypes import sizeof
from .lib import ArxIO
class FtsSerializer(object):
def __init__(self, ioLib):
    """Store the I/O helper and create this serializer's logger."""
    self.log = logging.getLogger('FtsSerializer')
    # ArxIO-style helper, kept for later use.
    # NOTE(review): not referenced in the visible read path -- confirm.
    self.ioLib = ioLib
def read_fts(self, data) -> FtsData:
"""If you want to read a fts file use read_fts_container"""
pos = 0
ftsHeader = FAST_SCENE_HEADER.from_buffer_copy(data, pos)
pos += sizeof(FAST_SCENE_HEADER)
self.log.debug("Fts Header version: %f" % ftsHeader.version)
self.log.debug("Fts Header size x,z: %i,%i" % (ftsHeader.sizex, ftsHeader.sizez))
self.log.debug("Fts Header playerpos: %f,%f,%f" % (ftsHeader.playerpos.x, ftsHeader.playerpos.y, ftsHeader.playerpos.z))
self.log.debug("Fts Header Mscenepos: %f,%f,%f" % (ftsHeader.Mscenepos.x, ftsHeader.Mscenepos.y, ftsHeader.Mscenepos.z))
sceneOffset = (ftsHeader.Mscenepos.x, ftsHeader.Mscenepos.y, ftsHeader.Mscenepos.z)
texturesType = FAST_TEXTURE_CONTAINER * ftsHeader.nb_textures
textures = texturesType.from_buffer_copy(data, pos)
pos += sizeof(texturesType)
self.log.debug("Loaded %i textures" % len(textures))
#for i in textures:
# log.info(i.fic.decode('iso-8859-1'))
cells = [[None for x in range(ftsHeader.sizex)] for x in range(ftsHeader.sizez)]
for z in range(ftsHeader.sizez):
for x in range(ftsHeader.sizex):
cellHeader = FAST_SCENE_INFO.from_buffer_copy(data, pos)
pos += sizeof(FAST_SCENE_INFO)
try:
if cellHeader.nbpoly <= 0:
cells[z][x] = None
else:
polysType = FAST_EERIEPOLY * cellHeader.nbpoly
polys = polysType.from_buffer_copy(data, pos)
pos += sizeof(polysType)
cells[z][x] = polys
except ValueError as e:
print("Failed reading cell data, x:%i z:%i polys:%i" % (x, z, cellHeader.nbpoly))
raise e
if cellHeader.nbianchors > 0:
AnchorsArrayType = c_int32 * cellHeader.nbianchors
anchors = AnchorsArrayType.from_buffer_copy(data, pos)
pos += sizeof(AnchorsArrayType)
anchors = []
for i in | range(ftsHeader.nb_anchors):
anchor = FAST_ANCHOR_DATA.from_buffer_copy(data, pos)
pos += sizeof(FAST_ANCHOR_DATA)
if anchor.nb_linked > 0:
LinkedAnchorsArrayType | = c_int32 * anchor.nb_linked
linked = LinkedAnchorsArrayType.from_buffer_copy(data, pos)
pos += sizeof(LinkedAnchorsArrayType)
anchors.append( ((anchor.pos.x, anchor.pos.y, anchor.pos.z), linked) )
else:
anchors.append( ((anchor.pos.x, anchor.pos.y, anchor.pos.z), []) )
portals = []
for i in range(ftsHeader.nb_portals):
portal = EERIE_SAVE_PORTALS.from_buffer_copy(data, pos)
pos += sizeof(EERIE_SAVE_PORTALS)
portals.append(portal)
for i in range(ftsHeader.nb_rooms + 1): # Off by one in data
room = EERIE_SAVE_ROOM_DATA.from_buffer_copy(data, pos)
pos += sizeof(EERIE_SAVE_ROOM_DATA)
if room.nb_portals > 0:
PortalsArrayType = c_int32 * room.nb_portals
portals2 = PortalsArrayType.from_buffer_copy(data, pos)
pos += sizeof(PortalsArrayType)
if room.nb_polys > 0:
|
"""Web dashboard."""
import os
import sys
import json
import aiohttp
from discord.ext import commands
import util.dynaimport as di
from .cog import Cog
# Lazily import the web frameworks via the dynamic-import helper.
japronto = di.load('japronto')
sanic = di.load('sanic')
response = di.load('sanic.response')

# Resolve the bot's root directory from the main module's `core_file`
# attribute, then locate the bundled web assets under assets/web.
root_dir = os.path.dirname(os.path.abspath(sys.modules['__main__'].core_file))
web_root = os.path.join(root_dir, 'assets', 'web')
def webroot(f):
    """Map a slash-separated relative path to an absolute path under web_root."""
    segments = f.split('/')
    return os.path.join(web_root, *segments)
class Web(Cog):
    """The awesome web dashboard."""

    def __init__(self, bot):
        super().__init__(bot)
        self.logger = self.logger.getChild('web')
        # Bind address for the dashboard's HTTP server.
        self.port = 8085
        self.host = '127.0.0.1'
        self.app = None
        self.server = None
        self.server_task = None
        # If the bot is already logged in, on_ready has fired; start now.
        if bot.user:
            self.loop.create_task(self.start())

    def __unload(self):
        # Bug fix: this previously cancelled `self.guild_task`, an attribute
        # that is never created anywhere in this cog (AttributeError on
        # unload). Cancel the server task that start() actually schedules.
        if self.server_task is not None:
            self.server_task.cancel()

    async def on_ready(self):
        await self.start()

    async def start(self):
        """Create the sanic app and schedule its server on the bot's loop."""
        self.logger.info('Starting web server on %s:%s!', self.host, str(self.port))
        app = sanic.Sanic()
        await self.init_app(app)
        self.app = app
        self.server = app.create_server(host=self.host, port=self.port)
        self.server_task = self.loop.create_task(self.server)

    async def init_app(self, app):
        """Register the dashboard's routes on the freshly created app."""
        self.logger.info('Initializing app...')

        @app.route('/')
        async def test(req):
            self.logger.info('Got request at /')
            return response.text('hello')
            # NOTE(review): unreachable -- the line above already returned.
            # Presumably this was the intended handler before the debug
            # 'hello' response was added; kept as-is to preserve behavior.
            return response.file(webroot('index.html'))
def setup(bot):
    """Attach the web dashboard cog to the bot."""
    cog = Web(bot)
    bot.add_cog(cog)
'''
async def hello(request):
return request.Response(text='Hello world!')
app = japronto.Application()
app.router.add_route('/', hello)
app.run()'''
|
lues, it attempts to de-serialize
the value. If it fails, then it fallsback to treating the value
as a string.
>>> C = Cookie.SmartCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'
Backwards Compatibility
-----------------------
In order to keep compatibilty with earlier versions of Cookie.py,
it is still possible to use Cookie.Cookie() to create a Cookie. In
fact, this simply returns a SmartCookie.
>>> C = Cookie.Cookie()
>>> print C.__class__.__name__
SmartCookie
Finis.
""" #"
# ^
# |----helps out font-lock
#
# Import our required modules
#
import string
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
import re, warnings
# Public API of this module.
__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
           "SmartCookie","Cookie"]

# Pre-bound join helpers (used by the quoting/output routines below).
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
    """Exception raised by this module for cookie-related errors."""
    pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\ | 342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\3 | 46' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
_idmap = ''.join(chr(x) for x in xrange(256))
def _quote(str, LegalChars=_LegalChars,
           idmap=_idmap, translate=string.translate):
    """Quote `str` for use in a cookie header.

    If every character is legal the string is returned unchanged;
    otherwise it is wrapped in double quotes with special characters
    escaped via the _Translator table.
    """
    #
    # If the string does not need to be double-quoted,
    # then just return the string.  Otherwise, surround
    # the string in doublequotes and precede quote (with a \)
    # special characters.
    #
    # Python 2 three-argument string.translate deletes every character in
    # LegalChars; an empty result means all characters were legal.
    if "" == translate(str, idmap, LegalChars):
        return str
    else:
        # map() with `str` twice: characters missing from _Translator map
        # to themselves.
        return '"' + _nulljoin( map(_Translator.get, str, str) ) + '"'
# end _quote
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
Omatch = _OctalPatt.search(str, i)
Qmatch = _QuotePatt.search(str, i)
if not Omatch and not Qmatch: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if Omatch: j = Omatch.start(0)
if Qmatch: k = Qmatch.start(0)
if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k+2
else: # OctalPatt matched
res.append(str[i:j])
res.append( chr( int(str[j+1:j+4], 8) ) )
i = j+4
return _nulljoin(res)
# end _unquote
# The _getdate() routine is used to set the expiration time in
# the cookie's HTTP header. By default, _getdate() returns the
# current time in the appropriate "expires" format for a
# Set-Cookie header. The one optional argument is an offset from
# now, in seconds. For example, an offset of -3600 means "one hour ago".
# The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
#
# A class to hold ONE key,value pair.
# In a cookie, each such pair may have several attributes.
# so this class is used to keep the attributes associated
# with the appropriate key,value pair.
# This class also includes a coded_value attribute, which
# is |
"""
drslib exceptions
"""
class Tr | anslationError(Exception):
| pass
|
alues for nodes to be shut d | own.
:type ids: Sequence
:param by_user: Requesting user.
:type by_user: User_
:return: Those Nodes for which shutdown was actually requested.
:rtype: list
"""
nodes = self.get_nodes(by_user, NODE_PERMISSION.EDIT, ids=ids)
processed_nodes = []
for node in nodes:
power_params | = node.get_effective_power_parameters()
node_power_type = node.get_effective_power_type()
# WAKE_ON_LAN does not support poweroff.
if node_power_type != POWER_TYPE.WAKE_ON_LAN:
power_off.apply_async(
queue=node.work_queue, args=[node_power_type],
kwargs=power_params)
processed_nodes.append(node)
return processed_nodes
def start_nodes(self, ids, by_user, user_data=None):
    """Request on given user's behalf that the given nodes be started up.

    Power-on is only requested for nodes that the user has ownership
    privileges for; any other nodes in the request are ignored.

    :param ids: The `system_id` values for nodes to be started.
    :type ids: Sequence
    :param by_user: Requesting user.
    :type by_user: User_
    :param user_data: Optional blob of user-data to be made available to
        the nodes through the metadata service.  If not given, any
        previous user data is used.
    :type user_data: unicode
    :return: Those Nodes for which power-on was actually requested.
    :rtype: list
    """
    # Avoid circular imports.
    from metadataserver.models import NodeUserData

    nodes = self.get_nodes(by_user, NODE_PERMISSION.EDIT, ids=ids)
    for node in nodes:
        NodeUserData.objects.set_user_data(node, user_data)

    started = []
    for node in nodes:
        params = node.get_effective_power_parameters()
        power_type = node.get_effective_power_type()
        if power_type == POWER_TYPE.WAKE_ON_LAN:
            # Wake-on-LAN needs a usable MAC address to target.
            mac = params.get('mac_address')
            can_start = mac is not None and mac != ''
        else:
            can_start = True
        if not can_start:
            continue
        power_on.apply_async(
            queue=node.work_queue, args=[power_type], kwargs=params)
        started.append(node)
    return started
# Non-ambiguous characters (i.e. without 'ilousvz1250').
# An infinite iterator of random characters: repeat() yields the alphabet
# string forever and imap applies random.choice to each repetition.
non_ambiguous_characters = imap(
    random.choice, repeat('abcdefghjkmnpqrtwxy346789'))
def generate_hostname(size):
    """Generate a hostname using only non-ambiguous characters.

    :param size: Number of characters to draw.
    :return: A string of `size` random non-ambiguous characters.
    """
    # Consumes from the shared module-level iterator, so output depends
    # on the global `random` state.
    return "".join(islice(non_ambiguous_characters, size))
def patch_pgarray_types():
    """Monkey-patch `djorm_pgarray` so macaddr arrays are accepted.

    An upstream commit in `djorm_pgarray` (2013-07-21) limits ArrayField
    to a fixed registry of PostgreSQL type names; any other type raises
    "TypeError: invalid postgreSQL type."  That breaks us on
    python-djorm-ext-pgarray 0.8 (the first Ubuntu-packaged version),
    though 0.6 was fine.  Adding 'macaddr' to the registry restores
    support.

    Upstream bug: https://github.com/niwibe/djorm-ext-pgarray/issues/19
    """
    # Older djorm_pgarray versions have no TYPES registry at all and
    # work without patching.
    registry = getattr(djorm_pgarray.fields, 'TYPES', None)
    if registry is None:
        return
    registry.setdefault('macaddr', MAC)


# Monkey-patch djorm_pgarray's types list to support MAC.
patch_pgarray_types()
class Node(CleanSave, TimestampedModel):
"""A `Node` represents a physical machine used by the MAAS Server.
:ivar system_id: The unique identifier for this `Node`.
(e.g. 'node-41eba45e-4cfa-11e1-a052-00225f89f211').
:ivar hostname: This `Node`'s hostname.
:ivar status: This `Node`'s status. See the vocabulary
:class:`NODE_STATUS`.
:ivar owner: This `Node`'s owner if it's in use, None otherwise.
:ivar after_commissioning_action: The action to perform after
commissioning. See vocabulary
:class:`NODE_AFTER_COMMISSIONING_ACTION`.
:ivar power_type: The :class:`POWER_TYPE` that determines how this
node will be powered on. If not given, the default will be used as
configured in the `node_power_type` setting.
:ivar nodegroup: The `NodeGroup` this `Node` belongs to.
:ivar tags: The list of :class:`Tag`s associated with this `Node`.
:ivar objects: The :class:`NodeManager`.
"""
    class Meta(DefaultMeta):
        """Needed for South to recognize this model."""
    # Unique identifier (e.g. 'node-41eba45e-...'); generated once at
    # creation and never editable afterwards.
    system_id = CharField(
        max_length=41, unique=True, default=generate_node_system_id,
        editable=False)
    # Hostname; may be blank, but non-blank values must be unique.
    hostname = CharField(max_length=255, default='', blank=True, unique=True)
    # Lifecycle status (see NODE_STATUS); managed by code, not editable.
    status = IntegerField(
        max_length=10, choices=NODE_STATUS_CHOICES, editable=False,
        default=NODE_STATUS.DEFAULT_STATUS)
    # Owning user while the node is in use; None when unowned.
    owner = ForeignKey(
        User, default=None, blank=True, null=True, editable=False)
    # Action to perform once commissioning completes.
    after_commissioning_action = IntegerField(
        choices=NODE_AFTER_COMMISSIONING_ACTION_CHOICES,
        default=NODE_AFTER_COMMISSIONING_ACTION.DEFAULT)
    # Release to deploy; empty string means the default series.
    distro_series = CharField(
        max_length=20, choices=DISTRO_SERIES_CHOICES, null=True,
        blank=True, default='')
    # CPU architecture of the node.
    architecture = CharField(
        max_length=31, choices=ARCHITECTURE_CHOICES, blank=False,
        default=ARCHITECTURE.i386)
    # Array of MAC addresses stored with the PostgreSQL macaddr type.
    # NOTE(review): presumably the routers this node is attached to --
    # confirm semantics with callers.
    routers = djorm_pgarray.fields.ArrayField(dbtype="macaddr")
    agent_name = CharField(max_length=255, default='', blank=True, null=True)
    # Availability zone, referenced by name rather than primary key.
    zone = ForeignKey(
        Zone, to_field='name', verbose_name="Availability zone",
        default=None, blank=True, null=True, editable=True, db_index=True)
    # Juju expects the following standard constraints, which are stored here
    # as a basic optimisation over querying the lshw output.
    cpu_count = IntegerField(default=0)
    memory = IntegerField(default=0)
    storage = IntegerField(default=0)
    # For strings, Django insists on abusing the empty string ("blank")
    # to mean "none."
    power_type = CharField(
        max_length=10, choices=POWER_TYPE_CHOICES, null=False, blank=True,
        default=POWER_TYPE.DEFAULT)
    # JSON-encoded set of parameters for power control.
    power_parameters = JSONObjectField(blank=True, default="")
    # Token associated with this node, if any; not editable via forms.
    token = ForeignKey(
        Token, db_index=True, null=True, editable=False, unique=False)
    # Human-readable description of the last error, if any.
    error = CharField(max_length=255, blank=True, default='')
    # Whether the node should boot from the network on next start.
    netboot = BooleanField(default=True)
    license_key = CharField(max_length=30, null=True, blank=True)
    # This field can't be null, but we can't enforce that in the
    # database schema because we can only create the default value from
    # a complete schema, after schema migration. We can't use custom
    # model validation either, because the node forms need to set their
    # default values *after* saving the form (with commit=False), which
    # incurs validation before the default values are set.
    # So all we can do is set blank=False, and make the field editable
    # to cajole Django out of skipping it during "model" (actually model
    # form) validation.
    nodegroup = ForeignKey(
        'maasserver.NodeGroup', editable=True, null=True, blank=False)
    tags = ManyToManyField(Tag)
    objects = NodeManager()
def __unicode__(self):
if self.hostname:
return "%s (%s)" % (self.system_id, self.fqdn)
else:
return self.system_id
@property
def fqdn(self):
"""Fully qualified domain name for this node.
If MAAS manages DNS for this node, the domain part of the
hostname (if present), is replaced by the domain configured
on the cluster controller.
If not, |
other available options: ({})".format(
options_hr)))
elif options == "any" or r in options:
answer = r
else:
await self.bot.send_message(
user,
cf.warning(
"Please choose one of the available options: ({})"
.format(cf.bold(options_hr))))
if not answer:
await self.bot.send_message(
user,
cf.info("Survey {} is now closed.".format(survey_id)))
else:
if not self._save_answer(
server_id, survey_id, user, answer, change):
await self.bot.send_message(
user,
cf.warning(
"That answer has reached its limit. Answer could"
" not be {}. To try again, use `{}changeanswer {}`"
" in this DM.".format(
"changed" if change else "recorded",
prefix, survey_id)))
return
await self._update_answers_message(server_id, survey_id)
await self.bot.send_message(
user,
cf.info(
"Answer {}. If you want to change it, use"
" `{}changeanswer {}` in this DM.\nYou can see all the"
" answers in {}.".format(
"changed" if change else "recorded",
prefix, survey_id, achannel.mention)))
except asyncio.CancelledError:
await self.bot.send_message(
user,
cf.info("Survey {} has been closed.".format(survey_id)))
except discord.Forbidden:
return
@commands.command(pass_context=True, no_pm=True, name="startsurvey")
@checks.admin_or_permissions(administrator=True)
async def _startsurvey(self, ctx: commands.Context,
role: discord.Role, channel: discord.Channel,
question: str, options: str, *, deadline: str):
"""Starts a new survey.
Role is the Discord server role to notify. Should be the @<role>.
Channel is the channel in which to post results. Should be #<channel>
Question is the survey question.
Options should be a semicolon-separated list of options, or * to allow any option.
Each option is of the format <name>:<limit>:<reprompt>:<link>, where everything but <name> is optional, i.e. the simplest form is <opt1>;<opt2>;...
<name> is the name of the option.
<limit> is the maximum number of answers that are this option.
<reprompt> is the time, in minutes, before the deadline to reprompt those who answered with this option.
<link> is the name of a different option. If set, reprompt will only happen if the given option has not hit its l | imit of responses. Re | quires that <reprompt> is set.
Deadline should be of a sane time format, date optional, but timezone abbreviation is strongly recommended (otherwise UTC is assumed).
For example: [p]startsurvey everyone channel_name "Question here. Which should be enclosed in double quotes because it includes SPACES" "Yes;No;Options enclosed with double quotes too, if contain SPACES" 2016/12/25 12:00
"""
server = ctx.message.server
if server.id not in self.surveys:
self.surveys[server.id] = {}
dataIO.save_json(self.surveys_path, self.surveys)
try:
dl = self._deadline_string_to_datetime(deadline)
deadline_better = dl.strftime("%m/%d/%Y %I:%S%p %Z")
except ValueError:
await self.bot.reply(cf.error(
"Your deadline format could not be understood."
" Please try again."))
return
except PastDeadlineError:
await self.bot.reply(cf.error(
"Your deadline is in the past."))
return
opts = await self._parse_options(options)
if opts == "return":
return
new_survey_id = str(self.surveys["next_id"])
self.surveys["next_id"] += 1
dataIO.save_json(self.surveys_path, self.surveys)
self.surveys[server.id][new_survey_id] = {}
dataIO.save_json(self.surveys_path, self.surveys)
self._save_prefix(server.id, new_survey_id, ctx.prefix)
self._save_deadline(server.id, new_survey_id, deadline_better)
self._save_channel(server.id, new_survey_id, channel.id)
self._save_question(server.id, new_survey_id, question)
self._save_options(server.id, new_survey_id, opts if opts else "any")
self._setup_reprompts(server.id, new_survey_id)
self._schedule_close(server.id, new_survey_id, self._get_timeout(dl))
users_with_role = self._get_users_with_role(server, role)
self._save_asked(server.id, new_survey_id, users_with_role)
try:
await self._update_answers_message(server.id, new_survey_id)
except discord.Forbidden:
await self.bot.reply(
"I do not have permission to talk in {}.".format(
channel.mention))
return
for user in users_with_role:
new_task = self.bot.loop.create_task(
self._send_message_and_wait_for_message(server.id,
new_survey_id, user))
self.tasks[new_survey_id].append(new_task)
await self.bot.reply(cf.info("Survey started. You can close it with"
" `{}closesurvey {}`.".format(
ctx.prefix, new_survey_id)))
@commands.command(pass_context=True, no_pm=True, name="closesurvey")
@checks.admin_or_permissions(administrator=True)
async def _closesurvey(self, ctx: commands.Context,
survey_id: str):
"""Cancels the given survey."""
server = ctx.message.server
surver = self._get_server_id_from_survey_id(survey_id)
if not surver or server.id != surver:
await self.bot.reply(cf.error("Survey with ID {} not found."
.format(survey_id)))
return
if survey_id in self.surveys["closed"]:
await self.bot.reply(cf.warning(
"Survey with ID {} is already closed.".format(survey_id)))
return
if survey_id in self.tasks:
for t in self.tasks[survey_id]:
t.cancel()
del self.tasks[survey_id]
self._mark_as_closed(survey_id)
await self.bot.reply(cf.info("Survey with ID {} closed."
.format(survey_id)))
@commands.command(pass_context=True, no_pm=False, name="changeanswer")
async def _changeanswer(self, ctx: commands.Context,
survey_id: str):
"""Changes the calling user's response for the given survey."""
user = ctx.message.author
server_id = self._get_server_id_from_survey_id(survey_id)
if survey_id in self.surveys["closed"]:
await self.bot.send_message(user,
cf.error("That survey is closed."))
return
if not server_id:
await self.bot.send_message(user, cf.error(
"Survey with ID {} not found.".format(survey_id)))
return
new_task = self.bot.loop.create_task(
self._send_message_and_wait_for_message(server_id,
survey_id, user,
change=True))
self.tasks[survey_id].append(new_task)
def check_folders():
    """Create the cog's data directory on first run (idempotent)."""
    folder = "data/survey"
    if os.path.exists(folder):
        return
    print("Creating data/survey directory...")
    os.makedirs(folder)
def check_f |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
import applications.discount.models
class Migration(migrations.Migration):
    # Auto-generated migration: repoints the Action model's defaults at
    # model-level callables instead of fixed values.
    dependencies = [
        ('discount', '0005_auto_20160507_2145'),
    ]
    operations = [
        # verbose_name decodes to Russian "Дата окончания акции"
        # (promotion end date).
        migrations.AlterField(
            model_name='action',
            name='datetime_end',
            field=models.DateTimeField(default=applications.discount.models.default_datetime_end, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u043a\u043e\u043d\u0447\u0430\u043d\u0438\u044f \u0430\u043a\u0446\u0438\u0438'),
        ),
        # verbose_name decodes to "Дата начала акции" (promotion start
        # date); default is evaluated at save time via datetime.now.
        migrations.AlterField(
            model_name='action',
            name='datetime_start',
            field=models.DateTimeField(default=datetime.datetime.now, verbose_name='\u0414\u0430\u0442\u0430 \u043d\u0430\u0447\u0430\u043b\u0430 \u0430\u043a\u0446\u0438\u0438'),
        ),
        # verbose_name decodes to "Наименование акции" (promotion name).
        migrations.AlterField(
            model_name='action',
            name='name',
            field=models.CharField(default=applications.discount.models.default_action_name, max_length=256, verbose_name='\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435 \u0430\u043a\u0446\u0438\u0438'),
        ),
    ]
|
#!/usr/bin/env python3
#
# Script for polling N64/GC SI bus devices
#
# This script uses the serial bridge and pool in loops
# for the buttons status.
#
# It currently supports N64 controllers, N64 mouses & GameCube controllers.
#
# --Jacques Gagnon <darthcloud@gmail.com>
#
from bus import Bus
from collections import namedtuple, OrderedDict
import struct, time, os, sys
# (label, bitmask, ANSI colour) describing one button bit or axis byte.
bmap = namedtuple('status', 'name mask color')
# SI bus request frames.
identity_req = namedtuple('identity_req', 'cmd')
status_req = namedtuple('status_req', 'cmd')
read_req = namedtuple('read_req', 'cmd address')
write_req = namedtuple('write_req', 'cmd address data')
dol_status_req = namedtuple('dol_status_req', 'cmd unknown rumble')
dol_wb_assoc_req = namedtuple('dol_wb_assoc_req', 'cmd id')
# SI bus response frames.
identity_resp = namedtuple('identity_resp', 'id info')
status_resp = namedtuple('status_resp', 'buttons x_axis y_axis')
dol_status_resp = namedtuple('dol_status_resp', 'buttons x_axis y_axis cx_axis cy_axis l_trigger r_trigger')
# ANSI SGR escape sequences used to colour the terminal output.
RED='\x1b[1;91m'
GREEN='\x1b[1;92m'
YELLOW='\x1b[1;93m'
BLUE='\x1b[1;94m'
MAGENTA='\x1b[1;95m'
CYAN='\x1b[1;96m'
LGRAY='\x1b[1;37m'
DGRAY='\x1b[1;90m'
END='\x1b[0m'
# SI bus command bytes sent to the device.
IDENTIFY = 0x00
STATUS = 0x01
READ = 0x02
WRITE = 0x03
DOL_STATUS = 0x40
WB_INIT = 0x4E
# Device identity codes (first byte of the IDENTIFY reply).
MOUSE = 0x02
NUS = 0x05
DOL = 0x09
WB_DOWN = 0xA8
WB_AUTH = 0xE9
WB_ASSOC = 0xEB
# N64 controller accessory-slot states (third byte of the IDENTIFY reply).
EMPTY = 0x00
OCCUPY = 0x01
EMPTIED = 0x02
INSERT = 0x03
# Per-system display map: status-response field name -> list of bmap
# entries.  A mask of 0xFF marks a whole analog byte (printed as a hex
# value by poll()); any other mask is a single button bit.
BUTTON = {NUS:OrderedDict(
              [('buttons',[bmap('A',0x8000,BLUE),
                           bmap('B',0x4000,GREEN),
                           bmap('Z',0x2000,LGRAY),
                           bmap('St',0x1000,RED),
                           bmap('Up',0x0800,LGRAY),
                           bmap('Dn',0x0400,LGRAY),
                           bmap('Lt',0x0200,LGRAY),
                           bmap('Rt',0x0100,LGRAY),
                           bmap('L',0x0020,LGRAY),
                           bmap('R',0x0010,LGRAY),
                           bmap('CUp',0x0008,YELLOW),
                           bmap('CDn',0x0004,YELLOW),
                           bmap('CLt',0x0002,YELLOW),
                           bmap('CRt',0x0001,YELLOW)]),
               ('x_axis', [bmap('X',0xFF,LGRAY)]),
               ('y_axis', [bmap('Y',0xFF,LGRAY)])]),
          DOL:OrderedDict(
              [('buttons',[bmap('St',0x1000,LGRAY),
                           bmap('Y',0x0800,LGRAY),
                           bmap('X',0x0400,LGRAY),
                           bmap('B',0x0200,RED),
                           bmap('A',0x0100,CYAN),
                           bmap('L',0x0040,LGRAY),
                           bmap('R',0x0020,LGRAY),
                           bmap('Z',0x0010,MAGENTA),
                           bmap('Up',0x0008,LGRAY),
                           bmap('Dn',0x0004,LGRAY),
                           bmap('Rt',0x0002,LGRAY),
                           bmap('Lt',0x0001,LGRAY)]),
               ('x_axis', [bmap('X',0xFF,LGRAY)]),
               ('y_axis', [bmap('Y',0xFF,LGRAY)]),
               ('cx_axis', [bmap('CX',0xFF,YELLOW)]),
               ('cy_axis', [bmap('CY',0xFF,YELLOW)]),
               ('l_trigger', [bmap('AL',0xFF,LGRAY)]),
               ('r_trigger', [bmap('AR',0xFF,LGRAY)])])}
class Bus(Bus):
    """SI-bus helper layered on the serial-bridge Bus.

    Deliberately shadows the imported ``Bus`` to add device
    identification, status polling and WaveBird initialisation.
    """
    def identify(self):
        """Send IDENTIFY and map the 3-byte reply to a device dict.

        Exits the process (sys.exit) on replies it does not recognise.
        """
        reply = self.bridge.write(bytes([IDENTIFY]), 3)[1]
        if reply[0] == MOUSE:
            return {'system':NUS, 'type':'mouse'}
        elif reply[0] == NUS:
            # For N64 pads, reply[2] describes the accessory-slot state.
            if reply[2] == EMPTY:
                return {'system':NUS, 'type':'controller', 'slot':'Empty '}
            elif reply[2] == OCCUPY:
                return {'system':NUS, 'type':'controller', 'slot':'Occupy '}
            elif reply[2] == EMPTIED:
                return {'system':NUS, 'type':'controller', 'slot':'Emptied'}
            elif reply[2] == INSERT:
                return {'system':NUS, 'type':'controller', 'slot':'Insert '}
            else:
                print("Unknown N64 controller slot state: {}".format(reply))
                sys.exit()
        elif reply[0] == DOL:
            return {'system':DOL, 'type':'controller'}
        elif reply[0] == WB_DOWN:
            return {'system':WB_DOWN, 'type':'wavebird'}
        elif reply[0] == WB_AUTH:
            # Unassociated WaveBird: keep the last two id bytes for
            # wavebird_init().
            return {'system':WB_AUTH, 'type':'wavebird', 'id':reply[-2:]}
        elif reply[0] == WB_ASSOC:
            return {'system':DOL, 'type':'wavebird'}
        else:
            print("Unknown device identity: {}".format(reply))
            sys.exit()
    def status(self, system):
        """Poll an N64 (NUS) or GameCube (DOL) device and return the
        matching *_resp namedtuple; exits on unknown system ids."""
        if system == NUS:
            reply = self.bridge.write(bytes([STATUS]), 4)[1]
            return status_resp._make(struct.unpack('>H2b', reply))
        elif system == DOL:
            reply = self.bridge.write(struct.pack(">BH", DOL_STATUS, 0x0300), 8)[1]
            return dol_status_resp._make(struct.unpack('>H6B', reply))
        else:
            print("Unknown system ID: {}".format(system))
            sys.exit()
    def wavebird_init(self, id):
        """Send the WB_INIT command using the id bytes from identify().

        NOTE(review): (id[0] | 0x20) & 0x10 always reduces to
        id[0] & 0x10 -- the OR has no effect.  Possibly a typo for a
        different mask; confirm against the SI protocol.
        """
        return self.bridge.write(struct.pack(">BBB", WB_INIT, (id[0] | 0x20) & 0x10, id[1]), 3)[1]
def poll():
    """Identify the attached SI device, then continuously poll it and
    render the button/axis state in place on the terminal."""
    os.system('setterm -cursor off')  # hide cursor for in-place redraw
    interface = Bus()
    device = interface.identify()
    time.sleep(0.02)
    # A WaveBird receiver reports WB_DOWN until the pad is up; keep
    # re-identifying until it appears.
    while device['system'] == WB_DOWN:
        device = interface.identify()
        time.sleep(1)
    if device['system'] == WB_AUTH:
        interface.wavebird_init(device['id'])
    try:
        while 1:
            device = interface.identify()
            time.sleep(0.02)
            status = interface.status(device['system'])
            for field, values in BUTTON[device['system']].items():
                for value in values:
                    if value.mask != 0xFF:
                        # Single button bit: colour the label if pressed,
                        # dim it otherwise.
                        print("{}{}{} ".format(value.color if getattr(status, field) & value.mask else DGRAY, value.name, END), end='')
                    else:
                        # Analog axis byte: print as a signed hex value.
                        print("{}{}:{:+03X}{} ".format(value.color, value.name, getattr(status, field), END), end='')
            if 'slot' in device:
                print("slot:{}".format(device['slot']), end='')
            # Carriage return (no newline) so the next frame overwrites
            # this one.
            print("\r", end='')
            time.sleep(0.02)
    except KeyboardInterrupt:
        pass
    os.system('setterm -cursor on')
    print("")
# Script entry point: run the polling loop when executed directly.
if __name__ == "__main__":
    poll()
### The End ###
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.