prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
#!/usr/bin/env/python
# NOTE(review): shebang is malformed -- "env/python" should be "env python";
# left byte-identical in this documentation-only pass.
""" make_enum.py -- make enumerations for positions """

__author__ = "Michael Conlon"
__copyright__ = "Copyright 2015 (c) Michael Conlon"
__license__ = "BSD 3-Clause license"
__version__ = "0.1.1"

from datetime import datetime
from vivopump import get_parms, create_enum


def main():
    """ Generate the enums for positions """
    # Python 2 print statements: timestamps bracket the run as simple progress logging.
    print datetime.now(), "Start"
    # get_parms() presumably reads command-line/config parameters consumed by
    # create_enum -- TODO confirm against vivopump's documentation.
    parms = get_parms()

    # person via Orcid: one enum row per ?vivo URI, keyed by its (minimum) ORCID id.
    query = """
    SELECT (MIN (?xshort) AS ?short) ?vivo
    WHERE {
        ?vivo vivo:orcidId ?xshort .
    }
    GROUP BY ?vivo
    ORDER BY ?short
    """
    create_enum("orcid_enum.txt", query, parms)

    # department via label: any organizational unit type, keyed by its
    # (minimum) rdfs:label so each ?vivo appears exactly once.
    query = """
    SELECT (MIN (?xlabel) AS ?short) ?vivo
    WHERE {
        {?vivo a vivo:Department . } UNION {?vivo a vivo:Institute . }
        UNION {?vivo a vivo:School . } UNION {?vivo a vivo:Center .}
        UNION {?vivo a vivo:College . }
        ?vivo rdfs:label ?xlabel .
    }
    GROUP BY ?vivo
    ORDER BY ?short
    """
    create_enum("dept_enum.txt", query, parms)

    # dates via datetime; trim=10 keeps only the leading "YYYY-MM-DD" of the
    # xsd:dateTime value.  The precision filter line is commented out inside
    # the SPARQL text (a '#' comment is part of the query string).
    query = """
    SELECT ?short ?vivo
    WHERE {
        ?vivo a vivo:DateTimeValue .
    #   ?vivo vivo:dateTimePrecision vivo:yearMonthDayPrecision .
        ?vivo vivo:dateTime ?short .
    }
    ORDER BY ?short
    """
    create_enum("date_enum.txt", query, parms, trim=10)
    print datetime.now(), "End"

if __name__ == "__main__":
    main()
rix(cPickle.load(open(path)), dtype=theano.config.floatX) def compute_prauc(pred, lab): pred = np.asarray(pred) lab = np.asarray(lab) order = np.argsort(pred) lab_ordered = lab[order] pred_ordered = pred[order] precision = {} recall = {} # All examples are classified 1 precision[np.min(pred_ordered) - 1.0] = (np.sum(lab_ordered) / float(len(lab))) recall[np.min(pred_ordered) - 1.0] = 1. for i in range(len(lab)): if len(lab) - i - 1 == 0: # No examples are classified 1 precision[pred_ordered[i]] = 1 else: precision[pred_ordered[i]] = (np.sum(lab_ordered[i + 1:]) / float(len(lab) - i - 1)) recall[pred_ordered[i]] = (np.sum(lab_ordered[i + 1:]) / float(np.sum(lab_ordered))) # Precision-Recall curve points points = [] for i in np.sort(precision.keys())[::-1]: points += [(float(recall[i]), float(precision[i]))] # Compute area auc = sum((y0 + y1) / 2. * (x1 - x0) for (x0, y0), (x1, y1) in zip(points[:-1], points[1:])) return auc class DD(dict): """This class is only used to replace a state variable of Jobman""" def __getattr__(self, attr): if attr == '__getstate__': return super(DD, self).__getstate__ elif attr == '__setstate__': return super(DD, self).__setstate__ elif attr == '__slots__': return super(DD, self).__slots__ return self[attr] def __setattr__(self, attr, value): assert attr not in ('__getstate__', '__setstate__', '__slots__') self[attr] = value def __str__(self): return 'DD%s' % dict(self) def __repr__(self): return str(self) def __deepcopy__(self, memo): z = DD() for k, kv in self.iteritems(): z[k] = copy.deepcopy(kv, memo) return z # ---------------------------------------------------------------------------- # Experiment function -------------------------------------------------------- def Tensorexp(state, channel): # Show experiment parameters print >> sys.stderr, state np.random.seed(state.seed) # Experiment folder if hasattr(channel, 'remote_path'): state.savepath = channel.remote_path + '/' elif hasattr(channel, 'path'): state.savepath = channel.path 
+ '/' else: if not os.path.isdir(state.savepath): os.mkdir(state.savepath) # Positives trainl = load_file(state.datapath + state.dataset + '-train-pos-lhs-fold%s.pkl' % state.fold) trainr = load_file(state.datapath + state.dataset + '-train-pos-rhs-fold%s.pkl' % state.fold) traino = load_file(state.datapath + state.dataset + '-train-pos-rel-fold%s.pkl' % state.fold) if state.op == 'SE': traino = traino[-state.Nrel:, :] # Negatives trainln = load_file(state.datapath + state.dataset + '-train-neg-lhs-fold%s.pkl' % state.fold) trainrn = load_file(state.datapath + state.dataset + '-train-neg-rhs-fold%s.pkl' % state.fold) trainon = load_file(state.datapath + state.dataset + '-train-neg-rel-fold%s.pkl' % state.fold) if state.op == 'SE': trainon = trainon[-state.Nrel:, :] # Valid set validl = load_file(state.datapath + state.dataset + '-valid-lhs-fold%s.pkl' % state.fold) validr = load_file(state.datapath + state.dataset + '-valid-rhs-fold%s.pkl' % state.fold) valido = load_file(state.datapath + state.dataset + '-valid-rel-fold%s.pkl' % state.fold) if state.op == 'SE': valido = valido[-state.Nrel:, :] outvalid = cPickle.load(open(state.datapath + '%s-valid-targets-fold%s.pkl' % (state.dataset, state.fold))) # Test set testl = load_file(state.datapath + state.dataset + '-test-lhs-fold%s.pkl' % state.fold) testr = load_file(state.datapath + state.dataset + '-test-rhs-fold%s.pkl' % state.fold) testo = load_file(state.datapath + state.dataset + '-test-rel-fold%s.pkl' % state.fold) if state.op == 'SE': testo = testo[-state.Nrel:, :] outtest = cPickle.load(open(state.datapath + '%s-test-targets-fold%s.pkl' % (state.dataset, state.fold))) # Model declaration if not state.loadmodel: # operators if state.op == 'Unstructured': leftop = Unstructured() rightop = Unstructured() elif state.op == 'SME_lin': leftop = LayerLinear(np.random, 'lin', state.ndim, state.ndim, state.nhid, 'left') rightop = LayerLinear(np.random, 'lin', state.ndim, state.ndim, state.nhid, 'right') elif state.op 
== 'SME_bil': leftop = LayerBilinear(np.random, 'lin', state.ndim, state.ndim, state.nhid, 'left') rightop = LayerBilinear(np.random, 'lin', state.ndim, state.ndim, state.nhid, 'right') elif state.op == 'SE': leftop = LayerMat('lin', state.ndim, state.nhid) rightop = LayerMat('lin', state.ndim, state.nhid) # embeddings if not state.loademb: embeddings = Embeddings(np.random, state.Nent, state.ndim, 'emb') else: f = open(state.loademb) embeddings = cPickle.load(f) f.close() if state.op == 'SE' and type(embeddings) is not list: relationl = Embeddings(np.random, state.Nrel, state.ndim * state.nhid, 'rell') relationr = Embeddings(np.random, state.Nrel, state.ndim * state.nhid, 'relr') embeddings = [embeddings, relationl, relationr] simfn = eval(state.simfn + 'sim') else: f = open(state.loadmodel) embeddings = cPickle.load(f) leftop = cPickle.load(f) rightop = cPickle.load(f) simfn = cPickle.load(f) f.close() # Functions compilation trainfunc = TrainFn(simfn, embeddings, leftop, rightop, marge=state.marge) testfunc = SimFn(simfn, embeddings, leftop, rightop) out = [] outb = [] state.bestvalid = -1 batchsize = trainl.shape[1] / state.nbatches print >> sys.stderr, "BEGIN TRAINING" timeref = time.time() for epoch_count in xrange(1, state.totepochs + 1): # Shuffling order = np.random.permutation(trainl.shape[1]) trainl = trainl[:, order] trainr = trainr[:, order] traino = traino[:, order] order = np.random.permutation(trainln.shape[1]) trainln = trainln[:, order] trainrn = trainrn[:, order] trainon = trainon[:, order] for i in range(state.nbatches): tmpl = trainl[:, i * batchsize:(i + 1) * batchsize] tmpr = trainr[:, i *
batchsize:(i + 1) * batchsize] tmpo = traino[:, i * batchsize:(i + 1) * batchsize] tmpln = trainln[:, i * batchsize:(i + 1) * batchsize] tmprn = trainrn[:, i * batchsize:(i + 1) * batchsize] tmpon = trainon[:, i * batchsize:(i + 1) * batchsize] # training iteration outtmp = trainfunc(state.lremb, state.lrparam / float(batchsize), tmpl, tmpr, tmpo, tmpln, tmprn, tmpon) out += [outtmp[0
] / float(batchsize)] outb += [outtmp[1]] # embeddings normalization if type(embeddings) is list: embeddings[0].normalize() else: embeddings.normalize() if (epoch_count % state.test_all) == 0: # model evaluation print >> sys.stderr, "-- EPOCH %s (%s seconds per epoch):" % ( epoch_count, round(time.time() - timeref, 3) / float(state.test_all)) timeref = time.time() print >> sys.stderr, "COST >> %s +/- %s, %% updates: %s%%" % ( round(np.mean(out), 4), round(np.std(out), 4), round(np.mean(outb) * 100, 3))
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import shutil
import sys
import tempfile
import zipfile

import yaml

from murano.engine import yaql_yaml_loader
import murano.packages.application_package
import murano.packages.exceptions as e
import murano.packages.versions.hot_v1
import murano.packages.versions.mpl_v1


def load_from_file(archive_path, target_dir=None, drop_dir=False,
                   loader=yaql_yaml_loader.YaqlYamlLoader):
    """Extract a zipped Murano package archive and load it.

    :param archive_path: path to the .zip package file
    :param target_dir: directory to extract into; created (tempdir or mkdir)
        when absent, required to be empty when it already exists
    :param drop_dir: when True, clean up after loading: remove the whole
        directory if this call created it, otherwise only the files inside it
    :param loader: YAML loader class handed through to load_from_dir
    :raises e.PackageLoadError: missing archive or non-empty target dir
    :raises e.PackageFormatError: file is not a zip archive
    """
    if not os.path.isfile(archive_path):
        raise e.PackageLoadError('Unable to find package file')
    # 'created' records whether *we* made target_dir, so cleanup below can
    # distinguish "remove the whole tree" from "empty out a caller-owned dir".
    created = False
    if not target_dir:
        target_dir = tempfile.mkdtemp()
        created = True
    elif not os.path.exists(target_dir):
        os.mkdir(target_dir)
        created = True
    else:
        if os.listdir(target_dir):
            raise e.PackageLoadError('Target directory is not empty')
    try:
        if not zipfile.is_zipfile(archive_path):
            raise e.PackageFormatError("Uploaded file {0} is not a "
                                       "zip archive".format(archive_path))
        package = zipfile.ZipFile(archive_path)
        package.extractall(path=target_dir)
        return load_from_dir(target_dir, preload=True, loader=loader)
    finally:
        # Cleanup runs on both success and failure when drop_dir is set.
        # NOTE(review): the non-created branch unlinks files only; a package
        # containing subdirectories would leave them behind -- TODO confirm
        # whether extracted packages are expected to be flat here.
        if drop_dir:
            if created:
                shutil.rmtree(target_dir)
            else:
                for f in os.listdir(target_dir):
                    os.unlink(os.path.join(target_dir, f))


def load_from_dir(source_directory, filename='manifest.yaml', preload=False,
                  loader=yaql_yaml_loader.YaqlYamlLoader):
    """Load a Murano package from an (already extracted) directory.

    :param source_directory: directory containing the package manifest
    :param filename: manifest file name within source_directory
    :param preload: when True, validate() the package before returning
    :param loader: YAML loader class handed to the format-version module
    :returns: the package object built by the matching versions module, or
        None when the manifest parses to an empty document (falls through
        the 'if content' guard)
    :raises e.PackageLoadError: bad directory, missing manifest, or YAML error
    :raises e.PackageFormatError: unknown/missing 'Format' value
    """
    # Map of supported manifest 'Format' strings to their loader modules.
    formats = {
        '1.0': murano.packages.versions.mpl_v1,
        'MuranoPL/1.0': murano.packages.versions.mpl_v1,
        'Heat.HOT/1.0': murano.packages.versions.hot_v1
    }

    if not os.path.isdir(source_directory) or not os.path.exists(
            source_directory):
        raise e.PackageLoadError('Invalid package directory')
    full_path = os.path.join(source_directory, filename)
    if not os.path.isfile(full_path):
        raise e.PackageLoadError('Unable to find package manifest')

    try:
        with open(full_path) as stream:
            content = yaml.safe_load(stream)
    except Exception as ex:
        # Python-2-only three-argument raise: re-raise as PackageLoadError
        # while preserving the original traceback.
        trace = sys.exc_info()[2]
        raise e.PackageLoadError(
            "Unable to load due to '{0}'".format(str(ex))), None, trace
    if content:
        # str() turns an absent 'Format' into the string "None", so the
        # 'p_format not in formats' test is what actually rejects it.
        p_format = str(content.get('Format'))
        if not p_format or p_format not in formats:
            raise e.PackageFormatError(
                'Unknown or missing format version')
        package = formats[p_format].create(source_directory, content, loader)
        formats[p_format].load(package, content)
        if preload:
            package.validate()
        return package
ction! " "To avoid, add errbacks to ALL remote commands!") if self.transport is not None: self.transport.loseConnection() _counter = 0L def _nextTag(self): """ Generate protocol-local serial numbers for _ask keys. @return: a string that has not yet been used on this connection. """ self._counter += 1 return '%x' % (self._counter,) _failAllReason = None def failAllOutgoing(self, reason): """ Call the errback on all outstanding requests awaiting responses. @param reason: the Failure instance to pass to those errbacks. """ self._failAllReason = reason OR = self._outstandingRequests.items() self._outstandingRequests = None # we can never send another request for key, value in OR: value.errback(reason) def ampBoxReceived(self, box): """ An AmpBox was received. Respond to it according to its contents. @param box: an AmpBox """ if ANSWER in box: question = self._outstandingRequests.pop(box[ANSWER]) question.addErrback(self._puke) question.callback(box) elif ERROR in box: question = self._outstandingRequests.pop(box[ERROR]) question.addErrback(self._puke) # protocol-recognized errors errorCode = box[ERROR_CODE] description = box[ERROR_DESCRIPTION] if errorCode in PROTOCOL_ERRORS: exc = PROTOCOL_ERRORS[errorCode](errorCode, description) else: exc = RemoteAmpError(errorCode, description) question.errback(Failure(exc)) elif COMMAND in box: cmd = box[COMMAND] def sendAnswer(answerBox): if ASK not in box: return if self.transport is None: return if self._locked: return answerBox[ANSWER] = box[ASK] answerBox._sendTo(self) def sendError(error): if ASK not in box: return error if error.check(RemoteAmpError): code = error.value.errorCode desc = error.value.description if error.value.fatal: errorBox = QuitBox() else: errorBox = AmpBox() else: errorBox = QuitBox() log.err(error) # here is where server-side logging happens # if the error isn't handled code = UNKNOWN_ERROR_CODE desc = "Unknown Error" errorBox[ERROR] = box[ASK] errorBox[ERROR_DESCRIPTION] = desc errorBox[ERROR_CODE] = 
code if self.transport is not None: errorBox._sendTo(self) return None # intentionally stop the error here: don't log the # traceback if it's handled, do log it (earlier) if # it isn't self.dispatchCommand(box).addCallbacks( sendAnswer, sendError).addErrback(self._puke) else: raise NoEmptyBoxes(box) def _sendBoxCommand(self, command, box, requiresAnswer=True): """ Send a command across the wire with the given C{amp.Box}. Mutate the given box to give it any additional keys (_command, _ask)
required for the command and request/response machinery, then send it. Returns a Deferred which fires w
ith the response C{amp.Box} when it is received, or fails with a C{amp.RemoteAmpError} if an error is received. If the Deferred fails and the error is not handled by the caller of this method, the failure will be logged and the connection dropped. @param command: a str, the name of the command to issue. @param box: an AmpBox with the arguments for the command. @param requiresAnswer: a boolean. Defaults to True. If True, return a Deferred which will fire when the other side responds to this command. If False, return None and do not ask the other side for acknowledgement. @return: a Deferred which fires the AmpBox that holds the response to this command, or None, as specified by requiresAnswer. """ if self._locked: raise ProtocolSwitched( "This connection has switched: no AMP traffic allowed.") if self._failAllReason is not None: return fail(self._failAllReason) box[COMMAND] = command tag = self._nextTag() if requiresAnswer: box[ASK] = tag result = self._outstandingRequests[tag] = Deferred() else: result = None box._sendTo(self) return result def callRemoteString(self, command, requiresAnswer=True, **kw): """ This is a low-level API, designed only for opitmizing simple messages for which the overhead of parsing is too great. @param command: a str naming the command. @param kw: arguments to the amp box. @param requiresAnswer: a boolean. Defaults to True. If True, return a Deferred which will fire when the other side responds to this command. If False, return None and do not ask the other side for acknowledgement. @return: a Deferred which fires the AmpBox that holds the response to this command, or None, as specified by requiresAnswer. """ box = Box(kw) return self._sendBoxCommand(command, box) def callRemote(self, commandType, *a, **kw): """ This is the primary high-level API for sending messages via AMP. Invoke it with a command and appropriate arguments to send a message to this connection's peer. @param commandType: a subclass of Command. 
@type commandType: L{type} @param a: Positional (special) parameters taken by the command. Positional parameters will typically not be sent over the wire. The only command included with AMP which uses positional parameters is L{ProtocolSwitchCommand}, which takes the protocol that will be switched to as its first argument. @param kw: Keyword arguments taken by the command. These are the arguments declared in the command's 'arguments' attribute. They will be encoded and sent to the peer as arguments for the L{commandType}. @return: If L{commandType} has a C{requiresAnswer} attribute set to L{False}, then return L{None}. Otherwise, return a L{Deferred} which fires with a dictionary of objects representing the result of this call. Additionally, this L{Deferred} may fail with an exception representing a connection failure, with L{UnknownRemoteError} if the other end of the connection fails for an unknown reason, or with any error specified as a key in L{commandType}'s C{errors} dictionary. """ # XXX this takes command subclasses and not command objects on purpose. # There's really no reason to have all this back-and-forth between # command objects and the protocol, and the extra object being created # (the Command instance) is pointless. Command is kind of like # Interface, and should be more like it. # In other words, the fact that commandType is instantiated here is an # implementation detail. Don't rely on it. co = commandType(*a, **kw) return co._doCommand(self) class Argument: """ Base-class of all objects that take values from Amp packets and convert them into objects for Python functions. """ optional = False def __init__(self, optional=False): """ Create an Argument. @param optional: a boolean indicating whether this argument can be omitt
#
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.


class ModuleDocFragment(object):
    # Shared documentation fragment for Dell EMC network modules: modules
    # reference it via 'extends_documentation_fragment' and Ansible's doc
    # tooling parses DOCUMENTATION as YAML.  The string below is runtime
    # data, not a docstring -- its content must not be edited casually.
    # NOTE(review): indentation of this YAML was reconstructed from a
    # whitespace-mangled source -- verify against the upstream fragment.

    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  provider:
    description:
      - A dict object containing connection details.
    default: null
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            device over the specified transport. The value of host is used as
            the destination address for the transport.
        required: true
      port:
        description:
          - Specifies the port to use when building the connection to the remote
            device.
        default: 22
      username:
        description:
          - User to authenticate the SSH session to the remote device. If the
            value is not specified in the task, the value of environment
            variable C(ANSIBLE_NET_USERNAME) will be used instead.
      password:
        description:
          - Password to authenticate the SSH session to the remote device. If
            the value is not specified in the task, the value of environment
            variable C(ANSIBLE_NET_PASSWORD) will be used instead.
        default: null
      ssh_keyfile:
        description:
          - Path to an ssh key used to authenticate the SSH session to the
            remote device. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be
            used instead.
      timeout:
        description:
          - Specifies idle timeout (in seconds) for the connection. Useful if
            the console freezes before continuing. For example when saving
            configurations.
        default: 10
notes:
  - For more information on using Ansible to manage Dell EMC Network devices see U(https://www.ansible.com/ansible-dell-networking).
"""
#!/usr/bin/env python
# Index OSM building ways from a local SQLite dump into a Solr core.
# Python 2 script: print statements, `except Exception, e`, unicode().

import pysolr
import os.path
import sqlite3
import sys
import json
import Geohash
import re
import geojson
import shapely.wkt
from shapely.geometry import Polygon
from shapely.geometry import LineString

# NOTE(review): Geohash, os.path and shapely.wkt are imported but never used
# in the visible code; left in place (doc-only pass).

solr = pysolr.Solr('http://localhost:9999/solr/buildings')
# Wipe the core before re-indexing everything.
solr.delete(q='*:*')

dbconn = sqlite3.connect('buildings.osm.db')
dbcurs = dbconn.cursor()

# Synthetic doc ids start just above the largest 32-bit WOE id so they can
# never collide with a real parent_woeid.
last_woeid = 2147483647
uid = last_woeid

count = 0
offset = 0
limit = 10000       # page size for walking the 'ways' table
counter = 0

docs = []           # batch of documents accumulated per page

sql = "SELECT COUNT(id) FROM ways"
dbcurs.execute(sql)
row = dbcurs.fetchone()
count = row[0]

# Page through every way, build one Solr doc per way, flush per page.
while offset < count :
    sql = "SELECT * FROM ways LIMIT %s, %s" % (offset, limit)
    print "%s (%s)" % (sql, count)
    dbcurs.execute(sql)
    for row in dbcurs.fetchall():
        counter += 1
        uid = uid + 1
        way_id, lat, lon, woeid, nodes, tags = row
        # Skip ways with missing or out-of-range coordinates.
        if not lat or not lon:
            continue
        if float(lat) < -90. or float(lat) > 90.:
            continue
        if float(lon) < -180. or float(lon) > 180.:
            continue
        if not woeid:
            woeid = 0
        nodes = nodes.split(',')
        points = []
        poly = None
        center = None
        alltags = {}    # merged tags from member nodes plus the way itself
        name = None
        tags = json.loads(tags)
        if tags.get('name', False):
            name = tags['name']
        # Resolve each member node to its (lon-ish, lat-ish) pair and fold
        # its tags in.  NOTE(review): column order node[2], node[1] is
        # presumably (lon, lat) -- confirm against the nodes schema.
        for node_id in nodes:
            dbcurs.execute("SELECT * FROM nodes WHERE id=?", (node_id, ))
            node = dbcurs.fetchone()
            points.append((node[2], node[1]))
            try:
                _tags = json.loads(node[3])
                for k,v in _tags.items():
                    alltags[k] = v
            except Exception, e:
                # Nodes without a JSON tag blob are silently skipped.
                pass
        # TO DO: fix me (define line)
        if len(points) == 2:
            # Two points cannot form a polygon; fall back to the segment's
            # centroid for both the "polygon" and the center.
            line = LineString(points)
            poly = line.centroid
            center = line.centroid
        else :
            # Close the ring and build a real polygon.
            points.append(points[0])
            poly = Polygon(points)
            center = poly.centroid
        # TO DO : trim decimal coordinates
        if poly:
            # poly = shapely.wkt.dumps(poly)
            poly = geojson.dumps(poly)
        if center :
            lat = center.y
            lon = center.x
        # tags: the way's own tags override node tags on key collisions.
        for k,v in tags.items():
            alltags[k] = v
        # building=yes carries no information for a buildings core.
        if alltags.get('building') and alltags['building'] == 'yes':
            del(alltags['building'])
        # Flatten tags to "ns/key/value" strings, escaping the separator
        # characters: '8' -> '88', '/' -> '8s', ':' -> '8c'.
        _alltags = []
        for k,v in alltags.items():
            tmp = k.split(":")
            v = unicode(v)
            v = re.sub("8", "88", v)
            v = re.sub("/", "8s", v)
            v = re.sub(":", "8c", v)
            tmp.append(v)
            _alltags.append("/".join(map(unicode, tmp)))
        alltags = _alltags
        # go!
        lat = float("%.6f" % lat)
        lon = float("%.6f" % lon)
        # Truncate every coordinate inside the GeoJSON string to 6 decimals.
        def stupid_floating_points(m):
            return m.group(1)
        poly = re.sub(r'(\.\d{6})\d+', stupid_floating_points, poly)
        #
        doc = {
            'id' : uid,
            'parent_woeid' : woeid,
            'way_id' : way_id,
            'nodes' : nodes,
            'centroid' : "%s,%s" % (lat,lon),
        }
        if poly != None :
            doc['polygon'] = poly
        if len(alltags):
            doc['tags'] = alltags
        if name != None:
            doc['name'] = name
        # Sanity check: refuse to index empty field values.
        for k,v in doc.items():
            if v == None or v == '':
                print "WTF %s : %s" % (k, v)
                sys.exit()
        print "[%s] add doc" % counter
        docs.append(doc)
        # if doc.get('tags'):
        #     print doc['tags']
    # Flush the page; on failure dump the batch to disk for post-mortem,
    # then re-raise (Python 2 raise form).
    try:
        solr.add(docs)
    except Exception, e:
        fh = open('add.json', 'w')
        fh.write(json.dumps(docs, indent=2))
        fh.close()
        raise Exception, e
    docs = []
    offset += limit

# Flush any stragglers (defensive: docs is emptied every page above).
if len(docs):
    solr.add(docs)
# vi: ts=4 expandtab
#
#    Copyright (C) 2009-2010 Canonical Ltd.
#    Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
#    Author: Scott Moser <scott.moser@canonical.com>
#    Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License version 3, as
#    published by the Free Software Foundation.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.

import cloudinit
import logging
import cloudinit.util as util
import traceback

DEF_FILENAME = "20-cloud-config.conf"
DEF_DIR = "/etc/rsyslog.d"


def handle(_name, cfg, _cloud, log, _args):
    """Write rsyslog config snippets from cloud-config and restart rsyslog.

    Config shape (each entry is either a bare content string or a dict):
    rsyslog:
     - "*.* @@192.158.1.1"
     - content: "*.* @@192.0.2.1:10514"
     - filename: 01-examplecom.conf
       content: |
         *.* @@syslogd.example.com

    Errors are collected and logged as warnings rather than raised, so one
    bad entry does not abort the rest of cloud-init.
    """
    # process 'rsyslog'
    if 'rsyslog' not in cfg:
        return

    def_dir = cfg.get('rsyslog_dir', DEF_DIR)
    def_fname = cfg.get('rsyslog_filename', DEF_FILENAME)

    files = []   # filenames written so far (first write truncates)
    elst = []    # (offender, message) pairs reported at the end
    for ent in cfg['rsyslog']:
        if isinstance(ent, dict):
            if "content" not in ent:
                elst.append((ent, "no 'content' entry"))
                continue
            content = ent['content']
            filename = ent.get("filename", def_fname)
        else:
            content = ent
            filename = def_fname

        # Relative filenames land in the rsyslog drop-in directory.
        if not filename.startswith("/"):
            filename = "%s/%s" % (def_dir, filename)

        # Truncate a file the first time we see it; append on repeats so
        # multiple entries can target the same snippet file.
        omode = "ab"
        if filename not in files:
            omode = "wb"
            files.append(filename)

        try:
            util.write_file(filename, content + "\n", omode=omode)
        except Exception as e:
            # BUG FIX: was traceback.format_exc(e) -- format_exc() takes a
            # traceback *limit*, not an exception (TypeError on Python 3).
            log.debug(traceback.format_exc())
            elst.append((content, "failed to write to %s" % filename))

    # need to restart syslogd
    restarted = False
    try:
        # if this config module is running at cloud-init time
        # (before rsyslog is running) we don't actually have to
        # restart syslog.
        #
        # upstart actually does what we want here, in that it doesn't
        # start a service that wasn't running already on 'restart'
        # it will also return failure on the attempt, so 'restarted'
        # won't get set
        log.debug("restarting rsyslog")
        util.subp(['service', 'rsyslog', 'restart'])
        restarted = True
    except Exception as e:
        elst.append(("restart", str(e)))

    if restarted:
        # this only needs to run if we *actually* restarted
        # syslog above: re-read logging config so cloud-init's own logs
        # flow through the freshly configured daemon.
        cloudinit.logging_set_from_cfg_file()
        log = logging.getLogger()
        log.debug("rsyslog configured %s" % files)

    for e in elst:
        log.warn("rsyslog error: %s\n" % ':'.join(e))

    return
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#  ${FILENAME}
#
#  Copyright 2015 Anupam Mitra <anupam.mitra@gmail.com>
#
#  This program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software
#  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
#  MA 02110-1301, USA.
#
#

import numpy as np
import pywt


def wavelet_decomp(s, wavelet='haar', levels=4):
    """Wavelet-decomposition features: one row of stacked wavedec
    coefficients per signal.

    Parameters
    ----------
    s : array_like, shape (n_signals, n_samples)
        Signal segments to decompose, one per row.
    wavelet : str
        Wavelet basis to use for decomposition.
    levels : int
        Level of wavelet decomposition.

    Returns
    -------
    features : ndarray, shape (n_signals, n_coeffs)
        Row j holds np.hstack(pywt.wavedec(s[j], ...)).
    """
    s = np.asarray(s)
    n_spikes = np.shape(s)[0]
    # Robustness fix: no rows -> empty feature matrix instead of IndexError.
    if n_spikes == 0:
        return np.empty((0, np.shape(s)[1]))
    # BUG FIX: the stacked coefficient vector is generally NOT n_samples
    # long (it only is for e.g. 'haar' on power-of-two lengths, because of
    # boundary padding at each level).  Size the output from an actual
    # decomposition instead of hard-coding np.shape(s)[1].
    first = np.hstack(pywt.wavedec(s[0, :], wavelet=wavelet, level=levels))
    features = np.empty((n_spikes, len(first)))
    features[0, :] = first
    for j in range(1, n_spikes):
        wd = pywt.wavedec(s[j, :], wavelet=wavelet, level=levels)
        features[j, :] = np.hstack(wd)
    return features
import pytest
from django.urls import reverse

from adhocracy4.modules.models import Module
from adhocracy4.test.helpers import redirect_target


@pytest.mark.django_db
def test_module_delete_perms(client, phase, user, user2):
    """Anonymous -> login redirect, plain user -> 403, initiator -> allowed."""
    module = phase.module
    module_delete_url = reverse('a4dashboard:module-delete', kwargs={
        'slug': module.slug})
    # Anonymous POST is bounced to the login page.
    response = client.post(module_delete_url)
    assert redirect_target(response) == 'account_login'

    # An authenticated non-initiator is forbidden.
    client.login(username=user, password='password')
    response = client.post(module_delete_url)
    assert response.status_code == 403

    # An organisation initiator may delete and lands on project-edit.
    organisation = module.project.organisation
    organisation.initiators.add(user2)
    client.login(username=user2, password='password')
    response = client.post(module_delete_url)
    assert redirect_target(response) == 'project-edit'


@pytest.mark.django_db
def test_module_delete(client, phase, user2):
    """Published modules survive a delete POST; drafts are removed."""
    module = phase.module
    module.is_draft = False
    module.save()
    organisation = module.project.organisation
    organisation.initiators.add(user2)
    assert Module.objects.all().count() == 1
    module_delete_url = reverse('a4dashboard:module-delete', kwargs={
        'slug': module.slug})

    # deleting published modules has no effect (redirects, count unchanged)
    client.login(username=user2, password='password')
    response = client.post(module_delete_url)
    assert response.status_code == 302
    assert Module.objects.all().count() == 1

    # unpublish module
    module.is_draft = True
    module.save()

    client.login(username=user2, password='password')
    response = client.post(module_delete_url)
    assert redirect_target(response) == 'project-edit'
    assert Module.objects.all().count() == 0


@pytest.mark.django_db
def test_module_delete_redirect(client, module_factory, user2):
    """After a successful delete, redirect priority is: explicit 'referrer'
    POST field, then the HTTP Referer header, then project-edit."""
    module = module_factory(is_draft=True)
    organisation = module.project.organisation
    organisation.initiators.add(user2)
    module_2 = module_factory(project=module.project, is_draft=True)
    module_3 = module_factory(project=module.project, is_draft=True)
    module_delete_url = reverse('a4dashboard:module-delete', kwargs={
        'slug': module.slug})
    module_delete_url_2 = reverse('a4dashboard:module-delete', kwargs={
        'slug': module_2.slug})
    module_delete_url_3 = reverse('a4dashboard:module-delete', kwargs={
        'slug': module_3.slug})
    client.login(username=user2, password='password')

    # 1) explicit 'referrer' form field wins
    referrer = reverse('a4dashboard:dashboard-information-edit', kwargs={
        'project_slug': module.project.slug})
    response = client.post(module_delete_url, {'referrer': referrer})
    assert response.status_code == 302
    assert response['location'] == referrer

    # 2) HTTP Referer header is used when no form field is given
    response = client.post(module_delete_url_2, {}, HTTP_REFERER=referrer)
    assert response.status_code == 302
    assert response['location'] == referrer

    # 3) fallback is the project-edit dashboard
    response = client.post(module_delete_url_3, {})
    assert redirect_target(response) == 'project-edit'


@pytest.mark.django_db
def test_module_unsuccessful_delete_redirect(client, module_factory, user2):
    """The same redirect priority applies when the delete is refused
    (published module), so the user returns to where they came from."""
    module = module_factory(is_draft=False)
    organisation = module.project.organisation
    organisation.initiators.add(user2)
    module_delete_url = reverse('a4dashboard:module-delete', kwargs={
        'slug': module.slug})
    client.login(username=user2, password='password')

    referrer = reverse('a4dashboard:dashboard-information-edit', kwargs={
        'project_slug': module.project.slug})
    response = client.post(module_delete_url, {'referrer': referrer})
    assert response.status_code == 302
    assert response['location'] == referrer

    response = client.post(module_delete_url, {}, HTTP_REFERER=referrer)
    assert response.status_code == 302
    assert response['location'] == referrer

    response = client.post(module_delete_url, {})
    assert redirect_target(response) == 'project-edit'
# Copyright 2016 - 2018  Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only

from pkg_resources import resource_filename

import marv_node.testing
from marv_node.testing import make_dataset, run_nodes, temporary_directory
from marv_robotics.detail import connections_section as node
from marv_store import Store


class TestCase(marv_node.testing.TestCase):
    """Run the connections_section detail node against two fixture bags."""

    # TODO: Generate bags instead, but with connection info!
    # Fixture bag files shipped inside the marv_node test data package.
    BAGS = [
        resource_filename('marv_node.testing._robotics_tests',
                          'data/test_0.bag'),
        resource_filename('marv_node.testing._robotics_tests',
                          'data/test_1.bag'),
    ]

    async def test_node(self):
        # Each run gets a throwaway store directory; Store takes
        # (path, node-mapping) -- empty mapping here.
        with temporary_directory() as storedir:
            store = Store(storedir, {})
            dataset = make_dataset(self.BAGS)
            store.add_dataset(dataset)
            # Execute only the node under test and compare its first output
            # stream against the recorded expectation.
            streams = await run_nodes(dataset, [node], store)
            self.assertNodeOutput(streams[0], node)
            # TODO: test also header
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from perfkitbenchmarker import sample


class SampleTestCase(unittest.TestCase):
    """Tests for how sample.Sample handles its optional metadata."""

    def testMetadataOptional(self):
        """A Sample built without metadata exposes an empty metadata dict."""
        result = sample.Sample(metric='Test', value=1.0, unit='Mbps')
        self.assertDictEqual({}, result.metadata)

    def testProvidedMetadataSet(self):
        """Metadata passed at construction is stored on the sample."""
        expected = {'origin': 'unit test'}
        result = sample.Sample(
            metric='Test', value=1.0, unit='Mbps',
            metadata=expected.copy())
        self.assertDictEqual(expected, result.metadata)
op Integration Tools # Copyright (C) 2011-2012 Alessio Treglia <quadrispro@ubuntu.com> # Copyright (C) 2007-2010, Marc-Olivier Barre <marco@marcochapeau.org> # Copyright (C) 2007-2009, Nedko Arnaudov <nedko@arnaudov.name> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import sys import dbus from .controller import LadiController name_base = 'org.jackaudio' ctrl_iface_name = name_base + '.JackControl' conf_iface_name = name_base + '.Configure' service_name = name_base + '.service' obj_path = '/org/jackaudio/Controller' def _dbus_type_to_python_type (dbus_value): if type (dbus_value) == dbus.Boolean: return bool(dbus_value) if type (dbus_value) == dbus.Int32 or type (dbus_value) == dbus.UInt32: return int(dbus_value) if type (dbus_value) == dbus.String: return str(dbus_value) if type (dbus_value) == dbus.Byte: return str (dbus_value) return dbus_value class JackController(LadiController): """Wrapper for controlling and monitoring JACK. This class provides an (almost) complete control on configured JACK servers. 
""" def __init__ (self): LadiController.__init__(self, dbus_type='SessionBus', service_name=service_name, obj_path=obj_path, iface_name=ctrl_iface_name) def is_started (self): return self.controller_iface.IsStarted () def name_owner_changed (name = None, old_owner = None, new_owner = None): sys.stderr.write("Name changed : %r\n" % name) sys.stderr.flush() def is_realtime (self): return self.controller_iface.IsRealtime () def get_load (self): return self.controller_iface.GetLoad () def get_xruns (self): return self.controller_iface.GetXruns () def get_sample_rate (self): return self.controller_iface.GetSampleRate () def get_latency (self): return self.controller_iface.GetLatency () def reset_xruns (self): return self.controller_iface.ResetXruns () def start (self): self.controller_iface.StartServer () def stop (self): self.controller_iface.StopServer () def kill (self): self.controller_iface.Exit () class JackConfigParameter(object): """Wrapper for JACK's parameters. This class provides an (almost) complete control to JACK's configuration parameters. 
""" def __init__(self, jack, path): self._jack = jack self.path = path self.name = path[-1:] def get_name(self): return self.name def get_type(self): return self._jack.get_param_type(self.path) def get_value(self): return self._jack.get_param_value(self.path) def set_value(self, value): self._jack.set_param_value(self.path, value) def reset_value(self): self._jack.reset_param_value(self.path) def get_short_description(self): return self._jack.get_param_short_description(self.path) def get_long_description(self): descr = self._jack.get_param_long_description(self.path) if not descr: descr = self.get_short_description() return descr def has_range(self): return self._jack.param_has_range(self.path) def get_range(self): return self._jack.param_get_range(self.path) def has_enum(self): return self._jack.param_has_enum(self.path) def is_strict_enum(self): return self._jack.param_is_strict_enum(self.path) def is_fake_values_enum(self): return self._jack.param_is_fake_value(self.path) def get_enum_values(self): return self._jack.param_get_enum_values(self.path) class JackConfigProxy(LadiController): """Wrapper for JACK's configuration. This controller provides access to the JACK's whole configuration. """ def __init__ (self): LadiController.__init__(self,
dbus_type='SessionBus', service_name=service_name, obj_path=obj_path, iface_name=conf_iface_name) def name_owner_changed (name = None, old_owner = None, new_owner = None): print "Name changed : %r" % name def get_selected_driver (s
elf): isset, default, value = self.controller_iface.GetParameterValue (['engine', 'driver']) return value def read_container (self, path): is_leaf, children = self.controller_iface.ReadContainer (path) if is_leaf: return [] return children def get_param_names (self, path): is_leaf, children = self.controller_iface.ReadContainer (path) if not is_leaf: return [] return children def get_param_short_description (self, path): type_char, name, short_descr, long_descr = self.controller_iface.GetParameterInfo (path) return short_descr def get_param_long_description (self, path): type_char, name, short_descr, long_descr = self.controller_iface.GetParameterInfo (path) return long_descr def get_param_type (self, path): type_char, name, short_descr, long_descr = self.controller_iface.GetParameterInfo (path) return str (type_char) def get_param_value (self, path): isset, default, value = self.controller_iface.GetParameterValue (path) isset = bool (isset) default = _dbus_type_to_python_type (default) value = _dbus_type_to_python_type (value) return isset, default, value def set_param_value (self, path, value): typestr = self.get_param_type (path) if typestr == "b": value = dbus.Boolean (value) elif typestr == "y": value = dbus.Byte (value) elif typestr == "i": value = dbus.Int32 (value) elif typestr == "u": value = dbus.UInt32 (value) self.controller_iface.SetParameterValue (path, value) def reset_param_value (self, path): self.controller_iface.ResetParameterValue (path) def param_has_range (self, path): is_range, is_strict, is_fake_value, values = self.controller_iface.GetParameterConstraint (path) return bool (is_range) def param_get_range (self, path): is_range, is_strict, is_fake_value, values = self.controller_iface.GetParameterConstraint (path) if not is_range or len (values) != 2: return -1, -1 return _dbus_type_to_python_type (values[0][0]), _dbus_type_to_python_type (values[1][0]) def param_has_enum (self, path): is_range, is_strict, is_fake_value, values = 
self.controller_iface.GetParameterConstraint (path) return not is_range and len (values) != 0 def param_is_strict_enum (self, path): is_range, is_strict, is_fake_value, values = self.controller_iface.GetParameterConstraint (path) return is_strict def param_is_fake_value (self, path): is_range, is_strict, is_fake_value, values = self.controller_iface.GetParameterConstraint (path) return is_fake_value def param_get_enum_values (self, path): is_range, is_strict, is_fake_value, dbus_values = self.controller_iface.GetParameterConstraint (path) values = [] if not is_range and len (dbus_values) != 0: for dbus_value in dbus_values: values.append ([_dbus_type_to_python_type (dbus_value[0]), _dbus_type_to_python_type (dbus_value
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu

"""supervisor event listener for generic script launching

launches a script (passed as $1+) and communicates with supervisor
"""

import sys
import pathlib
import datetime
import subprocess


def to_supervisor(text):
    """Write a supervisor eventlistener protocol message to stdout.

    Only eventlistener protocol messages may be sent to stdout; all
    logging must go to stderr instead.
    """
    sys.stdout.write(text)
    sys.stdout.flush()


def to_log(text):
    """Log to stderr (stdout is reserved for the supervisor protocol)."""
    sys.stderr.write(text)
    sys.stderr.flush()


def main(interval, command, args=None):
    """Acknowledge every supervisor event; run `command` (with `args`)
    at most once every `interval` seconds.

    `args` defaults to no extra arguments.  A fresh list is built on
    every call so there is no shared mutable default.
    """
    args = [] if args is None else list(args)
    last_run = None
    while True:
        # transition from ACKNOWLEDGED to READY
        to_supervisor("READY\n")

        # read the event header line, e.g. "ver:3.0 ... len:15"
        line = sys.stdin.readline()

        # consume the event payload so the protocol stream stays in sync
        headers = dict(item.split(":") for item in line.split())
        sys.stdin.read(int(headers["len"]))

        now = datetime.datetime.now()
        if last_run is None or last_run <= now - datetime.timedelta(seconds=interval):
            last_run = now
            script = subprocess.run(
                [command] + args,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
            )
            to_log(script.stdout)
            # if script.returncode != 0:
            #     to_supervisor("RESULT 4\nFAIL")

        # transition from READY to ACKNOWLEDGED
        to_supervisor("RESULT 2\nOK")


if __name__ == "__main__":
    if len(sys.argv) < 3:
        to_log("missing interval and/or script, crashing")
        sys.exit(1)
    try:
        interval = int(sys.argv[1])
    except Exception:
        # fixed: closing backtick was missing from the message
        to_log(f"incorrect interval `{sys.argv[1]}`, crashing")
        sys.exit(1)
    args = sys.argv[2:]
    if not pathlib.Path(args[0]).exists():
        # BUG FIX: this message was not an f-string and referenced an
        # undefined name `cmd`, so "{cmd}" was printed verbatim.
        to_log(f"script path `{args[0]}` does not exist, crashing")
        sys.exit(1)
    main(interval, args[0], args[1:])
ecifies Sequence() for usage on other backends. supports_native_enum Indicates if the dialect supports a native ENUM construct. This will prevent types.Enum from generating a CHECK constraint when that type is used. supports_native_boolean Indicates if the dialect supports a native boolean construct. This will prevent types.Boolean from generating a CHECK constraint when that type is used. """ _has_events = False def create_connect_args(self, url): """Build DB-API compatible connection argu
ments. Give
n a :class:`~sqlalchemy.engine.url.URL` object, returns a tuple consisting of a `*args`/`**kwargs` suitable to send directly to the dbapi's connect function. """ raise NotImplementedError() @classmethod def type_descriptor(cls, typeobj): """Transform a generic type to a dialect-specific type. Dialect classes will usually use the :func:`.types.adapt_type` function in the types module to accomplish this. The returned result is cached *per dialect class* so can contain no dialect-instance state. """ raise NotImplementedError() def initialize(self, connection): """Called during strategized creation of the dialect with a connection. Allows dialects to configure options based on server version info or other properties. The connection passed here is a SQLAlchemy Connection object, with full capabilities. The initialize() method of the base dialect should be called via super(). """ pass def reflecttable( self, connection, table, include_columns, exclude_columns): """Load table description from the database. Given a :class:`.Connection` and a :class:`~sqlalchemy.schema.Table` object, reflect its columns and properties from the database. The implementation of this method is provided by :meth:`.DefaultDialect.reflecttable`, which makes use of :class:`.Inspector` to retrieve column information. Dialects should **not** seek to implement this method, and should instead implement individual schema inspection operations such as :meth:`.Dialect.get_columns`, :meth:`.Dialect.get_pk_constraint`, etc. """ raise NotImplementedError() def get_columns(self, connection, table_name, schema=None, **kw): """Return information about columns in `table_name`. 
Given a :class:`.Connection`, a string `table_name`, and an optional string `schema`, return column information as a list of dictionaries with these keys: name the column's name type [sqlalchemy.types#TypeEngine] nullable boolean default the column's default value autoincrement boolean sequence a dictionary of the form {'name' : str, 'start' :int, 'increment': int} Additional column attributes may be present. """ raise NotImplementedError() def get_primary_keys(self, connection, table_name, schema=None, **kw): """Return information about primary keys in `table_name`. Deprecated. This method is only called by the default implementation of :meth:`.Dialect.get_pk_constraint`. Dialects should instead implement the :meth:`.Dialect.get_pk_constraint` method directly. """ raise NotImplementedError() def get_pk_constraint(self, connection, table_name, schema=None, **kw): """Return information about the primary key constraint on table_name`. Given a :class:`.Connection`, a string `table_name`, and an optional string `schema`, return primary key information as a dictionary with these keys: constrained_columns a list of column names that make up the primary key name optional name of the primary key constraint. """ raise NotImplementedError() def get_foreign_keys(self, connection, table_name, schema=None, **kw): """Return information about foreign_keys in `table_name`. 
Given a :class:`.Connection`, a string `table_name`, and an optional string `schema`, return foreign key information as a list of dicts with these keys: name the constraint's name constrained_columns a list of column names that make up the foreign key referred_schema the name of the referred schema referred_table the name of the referred table referred_columns a list of column names in the referred table that correspond to constrained_columns """ raise NotImplementedError() def get_table_names(self, connection, schema=None, **kw): """Return a list of table names for `schema`.""" raise NotImplementedError def get_view_names(self, connection, schema=None, **kw): """Return a list of all view names available in the database. schema: Optional, retrieve names from a non-default schema. """ raise NotImplementedError() def get_view_definition(self, connection, view_name, schema=None, **kw): """Return view definition. Given a :class:`.Connection`, a string `view_name`, and an optional string `schema`, return the view definition. """ raise NotImplementedError() def get_indexes(self, connection, table_name, schema=None, **kw): """Return information about indexes in `table_name`. Given a :class:`.Connection`, a string `table_name` and an optional string `schema`, return index information as a list of dictionaries with these keys: name the index's name column_names list of column names in order unique boolean """ raise NotImplementedError() def get_unique_constraints( self, connection, table_name, schema=None, **kw): """Return information about unique constraints in `table_name`. Given a string `table_name` and an optional string `schema`, return unique constraint information as a list of dicts with these keys: name the unique constraint's name column_names list of column names in order \**kw other options passed to the dialect's get_unique_constraints() method. .. 
versionadded:: 0.9.0 """ raise NotImplementedError() def normalize_name(self, name): """convert the given name to lowercase if it is detected as case insensitive. this method is only used if the dialect defines requires_name_normalize=True. """ raise NotImplementedError() def denormalize_name(self, name): """convert the given name to a case insensitive identifier for the backend if it is an all-lowercase name. this method is only used if the dialect defines requires_name_normalize=True. """ raise NotImplementedError() def has_table(self, connection, table_name, schema=None): """Check the existence of a particular table in the database. Given a :class:`.Connection` object and a string `table_name`, return True if the given table (possibly within the specified `schema`) exists in the database, False otherwise. """ raise NotImplementedError() def has_sequence(self, connection, sequence_name, schema=None): """Check the existence of a particular sequence in the database. Gi
#!/usr/bin/env python
import threading
import dns
from dnsdisttests import DNSDistTest

class TestTrailing(DNSDistTest):
    """Checks dnsdist's handling of queries that carry trailing data."""

    # this test suite uses a different responder port
    # because, contrary to the other ones, its
    # responders allow trailing data and we don't want
    # to mix things up.
    _testServerPort = 5360
    _config_template = """
    newServer{address="127.0.0.1:%s"}
    addAction(AndRule({QTypeRule(dnsdist.AAAA), TrailingDataRule()}), DropAction())
    """

    @classmethod
    def startResponders(cls):
        # The second argument (True) makes the responders tolerate
        # trailing data, matching what this suite sends.
        print("Launching responders..")

        cls._UDPResponder = threading.Thread(name='UDP Responder', target=cls.UDPResponder, args=[cls._testServerPort, True])
        cls._UDPResponder.setDaemon(True)
        cls._UDPResponder.start()
        cls._TCPResponder = threading.Thread(name='TCP Responder', target=cls.TCPResponder, args=[cls._testServerPort, True])
        cls._TCPResponder.setDaemon(True)
        cls._TCPResponder.start()

    def testTrailingAllowed(self):
        """
        Trailing: Allowed
        """
        name = 'allowed.trailing.tests.powerdns.com.'
        query = dns.message.make_query(name, 'A', 'IN')
        response = dns.message.make_response(query)
        rrset = dns.rrset.from_text(name,
                                    3600,
                                    dns.rdataclass.IN,
                                    dns.rdatatype.A,
                                    '127.0.0.1')
        response.answer.append(rrset)

        # Append 20 bytes of junk after the wire-format query: an A query
        # does not match the drop rule, so it must still be relayed intact
        # over both UDP and TCP.
        raw = query.to_wire()
        raw = raw + 'A'* 20
        (receivedQuery, receivedResponse) = self.sendUDPQuery(raw, response, rawQuery=True)
        self.assertTrue(receivedQuery)
        self.assertTrue(receivedResponse)
        # The responder assigns its own id; align before comparing.
        receivedQuery.id = query.id
        self.assertEquals(query, receivedQuery)
        self.assertEquals(response, receivedResponse)

        (receivedQuery, receivedResponse) = self.sendTCPQuery(raw, response, rawQuery=True)
        self.assertTrue(receivedQuery)
        self.assertTrue(receivedResponse)
        receivedQuery.id = query.id
        self.assertEquals(query, receivedQuery)
        self.assertEquals(response, receivedResponse)

    def testTrailingDropped(self):
        """
        Trailing: dropped
        """
        # An AAAA query with trailing data matches the configured
        # AndRule(QTypeRule(AAAA), TrailingDataRule()) -> DropAction(),
        # so no response is expected on either transport.
        name = 'dropped.trailing.tests.powerdns.com.'
        query = dns.message.make_query(name, 'AAAA', 'IN')
        raw = query.to_wire()
        raw = raw + 'A'* 20
        (_, receivedResponse) = self.sendUDPQuery(raw, response=None, rawQuery=True)
        self.assertEquals(receivedResponse, None)

        (_, receivedResponse) = self.sendTCPQuery(raw, response=None, rawQuery=True)
        self.assertEquals(receivedResponse, None)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import six

from conveyor.conveyorheat.engine.resources import signal_responder
from conveyor.conveyorheat.engine.resources import wait_condition as wc_base
from conveyor.conveyorheat.engine import support


class WaitConditionHandle(wc_base.BaseWaitConditionHandle):
    """AWS WaitConditionHandle resource.

    the main point of this class is to :
    have no dependencies (so the instance can reference it)
    generate a unique url (to be returned in the reference)
    then the cfn-signal will use this url to post to and
    WaitCondition will poll it to see if has been written to.
    """

    support_status = support.SupportStatus(version='2014.1')

    # Keys accepted in a signal payload (see handle_signal docstring).
    METADATA_KEYS = (
        DATA, REASON, STATUS, UNIQUE_ID
    ) = (
        'Data', 'Reason', 'Status', 'UniqueId'
    )

    def get_reference_id(self):
        # Once the resource exists we can hand out a pre-signed EC2-style
        # URL for cfn-signal to POST to; before creation, fall back to the
        # resource name.
        if self.resource_id:
            wc = signal_responder.WAITCONDITION
            return six.text_type(self._get_ec2_signed_url(signal_type=wc))
        else:
            return six.text_type(self.name)

    def metadata_update(self, new_metadata=None):
        """DEPRECATED. Should use handle_signal instead."""
        self.handle_signal(details=new_metadata)

    def handle_signal(self, details=None):
        """Validate and update the resource metadata.

        metadata must use the following format:
        {
            "Status" : "Status (must be SUCCESS or FAILURE)",
            "UniqueId" : "Some ID, should be unique for Count>1",
            "Data" : "Arbitrary Data",
            "Reason" : "Reason String"
        }
        """
        # An empty signal is silently ignored; only payload-carrying
        # signals are forwarded to the base class for validation.
        if details is None:
            return
        return super(WaitConditionHandle, self).handle_signal(details)


def resource_mapping():
    # Maps the CFN resource type string to its implementing class.
    return {
        'AWS::CloudFormation::WaitConditionHandle': WaitConditionHandle,
    }
""" extend TiddlyWiki serialization to optionally use beta or externalized releases and add the UniversalBackstage. activated via "twrelease=beta" URL parameter or ServerSettings, see build_config_var """ import logging from tiddlyweb.util import read_utf8_file from tiddlywebwiki.serialization import Serialization as WikiSerialization from tiddlywebplugins.tiddlyspace.web import (determine_host, determine_space, determine_space_recipe) LOGGER = logging.getLogger(__name__) def build_config_var(beta=False, external=False): """ Create the configuration key which will be used to locate the base tiddlywiki file. """ base = 'base_tiddlywiki' if external: base += '_external' if beta: base += '_beta' return base class Serialization(WikiSerialization): """ Subclass of the standard TiddlyWiki serialization to allow choosing beta or externalized versions of the base empty.html in which the tiddlers will be servered. Also, if the TiddlyWiki is not being downloaded, add the UniversalBackstage by injecting a script tag. """ def list_tiddlers(self, tiddlers): """ Override tiddlers.link so the location in noscript is to /tiddlers. """ http_host, _ = determine_host(self.environ) space_name = determine_space(self.environ, http_host) if space_name: recipe_name = determine_space_recipe(self.environ, space_name) if '/recipes/%s' % recipe_name in tiddlers.link: tiddlers.link = '/tiddlers' return WikiSerialization.list_tiddlers(self, tiddlers) def _get_wiki(self): beta = external = False release = self.environ.get('tiddlyweb.query', {}).get( 'twrelease', [False])[0] externalize = self.environ.get('tiddlyweb.query', {}).get( 'external', [False])[0] download = self.environ.get('tiddlyweb.query', {}).get( 'download', [False])[0] if release == 'beta': beta = True if externalize: external = True # If somebody is downloading, don't allow them to # externalize. if
download: external = False wiki = None if beta or external: config_var = build_config_var(beta, external) LOGGER.debug('looking for %s', config_var) base_wiki_file = self.environ.get('tiddlyweb.config', {}).get(config_var, '') if base_wiki_file: LOGGER.debug('using %s as base_tiddlywiki', base_wiki_file) wiki = read_utf8_file(base_wiki_file) if not wiki:
wiki = WikiSerialization._get_wiki(self) tag = "<!--POST-SCRIPT-START-->" if not download: wiki = wiki.replace(tag, '<script type="text/javascript" ' 'src="/bags/common/tiddlers/backstage.js"></script> %s' % tag) return wiki
#-------------------------------------------------------------------------------
# Name:        homework 6
# Author:      kirbs
# Created:     11/9/2013
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import urllib
import numpy

# ###################################################
# ##################Question 2-6 Helpers #############
# ###################################################

def _load_dta(url):
    """Download a whitespace-separated data file and return it as a list
    of [x1, x2, y] float lists (shared by in_dta/out_dta)."""
    fpin = urllib.urlopen(url)
    return [list(map(float, line.strip('\n').split('\r')[0].split()))
            for line in fpin]

def in_dta():
    """In-sample data set."""
    return _load_dta("http://work.caltech.edu/data/in.dta")

def out_dta():
    """Out-of-sample data set."""
    return _load_dta("http://work.caltech.edu/data/out.dta")

def transform(point):
    """Non-linear feature transform of [x1, x2, y]:
    (1, x1, x2, x1^2, x2^2, x1*x2, |x1-x2|, |x1+x2|), y kept last."""
    return [1, point[0], point[1], point[0]**2, point[1]**2,
            point[0]*point[1], abs(point[0] - point[1]),
            abs(point[0] + point[1]), point[2]]

def transformPoints(points):
    """Apply transform() to every point."""
    return [transform(point) for point in points]

def _split_X_y(samplePoints):
    """Split points into design matrix X and target vector y.
    y is assumed to be the last element of each point."""
    y_location = len(samplePoints[0]) - 1
    X = numpy.array([point[:y_location] for point in samplePoints])
    y = numpy.array([point[y_location] for point in samplePoints])
    return X, y

def linearRegression(samplePoints):
    """Calculate weights using linear regression (pseudo-inverse).
    Return the weight vector."""
    X, y = _split_X_y(samplePoints)
    return numpy.dot(numpy.linalg.pinv(X), y)

def linRegWithRegularization(samplePoints, l):
    """Linear regression with L2 (weight decay) regularization:

        w = (X^T X + l*I)^-1 X^T y

    BUG FIX: the previous version computed pinv(X + l/N * (w.w)) * y,
    i.e. it added a scalar to every entry of X, which is not a
    regularizer at all.  With l == 0 this now reduces to plain
    linearRegression().
    """
    X, y = _split_X_y(samplePoints)
    reg = l * numpy.identity(X.shape[1])
    return numpy.dot(
        numpy.dot(numpy.linalg.inv(numpy.dot(X.T, X) + reg), X.T), y)

def Ein(weights, samplePoints):
    """Return the classification error rate for the given weights.

    Assumes samplePoints is a list of lists, and the last element of
    each inner list is the y value.
    """
    errorCount = 0
    y_location = len(samplePoints[0]) - 1
    for point in samplePoints:
        if numpy.sign(numpy.dot(weights, point[:y_location])) != point[y_location]:
            errorCount += 1
    return errorCount / float(len(samplePoints))

# ##################################################################

def q2():
    """Print in- and out-of-sample errors for plain linear regression."""
    transformedPoints = transformPoints(in_dta())
    transformedTestPoints = transformPoints(out_dta())
    weights = linearRegression(transformedPoints)
    print("E_in: {}, E_out: {}".format(Ein(weights, transformedPoints),
                                       Ein(weights, transformedTestPoints)))

def q3(l):
    """Print in- and out-of-sample errors with regularization weight l."""
    transformedPoints = transformPoints(in_dta())
    transformedTestPoints = transformPoints(out_dta())
    weights = linRegWithRegularization(transformedPoints, l)
    print("E_in: {}, E_out: {}".format(Ein(weights, transformedPoints),
                                       Ein(weights, transformedTestPoints)))

# Question 3
#q3(10**-3)

# Question 4
#q3(10**3)

def q5(start, end):
    """Print E_out for lambda = 10**k, k in [start, end], and return the
    k achieving the smallest E_out.

    BUG FIX: the previous version initialised smallest_k = -2 but never
    updated it, so the best k was computed by eye from the printout.
    """
    transformedPoints = transformPoints(in_dta())
    transformedTestPoints = transformPoints(out_dta())
    smallest_k = None
    smallest_e_out = None
    for i in range(start, end + 1):
        e_out = Ein(linRegWithRegularization(transformedPoints, 10**i),
                    transformedTestPoints)
        print("k={}, E_out={}".format(i, e_out))
        if smallest_e_out is None or e_out < smallest_e_out:
            smallest_k, smallest_e_out = i, e_out
    return smallest_k

# Question 5
#q5(-2, 2)

# Question 6
#q5(-20, 20)
ee <http://www.gnu.org/licenses/>. # import iris tests first so that some things can be initialised before importing anything else import iris.tests as tests import os import warnings import datetime import gribapi import numpy as np import iris import iris.cube import iris.coord_systems import iris.coords @iris.tests.skip_data class TestLoadSave(tests.IrisTest): # load and save grib def setUp(self): iris.fileformats.grib.hindcast_workaround = True def tearDown(self): iris.fileformats.grib.hindcast_workaround = False def save_and_compare(self, source_grib, reference_text): """Load and save grib data, generate diffs, compare with expected diffs.""" # load and save from Iris cubes = iris.load(source_grib) saved_grib = iris.util.create_temp_filename(suffix='.grib2') iris.save(cubes, saved_grib) # missing reference? (the expected diffs between source_grib and saved_grib) if not os.path.exists(reference_text): warnings.warn("Creating grib compare reference %s" % reference_text) os.system("grib_compare %s %s > %s" % (source_grib, saved_grib, reference_text)) # generate and compar
e diffs compare_text = iris.util.create_temp_filename(suffix='.grib_compare.txt') os.system("grib_compare %s %s > %s" % (source_grib, saved_grib, compare_text)) self.assertTextFile(compare_text, reference_text, "grib_compare output") os.remove(saved_grib) os.remove(compare_text) def test_latlon_forecast_pl
ev(self): source_grib = tests.get_data_path(("GRIB", "uk_t", "uk_t.grib2")) reference_text = tests.get_result_path(("grib_save", "latlon_forecast_plev.grib_compare.txt")) self.save_and_compare(source_grib, reference_text) def test_rotated_latlon(self): source_grib = tests.get_data_path(("GRIB", "rotated_nae_t", "sensible_pole.grib2")) reference_text = tests.get_result_path(("grib_save", "rotated_latlon.grib_compare.txt")) # TODO: Investigate small change in test result: # long [iDirectionIncrement]: [109994] != [109993] # Consider the change in dx_dy() to "InDegrees" too. self.save_and_compare(source_grib, reference_text) # XXX Addressed in #1118 pending #1039 for hybrid levels # def test_hybrid_pressure_levels(self): # source_grib = tests.get_data_path(("GRIB", "ecmwf_standard", "t0.grib2")) # reference_text = tests.get_result_path(("grib_save", "hybrid_pressure.grib_compare.txt")) # self.save_and_compare(source_grib, reference_text) def test_time_mean(self): # This test for time-mean fields also tests negative forecast time. # Because the results depend on the presence of our api patch, # we currently have results for both a patched and unpatched api. # If the api ever allows -ve ft, we should revert to a single result. source_grib = tests.get_data_path(("GRIB", "time_processed", "time_bound.grib2")) reference_text = tests.get_result_path(("grib_save", "time_mean.grib_compare.txt")) # TODO: It's not ideal to have grib patch awareness here... 
import unittest try: self.save_and_compare(source_grib, reference_text) except unittest.TestCase.failureException: reference_text = tests.get_result_path(( "grib_save", "time_mean.grib_compare.FT_PATCH.txt")) self.save_and_compare(source_grib, reference_text) @iris.tests.skip_data class TestCubeSave(tests.IrisTest): # save fabricated cubes def _load_basic(self): path = tests.get_data_path(("GRIB", "uk_t", "uk_t.grib2")) return iris.load(path)[0] def test_params(self): # TODO pass def test_originating_centre(self): # TODO pass def test_irregular(self): cube = self._load_basic() lat_coord = cube.coord("latitude") cube.remove_coord("latitude") new_lats = np.append(lat_coord.points[:-1], lat_coord.points[0]) # Irregular cube.add_aux_coord(iris.coords.AuxCoord(new_lats, "latitude", units="degrees", coord_system=lat_coord.coord_system), 0) saved_grib = iris.util.create_temp_filename(suffix='.grib2') self.assertRaises(iris.exceptions.TranslationError, iris.save, cube, saved_grib) os.remove(saved_grib) def test_non_latlon(self): cube = self._load_basic() cube.coord(dimensions=[0]).coord_system = None saved_grib = iris.util.create_temp_filename(suffix='.grib2') self.assertRaises(iris.exceptions.TranslationError, iris.save, cube, saved_grib) os.remove(saved_grib) def test_forecast_period(self): # unhandled unit cube = self._load_basic() cube.coord("forecast_period").units = iris.unit.Unit("years") saved_grib = iris.util.create_temp_filename(suffix='.grib2') self.assertRaises(iris.exceptions.TranslationError, iris.save, cube, saved_grib) os.remove(saved_grib) def test_unhandled_vertical(self): # unhandled level type cube = self._load_basic() # Adjust the 'pressure' coord to make it into an "unrecognised Z coord" p_coord = cube.coord("pressure") p_coord.rename("not the messiah") p_coord.units = 'K' p_coord.attributes['positive'] = 'up' saved_grib = iris.util.create_temp_filename(suffix='.grib2') with self.assertRaises(iris.exceptions.TranslationError): iris.save(cube, 
saved_grib) os.remove(saved_grib) def test_scalar_int32_pressure(self): # Make sure we can save a scalar int32 coordinate with unit conversion. cube = self._load_basic() cube.coord("pressure").points = np.array([200], dtype=np.int32) cube.coord("pressure").units = "hPa" with self.temp_filename(".grib2") as testfile: iris.save(cube, testfile) def test_bounded_level(self): cube = iris.load_cube(tests.get_data_path(("GRIB", "uk_t", "uk_t.grib2"))) # Changing pressure to altitude due to grib api bug: # https://github.com/SciTools/iris/pull/715#discussion_r5901538 cube.remove_coord("pressure") cube.add_aux_coord(iris.coords.AuxCoord( 1030.0, long_name='altitude', units='m', bounds=np.array([111.0, 1949.0]))) with self.temp_filename(".grib2") as testfile: iris.save(cube, testfile) with open(testfile, "rb") as saved_file: g = gribapi.grib_new_from_file(saved_file) self.assertEqual( gribapi.grib_get_double(g, "scaledValueOfFirstFixedSurface"), 111.0) self.assertEqual( gribapi.grib_get_double(g, "scaledValueOfSecondFixedSurface"), 1949.0) class TestHandmade(tests.IrisTest): def _lat_lon_cube_no_time(self): """Returns a cube with a latitude and longitude suitable for testing saving to PP/NetCDF etc.""" cube = iris.cube.Cube(np.arange(12, dtype=np.int32).reshape((3, 4))) cs = iris.coord_systems.GeogCS(6371229) cube.add_dim_coord(iris.coords.DimCoord(np.arange(4) * 90 + -180, 'longitude', units='degrees', coord_system=cs), 1) cube.add_dim_coord(iris.coords.DimCoord(np.arange(3) * 45 + -90, 'latitude', units='degrees', coord_system=cs), 0) return cube def _cube_time_no_forecast(self): cube = self._lat_lon_cube_no_time() unit = iris.unit.Unit('hours since epoch', calendar=iris.unit.CALENDAR_GREGORIAN) dt = datetime.datetime(2010, 12, 31, 12, 0) cube.add_aux_coord(iris.coords.AuxCoord(np.array([unit.date2num(dt)], dtype=np.float64), 'time', units=unit)) retur
""" This module loads all the
classes from the VTK Charts library into its namespace. This is an optional module.""" import os if os.name == 'posix': from libvtkChartsPython import * e
lse: from vtkChartsPython import *
#! /usr/bin/python
"""Count lines in birds.txt, before and after dropping empty lines."""

# Context manager guarantees the file is closed even if read() raises.
with open("birds.txt", "r") as f:
    data = f.read()

lines = data.split("\n")
print("Wrong: The number of lines is", len(lines))

# BUG FIX: the original removed items from `lines` while iterating over
# it, which skips the element after each removal -- consecutive empty
# lines were only half-removed.  Filtering into a new list is correct.
lines = [line for line in lines if line]

print("Right: The number of lines is", len(lines))
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Python library modules to load.
libs_python=["functions"]
# PHP library modules to load (none configured).
libs_php=[]

# names of the databases go here
dbs=["main"]

# variable used for parameter passing
p={}

# console settings: enable flag, bind host and port
consola=True
host="localhost"
consola_port=9999
"""A basic implementation of a Neural Network by
following the tutorial by Andrew Trask http://iamtrask.github.io/2015/07/12/basic-python-network/ """ import numpy as np # sigmoid function def nonlin(x, deriv=False): if deriv==True: return x * (1-x) return 1 / (1 + np.exp(-x)) # input dataset x = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]]) # output dataset y = np.array([[0, 0, 1, 1]]).T # seed random numbers to make calculation # deterministic (good practice) np.ra
ndom.seed(1) # initialize weights randomly with mean 0 syn0 = 2*np.random.random((3, 1)) - 1 for i in xrange(10000): # forward propagation l0 = x l1 = nonlin(np.dot(l0, syn0)) print l1 break # how much did we miss l1_error = y - l1 # multiply how much we missed by the # slope of the sigmoid at the values in l1 l1_delta = l1_error * nonlin(l1, True) # update weights syn0 += np.dot(l0.T, l1_delta) print 'Output after training:' print l1
'''
Created on 24 Feb 2015

@author: Ronny Andersson (ronny@andersson.tk)
@copyright: (c) 2015 Ronny Andersson
@license: MIT
'''

# Standard library
import unittest

# Third party
import nose

# Internal
from zignal.music import scales


class Test_midi_scales(unittest.TestCase):
    # Reference values from:
    # Benson, DJ. (2006). Music: A Mathematical Offering.
    # Cambridge University Press.
    # http://homepages.abdn.ac.uk/mth192/pages/html/maths-music.html

    def test_freq2key_quantise(self):
        # Neighbouring keys: 68 -> 415.305 Hz, 69 -> 440.0 Hz, 70 -> 466.164 Hz.
        # A frequency in between two keys must snap to the nearest key number.
        cases = ((416.4, 68), (438.0, 69), (441.0, 69), (442.0, 69),
                 (452.1, 69), (453.1, 70), (460.0, 70), (470.0, 70))
        for freq, key in cases:
            self.assertAlmostEqual(scales.midi_freq2key(freq, quantise=True),
                                   key, places=7)

    def test_key2freq(self):
        cases = ((69, 440.0, 7), (81, 880.0, 7), (21, 27.5, 7), (43, 97.9989, 4))
        for key, freq, places in cases:
            self.assertAlmostEqual(scales.midi_key2freq(key), freq, places=places)

    def test_freq2key(self):
        for freq, key in ((440, 69.0), (880, 81.0)):
            self.assertAlmostEqual(scales.midi_freq2key(freq), key, places=7)

    def test_key2freq_tuning(self):
        cases = ((69, 450, 450.0), (81, 450, 900.0), (21, 400, 25.0))
        for key, tuning, freq in cases:
            self.assertAlmostEqual(scales.midi_key2freq(key, tuning=tuning),
                                   freq, places=7)

    def test_freq2key_tuning(self):
        for freq, key in ((450, 69.0), (900, 81.0)):
            self.assertAlmostEqual(scales.midi_freq2key(freq, tuning=450),
                                   key, places=7)

    def test_back2back_key(self):
        # frequency -> key -> frequency must round-trip.
        for freq in (1234, 45.67):
            self.assertAlmostEqual(
                scales.midi_key2freq(scales.midi_freq2key(freq)),
                freq, places=7)

    def test_back2back_freq(self):
        # key -> frequency -> key must round-trip.
        for key in (76.543, 124):
            self.assertAlmostEqual(
                scales.midi_freq2key(scales.midi_key2freq(key)),
                key, places=7)


class Test_piano_note_to_freq(unittest.TestCase):
    def test_octaves(self):
        # The A notes double in frequency for each octave.
        for note, freq in (('A2', 110.0), ('A3', 220.0), ('A4', 440.0),
                           ('A5', 880.0), ('A6', 1760.0)):
            self.assertAlmostEqual(scales.piano_note2freq(note), freq, places=7)

    def test_values(self):
        self.assertAlmostEqual(scales.piano_note2freq('C6'), 1046.50, places=2)
        self.assertAlmostEqual(scales.piano_note2freq('D1'), 36.7081, places=4)


class Test_piano_freq_to_note(unittest.TestCase):
    def test_values(self):
        for freq, note in ((1046.50, 'C6'), (36.7051, 'D1'), (440, 'A4')):
            self.assertEqual(scales.piano_freq2note(freq), note)

    def test_quantise(self):
        # Frequencies near 440 Hz must all resolve to A4.
        for freq in (435.00, 439.00, 440.00, 441.00, 447.00):
            self.assertEqual(scales.piano_freq2note(freq), 'A4')


class Test_piano(unittest.TestCase):
    def test_back2back_key(self):
        for freq in (100, 32, 997, 12345, 4.563):
            self.assertAlmostEqual(
                scales.piano_key2freq(scales.piano_freq2key(freq)),
                freq, places=7)

    def test_back2back_freq(self):
        for key in (10, 49, 30.3):
            self.assertAlmostEqual(
                scales.piano_freq2key(scales.piano_key2freq(key)),
                key, places=7)

    def test_back2back_freq_quantised(self):
        # Quantising rounds fractional keys to the nearest whole key.
        for key, expected in ((10.2, 10), (34.678, 35)):
            self.assertAlmostEqual(
                scales.piano_freq2key(scales.piano_key2freq(key), quantise=True),
                expected, places=7)


if __name__ == "__main__":
    noseargs = [__name__,
                "--verbosity=2",
                "--logging-format=%(asctime)s %(levelname)-8s: %(name)-15s " +
                "%(module)-15s %(funcName)-20s %(message)s",
                "--logging-level=DEBUG",
                __file__,
                ]
    nose.run(argv=noseargs)
# Write a greeting to flight.txt, then read it back.
#
# Fix: the original opened the file in write-only mode ("w") and then
# called .read(), which raises io.UnsupportedOperation because a "w"
# handle is not readable.  Opening with "w+" (truncate + read/write)
# and rewinding after the write makes the read-back work.  Using a
# context manager also guarantees the file is closed, even on error.
with open("flight.txt", "w+") as flight_file:
    flight_file.write("Hello")
    flight_file.seek(0)  # the position is at EOF after the write; rewind
    text = flight_file.read()

# flight_file.closed is True here: the with-block closed the handle.
#!/usr/bin/env python
# Licensed to Rackspace under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# Rackspace licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Generate random sample metrics and ingest them into a Blueflood host."""

import time
import uuid
import random
from optparse import OptionParser

try:
    import simplejson as json
except ImportError:
    import json

try:
    import requests
except ImportError:
    raise ImportError('Missing dependency requests. ' +
                      'Please install it using pip.')


def _generate_metrics_data(tenantId, metricName):
    """Build 100 random metric samples, spaced 30 s apart, starting 2 h ago.

    Returns a tuple ``(data, startTimestamp, endTimestamp)`` where ``data``
    is a list of metric dicts in Blueflood's ingestion format and both
    timestamps are in milliseconds since the epoch (``endTimestamp`` is one
    sample interval past the last sample).
    """
    data = []
    # Blueflood understands millis since epoch only.
    # Fix: long() does not exist on Python 3; int() is unbounded there and
    # auto-promotes on Python 2, so it is correct on both.
    now = int(time.time() * 1000)
    # Publish metrics with older timestamps (2 hrs before current time).
    startTimestamp = now - 2 * 60 * 60 * 1000
    endTimestamp = startTimestamp
    for i in range(100):
        metric = {}
        metric['collectionTime'] = endTimestamp
        metric['metricName'] = metricName
        metric['metricValue'] = random.randint(1, 100)
        metric['ttlInSeconds'] = 2 * 24 * 60 * 60  # 2 days
        metric['unit'] = 'seconds'
        data.append(metric)
        endTimestamp += 30 * 1000  # 30s spaced metric samples
    return data, startTimestamp, endTimestamp


def _get_metrics_url(host, port, scheme, tenantId):
    """Return the experimental metrics ingestion URL for the given tenant."""
    return scheme + '://' + host + ':' + port + '/v1.0/'\
        + tenantId + '/experimental/metrics'


def main():
    """Parse options, generate sample metrics and POST them to Blueflood."""
    usage = 'usage: %prog \n' + \
        '--host=<host running blueflood> \n' + \
        '--port=<blueflood HTTP metrics ingestion port>'

    parser = OptionParser(usage=usage)
    parser.add_option('--host', dest='host', help='Blueflood host')
    parser.add_option('--port', dest='port', help='HTTP ingestion port')
    (options, args) = parser.parse_args()

    # Default to a Blueflood instance running locally on the standard port.
    if not options.host:
        options.host = 'localhost'
    if not options.port:
        options.port = '19000'

    # Random tenant/metric names so repeated runs do not collide.
    tenantId = 'ac' + str(uuid.uuid1())
    metricName = 'met.' + str(uuid.uuid1())
    (payload, start, end) = _generate_metrics_data(tenantId, metricName)
    prettyjsondata = json.dumps(payload, indent=4, separators=(',', ': '))
    print(prettyjsondata)
    url = _get_metrics_url(options.host, options.port, 'http', tenantId)
    print(url)
    try:
        # Fix: the original embedded a backslash continuation inside the
        # string literal, which baked the next line's indentation into the
        # message; adjacent literals keep the message clean.
        print('Writing metrics for tenant: %s, metric name: %s, '
              'start: %d, end: %d' % (tenantId, metricName, start, end))
        r = requests.post(url, data=json.dumps(payload))
        print('Response from server %s' % (r))
        print('To retrieve the generated data with retrieve.py script, use the following command (assuming port number 20000):')
        print('')
        print('./retrieve.py --host %s --port 20000 --metric %s --tenant %s --from %s --to %s --points 100' \
            % (options.host, metricName, tenantId, start - 100000000, end + 100000000))
        print('')
    # Fix: "except Exception, ex" is Python-2-only syntax; "as" works on
    # both Python 2.6+ and Python 3.
    except Exception as ex:
        print(ex)
        raise Exception('Cannot ingest metrics into blueflood')


# Fix: guard the entry point so importing this module (e.g. to reuse the
# helpers) does not fire a network request.
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-23 22:17
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):
    # Drops the file_location and site_name columns from both the
    # backupfirewall and backuprouter tables.
    # NOTE(review): RemoveField is destructive — the column data is lost
    # when this migration is applied; confirm nothing still reads these
    # fields before deploying.

    dependencies = [
        ('backups', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='backupfirewall',
            name='file_location',
        ),
        migrations.RemoveField(
            model_name='backupfirewall',
            name='site_name',
        ),
        migrations.RemoveField(
            model_name='backuprouter',
            name='file_location',
        ),
        migrations.RemoveField(
            model_name='backuprouter',
            name='site_name',
        ),
    ]
nfig = await self._device.get_current_app_config( log_api_exception=False ) self._current_app = find_app_name( self._current_app_config, [APP_HOME, *self._all_apps, *self._additional_app_configs], ) if self._current_app == NO_APP_RUNNING: self._current_app = None def _get_additional_app_names(self) -> List[Dict[str, Any]]: """Return list of additional apps that were included in configuration.yaml.""" return [ additional_app["name"] for additional_app in self._additional_app_configs ] @staticmethod async def _async_send_update_options_signal
( hass: HomeAssistantType, config_entry: ConfigEntry ) -> None: "
""Send update event when Vizio config entry is updated.""" # Move this method to component level if another entity ever gets added for a single config entry. # See here: https://github.com/home-assistant/core/pull/30653#discussion_r366426121 async_dispatcher_send(hass, config_entry.entry_id, config_entry) async def _async_update_options(self, config_entry: ConfigEntry) -> None: """Update options if the update signal comes from this entity.""" self._volume_step = config_entry.options[CONF_VOLUME_STEP] # Update so that CONF_ADDITIONAL_CONFIGS gets retained for imports self._conf_apps.update(config_entry.options.get(CONF_APPS, {})) async def async_update_setting( self, setting_type: str, setting_name: str, new_value: Union[int, str] ) -> None: """Update a setting when update_setting service is called.""" await self._device.set_setting( setting_type, setting_name, new_value, ) async def async_added_to_hass(self) -> None: """Register callbacks when entity is added.""" # Register callback for when config entry is updated. 
self.async_on_remove( self._config_entry.add_update_listener( self._async_send_update_options_signal ) ) # Register callback for update event self.async_on_remove( async_dispatcher_connect( self.hass, self._config_entry.entry_id, self._async_update_options ) ) # Register callback for app list updates if device is a TV @callback def apps_list_update(): """Update list of all apps.""" self._all_apps = self._apps_coordinator.data self.async_write_ha_state() if self._device_class == DEVICE_CLASS_TV: self.async_on_remove( self._apps_coordinator.async_add_listener(apps_list_update) ) @property def available(self) -> bool: """Return the availabiliity of the device.""" return self._available @property def state(self) -> Optional[str]: """Return the state of the device.""" return self._state @property def name(self) -> str: """Return the name of the device.""" return self._name @property def icon(self) -> str: """Return the icon of the device.""" return self._icon @property def volume_level(self) -> Optional[float]: """Return the volume level of the device.""" return self._volume_level @property def is_volume_muted(self): """Boolean if volume is currently muted.""" return self._is_volume_muted @property def source(self) -> Optional[str]: """Return current input of the device.""" if self._current_app is not None and self._current_input in INPUT_APPS: return self._current_app return self._current_input @property def source_list(self) -> List[str]: """Return list of available inputs of the device.""" # If Smartcast app is in input list, and the app list has been retrieved, # show the combination with , otherwise just return inputs if self._available_apps: return [ *[ _input for _input in self._available_inputs if _input not in INPUT_APPS ], *self._available_apps, *[ app for app in self._get_additional_app_names() if app not in self._available_apps ], ] return self._available_inputs @property def app_id(self) -> Optional[str]: """Return the ID of the current app if it is unknown 
by pyvizio.""" if self._current_app_config and self.app_name == UNKNOWN_APP: return { "APP_ID": self._current_app_config.APP_ID, "NAME_SPACE": self._current_app_config.NAME_SPACE, "MESSAGE": self._current_app_config.MESSAGE, } return None @property def app_name(self) -> Optional[str]: """Return the friendly name of the current app.""" return self._current_app @property def supported_features(self) -> int: """Flag device features that are supported.""" return self._supported_commands @property def unique_id(self) -> str: """Return the unique id of the device.""" return self._config_entry.unique_id @property def device_info(self) -> Dict[str, Any]: """Return device registry information.""" return { "identifiers": {(DOMAIN, self._config_entry.unique_id)}, "name": self.name, "manufacturer": "VIZIO", "model": self._model, "sw_version": self._sw_version, } @property def device_class(self) -> str: """Return device class for entity.""" return self._device_class @property def sound_mode(self) -> Optional[str]: """Name of the current sound mode.""" return self._current_sound_mode @property def sound_mode_list(self) -> Optional[List[str]]: """List of available sound modes.""" return self._available_sound_modes async def async_select_sound_mode(self, sound_mode): """Select sound mode.""" if sound_mode in self._available_sound_modes: await self._device.set_setting( VIZIO_AUDIO_SETTINGS, VIZIO_SOUND_MODE, sound_mode ) async def async_turn_on(self) -> None: """Turn the device on.""" await self._device.pow_on() async def async_turn_off(self) -> None: """Turn the device off.""" await self._device.pow_off() async def async_mute_volume(self, mute: bool) -> None: """Mute the volume.""" if mute: await self._device.mute_on() self._is_volume_muted = True else: await self._device.mute_off() self._is_volume_muted = False async def async_media_previous_track(self) -> None: """Send previous channel command.""" await self._device.ch_down() async def async_media_next_track(self) -> None: 
"""Send next channel command.""" await self._device.ch_up() async def async_select_source(self, source: str) -> None: """Select input source.""" if source in self._available_inputs: await self._device.set_input(source) elif source in self._get_additional_app_names(): await self._device.launch_app_config( **next( app["config"] for app in self._additional_app_configs if app["name"] == source ) ) elif source in self._available_apps: await self._device.launch_app(source, self._all_apps) async def async_volume_up(self) -> None: """Increase volume of the device.""" await self._device.vol_up(num=self._volume_step) if self._volume_level is not None: self._volume_level = min( 1.0, self._volume_level + self._volume_step / sel
# Django settings for the firsty project.

# Debug switches — never leave these enabled on a production deployment.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

# People who receive code-error notifications,
# e.g. ('Your Name', 'your_email@example.com').
ADMINS = (
)

# People who receive broken-link notifications; same set by default.
MANAGERS = ADMINS

# Single SQLite database.  Only ENGINE and NAME matter for sqlite3;
# USER/PASSWORD/HOST/PORT apply to server-based backends only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': '/home/chase/django/nrfirst/firsty/src/sqlite.db',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

# Localization.  Choices for TIME_ZONE:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True

# User-uploaded media (unused: empty root and URL).
MEDIA_ROOT = ''
MEDIA_URL = ''

# Static files.  STATIC_ROOT is where collectstatic gathers files;
# don't put anything there by hand.
STATIC_ROOT = ''
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'

# Extra locations searched for static files (absolute paths,
# forward slashes even on Windows).
STATICFILES_DIRS = (
)

# Finder classes that locate static files in various places.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Keep this key secret and unique per deployment.
SECRET_KEY = '11l4c!ngykol5x#rsqed+$bv9ln$(oefcf@ovzjhx+_56e7u6%'

# Template sources, tried in order.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'firsty.urls'

# Extra template directories (absolute paths, forward slashes).
TEMPLATE_DIRS = (
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)

# Default logging configuration: mail the site admins on every HTTP 500.
# See http://docs.djangoproject.com/en/dev/topics/logging for details.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
e pipe angle is about 0 # 3. the pipe center is in the lower quater of the image. class Config: minSize = 0.05 maxSize = 0.4 fwSpeed = 0.8 deltaAngle = 0.192 # 0.192 radians = 11 degrees deltaDist = 100 kpAngle = 1.0 kpDist = 1.0 robCenterX = 320 robCenterY = 240 maxDistance = 320 mirror = False class Global: x = 0.0 y = 0.0 size = 0.0 orientation = 0.0 lastX = 0.0 lastY = 0.0 isSizeTooSmall = False currentPosition = Point() is_seen = True state = " " distance = 0 pipe_passed = 63.6 #============================================================================== # Constants #============================================================================== class States: NotSeenYet = 'NotSeenYet' Passed = 'Passed' IsSeen = 'IsSeen' Lost = 'Lost' class Transitions: IsSeen = 'IsSeen' Passed = 'Passed' Lost = 'Lost' Aborted = 'Aborted' class LostTypes: LostLeft = 'LostLeft' LostRight = 'LostRight' LostBottom = 'LostBottom' LostTop = 'LostTop' Lost = 'Lost' #============================================================================== # State classes #============================================================================== class AbortableState(smach.State): def abort(self): setMotorSpeed(0,0) self.service_preempt() return Transitions.Aborted class NotSeenYet(AbortableState): def __init__(self): smach.State.__init__(self, outcomes=[Transitions.IsSeen, Transitions.Aborted]) def execute(self, userdata): rospy.loginfo('Executing state '+States.NotSeenYet) while not rospy.is_shutdown() and not self.preempt_requested(): # if size between min und max.. 
if Config.minSize < Global.size < Config.maxSize: return Transitions.IsSeen setMotorSpeed(Config.fwSpeed, 0.0) rospy.sleep(0.2) return self.abort() class IsSeen(AbortableState): def __init__(self): smach.State.__init__(self, outcomes=[Transitions.Lost, Transitions.Passed, Transitions.Aborted], output_keys=['lost_type']) def execute(self, userdata): rospy.loginfo('Executing state '+States.IsSeen) while not rospy.is_shutdown() and not self.preempt_requested(): # if size between min und max.. if Config.minSize < Global.size < Config.maxSize: # end of pipe reached? #Coordiantes for end of pipe if Global.currentPosition.x < abs(Global.pipe_passed): setMotorSpeed(0,0) return Transitions.Passed # lost else: tmp_x = 0.0 tmp_y = 0.0 # lost if less than minSize is seen if Global.size <= Config.minSize: tmp_x = Global.x tmp_y = Global.y # lost if more than maxSize is seen elif Global.size >= Config.maxSize: tmp_x = Global.lastX tmp_y = Global.lastY tmp_x /= IMAGE_COLS tmp_y /= IMAGE_ROWS if tmp_x < 0.5: userdata.lost_type = LostTypes.LostLeft elif tmp_x >= 0.5: userdata.lost_type = LostTypes.LostRight elif tmp_y < 0.5: userdata.lost_type = LostTypes.LostTop elif tmp_y >= 0.5: userdata.lost_type = LostTypes.LostBottom else: userdata.lost_type = LostTypes.Lost return Transitions.Lost distanceY = computeIntersection(Global.x, Global.y, Global.ori
entation) #if not Config.mirror: distanceY = -distanceY #rospy.loginfo('distanceY: ' + repr(distanceY)) angularSpeed = 0.0 if math.fabs(Global.orienta
tion) > Config.deltaAngle: angularSpeed = Config.kpAngle * Global.orientation / (math.pi/2) if math.fabs(distanceY) > Config.deltaDist: angularSpeed += Config.kpDist * distanceY / Config.maxDistance #rospy.loginfo('angularSpeed: ' + repr(angularSpeed) + '\t\t ('+repr(Global.x)+','+repr(Global.y)+')') setMotorSpeed(Config.fwSpeed, angularSpeed) rospy.sleep(0.2) return self.abort() class Lost(AbortableState): def __init__(self): smach.State.__init__(self, outcomes=[Transitions.IsSeen, Transitions.Aborted], input_keys=['lost_type']) def execute(self, userdata): rospy.loginfo('Executing state '+States.Lost+' ('+userdata.lost_type +')') if userdata.lost_type == LostTypes.Lost: while not rospy.is_shutdown(): rospy.loginfo('PANIC: lost'); return self.abort() else: # linear-/angularspeed tuples speedDict = { LostTypes.LostLeft: (Config.fwSpeed, -0.2), LostTypes.LostRight: (Config.fwSpeed, 0.2), LostTypes.LostBottom:(Config.fwSpeed, 0.0), LostTypes.LostTop: (Config.fwSpeed, 0.0), } while not rospy.is_shutdown() and not self.preempt_requested(): if Config.minSize < Global.size < Config.maxSize: return Transitions.IsSeen setMotorSpeed(*speedDict[userdata.lost_type]) rospy.sleep(0.2) return self.abort() #============================================================================== # Callback functions #============================================================================== def objectCallback(msg): #rospy.loginfo('objectCallback: size='+repr(msg.size)+'\t\t orientation='+repr(msg.orientation)); Global.lastX = Global.x Global.lastY = Global.y Global.size = msg.size Global.is_seen = msg.is_seen if Config.mirror: Global.x = (IMAGE_COLS - msg.x) Global.y = (IMAGE_ROWS - msg.y) Global.orientation = -msg.orientation else: Global.x = msg.x Global.y = msg.y Global.orientation = msg.orientation distanceY = computeIntersection(Global.x, Global.y, Global.orientation) #rospy.loginfo('distY: '+repr(distanceY / Config.maxDistance)) def configCallback(config, level): 
rospy.loginfo('Reconfigure Request: ') Config.minSize = config['minSize'] Config.maxSize = config['maxSize'] Config.fwSpeed = config['fwSpeed'] Config.deltaAngle = config['deltaAngle'] Config.deltaDist = config['deltaDist'] Config.kpAngle = config['kpAngle'] Config.kpDist = config['kpDist'] Config.robCenterX = config['robCenterX'] Config.robCenterY = config['robCenterY'] Config.maxDistance = config['maxDistance'] Config.mirror = config['mirror'] return config def positionCallback(msg): Global.currentPosition = msg.pose.position #============================================================================== # Helper functions #============================================================================== def hasPassed(): return (math.fabs(Global.orientation) < math.pi/6.0) and (Global.y > 0.75*IMAGE_ROWS) and (0.2*IMAGE_COLS < Global.x < 0.8*IMAGE_COLS) def computeIntersection(meanX, meanY, theta): robX = Config.robCenterX robY = Config.robCenterY nzero = (math.cos(theta), math.sin(theta)) d = meanX * nzero[0] + meanY * nzero[1]; # nzero * p - d return (nzero[0] * robX) + (nzero[1] * robY) - d; # werte im bereich [-1, 1] def setMotorSpeed(lin, ang): linearVector = Vector3(x=lin,z=0) angularVector = Vector3(z=ang) twist = Twist(linear=linearVector, angular=angularVector) pub_cmd_vel.publish(twist) # #ang = ang # geschwindigkeitswerte fuer thruster berechnen #left = lin*127 + ang*127 #right = lin*127 - ang*127 # auf den wertebereich -127 bis 127 beschraenken #left = numpy.clip(left, -127, 127) #right = numpy.clip(right, -127, 127) # nachrichten an motoren publishen #pub_motor_left.publish(sollSpeed(data = left)) #pub_motor_right.publish(sollSpeed(data = right)) def timerCallback(event): pub_behaviour_info.publish(String(data = 'Orientation: '+str(Global.orientation))) #============================================================================== # main #============================================================================== if __name__ == '__main__': 
rospy.init_node('pipefollowing') # Config server dynamic_reconfigure.server.Server(PipeFollowingConfig, configCallback) # Subscriber rospy.Subscriber('/object', Object, objectCallback) rospy.Subscriber('position/estimate', PoseStamped, positionCallback) # Publisher #pub_cmd_vel = rospy.Publisher('/hanse/commands/cmd_vel', Twist) #pub_motor_left = rospy.Publisher('/hanse/motors/left', sollSpeed) #pub_motor_right = rospy.Publisher('/hanse/motors/right', sollSpeed) pub_cmd_vel = rospy.Publisher('/hanse/commands/cmd_vel_behaviour', Twist) pub_behaviour_info = rospy.Publisher('/hanse/behaviour/pipefollow_info', String) rospy.Timer
index = 20 bets = 25 names = ("Plain", "Cheval H", "Cheval V", "Trans", "Trans S", "Carre", "Colonne", "Simple") for bet in range(bets): col = 40 # --------------------------------------- money print(""" when %d => if bets_index > %d then fb_a_addr <= std_logic_vector(to_unsigned(COLS*21 + COLS*%d + %d, 14)); fb_a_dat_in <= x"24"; -- $ end if;""" % (index, bet, bet, col)) in
dex += 1 col += 2 # extra space for m in range(5, -1, -1): print("""when %d => if bets_index > %d then fb_a_addr <= std_logic_vector(to_unsigned(COLS*21 + COLS*%d + %d, 14)); fb_a_dat_in <= ascii_i(bets(%d).money, %d); end if;""" % (index, bet, bet,
col, bet, m)) index += 1 col += 1 if m == 5: col += 1 # extra space if m == 2: print("""when %d => if bets_index > %d then fb_a_addr <= std_logic_vector(to_unsigned(COLS*21 + COLS*%d + %d, 14)); fb_a_dat_in <= x"2e"; -- . end if;""" % (index, bet, bet, col)) index += 1 col += 1 # --------------------------------------- name col += 1 for n in range(8): # n = index of letter print("""when %d => if bets_index > %d then fb_a_addr <= std_logic_vector(to_unsigned(COLS*21 + COLS*%d + %d, 14)); case bets(%d).kind is""" % (index, bet, bet, col, bet)) for kind in range(1, 9): if n < len(names[kind-1]) and names[kind-1][n] != ' ': print(""" when %d => fb_a_dat_in <= x"%02x"; -- %c""" % (kind, ord(names[kind-1][n]), names[kind-1][n])) print(""" when others => fb_a_dat_in <= x"20"; -- space end case; fb_a_dat_in <= x"2e"; -- . end if;""") index += 1 col += 1
import pybossa.sched as sched
from pybossa.forms.forms import TaskSchedulerForm
from pybossa.core import project_repo
from flask.ext.plugins import Plugin
from functools import wraps
import random

__plugin__ = "RandomScheduler"
__version__ = "0.0.1"

# Identifier under which this scheduler is registered with pybossa.
SCHEDULER_NAME = 'random'


def get_random_task(project_id, user_id=None, user_ip=None, n_answers=30,
                    offset=0):
    """Return a random task for the user.

    Picks uniformly among the project's tasks; returns None when the
    project does not exist or has no tasks.
    """
    project = project_repo.get(project_id)
    if not project or not project.tasks:
        return None
    return random.choice(project.tasks)


def with_random_scheduler(f):
    """Wrap pybossa's task dispatcher, intercepting the 'random' scheduler."""
    @wraps(f)
    def wrapper(project_id, sched, user_id=None, user_ip=None, offset=0):
        if sched == SCHEDULER_NAME:
            # Our scheduler was selected: bypass the stock dispatcher.
            return get_random_task(project_id, user_id, user_ip, offset=offset)
        # Anything else falls through to the wrapped dispatcher.
        return f(project_id, sched, user_id=user_id, user_ip=user_ip,
                 offset=offset)
    return wrapper


def variants_with_random_scheduler(f):
    """Wrap the scheduler-variant listing so 'random' shows up as a choice."""
    @wraps(f)
    def wrapper():
        extra = [(SCHEDULER_NAME, 'Random')]
        return f() + extra
    return wrapper


class RandomScheduler(Plugin):
    """Plugin wiring: monkey-patch the pybossa scheduler entry points."""

    def setup(self):
        # Patch the dispatcher first, then the variant listing, then
        # refresh the choices shown in the project settings form.
        sched.new_task = with_random_scheduler(sched.new_task)
        sched.sched_variants = variants_with_random_scheduler(sched.sched_variants)
        TaskSchedulerForm.update_sched_options(sched.sched_variants())
# This file is part of Indico. # Copyright (C) 2002 - 2021 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. import base64 import mimetypes import re from contextlib import contextmanager from urllib.parse import urlsplit, urlunsplit import requests from flask import current_app, g, request from flask_webpackext import current_webpack from flask_webpackext.manifest import JinjaManifestEntry from pywebpack import Manifest from werkzeug.urls import url_parse from indico.core.config import config from indico.modules.events.layout.models.images import ImageFile from indico.web.flask.util import endpoint_for_url _css_url_pattern = r'''url\((['"]?)({}|https?:)?([^)'"]+)\1\)''' _url_has_extension_re = re.compile(r'.*\.([^/]+)$') _plugin_url_pattern = r'(?:{})?/static/plugins/([^/]+)/(.*?)(?:__v[0-9a-f]+)?\.([^.]+)$' _static_url_pattern = r'(?:{})?/(images|dist|fonts)(.*)/(.+?)(?:__v[0-9a-f]+)?\.([^.]+)$' _custom_url_pattern = r'(?:{})?/static/custom/(.+)$' def rewrite_static_url(path): """Remove __vxxx prefix from static URLs.""" plugin_pattern = _plugin_url_pattern.format(url_parse(config.BASE_URL).path) static_pattern = _static_url_pattern.format(url_parse(config.BASE_URL).path) custom_pattern = _custom_url_pattern.format(url_parse(config.BASE_URL).path) if re.match(plugin_pattern, path): return re.sub(plugin_pattern, r'static/plugins/\1/\2.\3', path) elif re.match(static_pattern, path): return re.sub(static_pattern, r'static/\1\2/\3.\4', path) else: return re.sub(custom_pattern, r'static/custom/\1', path) def _create_data_uri(url, filename): """Create a data url that contains the file in question.""" response = requests.get(url, verify=False) if response.status_code != 200: # couldn't access the file return url data = base64.b64encode(response.content).decode() content_type = (mimetypes.guess_type(filename)[0] or response.headers.get('Content-Type', 
'application/octet-stream')) return f'data:{content_type};base64,{data}' def _rewrite_event_asset_url(event, url): """Rewrite URLs of assets such as event images. Only assets contained within the event will be taken into account """ scheme, netloc, path, qs, anchor = urlsplit(url) netloc = netloc or current_app.config['SERVER_NAME'] scheme = scheme or 'https' # internal URLs (same server) if netloc == current_app.config['SERVER_NAME']: # this piece of Flask magic finds the endpoint that corresponds to # the URL and checks that it points to an image belonging to this event endpoint_info = endpoint_for_url(path) if endpoint_info: endpoint, data = endpoint_info if endpoint == 'event_images.image_display' and data['event_id'] == event.id: image_file = ImageFile.get(data['image_id']) if image_file and image_file.event == event: return f'images/{image_file.id}-{image_file.filename}', image_file # if the URL is not internal or just not an image, # we embed the contents using a data URI data_uri = _create_data_uri(urlunsplit((scheme, netloc, path, qs, '')), urlsplit(path)[-1]) return data_uri, None def _remove_anchor(url): """Remove the anchor from a URL.""" scheme, netloc, path, qs, anchor = urlsplit(url) return urlunsplit((scheme, netloc, path, qs, '')) def rewrite_css_urls(event, css): """Rewrite CSS in order to handle url(...) properly.""" # keeping track of used URLs used_urls = set() used_images = set() def _replace_url(m): prefix = m.group(2) or '' url = m.group(3) if url.startswith('/event/') or re.match(r'https?:', prefix): rewritten_url, imag
e_file = _rewrite_event_asset_url(event, prefix + url) if image_file: used_images.add(image_file)
return f'url({rewritten_url})' else: rewritten_url = rewrite_static_url(url) used_urls.add(_remove_anchor(rewritten_url)) if url.startswith('/static/plugins/'): return f"url('../../../../../{rewritten_url}')" else: return f"url('../../../{rewritten_url}')" indico_path = url_parse(config.BASE_URL).path new_css = re.sub(_css_url_pattern.format(indico_path), _replace_url, css, flags=re.MULTILINE) return new_css, used_urls, used_images def url_to_static_filename(endpoint, url): """Handle special endpoint/URLs so that they link to offline content.""" if re.match(r'(events)?\.display(_overview)?$', endpoint): return 'index.html' elif endpoint == 'event_layout.css_display': return 'custom.css' elif endpoint == 'event_images.logo_display': return 'logo.png' indico_path = url_parse(config.BASE_URL).path if re.match(_static_url_pattern.format(indico_path), url): url = rewrite_static_url(url) else: # get rid of [/whatever]/event/1234 url = re.sub(fr'{indico_path}(?:/event/\d+)?/(.*)', r'\1', url) if not url.startswith('assets/'): # replace all remaining slashes url = url.rstrip('/').replace('/', '--') # it's not executed in a webserver, so we do need a .html extension if not _url_has_extension_re.match(url): url += '.html' return url def _rule_for_endpoint(endpoint): return next((x for x in current_app.url_map.iter_rules(endpoint) if 'GET' in x.methods), None) @contextmanager def override_request_endpoint(endpoint): rule = _rule_for_endpoint(endpoint) assert rule is not None old_rule = request.url_rule request.url_rule = rule try: yield finally: request.url_rule = old_rule class RewrittenManifest(Manifest): """A manifest that rewrites its asset paths.""" def __init__(self, manifest): super().__init__() self._entries = {k: JinjaManifestEntry(entry.name, self._rewrite_paths(entry._paths)) for k, entry in manifest._entries.items()} self.used_assets = set() def _rewrite_paths(self, paths): return [rewrite_static_url(path) for path in paths] def __getitem__(self, key): 
self.used_assets.add(key) return super().__getitem__(key) @contextmanager def collect_static_files(): """Keep track of URLs used by manifest and url_for.""" g.custom_manifests = {None: RewrittenManifest(current_webpack.manifest)} g.used_url_for_assets = set() used_assets = set() yield used_assets for manifest in g.custom_manifests.values(): used_assets |= {p for k in manifest.used_assets for p in manifest[k]._paths} used_assets |= {rewrite_static_url(url) for url in g.used_url_for_assets} del g.custom_manifests del g.used_url_for_assets
import os
import unittest

from blivet import Blivet
from blivet import util
from blivet.size import Size
from blivet.flags import flags


@unittest.skipUnless(os.environ.get("JENKINS_HOME"), "jenkins only test")
@unittest.skipUnless(os.geteuid() == 0, "requires root access")
class ImageBackedTestCase(unittest.TestCase):

    """ A class to encapsulate testing of blivet using block devices.

        The basic idea is you create some scratch block devices and then
        run some test code on them.

        :attr:`~.ImageBackedTestCase.disks` defines the set of disk images.

        :meth:`~.ImageBackedTestCase._set_up_storage` is where you specify the
        initial layout of the disks. It will be written to the disk images in
        :meth:`~.ImageBackedTestCase.set_up_storage`.

        You then write test methods as usual that use the disk images, which
        will be cleaned up and removed when each test method finishes.
    """

    initialize_disks = True
    """ Whether or not to create a disklabel on the disks. """

    disks = {"disk1": Size("2 GiB"),
             "disk2": Size("2 GiB")}
    """ The names and sizes of the disk images to create/use. """

    def set_up_disks(self):
        """ Create disk image files to build the test's storage on.

            If you are actually creating the disk image files here don't
            forget to set the initializeDisks flag so they get a fresh
            disklabel when clearPartitions gets called from create_storage
            later.
        """
        # dict.items() is already iterable; the redundant iter() wrapper was
        # dropped.
        for name, size in self.disks.items():
            path = util.create_sparse_tempfile(name, size)
            self.blivet.config.diskImages[name] = path

        # set up the disk images with a disklabel
        self.blivet.config.initializeDisks = self.initialize_disks

    def _set_up_storage(self):
        """ Schedule creation of storage devices on the disk images.

            .. note::
                The disk images should already be in a populated devicetree.
        """
        pass

    def set_up_storage(self):
        """ Create a device stack on top of disk images for this test to run on.

            This will write the configuration to whatever disk images are
            defined in set_up_disks.
        """
        # create disk images
        self.set_up_disks()

        # populate the devicetree
        self.blivet.reset()

        # clear and/or initialize disks as specified in set_up_disks
        self.blivet.clearPartitions()

        # create the rest of the stack
        self._set_up_storage()

        # write configuration to disk images
        self.blivet.doIt()

    def setUp(self):
        """ Do any setup required prior to running a test. """
        flags.image_install = True

        self.blivet = Blivet()

        # Register cleanup before building storage so it still runs if
        # set_up_storage fails partway through.
        self.addCleanup(self._cleanUp)
        self.set_up_storage()

    def _cleanUp(self):
        """ Clean up any resources that may have been set up for a test. """
        self.blivet.reset()
        self.blivet.devicetree.teardownDiskImages()
        for fn in self.blivet.config.diskImages.values():
            if os.path.exists(fn):
                os.unlink(fn)

        flags.image_install = False
# Explicit import instead of the original wildcard import: topColors is the
# only name this script uses, and `from x import *` hides where names come
# from and pollutes the module namespace.
from colordetection import topColors

# Run color detection for this id (opaque numeric key passed straight
# through to colordetection — presumably an image identifier; TODO confirm).
topColors(992780587437103)
""" URL patterns for the views included in ``django.contrib.auth``. """ from django.conf.urls import patterns, url from registration_withemail.forms import EldonUserAuthenticationForm urlpatterns = patterns('', url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'registration/login.html', 'authentication_form':
EldonUserAuthenticationForm}, name='login'), url(r'^logout/$', 'django.contrib.auth.views.logout', name='logout'), url(r'^password_change/$', 'django.contrib.auth.views.password_change', name='password_change'), url(r'^password_change/done/$', 'django.contrib.auth.views.pa
ssword_change_done', name='password_change_done'), url(r'^password_reset/$', 'django.contrib.auth.views.password_reset', name='password_reset'), url(r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done', name='password_reset_done'), url(r'^reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', 'django.contrib.auth.views.password_reset_confirm', name='password_reset_confirm'), url(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete', name='password_reset_complete'), )
# -*- coding: utf-8 -*- import json from concurrent.futures import ThreadPoolExecutor from d
atetime import date, timedelta import requests import common # -----------
# ------------------------------------------------------------------------
def grab(url):
    """Fetch *url* with browser-like 12306 headers.

    Returns the response body text, or the sentinel string '["timeout"]' when
    the request times out or the connection fails (the failing URL is printed).
    """
    requests.packages.urllib3.disable_warnings()
    # Request headers mimicking a real browser session against kyfw.12306.cn
    headers = {
        'Host': 'kyfw.12306.cn',
        'Connection': 'keep-alive',
        'Cache-Control': 'no-cache',
        'Accept': '*/*',
        'X-Requested-With': 'XMLHttpRequest',
        # 'If-Modified-Since': '0',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
        'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init',
        'Accept-Encoding': 'gzip, deflate, sdch, br',
        'Accept-Language': 'zh-CN,zh;q=0.8'}
    try:
        return requests.get(url, headers=headers, verify=False, timeout=15).text
    except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError):
        print(url)
        return '["timeout"]'


# ------------------------------------------------------------------------
def grab_station_name():
    """Download the station-name JS blob and dump it as station_name.json."""
    url = 'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js'
    r = grab(url)
    # Strip the JS variable wrapper, leaving '@'-separated records.
    r = r.replace('var station_names =\'@', '')
    r = r.replace('\';', '')
    r = r.split('@')
    station_name = []
    for i in r:
        # Each record is '|'-separated: pinyin_code|name|telecode|pinyin|initials|id
        sn = i.split('|')
        sn = {'id': sn[5],
              'telecode': sn[2],
              'station_name': sn[1],
              'pinyin': sn[3],
              'initials': sn[4],
              'pinyin_code': sn[0]}
        station_name.append(sn)
    with open('station_name.json', 'w', encoding='utf-8') as fp:
        json.dump(station_name, fp, ensure_ascii=False, sort_keys=True, indent=2)
    return True


# ------------------------------------------------------------------------
def grab_train_list():
    """Download the train-list JS blob and dump a de-duplicated train_list.json."""
    url = 'https://kyfw.12306.cn/otn/resources/js/query/train_list.js'
    r = grab(url)
    r = r.replace('var train_list =', '')
    d = json.loads(r)
    train_list = []
    for key in d:
        for i in d[key]:
            for j in d[key][i]:
                # 'code(from-to)' -> [code, from, to]
                j['station_train_code'] = j['station_train_code'].replace('(', '|')
                j['station_train_code'] = j['station_train_code'].replace(')', '|')
                j['station_train_code'] = j['station_train_code'].replace('-', '|')
                j['station_train_code'] = j['station_train_code'].split('|')
                j['train_code'] = j['station_train_code'][0]
                j['from_station'] = j['station_train_code'][1]
                j['to_station'] = j['station_train_code'][2]
                del j['station_train_code']
                # Serialize for set-based de-duplication below.
                train_list.append(json.dumps(j, ensure_ascii=False, sort_keys=True))
    train_list = list(set(train_list))
    for i in train_list:
        train_list[train_list.index(i)] = json.loads(i)
    with open('train_list.json', 'w', encoding='utf-8') as fp:
        json.dump(train_list, fp, ensure_ascii=False, sort_keys=True, indent=2)
    return True


# ------------------------------------------------------------------------
class train_schedule():
    """Grab and normalize per-train schedules from 12306."""

    def get_train_schedule(self, d, max_workers):
        # self.grab_train_schedule(d, max_workers)
        self.parse_train_schedule()

    def parse_train_schedule(self):
        """Flatten raw train_schedule.json records into get_train_schedule.json."""
        with open('train_schedule.json', 'r', encoding='utf-8') as ts:
            train_schedule = json.load(ts)
        get_train_schedule = []
        for ts in train_schedule:
            try:
                # Hoist per-train fields out of the first schedule entry.
                train = ts['data']
                ts.clear()
                ts['train'] = train
                ts['train']['schedule'] = ts['train']['data']
                del ts['train']['data']
                ts['train']['start_station_name'] = ts['train']['schedule'][0]['start_station_name']
                del ts['train']['schedule'][0]['start_station_name']
                ts['train']['end_station_name'] = ts['train']['schedule'][0]['end_station_name']
                del ts['train']['schedule'][0]['end_station_name']
                ts['train']['station_train_code'] = ts['train']['schedule'][0]['station_train_code']
                del ts['train']['schedule'][0]['station_train_code']
                ts['train']['train_class_name'] = ts['train']['schedule'][0]['train_class_name']
                del ts['train']['schedule'][0]['train_class_name']
                ts['train']['service_type'] = ts['train']['schedule'][0]['service_type']
                del ts['train']['schedule'][0]['service_type']
                # Re-key the schedule as {0: stop, 1: stop, ...} ordered by station_no.
                ts['train']['s'] = sorted(ts['train']['schedule'], key=lambda x: x['station_no'])
                ts['train']['schedule'] = {}
                schedule_index = 0
                for td in ts['train']['s']:
                    ts['train']['schedule'].update({schedule_index: td})
                    schedule_index += 1
                del ts['train']['s']
                get_train_schedule.append(ts)
            except (IndexError, TypeError, json.decoder.JSONDecodeError):
                # Malformed record (e.g. a '["timeout"]' response) — skip it.
                pass
        with open('get_train_schedule.json', 'w', encoding='utf-8') as fp:
            json.dump(get_train_schedule, fp, ensure_ascii=False, sort_keys=True, indent=2)

    def grab_train_schedule_callback(self, url):
        """Fetch one schedule URL; return [ok, payload].

        BUGFIX: the original parsed inside the try and referenced `ts` in the
        except branch, where it was unbound (NameError on bad JSON). Fetch
        first, then parse, and return the raw text on decode failure.
        """
        raw = grab(url)
        try:
            return [True, json.loads(raw)]
        except json.decoder.JSONDecodeError:
            return [False, raw]

    def grab_train_schedule(self, d, max_workers):
        """Fetch every train's schedule for date *d* (YYYY-MM-DD) concurrently."""
        common.timing_starts()
        with open('station_name.json', 'r', encoding='utf-8') as sn:
            station_name = json.load(sn)
        with open('train_list.json', 'r', encoding='utf-8') as tl:
            train_list = json.load(tl)
        year, month, day = d.split('-')
        start_time = date(int(year), int(month), int(day))
        # dates = [
        #     start_time - timedelta(days=3),
        #     start_time - timedelta(days=2),
        #     start_time - timedelta(days=1),
        #     start_time,
        #     start_time + timedelta(days=1),
        #     start_time + timedelta(days=2),
        #     start_time + timedelta(days=3)
        # ]
        dates = [start_time]
        urls = []
        for d in dates:
            for i in train_list:
                train_no = i['train_no']
                # Resolve the from/to station names to telecodes.
                for j in station_name:
                    if i['from_station'] == j['station_name']:
                        from_station_telecode = j['telecode']
                    if i['to_station'] == j['station_name']:
                        to_station_telecode = j['telecode']
                urls.append(
                    'https://kyfw.12306.cn/otn/czxx/queryByTrainNo?train_no=%s&from_station_telecode=%s&to_station_telecode=%s&depart_date=%s' % (
                        train_no, from_station_telecode, to_station_telecode, d.isoformat()
                    ))
        train_schedule = []
        train_schedule_err = []
        with ThreadPoolExecutor(max_workers=max_workers) as pool:
            for i in pool.map(self.grab_train_schedule_callback, urls):
                if i[0]:
                    train_schedule.append(i[1])
                else:
                    train_schedule_err.append(i[1])
        with open('train_schedule.json', 'w', encoding='utf-8') as fp:
            json.dump(train_schedule, fp, ensure_ascii=False, sort_keys=True, indent=2)
        common.timing_ends('抓取列车时刻表')
        return True
f.tarifa.get_period_by_date(dia + timedelta(hours=9), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=10), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=11), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=12), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=13), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=14), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=15), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=16), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=17), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=18), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=19), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=20), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=21), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=22), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=23), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=23, minutes=59), self.holidays).code == 'P6' with it('should have correct energy period on laboral winter data'): dia = self.winter_laboral_day assert self.tarifa.get_period_by_date(dia, self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=1), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=2), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=3), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=4), 
self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=5), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=6), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=7), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=8), self.holidays).code == 'P3' assert self.tarifa.get_period_by_date(dia + timedelta(hours=9), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=10), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=11), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=12), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=13), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=14), self.holidays).code == 'P3' assert self.tarifa.get_period_by_date(dia + timedelta(hours=15), self.holidays).code == 'P3' assert self.tarifa.get_period_by_date(dia + timedelta(hours=16), self.holidays).code == 'P3' assert self.tarifa.get_period_by_date(dia + timedel
ta(hours=17), self.holidays).code == 'P3' assert self.tarifa.get_period_by_date(dia + timedelta(hours=18), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=19), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=20), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=21), sel
f.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=22), self.holidays).code == 'P3' assert self.tarifa.get_period_by_date(dia + timedelta(hours=23), self.holidays).code == 'P3' assert self.tarifa.get_period_by_date(dia + timedelta(hours=23, minutes=59), self.holidays).code == 'P3' with it('should have correct energy period on laboral summer data'): dia = self.summer_laboral_day assert self.tarifa.get_period_by_date(dia, self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=1), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=2), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=3), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=4), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=5), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=6), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=7), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=8), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=9), self.holidays).code == 'P1' assert self.tarifa.get_period_by_date(dia + timedelta(hours=10), self.holidays).code == 'P1' assert self.tarifa.get_period_by_date(dia + timedelta(hours=11), self.holidays).code == 'P1' assert self.tarifa.get_period_by_date(dia + timedelta(hours=12), self.holidays).code == 'P1' assert self.tarifa.get_period_by_date(dia + timedelta(hours=13), self.holidays).code == 'P1' assert self.tarifa.get_period_by_date(dia + timedelta(hours=14), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=15), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=16), self.holidays).code == 'P2' assert 
self.tarifa.get_period_by_date(dia + timedelta(hours=17), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=18), self.holidays).code == 'P1' assert self.tarifa.get_period_by_date(dia + timedelta(hours=19), self.holidays).code == 'P1' assert self.tarifa.get_period_by_date(dia + timedelta(hours=20), self.holidays).code == 'P1' assert self.tarifa.get_period_by_date(dia + timedelta(hours=21), self.holidays).code == 'P1' assert self.tarifa.get_period_by_date(dia + timedelta(hours=22), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=23), self.holidays).code == 'P2' assert self.tarifa.get_period_by_date(dia + timedelta(hours=23, minutes=59), self.holidays).code == 'P2' with it('should have correct energy period on weekend winter data'): dia = self.winter_weekend_day assert self.tarifa.get_period_by_date(dia, self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=1), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=2), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=3), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=4), self.holidays).code == 'P6' assert self.tarifa.get_period_by_date(dia + timedelta(hours=5), self.holidays).code == 'P6' assert self.
a, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["bordercolor"] @bordercolor.setter def bordercolor(self, val): self["bordercolor"] = val # bordercolorsrc # -------------- @property def bordercolorsrc(self): """ Sets the source reference on Chart Studio Cloud for `bordercolor`. The 'bordercolorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["bordercolorsrc"] @bordercolorsrc.setter def bordercolorsrc(self, val): self["bordercolorsrc"] = val # font # ---- @property def font(self): """ Sets the font used in hover labels. The 'font' property is an instance of Font that may be specified as: - An instance of :class:`plotly.graph_objs.scatter.hoverlabel.Font` - A dict of string/value properties that will be passed to the Font constructor Supported dict properties: color colorsrc Sets the source reference on Chart Studio Cloud for `color`. family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". familysrc Sets the source reference on Chart Studio Cloud for `family`. size sizesrc Sets the source reference on Chart Studio Cloud for `size`. 
Returns ------- plotly.graph_objs.scatter.hoverlabel.Font """ return self["font"] @font.setter def font(self, val): self["font"] = val # namelength # ---------- @property def namelength(self): """ Sets the default length (in number of characters) of the trace name in the hover labels for all traces. -1 shows the whole name regardless of length. 0-3 shows the first 0-3 characters, and an integer >3 will show the whole name if it is less than that many characters, but if it is longer, will truncate to `namelength - 3` characters and add an ellipsis. The 'namelength' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [-1, 9223372036854775807] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|numpy.ndarray """ return self["namelength"] @namelength.setter def namelength(self, val): self["namelength"] = val # namelengthsrc # ------------- @property def namelengthsrc(self): """ Sets the source reference on Chart Studio Cloud for `namelength`. The 'namelengthsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["namelengthsrc"] @namelengthsrc.setter def namelengthsrc(self, val): self["namelengthsrc"] = val # Self properties description # --------------------------- @property def _prop_descriptions(self): return """\ align Sets the horizontal alignment of the text content within hover label box. Has an effect only if the hover label text spans more two or more lines alignsrc Sets the source reference on Chart Studio Cloud for `align`. bgcolor Sets the background color of the hover labels for this trace bgcolorsrc Sets the source reference on Chart Studio Cloud for `bgcolor`. bordercolor Sets the border color of the hover labels for this trace. bordercolorsrc Sets the source reference on Chart Studio Cloud for `bordercolor`. font Sets the font used in hover labels. 
namelength Sets the default length (in number of characters) of the trace name in the hover labels for all traces. -1 shows the whole name regardless of length. 0-3 shows the first 0-3 characters, and an integer >3 will show the whole name if it is less than that many characters, but if it is longer, will truncate to `namelength - 3` characters and add an ellipsis. namelengthsrc Sets the source reference on Chart Studio Cloud for `namelength`. """ def __init__( self, arg=None, align=None, alignsrc=None, bgcolor=None, bgcolorsrc=None, bordercolor=None, bordercolorsrc=None, font=None, namelength=None, namelengthsrc=None, **kwargs ): """ Construct a new Hoverlabel object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.scatter.Hoverlabel` align Sets the horizontal alignment of the text content within hover label box. Has an effect only if the hover label text spans more two or more lines alignsrc Sets the source reference on Chart Studio Cloud for `align`. bgcolor Sets the background color of the hover labels for this trace bgcolorsrc Sets the source reference on Chart Studio Cloud for `bgcolor`. bordercolor Sets the border color of the hover labels for this trace. bordercolorsrc Sets the source reference on Chart Studio Cloud for `bordercolor`. font Sets the font used in hover labels. namelength Sets the default length (
in number of characters) of the trace name in the hover labels for all traces. -1 shows the whole name regardless of length. 0-3
shows the first 0-3 characters, and an integer >3 will show the whole name if it is less than that many characters, but if it is longer, will truncate to `namelength - 3` characters and add an ellipsis. namelengthsrc Sets the source reference on Chart Studio Cloud for `namelength`. Returns ------- Hoverlabel """ super(Hoverlabel, self).__init__("hoverlabel") if "_parent" in kwargs: self._parent = kwargs["_parent"] return # Validate arg
from genc.regions import (
    Region,
    REGIONS,
)

# Python 2/3 compatibility shim: on Python 3 `basestring` does not exist.
try:
    basestring
except NameError:  # pragma: no cover
    basestring = str


def _build_cache(name):
    """Build an upper-cased lookup {field value -> region record} for *name*.

    *name* is one of the Region namedtuple field names (e.g. 'alpha2').
    Records whose field is None are excluded.
    """
    idx = Region._fields.index(name)
    # Dict comprehension instead of dict([(k, v) ...]) — same result, idiomatic.
    return {reg[idx].upper(): reg for reg in REGIONS if reg[idx] is not None}


# Module-level caches, built once at import time.
_alpha2 = _build_cache('alpha2')
_alpha3 = _build_cache('alpha3')
_name = _build_cache('name')


def region_by_alpha2(code, default=None):
    """Return the region record for an ISO alpha-2 code (case-insensitive)."""
    if isinstance(code, basestring):
        code = code.upper()
    return _alpha2.get(code, default)


def region_by_alpha3(code, default=None):
    """Return the region record for an ISO alpha-3 code (case-insensitive)."""
    if isinstance(code, basestring):
        code = code.upper()
    return _alpha3.get(code, default)


def region_by_name(name, default=None):
    """Return the region record for an exact region name (case-insensitive)."""
    if isinstance(name, basestring):
        name = name.upper()
    return _name.get(name, default)


__all__ = (
    'region_by_alpha2',
    'region_by_alpha3',
    'region_by_name',
    'REGIONS',
)
from django.contrib import admin
# NOTE(review): `django.contrib.contenttypes.generic` was split up in
# Django 1.7 (GenericStackedInline moved to contenttypes.admin) — this file
# targets an older Django; confirm before upgrading.
from django.contrib.contenttypes.generic import GenericStackedInline

from reversion.admin import VersionAdmin

from test_project.test_app.models import ChildModel, RelatedModel, GenericRelatedModel, ProxyModel


class RelatedModelInline(admin.StackedInline):
    # Inline editor for plain FK-related rows.
    model = RelatedModel


class GenericRelatedInline(GenericStackedInline):
    # Inline editor for generic-relation rows.
    model = GenericRelatedModel


class ChildModelAdmin(VersionAdmin):
    """Version-tracked admin for ChildModel, exercising both inline kinds."""

    inlines = RelatedModelInline, GenericRelatedInline,

    list_display = ("parent_name", "child_name",)

    list_editable = ("child_name",)

    readonly_fields = ("parent_name",)


admin.site.register(ChildModel, ChildModelAdmin)
# The proxy model reuses the same admin class.
admin.site.register(ProxyModel, ChildModelAdmin)
import time

import numpy as np
from numpy import log, exp
from scipy.sparse import triu, csr_matrix
from scipy.special import gammaln
from scipy.stats import norm, lognorm

from .NCRMmcmc import NGGPmcmc
from .GGPgraphmcmc import tpoissonrnd


def MixGGPgraphmcmc(G, modelparam, mcmcparam, typegraph, verbose=True):
    """
    Run MCMC for the mixture-of-GGP graph model.

    Adapted from the BNPGraph matlab package by Francois Caron
    http://www.stats.ox.ac.uk/~caron/code/bnpgraph/index.html

    :param G: sparse logical adjacency matrix
    :param modelparam: dictionary of model parameters with the following fields:

        - n_mixture: number of mixture components
        - estimate_alpha / estimate_sigma / estimate_tau: whether to sample the
          hyperparameter (True) or use the fixed value in alpha/sigma/tau
        - alpha, sigma, tau: fixed values or prior parameters (see NGGPmcmc)
        - dir_alpha: Dirichlet concentration (read but currently unused here)

    :param mcmcparam: dictionary of mcmc parameters with the following fields:

        - niter: number of MCMC iterations
        - nburn: number of burn-in iterations
        - thin: thinning of the MCMC output
        - latent.MH_nb: number of MH iterations for latent (if 0: Gibbs update)
        - hyper.MH_nb: number of MH iterations for hyperparameters
        - store_w: logical. If true, returns MCMC draws of w

    :param typegraph: type of graph ('undirected' or 'simple'); a simple graph
        does not contain any self-loop
    :param verbose: logical. If true (default), print information
    :return: (samples, stats) —

        - samples: dict of MCMC draws: 'w', 'w_rem', 'alpha', 'sigma', 'tau'
        - stats: dict with 'w_rate' and 'hyper_rate' acceptance-rate arrays
          (NOTE(review): these arrays are allocated but never updated in this
          implementation; they are returned as-is)
    """
    n_mixture = modelparam['n_mixture']

    # BUGFIX: was `typegraph is 'simple'` — identity comparison with a string
    # literal only works by CPython interning accident; use equality.
    issimple = typegraph == 'simple'

    # --- initialise hyperparameters (random init when they are estimated) ---
    if modelparam['estimate_alpha']:
        alpha = 100. * np.random.random(size=n_mixture)
        if verbose:
            print('Random Init: alpha', alpha)
    else:
        alpha = modelparam['alpha']
    if modelparam['estimate_sigma']:
        sigma = 1 - np.random.lognormal(1, 1, size=n_mixture)
    else:
        sigma = modelparam['sigma']
    if modelparam['estimate_tau']:
        tau = 10. * np.random.random(size=n_mixture)
    else:
        tau = modelparam['tau']

    u = exp(np.random.normal(0, 1 / 4, size=n_mixture))

    K = G.shape[0]  # number of nodes
    # Random initial mixture assignment per node.
    pi = np.random.randint(0, n_mixture, size=K)

    # Upper-triangular symmetrised adjacency; k=1 drops self-loops for simple graphs.
    if issimple:
        G2 = triu(G + G.T, k=1)
    else:
        G2 = triu(G + G.T, k=0)
    ind1, ind2 = G2.nonzero()

    # Random initial latent edge counts.
    n = np.random.randint(1, 5, size=len(ind1))
    count = csr_matrix((n, (ind1, ind2)), shape=(K, K), dtype=int)
    N = count.sum(0).T + count.sum(1)

    niter = mcmcparam['niter']
    nburn = mcmcparam['nburn']
    thin = mcmcparam['thin']
    dir_alpha = modelparam['dir_alpha']  # NOTE(review): read but unused below

    J = np.zeros(K)
    J_rem = np.zeros(n_mixture)

    # --- storage for post-burn-in samples ---
    n_samples = int((niter - nburn) / thin)
    w_st = np.zeros((n_samples, K))
    w_rem_st = np.zeros((n_samples, n_mixture))
    alpha_st = np.zeros((n_samples, n_mixture))
    tau_st = np.zeros((n_samples, n_mixture))
    sigma_st = np.zeros((n_samples, n_mixture))

    rate = np.zeros(niter)
    rate2 = np.zeros(niter)
    logdist = np.zeros(n_mixture)

    tic = time.time()
    # Loop variable renamed from `iter` (shadowed the builtin).
    for iteration in range(niter):
        if verbose:
            print('Iteration=%d' % iteration, flush=True)
            print('\talpha =', alpha, flush=True)
            print('\tsigma =', sigma, flush=True)
            print('\ttau =', tau, flush=True)
            print('\tu =', u, flush=True)
            print('\t# node for each mixture', [np.sum(pi == m) for m in range(n_mixture)], flush=True)
            print('\tJoint log likelihood', logdist, np.sum(logdist), flush=True)

        # --- update jump sizes & hyperparameters, per mixture component ---
        for m in range(n_mixture):
            J[pi == m], J_rem[m], alpha[m], sigma[m], tau[m], u[m] = NGGPmcmc(
                np.sum(N[pi == m]), N[pi == m], alpha[m], sigma[m], tau[m], u[m],
                modelparam, mcmcparam)

        logJ = log(J)

        # --- update node memberships (Gibbs sweep over nodes) ---
        n_sum = np.zeros(n_mixture)
        for m in range(n_mixture):
            logdist[m] = joint_logdist(N[pi == m], alpha[m], sigma[m], tau[m], u[m])
            n_sum[m] = np.sum(N[pi == m])
        for k in range(K):
            # Remove node k's contribution from its current component...
            prev_m = pi[k]
            logdist[prev_m] += -log(alpha[prev_m]) - N[k] * log(u[prev_m]) + gammaln(n_sum[prev_m]) \
                - gammaln(n_sum[prev_m] - N[k]) + (N[k] - sigma[prev_m]) * log(u[prev_m] + tau[prev_m])
            n_sum[prev_m] -= N[k]
            # ...score each candidate component...
            tmp = np.zeros(n_mixture)
            for m in range(n_mixture):
                tmp[m] = logdist[m] + log(alpha[m]) + N[k] * log(u[m]) - gammaln(n_sum[m]) \
                    - gammaln(n_sum[m] + N[k]) - (N[k] - sigma[m]) * log(u[m] + tau[m])
            # ...and resample the assignment.
            tmp = log_normalise(tmp)
            pi[k] = np.random.multinomial(1, tmp).argmax()
            new_m = pi[k]
            logdist[new_m] += log(alpha[new_m]) + N[k] * log(u[new_m]) - gammaln(n_sum[new_m]) \
                - gammaln(n_sum[new_m] + N[k]) - (N[k] - sigma[new_m]) * log(u[new_m] + tau[new_m])
            n_sum[new_m] += N[k]

        # --- update latent counts n (truncated Poisson) ---
        lograte_poi = log(2.) + logJ[ind1] + logJ[ind2]
        lograte_poi[ind1 == ind2] = 2. * logJ[ind1[ind1 == ind2]]  # self-loops
        n = tpoissonrnd(lograte_poi)
        count = csr_matrix((n, (ind1, ind2)), (K, K))
        N = count.sum(0).T + count.sum(1)

        # One-off ETA estimate after 10 iterations.
        if iteration == 10:
            toc = (time.time() - tic) * niter / 10.
            hours = np.floor(toc / 3600)
            minutes = (toc - hours * 3600.) / 60.
            print('-----------------------------------', flush=True)
            print('Start MCMC for GGP graphs', flush=True)
            print('Nb of nodes: %d - Nb of edges: %d' % (K, G2.sum()), flush=True)
            print('Number of iterations: %d' % niter, flush=True)
            print('Estimated computation time: %.0f hour(s) %.0f minute(s)' % (hours, minutes), flush=True)
            print('Estimated end of computation: ',
                  time.strftime('%b %dth, %H:%M:%S', time.localtime(tic + toc)), flush=True)
            print('-----------------------------------', flush=True)

        # --- store thinned post-burn-in samples ---
        if iteration > nburn and (iteration - nburn) % thin == 0:
            ind = int((iteration - nburn) / thin)
            if mcmcparam['store_w']:
                w_st[ind] = J
            w_rem_st[ind] = J_rem
            alpha_st[ind] = alpha
            sigma_st[ind] = sigma
            tau_st[ind] = tau

    # BUGFIX: the docstring promised (samples, stats) but the function
    # previously returned None; return the collected draws.
    # ('logalpha' from the docstring is not tracked by this implementation.)
    samples = {'w': w_st, 'w_rem': w_rem_st, 'alpha': alpha_st,
               'sigma': sigma_st, 'tau': tau_st}
    stats = {'w_rate': rate, 'hyper_rate': rate2}
    return samples, stats


def log_normalise(log_prob):
    """Convert unnormalised log-probabilities to a probability vector.

    BUGFIX: the original mutated its argument in place (`-=` on the caller's
    array) and returned exp(log_prob - max) WITHOUT normalising — but the
    result is fed to np.random.multinomial, which requires pvals summing to 1.
    """
    p = exp(log_prob - np.max(log_prob))
    return p / p.sum()


def joint_logdist(pi, alpha, sigma, tau, u):
    """Joint log-density of the counts *pi* of one mixture component."""
    abs_pi = len(pi)
    n = np.sum(pi)
    tmp = abs_pi * log(alpha) + (n - 1.) * log(u) - gammaln(n) - (n - sigma * abs_pi) * log(u + tau) \
        - (alpha / sigma) * ((u + tau) ** sigma - tau ** sigma)
    tmp += np.sum(gammaln(pi - sigma) - gammaln(1. - sigma))
    return tmp


def dirichlet_multinomial(hyper_alpha, pi, n_mixture):
    """Log Dirichlet-multinomial mass of the assignment vector *pi*."""
    pi_m = np.array([np.sum(pi == m) for m in range(n_mixture)])
    return gammaln(n_mixture * hyper_alpha) + np.sum(gammaln(pi_m + hyper_alpha)) - n_mixture * gammaln(
        hyper_alpha) - gammaln(len(pi) + hyper_alpha)
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Deprecated module stub: this file only carries DOCUMENTATION/EXAMPLES so the
# module keeps showing up in `ansible-doc` with its deprecation notice; there
# is intentionally no executable module code.
DOCUMENTATION = '''
---
module: fireball
short_description: Enable fireball mode on remote node
version_added: "0.9"
deprecated: "in favor of SSH with ControlPersist"
description:
    - Modern SSH clients support ControlPersist which is just as fast as
      fireball was.  Please enable that in ansible.cfg as a replacement
      for fireball.
    - Removed in ansible 2.0.
author:
    - "Ansible Core Team"
    - "Michael DeHaan"
'''

EXAMPLES = '''
'''
#
# Copyright (C) 2006, 2013 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <berrange@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#

import logging

from virtManager.baseclass import vmmGObjectUI


class vmmAbout(vmmGObjectUI):
    """Controller for the About dialog (about.ui)."""

    def __init__(self):
        vmmGObjectUI.__init__(self, "about.ui", "vmm-about")
        # Both closing the window and clicking any response button hide it.
        self.builder.connect_signals({
            "on_vmm_about_delete_event": self.close,
            "on_vmm_about_response": self.close,
        })

    def show(self):
        """Show the dialog with the current application version filled in."""
        logging.debug("Showing about")
        self.topwin.set_version(self.config.get_appversion())
        self.topwin.present()

    def close(self, ignore1=None, ignore2=None):
        # Signal handler; extra args absorb GTK callback parameters.
        # Returning 1 (True) stops the delete-event from destroying the window.
        logging.debug("Closing about")
        self.topwin.hide()
        return 1

    def _cleanup(self):
        # No resources beyond the UI itself; base class handles teardown.
        pass
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def main():
    """Dump 'output.dex' into 'output.txt' as a textual list of byte values.

    Bug fix: the original ``open('output.txt', 'wb').write(str(map(ord, data)))``
    only worked on Python 2 — on Python 3 ``map`` is lazy (the file would
    contain ``<map object at ...>``) and writing ``str`` to a binary-mode
    file raises TypeError.  ``bytearray`` iterates as ints on both Python 2
    and 3, and ``str(list(...))`` reproduces the exact Python 2 output,
    e.g. ``[0, 1, 65]``.  The file handles are also closed deterministically
    via context managers.
    """
    with open('output.dex', 'rb') as fin:
        data = fin.read()
    # Text payload, so open the destination in text mode.
    with open('output.txt', 'w') as fout:
        fout.write(str(list(bytearray(data))))


if __name__ == '__main__':
    main()
# --- truncated fragment: tail of an environment-diagnostics routine that
# starts before this chunk (the first expression is missing its 'print'
# prefix; kept verbatim) ---
('Number of processors: %s' % (multiprocessing.cpu_count(),))
print('Relevant system paths:')
print('sys.executable: %s' % (sys.executable,))
print('sys.prefix: %s' % (sys.prefix,))
if hasattr(sys, 'base_prefix'):
    print('sys.base_prefix: %s' % (sys.base_prefix,))
if hasattr(sys, 'real_prefix'):
    print('sys.real_prefix: %s' % (sys.real_prefix,))
if hasattr(site, 'getusersitepackages'):
    print('site.getusersitepackages(): %s' % (site.getusersitepackages(),))
if hasattr(site, 'getsitepackages'):
    print('site.getsitepackages(): %s' % (site.getsitepackages(),))
for path in sys.path:
    if os.path.exists(path) and os.path.basename(path) == 'site-packages':
        print('Folder with "site-packages" in sys.path: %s' % (path,))

# Guard so the watchdog below is only ever started once per process.
_started_monitoring_threads = False


def _start_monitoring_threads():
    # After the session finishes, wait 20 seconds to see if everything finished properly
    # and if it doesn't report an error.
    global _started_monitoring_threads
    if _started_monitoring_threads:
        return

    _started_monitoring_threads = True
    import threading
    if hasattr(sys, '_current_frames') and hasattr(threading, 'enumerate'):
        import time
        import traceback

        class DumpThreads(threading.Thread):
            def run(self):
                time.sleep(20)
                # Map thread idents to readable names for the dump below.
                thread_id_to_name = {}
                try:
                    for t in threading.enumerate():
                        thread_id_to_name[t.ident] = '%s (daemon: %s)' % (t.name, t.daemon)
                except:
                    pass
                stack_trace = [
                    '===============================================================================',
                    'pydev pyunit runner: Threads still found running after tests finished',
                    '================================= Thread Dump =================================']
                for thread_id, stack in sys._current_frames().items():
                    stack_trace.append('\n-------------------------------------------------------------------------------')
                    stack_trace.append(" Thread %s" % thread_id_to_name.get(thread_id, thread_id))
                    stack_trace.append('')
                    if 'self' in stack.f_locals:
                        sys.stderr.write(str(stack.f_locals['self']) + '\n')
                    for filename, lineno, name, line in traceback.extract_stack(stack):
                        stack_trace.append(' File "%s", line %d, in %s' % (filename, lineno, name))
                        if line:
                            stack_trace.append(" %s" % (line.strip()))
                stack_trace.append('\n=============================== END Thread Dump ===============================')
                sys.stderr.write('\n'.join(stack_trace))
                # Force thread run to finish
                import os
                # Hard-exit with a distinctive code so CI flags the leak.
                os._exit(123)

        dump_current_frames_thread = DumpThreads()
        dump_current_frames_thread.daemon = True  # Daemon so that this thread doesn't halt it!
        dump_current_frames_thread.start()


def pytest_unconfigure():
    # pytest hook: fires when the test session is torn down.
    _start_monitoring_threads()


@pytest.fixture(scope="session", autouse=True)
def check_no_threads():
    # Session-wide safety net: arm the watchdog after all tests ran.
    yield
    _start_monitoring_threads()


# see: http://goo.gl/kTQMs
SYMBOLS = {
    'customary': ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa', 'zetta', 'iotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
    'iec_ext': ('byte', 'kibi', 'mebi', 'gibi', 'tebi', 'pebi', 'exbi', 'zebi', 'yobi'),
}


def bytes2human(n, format='%(value).1f %(symbol)s', symbols='customary'):
    """
    Bytes-to-human / human-to-bytes converter.
    Based on: http://goo.gl/kTQMs
    Working with Python 2.x and 3.x.

    Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
    License: MIT
    """
    # NOTE: the string below is a plain expression statement, not part of the
    # docstring above (only the first string literal becomes __doc__).
    """
    Convert n bytes into a human readable string based on format.
    symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
    see: http://goo.gl/kTQMs

      >>> bytes2human(0)
      '0.0 B'
      >>> bytes2human(0.9)
      '0.0 B'
      >>> bytes2human(1)
      '1.0 B'
      >>> bytes2human(1.9)
      '1.0 B'
      >>> bytes2human(1024)
      '1.0 K'
      >>> bytes2human(1048576)
      '1.0 M'
      >>> bytes2human(1099511627776127398123789121)
      '909.5 Y'

      >>> bytes2human(9856, symbols="customary")
      '9.6 K'
      >>> bytes2human(9856, symbols="customary_ext")
      '9.6 kilo'
      >>> bytes2human(9856, symbols="iec")
      '9.6 Ki'
      >>> bytes2human(9856, symbols="iec_ext")
      '9.6 kibi'

      >>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
      '9.8 K/sec'

      >>> # precision can be adjusted by playing with %f operator
      >>> bytes2human(10000, format="%(value).5f %(symbol)s")
      '9.76562 K'
    """
    n = int(n)
    if n < 0:
        raise ValueError("n < 0")
    symbols = SYMBOLS[symbols]
    # prefix maps each unit symbol to its byte threshold (1 << 10, 1 << 20, ...).
    prefix = {}
    for i, s in enumerate(symbols[1:]):
        prefix[s] = 1 << (i + 1) * 10
    for symbol in reversed(symbols[1:]):
        if n >= prefix[symbol]:
            value = float(n) / prefix[symbol]
            # 'value' and 'symbol' are picked up from locals() by the format.
            return format % locals()
    # Below the smallest threshold: report raw bytes.
    return format % dict(symbol=symbols[0], value=n)


def format_memory_info(memory_info, curr_proc_memory_info):
    # One-line human-readable summary of system + current-process memory.
    return 'Total: %s, Available: %s, Used: %s %%, Curr process: %s' % (
        bytes2human(memory_info.total),
        bytes2human(memory_info.available),
        memory_info.percent,
        format_process_memory_info(curr_proc_memory_info))


def format_process_memory_info(proc_memory_info):
    # RSS (resident set size) as a human-readable string.
    return bytes2human(proc_memory_info.rss)


DEBUG_MEMORY_INFO = False

_global_collect_info = False

PRINT_MEMORY_BEFORE_AFTER_TEST = False  # This makes running tests slower (but it may be handy to diagnose memory issues).
@pytest.fixture(autouse=PRINT_MEMORY_BEFORE_AFTER_TEST) def before_after_each_function(request): global _global_collect_info try: import psutil # Don't fail if not there except ImportError: yield return current_pids = set(proc.pid for proc in psutil.process_iter()) before_curr_proc_memory_info = psutil.Process().memory_info() if _global_collect_info and DEBUG_MEMORY_INFO: try: from pympler import summary, muppy sum1 = summary.summarize(muppy.get_objects()) except: pydev_log.exception() sys.stdout.write( ''' =============================================================================== Memory before: %s %s =============================================================================== ''' % (request.function, format_memory_info(psutil.virtual_memory(), before_curr_proc_memory_info))) yield processes_info = [] for proc in psutil.process_iter(): if proc.pid not in current_pids: try: try: cmdline = proc.cmdline() except: cmdline = '<unable to get>' processes_info.append( 'New Process: %s(%s - %s) - %s' % ( proc.name(), proc.pid, cmdline, format_process_memory_info(proc.memory_info()) ) ) except (psutil.NoSuchProcess, psutil.AccessDenied): pass # The process could've died in the meanwhile after_curr_proc_memory_info = psutil.Process().memory_info() if DEBUG_MEMORY_INFO: try: if after_curr_proc_memory_info.rss - before_curr_proc_memory_info.rss > 10 * 1000 * 1000: # 10 MB leak if _global_collect_info: sum2 = summary.summarize(muppy.get_objects()) diff = summary.get_diff(sum1, sum2) sys.stdout.write('===============================================================================\n') sys.
from __future__ import unicode_literals

import os
import sys
import threading
from contextlib import contextmanager

from django.contrib.sites.models import Site

from mezzanine.conf import settings
from mezzanine.core.request import current_request
from mezzanine.utils.conf import middlewares_or_subclasses_installed


SITE_PERMISSION_MIDDLEWARE = \
    "mezzanine.core.middleware.SitePermissionMiddleware"


def current_site_id():
    """
    Responsible for determining the current ``Site`` instance to use
    when retrieving data for any ``SiteRelated`` models. If we're inside an
    override_current_site_id context manager, return the overriding site ID.
    Otherwise, try to determine the site using the following methods in order:

      - ``site_id`` in session. Used in the admin so that admin users
        can switch sites and stay on the same domain for the admin.
      - The id of the Site object corresponding to the hostname in the
        current request. This result is cached.
      - ``MEZZANINE_SITE_ID`` environment variable, so management
        commands or anything else outside of a request can specify a
        site.
      - ``SITE_ID`` setting.

    If a current request exists and the current site is not overridden,
    the site ID is stored on the request object to speed up subsequent
    calls.
    """
    # Context-manager override wins over everything else.
    if hasattr(override_current_site_id.thread_local, "site_id"):
        return override_current_site_id.thread_local.site_id

    # Imported lazily to avoid a circular import with the cache utilities.
    from mezzanine.utils.cache import cache_installed, cache_get, cache_set
    request = current_request()
    site_id = getattr(request, "site_id", None)
    if request and not site_id:
        site_id = request.session.get("site_id", None)
        if not site_id:
            domain = request.get_host().lower()
            if cache_installed():
                # Don't use Mezzanine's cache_key_prefix here, since it
                # uses this very function we're in right now to create a
                # per-site cache key.
                bits = (settings.CACHE_MIDDLEWARE_KEY_PREFIX, domain)
                cache_key = "%s.site_id.%s" % bits
                site_id = cache_get(cache_key)
            if not site_id:
                # Cache miss (or no cache): hit the database for the domain.
                try:
                    site = Site.objects.get(domain__iexact=domain)
                except Site.DoesNotExist:
                    pass
                else:
                    site_id = site.id
                    if cache_installed():
                        cache_set(cache_key, site_id)
    if not site_id:
        # No request (e.g. management command): fall back to env var/setting.
        site_id = os.environ.get("MEZZANINE_SITE_ID", settings.SITE_ID)
    if request and site_id and not getattr(settings, "TESTING", False):
        # Memoize on the request so later calls in this request are O(1).
        request.site_id = site_id
    return site_id


@contextmanager
def override_current_site_id(site_id):
    """
    Context manager that overrides the current site id for code executed
    within it. Used to access SiteRelated objects outside the current site.
    """
    override_current_site_id.thread_local.site_id = site_id
    yield
    # NOTE: not wrapped in try/finally — an exception inside the block will
    # leave the override in place on this thread.
    del override_current_site_id.thread_local.site_id


# Per-thread storage backing override_current_site_id above.
override_current_site_id.thread_local = threading.local()


def has_site_permission(user):
    """
    Checks if a staff user has staff-level access for the current site.
    The actual permission lookup occurs in ``SitePermissionMiddleware``
    which then marks the request with the ``has_site_permission`` flag,
    so that we only query the db once per request, so this function
    serves as the entry point for everything else to check access. We
    also fall back to an ``is_staff`` check if the middleware is not
    installed, to ease migration.
    """
    if not middlewares_or_subclasses_installed([SITE_PERMISSION_MIDDLEWARE]):
        return user.is_staff and user.is_active
    return getattr(user, "has_site_permission", False)


def host_theme_path():
    """
    Returns the directory of the theme associated with the given host.
    """
    # Set domain to None, which we'll then query for in the first
    # iteration of HOST_THEMES. We use the current site_id rather
    # than a request object here, as it may differ for admin users.
    domain = None

    for (host, theme) in settings.HOST_THEMES:
        if domain is None:
            domain = Site.objects.get(id=current_site_id()).domain
        if host.lower() == domain.lower():
            try:
                __import__(theme)
                module = sys.modules[theme]
            except ImportError:
                # Theme package not importable: keep scanning other entries.
                pass
            else:
                return os.path.dirname(os.path.abspath(module.__file__))
    return ""
# --- truncated fragment: tail of a location-attention test whose definition
# starts before this chunk; kept verbatim ---
ttention, max_seq_len=10) assert type(attention) == sockeye.rnn_attention.LocationAttention assert attention._input_previous_word assert attention.max_source_seq_len == 10


def test_att_mlp():
    # MLP attention: verify config fields are threaded through to the object.
    config_attention = sockeye.rnn_attention.AttentionConfig(type=C.ATT_MLP,
                                                             num_hidden=16,
                                                             input_previous_word=True,
                                                             source_num_hidden=None,
                                                             query_num_hidden=None,
                                                             layer_normalization=True,
                                                             config_coverage=None)
    attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=10)

    assert type(attention) == sockeye.rnn_attention.MlpAttention
    assert attention._input_previous_word
    assert attention.attention_num_hidden == 16
    assert attention.dynamic_source_num_hidden == 1
    assert attention._ln
    assert not attention.coverage


def test_att_cov():
    # Coverage attention: same checks plus the attached coverage model.
    config_coverage = sockeye.coverage.CoverageConfig(type='tanh',
                                                      num_hidden=5,
                                                      layer_normalization=True)
    config_attention = sockeye.rnn_attention.AttentionConfig(type=C.ATT_COV,
                                                             num_hidden=16,
                                                             input_previous_word=True,
                                                             source_num_hidden=None,
                                                             query_num_hidden=None,
                                                             layer_normalization=True,
                                                             config_coverage=config_coverage)
    attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=10)

    assert type(attention) == sockeye.rnn_attention.MlpCovAttention
    assert attention._input_previous_word
    assert attention.attention_num_hidden == 16
    assert attention.dynamic_source_num_hidden == 5
    assert attention._ln
    assert type(attention.coverage) == sockeye.coverage.ActivationCoverage


@pytest.mark.parametrize("attention_type", attention_types)
def test_attention(attention_type,
                   batch_size=1,
                   encoder_num_hidden=2,
                   decoder_num_hidden=2):
    # Functional check: bind the attention symbol and verify the computed
    # context/probabilities on a tiny hand-crafted example.
    # source: (batch_size, seq_len, encoder_num_hidden)
    source = mx.sym.Variable("source")
    # source_length: (batch_size,)
    source_length = mx.sym.Variable("source_length")
    source_seq_len = 3

    config_attention = sockeye.rnn_attention.AttentionConfig(type=attention_type,
                                                             num_hidden=2,
                                                             input_previous_word=False,
                                                             source_num_hidden=2,
                                                             query_num_hidden=2,
                                                             layer_normalization=False,
                                                             config_coverage=None)
    attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=source_seq_len)

    attention_state = attention.get_initial_state(source_length, source_seq_len)
    attention_func = attention.on(source, source_length, source_seq_len)
    attention_input = attention.make_input(0, mx.sym.Variable("word_vec_prev"), mx.sym.Variable("decoder_state"))
    attention_state = attention_func(attention_input, attention_state)

    sym = mx.sym.Group([attention_state.context, attention_state.probs])

    executor = sym.simple_bind(ctx=mx.cpu(),
                               source=(batch_size, source_seq_len, encoder_num_hidden),
                               source_length=(batch_size,),
                               decoder_state=(batch_size, decoder_num_hidden))

    # TODO: test for other inputs (that are not equal at each source position)
    executor.arg_dict["source"][:] = np.asarray([[[1., 2.], [1., 2.], [3., 4.]]])
    executor.arg_dict["source_length"][:] = np.asarray([2.0])
    executor.arg_dict["decoder_state"][:] = np.asarray([[5, 6]])
    exec_output = executor.forward()
    context_result = exec_output[0].asnumpy()
    attention_prob_result = exec_output[1].asnumpy()

    # expecting uniform attention_weights of 0.5: 0.5 * seq1 + 0.5 * seq2
    assert np.isclose(context_result, np.asarray([[1., 2.]])).all()
    # equal attention to first two and no attention to third
    assert np.isclose(attention_prob_result, np.asarray([[0.5, 0.5, 0.]])).all()


# (coverage type, coverage num_hidden) pairs exercised below.
coverage_cases = [("gru", 10), ("tanh", 4), ("count", 1), ("sigmoid", 1), ("relu", 30)]


@pytest.mark.parametrize("attention_coverage_type,attention_coverage_num_hidden", coverage_cases)
def test_coverage_attention(attention_coverage_type,
                            attention_coverage_num_hidden,
                            batch_size=3,
                            encoder_num_hidden=2,
                            decoder_num_hidden=2):
    # source: (batch_size, seq_len, encoder_num_hidden)
    source = mx.sym.Variable("source")
    # source_length: (batch_size, )
    source_length = mx.sym.Variable("source_length")
    source_seq_len = 10

    config_coverage = sockeye.coverage.CoverageConfig(type=attention_coverage_type,
                                                      num_hidden=attention_coverage_num_hidden,
                                                      layer_normalization=False)
    config_attention = sockeye.rnn_attention.AttentionConfig(type="coverage",
                                                             num_hidden=5,
                                                             input_previous_word=False,
                                                             source_num_hidden=encoder_num_hidden,
                                                             query_num_hidden=decoder_num_hidden,
                                                             layer_normalization=False,
                                                             config_coverage=config_coverage)
    attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=source_seq_len)

    attention_state = attention.get_initial_state(source_length, source_seq_len)
    attention_func = attention.on(source, source_length, source_seq_len)
    attention_input = attention.make_input(0, mx.sym.Variable("word_vec_prev"), mx.sym.Variable("decoder_state"))
    attention_state = attention_func(attention_input, attention_state)

    sym = mx.sym.Group([attention_state.context, attention_state.probs, attention_state.dynamic_source])

    source_shape = (batch_size, source_seq_len, encoder_num_hidden)
    source_length_shape = (batch_size,)
    decoder_state_shape = (batch_size, decoder_num_hidden)

    executor = sym.simple_bind(ctx=mx.cpu(),
                               source=source_shape,
                               source_length=source_length_shape,
                               decoder_state=decoder_state_shape)

    source_length_vector = integer_vector(shape=source_length_shape, max_value=source_seq_len)
    executor.arg_dict["source"][:] = gaussian_vector(shape=source_shape)
    executor.arg_dict["source_length"][:] = source_length_vector
    executor.arg_dict["decoder_state"][:] = gaussian_vector(shape=decoder_state_shape)
    exec_output = executor.forward()
    context_result = exec_output[0].asnumpy()
    attention_prob_result = exec_output[1].asnumpy()
    dynamic_source_result = exec_output[2].asnumpy()

    # With random inputs we can't pin exact values; the initial attention is
    # expected to be uniform over the unmasked source positions.
    expected_probs = (1. / source_length_vector).reshape((batch_size, 1))

    assert context_result.shape == (batch_size, encoder_num_hidden)
    assert attention_prob_result.shape == (batch_size, source_seq_len)
    assert dynamic_source_result.shape == (batch_size, source_seq_len, attention_coverage_num_hidden)
    assert (np.sum(np.isclose(attention_prob_result, expected_probs), axis=1) == source_length_vector).all()


# --- truncated fragment: next test definition continues past this chunk ---
def test_last_s
"""Helper for aiohttp webclient stuff.""" from __future__ import annotations import asyncio from contextlib import suppress from ssl import SSLContext import sys from typing import Any, Awaitable, cast import aiohttp from aiohttp import web from aiohttp.hdrs import CONTENT_TYPE, USER_AGENT from aiohttp.web_exceptions import HTTPBadGateway, HTTPGatewayTimeout import async_timeout from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE, __version__ from homeassistant.core import Event, HomeAssistant, callback from homeassistant.helpers.frame import warn_use from homeassistant.loader import bind_hass from homeassistant.util import ssl as ssl_util DATA_CONNECTOR = "aiohttp_connector" DATA_CONNECTOR_NOTVERIFY = "aiohttp_connector_notverify" DATA_CLIENTSESSION = "aiohttp_clientsession" DATA_CLIENTSESSION_NOTVERIFY = "aiohttp_clientsession_notverify" SERVER_SOFTWARE = "HomeAssistant/{0} aiohttp/{1} Python/{2[0]}.{2[1]}".format( __version__, aiohttp.__version__, sys.version_info ) @callback @bind_hass def async_get_clientsession( hass: HomeAssistant, verify_ssl: bool = True ) -> aiohttp.ClientSession: """Return default aiohttp ClientSession. This method must be run in the event loop. """ key = DATA_CLIENTSESSION_NOTVERIFY if verify_ssl: key = DATA_CLIENTSESSION if key not in hass.data: hass.data[key] = async_create_clientsession(hass, verify_ssl) return cast(aiohttp.ClientSession, hass.data[key]) @callback @bind_hass def async_create_clientsession( hass: HomeAssistant, verify_ssl: bool = True, auto_cleanup: bool = True, **kwargs: Any, ) -> aiohttp.ClientSession: """Create a new ClientSession with kwargs, i.e. for cookies. If auto_cleanup is False, you need to call detach() after the session returned is no longer used. Default is True, the session will be automatically detached on homeassistant_stop. This method must be run in the event loop. 
""" connector = _async_get_connector(hass, verify_ssl) clientsession = aiohttp.ClientSession( connector=connector, headers={USER_AGENT: SERVER_SOFTWARE}, **kwargs, ) clientsession.close = warn_use( # type: ignore clientsession.close, "closes the Home Assistant aiohttp session" ) if auto_cleanup: _async_register_clientsession_shutdown(hass, clientsession) return clientsession @bind_hass async def async_aiohttp_proxy_web( hass: HomeAssistant, request: web.BaseRequest, web_coro: Awaitable[aiohttp.ClientResponse], buffer_size: int = 102400, timeout: int = 10, ) -> web.StreamResponse | None: """Stream websession request to aiohttp web response.""" try: with async_timeout.timeout(timeout): req = await web_coro except asyncio.CancelledError: # The user cancelled the request return None except asyncio.TimeoutError as err: # Timeout trying to start the web request raise HTTPGatewayTimeout() from err except aiohttp.ClientError as err: # Something went wrong with the connection raise HTTPBadGateway() from err try: return await async_aiohttp_proxy_stream( hass, request, req.content, req.headers.get(CONTENT_TYPE) ) finally: req.close() @bind_hass async def async_aiohttp_proxy_stream( hass: HomeAssistant, request: web.BaseRequest, stream: aiohttp.StreamReader, content_type: str | None, buffer_size: int = 102400, timeout: int = 10, ) -> web.StreamResponse: """Stream a stream to aiohttp web response.""" response = web.StreamResponse() if content_type is not None: response.content_type = content_type await response.prepare(request) # Suppressing something went wrong fetching data, closed connection with suppress(asyncio.TimeoutError, aiohttp.ClientError): while hass.is_running: with async_timeout.timeout(timeout): data = await stream.read(buffer_size) if not data: break await response.write(data) return response
@callback
def _async_register_clientsession_shutdown(
    hass: HomeAssistant, clientsession: aiohttp.ClientSession
) -> None:
    """Detach a ClientSession when Home Assistant shuts down.

    This method must be run in the event loop.
    """

    @callback
    def _detach_session(event: Event) -> None:
        """Close websession."""
        clientsession.detach()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, _detach_session)


@callback
def _async_get_connector(
    hass: HomeAssistant, verify_ssl: bool = True
) -> aiohttp.BaseConnector:
    """Return the connector pool for aiohttp.

    One connector is kept per SSL-verification mode and reused for the
    lifetime of the process; it is closed on shutdown.

    This method must be run in the event loop.
    """
    key = DATA_CONNECTOR if verify_ssl else DATA_CONNECTOR_NOTVERIFY

    # Reuse the cached connector when one already exists for this mode.
    if key in hass.data:
        return cast(aiohttp.BaseConnector, hass.data[key])

    ssl_context: bool | SSLContext = (
        ssl_util.client_context() if verify_ssl else False
    )

    connector = aiohttp.TCPConnector(enable_cleanup_closed=True, ssl=ssl_context)
    hass.data[key] = connector

    async def _dispose_connector(event: Event) -> None:
        """Close connector pool."""
        await connector.close()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, _dispose_connector)

    return connector
# NOTE: Python 2 module — uses `unicode` and the py2-only
# `except ExceptionClass, name` syntax; it will not parse on Python 3.
from django.core.management.base import BaseCommand, CommandError
from django.db import IntegrityError

import olympia.core.logger

from olympia.access.models import Group, GroupUser
from olympia.users.models import UserProfile


class Command(BaseCommand):
    # Management command: `./manage.py <cmd> <user> <group_id>` adds a user
    # (by id or email) to an access group.
    help = 'Add a new user to a group.'

    log = olympia.core.logger.getLogger('z.users')

    def add_arguments(self, parser):
        # `user` accepts either a numeric id or an email address; which one
        # it is gets decided in do_adduser below.
        parser.add_argument('user', type=unicode, help='User id or email')
        parser.add_argument('group_id', type=int, help='Group id')

    def handle(self, *args, **options):
        # do_adduser raises CommandError on any failure, so the success
        # message below is only logged/printed when the insert worked.
        do_adduser(options['user'], options['group_id'])

        msg = 'Adding {user} to {group}\n'.format(
            user=options['user'], group=options['group_id'])
        self.log.info(msg)
        self.stdout.write(msg)


def do_adduser(user, group):
    # Resolve the user, resolve the group, then link them via GroupUser.
    # All expected failures are converted to CommandError for clean CLI output.
    try:
        if '@' in user:
            user = UserProfile.objects.get(email=user)
        elif user.isdigit():
            user = UserProfile.objects.get(pk=user)
        else:
            raise CommandError('Unknown input for user.')

        group = Group.objects.get(pk=group)
        GroupUser.objects.create(user=user, group=group)

    except IntegrityError, e:
        # Unique constraint on (user, group): the membership already exists.
        raise CommandError('User is already in that group? %s' % e)
    except UserProfile.DoesNotExist:
        raise CommandError('User ({user}) does not exist.'.format(user=user))
    except Group.DoesNotExist:
        raise CommandError('Group ({group}) does not exist.'.format(group=group))
    # --- truncated fragment: tail of a method whose definition starts before
    # this chunk; kept verbatim ---
    t() self.credentials.get_credentials(credentials) self.config.auth(None, None)

    def revoke_token(self):
        # Drop the cached access token for the active server.
        # NOTE(review): `get_credentials(arg)` appears to act as a *setter*
        # (persisting credentials) when called with an argument — confirm in
        # the Credentials class before refactoring.
        LOG.info("revoking token")

        self['server']['AccessToken'] = None
        self.credentials.get_credentials(self.credentials.get_credentials())
        self.config['auth.token'] = None

    def get_available_servers(self):
        # Gather servers from Emby Connect, local discovery and saved
        # credentials; merge, filter, and sort them most-recently-used first.
        LOG.debug("Begin getAvailableServers")

        # Clone the credentials
        credentials = self.credentials.get_credentials()
        connect_servers = self._get_connect_servers(credentials)
        found_servers = self._find_servers(self._server_discovery())

        if not connect_servers and not found_servers and not credentials['Servers']:
            # back out right away, no point in continuing
            LOG.info("Found no servers")
            return list()

        servers = list(credentials['Servers'])
        self._merge_servers(servers, found_servers)
        self._merge_servers(servers, connect_servers)
        servers = self._filter_servers(servers, connect_servers)

        # Two sort strategies: strptime first; fall back to time.strptime for
        # platforms/dates where datetime.strptime raises TypeError.
        try:
            servers.sort(key=lambda x: datetime.strptime(x['DateLastAccessed'], "%Y-%m-%dT%H:%M:%SZ"), reverse=True)
        except TypeError:
            servers.sort(key=lambda x: datetime(*(time.strptime(x['DateLastAccessed'], "%Y-%m-%dT%H:%M:%SZ")[0:6])), reverse=True)

        credentials['Servers'] = servers
        self.credentials.get_credentials(credentials)

        return servers

    def login_to_connect(self, username, password):
        # Authenticate against the Emby Connect service and persist the
        # resulting token/user in the stored credentials.
        # Returns the service response dict, or False on failure.
        if not username:
            raise AttributeError("username cannot be empty")

        if not password:
            raise AttributeError("password cannot be empty")

        try:
            result = self._request_url({
                'type': "POST",
                'url': self.get_connect_url("user/authenticate"),
                'data': {
                    'nameOrEmail': username,
                    'password': self._get_connect_password_hash(password)
                },
                'dataType': "json"
            })
        except Exception as error:
            # Failed to login
            LOG.error(error)
            return False
        else:
            credentials = self.credentials.get_credentials()
            credentials['ConnectAccessToken'] = result['AccessToken']
            credentials['ConnectUserId'] = result['User']['Id']
            credentials['ConnectUser'] = result['User']['DisplayName']
            self.credentials.get_credentials(credentials)
            # Signed in
            self._on_connect_user_signin(result['User'])

        return result

    def login(self, server, username, password=None, clear=True, options={}):
        # Authenticate by name against a specific Emby server.
        # NOTE(review): mutable default `options={}` — not mutated here, but a
        # risky pattern; confirm before changing the signature.
        if not username:
            raise AttributeError("username cannot be empty")

        if not server:
            raise AttributeError("server cannot be empty")

        try:
            request = {
                'type': "POST",
                'url': self.get_emby_url(server, "Users/AuthenticateByName"),
                'json': {
                    'username': username,
                    # Legacy sha1 password hash; 'pw' below carries the
                    # plain-text password when `clear` is set.
                    'password': hashlib.sha1(password or "").hexdigest(),
                }
            }
            if clear:
                request['json']['pw'] = password or ""

            result = self._request_url(request, False)
        except Exception as error:
            # Failed to login
            LOG.error(error)
            return False

        self._on_authenticated(result, options)
        return result

    def connect_to_address(self, address, options={}):
        # Try a single, manually supplied address.
        if not address:
            return False

        address = self._normalize_address(address)

        def _on_fail():
            LOG.error("connectToAddress %s failed", address)
            return self._resolve_failure()

        try:
            public_info = self._try_connect(address, options=options)
        except Exception:
            return _on_fail()
        else:
            LOG.info("connectToAddress %s succeeded", address)
            server = {
                'ManualAddress': address,
                'LastConnectionMode': CONNECTION_MODE['Manual']
            }
            self._update_server_info(server, public_info)
            server = self.connect_to_server(server, options)
            if server is False:
                return _on_fail()

            return server

    def connect_to_server(self, server, options={}):
        # Build the ordered list of connection modes to try (last successful
        # mode first when a token exists, then Manual, Local, Remote) and
        # walk it via _test_next_connection_mode.
        LOG.debug("Begin connectToServer")

        tests = []

        if server.get('LastConnectionMode') != CONNECTION_MODE['Remote'] and server.get('AccessToken'):
            tests.append(server['LastConnectionMode'])

        if CONNECTION_MODE['Manual'] not in tests:
            tests.append(CONNECTION_MODE['Manual'])
        if CONNECTION_MODE['Local'] not in tests:
            tests.append(CONNECTION_MODE['Local'])
        if CONNECTION_MODE['Remote'] not in tests:
            tests.append(CONNECTION_MODE['Remote'])

        # TODO: begin to wake server

        return self._test_next_connection_mode(tests, 0, server, options)

    def connect(self, options={}):
        # Entry point: discover all available servers and connect to them.
        LOG.info("Begin connect")

        return self._connect_to_servers(self.get_available_servers(), options)

    def connect_user(self):
        return self.user

    def connect_user_id(self):
        return self.credentials.get_credentials().get('ConnectUserId')

    def connect_token(self):
        return self.credentials.get_credentials().get('ConnectAccessToken')

    def emby_user_id(self):
        return self.get_server_info(self.server_id)['UserId']

    def emby_token(self):
        return self.get_server_info(self.server_id)['AccessToken']

    def get_server_info(self, server_id):
        # Returns the stored server dict for server_id, {} for None, and
        # (implicitly) None when the id is unknown.
        if server_id is None:
            LOG.info("server_id is empty")
            return {}

        servers = self.credentials.get_credentials()['Servers']

        for server in servers:
            if server['Id'] == server_id:
                return server

    def get_public_users(self):
        return self.client.emby.get_public_users()

    def get_connect_url(self, handler):
        return "https://connect.emby.media/service/%s" % handler

    def get_emby_url(self, base, handler):
        return "%s/emby/%s" % (base, handler)

    def _request_url(self, request, headers=True):
        # Thin wrapper over the HTTP layer: applies the default timeout and,
        # unless disabled, the standard headers.  Errors are logged and
        # re-raised for the caller to handle.
        request['timeout'] = request.get('timeout') or self.timeout
        if headers:
            self._get_headers(request)

        try:
            return self.http.request(request)
        except Exception as error:
            LOG.error(error)
            raise

    def _add_app_info(self):
        # "<app name>/<app version>" identification string.
        return "%s/%s" % (self.config['app.name'], self.config['app.version'])

    def _get_headers(self, request):
        # Mutates `request` in place: fills in Accept/X-Application/
        # Content-type headers and strips the internal 'dataType' hint.
        headers = request.setdefault('headers', {})

        if request.get('dataType') == "json":
            headers['Accept'] = "application/json"
            request.pop('dataType')

        headers['X-Application'] = self._add_app_info()
        headers['Content-type'] = request.get('contentType', 'application/x-www-form-urlencoded; charset=UTF-8')

    def _connect_to_servers(self, servers, options):
        # Single server: connect directly.  Otherwise try the most recently
        # used server's saved credentials before falling back to Connect
        # sign-in.  (Definition is truncated at the end of this chunk.)
        LOG.info("Begin connectToServers, with %s servers", len(servers))

        result = {}

        if len(servers) == 1:
            result = self.connect_to_server(servers[0], options)
            LOG.debug("resolving connectToServers with result['State']: %s", result)

            return result

        first_server = self._get_last_used_server()
        # See if we have any saved credentials and can auto sign in
        if first_server is not None and first_server['DateLastAccessed'] != "2001-01-01T00:00:00Z":
            result = self.connect_to_server(first_server, options)

            if result['State'] in (CONNECTION_STATE['SignedIn'], CONNECTION_STATE['Unavailable']):
                return result

        # Return loaded credentials if exists
        credentials = self.credentials.get_credentials()
        self._ensure_connect_user(credentials)

        # --- truncated: remainder of this return statement continues past
        # this chunk; final line kept verbatim ---
        return { 'Servers': servers, 'State': CONNECTION_STATE['ConnectSignIn'] if (not len(servers) and not self.connect_user()) else (resul
from config.settings import STREAM_COLLECTION
from bson.objectid import ObjectId

from radio_db.models import BaseModel


class Stream(BaseModel):
    """Mongo-backed model for a radio stream, persisted in STREAM_COLLECTION."""

    def __init__(self, db, data):
        super().__init__(db, collection=STREAM_COLLECTION)
        # Copy the raw payload fields onto the instance; absent keys become None.
        self.id = data.get('id')
        self.name = data.get('name')
        self.stream_ip = data.get('stream_ip')
        # NOTE(review): ObjectId(None) mints a brand-new id when 'user_id'
        # is missing from the payload — confirm that is intended.
        self.user_id = ObjectId(data.get('user_id'))
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import uuid

from db.common import Base
from db.specific_event import SpecificEvent
from db.event import Event
from db.player import Player
from db.team import Team


class Takeaway(Base, SpecificEvent):
    """ORM mapping for a single takeaway event."""

    __tablename__ = 'takeaways'
    __autoload__ = True

    HUMAN_READABLE = 'takeaway'

    # Attributes copied verbatim from the raw event payload.
    STANDARD_ATTRS = [
        "team_id", "player_id", "zone", "taken_from_team_id"
    ]

    def __init__(self, event_id, data_dict):
        # Synthetic primary key plus a link back to the parent event.
        self.takeaway_id = uuid.uuid4().urn
        self.event_id = event_id
        # dict.get defaults missing payload keys to None.
        for attr in self.STANDARD_ATTRS:
            setattr(self, attr, data_dict.get(attr))

    def __str__(self):
        player = Player.find_by_id(self.player_id)
        event = Event.find_by_id(self.event_id)
        team = Team.find_by_id(self.team_id)
        return "Takeaway: %s (%s) - %s" % (player.name, team.abbr, event)
""" Django settings for figexample project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'pp&p7ex-&+#n4waijg96v&txz$=y*rh=t$u-!hri@(-s@6^51=' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuth
enticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middle
ware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'figexample.urls' WSGI_APPLICATION = 'figexample.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'postgres', 'USER': 'postgres', 'HOST': 'db_1', 'PORT': 5432, } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/'
#!/usr/bin/env python
# PYTHON 3 only
# Sample Windows Startup check -- Mail alert
# SurvivalAlert v1.0
# By thxer.com
# N-pn.fr Community and Hexpresso CTF team

import os
import socket
import ctypes
import smtplib

# Global Variables
HOSTNAME = str(socket.gethostname())
IPLAN = str(socket.gethostbyname(socket.gethostname()))
AUTHORIZE_USER = ['Users', 'Utilisateur']  # User wich are allow to use computers
LIMIT_FREE_HDD_SPACE = 11  # Limit of free HDD space alert in GB

# Email Settings
TO = "admin@1337.com"  # User who recept mail alert
USER = "smtp_user@1337.com"
PWD = "smtp_passwd"
SMTPSERV = "smtp.server_addres.com"


# Check HDD Status
def check_hdd():
    """Check HDD health via `wmic diskdrive get status`.

    Raises a message box and sends ONE alert mail when any status word other
    than the 'Status' header or 'OK' shows up.  (Bug fix: the original
    alerted once *per* offending word, spamming a popup and an email for
    each; one alert per run is enough.)

    Returns the raw wmic output so callers/tests can inspect it.
    """
    hdd_status = os.popen("wmic diskdrive get status").read()
    bad_words = [word for word in hdd_status.split()
                 if word not in ("Status", "OK")]
    if bad_words:
        ctypes.windll.user32.MessageBoxW(None, u"ALERT: HDD ERROR", u"ERROR CONTACT ADMIN NOW !", 0)
        send_mail("Warning HDD not SAFE !", "Windows claims About unsafe HDD !")
    return hdd_status


def get_free_space():
    """ Test first Drive Free space then alert < LIMIT_HDD_FREE_SPACE

    Returns the free space of the first logical disk, rounded to whole GB.
    NOTE: `.split()[1]` assumes wmic reports at least one logical disk.
    """
    free_space = round(int(os.popen("wmic logicaldisk get freespace").read().split()[1]) / 1024 / 1024 / 1024)
    if free_space < LIMIT_FREE_HDD_SPACE:
        ctypes.windll.user32.MessageBoxW(None, u"ALERT: HDD FREE SPACE ERROR", u"ERROR CONTACT ADMIN NOW !", 0)
        msg = "Warning Free space is : " + str(free_space) + "GB"
        send_mail("Warning C: Free SPACE !", msg)
    return free_space


def whois_log():
    """ Get user Login name and alert if not in AUTHORIZE_USER list """
    if os.getlogin() not in AUTHORIZE_USER:
        msg = "SUSPECT Login IN : " + os.getlogin()
        send_mail("SUSPECT LOGIN", msg)


def send_mail(subject, message):
    """Send an alert mail tagged with this machine's hostname and LAN IP.

    NOTE(review): `server.close()` drops the connection without the SMTP
    QUIT handshake; `server.quit()` would be more polite — left unchanged
    to preserve behavior with the deployed SMTP server.
    """
    subject = str(subject)
    message = str(message)
    server = smtplib.SMTP(SMTPSERV, 25)  # 587 for STARTLS
    server.ehlo()
    # server.starttls() # Un comment for use STARTTLS
    server.login(USER, PWD)
    header = 'TO:' + TO + '\n' + 'From: ' + USER + '\n' + 'Subject:' + HOSTNAME + " | " + IPLAN + " " + subject + '\n'
    mail = header + '\n' + "PC : " + HOSTNAME + " IP LAN : " + IPLAN + "\n" + message + '\n\n'
    server.sendmail(USER, TO, mail)
    server.close()


if __name__ == '__main__':
    # Uncomment for test mail configuration
    # send_mail("Send a Test Mail","1337 Are In place N-pn")
    whois_log()
    get_free_space()
    check_hdd()
from a10sdk.common.A10BaseClass import A10BaseClass


class Udp(A10BaseClass):
    """STUN timeout configuration for a UDP port range (CGNv6 LSN).

    Supports CRUD operations and inherits from `common/A10BaseClass`.
    This is the "PARENT" class for this module.

    Attributes mirror the AXAPI schema:
        port_start (number, required): port range start, 1-65535.
        port_end (number, required): port range end, 1-65535.
        timeout (number, optional): STUN timeout in minutes, 0-60
            (device default: 2 minutes).
        uuid (str, optional): uuid of the object, 1-64 characters,
            modify-not-allowed.
        DeviceProxy: device proxy for REST operations and session
            handling; refer to `common/device_proxy.py`.

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/cgnv6/lsn/stun-timeout/udp/{port_start}+{port_end}`
    """

    def __init__(self, **kwargs):
        # Fixed metadata consumed by A10BaseClass.
        self.ERROR_MSG = ""
        self.required = ["port_start", "port_end"]
        self.b_key = "udp"
        self.a10_url = "/axapi/v3/cgnv6/lsn/stun-timeout/udp/{port_start}+{port_end}"
        # Payload fields start empty until supplied by the caller.
        self.DeviceProxy = ""
        self.port_start = ""
        self.port_end = ""
        self.timeout = ""
        self.uuid = ""
        # Every keyword argument overrides the matching attribute.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
if not source: raise Exception('Could not find table %s' % table) csv_delimiter = args['delimiter'] if args['format'] == 'csv' else None path = google.datalab.bigquery.Query.resolve_parameters(args['path'], parameters) job = source.extract(path, format=args['format'], csv_delimiter=csv_delimiter, csv_header=args['header'], compress=args['compress']) elif args['query'] or args['view']: source_name = args['view'] or args['query'] source = google.datalab.utils.commands.get_notebook_item(source_name) if not source: raise Exception('Could not find ' + ('view ' + args['view'] if args['view'] else 'query ' + args['query'])) query = source if args['query'] else bigquery.Query.from_view(source) query_params = get_query_parameters(args, cell_body) if args['query'] else None output_options = QueryOutput.file(path=args['path'], format=args['format'], csv_delimiter=args['delimiter'], csv_header=args['header'], compress=args['compress'], use_cache=not args['nocache']) context = google.datalab.utils._utils._construct_context_for_args(args) job = query.execute(output_options, context=context, query_params=query_params) else: raise Exception('A query, table, or view is needed to extract') if job.failed: raise Exception('Extract failed: %s' % str(job.fatal_error)) elif job.errors: raise Exception('Extract completed with errors: %s' % str(job.errors)) return job.result() def _load_cell(args, cell_body): """Implements the BigQuery load magic used to load data from GCS to a table. The supported syntax is: %bq load <optional args> Args: args: the arguments following '%bq load'. cell_body: optional contents of the cell interpreted as
YAML or JSON. Returns: A message about whether the load succeeded or failed. """ env = google.datalab.utils.commands.notebook_environment() confi
g = google.datalab.utils.commands.parse_config(cell_body, env, False) or {} parameters = config.get('parameters') or [] if parameters: jsonschema.validate({'parameters': parameters}, BigQuerySchema.QUERY_PARAMS_SCHEMA) name = google.datalab.bigquery.Query.resolve_parameters(args['table'], parameters) table = _get_table(name) if not table: table = bigquery.Table(name) if args['mode'] == 'create': if table.exists(): raise Exception('table %s already exists; use "append" or "overwrite" as mode.' % name) if not cell_body or 'schema' not in cell_body: raise Exception('Table does not exist, and no schema specified in cell; cannot load.') schema = config['schema'] # schema can be an instance of bigquery.Schema. # For example, user can run "my_schema = bigquery.Schema.from_data(df)" in a previous cell and # specify "schema: $my_schema" in cell input. if not isinstance(schema, bigquery.Schema): jsonschema.validate({'schema': schema}, BigQuerySchema.TABLE_SCHEMA_SCHEMA) schema = bigquery.Schema(schema) table.create(schema=schema) elif not table.exists(): raise Exception('table %s does not exist; use "create" as mode.' % name) csv_options = bigquery.CSVOptions(delimiter=args['delimiter'], skip_leading_rows=args['skip'], allow_jagged_rows=not args['strict'], quote=args['quote']) path = google.datalab.bigquery.Query.resolve_parameters(args['path'], parameters) job = table.load(path, mode=args['mode'], source_format=args['format'], csv_options=csv_options, ignore_unknown_values=not args['strict']) if job.failed: raise Exception('Load failed: %s' % str(job.fatal_error)) elif job.errors: raise Exception('Load completed with errors: %s' % str(job.errors)) def _create_pipeline_subparser(parser): import argparse pipeline_parser = parser.subcommand( 'pipeline', formatter_class=argparse.RawTextHelpFormatter, help=""" Creates a GCS/BigQuery ETL pipeline. 
The cell-body is specified as follows: input: table | path: <BQ table name or GCS path; both if path->table load is also required> schema: <For syntax, refer '%%bq execute'> format: {csv (default) | json} csv: <This section is relevant only when 'format' is 'csv'> delimiter: <The field delimiter to use; default is ','> skip: <Number of rows at the top of a CSV file to skip; default is 0> strict: <{True | False (default)}; whether to accept rows with missing trailing (or optional) columns> quote: <Value used to quote data sections; default is '"'> mode: <{append (default) | overwrite}; applicable if path->table load> transformation: <optional; when absent, a direct conversion is done from input (path|table) to output (table|path)> query: <name of BQ query defined via "%%bq query --name ..."> output: table | path: <BQ table name or GCS path; both if table->path extract is required> mode: <{append | overwrite | create (default)}; applicable only when table is specified. format: <{csv (default) | json}> csv: <This section is relevant only when 'format' is 'csv'> delimiter: <the field delimiter to use. Defaults to ','> header: <{True (default) | False}; Whether to include an initial header line> compress: <{True | False (default) }; Whether to compress the data on export> schedule: start: <formatted as '%Y-%m-%dT%H:%M:%S'; default is 'now'> end: <formatted as '%Y-%m-%dT%H:%M:%S'; default is 'forever'> interval: <{@once (default) | @hourly | @daily | @weekly | @ monthly | @yearly | <cron ex>}> catchup: <{True | False (default)}; when True, backfill is performed for start and end times. 
retries: Number of attempts to run the pipeline; default is 0 retry_delay_seconds: Number of seconds to wait before retrying the task emails: <comma separated list of emails to notify in case of retries, failures, etc.> parameters: <For syntax, refer '%%bq execute'> """) # noqa pipeline_parser.add_argument('-n', '--name', type=str, help='BigQuery pipeline name', required=True) pipeline_parser.add_argument('-d', '--gcs_dag_bucket', type=str, help='The Google Cloud Storage bucket for the Airflow dags.') pipeline_parser.add_argument('-f', '--gcs_dag_file_path', type=str, help='The file path suffix for the Airflow dags.') pipeline_parser.add_argument('-g', '--debug', type=str, help='Debug output with the airflow spec.') return pipeline_parser def _pipeline_cell(args, cell_body): """Implements the pipeline subcommand in the %%bq magic. Args: args: the arguments following '%%bq pipeline'. cell_body: Cell contents. """ name = args.get('name') if name is None: raise Exception('Pipeline name was not specified.') import google.datalab.utils as utils bq_pipeline_config = utils.commands.parse_config( cell_body, utils.commands.notebook_environment()) try: airflow_spec = \ google.datalab.contrib.bigquery.commands.get_airflow_spec_from_config(name, bq_pipeline_config) except AttributeError: return "Perhaps you're missing: import google.datalab.contrib.bigquery.commands" # If a gcs_dag_bucket is specified, we deploy to it so that the Airflow VM rsyncs it. error_message = '' gcs_dag_bucket = args.get('gcs_dag_bucket') gcs_dag_file_path = args.get('gcs_dag_file_path') if gcs_dag_bucket: try: airflow = google.datalab.contrib.pipeline.airflow.Airflow(gcs_dag_bucket, gcs_dag_file_path) airflow.deploy(name, airflow_spec) error_message += "Pipeline successfully deployed! View Airflow dashboard for more details." except AttributeError: return "Perhaps you're missing: import google.datalab.contrib.pipeline.airflow" if args.get('debug'): error_message += '\
"2.1" insertafter: description: - Used with C(state=present) and C(env). If specified, the environment variable will be inserted after the declaration of specified environment variable. version_added: "2.1" insertbefore: description: - Used with C(state=present) and C(env). If specified, the environment variable will be inserted before the declaration of specified environment variable. version_added: "2.1" requirements: - cron author: - Dane Summers (@dsummersl) - Mike Grozak (@rhaido) - Patrick Callahan (@dirtyharrycallahan) - E
van Kaufman (@EvanK) - Luca Berruti (@lberruti) """ EXAMPLES = ''' - name: Ensure a job that runs at 2 and 5 exists. Creates an entry like "0 5,2 * * ls -alh > /dev/null" cron: name: "check dirs" minute: "0" hour: "5,2" job: "ls -alh > /dev/null" - name: 'Ensure an old job
is no longer present. Removes any job that is prefixed by "#Ansible: an old job" from the crontab' cron: name: "an old job" state: absent - name: Creates an entry like "@reboot /some/job.sh" cron: name: "a job for reboot" special_time: reboot job: "/some/job.sh" - name: Creates an entry like "PATH=/opt/bin" on top of crontab cron: name: PATH env: yes job: /opt/bin - name: Creates an entry like "APP_HOME=/srv/app" and insert it after PATH declaration cron: name: APP_HOME env: yes job: /srv/app insertafter: PATH - name: Creates a cron file under /etc/cron.d cron: name: yum autoupdate weekday: 2 minute: 0 hour: 12 user: root job: "YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate" cron_file: ansible_yum-autoupdate - name: Removes a cron file from under /etc/cron.d cron: name: "yum autoupdate" cron_file: ansible_yum-autoupdate state: absent - name: Removes "APP_HOME" environment variable from crontab cron: name: APP_HOME env: yes state: absent ''' import os import platform import pipes import pwd import re import sys import tempfile from ansible.module_utils.basic import AnsibleModule, get_platform CRONCMD = "/usr/bin/crontab" class CronTabError(Exception): pass class CronTab(object): """ CronTab object to write time based crontab file user - the user of the crontab (defaults to root) cron_file - a cron file under /etc/cron.d, or an absolute path """ def __init__(self, module, user=None, cron_file=None): self.module = module self.user = user self.root = (os.getuid() == 0) self.lines = None self.ansible = "#Ansible: " self.existing = '' if cron_file: if os.path.isabs(cron_file): self.cron_file = cron_file else: self.cron_file = os.path.join('/etc/cron.d', cron_file) else: self.cron_file = None self.read() def read(self): # Read in the crontab from the system self.lines = [] if self.cron_file: # read the cronfile try: f = open(self.cron_file, 'r') self.existing = f.read() self.lines = self.existing.splitlines() f.close() except IOError: # cron file does not exist return except 
Exception: raise CronTabError("Unexpected error:", sys.exc_info()[0]) else: # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) if rc != 0 and rc != 1: # 1 can mean that there are no jobs. raise CronTabError("Unable to read crontab") self.existing = out lines = out.splitlines() count = 0 for l in lines: if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)): self.lines.append(l) else: pattern = re.escape(l) + '[\r\n]?' self.existing = re.sub(pattern, '', self.existing, 1) count += 1 def is_empty(self): if len(self.lines) == 0: return True else: return False def write(self, backup_file=None): """ Write the crontab to the system. Saves all information. """ if backup_file: fileh = open(backup_file, 'w') elif self.cron_file: fileh = open(self.cron_file, 'w') else: filed, path = tempfile.mkstemp(prefix='crontab') os.chmod(path, int('0644', 8)) fileh = os.fdopen(filed, 'w') fileh.write(self.render()) fileh.close() # return if making a backup if backup_file: return # Add the entire crontab back to the user crontab if not self.cron_file: # quoting shell args for now but really this should be two non-shell calls. 
FIXME (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) os.unlink(path) if rc != 0: self.module.fail_json(msg=err) # set SELinux permissions if self.module.selinux_enabled() and self.cron_file: self.module.set_default_selinux_context(self.cron_file, False) def do_comment(self, name): return "%s%s" % (self.ansible, name) def add_job(self, name, job): # Add the comment self.lines.append(self.do_comment(name)) # Add the job self.lines.append("%s" % (job)) def update_job(self, name, job): return self._update_job(name, job, self.do_add_job) def do_add_job(self, lines, comment, job): lines.append(comment) lines.append("%s" % (job)) def remove_job(self, name): return self._update_job(name, "", self.do_remove_job) def do_remove_job(self, lines, comment, job): return None def add_env(self, decl, insertafter=None, insertbefore=None): if not (insertafter or insertbefore): self.lines.insert(0, decl) return if insertafter: other_name = insertafter elif insertbefore: other_name = insertbefore other_decl = self.find_env(other_name) if len(other_decl) > 0: if insertafter: index = other_decl[0] + 1 elif insertbefore: index = other_decl[0] self.lines.insert(index, decl) return self.module.fail_json(msg="Variable named '%s' not found." 
% other_name) def update_env(self, name, decl): return self._update_env(name, decl, self.do_add_env) def do_add_env(self, lines, decl): lines.append(decl) def remove_env(self, name): return self._update_env(name, '', self.do_remove_env) def do_remove_env(self, lines, decl): return None def remove_job_file(self): try: os.unlink(self.cron_file) return True except OSError: # cron file does not exist return False except Exception: raise CronTabError("Unexpected error:", sys.exc_info()[0]) def find_job(self, name, job=None): # attempt to find job by 'Ansible:' header comment comment = None for l in self.lines: if comment is not None: if comment == name: return [comment, l] else: comment = None elif re.match(r'%s' % self.ansible, l): comment = re.sub(r'%s' % self.ansible, '', l) # failing that, attempt to find job by exact match if job: for i, l in enumerate(self.lines): if
out.setObjectName("verticalLayout") self.metadata_groupbox = QtWidgets.QGroupBox(MetadataOptionsPage) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.metadata_groupbox.sizePolicy().hasHeightForWidth()) self.metadata_groupbox.setSizePolicy(sizePolicy) self.metadata_groupbox.setMinimumSize(QtCore.QSize(397, 135)) self.metadata_groupbox.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.metadata_groupbox.setObjectName("metadata_groupbox") self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.metadata_groupbox) self.verticalLayout_3.setObjectName("verticalLayout_3") self.translate_artist_names = QtWidgets.QCheckBox(self.metadata_groupbox) self.translate_artist_names.setObjectName("translate_artist_names") self.verticalLayout_3.addWidget(self.translate_artist_names) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setContentsMargins(-1, -1, -1, 0) self.horizontalLayout.setObjectName("horizontalLayout") self.selected_locales = QtWidgets.QLineEdit(self.metadata_groupbox) self.selected_locales.setReadOnly(True) self.selected_locales.setObjectName("selected_locales") self.horizontalLayout.addWidget(self.selected_locales) self.select_locales = QtWidgets.QPushButton(self.metadata_groupbox) self.select_locales.setObjectName("select_locales") self.horizontalLayout.addWidget(self.select_locales) self.verticalLayout_3.addLayout(self.horizontalLayout) self.translate_artist_names_script_exception = QtWidgets.QCheckBox(self.metadata_groupbox) self.translate_artist_names_script_exception.setObjectName("translate_artist_names_script_exception") self.verticalLayout_3.addWidget(self.translate_artist_names_script_exception) self.horizontalLayout_4 = QtWidgets.QHBoxLayout() self.horizontalLayout_4.setContentsMargins(-1, -1, -1, 0) self.horizontalLayout_4.setObjectName("horizontalLayout_4") 
self.selected_scripts = QtWidgets.QLineEdit(self.metadata_groupbox) self.selected_scripts.setReadOnly(True) self.selected_scripts.setObjectName("selected_scripts") self.horizontalLayout_4.addWidget(self.selected_scripts) self.select_scripts = QtWidgets.QPushButton(self.metadata_groupbox) self.select_scripts.setObjectName("select_scripts") self.horizontalLayout_4.addWidget(self.select_scripts) self.verticalLayout_3.addLayout(self.horizontalLayout_4) self.standardize_artists = QtWidgets.QCheckBox(self.metadata_groupbox) self.standardize_artists.setObjectName("standardize_artists") self.verticalLayout_3.addWidget(self.standardize_artists) self.standardize_instruments = QtWidgets.QCheckBox(self.metadata_groupbox) self.standardize_instruments.setObjectName("standardize_instruments") self.verticalLayout_3.addWidget(self.standardize_instruments) self.convert_punctuation = QtWidgets.QCheckBox(self.metadata_groupbox) self.convert_punctuation.setObjectName("convert_punctuation") self.verticalLayout_3.addWidget(self.convert_punctuation) self.release_ars = QtWidgets.QCheckBox(self.metadata_groupbox) self.release_ars.setObjectName("release_ars") self.verticalLayout_3.addWidget(self.release_ars) self.track_ars = QtWidgets.QCheckBox(self.metadata_groupbox) self.track_ars.setObjectName("track_ars") self.verticalLayout_3.addWidget(self.track_ars) self.guess_tracknumber_and_title = QtWidgets.QCheckBox(self.metadata_groupbox) self.guess_tracknumber_and_title.setObjectName("guess_tracknumber_and_title") self.verticalLayout_3.addWidget(self.guess_tracknumber_and_title) self.verticalLayout.addWidget(self.metadata_groupbox) self.custom_fields_groupbox = QtWidgets.QGroupBox(MetadataOptionsPage) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.custom_fields_groupbox.sizePolicy().hasHeightForWidth()) 
self.custom_fields_groupbox.setSizePolicy(sizePolicy) self.custom_fields_groupbox.setMinimumSize(QtCore.QSize(397, 0)) self.custom_fields_groupbox.setObjectName("custom_fields_groupbox") self.gridlayout = QtWidgets.QGridLayout(self.custom_fields_groupbox) self.gridlayout.setSpacing(2) self.gridlayout.setObjectName("gridlayout") self.label_6 = QtWidgets.QLabel(self.custom_fields_groupbox) self.label_6.setObjectName("label_6") self.gridlayout.addWidget(self.label_
6, 0, 0, 1, 2) self.label_7 = QtWidgets.QLa
bel(self.custom_fields_groupbox) self.label_7.setObjectName("label_7") self.gridlayout.addWidget(self.label_7, 2, 0, 1, 2) self.nat_name = QtWidgets.QLineEdit(self.custom_fields_groupbox) self.nat_name.setObjectName("nat_name") self.gridlayout.addWidget(self.nat_name, 3, 0, 1, 1) self.nat_name_default = QtWidgets.QPushButton(self.custom_fields_groupbox) self.nat_name_default.setObjectName("nat_name_default") self.gridlayout.addWidget(self.nat_name_default, 3, 1, 1, 1) self.va_name_default = QtWidgets.QPushButton(self.custom_fields_groupbox) self.va_name_default.setObjectName("va_name_default") self.gridlayout.addWidget(self.va_name_default, 1, 1, 1, 1) self.va_name = QtWidgets.QLineEdit(self.custom_fields_groupbox) self.va_name.setObjectName("va_name") self.gridlayout.addWidget(self.va_name, 1, 0, 1, 1) self.verticalLayout.addWidget(self.custom_fields_groupbox) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem) self.label_6.setBuddy(self.va_name_default) self.label_7.setBuddy(self.nat_name_default) self.retranslateUi(MetadataOptionsPage) QtCore.QMetaObject.connectSlotsByName(MetadataOptionsPage) MetadataOptionsPage.setTabOrder(self.translate_artist_names, self.translate_artist_names_script_exception) MetadataOptionsPage.setTabOrder(self.translate_artist_names_script_exception, self.standardize_artists) MetadataOptionsPage.setTabOrder(self.standardize_artists, self.standardize_instruments) MetadataOptionsPage.setTabOrder(self.standardize_instruments, self.convert_punctuation) MetadataOptionsPage.setTabOrder(self.convert_punctuation, self.release_ars) MetadataOptionsPage.setTabOrder(self.release_ars, self.track_ars) MetadataOptionsPage.setTabOrder(self.track_ars, self.guess_tracknumber_and_title) MetadataOptionsPage.setTabOrder(self.guess_tracknumber_and_title, self.va_name) MetadataOptionsPage.setTabOrder(self.va_name, self.va_name_default) 
MetadataOptionsPage.setTabOrder(self.va_name_default, self.nat_name) MetadataOptionsPage.setTabOrder(self.nat_name, self.nat_name_default) def retranslateUi(self, MetadataOptionsPage): _translate = QtCore.QCoreApplication.translate self.metadata_groupbox.setTitle(_("Metadata")) self.translate_artist_names.setText(_("Translate artist names to these locales where possible:")) self.select_locales.setText(_("Select...")) self.translate_artist_names_script_exception.setText(_("Ignore artist name translation for these language scripts:")) self.select_scripts.setText(_("Select...")) self.standardize_artists.setText(_("Use standardized artist names")) self.standardize_instruments.setText(_("Use standardized instrument and vocal credits")) self.convert_punctuation.setText(_("Convert Unicode punctuation characters to ASCII"))
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#from . import berny_solver as berny
from .addons import as_pyscf_method


def optimize(method, *args, **kwargs):
    """Optimize the geometry of *method* with whichever backend is available.

    Tries the geomeTRIC backend first and falls back to pyberny.  When
    neither backend can be imported, the geomeTRIC ImportError is
    re-raised (the pyberny failure remains attached as __context__).
    """
    try:
        from . import geometric_solver as solver
    except ImportError as geometric_err:
        try:
            from . import berny_solver as solver
        except ImportError:
            raise geometric_err
    return solver.optimize(method, *args, **kwargs)
Args: client (Client): Azure DevOps API client. args (dict): Command arguments from XSOAR. Returns: CommandResults: outputs, readable outputs and raw response for XSOAR. """ project = args['project'] repository_id = args['repository_id'] pull_request_id = args['pull_request_id'] response = client.pull_requests_get_request(project, repository_id, pull_request_id) outputs = copy.deepcopy(response) outputs['creationDate'] = arg_to_datetime(response.get('creationDate')).isoformat() readable_output = generate_pull_request_readable_information(outputs) command_results = CommandResults( readable_output=readable_output, outputs_prefix='AzureDevOps.PullRequest', outputs_key_field='pullRequestId', outputs=outputs, raw_response=response ) return command_results def pull_requests_list_command(client: Client, args: Dict[str, Any]) -> CommandResults: """ Retrieve pull requests in repository. Args: client (Client): Azure DevOps API client. args (dict): Command arguments from XSOAR. Returns: CommandResults: outputs, readable outputs and raw response for XSOAR. """ project = args['project'] repository = args['repository'] page = arg_to_number(args.get('page') or '1') limit = arg_to_number(args.get('limit') or '50') if page < 1 or limit < 1: raise Exception('Page and limit arguments must be greater than 1.') offset = (page - 1) * limit response = client.pull_requests_list_request(project, repository, offset, limit) readable_message = f'Pull Request List:\n Current page size: {limit}\n Showing page {page} out of ' \ f'others that may exist.' 
outputs = copy.deepcopy(response.get('value')) for pr in outputs: pr['creationDate'] = arg_to_datetime(pr.get('creationDate')).isoformat() readable_output = generate_pull_request_readable_information(outputs, message=readable_message) command_results = CommandResults( readable_output=readable_output, outputs_prefix='AzureDevOps.PullRequest', outputs_key_field='pullRequestId', outputs=outputs, raw_response=response ) return command_results def project_list_command(client: Client, args: Dict[str, Any]) -> CommandResults: """ Retrieve all projects in the organization that the authenticated user has access to. Args: client (Client): Azure DevOps API client. args (dict): Command arguments from XSOAR. Returns: CommandResults: outputs, readable outputs and raw response for XSOAR. """ page = arg_to_number(args.get('page') or '1') limit = arg_to_number(args.get('limit') or '50') if page < 1 or limit < 1: raise Exception('Page and limit arguments must be greater than 1.') offset = (page - 1) * limit response = client.project_list_request(offset, limit) readable_message = f'Project List:\n Current page size: {limit}\n Showing page {page} out others that may exist.' outputs = copy.deepcopy(response.get('value', [])) output_headers = ['name', 'id', 'state', 'revision', 'visibility', 'lastUpdateTime'] for project in outputs: project['lastUpdateTime'] = arg_to_datetime(project.get('lastUpdateTime')).isoformat() readable_output = tableToMarkdown( readable_message, outputs, headers=output_headers, headerTransform=pascalToSpace ) command_results = CommandResults( readable_output=readable_output, outputs_prefix='AzureDevOps.Project', outputs_key_field='id', outputs=outputs, raw_response=response ) return command_results def repository_list_command(client: Client, args: Dict[str, Any]) -> CommandResults: """ Retrieve git repositories in the organization project. Args: client (Client): Azure DevOps API client. args (dict): Command arguments from XSOAR. 
Returns: CommandResults: outputs, readable outputs and raw response for XSOAR. """ project = args['project'] page = arg_to_number(args.get('page') or '1') limit = arg_to_number(args.get('limit') or '50') if page < 1 or limit < 1: raise Exception('Page and limit arguments must be greater than 1.') start = (page - 1) * limit end = start + limit readable_message = f'Repositories List:\n Current page size: {limit}\n Showing page {page} out others that may exist.' response = client.repository_list_request(project) outputs = [] if response.get('count') and response.get('count') >= start: min_index = min(response.get('count'), end) for repo in response.get('value')[start:min_index]: outputs.append(repo) readable_data = copy.deepcopy(outputs) for repo in readable_data: repo["size (Bytes)"] = repo.pop("size") readable_output = tableToMarkdown( readable_message, readable_data, headers=['id', 'name', 'webUrl', 'size (Bytes)'], headerTransform=pascalToSpace ) command_results = CommandResults( readable_output=readable_output, outputs_prefix='AzureDevOps.Repository', outputs_key_field='id', outputs=outputs, raw_response=response ) return command_results def users_query_command(client: Client, args: Dict[str, Any]) -> CommandResults: """ Query users in the organization. Args: client (Client): Azure DevOps API client. args (dict): Command arguments from XSOAR. Returns: CommandResults: outputs, readable outputs and raw response for XSOAR. """ query = args['query'] page = arg_to_number(args.get('page') or '1') limit = arg_to_number(args.get('limit') or '50') if page < 1 or limit < 1: raise Exception('Page and limit arguments must be greater than 1.') start = (page - 1) * limit end = start + limit readable_message = f'Users List:\n Current page size: {limit}\n Showing page {page} out others that may exist.' 
response = client.users_query_request(query) outputs = [] results = response.get('results') readable_user_information = [] if results and len(results) > 0: identities = results[0].get('identities') if len(identities) >= start: min_index = min(len(identities), end) for identity in identities[start:min_index]: # Updating the id key as well. identity["id"] = identity.get("localId") outputs.append(identity) if identity.get("localDirectory") == "vsd": readable_user_information.append( {"entityType": id
entity.get("entityType"), "id": identity.get("localId"), "email": identity.get("signInAddress")}) readable_output = tableToMarkdown( readable_message, readable_user_information, headers=['email', 'entityType', 'id'], headerTransform=pascalToSpace ) command_res
ults = CommandResults( readable_output=readable_output, outputs_prefix='AzureDevOps.User', outputs_key_field='id', outputs=outputs, raw_response=response ) return command_results def pipeline_run_get_command(client: Client, args: Dict[str, Any]) -> CommandResults: """ Retrieve pipeline run information. Args: client (Client): Azure DevOps API client. args (dict): Command arguments from XSOAR. Returns: CommandResults: outputs, readable outputs and raw response for XSOAR. """ project = args['project'] pipeline_id = args['pipeline_id'] run_id = args['run_id'] scheduled = argToBoolean(args.get('scheduled', False)) response = client.get_pipeline_run_request(project, pipeline_id, run_id) # This is part of a scheduled command run state = response.get("state") if scheduled and state != 'completed': # schedule next poll scheduled_command = ScheduledC
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Yang Gao <younggao1994@gmail.com>
#
'''
electron-phonon matrix from finite difference
'''

import copy
import numpy as np
from pyscf import scf, dft, gto, hessian
from pyscf.eph import rhf as rhf_eph
from pyscf.lib import logger
from pyscf.data.nist import MP_ME

# Re-exported thresholds from the analytic eph module so both code paths
# filter/keep phonon modes consistently.
CUTOFF_FREQUENCY = rhf_eph.CUTOFF_FREQUENCY
KEEP_IMAG_FREQUENCY = rhf_eph.KEEP_IMAG_FREQUENCY

def run_mfs(mf, mols_a, mols_b):
    """Run mean-field calculations on each (+disp, -disp) pair of geometries.

    Args:
        mf: converged reference mean-field object; its density matrix seeds
            every displaced calculation and its settings are shallow-copied.
        mols_a: list of molecules displaced in the + direction.
        mols_b: list of molecules displaced in the - direction (same length).

    Returns:
        List of (mf_plus, mf_minus) tuples, one per displacement.
    """
    nconfigs = len(mols_a)
    # Seed each displaced SCF with the equilibrium density for faster,
    # more reliable convergence.
    dm0 = mf.make_rdm1()
    mflist = []
    for i in range(nconfigs):
        # Shallow copies keep the reference mf's settings (xc, conv_tol, ...)
        # while reset() swaps in the displaced geometry.
        mf1 = copy.copy(mf)
        mf1.reset(mols_a[i])
        mf2 = copy.copy(mf)
        mf2.reset(mols_b[i])
        mf1.kernel(dm0=dm0)
        mf2.kernel(dm0=dm0)
        # Warn (but do not abort) on non-convergence; the finite-difference
        # derivative built from these results may then be inaccurate.
        if not (mf1.converged):
            logger.warn(mf, "%ith config mf1 not converged", i)
        if not (mf2.converged):
            logger.warn(mf, "%ith config mf2 not converged", i)
        mflist.append((mf1, mf2))
    return mflist

def get_mode(mf, cutoff_frequency=CUTOFF_FREQUENCY, keep_imag_frequency=KEEP_IMAG_FREQUENCY):
    """Compute harmonic phonon frequencies and mode vectors.

    Builds the analytic nuclear Hessian for ``mf`` and diagonalizes it via
    :func:`rhf_eph.solve_hmat`, filtering modes below ``cutoff_frequency``
    and (optionally) keeping imaginary-frequency modes.

    Returns:
        (frequencies, mode_vectors) as produced by ``rhf_eph.solve_hmat``.
    """
    hmat = mf.Hessian().kernel()
    w_new, c_new = rhf_eph.solve_hmat(mf.mol, hmat, cutoff_frequency, keep_imag_frequency)
    return w_new, c_new

def gen_moles(mol, disp):
    """From the given equilibrium molecule, generate 3N molecules with a shift on + displacement(mol_a) and - displacement(mol_s) on each Cartesian coordinates"""
    coords = mol.atom_coords()
    natoms = len(coords)
    mol_a, mol_s = [],[]
    for i in range(natoms):
        for x in range(3):
            new_coords_a, new_coords_s = coords.copy(), coords.copy()
            new_coords_a[i][x] += disp
            new_coords_s[i][x] -= disp
            atoma = [[mol.atom_symbol(j), coord] for (j, coord) in zip(range(natoms), new_coords_a)]
            atoms = [[mol.atom_symbol(j), coord] for (j, coord) in zip(range(natoms), new_coords_s)]
            # unit='B': atom_coords() returns Bohr, so the displaced
            # geometries must be re-ingested as Bohr as well.
            mol_a.append(mol.set_geom_(atoma, inplace=False, unit='B'))
            mol_s.append(mol.set_geom_(atoms, inplace=False, unit='B'))
    return mol_a, mol_s

def get_vmat(mf, mfset, disp):
    """Assemble <p|dV/dR|q> in the AO basis by central finite difference.

    Args:
        mf: converged equilibrium mean-field object.
        mfset: list of (mf_plus, mf_minus) pairs from :func:`run_mfs`,
            ordered as 3 Cartesian components per atom.
        disp: full displacement step (the +/- geometries differ by ``disp``).

    Returns:
        ndarray of one derivative matrix per nuclear degree of freedom.
    """
    vmat=[]
    mygrad = mf.nuc_grad_method()
    # Analytic derivative of the one-electron part at equilibrium, used to
    # remove the basis-set (Pulay-like) contribution from the numerical
    # difference below.
    ve = mygrad.get_veff() + mygrad.get_hcore() + mf.mol.intor("int1e_ipkin")
    # Restricted results carry one spin channel (3 axes x nao x nao);
    # unrestricted adds a leading spin dimension.
    RESTRICTED = (ve.ndim==3)
    aoslice = mf.mol.aoslice_by_atom()
    for ki, (mf1, mf2) in enumerate(mfset):
        # ki enumerates (atom, axis) pairs: 3 Cartesian axes per atom.
        atmid, axis = np.divmod(ki, 3)
        p0, p1 = aoslice[atmid][2:]
        # Potential-only matrices: subtract the kinetic term from the core
        # Hamiltonian so only V remains.
        vfull1 = mf1.get_veff() + mf1.get_hcore() - mf1.mol.intor_symmetric('int1e_kin') # <u+|V+|v+>
        vfull2 = mf2.get_veff() + mf2.get_hcore() - mf2.mol.intor_symmetric('int1e_kin') # <u-|V-|v->
        vfull = (vfull1 - vfull2)/disp  # (<p+|V+|q+>-<p-|V-|q->)/dR
        # Remove the analytic orbital-derivative contribution for the AOs
        # centered on the displaced atom (rows and columns separately).
        if RESTRICTED:
            vfull[p0:p1] -= ve[axis,p0:p1]
            vfull[:,p0:p1] -= ve[axis,p0:p1].T
        else:
            vfull[:,p0:p1] -= ve[:,axis,p0:p1]
            vfull[:,:,p0:p1] -= ve[:,axis,p0:p1].transpose(0,2,1)
        vmat.append(vfull)
    return np.asarray(vmat)

def kernel(mf, disp=1e-4, mo_rep=False, cutoff_frequency=CUTOFF_FREQUENCY, keep_imag_frequency=KEEP_IMAG_FREQUENCY):
    """Compute the electron-phonon coupling matrix by finite difference.

    Args:
        mf: mean-field object (re-converged here if needed).
        disp: finite-difference step; geometries are displaced by +/- disp/2.
        mo_rep: if True, transform the coupling matrices to the MO basis.
        cutoff_frequency / keep_imag_frequency: mode-filtering options
            forwarded to :func:`get_mode`.

    Returns:
        (mat, omega): coupling matrices per phonon mode and the mode
        frequencies.
    """
    if hasattr(mf, 'xc'):
        # DFT path: make sure the integration grid exists before copies of
        # mf are run at displaced geometries.
        mf.grids.build()
    if not mf.converged:
        mf.kernel()
    RESTRICTED = (mf.mo_coeff.ndim==2)
    mol = mf.mol
    omega, vec = get_mode(mf, cutoff_frequency, keep_imag_frequency)
    # Atomic masses in atomic units (electron-mass scale) for mass-weighting.
    mass = mol.atom_mass_list() * MP_ME
    vec = rhf_eph._freq_mass_weighted_vec(vec, omega, mass)
    mols_a, mols_b = gen_moles(mol, disp/2.0)  # generate a bunch of molecules with disp/2 on each cartesion coord
    mfset = run_mfs(mf, mols_a, mols_b)  # run mean field calculations on all these molecules
    vmat = get_vmat(mf, mfset, disp)  # extracting <p|dV|q>/dR
    if mo_rep:
        if RESTRICTED:
            vmat = np.einsum('xuv,up,vq->xpq', vmat, mf.mo_coeff.conj(), mf.mo_coeff)
        else:
            vmat = np.einsum('xsuv,sup,svq->xspq', vmat, mf.mo_coeff.conj(), mf.mo_coeff)

    # Contract Cartesian derivatives (x) with the mass-weighted mode
    # vectors to obtain one coupling matrix per phonon mode (J).
    if RESTRICTED:
        mat = np.einsum('xJ,xpq->Jpq', vec, vmat)
    else:
        mat = np.einsum('xJ,xspq->sJpq', vec, vmat)
    return mat, omega

if __name__ == '__main__':
    # Self-test: build water at a pre-relaxed geometry, run B3LYP, and
    # compare the finite-difference coupling against the analytic EPH code.
    mol = gto.M()
    mol.atom = '''O 0.000000000000 0.00000000136 0.459620634131
    H 0.000000000000 -0.77050867841 1.139170094494
    H 0.000000000000 0.77050867841 1.139170094494'''
    mol.unit = 'angstrom'
    mol.basis = 'sto3g'
    mol.verbose=4
    mol.build() # this is a pre-computed relaxed geometry
    mf = dft.RKS(mol)
    mf.grids.level=4
    mf.grids.build()
    mf.xc = 'b3lyp'
    # Tight convergence: finite differences amplify SCF noise.
    mf.conv_tol = 1e-14
    mf.conv_tol_grad = 1e-8
    mf.kernel()
    grad = mf.nuc_grad_method().kernel()
    print("Force on the atoms/au:")
    print(grad)
    # The geometry must be at a stationary point for the harmonic analysis
    # to be meaningful.
    assert(abs(grad).max()<1e-5)
    mat, omega = kernel(mf)
    matmo, _ = kernel(mf, mo_rep=True)

    from pyscf.eph.rks import EPH
    myeph = EPH(mf)
    eph, _ = myeph.kernel()
    ephmo, _ = myeph.kernel(mo_rep=True)
    print("***Testing on RKS***")
    for i in range(len(mat)):
        # Mode vectors are defined up to a sign, so compare both signs and
        # report the smaller deviation.
        print("AO",min(np.linalg.norm(eph[i]-mat[i]), np.linalg.norm(eph[i]+mat[i])))
        print("AO", min(abs(eph[i]-mat[i]).max(), abs(eph[i]+mat[i]).max()))
        print("MO",min(np.linalg.norm(ephmo[i]-matmo[i]), np.linalg.norm(ephmo[i]+matmo[i])))
        print("MO", min(abs(ephmo[i]-matmo[i]).max(), abs(ephmo[i]+matmo[i]).max()))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
from django.conf import settings


class Migration(migrations.Migration):
    """Create the JSONVersions and SCOTUSMap models for the visualizations app.

    Auto-generated Django migration; do not reorder operations — the
    JSONVersions.map foreign key is added only after SCOTUSMap exists.
    """

    dependencies = [
        ('search', '0003_auto_20150826_0632'),
        # Depend on the swappable user model rather than a hard-coded app.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='JSONVersions',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date_created', models.DateTimeField(help_text=b'The time when this item was created', auto_now_add=True, db_index=True)),
                ('date_modified', models.DateTimeField(help_text=b'The last moment when the item was modified.', auto_now=True, db_index=True)),
                ('json_data', models.TextField(help_text=b'The JSON data for a particular version of the visualization.')),
            ],
        ),
        migrations.CreateModel(
            name='SCOTUSMap',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date_created', models.DateTimeField(help_text=b'The time when this item was created', auto_now_add=True, db_index=True)),
                ('date_modified', models.DateTimeField(help_text=b'The last moment when the item was modified.', auto_now=True, db_index=True)),
                ('title', models.CharField(help_text=b"The title of the visualization that you're creating.", max_length=200)),
                ('subtitle', models.CharField(help_text=b"The subtitle of the visualization that you're creating.", max_length=300, blank=True)),
                ('slug', models.SlugField(help_text=b'The URL path that the visualization will map to (the slug)', max_length=75)),
                ('notes', models.TextField(help_text=b'Any notes that help explain the diagram, in Markdown format', blank=True)),
                ('degree_count', models.IntegerField(help_text=b'The number of degrees to display between cases')),
                ('view_count', models.IntegerField(default=0, help_text=b'The number of times the visualization has been seen.')),
                ('published', models.BooleanField(default=False, help_text=b'Whether the visualization can be seen publicly.')),
                ('deleted', models.BooleanField(default=False, help_text=b'Has a user chosen to delete this visualization?')),
                ('generation_time', models.FloatField(default=0, help_text=b'The length of time it takes to generate a visuzalization, in seconds.')),
                ('cluster_end', models.ForeignKey(related_name='visualizations_ending_here', to='search.OpinionCluster', help_text=b'The ending cluster for the visualization')),
                ('cluster_start', models.ForeignKey(related_name='visualizations_starting_here', to='search.OpinionCluster', help_text=b'The starting cluster for the visualization')),
                ('clusters', models.ManyToManyField(help_text=b'The clusters involved in this visualization', related_name='visualizations', to='search.OpinionCluster', blank=True)),
                ('user', models.ForeignKey(related_name='scotus_maps', to=settings.AUTH_USER_MODEL, help_text=b'The user that owns the visualization')),
            ],
        ),
        # Added last because it references the SCOTUSMap model created above.
        migrations.AddField(
            model_name='jsonversions',
            name='map',
            field=models.ForeignKey(related_name='json_versions', to='visualizations.SCOTUSMap', help_text=b'The visualization that the json is affiliated with.'),
        ),
    ]
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals

import frappe
import erpnext
import unittest
from frappe.utils import nowdate, add_days
from erpnext.tests.utils import create_test_contact_and_address
from erpnext.stock.doctype.delivery_trip.delivery_trip import notify_customers, get_contact_and_address


class TestDeliveryTrip(unittest.TestCase):
	"""Tests for the Delivery Trip doctype and its customer notification flow."""

	def setUp(self):
		# Create all fixtures the trip test needs; each helper is idempotent.
		create_driver()
		create_vehicle()
		create_delivery_notification()
		create_test_contact_and_address()

	def test_delivery_trip(self):
		"""Create a trip with one stop and notify the customer by email.

		After notify_customers runs, the stop's notified_by_email flag is
		expected to still be 0 (no real email goes out in the test bench).
		"""
		contact = get_contact_and_address("_Test Customer")

		if not frappe.db.exists("Delivery Trip", "TOUR-00000"):
			delivery_trip = frappe.new_doc("Delivery Trip")
			delivery_trip.company = erpnext.get_default_company()
			delivery_trip.date = add_days(nowdate(), 5)
			delivery_trip.driver = "DRIVER-00001"
			delivery_trip.vehicle = "JB 007"
			delivery_trip.append("delivery_stops", {
				"customer": "_Test Customer",
				"address": contact.shipping_address.parent,
				"contact": contact.contact_person.parent
			})
			delivery_trip.delivery_notification = 'Delivery Notification'
			delivery_trip.insert()
			sender_email = frappe.db.get_value("User", frappe.session.user, "email")
			notify_customers(docname=delivery_trip.name, date=delivery_trip.date,
				driver=delivery_trip.driver, vehicle=delivery_trip.vehicle,
				sender_email=sender_email,
				delivery_notification=delivery_trip.delivery_notification)
			# assertEquals is a deprecated alias of assertEqual.
			self.assertEqual(delivery_trip.get("delivery_stops")[0].notified_by_email, 0)


def create_driver():
	"""Create the test driver if it does not already exist."""
	if not frappe.db.exists("Driver", "Newton Scmander"):
		driver = frappe.new_doc("Driver")
		driver.full_name = "Newton Scmander"
		driver.cell_number = "98343424242"
		driver.license_number = "B809"
		driver.insert()


def create_delivery_notification():
	"""Create the 'Delivery Notification' Standard Reply if missing."""
	if not frappe.db.exists("Standard Reply", "Delivery Notification"):
		frappe.get_doc({
			'doctype': 'Standard Reply',
			'name': 'Delivery Notification',
			'response': 'Test Delivery Trip',
			'subject': 'Test Subject',
			'owner': frappe.session.user
		}).insert()


# Backward-compatible alias: this helper was originally exported under a
# misspelled name; keep the old binding so external importers still work.
create_delivery_notfication = create_delivery_notification


def create_vehicle():
	"""Create the test vehicle 'JB 007' if it does not already exist."""
	if not frappe.db.exists("Vehicle", "JB 007"):
		vehicle = frappe.get_doc({
			"doctype": "Vehicle",
			"license_plate": "JB 007",
			"make": "Maruti",
			"model": "PCM",
			"last_odometer": 5000,
			"acquisition_date": frappe.utils.nowdate(),
			"location": "Mumbai",
			"chassis_no": "1234ABCD",
			"uom": "Litre",
			"vehicle_value": frappe.utils.flt(500000)
		})
		vehicle.insert()
from mock import patch, sentinel, call

from arctic.scripts.arctic_fsck import main
from ...util import run_as_main
import sys
import pytest


def test_main():
    """Running fsck with -f (apply mode) authenticates and fscks each library."""
    host = '%s:%s' % (sentinel.host, sentinel.port)
    with patch('arctic.scripts.arctic_fsck.Arctic') as Arctic, \
            patch('arctic.scripts.arctic_fsck.get_mongodb_uri') as get_mongodb_uri, \
            patch('arctic.scripts.arctic_fsck.do_db_auth') as do_db_auth:
        run_as_main(main, '--host', host,
                    '-v', '--library', 'sentinel.library', 'lib2', '-f')
        get_mongodb_uri.assert_called_once_with('sentinel.host:sentinel.port')
        Arctic.assert_called_once_with(get_mongodb_uri.return_value)
        # One auth call per library, against that library's database.
        conn = Arctic.return_value._conn
        expected_auth = [call(host, conn, 'arctic_sentinel'),
                         call(host, conn, 'arctic')]
        assert do_db_auth.call_args_list == expected_auth
        # -f means "really fix": _fsck(dry_run=False) for each library.
        fsck = Arctic.return_value.__getitem__.return_value._fsck
        assert fsck.call_args_list == [call(False), call(False), ]


def test_main_dry_run():
    """Without -f the script neither authenticates nor modifies anything."""
    with patch('arctic.scripts.arctic_fsck.Arctic') as Arctic, \
            patch('arctic.scripts.arctic_fsck.get_mongodb_uri') as get_mongodb_uri, \
            patch('arctic.scripts.arctic_fsck.do_db_auth') as do_db_auth:
        run_as_main(main, '--host', '%s:%s' % (sentinel.host, sentinel.port),
                    '-v', '--library', 'sentinel.library', 'sentinel.lib2')
        get_mongodb_uri.assert_called_once_with('sentinel.host:sentinel.port')
        Arctic.assert_called_once_with(get_mongodb_uri.return_value)
        # Dry run: no credentials are needed, so no auth attempts at all.
        assert do_db_auth.call_count == 0
        # _fsck(dry_run=True) once per requested library.
        fsck = Arctic.return_value.__getitem__.return_value._fsck
        assert fsck.call_args_list == [call(True), call(True), ]
"""Add ReceiverRanking Revision ID: 7f5b8f65a977 Revises: c53fdb39f5a5 Create Date: 2020-12-02 22:33:58.821112 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '7f5b8f65a977' down_revision = 'c53fdb39f5a5' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('receiver_rankings', sa.Column('id', sa.Integer(), nullable=False), sa.Column('date', sa.Date(), nullable=True), sa.Column('local_rank', sa.Integer(), nullable=True), sa.Column('global_rank', sa.Integer(), nullable=True), sa.Column('max_distance', sa.Float(precision=2), nullable=True), sa.Column('max_normalized_quality', sa.Float(precision=2), nullable=True), sa.Column('messages_count', sa.Integer(), nullable=True), sa.Column('coverages_count', sa.Integer(), nullable=True), sa.Column('senders_count', sa.Integer(), nullable=True), sa.Column('receiver_id', sa.Integer(), nullable=True), sa.Column('country_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['country_id'], ['countries.gid'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['receiver_id'], ['receivers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id') ) op.create_index('idx_receiver_rankings_uc', 'receiver_rankings', ['date', 'receiver_id'], unique=True) op.create_index(op.f('ix_receiver_rankings_country_id'), 'receiver_rankings', ['country_id'], unique=False) op.create_index(op.f('ix_receiver_rankings_receiver_id'), 'receiver_rankings', ['receiver_id'], unique=False) op.drop_column('receiver_statuses', 'agl') op.drop_column('receiver_statuses', 'location_mgrs') op.dro
p_column('receiver_statuses'
, 'location_mgrs_short') # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('receiver_statuses', sa.Column('location_mgrs_short', sa.VARCHAR(length=9), autoincrement=False, nullable=True)) op.add_column('receiver_statuses', sa.Column('location_mgrs', sa.VARCHAR(length=15), autoincrement=False, nullable=True)) op.add_column('receiver_statuses', sa.Column('agl', sa.REAL(), autoincrement=False, nullable=True)) op.drop_index(op.f('ix_receiver_rankings_receiver_id'), table_name='receiver_rankings') op.drop_index(op.f('ix_receiver_rankings_country_id'), table_name='receiver_rankings') op.drop_index('idx_receiver_rankings_uc', table_name='receiver_rankings') op.drop_table('receiver_rankings') # ### end Alembic commands ###
#!/usr/bin/env python3
# encoding: utf-8
"""
userdata_script.py - control an aws instance from a sqs queue

Run this guy on startup as a userdata script and he will connect to s3 to
download code to a directory, and run commands in it that are provided by
an SQS queue, one job at a time per core

Processing as a string template, we replace the following keys with their
equivalents:
    - aws_access_key
    - aws_secret_key
    - job_queue_name
    - code_zip_key

Created by Dave Williams on 2011-02-08
"""

## Import present packages
import os
import sys
import time
import traceback
import subprocess as subp
import multiprocessing as mp

## Handle logging and thrown fatal errors
def log_it(log_message):
    """Echo a message to stdout and to the instance console log."""
    print(log_message)
    with open('/dev/console', 'w') as console:
        console.write("USER DATA: "+log_message+'\n')

def fatal_error(error_log_message, feed_me = "differently"):
    """Log an unrecoverable error (shutdown is currently disabled)."""
    log_it("ERROR: " + error_log_message)
    log_it("SHUTTING DOWN: feed me " + feed_me + " next time")
    #os.system("shutdown now -h")

def try_and_log(command, message):
    """Run a shell command and log its exit status after the given message."""
    out = subp.call(command, shell=True)
    log_it(message + str(out))

## Install extra software on the node
log_it("#"*60 + "\n START OF USERDATA SCRIPT\n"*3 + "#"*60)
try_and_log("apt-get -qq update", "Synced package index with result: ")
# BUGFIX: the redirect was written as "> \dev\null", which created a junk
# file literally named '\dev\null' instead of discarding output.
try_and_log("apt-get -qq install python3-scipy python3-pip unzip > /dev/null",
            "Installed scipy, pip, unzip with result: ")
try_and_log("pip3 install boto ujson", "Installed boto, ujson: ")

## Userdata runs as root, but in /, let's move
os.chdir('/root')
HOMEDIR = os.getcwd()+'/'

## Configure control parameters
# These $-placeholders are substituted via string.Template before the
# script is handed to the instance.
ACCESS_KEY = '$aws_access_key'
SECRET_KEY = '$aws_secret_key'
JOB_QUEUE = '$job_queue_name'
CODE_ZIP_KEY = '$code_zip_key'

## Write out boto configuration
lines = """[Credentials]
aws_access_key_id = %s
aws_secret_access_key = %s
\n"""%(ACCESS_KEY, SECRET_KEY)
with open('.boto', 'w') as config_file:
    config_file.writelines(lines)

## Connect to aws with boto
try:
    log_it("Connecting to boto")
    import boto  # Had to wait until .boto was written
    S3 = boto.connect_s3()
    SQS = boto.connect_sqs()
    SQS.get_all_queues()  # Call to test if our keys were accepted
except (boto.exception.NoAuthHandlerFound, boto.exception.SQSError) as e:
    fatal_error("Probably gave bad aws keys", "valid credentials")

## Download files from passed bucket
try:
    log_it("Downloading from code bucket")
    # CODE_ZIP_KEY looks like "s3://bucket/path/to/code.zip"; the first
    # component longer than 3 chars is the bucket name (skips "s3:" and '').
    bucket_name = [n for n in CODE_ZIP_KEY.split('/') if len(n)>3][0]
    key_name = CODE_ZIP_KEY[len(bucket_name)+CODE_ZIP_KEY.index(bucket_name)+1:]
    code_bucket = S3.get_bucket(bucket_name)
    key = code_bucket.get_key(key_name)
    key.get_contents_to_filename(key_name)
    try_and_log("unzip %s"%key_name,
                "Unzipped local code file %s with result: "%key_name)
    time.sleep(3)  # poor man's race condition control!
except boto.exception.S3ResponseError:
    fatal_error("No bucket with given name %s"%(CODE_ZIP_KEY), "a valid bucket")
except IOError:
    fatal_error("Couldn't write code_bucket contents locally")

## Turn control over to the job queue
try:
    log_it(str(dir()))
    log_it("Turning things over to queue eater processes")
    commandment = "python3 -c \"import multifil;\
    multifil.aws.instance.multi_eaters('%s',shutdown=True)\""%JOB_QUEUE
    try_and_log(commandment, "Called sub-process to manage queue eaters")
    log_it("All done")
except Exception as e:
    log_it("### An error occurred while running jobs")
    log_it("Exception of type " + str(type(e)))
    exc_type, exc_value, exc_traceback = sys.exc_info()
    log_it(repr(traceback.format_exception(exc_type, exc_value, exc_traceback)))
    log_it("Going no further, shutting down now")
finally:
    # Always power the instance off when job processing ends, success or not.
    os.system('shutdown now -h')
akes in a URL and treats it as a Bandcamp discography page. Takes over downloading of files as well as fetching of general information which can be used by other modules Args: url (str): Bandcamp URL to analyse and download from. output (str): relative or absolute path to write to. request (request): if supplied this given request's content will be analysed instead of making a new request to the mandatory URL. verbose (bool): sets if status messages and general information should be printed. Errors are still printed regardless of this. silent (bool): sets if error messages should be hidden. short (bool): omits arist and album fields from downloaded track filenames. sleep (number): timeout duration between failed requests in seconds. art_enabled (bool): if True the Bandcamp page's artwork will be downloaded and saved alongside each of the found albums/tracks. id3_enabled (bool): if True tracks downloaded will receive new ID3 tags. """ def __init__(self, url, output, request=None, verbose=False, silent=False, short=False, sleep=30, art_enabled=True, id3_enabled=True, abort_missing=False): # Requests and other information can optionally be filled to remove unneccessary # operations such as making a request to a URL that has already been fetched # by another component. self.url = url # URL to get information from. self.output = output # Basic information used when writing tracks. self.artist = None # Queue array to store album tracks in. self.queue = [] # Store the album request object for later reference. self.request = request self.content = None # Base Bandcamp URL. self.base_url = None # Set if status messages should be printed to the console. self.verbose = verbose # Set if error messages should be silenced. self.silent = silent # Set if the filename should be kept short. self.short = short # Store the timeout duration between failed requests. self.sleep = sleep # Set if the cover should be downloaded as well. 
self.art_enabled = art_enabled # Set if ID3 tags should be written to files. self.id3_enabled = id3_enabled # Sets if a missing album track aborts the entire album download. self.abort_missing = abort_missing def prepare(self): """ Prepares the discography class by gathering information about albums and tracks. If no previous request was made and supplied during instantiation one will be made at this point. This process does not require making requests to the album and track URLs. """ if not valid_url(self.url): # Validate the URL print("The supplied URL is not a valid URL.") return False if not self.request: # Make a request to the album URL. self.request = safe_get(self.url) if self.request.status_code != 200: print("An error occurred while trying to access your supplied URL. Status code: {}".format( self.request.status_code)) self.request = None return False # Get the content from the request and decode it correctly. self.content = self.request.content.decode('utf-8') # Verify that this is an discography page. if not page_type(self.content) == "discography": print("The supplied URL is not a discography page.") # Retrieve the base page URL. self.base_url = "{}//{}".format(str(self.url).split("/")[ 0], str(self.url).split("/")[2]) print(self.base_url) meta = html.unescape(string_between(self.content, '<meta name="Description" content="', ">")).strip() self.artist = meta.split(".\n", 1)[0] if self.artist: self.output = os.path.join(self.output, self.artist, "") # Create a new artist folder if it doesn't already exist. if not os.path.exists(self.output): os.makedirs(self.output) safe_print( '\nSet "{}" as the working directory.'.format(self.output)) # Make the artist name safe for file writing. self.artist = safe_filename(self.artist) # Define search markers to find the index of for track URLs. 
track_search_markers = [ '<a href="/track/', '<a href="{}/track/'.format(self.base_url), '<a href="https://\w+.bandcamp.com/track/' ] # Run a search through our track markers and handle regex options and duplicates. track_filtered_markers = [] for marker in track_search_markers: results = re.findall(marker, self.content) for result in results: if result not in track_filtered_markers: track_filtered_markers.append(result) # Create a list of indices for track links. tracks = [] for marker in track_filtered_markers: tracks.extend(find_string_indices(self.content, marker)) # Define search markers to find the index of for album URLs. album_search_markers = [ '<a href="/album/', '<a href="{}/album/'.format(self.base_url), '<a href="https://\w+.bandcamp.com/album/' ] # Run a search through our album markers and handle regex options and duplicates. album_filtered_markers = [] for marker in album_search_markers: results = re.findall(marker, self.content) for result in results: if result n
ot in album_filtered_markers: album_filtered_markers.append(result) # Create
a list of indices for album links. albums = [] for marker in album_filtered_markers: albums.extend(find_string_indices(self.content, marker)) if self.verbose: print('\nListing found discography content') for i, position in enumerate(albums): album_url = "" # Begin iteration over characters until the string begins. while self.content[position] != '"': position += 1 # Begin iteration over characters until the string closes. while self.content[position + 1] != '"' and self.content[position + 1] != '?': album_url += self.content[position + 1] position += 1 if album_url == "": continue if "http://" not in album_url and "https://" not in album_url: album_url = self.base_url + album_url # Print the prepared track. if self.verbose: safe_print(album_url) # Create a new track instance with the given URL. album = Album( album_url, self.output, verbose=self.verbose, silent=self.silent, short=self.short, sleep=self.sleep, art_enabled=self.art_enabled, id3_enabled=self.id3_enabled, abort_missing=self.abort_missing ) self.queue.insert(len(self.queue), album) for i, position in enumerate(tracks): track_url = "" # Begin iteration over characters until the string begins. while self.content[position] != '"': position += 1 # Begin iteration over characters until the string closes. while self.content[position + 1] != '"' and self.content[position + 1] != '?': track_url += self.content[position + 1] position += 1 if track_url == "": continue if not "http://" in track_url and not "https://" in track_url: track_url = self.base_url + track_url # Print the prepared track. if s
""" Demonstrates how to use the labjack.ljm.eReadAddresses (LJM_eReadAddresses) function. """ from labjack import ljm # Open first found LabJack handle = ljm.open(ljm.constants.dtANY, ljm.constants.ctANY, "ANY") #handle = ljm.openS("ANY", "ANY", "ANY") info = ljm.getHandleInfo(handle) print("Opened a LabJack with Device type: %i, Connection type: %i,\n
" \ "Serial number: %i, IP address: %s, Port: %i,\nMax bytes per MB: %i" % \ (info[0], info[1], info[2], ljm.numberToIP(info[3]), info[4], info[5])) # Setup and call eReadAddresses to read values from the LabJack. numFrames = 3 aAddresses = [60028, 60000, 60004] # [serial number, product ID, firmware version] aDataTypes = [ljm.constants.UINT32, ljm.constants.FLOAT32, ljm.constants.FLOAT32] results = ljm.eReadAddresses(ha
ndle, numFrames, aAddresses, aDataTypes) print("\neReadAddresses results: ") for i in range(numFrames): print(" Address - %i, data type - %i, value : %f" % \ (aAddresses[i], aDataTypes[i], results[i])) # Close handle ljm.close(handle)
#!/usr/bin/env python # -*- coding: UTF8 -*- # # Provides access to portaudio. # Copyright (C) 2010 Josiah Gordon <josiahg@gmail.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any lat
er version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCH
ANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ A portaudio module. """ __all__ = ['_portaudio']
from django.conf.urls import url

from cobra.core.application import Application
from cobra.core.loading import get_class


class AutoCheckDashboardApplication(Application):
    """Dashboard sub-application exposing the autocheck index page."""

    name = None

    # Resolved lazily through the loading framework so the view class can be
    # overridden by downstream projects.
    index_view = get_class('dashboard.autocheck.views', 'IndexView')

    def get_urls(self):
        """Build this app's URL patterns and run them through post-processing."""
        index_pattern = url(r'^$', self.index_view.as_view(),
                            name='autocheck-index')
        return self.post_process_urls([index_pattern])


application = AutoCheckDashboardApplication()
alid_extension = 'wav' supplied_filename = ( '%s.%s' % (filename_without_extension, invalid_extension)) with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_MP3), 'rb', encoding=None ) as f: raw_audio = f.read() response_dict = self.post_json( '%s/0' % (self.AUDIO_UPLOAD_URL_PREFIX), {'filename': supplied_filename}, csrf_token=csrf_token, expected_status_int=400, upload_files=(('raw_audio_file', 'unused_filename', raw_audio),) ) self.logout() self.assertEqual(response_dict['status_code'], 400) self.assertEqual( response_dict['error'], 'Invalid filename extension: it should have ' 'one of the following extensions: %s' % list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys())) def test_upload_empty_audio(self): """Test upload of empty audio.""" self.login(self.EDITOR_EMAIL) csrf_token = self.get_new_csrf_token() # Upload empty audio. response_dict = self.post_json( '%s/0' % self.AUDIO_UPLOAD_URL_PREFIX, {'filename': 'test.mp3'}, csrf_token=csrf_token, expected_status_int=400, upload_files=(('raw_audio_file', 'unused_filename', ''),) ) self.logout() self.assertEqual(response_dict['status_code'], 400) self.assertEqual(response_dict['error'], 'No audio supplied') def test_upload_bad_audio(self): """Test upload of malformed audio.""" self.login(self.EDITOR_EMAIL) csrf_token = self.get_new_csrf_token() response_dict = self.post_json( '%s/0' % self.AUDIO_UPLOAD_URL_PREFIX, {'filename': 'test.mp3'}, csrf_token=csrf_token, expected_status_int=400, upload_files=( ('raw_audio_file', 'unused_filename', 'non_audio_data'),) ) self.logout() self.assertEqual(response_dict['status_code'], 400) self.assertEqual( response_dict['error'], 'Audio not recognized as a mp3 file') def test_missing_extensions_are_detected(self): """Test upload of filenames with no extensions are caught.""" self.login(self.EDITOR_EMAIL) csrf_token = self.get_new_csrf_token() missing_extension_filename = 'test' with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_MP3), 
'rb', encoding=None ) as f: raw_audio = f.read() response_dict = self.post_json( '%s/0' % (self.AUDIO_UPLOAD_URL_PREFIX), {'filename': missing_extension_filename}, csrf_token=csrf_token, expected_status_int=400, upload_files=(('raw_audio_file', 'unused_filename', raw_audio),) ) self.logout() self.assertEqual(response_dict['status_code'], 400) self.assertEqual( response_dict['error'], 'No filename extension: it should have ' 'one of the following extensions: ' '%s' % list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys())) def test_exceed_max_length_detected(self): """Test that audio file is less than max playback length.""" self.login(self.EDITOR_EMAIL) csrf_token = self.get_new_csrf_token() with utils.open_file( os.path.join( feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_OVER_MAX_LENGTH), 'rb', encoding=None ) as f: raw_audio = f.read() response_dict = self.post_json( '%s/0' % self.AUDIO_UPLOAD_URL_PREFIX, {'filename': 'test.mp3'}, csrf_token=csrf_token, expected_status_int=400, upload_files=(('raw_audio_file', 'unused_filename', raw_audio),) ) self.logout() self.assertEqual(response_dict['status_code'], 400) self.assertIn( 'Audio files must be under %s seconds in length' % feconf.MAX_AUDIO_FILE_LENGTH_SEC, response_dict['error']) def test_non_matching_extensions_are_detected(self): """Test that filenames with extensions that don't match the audio are detected. """ self.login(self.EDITOR_EMAIL) csrf_token = self.get_new_csrf_token() # Use an accepted audio extension in mismatched_filename # that differs from the uploaded file's audio type. 
mismatched_filename = 'test.mp3' with utils.open_file( os.path.join( feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_FLAC), 'rb', encoding=None ) as f: raw_audio = f.read() response_dict = self.post_json( '%s/0' % self.AUDIO_UPLOAD_URL_PREFIX, {'filename': mismatched_filename}, csrf_token=csrf_token, expected_status_int=400, upload_files=(('raw_audio_file', 'unused_filename', raw_audio),) ) self.logout() self.assertEqual(response_dict['status_code'], 400) self.assertEqual( response_dict['error'], 'Audio not recognized as a mp3 file') def test_upload_check_for_duration_sec_as_response(self): """Tests the file upload and trying to confirm the audio file duration_secs is accurate. """ self.login(self.EDITOR_EMAIL) csrf_token = self.get_new_csrf_token() with utils.open_file( os.path.join(feconf.TESTS_DATA_DIR, self.TEST_AUDIO_FILE_MP3), 'rb', e
ncoding=None ) as f: raw_audio = f.read() response_dict = self.post_json( '%s/0' % self.AUDIO_UPLOAD_URL_PREFIX, {'filename': self.TEST_AUDIO_FILE_MP3}, csrf_token=csrf_token, expected_status_int=200, upload_files=(('raw_audio_file', 'unused_filename', raw_audio),) ) self.logout() expected_value = ({ 'filename': self.TEST_AUDIO_FILE
_MP3, 'duration_secs': 15.255510204081633}) self.assertEqual(response_dict, expected_value) class PromoBarHandlerTest(test_utils.GenericTestBase): """Test for the PromoBarHandler.""" def setUp(self): super(PromoBarHandlerTest, self).setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup( self.RELEASE_COORDINATOR_EMAIL, self.RELEASE_COORDINATOR_USERNAME) self.add_user_role( self.RELEASE_COORDINATOR_USERNAME, feconf.ROLE_ID_RELEASE_COORDINATOR) def test_get_promo_bar_data(self): response = self.get_json('/promo_bar_handler') self.assertEqual( response, { 'promo_bar_enabled': False, 'promo_bar_message': '' }) def test_release_coordinator_able_to_update_promo_bar_config(self): self.login(self.RELEASE_COORDINATOR_EMAIL) csrf_token = self.get_new_csrf_token() response = self.put_json( '/promo_bar_handler', { 'promo_bar_enabled': True, 'promo_bar_message': 'New promo bar message.' }, csrf_token=csrf_token) self.assertEqual(response, {}) response = self.get_json('/promo_bar_handler') self.assertEqual( response, { 'promo_bar_enabled': True, 'promo_bar_message': 'New promo bar message.' }) self.logout() class ValueGeneratorHandlerTests(test_utils.GenericTestBase): def test_value_generated_error(self): dummy_id = 'someID' response = self.get_json( '/value_generator_handler/%s' % dummy_id, expected_status_int=404 ) error_message = 'Could not find the page http://localhost/{}{}.'.format( 'value_generator_handler/', dummy_id ) self.assertEqual(response['error'], error_message) def test_html_response(self):
01 Dmitry A. Rozmanov <dima@xenon.spb.ru> # # This library is free software: you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation, either # version 3 of the License, or (at your option) any later version. # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see <http://www.gnu.org/licenses/> or <http://www.gnu.org/licenses/lgpl.txt>. from U32 import U32 # --NON ASCII COMMENT ELIDED-- #typedef unsigned char des_cblock[8]; #define HDRSIZE 4 def c2l(c): "char[4] to unsigned long" l = U32(c[0]) l = l | (U32(c[1]) << 8) l = l | (U32(c[2]) << 16) l = l | (U32(c[3]) << 24) return l def c2ln(c,l1,l2,n): "char[n] to two unsigned long???" 
c = c + n l1, l2 = U32(0), U32(0) f = 0 if n == 8: l2 = l2 | (U32(c[7]) << 24) f = 1 if f or (n == 7): l2 = l2 | (U32(c[6]) << 16) f = 1 if f or (n == 6): l2 = l2 | (U32(c[5]) << 8) f = 1 if f or (n == 5): l2 = l2 | U32(c[4]) f = 1 if f or (n == 4): l1 = l1 | (U32(c[3]) << 24) f = 1 if f or (n == 3): l1 = l1 | (U32(c[2]) << 16) f = 1 if f or (n == 2): l1 = l1 | (U32(c[1]) << 8) f = 1 if f or (n == 1): l1 = l1 | U32(c[0]) return (l1, l2) def l2c(l): "unsigned long to char[4]" c = [] c.append(int(l & U32(0xFF))) c.append(int((l >> 8) & U32(0xFF))) c.append(int((l >> 16) & U32(0xFF))) c.append(int((l >> 24) & U32(0xFF))) return c def n2l(c, l): "network to host long" l = U32(c[0] << 24) l = l | (U32(c[1]) << 16) l = l | (U32(c[2]) << 8) l = l | (U32(c[3])) return l def l2n(l, c): "host to network long" c = [] c.append(int((l >> 24) & U32(0xFF))) c.append(int((l >> 16) & U32(0xFF))) c.append(int((l >> 8) & U32(0xFF))) c.append(int((l ) & U32(0xFF))) return c def l2cn(l1, l2, c, n): "" for i in range(n): c.append(0x00) f = 0 if f or (n == 8): c[7] = int((l2 >> 24) & U32(0xFF)) f = 1 if f or (n == 7): c[6] = int((l2 >> 16) & U32(0xFF)) f = 1 if f or (n == 6): c[5] = int((l2 >> 8) & U32(0xFF)) f = 1 if f or (n == 5): c[4] = int((l2 ) & U32(0xFF)) f = 1 if f or (n == 4): c[3] = int((l1 >> 24) & U32(0xFF)) f = 1 if f or (n == 3): c[2] = int((l1 >> 16) & U32(0xFF)) f = 1 if f or (n == 2): c[1] = int((l1 >> 8) & U32(0xFF)) f = 1 if f or (n == 1): c[0] = int((l1 ) & U32(0xFF)) f = 1 return c[:n] # array of data # static unsigned long des_SPtrans[8][64]={ # static unsigned long des_skb[8][64]={ from des_data import des_SPtrans, des_skb def D_ENCRYPT(tup, u, t, s): L, R, S = tup #print 'LRS1', L, R, S, u, t, '-->', u = (R ^ s[S]) t = R ^ s[S + 1] t = ((t >> 4) + (t << 28)) L = L ^ (des_SPtrans[1][int((t ) & U32(0x3f))] | \ des_SPtrans[3][int((t >> 8) & U32(0x3f))] | \ des_SPtrans[5][int((t >> 16) & U32(0x3f))] | \ des_SPtrans[7][int((t >> 24) & U32(0x3f))] | \ 
des_SPtrans[0][int((u ) & U32(0x3f))] | \ des_SPtrans[2][int((u >> 8) & U32(0x3f))] | \ des_SPtrans[4][int((u >> 16) & U32(0x3f))] | \ des_SPtrans[6][int((u >> 24) & U32(0x3f))]) #print 'LRS:', L, R, S, u, t return ((L, R, S), u, t, s) def PERM_OP (tup, n, m): "tup - (a, b, t)" a, b, t = tup t = ((a >> n) ^ b) & m b = b ^ t a = a ^ (t << n) return (a, b, t) def HPERM_OP (tup, n, m): "tup - (a, t)" a, t = tup t = ((a << (16 - n)) ^ a) & m a = a ^ t ^ (t >> (16 - n)) return (a, t) shifts2 = [0,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0] class DES: KeySched = None # des_key_schedule def __init__(self, key_str): # key - UChar[8] key = [] for i in key_str: key.append(ord(i)) #print 'key:', key self.KeySched = des_set_key(key) #print 'schedule:', self.KeySched, len(self.KeySched) def decrypt(self, str): # block - UChar[] block = [] for i in str: block.append(ord(i)) #print block block = des_ecb_encrypt(block, self.KeySched, 0) res = '' for i in block: res = res + (chr(i)) return res def encrypt(self, str): # block - UChar[] block = [] for i in str: block.append(ord(i)) block = des_ecb_encrypt(block, self.KeySched, 1) res = '' for i in block: res = res + (chr(i)) return res #------------------------ def des_encript(input, ks, encrypt): # input - U32[] # output - U32[] # ks - des_key_shedule - U32[2][16] # encrypt - int # l, r, t, u - U32 # i - int # s - U32[] l = input[0] r = input[1] t = U32(0) u = U32(0) r, l, t = PERM_OP((r, l, t), 4, U32(0x0f0f0f0fL)) l, r, t = PERM_OP((l, r, t), 16, U32(0x0000ffffL)) r, l, t = PERM_OP((r, l, t), 2, U32(0x33333333L)) l, r, t = PERM_OP((l, r, t), 8, U32(0x00ff00ffL)) r, l, t = PERM_OP((r, l, t), 1, U32(0x55555555L)) t = (r << 1)|(r >> 31) r = (l << 1)|(l >> 31) l = t s = ks # ??????????????? 
#print l, r if(encrypt): for i in range(0, 32, 4): rtup, u, t, s = D_ENCRYPT((l, r, i + 0), u, t, s) l = rtup[0] r = rtup[1] rtup, u, t, s = D_ENCRYPT((r, l, i + 2), u, t, s) r = rtup[0] l = rtup[1] else: for i in range(30, 0, -4): rtup, u, t, s = D_ENCRYPT((l, r, i - 0), u, t, s) l = rtup[0] r = rtup[1] rtup, u, t, s = D_ENCRYPT((r, l, i - 2), u, t, s) r = rtup[0] l = rtup[1] #print l, r l = (l >> 1)|(l << 31) r = (r >> 1)|(r << 31) r, l, t = PERM_OP((r, l, t), 1, U32(0x55555555L)) l, r, t = PERM_OP((l, r, t), 8, U32(0x00ff00ffL)) r, l, t = PERM_OP((r, l, t), 2, U32(0x33333333L)) l, r, t = PERM_OP((l, r, t), 16, U32(0x0000ffffL)) r, l, t = PERM_OP((r, l, t), 4, U32(0x0f0f0f0fL)) output = [l] output.append(r) l, r, t, u = U32(0), U32(0), U32(0), U32(0) return output def des_ecb_encrypt(input, ks, encrypt): # input - des_cblock - UChar[8] # output - des_cblock - UChar[8] # ks - des_key_shedule - U32[2][16] # encrypt - int #print input l0 = c2l(input[0:4]) l1 = c2l(input[4:8]) ll = [l0] ll.append(l1) #print ll ll = des_encript(ll, ks, encrypt) #print ll l0 = ll[0] l1 = ll[1] output = l2c(l0) output = output + l2c(l1) #print output l0, l1, ll[0], ll[1] = U32(0), U32(0), U32(0), U32(0) return output def des_set_key(key): # key - des_cblock - UChar[8] # schedule - des_key_schedule # register unsigned long c
,d,t,s; # register unsigned char *in; # register unsigned long *k; # register int i; #k = schedule # in = key k = [] c = c2l(key[0:4]) d = c2l(key[4:8]) t = U32(0) d, c, t = PERM_OP((d, c, t), 4, U32(0x0f0f0f0fL)) c, t = HPERM_OP((c, t), -2, U32(0xcccc0000L)) d, t = HPERM_OP((d, t), -2, U32(0xcccc0000L)) d, c, t = PERM_OP((d, c, t), 1,
U32(0x55555555L)) c, d, t = PERM_OP((c, d, t), 8, U32(0x00ff00ffL)) d, c, t = PERM_OP((d, c, t), 1, U32(0x55555555L)) d = (((d & U32(0x000000ffL)) << 16)|(d & U32(0x0000ff00L))|((d & U32(0x00ff0000L)) >> 16)|((c & U32(0xf0000000L)) >> 4)) c = c & U32(0x0fffffffL) for i in range(16): if (shifts2[i]): c = ((c >> 2)|(c << 26)) d = ((d >> 2)|(d << 26)) else: c = ((c >> 1)|(c << 27)) d = ((
import unittest
import MonitorManager
import RamByteMonitor
import Constants
import logging
import Utils

# TODO figure out proper logging practice.
logging.basicConfig(level=logging.INFO)


class TestMonitorManager(unittest.TestCase):
    """Unit tests for the MonitorManager singleton.

    Covers adding/removing monitors, the monitor factory, and bulk
    configuration via handle_config.
    """

    def setUp(self):
        self.manager = MonitorManager.MonitorManager()
        # Have to manually wipe monitors in between tests because this
        # singleton stubbornly holds onto data.
        self.manager.clear_monitors()

    def test_adding_monitor_to_manager(self):
        monitor_1 = RamByteMonitor.RamByteMonitor()
        self.manager.add_monitor(monitor_1)
        # assertIn gives a clearer failure message than assertTrue(x in y).
        self.assertIn(Constants.RAM_BYTE_MONITOR,
                      self.manager.monitor_list.keys(),
                      "uh oh, guess its not in the keys,.")

    def test_monitor_properly_deleted_through_object_deletion(self):
        monitor_1 = RamByteMonitor.RamByteMonitor()
        self.manager.add_monitor(monitor_1)
        self.manager.remove_monitor(monitor_1)
        self.assertNotIn(Constants.RAM_BYTE_MONITOR,
                         self.manager.monitor_list.keys(),
                         "Not Properly Deleted, found ID in the keys.")

    def test_empty_list_raises_error(self):
        # BUG FIX: the original called remove_monitor_by_type(...) immediately
        # and passed its *return value* to assertRaises, so the expected
        # KeyError propagated before assertRaises could catch it and the test
        # errored instead of passing. The context-manager form asserts the
        # exception correctly.
        with self.assertRaises(KeyError):
            self.manager.remove_monitor_by_type(Constants.RAM_BYTE_MONITOR)

    def test_remove_monitor_by_type(self):
        self.manager.add_monitor(RamByteMonitor.RamByteMonitor())
        self.manager.remove_monitor_by_type(Constants.RAM_BYTE_MONITOR)
        # Dropped the leftover debug print; assertEqual reports the actual
        # length on failure.
        self.assertEqual(len(self.manager.list_monitors()), 0)

    def test_monitor_factory_generates_correct_monitor(self):
        mon1 = self.manager.create_monitor(Constants.RAM_BYTE_MONITOR)
        self.assertIsInstance(mon1, RamByteMonitor.RamByteMonitor)

    def test_monitor_factory_fails_on_unknown_type(self):
        self.assertRaises(ValueError, self.manager.create_monitor,
                          "Some Garbage Type")

    def test_monitor_factory_fails_on_bad_option_parse(self):
        # A garbage mount point appended to a valid storage-monitor type id.
        mon1 = self.manager.create_monitor(
            Constants.STORAGE_BYTE_MONITOR + "ASDAS")
        self.assertIsNone(mon1)

    def test_handle_config_successful_call(self):
        mp = Utils.get_drive_mountpoints()[0]
        cpu_mon = self.manager.create_monitor(Constants.CPU_PERCENT_MONITOR)
        self.manager.add_monitor(cpu_mon)
        config_dict = {
            "add": [
                Constants.RAM_BYTE_MONITOR,
                Constants.BYTES_RECEIVED_MONITOR,
                Constants.STORAGE_BYTE_MONITOR + mp
            ],
            "remove": [
                Constants.CPU_PERCENT_MONITOR
            ]
        }
        self.manager.handle_config(config_dict)
        # Three monitors were added and the CPU monitor was removed.
        self.assertEqual(len(self.manager.list_monitors()), 3)
        self.assertNotIn(Constants.CPU_PERCENT_MONITOR,
                         self.manager.list_monitors().keys())


if __name__ == "__main__":
    unittest.main()
"""Order 17: show time with give time and current time. """ class ShowTimeWithCurrentAndGiven(object): @staticmethod def show(cu
rrent_timestamp=0, given_timestamp=0): if current_timestamp - given_timestamp < 0: return ''
if current_timestamp - given_timestamp < 120: return '1分钟前' if current_timestamp - given_timestamp < 60 * 60: munite = int((current_timestamp - given_timestamp) / 60) return f'{munite}分钟前' if current_timestamp - given_timestamp < 24 * 60 * 60: hour = int((current_timestamp - given_timestamp) / 60 / 60) return f'{hour}小时前' day = int((current_timestamp - given_timestamp) / 24 / 60 / 60) return f'{day}天前' # # 1分钟前 # print(ShowTimeWithCurrentAndGiven.show(1502173717, 1502173700)) # # 3分钟前 # print(ShowTimeWithCurrentAndGiven.show(1502173717, 1502173517)) # # 2小时前 # print(ShowTimeWithCurrentAndGiven.show(1502173717, 1502163517)) # # 11天前 # print(ShowTimeWithCurrentAndGiven.show(1502173717, 1501163517)) # # 127天前 # print(ShowTimeWithCurrentAndGiven.show(1502173717, 1491163517))
'''
Configures the path to pyatspi. Exposes all other package contents.

@author: Eitan Isaacson
@author: Peter Parente
@organization: IBM Corporation
@copyright: Copyright (c) 2006, 2007 IBM Corporation
@license: BSD

All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution,
and is available at
U{http://www.opensource.org/licenses/bsd-license.php}
'''
import sys, os
from i18n import _
import signal

def signal_handler(signal, frame):
  # Handler for SIGTSTP: tells the user why Ctrl+Z is disabled instead of
  # suspending the process.
  # NOTE(review): the 'signal' parameter shadows the signal module inside this
  # handler; harmless here because the module is not referenced in the body.
  print _( 'You pressed Ctrl+Z. This would normally freeze your keyboard')
  print _( 'Ctrl+Z has been disabled; use "accerciser &" instead from the command line')

# Install the handler at import time so Ctrl+Z never suspends the app.
signal.signal(signal.SIGTSTP, signal_handler)

# If pyatspi not installed seperately, add pyatspi zip file to the path
try:
  import pyatspi
except ImportError:
  # Fall back to the bundled pyatspi.zip shipped next to this package.
  sys.path.insert(1, os.path.join(os.path.dirname(__file__), 'pyatspi.zip'))

def main():
  '''
  Run program: instantiate the accerciser Main object and enter its loop.
  '''
  from accerciser import Main
  main = Main()
  main.run()
#!/usr/bin/env python
#
# Computation of the rate-distortion function for source coding with side
# information at the decoder using the Blahut-Arimoto algorithm.
#
# Formulation similar to R.E. Blahut "Computation of Channel Capacity and
# Rate-Distortion Functions," IEEE Transactions on Information Theory, 18,
# no. 4, 1972.
#
# Author: Christophe Ramananjaona
# (c) 2005, Department of Electrical and Computer Engineering, Duke University.
# (c) 2017, Isloux, for the Python version.

from numpy import shape,sum,zeros,ones,arange,log,exp,array,longdouble,finfo
from sys import float_info
from os.path import isfile
from sys import argv
#from code_generator0 import code_generator
from code_generator import code_generator

def distortion_measure(n):
    """Return the n-by-n Hamming distortion matrix: 0 on the diagonal,
    1 everywhere else."""
    # Hamming distance
    D=ones((n,n),dtype='longdouble')
    for i in range(n):
        D[i][i]=0.0
    return(D)

def blahut_arimoto(q):
    """Run the Blahut-Arimoto iteration on joint distribution q (an
    nx-by-ny array) and print, for each slope point s, the distortion dv,
    the rate R, and the number of inner iterations n (Python 2 print).

    NOTE(review): t comes from code_generator(nz, ny); the code below indexes
    t[2][k,j], so t[2] is presumably the code table mapping (code k, symbol j)
    to a reconstruction symbol -- confirm against code_generator.
    """
    nx,ny=shape(q)
    # Marginals of q: qx over rows (x), qy over columns (y).
    qx=[]
    for i in range(nx):
        qx.append(longdouble(sum(q[i,:])))
    qy=[]
    for j in range(ny):
        qy.append(longdouble(sum(q[:,j])))
    nz=nx
    #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    # The array t contains all the possible codes that map Y into Z
    nt=nx+1
    # nt above is a placeholder; code_generator returns the real count.
    t,nt=code_generator(nz,ny)
    # If nx!=nz there is a problem
    D=distortion_measure(max(nx,ny))
    # Sweep of npoints values of the slope parameter s (ds in [-10, 0)).
    npoints=100
    ds=arange(-10.0,0.0,0.1)
    c=zeros((nx,nt),dtype='longdouble')
    vtx=zeros((nt,nx),dtype='longdouble')
    sexp=zeros(nt,dtype='longdouble')
    # Convergence threshold for the inner fixed-point loop.
    #epsilon=finfo(longdouble(1.0)).eps
    epsilon=1.0e-7
    for s in range(npoints):
        # Start each slope point from the uniform distribution over (t, y).
        qty=ones((nt,ny),dtype='longdouble')
        qty=qty/nt/ny
        # Initialise stop test
        stop=longdouble(1.0e5)
        n=0
        while stop>epsilon:
            n=n+1
            # c[i][k] = exp of the conditional-entropy-like sum for code k
            # given source symbol i; falls back to the y-marginal when the
            # row mass qx[i] is zero.
            for i in range(nx):
                if qx[i]!=0.0:
                    qxid=longdouble(1.0)/qx[i]
                for k in range(nt):
                    ssum=longdouble(0.0)
                    for j in range(ny):
                        if qx[i]!=0.0:
                            ssum+=q[i][j]*qxid*log(qty[k][j])
                        else:
                            ssum+=qy[j]*log(qty[k][j])
                    c[i][k]=exp(ssum)
            # vtx[k][i]: updated conditional distribution of code k given x=i,
            # tilted by exp(s * expected distortion) and normalised by sd.
            for i in range(nx):
                sexp=zeros(nt,dtype='longdouble')
                sd=longdouble(0.0)
                if qx[i]!=0.0:
                    qxid=longdouble(1.0)/qx[i]
                for k in range(nt):
                    for j in range(ny):
                        if qx[i]!=0.0:
                            sexp[k]+=q[i][j]*qxid*D[i,t[2][k,j]]
                        else:
                            sexp[k]+=qy[j]*D[i,t[2][k,j]]
                    sd+=c[i][k]*exp(ds[s]*sexp[k])
                sd=longdouble(1.0)/sd
                for k in range(nt):
                    vtx[k][i]=c[i][k]*exp(ds[s]*sexp[k])*sd
            # Re-estimate qty from vtx; stop accumulates the KL divergence
            # between successive iterates as the convergence measure.
            qtym=qty
            qty=zeros((nt,ny),dtype='longdouble')
            stop=longdouble(0.0)
            for j in range(ny):
                qyjd=longdouble(1.0)/qy[j]
                for k in range(nt):
                    for i in range(nx):
                        qty[k][j]+=q[i][j]*qyjd*vtx[k][i]
                    stop+=qy[j]*qty[k][j]*log(qty[k][j]/qtym[k][j])
        # After convergence: dv is the average distortion, R the rate at
        # slope ds[s] (R = s*D - sum_x qx log sum_k c e^{s d}).
        ssum=longdouble(0.0)
        dv=longdouble(0.0)
        for i in range(nx):
            ssum2=longdouble(0.0)
            if qx[i]!=0.0:
                qxid=longdouble(1.0)/qx[i]
            for k in range(nt):
                ssexp=longdouble(0.0)
                for j in range(ny):
                    if qx[i]!=0.0:
                        ssexp+=q[i][j]*qxid*D[i,t[2][k,j]]
                    else:
                        ssexp+=qy[j]*D[i,t[2][k,j]]
                    dv+=q[i][j]*vtx[k][i]*D[i,t[2][k,j]]
                ssum2+=c[i][k]*exp(ds[s]*ssexp)
            ssum+=qx[i]*log(ssum2)
        R=ds[s]*dv-ssum
        print dv,R,n

def readinputfile(inputfile):
    """Read a whitespace-separated numeric matrix from inputfile and
    return it as an nx-by-ny longdouble array.

    NOTE(review): uses the Python 2 file() builtin and relies on garbage
    collection to close the handle.
    """
    a=[ line.split() for line in file(inputfile) ]
    nx=len(a) # Number of lines
    ny=len(a[0]) # Number of columns
    q=zeros((nx,ny),dtype='longdouble')
    for i in range(nx):
        for j in range(ny):
            q[i][j]=a[i][j]
    return(q)

def main(inputfile="q.txt"):
    """Load q from inputfile if it exists, otherwise use a built-in 2x2
    example distribution, then run the Blahut-Arimoto sweep."""
    if isfile(inputfile):
        q=readinputfile(inputfile)
    else:
        nx=2
        ny=2
        q=array([[0.3,0.2],[0.24,0.26]],dtype='longdouble')
    blahut_arimoto(q)

if __name__=="__main__":
    # Optional first CLI argument: path to the input distribution file.
    if len(argv)>1:
        main(argv[1])
    else:
        main()
import unittest

from mox import MoxTestBase, IsA, IgnoreArg
import gevent
from gevent.socket import create_connection
from gevent.ssl import SSLSocket

from slimta.edge.smtp import SmtpEdge, SmtpSession
from slimta.envelope import Envelope
from slimta.queue import QueueError
from slimta.smtp.reply import Reply
from slimta.smtp import ConnectionLost, MessageTooBig
from slimta.smtp.client import Client

# NOTE(review): IgnoreArg and ConnectionLost are imported but not referenced
# in this module.


class TestEdgeSmtp(MoxTestBase, unittest.TestCase):
    # Tests for the SMTP edge service: SmtpSession command handlers and a
    # full client round-trip against SmtpEdge. The mox record/replay calls
    # below define the *expected order* of validator callbacks; reordering
    # them changes what the test verifies.

    def test_call_validator(self):
        # The validator factory is called with the session, then the named
        # handle_* method is dispatched with the given args.
        mock = self.mox.CreateMockAnything()
        mock.__call__(IsA(SmtpSession)).AndReturn(mock)
        mock.handle_test('arg')
        self.mox.ReplayAll()
        h = SmtpSession(None, mock, None)
        h._call_validator('test', 'arg')

    def test_protocol_attribute(self):
        # protocol string grows with capabilities: SMTP -> ESMTP (extended)
        # -> ESMTPS (TLS) -> ESMTPSA (TLS + auth).
        h = SmtpSession(None, None, None)
        self.assertEqual('SMTP', h.protocol)
        h.extended_smtp = True
        self.assertEqual('ESMTP', h.protocol)
        h.security = 'TLS'
        self.assertEqual('ESMTPS', h.protocol)
        h.auth = 'test'
        self.assertEqual('ESMTPSA', h.protocol)

    def test_simple_handshake(self):
        # Plain HELO handshake: banner + helo callbacks, no extended SMTP.
        mock = self.mox.CreateMockAnything()
        mock.__call__(IsA(SmtpSession)).AndReturn(mock)
        mock.handle_banner(IsA(Reply), ('127.0.0.1', 0))
        mock.handle_helo(IsA(Reply), 'there')
        self.mox.ReplayAll()
        h = SmtpSession(('127.0.0.1', 0), mock, None)
        h.BANNER_(Reply('220'))
        h.HELO(Reply('250'), 'there')
        self.assertEqual('there', h.ehlo_as)
        self.assertFalse(h.extended_smtp)

    def test_extended_handshake(self):
        # EHLO + STARTTLS + AUTH: session should report ESMTPSA and expose
        # the (authcid, authzid) pair from the credentials object.
        creds = self.mox.CreateMockAnything()
        creds.authcid = 'testuser'
        creds.authzid = 'testzid'
        ssl_sock = self.mox.CreateMock(SSLSocket)
        mock = self.mox.CreateMockAnything()
        mock.__call__(IsA(SmtpSession)).AndReturn(mock)
        mock.handle_banner(IsA(Reply), ('127.0.0.1', 0))
        mock.handle_ehlo(IsA(Reply), 'there')
        mock.handle_tls()
        mock.handle_tls2(IsA(SSLSocket))
        mock.handle_auth(IsA(Reply), creds)
        self.mox.ReplayAll()
        h = SmtpSession(('127.0.0.1', 0), mock, None)
        h.BANNER_(Reply('220'))
        h.EHLO(Reply('250'), 'there')
        h.TLSHANDSHAKE2(ssl_sock)
        h.AUTH(Reply('235'), creds)
        self.assertEqual('there', h.ehlo_as)
        self.assertTrue(h.extended_smtp)
        self.assertEqual('TLS', h.security)
        self.assertEqual(('testuser', 'testzid'), h.auth)
        self.assertEqual('ESMTPSA', h.protocol)

    def test_mail_rcpt_data_rset(self):
        # MAIL/RCPT populate the envelope; RSET clears it again.
        mock = self.mox.CreateMockAnything()
        mock.__call__(IsA(SmtpSession)).AndReturn(mock)
        mock.handle_mail(IsA(Reply), 'sender@example.com', {})
        mock.handle_rcpt(IsA(Reply), 'rcpt@example.com', {})
        mock.handle_data(IsA(Reply))
        self.mox.ReplayAll()
        h = SmtpSession(None, mock, None)
        h.MAIL(Reply('250'), 'sender@example.com', {})
        h.RCPT(Reply('250'), 'rcpt@example.com', {})
        self.assertEqual('sender@example.com', h.envelope.sender)
        self.assertEqual(['rcpt@example.com'], h.envelope.recipients)
        h.DATA(Reply('550'))
        h.RSET(Reply('250'))
        self.assertFalse(h.envelope)

    def test_have_data_errors(self):
        # MessageTooBig maps to a 552 reply; other exceptions propagate.
        h = SmtpSession(None, None, None)
        reply = Reply('250')
        h.HAVE_DATA(reply, None, MessageTooBig())
        self.assertEqual('552', reply.code)
        with self.assertRaises(ValueError):
            h.HAVE_DATA(reply, None, ValueError())

    def test_have_data(self):
        # Stub that bypasses the real reverse-DNS lookup for the client.
        class PtrLookup(object):
            def finish(self, *args):
                return 'localhost'
        env = Envelope()
        handoff = self.mox.CreateMockAnything()
        handoff(env).AndReturn([(env, 'testid')])
        self.mox.ReplayAll()
        h = SmtpSession(('127.0.0.1', 0), None, handoff)
        h.envelope = env
        h._ptr_lookup = PtrLookup()
        reply = Reply('250')
        h.HAVE_DATA(reply, b'', None)
        self.assertEqual('250', reply.code)
        self.assertEqual('2.6.0 Message accepted for delivery', reply.message)
        self.assertEqual('localhost', env.client['host'])

    def test_have_data_queueerror(self):
        # A QueueError from the handoff maps to a 451 transient failure.
        env = Envelope()
        handoff = self.mox.CreateMockAnything()
        handoff(env).AndReturn([(env, QueueError())])
        self.mox.ReplayAll()
        h = SmtpSession(('127.0.0.1', 0), None, handoff)
        h.envelope = env
        reply = Reply('250')
        h.HAVE_DATA(reply, b'', None)
        self.assertEqual('451', reply.code)
        self.assertEqual('4.3.0 Error queuing message', reply.message)

    def test_smtp_edge(self):
        # End-to-end: start a real SmtpEdge on an ephemeral port and drive a
        # full SMTP transaction with the slimta client.
        queue = self.mox.CreateMockAnything()
        queue.enqueue(IsA(Envelope)).AndReturn([(Envelope(), 'testid')])
        self.mox.ReplayAll()
        server = SmtpEdge(('127.0.0.1', 0), queue)
        server.start()
        gevent.sleep(0)
        client_sock = create_connection(server.server.address)
        client = Client(client_sock)
        client.get_banner()
        client.ehlo('there')
        client.mailfrom('sender@example.com')
        client.rcptto('rcpt@example.com')
        client.data()
        client.send_empty_data()
        client.quit()
        client_sock.close()


# vim:et:fdm=marker:sts=4:sw=4:ts=4
# Tests for bolt's Spark-backed distributed array: construction, metadata
# (shape/size/split/ndim/mask), caching/partitioning, and dtype handling.
# NOTE(review): 'sc' is presumably a SparkContext supplied by a pytest
# fixture -- confirm in the project's conftest.py.

from numpy import arange, dtype, int64, float64

from bolt import array, ones
from bolt.utils import allclose


def test_shape(sc):
    # Bolt array mirrors the numpy shape for 2-D and 3-D inputs.
    x = arange(2*3).reshape((2, 3))
    b = array(x, sc)
    assert b.shape == x.shape
    x = arange(2*3*4).reshape((2, 3, 4))
    b = array(x, sc)
    assert b.shape == x.shape


def test_size(sc):
    x = arange(2*3*4).reshape((2, 3, 4))
    b = array(x, sc, axis=0)
    assert b.size == x.size


def test_split(sc):
    # split counts how many leading axes are distributed as keys.
    x = arange(2*3*4).reshape((2, 3, 4))
    b = array(x, sc, axis=0)
    assert b.split == 1
    b = array(x, sc, axis=(0, 1))
    assert b.split == 2


def test_ndim(sc):
    # Total ndim is the sum of key-axes and value-axes dimensions.
    x = arange(2**5).reshape(2, 2, 2, 2, 2)
    b = array(x, sc, axis=(0, 1, 2))
    assert b.keys.ndim == 3
    assert b.values.ndim == 2
    assert b.ndim == 5


def test_mask(sc):
    # mask marks each axis with 1 if it is a key (distributed) axis.
    x = arange(2*3*4).reshape((2, 3, 4))
    b = array(x, sc, axis=0)
    assert b.mask == (1, 0, 0)
    b = array(x, sc, axis=(0, 1))
    assert b.mask == (1, 1, 0)
    b = array(x, sc, axis=(0, 1, 2))
    assert b.mask == (1, 1, 1)


def test_cache(sc):
    x = arange(2*3).reshape((2, 3))
    b = array(x, sc)
    b.cache()
    assert b._rdd.is_cached
    b.unpersist()
    assert not b._rdd.is_cached


def test_repartition(sc):
    # Repartitioning loses the ordered guarantee but sets partition count.
    x = arange(2 * 3).reshape((2, 3))
    b = array(x, sc)
    assert b._ordered
    b = b.repartition(10)
    assert not b._ordered
    assert b._rdd.getNumPartitions() == 10


def test_concatenate(sc):
    # Concatenation accepts a numpy array, a bolt array, or a local array.
    from numpy import concatenate
    x = arange(2*3).reshape((2, 3))
    b = array(x, sc)
    c = array(x)
    assert allclose(b.concatenate(x).toarray(), concatenate((x, x)))
    assert allclose(b.concatenate(b).toarray(), concatenate((x, x)))
    assert allclose(b.concatenate(c).toarray(), concatenate((x, x)))


def test_dtype(sc):
    # dtype must match numpy's on the wrapper AND on every distributed
    # record (checked by collecting each record's dtype from the RDD).
    a = arange(2**8, dtype=int64)
    b = array(a, sc, dtype=int64)
    assert a.dtype == b.dtype
    assert b.dtype == dtype(int64)
    dtypes = b._rdd.map(lambda x: x[1].dtype).collect()
    for dt in dtypes:
        assert dt == dtype(int64)

    a = arange(2.0**8)
    b = array(a, sc)
    assert a.dtype == b.dtype
    assert b.dtype == dtype(float64)
    dtypes = b._rdd.map(lambda x: x[1].dtype).collect()
    for dt in dtypes:
        assert dt == dtype(float64)

    a = arange(2**8)
    b = array(a, sc)
    assert a.dtype == b.dtype
    assert b.dtype == dtype(int64)
    dtypes = b._rdd.map(lambda x: x[1].dtype).collect()
    for dt in dtypes:
        assert dt == dtype(int64)

    from numpy import ones as npones
    a = npones(2**8, dtype=bool)
    b = array(a, sc)
    assert a.dtype == b.dtype
    assert b.dtype == dtype(bool)
    dtypes = b._rdd.map(lambda x: x[1].dtype).collect()
    for dt in dtypes:
        assert dt == dtype(bool)

    b = ones(2**8, sc)
    assert b.dtype == dtype(float64)
    dtypes = b._rdd.map(lambda x: x[1].dtype).collect()
    for dt in dtypes:
        assert dt == dtype(float64)

    b = ones(2**8, sc, dtype=bool)
    assert b.dtype == dtype(bool)
    dtypes = b._rdd.map(lambda x: x[1].dtype).collect()
    for dt in dtypes:
        assert dt == dtype(bool)


def test_astype(sc):
    # astype must convert both the wrapper dtype and every record's dtype.
    from numpy import ones as npones
    a = npones(2**8, dtype=int64)
    b = array(a, sc, dtype=int64)
    c = b.astype(bool)
    assert c.dtype == dtype(bool)
    dtypes = c._rdd.map(lambda x: x[1].dtype).collect()
    for dt in dtypes:
        assert dt == dtype(bool)

    b = ones((100, 100), sc, dtype=int64)
    c = b.astype(bool)
    assert c.dtype == dtype(bool)
    dtypes = c._rdd.map(lambda x: x[1].dtype).collect()
    for dt in dtypes:
        assert dt == dtype(bool)

    b = ones((100, 100), sc)
    c = b.astype(bool)
    assert c.dtype == dtype(bool)
    dtypes = c._rdd.map(lambda x: x[1].dtype).collect()
    for dt in dtypes:
        assert dt == dtype(bool)


def test_clip(sc):
    # clip supports lower-only and (lower, upper) forms, matching numpy.
    from numpy import arange
    a = arange(4).reshape(2, 2)
    b = array(a, sc)
    assert allclose(b.clip(0).toarray(), a.clip(0))
    assert allclose(b.clip(2).toarray(), a.clip(2))
    assert allclose(b.clip(1, 2).toarray(), a.clip(1, 2))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    # Second migration for the 'million' app: attaches verbose names to the
    # Game and Question models (and Meta options to Game). Field definitions
    # mirror million/0001_initial; only the metadata changes.

    dependencies = [
        ('million', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='game',
            options={'verbose_name': 'Game', 'verbose_name_plural': 'Games'},
        ),
        migrations.AlterField(
            model_name='game',
            name='name',
            field=models.CharField(max_length=50, verbose_name='Name'),
        ),
        migrations.AlterField(
            model_name='question',
            name='answer_four',
            field=models.CharField(max_length=50, verbose_name='Fourth Answer'),
        ),
        migrations.AlterField(
            model_name='question',
            name='answer_four_correct',
            field=models.BooleanField(verbose_name='Fourth Answer is Correct'),
        ),
        migrations.AlterField(
            model_name='question',
            name='answer_one',
            field=models.CharField(max_length=50, verbose_name='First Answer'),
        ),
        migrations.AlterField(
            model_name='question',
            name='answer_one_correct',
            field=models.BooleanField(verbose_name='First Answer is Correct'),
        ),
        migrations.AlterField(
            model_name='question',
            name='answer_three',
            field=models.CharField(max_length=50, verbose_name='Third Answer'),
        ),
        migrations.AlterField(
            model_name='question',
            name='answer_three_correct',
            field=models.BooleanField(verbose_name='Third Answer is Correct'),
        ),
        migrations.AlterField(
            model_name='question',
            name='answer_two',
            field=models.CharField(max_length=50, verbose_name='Second Answer'),
        ),
        migrations.AlterField(
            model_name='question',
            name='answer_two_correct',
            field=models.BooleanField(verbose_name='Second Answer is Correct'),
        ),
        migrations.AlterField(
            model_name='question',
            name='game',
            field=models.ForeignKey(verbose_name='Game', to='million.Game', on_delete=models.CASCADE),
        ),
        migrations.AlterField(
            model_name='question',
            name='question',
            field=models.CharField(max_length=255, verbose_name='Question'),
        ),
        migrations.AlterField(
            model_name='question',
            name='value',
            field=models.IntegerField(verbose_name='Value'),
        ),
    ]
## Non-optimized:
#
# from sklearn import grid_search, datasets
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.grid_search import GridSearchCV
#
#
# digits = datasets.load_digits()
# X, y = digits.data, digits.target
# param_grid = {"max_depth": [3, None],
#               "max_features": [1, 3, 10],
#               "min_samples_split": [2, 3, 10],
#               "min_samples_leaf": [1, 3, 10],
#               "bootstrap": [True, False],
#               "criterion": ["gini", "entropy"],
#               "n_estimators": [10, 20, 40, 80]}
# gs = grid_search.GridSearchCV(RandomForestClassifier(), param_grid=param_grid)
# print(gs.fit(X, y))

## Spark-optimized:

print("Spark-optimized grid search:")

from sklearn import datasets
from sklearn.ensemble import RandomForestClassifier
from pyspark import SparkContext
# Use spark_sklearn's grid search instead:
from spark_sklearn import GridSearchCV

# Demo: exhaustive hyperparameter search for a random forest on the digits
# dataset, with the grid fits distributed over a Spark cluster.
digits = datasets.load_digits()
X, y = digits.data, digits.target
param_grid = {"max_depth": [3, None],
              "max_features": [1, 3, 10],
              "min_samples_split": [2, 3, 10],
              "min_samples_leaf": [1, 3, 10],
              "bootstrap": [True, False],
              "criterion": ["gini", "entropy"],
              "n_estimators": [10, 20, 40, 80]}

# BUG FIX: the original constructed sklearn's own grid_search.GridSearchCV,
# so the spark_sklearn import (and the Spark parallelism this script is
# demonstrating) was never actually used. spark_sklearn's GridSearchCV takes
# the SparkContext as its first argument and fans the candidate fits out
# over the cluster. (Also dropped the import of the long-deprecated
# sklearn.grid_search module, which is no longer needed.)
sc = SparkContext.getOrCreate()
gs = GridSearchCV(sc, RandomForestClassifier(), param_grid=param_grid)
print(gs.fit(X, y))
return None def _select_view_render(self): render = self.cfg.render _cls_view = None if render and render in self.cfg.gui_renders: _cls_view = self._try_import_render(render) if _cls_view: return _cls_view for render in self.cfg.gui_renders: _cls_view = self._try_import_render(render) if _cls_view: return _cls_view logging.getLogger('term_pylibui').error("unable to find a valid render") stop_alert("unable to find a valid render, supported render:{}".format(self.cfg.renders)) def get_application_name(self): return 'Multi-Tab Terminal Emulator in Python & pyGUI' def _create_view(self): return self._cls_view() def connect_to(self, conn_str = None, port = None, session_name = None, win = None): cfg = self.cfg.clone() if conn_str: cfg.set_conn_str(conn_str) elif session_name: cfg.session_name = session_name cfg.config_session() if port: cfg.port = port doc = self.make_new_document() doc.new_contents() doc.cfg = cfg i
f win: view = self._create_view(doc) self._create_new_tab(win, view) else: self.make_window(doc) def create_terminal(self, cfg): return TerminalPyGUI(cfg) def start(self): self.run() def open_app(self): self.connect_to() def open_window_cmd(self): self.connect_to() def make_window(self, document): view = self._create_view(document) w, h = view.get_prefered_size() win = Te
rmWindow(bounds = (0, 0, w + 10, h + 50), document = document) win.tabview = tabview = TermTabView() win.auto_position = False self._create_new_tab(win, view) win.place(tabview, left = 0, top = 0, right = 0, bottom = 0, sticky = 'nsew') win.center() win.show() view.become_target() def _remove_session_tab(self, win, view): selected_index = win.tabview.selected_index count = len(win.tabview.items) if selected_index < 0 or selected_index >= count: return win.tabview.remove_item(view) count = len(win.tabview.items) win.tabview.selected_index = -1 if count == 0: win.close_cmd() application()._check_for_no_windows() elif selected_index < count and selected_index >= 0: win.tabview.selected_index = selected_index else: win.tabview.selected_index = count - 1 def _on_session_stop(self, session): if not session.window or not session.term_widget: logging.getLogger('term_pylibui').warn('invalid session, window:{}, term_widget:{}'.format(session.window, session.term_widget)) return win = session.window view = session.term_widget self._remove_session_tab(win, view) def _create_new_tab(self, win, view): win.tabview.add_item(view) cfg = view.model.cfg session = create_session(cfg, self.create_terminal(cfg)) session.on_session_stop = self._on_session_stop session.term_widget = view session.window = win session.terminal.term_widget = view view.session = session view.tab_width = session.get_tab_width() self._session_task = Task(session.start, .1) #session.start() win.tabview.selected_index = len(win.tabview.items) - 1 def make_document(self, fileref): doc = TerminalPyGUIDoc() doc.cfg = self.cfg.clone() doc.title = 'Multi-Tab Terminal Emulator in Python & pyGUI' return doc def new_window_cmd(self): self.connect_to() def next_tab_cmd(self): self._change_cur_tab(1) def prev_tab_cmd(self): self._change_cur_tab(-1) def _change_cur_tab(self, step): win = self.get_target_window() tab_view = win.tabview count = len(tab_view.items) if count == 0: return selected_index = 0 if 
tab_view.selected_index < 0 else tab_view.selected_index new_index = selected_index + step if new_index < 0: new_index = count - 1 elif new_index >= count: new_index = 0 if new_index != selected_index: tab_view.selected_index = new_index def close_tab_cmd(self): win = self.get_target_window() tab_view = win.tabview if tab_view.selected_index < 0: return view = tab_view.items[tab_view.selected_index] if view.session.stopped: self._remove_session_tab(win, view) else: view.session.stop() def new_cmd(self): self.connect_to(win = self.get_target_window()) def open_session_cmd(self, *args): index, = args self.connect_to(session_name=self.cfg.get_session_names()[index], win=self.get_target_window()) def transfer_file_cmd(self): win = self.get_target_window() tab_view = win.tabview if tab_view.selected_index < 0: return view = tab_view.items[tab_view.selected_index] dlog = FileTransferDialog(view.session) dlog.present() class TerminalPyGUIDoc(Document): def new_contents(self): pass def read_contents(self, file): pass def write_contents(self, file): pass class TermTabView(TabView): def __init__(self, *args, **kwargs): TabView.__init__(self, *args, **kwargs) self._generic_tabbing = False def tab_changed(self, tab_index): if tab_index >= 0 and tab_index < len(self.items): v = self.items[tab_index] self.__focus_task = Task(lambda:v.become_target(), .01) class TerminalPyGUI(TerminalGUI): def __init__(self, cfg): super(TerminalPyGUI, self).__init__(cfg) def prompt_login(self, transport, username): dlog = LoginDialog(self.session, transport, username = username) dlog.present() def prompt_password(self, action): dlog = PasswordDialog(action) dlog.present() def report_error(self, msg): self.__alert_task = Task(lambda : stop_alert(msg), .001) def ask_user(self, msg): return ask(msg) def process_status_line(self, mode, status_line): TerminalGUI.process_status_line(self, mode, status_line) if status_line.startswith('PYMTERM_STATUS_CMD='): try: context = 
json.loads(status_line[len('PYMTERM_STATUS_CMD='):]) self.__status_cmd_task = Task(lambda:self.process_status_cmd(context), .01) except: logging.getLogger('term_pylibui').exception('invalid status cmd found') def process_status_cmd(self, context): if not 'ACTION' in context: logging.getLogger('term_pylibui').warn('action not found in status cmd') return action = context['ACTION'].upper() home = context['HOME'] pwd = context['PWD'] r_f = context['R_F'] global last_dir l_f = None result = None base_name = os.path.basename(r_f) if action == 'UPLOAD': result = FileDialogs.request_old_file("Choose file to upload:", default_dir = last_dir, file_types = file_types) elif action == 'DOWNLOAD': result = FileDialogs.request_new_file("Choose location to save download file:", default_dir = last_dir, default_name = base_name) else: logging.getLogger('term_pylibui').warn('action not valid:{} in status cmd'.format(action)) return if not isinstance(result, FileRef): return last_dir = result.dir l_f = result.path dlg = FileTransferProgressDialog(self.session, l_f, r_f, home,
the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_log.log as logging from pypowervm import const as pvm_const from pypowervm.tasks import scsi_mapper as tsk_map from pypowervm.tasks import storage as tsk_stg from pypowervm.wrappers import storage as pvm_stg from pypowervm.wrappers import virtual_io_server as pvm_vios from nova import conf from nova import exception from nova.image import glance from nova.virt.powervm.disk import driver as disk_dvr from nova.virt.powervm import vm LOG = logging.getLogger(__name__) CONF = conf.CONF IMAGE_API = glance.API() class LocalStorage(disk_dvr.DiskAdapter): def __init__(self, adapter, host_uuid): super(LocalStorage, self).__init__(adapter, host_uuid) self.capabilities = { 'shared_storage': False, 'has_imagecache': False, # NOTE(efried): 'snapshot' capability set dynamically below. } # Query to get the Volume Group UUID if not CONF.powervm.volume_group_name: raise exception.OptRequiredIfOtherOptValue( if_opt='disk_driver', if_value='localdisk', then_opt='volume_group_name') self.vg_name = CONF.powervm.volume_group_name vios_w, vg_w = tsk_stg.find_vg(adapter, self.vg_name) self._vios_uuid = vios_w.uuid self.vg_uuid = vg_w.uuid # Set the 'snapshot' capability dynamically. If we're hosting I/O on # the management partition, we can snapshot. If we're hosting I/O on # traditional VIOS, we are limited by the fact that a VSCSI device # can't be mapped to two partitions (the VIOS and the management) at # once. 
self.capabilities['snapshot'] = self.mp_uuid == self._vios_uuid LOG.info("Local Storage driver initialized: volume group: '%s'", self.vg_name) @property def _vios_uuids(self): """List the UUIDs of the Virtual I/O Servers hosting the storage. For localdisk, there's only one. """ return [self._vios_uuid] @staticmethod def _disk_match_func(disk_type, instance): """Return a matching function to locate the disk for an instance. :param disk_type: One of the DiskType enum values. :param instance: The instance whose disk is to be found. :return: Callable suitable for the match_func parameter of the pypowervm.tasks.scsi_mapper.find_maps method. """ disk_name = LocalStorage._get_disk_name( disk_type, instance, short=True) return tsk_map.gen_match_func(pvm_stg.VDisk, names=[disk_name]) @property def capacity(self): """Capacity of the storage in gigabytes.""" vg_wrap = self._get_vg_wrap() return float(vg_wrap.capacity) @property def capacity_used(self): """Capacity of the storage in gigabytes that is used.""" vg_wrap = self._get_vg_wrap() # Subtract available from capacity return float(vg_wrap.capacity) - float(vg_wrap.available_size) def delete_disks(self, storage_elems): """Removes the specified disks. :param storage_elems: A list of the storage elements that are to be deleted. Derived from the return value from detach_disk. """ # All of localdisk is done against the volume group. So reload # that (to get new etag) and then update against it. tsk_stg.rm_vg_storage(self._get_vg_wrap(), vdisks=storage_elems) def detach_disk(self, instance): """Detaches the storage adapters from the image disk. :param instance: Instance to disconnect the image for. :return: A list of all the backing storage elements that were disconnected from the I/O Server and VM. 
""" lpar_uuid = vm.get_pvm_uuid(instance) # Build the match function match_func = tsk_map.gen_match_func(pvm_stg.VDisk) vios_w = pvm_vios.VIOS.get( self._adapter, uuid=self._vios_uuid, xag=[pvm_const.XAG.VIO_SMAP]) # Remove the mappings. mappings = tsk_map.remove_maps( vios_w, lpar_uuid, match_func=match_func) # Update the VIOS with the removed mappings. vios_w.update() return [x.backing_storage for x in mappings] def disconnect_disk_from_mgmt(self, vios_uuid, disk_name): """Disconnect a disk from the management partition. :param vios_uuid: The UUID of the Virtual I/O Server serving the mapping. :param disk_name: The name of the disk to unmap. """ tsk_map.remove_vdisk_mapping(self._adapter, vios_uuid, self.mp_uuid, disk_names=[disk_name]) LOG.info("Unmapped boot disk %(disk_name)s from the management " "partition from Virtual I/O Server %(vios_name)s.", {'disk_name': disk_name, 'mp_uuid': self.mp_uuid, 'vios_name': vios_uuid}) def create_disk_from_image(self, context, instance, image_meta): """Creates a disk and copies the specified image to it. Cleans up the created disk if an error occurs. :param context: nova context used to retrieve image from glance :param instance: instance to create the disk for. :param image_meta: The metadata of the image of the instance. :return: The backing pypowervm storage object that was created. """ LOG.info('Create disk.', instance=instance) return self._upload_image(context, instance, image_meta) # TODO(esberglu): Copy vdisk when implementing image cache. def _upload_image(self, context, instance, image_meta): """Upload a new image. :param context: Nova context used to retrieve image from glance. :param image_meta: The metadata of the image of the instance. :return: The virtual disk containing the image. """ img_name = self._get_disk_name(disk_dvr.DiskType.BOOT, instance, short=True) # TODO(esberglu) Add check for cached image when adding imagecache. 
return tsk_stg.upload_new_vdisk( self._adapter, self._vios_uuid, self.vg_uuid, disk_dvr.IterableToFileAdapter( IMAGE_API.download(context, image_meta.id)), img_name, image_meta.size, d_size=image_meta.size, upload_type=tsk_stg.UploadType.IO_STREAM, file_for
mat=image_meta.disk_format)[0] def attach_disk(self, instance, disk_info, stg_ftsk): """Attaches the disk image to the Virtual Machine. :param instance: nova instance to connect the disk to. :param disk_info: The pypowervm storage element returned from
create_disk_from_image. Ex. VOptMedia, VDisk, LU, or PV. :param stg_ftsk: The pypowervm transaction FeedTask for the I/O Operations. The Virtual I/O Server mapping updates will be added to the FeedTask. This defers the updates to some later point in time. """ lpar_uuid = vm.get_pvm_uuid(instance) def add_func(vios_w): LOG.info("Adding logical volume disk connection to VIOS %(vios)s.", {'vios': vios_w.name}, instance=instance) mapping = tsk_map.build_vscsi_mapping( self._host_uuid, vios_w, lpar_uuid, disk_info) return tsk_map.add_map(vios_w, mapping) stg_ftsk.wrapper_tasks[self._vios_uuid].add_functor_subtask(add_func) def _get_vg_wrap(self): return pvm_stg.VG.get(self._adapter, uuid=self.vg_uuid,
########################################################################
#
# Copyright 2014 Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contact: turbulence@pha.jhu.edu
# Website: http://turbulence.pha.jhu.edu/
#
########################################################################

"""Command-line driver that exercises the pyJHTDB self-tests."""

import sys

# Drop the script's own directory from the import path so the *installed*
# pyJHTDB package is picked up rather than a source tree sitting next to
# this file.
sys.path[0] = ''

import argparse

# (flag strings, destination attribute, help text) for every selectable test.
# Registration order matches the original script's --help output.
_TEST_FLAGS = [
    (('-p', '--plain'), 'plain', 'run plain test, i.e. turbc clone.'),
    (('--grid-splines',), 'grid_splines', 'run basic grid spline test.'),
    (('--cutout',), 'cutout', 'run cutout test.'),
    (('--misc',), 'misc', 'run misc test.'),
    (('--interpolator',), 'interpolator', 'run interpolator test.'),
]

parser = argparse.ArgumentParser(
    description = 'Test pyJHTDB installation.')
for flags, dest, help_text in _TEST_FLAGS:
    parser.add_argument(
        *flags,
        dest = dest,
        action = 'store_true',
        help = help_text)
opt = parser.parse_args()

import pyJHTDB

# Run every requested test; the misc and cutout tests additionally require
# optional dependencies (matplotlib / h5py) detected at import time.
if opt.plain:
    pyJHTDB.test_plain()
if opt.grid_splines:
    pyJHTDB.test_gs()
if opt.interpolator:
    pyJHTDB.test_interpolator()
if opt.misc and pyJHTDB.found_matplotlib:
    pyJHTDB.test_misc()
if opt.cutout and pyJHTDB.found_h5py:
    pyJHTDB.test_cutout()
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data processing pipelines for lead sheets."""

import copy

from magenta.pipelines import chord_pipelines
from magenta.pipelines import melody_pipelines
from magenta.pipelines import pipeline
from magenta.pipelines import statistics
from note_seq import chord_symbols_lib
from note_seq import chords_lib
from note_seq import events_lib
from note_seq import lead_sheets_lib
from note_seq import LeadSheet
from note_seq import sequences_lib
from note_seq.protobuf import music_pb2
import tensorflow.compat.v1 as tf


class LeadSheetExtractor(pipeline.Pipeline):
    """Pipeline unit that pulls lead sheet fragments out of a NoteSequence.

    Input type is ``music_pb2.NoteSequence`` (quantized); output type is
    ``lead_sheets_lib.LeadSheet``.  All constructor arguments are simply
    forwarded to :func:`extract_lead_sheet_fragments` per transformed
    sequence; see that function for their exact meaning.
    """

    def __init__(self, min_bars=7, max_steps=512, min_unique_pitches=5,
                 gap_bars=1.0, ignore_polyphonic_notes=False,
                 filter_drums=True, require_chords=True,
                 all_transpositions=True, name=None):
        super().__init__(
            input_type=music_pb2.NoteSequence,
            output_type=lead_sheets_lib.LeadSheet,
            name=name)
        # Stored extraction parameters, used on every transform() call.
        self._min_bars = min_bars
        self._max_steps = max_steps
        self._min_unique_pitches = min_unique_pitches
        self._gap_bars = gap_bars
        self._ignore_polyphonic_notes = ignore_polyphonic_notes
        self._filter_drums = filter_drums
        self._require_chords = require_chords
        self._all_transpositions = all_transpositions

    def transform(self, input_object):
        """Extract lead sheet fragments from one quantized NoteSequence.

        Sequences that cannot be processed (non-integer steps per bar, or an
        unparsable chord symbol) are skipped with a warning and recorded as a
        statistics counter rather than raised to the caller.
        """
        quantized_sequence = input_object
        extract_kwargs = dict(
            min_bars=self._min_bars,
            max_steps_truncate=self._max_steps,
            min_unique_pitches=self._min_unique_pitches,
            gap_bars=self._gap_bars,
            ignore_polyphonic_notes=self._ignore_polyphonic_notes,
            filter_drums=self._filter_drums,
            require_chords=self._require_chords,
            all_transpositions=self._all_transpositions)
        try:
            lead_sheets, stats = extract_lead_sheet_fragments(
                quantized_sequence, **extract_kwargs)
        except events_lib.NonIntegerStepsPerBarError as detail:
            tf.logging.warning('Skipped sequence: %s', detail)
            lead_sheets = []
            stats = [statistics.Counter('non_integer_steps_per_bar', 1)]
        except chord_symbols_lib.ChordSymbolError as detail:
            tf.logging.warning('Skipped sequence: %s', detail)
            lead_sheets = []
            stats = [statistics.Counter('chord_symbol_exception', 1)]
        self._set_stats(stats)
        return lead_sheets


def extract_lead_sheet_fragments(quantized_sequence,
                                 search_start_step=0,
                                 min_bars=7,
                                 max_steps_truncate=None,
                                 max_steps_discard=None,
                                 gap_bars=1.0,
                                 min_unique_pitches=5,
                                 ignore_polyphonic_notes=True,
                                 pad_end=False,
                                 filter_drums=True,
                                 require_chords=False,
                                 all_transpositions=False):
    """Extracts a list of lead sheet fragments from a quantized NoteSequence.

    Melodies are extracted first with melody_pipelines.extract_melodies, then
    the chords underlying each melody are pulled out with
    chord_pipelines.extract_chords_for_melodies, and each (melody, chords)
    pair is combined into a LeadSheet.

    Args:
      quantized_sequence: A quantized NoteSequence object.
      search_start_step: Start searching for a melody at this time step
        (assumed to be the first step of a bar).
      min_bars: Minimum melody length in bars; shorter melodies are dropped.
      max_steps_truncate: If set, melodies longer than this many steps are
        truncated (to the last full bar below the threshold when pad_end is
        also True).
      max_steps_discard: If set, melodies longer than this many steps are
        discarded outright.
      gap_bars: A melody ends after this many bars of silence.
      min_unique_pitches: Minimum number of unique notes (octave-equivalent);
        melodies with fewer are dropped.
      ignore_polyphonic_notes: If True, melodies may be extracted from
        polyphonic tracks; if False, polyphonic tracks are skipped.
      pad_end: If True, pad each melody with NO_EVENTs to a bar boundary.
      filter_drums: If True, ignore notes with `is_drum` set.
      require_chords: If True, drop lead sheets whose progression is entirely
        NO_CHORD (counted in the 'empty_chord_progressions' statistic).
      all_transpositions: If True, also emit each fragment transposed into
        all 12 keys.

    Returns:
      A (lead_sheets, stats) tuple: a list of LeadSheet instances and a list
      of statistics objects from this function and both sub-extractors.

    Raises:
      NonIntegerStepsPerBarError: If the sequence's bar length (from its time
        signature) is not an integer number of time steps.
    """
    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
    stats = {
        'empty_chord_progressions':
            statistics.Counter('empty_chord_progressions')}
    melodies, melody_stats = melody_pipelines.extract_melodies(
        quantized_sequence, search_start_step=search_start_step,
        min_bars=min_bars, max_steps_truncate=max_steps_truncate,
        max_steps_discard=max_steps_discard, gap_bars=gap_bars,
        min_unique_pitches=min_unique_pitches,
        ignore_polyphonic_notes=ignore_polyphonic_notes, pad_end=pad_end,
        filter_drums=filter_drums)
    chord_progressions, chord_stats = (
        chord_pipelines.extract_chords_for_melodies(
            quantized_sequence, melodies))

    lead_sheets = []
    for melody, chords in zip(melodies, chord_progressions):
        if chords is None:
            # No chord progression could be extracted for this melody.
            continue
        if require_chords and all(
                chord == chords_lib.NO_CHORD for chord in chords):
            stats['empty_chord_progressions'].increment()
            continue
        lead_sheet = LeadSheet(melody, chords)
        if not all_transpositions:
            lead_sheets.append(lead_sheet)
            continue
        # Emit the fragment in all 12 keys (six down through five up).
        for amount in range(-6, 6):
            transposed = copy.deepcopy(lead_sheet)
            transposed.transpose(amount)
            lead_sheets.append(transposed)

    return lead_sheets, list(stats.values()) + melody_stats + chord_stats
from django.contrib.sites.models import Site
from django.db.utils import IntegrityError
from django.test import TestCase
from django.template import Context, Template

from simpleblocks.models import SimpleBlock


def render_to_string(template, data):
    """Render a template string against a Context built from `data`."""
    t = Template(template)
    c = Context(data)
    return t.render(c)


class SimpleBlocksTest(TestCase):
    """Tests for SimpleBlock creation and the {% get_block %} template tag."""

    def setUp(self):
        """Actions to be executed before each test"""
        self.body = 'Test Body'
        self.site = Site.objects.get_current()
        self.template = '{% load simpleblocks_tags %}{% get_block "test" %}'
        self.data = {}

    def tearDown(self):
        """Actions to be executed after each test"""
        SimpleBlock.objects.all().delete()

    def create_block(self, key='test'):
        """Helper to create a block for the current site."""
        data = {'body': self.body, 'key': key, 'site': self.site}
        return SimpleBlock.objects.create(**data)

    def testCreateBlock(self):
        """Test block creation"""
        data = {'body': self.body, 'key': 'test', 'site': self.site}
        block = SimpleBlock.objects.create(**data)
        # Use a real assertion method: a bare `assert` is stripped when
        # Python runs with -O, silently disabling the test.
        self.assertTrue(block, 'Failed to create block')

    def testRenderedStatic(self):
        """Test the tag with a static key"""
        self.create_block()
        rendered = render_to_string(self.template, self.data)
        # assertEqual, not the deprecated assertEquals alias (removed in
        # Python 3.12).
        self.assertEqual(rendered, self.body)

    def testRenderedVariable(self):
        """Test the tag with a variable key"""
        self.create_block()
        data = {'test_variable': 'test'}
        template = '{% load simpleblocks_tags %}{% get_block test_variable %}'
        rendered = render_to_string(template, data)
        self.assertEqual(rendered, self.body)

    def testFailedDuplicated(self):
        """Test failure upon duplicated key and site"""
        self.create_block()
        # A second block with the same key and site must violate the model's
        # uniqueness constraint.
        with self.assertRaises(IntegrityError):
            self.create_block()
import sys
import os.path

# Make the sibling ``testing`` directory importable: resolve it relative to
# this file's absolute location and append the normalized path to sys.path.
this_file_dir = os.path.dirname(os.path.abspath(__file__))
_testing_dir = os.path.normpath(os.path.join(this_file_dir, 'testing'))
sys.path.append(_testing_dir)
e. """ import sys import os import re import traceback import signal import time import logging import getpass try: import autotest.common as common except ImportError: import common from autotest.client.shared.settings import settings require_atfork = settings.get_value('AUTOSERV', 'require_atfork_module', type=bool, default=True) try: import atfork atfork.monkeypatch_os_fork_functions() import atfork.stdlib_fixer # Fix the Python standard library for threading+fork safety with its # internal locks. http://code.google.com/p/python-atfork/ import warnings warnings.filterwarnings('ignore', 'logging module already imported') try: atfork.stdlib_fixer.fix_logging_module() except Exception: pass except ImportError, e: from autotest.client.shared.settings import settings if settings.get_value('AUTOSERV', 'require_atfork_module', type=bool, default=False): print >>sys.stderr, 'Please run utils/build_externals.py' print e sys.exit(1) from autotest.server import server_logging_config from autotest.server import server_job, autoserv_parser from autotest.server import autotest_remote from autotest.client.shared import pidfile, logging_manager def run_autoserv(pid_file_manager, results, parser): # send stdin to /dev/null dev_null = os.open(os.devnull, os.O_RDONLY) os.dup2(dev_null, sys.stdin.fileno()) os.close(dev_null) # Create separate process group os.setpgrp() # Implement SIGTERM handler def handle_sigterm(signum, frame): if pid_file_manager: pid_file_manager.close_file(1, signal.SIGTERM) os.killpg(os.getpgrp(), signal.SIGKILL) # Set signal handler signal.signal(signal.SIGTERM, handle_sigterm) # Ignore SIGTTOU's generated by output from forked children. signal.signal(signal.SIGTTOU, signal.SIG_IGN) # Server side tests that call shell scripts often depend on $USER being set # but depending on how you launch your autotest scheduler it may not be set. 
os.environ['USER'] = getpass.getuser() if parser.options.machines: machines = parser.options.machines.replace(',', ' ').strip().split() else: machines = [] machines_file = parser.options.machines_file label = parser.options.label group_name = parser.options.group_name user = parser.options.user client = parser.options.client server = parser.options.server install_before = parser.options.install_before install_after = parser.options.install_after verify = parser.options.verify repair = parser.options.repair cleanup = parser.options.cleanup no_tee = parser.options.no_tee parse_job = parser.options.parse_job execution_tag = parser.options.execution_tag if not execution_tag: execution_tag = parse_job host_protection = parser.options.host_protection ssh_user = parser.options.ssh_user ssh_port = parser
.options.ssh_port ssh_pass = parser.options.ssh_pass collect_crashinfo = parser.options.collect_crashinfo control_filename = parser.options.control_filename # can't be both a client and a server side test if client and server: parser.parser.error("Can not specify a test as both server and client!") if len(parser.args) < 1 and not (verify or repair or cleanup or collect_crashinfo):
parser.parser.error("Missing argument: control file") # We have a control file unless it's just a verify/repair/cleanup job if len(parser.args) > 0: control = parser.args[0] else: control = None if machines_file: machines = [] for m in open(machines_file, 'r').readlines(): # remove comments, spaces m = re.sub('#.*', '', m).strip() if m: machines.append(m) print "Read list of machines from file: %s" % machines_file print ','.join(machines) if machines: for machine in machines: if not machine or re.search('\s', machine): parser.parser.error("Invalid machine: %s" % str(machine)) machines = list(set(machines)) machines.sort() if group_name and len(machines) < 2: parser.parser.error("-G %r may only be supplied with more than one machine." % group_name) kwargs = {'group_name': group_name, 'tag': execution_tag} if control_filename: kwargs['control_filename'] = control_filename job = server_job.server_job(control, parser.args[1:], results, label, user, machines, client, parse_job, ssh_user, ssh_port, ssh_pass, **kwargs) job.logging.start_logging() job.init_parser() # perform checks job.precheck() # run the job exit_code = 0 try: try: if repair: job.repair(host_protection) elif verify: job.verify() else: job.run(cleanup, install_before, install_after, only_collect_crashinfo=collect_crashinfo) finally: while job.hosts: host = job.hosts.pop() host.close() except: exit_code = 1 traceback.print_exc() if pid_file_manager: pid_file_manager.num_tests_failed = job.num_tests_failed pid_file_manager.close_file(exit_code) job.cleanup_parser() sys.exit(exit_code) def main(): # grab the parser parser = autoserv_parser.autoserv_parser parser.parse_args() if len(sys.argv) == 1: parser.parser.print_help() sys.exit(1) if parser.options.no_logging: results = None else: output_dir = settings.get_value('COMMON', 'test_output_dir', default="") results = parser.options.results if not results: results = 'results.' 
+ time.strftime('%Y-%m-%d-%H.%M.%S') if output_dir: results = os.path.join(output_dir, results) results = os.path.abspath(results) resultdir_exists = False for filename in ('control.srv', 'status.log', '.autoserv_execute'): if os.path.exists(os.path.join(results, filename)): resultdir_exists = True if not parser.options.use_existing_results and resultdir_exists: error = "Error: results directory already exists: %s\n" % results sys.stderr.write(error) sys.exit(1) # Now that we certified that there's no leftover results dir from # previous jobs, lets create the result dir since the logging system # needs to create the log file in there. if not os.path.isdir(results): os.makedirs(results) logging_manager.configure_logging( server_logging_config.ServerLoggingConfig(), results_dir=results, use_console=not parser.options.no_tee, verbose=parser.options.verbose, no_console_prefix=parser.options.no_console_prefix) if results: logging.info("Results placed in %s" % results) # wait until now to perform this check, so it get properly logged if parser.options.use_existing_results and not resultdir_exists: logging.error("No existing results directory found: %s", results) sys.exit(1) if parser.options.write_pidfile: pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label, results) pid_file_manager.open_file() else: pid_file_manager = None autotest_remote.BaseAutotest.set_install_in_tmpdir( parser.options.install_in_tmpdir) exit_code = 0 try: try: run_autoserv(pid_file_manager, results, parser) except SystemExit, e: exit_code = e.code except: traceback.print_exc() # If we don't know what happened, we'll classify it as # an 'abort' and return 1.
toy_data_source = np.transpose(toy_data_source) # Now take the target from the whole dataset again stratified toy_data_target, _, true_toy_labels_target, _ = \ train_test_split( np.transpose(toy_data), true_toy_labels, test_size=toy_data.shape[1] - target_ncells, stratify=true_toy_labels ) toy_data_target = np.transpose(toy_data_target) elif mode == 7: ''' In splitting mode 7, the data are splitted as follows: common: number of shared clusters nsrc: number of source clusters ntrg: number of target clusters cluster_spec: = None: no subcluster structures = [1,2,[3,4],[5,6,7],8]: indicate the subcluster structure. The first level cluster structures are taken as one cluster - here for instance we would have 5 clusters: [1,2,3,4,5], where cluster 3 automatically involve the subcluster 3 and 4 from the original cluster_spec ''' if cluster_spec == None: if
min(true_toy_labels) == 1: true_toy_labels = true_toy_labels - 1 go_back_flag = True
nclusters, counts = np.unique(true_toy_labels, return_counts=True) ntrg = np.int(np.floor((len(nclusters) + common) / 2.)) nsrc = len(nclusters) - ntrg + common assert (nsrc + ntrg - common <= len(nclusters)) Cidx = np.random.choice(nclusters, common, False) if not nsrc - common == 0: Sidx = np.random.choice(np.setdiff1d(nclusters, Cidx), nsrc - common, False) # Indices of exclusive source clusters if not ntrg - common == 0: Tidx = np.random.choice(np.setdiff1d(nclusters, np.union1d(Sidx, Cidx)), ntrg - common, False) # Indices of exclusive target clusters else: Tidx = [] else: Sidx = [] Tidx = [] while not (sum(counts[Sidx.astype(int)]) >= source_ncells): Sidx = np.append(Sidx, Tidx[0]) Tidx = np.delete(Tidx, 0) if (sum(counts[Tidx.astype(int)]) <= target_ncells): target_ncells = sum(counts[Tidx.astype(int)]) ntrg = len(Tidx) nsrc = len(Sidx) else: # print cluster_spec nclusters = np.arange(len(cluster_spec)) # compute cluster dependence for the first level cluster structure # print nclusters ntrg = np.int(np.floor((len(nclusters) + common) / 2.)) # number of clusters in target nsrc = len(nclusters) - ntrg + common # number of clusters in source assert (nsrc + ntrg - common <= len(nclusters)) Cidx = np.random.choice(nclusters, common, False) # Indices of common clusters, chosen at random if not nsrc - common == 0: Sidx = np.random.choice(np.setdiff1d(nclusters, Cidx), nsrc - common, False) # Indices of exclusive source clusters if not ntrg - common == 0: Tidx = np.random.choice(np.setdiff1d(nclusters, np.union1d(Sidx, Cidx)), ntrg - common, False) # Indices of exclusive target clusters else: Tidx = [] else: Sidx = [] Tidx = [] # np.concatenate((np.array(Cidx).copy(),np.random.choice(np.setdiff1d(nclusters,Sidx),ntrg-common)),axis=0) Cidx = flatten([cluster_spec[c] for c in Cidx]) if ntrg > common: Tidx = flatten([cluster_spec[c] for c in Tidx]) else: Tidx = [] if nsrc > common: Sidx = flatten([cluster_spec[c] for c in Sidx]) else: Sidx = [] # excl_t_cells = 
sum(np.in1d(true_toy_labels, Tidx)) # excl_s_cells = sum(np.in1d(true_toy_labels,Sidx)) # excl_c_cells = sum(np.in1d(true_toy_labels, Cidx)) ## Make sure that the clusters have enough cells to fill up source or target data # try: # assert (source_ncells <= toy_data_source.shape[1]) # except AssertionError: # print("There aren't enough cells in the source clusters. Raise ncells") # sys.exit() ''' get shared cluster split for source and target data ''' if len(Cidx) > 0: shared_idx = np.in1d(true_toy_labels, Cidx) # shared_trg_size = target_ncells - sum(np.in1d(true_toy_labels,Tidx)) shared_trg_size = int(target_ncells * float(len(Cidx)) / (len(Tidx) + len(Cidx))) # shared_src_size = source_ncells - sum(np.in1d(true_toy_labels,Sidx)) # shared_src_size = int(source_ncells * float(len(Cidx)) / (len(Sidx) + len(Cidx))) if shared_trg_size + shared_src_size >= sum(shared_idx): to_take_away = np.ceil((shared_trg_size + shared_src_size - sum(shared_idx)) / 2) + 1 shared_trg_size = np.int(shared_trg_size - to_take_away) shared_src_size = np.int(shared_src_size - to_take_away) data_shared_target, data_shared_source, labels_shared_target, labels_shared_source = train_test_split( toy_data[:, shared_idx].transpose(), np.array(true_toy_labels)[shared_idx], train_size=shared_trg_size, test_size=shared_src_size) else: data_shared_target, data_shared_source, labels_shared_target, labels_shared_source = [], [], [], [] shared_trg_size = 0 shared_src_size = 0 ''' get cluster split for target data ''' if ntrg > common: trg_idx = np.in1d(true_toy_labels, Tidx) add_trg_size = int(target_ncells - shared_trg_size) if add_trg_size == sum(trg_idx): toy_data_target = toy_data[:, trg_idx].transpose() true_toy_labels_target = np.array(true_toy_labels)[trg_idx] else: if add_trg_size > sum(trg_idx): pdb.set_trace() toy_data_target, _, true_toy_labels_target, _ = train_test_split(toy_data[:, trg_idx].transpose(), np.array(true_toy_labels)[trg_idx], train_size=add_trg_size, test_size=0) if 
shared_trg_size != 0: toy_data_target = np.concatenate((data_shared_target, toy_data_target)) true_toy_labels_target = np.concatenate((labels_shared_target, true_toy_labels_target)) else: toy_data_target = data_shared_target true_toy_labels_target = labels_shared_target ''' get cluster split for source data ''' if nsrc > common: src_idx = np.in1d(true_toy_labels, Sidx) add_src_size = int(source_ncells - shared_src_size) if add_src_size > sum(src_idx): pdb.set_trace() elif add_src_size == sum(src_idx): toy_data_source = toy_data[:, src_idx].transpose() true_toy_labels_source = np.array(true_toy_labels)[src_idx] else: toy_data_source, _, true_toy_labels_source, _ = train_test_split(toy_data[:, src_idx].transpose(), np.array(true_toy_labels)[src_idx], train_size=add_src_size, test_size=0) if shared_src_size != 0: toy_data_source = np.concatenate((data_shared_source, toy_data_source)) true_toy_labels_source = np.concatenate((labels_shared_source, true
base from apache_beam.transforms.core import PTransform from apache_beam.utils.pipeline_options import PipelineOptions # Defining the new sink. # [START model_custom_sink_new_sink] class SimpleKVSink(iobase.Sink): def __init__(self, url, final_table_name): self._url = url self._final_table_name = final_table_name def initialize_write(self): access_token = simplekv.connect(self._url) return access_token def open_writer(self, access_token, uid): table_name = 'table' + uid return SimpleKVWriter(access_token, table_name) def finalize_write(self, access_token, table_names): for i, table_name in enumerate(table_names): simplekv.rename_table( access_token, table_name, self._final_table_name + str(i)) # [END model_custom_sink_new_sink] # Defining a writer for the new sink. # [START model_custom_sink_new_writer] class SimpleKVWriter(iobase.Writer): def __init__(self, access_token, table_name): self._access_token = access_token self._table_name = table_name self._table = simplekv.open_table(access_token, table_name) def write(self, record): key, value = record simplekv.write_to_table(self._access_token, self._table, key, value) def close(self): return self._table_name # [END model_custom_sink_new_writer] final_table_name = final_table_name_no_ptransform # Using the new sink in an example pipeline. # [START model_custom_sink_use_new_sink] p = beam.Pipeline(options=PipelineOptions()) kvs = p | 'CreateKVs' >> beam.Create(KVs) kvs | 'WriteToSimpleKV' >> beam.io.Write( SimpleKVSink('http://url_to_simple_kv/', final_table_name)) # [END model_custom_sink_use_new_sink] p.run().wait_until_finish() # We recommend users to start Sink class names with an underscore to # discourage using the Sink class directly when a PTransform for the sink is # available. We simulate that here by simply extending the previous Sink # class. 
class _SimpleKVSink(SimpleKVSink): pass # [START model_custom_sink_new_ptransform] class WriteToKVSink(PTransform): def __init__(self, url, final_table_name, **kwargs): super(WriteToKVSink, self).__init__(**kwargs) self._url = url self._final_table_name = final_table_name def expand(self, pcoll): return pcoll | iobase.Write(_SimpleKVSink(self._url, self._final_table_name)) # [END model_custom_sink_new_ptransform] final_table_name = final_table_name_with_ptransform # [START model_custom_sink_use_ptransform] p = beam.Pipeline(options=PipelineOptions()) kvs = p | 'CreateKVs' >> beam.core.Create(KVs) kvs | 'WriteToSimpleKV' >> WriteToKVSink( 'http://url_to_simple_kv/', final_table_name) # [END model_custom_sink_use_ptransform] p.run().wait_until_finish() def model_textio(renames): """Using a Read and Write transform to read/write text files.""" def filter_words(x): import re return re.findall(r'[A-Za-z\']+', x) import apache_beam as beam from apache_beam.utils.pipeline_options import PipelineOptions # [START model_textio_read] p = beam.Pipeline(op
tions=PipelineOptions()) # [START model_pipelineio_read] lines = p | 'ReadFromText'
>> beam.io.ReadFromText('path/to/input-*.csv') # [END model_pipelineio_read] # [END model_textio_read] # [START model_textio_write] filtered_words = lines | 'FilterWords' >> beam.FlatMap(filter_words) # [START model_pipelineio_write] filtered_words | 'WriteToText' >> beam.io.WriteToText( '/path/to/numbers', file_name_suffix='.csv') # [END model_pipelineio_write] # [END model_textio_write] p.visit(SnippetUtils.RenameFiles(renames)) p.run().wait_until_finish() def model_textio_compressed(renames, expected): """Using a Read Transform to read compressed text files.""" p = TestPipeline() # [START model_textio_write_compressed] lines = p | 'ReadFromText' >> beam.io.ReadFromText( '/path/to/input-*.csv.gz', compression_type=beam.io.fileio.CompressionTypes.GZIP) # [END model_textio_write_compressed] beam.assert_that(lines, beam.equal_to(expected)) p.visit(SnippetUtils.RenameFiles(renames)) p.run().wait_until_finish() def model_datastoreio(): """Using a Read and Write transform to read/write to Cloud Datastore.""" import uuid from google.cloud.proto.datastore.v1 import entity_pb2 from google.cloud.proto.datastore.v1 import query_pb2 import googledatastore import apache_beam as beam from apache_beam.utils.pipeline_options import PipelineOptions from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore project = 'my_project' kind = 'my_kind' query = query_pb2.Query() query.kind.add().name = kind # [START model_datastoreio_read] p = beam.Pipeline(options=PipelineOptions()) entities = p | 'Read From Datastore' >> ReadFromDatastore(project, query) # [END model_datastoreio_read] # [START model_datastoreio_write] p = beam.Pipeline(options=PipelineOptions()) musicians = p | 'Musicians' >> beam.Create( ['Mozart', 'Chopin', 'Beethoven', 'Vivaldi']) def to_entity(content): entity = entity_pb2.Entity() googledatastore.helper.add_key_path(entity.key, kind, str(uuid.uuid4())) 
googledatastore.helper.add_properties(entity, {'content': unicode(content)}) return entity entities = musicians | 'To Entity' >> beam.Map(to_entity) entities | 'Write To Datastore' >> WriteToDatastore(project) # [END model_datastoreio_write] def model_bigqueryio(): """Using a Read and Write transform to read/write to BigQuery.""" import apache_beam as beam from apache_beam.utils.pipeline_options import PipelineOptions # [START model_bigqueryio_read] p = beam.Pipeline(options=PipelineOptions()) weather_data = p | 'ReadWeatherStations' >> beam.io.Read( beam.io.BigQuerySource( 'clouddataflow-readonly:samples.weather_stations')) # [END model_bigqueryio_read] # [START model_bigqueryio_query] p = beam.Pipeline(options=PipelineOptions()) weather_data = p | 'ReadYearAndTemp' >> beam.io.Read( beam.io.BigQuerySource( query='SELECT year, mean_temp FROM samples.weather_stations')) # [END model_bigqueryio_query] # [START model_bigqueryio_query_standard_sql] p = beam.Pipeline(options=PipelineOptions()) weather_data = p | 'ReadYearAndTemp' >> beam.io.Read( beam.io.BigQuerySource( query='SELECT year, mean_temp FROM `samples.weather_stations`', use_standard_sql=True)) # [END model_bigqueryio_query_standard_sql] # [START model_bigqueryio_schema] schema = 'source:STRING, quote:STRING' # [END model_bigqueryio_schema] # [START model_bigqueryio_write] quotes = p | beam.Create( [{'source': 'Mahatma Ghandi', 'quote': 'My life is my message.'}]) quotes | 'Write' >> beam.io.Write( beam.io.BigQuerySink( 'my-project:output.output_table', schema=schema, write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE, create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED)) # [END model_bigqueryio_write] def model_composite_transform_example(contents, output_path): """Example of a composite transform. To declare a composite transform, define a subclass of PTransform. 
To override the apply method, define a method "apply" that takes a PCollection as its only parameter and returns a PCollection. """ import re import apache_beam as beam # [START composite_transform_example] # [START composite_ptransform_apply_method] # [START composite_ptransform_declare] class CountWords(beam.PTransform): # [END composite_ptransform_declare] def expand(self, pcoll): return (pcoll | beam.FlatMap(lambda x: re.findall(r'\w+', x)) | beam.combiners.Count.PerElement() | beam.Map(lambda (word, c): '%s: %s' % (word, c))) # [END composite_ptransform_apply_method] # [END composite_transform_example] p = TestPi
"""Packaging script for the hzkgelastic2-doc-manager distribution."""

try:
    from setuptools import setup
except ImportError:
    # Bootstrap setuptools when it is not installed, then retry the import.
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup

import sys

# Requirements needed only when running the test suite.
tests_require = ["mongo-orchestration >= 0.2, < 0.4", "requests >= 2.5.1"]
test_suite = "tests"

if sys.version_info[:2] == (2, 6):
    # Need unittest2 to run unittests in Python 2.6
    tests_require.append("unittest2")
    test_suite = "unittest2.collector"

try:
    with open("README.rst", "r") as readme:
        long_description = readme.read()
except IOError:
    # Install without README.rst
    long_description = None

# Trove classifiers describing the supported platforms and audience.
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: Apache Software License",
    "Programming Language :: Python :: 2.6",
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3.3",
    "Programming Language :: Python :: 3.4",
    "Topic :: Database",
    "Topic :: Software Development :: Libraries :: Python Modules",
    "Operating System :: Unix",
    "Operating System :: MacOS :: MacOS X",
    "Operating System :: Microsoft :: Windows",
    "Operating System :: POSIX",
]

setup(
    name='hzkgelastic2-doc-manager',
    version='0.2.1.dev0',
    maintainer='mongodb',
    description='Elastic2 plugin for mongo-connector',
    long_description=long_description,
    platforms=['any'],
    author='anna herlihy',
    author_email='mongodb-user@googlegroups.com',
    url='https://github.com/mongodb-labs/hzkgelastic2-doc-manager',
    install_requires=['mongo-connector >= 2.3.0',
                      "elasticsearch>=2.0.0,<3.0.0"],
    packages=["mongo_connector", "mongo_connector.doc_managers"],
    extras_require={'aws': ['boto3 >= 1.4.0', 'requests-aws-sign >= 0.1.1']},
    license="Apache License, Version 2.0",
    classifiers=classifiers,
    keywords=['mongo-connector', "mongodb", "elastic", "elasticsearch"],
    test_suite=test_suite,
    tests_require=tests_require,
)
import os

# NOTE(review): `qmpy` star-import supplies read_elements, Miedema,
# Composition, INSTALL_PATH and the `io` submodule used below — verify
# against the qmpy package; none are defined in this file.
from qmpy import *
from django.test import TestCase

# Module-level scratch list; unused in the visible tests.
peak_locations = []


class MiedemaTestCase(TestCase):
    """Tests for the Miedema formation-energy model."""

    def setUp(self):
        # Populate the element data the Miedema model needs.
        read_elements()

    def test_methods(self):
        ## test that it generally works
        self.assertEqual(Miedema("FeNi").energy, -0.03)
        # Repeated call checks the result is stable across invocations.
        self.assertEqual(Miedema("FeNi").energy, -0.03)
        # Miedema accepts a Composition object as well as a formula string.
        c = Composition.get("LiBe")
        self.assertEqual(Miedema(c).energy, -0.08)
        # ... and a {element: amount} dict.
        self.assertEqual(Miedema({"Pt": 1, "Ti": 3}).energy, -0.76)
        ## test that non-metals are ignored
        self.assertEqual(Miedema("Fe2O3").energy, None)
        ## test that it is quantity invariant
        self.assertEqual(Miedema("Fe5Ni5").energy, -0.03)


class PDFTestCase(TestCase):
    """Placeholder for pair-distribution-function tests."""

    def test_distances(self):
        # TODO: no assertions implemented yet.
        pass


class NearestNeighborTestCase(TestCase):
    """Tests for nearest-neighbor detection on FCC/BCC/SC lattices."""

    def setUp(self):
        read_elements()
        # Reference POSCAR structures shipped with the package.
        sample_files_loc = os.path.join(INSTALL_PATH, "io", "files")
        self.fcc = io.poscar.read(os.path.join(sample_files_loc, "POSCAR_FCC"))
        self.bcc = io.poscar.read(os.path.join(sample_files_loc, "POSCAR_BCC"))
        self.sc = io.poscar.read(os.path.join(sample_files_loc, "POSCAR_SC"))

    def test_heuristic(self):
        # Default (heuristic) method: expect the textbook coordination
        # numbers — FCC 12, BCC 8, SC 6.
        self.fcc.find_nearest_neighbors()
        self.assertEqual(len(self.fcc[0].neighbors), 12)
        self.bcc.find_nearest_neighbors()
        self.assertEqual(len(self.bcc[0].neighbors), 8)
        self.sc.find_nearest_neighbors()
        self.assertEqual(len(self.sc[0].neighbors), 6)

    def test_voronoi(self):
        self.fcc.find_nearest_neighbors(method="voronoi")
        self.assertEqual(len(self.fcc[0].neighbors), 12)
        # Plain Voronoi counts the 6 second-shell faces for BCC (8 + 6 = 14);
        # with a tolerance of 5 the small faces are discarded, leaving 8.
        self.bcc.find_nearest_neighbors(method="voronoi")
        self.assertEqual(len(self.bcc[0].neighbors), 14)
        self.bcc.find_nearest_neighbors(method="voronoi", tol=5)
        self.assertEqual(len(self.bcc[0].neighbors), 8)
        self.sc.find_nearest_neighbors(method="voronoi")
        self.assertEqual(len(self.sc[0].neighbors), 6)
""" Redis check tests. """ import logging import os import unittest import subprocess import time import pprint import redis from tests.common import load_check from nose.plugins.skip import SkipTest logger = logging.getLogger() MAX_WAIT = 20 NOAUTH_PORT = 16379 AUTH_PORT = 26379 DEFAULT_PORT = 6379 MISSING_KEY_TOLERANCE = 0.5 class TestRedis(unittest.TestCase): def is_travis(self): global logger logger.info("Running on travis-ci") return "TRAVIS" in os.environ def wait4(self, p, pattern): """Waits until a specific pattern shows up in th
e stdout """ out = p.stdout
loop = 0 while True: l = out.readline() if l.find(pattern) > -1: break else: time.sleep(0.1) loop += 1 if loop >= MAX_WAIT: break def setUp(self): raise SkipTest("Requires Redis installed") if not self.is_travis(): self.redis_noauth = subprocess.Popen( ["redis-server", "tests/redisnoauth.cfg"], stdout=subprocess.PIPE) self.wait4(self.redis_noauth, "The server is now ready to accept connections") self.redis_auth = subprocess.Popen( ["redis-server", "tests/redisauth.cfg"], stdout=subprocess.PIPE) self.wait4(self.redis_auth, "The server is now ready to accept connections") def tearDown(self): if not self.is_travis(): self.redis_noauth.terminate() self.redis_auth.terminate() def test_redis_auth(self): # Test connection with password if not self.is_travis(): # correct password r = load_check('redisdb', {}, {}) instance = { 'host': 'localhost', 'port': AUTH_PORT, 'password': 'datadog-is-devops-best-friend' } r.check(instance) metrics = self._sort_metrics(r.get_metrics()) assert len(metrics) > 0, "No metrics returned" # wrong passwords instances = [ { 'host': 'localhost', 'port': AUTH_PORT, 'password': '' }, { 'host': 'localhost', 'port': AUTH_PORT, 'password': 'badpassword' } ] for instance in instances: r = load_check('redisdb', {}, {}) r.check(instance) metrics = self._sort_metrics(r.get_metrics()) assert len( metrics) == 0, "Should have failed with bad password; got %s instead" % metrics def test_redis_default(self): # Base test, uses the noauth instance if self.is_travis(): port = DEFAULT_PORT else: port = NOAUTH_PORT instance = { 'host': 'localhost', 'port': port } db = redis.Redis(port=port, db=14) # Datadog's test db db.flushdb() db.set("key1", "value") db.set("key2", "value") db.setex("expirekey", "expirevalue", 1000) r = load_check('redisdb', {}, {}) r.check(instance) metrics = self._sort_metrics(r.get_metrics()) assert metrics, "No metrics returned" # Assert we have values, timestamps and dimensions for each metric. 
for m in metrics: assert isinstance(m[1], int) # timestamp assert isinstance(m[2], (int, float, long)) # value dimensions = m[3]["dimensions"] expected_dimensions = {"redis_host": "localhost", "redis_port": port} for e in expected_dimensions: assert e in dimensions def assert_key_present(expected, present, tolerance): "Assert we have the rest of the keys (with some tolerance for missing keys)" e = set(expected) p = set(present) assert len(e - p) < tolerance * len(e), pprint.pformat((p, e - p)) # gauges collected? remaining_keys = [m[0] for m in metrics] expected = r.GAUGE_KEYS.values() assert_key_present(expected, remaining_keys, MISSING_KEY_TOLERANCE) # Assert that the keys metrics are tagged by db. just check db0, since # it's the only one we can guarantee is there. db_metrics = self._sort_metrics( [m for m in metrics if m[0] in ['redis.keys', 'redis.expires'] and "redis_db:db14" in m[3]["dimensions"]]) self.assertEqual(2, len(db_metrics)) self.assertEqual('redis.expires', db_metrics[0][0]) self.assertEqual(1, db_metrics[0][2]) self.assertEqual('redis.keys', db_metrics[1][0]) self.assertEqual(3, db_metrics[1][2]) # Run one more check and ensure we get total command count # and other rates time.sleep(5) r.check(instance) metrics = self._sort_metrics(r.get_metrics()) keys = [m[0] for m in metrics] assert 'redis.net.commands' in keys def _sort_metrics(self, metrics): def sort_by(m): return m[0], m[1], m[3] return sorted(metrics, key=sort_by) if __name__ == "__main__": unittest.main()
"""CRUD views for the hopes app, built on django-vanilla-views.

Each model (Student, School, OneTime, SpecificDateTime) gets the same
four views: list, create, edit, delete.  All mutating views redirect to
the corresponding list view on success.
"""
from django.core.urlresolvers import reverse_lazy
from django.views.generic import View
# Kept from the original mid-file import block; render_to_response,
# RequestContext and loader are currently unused in this module but are
# retained in case other parts of the project import them from here.
from django.shortcuts import render_to_response, render
from django.template import RequestContext, loader

from hopes.forms import StudentForm, SchoolForm, OneTimeForm, SpecificDateTimeForm
from hopes.models import Student, School, OneTime, SpecificDateTime
from vanilla import CreateView, DeleteView, ListView, UpdateView


# --- Student views -----------------------------------------------------------

class ListStudents(ListView):
    model = Student


class CreateStudent(CreateView):
    model = Student
    form_class = StudentForm
    success_url = reverse_lazy('list_students')


class EditStudent(UpdateView):
    model = Student
    form_class = StudentForm
    success_url = reverse_lazy('list_students')


class DeleteStudent(DeleteView):
    model = Student
    success_url = reverse_lazy('list_students')


# --- School views ------------------------------------------------------------

class ListSchools(ListView):
    model = School


class CreateSchool(CreateView):
    model = School
    form_class = SchoolForm
    success_url = reverse_lazy('list_schools')


class EditSchool(UpdateView):
    model = School
    form_class = SchoolForm
    success_url = reverse_lazy('list_schools')


class DeleteSchool(DeleteView):
    model = School
    success_url = reverse_lazy('list_schools')


# --- OneTime views -----------------------------------------------------------

class ListOneTime(ListView):
    model = OneTime


class CreateOneTime(CreateView):
    model = OneTime
    form_class = OneTimeForm
    success_url = reverse_lazy('list_onetime')


class EditOneTime(UpdateView):
    model = OneTime
    form_class = OneTimeForm
    success_url = reverse_lazy('list_onetime')


class DeleteOneTime(DeleteView):
    model = OneTime
    success_url = reverse_lazy('list_onetime')


# --- SpecificDateTime views --------------------------------------------------

class ListSpecDateTime(View):
    # NOTE(review): django.views.generic.View dispatches to get()/post(),
    # not index(); confirm how this view is wired up in urls.py.
    def index(self, request):
        # Fixed: the original also loaded the template via
        # loader.get_template() into an unused local before calling
        # render() with the template name — a redundant second load.
        context = {
            'spec_list': 'Test my string',
        }
        return render(request, 'hopes/simple_spec.html', context)


class CreateSpecDateTime(CreateView):
    model = SpecificDateTime
    form_class = SpecificDateTimeForm
    success_url = reverse_lazy('list_spec_time')


class EditSpecDateTime(UpdateView):
    model = SpecificDateTime
    form_class = SpecificDateTimeForm
    success_url = reverse_lazy('list_spec_time')


class DeleteSpecDateTime(DeleteView):
    model = SpecificDateTime
    success_url = reverse_lazy('list_spec_time')
# -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2016 CERN.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.

"""B2Share Schemas module Command Line Interface."""

from __future__ import absolute_import

import json
import os
import sys

# NOTE(review): os, sys and sqlalchemy.exists appear unused in this block;
# they may be used elsewhere in the full module.
from sqlalchemy import exists, or_

import click
from flask.cli import with_appcontext
from invenio_db import db
from invenio_files_rest.models import Location


@click.group()
def files():
    """Files management commands."""


@files.command('add-location')
@with_appcontext
@click.argument('name')
@click.argument('uri')
@click.option('-d', '--default', is_flag=True, default=False,
              help='Use this location as the default location.')
def add_location(name, uri, default):
    """Add a file storage location.

    The URI should point to the location where the files will be stored.
    The NAME will be used to reference this location.

    Fails with a BadParameter error when a location with the same name or
    the same uri already exists.
    """
    # Find any existing location clashing on either name or uri.
    matching_locations = Location.query.filter(
        or_(Location.uri == uri, Location.name == name)).all()
    if len(matching_locations) > 0:
        # Report the name clash if the first match is a name clash,
        # otherwise report the uri clash.
        if matching_locations[0].name == name:
            raise click.BadParameter(
                'Another location with the same name already exists.')
        else:
            raise click.BadParameter(
                'Existing location "{}" has the same uri.'.format(
                    matching_locations[0].name))
    if default:
        # Demote any current default so only one default location exists.
        # (`== True` is intentional: it is a SQLAlchemy column expression.)
        db.session.query(Location).filter(Location.default == True).update(
            {Location.default: False})
    location = Location(name=name, uri=uri, default=default)
    db.session.add(location)
    db.session.commit()


@files.command('set-default-location')
@with_appcontext
@click.argument('name')
def set_default_location(name):
    """Change the default file storage location.

    The NAME should point to an existing location.

    Raises sqlalchemy NoResultFound (via .one()) when NAME does not exist.
    """
    location = Location.query.filter(Location.name == name).one()
    # Already the default: nothing to do.
    if location.default:
        return
    # Demote the previous default, then promote the requested location.
    db.session.query(Location).filter(Location.default == True).update(
        {Location.default: False})
    location.default = True
    db.session.commit()


@files.command('list-locations')
@with_appcontext
def list_location():
    """List all file storage locations.

    Prints one JSON object per location (default location(s) first, then
    alphabetical by name), with all column values rendered as strings.
    """
    locations = Location.query.order_by(Location.default.desc(),
                                        Location.name).all()
    for location in locations:
        click.echo(json.dumps({c.name: str(getattr(location, c.name))
                               for c in Location.__table__.columns},
                              sort_keys=True))
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
#
# NOTE(review): this file is auto-generated by catkin; edit the template
# above rather than this file.  It prepends the devel-space source dirs to
# sys.path, extends the package __path__, and executes any source-space
# module/package of the same name so the devel package mirrors the sources.

from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path

# Semicolon-separated list of source directories baked in at generation time.
__extended_path = "/home/pi/Documents/desenvolvimentoRos/src/tf2_ros/src".split(";")
for p in reversed(__extended_path):
    sys_path.insert(0, p)
    del p
del sys_path

# Make this package's __path__ span all matching directories on sys.path.
__path__ = extend_path(__path__, __name__)
del extend_path

# Collect the source-space counterpart of this package: either a module
# file "<name>.py" or a package "<name>/__init__.py" in each source dir.
__execfiles = []
for p in __extended_path:
    src_init_file = os_path.join(p, __name__ + '.py')
    if os_path.isfile(src_init_file):
        __execfiles.append(src_init_file)
    else:
        src_init_file = os_path.join(p, __name__, '__init__.py')
        if os_path.isfile(src_init_file):
            __execfiles.append(src_init_file)
    del src_init_file
del p
del os_path
del __extended_path

# Execute each collected file in this module's namespace, cleaning up the
# loop variables afterwards to keep the symbol table minimal.
for __execfile in __execfiles:
    with open(__execfile, 'r') as __fh:
        exec(__fh.read())
    del __fh
del __execfile
del __execfiles
# -*- coding: UTF-8 -*-
from django.core.management.base import BaseCommand

from django_datawatch.datawatch import Scheduler


class Command(BaseCommand):
    """Management command that runs django-datawatch checks."""

    def add_arguments(self, parser):
        """Register the --force and --slug command line options."""
        force_opts = dict(
            action='store_true',
            dest='force',
            default=False,
            help='Execute all checks.',
        )
        slug_opts = dict(
            dest='slug',
            default=None,
            help='Slug of check to refresh, all checks will be refreshed if slug is not provided',
        )
        parser.add_argument('--force', **force_opts)
        parser.add_argument('--slug', **slug_opts)

    def handle(self, force, slug, *args, **options):
        """Delegate to the Scheduler, forwarding the parsed options."""
        scheduler = Scheduler()
        scheduler.run_checks(force=force, slug=slug)
from pupa.scrape import Jurisdiction, Organization

from .bills import MNBillScraper
from .committees import MNCommitteeScraper
from .people import MNPersonScraper
from .vote_events import MNVoteScraper
from .events import MNEventScraper
from .common import url_xpath

"""
Minnesota legislative data can be found at the Office of the Revisor
of Statutes:
https://www.revisor.mn.gov/

Votes:
There are not detailed vote data for Senate votes, simply yes and no counts.
Bill pages have vote counts and links to House details, so it makes more
sense to get vote data from the bill pages.
"""


class Minnesota(Jurisdiction):
    """pupa Jurisdiction definition for the Minnesota Legislature."""

    division_id = "ocd-division/country:us/state:mn"
    classification = "government"
    name = "Minnesota"
    url = "http://state.mn.us/"
    check_sessions = True
    # Scraper classes keyed by the data type they produce.
    scrapers = {
        "bills": MNBillScraper,
        "committees": MNCommitteeScraper,
        "people": MNPersonScraper,
        "vote_events": MNVoteScraper,
        "events": MNEventScraper,
    }
    parties = [{'name': 'Republican'},
               {'name': 'Democratic-Farmer-Labor'}]
    # Sessions to scrape; `_scraped_name` must match the option text on the
    # revisor.mn.gov status-search page (see get_session_list below).
    legislative_sessions = [
        {
            '_scraped_name': '86th Legislature, 2009-2010',
            'classification': 'primary',
            'identifier': '2009-2010',
            'name': '2009-2010 Regular Session'
        },
        {
            '_scraped_name': '86th Legislature, 2010 1st Special Session',
            'classification': 'special',
            'identifier': '2010 1st Special Session',
            'name': '2010, 1st Special Session'
        },
        {
            '_scraped_name': '86th Legislature, 2010 2nd Special Session',
            'classification': 'special',
            'identifier': '2010 2nd Special Session',
            'name': '2010, 2nd Special Session'
        },
        {
            '_scraped_name': '87th Legislature, 2011-2012',
            'classification': 'primary',
            'identifier': '2011-2012',
            'name': '2011-2012 Regular Session'
        },
        {
            '_scraped_name': '87th Legislature, 2011 1st Special Session',
            'classification': 'special',
            'identifier': '2011s1',
            'name': '2011, 1st Special Session'
        },
        {
            '_scraped_name': '87th Legislature, 2012 1st Special Session',
            'classification': 'special',
            'identifier': '2012s1',
            'name': '2012, 1st Special Session'
        },
        {
            '_scraped_name': '88th Legislature, 2013-2014',
            'classification': 'primary',
            'identifier': '2013-2014',
            'name': '2013-2014 Regular Session'
        },
        {
            '_scraped_name': '88th Legislature, 2013 1st Special Session',
            'classification': 'special',
            'identifier': '2013s1',
            'name': '2013, 1st Special Session'
        },
        {
            '_scraped_name': '89th Legislature, 2015-2016',
            'classification': 'primary',
            'identifier': '2015-2016',
            'name': '2015-2016 Regular Session'
        },
        {
            '_scraped_name': '89th Legislature, 2015 1st Special Session',
            'classification': 'special',
            'identifier': '2015s1',
            'name': '2015, 1st Special Session'
        },
        {
            '_scraped_name': '90th Legislature, 2017-2018',
            'classification': 'primary',
            'identifier': '2017-2018',
            'name': '2017-2018 Regular Session'
        },
    ]
    # Sessions shown on the website that we deliberately do not scrape.
    # NOTE(review): '89th Legislature, 2015-2016' appears both here and in
    # legislative_sessions above — likely a leftover; confirm intent.
    ignored_scraped_sessions = [
        '85th Legislature, 2007-2008',
        '85th Legislature, 2007 1st Special Session',
        '84th Legislature, 2005-2006',
        '84th Legislature, 2005 1st Special Session',
        '83rd Legislature, 2003-2004',
        '83rd Legislature, 2003 1st Special Session',
        '82nd Legislature, 2001-2002',
        '82nd Legislature, 2002 1st Special Session',
        '82nd Legislature, 2001 1st Special Session',
        '81st Legislature, 1999-2000',
        '80th Legislature, 1997-1998',
        '80th Legislature, 1998 1st Special Session',
        '80th Legislature, 1997 3rd Special Session',
        '80th Legislature, 1997 2nd Special Session',
        '80th Legislature, 1997 1st Special Session',
        '79th Legislature, 1995-1996',
        '79th Legislature, 1995 1st Special Session',
        '89th Legislature, 2015-2016',
    ]

    def get_organizations(self):
        """Yield the legislature and its two chambers with their posts.

        Minnesota has 67 senate districts; each district n has two house
        seats, nA and nB.
        """
        legis = Organization('Minnesota Legislature', classification='legislature')

        upper = Organization('Minnesota Senate', classification='upper',
                             parent_id=legis._id)
        lower = Organization('Minnesota House of Representatives',
                             classification='lower',
                             parent_id=legis._id)

        for n in range(1, 68):
            upper.add_post(label=str(n), role='Senator',
                           division_id='ocd-division/country:us/state:mn/sldu:{}'.format(n))
            lower.add_post(label=str(n) + 'A', role='Representative',
                           division_id='ocd-division/country:us/state:mn/sldl:{}a'.format(n))
            lower.add_post(label=str(n) + 'B', role='Representative',
                           division_id='ocd-division/country:us/state:mn/sldl:{}b'.format(n))

        yield legis
        yield upper
        yield lower

    def get_session_list(self):
        """Scrape the available session names from the revisor site's
        status-search form (the <select name="session"> options)."""
        return url_xpath('https://www.revisor.mn.gov/revisor/pages/'
                         'search_status/status_search.php?body=House',
                         '//select[@name="session"]/option/text()')
def saveData(X, f_out, colfmt='%i'):
    """Quick alias for saving data matrices as delimited text.

    If ``X`` and ``f_out`` are tuples, each matrix ``X[i]`` is saved to the
    corresponding target ``f_out[i]``; otherwise a single matrix is saved.

    Parameters
    ----------
    X : array_like or tuple of array_like
        Matrix (or tuple of matrices) to save.
    f_out : str, file-like, or tuple of the same
        Output filename(s) or writable file object(s), matching the shape
        of ``X`` (anything ``numpy.savetxt`` accepts).
    colfmt : str, optional
        numpy-style per-column format string (default ``'%i'``).

    Raises
    ------
    ValueError
        If ``X`` is a tuple and ``f_out`` has a different length.
        (Previously this was an ``assert``, which is stripped under ``-O``.)
    """
    import numpy as np

    if isinstance(X, tuple):
        if len(X) != len(f_out):
            raise ValueError(
                "X and f_out must have the same length; got %d matrices "
                "and %d targets" % (len(X), len(f_out)))
        # Pair each matrix with its output target.
        for matrix, target in zip(X, f_out):
            np.savetxt(target, matrix, delimiter=',', fmt=colfmt)
    else:
        np.savetxt(f_out, X, delimiter=',', fmt=colfmt)
#!/usr/bin/env python3
#
# This file is part of librix-thinclient.
#
# librix-thinclient is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# librix-thinclient is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with librix-thinclient. If not, see <http://www.gnu.org/licenses/>.

# Public star-import API of this package: intentionally empty, so
# ``from <package> import *`` exports nothing.
__all__ = [
]