prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
ines([machine]).to_dot()))
try:
dump[word] = Lexicon.dump_definition_graph(machine)
except:
traceback.print_exc()
logging.warning('skipping word {0}'.format(word))
return dump
@staticmethod
def create_from_dict(word2machine, primitives, cfg):
    """Build a Lexicon around an existing word -> machine mapping.

    The mapping is shallow-copied so later mutation of the caller's
    dict does not leak into the new Lexicon.
    """
    result = Lexicon(cfg)
    result.lexicon = dict(word2machine)
    result.primitives = primitives
    return result
def __init__(self, cfg):
    """Create an empty Lexicon.

    :param cfg: configuration object, kept on the instance for later use
    """
    self.cfg = cfg
    # Three word -> set(machine) maps, in decreasing order of authority:
    # base definitions, external additions, out-of-vocabulary stubs.
    self.lexicon = {}
    self.ext_lexicon = {}
    self.oov_lexicon = {}
    # Lazy cache for known_words(); NOTE(review): words added after the
    # first known_words() call will not refresh this cache -- confirm.
    self._known_words = None
    # Printnames whose definitions were already unified by expand().
    self.expanded = set()
    # Cache used by get_expanded_definition().
    self.expanded_lexicon = {}
    self.stopwords = set(nltk_stopwords.words('english'))
    self.stopwords.add('as')  # TODO
    self.stopwords.add('root')  # TODO
    # Built on demand elsewhere (e.g. get_full_graph).
    self.full_graph = None
    self.shortest_path_dict = None
def get_words(self):
    """Return every printname present in the base or external lexicon."""
    # Iterating the dicts directly yields their keys; take the union.
    return set(self.lexicon) | set(self.ext_lexicon)
def known_words(self):
    """Return the set of all known printnames, computed on first use."""
    cached = self._known_words
    if cached is None:
        # First call: materialize the cache from the current lexicons.
        cached = self.get_words()
        self._known_words = cached
    return cached
def add(self, printname, machine, external=True, oov=False):
    """Register machine under printname in one of the three lexicons.

    A word that is being added properly (non-OOV) is first removed from
    the OOV lexicon if it was parked there.
    """
    if printname in self.oov_lexicon:
        assert oov is False
        del self.oov_lexicon[printname]
    if oov:
        target = self.oov_lexicon
    elif external:
        target = self.ext_lexicon
    else:
        target = self.lexicon
    self._add(printname, machine, target)
def _add(self, printname, machine, lexicon):
if printname in lexicon:
raise Exception("duplicate word in lexicon: '{0}'".format(lexicon))
lexicon[printname] = set([machine])
def get_expanded_definition(self, printname):
    """Return the fully expanded machine for printname, with caching.

    The canonical machine is deep-copied before expansion so the
    version stored in the lexicon is never mutated.
    """
    machine = self.expanded_lexicon.get(printname)
    if machine is not None:
        # Cache hit: reuse the previously expanded machine.
        return machine
    machine = copy.deepcopy(self.get_machine(printname))
    self.expand_definition(machine)
    self.expanded_lexicon[printname] = machine
    return machine
def get_machine(self, printname, new_machine=False, allow_new_base=False,
                allow_new_ext=False, allow_new_oov=True):
    """Return the lowest-level (base < ext < oov) existing machine for
    printname. If none exists, create one at the lowest level permitted
    by the allow_* flags and return it (or None if no level is allowed).
    Uppercase printnames are resolved through their lowercase form.

    :param new_machine: if True, return a fresh machine that is NOT
        registered in any lexicon
    """
    # returns a new machine without adding it to any lexicon
    if new_machine:
        return Machine(printname, ConceptControl())
    # TODO
    if not printname:
        # Map the empty printname onto a shared placeholder entry.
        return self.get_machine("_empty_")
    if printname.isupper():
        # return self.get_machine(printname, new_machine=True)
        return self.get_machine(
            printname=printname.lower(), new_machine=new_machine,
            allow_new_base=allow_new_base, allow_new_ext=allow_new_ext,
            allow_new_oov=allow_new_oov)
    # First match wins: base lexicon, then external, then OOV.
    machines = self.lexicon.get(
        printname, self.ext_lexicon.get(
            printname, self.oov_lexicon.get(printname, set())))
    if len(machines) == 0:
        # logging.info(
        #     u'creating new machine for unknown word: "{0}"'.format(
        #         printname))
        new_machine = Machine(printname, ConceptControl())
        # Register at the lowest level the caller allows, then re-run
        # the lookup so every caller receives the stored instance.
        if allow_new_base:
            self.add(printname, new_machine, external=False)
        elif allow_new_ext:
            self.add(printname, new_machine)
        elif allow_new_oov:
            self.add(printname, new_machine, oov=True)
        else:
            return None
        return self.get_machine(printname)
    else:
        if len(machines) > 1:
            # More than one machine for a printname is an invariant
            # violation; report the contents of all three lexicons.
            debug_str = u'ambiguous printname: {0}, machines: {1}'.format(
                printname,
                [lex.get(printname, set([]))
                 for lex in (self.lexicon, self.ext_lexicon,
                             self.oov_lexicon)])
            raise Exception(debug_str)
        # Exactly one machine: return it.
        return next(iter(machines))
def expand_definition(self, machine, stopwords=[]):
    """Expand every word occurring inside machine's definition graph.

    NOTE(review): the mutable default for ``stopwords`` is shared across
    calls; it is only read here, but confirm before ever mutating it.
    """
    # Collect each node of the definition except the head itself,
    # keyed by printname.
    def_machines = dict(
        [(pn, m) for pn, m in [
            (m2.printname(), m2) for m2 in MachineTraverser.get_nodes(
                machine, names_only=False, keep_upper=True)]
         if pn != machine.printname()])
    self.expand(def_machines, stopwords=stopwords)
def expand(self, words_to_machines, stopwords=[], cached=False):
    """Unify each machine with a deep copy of its lexicon definition.

    :param words_to_machines: dict mapping lemma -> machine to expand
    :param stopwords: words left untouched; empty means use self.stopwords
    :param cached: if True, skip lemmas expanded on an earlier call
    """
    if len(stopwords) == 0:
        stopwords = self.stopwords
    for lemma, machine in words_to_machines.iteritems():
        if (
                (not cached or lemma not in self.expanded) and
                lemma in self.known_words() and lemma not in stopwords):
            # deepcopy so that the version in the lexicon keeps its links
            definition = self.get_machine(lemma)
            copied_def = copy.deepcopy(definition)
            """
            for parent, i in list(definition.parents):
                copied_parent = copy.deepcopy(parent)
                for m in list(copied_parent.partitions[i]):
                    if m.printname() == lemma:
                        copied_parent.remove(m, i)
                        break
                else:
                    raise Exception()
                    # "can't find {0} in partition {1} of {2}: {3}".format(
                    #     ))
                copied_parent.append(copied_def, i)
            """
            # Case-marker nodes ('=AGT', '=PAT', ...) inside the copied
            # definition; they get bound to the machine's arguments below.
            case_machines = [
                m for m in MachineTraverser.get_nodes(
                    copied_def, names_only=False, keep_upper=True)
                if m.printname().startswith('=')]
            machine.unify(copied_def, exclude_0_case=True)
            # Bind =AGT to partition 1 and =PAT to partition 2 when the
            # corresponding partition is non-empty.
            for cm in case_machines:
                if cm.printname() == "=AGT":
                    if machine.partitions[1]:
                        machine.partitions[1][0].unify(cm)
                if cm.printname() == "=PAT":
                    if machine.partitions[2]:
                        machine.partitions[2][0].unify(cm)
            self.expanded.add(lemma)
def get_full_graph(self, fullgraph_options):
if self.full_graph is not None:
| return self.full_graph
allwords = set()
allwords.update(
self.lexicon.keys(), self.ext_lexicon.keys(),
self.oo | v_lexicon.keys())
self.full_graph = nx.MultiDiGraph()
excluded_words = set()
# get excluded words set
with open(fullgraph_options.freq_file) as f:
for line_no, line in enumerate(f):
fields = line.strip().decode('utf-8').split('\t')
freq = int(fields[0])
word = fields[1]
if line_no > fullgraph_options.freq_cnt and (
fullgraph_options.freq_val == 0 or
fullgraph_options.freq_val > freq):
break
excluded_words.add(word)
machinegraph_options = MachineGraphOptions(
fullgraph_options=fullgraph_options)
# TODO: only for debugging
# until = 10
for i, word in enumerate(allwords):
# TODO: only for debugging
# if word not in ['dumb', 'intelligent', 'stupid']:
# continue
# if i > until:
# break
machine = self.get_machine(word)
MG = MachineGraph.create_from_machines(
[machine], machinegraph_options=machinegraph_options)
# TODO: maybe directed is better
G = MG.G.to_undirected()
# TODO: to print out all graphs
# try:
# fn = os.path.join(
# '/home/eszter/projects/4lang/data/graphs/allwords',
# u"{0}.dot".format(word)).encode('utf-8')
# with open(fn, 'w') as dot_obj:
# dot_obj |
import numpy as np
class Perceptron(object):
    """Perceptron classifier.

    Parameters
    ------------
    eta : float
        Learning rate (between 0.0 and 1.0)
    n_iter : int
        Passes over the training dataset.

    Attributes
    -----------
    w_ : 1d-array
        Weights after fitting (w_[0] is the bias unit).
    errors_ : list
        Number of misclassifications in every epoch.
    """

    def __init__(self, eta=0.01, n_iter=10):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """Fit training data.

        Parameters
        ----------
        X : {array-like}, shape = [n_samples, n_features]
            Training vectors.
        y : array-like, shape = [n_samples]
            Target values (+1 / -1).

        Returns
        -------
        self : object
        """
        # One extra weight for the bias unit, all starting at zero.
        self.w_ = np.zeros(1 + X.shape[1])
        self.errors_ = []
        for _ in range(self.n_iter):
            misclassified = 0
            for sample, label in zip(X, y):
                # Perceptron rule: move the weights only on a mistake.
                delta = self.eta * (label - self.predict(sample))
                self.w_[0] += delta
                self.w_[1:] += delta * sample
                if delta != 0.0:
                    misclassified += 1
            self.errors_.append(misclassified)
        return self

    def net_input(self, X):
        """Calculate net input (bias plus weighted sum of features)."""
        return self.w_[0] + np.dot(X, self.w_[1:])

    def predict(self, X):
        """Return class label after unit step"""
        return np.where(self.net_input(X) >= 0.0, 1, -1)
|
import io
import unittest
from unittest.mock import patch
from kattis import k_hnumbers
###############################################################################
class SampleInput(unittest.TestCase):
    '''Problem statement sample inputs and outputs'''

    def test_sample_input(self):
        '''Run and assert problem statement sample input and output.'''
        stdin_lines = ['21', '85', '789', '0']
        stdin_text = '\n'.join(stdin_lines) + '\n'
        expected_lines = ['21 0', '85 5', '789 62']
        expected = '\n'.join(expected_lines) + '\n'
        # Feed the sample input on stdin and capture everything printed.
        with patch('sys.stdin', io.StringIO(stdin_text)) as stdin,\
                patch('sys.stdout', new_callable=io.StringIO) as stdout:
            k_hnumbers.main()
        self.assertEqual(stdout.getvalue(), expected)
        # The solver must consume its entire input.
        self.assertEqual(stdin.read(), '')
###############################################################################
if __name__ == '__main__':
unittest.main()
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Kcov(CMakePackage):
    """Code coverage tool for compiled programs, Python and Bash which uses
    debugging information to collect and report data without special
    compilation options"""

    homepage = "http://simonkagstrom.github.io/kcov/index.html"
    url = "https://github.com/SimonKagstrom/kcov/archive/38.tar.gz"

    version('38', sha256='b37af60d81a9b1e3b140f9473bdcb7975af12040feb24cc666f9bb2bb0be68b4')

    depends_on('cmake@2.8.4:', type='build')
    depends_on('zlib')
    depends_on('curl')

    def cmake_args(self):
        # Necessary at least on macOS; fixes a linking error against LLDB.
        # https://github.com/Homebrew/homebrew-core/blob/master/Formula/kcov.rb
        args = ['-DSPECIFY_RPATH=ON']
        return args

    @run_after('install')
    @on_package_attributes(run_tests=True)
    def test_install(self):
        """Smoke-test the installed binary by printing its help text."""
        kcov = Executable(self.prefix.bin.kcov)
        # The help message exits with an exit code of 1, so tolerate it.
        kcov('-h', ignore_errors=1)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken.  If not, see <http://www.gnu.org/licenses/>.
### Will be populated by the UI with it's own value
app = None
# We will need external commands here
import time
from shinken.external_command import ExternalCommand, ExternalCommandManager
# Our page
def get_page(cmd=None):
# First we look for the user sid
# so we bail out if it's a false one
user = app.get_user_auth()
if not user:
return {'status': 401, 'text': 'Invalid session'}
now | = int(time.time())
print "Ask us an /action page", cmd
elts = cmd.split('/')
cmd_name = elts[0]
cmd_args = elts[1:]
print "Got command", cmd_name
print "And args", cmd_args
# Check if the command exist in the external command list
if cmd_name not in ExternalCommandManager.commands:
return {'status': 404, 'text': 'Unknown command %s' % cmd_name}
extcmd = '[%d] %s' % (now, ';'.join(elts))
print "Got the; form", extcmd
# Ok, if good, we can launch the command
extcmd = extcmd.decode('utf8', 'replace')
e = ExternalCommand(extcmd)
print "Creating the command", e.__dict__
app.push_external_command(e)
return {'status': 200, 'text': 'Command launched'}
# Route table consumed by the UI: expose get_page under /action/<cmd>,
# where <cmd> may contain any characters (the '#.+#' pattern).
pages = {get_page: {'routes': ['/action/:cmd#.+#']}}
|
# TODO:
# - Figure out when to use previous runs' information
# - merge this module's parse_clustermerging and
# newick.parse_clustermerging
from StringIO import StringIO
from os import remove
import shutil
import os.path as op
import subprocess as sp
import sys
import numpy as np
CM_FILE = 'ClusterMerging.txt'
class ClusteringRun(object):
def __init__(self, prmtop=None, mask='@CA,C,O,N', start_n_clusters=2,
n_clusters=50, ptraj_trajin_fn='ptraj_trajin', cm_fn=CM_FILE,
cn_fns=None, prefix='c', log_fn=None, no_ssr_sst=False,
use_cpptraj=False):
self.prmtop = prmtop
self.mask = mask
self.start_n_clusters = start_n_clusters
self.n_clusters = n_clusters
self.ptraj_trajin_fn = ptraj_trajin_fn
self.cm_fn = cm_fn
self.no_ssr_sst = no_ssr_sst
if use_cpptraj:
self._ptraj_prg = 'cpptraj'
else:
self._ptraj_prg = 'ptraj'
if cn_fns is None:
self.cn_fns = {}
else:
self.cn_fns = cn_fns
self.prefix = prefix
self.log_fn = log_fn
# No clustering run necessary if all of the following conditions
# apply:
# - ClusterMerging.txt exists
# - self.cn_fns is not empty
# - every file in self.cn_fns exists
# Incidentally, fuck yeah generators. They make code look like
# Lisp, however. http://xkcd.com/297/
if all((op.exists(self.cm_fn),
self.cn_fns,
all(op.exists(fn) for fn in self.cn_fns.itervalues()))):
pass
else:
self.cluster()
pass
self.n_decoys = self._get_n_decoys()
def _get_n_decoys(self):
with open(self.cm_fn) as cm_file:
for i, _ in enumerate(cm_file, start=1):
# i is the number of nodes in the clustering tree and is
# always 1 lower than the number of decoys if we cluster
# up to a single cluster
# Did that just make sense to anyone?
pass
self.n_decoys = i + 1
return self.n_decoys
def _cluster(self, script, append=False):
def _run_ptraj(script_fn):
# God I hate plumbing
writemode = 'a' if append else 'w'
if self.log_fn is None:
log_fh = self.log_fn
else:
log_fh = open(self.log_fn, writemode)
with log_fh as logfile:
return sp.check_call([self._ptraj_prg, self.prmtop,
script_fn], stdout=logfile, stderr=logfile)
try:
remove('ptraj_script')
except OSError as e:
if e.errno == 2:
pass
shutil.copy(self.ptraj_trajin_fn, 'ptraj_script')
with open('ptraj_script', 'a') as ptraj_script:
ptraj_script.write(script)
return _run_ptraj(script_fn='ptraj_script')
def _run_c1(self):
ptraj_single = ('cluster out c1 representative none average none all '
'none averagelinkage clusters 1 rms {mask}'.format(mask=self.mask))
self._cluster(script=ptraj_single)
self.cn_fns[1] = '{prefix}1.txt'.format(prefix=self.prefix)
return self.cn_fns
def _run_cn(self):
clusterstring = ('cluster out c{n} representative none average none all '
'none ReadMerge clusters {n} rms {mask}')
ptraj_full = ('\n'.join(clusterstring.format(n=i, mask=self.mask) for i
in xrange(self.start_n_clusters, self.n_clusters+1)))
self._cluster(script=ptraj_full)
self.cn_fns.update({i: 'c{n}.txt'.format(n=i) for i in
xrange(self.start_n_clusters, self.n_clusters+1)})
return self.cn_fns
def _parse_clustermerging(self, reverse=True):
clustermerging = np.genfromtxt(self.cm_fn,
# - pSF values are only computed for the last 50 records
# - The results for n_cluster = 1 are not helpful
skip_header=self.n_decoys - 51, skip_footer=1,
dtype=[('n', 'i8'), ('rmsd', 'f8'), ('dbi', 'f8'), ('psf', 'f8')],
usecols=(0, 3, 4, 5), invalid_raise=False,
converters={0: lambda s: int(s.rstrip(':')) + self.n_decoys + 1})
if reverse:
step = -1
else:
step = 1
self._n = clustermerging['n'][::step]
self._dbi = clustermerging['dbi'][::step]
self._psf = clustermerging['psf'][::step]
self._rmsd = clustermerging['rmsd'][::step]
def _get_ssr | _ssts | (self):
ssr_ssts = []
for i, fn in sorted(self.cn_fns.iteritems()):
with open(fn) as fh:
for line in fh:
if line.startswith('#SSR/SST: '):
ssr_sst_pre = line.split()[1]
# '-nan' is the value for a single cluster
if ssr_sst_pre != '-nan':
ssr_sst = float(ssr_sst_pre)
ssr_ssts.append(ssr_sst)
self._ssr_sst = np.array(ssr_ssts)
def cluster(self):
if self.n_clusters == 1:
self._run_c1()
elif (self.n_clusters > 1 and op.exists(self.cm_fn) and
not self.no_ssr_sst):
# If we don't want to plot SSR/SST values, the subsequent
# clustering runs aren't necessary, because all other
# metrics are present in ClusterMerging.txt already
self._run_cn()
else:
print "No valid cluster number specified"
sys.exit(1)
def gather_metrics(self):
self._parse_clustermerging()
if self.no_ssr_sst:
imetrics = zip(self._n, self._rmsd, self._dbi, self._psf)
self.metrics = np.rec.fromrecords(imetrics,
names=('n', 'rmsd', 'dbi', 'psf'))
else:
self._get_ssr_ssts()
imetrics = zip(self._n, self._rmsd, self._dbi, self._psf,
self._ssr_sst)
self.metrics = np.rec.fromrecords(imetrics,
names=('n', 'rmsd', 'dbi', 'psf', 'ssr_sst'))
return self.metrics
|
import unittest
from katas.kyu_6.write_number_in_ex | panded_form import expanded_form
class ExpandedFormTestCase(unittest.TestCase):
    """Check expanded_form against a few hand-computed examples."""

    def test_equal_1(self):
        result = expanded_form(12)
        self.assertEqual(result, '10 + 2')

    def test_equal_2(self):
        result = expanded_form(42)
        self.assertEqual(result, '40 + 2')

    def test_equal_3(self):
        # Zero digits must be skipped entirely, not rendered as '+ 0'.
        result = expanded_form(70304)
        self.assertEqual(result, '70000 + 300 + 4')
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Filename : nature.py
# Author : Stephane Grabli
# Date : 04/08/2005
# Purpose : Uses the NatureUP1D predicate to select the lines
# of a given type (among Nature.SILHOUETTE, Nature.CREA | SE, Nature.SUGGESTIVE_CONTOURS,
# Nature.BORDERS).
# The suggestive contours must have been enabled in the
# options dialog to appear in the View Map.
from freestyle import ChainSilhouetteIterator, IncreasingColorShader, \
    IncreasingThicknessShader, Nature, Operators, TrueUP1D
from PredicatesU1D import pyNatureUP1D
from logical_operators import NotUP1D

# Keep only SILHOUETTE edges, then chain them until a non-silhouette
# edge would be entered.
Operators.select(pyNatureUP1D(Nature.SILHOUETTE))
Operators.bidirectional_chain(
    ChainSilhouetteIterator(),
    NotUP1D(pyNatureUP1D(Nature.SILHOUETTE)))

# Thick, warm-colored strokes for the selected chains.
shaders_list = [
    IncreasingThicknessShader(3, 10),
    IncreasingColorShader(0.0, 0.0, 0.0, 1, 0.8, 0, 0, 1),
]
Operators.create(TrueUP1D(), shaders_list)
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# (c) 2014 Rajat Agarwal
import os, sys
import unittest
import sqoot
if 'PUBLIC_API_KEY' in os.environ and 'PRIVATE_API_KEY' in os.environ:
PUBLIC_API_KEY = os.environ['PUBLIC_API_KEY']
PRIVATE_API_KEY = os.environ['PRIVATE_API_KEY']
else:
try:
from _creds import *
except ImportError:
print "Please create a creds.py file in this package, based upon creds.example.py"
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), | 'testdata')
sys.path.append('/home/ragarwal/sqoot')
class BaseEndpointTestCase(unittest.TestCase):
    """Shared fixture: build an authenticated Sqoot client for each test."""

    def setUp(self):
        # Credentials come from the environment or _creds (see module top).
        self.api = sqoot.Sqoot(
            privateApiKey=PRIVATE_API_KEY,
            publicApiKey=PUBLIC_API_KEY,
        )
|
import sys |
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'satori.core.settings')
application = get_wsgi_application()
# Initialize thrift server structures -- this takes a long time, so it is
# better to pay the cost once at startup than during the first request.
import satori.core.thrift_server
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Intel Corp.
#
"""
Interface for all resource control plugins.
"""
from abc import ABCMeta, abstractmethod
from ..plugin import DeclareFramework
@DeclareFramework('provisioner')
class Provisioner(object, metaclass=ABCMeta):
    """
    Abstract interface every resource-control (provisioner) plugin must
    implement. Methods take a device object, apply/remove provisioner
    fields on it, and return the updated device.
    """

    # Dictionary keys under which provisioner state is stored on a device.
    PROVISIONER_KEY = "provisioner"
    PROVISIONER_IMAGE_KEY = "image"
    PROVISIONER_BOOTSTRAP_KEY = "provisioner_bootstrap"
    PROVISIONER_FILE_KEY = "provisioner_files"
    PROVISIONER_KARGS_KEY = "provisioner_kernel_args"
    # Sentinel value meaning "not set".
    PROVISIONER_UNSET_KEY = "UNDEF"

    @abstractmethod
    def add(self, device):
        """
        Attempts to add a device to the provisioner. Does nothing if the
        device is already added.

        :param device: device to register with the provisioner
        :return: Updated device with the new fields applied
        """
        pass

    @abstractmethod
    def delete(self, device):
        """
        Attempts to remove a device from the provisioner. Does nothing if
        the device isn't already there.

        :param device: device to unregister
        :return: Updated device with the correct fields removed
        """
        pass

    @abstractmethod
    def set_ip_address(self, device, ip_address, interface="eth0"):
        """
        Mutate the device to include this ip_address.
        Save it to the DataStore
        And set it in the provisioner

        :param device: device to mutate
        :param ip_address: address to assign to the given interface
        :param interface: network interface name (default "eth0")
        :return: Updated device with the new fields applied
        """
        pass

    @abstractmethod
    def set_hardware_address(self, device, hardware_address, interface="eth0"):
        """
        Same as Provisioner.set_ip_address, but for the MAC address.

        :param device: device to mutate
        :param hardware_address: MAC address to assign
        :param interface: network interface name (default "eth0")
        :return: Updated device with the new fields applied
        """
        pass

    @abstractmethod
    def set_image(self, device, image):
        """
        Set an image (already known by the provisioner) to a given device.

        :param device: device to mutate
        :param image: name of an image already known to the provisioner
        :return: Updated device with the new fields applied
        :raise: ProvisionException, the image specified is not known to the provisioner
        """
        pass

    @abstractmethod
    def set_bootstrap(self, device, bootstrap):
        """
        :param device: device to mutate
        :param bootstrap: bootstrap known to the provisioner
        :return: Updated device with the new fields applied
        :raise: ProvisionException, the bootstrap specified is not known to the provisioner
        """
        pass

    @abstractmethod
    def set_files(self, device, files):
        """
        :param device: device to mutate
        :param files: file name(s) known to the provisioner
        :return: Updated device with the new fields applied
        :raise: ProvisionException, the file(s) specified is not known to the provisioner
        """
        pass

    @abstractmethod
    def set_kernel_args(self, device, args):
        """
        :param device: device to mutate
        :param args: kernel arguments to attach to the device
        :return: Updated device with the new fields applied
        """
        pass

    @abstractmethod
    def list(self):
        """
        List all devices that the provisioner knows about.
        NOTE(review): unclear whether this comes from the DataStore or
        from the provisioner backend (e.g. Warewulf) -- confirm.

        :return: return the list of device names
        """
        pass

    @abstractmethod
    def list_images(self):
        """
        List all the images this provisioner knows about.

        :return: list of known images (names only)
        """
        pass
class ProvisionerException(Exception):
    """
    A staple Exception thrown by the Provisioner
    """

    def __init__(self, msg, command_output=None):
        super(ProvisionerException, self).__init__()
        self.msg = msg
        if command_output is None:
            return
        # A shell command caused the failure: keep its streams and
        # return code around for the caller to inspect.
        self.cmd_stdout = command_output.stdout
        self.cmd_stderr = command_output.stderr
        self.cmd_return_code = command_output.return_code

    def __str__(self):
        return repr(self.msg)
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
    """Entry point: read the node-description file named on the command
    line and print the generated parser class."""
    source_fn = sys.argv[1]
    type_name, top_node_name, attributes = \
        common.WalkNodesForAttributes(source_fn)
    GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
    """generate it.

    type_name: the type of object the parser returns
    top_node_name: the name of the object the parser returns.
        per common.WalkNodsForAttributes
    """
    stanzas = []
    # One "} else if (name) ..." stanza per attribute, chosen by its type.
    for name in sorted(attributes):
        typ, children = attributes[name]
        replacements = Replacements(top_node_name, name, typ, children)
        if typ == common.BOOLEAN:
            stanzas.append(BOOLEAN_STANZA % replacements)
        elif typ == common.GROUP:
            stanzas.append(GROUP_STANZA % replacements)
        elif typ in common.COMPLEX:
            stanzas.append(COMPLEX_STANZA % replacements)
        else:
            stanzas.append(STANZA % replacements)
    if stanzas:
        # pop off the extranious } else for the first conditional stanza.
        stanzas[0] = stanzas[0].replace('} else ', '', 1)
    # NOTE(review): this reuses `name` and `typ` from the final loop
    # iteration and raises NameError if `attributes` is empty -- confirm.
    replacements = Replacements(top_node_name, name, typ, [None])
    replacements['stanzas'] = '\n'.join(stanzas).strip()
    print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
    """Build the template-substitution dict for one attribute stanza.

    children[0], when truthy, names the element type parsed by a nested
    group parser; otherwise the sub-parser name is derived from the
    (de-pluralized) attribute name.
    """
    # CamelCaseClassName derived from the top-level node name.
    type_name = ''.join(part.capitalize() for part in top_node_name.split('_'))
    # CamelCase version of the attribute name.
    camel_name = ''.join(part.capitalize() for part in name.split('_'))
    # NOTE(review): lower().capitalize() yields e.g. 'Fieldname', not
    # camelCase -- preserved as-is from the original.
    attribute_name = camel_name.lower().capitalize()
    # mFieldName
    field_name = 'm' + camel_name
    if children[0]:
        sub_parser_camel_case = children[0] + 'Parser'
    else:
        sub_parser_camel_case = camel_name[:-1] + 'Parser'
    return {
        'type_name': type_name,
        'name': name,
        'top_node_name': top_node_name,
        'camel_name': camel_name,
        'parser_name': typ + 'Parser',
        'attribute_name': attribute_name,
        'field_name': field_name,
        'typ': typ,
        'timestamp': datetime.datetime.now(),
        'sub_parser_camel_case': sub_parser_camel_case,
        'sub_type': children[0],
    }
if __name__ == '__main__':
main()
|
#!/usr/bin/python -tt
from graph.node import Node, FileNode
def endswith(s, pats):
    """Return True iff s ends with at least one of the suffixes in pats."""
    for suffix in pats:
        if s.endswith(suffix):
            return True
    return False
def create_graph(files):
    """Build the dependency graph for `files`.

    NOTE(review): currently a stub -- it ignores `files` and returns a
    bare root Node; presumably real graph construction is pending.
    """
    root = Node(None)
    return root
| |
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as nexception
class ExternalNetworkInUse(nexception.InUse):
    # Raised when an update tries to turn router:external off while the
    # network still has gateway ports attached.
    message = _("External network %(net_id)s cannot be updated to be made "
                "non-external, since it has existing gateway ports")
# For backward compatibility the 'router' prefix is kept.
EXTERNAL = 'router:external'

# Attribute map grafted onto the v2 'networks' resource by this extension.
EXTENDED_ATTRIBUTES_2_0 = {
    'networks': {
        EXTERNAL: {
            'allow_post': True,
            'allow_put': True,
            'default': False,
            'is_visible': True,
            'convert_to': attr.convert_to_boolean,
            'enforce_policy': True,
            'required_by_policy': True,
        },
    },
}
class External_net(extensions.ExtensionDescriptor):
    """Extension descriptor for the router:external network attribute."""

    @classmethod
    def get_name(cls):
        return "Neutron external network"

    @classmethod
    def get_alias(cls):
        return "external-net"

    @classmethod
    def get_description(cls):
        return _("Adds external network attribute to network resource.")

    @classmethod
    def get_updated(cls):
        return "2013-01-14T10:00:00-00:00"

    def get_extended_resources(self, version):
        # Only the 2.0 API carries the extra attribute map.
        if version != "2.0":
            return {}
        return EXTENDED_ATTRIBUTES_2_0
|
#!/usr/bin/env python
import ez_setup
ez_setup.use_setuptools()

from setuptools import setup, find_packages

setup(
    name='moviepy',
    version='0.2.1.6.9',
    author='Zulko 2013',
    description='Module for script-based video editing',
    long_description=open('README.rst').read(),
    license='LICENSE.txt',
    keywords="movie editing film mixing script-based",
    packages=find_packages(exclude='docs'),
)
|
#
# Location-related classes for simplification of GPS traces.
# Author: James P. Biagioni (jbiagi1@uic.edu)
# Company: University of Illinois at Chicago
# Created: 5/16/11
#
import os
class Location:
    """A single GPS fix, doubly linked to its neighbours within a trip."""

    def __init__(self, id, latitude, longitude, time):
        self.id = id
        self.latitude = latitude
        self.longitude = longitude
        # Keep the as-read coordinates so simplification can be undone.
        self.orig_latitude = latitude
        self.orig_longitude = longitude
        self.time = time
        self.prev_location = None
        self.next_location = None

    def __str__(self):
        # Serialize as: id,lat,lon,time,prev_id,next_id ("None" when a
        # neighbour link is missing).
        fields = [str(self.id), str(self.latitude), str(self.longitude),
                  str(self.time)]
        for neighbour in (self.prev_location, self.next_location):
            fields.append(str(neighbour.id) if neighbour is not None
                          else "None")
        return ",".join(fields)
class Trip:
    """An ordered sequence of Location fixes."""

    def __init__(self):
        self.locations = []

    def add_location(self, bus_location):
        """Append a fix; callers are expected to add them in time order."""
        self.locations.append(bus_location)

    @property
    def num_locations(self):
        return len(self.locations)

    @property
    def start_time(self):
        return self.locations[0].time

    @property
    def end_time(self):
        return self.locations[-1].time

    @property
    def time_span(self):
        # Duration between the first and last fix.
        return self.end_time - self.start_time
class TripLoader:
    """Load Trip objects (and their linked Locations) from disk."""

    @staticmethod
    def get_all_trips(trips_path):
        """Load every file named 'trip_*' in trips_path into a Trip list."""
        # storage for all trips
        all_trips = []
        # get trip filenames
        trip_filenames = os.listdir(trips_path)
        # iterate through all trip filenames
        for trip_filename in trip_filenames:
            # if filename starts with "trip_"
            if (trip_filename.startswith("trip_") is True):
                # load trip from file
                curr_trip = TripLoader.load_trip_from_file(trips_path + trip_filename)
                # add trip to all_trips list
                all_trips.append(curr_trip)
        # return all trips
        return all_trips

    @staticmethod
    def load_trip_from_file(trip_filename):
        """Parse one trip file (CSV lines: id,lat,lon,time) into a Trip,
        wiring up each Location's prev/next pointers afterwards."""
        # create new trip object
        new_trip = Trip()
        # create new trip locations dictionary
        new_trip_locations = {}  # indexed by location id
        # open trip file
        trip_file = open(trip_filename, 'r')
        prev_location = None
        # read through trip file, a line at a time
        for trip_location in trip_file:
            # parse out location elements
            location_elements = trip_location.strip('\n').split(',')
            # create new location object
            new_location = Location(str(location_elements[0]), float(location_elements[1]), float(location_elements[2]), float(location_elements[3]))
            # store new trip location
            new_trip_locations[new_location.id] = new_location
            # chain this fix to the previous one by id
            if prev_location:
                new_location.prev_location_id = prev_location.id
                prev_location.next_location_id = new_location.id
            else:
                new_location.prev_location_id = "None"
            prev_location = new_location
            # store prev/next_location id
            #new_location.prev_location_id = "None"#str(location_elements[4])
            #new_location.next_location_id = "None"#str(location_elements[5])
            #new_location.prev_location_id = str(location_elements[4])
            #new_location.next_location_id = str(location_elements[5])
            # add new location to trip
            new_trip.add_location(new_location)
        # NOTE(review): after the loop both names refer to the *last*
        # location, so these two lines make it its own prev and next
        # (overwriting the link set inside the loop) and raise NameError
        # for an empty file -- confirm this is intended.
        new_location.prev_location_id = prev_location.id
        prev_location.next_location_id = new_location.id
        # close trip file
        trip_file.close()
        # iterate through trip locations, and connect pointers
        for trip_location in new_trip.locations:
            # connect prev_location pointer
            if (trip_location.prev_location_id != "None"):
                trip_location.prev_location = new_trip_locations[trip_location.prev_location_id]
            else:
                trip_location.prev_location = None
            # connect next_location pointer
            if (trip_location.next_location_id != "None"):
                trip_location.next_location = new_trip_locations[trip_location.next_location_id]
            else:
                trip_location.next_location = None
        # return new trip
        return new_trip
|
# -*- coding: utf-8 -*-
#
# Python GTK+ 3 Tutorial documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 29 18:42:04 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# One import per line (PEP 8); both are only needed for the commented-out
# sys.path manipulation below, but are kept for drop-in extension snippets.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python GTK+ 3 Tutorial'
copyright = u'2012, Andrew Steele'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonGTK3Tutorialdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'PythonGTK3Tutorial.tex', u'Python GTK+ 3 Tutorial Documentation',
   u'Andrew Steele', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pythongtk3tutorial', u'Python GTK+ 3 Tutorial Documentation',
     [u'Andrew Steele'], 1)
]
|
"""HTTP views to interact with the entity registry."""
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.websocket_api.const import ERR_NOT_FOUND
from homeassistant.components.websocket_api.decorators import (
async_response,
require_admin,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_registry import async_get_registry
async def async_setup(hass):
    """Enable the Entity Registry views."""
    register = hass.components.websocket_api.async_register_command
    for command_handler in (
        websocket_list_entities,
        websocket_get_entity,
        websocket_update_entity,
        websocket_remove_entity,
    ):
        register(command_handler)
    return True
@async_response
@websocket_api.websocket_command({vol.Required("type"): "config/entity_registry/list"})
async def websocket_list_entities(hass, connection, msg):
    """Handle list registry entries command.

    Async friendly.
    """
    registry = await async_get_registry(hass)
    payload = [_entry_dict(item) for item in registry.entities.values()]
    connection.send_message(websocket_api.result_message(msg["id"], payload))
@async_response
@websocket_api.websocket_command(
    {
        vol.Required("type"): "config/entity_registry/get",
        vol.Required("entity_id"): cv.entity_id,
    }
)
async def websocket_get_entity(hass, connection, msg):
    """Handle get entity registry entry command.

    Async friendly.
    """
    registry = await async_get_registry(hass)
    entry = registry.entities.get(msg["entity_id"])
    if entry is not None:
        # Found: reply with the serialized registry entry.
        connection.send_message(
            websocket_api.result_message(msg["id"], _entry_dict(entry))
        )
        return
    connection.send_message(
        websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
    )
@require_admin
@async_response
@websocket_api.websocket_command(
    {
        vol.Required("type"): "config/entity_registry/update",
        vol.Required("entity_id"): cv.entity_id,
        # If passed in, we update value. Passing None will remove old value.
        vol.Optional("name"): vol.Any(str, None),
        vol.Optional("icon"): vol.Any(str, None),
        vol.Optional("new_entity_id"): str,
        # We only allow setting disabled_by user via API.
        vol.Optional("disabled_by"): vol.Any("user", None),
    }
)
async def websocket_update_entity(hass, connection, msg):
    """Handle update entity websocket command.

    Async friendly.
    """
    registry = await async_get_registry(hass)
    if msg["entity_id"] not in registry.entities:
        connection.send_message(
            websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
        )
        return
    # Collect only the keys the caller actually sent.
    changes = {}
    for key in ("name", "icon", "disabled_by"):
        if key in msg:
            changes[key] = msg[key]
    if "new_entity_id" in msg and msg["new_entity_id"] != msg["entity_id"]:
        changes["new_entity_id"] = msg["new_entity_id"]
        # Refuse to rename onto an entity_id that already has a state.
        if hass.states.get(msg["new_entity_id"]) is not None:
            connection.send_message(
                websocket_api.error_message(
                    msg["id"], "invalid_info", "Entity is already registered"
                )
            )
            return
    try:
        if changes:
            entry = registry.async_update_entity(msg["entity_id"], **changes)
        else:
            # BUG FIX: all update keys are optional, so `changes` can be empty;
            # previously `entry` was left unbound in that case and the success
            # branch below raised NameError. Fall back to the current entry.
            entry = registry.entities[msg["entity_id"]]
    except ValueError as err:
        connection.send_message(
            websocket_api.error_message(msg["id"], "invalid_info", str(err))
        )
    else:
        connection.send_message(
            websocket_api.result_message(msg["id"], _entry_dict(entry))
        )
@require_admin
@async_response
@websocket_api.websocket_command(
    {
        vol.Required("type"): "config/entity_registry/remove",
        vol.Required("entity_id"): cv.entity_id,
    }
)
async def websocket_remove_entity(hass, connection, msg):
    """Handle remove entity websocket command.

    Async friendly.
    """
    registry = await async_get_registry(hass)
    entity_id = msg["entity_id"]
    if entity_id in registry.entities:
        registry.async_remove(entity_id)
        connection.send_message(websocket_api.result_message(msg["id"]))
        return
    connection.send_message(
        websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
    )
@callback
def _entry_dict(entry):
    """Convert entry to API format.

    BUG FIX: the "config_entry_id" and "entity_id" keys were garbled
    (stray " | " fragments inside the string literals), so clients received
    malformed key names.
    """
    return {
        "config_entry_id": entry.config_entry_id,
        "device_id": entry.device_id,
        "disabled_by": entry.disabled_by,
        "entity_id": entry.entity_id,
        "name": entry.name,
        "icon": entry.icon,
        "platform": entry.platform,
        "original_name": entry.original_name,
        "original_icon": entry.original_icon,
    }
|
"""ELASPIC RUN
"""
import os
import os.path as op
import logging
import argparse
from elaspic import conf, pipeline
logger = logging.getLogger(__name__)
def validate_args(args):
    """Sanity-check parsed command-line arguments before running a pipeline.

    Raises:
        Exception: if the configuration file does not exist, if neither or
            both of ``--uniprot_id`` / ``--structure_file`` were given, if the
            database pipeline lacks both a config file and the external dirs,
            or if ``--sequence_file`` is given without ``--structure_file``.
    """
    # The configuration file, when provided, must exist on disk.
    if args.config_file and not os.path.isfile(args.config_file):
        raise Exception('The configuration file {} does not exist!'.format(args.config_file))
    # Exactly one of --uniprot_id / --structure_file must be specified.
    if ((args.uniprot_id is None and args.structure_file is None) or
            (args.uniprot_id is not None and args.structure_file is not None)):
        raise Exception("""\
One of '-u' ('--uniprot_id') or '-p' ('--structure_file') must be specified!""")
    # The database pipeline needs either a config file or all external dirs.
    if (args.uniprot_id and (
            (args.config_file is None) and
            (args.pdb_dir is None or args.blast_db_dir is None or
             args.archive_dir is None))):
        raise Exception("""\
When using the database pipeline, \
you must either provide a configuration file ('-c', '--config_file') or \
'--pdb_dir', '--blast_db_dir', and '--archive_dir'.""")
    # A sequence file is only meaningful together with a template structure.
    if args.sequence_file and not args.structure_file:
        raise Exception("""\
A template PDB file must be specified using the '--structure_file' option, \
when you specify a target sequence using the '--sequence_file' option!""")
def elaspic(args):
    """Entry point for the ``elaspic run`` sub-command.

    Validates *args*, loads configuration (from a file or from individual
    command-line options), then dispatches to the database pipeline
    (``--uniprot_id``) or the standalone pipeline (``--structure_file``).
    """
    validate_args(args)
    # Read configurations
    if args.config_file is not None:
        conf.read_configuration_file(args.config_file)
    elif args.uniprot_id:
        conf.read_configuration_file(
            DATABASE={
                'connection_string': args.connection_string
            },
            EXTERNAL_DIRS={
                'pdb_dir': args.pdb_dir,
                'blast_db_dir': args.blast_db_dir,
                'archive_dir': args.archive_dir,
            })
    elif args.structure_file:
        # Standalone runs keep their working files in ./.elaspic.
        unique_temp_dir = op.abspath(op.join(os.getcwd(), '.elaspic'))
        os.makedirs(unique_temp_dir, exist_ok=True)
        conf.read_configuration_file(
            DEFAULT={
                'unique_temp_dir': unique_temp_dir
            },
            EXTERNAL_DIRS={
                'pdb_dir': args.pdb_dir,
                'blast_db_dir': args.blast_db_dir,
                'archive_dir': args.archive_dir
            })
    if args.uniprot_id:
        # Run database pipeline
        if args.uniprot_domain_pair_ids:
            logger.debug('uniprot_domain_pair_ids: %s', args.uniprot_domain_pair_ids)
            uniprot_domain_pair_ids_asint = (
                [int(x) for x in args.uniprot_domain_pair_ids.split(',') if x]
            )
        else:
            uniprot_domain_pair_ids_asint = []
        from elaspic import database_pipeline
        # Use a distinct local name so we do not shadow the module-level
        # `pipeline` import from `elaspic`.
        run_pipeline = database_pipeline.DatabasePipeline(
            args.uniprot_id, args.mutations,
            run_type=args.run_type,
            uniprot_domain_pair_ids=uniprot_domain_pair_ids_asint
        )
        run_pipeline.run()
    elif args.structure_file:
        # Run local pipeline
        from elaspic import standalone_pipeline
        run_pipeline = standalone_pipeline.StandalonePipeline(
            args.structure_file, args.sequence_file, args.mutations,
            mutation_format=args.mutation_format,
            run_type=args.run_type,
        )
        run_pipeline.run()
def configure_run_parser(sub_parsers):
    """Register the 'run' sub-command and all of its arguments on *sub_parsers*."""
    # NOTE(review): `help` shadows the builtin inside this function scope.
    help = "Run ELASPIC"
    description = help + ""
    example = r"""
Examples
--------
$ elaspic run -p 4DKL.pdb -m A_M6A -n 1
$ elaspic run -u P00044 -m M1A -c config_file.ini
$ elaspic run -u P00044 -m M1A \
--connection_string=mysql://user:pass@localhost/elaspic \
--pdb_dir=/home/pdb/data/data/structures/divided/pdb \
--blast_db_dir=/home/ncbi/blast/db \
--archive_dir=/home/elaspic
"""
    # RawDescriptionHelpFormatter keeps the example block formatted as-is.
    parser = sub_parsers.add_parser(
        'run',
        help=help,
        description=description,
        epilog=example,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Configuration sources: either one config file, or the individual
    # connection / directory options below.
    parser.add_argument(
        '-c', '--config_file', nargs='?', type=str,
        help='ELASPIC configuration file.')
    parser.add_argument(
        '--connection_string', nargs='?', type=str,
        help=('SQLAlchemy formatted string describing the connection to the database.'))
    parser.add_argument(
        '--pdb_dir', nargs='?', type=str,
        help=("Folder containing PDB files in split format (e.g. 'ab/pdb1ab2.ent.gz')."))
    parser.add_argument(
        '--blast_db_dir', nargs='?', type=str,
        help=("Folder containing NCBI `nr` and `pdbaa` databases."))
    parser.add_argument(
        '--archive_dir', nargs='?', type=str,
        help=('Folder containing precalculated ELASPIC data.'))
    parser.add_argument(
        '-v', '--verbose', action='count',
        help=('Specify verbosity level.'))
    # Pipeline selection: -u (database pipeline) or -p (standalone pipeline);
    # validate_args() enforces that exactly one is given.
    parser.add_argument(
        '-u', '--uniprot_id',
        help="The Uniprot ID of the protein that you want to mutate (e.g. 'P28223')."
        "This option relies on a local elaspic database, which has to be specified "
        "in the configuration file.")
    parser.add_argument(
        '-p', '--structure_file',
        help="Full filename (including path) of the PDB file that you wish to mutate.")
    parser.add_argument(
        '-s', '--sequence_file',
        help="Full filename (including path) of the FASTA file containing the sequence that you "
        "wish to model. If you choose this option, you also have to specify "
        "a template PDB file using the '--pdb-file' option.")
    parser.add_argument(
        '-m', '--mutations', nargs='?', default=[''],
        help="Mutation(s) that you wish to evaluate.\n"
        "If you used '--uniprot_id', mutations must be provided using uniprot coordinates "
        "(e.g. 'D172E,R173H' or 'A_V10I').\n"
        "If you used '--structure_file', mutations must be provided using the chain "
        "and residue id (e.g. 'A_M1C,B_C20P' to mutate a residue with id '1' on chain A "
        "to Cysteine, and residue with id '20' on chain B to Proline).\n"
        "If you used '--sequence_file', mutations must be provided using the chain "
        "and residue INDEX (e.g. '1_M1C,2_C20P' to mutate the first residue in sequence 1 "
        "to Cysteine, and the 20th residue in sequence 2 to Proline).")
    parser.add_argument(
        '-n', '--mutation_format', nargs='?', default=None,
        help="Mutation format:\n"
        " 1. {pdb_chain}_{pdb_mutation},...\n"
        " 2. {pdb_chain}_{sequence_mutation},...\n"
        " 3. {sequence_pos}_{sequence_mutation}... (default)\n\n"
        "If `sequence_file` is None, this does not matter "
        "(always {pdb_chain}_{pdb_mutation})."
    )
    parser.add_argument(
        '-i', '--uniprot_domain_pair_ids', nargs='?', default='',
        help="List of uniprot_domain_pair_ids to analyse "
        "(useful if you want to restrict your analysis to only a handful of domains).")
    parser.add_argument(
        '-t', '--run_type', nargs='?', type=str, default='all',
        choices=sorted(pipeline.Pipeline._valid_run_types),
        help='Type of analysis to perform.')
    # Dispatch `elaspic run ...` to the elaspic() entry point above.
    parser.set_defaults(func=elaspic)
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from concurrent import futures
import os
from oslo_config import cfg
from oslo_log import log as logging
from neutron.common import utils as common_utils
from neutron.conf.agent import common as config
# BUG FIX: this import line and the DEFAULT_LOG_DIR expression below were
# garbled (stray " | " fragments split `tests_base` and `get_test_log_path`).
from neutron.tests import base as tests_base
from neutron.tests.common import helpers
from neutron.tests.common import net_helpers
from neutron.tests.fullstack.resources import client as client_resource
from neutron.tests import tools
from neutron.tests.unit import testlib_api
# This is the directory from which infra fetches log files for fullstack tests
DEFAULT_LOG_DIR = os.path.join(helpers.get_test_log_path(),
                               'dsvm-fullstack-logs')
# Directory containing this module; module-level logger for the tests.
ROOTDIR = os.path.dirname(__file__)
LOG = logging.getLogger(__name__)
class BaseFullStackTestCase(testlib_api.MySQLTestCaseMixin,
                            testlib_api.SqlTestCase):
    """Base test class for full-stack tests."""
    # Build the per-test database schema via migrations rather than models.
    BUILD_WITH_MIGRATIONS = True
    def setUp(self, environment):
        """Set up logging, database config, rootwrap/privsep, and the
        fullstack *environment* fixture for one test run."""
        super(BaseFullStackTestCase, self).setUp()
        # Per-test log file named after the test, collected by infra.
        tests_base.setup_test_logging(
            cfg.CONF, DEFAULT_LOG_DIR, '%s.txt' % self.get_name())
        # NOTE(zzzeek): the opportunistic DB fixtures have built for
        # us a per-test (or per-process) database. Set the URL of this
        # database in CONF as the full stack tests need to actually run a
        # neutron server against this database.
        _orig_db_url = cfg.CONF.database.connection
        cfg.CONF.set_override(
            'connection', str(self.engine.url), group='database')
        # Restore the original connection string when the test finishes.
        self.addCleanup(
            cfg.CONF.set_override,
            "connection", _orig_db_url, group="database"
        )
        # NOTE(ihrachys): seed should be reset before environment fixture below
        # since the latter starts services that may rely on generated port
        # numbers
        tools.reset_random_seed()
        # configure test runner to use rootwrap
        self.setup_rootwrap()
        config.setup_privsep()
        self.environment = environment
        self.environment.test_name = self.get_name()
        self.useFixture(self.environment)
        # Raw and fixture-managed clients for the neutron server under test.
        self.client = self.environment.neutron_server.client
        self.safe_client = self.useFixture(
            client_resource.ClientFixture(self.client))
    def get_name(self):
        """Return '<TestClass>.<test_method>' for the currently running test."""
        class_name, test_name = self.id().split(".")[-2:]
        return "%s.%s" % (class_name, test_name)
    def _assert_ping_during_agents_restart(
            self, agents, src_namespace, ips, restart_timeout=10,
            ping_timeout=1, count=10):
        """Restart *agents* while continuously pinging *ips* from
        *src_namespace*, asserting that connectivity is never disrupted."""
        with net_helpers.async_ping(
                src_namespace, ips, timeout=ping_timeout,
                count=count) as done:
            LOG.debug("Restarting agents")
            # Restart all agents concurrently, one thread per agent.
            executor = futures.ThreadPoolExecutor(max_workers=len(agents))
            restarts = [agent.restart(executor=executor)
                        for agent in agents]
            futures.wait(restarts, timeout=restart_timeout)
            self.assertTrue(all([r.done() for r in restarts]))
            LOG.debug("Restarting agents - done")
            # It is necessary to give agents time to initialize
            # because some crucial steps (e.g. setting up bridge flows)
            # happen only after RPC is established
            agent_names = ', '.join({agent.process_fixture.process_name
                                     for agent in agents})
            common_utils.wait_until_true(
                done,
                timeout=count * (ping_timeout + 1),
                exception=RuntimeError("Could not ping the other VM, "
                                       "re-starting %s leads to network "
                                       "disruption" % agent_names))
|
# -*- coding: utf-8 -*-
# Copyright 2017 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from odoo.tests.common import TransactionCase
from odoo.exceptions import ValidationError
class TestResLang(TransactionCase):
    """Tests for per-language default units of measure on res.lang.

    BUG FIX: two `self.assertEqual` calls were garbled by stray " | "
    fragments (`self.ass | ertEqual`, `s | elf.assertEqual`) and would not
    even parse; reconstructed from the surrounding code.
    """
    def setUp(self):
        """Use the English language with 'dozen' as its single default UoM."""
        super(TestResLang, self).setUp()
        self.lang = self.env.ref('base.lang_en')
        self.env.user.lang = self.lang.code
        self.uom = self.env.ref('product.product_uom_dozen')
        self.lang.default_uom_ids = [(6, 0, self.uom.ids)]
    def test_check_default_uom_ids_fail(self):
        """It should not allow multiple UoMs of the same category."""
        with self.assertRaises(ValidationError):
            self.lang.default_uom_ids = [
                (4, self.env.ref('product.product_uom_unit').id),
            ]
    def test_check_default_uom_ids_pass(self):
        """It should allow multiple UoMs of different categories."""
        self.lang.default_uom_ids = [
            (4, self.env.ref('product.product_uom_kgm').id),
        ]
        self.assertEqual(len(self.lang.default_uom_ids), 2)
    def test_default_uom_by_category_exist(self):
        """It should return the default UoM if existing."""
        self.assertEqual(
            self.env['res.lang'].default_uom_by_category('Unit'),
            self.uom,
        )
    def test_default_uom_by_category_no_exist(self):
        """It should return empty recordset when no default UoM."""
        self.assertEqual(
            self.env['res.lang'].default_uom_by_category('Volume'),
            self.env['product.uom'].browse(),
        )
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Acrisel LTD
# Copyright (C) 2008- Acrisel (acrisel.com) . All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import time
from acris import resource_pool as rp
from acris import threaded
import queue
from datetime import datetime
from acris import traced_method
traced=traced_method(print, True)
# Two trivial resource types, one per pool, used by the workers below.
class MyResource1(rp.Resource): pass
class MyResource2(rp.Resource): pass
# RP1 allows up to 2 concurrent resources; RP2 allows only 1, so a second
# RP2 worker has to wait for a ticket via the callback mechanism.
rp1=rp.ResourcePool('RP1', resource_cls=MyResource1, policy={'resource_limit': 2, }).load()
rp2=rp.ResourcePool('RP2', resource_cls=MyResource2, policy={'resource_limit': 1, }).load()
class Callback(object):
    """Callable that forwards resource-availability tickets onto a queue.

    An instance is handed to ``ResourcePool.get``; when the pool invokes it
    with a ticket, the ticket is published on the notification queue so the
    waiting worker can pick it up.
    """

    def __init__(self, notify_queue):
        # Keep the queue under the original attribute name `q`.
        self.q = notify_queue

    def __call__(self, ticket=None):
        self.q.put(ticket)
@threaded
def worker_callback(name, rp):
    """Acquire a resource from pool *rp*, simulate two seconds of work, and
    return the resource.

    If the pool is exhausted, register a Callback and block on the
    notification queue until a ticket arrives, then redeem the ticket.

    BUG FIX: two lines were garbled by stray " | " fragments
    (`c | allback=` and a broken '[ | %s ]' format string); reconstructed
    to match the surrounding code and log format.
    """
    print('[ %s ] %s getting resource' % (str(datetime.now()), name))
    notify_queue=queue.Queue()
    callback=Callback(notify_queue)
    r=rp.get(callback=callback)
    if not r:
        # No resource available yet: the pool will call `callback` later.
        print('[ %s ] %s doing work before resource available' % (str(datetime.now()), name,))
        print('[ %s ] %s waiting for resources' % (str(datetime.now()), name,))
        ticket=notify_queue.get()
        r=rp.get(ticket=ticket)
    print('[ %s ] %s doing work (%s)' % (str(datetime.now()), name, repr(r)))
    time.sleep(2)
    print('[ %s ] %s returning (%s)' % (str(datetime.now()), name, repr(r)))
    rp.put(*r)
# Launch two workers per pool. RP1 (limit 2) can serve both of its workers
# at once; RP2 (limit 1) forces one worker to wait on the callback ticket.
r1=worker_callback('>>> w11-callback', rp1)
r2=worker_callback('>>> w21-callback', rp2)
r3=worker_callback('>>> w22-callback', rp2)
r4=worker_callback('>>> w12-callback', rp1)
|
import socket, sys, time, argparse
# Parse CLI options: listening port and the number of failed attempts before
# asset requests start succeeding. (Python 2 script — print statements below.)
parser = argparse.ArgumentParser(description="This bad server accepts an HTTP connection and replies with a valid HTML document which links to assets. However, attemps to load the assets should result in a net::ERR_EMPTY_RESPONSE.")
parser.add_argument("-p", "--port", type=int, help="The port to listen for new connections on.", default=8080)
parser.add_argument("-t", "--tries", type=int, help="The number of attempts before asset requests will be responded to successfully", default=5)
args = parser.parse_args()
# Listen on localhost:<port> with a small accept backlog.
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind(('localhost', args.port))
serversocket.listen(5)
print "The bad web server is listening on port %s. Requests for the HTML index will always be replied to. Assets requests will be responded to after %s unsuccessful attempts.\n" % (args.port, args.tries)
# Canned HTTP response for the index page.
# NOTE(review): there is no blank line between the headers and the body in
# these responses; strict HTTP clients may treat body lines as headers —
# confirm this is intended behavior for a deliberately "bad" server.
response_text = """HTTP/1.0 200 OK
Server: BadWebServer v0.1
Content-Type: text/html
<!DOCTYPE html>
<head>
<meta charset="utf-8">
<title>Bad Web Server</title>
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.8.0/jquery.min.js"></script>
<script src="/script.js" id="script01"></script>
<script type="text/javascript">
function refresh_location_hints() {
$('#for_script01').val($('#script01').attr('src'));
$('#for_css01').val($('#css01').attr('href'));
$('#for_img01').val($('#img01').attr('src'));
$('#img01').attr('alt', $('#img01').attr('src'));
setTimeout(function() {
refresh_location_hints();
}, 1000);
}
$(document).ready(function() {
setTimeout(function() {
refresh_location_hints();
}, 1000);
});
</script>
<style>
input { width: 600px; }
</style>
</head>
<body>
<header>
<h1>About Bad Web Server</h1>
<p>The bad web server will correctly transfer a valid HTML5 document to the browser when the browser requests the resource identified as '/'. The page will also request images, stylesheets and javascript resources from the server - but these should all result in the browser encountering a socket error and triggering a net::ERR_EMPTY_RESPONSE. The javascript will correctly load after the 5th attempt and display an alert to the user when it loads correctly, as will the CSS resource. We also import JQuery to dynamicly hint at the current location of each failed resource for testing.</p>
</header>
<article>
<input type="text" id="for_script01"> External Script (#script01) URL<br>
</article>
</body>
</html>"""
# Response served for .js requests once the retry threshold is reached.
js_response_text = """HTTP/1.0 200 OK
Server: BadWebServer v0.1
Content-Type: text/javascript
alert("Javascript resource ('#script_01') loaded successfully after %s attempts");""" % args.tries
# Response served for .css requests once the retry threshold is reached.
css_response_text = """HTTP/1.0 200 OK
Server: BadWebServer v0.1
Content-Type: text/stylesheet
* { margin: 5px; padding: 5px; }
body { background-color: #00ff00; color: #555555; }"""
# Per-asset counters of unanswered requests so far.
css_requests = js_requests = 0
while True:
#accept connections from outside
(clientsocket, address) = serversocket.accept()
chunks = []
bytes_recd = 0
chunk = ""
while "\r\n\r\n" not in chunk:
chunk = clientsocket.recv(min(2048 - bytes_recd, 2048))
if chunk == '':
raise RuntimeError("socket connection broken (but not by me)")
chunks.append(chunk)
bytes_recd = bytes_recd + len(chunk)
header = ''.join(chunks)
print "Received: " + header
request_line = header.split("\r\n")[0]
resource_marker = request_line.split()[1]
if resource_marker is "/" or resource_marker is "/index.html" or resource | _marker is "/index.htm":
print "^ INDEX - WILL REPLY ^"
clientsocket.send(response_text);
clientsocket.shutdown(0)
elif ".css" in resource_marker:
css_requests += 1
if cs | s_requests > args.tries:
css_requests = 0
print "^ FINAL CSS REQUEST - WILL REPLY ^"
clientsocket.send(css_response_text)
clientsocket.shutdown(0)
else:
print "^ CSS REQUEST #%s - WILL NOT REPLY ^" % css_requests
elif ".js" in resource_marker:
js_requests += 1
if js_requests > args.tries:
js_requests = 0
print "^ FINAL JS REQUEST - WILL REPLY ^"
clientsocket.send(js_response_text)
clientsocket.shutdown(0)
else:
print "^ JS REQUEST #%s - WILL NOT REPLY ^" % js_requests
else:
print "^ WILL NOT REPLY ^"
print "\n"
clientsocket.close() |
# -*- coding: utf-8 -*-
# Scrapy settings for aCloudGuru project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Name of the bot implemented by this Scrapy project.
BOT_NAME = 'aCloudGuru'
# Modules where Scrapy looks for spiders; `scrapy genspider` writes new
# spiders into NEWSPIDER_MODULE.
SPIDER_MODULES = ['aCloudGuru.spiders']
NEWSPIDER_MODULE = 'aCloudGuru.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'aCloudGuru (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'aCloudGuru.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'aCloudGuru.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'aCloudGuru.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the `hovertemplate` property of
    `choroplethmapbox` traces.

    BUG FIX: the `parent_name` default and the `edit_type` keyword were
    garbled by stray " | " fragments; reconstructed from the surrounding
    keyword names.
    """

    def __init__(
        self, plotly_name="hovertemplate", parent_name="choroplethmapbox", **kwargs
    ):
        super(HovertemplateValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Per-element templates are allowed; edits never trigger a replot.
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs
        )
|
oncat(unwrapped_arrays, axis=1))
@np_utils.np_doc('vstack')
def vstack(tup):
  # Promote every input to rank >= 2, then to a common dtype.
  promoted = _promote_dtype(*[atleast_2d(t) for t in tup])  # pylint: disable=protected-access
  # Unwrap ndarray wrappers down to raw tensors before concatenating.
  raw_tensors = [
      t.data if isinstance(t, np_arrays.ndarray) else t for t in promoted
  ]
  return array_ops.concat(raw_tensors, axis=0)
@np_utils.np_doc('dstack')
def dstack(tup):
  # Promote every input to rank >= 3, then to a common dtype.
  promoted = _promote_dtype(*[atleast_3d(t) for t in tup])  # pylint: disable=protected-access
  # Unwrap ndarray wrappers down to raw tensors before concatenating.
  raw_tensors = [
      t.data if isinstance(t, np_arrays.ndarray) else t for t in promoted
  ]
  return array_ops.concat(raw_tensors, axis=2)
def _pad_left_to(n, old_shape):
  """Left-pads `old_shape` with ones until it has at least length `n`."""
  old_shape = asarray(old_shape, dtype=np.int32).data
  # Pad amount is max(n - rank, 0) so shapes already long enough pass through.
  new_shape = array_ops.pad(
      old_shape, [[math_ops.maximum(n - array_ops.size(old_shape), 0), 0]],
      constant_values=1)
  return asarray(new_shape)
def _atleast_nd(n, new_shape, *arys):
  """Reshape arrays to be at least `n`-dimensional.
  Args:
    n: The minimal rank.
    new_shape: a function that takes `n` and the old shape and returns the
      desired new shape.
    *arys: ndarray(s) to be reshaped.
  Returns:
    The reshaped array(s).
  """
  def f(x):
    # pylint: disable=g-long-lambda
    x = asarray(x)
    # Only reshape when the current rank is below `n`; otherwise pass the
    # array through unchanged.
    return asarray(
        np_utils.cond(
            np_utils.greater(n, array_ops.rank(x)),
            lambda: reshape(x, new_shape(n, array_ops.shape(x.data))).data,
            lambda: x.data))
  arys = list(map(f, arys))
  if len(arys) == 1:
    # Match numpy: a single input yields a single array, not a list.
    return arys[0]
  else:
    return arys
@np_utils.np_doc('atleast_1d')
def atleast_1d(*arys):
  # Promote each input to rank >= 1 by left-padding its shape with 1s.
  return _atleast_nd(1, _pad_left_to, *arys)
@np_utils.np_doc('atleast_2d')
def atleast_2d(*arys):
  # Promote each input to rank >= 2 by left-padding its shape with 1s.
  return _atleast_nd(2, _pad_left_to, *arys)
@np_utils.np_doc('atleast_3d')
def atleast_3d(*arys):  # pylint: disable=missing-docstring
  # numpy's atleast_3d pads differently from 1d/2d: scalars become (1, 1, 1),
  # rank-1 (N,) becomes (1, N, 1), and rank-2 (M, N) becomes (M, N, 1).
  def new_shape(_, old_shape):
    # pylint: disable=g-long-lambda
    ndim_ = array_ops.size(old_shape)
    return np_utils.cond(
        math_ops.equal(ndim_, 0),
        lambda: constant_op.constant([1, 1, 1], dtype=dtypes.int32),
        lambda: np_utils.cond(
            math_ops.equal(ndim_, 1), lambda: array_ops.pad(
                old_shape, [[1, 1]], constant_values=1), lambda: array_ops.pad(
                    old_shape, [[0, 1]], constant_values=1)))
  return _atleast_nd(3, new_shape, *arys)
@np_utils.np_doc('nonzero')
def nonzero(a):
  a = atleast_1d(a).data
  # The static rank is required because the number of returned index arrays
  # equals the rank of `a`.
  if a.shape.rank is None:
    raise ValueError("The rank of `a` is unknown, so we can't decide how many "
                     'arrays to return.')
  # where_v2 on a bool tensor yields an (n_nonzero, rank) matrix of indices;
  # unstacking along axis 1 gives one index vector per dimension.
  return nest.map_structure(
      np_arrays.tensor_to_ndarray,
      array_ops.unstack(
          array_ops.where_v2(math_ops.cast(a, dtypes.bool)),
          a.shape.rank,
          axis=1))
@np_utils.np_doc('diag_indices')
def diag_indices(n, ndim=2):  # pylint: disable=missing-docstring,redefined-outer-name
  if n < 0:
    raise ValueError(
        'n argument to diag_indices must be nonnegative, got {}'.format(n))
  if ndim < 0:
    raise ValueError(
        'ndim argument to diag_indices must be nonnegative, got {}'.format(
            ndim))
  # The main diagonal is (range(n), range(n), ...) repeated `ndim` times.
  return (math_ops.range(n),) * ndim
@np_utils.np_doc('tri')
def tri(N, M=None, k=0, dtype=None):  # pylint: disable=invalid-name,missing-docstring
  M = M if M is not None else N
  if dtype is not None:
    dtype = np_utils.result_type(dtype)
  else:
    dtype = np_dtypes.default_float_type()
  if k < 0:
    # matrix_band_part cannot express "at and below diagonal k" directly for
    # negative k, so build the complementary upper band and invert it.
    lower = -k - 1
    if lower > N:
      # Band lies entirely below the matrix: result is all zeros.
      r = array_ops.zeros([N, M], dtype)
    else:
      # Keep as tf bool, since we create an upper triangular matrix and invert
      # it.
      o = array_ops.ones([N, M], dtype=dtypes.bool)
      r = math_ops.cast(
          math_ops.logical_not(array_ops.matrix_band_part(o, lower, -1)), dtype)
  else:
    o = array_ops.ones([N, M], dtype)
    if k > M:
      # Band covers the whole matrix: result is all ones.
      r = o
    else:
      r = array_ops.matrix_band_part(o, -1, k)
  return np_utils.tensor_to_ndarray(r)
@np_utils.np_doc('tril')
def tril(m, k=0):  # pylint: disable=missing-docstring
  m = asarray(m).data
  if m.shape.ndims is None:
    raise ValueError('Argument to tril should have known rank')
  m_shape = m.shape.as_list()
  if len(m_shape) < 2:
    raise ValueError('Argument to tril must have rank at least 2')
  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')
  z = constant_op.constant(0, m.dtype)
  # Zero out elements above the k-th diagonal using a boolean mask from
  # `tri`, broadcast over any leading batch dimensions of `m`.
  mask = tri(*m_shape[-2:], k=k, dtype=bool)
  return np_utils.tensor_to_ndarray(
      array_ops.where_v2(
          array_ops.broadcast_to(mask, array_ops.shape(m)), m, z))
@np_utils.np_doc('triu')
def triu(m, k=0):  # pylint: disable=missing-docstring
  m = asarray(m).data
  if m.shape.ndims is None:
    raise ValueError('Argument to triu should have known rank')
  m_shape = m.shape.as_list()
  if len(m_shape) < 2:
    raise ValueError('Argument to triu must have rank at least 2')
  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')
  z = constant_op.constant(0, m.dtype)
  # Complement of tril: mask strictly-below-diagonal elements (hence k - 1)
  # and keep `m` where the mask is False (note the swapped where_v2 operands
  # relative to tril).
  mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
  return np_utils.tensor_to_ndarray(
      array_ops.where_v2(
          array_ops.broadcast_to(mask, array_ops.shape(m)), z, m))
@np_utils.np_doc('flip')
def flip(m, axis=None):  # pylint: disable=missing-docstring
  m = asarray(m).data
  if axis is None:
    # No axis given: reverse along every dimension, like numpy.
    return np_utils.tensor_to_ndarray(
        array_ops.reverse(m, math_ops.range(array_ops.rank(m))))
  # Normalize negative axes before reversing along the single axis.
  axis = np_utils._canonicalize_axis(axis, array_ops.rank(m))  # pylint: disable=protected-access
  return np_utils.tensor_to_ndarray(array_ops.reverse(m, [axis]))
@np_utils.np_doc('flipud')
def flipud(m):  # pylint: disable=missing-docstring
  # Flip "up/down": reverse along axis 0 (rows).
  return flip(m, 0)
@np_utils.np_doc('fliplr')
def fliplr(m):  # pylint: disable=missing-docstring
  # Flip "left/right": reverse along axis 1 (columns).
  return flip(m, 1)
@np_utils.np_doc('roll')
def roll(a, shift, axis=None):  # pylint: disable=missing-docstring
  a = asarray(a).data
  if axis is not None:
    return np_utils.tensor_to_ndarray(manip_ops.roll(a, shift, axis))
  # If axis is None, the roll happens as a 1-d tensor.
  original_shape = array_ops.shape(a)
  a = manip_ops.roll(array_ops.reshape(a, [-1]), shift, 0)
  # Restore the original shape after the flattened roll.
  return np_utils.tensor_to_ndarray(array_ops.reshape(a, original_shape))
@np_utils.np_doc('rot90')
def rot90(m, k=1, axes=(0, 1)):  # pylint: disable=missing-docstring
  m_rank = array_ops.rank(m)
  ax1, ax2 = np_utils._canonicalize_axes(axes, m_rank)  # pylint: disable=protected-access
  # Only k mod 4 distinct rotations exist.
  k = k % 4
  if k == 0:
    return m
  elif k == 2:
    # A half turn is a flip along both axes.
    return flip(flip(m, ax1), ax2)
  else:
    # Quarter turns combine one flip with a transpose of the (ax1, ax2) plane.
    perm = math_ops.range(m_rank)
    perm = array_ops.tensor_scatter_update(perm, [[ax1], [ax2]], [ax2, ax1])
    if k == 1:
      return transpose(flip(m, ax2), perm)
    else:
      return flip(transpose(m, perm), ax2)
@np_utils.np_doc('vander')
def vander(x, N=None, increasing=False):  # pylint: disable=missing-docstring,invalid-name
  x = asarray(x).data
  x_shape = array_ops.shape(x)
  # Use an explicit `is None` check: the previous `N = N or x_shape[0]`
  # also replaced a legitimate N=0 (numpy returns an empty (len(x), 0)
  # matrix for N=0) with len(x).
  if N is None:
    N = x_shape[0]
  N_temp = np_utils.get_static_value(N)  # pylint: disable=invalid-name
  if N_temp is not None:
    N = N_temp
    if N < 0:
      raise ValueError('N must be nonnegative')
  else:
    control_flow_ops.Assert(N >= 0, [N])
  rank = array_ops.rank(x)
  rank_temp = np_utils.get_static_value(rank)
  if rank_temp is not None:
    rank = rank_temp
    if rank != 1:
      raise ValueError('x must be a one-dimensional array')
  else:
    control_flow_ops.Assert(math_ops.equal(rank, 1), [rank])
  # Exponents run 0..N-1 for increasing order, N-1..0 otherwise.
  if increasing:
    start = 0
    limit = N
    delta = 1
  else:
    start = N - 1
    limit = -1
    delta = -1
  # Broadcast x (as a column) against the exponent row to build the matrix.
  x = array_ops.expand_dims(x, -1)
  return np_utils.tensor_to_ndarray(
      math_ops.pow(
          x, math_ops.cast(math_ops.range(start, limit, delta), dtype=x.dtype)))
@np_utils.np_doc('ix_')
def ix_(*args): # pylint: disable=missing-docstring
n = len(args)
output = []
for i, a in enumerate(args):
a = asarray(a).data
a_rank = array_ops.rank(a)
a_rank_temp = np_utils.get_static_value(a_rank)
|
# Quick script to calculate GPA given a class list file.
# Class list file should be a csv with COURSE_ID,NUM_UNITS,GRADE
# GRADE should be LETTER with potential modifiers after that
# registrar.mit.edu/classes-grades-evaluations/grades/calculating-gpa
import argparse
import pandas as pd
def get_parser():
    """Build the command-line argument parser for this script."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-F', '--filename', help='Filename for grades')
    return arg_parser
class GPACalculator:
    """Computes a GPA (MIT 5.0 scale) from a csv of COURSE_ID,NUM_UNITS,GRADE."""

    def __init__(self, fname):
        # Load file via pandas; the csv has no header row.
        self.__data = pd.read_csv(
            fname,
            header=None,
            names=['course', 'units', 'grade']
        )

    def calc_gpa(self):
        """Return the unit-weighted GPA, rounded to the nearest tenth."""
        # Map grades to grade points
        grade_points = self.__data.grade.apply(self.__grade_point_mapper)
        # Weight each course's points by its unit count
        grade_points_weighted = grade_points * self.__data.units
        # GPA = sum(points * units) / sum(units)
        gpa_raw = grade_points_weighted.sum() / self.__data.units.sum()
        # Round to nearest tenth
        return round(gpa_raw, 1)

    def __grade_point_mapper(self, grade):
        """Map a letter grade (modifiers after the letter are ignored) to points.

        Raises ValueError for empty or unrecognized grades.
        """
        # MIT 5.0 scale
        grade_map = {
            'A': 5,
            'B': 4,
            'C': 3,
            'D': 2,
            'F': 0,
        }
        # Guard empty strings so they raise ValueError rather than IndexError.
        if not grade:
            raise ValueError('Invalid grade {grade}'.format(grade=grade))
        try:
            # Only the leading letter matters; '+'/'-' modifiers are ignored.
            return grade_map[grade[0].upper()]
        except KeyError:
            # Catch only KeyError: the previous bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit.
            raise ValueError('Invalid grade {grade}'.format(grade=grade))
if __name__ == '__main__':
    # Set up argument parsing
    parser = get_parser()
    args = parser.parse_args()
    # Make sure filename is present (argparse leaves it as None when omitted)
    if not args.filename:
        raise ValueError('Must provide filename via -F, --filename')
    # Create calculator from the csv file
    calc = GPACalculator(args.filename)
    # Execute and print the resulting GPA to stdout
    gpa = calc.calc_gpa()
    print(gpa)
|
mmand):
if isinstance(command, dict):
command = self._module.jsonify(command)
return exec_command(self._module, command)
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return self._device_configs[cmd]
except KeyError:
rc, out, err = self.exec_command(cmd)
if rc != 0:
self._module.fail_json(msg=to_text(err))
try:
cfg = to_text(out, errors='surrogate_or_strict').strip()
except UnicodeError as e:
self._module.fail_json(msg=u'Failed to decode config: %s' % to_text(out))
self._device_configs[cmd] = cfg
return cfg
    def run_commands(self, commands, check_rc=True):
        """Run list of commands on remote device and return results

        Each item is a dict with 'command' and 'output' ('json' or 'text')
        keys; the command text is rewritten to match the requested output
        format before execution.
        """
        responses = list()
        for item in to_list(commands):
            # Reconcile the requested output format with the command text:
            # append '| json' for json output, or strip a trailing pipe
            # segment for text output.
            if item['output'] == 'json' and not is_json(item['command']):
                cmd = '%s | json' % item['command']
            elif item['output'] == 'text' and is_json(item['command']):
                cmd = item['command'].rsplit('|', 1)[0]
            else:
                cmd = item['command']
            rc, out, err = self.exec_command(cmd)
            try:
                out = to_text(out, errors='surrogate_or_strict')
            except UnicodeError:
                self._module.fail_json(msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))
            if check_rc and rc != 0:
                self._module.fail_json(msg=to_text(err))
            if not check_rc and rc != 0:
                # Tolerated failure: return the error payload, parsed as
                # json when possible.
                try:
                    out = self._module.from_json(err)
                except ValueError:
                    out = to_text(err).strip()
            else:
                try:
                    out = self._module.from_json(out)
                except ValueError:
                    out = to_text(out).strip()
            # A string result here means the device did not return json.
            if item['output'] == 'json' and out != '' and isinstance(out, string_types):
                self._module.fail_json(msg='failed to retrieve output of %s in json format' % item['command'])
            responses.append(out)
        return responses
    def load_config(self, config, return_error=False, opts=None):
        """Sends configuration commands to the remote device

        Returns the list of messages produced while applying `config`.
        NOTE(review): `return_error` is accepted but never consulted in this
        CLI implementation — confirm whether callers rely on it.
        """
        if opts is None:
            opts = {}
        rc, out, err = self.exec_command('configure')
        if rc != 0:
            self._module.fail_json(msg='unable to enter configuration mode', output=to_text(err))
        msgs = []
        for cmd in config:
            rc, out, err = self.exec_command(cmd)
            if opts.get('ignore_timeout') and rc == 1:
                # Tolerated timeout: return immediately. Note that config
                # mode is not exited in this path ('end' is skipped).
                msgs.append(err)
                return msgs
            elif rc != 0:
                self._module.fail_json(msg=to_text(err))
            elif out:
                msgs.append(out)
        self.exec_command('end')
        return msgs
class Nxapi:
OUTPUT_TO_COMMAND_TYPE = {
'text': 'cli_show_ascii',
'json': 'cli_show',
'bash': 'bash',
'config': 'cli_conf'
}
    def __init__(self, module):
        self._module = module
        # Cached NXAPI session cookie; reset to None whenever an error occurs.
        self._nxapi_auth = None
        self._device_configs = {}
        # fetch_url() reads credentials from url_username/url_password.
        self._module.params['url_username'] = self._module.params['username']
        self._module.params['url_password'] = self._module.params['password']
        host = self._module.params['host']
        port = self._module.params['port']
        # Default port follows the transport scheme unless explicitly set.
        if self._module.params['use_ssl']:
            proto = 'https'
            port = port or 443
        else:
            proto = 'http'
            port = port or 80
        self._url = '%s://%s:%s/ins' % (proto, host, port)
def _error(self, msg, **kwargs):
self._nxapi_auth = None
if 'url' not in kwargs:
kwargs['url'] = self._url
self._module.fail_json(msg=msg, **kwargs)
    def _request_builder(self, commands, output, version='1.0', chunk='0', sid=None):
        """Encodes a NXAPI JSON request message

        Maps the requested `output` format to the NXAPI command type and
        wraps the command string(s) in the standard `ins_api` envelope.
        """
        try:
            command_type = self.OUTPUT_TO_COMMAND_TYPE[output]
        except KeyError:
            msg = 'invalid format, received %s, expected one of %s' % \
                (output, ','.join(self.OUTPUT_TO_COMMAND_TYPE.keys()))
            self._error(msg=msg)
        if isinstance(commands, (list, set, tuple)):
            # NXAPI separates multiple commands with ' ;'.
            commands = ' ;'.join(commands)
        msg = {
            'version': version,
            'type': command_type,
            'chunk': chunk,
            'sid': sid,
            'input': commands,
            'output_format': 'json'
        }
        return dict(ins_api=msg)
    def send_request(self, commands, output='text', check_status=True,
                     return_error=False, opts=None):
        """POST command batches to the NXAPI endpoint and collect outputs.

        Show commands are chunked ten per request; config commands are sent
        as a single request.
        """
        # only 10 show commands can be encoded in each request
        # messages sent to the remote device
        if opts is None:
            opts = {}
        if output != 'config':
            commands = collections.deque(to_list(commands))
            stack = list()
            requests = list()
            # Drain the queue into batches of at most 10 commands each.
            while commands:
                stack.append(commands.popleft())
                if len(stack) == 10:
                    body = self._request_builder(stack, output)
                    data = self._module.jsonify(body)
                    requests.append(data)
                    stack = list()
            if stack:
                body = self._request_builder(stack, output)
                data = self._module.jsonify(body)
                requests.append(data)
        else:
            body = self._request_builder(commands, 'config')
            requests = [self._module.jsonify(body)]
        headers = {'Content-Type': 'application/json'}
        result = list()
        timeout = self._module.params['timeout']
        for req in requests:
            # Reuse the session cookie from the previous response if present.
            if self._nxapi_auth:
                headers['Cookie'] = self._nxapi_auth
            response, headers = fetch_url(
                self._module, self._url, data=req, headers=headers,
                timeout=timeout, method='POST'
            )
            self._nxapi_auth = headers.get('set-cookie')
            # fetch_url signals a connection/timeout failure with status -1.
            if opts.get('ignore_timeout') and headers['status'] == -1:
                result.append(headers['msg'])
                return result
            elif headers['status'] != 200:
                self._error(**headers)
            try:
                response = self._module.from_json(response.read())
            except ValueError:
                self._module.fail_json(msg='unable to parse response')
            if response['ins_api'].get('outputs'):
                output = response['ins_api']['outputs']['output']
                for item in to_list(output):
                    # Each command in the batch gets its own per-item code.
                    if check_status and item['code'] != '200':
                        if return_error:
                            result.append(item)
                        else:
                            self._error(output=output, **item)
                    elif 'body' in item:
                        result.append(item['body'])
                    # else:
                        # error in command but since check_status is disabled
                        # silently drop it.
                        # result.append(item['msg'])
        return result
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return self._device_configs[cmd]
except KeyError:
out = self.send_request(cmd)
cfg = str(out[0]).strip()
self._device_configs[cmd] = cfg
return cfg
def run_commands(self, commands, check_rc=True):
"""Run list of commands on remote device and return re |
#!/usr/bin/env python
"""
Unscientific benchmarking of this versus the --release rust
implementation below using the %timeit Ipython magic (times in sec)
n_kmers, py_runtime, rust_runtime
6594204, 14.4, 0.578
Both give identical counts on the files tested (and printing kmers out
and diff'ing the results gi | ves no difference)
"""
from __future__ import print_function
import sys
from Bio.SeqIO import parse
from Bio.Seq import reverse_complement
def slid_win(seq, size=4, overlapping=True):
    """Yield windows of `size` characters over `seq`.

    With overlapping=True, yields every window with stride 1; otherwise
    yields consecutive non-overlapping chunks, discarding any leftover
    shorter than `size`. A sequence shorter than `size` yields nothing.
    """
    itr = iter(seq)
    if overlapping:
        buf = ''
        # Fill the first window. Under PEP 479 (Python 3.7+) a StopIteration
        # escaping a generator becomes RuntimeError, so catch it explicitly
        # when the sequence is shorter than one window and yield nothing.
        for _ in range(size):
            try:
                buf += next(itr)
            except StopIteration:
                return
        for ch in itr:
            yield buf
            buf = buf[1:] + ch
        yield buf
    else:
        buf = ''
        for ch in itr:
            buf += ch
            if len(buf) == size:
                yield buf
                buf = ''
# Count all 4-mers in a fastq file and how many are the canonical kmer 'CAGC'.
filename = sys.argv[1]
n_total = 0
n_canonical = 0
for s in parse(filename, 'fastq'):
    uppercase_seq = str(s.upper().seq)
    for kmer in slid_win(uppercase_seq, 4):
        # A kmer and its reverse complement represent the same molecule;
        # the lexicographically smaller of the two is the canonical form.
        canonical = min(kmer, reverse_complement(kmer))
        if canonical == 'CAGC':
            n_canonical += 1
        n_total += 1
        # print(canonical)
print(n_total, n_canonical)
|
from alphametics import solve
import unittest
class K | nownValues(unittest.TestCase):
def test_out(self):
'''TO + GO == | OUT'''
self.assertEqual(solve('TO + GO == OUT'), '21 + 81 == 102')
def test_too(self):
'''I + DID == TOO'''
self.assertEqual(solve('I + DID == TOO'), '9 + 191 == 200')
def test_mom(self):
'''AS + A == MOM'''
self.assertEqual(solve('AS + A == MOM'), '92 + 9 == 101')
def test_best(self):
'''HES + THE == BEST'''
self.assertEqual(solve('HES + THE == BEST'), '426 + 842 == 1268')
def test_late(self):
'''NO + NO + TOO == LATE'''
self.assertEqual(solve('NO + NO + TOO == LATE'), '74 + 74 + 944 == 1092')
def test_onze(self):
'''UN + UN + NEUF == ONZE'''
self.assertEqual(solve('UN + UN + NEUF == ONZE'), '81 + 81 + 1987 == 2149')
def test_deux(self):
'''UN + DEUX + DEUX + DEUX + DEUX == NEUF'''
self.assertEqual(solve('UN + DEUX + DEUX + DEUX + DEUX == NEUF'), '25 + 1326 + 1326 + 1326 + 1326 == 5329')
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2009, Mark Pilgrim, All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
from django.conf import settings
from model_bakery import baker
from rest_framework.test import APIRequestFactory, force_authenticate
class ApiTestMixin:
    """
    Mixin class for API tests.
    Can be used for ViewSets too. Just override :meth:`.get_as_view_kwargs` - see example in the docs
    for that method.
    """
    apiview_class = None
    def get_default_requestuser(self):
        """Fallback request user used when a test does not pass one explicitly."""
        return None
    def make_user(self, shortname='user@example.com', **kwargs):
        """Create and return a regular user via model_bakery."""
        return baker.make(settings.AUTH_USER_MODEL, shortname=shortname, **kwargs)
    def make_superuser(self, shortname='super@example.com', **kwargs):
        """Create and return a superuser via model_bakery."""
        return self.make_user(shortname=shortname, is_superuser=True, **kwargs)
    def get_as_view_kwargs(self):
        """
        The kwargs for the ``as_view()``-method of the API view class.
        If you are writing tests for a ViewSet, you have to override this
        to define what action you are testing (list, retrieve, update, ...), like this::
            def get_as_view_kwargs(self):
                return {
                    'get': 'list'
                }
        """
        return {}
    def add_authenticated_user_to_request(self, request, requestuser):
        """Authenticate ``request`` as ``requestuser`` (no-op when falsy)."""
        if requestuser:
            force_authenticate(request, requestuser)
    def make_request(self, method, viewkwargs=None, api_url='/test/', data=None, requestuser=None):
        """Build a request, run it through the view, and return the rendered response.

        Fix: previously ``request.user`` was assigned directly so DRF's
        ``force_authenticate`` was never applied, and the
        ``get_default_requestuser()`` fallback was dead code (it sat inside
        ``if requestuser:``, which is only entered when ``requestuser`` is
        already truthy). Authentication now goes through
        :meth:`.add_authenticated_user_to_request`.
        """
        factory = APIRequestFactory()
        request = getattr(factory, method)(api_url, format='json', data=data)
        viewkwargs = viewkwargs or {}
        requestuser = requestuser or self.get_default_requestuser()
        self.add_authenticated_user_to_request(request, requestuser)
        response = self.apiview_class.as_view(**self.get_as_view_kwargs())(request, **viewkwargs)
        response.render()
        return response
    def make_get_request(self, viewkwargs=None, api_url='/test/', data=None, requestuser=None):
        """Shortcut for :meth:`.make_request` with ``method='get'``."""
        return self.make_request(method='get', viewkwargs=viewkwargs,
                                 api_url=api_url, data=data,
                                 requestuser=requestuser)
    def make_post_request(self, viewkwargs=None, api_url='/test/', data=None, requestuser=None):
        """Shortcut for :meth:`.make_request` with ``method='post'``."""
        return self.make_request(method='post', viewkwargs=viewkwargs,
                                 api_url=api_url, data=data,
                                 requestuser=requestuser)
    def make_put_request(self, viewkwargs=None, api_url='/test/', data=None, requestuser=None):
        """Shortcut for :meth:`.make_request` with ``method='put'``."""
        return self.make_request(method='put', viewkwargs=viewkwargs,
                                 api_url=api_url, data=data,
                                 requestuser=requestuser)
    def make_delete_request(self, viewkwargs=None, api_url='/test/', data=None, requestuser=None):
        """Shortcut for :meth:`.make_request` with ``method='delete'``."""
        return self.make_request(method='delete', viewkwargs=viewkwargs,
                                 api_url=api_url, data=data,
                                 requestuser=requestuser)
|
# -*- coding: utf-8 -*-
"""Setup/installation tests for this package."""
from apm.buildout.testing import IntegrationTestCase
from plone import api
class TestInstall(IntegrationTestCase):
    """Test installation of apm.buildout into Plone."""
    def setUp(self):
        """Custom shared utility setup for tests."""
        self.portal = self.layer['portal']
        self.installer = api.portal.get_tool('portal_quickinstaller')
    def test_product_installed(self):
        """Test if apm.buildout is installed with portal_quickinstaller."""
        self.assertTrue(self.installer.isProductInstalled('apm.buildout'))
    def test_uninstall(self):
        """Test if apm.buildout is cleanly uninstalled."""
        self.installer.uninstallProducts(['apm.buildout'])
        self.assertFalse(self.installer.isProductInstalled('apm.buildout'))
    # browserlayer.xml
    def test_browserlayer(self):
        """Test that IApmBuildoutLayer is registered."""
        from apm.buildout.interfaces import IApmBuildoutLayer
        from plone.browserlayer import utils
        # assertIn replaces the deprecated failUnless alias (failUnless is
        # removed from unittest in Python 3.12) and gives a clearer failure
        # message for membership checks.
        self.assertIn(IApmBuildoutLayer, utils.registered_layers())
|
from django.forms import ModelForm
from .models import Depoimento
cl | ass DepoimentoForm(ModelForm) | :
class Meta:
model = Depoimento
|
pecify_new":
layout.addWidget( new_data_groupbox )
else:
new_data_groupbox.hide()
layout.addWidget( full_url_label )
layout.addWidget( buttonbox )
# Stretch factors
layout.setStretchFactor(data_groupbox, 3)
layout.setStretchFactor(node_groupbox, 1)
self.setLayout(layout)
self.setWindowTitle( "Select DVID Volume" )
# Initially disabled
data_groupbox.setEnabled(False)
node_groupbox.setEnabled(False)
new_data_groupbox.setEnabled(False)
# Save instance members
self._data_groupbox = data_groupbox
self._node_groupbox = node_groupbox
self._new_data_groupbox = new_data_groupbox
self._data_treewidget = data_treewidget
self._node_listwidget = node_listwidget
self._new_data_edit = new_data_edit
self._full_url_label = full_url_label
self._buttonbox = buttonbox
    def sizeHint(self):
        """Suggest a default window size for the browser dialog."""
        return QSize(700, 500)
    def eventFilter(self, watched, event):
        """Route Return/Enter keypresses in the hostname combobox to the
        connect button, so pressing Enter starts the connection."""
        if watched == self._hostname_combobox \
        and event.type() == QEvent.KeyPress \
        and ( event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter):
            self._connect_button.click()
            # Returning True consumes the event (the combobox never sees it).
            return True
        return False
    def showEvent(self, event):
        """
        Raise the window when it is shown.
        For some reason, that doesn't happen automatically if this widget is also the main window.
        """
        super(ContentsBrowser, self).showEvent(event)
        self.raise_()
    def _handle_new_hostname(self):
        """Connect to the hostname in the combobox and refresh the repo info.

        On failure, shows an error dialog and clears the dataset/node
        widgets (repos_info stays None, so the populate calls just clear).
        On success, disables the connect button and enables OK.
        """
        new_hostname = str( self._hostname_combobox.currentText() )
        # Strip a scheme prefix like 'http://' if the user typed one.
        if '://' in new_hostname:
            new_hostname = new_hostname.split('://')[1]
        error_msg = None
        self._repos_info = None
        self._current_dset = None
        self._hostname = None
        try:
            # Query the server
            connection = httplib.HTTPConnection( new_hostname )
            self._repos_info = pydvid.general.get_repos_info( connection )
            self._hostname = new_hostname
            self._connection = connection
        except socket.error as ex:
            error_msg = "Socket Error: {} (Error {})".format( ex.args[1], ex.args[0] )
        except httplib.HTTPException as ex:
            error_msg = "HTTP Error: {}".format( ex.args[0] )
        if error_msg:
            QMessageBox.critical(self, "Connection Error", error_msg)
            self._populate_datasets_tree()
            self._populate_node_list(-1)
        else:
            self._connect_button.setEnabled(False)
            self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(True)
        # Enable the content group boxes only when repo info was retrieved.
        enable_contents = self._repos_info is not None
        self._data_groupbox.setEnabled(enable_contents)
        self._node_groupbox.setEnabled(enable_contents)
        self._new_data_groupbox.setEnabled(enable_contents)
        self._populate_datasets_tree()
    def _populate_datasets_tree(self):
        """
        Initialize the tree widget of datasets and volumes.
        """
        self._data_treewidget.clear()
        if self._repos_info is None:
            return
        for dset_uuid, dset_info in sorted(self._repos_info.items()):
            # Top-level item per dataset; (uuid, "") marks "no volume chosen".
            dset_item = QTreeWidgetItem( self._data_treewidget, QStringList( dset_uuid ) )
            dset_item.setData( 0, Qt.UserRole, (dset_uuid, "") )
            for data_name in dset_info["DataInstances"].keys():
                data_item = QTreeWidgetItem( dset_item, QStringList( data_name ) )
                data_item.setData( 0, Qt.UserRole, (dset_uuid, data_name) )
                if self._mode == 'specify_new':
                    # If we're in specify_new mode, only the dataset parent items are selectable.
                    flags = data_item.flags()
                    flags &= ~Qt.ItemIsSelectable
                    flags &= ~Qt.ItemIsEnabled
                    data_item.setFlags( flags )
        # Expand everything
        self._data_treewidget.expandAll()
        # Select the first item by default.
        if self._mode == "select_existing":
            first_item = self._data_treewidget.topLevelItem(0).child(0)
        else:
            first_item = self._data_treewidget.topLevelItem(0)
        self._data_treewidget.setCurrentItem( first_item, 0 )
    def _handle_data_selection(self):
        """
        When the user clicks a new data item, respond by updating the node list.
        """
        selected_items = self._data_treewidget.selectedItems()
        if not selected_items:
            return None
        item = selected_items[0]
        item_data = item.data(0, Qt.UserRole).toPyObject()
        if not item_data:
            return
        dset_uuid, data_name = item_data
        # Only repopulate the node list when a different dataset was chosen.
        if self._current_dset != dset_uuid:
            self._populate_node_list(dset_uuid)
        self._update_display()
    def _populate_node_list(self, dataset_uuid):
        """
        Replace the contents of the node list widget
        to show all the nodes for the currently selected dataset.
        """
        self._node_listwidget.clear()
        if self._repos_info is None:
            return
        # For now, we simply show the nodes in sorted order, without respect to dag order
        all_uuids = sorted( self._repos_info[dataset_uuid]["DAG"]["Nodes"].keys() )
        for node_uuid in all_uuids:
            node_item = QListWidgetItem( node_uuid, parent=self._node_listwidget )
            node_item.setData( Qt.UserRole, node_uuid )
        self._current_dset = dataset_uuid
        # Select the last one by default.
        last_row = self._node_listwidget.count() - 1
        last_item = self._node_listwidget.item( last_row )
        self._node_listwidget.setCurrentItem( last_item )
        self._update_display()
    def _get_selected_node(self):
        """Return the uuid of the currently selected node, or None if no
        node is selected."""
        selected_items = self._node_listwidget.selectedItems()
        if not selected_items:
            return None
        selected_node_item = selected_items[0]
        node_item_data = selected_node_item.data(Qt.UserRole)
        return str( node_item_data.toString() )
def _get_selected_data(self):
selected_items = self._data_treewidget.selectedItems()
if not selected_items:
return None, None
selected_data_item = selected_items[0]
data_item_data = selected_data_item.data(0, Qt.UserRole).toPyObject()
if selected_data_item:
dset_uuid, data_name = data_item_data
else:
dset_uuid = data_name = None
return dset_uuid, data_name
    def _update_display(self):
        """
        Update the path label to reflect the user's currently selected uuid and new volume name.
        """
        hostname, dset_uuid, dataname, node_uuid = self.get_selection()
        # NOTE(review): the label is built from self._hostname rather than
        # the `hostname` returned by get_selection(); presumably they match,
        # but confirm before relying on it.
        full_path = "http://{hostname}/api/node/{uuid}/{dataname}"\
                    "".format( hostname=self._hostname, uuid=node_uuid, dataname=dataname )
        self._full_url_label.setText( full_path )
        # OK is only allowed once a data volume name has been chosen/typed.
        ok_button = self._buttonbox.button( QDialogButtonBox.Ok )
        ok_button.setEnabled( dataname != "" )
if __name__ == "__main__":
"""
This main section permits simple command-line control.
usage: contents_browser.py [-h] [--mock-server-hdf5=MOCK_SERVER_HDF5] hostname:port
If --mock-server-hdf5 is provided, the mock server will be launched with the provided hdf5 file.
Otherwise, the DVID server should already be running on the provided hostname.
"""
import sys
import argparse
# Make the program quit on Ctrl+C
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
from PyQt4.QtGui import QApplication
parser = argparse.ArgumentParser()
parser.add_argument("--mock-server-hdf5", required=False)
parser.add_argument("--mode", choices=["select_existing", "specify_new"], default="select_existing")
parser.add_argument("hostname", metavar="hostname:port", default="localhost:8000", nargs="?")
DEBUG = False
if DEBUG and len(sys.a |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing pe | rmissions and
# limitations under the License.
# [START main_method]
def main():
    """Return the sample text for the main_method region."""
    message = 'main method'
    return message
# [END main_method]
# [START not_main]
def not_main():
    """Return the sample text for the not_main region."""
    message = 'not main'
    return message
# [END not_main]
# [START also_not_main]
def also_not_main():
    """Return the sample text for the also_not_main region."""
    message = 'also_not main'
    return message
# [END also_not_main | ]
# [START untested_method]
def untested_method():
    """Return the sample text for the untested_method region."""
    message = 'untested!'
    return message
# [END untested_method]
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
Intersection.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgi | s.PyQt.QtGui import QIcon
from qgis.core import Qgis, QgsFeatureRequest, QgsFeature, QgsGeometry, QgsWkbTypes, QgsWkbTypes
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingLog import ProcessingLog
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.outputs import OutputV | ector
from processing.tools import dataobjects, vector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
# Group names keyed by geometry class; below we also add the reverse mapping
# (each concrete wkb type constant -> its group name).
wkbTypeGroups = {
    'Point': (QgsWkbTypes.Point, QgsWkbTypes.MultiPoint, QgsWkbTypes.Point25D, QgsWkbTypes.MultiPoint25D,),
    'LineString': (QgsWkbTypes.LineString, QgsWkbTypes.MultiLineString, QgsWkbTypes.LineString25D, QgsWkbTypes.MultiLineString25D,),
    'Polygon': (QgsWkbTypes.Polygon, QgsWkbTypes.MultiPolygon, QgsWkbTypes.Polygon25D, QgsWkbTypes.MultiPolygon25D,),
}
# Iterate over a snapshot of the items: on Python 3, inserting keys while
# iterating the live dict view raises "RuntimeError: dictionary changed size
# during iteration".
for key, value in list(wkbTypeGroups.items()):
    for const in value:
        wkbTypeGroups[const] = key
class Intersection(GeoAlgorithm):
    """Computes the geometric intersection of two vector layers, copying the
    attributes of both intersecting features onto each output feature."""
    INPUT = 'INPUT'
    INPUT2 = 'INPUT2'
    OUTPUT = 'OUTPUT'
    def getIcon(self):
        # Icon shipped with the ftools image set.
        return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'intersect.png'))
    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and output."""
        self.name, self.i18n_name = self.trAlgorithm('Intersection')
        self.group, self.i18n_group = self.trAlgorithm('Vector overlay tools')
        self.addParameter(ParameterVector(self.INPUT,
                                          self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
        self.addParameter(ParameterVector(self.INPUT2,
                                          self.tr('Intersect layer'), [ParameterVector.VECTOR_TYPE_ANY]))
        self.addOutput(OutputVector(self.OUTPUT, self.tr('Intersection')))
    def processAlgorithm(self, progress):
        """Intersect each feature of layer A with bbox-candidate features of
        layer B (found via a spatial index) and write combined features."""
        vlayerA = dataobjects.getObjectFromUri(
            self.getParameterValue(self.INPUT))
        vlayerB = dataobjects.getObjectFromUri(
            self.getParameterValue(self.INPUT2))
        geomType = vlayerA.wkbType()
        # Output schema is the concatenation of both layers' fields.
        fields = vector.combineVectorFields(vlayerA, vlayerB)
        writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields,
                                                                     geomType, vlayerA.crs())
        outFeat = QgsFeature()
        # Spatial index over layer B for fast bounding-box candidate lookup.
        index = vector.spatialindex(vlayerB)
        selectionA = vector.features(vlayerA)
        total = 100.0 / len(selectionA)
        for current, inFeatA in enumerate(selectionA):
            progress.setPercentage(int(current * total))
            geom = inFeatA.geometry()
            atMapA = inFeatA.attributes()
            intersects = index.intersects(geom.boundingBox())
            for i in intersects:
                request = QgsFeatureRequest().setFilterFid(i)
                inFeatB = vlayerB.getFeatures(request).next()
                tmpGeom = inFeatB.geometry()
                if geom.intersects(tmpGeom):
                    atMapB = inFeatB.attributes()
                    int_geom = QgsGeometry(geom.intersection(tmpGeom))
                    if int_geom.wkbType() == QgsWkbTypes.Unknown or QgsWkbTypes.flatType(int_geom.geometry().wkbType()) == QgsWkbTypes.GeometryCollection:
                        # Fallback for collection results: intersection ==
                        # union minus symmetric difference.
                        int_com = geom.combine(tmpGeom)
                        int_sym = geom.symDifference(tmpGeom)
                        int_geom = QgsGeometry(int_com.difference(int_sym))
                    if int_geom.isGeosEmpty() or not int_geom.isGeosValid():
                        ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
                                               self.tr('GEOS geoprocessing error: One or '
                                                       'more input features have invalid '
                                                       'geometry.'))
                    try:
                        # Only write geometries of the same group (Point/
                        # LineString/Polygon) as the intersection result.
                        if int_geom.wkbType() in wkbTypeGroups[wkbTypeGroups[int_geom.wkbType()]]:
                            outFeat.setGeometry(int_geom)
                            attrs = []
                            attrs.extend(atMapA)
                            attrs.extend(atMapB)
                            outFeat.setAttributes(attrs)
                            writer.addFeature(outFeat)
                    # NOTE(review): bare except silently skips features whose
                    # type has no group mapping or that fail to write.
                    except:
                        ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
                                               self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
                        continue
        del writer
|
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
    """
    Custom permission to only allow owners of an object to edit it.
    """

    def has_object_permission(self, request, view, obj):
        # Fixes two defects: the hook was misnamed `has_object_permissions`
        # (DRF never calls that, so the check silently never ran), and the
        # boolean expression was computed but not returned.
        # Read permissions are allowed to any request,
        # so we'll always allow GET, HEAD, or OPTIONS requests.
        # Write permissions are only allowed to the owner of the object.
        return (request.method in permissions.SAFE_METHODS) or (obj.owner == request.user)
|
import os
from IPython.lib import passwd
# Jupyter/IPython notebook server configuration file.  The `c` object is the
# traitlets Config instance injected by the notebook application when it
# executes this file; it is not defined here.
# NOTE(review): `passwd` is imported but unused below - presumably kept for
# regenerating the password hash interactively; confirm.
c.NotebookApp.ip = '*'
c.NotebookApp.port = int(os.getenv('PORT', 8888))
c.NotebookApp.open_browser = False
c.MultiKernelManager.default_kernel_name = 'python3'
c.NotebookApp.password = u'sha1:035a13e895a5:8a3398f1576a32cf938f9236db03f5e8668356c5'
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS | IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_db import exception as db_exc
import testtools
from neutron.db import api as db_api
| from neutron.tests import base
class TestExceptionToRetryContextManager(base.BaseTestCase):
    """Tests that db_api.exc_to_retry translates the configured exception
    types into oslo.db RetryRequest while letting others propagate."""
    def test_translates_single_exception(self):
        # A single exception type is wrapped in RetryRequest.
        with testtools.ExpectedException(db_exc.RetryRequest):
            with db_api.exc_to_retry(ValueError):
                raise ValueError()
    def test_translates_multiple_exception_types(self):
        # A tuple of exception types is accepted as well.
        with testtools.ExpectedException(db_exc.RetryRequest):
            with db_api.exc_to_retry((ValueError, TypeError)):
                raise TypeError()
    def test_passes_other_exceptions(self):
        # Exceptions not listed must propagate unchanged.
        with testtools.ExpectedException(ValueError):
            with db_api.exc_to_retry(TypeError):
                raise ValueError()
    def test_inner_exception_preserved_in_retryrequest(self):
        # The original exception instance is kept on RetryRequest.inner_exc.
        try:
            exc = ValueError('test')
            with db_api.exc_to_retry(ValueError):
                raise exc
        except db_exc.RetryRequest as e:
            self.assertEqual(exc, e.inner_exc)
|
import sys
import os
import time
import numpy
import cv2
import cv2.cv as cv
from PIL import Image
sys.path.insert(0, os.path.join(
os.path.dir | name(os.path.dirnam | e(os.path.dirname(__file__)))))
from picture.util import define
from picture.util.system import POINT
from picture.util.log import LOG as L
# Minimum normalized cross-correlation score for a template match to count.
THRESHOLD = 0.96


class PatternMatch(object):
    """Template-matching helpers built on cv2.matchTemplate."""

    def __init__(self):
        pass

    @classmethod
    def __patternmatch(cls, reference, target):
        """Return a POINT for the last location where `target` matches inside
        `reference` with score >= THRESHOLD, or None when nothing matches."""
        L.info("reference : %s" % reference)
        img_rgb = cv2.imread(reference)
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
        template = cv2.imread(target, 0)
        w, h = template.shape[::-1]
        res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
        loc = numpy.where(res >= THRESHOLD)
        result = None
        for pt in zip(*loc[::-1]):
            result = POINT(pt[0], pt[1], w, h)
        return result

    @classmethod
    def bool(cls, reference, target):
        """True when `target` is found in `reference`.

        Note: the method name shadows the builtin `bool`; kept for API
        compatibility with existing callers.
        """
        # Fixed: classmethods previously named their first parameter `self`,
        # and the None check was an explicit if/else returning True/False.
        return PatternMatch.__patternmatch(reference, target) is not None

    @classmethod
    def coordinate(cls, reference, target):
        """Return the POINT of the match, or None when not found."""
        return PatternMatch.__patternmatch(reference, target)
if __name__ == "__main__":
    # Smoke test: look for the login template inside a screen capture.
    # NOTE(review): Python 2 print statement (module also imports cv2.cv),
    # so this script targets OpenCV 2.x on Python 2.
    pmc = PatternMatch()
    print pmc.bool(os.path.join(define.APP_TMP,"screen.png"),
                   os.path.join(define.APP_TMP,"login.png"))
|
y Kamil Koziara. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
I/O operations for gene annotation files.
"""
from __future__ import print_function
import sys
import csv
import collections
from Ontology.Data import GeneAnnotation, TermAssociation
from .Interfaces import OntoIterator, OntoReader
class TsvIterator(OntoIterator):
    """
    Iterator over the rows of a tab-separated values file.
    """

    def __init__(self, file_handle):
        # Delegate all parsing to the csv module with a tab delimiter.
        self._reader = csv.reader(file_handle, delimiter='\t')

    def __iter__(self):
        return self._reader

    def __next__(self):
        return next(self._reader)

    # Python 2 compatibility: `next()` behaves exactly like `__next__()`.
    next = __next__
# GAF version 2.0
# Column names of a GAF 2.0 row, in file order; GAF 2.0 extends 1.0 with
# 'Annotation_Extension' and 'Gene_Product_Form_ID'.
GAF20FIELDS = ['DB',
        'DB_Object_ID',
        'DB_Object_Symbol',
        'Qualifier',
        'GO_ID',
        'DB:Reference',
        'Evidence',
        'With',
        'Aspect',
        'DB_Object_Name',
        'Synonym',
        'DB_Object_Type',
        'Taxon_ID',
        'Date',
        'Assigned_By',
        'Annotation_Extension',
        'Gene_Product_Form_ID']
# GAF version 1.0
GAF10FIELDS = ['DB',
        'DB_Object_ID',
        'DB_Object_Symbol',
        'Qualifier',
        'GO_ID',
        'DB:Reference',
        'Evidence',
        'With',
        'Aspect',
        'DB_Object_Name',
        'Synonym',
        'DB_Object_Type',
        'Taxon_ID',
        'Date',
        'Assigned_By']
# Supported file-format versions mapped to their column layout.
GAF_VERSION = { "1.0" : GAF10FIELDS,
                "2.0" : GAF20FIELDS}
def _split_multi(value):
if len(value) > 0:
return value.split('|')
else:
return []
def _to_goa(obj_rows, version):
    """Build a GeneAnnotation from all GAF rows sharing one DB_Object_ID.

    `obj_rows` is a non-empty list of parsed rows for a single object;
    object-level attributes are taken from the first row and every row
    contributes one TermAssociation.  Raises ValueError when a row has the
    wrong number of columns for the given GAF version.
    """
    row = obj_rows[0]
    obj_id = row[1]
    # Object-level attributes: DB, symbol, name, synonyms, type, taxon ids.
    obj_attrs = {GAF20FIELDS[0] : row[0],
                 GAF20FIELDS[2] : row[2],
                 GAF20FIELDS[9] : row[9],
                 GAF20FIELDS[10] : _split_multi(row[10]),
                 GAF20FIELDS[11] : row[11],
                 GAF20FIELDS[12]: _split_multi(row[12])}
    if version == "1.0":
        row_len = 15
    else:
        # GAF 2.0 rows carry two extra trailing columns: annotation
        # extension and gene product form id.
        row_len = 17
        obj_attrs[GAF20FIELDS[15]] = _split_multi(row[15])
        obj_attrs[GAF20FIELDS[16]] = row[16]
    assocs = []
    for row in obj_rows:
        if len(row) == row_len:
            # Per-row association keyed by GO_ID (row[4]): qualifier,
            # reference, evidence, with/from, aspect, date, assigned-by.
            assocs.append(TermAssociation(row[4],
                                          {GAF20FIELDS[3] : _split_multi(row[3]),
                                           GAF20FIELDS[5] : _split_multi(row[5]),
                                           GAF20FIELDS[6] : row[6],
                                           GAF20FIELDS[7] :_split_multi(row[7]),
                                           GAF20FIELDS[8] : row[8],
                                           GAF20FIELDS[13] : row[13],
                                           GAF20FIELDS[14] : row[14]}
                                          ))
        else:
            raise ValueError("Invalid gaf file: Incorrect row length.")
    return GeneAnnotation(obj_id, assocs, obj_attrs)
class GafReader(OntoReader):
    """
    Reads GAF files into list of GeneAnnotation.
    GAF file is list of tab separated values in the following order:
    'DB', 'DB Object ID', 'DB Object Symbol', 'Qualifier', 'GO ID',
    'DB:Reference', 'Evidence Code', 'With (or) From', 'Aspect',
    'DB Object Name', 'DB Object Synonym', 'DB Object Type',
    'Taxon', 'Date', 'Assigned By', 'Annotation Extension',
    'Gene Product Form ID'
    """
    # Column holding DB_Object_ID, used to group rows per annotated object.
    _ID_IDX = 1
    def __init__(self, file_handle, assoc_format = "dict"):
        """
        Parameters:
        ----------
        - assoc_format - states format of returned association:
            o "dict" - as a dictionary (faster)
            o "in_mem_sql" - as dict-like object with underlying in-memory database
             (more memory efficient)
        """
        self.handle = file_handle
        self.assoc_format = assoc_format
    def read(self):
        """Parse the whole stream and return the annotation container.

        Raises ValueError on a missing/unsupported version header or an
        unknown assoc_format.
        """
        # The first line must declare the version, e.g. "!gaf-version: 2.0".
        first = self.handle.readline()
        if first and first.startswith('!gaf-version:'):
            version = first[(first.find(':') + 1):].strip()
        else:
            raise ValueError("Invalid gaf file: No version specified.")
        if version not in GAF_VERSION:
            raise ValueError("Incorrect version.")
        tsv_iter = TsvIterator(self.handle)
        if self.assoc_format == "dict":
            # Group data rows by object id; lines starting with '!' are
            # comments and are skipped.
            raw_records = collections.defaultdict(list)
            for row in tsv_iter:
                first = row[0]
                if not first.startswith('!'):
                    raw_records[row[self._ID_IDX]].append(row)
            return dict([(k, _to_goa(v, version)) for k, v in raw_records.items()]) # Possible py2 slow down
        elif self.assoc_format == "in_mem_sql":
            try:
                sqla = InSqlAssoc(GAF_VERSION[version], [1,4], lambda x: _to_goa(x, version))
            except ImportError:
                # NOTE(review): on ImportError this prints to stderr and the
                # method implicitly returns None - confirm callers expect that
                # rather than an exception.
                print("Error: To use in_mem_sql association you need to have sqlite3 bindings installed.", file=sys.stderr)
            else:
                for row in tsv_iter:
                    if not row[0].startswith('!'):
                        sqla.add_row(row)
                return sqla
        else:
            raise ValueError("Incorrect assoc_format parameter.")
class InSqlAssoc(object):
    """
    Immutable dictionary-like structure for storing annotations.
    It provides slower access but is more memory efficient thus more suitable
    for big annotations files.
    """
    def __init__(self, fields, index, selection_to_obj_fun, db_path = ":memory:"):
        """
        Parameters:
        ----------
        - fields - name of the columns in db representation
        - index - pair of fields indexing associations: (gene_id, ontology_term_id)
        - selection_to_obj_fun - function transforming list of rows into
            GeneAssociation
        - db_path - path to database file, special value ":memory:" creates
            database in memory
        """
        import sqlite3
        self.fields = fields
        self.fun = selection_to_obj_fun
        self.con = sqlite3.connect(db_path)
        self.index = index
        cur = self.con.cursor()
        # Build CREATE TABLE with double-quoted column names so arbitrary
        # field labels (e.g. "DB:Reference") are accepted.
        query = 'CREATE TABLE assocs ("' + self.fields[0] + '" '
        for field in fields[1:]:
            query += ', "' + field + '" '
        query += ');'
        cur.execute(query)
        cur.execute('CREATE INDEX obj_idx ON assocs ({0});'.format(self.fields[index[0]]))
        self.con.commit()
    def add_row(self, row):
        """Insert one association row; its length must match the schema."""
        if len(row) != len(self.fields):
            raise TypeError("Incorrect number of fields in a row.")
        else:
            cur = self.con.cursor()
            # Parameterized insert - values are bound, never interpolated.
            cur.execute("INSERT INTO assocs VALUES (?" + (",?" * (len(self.fields) - 1)) + ");", row)
            self.con.commit()
    def __len__(self):
        """Number of distinct annotated objects (not rows)."""
        cur = self.con.cursor()
        cur.execute('SELECT COUNT(DISTINCT "' + self.fields[self.index[0]] + '") FROM assocs;')
        return cur.fetchone()[0]
    def __contains__(self, key):
        cur = self.con.cursor()
        cur.execute('SELECT * FROM assocs WHERE "' + self.fields[self.index[0]]\
                     + '" = ?;', [key])
        return len(list(cur)) > 0 #TODO sth prettier
    def __getitem__(self, key):
        """Return selection_to_obj_fun applied to all rows stored for `key`."""
        cur = self.con.cursor()
        cur.execute('SELECT * FROM assocs WHERE "' + self.fields[self.index[0]]\
                     + '" = ?;', [key])
        return self.fun(list(cur))
    def __iter__(self):
        """Yield (object_id, object) pairs, one per distinct indexed id.

        Fixes the original implementation, which never appended rows to the
        running group, paired the new id with the previous group's object,
        and silently dropped the final group.
        """
        cur = self.con.cursor()
        # ORDER BY the id column so each group is contiguous.
        cur.execute('SELECT * FROM assocs ORDER BY "{0}"'.format(self.fields[self.index[0]]))
        id_col = self.index[0]
        cur_id = None
        row_list = []
        for row in cur:
            if cur_id is not None and cur_id != row[id_col]:
                # Finished the previous group - emit it before starting anew.
                yield (cur_id, self.fun(row_list))
                row_list = []
            cur_id = row[id_col]
            row_list.append(row)
        if row_list:
            # Emit the trailing group as well.
            yield (cur_id, self.fun(row_list))
|
#!/usr/bin/env python
from re import sub
from sys import argv,exit
from os import system,getenv,path
from time import clock,time
import json
# Command-line arguments: PROCID selecting the sample, and the submission id.
which = int(argv[1])
submit_id = int(argv[2])
sname = argv[0]
# Clear argv so ROOT does not try to parse our arguments when imported below.
argv=[]
import ROOT as root
from PandaCore.Tools.Misc import *
from PandaCore.Tools.Load import *
import PandaCore.Tools.job_management as cb
import RedPanda.Cluster.convert_arrays as ca
Load('PFAnalyzer')
data_dir = getenv('CMSSW_BASE') + '/src/PandaAnalysis/data/'
# Timer state consumed and updated by print_time().
stopwatch = clock()
def print_time(label):
    """Log the seconds elapsed since the previous call, tagged with `label`.

    NOTE(review): time.clock() is Python-2-era (removed in Python 3.8) and
    measures CPU time on Linux rather than wall time - confirm intended.
    """
    global stopwatch
    now_ = clock()
    PDebug(sname+'.print_time:'+str(time()),
           '%.3f s elapsed performing "%s"'%((now_-stopwatch),label))
    stopwatch = now_
def copy_local(long_name):
    """Resolve a logical dataset path and copy it into the working directory.

    Returns the local file name ('input_<id>.root') on success, None when the
    copy failed.  Prefers a locally cached copy when one exists and is not
    corrupted.
    """
    # Expand site-specific placeholders into concrete xrootd URLs.
    replacements = {
        r'\${EOS}':'root://eoscms.cern.ch//store/user/snarayan',
        r'\${EOS2}':'root://eoscms.cern.ch//store/group/phys_exotica',
        r'\${CERNBOX}':'root://eosuser//eos/user/s/snarayan',
        r'\${CERNBOXB}':'root://eosuser//eos/user/b/bmaier',
    }
    full_path = long_name
    for k,v in replacements.iteritems():
        full_path = sub(k,v,full_path)
    PInfo(sname,full_path)
    # Derive a per-file id from the trailing "_<id>.root" of the source name.
    panda_id = long_name.split('/')[-1].split('_')[-1].replace('.root','')
    input_name = 'input_%s.root'%panda_id
    # if the file is cached locally, why not use it?
    local_path = full_path.replace('root://xrootd.cmsaf.mit.edu/','/mnt/hadoop/cms')
    if path.isfile(local_path):
        # apparently SmartCached files can be corrupted...
        ftest = root.TFile(local_path)
        if ftest and not(ftest.IsZombie()):
            full_path = local_path
    # xrdcp if remote, copy if local
    if 'root://' in full_path:
        cmd = 'xrdcp %s %s'%(full_path,input_name)
    else:
        cmd = 'cp %s %s'%(full_path,input_name)
    '''
    # rely on pxrdcp for local and remote copies
    cmd = "pxrdcp %s %s"%(full_path,input_name)
    PInfo(sname+'.copy_local',cmd)
    '''
    system(cmd)
    # Success is judged purely by the destination file existing afterwards.
    if path.isfile(input_name):
        PInfo(sname+'.copy_local','Successfully copied to %s'%(input_name))
        return input_name
    else:
        PError(sname+'.copy_local','Failed to copy %s'%input_name)
        return None
def fn(input_name,isData,full_path):
    """Run the PFAnalyzer over one input file; return True on success.

    NOTE(review): `isData` and `full_path` are accepted but never used in
    this body - confirm whether they are still needed by callers.
    """
    PInfo(sname+'.fn','Starting to process '+input_name)
    # now we instantiate and configure the analyzer
    analyzer = root.redpanda.PFAnalyzer()
    # read the inputs
    try:
        fin = root.TFile.Open(input_name)
        tree = fin.FindObjectAny("events")
    except:
        PError(sname+'.fn','Could not read %s'%input_name)
        return False # file open error => xrootd?
    if not tree:
        PError(sname+'.fn','Could not recover tree in %s'%input_name)
        return False
    # Output name mirrors the input name: input_X.root -> output_X.root.
    output_name = input_name.replace('input','output')
    analyzer.SetOutputFile(output_name)
    analyzer.Init(tree)
    # run and save output
    analyzer.Run()
    analyzer.Terminate()
    # Success is judged by the output file existing afterwards.
    ret = path.isfile(output_name)
    if ret:
        PInfo(sname+'.fn','Successfully created %s'%(output_name))
        return True
    else:
        PError(sname+'.fn','Failed in creating %s!'%(output_name))
        return False
def cleanup(fname):
    """Delete `fname`, logging success or the shell's non-zero exit code."""
    status = system('rm -f %s'%(fname))
    if not status:
        PInfo(sname+'.cleanup','Removed '+fname)
    else:
        PError(sname+'.cleanup','Removal of %s exited with code %i'%(fname,status))
    return status
def hadd(good_inputs):
    """Merge the per-input output files into output.root via `hadd -f`."""
    merged_names = [name.replace('input','output') for name in good_inputs]
    code = system('hadd -f output.root ' + ' '.join(merged_names))
    if not code:
        PInfo(sname+'.hadd','Merging exited with code %i'%code)
    else:
        PError(sname+'.hadd','Merging exited with code %i'%code)
def stageout(infilename,outdir,outfilename):
    """Move `infilename` to `outdir`/`outfilename` and verify it arrived.

    Uses a plain `mv` for local destination directories, otherwise falls back
    to gfal-copy then lcg-cp over SRM.  Returns 0 on success, the failing
    shell exit code otherwise, or -1 when no transfer tool is available.
    """
    if path.isdir(outdir): # assume it's a local copy
        mvargs = 'mv $PWD/%s %s/%s'%(infilename,outdir,outfilename)
        lsargs = 'ls %s/%s'%(outdir,outfilename)
    else:
        # Pick whichever grid transfer tool is installed on this worker.
        if system('which gfal-copy')==0:
            mvargs = 'gfal-copy '
            lsargs = 'gfal-ls '
        elif system('which lcg-cp')==0:
            mvargs = 'lcg-cp -v -D srmv2 -b '
            lsargs = 'lcg-ls -v -D srmv2 -b '
        else:
            PError(sname+'.stageout','Could not find a stageout protocol!')
            return -1
        mvargs += 'file://$PWD/%s srm://t3serv006.mit.edu:8443/srm/v2/server?SFN=%s/%s'%(infilename,outdir,outfilename)
        lsargs += 'srm://t3serv006.mit.edu:8443/srm/v2/server?SFN=%s/%s'%(outdir,outfilename)
    PInfo(sname,mvargs)
    ret = system(mvargs)
    # Local root files are removed regardless of the transfer outcome.
    system('rm *.root')
    if not ret:
        PInfo(sname+'.stageout','Move exited with code %i'%ret)
    else:
        PError(sname+'.stageout','Move exited with code %i'%ret)
        return ret
    # Listing the destination confirms the file actually landed.
    PInfo(sname,lsargs)
    ret = system(lsargs)
    if ret:
        PError(sname+'.stageout','Output file is missing!')
        return ret
    return 0
def write_lock(outdir,outfilename,processed):
    """Write a .lock file listing successfully processed source paths and
    stage it out next to the outputs; returns the stageout exit code."""
    lockname = outfilename.replace('.root','.lock')
    flock = open(lockname,'w')
    # `processed` maps local input name -> original source path; only the
    # source paths are recorded.
    for k,v in processed.iteritems():
        flock.write(v+'\n')
    PInfo(sname+'.write_lock','This job successfully processed %i inputs!'%len(processed))
    flock.close()
    return stageout(lockname,outdir+'/locks/',lockname)
if __name__ == "__main__":
    # Locate the sample whose id matches the requested PROCID.
    sample_list = cb.read_sample_config('local.cfg',as_dict=False)
    to_run = None #sample_list[which]
    for s in sample_list:
        if which==s.get_id():
            to_run = s
            break
    if not to_run:
        PError(sname,'Could not find a job for PROCID=%i'%(which))
        exit(3)
    outdir = 'XXXX' # will be replaced when building the job
    outfilename = to_run.name+'_%i.root'%(submit_id)
    processed = {}
    print_time('loading')
    # Copy, analyze and clean up each input file in turn; failures are
    # skipped rather than aborting the whole job.
    for f in to_run.files:
        input_name = copy_local(f)
        print_time('copy %s'%input_name)
        if input_name:
            success = fn(input_name,(to_run.dtype!='MC'),f)
            print_time('analyze %s'%input_name)
            if success:
                processed[input_name] = f
            cleanup(input_name)
            print_time('remove %s'%input_name)
    if len(processed)==0:
        exit(1)
    # Merge per-file outputs, convert to numpy, and stage both artifacts out.
    hadd(list(processed))
    print_time('hadd')
    ca.process_file('output.root')
    print_time('conversion')
    ret1 = stageout('output.root',outdir,outfilename)
    ret2 = stageout('output.npy',outdir,outfilename.replace('.root','.npy'))
    print_time('stageout')
    system('rm -f *root *npy')
    # Only write the lock (marking inputs done) when both stageouts succeeded.
    if not ret1 and not ret2:
        write_lock(outdir,outfilename,processed)
        print_time('create lock')
    else:
        exit(-1*max(ret1, ret2))
    exit(0)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import smart_selects.db_fields
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the climate measurement tables
    # DiasEfectivoLLuvia, Precipitacion and Temperatura, each keyed by month,
    # year and the place hierarchy (pais > departamento > municipio >
    # comunidad).  Do not hand-edit the operations below.
    dependencies = [
        ('lugar', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='DiasEfectivoLLuvia',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('mes', models.IntegerField(choices=[(1, b'Enero'), (2, b'Febrero'), (3, b'Marzo'), (4, b'Abril'), (5, b'Mayo'), (6, b'Junio'), (7, b'Julio'), (8, b'Agosto'), (9, b'Septiembre'), (10, b'Octubre'), (11, b'Noviembre'), (12, b'Diciembre')])),
                ('year', models.IntegerField(verbose_name=b'A\xc3\xb1o')),
                ('dias_lluvia', models.FloatField()),
                ('comunidad', models.ForeignKey(to='lugar.Comunidad')),
                ('departamento', models.ForeignKey(to='lugar.Departamento')),
                ('municipio', models.ForeignKey(to='lugar.Municipio')),
                ('pais', models.ForeignKey(to='lugar.Pais')),
            ],
            options={
                'verbose_name': 'Dias Efectivo de LLuvia',
                'verbose_name_plural': 'Dias Efectivo de LLuvia',
            },
        ),
        migrations.CreateModel(
            name='Precipitacion',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('mes', models.IntegerField(choices=[(1, b'Enero'), (2, b'Febrero'), (3, b'Marzo'), (4, b'Abril'), (5, b'Mayo'), (6, b'Junio'), (7, b'Julio'), (8, b'Agosto'), (9, b'Septiembre'), (10, b'Octubre'), (11, b'Noviembre'), (12, b'Diciembre')])),
                ('year', models.IntegerField(verbose_name=b'A\xc3\xb1o')),
                ('precipitacion', models.FloatField()),
                ('total_precipitacion', models.FloatField(editable=False)),
                ('comunidad', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'municipio', chained_field=b'municipio', blank=True, auto_choose=True, to='lugar.Comunidad', null=True)),
                ('departamento', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'pais', chained_field=b'pais', auto_choose=True, to='lugar.Departamento')),
                ('municipio', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'departamento', chained_field=b'departamento', auto_choose=True, to='lugar.Municipio')),
                ('pais', models.ForeignKey(to='lugar.Pais')),
            ],
            options={
                'verbose_name': 'Precipitaci\xf3n',
                'verbose_name_plural': 'Precipitaci\xf3n',
            },
        ),
        migrations.CreateModel(
            name='Temperatura',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('mes', models.IntegerField(choices=[(1, b'Enero'), (2, b'Febrero'), (3, b'Marzo'), (4, b'Abril'), (5, b'Mayo'), (6, b'Junio'), (7, b'Julio'), (8, b'Agosto'), (9, b'Septiembre'), (10, b'Octubre'), (11, b'Noviembre'), (12, b'Diciembre')])),
                ('year', models.IntegerField(verbose_name=b'A\xc3\xb1o')),
                ('temperatura', models.FloatField()),
                ('total_temperatura', models.FloatField(editable=False)),
                ('comunidad', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'municipio', chained_field=b'municipio', blank=True, auto_choose=True, to='lugar.Comunidad', null=True)),
                ('departamento', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'pais', chained_field=b'pais', auto_choose=True, to='lugar.Departamento')),
                ('municipio', smart_selects.db_fields.ChainedForeignKey(chained_model_field=b'departamento', chained_field=b'departamento', auto_choose=True, to='lugar.Municipio')),
                ('pais', models.ForeignKey(to='lugar.Pais')),
            ],
            options={
                'verbose_name': 'Temperatura',
                'verbose_name_plural': 'Temperatura',
            },
        ),
    ]
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2016 The Bitcoin Unlimited developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
Test version bits' warning system.
Generate chains with block versions that appear to be signalling unknown
soft-forks, and test that warning alerts are generated.
'''
VB_PERIOD = 144 # versionbits period length for regtest
VB_THRESHOLD = 108 # versionbits activation threshold for regtest
VB_TOP_BITS = 0x20000000
VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
    """Bare-bones p2p peer used to push messages at the node under test."""
    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.ping_counter = 1
        # Last pong received; compared against ping_counter in sync_with_ping.
        self.last_pong = msg_pong()
    def add_connection(self, conn):
        self.connection = conn
    def on_inv(self, conn, message):
        # Ignore inventory announcements; this peer only sends.
        pass
    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)
    def on_pong(self, conn, message):
        self.last_pong = message
    # Sync up with the node after delivery of a block
    def sync_with_ping(self, timeout=30):
        """Ping the node and poll until the matching pong arrives.

        Returns True if the pong was seen within `timeout` seconds.
        """
        self.connection.send_message(msg_ping(nonce=self.ping_counter))
        received_pong = False
        sleep_time = 0.05
        while not received_pong and timeout > 0:
            time.sleep(sleep_time)
            timeout -= sleep_time
            with mininode_lock:
                if self.last_pong.nonce == self.ping_counter:
                    received_pong = True
        self.ping_counter += 1
        return received_pong
class VersionBitsWarningTest(BitcoinTestFramework):
    """Checks that the node emits warnings (via -alertnotify and getinfo
    errors) when blocks signal an unknown versionbits soft fork."""
    def setup_chain(self):
        initialize_chain_clean(self.options.tmpdir, 1)
    def setup_network(self):
        self.nodes = []
        self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
        # Open and close to create zero-length file
        with open(self.alert_filename, 'w') as f:
            pass
        self.node_options = ["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]
        self.nodes.append(start_node(0, self.options.tmpdir, self.node_options))
        import re
        # Matches the versionbits-specific warning text.
        self.vb_pattern = re.compile("^Warning.*versionbit")
    # Send numblocks blocks via peer with nVersionToUse set.
    def send_blocks_with_version(self, peer, numblocks, nVersionToUse):
        tip = self.nodes[0].getbestblockhash()
        height = self.nodes[0].getblockcount()
        block_time = self.nodes[0].getblockheader(tip)["time"]+1
        tip = int(tip, 16)
        for i in range(numblocks):
            block = create_block(tip, create_coinbase(height+1), block_time)
            block.nVersion = nVersionToUse
            block.solve()
            peer.send_message(msg_block(block))
            block_time += 1
            height += 1
            tip = block.sha256
        peer.sync_with_ping()
    def test_versionbits_in_alert_file(self):
        # The -alertnotify hook must have written a versionbits warning.
        with open(self.alert_filename, 'r') as f:
            alert_text = f.read()
        assert(self.vb_pattern.match(alert_text))
    def run_test(self):
        # Setup the p2p connection and start up the network thread.
        test_node = TestNode()
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        test_node.add_connection(connections[0])
        NetworkThread().start() # Start up network handling in another thread
        # Test logic begins here
        test_node.wait_for_verack()
        # 1. Have the node mine one period worth of blocks
        self.nodes[0].generate(VB_PERIOD)
        # 2. Now build one period of blocks on the tip, with < VB_THRESHOLD
        # blocks signaling some unknown bit.
        nVersion = VB_TOP_BITS | (1<<VB_UNKNOWN_BIT)
        self.send_blocks_with_version(test_node, VB_THRESHOLD-1, nVersion)
        # Fill rest of period with regular version blocks
        self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1)
        # Check that we're not getting any versionbit-related errors in
        # getinfo()
        assert(not self.vb_pattern.match(self.nodes[0].getinfo()["errors"]))
        # 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling
        # some unknown bit
        self.send_blocks_with_version(test_node, VB_THRESHOLD, nVersion)
        self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD)
        # Might not get a versionbits-related alert yet, as we should
        # have gotten a different alert due to more than 51/100 blocks
        # being of unexpected version.
        # Check that getinfo() shows some kind of error.
        assert(len(self.nodes[0].getinfo()["errors"]) != 0)
        # Mine a period worth of expected blocks so the generic block-version warning
        # is cleared, and restart the node. This should move the versionbit state
        # to ACTIVE.
        self.nodes[0].generate(VB_PERIOD)
        stop_node(self.nodes[0], 0)
        wait_bitcoinds()
        # Empty out the alert file
        with open(self.alert_filename, 'w') as f:
            pass
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""])
        # Connecting one block should be enough to generate an error.
        self.nodes[0].generate(1)
        assert(len(self.nodes[0].getinfo()["errors"]) != 0)
        stop_node(self.nodes[0], 0)
        wait_bitcoinds()
        self.test_versionbits_in_alert_file()
        # Test framework expects the node to still be running...
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""])
if __name__ == '__main__':
    # Standard test-framework entry point.
    VersionBitsWarningTest().main()
|
#!/usr/bin/env python
#
# Copyright (C) 2017 DL
#
import psycopg2
from error_logger.adapter import base_adapter
class PostgresqlAdap | ter(base_adapter.BaseAdapter):
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs | = kwargs
super(PostgresqlAdapter, self).__init__()
def create_connection(self):
return psycopg2.connect(*self._args, **self._kwargs)
|
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Checks that msvs_system_include_dirs works.
"""
import TestGyp
import sys
if sys.platform == 'win32':
    # Windows-only: msvs_system_include_dirs is an MSVC concept, exercised
    # under both the msvs and ninja generators.
    test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
    CHDIR = 'system-include'
    test.run_gyp('test.gyp', chdir=CHDIR)
    test.build('test.gyp', test.ALL, chdir=CHDIR)
    test.pass_test()
|
#!/usr/local/bin python
# -*- coding: utf-8 -*-
from RPSOM import Model
from RPSOM.transition_graph import output_graph
if __name__=='__main__':
    # Demo driver: trains an RPSOM on animal.csv and writes several map
    # visualizations plus the learned weights.
    # learning rate alpha setup
    alpha_max = [0.1, 0.5, 0.7]
    alpha_min = [0.01, 0.1, 0.2]
    # neighborhood radius sigma setup
    sigma_max = [5, 7, 10]
    sigma_min = [1, 2, 3]
    epochs = 10
    # RPSOM model setup
    rpsom=Model.RPSOM(epochs, 15, 20, input_file="animal.csv", alpha_max=alpha_max, alpha_min=alpha_min, sigma_max=sigma_max, sigma_min=sigma_min, log_file="test.log")
    #cb = [som.write_BMU for som in rpsom.som]
    cb = None
    # RPSOM train
    rpsom.fit (trainX=rpsom.input_x, epochs=rpsom.epochs, verbose=0, callbacks=cb)
    # Output Map
    # Output thickness map
    rpsom.map_output2wrl_squ(grad=100, filename="test")
    # Output grayscale 2D map
    filename="example_animal"
    rpsom.map_output2wrl_gray_squ(filename)
    # Output transition graph
    output_graph(rpsom)
    rpsom.weight_output_csv ("rpsom_weight")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""Pytests for demo/app_template/__init__.py
"""
# Import standard packages.
import os
import s | ys
# Import installed packages.
# Import local packages.
sys.path.insert(0, os.path.curdir)
import demo
def test__all__(
        ref_all=[
            'main',
            'template']
        ) -> None:
    r"""Pytest for __all__

    Notes:
    * Check that expected modules are exported.
    """
    exported = demo.app_template.__all__
    assert ref_all == exported
    for name in ref_all:
        assert hasattr(demo.app_template, name)
    return None
|
ve | rsion = ' | 1.1.0'
|
import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
    # Auto-generated plotly.py validator for the string property
    # `histogram2d.colorbar.tickformatstop.templateitemname`.
    def __init__(
        self,
        plotly_name="templateitemname",
        parent_name="histogram2d.colorbar.tickformatstop",
        **kwargs
    ):
        super(TemplateitemnameValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Default edit_type; callers may override it via kwargs.
            edit_type=kwargs.pop("edit_type", "colorbars"),
            **kwargs
        )
|
"""
Data models for the core Sana data engine. These should be extended as
required.
:Authors: Sana dev team
:Version: 2.0
"""
from .concept import Concept, Relationship, RelationshipCategory
from .device | import Devic | e
from .encounter import Encounter
from .events import Event
from .instruction import Instruction
from .location import Location
from .notification import Notification
from .observation import Observation
from .observer import Observer, Surgeon, SurgicalAdvocate
from .procedure import Procedure
from .subject import Subject, SurgicalSubject
# Public API of the package: the model classes re-exported from the sibling
# modules of this package.
__all__ = ['Concept', 'Relationship','RelationshipCategory',
    'Device',
    'Encounter',
    'Event',
    'Instruction',
    'Location',
    'Notification',
    'Observation',
    'Observer',
    'Procedure',
    'Subject',
    'SurgicalSubject',
    'Surgeon',
    'SurgicalAdvocate']
|
# -*- coding: utf-8 -*-
import argparse
import six
# Top-level argument parser for the Minecraft package manager CLI.
# NOTE(review): no `dest` is passed to add_subparsers and no defaults are set
# on the subparsers, so the chosen subcommand is not recorded on the parsed
# namespace - confirm the handlers are attached elsewhere.
parser = argparse.ArgumentParser(description="Minecraft Package Manager")
sub = parser.add_subparsers(help="command help")
# package commands
sync_parser = sub.add_parser("sync",
        description="Synchronize local mod archive.",
        help="sync --help")
show_parser = sub.add_parser("show",
        description="Show mod informations.",
        help="show --help")
search_parser = sub.add_parser("search",
        description="Search mod archive.",
        help="search --help")
update_parser = sub.add_parser("update",
        description="Update mods.",
        help="update --help")
install_parser = sub.add_parser("install",
        description="Install mods.",
        help="install --help")
remove_parser = sub.add_parser("remove",
        description="Remove mods.",
        help="remove --help")
# repo commands
repo_add_parser = sub.add_parser("addrepo",
        description="Add mod repository.",
        help="addrepo --help")
repo_del_parser = sub.add_parser("rmrepo",
        description="Remove mod repository.",
        help="rmrepo --help")
repo_show_parser = sub.add_parser("lsrepo",
        description="Show mod repository informations.",
        help="lsrepo --help")
if __name__ == "__main__":
    cmd = parser.parse_args()
    six.print_("Done")
|
#!/usr/bin/python
import sys
sys.path.append('/homes/gws/aritter/twitter_nlp/python')
from twokenize import tokenize
from LdaFeatures import LdaFeatures
from Vocab import Vocab
from Dictionaries import Dictionaries
# Python 2 script (print statement, dict.keys() returning a sortable list,
# list.sort(cmp=...)): groups NE-tagged tweet entities into per-entity
# "documents" of features for LabeledLDA.
entityDocs = {}
prevText = None
for line in sys.stdin:
    line = line.rstrip('\n')
    fields = line.split('\t')
    sid = fields[0]
    text = fields[6]
    words = tokenize(text)
    confidence = 1.0 / float(fields[-1])
    eType = fields[-2]
    entity = fields[-3]
    neTags = fields[-4].split(' ')
    pos = fields[-5].split(' ')
    # NOTE(review): this overwrites the tokenize() result above with the
    # pre-tokenized field - confirm the tokenize() call is still needed.
    words = fields[-6].split(' ')
    #Just skip duplicate texts (will come from tweets with more than one entiity)
    if prevText and prevText == text:
        continue
    prevText = text
    features = LdaFeatures(words, neTags)
    # Collect one feature list per entity occurrence.
    for i in range(len(features.entities)):
        entity = ' '.join(features.words[features.entities[i][0]:features.entities[i][1]])
        entityDocs[entity] = entityDocs.get(entity,[])
        entityDocs[entity].append(features.features[i])
dictionaries = Dictionaries('/homes/gws/aritter/twitter_nlp/data/LabeledLDA_dictionaries')
vocab = Vocab()
# Sort entities by descending number of collected documents.
keys = entityDocs.keys()
keys.sort(cmp=lambda a,b: cmp(len(entityDocs[b]),len(entityDocs[a])))
eOut = open('entities', 'w')
lOut = open('labels', 'w')
dOut = open('dictionaries', 'w')
for e in keys:
    labels = dictionaries.GetDictVector(e)
    ###############################################################################
    #NOTE: For now, only include entities which appear in one or more dictionary
    #      we could modify this to give them membership in all, or no dictionaries
    #      (in LabeledLDA, don't impose any constraints)
    ###############################################################################
    if sum(labels) > 0:
        lOut.write(' '.join([str(x) for x in labels]) + "\n")
        eOut.write("%s\n" % e)
        print '\t'.join([' '.join([str(vocab.GetID(x)) for x in f]) for f in entityDocs[e]])
vocab.SaveVocab('vocab')
for d in dictionaries.dictionaries:
    dOut.write(d + "\n")
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""DenseNet, implemented in Gluon."""
__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201']
import os
from ....context import cpu
from ...block import HybridBlock
from ... import nn
from ...contrib.nn import HybridConcurrent, Identity
# Helpers
def _make_dense_block(num_layers, bn_size, growth_rate, dropout, stage_index):
    """Stack ``num_layers`` dense layers into one stage-scoped sequential block."""
    block = nn.HybridSequential(prefix='stage%d_' % stage_index)
    with block.name_scope():
        for _ in range(num_layers):
            block.add(_make_dense_layer(growth_rate, bn_size, dropout))
    return block
def _make_dense_layer(growth_rate, bn_size, dropout):
    """One DenseNet-BC layer: BN-ReLU-1x1 conv bottleneck, BN-ReLU-3x3 conv,
    optional dropout, concatenated with its own input along the channel axis."""
    bottleneck = nn.HybridSequential(prefix='')
    bottleneck.add(nn.BatchNorm())
    bottleneck.add(nn.Activation('relu'))
    bottleneck.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
    bottleneck.add(nn.BatchNorm())
    bottleneck.add(nn.Activation('relu'))
    bottleneck.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
    if dropout:
        bottleneck.add(nn.Dropout(dropout))
    # Dense connectivity: output is [input, new_features] concatenated.
    layer = HybridConcurrent(axis=1, prefix='')
    layer.add(Identity())
    layer.add(bottleneck)
    return layer
def _make_transition(num_output_feat | ures):
out = nn.HybridSequential(prefix='')
out.add(nn.BatchNorm())
out.add(nn.Activation('relu'))
out.add(nn.Conv2D(num_output_features, kernel_size=1, use_bias=False))
out.add(nn.AvgPool2D(pool_size=2, strides=2))
return out
# Net
class DenseNet(HybridBlock):
    r"""Densenet-BC model from the
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.

    Parameters
    ----------
    num_init_features : int
        Number of filters to learn in the first convolution layer.
    growth_rate : int
        Number of filters to add each layer (`k` in the paper).
    block_config : list of int
        List of integers for numbers of layers in each pooling block.
    bn_size : int, default 4
        Multiplicative factor for number of bottle neck layers.
        (i.e. bn_size * k features in the bottleneck layer)
    dropout : float, default 0
        Rate of dropout after each dense layer.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self, num_init_features, growth_rate, block_config,
                 bn_size=4, dropout=0, classes=1000, **kwargs):
        super(DenseNet, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            # Stem: 7x7/2 conv, BN-ReLU, 3x3/2 max pool.
            self.features.add(nn.Conv2D(num_init_features, kernel_size=7,
                                        strides=2, padding=3, use_bias=False))
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
            # Dense stages; every stage but the last is followed by a
            # transition that halves the channel count.
            channels = num_init_features
            for stage, num_layers in enumerate(block_config, start=1):
                self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, stage))
                channels += num_layers * growth_rate
                if stage != len(block_config):
                    self.features.add(_make_transition(channels // 2))
                    channels //= 2
            # Head: BN-ReLU, global 7x7 average pool, flatten, linear classifier.
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.AvgPool2D(pool_size=7))
            self.features.add(nn.Flatten())
            self.output = nn.Dense(classes)

    def hybrid_forward(self, F, x):
        return self.output(self.features(x))
# Specification
# num_layers -> (num_init_features, growth_rate, block_config)
densenet_spec = {121: (64, 32, [6, 12, 24, 16]),
                 161: (96, 48, [6, 12, 36, 24]),
                 169: (64, 32, [6, 12, 32, 32]),
                 201: (64, 32, [6, 12, 48, 32])}
# Constructor
def get_densenet(num_layers, pretrained=False, ctx=cpu(),
                 root=os.path.join('~', '.mxnet', 'models'), **kwargs):
    r"""Densenet-BC model from the
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.

    Parameters
    ----------
    num_layers : int
        Number of layers for the variant of densenet. Options are 121, 161, 169, 201.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    init_features, growth, layers = densenet_spec[num_layers]
    net = DenseNet(init_features, growth, layers, **kwargs)
    if pretrained:
        # Imported lazily so the model zoo store is only touched when needed.
        from ..model_store import get_model_file
        net.load_params(get_model_file('densenet%d' % num_layers, root=root), ctx=ctx)
    return net
def densenet121(**kwargs):
    r"""Densenet-BC 121-layer model from the
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.

    Accepts the same keyword arguments as :func:`get_densenet`:
    ``pretrained`` (bool, default False), ``ctx`` (Context, default CPU) and
    ``root`` (str, default '~/.mxnet/models').
    """
    return get_densenet(121, **kwargs)
def densenet161(**kwargs):
    r"""Densenet-BC 161-layer model from the
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.

    Accepts the same keyword arguments as :func:`get_densenet`:
    ``pretrained`` (bool, default False), ``ctx`` (Context, default CPU) and
    ``root`` (str, default '~/.mxnet/models').
    """
    return get_densenet(161, **kwargs)
def densenet169(**kwargs):
    r"""Densenet-BC 169-layer model from the
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.

    Accepts the same keyword arguments as :func:`get_densenet`:
    ``pretrained`` (bool, default False), ``ctx`` (Context, default CPU) and
    ``root`` (str, default '~/.mxnet/models').
    """
    return get_densenet(169, **kwargs)
def densenet201(**kwargs):
    r"""Densenet-BC 201-layer model from the
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.

    Accepts the same keyword arguments as :func:`get_densenet`:
    ``pretrained`` (bool, default False), ``ctx`` (Context, default CPU) and
    ``root`` (str, default '~/.mxnet/models').
    """
    return get_densenet(201, **kwargs)
|
es, func, *args):
notnull_errcheck(res, func, *args)
rv = cast(res, c_void_p).value
_mpv_free(res)
return rv
def notnull_errcheck(res, func, *args):
    """ctypes errcheck: raise if the C call returned NULL, else pass ``res`` through.

    ``func``/``args`` are the ctypes function and call arguments, used only
    to build the error message.
    """
    if res is None:
        # The implicit string concatenation was missing a separating space
        # ("returned.Please"); fixed in the message below.
        raise RuntimeError('Underspecified error in MPV when calling {} with args {!r}: NULL pointer returned. '
                'Please consult your local debugger.'.format(func.__name__, args))
    return res
# Default errcheck for functions that return an integer mpv error code.
ec_errcheck = ErrorCode.raise_for_ec
def _handle_gl_func(name, args=[], restype=None):
    # Register an OpenGL-callback-context function via _handle_func.
    # NOTE(review): the mutable default ``args=[]`` is only safe while
    # _handle_func never mutates it — confirm against _handle_func.
    _handle_func(name, args, restype, errcheck=None, ctx=MpvOpenGLCbContext)
# Functions wired up by hand because they don't fit the _handle_func pattern.
backend.mpv_client_api_version.restype = c_ulong
def _mpv_client_api_version():
    # The C API packs (major, minor) into one unsigned long; split it.
    ver = backend.mpv_client_api_version()
    return ver>>16, ver&0xFFFF
backend.mpv_free.argtypes = [c_void_p]
_mpv_free = backend.mpv_free
backend.mpv_free_node_contents.argtypes = [c_void_p]
_mpv_free_node_contents = backend.mpv_free_node_contents
backend.mpv_create.restype = MpvHandle
_mpv_create = backend.mpv_create
# ctypes signature registrations for the libmpv client API.
# --- client / handle lifecycle ---
_handle_func('mpv_create_client', [c_char_p], MpvHandle, notnull_errcheck)
_handle_func('mpv_client_name', [], c_char_p, errcheck=None)
_handle_func('mpv_initialize', [], c_int, ec_errcheck)
_handle_func('mpv_detach_destroy', [], None, errcheck=None)
_handle_func('mpv_terminate_destroy', [], None, errcheck=None)
_handle_func('mpv_load_config_file', [c_char_p], c_int, ec_errcheck)
_handle_func('mpv_suspend', [], None, errcheck=None)
_handle_func('mpv_resume', [], None, errcheck=None)
_handle_func('mpv_get_time_us', [], c_ulonglong, errcheck=None)
# --- options and commands ---
_handle_func('mpv_set_option', [c_char_p, MpvFormat, c_void_p], c_int, ec_errcheck)
_handle_func('mpv_set_option_string', [c_char_p, c_char_p], c_int, ec_errcheck)
_handle_func('mpv_command', [POINTER(c_char_p)], c_int, ec_errcheck)
_handle_func('mpv_command_string', [c_char_p, c_char_p], c_int, ec_errcheck)
_handle_func('mpv_command_async', [c_ulonglong, POINTER(c_char_p)], c_int, ec_errcheck)
# --- properties (string getters return malloc'd memory, freed by errcheck) ---
_handle_func('mpv_set_property', [c_char_p, MpvFormat, c_void_p], c_int, ec_errcheck)
_handle_func('mpv_set_property_string', [c_char_p, c_char_p], c_int, ec_errcheck)
_handle_func('mpv_set_property_async', [c_ulonglong, c_char_p, MpvFormat,c_void_p],c_int, ec_errcheck)
_handle_func('mpv_get_property', [c_char_p, MpvFormat, c_void_p], c_int, ec_errcheck)
_handle_func('mpv_get_property_string', [c_char_p], c_void_p, bytes_free_errcheck)
_handle_func('mpv_get_property_osd_string', [c_char_p], c_void_p, bytes_free_errcheck)
_handle_func('mpv_get_property_async', [c_ulonglong, c_char_p, MpvFormat], c_int, ec_errcheck)
_handle_func('mpv_observe_property', [c_ulonglong, c_char_p, MpvFormat], c_int, ec_errcheck)
_handle_func('mpv_unobserve_property', [c_ulonglong], c_int, ec_errcheck)
# --- events, logging and wakeup ---
_handle_func('mpv_event_name', [c_int], c_char_p, errcheck=None, ctx=None)
_handle_func('mpv_error_string', [c_int], c_char_p, errcheck=None, ctx=None)
_handle_func('mpv_request_event', [MpvEventID, c_int], c_int, ec_errcheck)
_handle_func('mpv_request_log_messages', [c_char_p], c_int, ec_errcheck)
_handle_func('mpv_wait_event', [c_double], POINTER(MpvEvent), errcheck=None)
_handle_func('mpv_wakeup', [], None, errcheck=None)
_handle_func('mpv_set_wakeup_callback', [WakeupCallback, c_void_p], None, errcheck=None)
_handle_func('mpv_get_wakeup_pipe', [], c_int, errcheck=None)
_handle_func('mpv_get_sub_api', [MpvSubApi], c_void_p, notnull_errcheck)
# --- OpenGL callback sub-API ---
_handle_gl_func('mpv_opengl_cb_set_update_callback', [OpenGlCbUpdateFn, c_void_p])
_handle_gl_func('mpv_opengl_cb_init_gl', [c_char_p, OpenGlCbGetProcAddrFn, c_void_p], c_int)
_handle_gl_func('mpv_opengl_cb_draw', [c_int, c_int, c_int], c_int)
_handle_gl_func('mpv_opengl_cb_render', [c_int, c_int], c_int)
_handle_gl_func('mpv_opengl_cb_report_flip', [c_ulonglong], c_int)
_handle_gl_func('mpv_opengl_cb_uninit_gl', [], c_int)
def _ensure_encoding(possibly_bytes):
return possibly_bytes.decode('utf-8') if type(possibly_bytes) is bytes else possibly_bytes
def _event_generator(handle):
    """Yield mpv events from ``handle`` until a NONE event ends the stream."""
    while True:
        event = _mpv_wait_event(handle, -1).contents
        if event.event_id.value == MpvEventID.NONE:
            # PEP 479: raising StopIteration inside a generator is converted
            # to RuntimeError on Python 3.7+; a plain return ends iteration.
            return
        yield event
def load | _lua():
""" Use this function if you intend to use mpv's built-in lua interpreter. This is e.g. needed for playback of
youtube urls. """
CDLL('liblua.so', mode=RTLD_GLOBAL)
def _event_loop(event_handle, playback_cond, event_callbacks, message_handlers, property_handlers, log_handler):
    # Background event-thread body: dispatch each mpv event to the registered
    # handlers until a SHUTDOWN event arrives, then destroy the handle.
    for event in _event_generator(event_handle):
        try:
            devent = event.as_dict() # copy data from ctypes
            eid = devent['event_id']
            for callback in event_callbacks:
                callback(devent)
            # Wake anyone blocking on playback completion.
            if eid in (MpvEventID.SHUTDOWN, MpvEventID.END_FILE):
                with playback_cond:
                    playback_cond.notify_all()
            if eid == MpvEventID.PROPERTY_CHANGE:
                pc = devent['event']
                name = pc['name']
                if 'value' in pc:
                    # Coerce the raw value to the python type declared in
                    # ALL_PROPERTIES; bytes properties pass through undecoded.
                    proptype, _access = ALL_PROPERTIES[name]
                    if proptype is bytes:
                        args = (pc['value'],)
                    else:
                        args = (proptype(_ensure_encoding(pc['value'])),)
                elif pc['format'] == MpvFormat.NONE:
                    args = (None,)
                else:
                    args = (pc['data'], pc['format'])
                for handler in property_handlers[name]:
                    handler(*args)
            if eid == MpvEventID.LOG_MESSAGE and log_handler is not None:
                ev = devent['event']
                log_handler(ev['level'], ev['prefix'], ev['text'])
            if eid == MpvEventID.CLIENT_MESSAGE:
                # {'event': {'args': ['key-binding', 'foo', 'u-', 'g']}, 'reply_userdata': 0, 'error': 0, 'event_id': 16}
                target, *args = devent['event']['args']
                if target in message_handlers:
                    message_handlers[target](*args)
            if eid == MpvEventID.SHUTDOWN:
                _mpv_detach_destroy(event_handle)
                return
        except Exception as e:
            # Never let a handler error kill the event thread; log and continue.
            traceback.print_exc()
class MPV(object):
""" See man mpv(1) for the details of the implemented commands. """
def __init__(self, *extra_mpv_flags, log_handler=None, start_event_thread=True, **extra_mpv_opts):
""" Create an MPV instance.
Extra arguments and extra keyword arguments will be passed to mpv as options. """
self._event_thread = None
self.handle = _mpv_create()
_mpv_set_option_string(self.handle, b'audio-display', b'no')
istr = lambda o: ('yes' if o else 'no') if type(o) is bool else str(o)
try:
for flag in extra_mpv_flags:
_mpv |
import npc
from mako.template import Template
def template_output(sectioner):
    """Render the simple markdown section template with the given sectioner."""
    template_file = str(npc.settings.InternalSettings().get('listing.templates.markdown.sections.simple'))
    return Template(filename=template_file).render(sectioner=sectioner)
def test_generates_hashes_for_header_level(prefs):
    """A level-3 sectioner should render a markdown h3 ('###') heading."""
    section_maker = npc.formatters.sectioners.LastInitialSectioner(3, prefs)
    section_maker.current_text = 'test text'
    rendered = template_output(section_maker)
    assert '###' in rendered
def test_includes_current_text(prefs):
    """The sectioner's current_text must appear in the rendered output."""
    section_maker = npc.formatters.sectioners.LastInitialSectioner(3, prefs)
    section_maker.current_text = 'test text'
    rendered = template_output(section_maker)
    assert 'test text' in rendered
def test_formatted_output(prefs):
    """The full rendered section is an h3 heading followed by a blank line."""
    section_maker = npc.formatters.sectioners.LastInitialSectioner(3, prefs)
    section_maker.current_text = 'test text'
    rendered = template_output(section_maker)
    assert rendered == '### test text\n\n'
|
# encoding=utf8
import datetime
from distutils.version import StrictVersion
import hashlib
import os.path
import shutil
import socket
import sys
import time
import random
import seesaw
from seesaw.config import NumberConfigValue
from seesaw.externalprocess import ExternalProcess
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.task import SimpleTask, LimitConcurrent
from seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \
UploadWithTracker, SendDoneToTracker
# check the seesaw version
if StrictVersion(seesaw.__version__) < StrictVersion("0.1.5"):
    raise Exception("This pipeline needs seesaw version 0.1.5 or higher.")
###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20140927.01"
# NOTE(review): USER_AGENT is not referenced elsewhere in this file;
# presumably consumed by the fetch tooling — confirm before removing.
USER_AGENT = 'ArchiveTeam'
# Tracker project slug and host used to build all tracker URLs below.
TRACKER_ID = 'qwikidisco'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
class CheckIP(SimpleTask):
    """Sanity-check DNS resolution to detect a firewall/proxy, every ~10 items."""
    def __init__(self):
        SimpleTask.__init__(self, "CheckIP")
        self._counter = 0
    def process(self, item):
        # NEW for 2014! Check if we are behind firewall/proxy
        if self._counter <= 0:
            item.log_output('Checking IP address.')
            resolved = set()
            for domain in ('twitter.com', 'facebook.com', 'youtube.com',
                           'microsoft.com', 'icanhas.cheezburger.com',
                           'archiveteam.org'):
                resolved.add(socket.gethostbyname(domain))
            # Six distinct hosts should yield six distinct addresses; fewer
            # suggests a captive proxy answering for everything.
            if len(resolved) != 6:
                item.log_output('Got IP addresses: {0}'.format(resolved))
                item.log_output(
                    'Are you behind a firewall/proxy? That is a big no-no!')
                raise Exception(
                    'Are you behind a firewall/proxy? That is a big no-no!')
        # Check only occasionally
        if self._counter <= 0:
            self._counter = 10
        else:
            self._counter -= 1
class PrepareDirectories(SimpleTask):
    """Give each item a clean scratch directory and a unique warc file base name."""
    def __init__(self, warc_prefix):
        SimpleTask.__init__(self, "PrepareDirectories")
        self.warc_prefix = warc_prefix
    def process(self, item):
        name = item["item_name"]
        scratch = "/".join((item["data_dir"], name))
        # Wipe leftovers from any previous attempt at this item.
        if os.path.isdir(scratch):
            shutil.rmtree(scratch)
        os.makedirs(scratch)
        item["item_dir"] = scratch
        # prefix-item-timestamp; ':' is not filename-safe, so replace it.
        item["warc_file_base"] = "%s-%s-%s" % (self.warc_prefix,
                                               name.replace(':', '_'),
                                               time.strftime("%Y%m%d-%H%M%S"))
        # Touch the warc file so later stages can rely on its existence.
        open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
    """Move the finished output up into data_dir and drop the scratch directory."""
    def __init__(self):
        SimpleTask.__init__(self, "MoveFiles")
    def process(self, item):
        source = "%(item_dir)s/%(warc_file_base)s.txt.gz" % item
        destination = "%(data_dir)s/%(warc_file_base)s.txt.gz" % item
        os.rename(source, destination)
        shutil.rmtree("%(item_dir)s" % item)
class CustomProcessArgs(object):
    """Build the discover.py command line for a tracker item at realize() time."""
    def realize(self, item):
        item_type, item_value = item['item_name'].split(':', 1)
        if item_type != 'page':
            raise ValueError('unhandled item type: {0}'.format(item_type))
        # Expect something like page:0-99999 or page:100000-199999
        start_num, end_num = item_value.split('-', 1)
        return ['python', 'discover.py', start_num, end_num,
                "%(item_dir)s/%(warc_file_base)s.txt.gz" % item]
def get_hash(filename):
    """Return the hex SHA-1 digest of the file's full contents."""
    digest = hashlib.sha1()
    with open(filename, 'rb') as fh:
        digest.update(fh.read())
    return digest.hexdigest()
# Hashes of this pipeline file and the discovery script, reported to the
# tracker for accountability (see stats_id_function below).
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
SCRIPT_SHA1 = get_hash(os.path.join(CWD, 'discover.py'))
def stats_id_function(item):
    """Return accountability metadata attached to each item's tracker stats."""
    # NEW for 2014! Some accountability hashes and stats.
    return {
        'pipeline_hash': PIPELINE_SHA1,
        'python_version': sys.version,
        'script_hash': SCRIPT_SHA1,
    }
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
title="Qwiki Discovery",
project_html="""
<img class="project-logo" alt="Project logo" src="http://archiveteam.org/images/1/10/Qwiki_Logo_June_2012.png" height="50px" title=""/>
<h2>Qwiki Phase 1.
<span class="links">
<a href="http://www.qwiki.com/">Website</a> ·
<a href="http://tracker.archiveteam.org/qwikidisco/">Leaderboard</a>
<a href="http://archiveteam.org/index.php?title=Qwiki">Wiki</a> ·
</span>
</h2>
<p>Qwiki shuts down. This is phase 1: content discovery.</p>
""",
utc_deadline=datetime.datetime(2014, 11, 1, 23, 59, 0)
)
# The ordered stages each work item passes through.
# NOTE(review): `downloader` is not defined in this file — presumably injected
# into the module namespace by the seesaw runtime; confirm.
pipeline = Pipeline(
    CheckIP(),
    GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
        VERSION),
    PrepareDirectories(warc_prefix="qwikidisco"),
    ExternalProcess('Scraper', CustomProcessArgs(),
        max_tries=2,
        accept_on_exit_code=[0],
        env={
            "item_dir": ItemValue("item_dir")
        }
    ),
    PrepareStatsForTracker(
        defaults={"downloader": downloader, "version": VERSION},
        file_groups={
            "data": [
                ItemInterpolation("%(item_dir)s/%(warc_file_base)s.txt.gz")
            ]
        },
        id_function=stats_id_function,
    ),
    MoveFiles(),
    # Cap concurrent rsync uploads via a shared, user-configurable limit.
    LimitConcurrent(NumberConfigValue(min=1, max=4, default="1",
        name="shared:rsync_threads", title="Rsync threads",
        description="The maximum number of concurrent uploads."),
        UploadWithTracker(
            "http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
            downloader=downloader,
            version=VERSION,
            files=[
                ItemInterpolation("%(data_dir)s/%(warc_file_base)s.txt.gz")
            ],
            rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
            rsync_extra_args=[
                "--recursive",
                "--partial",
                "--partial-dir", ".rsync-tmp"
            ]
        ),
    ),
    SendDoneToTracker(
        tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
        stats=ItemValue("stats")
    )
)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
# NOTE: AutoRest-generated model (see file header) — manual edits here will be
# lost on regeneration; change the API spec / generator instead.
class VirtualNetworkPeering(SubResource):
    """Peerings in a virtual network resource.

    :param id: Resource ID.
    :type id: str
    :param allow_virtual_network_access: Whether the VMs in the linked virtual
     network space would be able to access all the VMs in local Virtual network
     space.
    :type allow_virtual_network_access: bool
    :param allow_forwarded_traffic: Whether the forwarded traffic from the VMs
     in the remote virtual network will be allowed/disallowed.
    :type allow_forwarded_traffic: bool
    :param allow_gateway_transit: If gateway links can be used in remote
     virtual networking to link to this virtual network.
    :type allow_gateway_transit: bool
    :param use_remote_gateways: If remote gateways can be used on this virtual
     network. If the flag is set to true, and allowGatewayTransit on remote
     peering is also true, virtual network will use gateways of remote virtual
     network for transit. Only one peering can have this flag set to true. This
     flag cannot be set if virtual network already has a gateway.
    :type use_remote_gateways: bool
    :param remote_virtual_network: The reference of the remote virtual
     network.
    :type remote_virtual_network: :class:`SubResource
     <azure.mgmt.network.v2017_03_01.models.SubResource>`
    :param peering_state: The status of the virtual network peering. Possible
     values are 'Initiated', 'Connected', and 'Disconnected'. Possible values
     include: 'Initiated', 'Connected', 'Disconnected'
    :type peering_state: str or :class:`VirtualNetworkPeeringState
     <azure.mgmt.network.v2017_03_01.models.VirtualNetworkPeeringState>`
    :param provisioning_state: The provisioning state of the resource.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # Maps constructor arguments to their (possibly nested) REST JSON keys.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'allow_virtual_network_access': {'key': 'properties.allowVirtualNetworkAccess', 'type': 'bool'},
        'allow_forwarded_traffic': {'key': 'properties.allowForwardedTraffic', 'type': 'bool'},
        'allow_gateway_transit': {'key': 'properties.allowGatewayTransit', 'type': 'bool'},
        'use_remote_gateways': {'key': 'properties.useRemoteGateways', 'type': 'bool'},
        'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
        'peering_state': {'key': 'properties.peeringState', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    # NOTE: the `id` parameter shadows the builtin; kept for generated-API
    # compatibility with the REST resource shape.
    def __init__(self, id=None, allow_virtual_network_access=None, allow_forwarded_traffic=None, allow_gateway_transit=None, use_remote_gateways=None, remote_virtual_network=None, peering_state=None, provisioning_state=None, name=None, etag=None):
        super(VirtualNetworkPeering, self).__init__(id=id)
        self.allow_virtual_network_access = allow_virtual_network_access
        self.allow_forwarded_traffic = allow_forwarded_traffic
        self.allow_gateway_transit = allow_gateway_transit
        self.use_remote_gateways = use_remote_gateways
        self.remote_virtual_network = remote_virtual_network
        self.peering_state = peering_state
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
|
"""
roidb
basic format [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
extended ['image', 'max_classes', 'max_overlaps', 'bbox_targets']
"""
from __future__ import print_function
import cv2
import numpy as np
from bbox_regression import compute_bbox_regression_targets
from rcnn.config import config
def prepare_roidb(imdb, roidb):
    """
    add image path, max_classes, max_overlaps to roidb
    :param imdb: image database, provide path
    :param roidb: roidb
    :return: None
    """
    print('prepare roidb')
    for i, roi_rec in enumerate(roidb):  # image_index
        roi_rec['image'] = imdb.image_path_from_index(imdb.image_set_index[i])
        if config.TRAIN.ASPECT_GROUPING:
            # image size is only needed when grouping samples by aspect ratio
            shape = cv2.imread(roi_rec['image']).shape
            roi_rec['height'] = shape[0]
            roi_rec['width'] = shape[1]
        overlaps = roi_rec['gt_overlaps'].toarray()
        roi_rec['max_overlaps'] = overlaps.max(axis=1)
        roi_rec['max_classes'] = overlaps.argmax(axis=1)
        # background roi => background class
        background = np.where(roi_rec['max_overlaps'] == 0)[0]
        assert all(roi_rec['max_classes'][background] == 0)
        # foreground roi => foreground class
        foreground = np.where(roi_rec['max_overlaps'] > 0)[0]
        assert all(roi_rec['max_classes'][foreground] != 0)
def add_bbox_regression_targets(roidb):
    """
    given roidb, add ['bbox_targets'] and normalize bounding box regression targets
    :param roidb: roidb to be processed. must have gone through imdb.prepare_roidb
    :return: means, std variances of targets
    """
    print('add bounding box regression targets')
    assert len(roidb) > 0
    assert 'max_classes' in roidb[0]
    num_images = len(roidb)
    # number of classes is inferred from the gt_overlaps matrix (rois x classes)
    num_classes = roidb[0]['gt_overlaps'].shape[1]
    for im_i in range(num_images):
        rois = roidb[im_i]['boxes']
        max_overlaps = roidb[im_i]['max_overlaps']
        max_classes = roidb[im_i]['max_classes']
        roidb[im_i]['bbox_targets'] = compute_bbox_regression_targets(rois, max_overlaps, max_classes)
    if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
        # use fixed / precomputed means and stds instead of empirical values
        means = np.tile(np.array(config.TRAIN.BBOX_MEANS), (num_classes, 1))
        stds = np.tile(np.array(config.TRAIN.BBOX_STDS), (num_classes, 1))
    else:
        # compute mean, std values
        class_counts = np.zeros((num_classes, 1)) + config.EPS
        sums = np.zeros((num_classes, 4))
        squared_sums = np.zeros((num_classes, 4))
        for im_i in range(num_images):
            targets = roidb[im_i]['bbox_targets']
            for cls in range(1, num_classes):  # class 0 is background; skipped
                cls_indexes = np.where(targets[:, 0] == cls)[0]
                if cls_indexes.size > 0:
                    class_counts[cls] += cls_indexes.size
                    sums[cls, :] += targets[cls_indexes, 1:].sum(axis=0)
                    squared_sums[cls, :] += (targets[cls_indexes, 1:] ** 2).sum(axis=0)
        means = sums / class_counts
        # var(x) = E(x^2) - E(x)^2
        # NOTE(review): a class whose targets have zero variance yields std=0
        # and a divide-by-zero in the normalization below — confirm upstream
        # guarantees some spread per class.
        stds = np.sqrt(squared_sums / class_counts - means ** 2)
    # normalized targets
    for im_i in range(num_images):
        targets = roidb[im_i]['bbox_targets']
        for cls in range(1, num_classes):
            cls_indexes = np.where(targets[:, 0] == cls)[0]
            roidb[im_i]['bbox_targets'][cls_indexes, 1:] -= means[cls, :]
            roidb[im_i]['bbox_targets'][cls_indexes, 1:] /= stds[cls, :]
    return means.ravel(), stds.ravel()
|
ack')
elif synced_xml_head[0].getElementsByTagName('Video'):
synced_xml_items = synced_xml_head[0].getElementsByTagName('Video')
for synced_session_data in synced_xml_items:
if helpers.get_xml_attr(synced_session_data, 'ratingKey') == rating_key:
break
# Figure out which version is being played
if sync_id and synced_session_data:
media_info_all = synced_session_data.getElementsByTagName('Media')
else:
media_info_all = session.getElementsByTagName('Media')
stream_media_info = next((m for m in media_info_all if helpers.get_xml_attr(m, 'selected') == '1'), media_info_all[0])
part_info_all = stream_media_info.getElementsByTagName('Part')
stream_media_parts_info = next((p for p in part_info_all if helpers.get_xml_attr(p, 'selected') == '1'), part_info_all[0])
# Get the stream details
video_stream_info = audio_stream_info = subtitle_stream_info = None
for stream in stream_media_parts_info.getElementsByTagName('Stream'):
if helpers.get_xml_attr(stream, 'streamType') == '1':
video_stream_info = stream
elif helpers.get_xml_attr(stream, 'streamType') == '2':
audio_stream_info = stream
elif helpers.get_xml_attr(stream, 'streamType') == '3':
subtitle_stream_info = stream
video_id = audio_id = subtitle_id = None
if video_stream_info:
video_id = helpers.get_xml_attr(video_stream_info, 'id')
video_details = {'stream_video_bitrate': helpers.get_xml_attr(video_stream_info, 'bitrate'),
'stream_video_bit_depth': helpers.get_xml_attr(video_stream_info, 'bitDepth'),
'stream_video_codec_level': helpers.get_xml_attr(video_stream_info, 'level'),
'stream_video_ref_frames': helpers.get_xml_attr(video_stream_info, 'refFrames'),
'stream_video_language': helpers.get_xml_attr(video_stream_info, 'language'),
'stream_video_language_code': helpers.get_xml_attr(video_stream_info, 'languageCode'),
'stream_video_decision': helpers.get_xml_attr(video_stream_info, 'decision') or 'direct play'
}
else:
video_details = {'stream_video_bitrate': '',
'stream_video_bit_depth': '',
'stream_video_codec_level': '',
'stream_video_ref_frames': '',
'stream_video_language': '',
'stream_video_language_code': '',
'stream_video_decision': ''
}
if audio_stream_info:
audio_id = helpers.get_xml_attr(audio_stream_info, 'id')
audio_details = {'stream_audio_bitrate': helpers.get_xml_attr(audio_stream_info, 'bitrate'),
'stream_audio_bitrate_mode': helpers.get_xml_attr(audio_stream_info, 'bitrateMode'),
'stream_audio_sample_rate': helpers.get_xml_attr(audio_stream_info, 'samplingRate'),
'stream_audio_channel_layout_': helpers.get_xml_attr(audio_stream_info, 'audioChannelLayout'),
'stream_audio_language': helpers.get_xml_attr(audio_stream_info, 'language'),
'stream_audio_language_code': helpers.get_xml_attr(audio_stream_info, 'languageCode'),
'stream_audio_decision': helpers.get_xml_attr(audio_stream_info, 'decision') or 'direct play'
}
else:
audio_details = {'stream_audio_bitrate': '',
'stream_audio_bitrate_mode': '',
'stream_audio_sample_rate': '',
'stream_audio_channel_layout_': '',
'stream_audio_language': '',
'stream_audio_language_code': '',
'stream_audio_decision': ''
}
if subtitle_stream_info:
subtitle_id = helpers.get_xml_attr(subtitle_stream_info, 'id')
subtitle_selected = helpers.get_xml_attr(subtitle_stream_info, 'selected')
subtitle_details = {'stream_subtitle_codec': helpers.get_xml_attr(subtitle_stream_info, 'codec'),
'stream_subtitle_container': hel | pers.get_xml_attr(subtitle_stream_info, 'container'),
'stream_subtitle_format': helpers.get_xml_attr(subtitle_stream_info, 'format'),
'stream_subtitle_forced': int(helpers.get_xml_attr(subtitle_stream_info, 'forced') == '1'),
'stream_subtitle_location': helpers.get_xml_attr(subtitle_stream_info, 'location'),
'stream_subtitle_language': helpers.get_xml_attr(sub | title_stream_info, 'language'),
'stream_subtitle_language_code': helpers.get_xml_attr(subtitle_stream_info, 'languageCode'),
'stream_subtitle_decision': helpers.get_xml_attr(subtitle_stream_info, 'decision')
}
else:
subtitle_selected = None
subtitle_details = {'stream_subtitle_codec': '',
'stream_subtitle_container': '',
'stream_subtitle_format': '',
'stream_subtitle_forced': 0,
'stream_subtitle_location': '',
'stream_subtitle_language': '',
'stream_subtitle_language_code': '',
'stream_subtitle_decision': ''
}
# Get the bif thumbnail
indexes = helpers.get_xml_attr(stream_media_parts_info, 'indexes')
view_offset = helpers.get_xml_attr(session, 'viewOffset')
if indexes == 'sd':
part_id = helpers.get_xml_attr(stream_media_parts_info, 'id')
bif_thumb = '/library/parts/{part_id}/indexes/sd/{view_offset}'.format(part_id=part_id, view_offset=view_offset)
else:
bif_thumb = ''
stream_video_width = helpers.get_xml_attr(stream_media_info, 'width')
if helpers.cast_to_int(stream_video_width) >= 3840:
stream_video_resolution = '4k'
else:
stream_video_resolution = helpers.get_xml_attr(stream_media_info, 'videoResolution').rstrip('p')
stream_audio_channels = helpers.get_xml_attr(stream_media_info, 'audioChannels')
stream_details = {'stream_container': helpers.get_xml_attr(stream_media_info, 'container'),
'stream_bitrate': helpers.get_xml_attr(stream_media_info, 'bitrate'),
'stream_aspect_ratio': helpers.get_xml_attr(stream_media_info, 'aspectRatio'),
'stream_audio_codec': helpers.get_xml_attr(stream_media_info, 'audioCodec'),
'stream_audio_channels': stream_audio_channels,
'stream_audio_channel_layout': audio_details.get('stream_audio_channel_layout_') or common.AUDIO_CHANNELS.get(stream_audio_channels, stream_audio_channels),
'stream_video_codec': helpers.get_xml_attr(stream_media_info, 'videoCodec'),
'stream_video_framerate': helpers.get_xml_attr(stream_media_info, 'videoFrameRate'),
'stream_video_resolution': stream_video_resolution,
'stream_video_height': helpers.get_xml_attr(stream_media_info, 'height'),
'stream_video_width': helpers.get_xml_attr(stream_media_info, 'width'),
'stream_duration': helpers.get_xml_attr(stream_media_info, 'duration') or helpers.get_xml_attr(session, 'duration'),
|
"""Container overriding example."""
from depe | ndency_injector import containers, providers
class Service:
    """Placeholder for the real service implementation."""
    ...
class ServiceStub:
    """Stand-in used to override the real Service in the example below."""
    ...
class Container(containers.DeclarativeContainer):
    """Container wiring the real Service."""
    service = providers.Factory(Service)
class OverridingContainer(containers.DeclarativeContainer):
    """Container whose same-named providers replace Container's on override."""
    service = providers.Factory(ServiceStub)
if __name__ == "__main__":
    container = Container()
    overriding_container = OverridingContainer()
    # Overriding a container replaces each provider that shares a name with
    # one in the overriding container, so container.service now builds the stub.
    container.override(overriding_container)
    service = container.service()
    assert isinstance(service, ServiceStub)
|
from messenger import Skype
import keyring
import utils
# NOTE: Python 2 script (print statements). Credentials and cached Skype
# tokens live in the system keyring under the 'messagesReceiver' service.
token = keyring.get_password('messagesReceiver', 'skypeToken')
registrationToken = keyring.get_password('messagesReceiver', 'skypeRegistrationToken')
username = keyring.get_password('messagesReceiver', 'skypeUsername')
password = keyring.get_password('messagesReceiver', 'skypePassword')
s = Skype(token, registrationToken)
# Re-authenticate / re-register only when the cached tokens are missing.
if s.token == None:
    s.login(username, password)
    print "logging in..."
if s.registrationToken == None:
    print s.createRegistrationToken()
    print s.subcribe()
    print "creating endpoint and registrationToken..."
# Long-poll loop: 404 means the registration token expired (recreate and
# retry), 400 appears to be an empty poll; anything else is parsed into
# (sender, receiver, message) triples.
while True:
    data = s.pull()
    if data == 404:
        print s.createRegistrationToken()
        print s.subcribe()
        data = s.pull()
    if data == 400:
        continue
    messages = utils.skypeParse(data)
    if not messages:
        continue
    for sender, receiver, message in messages:
        if receiver != None:
            print "%s to %s" % (sender, receiver)
        else:
            print "From %s" % sender
        print message
|
# swift_build_support/products/llvm.py --------------------------*- python -*-
# |
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------- | -----------------------------------------------------------------
from . import cmark
from . import product
from ..cmake import CMakeOptions
class LLVM(product.Product):
    """Build product describing how LLVM is configured for build-script."""

    def __init__(self, args, toolchain, source_dir, build_dir):
        product.Product.__init__(self, args, toolchain, source_dir,
                                 build_dir)
        # Forward assertion setting, target list, vendor branding and
        # compiler-version metadata to CMake.
        self.cmake_options.define(
            'LLVM_ENABLE_ASSERTIONS:BOOL', args.llvm_assertions)
        self.cmake_options.define(
            'LLVM_TARGETS_TO_BUILD', args.llvm_targets_to_build)
        self.cmake_options.extend(self._compiler_vendor_flags)
        self.cmake_options.extend(self._version_flags)

    @classmethod
    def is_build_script_impl_product(cls):
        """LLVM is produced by build-script-impl."""
        return True

    @classmethod
    def is_before_build_script_impl_product(cls):
        """LLVM is not built before the build-script-impl products."""
        return False

    @property
    def _compiler_vendor_flags(self):
        """CMake flags carrying the configured compiler vendor branding."""
        vendor = self.args.compiler_vendor
        if vendor == "none":
            return []
        if vendor != "apple":
            raise RuntimeError("Unknown compiler vendor?!")
        # str() is safe: a default clang_user_visible_version is always set.
        return [
            ('CLANG_VENDOR', 'Apple'),
            ('CLANG_VENDOR_UTI', 'com.apple.compilers.llvm.clang'),
            ('PACKAGE_VERSION', str(self.args.clang_user_visible_version)),
        ]

    @property
    def _version_flags(self):
        """CMake flags describing the clang compiler version, when known."""
        options = CMakeOptions()
        version = self.args.clang_compiler_version
        if version is not None:
            options.define('CLANG_REPOSITORY_STRING',
                           "clang-{}".format(version))
        return options

    @classmethod
    def get_dependencies(cls):
        """LLVM depends only on cmark."""
        return [cmark.CMark]
|
workinfo(self):
netinfo = aem.NetworkInfo()
adapterIds = netinfo.getAdapterIds()
self.assertNotEquals(None, adapterIds)
self.assertNotEquals(0, len(adapterIds))
adapterId = adapterIds[0]
self.assertNotEquals(None, aem.getMacAddress(adapterId))
self.assertNotEquals(None, netinfo.getNetworkReadBytes())
self.assertNotEquals(None, netinfo.getNetworkWriteBytes())
self.assertNotEquals(None, netinfo.getNetworkPacketRetransmitted())
def test_hwchangeinfo(self):
netinfo = aem.NetworkInfo()
testHwInfoFile = "/tmp/HwInfo"
aem.HwInfoFile = testHwInfoFile
if os.path.isfile(testHwInfoFile):
os.remove(testHwInfoFile)
hwChangeInfo = aem.HardwareChangeInfo(netinfo)
self.assertNotEquals(None, hwChangeInfo.getLastHardwareChange())
self.assertTrue(os.path.isfile, aem.HwInfoFile)
#No hardware change
lastChange = hwChangeInfo.getLastHardwareChange()
hwChangeInfo = aem.HardwareChangeInfo(netinfo)
self.assertEquals(lastChange, hwChangeInfo.getLastHardwareChange())
#Create mock hardware
waagent.SetFileContents(testHwInfoFile, ("0\nma-ca-sa-ds-02"))
hwChangeInfo = aem.HardwareChangeInfo(netinfo)
self.assertNotEquals(None, hwChangeInfo.getLastHardwareChange())
def test_linux_metric(self):
config = self.test_config()
metric = aem.LinuxMetric(config)
self.validate_cnm_metric(metric)
#Metric for CPU, network and memory
def validate_cnm_metric(self, metric):
self.assertNotEquals(None, metric.getCurrHwFrequency())
self.assertNotEquals(None, metric.getMaxHwFrequency())
self.assertNotEquals(None, metric.getCurrVMProcessingPower())
self.assertNotEquals(None, metric.getGuaranteedMemAssigned())
self.assertNotEquals(None, metric.getMaxVMProcessingPower())
self.assertNotEquals(None, metric.getNumOfCoresPerCPU())
self.assertNotEquals(None, metric.getNumOfThreadsPerCore())
self.assertNotEquals(None, metric.getPhysProcessingPowerPerVCPU())
| self.assertNotEquals(None, metric.getProcessorType())
self.assertNotEquals(None, metric.getReferenceComputeUnit())
self.assertNotEquals(None, metric.getVCPUMapping())
self.assertNotEquals(None, metric.getVMProcessingPowerConsumption())
self.assertNotEquals(None, metric.getCurrMemAssigned())
self.assertNotEquals(None, metric.getGuaranteedMemAs | signed())
self.assertNotEquals(None, metric.getMaxMemAssigned())
self.assertNotEquals(None, metric.getVMMemConsumption())
adapterIds = metric.getNetworkAdapterIds()
self.assertNotEquals(None, adapterIds)
self.assertNotEquals(0, len(adapterIds))
adapterId = adapterIds[0]
self.assertNotEquals(None, metric.getNetworkAdapterMapping(adapterId))
self.assertNotEquals(None, metric.getMaxNetworkBandwidth(adapterId))
self.assertNotEquals(None, metric.getMinNetworkBandwidth(adapterId))
self.assertNotEquals(None, metric.getNetworkReadBytes())
self.assertNotEquals(None, metric.getNetworkWriteBytes())
self.assertNotEquals(None, metric.getNetworkPacketRetransmitted())
self.assertNotEquals(None, metric.getLastHardwareChange())
def test_vm_datasource(self):
config = self.test_config()
config.configData["wad.isenabled"] = "0"
dataSource = aem.VMDataSource(config)
counters = dataSource.collect()
self.assertNotEquals(None, counters)
self.assertNotEquals(0, len(counters))
counterNames = [
"Current Hw Frequency",
"Current VM Processing Power",
"Guaranteed VM Processing Power",
"Max Hw Frequency",
"Max. VM Processing Power",
"Number of Cores per CPU",
"Number of Threads per Core",
"Phys. Processing Power per vCPU",
"Processor Type",
"Reference Compute Unit",
"vCPU Mapping",
"VM Processing Power Consumption",
"Current Memory assigned",
"Guaranteed Memory assigned",
"Max Memory assigned",
"VM Memory Consumption",
"Adapter Id",
"Mapping",
"Maximum Network Bandwidth",
"Minimum Network Bandwidth",
"Network Read Bytes",
"Network Write Bytes",
"Packets Retransmitted"
]
#print "\n".join(map(lambda c: str(c), counters))
for name in counterNames:
#print name
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertNotEquals(None, counter.value)
def test_storagemetric(self):
metrics = mock_getStorageMetrics()
self.assertNotEquals(None, metrics)
stat = aem.AzureStorageStat(metrics)
self.assertNotEquals(None, stat.getReadBytes())
self.assertNotEquals(None, stat.getReadOps())
self.assertNotEquals(None, stat.getReadOpE2ELatency())
self.assertNotEquals(None, stat.getReadOpServerLatency())
self.assertNotEquals(None, stat.getReadOpThroughput())
self.assertNotEquals(None, stat.getWriteBytes())
self.assertNotEquals(None, stat.getWriteOps())
self.assertNotEquals(None, stat.getWriteOpE2ELatency())
self.assertNotEquals(None, stat.getWriteOpServerLatency())
self.assertNotEquals(None, stat.getWriteOpThroughput())
def test_disk_info(self):
config = self.test_config()
mapping = aem.DiskInfo(config).getDiskMapping()
self.assertNotEquals(None, mapping)
def test_get_storage_key_range(self):
startKey, endKey = aem.getStorageTableKeyRange()
self.assertNotEquals(None, startKey)
self.assertEquals(13, len(startKey))
self.assertNotEquals(None, endKey)
self.assertEquals(13, len(endKey))
def test_storage_datasource(self):
aem.getStorageMetrics = mock_getStorageMetrics
config = self.test_config()
dataSource = aem.StorageDataSource(config)
counters = dataSource.collect()
self.assertNotEquals(None, counters)
self.assertNotEquals(0, len(counters))
counterNames = [
"Phys. Disc to Storage Mapping",
"Storage ID",
"Storage Read Bytes",
"Storage Read Op Latency E2E msec",
"Storage Read Op Latency Server msec",
"Storage Read Ops",
"Storage Read Throughput E2E MB/sec",
"Storage Write Bytes",
"Storage Write Op Latency E2E msec",
"Storage Write Op Latency Server msec",
"Storage Write Ops",
"Storage Write Throughput E2E MB/sec"
]
#print "\n".join(map(lambda c: str(c), counters))
for name in counterNames:
#print name
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertNotEquals(None, counter.value)
    def test_writer(self):
        """PerfCounterWriter round-trips a counter to file; bad targets raise."""
        testEventFile = "/tmp/Event"
        if os.path.isfile(testEventFile):
            os.remove(testEventFile)
        writer = aem.PerfCounterWriter()
        counters = [aem.PerfCounter(counterType = 0,
                                    category = "test",
                                    name = "test",
                                    value = "test",
                                    unit = "test")]
        writer.write(counters, eventFile = testEventFile)
        with open(testEventFile) as F:
            content = F.read()
        self.assertEquals(str(counters[0]), content)
        # Writing to /dev/console should fail with IOError.
        # NOTE(review): 2 is passed positionally to write() -- presumably a
        # retry count; confirm against aem.PerfCounterWriter.write.
        testEventFile = "/dev/console"
        print "=============================="
        print "The warning below is expected."
        self.assertRaises(IOError, writer.write, counters, 2, testEventFile)
        print "=============================="
def test_easyHash(self):
hashVal = aem.easyHash('a')
self.a |
# -*- coding: utf-8 | -*-
# Generated by Django 1.10.2 on 2016-12-12 07:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename LearningWord.updated_date to update_date (Django-generated)."""
    dependencies = [
        ('mords_api', '0021_learningword'),
    ]
    operations = [
        migrations.RenameField(
            model_name='learningword',
            old_name='updated_date',
            new_name='update_date',
        ),
    ]
|
import os
try:
from nltk.corpus import stopwords
stopset = set(stopwords.words('english'))
except ImportError:
print("Cannot use dbpedia without 'pip install nltk'")
try:
import ujson as json
except ImportError:
import json
def generate_testables(words, stopword_set, n_grams=4):
    """Build candidate (tag-string, phrase) n-grams from POS-tagged words.

    words: sequence of (token, POS-tag) pairs.
    Keeps only n-grams up to *n_grams* tokens long in which every tag is a
    noun variant (contains 'NN'), every token is at least 3 characters, and
    no token is in *stopword_set*.
    Returns a set of ("TAG1 TAG2 ...", "tok1 tok2 ...") tuples (lowercased).
    """
    grams = set()
    total = len(words)
    for start in range(total):
        for size in range(n_grams):
            if total - size <= start:
                continue  # n-gram would run past the end of the sequence
            window = words[start: start + size + 1]
            # Generator expressions avoid building throwaway lists in any().
            if any('NN' not in pair[1] for pair in window):
                continue
            tokens = [pair[0].lower() for pair in window]
            if any(len(tok) < 3 for tok in tokens):
                continue
            if set(tokens) & stopword_set:
                continue
            grams.add((" ".join(pair[1] for pair in window), " ".join(tokens)))
    return grams
def get_dbpedia_from_words(pos_tags, db_dict, ok_entities=None):
    """Return (feature-name, gram) pairs for grams whose dbpedia class is allowed.

    Each gram from generate_testables is looked up by its phrase in db_dict;
    the first entity class found in *ok_entities* yields one feature.
    """
    if ok_entities is None:
        ok_entities = ['Person', 'Organisation']
    classes = []
    for gram in generate_testables(pos_tags, stopset):
        tag_string, phrase = gram
        if phrase not in db_dict:
            continue
        for entity in db_dict[phrase]:
            if entity in ok_entities:
                classes.append(('db_' + entity + '_' + tag_string, gram))
                break  # one feature per gram
    return classes
def load_dbpedia():
    """Load the dbpedia.json that sits next to this module (sky/sky/dbpedia.json)."""
    db_path = os.path.join(os.path.dirname(__file__), 'dbpedia.json')
    with open(db_path) as db_file:
        return json.load(db_file)
|
# MIT License
#
# Copyright (c) 2017, Stefan Webb. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
import tensorflow as tf
import tensorflow_models as tf_models
from tensorflow_models.trainers import BaseTrainer
class Trainer(BaseTrainer):
    """Trainer optimizing the ELBO; records train/test costs and step timing."""
    def finalize_hook(self):
        """Called once after the final training step."""
        print('Done training for {} epochs'.format(self.epoch()))
    def learning_hooks(self):
        """Return (train, test) closures bound to this session.

        train(count_steps) runs count_steps optimization steps and returns
        the mean training ELBO; test() returns the mean loss over all test
        batches.
        """
        train_op = tf_models.get_inference('elbo')
        #train_loss_op = tf_models.get_loss('train/elbo_discrete')
        #test_loss_op = tf_models.get_loss('test/elbo_discrete')
        train_loss_op = tf_models.get_loss('train/elbo')
        test_loss_op = tf_models.get_loss('test/elbo')
        x_train = tf_models.train_placeholder()
        x_test = tf_models.test_placeholder()
        next_train_batch = self._train_batch
        next_test_batch = self._test_batch
        def train(count_steps):
            # Mean ELBO across count_steps minibatch optimization steps.
            total_elbo = 0.
            for idx in range(count_steps):
                X_mb = next_train_batch()
                _, this_elbo = self.sess.run([train_op, train_loss_op], feed_dict={x_train: X_mb})
                total_elbo += this_elbo
            return total_elbo / count_steps
        def test():
            # Mean loss over the whole test set (no parameter updates).
            total_loss = 0.
            for idx in range(self.test_batches):
                X_mb = next_test_batch()
                this_loss = self.sess.run(test_loss_op, feed_dict={x_test: X_mb})
                total_loss += this_loss
            return total_loss / self.test_batches
        return train, test
    def initialize_hook(self):
        """Record an initial test loss (fresh runs evaluate; resumes reuse)."""
        # See where the test loss starts
        if self._settings['resume_from'] is None:
            # Do a test evaluation before any training happens
            test_loss = self.test()
            self.results['costs_test'] += [test_loss]
        else:
            # Resuming: reuse the last recorded test loss.
            test_loss = self.results['costs_test'][-1]
        print('epoch {:.3f}, test loss = {:.2f}'.format(self.epoch(), test_loss))
    def step_hook(self):
        """One outer step: timed training burst, then a test evaluation."""
        with tf_models.timer.Timer() as train_timer:
            train_loss = self.train(self._batches_per_step)
        test_loss = self.test()
        self.results['times_train'] += [train_timer.interval]
        self.results['costs_train'] += [train_loss]
        self.results['costs_test'] += [test_loss]
    def before_step_hook(self):
        """No per-step setup required."""
        pass
    def after_step_hook(self):
        """Report the latest losses and training throughput."""
        train_time = self.results['times_train'][-1]
        train_loss = self.results['costs_train'][-1]
        test_loss = self.results['costs_test'][-1]
        examples_per_sec = self._settings['batch_size'] * self._batches_per_step / train_time
        sec_per_batch = train_time / self._batches_per_step
        print('epoch {:.3f}, train loss = {:.2f}, test loss = {:.2f} ({:.1f} examples/sec)'.format(self.epoch(), train_loss, test_loss, examples_per_sec))
    def initialize_results_hook(self):
        """Create the empty cost/time series accumulated across steps."""
        results = {}
        results['costs_train'] = []
        results['times_train'] = []
        results['costs_test'] = []
        return results
|
import | json_handler
class conf_handler:
    """Thin wrapper around json_handler for reading configuration values."""
    # Path of the configuration file and the parsed JSON handle.
    __m_conf_path = None
    __m_conf = None
    def __init__( self, conf_base = "../conf/", conf_name = "configuration.conf" ):
        self.__m_conf_path = conf_base + conf_name
        self.__m_conf = json_handler.json_handler(self.__m_conf_path)
    def read_conf( self, field_name ):
        """Return the value of *field_name*, or None when unavailable.

        None is returned both when no configuration is loaded and when the
        lookup itself fails (e.g. the field does not exist).
        """
        if self.__m_conf is None:  # was '== None'; identity test is correct
            return None
        try:
            conf_data = self.__m_conf.object_search(field_name)
        except Exception:  # narrowed from a bare 'except:'; still best-effort
            return None
        return conf_data
|
"" | "TruTeq transport."""
from vumi.transports.truteq.truteq import TruteqTransport
__all__ = ['TruteqTransport'] | |
# print('logout successful')
# else:
# print('logout unsuccessful')
# return 1
class xnat_query_subjects(object):
    """Query and filter the subject ids of an xnat project."""
    def __init__(self,cookie,url_base,project):
        self.cookie=cookie
        self.url_base=url_base
        self.project=project
    def get_subjects(self):
        """Fetch subject labels; sets self.subject_ids on a successful query."""
        subject_query = requests.get(self.url_base+'subjects', cookies=self.cookie)
        if subject_query.ok:
            subject_json = subject_query.json()
            subject_list_dict = subject_json['ResultSet']['Result']
            self.subject_ids = { x['label']:0 for x in subject_list_dict }
    def filter_subjects(self,subjects):
        """Restrict to *subjects*; sets self.filt_subject_ids.

        *subjects* is a list of integer ids or the string "ALL".  Requested
        subjects that xnat does not know are reported and dropped.
        (Removed an unused 'import re' from the original body.)
        """
        if subjects != "ALL": #if the subject list specifies who to download
            missing_xnat_subjects = list(set(subjects) - set([int(x) for x in self.subject_ids.keys()]))
            if missing_xnat_subjects:
                self.filt_subject_ids = dict.fromkeys(list(set(subjects) - set(missing_xnat_subjects)))
                print('xnat does not have data for these subjects: %s' % str(missing_xnat_subjects))
            else:
                self.filt_subject_ids = dict.fromkeys(subjects)
        else:
            self.filt_subject_ids = dict.fromkeys([int(x) for x in self.subject_ids.keys()]) #use all the subjects otherwise
class xnat_query_sessions(object):
    """get the sessions from a particular subject"""
    def __init__(self,cookie,url_base,project,subject):
        self.cookie=cookie
        self.url_base=url_base
        self.subject=subject
        self.project=project
    def get_sessions(self,session_labels=None):
        """Populate self.session_ids for this subject.

        Session labels are cross-checked against session dates (a label is
        assumed to be the date with dashes removed, modulo an _N suffix); on
        mismatch session_ids is set to False and 1 is returned.  When
        *session_labels* is given, labels are paired with the sorted xnat
        sessions, truncating to however many sessions exist.
        """
        import re
        session_query = requests.get(self.url_base+'subjects/%s/experiments' % (self.subject), cookies=self.cookie)
        if session_query.ok:
            session_json = session_query.json()
            session_list_dict = session_json['ResultSet']['Result']
            #sort the session list (fix issues where they are uploaded in the wrong order)
            session_list = [session['label'] for session in session_list_dict]
            date_list = [session['date'] for session in session_list_dict]
            # Normalize: drop a trailing _<digit> from labels, dashes from dates.
            session_list_comp = [re.sub('_[0-9]', '', session) for session in session_list]
            date_list_comp = [session.replace('-','') for session in date_list]
            print(str(session_list_comp))
            print(str(date_list_comp))
            if session_list_comp == date_list_comp:
                print('date check passed')
            else:
                print('mismatch between label and date, exiting')
                self.session_ids = False
                return 1
            session_list.sort()
            if session_labels is not None:
                num_sessions = int(session_json['ResultSet']['totalRecords'])
                num_labels = len(session_labels)
                if num_sessions != num_labels:
                    print('%s has the wrong number of sessions, expected: %s, found: %s' % (self.subject,str(num_labels),str(num_sessions)))
                    print('getting session info for available sessions (assuming they are in the correct order)')
                    self.session_ids = { sess_label : {session: 0} for sess_label,session in zip(session_labels[0:num_sessions],session_list) }
                else:
                    self.session_ids = { sess_label : {session: 0} for sess_label,session in zip(session_labels,session_list) }
            else:
                #not supported in this script
                self.session_ids = { x['label']: 0 for x in session_list_dict }
    def filter_sessions(self,sessions):
        """Drop sessions not in *sessions* ("ALL" keeps everything)."""
        #updates the session_ids dictionary
        if sessions != "ALL":
            #find all session that are not a part of the list
            pop_list=list(set(self.session_ids.keys()) - set(sessions))
            for key in pop_list:
                self.session_ids.pop(key) #remove session from analysis
class xnat_query_scans(object):
    """get the scans from a particular session"""
    def __init__(self,cookie,url_base,project,subject, session):
        self.cookie=cookie
        self.url_base=url_base
        self.subject=subject
        self.session=session
        self.project=project
    def get_scans(self):
        """Populate self.scan_ids: scan ID -> [{series_description}, quality].

        NOTE(review): series_description is wrapped in a one-element set (the
        original comment below also questions this); consumers appear to
        depend on it, so it is preserved.
        """
        scan_query = requests.get(self.url_base+'subjects/%s/experiments/%s/scans/' % (self.subject,self.session), cookies=self.cookie)
        if scan_query.ok:
            scan_json = scan_query.json()
            scan_list_dict = scan_json['ResultSet']['Result']
            self.scan_ids = { x['ID']:[{str(x['series_description']) },x['quality']] for x in scan_list_dict }
            #ID is a number like 1,3,300
            #type is a name like fMRI FLANKER, PU:Sag CUBE FLAIR, represented as a set?
            #^use series_description instead of type to differentiate multiple
            #scans as the same type (e.g. DTI 64 dir versus DTI extra B0)
            #quality specifies if the scan is usable
class xnat_query_dicoms(object):
    """get the dicoms from a particular scan"""
    def __init__(self,cookie,url_base,project,subject,session,scan):
        self.cookie=cookie
        self.url_base=url_base
        self.subject=subject
        self.session=session
        self.scan=scan
    def get_dicoms(self,out_dir):
        """Download the scan's DICOM zip and flatten its files into out_dir.

        Directory structure inside the zip is discarded: each member is
        written to out_dir under its basename only.
        """
        #http://stackoverflow.com/questions/4917284/extract-files-from-zip-without-keeping-the-structure-using-python-zipfile
        import zipfile
        from io import BytesIO
        import shutil
        dicom_query = requests.get(self.url_base+'subjects/%s/experiments/%s/scans/%s/resources/DICOM/files?format=zip' % (self.subject,self.session,self.scan), cookies=self.cookie)
        if dicom_query.ok:
            dicom_zip = zipfile.ZipFile(BytesIO(dicom_query.content))
            for member in dicom_zip.namelist():
                filename = os.path.basename(member)
                # Skip directory entries (their basename is empty).
                if not filename:
                    continue
                source = dicom_zip.open(member)
                target = open(os.path.join(out_dir,filename), "wb")
                with source, target:
                    shutil.copyfileobj(source, target)
class subject_variables_dictionary(object):
    """Map subject numbers to their BIDS variable fields, read from a CSV."""
    def __init__(self, sub_vars):
        self.sub_dict = {}
        with open(sub_vars) as sub_file:
            for raw_line in sub_file:
                # Strip only newlines (mac-style line endings noted upstream).
                key, *values = raw_line.strip('\n').split(',')
                self.sub_dict[key] = values
    def get_bids_var(self, sub_num):
        """Concatenate the subject's fields into a single BIDS string.

        Assumes sub_num and the CSV entries are not zero-padded.
        """
        return ''.join(self.sub_dict[sub_num])
def parse_cmdline(args):
    """Parse command line arguments; returns a namespace with 'input_json'."""
    import argparse
    parser = argparse.ArgumentParser(
        description=(
            'download_xnat.py downloads xnat dicoms and saves them in BIDs compatible directory format'))
    # Required arguments
    required_group = parser.add_argument_group('Required arguments')
    required_group.add_argument('-i', '--input_json',
                                dest='input_json', required=True,
                                help='json file defining inputs for this script.')
    return parser.parse_args(args)
def parse_json(json_file):
"""Parse json file."""
import json
with open(json_file) as json_input:
input_dict = json.load(json_input)
mandatory_keys = ['username','scan_dict','dcm_dir','sessions','session_labels','project','subjects','scans']
optional_keys = ['subject_variables_csv','zero_pad','nii_dir']
total_keys = mandatory_keys+optional_keys
print("total_keys: "+str(total_keys))
#are there any inputs in the json_file that are not supported?
extra_inputs = list(set(input_dict.keys()) - set(total_keys))
if extra_inputs:
print('option(s) not supported: %s' % str(extra_inputs))
#are there missing mandatory inputs?
missing_inputs = list(set(mandatory_keys) - set(input_dict.keys()))
if m |
# -*- coding: utf-8 -*-
'''
Created on 2014-2-17
@author: CL. | lam
'''
from sqlalchemy.sql.expression import and_
from rpac.widgets.components import RPACForm, RPACText, RPACCalendarPicker, \
RPACSelect
from rpac.model import ORDER_NEW, ORDER_INPROCESS, ORDER_COMPLETE, qry, \
PrintShop, ORDER_CANCEL
from rpac.model.orderin | g import ORDER_MANUAL
__all__ = ['order_search_form', ]
def getPrintShop():
    """Option list for the print-shop select: a blank entry followed by every
    active shop (active == 0), ordered by name.  (Python 2: uses unicode.)"""
    return [("", ""), ] + [(unicode(p.id), unicode(p)) for p in qry(PrintShop).filter(and_(PrintShop.active == 0)).order_by(PrintShop.name).all()]
class OrderSearchForm(RPACForm):
    """Order search form: job/PO numbers, creation date range, status, shop."""
    fields = [
        RPACText("no", label_text = "Job No"),
        RPACText("customerpo", label_text = "Family Dollar PO#"),
        RPACText("vendorpo", label_text = "Vendor PO"),
        RPACCalendarPicker("create_time_from", label_text = "Create Date(from)"),
        RPACCalendarPicker("create_time_to", label_text = "Create Date(to)"),
        # Fixed user-visible typo: "Canelled" -> "Cancelled".
        RPACSelect("status", label_text = "Status", options = [("", ""), (str(ORDER_NEW), "New"),
                                                               (str(ORDER_INPROCESS), "In Process"),
                                                               (str(ORDER_COMPLETE), "Completed"),
                                                               (str(ORDER_CANCEL), "Cancelled"),
                                                               (str(ORDER_MANUAL), "Manual"),
                                                               ]),
        RPACSelect("printShopId", label_text = "Print Shop", options = getPrintShop),
    ]
order_search_form = OrderSearchForm()
|
# coding=utf-8
"""
Emulate a gmetric client for usage with
[Ganglia Monitoring System](http://ganglia.sourceforge.net/)
"""
from Handler import Handler
import logging
try:
import gmetric
except ImportError:
gmetric = None
class GmetricHandler(Handler):
    """
    Implements the abstract Handler class, sending data the same way that
    gmetric does.
    """
    def __init__(self, config=None):
        """
        Create a new instance of the GmetricHandler class
        """
        # Initialize Handler
        Handler.__init__(self, config)
        if gmetric is None:
            # NOTE(review): bailing out here leaves self.socket/self.host/
            # self.gmetric unset, so a later process() call would raise
            # AttributeError rather than fail gracefully -- confirm intent.
            logging.error("Failed to load gmetric module")
            return
        # Initialize Data
        self.socket = None
        # Initialize Options
        self.host = self.config['host']
        self.port = int(self.config['port'])
        self.protocol = self.config['protocol']
        if not self.protocol:
            self.protocol = 'udp'
        # Initialize
        self.gmetric = gmetric.Gmetric(self.host, self.port, self.protocol)
    def get_default_config_help(self):
        """
        Returns the help text for the configuration options for this handler
        """
        config = super(GmetricHandler, self).get_default_config_help()
        config.update({
            'host': 'Hostname',
            'port': 'Port',
            'protocol': 'udp or tcp',
        })
        return config
    def get_default_config(self):
        """
        Return the default config for the handler
        """
        config = super(GmetricHandler, self).get_default_config()
        config.update({
            'host': 'localhost',
            'port': 8651,
            'protocol': 'udp',
        })
        return config
    def __del__(self):
        """
        Destroy instance of the GmetricHandler class
        """
        self._close()
    def process(self, metric):
        """
        Process a metric by sending it to a gmond instance
        """
        # Just send the data as a string
        self._send(metric)
    def _send(self, metric):
        """
        Send data to gmond.
        """
        metric_name = self.get_name_from_path(metric.path)
        # tmax/dmax are gmetric's max reporting interval and lifetime hints.
        tmax = "60"
        dmax = "0"
        slope = "both"
        # FIXME: Badness, shouldn't *assume* double type
        metric_type = "double"
        units = ""
        group = ""
        self.gmetric.send(metric_name,
                          metric.value,
                          metric_type,
                          units,
                          slope,
                          tmax,
                          dmax,
                          group)
    def _close(self):
        """
        Close the connection
        """
        self.gmetric = None
|
tem()
item.setText(asset['asset'])
item.asset = asset
self.asset.addItem(item)
def update_form(self):
self.asset.clear()
assets = lib.get_assets(lib.session['project'])
for _, asset in sorted(assets.items()):
self.add_asset(asset)
def update_preview(self):
state = self.state()
if not state['asset']:
self.preview.setText('Select an asset...')
return
name = state['asset']['asset']
if state['suffix']:
name += '_' + state['suffix']
next_publish = lib.get_next_publish(state['asset'], name)
self.preview.setText(next_publish['basename'])
    def export(self):
        """Export the gathered shadeset for the selected asset.

        Reads the UI state, stores export-attribute settings, gathers a
        shadeset and writes it to the asset's next publish location.
        """
        # TODO: move to controller
        state = self.state()
        # Update export attribute settings
        # These are used by CustomAttributesSet
        lib.set_export_attrs(state['attrs'])
        lib.set_export_attr_prefixes(state['attr_prefixes'])
        if not state['asset']:
            self.preview.setText('Select an asset...')
            return
        # NOTE(review): when 'selection' is checked this calls
        # gather_hierarchy() (no selection argument), while the else branch
        # passes selection=False to gather() -- confirm the branches are not
        # inverted.
        if state['selection']:
            ss = api.gather_hierarchy(render_layers=state['render_layers'])
        else:
            ss = api.gather(
                selection=state['selection'],
                render_layers=state['render_layers'],
            )
        name = state['asset']['asset']
        if state['suffix']:
            name += '_' + state['suffix']
        next_publish = lib.get_next_publish(state['asset'], name)
        ss.export(
            outdir=next_publish['dirname'],
            name=next_publish['basename'].rsplit('.', 1)[0],
        )
class ImportForm(QtWidgets.QWidget):
    def __init__(self, parent=None):
        """Build the import-form widgets, lay them out and wire signals."""
        super(ImportForm, self).__init__(parent=parent)
        # Selection widgets: project combo plus asset and shadeset lists.
        self.project = QtWidgets.QComboBox()
        self.project.setSizeAdjustPolicy(
            self.project.AdjustToMinimumContentsLengthWithIcon
        )
        self.asset = QtWidgets.QListWidget()
        self.shadeset = QtWidgets.QListWidget()
        # Options controlling how the shadeset is applied.
        self.selection = QtWidgets.QCheckBox('Selected &Hierarchies')
        self.selection.setChecked(True)
        self.render_layers = QtWidgets.QCheckBox('&Render Layers')
        self.apply_button = QtWidgets.QPushButton('&Apply Shadeset')
        options = QtWidgets.QGroupBox()
        options_layout = QtWidgets.QVBoxLayout()
        options_layout.addWidget(self.selection)
        options_layout.addWidget(self.render_layers)
        options.setLayout(options_layout)
        # Grid layout: labels above widgets; options and apply on the right.
        self.layout = QtWidgets.QGridLayout()
        self.layout.setContentsMargins(20, 20, 20, 20)
        self.layout.setHorizontalSpacing(20)
        self.layout.setRowStretch(3, 1)
        self.layout.addWidget(QtWidgets.QLabel('Project'), 0, 0)
        self.layout.addWidget(self.project, 1, 0)
        self.layout.addWidget(QtWidgets.QLabel('Asset'), 2, 0)
        self.layout.addWidget(self.asset, 3, 0)
        self.layout.addWidget(QtWidgets.QLabel('ShadeSet'), 2, 1)
        self.layout.addWidget(self.shadeset, 3, 1)
        self.layout.addWidget(options, 4, 1)
        self.layout.addWidget(self.apply_button, 5, 1)
        self.setLayout(self.layout)
        # Signal wiring.
        self.apply_button.clicked.connect(self.apply)
        self.project.activated.connect(self.on_project_changed)
        self.asset.currentItemChanged.connect(self.on_asset_changed)
        self._projects = None
        self.update_form()
def state(self):
# Get selected project
project = self.project.currentText()
# Get selected asset
asset_item = self.asset.currentItem()
asset = None
if asset_item:
asset = asset_item.asset
# Get selected shadeset
shadeset_item = self.shadeset.currentItem()
shadeset = None
if shadeset_item:
shadeset = shadeset_item.publish
return dict(
project=project,
asset=asset,
shadeset=shadeset,
selection=self.selection.isChecked(),
render_layers=self.render_layers.isChecked(),
)
def update_form(self):
self.project.blockSignals(True)
self.project.clear()
for project in sorted(lib.get_projects()):
self.project.addItem(project)
self.project.blockSignals(False)
if lib.session['project']:
index = self.project.findText(lib.session['project'])
if index:
self.project.setCurrentIndex(index)
self.update_asset_widget()
def add_asset(self, asset):
item = QtWidgets.QListWidgetItem()
item.setText(asset['asset'])
item.asset = asset
self.asset.addItem(item)
def on_project_changed(self):
project = self.project.currentText()
lib.set_project(project)
self.update_asset_widget()
def update_asset_widget(self):
self.asset.clear()
assets = lib.get_assets(lib.session['project'])
for _, asset in sorted(assets.items()):
self.add_asset(asset)
    def on_asset_changed(self):
        """Refresh the shadeset list whenever the asset selection changes."""
        self.update_shadeset_widget()
def add_shadeset(self, publish):
item = QtWidgets.QListWidgetItem()
item.setText(publish['basename'].rsplit('.', 1)[0])
item.publish = publish
self.shadeset.addItem(item)
def update_shadeset_widget(self):
self.shadeset.clear()
state = self.state()
if state['asset']:
publishes = lib.get_publishes(state['asset'])
for name, versions in sorted(publishes.items()):
for version, publish in sorted(versions.items()):
self.add_shadeset(publish)
def apply(self):
# TODO: Move to controller
from maya import cmds
state = self.state()
publish = state['shadeset']
layer = cmds.editRenderLayerGlobals(q=True, crl=True)
if not layer == 'defaultRenderLayer':
# TODO log an error and return instead
raise Exception('You must be in the masterLayer to apply '
'a shadeset.')
reference_shadeset = True
pattern = publish['path'].split('.')[0] + '.*_shadingGroups.mb'
shaders_path = publish['path'].replace('.yml', '_shadingGroups.mb')
norm_path = os.path.normpath(shaders_path)
file_name = os.path.basename(norm_path)
for ref in cmds.ls(references=True):
# Ignore loose reference nodes
try:
ref_path = cmds.referenceQuery(ref, filename=True)
ref_path = ref_path.split('{')[0]
except RuntimeError as e:
if "not associated with a reference file" in str(e):
continue
raise
ref_name = os.path.basename(ref_path)
if norm_path == os.path.normpath(ref_path):
response = QtWidgets.QMessageBox.question(
self,
'Reapply shadeset...',
'Reapply {} ?'.format(file_name),
QtWidgets.QMessageBox.Yes,
QtWidgets.QMessageBox.No,
| )
if response == QtWidgets.QMessageBox.No:
return
sel = cmds.ls(s | l=True, long=True)
cmds.file(ref_path, loadReference=ref)
cmds.select(sel, replace=True)
reference_shadeset = False
elif fnmatch(ref_path, pattern):
response = QtWidgets.QMessageBox.question(
self,
'Replace shadeset...',
'Replace {} with {}?'.format(ref_name, file_name),
QtWidgets.QMessageBox.Yes,
QtWidgets.QMessageBox.No,
)
if response == QtWidgets.QMessageBox.No:
return
sel = cmds.ls(sl=True, long=True)
utils.update_reference(
ref_node=ref,
in_file=shaders_path,
namespace='sg'
)
cmds.select(sel, replace=True) |
#here we can use wrapper to accerlate the whole process, since many text may be same, we can save the intermediate results
import operator
from math import log10
import re
import string
import random
import heapq
"""First Part:
Word Segmentation"""
def memory(f):
    """Memoize *f*: cache results keyed by the positional-argument tuple.

    Arguments must be hashable; keyword arguments are not supported (same
    contract as before).  functools.wraps preserves the wrapped function's
    name/docstring, and the cache is exposed for inspection.
    """
    from functools import wraps
    table = {}
    @wraps(f)
    def fmemo(*args):
        if args not in table:
            table[args] = f(*args)
        return table[args]
    fmemo.cache = table  # allow callers to inspect/clear the memo table
    return fmemo
#this memoization is really important: it cuts the time from 2^n to n^2*L
@memory
def Segment(text):
    """Return the list of words that best segments *text*.

    Recursive: tries every (first, remainder) split from Split() and keeps
    the candidate with the highest unigram probability.
    """
    if not text: return []
    candidates = ([first]+Segment(remind) for (first,remind) in Split(text))
    #TODO: we could also store the probability of each best segment instead of recomputing it
    # BUG FIX: the ordering key was 'bPwords', an undefined name that raised
    # NameError on the first call; the intended key function is Pwords.
    return max(candidates,key=Pwords) #key specifies a one-argument ordering function
#L parameter is 20 in default ,this method returns a list of all possible (frist,rem) pairs, len(first)<=L
def Split(text,L=20):
return [(text[:i+1],text[i+1:]) for i in range(min(len(text),L))]
# The naive-Bayes probability of a sequence of words
def Pwords(words):
    """Unigram (naive Bayes) probability of *words*: the product of P(w)."""
    probabilities = [_P(w) for w in words]
    return Product(probabilities)
def Product(nums):
    """Return the product of an iterable of numbers (1 for an empty iterable).

    FIX: imports reduce from functools so this also runs on Python 3, where
    reduce is no longer a builtin; the initializer makes the empty product 1
    instead of raising TypeError.
    """
    from functools import reduce
    return reduce(operator.mul,nums,1)
#P(word) == count(word)/N where N is the Google corpus size; the most common
#third of a million words covers ~98% of tokens, so numbers/punctuation can be dropped.
def constantWord(word,N):
    """Uniform unknown-word estimator: every unseen word gets probability 1/N."""
    return 1.0 / N
def avoidLongWord(word,N):
    """Unknown-word estimator that penalizes length: 10 / (N * 10**len(word))."""
    length_penalty = 10 ** len(word)
    return 10.0 / (N * length_penalty)
class Pdict(dict):
    """Probability distribution over words estimated from (key, count) data.

    Calling the instance returns P(key): relative frequency for known keys,
    otherwise the ``missing`` estimator applied to (key, N).
    """
    def __init__(self,data,N=None,missing=constantWord):
        # Accumulate counts; duplicate keys in *data* add up.
        for key,count in data:
            self[key] = self.get(key,0) + int(count)
        # Corpus size: explicit N, or the sum of the observed counts.
        # FIX: values() instead of the Python 2-only itervalues(), which
        # raises AttributeError under Python 3; behaviour is identical.
        self.N = float(N or sum(self.values()))
        self.missing = missing
    def __call__(self,key):
        if key in self: return float(self[key]/self.N)
        else: return self.missing(key,self.N)
def Datafile(name,sep='\t'):
    """Yield each line of file *name* split on *sep*.

    The trailing newline stays attached to the last field, matching the
    original behaviour.  FIX: uses open() (the Python 2-only file() builtin
    is a NameError on Python 3) and closes the file via a context manager.
    """
    with open(name) as fh:
        for line in fh:
            yield line.split(sep)
# Corpus size: total number of tokens in the corpus the counts came from.
_N = 1024908267229 #Number of tokens
# Unigram distribution loaded from vocab.txt; unseen words fall back to avoidLongWord.
_P = Pdict(Datafile('vocab.txt'),_N,avoidLongWord)
###bigram
##model P(W1:n) = product over k=1..n of P(Wk|Wk-1)
def Pwords2(word,pre):
    # Conditional bigram probability P(word | pre), backing off to the
    # unigram P(word) when the bigram was never observed.  Since both _P2
    # and _P share the corpus size _N, the ratio equals count2/count1.
    words = pre+' '+word
    if words not in _P2:
        return _P(word)
    else: return _P2(pre+' '+word)/float(_P(pre))
# Bigram counts; shares the unigram corpus size _N.
_P2 = Pdict(Datafile('count_2w.txt'),_N)
@memory
def Segment2(text,pre="<S>"):
    # Best bigram-model segmentation of *text* given the previous word *pre*.
    #return (log10(P(words)),words) where words is the best segmentation
    if not text: return (0.0,[])
    candidates= [combine(log10(Pwords2(first,pre)),first,Segment2(remind,first)) for first,remind in Split(text)]
    return max(candidates)
def combine(Pfirst, first, Prem_rem):
    """Combine a first word with the best segmentation of the remainder.

    *Prem_rem* is the (log-probability, words) pair for the tail.  FIX: the
    original used a Python 2 tuple parameter -- ``(Prem, rem)`` in the
    signature -- which is a SyntaxError on Python 3 (PEP 3113); callers
    still pass the tuple positionally, unchanged.
    """
    Prem, rem = Prem_rem
    return (Pfirst+Prem,[first]+rem)
"""Second Part:
Secret Code"""
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def Encode(msg,key):
    # Encode *msg* with a substitution cipher mapping alphabet -> key in
    # both upper and lower case.
    # NOTE(review): string.maketrans is Python 2 only; Python 3 would need
    # str.maketrans -- confirm the target interpreter for this module.
    #encode string with the substitution key
    return msg.translate(string.maketrans(ul(alphabet),ul(key)))
def ul(text): return text.upper()+text.lower()
def Encode_Shift(msg,n=10):
    # Caesar cipher: rotate the alphabet by *n* and substitute via Encode.
    #encode string with a shift(caesar) cipher
    return Encode(msg,alphabet[n:]+alphabet[:n])
#we can use the same technique as above: use logPwords to decode without knowing the key
def logPwords(words):
    # Sum of log10 unigram probabilities; accepts either a raw string
    # (tokenized first) or an iterable of words.
    if isinstance(words,str): words=getAllWords(words)
    return sum(log10(_P(word)) for word in words)
def getAllWords(words):
    """Return the lowercase alphabetic tokens of *words*, in order."""
    lowered = words.lower()
    return re.findall(r"[a-z]+", lowered)
def Decode_Shift(msg):
    # Brute-force all 26 shifts and keep the most word-like plaintext.
    candidates = [Encode_Shift(msg,n) for n in range(len(alphabet))]
    return max(candidates,key=logPwords)
#note that above way is too easy
#here we want to substitute using a general cipher,in which any letter can be substitued for any other letter
"""
step1: given a encoded msg,split them into lowercase only words, and combine these words(remove those numbers and punctuations)
step2: from a random x, use local search to get to the local minimum, design the cost function
step3: repeat step2
"""
#use letter n-grams model
# Letter trigram and bigram counts (no explicit N: defaults to the sum of counts).
P3l = Pdict(Datafile("count_3l.txt"))
P2l = Pdict(Datafile("count_2l.txt"))
def localsearch(x,f,neighbors,steps=10000):
    """Hill-climb from *x*, maximizing cost function *f*.

    neighbors(x) yields candidate successors; whenever one improves f(x)
    the search jumps there and restarts its neighbor stream.  Runs at most
    *steps* candidate evaluations and returns the best x found.

    FIX for Python 2/3 portability: ``iterator.next()`` -> ``next(iterator)``
    and ``print x`` -> ``print(x)`` (both forms work on either version).
    NOTE(review): as before, a finite neighbor stream that is exhausted
    before *steps* raises StopIteration -- confirm callers rely on infinite
    generators.
    """
    fx = f(x)
    neighborhood = iter(neighbors(x))
    for i in range(steps):
        #print i,fx
        x2 = next(neighborhood)
        fx2 = f(x2)
        if fx2 > fx:
            x,fx = x2,fx2
            neighborhood = iter(neighbors(x))
    print(x)
    return x
_cat ="".join
def Shuffle(text):
text = list(text)
random.shuffle(text)
return text
def DecodeGeneral(msg,step=4000,restarts=20):
    # Decode a general substitution cipher: restarted local search over
    # random key shuffles scored by letter trigrams, then re-segmentation.
    # NOTE(review): logP3letters is not defined anywhere in this part of
    # the file -- confirm it exists elsewhere, otherwise this raises
    # NameError.  `cat` is defined later at module level (fine at call time).
    #decode a general cipher string by using local search
    msg = cat(getAllWords(msg)) #just keep words of alphabet,lowercase
    print msg
    candidates= [localsearch(Encode(msg,key=cat(Shuffle(alphabet))),logP3letters,getNeighbors,step) for i in range(restarts)]
    (p,words) = max(Segment2(text) for text in candidates)
    return ' '.join(words)
def getNeighbors(msg):
    # Generate nearby decodings: first try letter swaps that improve the 20
    # rarest bigrams of *msg*, then fall back to random swaps forever.
    # NOTE(review): ngrams() is not defined in this part of the file --
    # confirm it exists elsewhere.  string.maketrans is Python 2 only.
    def swap(a,b):
        return msg.translate(string.maketrans(a+b,b+a))
    for bigram in heapq.nsmallest(20,set(ngrams(msg,2)),P2l):
        print bigram
        b1,b2=bigram
        for c in alphabet:
            if b1 == b2:
                if P2l(c+c) > P2l(bigram): yield swap(c,b1)
            else:
                if P2l(c+b2) > P2l(bigram): yield swap(c,b1)
                if P2l(b1+c) > P2l(bigram): yield swap(c,b2)
    # Endless stream of random perturbations keeps localsearch supplied.
    while True:
        yield swap(random.choice(alphabet), random.choice(alphabet))
cat = ''.join
"""
Spelling Correction:
Find argmaxcP(c|w) which means type w, c is the candidates find highest probability of c
use bayes rule P(c|w) = P(w|c) +P(c)
P(c) is straightforward
P(w|c) is called error model,we need more data in http://www.dcs.bbk.ac.uk/~ROGER/corpora.html.
the data is not large enough, we can hope to just look up P(w=thaw|c=thew), changes are slim
we do some trick by ignoring the letters that are same, then we get P(w=a|c=e) the probability that a was typed
when the corrector is e
"""
def AllCorrections(text):
    # Replace every alphabetic token in *text* with its most likely correction.
    #spelling correction for all words in text
    return re.sub('[a-zA-Z]+',lambda match:getCorrect(match.group(0)),text)
def getCorrect(word):
    """Return the most probable correct spelling of *word*.

    Each candidate correction c with edit string e is scored by
    Pedit(e) * P(c).  FIX: the original used a Python 2 tuple-parameter
    lambda (``lambda (c,e): ...``), a SyntaxError on Python 3 (PEP 3113);
    the replacement indexes the pair instead.
    """
    candidates = getEdits(word).items()
    c,edit = max(candidates, key=lambda ce: Pedit(ce[1])*_P(ce[0]))
    return c
# Single-edit error counts, and the prior probability that a word was misspelled.
_Pe=Pdict(Datafile('count_1edit.txt'))
_PSpellError = 1./20
def Pedit(edit):
    # Probability of an edit string: '' (no edit), 'a|b', or 'a|b+c|d'.
    #the probability of an edit,can be "" or 'a|b' or 'a|b + c|d'
    if edit == "": return (1.- _PSpellError)
    return _PSpellError*Product(_Pe(e) for e in edit.split("+"))
# All prefixes of every vocabulary word; used to prune the edit search.
_Prefix = set(w[:i] for w in _P for i in range(len(w)+1))
#optimization: we don't need to consider all edits, since few of them are in the
#vocabulary; we precompute all possible prefixes and split the word into head
#and tail, so the head must always be a known prefix
def getEdits(w,dis=2):
    # Return a dict {correction: edit-string} for every vocabulary word
    # within *dis* edits of *w*, keeping the most probable edit per word.
    res = {}
    def editsR(head,tail,d,edits):
        # head: partial candidate (always a known prefix); tail: unmatched
        # suffix of w; d: remaining edit budget; edits: edit ops so far.
        def edit(L,R): return edits+[R+'|'+L]
        C = head+tail
        if C in _P:
            e = '+'.join(edits)
            if C not in res: res[C] = e
            else: res[C] = max(res[C],e,key=Pedit)
        if d<=0: return
        extensions = [head+c for c in alphabet if head+c in _Prefix] ##given a head, all possible heads
        pre = (head[-1] if head else '<') ## previous character
        #########Insertion
        for h in extensions:
            editsR(h,tail,d-1,edit(pre+h[-1],pre))
        if not tail: return
        ########Deletion
        editsR(head,tail[1:],d-1,edit(pre,pre+tail[0]))
        for h in extensions:
            if h[-1] == tail[0]: ##match
                editsR(h,tail[1:],d,edits)
            else: ##replacement
                editsR(h,tail[1:],d-1,edit(h[-1],tail[0]))
        ##transpose
        # NOTE(review): the 'transpose' case is labeled but not implemented here.
    editsR('',w,dis,[])
    return res
|
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import pytest
from ezdxf.lldxf.attributes import DXFAttr, RETURN_DEFAULT
def test_return_default():
    """RETURN_DEFAULT fixer must return each attribute's own default value."""
    # A validator that always fails guarantees the fixer would be invoked.
    attr = DXFAttr(
        code=62,
        default=12,
        validator=lambda x: False,
        fixer=RETURN_DEFAULT,
    )
    assert attr.fixer(7) == 12

    # Second attribute with a different default: the fixer is per-attribute,
    # not a shared constant.
    attr2 = DXFAttr(
        code=63,
        default=13,
        validator=lambda x: False,
        fixer=RETURN_DEFAULT,
    )
    assert attr2.fixer(7) == 13

if __name__ == "__main__":
    pytest.main([__file__])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Models.FeatureProcessing import *
from keras.models import Sequential
from keras.layers import Activation, Dense, LSTM
from keras.optimizers import Adam, SGD
import numpy as np
import abc
from ClassificationModule import ClassificationModule
class descriptionreponamelstm(ClassificationModule):
    """A basic LSTM neural network.

    Reads the repository description plus name character by character
    (one-hot encoded, up to ``maxlen`` characters) and classifies into
    7 hard-coded classes via a softmax output layer.
    """
    def __init__(self, num_hidden_layers=3):
        ClassificationModule.__init__(self, "Description and reponame LSTM", "A LSTM reading the description and reponame character by character")
        hidden_size = 300
        self.maxlen = 300  # maximum number of characters fed to the LSTM

        # Set output_size
        self.output_size = 7 # Hardcoded for 7 classes

        model = Sequential()
        # Maximum of self.maxlen characters allowed, each as a one-hot-encoded array
        model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength())))
        # Stack of dense layers, then a softmax classification head.
        for _ in range(num_hidden_layers):
            model.add(Dense(hidden_size))
        model.add(Dense(self.output_size))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=SGD(),
                      metrics=['accuracy'])
        self.model = model
        print "\t-", self.name

    def resetAllTraining(self):
        """Reset classification module to status before training"""
        resetWeights(self.model)

    def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True):
        """Train incrementally on one sample.  (Possibly also train with some
        amount of old data so the model does not overfit the new data.)"""
        readme_vec = self.formatInputData(sample)
        label_index = getLabelIndex(sample)
        label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras
        self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose)

    def train(self, samples, nb_epoch=200, shuffle=True, verbose=True):
        """Train on a list of samples; class weights compensate label imbalance."""
        train_samples = []
        train_lables = []
        for sample in samples:
            formatted_sample = self.formatInputData(sample)[0].tolist()
            train_samples.append(formatted_sample)
            train_lables.append(oneHot(getLabelIndex(sample)))
        train_lables = np.asarray(train_lables)
        train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights())
        self.isTrained = True
        return train_result

    def predictLabel(self, sample):
        """Return the label index the classifier assigns to *sample*
        (0 when the model has not been trained yet)."""
        if not self.isTrained:
            return 0
        sample = self.formatInputData(sample)
        return np.argmax(self.model.predict(sample))

    def predictLabelAndProbability(self, sample):
        """Return the probability the module assigns to each label:
        [predicted_label, p_0, ..., p_6] (all zeros when untrained)."""
        if not self.isTrained:
            return [0, 0, 0, 0, 0, 0, 0, 0]
        sample = self.formatInputData(sample)
        prediction = self.model.predict(sample)[0]
        return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned

    def formatInputData(self, sample):
        """Extract description + repository name and transform to a vector."""
        sd = getDescription(sample)
        sd += getName(sample)
        # Returns numpy array which contains 1 array with features
        return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0)
|
from app.models import db
class Page(db.Model):
    """Page model class: a linked page with title, URL and metadata."""
    __tablename__ = 'pages'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, nullable=False)
    title = db.Column(db.String)
    url = db.Column(db.String, nullable=False)
    description = db.Column(db.String)
    place = db.Column(db.String)
    language = db.Column(db.String)
    index = db.Column(db.Integer, default=0)  # sort-order column, defaults to 0

    def __init__(self, name=None, title=None, description=None, url=None, place=None, index=None, language=None):
        self.name = name
        self.description = description
        self.title = title
        self.url = url
        self.place = place
        self.language = language
        self.index = index

    def __repr__(self):
        return '<Page %r>' % self.name

    def __str__(self):
        # Python 2 idiom: __str__ delegates to __unicode__, UTF-8 encoded.
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        return self.name

    @property
    def serialize(self):
        """Return object data in an easily serializable format.

        NOTE(review): 'index' is not included here -- confirm intentional.
        """
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'title': self.title,
            'url': self.url,
            'place': self.place,
            'language': self.language
        }
|
"""
Test the integrators.
"""
import os
# Third-party
import pytest
import numpy as np
# Project
from .. import (
LeapfrogIntegrator,
RK5Integrator,
DOPRI853Integrator,
Ruth4Integrator,
)
from gala.tests.optional_deps import HAS_TQDM
# Integrators to test
integrator_list = [
RK5Integrator,
DOPRI853Integrator,
LeapfrogIntegrator,
Ruth4Integrator,
]
# Gradient functions:
def sho_F(t, w, T):  # noqa
    """Simple harmonic oscillator gradient: dq/dt = p, dp/dt = -(2*pi/T)**2 * q."""
    omega_sq = (2 * np.pi / T) ** 2
    q, p = w
    deriv = np.zeros_like(w)
    deriv[0] = p
    deriv[1] = -omega_sq * q
    return deriv
def forced_sho_F(t, w, A, omega_d):
    """Driven pendulum gradient: dq/dt = p, dp/dt = -sin(q) + A*cos(omega_d*t)."""
    q, p = w
    drive = A * np.cos(omega_d * t)
    deriv = np.zeros_like(w)
    deriv[0] = p
    deriv[1] = drive - np.sin(q)
    return deriv
def lorenz_F(t, w, sigma, rho, beta):
    """Lorenz system gradient; any extra components of *w* stay zero."""
    x, y, z, *_ = w
    dx = sigma * (y - x)
    dy = x * (rho - z) - y
    dz = x * y - beta * z
    deriv = np.zeros_like(w)
    deriv[0] = dx
    deriv[1] = dy
    deriv[2] = dz
    return deriv
def ptmass_F(t, w):
    """Planar point-mass (Kepler) gradient: acceleration = -r / |r|**3."""
    x, y, px, py = w
    r_sq = x * x + y * y
    accel = -1.0 / r_sq ** 1.5
    deriv = np.zeros_like(w)
    deriv[0] = px
    deriv[1] = py
    deriv[2] = x * accel
    deriv[3] = y * accel
    return deriv
@pytest.mark.parametrize("Integrator", integrator_list)
def test_sho_forward_backward(Integrator):
    """Forward and backward SHO integrations from the same state must end at
    matching final states (to within tolerance)."""
    integrator = Integrator(sho_F, func_args=(1.0,))
    dt = 1e-4
    n_steps = 10_000
    forw = integrator.run([0.0, 1.0], dt=dt, n_steps=n_steps)
    back = integrator.run([0.0, 1.0], dt=-dt, n_steps=n_steps)
    assert np.allclose(forw.w()[:, -1], back.w()[:, -1], atol=1e-6)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_point_mass(Integrator):
    """A unit circular orbit must return to its starting state after one
    period (t = 2*pi)."""
    q0 = np.array([1.0, 0.0])
    p0 = np.array([0.0, 1.0])
    integrator = Integrator(ptmass_F)
    orbit = integrator.run(np.append(q0, p0), t1=0.0, t2=2 * np.pi, n_steps=1e4)
    assert np.allclose(orbit.w()[:, 0], orbit.w()[:, -1], atol=1e-6)
@pytest.mark.skipif(not HAS_TQDM, reason="requires tqdm to run this test")
@pytest.mark.parametrize("Integrator", integrator_list)
def test_progress(Integrator):
    """Smoke test: the progress=True code path runs when tqdm is installed."""
    q0 = np.array([1.0, 0.0])
    p0 = np.array([0.0, 1.0])
    integrator = Integrator(ptmass_F, progress=True)
    _ = integrator.run(np.append(q0, p0), t1=0.0, t2=2 * np.pi, n_steps=1e2)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_point_mass_multiple(Integrator):
    """Smoke test: several initial conditions integrate in one batched call."""
    # Columns are orbits: shape (phase-space dim, n_orbits) after transpose.
    w0 = np.array(
        [[1.0, 0.0, 0.0, 1.0], [0.8, 0.0, 0.0, 1.1], [2.0, 1.0, -1.0, 1.1]]
    ).T
    integrator = Integrator(ptmass_F)
    _ = integrator.run(w0, dt=1e-3, n_steps=1e4)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_driven_pendulum(Integrator):
    """Smoke test: the forced pendulum integrates without error."""
    integrator = Integrator(forced_sho_F, func_args=(0.07, 0.75))
    _ = integrator.run([3.0, 0.0], dt=1e-2, n_steps=1e4)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_lorenz(Integrator):
    """Smoke test: the Lorenz system (classic parameters) integrates without error."""
    sigma, rho, beta = 10.0, 28.0, 8 / 3.0
    integrator = Integrator(lorenz_F, func_args=(sigma, rho, beta))
    _ = integrator.run([0.5, 0.5, 0.5, 0, 0, 0], dt=1e-2, n_steps=1e4)
@pytest.mark.parametrize("Integrator", integrator_list)
def test_memmap(tmpdir, Integrator):
    """Integration output can be written into a pre-allocated numpy memmap."""
    dt = 0.1
    n_steps = 1000
    nw0 = 10000

    filename = os.path.join(str(tmpdir), "test_memmap.npy")
    # Shape: (phase-space dim, n_steps + 1 saved times, number of orbits).
    mmap = np.memmap(filename, mode="w+", shape=(2, n_steps + 1, nw0))

    w0 = np.random.uniform(-1, 1, size=(2, nw0))

    integrator = Integrator(sho_F, func_args=(1.0,))
    _ = integrator.run(w0, dt=dt, n_steps=n_steps, mmap=mmap)
|
import numpy as np
import cv2
import time

start = time.time()
end = start + 3  # show video for three seconds - guards against a bad exit stuffing up the stream. Remove in future.

cap = cv2.VideoCapture(0)
#while time.time() < end:  # time-limited variant, kept for reference
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # BUG FIX: 'frame == None' compares a numpy array elementwise and raises
    # "truth value of an array is ambiguous"; use the read() success flag
    # and an identity check instead.
    if not ret or frame is None:
        continue
    # Downsample each colour channel to 10x10 and recombine.
    b,g,r = cv2.split(frame)
    b_new = cv2.resize(b,(10,10))
    g_new = cv2.resize(g,(10,10))
    r_new = cv2.resize(r,(10,10))
    out = cv2.merge((b_new,g_new,r_new))
    cv2.imshow('frame',out)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture exactly once (the original
# released inside the loop and then again here).
cap.release()
cv2.destroyAllWindows()
|
# _*_ coding:utf-8 _*_
# Filename:ClientUI.py
# Python在线聊天客户端
from socket import *
from ftplib import FTP
import ftplib
import socket
import thread
import time
import sys
import codecs
import os
reload(sys)
sys.setdefaultencoding( "utf-8" )
class ClientMessage():
    """UDP chat-client logic: login, dispatching server events, sending
    messages, and FTP-based file transfer.

    Datagram protocol: '##'-separated fields whose first field is an opcode
    ('0' login/friend-online, '1' offline, '2' chat message, '3' file,
    '4' friend request, '5' friend-request answer, 'Y'/'N' login result,
    'F' friend list, 'CLOSE' duplicate login).

    NOTE(review): several methods reference ``Tkinter`` and UI widgets
    (``self.chatText``, ``self.inputText``) that are never imported or
    created in this file -- presumably supplied by a UI class elsewhere;
    confirm before reuse.
    """
    # Set the user name and password used to authenticate with the server.
    def setUsrANDPwd(self,usr,pwd):
        self.usr=usr
        self.pwd=pwd
    # Set the peer user this client chats with.
    def setToUsr(self,toUsr):
        self.toUsr=toUsr
        self.ChatFormTitle=toUsr
    # Set the server IP address and port.
    def setLocalANDPort(self,local,port):
        self.local = local
        self.port = port
    def check_info(self):
        # Send a login datagram and return True/False for accept/reject.
        self.buffer = 1024
        self.ADDR=(self.local,self.port)
        self.udpCliSock = socket.socket(AF_INET, SOCK_DGRAM)
        self.udpCliSock.sendto('0##'+self.usr+'##'+self.pwd,self.ADDR)
        self.serverMsg ,self.ADDR = self.udpCliSock.recvfrom(self.buffer)
        s=self.serverMsg.split('##')
        if s[0]=='Y':
            return True
        elif s[0]== 'N':
            return False
    # Receive messages: log in, then loop dispatching datagrams by opcode.
    def receiveMessage(self):
        self.buffer = 1024
        self.ADDR=(self.local,self.port)
        self.udpCliSock = socket.socket(AF_INET, SOCK_DGRAM)
        self.udpCliSock.sendto('0##'+self.usr+'##'+self.pwd,self.ADDR)
        while True:
            # Connection established; receive a server message.
            self.serverMsg ,self.ADDR = self.udpCliSock.recvfrom(self.buffer)
            s=self.serverMsg.split('##')
            if s[0]=='Y':
                #self.chatText.insert(Tkinter.END,'客户端已经与服务器端建立连接......')
                return True
            elif s[0]== 'N':
                #self.chatText.insert(Tkinter.END,'客户端与服务器端建立连接失败......')
                return False
            elif s[0]=='CLOSE':
                # Account logged in elsewhere: show a 5-second countdown, then exit.
                i=5
                while i>0:
                    self.chatText.insert(Tkinter.END,'你的账号在另一端登录,该客户端'+str(i)+'秒后退出......')
                    time.sleep(1)
                    i=i-1
                    self.chatText.delete(Tkinter.END)
                os._exit(0)
            # Friend list.
            elif s[0]=='F':
                for eachFriend in s[1:len(s)]:
                    print eachFriend
            # A friend came online.
            elif s[0]=='0':
                theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                self.chatText.insert(Tkinter.END, theTime+' ' +'你的好友' + s[1]+'上线了')
            # A friend went offline.
            elif s[0]=='1':
                theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                self.chatText.insert(Tkinter.END, theTime+' ' +'你的好友' + s[1]+'下线了')
            # A friend sent a chat message.
            elif s[0]=='2':
                theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                self.chatText.insert(Tkinter.END, theTime +' '+s[1] +' 说:\n')
                self.chatText.insert(Tkinter.END, ' ' + s[3])
            # A friend sent a file: download it from the FTP server.
            elif s[0]=='3':
                filename=s[2]
                f=FTP('192.168.1.105')
                f.login('Coder', 'xianjian')
                f.cwd(self.usr)
                # cp936 (GBK) encoding for Chinese file names on Windows.
                filenameD=filename[:-1].encode("cp936")
                try:
                    f.retrbinary('RETR '+filenameD,open('..\\'+self.usr+'\\'+filenameD,'wb').write)
                except ftplib.error_perm:
                    print 'ERROR:cannot read file "%s"' %file
                self.chatText.insert(Tkinter.END,filename[:-1]+' 传输完成')
            # Incoming friend request: ask the user and answer with opcode '5'.
            elif s[0]=='4':
                agreement=raw_input(s[1]+'请求加你为好友,验证消息:'+s[3]+'你愿意加'+s[1]+'为好友吗(Y/N)')
                if agreement=='Y':
                    self.udpCliSock.sendto('5##'+s[1]+'##'+s[2]+'##Y',self.ADDR)
                elif agreement=='N':
                    self.udpCliSock.sendto('5##'+s[1]+'##'+s[2]+'##N',self.ADDR)
            # Answer to a friend request we sent.
            elif s[0]=='5':
                if s[3]=='Y':
                    print s[2]+'接受了你的好友请求'
                elif s[3]=='N':
                    print s[2]+'拒绝了你的好友请求'
    # Send a chat message.
    def sendMessage(self):
        # Get the message the user typed into the Text widget.
        message = self.inputText.get('1.0',Tkinter.END)
        # Format the current time.
        theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        self.chatText.insert(Tkinter.END, theTime +' 我 说:\n')
        self.chatText.insert(Tkinter.END,' ' + message + '\n')
        self.udpCliSock.sendto('2##'+self.usr+'##'+self.toUsr+'##'+message,self.ADDR);
        # Clear the message the user typed into the Text widget.
        self.inputText.delete(0.0,message.__len__()-1.0)
    # Send a file: upload via FTP, then notify the peer with opcode '3'.
    def sendFile(self):
        filename = self.inputText.get('1.0',Tkinter.END)
        theTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        self.chatText.insert(Tkinter.END, theTime +'我' + ' 传文件:\n')
        self.chatText.insert(Tkinter.END,' ' + filename[:-1] + '\n')
        f=FTP('192.168.1.105')
        f.login('Coder', 'xianjian')
        f.cwd(self.toUsr)
        filenameU=filename[:-1].encode("cp936")
        try:
            #f.retrbinary('RETR '+filename,open(filename,'wb').write)
            # Upload the file into the peer's folder on the server.
            f.storbinary('STOR ' + filenameU, open('..\\'+self.usr+'\\'+filenameU, 'rb'))
        except ftplib.error_perm:
            print 'ERROR:cannot read file "%s"' %file
        self.udpCliSock.sendto('3##'+self.usr+'##'+self.toUsr+'##'+filename,self.ADDR);
    # Send a friend request; input widget holds 'toUser##verification-message'.
    def addFriends(self):
        message= self.inputText.get('1.0',Tkinter.END)
        s=message.split('##')
        self.udpCliSock.sendto('4##'+self.usr+'##'+s[0]+'##'+s[1],self.ADDR);
    # Notify the server we are going offline, then exit.
    def close(self):
        self.udpCliSock.sendto('1##'+self.usr,self.ADDR);
        sys.exit()
    # Start a thread that receives messages from the server.
    def startNewThread(self):
        thread.start_new_thread(self.receiveMessage,())
def main():
    # Demo wiring with hard-coded server address and credentials.
    # NOTE(review): main() returns immediately after spawning the receive
    # thread, so the process may exit before any message arrives -- confirm.
    client = ClientMessage()
    client.setLocalANDPort('192.168.1.105', 8808)
    client.setUsrANDPwd('12073127', '12073127')
    client.setToUsr('12073128')
    client.startNewThread()

if __name__=='__main__':
    main()
|
"""
89. Gray Co | de
https://leetcode.com/problems/gray-code/
"""
from typing import List
class Solution:
    def grayCode(self, n: int) -> List[int]:
        """Return the n-bit Gray code sequence starting at 0.

        Reflect-and-prefix construction: each round mirrors the sequence so
        far and sets one new high bit on the mirrored half.
        """
        codes = [0]
        for bit in range(n):
            high_bit = 2 ** bit
            mirrored = [high_bit + code for code in reversed(codes)]
            codes.extend(mirrored)
        return codes
def main():
    # Demo: print the 3-bit Gray code sequence.
    s = Solution()
    print(s.grayCode(3))

if __name__ == '__main__':
    # main() returns None, so SystemExit(None) exits with status 0.
    raise(SystemExit(main()))
|
return sys.platform
# Set for cross builds explicitly
if "_PYTHON_HOST_PLATFORM" in os.environ:
return os.environ["_PYTHON_HOST_PLATFORM"]
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
(osname, host, release, version, machine) = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = string.lower(osname)
osname = string.replace(osname, '/', '')
machine = string.replace(machine, ' ', '_')
machine = string.replace(machine, '/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# We can't use "platform.architecture()[0]" because a
# bootstrap problem. We use a dict to get an error
# if some suspicious happens.
bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
machine += ".%s" % bitness[sys.maxint]
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile (r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
import _osx_support, distutils.sysconfig
osname, release, machine = _osx_support.get_platform_osx(
distutils.sysconfig.get_config_vars(),
osname, release, machine)
return "%s-%s-%s" % (osname, release, machine)
# get_platform ()
def convert_path (pathname):
    """Return 'pathname' as a name that will work on the native filesystem,
    i.e. split it on '/' and put it back together again using the current
    directory separator.  Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem.  Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    # Fast path: native separator already '/', nothing to convert.
    if os.sep == '/':
        return pathname
    if not pathname:
        return pathname
    if pathname[0] == '/':
        raise ValueError, "path '%s' cannot be absolute" % pathname
    if pathname[-1] == '/':
        raise ValueError, "path '%s' cannot end with '/'" % pathname

    # Drop no-op '.' components before rejoining with the native separator.
    paths = string.split(pathname, '/')
    while '.' in paths:
        paths.remove('.')
    if not paths:
        return os.curdir
    return os.path.join(*paths)

# convert_path ()
def change_root (new_root, pathname):
    """Return 'pathname' with 'new_root' prepended.  If 'pathname' is
    relative, this is equivalent to "os.path.join(new_root,pathname)".
    Otherwise, it requires making 'pathname' relative and then joining the
    two, which is tricky on DOS/Windows and Mac OS.
    """
    if os.name == 'posix':
        if not os.path.isabs(pathname):
            return os.path.join(new_root, pathname)
        else:
            # Strip the leading '/' so join() does not discard new_root.
            return os.path.join(new_root, pathname[1:])

    elif os.name == 'nt':
        # Drop the drive letter, then re-relativize the remaining path.
        (drive, path) = os.path.splitdrive(pathname)
        if path[0] == '\\':
            path = path[1:]
        return os.path.join(new_root, path)

    elif os.name == 'os2':
        (drive, path) = os.path.splitdrive(pathname)
        if path[0] == os.sep:
            path = path[1:]
        return os.path.join(new_root, path)

    else:
        raise DistutilsPlatformError, \
              "nothing known about platform '%s'" % os.name
# Module-level guard so check_environ() does its work at most once.
_environ_checked = 0

def check_environ ():
    """Ensure that 'os.environ' has all the environment variables we
    guarantee that users can use in config files, command-line options,
    etc.  Currently this includes:
      HOME - user's home directory (Unix only)
      PLAT - description of the current platform, including hardware
             and OS (see 'get_platform()')
    """
    global _environ_checked
    if _environ_checked:
        return

    if os.name == 'posix' and 'HOME' not in os.environ:
        # Fall back to the password database; field 5 is the home directory.
        import pwd
        os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]

    if 'PLAT' not in os.environ:
        os.environ['PLAT'] = get_platform()

    _environ_checked = 1
def subst_vars (s, local_vars):
    """Perform shell/Perl-style variable substitution on 'string'.  Every
    occurrence of '$' followed by a name is considered a variable, and
    variable is substituted by the value found in the 'local_vars'
    dictionary, or in 'os.environ' if it's not in 'local_vars'.
    'os.environ' is first checked/augmented to guarantee that it contains
    certain values: see 'check_environ()'.  Raise ValueError for any
    variables not found in either 'local_vars' or 'os.environ'.
    """
    check_environ()
    def _subst (match, local_vars=local_vars):
        # local_vars wins over the environment.
        var_name = match.group(1)
        if var_name in local_vars:
            return str(local_vars[var_name])
        else:
            return os.environ[var_name]

    try:
        # $name where name is a valid identifier.
        return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
    except KeyError, var:
        raise ValueError, "invalid variable '$%s'" % var

# subst_vars ()
def grok_environment_error (exc, prefix="error: "):
    """Generate a useful error message from an EnvironmentError (IOError or
    OSError) exception object.  Handles Python 1.5.1 and 1.5.2 styles, and
    does what it can to deal with exception objects that don't have a
    filename (which happens when the error is due to a two-file operation,
    such as 'rename()' or 'link()'.  Returns the error message as a string
    prefixed with 'prefix'.
    """
    # check for Python 1.5.2-style {IO,OS}Error exception objects
    if hasattr(exc, 'filename') and hasattr(exc, 'strerror'):
        if exc.filename:
            error = prefix + "%s: %s" % (exc.filename, exc.strerror)
        else:
            # two-argument functions in posix module don't
            # include the filename in the exception object!
            error = prefix + "%s" % exc.strerror
    else:
        # Pre-1.5.2 style: the last element of the exception args.
        error = prefix + str(exc[-1])

    return error
# Needed by 'split_quoted()'
_wordchars_re = _squote_re = _dquote_re = None

def _init_regex():
    # Compile lazily so importers that never call split_quoted() skip the cost.
    global _wordchars_re, _squote_re, _dquote_re
    _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
    _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
    _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
def split_quoted (s):
"""Split a string up according to Unix shell-like rules for quotes and
backslashes. In short: words are delimited by spaces, as long as those
spaces are not escaped by a backslash, or inside a quoted string.
Single and double quotes are equivalent, and the quote characters can
be backslash-escaped. The backslash is stripped from any two-character
escape sequence, leaving only the escaped character. The quote
characters are stripped from any quoted string. Returns a list of
words.
"""
# This is a nice algorithm for splitting up a single string, since it
# doesn't require character-by-character examination. It was a little
# bit of a brain-bender to get it working right, though...
if _wordchars_re is None: _init_regex()
s = string.strip(s)
words = []
pos = 0
wh |
# -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewa | ll.org/log/.
from trac.core import *
class IPreferencePanelProvider(Interface):
    """Interface for components that contribute panels to the user
    preferences page."""

    def get_preference_panels(req):
        """Return a list of available preference panels.

        The items returned by this function must be tuple of the form
        `(panel, label)`.
        """

    def render_preference_panel(req, panel):
        """Process a request for a preference panel.

        This function should return a tuple of the form `(template, data)`,
        where `template` is the name of the template to use and `data` is the
        data to be passed to the template.
        """
|
# encoding=utf-8
from django.utils.translation import ugettext_lazy as _
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.viewsets import ModelViewSet
from cmmedia.models import Image, Artist, Album, Music
from cmmedia | .serializers import ImageSerializer, ArtistSerializer, AlbumSerializer, MusicSerializer
class ResourceURLView(APIView):
    """API root view: maps localized resource names to list-endpoint URLs."""
    allowed_methods = ['GET']

    def get(self,request,*args,**kwargs):
        return Response(self.get_url_dispach())

    # NOTE(review): 'dispach' is a typo for 'dispatch'; kept because renaming
    # would change this class's overridable interface.
    def get_url_dispach(self,format=None):
        return {
            _(u"images_url").strip(): reverse('resource_image', request=self.request, format=format,),
            _(u"artists_url").strip(): reverse('artists-list', request=self.request, format=format),
            _(u"albums_url").strip(): reverse('albums-list', request=self.request, format=format),
            _(u"musics_url").strip(): reverse('musics-list', request=self.request, format=format),
        }
class ImageView(generics.CreateAPIView):
    """
    Create Image resources.

    NOTE(review): the original docstring said "create and retrieve", but
    CreateAPIView only provides POST -- confirm intent.
    """
    queryset = Image.objects.all()
    serializer_class = ImageSerializer
class ArtistViewSet(ModelViewSet):
    """
    Create, delete, update and retrieve artists.
    """
    queryset = Artist.objects.all()
    serializer_class = ArtistSerializer
    lookup_field = u'pk'
class AlbumViewSet(ModelViewSet):
    """
    Create, delete, update and retrieve albums.
    """
    queryset = Album.objects.all()
    serializer_class = AlbumSerializer
    lookup_field = u'pk'
class MusicViewSet(ModelViewSet):
    """
    Create, delete, update and retrieve music tracks.
    (The original docstring said "albums" -- copy-paste from AlbumViewSet.)
    """
    queryset = Music.objects.all()
    serializer_class = MusicSerializer
    lookup_field = u'pk'
|
# -*- coding: utf-8 -*-
"""Analysis plugin that labels events according to rules in a tagging file."""
from plaso.analysis import interface
from plaso.analysis import manager
from plaso.engine import tagging_file
class TaggingAnalysisPlugin(interface.AnalysisPlugin):
  """Analysis plugin that labels events according to rules in a tagging file."""

  NAME = 'tagging'

  def __init__(self):
    """Initializes a tagging analysis plugin."""
    super(TaggingAnalysisPlugin, self).__init__()
    # Mapping of label name to filter objects; set by SetAndLoadTagFile().
    self._tagging_rules = None

  def ExamineEvent(
      self, analysis_mediator, event, event_data, event_data_stream):
    """Labels events according to the rules in a tagging file.

    Args:
      analysis_mediator (AnalysisMediator): mediates interactions between
          analysis plugins and other components, such as storage and dfVFS.
      event (EventObject): event to examine.
      event_data (EventData): event data.
      event_data_stream (EventDataStream): event data stream.
    """
    matched_label_names = []
    for label_name, filter_objects in self._tagging_rules.items():
      for filter_object in filter_objects:
        # Note that tagging events based on existing labels is currently
        # not supported.
        if filter_object.Match(event, event_data, event_data_stream, None):
          # One matching filter is enough to apply the label; stop early.
          matched_label_names.append(label_name)
          break

    if matched_label_names:
      event_tag = self._CreateEventTag(event, matched_label_names)
      analysis_mediator.ProduceEventTag(event_tag)

      # Per-label counters plus an overall event-tag counter.
      for label_name in matched_label_names:
        self._analysis_counter[label_name] += 1
      self._analysis_counter['event_tags'] += 1

  def SetAndLoadTagFile(self, tagging_file_path):
    """Sets the tagging file to be used by the plugin.

    Args:
      tagging_file_path (str): path of the tagging file.
    """
    tagging_file_object = tagging_file.TaggingFile(tagging_file_path)
    self._tagging_rules = tagging_file_object.GetEventTaggingRules()

manager.AnalysisPluginManager.RegisterPlugin(TaggingAnalysisPlugin)
|
from django.contrib import admin
# Register your models her | e.
from datatable.models import Serveur
# Register your models here.
# Admin configuration for the Serveur model.
class ServeurAdmin(admin.ModelAdmin):
    # Changelist columns, sidebar filters, and searchable fields.
    list_display = ('In_Type', 'In_Nom', 'In_IP', 'statut')
    list_filter = ('In_Type', 'In_Nom', 'In_IP', 'statut')
    search_fields = ['In_Type', 'In_Nom', 'In_IP' ]

admin.site.register(Serveur, ServeurAdmin)
|
#!/usr/bin/python
# Render a test string with a TTF font into test03.swf using the ming
# SWF library.  Usage: test03.py <srcdir>
from ming import *
import sys
# First argument: directory whose ../Media folder holds the test font.
srcdir=sys.argv[1]
m = SWFMovie();
font = SWFFont(srcdir + "/../Media/test.ttf")
text = SWFText(1)
# Width of the pangram; used to offset the draw position below.
w = font.getStringWidth("The quick brown fox jumps over the lazy dog. 1234567890")
text.setFont(font)
text.setColor(0,0,0,255)
text.setHeight(20)
text.moveTo(w,0)
text.addString("|")
m.add(text)
m.nextFrame()
m.save("test03.swf")
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Tick(object):
    """Read-only view over one market-data snapshot.

    Each property keys into the underlying snapshot dict; the dict is
    expected to carry prices, volumes, best bid/ask and limit fields.
    """
    def __init__(self, order_book_id, dt, snapshot):
        # snapshot: mapping of field name -> value for this tick.
        self._order_book_id = order_book_id
        self._dt = dt
        self._snapshot = snapshot
    @property
    def order_book_id(self):
        return self._order_book_id
    @property
    def datetime(self):
        return self._dt
    # Price fields.
    @property
    def open(self):
        return self._snapshot['open']
    @property
    def last(self):
        return self._snapshot['last']
    @property
    def high(self):
        return self._snapshot['high']
    @property
    def low(self):
        return self._snapshot['low']
    @property
    def prev_close(self):
        return self._snapshot['prev_close']
    # Volume / turnover fields.
    @property
    def volume(self):
        return self._snapshot['volume']
    @property
    def total_turnover(self):
        return self._snapshot['total_turnover']
    @property
    def open_interest(self):
        return self._snapshot['open_interest']
    @property
    def prev_settlement(self):
        return self._snapshot['prev_settlement']
    # Best bid/ask of the order book.
    @property
    def bid(self):
        return self._snapshot['bid']
    @property
    def bid_volume(self):
        return self._snapshot['bid_volume']
    @property
    def ask(self):
        return self._snapshot['ask']
    @property
    def ask_volume(self):
        return self._snapshot['ask_volume']
    # Exchange price limits.
    @property
    def limit_up(self):
        return self._snapshot['limit_up']
    @property
    def limit_down(self):
        return self._snapshot['limit_down']
|
= None
try:
self.connection = pymysql.connect(
host=host, port=int(port), user=user, password=passwd, db=dbname, charset="utf8mb4")
self.cursor = self.connection.cursor()
except Exception as e_con:
print '数据库连接错误, 程序中止'
print e_con
exit(-1)
    def test(self):
        """Smoke-test the DB connection, cursor and project bootstrap."""
        print '正在测试数据库连接'
        # NOTE(review): the conditional expression covers the whole
        # concatenation, so on a falsy connection/cursor only the fallback
        # text is printed -- looks intended, but verify.
        print '数据库连接: ' + str(self.connection.get_host_info()) if self.connection else '数据库连接异常'
        print '数据库游标: ' + str(self.cursor) if self.cursor else '数据库游标异常'
        print '数据库连接测试完毕'
        print '检查表 aabb 是否存在'
        if self.exist_table('aabb'):
            print '存在'
        else:
            print '不存在'
        print '初始化项目 example'
        self.init_project('example', 'example_')
        self.new_project()
        PyGdbUtil.log(0, '初始化完毕')
# 初始化项目
def init_project(self, project_name, table_prefix):
self.project = project_name
self.table_prefix = table_prefix
# 检测是否存在该项目 不存在->创建 返回True; 存在->返回 False
    def new_project(self):
        """Create this project's tables if they do not exist yet.

        Returns True when the tables were created, False when the project
        already existed (its BreakPoint table is used as the sentinel).
        """
        if not self.table_prefix:
            PyGdbUtil.log(2, '未指定数据库前缀')
        exist_project = self.exist_table(self.table_prefix + 'BreakPoint')
        # Create the database tables.
        if not exist_project:
            self.create_table(self.table_prefix + "BreakPoint(bid INT AUTO_INCREMENT primary key, pid INT, lineNumber INT, funcName TEXT, funcList TEXT)")
            self.create_table(self.table_prefix + "PStackSize(pid INT, tid INT, stackSize INT, pass TINYINT)")
            self.create_table(self.table_prefix + "FStackSize(pid INT, tid INT, fid INT, stackSize INT)")
            self.create_table(self.table_prefix + "FrameVariable(bid INT, varName CHAR, varValue TEXT, varSize INT)")
            self.create_table(self.table_prefix + "FuncAdjacencyList(pid INT, tid INT, parFid INT, fid INT, cnt INT)")
            self.create_table(self.table_prefix + "Function(fid INT, funcName CHAR(30))")
            self.create_table(self.table_prefix + "TestCase(tid INT AUTO_INCREMENT primary key, testStr TEXT)")
            self.commit()
            return True
        else:
            return False
    def clear_project(self):
        """Drop all of this project's tables.

        Returns True when the tables existed and were dropped, else False.
        """
        if not self.table_prefix:
            PyGdbUtil.log(2, '未指定数据库前缀')
        exist_project = self.exist_table(self.table_prefix + 'BreakPoint')
        if exist_project:
            self.drop_table(self.table_prefix + "BreakPoint")
            self.drop_table(self.table_prefix + "PStackSize")
            self.drop_table(self.table_prefix + "FStackSize")
            self.drop_table(self.table_prefix + "FrameVariable")
            self.drop_table(self.table_prefix + "FuncAdjacencyList")
            self.drop_table(self.table_prefix + "Function")
            self.drop_table(self.table_prefix + "TestCase")
            self.commit()
            return True
        else:
            return False
# 插入测试用例
    def insert_test_case(self, test_str):
        """Append a test input string to the TestCase table.

        NOTE(review): test_str is spliced into the SQL by string
        formatting; a quote in the input breaks the statement (SQL
        injection) -- switch to parameterized execute() if the wrapper
        supports it.
        """
        self.execute("insert into " + self.table_prefix + "TestCase(testStr) VALUES('%s')" % test_str)
# 插入程序断点
    def insert_breakpoint(self, pid, line_number, func_name):
        """Insert one breakpoint row (program id, line number, function name)."""
        # return  # debugging shortcut kept from the original
        PyGdbUtil.log(0, str(pid) + " " + str(line_number) + " " + str(func_name))
        # NOTE(review): values are interpolated into the SQL; prefer
        # parameterized queries if func_name may contain quotes.
        self.execute("insert into " + self.table_prefix +
                     "BreakPoint(pid, lineNumber, funcName) VALUES (%s, %s, '%s')" % (pid, line_number, func_name))
# 插入函数
    def inset_function(self, fid, func_name):
        """Insert a (fid, funcName) row into the Function table.

        NOTE(review): the name looks like a typo for insert_function, but
        callers (insert_function_list) use this spelling -- keep it until
        every call site is updated.
        """
        self.execute('insert into ' + self.table_prefix +
                     'Function(fid, funcName) VALUES (%s, "%s")' % (fid, func_name))
# 插入一个栈帧变量信息
    def insert_frame_var(self, bid, var_name, var_value, var_size):
        """Store one frame variable (name, value, size) seen at breakpoint *bid*."""
        self.execute('insert into ' + self.table_prefix +
                     'FrameVariable(bid, varName, varValue, varSize) ' +
                     'VALUES (%s, "%s", "%s", %s)' % (bid, var_name, var_value, var_size))
# 插入栈帧大小
    def insert_frame_stack_size(self, pid, tid, fid, size):
        """Record the stack size of one frame (program, test case, function)."""
        self.execute('insert into ' + self.table_prefix +
                     'FStackSize(pid, tid, fid, stackSize) VALUES (%s, %s, %s, %s)' %
                     (pid, tid, fid, size))
# 插入最大栈帧大小
    def insert_max_stack_size(self, pid, tid, size):
        """Record the maximum stack size of a whole (program, test case) run."""
        self.execute('insert into ' + self.table_prefix +
                     'PStackSize(pid, tid, stackSize) VALUES (%s, %s, %s)' %(pid, tid, size))
# 根据函数名称获取 fid
def get_function_fid_by_name(self, func_name):
self.execute('select fid from ' + self.table_prefix + 'Function where funcName=' + func_name)
fetch_one = self.cursor.fetchone()
print "获取函数id: " + fetch_one
return fetch_one[0]
# 根据bid获取fid
    def get_fid_by_bid(self, bid):
        """Map a breakpoint id to its function id via the stored function name."""
        self.execute('select funcName from ' + self.table_prefix + 'BreakPoint where bid=' + str(bid))
        fetch_one = self.cursor.fetchone()
        fid = self.get_fid_by_func_name(str(fetch_one[0]))
        return fid
# 根据函数名获取 fid
def get_fid_by_func_name(self, func_name):
self.execute('select fid from ' + self.table_prefix + 'Function where funcName="%s"' % (str(func_name)))
return self.cursor.fetchone()[0]
# 数据库中插入断点
    def info_breakpoint_handler(self, pid, gdb_info_breakpoint):
        """Parse `info breakpoints` output and store each breakpoint.

        Relies on gdb's tabular layout: for lines containing
        "breakpoint keep y", token 8 is "file:line" and token 6 the
        function name.
        """
        ss = gdb_info_breakpoint.split("\n")
        for s in ss:
            if 0 < s.find("breakpoint keep y"):
                s2 = s.split()
                s3 = s2[8].split(":")
                self.insert_breakpoint(pid, s3[1], s2[6])
# 添加有向边 a-->b
    def insert_edge(self, pid, tid, func_name_a, func_name_b):
        """Add (or bump the counter of) the call-graph edge a --> b."""
        fid_a = self.get_fid_by_func_name(func_name_a)
        fid_b = self.get_fid_by_func_name(func_name_b)
        try:
            # Existing edge: increment its counter.
            self.execute('select cnt from ' + self.table_prefix +
                         'FuncAdjacencyList where pid=%s and tid=%s and parFid=%s and fid=%s' %
                         (pid, tid, fid_a, fid_b))
            cnt = int(self.cursor.fetchone()[0]) + 1
            self.execute('update ' + self.table_prefix +
                         'FuncAdjacencyList set cnt=%s where pid=%s and tid=%s and parFid=%s and fid=%s' %
                         (pid, tid, cnt, fid_a, fid_b))
        except Exception:
            # No row yet (fetchone() returned None above): insert with cnt=1.
            cnt = 1
            self.execute('insert into ' + self.table_prefix +
                         'FuncAdjacencyList(pid, tid, parFid, fid, cnt) VALUES (%s, %s, %s, %s, %s)' %
                         (pid, tid, fid_a, fid_b, cnt))
# 根据 gdb(info b) 的信息获取函数列表
def get_function_list(self, break_info):
func_list = []
string_list = break_info.split('\n')[1:]
for line in string_list:
word = line.split()
if len(word) >= 6:
func_list.append(word[6])
return func_list
# 将给出的函数列表插入数据库中
def insert_function_list(self, func_list):
fid = 0
func_list = list(set(func_list)) # 去重
for func in func_list:
fid += 1 |
self.inset_function(fid, func)
# 检查是否存在一张表
def exist_table(self, table_name):
try:
self.execute('select * from ' + table_name)
return True
| except Exception:
return False
# 创建表
    def create_table(self, table_name):
        """Create a table; *table_name* carries the full "Name(columns...)" DDL."""
        try:
            PyGdbUtil.log(0, "创建表" + table_name)
            self.execute("create table if not exists " + table_name)
        except Exception as e:
            # print e
            PyGdbUtil.log(2, "创建表" + table_name + "失败! 请检查数据表前缀是否有非法字符.")
# 删除表
    def drop_table(self, table_name):
        """Drop *table_name* if it exists, logging failures."""
        try:
            PyGdbUtil.log(0, "删除表" + table_name)
            self.execute('drop table if exists ' + table_name)
        except Exception as e:
            print e
            PyGdbUtil.log(2, '删除表失败!')
# 获取测试样例
    def get_test_case_by_tid(self, tid):
        """Return the test input string stored under *tid*."""
        self.execute("SELECT testStr FROM " + self.table_prefix + "TestCase WHERE tid='%s'" % tid)
        return self.cursor.fetchone()[0]
# 获取测试样例总数
def get_test_case_cnt(self):
self.execute('SELECT max(tid) FROM ' + self.table_prefix + 'TestCase')
return int(self.cursor.fetchone()[0])
# 获取指定程序的断点列表
def get_ |
import subprocess
import smtplib
import socket
from email.mime.text import MIMEText
import datetime

# Mail this machine's IP address (from the default route) to yourself.
# Change to your own account information.
# NOTE(review): credentials are hard-coded in source; prefer environment
# variables or a config file kept out of version control.
to = 'rk.ryan.king@gmail.com'
gmail_user = 'rk.ryan.king@gmail.com'
gmail_password = 'nzwaahcmdzjchxsz'

# Open an SMTP session to Gmail, upgrade it to TLS and authenticate.
smtpserver = smtplib.SMTP('smtp.gmail.com', 587)
smtpserver.ehlo()
smtpserver.starttls()
# Bug fix: the original had `smtpserver.ehlo` (a bare attribute access,
# i.e. a no-op); RFC 3207 requires a fresh EHLO after STARTTLS.
smtpserver.ehlo()
smtpserver.login(gmail_user, gmail_password)
today = datetime.date.today()

# Very Linux Specific: parse `ip route list` for the source address
# ("src <addr>") of the default route.
arg='ip route list'
p=subprocess.Popen(arg,shell=True,stdout=subprocess.PIPE)
data = p.communicate()
split_data = data[0].split()
ipaddr = split_data[split_data.index('src')+1]
my_ip = 'Your ip is %s' % ipaddr

# Build and send a plain-text message with today's date in the subject.
msg = MIMEText(my_ip)
msg['Subject'] = 'IP For RaspberryPi on %s' % today.strftime('%b %d %Y')
msg['From'] = gmail_user
msg['To'] = to
smtpserver.sendmail(gmail_user, [to], msg.as_string())
smtpserver.quit()
( "0x500", "0x5ff", "Via", "V" ),
("0x1800", "0x18ff", "Nagra", "N" ),
("0x4ae0", "0x4ae1", "Dre", "D" ),
( "0xd00", "0xdff", "CryptoW", "CW"),
( "0x900", "0x9ff", "NDS", "ND"),
( "0xb00", "0xbff", "Conax", "CO"),
("0x2600", "0x2600", "Biss", "BI")
)
    def GetEcmInfo(self):
        """Parse /tmp/ecm.info into a dict of ECM status fields.

        Handles the file layouts written by CCcam ('using'), oscam-style
        readers ('reader'), gbox ('decode'), and wicardd/MGcam ('source').
        Returns empty/zero defaults when the file is missing or does not
        parse.
        """
        data = {}
        try:
            f = open('/tmp/ecm.info', 'rb')
            ecm = f.readlines()
            f.close()
            # Collect "key: value" lines; the first ':' splits key/value.
            info = {}
            for line in ecm:
                d = line.split(':', 1)
                if len(d) > 1:
                    info[d[0].strip()] = d[1].strip()
            # 1st values
            data['caid'] = '0x00'
            data['pid'] = ''
            data['provider'] = ''
            data['using'] = ''
            data['decode'] = ''
            data['source'] = ''
            data['reader'] = ''
            data['address'] = ''
            data['address_from'] = ''
            data['hops'] = '0'
            data['ecm_time'] = '0'
            data['caid'] = info.get('caid', '0')
            data['provider'] = info.get('provider', '')
            if data['provider'] == '':
                data['provider'] = info.get('prov', ' ')
            data['using'] = info.get('using', '')
            data['reader'] = info.get('reader', '')
            ## CCcam
            if data['using']:
                data['using'] = info.get('using', '')
                data['decode'] = info.get('decode', '')
                data['source'] = info.get('source', '')
                data['reader'] = info.get('reader', '')
                data['address'] = info.get('address', 'Unknown')
                data['address_from'] = info.get('from', 'Unknown')
                data['hops'] = info.get('hops', '0')
                data['ecm_time'] = info.get('ecm time', '?')
            elif data['reader']:
                # oscam-style reader block.
                data['caid'] = info.get('caid', '')
                data['pid'] = info.get('pid', '')
                data['provider'] = info.get('prov', '')
                data['reader'] = info.get('reader', '')
                data['address'] = info.get('from', 'Unknown')
                data['hops'] = info.get('hops', '0')
                data['ecm_time'] = info.get('ecm time', '?')
            else:
                data['decode'] = info.get('decode', '')
                if data['decode']:
                    # gbox (untested)
                    if data['decode'] == 'Network':
                        # Look up the card id in the share list to show a
                        # peer address instead of just "Network".
                        cardid = 'id:' + info.get('prov', '')
                        try:
                            f = open('/tmp/share.info', 'rb')
                            share = f.readlines()
                            f.close()
                            for line in share:
                                if cardid in line:
                                    data['address'] = line.strip()
                                    break
                            else:
                                data['address'] = cardid
                        except:
                            data['address'] = data['decode']
                    else:
                        # adddess = slot or emu
                        data['address'] = data['decode']
                    if ecm[1].startswith('SysID'):
                        data['provider'] = ecm[1].strip()[6:]
                    if 'CaID 0x' in ecm[0] and 'pid 0x' in ecm[0]:
                        # First line carries "CaID 0x…, pid 0x… =".
                        data['ecm_time'] = info.get('response', '?')
                        data['caid'] = ecm[0][ecm[0].find('CaID 0x')+7:ecm[0].find(',')]
                        data['pid'] = ecm[0][ecm[0].find('pid 0x')+6:ecm[0].find(' =')]
                        data['provider'] = info.get('prov', '0')[:4]
                else:
                    source = info.get('source', None)
                    if source:
                        print "Found Source"
                        #wicardd
                        if 'CaID 0x' in ecm[0] and 'pid 0x' in ecm[0]:
                            data['caid'] = ecm[0][ecm[0].find('CaID 0x')+7:ecm[0].find(',')]
                            data['pid'] = ecm[0][ecm[0].find('pid 0x')+6:ecm[0].find(' =')]
                            data['provider'] = info.get('prov', '0')[2:]
                        # MGcam
                        else:
                            data['caid'] = info['caid'][2:]
                            data['pid'] = info['pid'][2:]
                            data['provider'] = info['prov'][2:]
                        # ECM time is given as "<n> msec" on its own line.
                        time = " ?"
                        for line in ecm:
                            if line.find('msec') != -1:
                                line = line.split(' ')
                                if line[0]:
                                    time = " (%ss)" % (float(line[0])/1000)
                                    continue
                        data['address'] = source
                        data['ecm_time'] = time
                    else:
                        reader = info.get('reader', '')
                        if reader:
                            hops = info.get('hops', None)
                            if hops and hops != '0':
                                hops = ' @' + hops
                            else:
                                hops = ''
                            data['hops'] = hops
                            data['ecm_time'] = info.get('ecm time', '?')
                            data['address'] = reader
                        else:
                            data['hops'] = ""
                            data['ecm_time'] = ""
                            data['address'] = ""
        except:
            # Missing or malformed /tmp/ecm.info: return blank defaults.
            data['caid'] = '0x00'
            data['provider'] = ''
            data['pid'] = ''
            data['using'] = ''
            data['decode'] = ''
            data['source'] = ''
            data['reader'] = ''
            data['address'] = ''
            data['address_from'] = ''
            data['hops'] = '0'
            data['ecm_time'] = '0'
        return data
def get_caName(self):
try:
f = open("/etc/egami/.emuname",'r')
name = f.readline().strip()
f.close()
except:
name = "Common Interface"
return name
    @cached
    def getText(self):
        """Render the converter text for the current service.

        Output depends on self.type: "CamName", "NetInfo", "EcmInfo",
        "E-C-N" (source classification) or "CryptoBar" (colored CA list).
        """
        service = self.source.service
        if service is None:
            return ""
        info = service and service.info()
        is_crypted = info.getInfo(iServiceInformation.sIsCrypted)
        if self.type == "CamName":
            return self.get_caName()
        elif self.type == "NetInfo":
            if is_crypted != 1:
                return ''
            data = self.GetEcmInfo()
            if data['using']:
                return "Address: %s Hops: %s Ecm time: %ss" % (data['address'], data['hops'], data['ecm_time'])
            elif data['reader']:
                return "Address: %s Hops: %s Ecm time: %ss" % (data['address'], data['hops'], data['ecm_time'])
            elif data['decode'] == "slot-1" or data['decode'] == "slot-2" or data['decode'] == "Network":
                return "Decode: %s Ecm time: %s Pid: %s" % (data['address'], data['ecm_time'], data['pid'])
            elif data['address']:
                return "Address: %s Ecm time: %s Pid: %s" % (data['address'], data['ecm_time'], data['pid'])
        elif self.type == "EcmInfo":
            if is_crypted != 1:
                return ''
            data = self.GetEcmInfo()
            return "CaId: %s Provider: %s" % (data['caid'], data['provider'])
        elif self.type == "E-C-N":
            # Classify the decode source: Fta / Emulator / Card / Network.
            if is_crypted != 1:
                return 'Fta'
            data = self.GetEcmInfo()
            if data['using']:
                if data['using'] == "fta":
                    return 'Fta'
                elif data['using'] == 'emu':
                    return "Emulator"
                elif data['using'] == 'sci':
                    return "Card"
                else:
                    return "Network"
            elif data['reader']:
                # A dotted 'from' address implies a network peer.
                pos = data['address_from'].find('.')
                if pos > 1:
                    return "Network"
                else:
                    return "Card"
            elif data['decode']:
                if data['decode'] == 'Network':
                    return 'Netowrk'  # NOTE(review): typo for 'Network' in this user-visible string
                elif data['decode'] == 'slot-1' or data['decode'] == 'slot-2':
                    return 'Card'
            elif data['address']:
                if data['address'][:3] == "net":
                    return 'Network'
                elif data['address'][:3] == "emu":
                    return 'Emulator'
            else:
                return 'Fta'
            return ""
        elif self.type == "CryptoBar":
            # One colored letter code per known CA system; green-ish when
            # in use, dimmed otherwise ("\c..." are skin color escapes).
            data = self.GetEcmInfo()
            res = ""
            available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
            for caid_entry in self.caid_data:
                if int(data['caid'], 16) >= int(caid_entry[0], 16) and int(data['caid'], 16) <= int(caid_entry[1], 16):
                    color="\c0000??00"
                else:
                    color = "\c007?7?7?"
                    try:
                        for caid in available_caids:
                            if caid >= int(caid_entry[0], 16) and caid <= int(caid_entry[1], 16):
                                color="\c00????00"
                    except:
                        pass
                if res: res += " "
                res += color + caid_entry[3]
            res += "\c00??????"
            return res
        return ""
    text = property(getText)
@cached
def getBool(self):
service = self.source.service
info = service and service.info()
if not info:
return False
if self.type == "CryptoCaidSecaAvailable":
request_caid = "S"
request_selected = False
elif self.type == "CryptoCaid | ViaAvailable":
request_caid = "V"
request_selected = False
elif self.type == "CryptoCaidIrdetoAvailable":
reques | t_caid = "I"
request_selected = False
elif self.type == "CryptoCaidNDSAvailable":
request_caid = "ND"
request_selected = False
elif self.type == "CryptoCaidConaxAvailable":
request_caid = "CO"
request_selected = False
elif self.type == "CryptoCaidCryptoWAvailable":
request_caid = "CW"
request_selected = False
elif self.type == "CryptoCaidBetaAvailable":
request_caid = "B"
request_selected = False
elif self.type == "CryptoCaidNagraAvailable":
request_caid = "N"
request_selected = False
elif self.type == "CryptoCaidBissAvailable":
request_caid = "BI"
request_selected = False
elif self.type == "CryptoCaidDreAvailable":
request_caid = "D"
request_selected = False
elif self.type == "CryptoCaidSecaSelected":
request_caid = "S"
request_selected = True
e |
unction",)))
self.assertEqual(run("from __future__ import generators"),
fs(("generators",)))
self.assertEqual(run("from __future__ import generators, feature"),
fs(("generators", "feature")))
inp = "from __future__ import generators, print_function"
self.assertEqual(run(inp), fs(("generators", "print_function")))
inp ="from __future__ import print_function, generators"
self.assertEqual(run(inp), fs(("print_function", "generators")))
inp = "from __future__ import (print_function,)"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "from __future__ import (generators, print_function)"
self.assertEqual(run(inp), fs(("generators", "print_function")))
inp = "from __future__ import (generators, nested_scopes)"
self.assertEqual(run(inp), fs(("generators", "nested_scopes")))
inp = """from __future__ import generators
from __future__ import print_function"""
self.assertEqual(run(inp), fs(("generators", "print_function")))
invalid = ("from",
"from 4",
"from x",
"from x 5",
"from x im",
"from x import",
"from x import 4",
)
for inp in invalid:
self.assertEqual(run(inp), empty)
inp = "'docstring'\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "'docstring'\n'somng'\nfrom __future__ import print_function"
self.assertEqual(run(inp), empty)
inp = "# comment\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "# comment\n'doc'\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "class x: pass\nfrom __future__ import print_function"
self.assertEqual(run(inp), empty)
    def test_get_headnode_dict(self):
        """_get_headnode_dict maps head node types to the fixers that
        can match there; fixers without a PATTERN appear everywhere."""
        class NoneFix(fixer_base.BaseFix):
            pass
        class FileInputFix(fixer_base.BaseFix):
            PATTERN = "file_input< any * >"
        class SimpleFix(fixer_base.BaseFix):
            PATTERN = "'name'"
        no_head = NoneFix({}, [])
        with_head = FileInputFix({}, [])
        simple = SimpleFix({}, [])
        d = refactor._get_headnode_dict([no_head, with_head, simple])
        top_fixes = d.pop(pygram.python_symbols.file_input)
        self.assertEqual(top_fixes, [with_head, no_head])
        name_fixes = d.pop(token.NAME)
        self.assertEqual(name_fixes, [simple, no_head])
        for fixes in d.values():
            self.assertEqual(fixes, [no_head])
    def test_fixer_loading(self):
        """get_fixers() splits loaded fixers into pre- and post-order lists."""
        from myfixes.fix_first import FixFirst
        from myfixes.fix_last import FixLast
        from myfixes.fix_parrot import FixParrot
        from myfixes.fix_preorder import FixPreorder
        rt = self.rt()
        pre, post = rt.get_fixers()
        self.check_instances(pre, [FixPreorder])
        self.check_instances(post, [FixFirst, FixParrot, FixLast])
    def test_naughty_fixers(self):
        """Broken fixer modules raise ImportError or FixerError at load time."""
        self.assertRaises(ImportError, self.rt, fixers=["not_here"])
        self.assertRaises(refactor.FixerError, self.rt, fixers=["no_fixer_cls"])
        self.assertRaises(refactor.FixerError, self.rt, fixers=["bad_order"])
    def test_refactor_string(self):
        """refactor_string() rewrites matching code and leaves the rest alone."""
        rt = self.rt()
        input = "def parrot(): pass\n\n"
        tree = rt.refactor_string(input, "<test>")
        self.assertNotEqual(str(tree), input)
        input = "def f(): pass\n\n"
        tree = rt.refactor_string(input, "<test>")
        self.assertEqual(str(tree), input)
    def test_refactor_stdin(self):
        """refactor_stdin() reads sys.stdin and reports the diff through
        print_output() with filename "<stdin>" and equal=False."""
        class MyRT(refactor.RefactoringTool):
            def print_output(self, old_text, new_text, filename, equal):
                results.extend([old_text, new_text, filename, equal])
        results = []
        rt = MyRT(_DEFAULT_FIXERS)
        save = sys.stdin
        sys.stdin = io.StringIO("def parrot(): pass\n\n")
        try:
            rt.refactor_stdin()
        finally:
            # Always restore the real stdin for later tests.
            sys.stdin = save
        expected = ["def parrot(): pass\n\n",
                    "def cheese(): pass\n\n",
                    "<stdin>", False]
        self.assertEqual(results, expected)
    def check_file_refactoring(self, test_file, fixers=_2TO3_FIXERS,
                               options=None, mock_log_debug=None,
                               actually_write=True):
        """Refactor a copy of test_file: a dry run must not modify it, a
        write run must. Returns the new contents when written."""
        test_file = self.init_test_file(test_file)
        old_contents = self.read_file(test_file)
        rt = self.rt(fixers=fixers, options=options)
        if mock_log_debug:
            rt.log_debug = mock_log_debug
        # Dry run (write=False) must leave the file untouched.
        rt.refactor_file(test_file)
        self.assertEqual(old_contents, self.read_file(test_file))
        if not actually_write:
            return
        rt.refactor_file(test_file, True)
        new_contents = self.read_file(test_file)
        self.assertNotEqual(old_contents, new_contents)
        return new_contents
def init_test_file(self, test_file):
tmpdir = tempfile.mkdtemp(prefix="2to3-test_refactor")
self.addCleanup(shutil.rmtree, tmpdir)
shutil.copy(test_file, tmpdir)
test_file = os.path.join(tmpdir, os.path.basename(test_file))
os.chmod(test_file, 0o644)
return test_file
def read_file(self, test_file):
with open(test_file, "rb") as fp:
return fp.read()
    def refactor_file(self, test_file, fixers=_2TO3_FIXERS):
        """Refactor a copy of test_file in place; return (old, new) bytes."""
        test_file = self.init_test_file(test_file)
        old_contents = self.read_file(test_file)
        rt = self.rt(fixers=fixers)
        rt.refactor_file(test_file, True)
        new_contents = self.read_file(test_file)
        return old_contents, new_contents
    def test_refactor_file(self):
        """End-to-end file refactoring with the default fixer set."""
        test_file = os.path.join(FIXER_DIR, "parrot_example.py")
        self.check_file_refactoring(test_file, _DEFAULT_FIXERS)
    def test_refactor_file_write_unchanged_file(self):
        """With write_unchanged_files, an unmodified file still reaches the
        write path (verified via the debug log, not the filesystem)."""
        test_file = os.path.join(FIXER_DIR, "parrot_example.py")
        debug_messages = []
        def recording_log_debug(msg, *args):
            debug_messages.append(msg % args)
        self.check_file_refactoring(test_file, fixers=(),
                                    options={"write_unchanged_files": True},
                                    mock_log_debug=recording_log_debug,
                                    actually_write=False)
        # Testing that it logged this message when write=False was passed is
        # sufficient to see that it did not bail early after "No changes".
        message_regex = r"Not writing changes to .*%s" % \
                re.escape(os.sep + os.path.basename(test_file))
        for message in debug_messages:
            if "Not writing changes" in message:
                self.assertRegex(message, message_regex)
                break
        else:
            self.fail("%r not matched in %r" % (message_regex, debug_messages))
def test_refactor_dir(self):
def check(structure, expected):
def mock_refactor_file(self, f, *args):
got.append(f)
save_func = refactor.RefactoringTool.refactor_file
refactor.RefactoringTool.refactor_file = mock_refactor_file
rt = self.rt()
got = []
dir = tempfile.mkdtemp(prefix="2to3-test_refactor")
try:
os.mkdir(os.path.join(dir, "a_dir"))
for fn in structure:
open(os.path.join(dir, fn), "wb").close()
rt.refactor_dir(dir)
finally:
refactor.RefactoringTool.refactor_file = save_func
shutil.rmtree(dir)
self.assertEqual(got,
[os.path.join(dir, path) for path in expected])
check([], [])
tree = ["nothing",
"hi.py",
".dumb",
".after.py",
"notpy.npy",
"sappy"]
expected = ["hi.py"]
check(tree, |
"""Leetcode 124. Binary Tree Maximum Path Sum
Hard
URL: https://leetcode.com/problems/binary-tree-maximum-path-sum/
Given a non-empty binary tree, find the maximum path sum.
For this problem, a path is defined as any sequence of nodes from some starting
node to any node in the tree along the parent-child connections. The path must
contain at least one node and does not need to go through the root.
Example 1:
Input: [1,2,3]
1
/ \
2 3
Output: 6
Example 2:
Input: [-10,9,20,null,null,15,7]
-10
/ \
9 20
/ \
15 7
Output: 42
"""
# Definition for a binary tree node.
class TreeNode(object):
    """Binary tree node holding a value and left/right children."""
    def __init__(self, val):
        self.val = val
        self.left = self.right = None
class SolutionLeftRightMaxPathDownSumRecur(object):
    def _maxPathDownSum(self, root):
        """Best downward path sum starting at *root*; updates the global
        best with the best path that bends through *root*."""
        if root is None:
            return 0
        # Drop negative branch sums: max(0, .) disconnects a losing branch.
        left_gain = max(0, self._maxPathDownSum(root.left))
        right_gain = max(0, self._maxPathDownSum(root.right))
        # Best path passing through this node uses both branches.
        through_sum = left_gain + root.val + right_gain
        if through_sum > self.max_path_sum:
            self.max_path_sum = through_sum
        # A parent can extend only one branch, plus this node's value.
        return root.val + max(left_gain, right_gain)

    def maxPathSum(self, root):
        """
        :type root: TreeNode
        :rtype: int
        Time complexity: O(n).
        Space complexity: O(logn) for balanced tree, O(n) for singly linked list.
        """
        # Global best, memoized across the recursion.
        self.max_path_sum = -float('inf')
        self._maxPathDownSum(root)
        return self.max_path_sum
def main():
    """Exercise maxPathSum on the two examples from the problem statement."""
    # Output: 6
    # 1
    # / \
    # 2 3
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    print SolutionLeftRightMaxPathDownSumRecur().maxPathSum(root)
    # Output: 42
    # -10
    # / \
    # 9 20
    # / \
    # 15 7
    root = TreeNode(-10)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print SolutionLeftRightMaxPathDownSumRecur().maxPathSum(root)
if __name__ == '__main__':
main()
|
#!/usr/bin/python
'''
Define the views for the straw web app
'''
from flask import render_template, session, request, render_template, jsonify, Flask, make_response
from time import sleep
from kafka.common import FailedPayloadsError, NotLeaderForPartitionError, KafkaUnavailableError
import md5, redis
import json, uuid
MAX_RESULTS = 100
EXPIRATION = 1
def attach_views(app):
    """Register the straw web app's routes on *app*.

    Expects the app to carry: a redis connection pool (app.pool), a Kafka
    producer (app.producer), a query subscriber (app.subscriber) and a
    qid -> set-of-session-ids map (app.user_channels).
    """
    @app.route('/_fetch_messages')
    def fetch_messages():
        """Return up to MAX_RESULTS matches buffered for this session."""
        # get a redis connection
        redis_connection = redis.Redis(connection_pool=app.pool)
        # update the query list in the view
        if session.get('sid') is not None:
            matches = redis_connection.lrange(session.get('sid'), 0, MAX_RESULTS)
            return jsonify(result=matches)
    @app.route('/', methods=['GET'])
    def index():
        """Main page; assigns a session id on first visit."""
        if session.get('sid') is None:
            session['sid'] = uuid.uuid4().hex
        try:
            query_list = session['queries']
        except KeyError:
            query_list = []
        return render_template('index.html', query_list=query_list)
    @app.route('/', methods=['POST'])
    def search_box_control():
        '''add to or clear the list of queries.'''
        # we need a session
        if session.get('sid') is None:
            raise RuntimeError("No session.")
        sid = session.get('sid')
        # get a redis connection
        redis_connection = redis.Redis(connection_pool=app.pool)
        # if clear button pressed:
        if 'clear' in request.form:
            app.clear_user(session.get('sid'))
            # NOTE(review): has_key is Python-2 only; 'queries' in session
            # is the portable spelling.
            if session.has_key('queries'):
                del session['queries']
            return render_template("index.html", query_list=[], session=session)
        # create a new query
        text = request.form['text'].lower().split(" ")
        # generate a unique query id (md5 of the serialized terms query)
        msg = {"type":"terms-query","terms":text,"minimum-match":len(text)}
        data = json.dumps(msg)
        qid = md5.new(data).hexdigest()
        query_string = " ".join(text)
        # add the qid and value to the query lookup store
        try:
            session['queries'].append(query_string)
        except KeyError:
            # sanity: clear any queries stored for this user but not in the session.
            redis_connection.delete(sid+"-queries")
            session['queries'] = [query_string]
        # try three times to do the post to kafka.
        post_success = False
        for i in range(3):
            try:
                app.producer.send_messages("queries", data)
            except (FailedPayloadsError, NotLeaderForPartitionError, KafkaUnavailableError) as e:
                # wait a bit and try again
                print("Failed to post query {0} to kafka. Try #{1}".format(data, i))
                sleep(0.25)
                continue
            post_success=True
            break
        if post_success==True:
            # subscribe the user to the query
            try:
                app.user_channels[qid].add(sid)
            except KeyError:
                app.user_channels[qid] = set([sid])
                app.subscriber.add_query(qid)
            # link the id to the query text
            redis_connection.set(qid, " ".join(text))
            # add query to the list of things the user has subscribed to
            redis_connection.lpush(sid +"-queries", qid)
        # update the query list in the view
        query_list = session["queries"]
        return render_template("index.html", query_list=query_list)
    @app.route('/about')
    def about():
        return render_template('%s.html' % 'about')
    @app.route('/straw.pdf')
    def pdf():
        return app.send_static_file('assets/straw.pdf')
|
class UserForeignKey(models.ForeignKey):
    """
    A replacement for models.ForeignKey('users.UserProfile').
    This field uses UserEmailField to make form fields key off the user's email
    instead of the primary key id. We also hook up autocomplete automatically.
    """
    def __init__(self, *args, **kw):
        # The related model is always UserProfile.
        super(UserForeignKey, self).__init__(UserProfile, *args, **kw)
    def value_from_object(self, obj):
        # Serialize as the related user's email address, not its pk.
        return getattr(obj, self.name).email
    def formfield(self, **kw):
        # Bypass ForeignKey.formfield (which would build a ModelChoiceField)
        # and build a UserEmailField instead.
        defaults = {'form_class': UserEmailField}
        defaults.update(kw)
        return models.Field.formfield(self, **defaults)
class UserEmailField(forms.EmailField):
    """Form field that cleans an email address into a UserProfile instance."""
    def clean(self, value):
        if value in validators.EMPTY_VALUES:
            raise forms.ValidationError(self.error_messages['required'])
        try:
            return UserProfile.objects.get(email=value)
        except UserProfile.DoesNotExist:
            raise forms.ValidationError(_('No user with that email.'))
    def widget_attrs(self, widget):
        # reverse() is wrapped lazily because the URLConf may not be loaded
        # yet when this field class is constructed.
        lazy_reverse = lazy(reverse, str)
        return {'class': 'email-autocomplete',
                'data-src': lazy_reverse('users.ajax')}
# Monkey-patch: widen Django's default password column for the hashes stored here.
AbstractBaseUser._meta.get_field('password').max_length = 255
class UserProfile(amo.models.OnChangeMixin, amo.models.ModelBase,
AbstractBaseUser):
USERNAME_FIELD = 'username'
username = models.CharField(max_length=255, default='', unique=True)
display_name = models.CharField(max_length=255, default='', null=True,
blank=True)
email = models.EmailField(unique=True, null=True)
averagerating = models.CharField(max_length=255, blank=True, null=True)
bio = NoLinksField(short=False)
confirmationcode = models.CharField(max_length=255, default='',
blank=True)
deleted = models.BooleanField(default=False)
display_collections = models.BooleanField(default=False)
display_collections_fav = models.BooleanField(default=False)
emailhidden = models.BooleanField(default=True)
homepage = models.URLField(max_length=255, blank=True, default='')
location = models.CharField(max_length=255, blank=True, default='')
notes = models.TextField(blank=True, null=True)
notifycompat = models.BooleanField(default=True)
notifyevents = models.BooleanField(default=True)
occupation = models.CharField(max_length=255, default='', blank=True)
# This is essentially a "has_picture" flag right now
picture_type = models.CharField(max_length=75, default='', blank=True)
resetcode = models.CharField(max_length=255, default='', blank=True)
resetcode_expires = models.DateTimeField(default=datetime.now, null=True,
blank=True)
read_dev_agreement = models.DateTimeField(null=True, blank=True)
last_login_ip = models.CharField(default='', max_length=45, editable=False)
last_login_attempt = models.DateTimeField(null=True, editable=False)
last_login_attempt_ip = models.CharField(default='', max_length=45,
editable=False)
failed_login_attempts = models.PositiveIntegerField(default=0,
editable=False)
source = models.PositiveIntegerField(default=amo.LOGIN_SOURCE_UNKNOWN,
editable=False, db_index=True)
is_verified = models.BooleanField(default=True)
region = models.CharField(max_length | =11, null=True, blank=True,
editable=False)
lang = models.CharField(max_length=5, null=True, blank=True,
editable=False)
class Meta:
db_table = 'users'
    def __init__(self, *args, **kw):
        """Initialize the profile, normalizing username to unicode."""
        super(UserProfile, self).__init__(*args, **kw)
        if self.username:
            self.username = smart_unicode(self.username)
    def __unicode__(self):
        """Display as "<id>: <display name or username>"."""
        return u'%s: %s' % (self.id, self.display_name or self.username)
    def save(self, force_insert=False, force_update=False, using=None, **kwargs):
        """Save, backfilling resetcode_expires for legacy remora rows."""
        # we have to fix stupid things that we defined poorly in remora
        if not self.resetcode_expires:
            self.resetcode_expires = datetime.now()
        super(UserProfile, self).save(force_insert, force_update, using,
                                      **kwargs)
    @property
    def is_superuser(self):
        """True when the user belongs to a group carrying the '*:*' rule."""
        return self.groups.filter(rules='*:*').exists()
    @property
    def is_staff(self):
        """True when the user is allowed the 'Admin' action per the ACL."""
        from mkt.access import acl
        return acl.action_allowed_user(self, 'Admin', '%')
    def has_perm(self, perm, obj=None):
        """Superusers implicitly hold every permission."""
        return self.is_superuser
    def has_module_perms(self, app_label):
        """Superusers implicitly see every app's admin module."""
        return self.is_superuser
    def get_backend(self):
        """Auth backend path Django should record for this user."""
        return 'django_browserid.auth.BrowserIDBackend'
    def set_backend(self, val):
        # The backend is fixed; assignments are silently ignored.
        pass
    backend = property(get_backend, set_backend)
    def is_anonymous(self):
        # A UserProfile row always represents a real, signed-up user.
        return False
    def get_url_path(self, src=None):
        # See: bug 880767.
        # `src` is accepted for interface compatibility but ignored.
        return '#'
    def my_apps(self, n=8):
        """Return up to ``n`` of this user's web apps, ordered by localized name."""
        qs = self.addons.filter(type=amo.ADDON_WEBAPP)
        qs = order_by_translation(qs, 'name')
        return qs[:n]
    @amo.cached_property
    def is_developer(self):
        # Cached per instance: true when the user is listed on any add-on.
        return self.addonuser_set.exists()
    @property
    def name(self):
        """Best display string: display_name when set, else username."""
        return smart_unicode(self.display_name or self.username)
    @amo.cached_property
    def reviews(self):
        """All reviews that are not dev replies."""
        qs = self._reviews_all.filter(reply_to=None)
        # Force the query to occur immediately. Several
        # reviews-related tests hang if this isn't done.
        # NOTE(review): nothing here visibly evaluates the queryset —
        # confirm whether the comment above still holds.
        return qs
    def anonymize(self):
        """Scrub PII from the account, mark it deleted, and save.

        The row is kept (flagged via ``deleted``) rather than removed.
        """
        log.info(u"User (%s: <%s>) is being anonymized." % (self, self.email))
        self.email = None  # assumes the email column is nullable — confirm
        self.password = "sha512$Anonymous$Password"
        self.username = "Anonymous-%s" % self.id  # Can't be null
        self.display_name = None
        self.homepage = ""
        self.deleted = True
        self.picture_type = ""
        self.save()
    def check_password(self, raw_password):
        # BrowserID does not store a password.
        # Any supplied value is therefore accepted.
        return True
    def log_login_attempt(self, successful):
        """Log a user's login attempt"""
        self.last_login_attempt = datetime.now()
        self.last_login_attempt_ip = commonware.log.get_remote_addr()
        if successful:
            log.debug(u"User (%s) logged in successfully" % self)
            # A success resets the failure counter and records the IP.
            self.failed_login_attempts = 0
            self.last_login_ip = commonware.log.get_remote_addr()
        else:
            log.debug(u"User (%s) failed to log in" % self)
            # Cap at 2**24 — presumably to avoid overflowing the DB column.
            if self.failed_login_attempts < 16777216:
                self.failed_login_attempts += 1
        self.save()
    def purchase_ids(self):
        """
        I'm special casing this because we use purchase_ids a lot in the site
        and we are not caching empty querysets in cache-machine.
        That means that when the site is first launched we are having a
        lot of empty queries hit.
        We can probably do this in smarter fashion by making cache-machine
        cache empty queries on an as need basis.
        """
        # Circular import
        from mkt.prices.models import AddonPurchase
        # Memoized on the user's pk so repeated calls hit the cache.
        @memoize(prefix='users:purchase-ids')
        def ids(pk):
            return (AddonPurchase.objects.filter(user=pk)
                    .values_list('addon_id', flat=True)
                    .filter(type=amo.CONTRIB_PURCHASE)
                    .order_by('pk'))
        return ids(self.pk)
@contextmanager
def activate_lang(self):
"""
Activate the language for the user. If none is set will go to the site
default which is en-US.
"""
lang = self.lang if self.lang else settings.LANGUAGE_CODE
old = translation.get_language()
tower.activate(lang)
yield
tower.activate(old)
models.signals.pre_save.connect(save_signal, sender=UserProfile,
|
for | i in range (10):
p | rint i
|
from django.http import HttpResponse
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.forms.models import model_to_dict
from django.forms.util import ErrorList
from django.template import RequestContext
from django.db import IntegrityError
from django.core.exceptions import ObjectDoesNotExist
from common.models import PublicKey, UserProfile
from common.util import get_context
from user_manage.forms import UserForm, PublicKeyForm, ProfileForm
def user_settings(request):
    """Show and update the current user's account settings.

    GET renders the forms pre-filled; POST validates and saves the user
    form, redirecting back here on success.
    """
    user = request.user
    # BUG FIX: the auth check must run before any DB access — the original
    # queried publickey_set / get_or_create first, which an anonymous user
    # cannot satisfy.
    if not user.is_authenticated():
        return HttpResponse("Not authorized", status=401)
    new_pk = PublicKeyForm()
    p_form = ProfileForm()
    pubkeys = user.publickey_set.all()
    profile = UserProfile.objects.get_or_create(user=user)[0]
    if request.method == 'GET':
        user_form = UserForm(model_to_dict(user))
        p_form = ProfileForm(model_to_dict(profile))
    elif request.method == 'POST':
        user_form = UserForm(request.POST, instance=user)
        if user_form.is_valid():
            user = user_form.save()
            return redirect('user_settings')
    else:
        # BUG FIX: other methods previously fell through to an undefined
        # `user_form` (NameError).
        return HttpResponse("Method not allowed", status=405)
    context = get_context(request, { 'user_form' : user_form, 'pk_form': new_pk,
                                     'keys' : pubkeys, 'profile_form': p_form })
    return render_to_response('user_manage/user_settings.html', context, context_instance=RequestContext(request))
def user_profile(request):
    """POST-only endpoint that updates the current user's profile."""
    user = request.user
    if not user.is_authenticated():
        return HttpResponse("Not authorized", status=401)
    if request.method != "POST":
        # BUG FIX: HttpResponse(content, 405) passed 405 positionally into
        # the content-type slot; the HTTP status must be the `status` kwarg.
        return HttpResponse("Method not allowed", status=405)
    profile = user.get_profile()
    form = ProfileForm(request.POST, instance=profile)
    if form.is_valid():
        form.save()
    # BUG FIX: an invalid form previously fell off the end of the view and
    # returned None (a server error). Errors are still silently dropped —
    # NOTE(review): consider re-rendering the form with errors instead.
    return redirect('user_settings')
def pubkey_add(request):
    """Create a new PublicKey owned by the requesting user."""
    user = request.user
    if not user.is_authenticated():
        return HttpResponse("You should be authenticated....", status=401)
    if request.method == 'POST':
        form = PublicKeyForm(request.POST)
        if form.is_valid():
            key = form.save(commit=False)
            key.owner = user
            try:
                key.save()
                return redirect('user_settings')
            except IntegrityError:
                # Presumably a unique constraint on the key name per owner —
                # surface it as a form error rather than a 500.
                form._errors["description"] = ErrorList(["You have a public key with that name already"])
    else:
        # BUG FIX: the original built a form only for GET, so any other
        # method (HEAD, PUT, ...) hit a NameError when rendering below.
        form = PublicKeyForm()
    context = get_context(request, {'form' : form})
    return render_to_response('user_manage/key_edit.html', context, context_instance=RequestContext(request))
def pubkey_delete(request, key_id):
    """Delete a public key iff the requester owns it and used POST.

    Always redirects back to the settings page, whether or not anything
    was deleted.
    """
    key = get_object_or_404(PublicKey, pk=key_id)
    is_owner = key.owner == request.user
    if is_owner and request.method == 'POST':
        key.delete()
    return redirect('user_settings')
def pubkey_edit(request, key_id=None):
    """Edit an existing PublicKey (``key_id`` given) or create a new one."""
    if key_id is not None:
        pk = get_object_or_404(PublicKey, pk=key_id)
    else:
        pk = None
    user = request.user
    # BUG FIX: `pk.owner` raised AttributeError whenever key_id was None;
    # only enforce ownership when a key was actually loaded.
    if not user.is_authenticated() or (pk is not None and pk.owner != user):
        return HttpResponse("Not allowed", status=401)
    if request.method == 'POST':
        if key_id is not None:
            form = PublicKeyForm(request.POST, instance=pk)
        else:
            form = PublicKeyForm(request.POST)
        if form.is_valid():
            try:
                pk = form.save()
                return redirect('user_settings')
            except IntegrityError:
                form._errors["description"] = ErrorList(["You have a public key with that name already"])
        context = get_context(request, {'form' : form, 'pk': pk})
        return render_to_response('user_manage/key_edit.html', context, context_instance=RequestContext(request))
    elif request.method == 'GET':
        # BUG FIX: model_to_dict(None) crashed the "new key" GET flow.
        form = PublicKeyForm(model_to_dict(pk)) if pk is not None else PublicKeyForm()
    else:
        return HttpResponse("Not implemented", status=405)
    context = get_context(request, {'form' : form, 'pk' : pk})
    return render_to_response('user_manage/key_edit.html', context, context_instance= RequestContext(request))
|
# pylint: skip-file
class YeditException(Exception):
    ''' Exception class for Yedit '''
    # Raised for misuse (e.g. writing without a filename).
    pass
class Yedit(object):
    ''' Class to modify yaml files.

    Keys use dotted-path notation with optional list indices,
    e.g. "a.b[0].c".
    '''
    # BUG FIX: the original key character class included '.' (so "a.b"
    # matched as ONE literal key, defeating the dotted-path traversal the
    # docstrings describe) and rejected digits. '.' is now purely the
    # separator; digits and '_' are legal key characters.
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z_/-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z_/-]+)"

    def __init__(self, filename=None, content=None, content_type='yaml'):
        # `content`, when given, seeds the document directly and suppresses
        # the initial load() from `filename`.
        self.content = content
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        if self.filename and not self.content:
            self.load(content_type=self.content_type)

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value

    @staticmethod
    def remove_entry(data, key):
        ''' Remove the entry at dotted-path `key` from `data`.

        Returns True on success, None when the key is invalid or absent.
        '''
        if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
            return None
        key_indexes = re.findall(Yedit.re_key, key)
        # Walk down to the parent container of the final path component.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
                del data[int(key_indexes[-1][0])]
                return True
        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True

    @staticmethod
    def add_entry(data, key, item=None):
        ''' Set `item` at dotted-path `key` in `data`, creating intermediate
        dicts along the way.

        Returns the (mutated) root container, or None when the key is
        invalid or the path cannot be followed.
        '''
        if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
            return None
        curr_data = data
        key_indexes = re.findall(Yedit.re_key, key)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                # BUG FIX: dict.has_key() does not exist on Python 3; the
                # `in` operator works on both 2 and 3.
                if isinstance(data, dict) and dict_key in data:
                    data = data[dict_key]
                    continue
                # Missing intermediate node: create an empty dict.
                data[dict_key] = {}
                data = data[dict_key]
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        # process last index for add
        # expected list entry
        if key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
            data[int(key_indexes[-1][0])] = item
        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item
        return curr_data

    @staticmethod
    def get_entry(data, key):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            return c
        '''
        if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
            return None
        key_indexes = re.findall(Yedit.re_key, key)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        return data

    def write(self):
        ''' Write the current document to self.filename as YAML. '''
        if not self.filename:
            raise YeditException('Please specify a filename.')
        with open(self.filename, 'w') as yfd:
            yfd.write(yaml.safe_dump(self.yaml_dict, default_flow_style=False))

    def read(self):
        ''' Return the raw file contents, or None when the file is absent. '''
        # check if it exists
        if not self.exists():
            return None
        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()
        return contents

    def exists(self):
        ''' Return True when self.filename names an existing file. '''
        # BUG FIX: guard against filename=None (content-only construction),
        # which made os.path.exists raise TypeError.
        return bool(self.filename) and os.path.exists(self.filename)

    def load(self, content_type='yaml'):
        ''' Parse the file as yaml or json into self.yaml_dict.

        Returns the parsed document, or None on a missing/empty/unparsable
        file.
        '''
        contents = self.read()
        if not contents:
            return None
        # check if it is yaml
        try:
            if content_type == 'yaml':
                # SECURITY NOTE(review): yaml.load without SafeLoader can run
                # arbitrary constructors; use yaml.safe_load if this file can
                # come from an untrusted source.
                self.yaml_dict = yaml.load(contents)
            elif content_type == 'json':
                self.yaml_dict = json.loads(contents)
        except (yaml.YAMLError, ValueError):
            # BUG FIX: json.loads raises ValueError, which the original
            # yaml-only except clause let propagate.
            return None
        return self.yaml_dict

    def get(self, key):
        ''' Return the value at `key`, or None when absent. '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key)
        except KeyError as _:
            entry = None
        return entry

    def delete(self, key):
        ''' Remove `key`; returns (changed, document). '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key)
        except KeyError as _:
            entry = None
        if not entry:
            return (False, self.yaml_dict)
        result = Yedit.remove_entry(self.yaml_dict, key)
        if not result:
            return (False, self.yaml_dict)
        return (True, self.yaml_dict)

    def put(self, key, value):
        ''' Set `key` to `value`; returns (changed, document). '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key)
        except KeyError as _:
            entry = None
        if entry == value:
            # No-op when the stored value already equals `value`.
            return (False, self.yaml_dict)
        result = Yedit.add_entry(self.yaml_dict, key, value)
        if not result:
            return (False, self.yaml_dict)
        return (True, self.yaml_dict)

    def create(self, key, value):
        ''' Initialize the document as {key: value} if no file exists yet. '''
        if not self.exists():
            self.yaml_dict = {key: value}
            return (True, self.yaml_dict)
        return (False, self.yaml_dict)
|
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
| # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF | ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Tester for topology_from_geometry
from absl.testing import absltest
import numpy as np
import pandas as pd
from google.protobuf import text_format
from smu import dataset_pb2
from smu.geometry import bond_length_distribution
from smu.geometry import smu_molecule
from smu.geometry import topology_from_geom
from smu.parser import smu_utils_lib
# Only needed so we can alter the default bond matching
# For the empirical bond length distributions, the resolution used.
# Which is not necessarily the same as what is used in the production system.
RESOLUTION = 1000


def triangular_distribution(min_dist, dist_max_value, max_dist):
  """Generate a triangular distribution.

  Args:
    min_dist: minimum X value
    dist_max_value: X value of the triangle peak
    max_dist: maximum X value

  Returns:
    Tuple of the X and Y coordinates that represent the distribution;
    both arrays have exactly RESOLUTION points.
  """
  population = np.zeros(RESOLUTION, dtype=np.float32)
  x_extent = max_dist - min_dist
  peak_index = int(round((dist_max_value - min_dist) / x_extent * RESOLUTION))
  # BUG FIX: guard both slopes so a peak at either boundary no longer
  # raises ZeroDivisionError.
  if peak_index > 0:
    dy = 1.0 / peak_index
    for i in range(0, peak_index):
      population[i] = (i + 1) * dy
  if peak_index < RESOLUTION:
    dy = 1.0 / (RESOLUTION - peak_index)
    for i in range(peak_index, RESOLUTION):
      population[i] = 1.0 - (i - peak_index) * dy
  # BUG FIX: np.arange with a float step can produce RESOLUTION +/- 1
  # points; linspace guarantees the length matches `population`.
  distances = np.linspace(
      min_dist, max_dist, RESOLUTION, endpoint=False, dtype=np.float32)
  return distances, population
class TestTopoFromGeom(absltest.TestCase):
  def test_scores(self):
    """End-to-end scoring check for a two-carbon system.

    Builds single- and double-bond C-C length distributions peaking at
    1.4 and 1.5 Angstroms, places two carbons 1.4 A apart, and verifies
    both bond hypotheses are returned with the expected ordering and
    normalized topology scores.
    """
    carbon = dataset_pb2.BondTopology.ATOM_C
    single_bond = dataset_pb2.BondTopology.BondType.BOND_SINGLE
    double_bond = dataset_pb2.BondTopology.BondType.BOND_DOUBLE
    # For testing, turn off the need for complete matching.
    smu_molecule.default_must_match_all_bonds = False
    all_distributions = bond_length_distribution.AllAtomPairLengthDistributions(
    )
    x, y = triangular_distribution(1.0, 1.4, 2.0)
    df = pd.DataFrame({"length": x, "count": y})
    bldc1c = bond_length_distribution.EmpiricalLengthDistribution(df, 0.0)
    all_distributions.add(carbon, carbon, single_bond, bldc1c)
    x, y = triangular_distribution(1.0, 1.5, 2.0)
    df = pd.DataFrame({"length": x, "count": y})
    bldc2c = bond_length_distribution.EmpiricalLengthDistribution(df, 0.0)
    all_distributions.add(carbon, carbon, double_bond, bldc2c)
    bond_topology = text_format.Parse(
        """
atoms: ATOM_C
atoms: ATOM_C
bonds: {
  atom_a: 0
  atom_b: 1
  bond_type: BOND_SINGLE
}
""", dataset_pb2.BondTopology())
    geometry = text_format.Parse(
        """
atom_positions {
  x: 0.0
  y: 0.0
  z: 0.0
},
atom_positions {
  x: 0.0
  y: 0.0
  z: 0.0
}
""", dataset_pb2.Geometry())
    # Separate the atoms by exactly 1.4 A (positions are stored in Bohr).
    geometry.atom_positions[1].x = 1.4 / smu_utils_lib.BOHR_TO_ANGSTROMS
    matching_parameters = smu_molecule.MatchingParameters()
    matching_parameters.must_match_all_bonds = False
    fate = dataset_pb2.Conformer.FATE_SUCCESS
    conformer_id = 1001
    result = topology_from_geom.bond_topologies_from_geom(
        all_distributions, conformer_id, fate, bond_topology, geometry,
        matching_parameters)
    self.assertIsNotNone(result)
    self.assertLen(result.bond_topology, 2)
    self.assertLen(result.bond_topology[0].bonds, 1)
    self.assertLen(result.bond_topology[1].bonds, 1)
    # The single bond peaks at 1.4 A, so it should outscore the double bond.
    self.assertEqual(result.bond_topology[0].bonds[0].bond_type, single_bond)
    self.assertEqual(result.bond_topology[1].bonds[0].bond_type, double_bond)
    self.assertGreater(result.bond_topology[0].topology_score,
                       result.bond_topology[1].topology_score)
    # Topology scores are log-probabilities that must sum to 1 when
    # exponentiated.
    self.assertAlmostEqual(
        np.sum(np.exp([bt.topology_score for bt in result.bond_topology])), 1.0)
    self.assertAlmostEqual(result.bond_topology[0].geometry_score,
                           np.log(bldc1c.pdf(1.4)))
    self.assertAlmostEqual(result.bond_topology[1].geometry_score,
                           np.log(bldc2c.pdf(1.4)))
def test_multi_topology_detection(self):
"""Tests that we can find multiple versions of the same topology."""
single = dataset_pb2.BondTopology.BondType.BOND_SINGLE
double = dataset_pb2.BondTopology.BondType.BOND_DOUBLE
all_dist = bond_length_distribution.AllAtomPairLengthDistributions()
for bond_type in [single, double]:
all_dist.add(
dataset_pb2.BondTopology.ATOM_N, dataset_pb2.BondTopology.ATOM_N,
bond_type,
bond_length_distribution.FixedWindowLengthDistribution(
1.0, 2.0, None))
# This conformer is a flat aromatic square of nitrogens. The single and
# double bonds can be rotated such that it's the same topology but
# individual bonds have switched single/double.
conformer = dataset_pb2.Conformer()
conformer.bond_topologies.add(bond_topology_id=123, smiles="N1=NN=N1")
conformer.bond_topologies[0].atoms.extend([
dataset_pb2.BondTopology.ATOM_N,
dataset_pb2.BondTopology.ATOM_N,
dataset_pb2.BondTopology.ATOM_N,
dataset_pb2.BondTopology.ATOM_N,
])
conformer.bond_topologies[0].bonds.extend([
dataset_pb2.BondTopology.Bond(atom_a=0, atom_b=1, bond_type=single),
dataset_pb2.BondTopology.Bond(atom_a=1, atom_b=2, bond_type=double),
dataset_pb2.BondTopology.Bond(atom_a=2, atom_b=3, bond_type=single),
dataset_pb2.BondTopology.Bond(atom_a=3, atom_b=0, bond_type=double),
])
dist15a = 1.5 / smu_utils_lib.BOHR_TO_ANGSTROMS
conformer.optimized_geometry.atom_positions.extend([
dataset_pb2.Geometry.AtomPos(x=0, y=0, z=0),
dataset_pb2.Geometry.AtomPos(x=0, y=dist15a, z=0),
dataset_pb2.Geometry.AtomPos(x=dist15a, y=dist15a, z=0),
dataset_pb2.Geometry.AtomPos(x=dist15a, y=0, z=0),
])
matching_parameters = smu_molecule.MatchingParameters()
result = topology_from_geom.bond_topologies_from_geom(
bond_lengths=all_dist,
conformer_id=123,
fate=dataset_pb2.Conformer.FATE_SUCCESS,
bond_topology=conformer.bond_topologies[0],
geometry=conformer.optimized_geometry,
matching_parameters=matching_parameters)
self.assertLen(result.bond_topology, 2)
# The returned order is arbitrary so we figure out which is is marked
# as the starting topology.
starting_idx = min([
i for i, bt, in enumerate(result.bond_topology)
if bt.is_starting_topology
])
other_idx = (starting_idx + 1) % 2
starting = result.bond_topology[starting_idx]
self.assertTrue(starting.is_starting_topology)
self.assertEqual(smu_utils_lib.get_bond_type(starting, 0, 1), single)
self.assertEqual(smu_utils_lib.get_bond_type(starting, 1, 2), double)
self.assertEqual(smu_utils_lib.get_bond_type(starting, 2, 3), single)
self.assertEqual(smu_utils_lib.get_bond_type(starting, 3, 0), double)
other = result.bond_topology[other_idx]
self.assertFalse(other.is_starting_topology)
self.assertEqual(smu_utils_lib.get_bond_type(other, 0, 1), double)
self.assertEqual(smu_utils_lib.get_bond_typ |
-v [LEVEL] Choose `verbose` mode, or choose logging
level DEBUG, INFO, WARNING, ERROR, CRITICAL
python_version : 3.7
"""
from __future__ import print_function
import sys
import numpy as np
np.seterr(divide='print', invalid='raise')
import time
import re
from colorama import init, Fore, Back, Style
init()
import os
import argparse
from sira.logger import configure_logger
import logging
import logging.config
from sira.configuration import Configuration
from sira.scenario import Scenario
from sira.modelling.hazard import HazardsContainer
from sira.model_ingest import ingest_model
from sira.simulation import calculate_response
from sira.modelling.system_topology import SystemTopology
from sira.infrastructure_response import (
write_system_response,
plot_mean_econ_loss,
pe_by_component_class
)
from sira.fit_model import fit_prob_exceed_model
from sira.loss_analysis import run_scenario_loss_analysis
import numpy as np
def main():
# define arg parser
parser = argparse.ArgumentParser(
prog='sira', description="run sira", add_help=True)
# [Either] Supply config file and model file directly:
parser.add_argument("-c", "--config_file", type=str)
parser.add_argument("-m", "--model_file", type=str)
# [Or] Supply only the directory where the input files reside
parser.add_argument("-d", "--input_directory", type=str)
# Tell the code what tasks to do
parser.add_argument(
"-s", "--simulation", action='store_true', default=False)
parser.add_argument(
"-f", "--fit", action='store_true', default=False)
parser.add_argument(
"-l", "--loss_analysis", action='store_true', default=False)
parser.add_argument(
"-v", "--verbose", dest="loglevel", type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default="INFO",
help="Choose option for logging level from: \n"+
"DEBUG, INFO, WARNING, ERROR, CRITICAL.")
args = parser.parse_args()
# error handling
if args.input_directory and (args.config_file or args.model_file):
parser.error("--input_directory and [--config_file and --model_file]"
" are mutually exclusive ...")
sys.exit(2)
# error handling
if not any([args.simulation, args.fit, args.loss_analysis]):
parser.error(
"\nAt least one of these three flags is required:\n"
" --simulation (-s) or --fit (-f) or --loss_analysis (-s).\n"
" The options for fit or loss_analysis requires the -s flag, "
" or a previous completed run with the -s flag.")
sys.exit(2)
proj_root_dir = args.input_directory
if not os.path.isdir(proj_root_dir):
print("Invalid path supplied:\n {}".format(proj_root_dir))
sys.exit(1)
proj_input_dir = os.path.join(proj_root_dir, "input")
config_file_name = None
model_file_name = None
for fname in os.l | istdir(proj_input_dir):
confmatch = re.search(r"(?i)^config.*\.json$", fname)
if confmatch is not None:
config_file_name = confmatch.string
modelmatch = re.search(r"(?i)^model.*\.json$", fname)
if modelmatch is not None:
model_file_name = modelmatch.string
if config_file_name is None:
parser.error(
"C | onfig file not found. "
"A valid config file name must begin with the term `config`, "
"and must be a JSON file.\n")
sys.exit(2)
if model_file_name is None:
parser.error(
"Model file not found. "
"A valid model file name must begin the term `model`, "
"and must be a JSON file.\n")
sys.exit(2)
args.config_file = os.path.join(proj_input_dir, config_file_name)
args.model_file = os.path.join(proj_input_dir, model_file_name)
args.output = os.path.join(args.input_directory, "output")
if not os.path.isfile(args.config_file):
parser.error(
"Unable to locate config file "+str(args.config_file)+" ...")
sys.exit(2)
if not os.path.isfile(args.model_file):
parser.error(
"Unable to locate model file "+str(args.model_file)+" ...")
sys.exit(2)
args.output = os.path.join(
os.path.dirname(os.path.dirname(args.config_file)), "output")
try:
if not os.path.exists(args.output):
os.makedirs(args.output)
except Exception:
parser.error(
"Unable to create output folder " + str(args.output) + " ...")
sys.exit(2)
# ---------------------------------------------------------------------
# Set up logging
# ---------------------------------------------------------------------
timestamp = time.strftime('%Y.%m.%d %H:%M:%S')
log_path = os.path.join(args.output, "log.txt")
configure_logger(log_path, args.loglevel)
rootLogger = logging.getLogger(__name__)
print("\n")
rootLogger.info(Fore.GREEN +
'Simulation initiated at: {}\n'.format(timestamp) +
Fore.RESET)
# ---------------------------------------------------------------------
# Configure simulation model.
# Read data and control parameters and construct objects.
# ---------------------------------------------------------------------
config = Configuration(args.config_file, args.model_file, args.output)
scenario = Scenario(config)
hazards = HazardsContainer(config)
infrastructure = ingest_model(config)
# ---------------------------------------------------------------------
# SIMULATION
# Get the results of running a simulation
# ---------------------------------------------------------------------
# response_list = [
# {}, # [0] hazard level vs component damage state index
# {}, # [1] hazard level vs infrastructure output
# {}, # [2] hazard level vs component response
# {}, # [3] hazard level vs component type response
# [], # [4] array of infrastructure output per sample
# [], # [5] array of infrastructure econ loss per sample
# {}, # [6] hazard level vs component class dmg level pct
# {}] # [7] hazard level vs component class expected damage index
if args.simulation:
response_list = calculate_response(hazards, scenario, infrastructure)
# ---------------------------------------------------------------------
# Post simulation processing.
# After the simulation has run the results are aggregated, saved
# and the system fragility is calculated.
# ---------------------------------------------------------------------
write_system_response(response_list, infrastructure, scenario, hazards)
economic_loss_array = response_list[5]
plot_mean_econ_loss(scenario, economic_loss_array, hazards)
if config.HAZARD_INPUT_METHOD == "hazard_array":
pe_by_component_class(
response_list, infrastructure, scenario, hazards)
# ---------------------------------------------------------------------
# Visualizations
# Construct visualization for system topology
# ---------------------------------------------------------------------
sys_topology_view = SystemTopology(infrastructure, scenario)
sys_topology_view.draw_sys_topology(viewcontext="as-built")
rootLogger.info('Simulation completed...')
# -------------------------------------------------------------------------
# FIT MODEL ANALYSIS
# -------------------------------------------------------------------------
if args.fit:
args.pe_sys = None
existing_models = [
"potablewatertreatmentplant", "pwtp",
"wastewatertreatmentplant", "wwtp",
"watertreatmentplant", "wtp",
"powerstation",
"substation",
"potablewaterpumpstation"
]
if infrastructure.system_class.lower() == 'powerstation':
args.pe_sys = os.path.join(
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.