prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
"""
WSGI config for django_rest_test project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with a | n application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the sa | me mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "django_rest_test.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_rest_test.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..registration import MeasureImageSimilarity
def test_MeasureImageSimilarity_inputs():
    """Check the trait metadata declared on MeasureImageSimilarity's input spec."""
    expected_metadata = dict(
        args=dict(argstr='%s'),
        dimension=dict(argstr='--dimensionality %d', position=1),
        environ=dict(nohash=True, usedefault=True),
        fixed_image=dict(mandatory=True),
        fixed_image_mask=dict(argstr='%s'),
        ignore_exception=dict(nohash=True, usedefault=True),
        metric=dict(argstr='%s', mandatory=True),
        metric_weight=dict(requires=['metric'], usedefault=True),
        moving_image=dict(mandatory=True),
        moving_image_mask=dict(requires=['fixed_image_mask']),
        num_threads=dict(nohash=True, usedefault=True),
        radius_or_number_of_bins=dict(mandatory=True, requires=['metric']),
        sampling_percentage=dict(mandatory=True, requires=['metric']),
        sampling_strategy=dict(requires=['metric'], usedefault=True),
        terminal_output=dict(deprecated='1.0.0', nohash=True),
    )
    spec = MeasureImageSimilarity.input_spec()
    traits = spec.traits()
    # Every declared metadata key must match the value on the actual trait.
    for trait_name, metadata in expected_metadata.items():
        for meta_key, expected in metadata.items():
            assert getattr(traits[trait_name], meta_key) == expected
def test_MeasureImageSimilarity_outputs():
    """Check the trait metadata declared on MeasureImageSimilarity's output spec."""
    output_map = dict(similarity=dict(),
    )
    outputs = MeasureImageSimilarity.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
|
import IMP
import IMP.test
import IMP.core
import IMP.atom
class Tests(IMP.test.TestCase):

    def test_bonded(self):
        """Check clone and destroy Hierarchy."""
        m = IMP.Model()
        mh = IMP.atom.read_pdb(self.get_input_file_name("mini.pdb"), m)
        nump = len(m.get_particle_indexes())
        mhc = IMP.atom.create_clone(mh)
        nnump = len(m.get_particle_indexes())
        # Cloning must create exactly one new particle per original particle.
        self.assertEqual(nump * 2, nnump)
        IMP.atom.destroy(mhc)
        mhc = None
        # Destroying the clone restores the original count ...
        self.assertEqual(nump, len(m.get_particle_indexes()))
        IMP.atom.destroy(mh)
        mh = None
        # ... and destroying the original leaves the model empty.
        self.assertEqual(0, len(m.get_particle_indexes()))

    def test_destroy_child(self):
        """Destroy of a child should update the parent"""
        m = IMP.Model()
        mh = IMP.atom.read_pdb(self.get_input_file_name("mini.pdb"), m)
        atoms = IMP.atom.get_by_type(mh, IMP.atom.ATOM_TYPE)
        self.assertEqual(len(atoms), 68)
        IMP.atom.destroy(atoms[0])
        # This will fail if the atom is not removed from the parent residue
        atoms = IMP.atom.get_by_type(mh, IMP.atom.ATOM_TYPE)
        self.assertEqual(len(atoms), 67)

if __name__ == '__main__':
    IMP.test.main()
|
ot "
"information.")
self.adjust_slot(session, logger,
dbmachine, dbchassis, slot, multislot)
elif slot is not None:
dbchassis = None
for dbslot in dbmachine.chassis_slot:
if dbchassis and dbslot.chassis != dbchassis:
raise ArgumentError("Machine in multiple chassis, please "
"use --chassis argument.")
dbchassis = dbslot.chassis
if not dbchassis:
raise ArgumentError("Option --slot requires --chassis "
"information.")
self.adjust_slot(session, logger,
dbmachine, dbchassis, slot, multislot)
dblocation = get_location(session, **arguments)
if dblocation:
loc_clear_chassis = False
for dbslot in dbmachine.chassis_slot:
dbcl = dbslot.chassis.location
if dbcl != dblocation:
if chassis or slot is not None:
raise ArgumentError("{0} conflicts with chassis {1!s} "
"location {2}."
.format(dblocation, dbslot.chassis,
dbcl))
else:
loc_clear_chassis = True
if loc_clear_chassis:
del dbmachine.chassis_slot[:]
dbmachine.location = dblocation
if model:
# If overriding model, should probably overwrite default
# machine specs as well.
dbmodel = Model.get_unique(session, name=model, vendor=vendor,
compel=True)
if not dbmodel.model_type.isMachineType():
raise ArgumentError("The update_machine command cannot update "
"machines of type %s." %
dbmodel.model_type)
# We probably could do this by forcing either cluster or
# location data to be available as appropriate, but really?
# Failing seems reasonable.
if dbmodel.model_type != dbmachine.model.model_type and \
(dbmodel.model_type.isVirtualMachineType() or
dbmachine.model.model_type.isVirtualMachineType()):
raise ArgumentError("Cannot change machine from %s to %s." %
(dbmachine.model.model_type,
dbmodel.model_type))
old_nic_model = dbmachine.model.nic_model
new_nic_model = dbmodel.nic_model
if old_nic_model != new_nic_model:
for iface in dbmachine.interfaces:
if iface.model == old_nic_model:
iface.model = new_nic_model
dbmachine.model = dbmodel
if cpuname or cpuvendor:
dbcpu = Model.get_unique(session, name=cpuname, vendor=cpuvendor,
model_type=CpuType.Cpu, compel=True)
dbmachine.cpu_model = dbcpu
if cpucount is not None:
dbmachine.cpu_quantity = cpucount
if memory is not None:
dbmachine.memory = memory
if serial is not None:
dbmachine.serial_no = serial
if comments is not None:
dbmachine.comments = comments
if uuid:
q = session.query(Machine)
q = q.filter_by(uuid=uuid)
existing = q.first()
if existing:
raise ArgumentError("{0} is already using UUID {1!s}."
.format(existing, uuid))
dbmachine.uuid = uuid
elif clear_uuid:
dbmachine.uuid = None
if uri and not dbmachine.model.model_type.isVirtualMachineType():
raise ArgumentError("URI can be specified only for virtual "
"machines and the model's type is %s" %
dbmachine.model.model_type)
if uri is not None:
dbmachine.uri = uri
# FIXME: For now, if a machine has its interface(s) in a portgroup
# this command will need to be followed by an update_interface to
# re-evaluate the portgroup for overflow.
# It would be better to have --pg and --autopg options to let it
# happen at this point.
if cluster or vmhost or metacluster:
if not dbmachine.vm_container:
raise ArgumentError("Cannot convert a physical machine to "
"virtual.")
resholder = get_resource_holder(session, logger, hostname=vmhost,
cluster=cluster,
metacluster=metacluster,
compel=False)
move_vm(session, logger, dbmachine, resholder, remap_disk,
allow_metacluster_change, autoip, plenaries)
elif remap_disk:
update_disk_backing_stores(dbmachine, None, None, remap_disk)
if ip:
if dbm | achine.host:
for srv in dbmachine.host.services_provided:
si = srv.service_instance
plenaries.add(si, cls=PlenaryServiceInstanceToplevel)
update_primary_ip(session, logger, dbmachine, ip)
if dbmachine.location != old_location and dbmachine.host:
for vm in dbmachine.host.virtual_machines:
plenaries.add(vm)
vm. | location = dbmachine.location
session.flush()
# Check if the changed parameters still meet cluster capacity
# requiremets
if dbmachine.cluster:
dbmachine.cluster.validate()
if allow_metacluster_change and dbmachine.cluster.metacluster:
dbmachine.cluster.metacluster.validate()
if dbmachine.host and dbmachine.host.cluster:
dbmachine.host.cluster.validate()
for dbinterface in dbmachine.interfaces:
dbinterface.check_pg_consistency(logger=logger)
# The check to make sure a plenary file is not written out for
# dummy aurora hardware is within the call to write(). This way
# it is consistent without altering (and forgetting to alter)
# all the calls to the method.
with plenaries.transaction():
dsdb_runner = DSDBRunner(logger=logger)
dsdb_runner.update_host(dbmachine, oldinfo)
dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
return
def adjust_slot(self, session, logger,
dbmachine, dbchassis, slot, multislot):
for dbslot in dbmachine.chassis_slot:
# This update is a noop, ignore.
# Technically, this could be a request to trim the list down
# to just this one slot - in that case --clearchassis will be
# required.
if dbslot.chassis == dbchassis and dbslot.slot_number == slot:
return
if len(dbmachine.chassis_slot) > 1 and not multislot:
raise ArgumentError("Use --multislot to support a machine in more "
"than one slot, or --clearchassis to remove "
"current chassis slot information.")
if not multislot:
slots = ", ".join(str(dbslot.slot_number) for dbslot in
dbmachine.chassis_slot)
logger.info("Clearing {0:l} out of {1:l} slot(s) "
"{2}".format(dbmachine, dbchassis, slots))
del dbmachine.chassis_slot[:]
q = session.query(ChassisSlot)
q = q.filter_by(chassis=dbchassis, slot_number=slot)
dbslot = q.first()
if dbslot:
if dbslot.machine:
raise ArgumentError("{0} slot {1} already has machine "
"{2}.".format(dbchassis, s |
function
import argparse
import array
import math
import os
import random
import sys
import subprocess
def create_graph(nodes, edges, verbose):
    """Return a uniform random directed graph as a dict of parallel lists.

    Keys: 'nodes', 'edges' (counts), 'n1'/'n2' (edge endpoints, uniform over
    [0, nodes)), and 'length' (edge weights drawn from Exp(1)).
    """
    if verbose:
        print('Creating random graph with {} nodes and {} edges...'.format(nodes, edges))
    # range() replaces Python-2-only xrange(); iteration behavior is identical.
    n1 = [random.randint(0, nodes - 1) for _ in range(edges)]
    n2 = [random.randint(0, nodes - 1) for _ in range(edges)]
    length = [random.expovariate(1.0) for _ in range(edges)]
    return {'nodes': nodes,
            'edges': edges,
            'n1': n1,
            'n2': n2,
            'length': length}
def compute_subgraphs(n, p):
    """Partition nodes 0..n-1 into p contiguous inclusive (start, end) ranges.

    The first n % p ranges receive one extra node, so range sizes differ by at
    most one.  Uses // (floor division) so the endpoints stay integers under
    Python 3's true division as well; on Python 2 the result is unchanged.
    """
    return [(x * (n // p) + min(x, n % p),
             ((x + 1) * (n // p) - 1) + min(x + 1, n % p))
            for x in range(p)]
def find_subgraph_index(n, subgraphs):
    """Return the index of the inclusive (start, end) range containing node n.

    Asserts that exactly one range contains n (ranges are expected to be
    disjoint and to cover n).
    """
    matches = [i for i, (start, end) in enumerate(subgraphs)
               if start <= n <= end]
    assert len(matches) == 1
    return matches[0]
def find_subgraph(n, subgraphs):
    """Return the (start, end) range that contains node n."""
    index = find_subgraph_index(n, subgraphs)
    return subgraphs[index]
def create_clustered_DAG_graph(nodes, edges, nsubgraphs, cluster_factor, verbose):
    """Return a clustered, DAG-leaning random graph as a dict of parallel lists.

    Nodes are partitioned into nsubgraphs contiguous ranges.  With probability
    cluster_factor/100 an edge's target is drawn from the source's own range;
    otherwise the target is at or after the source (n2 >= n1), biasing the
    graph toward acyclicity.  Edge lengths are drawn from Exp(1).
    """
    if verbose:
        print('Creating clustered DAG graph with {} nodes and {} edges...'.format(nodes, edges))
    subgraphs = compute_subgraphs(nodes, nsubgraphs)

    def make_edge():
        n1 = random.randint(0, nodes - 1)
        if random.randint(1, 100) <= cluster_factor:
            # Intra-cluster edge: target drawn from n1's own subgraph range.
            s = find_subgraph(n1, subgraphs)
            n2 = random.randint(*s)
        else:
            # Forward edge: target at or after n1 (min() only guards bounds).
            n2 = random.randint(min(n1, nodes - 1), nodes - 1)
        return (n1, n2)

    # range() replaces Python-2-only xrange(); iteration behavior is identical.
    n1, n2 = zip(*(make_edge() for _ in range(edges)))
    length = [random.expovariate(1.0) for _ in range(edges)]
    return {'nodes': nodes,
            'edges': edges,
            'n1': n1,
            'n2': n2,
            'length': length}
def create_clustered_geometric_graph(nodes, edges, nsubgraphs, cluster_factor, verbose):
    """Return a clustered geometric random graph as a dict of parallel lists.

    The unit square is tiled into a blocks x blocks grid (nsubgraphs must be a
    perfect square); each subgraph's nodes get random positions inside one
    tile.  With probability cluster_factor/100 an edge stays inside its
    source's subgraph; otherwise it goes to a horizontally or vertically
    adjacent tile (with wraparound).  Edge length is the Euclidean distance
    plus exponential noise.
    """
    if verbose:
        print('Creating clustered geometric graph with {} nodes and {} edges...'.format(nodes, edges))
    blocks = int(math.sqrt(nsubgraphs))
    assert blocks**2 == nsubgraphs
    # i // blocks keeps the grid-row arithmetic integral on Python 3 as well;
    # on Python 2 (where / already truncated for ints) the values are unchanged.
    bounds = [((1.0*(i % blocks)/blocks, 1.0*(i % blocks + 1)/blocks),
               (1.0*(i // blocks)/blocks, 1.0*(i // blocks + 1)/blocks))
              for i in range(nsubgraphs)]
    subgraphs = compute_subgraphs(nodes, nsubgraphs)
    # One random position per node, placed inside its subgraph's tile.
    pos = [(random.uniform(*x), random.uniform(*y))
           for (lo, hi), (x, y) in zip(subgraphs, bounds)
           for _ in range(lo, hi+1)]

    def make_edge():
        n1 = random.randint(0, nodes - 1)
        if random.randint(1, 100) <= cluster_factor:
            # Intra-cluster edge.
            s = find_subgraph(n1, subgraphs)
            n2 = random.randint(*s)
        else:
            # Edge to a neighboring tile: pick right neighbor or lower
            # neighbor at random (modular, so edges wrap around the grid).
            i = find_subgraph_index(n1, subgraphs)
            ix, iy = i % blocks, i // blocks
            if random.randint(0, 1) == 0:
                s2 = subgraphs[((ix+1) % blocks) + iy*blocks]
            else:
                s2 = subgraphs[ix + ((iy+1) % blocks)*blocks]
            n2 = random.randint(*s2)
        return (n1, n2)

    n1, n2 = zip(*(make_edge() for _ in range(edges)))
    # Length = geometric distance + exponential noise (rate grows as the
    # distance shrinks; guarded against division by ~zero).
    length = [xlen + random.expovariate(1000/xlen if xlen > 0.0001 else 1)
              for x in range(edges)
              for xlen in [math.sqrt(sum((a - b)**2 for a, b in zip(pos[n1[x]], pos[n2[x]])))]]
    return {'nodes': nodes,
            'edges': edges,
            'n1': n1,
            'n2': n2,
            'length': length}
def metis_graph(g, metis, subgraphs, outdir, verbose):
    # Partition the graph with the external METIS binary, then renumber nodes
    # in place so that nodes assigned to the same partition get contiguous ids.
    # NOTE(review): files are opened in binary mode ('wb'/'rb') but written/read
    # as str -- this is Python-2-only behavior; under Python 3 these calls
    # would raise TypeError.
    if verbose: print('Running METIS...')
    with open(os.path.join(outdir, 'graph.metis'), 'wb') as f:
        # METIS header: node count, edge count, '000' = no vertex/edge weights.
        f.write('{:3d} {:3d} 000\n'.format(g['nodes'], g['edges']))
        for n in xrange(g['nodes']):
            # Adjacency line for node n; METIS ids are 1-based, hence n2+1.
            # Each neighbor is followed by a unit edge weight ('1').
            f.write(' '.join('{:3d} 1'.format(n2+1) for n1, n2 in zip(g['n1'], g['n2']) if n1 == n))
            f.write('\n')
    subprocess.check_call([metis, os.path.join(outdir, 'graph.metis'), str(subgraphs)])
    # METIS writes one partition id ("color") per node, one per line.
    with open(os.path.join(outdir, 'graph.metis.part.{}'.format(subgraphs)), 'rb') as f:
        colors = [int(x) for x in f.read().split()]
    # mapping: old node id -> new node id, ordered so same-color nodes are
    # contiguous in the new numbering.
    mapping = dict(zip(sorted(xrange(g['nodes']), key = lambda x: colors[x]), range(g['nodes'])))
    g['n1'] = [mapping[g['n1'][x]] for x in xrange(g['edges'])]
    g['n2'] = [mapping[g['n2'][x]] for x in xrange(g['edges'])]
def sort_graph(g, verbose):
    """Reorder g's edge lists in place, sorted lexicographically by (n1, n2).

    BUG FIX: the original built mapping[old_index] -> sorted_rank and then
    indexed with it (new[x] = old[mapping[x]]), which applies the INVERSE of
    the sorting permutation and does not actually leave the lists sorted
    (e.g. n1=[1,2,0] became [2,0,1]).  Indexing by the sorted order itself
    fixes this; 'length' stays aligned with its (n1, n2) pair either way.
    """
    if verbose:
        print('Sorting graph...')
    # Edge indices in the order that sorts the (source, target) pairs.
    order = sorted(range(g['edges']), key=lambda x: (g['n1'][x], g['n2'][x]))
    g['n1'] = [g['n1'][i] for i in order]
    g['n2'] = [g['n2'][i] for i in order]
    g['length'] = [g['length'][i] for i in order]
def solve_graph(g, source, verbose):
    """Single-source shortest paths by Bellman-Ford-style edge relaxation.

    Repeatedly relaxes every edge until a full pass makes no improvement.
    Returns the list of shortest distances from `source`; 1e100 marks nodes
    that are unreachable.  (The parent array is computed but not returned,
    matching the original behavior.)
    """
    if verbose:
        print('Solving graph...')
    parent = [-1 for _ in range(g['nodes'])]
    dist = [1e100 for _ in range(g['nodes'])]
    dist[source] = 0
    while True:
        improved = 0
        for n1, n2, length in zip(g['n1'], g['n2'], g['length']):
            candidate = length + dist[n1]
            if candidate < dist[n2]:
                dist[n2] = candidate
                parent[n2] = n1
                improved += 1
        if improved == 0:
            break
    return dist
def write_graph(g, problems, outdir, verbose):
    # Serialize the graph to outdir: raw binary edge arrays, a Graphviz dump,
    # a text manifest, and one precomputed shortest-path result file per
    # randomly chosen source node.
    # NOTE(review): text is written to files opened in 'wb' mode -- this is
    # Python-2-only behavior and would raise TypeError under Python 3.
    if verbose: print('Writing graph...')
    with open(os.path.join(outdir, 'edges.dat'), 'wb') as f:
        # Edge arrays stored back-to-back: int32 sources, int32 targets,
        # float32 lengths.
        array.array('i', g['n1']).tofile(f)
        array.array('i', g['n2']).tofile(f)
        array.array('f', g['length']).tofile(f)
    with open(os.path.join(outdir, 'graph.dot'), 'wb') as f:
        # Backward edges (n2 <= n1) are drawn dotted for readability.
        f.write('digraph {\n')
        f.write('\n'.join('{} -> {} [ style = "{}"]'.format(e1, e2, 'dotted' if e2 <= e1 else 'solid') for e1, e2 in zip(g['n1'], g['n2'])))
        f.write('\n}\n')
    with open(os.path.join(outdir, 'graph.txt'), 'w') as f:
        f.write('nodes {:d}\n'.format(g['nodes']))
        f.write('edges {:d}\n'.format(g['edges']))
        f.write('data edges.dat\n')
        # Solve `problems` distinct random sources and record each result file
        # in the manifest.  NOTE(review): solve_graph returns distances, so
        # `parents` here actually holds the distance array -- confirm intent.
        sources = random.sample(xrange(g['nodes']), problems)
        for s in sources:
            parents = solve_graph(g, s, verbose)
            with open(os.path.join(outdir, 'result_{:d}.dat'.format(s)), 'wb') as f2:
                array.array('f', parents).tofile(f2)
            f.write('source {:d} result_{:d}.dat\n'.format(s, s))
if __name__ == '__main__':
    # Command-line driver: build a graph of the requested type, optionally
    # repartition it with METIS, sort the edge lists, and write everything
    # (including precomputed shortest-path results) to --outdir.
    p = argparse.ArgumentParser(description='graph generator')
    p.add_argument('--nodes', '-n', type=int, default=10)
    p.add_argument('--edges', '-e', type=int, default=20)
    p.add_argument('--type', '-t', default='random', choices=['random', 'clustered_DAG', 'clustered_geometric'])
    p.add_argument('--subgraphs', '-s', type=int, default=1)
    p.add_argument('--cluster-factor', '-c', type=int, default=95)
    p.add_argument('--problems', '-p', type=int, default=1)
    p.add_argument('--randseed', '-r', type=int, default=12345)
    p.add_argument('--metis-path', default='./metis-install/bin/gpmetis')
    p.add_argument('--metis', '-m', action='store_true')
    p.add_argument('--outdir', '-o', required=True)
    p.add_argument('--verbose', '-v', action='store_true')
    args = p.parse_args()
    # Fixed seed by default so runs are reproducible.
    random.seed(args.randseed)
    if args.type == 'random':
        G = create_graph(args.nodes, args.edges, args.verbose)
    elif args.type == 'clustered_DAG':
        G = create_clustered_DAG_graph(args.nodes, args.edges, args.subgraphs, args.cluster_factor, args.verbose)
    elif args.type == 'clustered_geometric':
        G = create_clustered_geometric_graph(args.nodes, args.edges, args.subgraphs, args.cluster_factor, args.verbose)
    else:
        # BUG FIX: the original read `assert false` (NameError at runtime);
        # the constant is False.  Unreachable anyway thanks to `choices=`.
        assert False, 'unknown graph type: {}'.format(args.type)
    try:
        os.mkdir(args.outdir)
    except OSError:
        # Directory probably exists already; the isdir assertion below
        # catches any other failure mode.
        pass
    assert os.path.isdir(args.outdir)
    if args.metis:
        assert os.path.isfile(args.metis_path)
        metis_graph(G, args.metis_path, args.subgraphs, args.outdir, args.verbose)
    sort_graph(G, args.verbose)
    write_graph(G, args.problems, args.outdir, args.verbose)
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all creative templates.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
    """Fetch and print every creative template, paging through the results.

    `client` is a configured DfpClient; pages of SUGGESTED_PAGE_LIMIT results
    are requested until a response comes back without a 'results' key.
    """
    # Initialize appropriate service.
    creative_template_service = client.GetService(
        'CreativeTemplateService', version='v201502')
    # Create a filter statement.
    statement = dfp.FilterStatement()
    # Get creative templates by statement.
    while True:
        response = creative_template_service.getCreativeTemplatesByStatement(
            statement.ToStatement())
        if 'results' in response:
            # Display results.
            for template in response['results']:
                print ('Creative template with id \'%s\', name \'%s\', and type \'%s\' '
                       'was found.' % (template['id'],
                                       template['name'],
                                       template['type']))
            # Advance to the next page.
            statement.offset += dfp.SUGGESTED_PAGE_LIMIT
        else:
            break
    # Parenthesized so the line is valid on both Python 2 and 3 (the original
    # used a Python-2-only print statement).
    print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
    # Initialize client object.
    # Credentials and properties are loaded from ~/googleads.yaml (see the
    # module docstring).
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client)
|
#!/usr/bin/python
"""
This example demonstrates several features of PyLaTeX.
It includes plain equations, tables, equations using numpy objects, tikz plots,
and figures.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
# begin-doc-include
import numpy as np
from pylatex import Document, Section, Subsection, Tabular, Math, TikZ, Axis, \
Plot, Figure, Package, Matrix
from pylatex.utils import italic
import os
if __name__ == '__main__':
    # Build a demo document exercising text, math, tables, tikz plots and
    # figures, then render it to full.pdf.
    image_filename = os.path.join(os.path.dirname(__file__), 'kitten.jpg')
    doc = Document()
    doc.packages.append(Package('geometry', options=['tmargin=1cm',
                                                     'lmargin=10cm']))
    with doc.create(Section('The simple stuff')):
        doc.append('Some regular text and some')
        doc.append(italic('italic text. '))
        # Special LaTeX characters are escaped automatically by pylatex.
        doc.append('\nAlso some crazy characters: $&#{}')
        with doc.create(Subsection('Math that is incorrect')):
            doc.append(Math(data=['2*3', '=', 9]))
        with doc.create(Subsection('Table of something')):
            with doc.create(Tabular('rc|cl')) as table:
                table.add_hline()
                table.add_row((1, 2, 3, 4))
                table.add_hline(1, 2)
                table.add_empty_row()
                table.add_row((4, 5, 6, 7))
    # Numpy objects can be embedded directly in Math via Matrix.
    a = np.array([[100, 10, 20]]).T
    M = np.matrix([[2, 3, 4],
                   [0, 0, 1],
                   [0, 0, 2]])
    with doc.create(Section('The fancy stuff')):
        with doc.create(Subsection('Correct matrix equations')):
            doc.append(Math(data=[Matrix(M), Matrix(a), '=', Matrix(M * a)]))
        with doc.create(Subsection('Beautiful graphs')):
            with doc.create(TikZ()):
                plot_options = 'height=6cm, width=6cm, grid=major'
                with doc.create(Axis(options=plot_options)) as plot:
                    plot.append(Plot(name='model', func='-x^5 - 242'))
                    coordinates = [
                        (-4.77778, 2027.60977),
                        (-3.55556, 347.84069),
                        (-2.33333, 22.58953),
                        (-1.11111, -493.50066),
                        (0.11111, 46.66082),
                        (1.33333, -205.56286),
                        (2.55556, -341.40638),
                        (3.77778, -1169.24780),
                        (5.00000, -3269.56775),
                    ]
                    plot.append(Plot(name='estimate', coordinates=coordinates))
        with doc.create(Subsection('Cute kitten pictures')):
            with doc.create(Figure(position='h!')) as kitten_pic:
                kitten_pic.add_image(image_filename, width='120px')
                kitten_pic.add_caption('Look it\'s on its back')
    doc.generate_pdf('full')
|
# -*- | coding: utf-8 -*-
from __future__ import unicode_lit | erals
from django.db import migrations
from corehq.form_processor.models import CaseTransaction
from corehq.sql_db.operations import RawSQLMigration, HqRunSQL
migrator = RawSQLMigration(('corehq', 'sql_accessors', 'sql_templates'), {
'TRANSACTION_TYPE_FORM': CaseTransaction.TYPE_FORM
})
class Migration(migrations.Migration):
    # Drops the stale get_ledger_values_for_cases(TEXT[]) SQL function and
    # reinstalls it from the SQL template via the module-level `migrator`.
    dependencies = [
        ('sql_accessors', '0024_update_save_ledger_values'),
    ]
    operations = [
        HqRunSQL(
            # Forward SQL drops the old signature; the reverse operation is a
            # harmless no-op SELECT.
            "DROP FUNCTION IF EXISTS get_ledger_values_for_cases(TEXT[])",
            "SELECT 1"
        ),
        migrator.get_migration('get_ledger_values_for_cases.sql'),
    ]
|
#!/usr/bin/python
"""One-off migration: copy documents_document rows from a source GeoNode
database to a destination one, remapping Django content-type ids on the way.

Connection passwords come from the running user's ~/.pgpass file; the source
and destination hosts are hard-coded below.
"""
import os
import psycopg2
import sys
import django_content_type_mapping

# Each ~/.pgpass line is host:port:database:user:password.
pgpasses = []
with open("/home/" + os.getlogin() + "/.pgpass", "r") as pgpass_file:
    for line in pgpass_file:
        pgpasses.append(line.rstrip("\n").split(":"))

# NOTE(review): if either host is missing from .pgpass, src_pgpass/dst_pgpass
# are never bound and the connect() calls below raise NameError.
for pgpass in pgpasses:
    if pgpass[0] == "54.236.235.110" and pgpass[3] == "geonode":
        src_pgpass = pgpass
    if pgpass[0] == "54.197.226.56" and pgpass[3] == "geonode":
        dst_pgpass = pgpass

src = psycopg2.connect(host=src_pgpass[0], database="geonode2", user=src_pgpass[3], password=src_pgpass[4])
dst = psycopg2.connect(host=dst_pgpass[0], database="geonode", user=dst_pgpass[3], password=dst_pgpass[4])
src_cur = src.cursor()
dst_cur = dst.cursor()

src_cur.execute("select resourcebase_ptr_id, content_type_id, object_id, doc_file, extension, popular_count, share_count from documents_document")
for src_row in src_cur:
    # Build the destination row; the untranslated *_en text columns and the
    # doc_type/doc_url columns are left NULL.
    assignments = [
        src_row[0],   # resourcebase_ptr_id
        None,         # title_en
        None,         # abstract_en
        None,         # purpose_en
        None,         # constraints_other_en
        None,         # supplemental_information_en
        None,         # distribution_description_en
        None,         # data_quality_statement_en
        django_content_type_mapping.get_django_content_type_id(src_row[1]),  # content_type_id
        src_row[2],   # object_id
        src_row[3],   # doc_file
        src_row[4],   # extension
        None,         # doc_type
        None,         # doc_url
    ]
    try:
        dst_cur.execute("insert into documents_document(resourcebase_ptr_id, title_en, abstract_en, purpose_en, constraints_other_en, supplemental_information_en, distribution_description_en, data_quality_statement_en, content_type_id, object_id, doc_file, extension, doc_type, doc_url) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", assignments)
        dst.commit()
    except Exception as error:
        # Log the failing row and keep going with the next one.
        print("")
        print(type(error))
        print(str(error) + "select resourcebase_ptr_id, content_type_id, object_id, doc_file, extension, popular_count, share_count from documents_document")
        print(str(src_row))
        dst.rollback()

dst.commit()
src_cur.close()
dst_cur.close()
src.close()
dst.close()
|
#coding=utf-8
# Copyright (C) 2014 by Víctor Romero Blanco <info at playcircular dot com>.
# http://playcircular.com/
# It's licensed under the AFFERO GENERAL PUBLIC LICENSE unless stated otherwise.
# You can get copies of the licenses here: http://www.affero.org/oagpl.html
# AFFERO GENERAL PUBLIC LICENSE is also included in the file called "LICENSE".
from django.contrib import admin
from django.conf import settings
from configuracion.models import *
from django.contrib.auth.models import User
from django.http import HttpResponse, Http404, HttpResponseRedirect, HttpResponseNotAllowed
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from grupos.models import *
from grupos.forms import *
from actividades.models import *
from usuarios.models import *
from django.core import serializers
from django.db.models import Q
###################################################################################################
@login_required
def recarga_actividad(request):
    """AJAX view: return the users and categories available for the selected
    groups, serialized as JSON.

    Expects a POST field 'seleccionados' holding comma-separated group ids.
    Non-AJAX or GET requests yield an empty JSON list.
    """
    if request.is_ajax() and request.POST:
        seleccionados = request.POST.get('seleccionados')
        str_grupos = seleccionados.split(',')
        id_grupos = []
        for item in str_grupos:
            numero = int(item)
            id_grupos.append(numero)
        if len(id_grupos) > 0:
            n_grupos_administrados = Miembro.objects.filter(usuario=request.user, activo=True, nivel=u'Administrador').count()
            try:
                # Categories of the selected groups (plus superadmin-wide
                # ones), restricted to the request language.
                categorias = Idiomas_categoria.objects.filter((Q(categoria__grupo__in=id_grupos) | Q(categoria__superadmin=True)) & Q(idioma=request.LANGUAGE_CODE))
            except Idiomas_categoria.DoesNotExist:
                # NOTE(review): QuerySet.filter() never raises DoesNotExist,
                # so this fallback (no language filter, ordered by default
                # language) looks unreachable -- confirm before relying on it.
                categorias = Idiomas_categoria.objects.filter(Q(categoria__grupo__in=id_grupos) | Q(categoria__superadmin=True)).order_by('-idioma_default')
            if request.user.is_superuser or n_grupos_administrados > 0:
                usuarios_qs = Miembro.objects.filter(grupo__in=id_grupos, activo=True).values_list('usuario', flat=True)
                if request.user.is_superuser:
                    # The superadmin may publish without belonging to any
                    # group, so that group admins cannot control them.
                    usuarios_qs = list(usuarios_qs) + [request.user.pk]
                usuarios = User.objects.filter(pk__in=usuarios_qs).distinct()
            else:
                usuarios = User.objects.filter(pk=request.user.pk)
            datos = list(usuarios) + list(categorias)
        else:
            datos = []
    else:
        datos = []
    # Serialize as JSON; only these fields are needed by the client.
    data = serializers.serialize("json", datos, fields=('pk', 'username', 'nombre', 'categoria'))
    return HttpResponse(data, mimetype="application/javascript")
################################################################################################### |
"""Test the IPython.kernel public API
Authors
----- | --
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The | full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.testing import decorators as dec
from IPython.kernel import launcher, connect
from IPython import kernel
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
@dec.parametric
def test_kms():
    """Both KernelManager flavors must be re-exported by IPython.kernel."""
    for prefix in ("", "Multi"):
        name = prefix + "KernelManager"
        yield nt.assert_true(name in dir(kernel), name)
@dec.parametric
def test_kcs():
    """Both KernelClient flavors must be re-exported by IPython.kernel."""
    for prefix in ("", "Blocking"):
        name = prefix + "KernelClient"
        yield nt.assert_true(name in dir(kernel), name)
@dec.parametric
def test_launcher():
    """Everything in launcher.__all__ must be re-exported by IPython.kernel."""
    for exported in launcher.__all__:
        yield nt.assert_true(exported in dir(kernel), exported)
@dec.parametric
def test_connect():
    """Everything in connect.__all__ must be re-exported by IPython.kernel."""
    for exported in connect.__all__:
        yield nt.assert_true(exported in dir(kernel), exported)
|
"""
"""
from traceback import format_exc as debug
from vyapp.stdout import Stdout
from vyapp.tools import exec_quiet, set_status_msg
from vyapp.ask import *
import sys
def redirect_stdout(area):
    """Route sys.stdout writes to the given text area.

    Any previous registration of this area is dropped first so the area is
    never registered twice, then a status message reports the redirection.
    """
    try:
        sys.stdout.remove(area)
    except ValueError:
        # The area was not registered yet; nothing to remove.
        pass
    sys.stdout.append(Stdout(area))
    set_status_msg('Output redirected to %s' % area.index('insert'))
def install(area):
    """Bind the stdout-redirection key map on the given area (NORMAL mode)."""
    area.install(
        # Delete all ranges carrying the Stdout tag.
        ('NORMAL', '<Control-W>', lambda event: event.widget.tag_delete_ranges(Stdout.TAG_CODE)),
        # Restore the original stdout.
        ('NORMAL', '<Control-Tab>', lambda event: sys.stdout.restore()),
        # Remove the Stdout tag entirely.
        ('NORMAL', '<Key-W>', lambda event: event.widget.tag_delete(Stdout.TAG_CODE)),
        # Stop mirroring output into this widget (quietly, if not mirrored).
        ('NORMAL', '<Control-w>', lambda event: exec_quiet(sys.stdout.remove, event.widget)),
        # Redirect stdout into this widget.
        ('NORMAL', '<Tab>', lambda event: redirect_stdout(event.widget)))
|
-in to getting access to it.
The context is useful as it can pass internal objects around and can
control special execution features such as reading data from
environment variables.
A context can be used as context manager in which case it will call
:meth:`close` on teardown.
.. versionadded:: 2.0
Added the `resilient_parsing`, `help_option_names`,
`token_normalize_func` parameters.
.. versionadded:: 3.0
Added the `allow_extra_args` and `allow_interspersed_args`
parameters.
.. versionadded:: 4.0
Added the `color`, `ignore_unknown_options`, and
`max_content_width` parameters.
:param command: the command class for this context.
:param parent: the parent context.
:param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it is usually
the name of the script, for commands below it it's
the name of the script.
:param obj: an arbitrary object of user data.
:param auto_envvar_prefix: the prefix to use for automatic environment
variables. If this is `None` then reading
from environment variables is disabled. This
does not affect manually set environment
variables which are always read.
:param default_map: a dictionary (like object) with default values
for parameters.
:param terminal_width: the width of the terminal. The default is
inherit from parent context. If no context
defines the terminal width then auto
detection will be applied.
:param max_content_width: the maximum width for content rendered by
Click (this currently only affects help
pages). This defaults to 80 characters if
not overridden. In other words: even if the
terminal is larger than that, Click will not
format things wider than 80 characters by
default. In addition to that, formatters might
add some safety mapping on the right.
:param resilient_parsing: if this flag is enabled then Click will
parse without any interactivity or callback
invocation. This is useful for implementing
things such as completion support.
:param allow_extra_args: if this is set to `True` then extra arguments
at the end will not raise an error and will be
kept on the context. The default is to inherit
from the command.
:param allow_interspersed_args: if this is set to `False` then options
and arguments cannot be mixed. The
default is to inherit from the command.
:param ignore_unknown_options: instructs click to ignore options it does
not know and keeps them for later
processing.
:param help_option_names: optionally a list of strings that define how
the default help parameter is named. The
default is ``['--help']``.
:param token_normalize_func: an optional function that is used to
normalize tokens (options, choices,
etc.). This for instance can be used to
implement case insensitive behavior.
:param color: controls if the terminal supports ANSI colors or not. The
default is autodetection. This is only needed if ANSI
codes are used in texts that Click prints which is by
default not the case. This for instance would affect
help output.
"""
def __init__(self, command, parent=None, info_name=None, obj=None,
auto_envvar_prefix=None, default_map=None,
terminal_width=None, max_content_width=None,
resilient_parsing=False, allow_extra_args=None,
allow_interspersed_args=None,
ignore_unknown_options=None, help_option_names=None,
token_normalize_func=None, color=None):
#: the parent context or `None` if none exists.
self.parent = parent
#: the :class:`Command` for this context.
self.command = command
#: the descriptive information name
self.info_name = info_name
#: the parsed parameters except if the value is hidden in which
#: case it's not remembered.
self.params = {}
#: the leftover arguments.
self.args = []
if obj is None and parent is not None:
obj = parent.obj
#: the user object stored.
self.obj = obj
#: A dictionary (-like object) with defaults for parameters.
if default_map is None \
and parent is not None \
and parent.default_map is not None:
default_map = parent.default_map.get(info_name)
self.default_map = default_map
#: This flag indicates if a subcommand is going to be executed. A
#: group callback can use this information to figure out if it's
#: being executed directly or because the execution flow passes
#: onwards to a subcommand. By default it's None, but it can be
#: the name of the subcommand to execute.
#:
#: If chaining is enabled this will be set to ``'*'`` in case
#: any commands are executed. It is however not possible to
#: figure out which ones. If you require this knowledge you
#: should use a :func:`resultcallback`.
self.invoked_subcommand = None
if terminal_width is None and parent is not None:
terminal_width = parent.terminal_width
#: The width of the terminal (None is autodetection).
self.terminal_width = terminal_width
if max_content_width is None and parent is not None:
max_co | ntent_width = parent.max_content_width
#: The maximum width of formatted content (None implies a sensible
#: default which is 80 for most things).
self.max_content_width = max_con | tent_width
if allow_extra_args is None:
allow_extra_args = command.allow_extra_args
#: Indicates if the context allows extra args or if it should
#: fail on parsing.
#:
#: .. versionadded:: 3.0
self.allow_extra_args = allow_extra_args
if allow_interspersed_args is None:
allow_interspersed_args = command.allow_interspersed_args
#: Indicates if the context allows mixing of arguments and
#: options or not.
#:
#: .. versionadded:: 3.0
self.allow_interspersed_args = allow_interspersed_args
if ignore_unknown_options is None:
ignore_unknown_options = command.ignore_unknown_options
#: Instructs click to ignore options that a command does not
#: understand and will store it on the context for later
#: processing. This is primarily useful for situations where you
#: want to call into external programs. Generally this pattern is
        #: strongly discouraged because it's not possible to losslessly
#: forward all arguments.
#:
#: .. versionadded:: 4.0
self.ignore_unknown_options = ignore_unknown_options
if help_option_names is None:
if parent is not None:
help_option_names = parent.help_option_names
else:
help_option_names = ['--help']
#: The names for the help options.
self.help_option_names = h |
try:
from xml.etree import cElementTree as etree
except ImportError:
from xml.etree import ElementTree as etree
import xml2nrn
# module names derived from the namespace. Add new tags in proper namespace
import neuroml
import metadata
import morphml
import biophysics
class FileWrapper:
    """Adapter that feeds a file to the XML parser one line per read() call.

    The parser asks for a byte count, but we always hand back exactly one
    line so that ``lineno`` tracks how far the parser has progressed, which
    lets element handlers report accurate line numbers.
    """

    def __init__(self, source):
        # Underlying file-like object; must support readline().
        self.source = source
        # Number of lines handed to the parser so far.
        self.lineno = 0

    def read(self, bytes):
        """Return the next line of the source, ignoring the requested size."""
        line = self.source.readline()
        self.lineno = self.lineno + 1
        return line
# for each '{namespace}element' call the corresponding module.func
def handle(x2n, fw, event, node):
    """Dispatch one iterparse event to the matching module-level handler.

    The handler name is derived from the element's namespaced tag:
    'start' events look up <module>.<tag> and 'end' events look up
    <module>.<tag>_end, where <module> is taken from the last-but-one
    path component of the namespace URI (expected to match one of the
    imported modules: neuroml, metadata, morphml, biophysics).

    Returns 0 only when a 'start' event has no handler (so the caller can
    skip the element); returns 1 in every other case.
    """
    tag = node.tag.split('}')
    # hopefully a namespace token corresponding to an imported module name
    ns = tag[0].split('/')[-2]
    tag = ns+'.'+tag[1] #namespace.element should correspond to module.func
    f = None
    try:
        # eval resolves the dotted name against this module's globals; if
        # no such handler exists, f simply stays None.
        if event == 'start':
            f = eval(tag)
        elif event == 'end':
            f = eval(tag + '_end')
    except:
        pass
    if f:
        # Keep the locator in sync so handlers can report line numbers.
        x2n.locator.lineno = fw.lineno
        try:
            f(x2n, node) # handle the element when it opens
        except:
            print tag,' failed at ', x2n.locator.getLineNumber()
    elif event == 'start':
        print 'ignore', node.tag # no function to handle the element
        return 0
    return 1
def rdxml(fname, ho=None):
    """Parse the NeuroML file *fname*, feeding each element to xml2nrn.

    Elements whose 'start' event has no handler are remembered so their
    'end' event is skipped too. If *ho* is given, ho.parsed() is called
    with the translator object once the whole file has been read.
    """
    source = FileWrapper(open(fname))
    x2n = xml2nrn.XML2Nrn()
    skipped = None
    for ev, element in etree.iterparse(source, events=("start", "end")):
        if skipped == element:
            continue
        if handle(x2n, source, ev, element) == 0:
            skipped = element
    if ho:
        ho.parsed(x2n)
# Smoke test: parse a local sample file when run as a script.
if __name__ == '__main__':
    rdxml('temp.xml')
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
Aggregate top level objects from all Scapy modules.
"""
from scapy.base_classes import *
from scapy.config import *
from scapy.dadict import *
from scapy.data import *
from scapy.error import *
from scapy.themes import *
from scapy.arch import *
from scapy.interfaces import *
from scapy.plist import *
from scapy.fields import *
from scapy.packet import *
from scapy.asn1fields import *
from scapy.asn1packet import *
from scapy.utils import | *
from scapy.route import *
from scapy.sendrecv import *
from scapy.sessions import *
from scapy.supersocket import *
from scapy.volatile import *
from scapy.as_resolvers import *
from scapy.automaton import *
from scapy.a | utorun import *
from scapy.main import *
from scapy.consts import *
from scapy.compat import raw # noqa: F401
from scapy.layers.all import *
from scapy.asn1.asn1 import *
from scapy.asn1.ber import *
from scapy.asn1.mib import *
from scapy.pipetool import *
from scapy.scapypipes import *
# The IPv6 helper and routing modules are only loaded when IPv6 support was
# detected at configuration time, so gate these star-imports on the flag.
if conf.ipv6_enabled:  # noqa: F405
    from scapy.utils6 import *  # noqa: F401
    from scapy.route6 import *  # noqa: F401
from scapy.ansmachine import *
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.4.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x01\xf1\
\x00\
\x00\x09\x00\x78\x9c\xdd\x96\x51\x6f\x9b\x30\x10\xc7\xdf\xfb\x29\
\x3c\x1e\x9a\x4d\x15\xd0\x49\x7b\x98\x52\x48\x34\x92\x4c\xea\xd4\
\xaa\x54\x69\x55\xf5\xd1\x98\x0b\x71\x01\xdb\x35\x26\x09\xdf\x7e\
\x86\xb0\x96\xa4\x2c\xa4\x1d\x4f\xe3\xc5\xd8\x77\xbe\xdf\x9d\x8d\
\xff\xc6\x19\x6f\xd2\x04\xad\x40\x66\x94\x33\xd7\xf8\x6a\x9d\x1b\
\x08\x18\xe1\x21\x65\x91\x6b\xe4\x6a\x61\x7e\x37\xc6\xa3\x13\xe7\
\xd3\xf4\x66\x72\xf7\xe8\xcf\xd0\x26\x80\x44\xf7\xcb\x66\x77\xda\
\xe8\x04\xe9\xc7\x59\xf0\x24\x04\x89\xaa\x26\x74\x0d\xc6\x6b\x43\
\x65\x54\x54\x25\x30\xf2\x38\x8f\x53\x2c\xe3\x0c\x79\x58\x3a\xf6\
\x76\xf0\xd5\x29\xa8\xcd\x68\x29\x61\xe1\x1a\x4b\xa5\xc4\xd0\xb6\
\x41\x52\x62\xd2\x10\x2c\x51\xa8\x25\x67\xa6\x90\xfc\x09\x88\xca\
\x2c\x2e\x23\xbb\xc1\x68\x70\x66\x7a\x0a\x7a\x80\x00\xcd\xa9\x82\
\xb7\x1c\xfb\x0f\xa8\x93\xbd\x5e\xaf\x2d\x49\x75\xb5\x01\x66\x31\
\xe1\xa9\xc8\x95\x5e\x1e\x4b\xbf\xfd\x85\xec\x17\xb7\xea\x9d\xe4\
\x43\xeb\xd6\x88\xdc\x88\x9b\xbd\x09\xdc\x51\xc2\xb3\xb2\x28\xb7\
\xf7\x53\x6e\x0f\xde\x1e\xbb\x25\xf1\xa3\x98\x21\xac\x20\xe1\x42\
\x7f\x2e\x87\xe9\xd3\x17\xbf\x3e\xf8\x21\x27\x35\xff\x30\x94\x93\
\x3c\x05\xa6\xb0\xd2\xdf\x72\x1f\xdc\x20\xe1\xd1\x31\x60\x4f\xfb\
\xf5\xc1\x5b\x70\x99\xa7\xc7\x00\x7f\x96\x8e\x7d\x10\x45\x82\x19\
\xa8\x4e\xa4\x5f\xb9\xa1\x5b\xd5\x07\xf3\x59\x11\xbd\x49\x12\xda\
\x0e\xfc\x6e\x99\x93\xca\xaf\x1f\xa6\x89\x85\x68\xd5\x98\x1d\xa4\
\xf9\xa3\xf6\x3a\x1a\xea\xd8\xdb\x03\xff\x7e\x05\xf0\x2b\xfd\xfb\
\xb8\x0a\x6c\xf5\xb3\xa3\xa4\x1a\x72\x85\x59\x94\xe3\x08\x4a\x5a\
\xd6\x93\x2a\x88\x42\xd0\x66\x12\x65\xbf\x33\x11\x1f\x93\xb8\xcc\
\xe3\x92\x85\xb0\x19\x22\xbf\xf0\x2f\x3f\xb8\xd4\x7b\xbd\xbd\x45\
\x2f\x20\x3b\x74\x5f\x5d\x03\xcb\xff\xdb\x0b\xeb\xdb\xbf\xa1\x9f\
\xf0\x0a\x67\x44\x52\xa1\x86\x09\x27\x95\x98\x5a\x95\x65\x90\x62\
\x9a\x28\x3e\x1c\xcf\xef\xbd\x5f\xb3\xc9\x9d\x3b\x40\x67\x28\xac\
\x45\xd7\xaa\x48\x7a\x60\x70\x8a\x53\x71\xe1\xdd\x4c\x1f\x2b\x3b\
\x64\x04\x0b\xf8\xbc\x13\xe9\xcb\x45\x7b\xf2\x73\x60\x21\xba\xa2\
\x2c\xee\xcc\xfb\x75\xf3\x1d\x7b\xfb\x23\xf3\x1b\xc5\xa5\x8d\x58\
\
"
qt_resource_name = b"\
\x00\x15\
\x0c\xd3\x2e\x3c\
\x00\x44\
\x00\x65\x00\x66\x00\x61\x00\x75\x00\x6c\x00\x74\x00\x42\x00\x6f\x00\x6f\x00\x6b\x00\x6d\x00\x61\x00\x72\x00\x6b\x00\x73\x00\x2e\
\x00\x78\x00\x62\x00\x65\x00\x6c\
"
qt_resource | _struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00 | \x00\
"
def qInitResources():
    """Register the embedded resource data with Qt's resource system."""
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded resource data from Qt's resource system."""
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
# Register the resources as soon as this generated module is imported.
qInitResources()
|
es)
class FakeBackend(BaseModel):
    """State for one backend instance-port of a load balancer: the port
    number plus the policy names currently attached to that port."""

    def __init__(self, instance_port):
        self.instance_port = instance_port
        # Policy names applied to this backend port; managed by the backend.
        self.policy_names = []

    def __repr__(self):
        return "FakeBackend(inp: %s, policies: %s)" % (self.instance_port,
                                                       self.policy_names)
class FakeLoadBalancer(BaseModel):
    """In-memory model of a classic Elastic Load Balancer.

    Tracks listeners, backends, policies, tags, attributes and the health
    check, and supports the CloudFormation lifecycle (create/update/delete)
    plus ``Fn::GetAtt`` resolution.
    """
    def __init__(self, name, zones, ports, scheme='internet-facing', vpc_id=None, subnets=None):
        """Build a load balancer.

        ``ports`` is a list of listener dicts; each dict may use either
        boto-style snake_case keys or CloudFormation CamelCase keys (the
        lookups below accept both spellings).
        """
        self.name = name
        self.health_check = None
        self.instance_ids = []
        self.zones = zones
        self.listeners = []
        self.backends = []
        self.created_time = datetime.datetime.now()
        self.scheme = scheme
        self.attributes = FakeLoadBalancer.get_default_attributes()
        self.policies = Policies()
        self.policies.other_policies = []
        self.policies.app_cookie_stickiness_policies = []
        self.policies.lb_cookie_stickiness_policies = []
        self.subnets = subnets or []
        # Fall back to a fixed default VPC id when none can be derived.
        self.vpc_id = vpc_id or 'vpc-56e10e3d'
        self.tags = {}
        self.dns_name = "%s.us-east-1.elb.amazonaws.com" % (name)
        for port in ports:
            listener = FakeListener(
                protocol=(port.get('protocol') or port['Protocol']),
                load_balancer_port=(
                    port.get('load_balancer_port') or port['LoadBalancerPort']),
                instance_port=(
                    port.get('instance_port') or port['InstancePort']),
                ssl_certificate_id=port.get(
                    'ssl_certificate_id', port.get('SSLCertificateId')),
            )
            self.listeners.append(listener)
            # it is unclear per the AWS documentation as to when or how backend
            # information gets set, so let's guess and set it here *shrug*
            backend = FakeBackend(
                instance_port=(
                    port.get('instance_port') or port['InstancePort']),
            )
            self.backends.append(backend)
    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        """Create a load balancer from a CloudFormation resource definition.

        Also registers instances, attaches policies per backend port, and
        configures the health check when those properties are present.
        """
        properties = cloudformation_json['Properties']
        elb_backend = elb_backends[region_name]
        new_elb = elb_backend.create_load_balancer(
            name=properties.get('LoadBalancerName', resource_name),
            zones=properties.get('AvailabilityZones', []),
            ports=properties['Listeners'],
            scheme=properties.get('Scheme', 'internet-facing'),
        )
        instance_ids = properties.get('Instances', [])
        for instance_id in instance_ids:
            elb_backend.register_instances(new_elb.name, [instance_id])
        policies = properties.get('Policies', [])
        port_policies = {}
        for policy in policies:
            policy_name = policy["PolicyName"]
            other_policy = OtherPolicy()
            other_policy.policy_name = policy_name
            elb_backend.create_lb_other_policy(new_elb.name, other_policy)
            # Collect policy names per instance port so each port is
            # configured exactly once with its full policy set below.
            for port in policy.get("InstancePorts", []):
                policies_for_port = port_policies.get(port, set())
                policies_for_port.add(policy_name)
                port_policies[port] = policies_for_port
        for port, policies in port_policies.items():
            elb_backend.set_load_balancer_policies_of_backend_server(
                new_elb.name, port, list(policies))
        health_check = properties.get('HealthCheck')
        if health_check:
            elb_backend.configure_health_check(
                load_balancer_name=new_elb.name,
                timeout=health_check['Timeout'],
                healthy_threshold=health_check['HealthyThreshold'],
                unhealthy_threshold=health_check['UnhealthyThreshold'],
                interval=health_check['Interval'],
                target=health_check['Target'],
            )
        return new_elb
    @classmethod
    def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
        # CloudFormation updates are modelled as delete-then-recreate.
        cls.delete_from_cloudformation_json(
            original_resource.name, cloudformation_json, region_name)
        return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)
    @classmethod
    def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        elb_backend = elb_backends[region_name]
        try:
            elb_backend.delete_load_balancer(resource_name)
        except KeyError:
            # Deleting a balancer that no longer exists is a no-op.
            pass
    @property
    def physical_resource_id(self):
        # CloudFormation identifies a classic ELB by its name.
        return self.name
    def get_cfn_attribute(self, attribute_name):
        """Resolve ``Fn::GetAtt`` for this resource.

        Only ``DNSName`` is implemented; the other documented attributes
        deliberately raise NotImplementedError, and unknown names raise
        UnformattedGetAttTemplateException.
        """
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'CanonicalHostedZoneName':
            raise NotImplementedError(
                '"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneName" ]"')
        elif attribute_name == 'CanonicalHostedZoneNameID':
            raise NotImplementedError(
                '"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneNameID" ]"')
        elif attribute_name == 'DNSName':
            return self.dns_name
        elif attribute_name == 'SourceSecurityGroup.GroupName':
            raise NotImplementedError(
                '"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.GroupName" ]"')
        elif attribute_name == 'SourceSecurityGroup.OwnerAlias':
            raise NotImplementedError(
                '"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.OwnerAlias" ]"')
        raise UnformattedGetAttTemplateException()
    @classmethod
    def get_default_attributes(cls):
        """Return the attribute set a new balancer starts with: cross-zone
        balancing off, connection draining off, access log off, 60s idle
        timeout."""
        attributes = LbAttributes()
        cross_zone_load_balancing = CrossZoneLoadBalancingAttribute()
        cross_zone_load_balancing.enabled = False
        attributes.cross_zone_load_balancing = cross_zone_load_balancing
        connection_draining = ConnectionDrainingAttribute()
        connection_draining.enabled = False
        attributes.connection_draining = connection_draining
        access_log = AccessLogAttribute()
        access_log.enabled = False
        attributes.access_log = access_log
        connection_settings = ConnectionSettingAttribute()
        connection_settings.idle_timeout = 60
        # NOTE(review): the target attribute really is spelled
        # 'connecting_settings' (sic) — confirm against the response
        # serializers before renaming it.
        attributes.connecting_settings = connection_settings
        return attributes
    def add_tag(self, key, value):
        # At most 10 tags; overwriting an existing key never raises.
        if len(self.tags) >= 10 and key not in self.tags:
            raise TooManyTagsError()
        self.tags[key] = value
    def list_tags(self):
        return self.tags
    def remove_tag(self, key):
        # Removing an absent key is silently ignored.
        if key in self.tags:
            del self.tags[key]
    def delete(self, region):
        ''' Not exposed as part of the ELB API - used for CloudFormation. '''
        elb_backends[region].delete_load_balancer(self.name)
class ELBBackend(BaseBackend):
    def __init__(self, region_name=None):
        # Region this backend emulates; load_balancers maps balancer name
        # to its FakeLoadBalancer instance.
        self.region_name = region_name
        self.load_balancers = {}
    def reset(self):
        """Drop all state but keep the region (standard moto backend reset)."""
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)
def create_load_balancer(self, name, zones, ports, scheme='internet-facing', subnets=None):
vpc_id = None
ec2_backend = ec2_backends[self.region_name]
if subnets:
subnet = ec2_backend.get_subnet(subnets[0])
vpc_id = subnet.vpc_id
if name in self.load_balancers:
raise DuplicateLoadBalancerName(name)
new_load_balancer = FakeLoadBalancer(
name=name, zones=zones, ports=ports, scheme=scheme, subnets=subnets, vpc_id=vpc_id)
self.load_balancers[name] = new_load_balancer
return new_load_balancer
def create_load_balancer_listeners(self, name, ports):
balancer = self.load_balancers.get(name, None)
if balancer:
for port in ports:
protocol = port['protocol']
instance_port = port['instance_port']
lb_port = port['load_balancer_port']
ssl_certificate_id = port.get('sslcertificate_id')
for listener in balancer.listeners:
if lb_port == listener.load_balancer_port:
|
from fbchat import GroupData, User
def test_group_from_graphql(session):
    """GroupData._from_graphql should parse a raw GraphQL thread payload
    into an equivalent GroupData instance (participants, admins, emoji,
    defaults for colour/approval/join link)."""
    # Minimal GraphQL group-thread payload as returned by the server.
    data = {
        "name": "Group ABC",
        "thread_key": {"thread_fbid": "11223344"},
        "image": None,
        "is_group_thread": True,
        "all_participants": {
            "nodes": [
                {"messaging_actor": {"__typename": "User", "id": "1234"}},
                {"messaging_actor": {"__typename": "User", "id": "2345"}},
                {"messaging_actor": {"__typename": "User", "id": "3456"}},
            ]
        },
        "customization_info": {
            "participant_customizations": [],
            "outgoing_bubble_color": None,
            "emoji": "😀",
        },
        "thread_admins": [{"id": "1234"}],
        "group_approval_queue": {"nodes": []},
        "approval_mode": 0,
        "joinable_mode": {"mode": "0", "link": ""},
        "event_reminders": {"nodes": []},
    }
    # Expected parse result; colour falls back to the Messenger default.
    assert GroupData(
        session=session,
        id="11223344",
        photo=None,
        name="Group ABC",
        last_active=None,
        message_count=None,
        plan=None,
        participants=[
            User(session=session, id="1234"),
            User(session=session, id="2345"),
            User(session=session, id="3456"),
        ],
        nicknames={},
        color="#0084ff",
        emoji="😀",
        admins={"1234"},
        approval_mode=False,
        approval_requests=set(),
        join_link="",
    ) == GroupData._from_graphql(session, data)
|
/// | <reference path="./testBlocks/enums.ts" />
// Flag-style enum: each member occupies a distinct bit (1, 2, 8) so values
// can be combined and tested with bitwise operators.
enum EnumOfFlags {
    W = 1,
    X = 1 << 1,
    Z = 1 << 3
}
// Test case: reference a single flag member.
let userDefinedTest7 = EnumOfFlags.W
import itertools
import os
class TreeHasher():
    """uses BlockHasher recursively on a directory tree

    Input and output generators are in the format: ( relative-filepath, chunk_nr, hexdigest)
    """

    def __init__(self, block_hasher):
        """
        :type block_hasher: BlockHasher
        """
        self.block_hasher = block_hasher

    def generate(self, start_path):
        """Use BlockHasher on every file in a tree, yielding the results

        note that it only checks the contents of actual files. It ignores metadata like permissions and mtimes.
        It also ignores empty directories, symlinks and special files.
        """

        def walkerror(e):
            # os.walk swallows errors by default; re-raise so callers see them.
            raise e

        for (dirpath, dirnames, filenames) in os.walk(start_path, onerror=walkerror):
            for file_name in filenames:
                file_path = os.path.join(dirpath, file_name)
                # Only hash regular files; skip symlinks and special files.
                if (not os.path.islink(file_path)) and os.path.isfile(file_path):
                    # 'digest' instead of 'hash': don't shadow the builtin.
                    for (chunk_nr, digest) in self.block_hasher.generate(file_path):
                        yield (os.path.relpath(file_path, start_path), chunk_nr, digest)

    def compare(self, start_path, generator):
        """reads from generator and compares blocks

        The generator must be grouped by file name (as produced by
        generate()), since itertools.groupby only merges adjacent items.

        yields mismatches in the form: ( relative_filename, chunk_nr, compare_hexdigest, actual_hexdigest )
        yields errors in the form: ( relative_filename, chunk_nr, compare_hexdigest, "message" )
        """

        def strip_file_name(file_name, chunk_nr, hexdigest):
            # Drop the file name so BlockHasher.compare sees (chunk_nr, hexdigest).
            return (chunk_nr, hexdigest)

        for file_name, group_generator in itertools.groupby(generator, lambda x: x[0]):
            block_generator = itertools.starmap(strip_file_name, group_generator)
            for (chunk_nr, compare_hexdigest, actual_hexdigest) in self.block_hasher.compare(
                    os.path.join(start_path, file_name), block_generator):
                yield (file_name, chunk_nr, compare_hexdigest, actual_hexdigest)
|
# Microcosmos: an antsy game
# Copyright (C) 2010 Cyril ADRIAN <cyril.adrian@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 exclusively.
#
# This program is distributed in the hope that i | t will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Ge | neral Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
The Bugs model package provides bugs and their specific behaviour.
"""
from net.cadrian.microcosmos.model.bugs.antFemales import AntFemale, Target as AntFemaleTarget
from net.cadrian.microcosmos.model.bugs.antQueens import AntQueen
from net.cadrian.microcosmos.model.bugs.antSoldiers import AntSoldier
from net.cadrian.microcosmos.model.bugs.antWorkers import AntWorker
from net.cadrian.microcosmos.model.bugs.lice import Louse
|
sheet()
elif event.notice == 'add_image':
pass
elif event.notice == 'del_image':
pass
elif event.notice == 'add_character':
self.add_character()
elif event.notice == 'del_character':
self.del_character()
elif event.notice == 'add_animation':
pass
elif event.notice == 'del_animation':
pass
elif event.notice == 'add_sprite':
self.add_sprite()
elif event.notice == 'del_sprite':
self.del_sprite()
event.Skip()
def change_background(self):
dlg = dialogs.ChangeBackgroundDialog(self.parent)
res = dlg.ShowModal()
if res == wx.ID_OK:
back_type = dlg.back_type.GetValue()
back_spec = dlg.back_spec.GetValue()
self.resources.change_default_background(back_type, back_spec)
dlg.Destroy()
    def add_sheet(self):
        """Prompt for a sheet image file, then for its details, and register
        it under resources['sheets']. Always returns True."""
        # definition_fields = Factory_sheet.definition_fields
        # dialog with definition fields, source file with browse button
        # resource with same name , overwrite ?
        filename = dialogs.open_sheet(self.parent)
        if filename is not None:
            dia = dialogs.AddSheetDialog(None, -1, "Insert sheet details",
                                         self.resources)
            result = dia.ShowModal()
            if result == wx.ID_OK:
                self.settings = dia.GetSettings()
                try:
                    self.resources.add_resource(
                        'sheets', self.settings['name'],
                        {'colorkey': self.settings['colorkey'],
                         'abs_path': filename})
                except ValueError as e:
                    # add_resource rejects e.g. duplicate names; tell the user.
                    wx.MessageBox(str(e), "Error",
                                  wx.OK | wx.ICON_INFORMATION)
            dia.Destroy()
        return True
    def del_sheet(self):
        """Ask which sheet to delete, then remove it and every resource that
        depends on it. Always returns True."""
        # LISTCTR with very large icons ?
        # use resources.find_deps
        # print self.resources.find_deps('sheets', 'master')
        # name = 'testsheet'
        # self.resources.remove_resource('sheets', name)
        # and everything associated to IT!!!
        dia = dialogs.DelSheetDialog(None, -1, "Delete sheet",
                                     self.resources)
        result = dia.ShowModal()
        if result == wx.ID_OK:
            self.settings = dia.GetSettings()
            # Remove dependants first so no dangling references remain.
            for x in self.resources.find_deps('sheets',
                                              self.settings['sheet']):
                for elem in x:
                    try:
                        self.resources.remove_resource(elem[0], elem[1])
                    except Exception as e:
                        wx.MessageBox(str(e), "Error", wx.OK |
                                      wx.ICON_INFORMATION)
            try:
                self.resources.remove_resource('sheets',
                                               self.settings['sheet'])
            except Exception as e:
                wx.MessageBox(str(e), "Error", wx.OK | wx.ICON_INFORMATION)
        dia.Destroy()
        return True
    def add_costume(self):
        """Create a new costume (a named rectangle on a sheet) via dialog and
        register it under resources['costumes']. Always returns True."""
        # dialog with definitions and a area selection on the sheet
        dia = dialogs.AddCostumeDialog(None, -1, "Add a new costume",
                                       self.resources)
        result = dia.ShowModal()
        if result == wx.ID_OK:
            self.settings = dia.GetSettings()
            # print self.settings['name'], self.settings['rect'], \
            # self.settings['sheet']
            try:
                self.resources.add_resource(
                    'costumes', self.settings['name'],
                    {'name': self.settings['name'],
                     'sheet': self.settings['sheet'],
                     'rect': self.settings['rect']})
            except ValueError as e:
                # add_resource rejects e.g. duplicate names; tell the user.
                wx.MessageBox(str(e), "Error",
                              wx.OK | wx.ICON_INFORMATION)
        dia.Destroy()
        return True
    def del_costume(self):
        """Ask which costume to delete, then remove it and every resource
        that depends on it. Always returns True."""
        # LISTCTRL with large icons
        dia = dialogs.DelCostumeDialog(None, -1, "Delete costume",
                                       self.resources)
        result = dia.ShowModal()
        if result == wx.ID_OK:
            self.settings = dia.GetSettings()
            # Remove dependants first so no dangling references remain.
            for x in self.resources.find_deps('costumes',
                                              self.settings['costume']):
                for elem in x:
                    try:
                        self.resources.remove_resource(elem[0], elem[1])
                    except Exception as e:
                        wx.MessageBox(str(e), "Error", wx.OK |
                                      wx.ICON_INFORMATION)
            try:
                self.resources.remove_resource('costumes',
                                               self.settings['costume'])
            except Exception as e:
                wx.MessageBox(str(e), "Error", wx.OK | wx.ICON_INFORMATION)
        dia.Destroy()
        return True
    def add_sprite(self):
        """Create a new sprite (base class + costumes, empty animations,
        sounds and user code) via dialog and register it under
        resources['sprites']. Always returns True."""
        # dialog with definition, select from existing costumes,
        # animations, sounds...
        # or add empty
        dia = dialogs.AddSpriteDialog(None, -1, "Add a new sprite",
                                      self.resources)
        result = dia.ShowModal()
        if result == wx.ID_OK:
            self.settings = dia.GetSettings()
            try:
                self.resources.add_resource('sprites', self.settings['name'],
                                            {'name': self.settings['name'],
                                             'base_class': self.settings
                                             ['base_class'],
                                             'costumes': self.settings
                                             ['costumes'],
                                             'animations': [],
                                             'sounds': [],
                                             'self_sufficiency': 0,
                                             'user_code': {'__init__': ''}})
            except ValueError as e:
                # add_resource rejects e.g. duplicate names; tell the user.
                wx.MessageBox(str(e), "Error",
                              wx.OK | wx.ICON_INFORMATION)
        dia.Destroy()
        return True
    def del_sprite(self):
        """Ask which sprite to delete, then remove it and every resource
        that depends on it. Always returns True."""
        # LISTCTRK with name + sprite definition
        dia = dialogs.DelSpriteDialog(None, -1, "Delete a sprite",
                                      self.resources)
        result = dia.ShowModal()
        if result == wx.ID_OK:
            self.settings = dia.GetSettings()
            # Remove dependants first so no dangling references remain.
            for x in self.resources.find_deps('sprites',
                                              self.settings['sprite']):
                for elem in x:
                    try:
                        self.resources.remove_resource(elem[0], elem[1])
                    except Exception as e:
                        wx.MessageBox(str(e), "Error", wx.OK |
                                      wx.ICON_INFORMATION)
            try:
                self.resources.remove_resource('sprites',
                                               self.settings['sprite'])
            except Exception as e:
                wx.MessageBox(str(e), "Error", wx.OK | wx.ICON_INFORMATION)
        dia.Destroy()
        return True
    def add_character(self):
        """Create a new (initially sprite-less) character via dialog and
        register it under resources['characters']. Always returns True."""
        # dialog with definition, select from existing sprites or add empty
        dia = dialogs.AddCharacterDialog(None, -1, "Add a new character",
                                         self.resources)
        result = dia.ShowModal()
        if result == wx.ID_OK:
            self.settings = dia.GetSettings()
            try:
                self.resources.add_resource('characters',
                                            self.settings['name'],
                                            {'sprites': []})
            except ValueError as e:
                # add_resource rejects e.g. duplicate names; tell the user.
                wx.MessageBox(str(e), "Error",
                              wx.OK | wx.ICON_INFORMATION)
        dia.Destroy()
        return True
def del_character(self):
# LISTCTRK with name + sprite definit |
#!/usr/bin/env python
# Thu, 13 Mar 14 (PDT)
# bpf-filter.rb: Create a packet filter,
# use it to print udp records from a trace
# Copyright (C) 2 | 015, Nevil Brownlee, U Auckland | WAND
from plt_testing import *

t = get_example_trace('anon-v4.pcap')

# Named 'dns_filter' rather than 'filter' so the Python builtin isn't shadowed.
dns_filter = plt.filter('udp port 53')  # Only want DNS packets
t.conf_filter(dns_filter)
t.conf_snaplen(500)
#t.conf_promisc(True)
# Remember: on a live interface, must sudo to capture
# on a trace file, can't set promiscuous
nfp = 0
offset = 12
# Print the UDP payload of the first four filtered packets.
for pkt in t:
    nfp += 1
    test_println("%4d:" % (nfp), get_tag())
    print_udp(pkt.udp, offset, get_tag("nfp:"+str(nfp)))
    test_println('')
    if nfp == 4:
        break
test_println("%d filtered packets" % nfp, get_tag())
|
eration number, which is only incremented
when learning is enabled
bucketIdx: the bucket index to store
Save duty cycle by normalizing it to the same iteration as
the rest of the duty cycles which is lastTotalUpdate.
This is done to speed up computation in inference since all of the duty
cycles can now be scaled by a single number.
The duty cycle is brought up to the current iteration only at inference and
only when one of the duty cycles gets too large (to avoid overflow to
larger data type) since the ratios between the duty cycles are what is
important. As long as all of the duty cycles are at the same iteration
their ratio is the same as it would be for any other iteration, because the
update is simply a multiplication by a scalar that depends on the number of
steps between the last update of the duty cycle and the current iteration.
"""
# If lastTotalUpdate has not been set, set it to the current iteration.
if self._lastTotalUpdate is None:
self._lastTotalUpdate = iteration
# Get the duty cycle stored for this bucket.
statsLen = len(self._stats) - 1
if bucketIdx > statsLen:
self._stats.extend(itertools.repeat(0.0, bucketIdx - statsLen))
# Update it now.
# duty cycle n steps ago is dc{-n}
# duty cycle for current iteration is (1-alpha)*dc{-n}*(1-alpha)**(n)+alpha
dc = self._stats[bucketIdx]
# To get the duty cycle from n iterations ago that when updated to the
# current iteration would equal the dc of the current iteration we simply
# divide the duty cycle by (1-alpha)**(n). This results in the formula
# dc'{-n} = dc{-n} + alpha/(1-alpha)**n where the apostrophe symbol is used
# to denote that this is the new duty cycle at that iteration. This is
# equivalent to the duty cycle dc{-n}
denom = ((1.0 - self._classifier.alpha) **
(iteration - self._lastTotalUpdate))
if denom > 0:
dcNew = dc + (self._classifier.alpha / denom)
# This is to prevent errors associated with inf rescale if too large
if denom == 0 or dcNew > DUTY_CYCLE_UPDATE_INTERVAL:
exp = ((1.0 - self._classifier.alpha) **
(iteration - self._lastTotalUpdate))
for (bucketIdxT, dcT) in enumerate(self._stats):
dcT *= exp
self._stats[bucketIdxT] = dcT
# Reset time since last update
self._lastTotalUpdate = iteration
# Add alpha since now exponent is 0
dc = self._stats[bucketIdx] + self._classifier.alpha
else:
dc = dcNew
self._stats[bucketIdx] = dc
if self._classifier.verbosity >= 2:
print "updated DC for {0!s}, bucket {1:d} to {2:f}".format(self._id, bucketIdx, dc)
  def infer(self, votes):
    """Look up and return the votes for each bucketIdx for this bit.

    Parameters:
    --------------------------------------------------------------------
    votes:    a numpy array, initialized to all 0's, that should be filled
              in with the votes for each bucket. The vote for bucket index N
              should go into votes[N]. The array is modified in place and
              normalized to sum to 1 when any duty cycle is positive;
              nothing is returned.
    """
    # Place the duty cycle into the votes and update the running total for
    # normalization
    total = 0
    for (bucketIdx, dc) in enumerate(self._stats):
      # Not updating to current iteration since we are normalizing anyway
      if dc > 0.0:
        votes[bucketIdx] = dc
        total += dc
    # Experiment... try normalizing the votes from each bit
    if total > 0:
      votes /= total
    if self._classifier.verbosity >= 2:
      print "bucket votes for {0!s}:".format((self._id)), _pFormatArray(votes)
def __getstate__(self):
return dict((elem, getattr(self, elem)) for elem in self.__slots__)
def __setsta | te__(self, state):
version = 0
if "_version" in state:
version = state["_version"]
# Migrate from version 0 to version 1
if version == 0:
stats = state.pop("_stats")
assert isinstance(stats, dict)
maxBucket = max(stat | s.iterkeys())
self._stats = array.array("f", itertools.repeat(0.0, maxBucket + 1))
for (index, value) in stats.iteritems():
self._stats[index] = value
elif version == 1:
state.pop("_updateDutyCycles", None)
elif version == 2:
pass
else:
raise Exception("Error while deserializing {0!s}: Invalid version {1!s}".format(self.__class__, version))
for (attr, value) in state.iteritems():
setattr(self, attr, value)
self._version = BitHistory.__VERSION__
def write(self, proto):
proto.id = self._id
statsProto = proto.init("stats", len(self._stats))
for (bucketIdx, dutyCycle) in enumerate(self._stats):
statsProto[bucketIdx].index = bucketIdx
statsProto[bucketIdx].dutyCycle = dutyCycle
proto.lastTotalUpdate = self._lastTotalUpdate
proto.learnIteration = self._learnIteration
@classmethod
def read(cls, proto):
bitHistory = object.__new__(cls)
bitHistory._id = proto.id
for statProto in proto.stats:
statsLen = len(bitHistory._stats) - 1
if statProto.index > statsLen:
bitHistory._stats.extend(
itertools.repeat(0.0, statProto.index - statsLen))
bitHistory._stats[statProto.index] = statProto.dutyCycle
bitHistory._lastTotalUpdate = proto.lastTotalUpdate
bitHistory._learnIteration = proto.learnIteration
return bitHistory
class CLAClassifier(object):
"""
A CLA classifier accepts a binary input from the level below (the
"activationPattern") and information from the sensor and encoders (the
"classification") describing the input to the system at that time step.
When learning, for every bit in activation pattern, it records a history of
the classification each time that bit was active. The history is weighted so
that more recent activity has a bigger impact than older activity. The alpha
parameter controls this weighting.
For inference, it takes an ensemble approach. For every active bit in the
activationPattern, it looks up the most likely classification(s) from the
history stored for that bit and then votes across these to get the resulting
classification(s).
This classifier can learn and infer a number of simultaneous classifications
at once, each representing a shift of a different number of time steps. For
example, say you are doing multi-step prediction and want the predictions for
1 and 3 time steps in advance. The CLAClassifier would learn the associations
between the activation pattern for time step T and the classifications for
time step T+1, as well as the associations between activation pattern T and
the classifications for T+3. The 'steps' constructor argument specifies the
list of time-steps you want.
"""
__VERSION__ = 2
def __init__(self, steps=(1,), alpha=0.001, actValueAlpha=0.3, verbosity=0):
"""Constructor for the CLA classifier.
Parameters:
---------------------------------------------------------------------
steps: Sequence of the different steps of multi-step predictions to learn
alpha: The alpha used to compute running averages of the bucket duty
cycles for each activation pattern bit. A lower alpha results
in longer term memory.
verbosity: verbosity level, can be 0, 1, or 2
"""
# Save constructor args
self.steps = steps
self.alpha = alpha
self.actValueAlpha = actValueAlpha
self.verbosity = verbosity
# Init learn iteration index
self._learnIteration = 0
# This contains the offset between the recordNum (provided by caller) and
# learnIteration (internal only, always starts at 0).
self._recordNumMinusLearnIteration = None
# Max # of steps of prediction we need to support
maxSteps = max(self.steps) + 1
# History of the last _maxSteps activation patterns. We need to keep
# these so that we can associate the current iteration's classification
# with the activationPattern from N steps ago
self._patternNZHistory = deque(maxlen=maxSteps)
# These are the bit histories. Each one is a BitHistory instance, stored in
# th |
rg/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Windowing concepts.
A WindowInto transform logically divides up or groups the elements of a
PCollection into finite windows according to a windowing function (derived from
WindowFn).
The output of WindowInto contains the same elements as input, but they have been
logically assigned to windows. The next GroupByKey(s) transforms, including one
within a composite transform, will group by the combination of keys and windows.
Windowing a PCollection allows chunks of it to be processed individually, before
the entire PCollection is available. This is especially important for
PCollection(s) with unbounded size, since the full PCollection is never
available at once, since more data is continually arriving. For PCollection(s)
with a bounded size (aka. conventional batch mode), by default, all data is
implicitly in a single window (see GlobalWindows), unless WindowInto is
applied.
For example, a simple form of windowing divides up the data into fixed-width
time intervals, using FixedWindows.
Seconds are used as the time unit for the built-in windowing primitives here.
Integer or floating point seconds can be passed to the | se primitives.
Internally, seconds, with microsecond granularity, are stored as
timeutil.Timestamp and timeutil.Duration objects. This is done to avoid
precision errors that would occur with floating point representations.
Custom windowing function classes can be created, by subclassing from
WindowFn.
"""
from __future__ import absolute_import
import abc
from builtins imp | ort object
from builtins import range
from functools import total_ordering
from future.utils import with_metaclass
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from apache_beam.coders import coders
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import standard_window_fns_pb2
from apache_beam.transforms import timeutil
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
# Names exported via `from apache_beam.transforms.window import *`;
# everything else in this module is an implementation detail.
__all__ = [
    'TimestampCombiner',
    'WindowFn',
    'BoundedWindow',
    'IntervalWindow',
    'TimestampedValue',
    'GlobalWindow',
    'NonMergingWindowFn',
    'GlobalWindows',
    'FixedWindows',
    'SlidingWindows',
    'Sessions',
]
# TODO(ccy): revisit naming and semantics once Java Apache Beam finalizes their
# behavior.
class TimestampCombiner(object):
  """Determines how output timestamps of grouping operations are assigned."""
  OUTPUT_AT_EOW = beam_runner_api_pb2.OutputTime.END_OF_WINDOW
  OUTPUT_AT_EARLIEST = beam_runner_api_pb2.OutputTime.EARLIEST_IN_PANE
  OUTPUT_AT_LATEST = beam_runner_api_pb2.OutputTime.LATEST_IN_PANE
  # TODO(robertwb): Add this to the runner API or remove it.
  OUTPUT_AT_EARLIEST_TRANSFORMED = 'OUTPUT_AT_EARLIEST_TRANSFORMED'

  @staticmethod
  def get_impl(timestamp_combiner, window_fn):
    """Return the timeutil implementation for the given combiner constant."""
    # The transformed variant is the only one that needs the window_fn.
    if timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED:
      return timeutil.OutputAtEarliestTransformedInputTimestampImpl(window_fn)
    factories = {
        TimestampCombiner.OUTPUT_AT_EOW:
            timeutil.OutputAtEndOfWindowImpl,
        TimestampCombiner.OUTPUT_AT_EARLIEST:
            timeutil.OutputAtEarliestInputTimestampImpl,
        TimestampCombiner.OUTPUT_AT_LATEST:
            timeutil.OutputAtLatestInputTimestampImpl,
    }
    if timestamp_combiner not in factories:
      raise ValueError('Invalid TimestampCombiner: %s.' % timestamp_combiner)
    return factories[timestamp_combiner]()
class WindowFn(with_metaclass(abc.ABCMeta, urns.RunnerApiFn)):
  """An abstract windowing function defining a basic assign and merge."""
  class AssignContext(object):
    """Context passed to WindowFn.assign()."""
    def __init__(self, timestamp, element=None, window=None):
      # Timestamp.of normalizes int/float seconds to a Timestamp object.
      self.timestamp = Timestamp.of(timestamp)
      self.element = element
      self.window = window
  @abc.abstractmethod
  def assign(self, assign_context):
    """Associates windows to an element.
    Arguments:
      assign_context: Instance of AssignContext.
    Returns:
      An iterable of BoundedWindow.
    """
    raise NotImplementedError
  class MergeContext(object):
    """Context passed to WindowFn.merge() to perform merging, if any."""
    def __init__(self, windows):
      self.windows = list(windows)
    def merge(self, to_be_merged, merge_result):
      # Callback for WindowFn.merge() implementations to report that the
      # windows in `to_be_merged` collapse into `merge_result`; overridden
      # by the caller that constructs the context.
      raise NotImplementedError
  @abc.abstractmethod
  def merge(self, merge_context):
    """Returns a window that is the result of merging a set of windows."""
    raise NotImplementedError
  def is_merging(self):
    """Returns whether this WindowFn merges windows."""
    return True
  @abc.abstractmethod
  def get_window_coder(self):
    # Coder used to (de)serialize this WindowFn's window type.
    raise NotImplementedError
  def get_transformed_output_time(self, window, input_timestamp):  # pylint: disable=unused-argument
    """Given input time and output window, returns output time for window.
    If TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED is used in the
    Windowing, the output timestamp for the given window will be the earliest
    of the timestamps returned by get_transformed_output_time() for elements
    of the window.
    Arguments:
      window: Output window of element.
      input_timestamp: Input timestamp of element as a timeutil.Timestamp
        object.
    Returns:
      Transformed timestamp.
    """
    # By default, just return the input timestamp.
    return input_timestamp
  # Makes any WindowFn round-trippable through the Runner API by pickling.
  urns.RunnerApiFn.register_pickle_urn(python_urns.PICKLED_WINDOWFN)
class BoundedWindow(object):
  """A window for timestamps in range (-infinity, end).

  Attributes:
    end: End of window.
  """

  def __init__(self, end):
    self.end = Timestamp.of(end)

  def max_timestamp(self):
    """Latest timestamp contained in this window (`end` is exclusive)."""
    return self.end.predecessor()

  def __eq__(self, other):
    # Concrete window types must supply their own equality.
    raise NotImplementedError

  def __ne__(self, other):
    # Windows with distinct endpoints are never equal; ties fall back to
    # the subclass-provided hash for an arbitrary but consistent answer.
    if self.end != other.end:
      return True
    return hash(self) != hash(other)

  def __lt__(self, other):
    # Order primarily by endpoint, then arbitrarily by hash. The hash is
    # only consulted when endpoints coincide.
    if self.end == other.end:
      return hash(self) < hash(other)
    return self.end < other.end

  def __le__(self, other):
    if self.end == other.end:
      return hash(self) <= hash(other)
    return self.end <= other.end

  def __gt__(self, other):
    if self.end == other.end:
      return hash(self) > hash(other)
    return self.end > other.end

  def __ge__(self, other):
    if self.end == other.end:
      return hash(self) >= hash(other)
    return self.end >= other.end

  def __hash__(self):
    # Subclasses must define hashing consistent with their __eq__.
    raise NotImplementedError

  def __repr__(self):
    return '[?, %s)' % float(self.end)
class IntervalWindow(BoundedWindow):
  """A window for timestamps in range [start, end).

  Attributes:
    start: Start of window as seconds since Unix epoch.
    end: End of window as seconds since Unix epoch.
  """

  def __init__(self, start, end):
    super(IntervalWindow, self).__init__(end)
    self.start = Timestamp.of(start)

  def __hash__(self):
    return hash((self.start, self.end))

  def __eq__(self, other):
    return (self.start == other.start
            and self.end == other.end
            and type(self) == type(other))

  def __ne__(self, other):
    return not self == other

  def __repr__(self):
    return '[%s, %s)' % (float(self.start), float(self.end))

  def intersects(self, other):
    """Return True if this window overlaps `other`.

    BUG FIX: this previously used `or`, which evaluates True for any two
    well-formed windows (e.g. [0, 5) and [10, 15) "intersected"). Two
    half-open intervals overlap iff each starts strictly before the other
    ends, matching the Java SDK's IntervalWindow semantics.
    """
    return other.start < self.end and self.start < other.end

  def union(self, other):
    """Return the smallest window spanning both self and other."""
    return IntervalWindow(
        min(self.start, other.start), max(self.end, other.end))
@total_ordering
class TimestampedValue(object):
"""A timestamped value having |
graphics.icon import Icon, CellRendererIcon
from jarabe.controlpanel.sectionview import SectionView
from jarabe.model.update import updater
from jarabe.model import bundleregistry
_DEBUG_VIEW_ALL = True
class ActivityUpdater(SectionView):
    """Control-panel section for checking and installing activity updates."""
    def __init__(self, model, alerts):
        SectionView.__init__(self)
        # The passed-in model is not used; the shared updater singleton is.
        # Handler ids are kept so __destroy_cb can disconnect them later.
        self._model = updater.get_instance()
        self._id_progresss = self._model.connect('progress',
                                                 self.__progress_cb)
        self._id_updates = self._model.connect('updates-available',
                                               self.__updates_available_cb)
        self._id_error = self._model.connect('error',
                                             self.__error_cb)
        self._id_finished = self._model.connect('finished',
                                                self.__finished_cb)
        self.set_spacing(style.DEFAULT_SPACING)
        self.set_border_width(style.DEFAULT_SPACING * 2)
        # Headline label, rewritten whenever the updater state changes.
        self._top_label = Gtk.Label()
        self._top_label.set_line_wrap(True)
        self._top_label.set_justify(Gtk.Justification.LEFT)
        self._top_label.props.xalign = 0
        self.pack_start(self._top_label, False, True, 0)
        self._top_label.show()
        separator = Gtk.HSeparator()
        self.pack_start(separator, False, True, 0)
        separator.show()
        self._bottom_label = Gtk.Label()
        self._bottom_label.set_line_wrap(True)
        self._bottom_label.set_justify(Gtk.Justification.LEFT)
        self._bottom_label.props.xalign = 0
        self._bottom_label.set_markup(
            _('Software updates correct errors, eliminate security '
              'vulnerabilities, and provide new features.'))
        self.pack_start(self._bottom_label, False, True, 0)
        self._bottom_label.show()
        # The center area shows either the list of available updates
        # (_update_box) or a progress pane (_progress_pane), never both.
        self._update_box = None
        self._progress_pane = None
        state = self._model.get_state()
        if state in (updater.STATE_IDLE, updater.STATE_CHECKED):
            self._refresh()
        elif state in (updater.STATE_CHECKING, updater.STATE_DOWNLOADING,
                       updater.STATE_UPDATING):
            # An operation started elsewhere is running; show its progress.
            self._switch_to_progress_pane()
            self._progress_pane.set_message(_('Update in progress...'))
        self.connect('destroy', self.__destroy_cb)
    def __destroy_cb(self, widget):
        # Drop all model handlers so no callbacks fire on a dead widget.
        self._model.disconnect(self._id_progresss)
        self._model.disconnect(self._id_updates)
        self._model.disconnect(self._id_error)
        self._model.disconnect(self._id_finished)
        self._model.clean()
    def _switch_to_update_box(self, updates):
        # Replace the progress pane (if shown) with the update list.
        if self._update_box in self.get_children():
            return
        if self._progress_pane in self.get_children():
            self.remove(self._progress_pane)
            self._progress_pane = None
        if self._update_box is None:
            self._update_box = UpdateBox(updates)
            self._update_box.refresh_button.connect(
                'clicked',
                self.__refresh_button_clicked_cb)
            self._update_box.install_button.connect(
                'clicked',
                self.__install_button_clicked_cb)
        self.pack_start(self._update_box, expand=True, fill=True, padding=0)
        self._update_box.show()
    def _switch_to_progress_pane(self):
        # Replace the update list (if shown) with the progress pane.
        if self._progress_pane in self.get_children():
            return
        if self._model.get_state() == updater.STATE_CHECKING:
            top_message = _('Checking for updates...')
        else:
            top_message = _('Installing updates...')
        self._top_label.set_markup('<big>%s</big>' % top_message)
        if self._update_box in self.get_children():
            self.remove(self._update_box)
            self._update_box = None
        if self._progress_pane is None:
            self._progress_pane = ProgressPane()
            self._progress_pane.cancel_button.connect(
                'clicked',
                self.__cancel_button_clicked_cb)
        self.pack_start(
            self._progress_pane, expand=True, fill=False, padding=0)
        self._progress_pane.show()
    def _clear_center(self):
        # Remove whichever widget currently occupies the center area.
        if self._progress_pane in self.get_children():
            self.remove(self._progress_pane)
            self._progress_pane = None
        if self._update_box in self.get_children():
            self.remove(self._update_box)
            self._update_box = None
    def __progress_cb(self, model, state, bundle_name, progress):
        # NOTE(review): `message` is unbound if state is none of the three
        # handled values -- presumably the model only emits 'progress' for
        # these states; confirm against the updater model.
        if state == updater.STATE_CHECKING:
            if bundle_name:
                message = _('Checking %s...') % bundle_name
            else:
                message = _('Looking for updates...')
        elif state == updater.STATE_DOWNLOADING:
            message = _('Downloading %s...') % bundle_name
        elif state == updater.STATE_UPDATING:
            message = _('Updating %s...') % bundle_name
        self._switch_to_progress_pane()
        self._progress_pane.set_message(message)
        self._progress_pane.set_progress(progress)
    def __updates_available_cb(self, model, updates):
        logging.debug('ActivityUpdater.__updates_available_cb')
        available_updates = len(updates)
        if not available_updates:
            top_message = _('Your software is up-to-date')
        else:
            # ngettext picks the singular/plural form of the message.
            top_message = ngettext('You can install %s update',
                                   'You can install %s updates',
                                   available_updates)
            top_message = top_message % available_updates
        top_message = GObject.markup_escape_text(top_message)
        self._top_label.set_markup('<big>%s</big>' % top_message)
        if not available_updates:
            self._clear_center()
        else:
            self._switch_to_update_box(updates)
    def __error_cb(self, model, updates):
        logging.debug('ActivityUpdater.__error_cb')
        top_message = _('Can\'t connect to the activity server')
        self._top_label.set_markup('<big>%s</big>' % top_message)
        self._bottom_label.set_markup(
            _('Verify your connection to internet and try again, '
              'or try again later'))
        self._clear_center()
    def __refresh_button_clicked_cb(self, button):
        self._refresh()
    def _refresh(self):
        # Results arrive via the 'updates-available' or 'error' signal.
        self._model.check_updates()
    def __install_button_clicked_cb(self, button):
        self._model.update(self._update_box.get_bundles_to_update())
    def __cancel_button_clicked_cb(self, button):
        self._model.cancel()
    def __finished_cb(self, model, installed_updates, failed_updates,
                      cancelled):
        num_installed = len(installed_updates)
        logging.debug('ActivityUpdater.__finished_cb')
        top_message = ngettext('%s update was installed',
                               '%s updates were installed', num_installed)
        top_message = top_message % num_installed
        top_message = GObject.markup_escape_text(top_message)
        self._top_label.set_markup('<big>%s</big>' % top_message)
        self._clear_center()
    def undo(self):
        # Section-view contract: abort any in-flight operation on leave.
        self._model.cancel()
class ProgressPane(Gtk.VBox):
"""Container which replaces the `ActivityPane` during refresh or
install."""
def __init__(self):
Gtk.VBox.__init__(self)
self.set_s | pacing(style.DEFAULT_PADDING)
self.set_border_width(style.DEFAULT_SPACING * 2)
self._progress = Gtk.ProgressBar()
self.pack_start(self._progress, True, True, 0)
self._progress.show()
self._label = Gtk.Label()
self._label.set_line_wrap(True)
self._label.set_property('xalign', 0.5)
| self._label.modify_fg(Gtk.StateType.NORMAL,
style.COLOR_BUTTON_GREY.get_gdk_color())
self.pack_start(self._label, True, True, 0)
self._label.show()
alignment_box = Gtk.Alignment.new(xalign=0.5, yalign=0.5,
xscale=0, yscale=0)
self.pack_start(alignment_box, True, True, 0)
alignment_box.show()
self.cancel_button = Gtk.Button(stock=Gtk.STOCK_CA |
#### NOTICE: THIS FILE IS AUTOGENER | ATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Factory: build the Death Watch mandalorian belt schematic tangible.
    Called by the engine with the active kernel; returns the configured
    Tangible object. This file is autogenerated -- hand edits belong only
    between the BEGIN/END MODIFICATIONS markers.
    """
    result = Tangible()
    result.template = "object/tangible/loot/loot_schematic/shared_death_watch_mandalorian_belt_schematic.iff"
    result.attribute_template_id = -1
    result.stfName("craft_item_ingredients_n","armor_mandalorian_belt")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
If not, see <http://www.gnu.org/licenses/>.
#
from collections import OrderedDict
import xml.etree.E | lementTree as etree
class Introspectable:
    class Element(object):
        """ This is a basic introspectable object. This class will make
        sure that the given xml element is of correct type and provide
        some helper functions to simplify work of the children.
        Children objects must implement TagName attribute, which contains
        the name of the expected xml tag.
        All introspectable objects contain the following properties:
        - name : str -- name of the object
        - annotations : OrderedDict -- available annotations
        """
        def __init__(self, element):
            # Validate the tag and the mandatory "name" attribute up front.
            self.check(element, self.TagName)
            self.element = element
            self.name = element.attrib["name"]
            self.annotations = self.find(SBus.Annotation)
        def find(self, object_class):
            # Collect all child elements of the given introspectable type.
            return Introspectable.FindElements(self.element, object_class)
        def check(self, element, tagname):
            # Raise if the element has the wrong tag or lacks a name.
            if element.tag != tagname:
                raise ValueError('Unexpected tag name "%s" (%s expected)!'
                                 % (element.tag, tagname))
            if "name" not in element.attrib:
                raise ValueError('Missing attribute name!')
        def getAttr(self, name, default_value):
            # Optional attribute lookup with a fallback.
            return self.element.attrib.get(name, default_value)
        def getExistingAttr(self, name):
            # Mandatory attribute lookup; raises when the attribute is absent.
            if name not in self.element.attrib:
                raise ValueError('Element %s name="%s" is missing attribute %s'
                                 % (self.TagName, self.name, name))
            return self.element.attrib[name]
    class Invokable(Element):
        """ This is a base class for invokable objects -- methods and signals.
        Invokable objects has available additional attributes:
        - input : OrderedDict -- input signature and arguments
        - output : OrderedDict -- output signature and arguments
        """
        def __init__(self, element):
            super(Introspectable.Invokable, self).__init__(element)
            self.key = self.getAttr("key", None)
            self.arguments = self.find(SBus.Argument)
            input = self.getInputArguments()
            output = self.getOutputArguments()
            self.input = SBus.Signature(input, self.annotations)
            self.output = SBus.Signature(output, self.annotations)
            return
        def getInputArguments(self):
            return self.getArguments("in")
        def getOutputArguments(self):
            return self.getArguments("out")
        def getArguments(self, type):
            # Filter self.arguments by direction ("in" or "out").
            # NOTE: parameter `type` shadows the builtin; kept for
            # compatibility with existing callers.
            args = OrderedDict()
            for name, arg in self.arguments.items():
                if type == "in" and arg.isInput():
                    args[name] = arg
                    continue
                if type == "out" and arg.isOutput():
                    args[name] = arg
                    continue
            return args
    @staticmethod
    def Introspect(path):
        # Parse an introspection XML file and return its interfaces by name.
        root = etree.parse(path).getroot()
        return Introspectable.FindElements(root, SBus.Interface)
    @staticmethod
    def FindElements(parent, object_class):
        # Build a name -> object mapping of all children of the given type;
        # duplicate names within one parent are an error.
        dict = OrderedDict()
        for child in parent:
            if child.tag != object_class.TagName:
                continue
            object = object_class(child)
            if object.name in dict:
                raise ValueError('%s name="%s" is already present '
                                 'in the same parent element\n'
                                 % (object_class.TagName, object.name))
            dict[object.name] = object
        """
        Arguments can't be sorted and annotations order should be left on
        the author of introspection. Otherwise we want to sort the dictionary
        alphabetically based on keys.
        """
        if object_class in [SBus.Argument, SBus.Annotation]:
            return dict
        return OrderedDict(sorted(dict.items()))
class SBus:
class Interface(Introspectable.Element):
    # A D-Bus <interface> element: aggregates its methods, signals and
    # properties into name-keyed ordered dictionaries.
    TagName = "interface"
    def __init__(self, element):
        super(SBus.Interface, self).__init__(element)
        self.methods = self.find(SBus.Method)
        self.signals = self.find(SBus.Signal)
        self.properties = self.find(SBus.Property)
        return
class Method(Introspectable.Invokable):
    # A D-Bus <method> element; all behavior comes from Invokable.
    TagName = "method"
    def __init__(self, element):
        super(SBus.Method, self).__init__(element)
class Signal(Introspectable.Invokable):
    # A D-Bus <signal> element; all behavior comes from Invokable.
    TagName = "signal"
    def __init__(self, element):
        super(SBus.Signal, self).__init__(element)
class Property(Introspectable.Invokable):
    # A D-Bus <property> element, modeled as an invokable whose input and
    # output arguments are synthesized from its "type" and "access".
    TagName = "property"
    def __init__(self, element):
        # name/element must be assigned before the getExistingAttr() calls
        # below, because they run prior to the superclass constructor.
        self.name = element.attrib["name"]
        self.element = element
        self.access = self.getExistingAttr("access")
        self.type = self.getExistingAttr("type")
        super(SBus.Property, self).__init__(element)
        if self.key is not None:
            raise ValueError('Keying is not supported on properties: %s '
                             % self.name)
    def getInputArguments(self):
        # Writable property: one "in" argument carrying the new value.
        if not self.isWritable():
            return {}
        return {"value": SBus.Argument.Create("value", self.type, "in")}
    def getOutputArguments(self):
        # Readable property: one "out" argument carrying the current value.
        if not self.isReadable():
            return {}
        return {"value": SBus.Argument.Create("value", self.type, "out")}
    def isReadable(self):
        return self.access == "read" or self.access == "readwrite"
    def isWritable(self):
        return self.access == "write" or self.access == "readwrite"
class Annotation(Introspectable.Element):
    # A D-Bus <annotation> element plus static lookup helpers used by the
    # code generator to interpret annotation values.
    TagName = "annotation"
    def __init__(self, element):
        super(SBus.Annotation, self).__init__(element)
        # The value attribute is optional on annotations.
        self.value = self.getAttr("value", None)
        return
    @staticmethod
    def Find(annotations, name, default_value):
        # Return the named annotation's value, or the default when the
        # annotation is absent or has no value.
        if name in annotations:
            annotation = annotations[name]
            if annotation.value is None:
                return default_value
            return annotation.value
        return default_value
    @staticmethod
    def FindBool(annotations, name, Assume=False):
        # Boolean annotation lookup; `Assume` is the value assumed when
        # the annotation is missing.
        assume = "true" if Assume else "false"
        value = SBus.Annotation.Find(annotations, name, assume)
        if value.lower() == "true":
            return True
        else:
            return False
    @staticmethod
    def CheckIfTrue(names, annotations):
        # True if at least one of the named annotations is true.
        for name in names:
            if SBus.Annotation.FindBool(annotations, name, False):
                return True
        return False
    @staticmethod
    def CheckIfFalse(names, annotations):
        # True only if none of the named annotations is false.
        for name in names:
            if not SBus.Annotation.FindBool(annotations, name, True):
                return False
        return True
    @staticmethod
    def AtleastOneIsSet(names, annotations):
        # True if any of the named annotations is present with a value.
        for name in names:
            value = SBus.Annotation.Find(annotations, name, None)
            if value is not None:
                return True
        return False
class Argument(Introspectable.Element):
TagName = "arg"
def __init__(self, element, Name=None, Type=None, Direction=None,
Key=None):
if element is None:
self.element = None
self.name = Name
self.signature = Type
self.direction = Direction
self.key = Key
return
super(SBus.Argument, self).__init__(element)
self.signature = self.getExistingAttr("type")
self.direction = self.getAttr("direction", "in")
self.key = self.getAttr("key", None)
def isInput(self):
|
from src.pla | tform.jboss.interfaces import JMXInterface
class FPrint(JMXInterface):
    # Fingerprint for JBoss application server version 5.0, probed over
    # the JMX interface provided by the base class.
    def __init__(self):
        super(FPrint, self).__init__()
        self.version = "5.0"
|
from django.conf.urls.defaults import *
from django.contrib import admin
admin.autodisco | ver()
urlpatterns = patterns('',
| (r'^profiles/', include('easy_profiles.urls')),
(r'^admin/', include(admin.site.urls)),
)
|
path[5+int(path[0]=="/"):])) #yay for dirty hacks
def fullPathToIcon(path):
    """Map an absolute icon path to a MODFOLDER-relative, forward-slash path."""
    normalized = os.path.normpath(os.path.abspath(path))
    return normalized[len(MODFOLDER) - 5:].replace('\\', '/')
def getIcon(name):
    """Return the cached preview image path for `name`, or None if absent."""
    cached = os.path.join(util.CACHE_DIR, name)
    if not os.path.isfile(cached):
        return None
    logger.debug("Using cached preview image for: " + name)
    return cached
def getModInfo(modinfofile):
    """Extract a normalized mod-info dict from a parsed mod_info.lua.

    Returns (modinfofile, modinfo) on success, or None when the mod has
    no uid. The version is coerced to int, then float, then 0.
    """
    modinfo = modinfofile.parse({"name":"name","uid":"uid","version":"version","author":"author",
                                 "description":"description","ui_only":"ui_only",
                                 "icon":"icon"},
                                {"version":"1","ui_only":"false","description":"","icon":"","author":""})
    # lua booleans arrive as strings; normalize to a real bool.
    modinfo["ui_only"] = (modinfo["ui_only"] == 'true')
    if "uid" not in modinfo:
        logger.warn("Couldn't find uid for mod %s" % modinfo["name"])
        return None
    #modinfo["uid"] = modinfo["uid"].lower()
    # BUG FIX: the bare `except:` clauses also swallowed KeyboardInterrupt
    # and friends; only conversion failures should be caught here.
    try:
        modinfo["version"] = int(modinfo["version"])
    except (TypeError, ValueError):
        try:
            modinfo["version"] = float(modinfo["version"])
        except (TypeError, ValueError):
            modinfo["version"] = 0
            logger.warn("Couldn't find version for mod %s" % modinfo["name"])
    return (modinfofile, modinfo)
def parseModInfo(folder):
    """Parse <folder>/mod_info.lua; None when the folder is not a valid mod."""
    if not isModFolderValid(folder):
        return None
    parser = luaparser.luaParser(os.path.join(folder, "mod_info.lua"))
    return getModInfo(parser)
# Cache of parsed ModInfo objects keyed by zip filename or folder name
# (relative to MODFOLDER); populated lazily by the getModInfoFrom* helpers.
modCache = {}
def getModInfoFromZip(zfile):
    '''get the mod info from a zip file'''
    if zfile in modCache:
        return modCache[zfile]
    r = None
    if zipfile.is_zipfile(os.path.join(MODFOLDER, zfile)):
        # Renamed from `zip` to avoid shadowing the builtin.
        zip_file = zipfile.ZipFile(os.path.join(MODFOLDER, zfile), "r",
                                   zipfile.ZIP_DEFLATED)
        if zip_file.testzip() is None:
            for member in zip_file.namelist():
                filename = os.path.basename(member)
                if not filename:
                    continue
                if filename == "mod_info.lua":
                    modinfofile = luaparser.luaParser("mod_info.lua")
                    modinfofile.iszip = True
                    # The parser reads mod_info.lua through this handle.
                    # NOTE(review): the archive is deliberately left open;
                    # ModInfo may read from it later -- confirm.
                    modinfofile.zip = zip_file
                    r = getModInfo(modinfofile)
    if r is None:
        logger.debug("mod_info.lua not found in zip file %s" % zfile)
        return None
    f, info = r
    if f.error:
        logger.debug("Error in parsing mod_info.lua in %s" % zfile)
        return None
    m = ModInfo(**info)
    # BUG FIX: replaced a stray py2 `print zfile` debug statement (a syntax
    # error under py3) with a proper logger call.
    logger.debug("Loaded mod info from zip %s" % zfile)
    m.setFolder(zfile)
    m.update()
    modCache[zfile] = m
    return m
def getModInfoFromFolder(modfolder):  # modfolder must be local to MODFOLDER
    """Return a cached or freshly parsed ModInfo for the given mod folder."""
    if modfolder in modCache:
        return modCache[modfolder]
    parsed = parseModInfo(os.path.join(MODFOLDER, modfolder))
    if parsed is None:
        logger.debug("mod_info.lua not found in %s folder" % modfolder)
        return None
    parser, info = parsed
    if parser.error:
        logger.debug("Error in parsing %s/mod_info.lua" % modfolder)
        return None
    mod = ModInfo(**info)
    mod.setFolder(modfolder)
    mod.update()
    modCache[modfolder] = mod
    return mod
def getActiveMods(uimods=None):  # returns a list of ModInfo's containing information of the mods
    """uimods:
    None - return all active mods
    True - only return active UI Mods
    False - only return active non-UI Mods
    """
    try:
        if not os.path.exists(PREFSFILENAME):
            logger.info("No game.prefs file found")
            return []
        l = luaparser.luaParser(PREFSFILENAME)
        l.loweringKeys = False
        modlist = l.parse({"active_mods":"active_mods"},{"active_mods":{}})["active_mods"]
        if l.error:
            logger.info("Error in reading the game.prefs file")
            return []
        # uids flagged 'true' in the prefs are the active ones.
        uids = set(uid for uid, b in modlist.items() if b == 'true')
        # Keep installed mods matching the requested UI-ness (None = all),
        # then intersect with the active uid set.
        allmods = [m for m in installedMods
                   if uimods is None or m.ui_only == uimods]
        return [m for m in allmods if m.uid in uids]
    except Exception:
        # Best-effort: a corrupt prefs file means "no active mods", but the
        # failure is no longer silently swallowed.
        logger.exception("Failed to read active mods from game.prefs")
        return []
def setActiveMods(mods, keepuimods=True):  # uimods works the same as in getActiveMods
    """
    keepuimods:
    None: Replace all active mods with 'mods'
    True: Keep the UI mods already activated activated
    False: Keep only the non-UI mods that were activated activated
    So set it True if you want to set gameplay mods, and False if you want to set UI mods.
    Returns True on success, False when game.prefs could not be read/written.
    """
    if keepuimods is not None:
        # Active UI mods if True, active non-UI mods if False.
        keepTheseMods = getActiveMods(keepuimods)
    else:
        keepTheseMods = []
    allmods = keepTheseMods + mods
    # Render the lua table of active mod uids.
    s = "active_mods = {\n"
    for mod in allmods:
        s += "['%s'] = true,\n" % str(mod.uid)
    s += "}"
    # BUG FIX: use context managers so the file handle is closed on every
    # path, and raw strings for the regexes ("\s" in a non-raw string is an
    # invalid escape sequence in modern Python).
    try:
        with open(PREFSFILENAME, 'r') as f:
            data = f.read()
    except (IOError, OSError):
        logger.info("Couldn't read the game.prefs file")
        return False
    if re.search(r"active_mods\s*=\s*{.*?}", data, re.S):
        data = re.sub(r"active_mods\s*=\s*{.*?}", s, data, 1, re.S)
    else:
        data += "\n" + s
    try:
        with open(PREFSFILENAME, 'w') as f:
            f.write(data)
    except (IOError, OSError):
        # Message typo fixed ("Cound't" -> "Couldn't").
        logger.info("Couldn't write to the game.prefs file")
        return False
    return True
def updateModInfo(mod, info):  # should probably not be used.
    """
    Updates a mod_info.lua file with new data.
    Because those files can be random lua this function can fail if the file is complicated enough
    If every value however is on a seperate line, this should work.
    """
    logger.warn("updateModInfo called. Probably not a good idea")
    fname = mod.mod_info
    try:
        f = open(fname, 'r')
        data = f.read()
    except:
        logger.info("Something went wrong reading %s" % fname)
        return False
    else:
        f.close()
    for k, v in info.items():
        # Render the value as lua: bools/ints lowercased, strings quoted
        # with embedded quotes escaped. NOTE: `unicode` makes this py2-only.
        if type(v) in (bool, int): val = str(v).lower()
        if type(v) in (unicode, str): val = '"' + v.replace('"', '\\"') + '"'
        # Replace an existing `key = ...` line in place, otherwise append.
        if re.search(r'^\s*' + k, data, re.M):
            data = re.sub(r'^\s*' + k + r'\s*=.*$', "%s = %s" % (k, val), data, 1, re.M)
        else:
            if data[-1] != '\n': data += '\n'
            data += "%s = %s" % (k, val)
    try:
        f = open(fname, 'w')
        f.write(data)
    except:
        logger.info("Something went wrong writing to %s" % fname)
        return False
    else:
        f.close()
    return True
def generateThumbnail(sourcename, destname):
    """Given a dds file, generates a png file (or whatever the extension of dest is).

    Returns True when the destination file exists afterwards, False on
    read/convert failure.
    """
    logger.debug("Creating png thumnail for %s to %s" % (sourcename, destname))
    try:
        img = bytearray()
        buf = bytearray(16)
        # BUG FIX: use a context manager so the handle is closed even when
        # reading fails partway through (it previously leaked on error);
        # also renamed the local so it no longer shadows the builtin `file`.
        with open(sourcename, "rb") as dds:
            dds.seek(128)  # skip the 128-byte DDS header
            # Drop every 4th byte (alpha) of each 16-byte chunk -> RGB.
            while dds.readinto(buf):
                img += buf[:3] + buf[4:7] + buf[8:11] + buf[12:15]
        # The image is assumed square: side = sqrt(pixel count).
        size = int((len(img) / 3) ** (1.0 / 2))
        imageFile = QtGui.QImage(img, size, size, QtGui.QImage.Format_RGB888).rgbSwapped().scaled(100, 100, transformMode=QtCore.Qt.SmoothTransformation)
        imageFile.save(destname)
    except IOError:
        return False
    return os.path.isfile(destname)
def downloadMod(item): #most of this function is stolen from fa.maps.downloadMap
if isinstance(item,basestring):
link = MODVAULT_DOWNLOAD_ROOT + urllib2.quote(item)
logger.debug("Getting mod from: " + link)
else:
link = item.link
logger.debug("Getting mod from: " + link)
link = urllib2.quote(link, "http://")
progress = QtGui.QProgressDialog()
progress.setCancelButtonText("Cancel")
progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCor |
"""
series.py
:copyright: (c) 2014-2015 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
This shows how to use **SeriesReader** to get the data in various ways
But you can use them with **Reader** class as well
"""
import os
from pyexcel.ext import ods3
from pyexcel import SeriesReader
from pyexcel.utils import to_dict, to_array
from pyexcel.filters import OddRowFilter, EvenColumnFilter
from pyexcel import Writer
import json
def main(base_dir):
    """Demonstrate the SeriesReader access patterns on example_series.ods.

    The sheet holds a 3x3 grid with a header row; each demo prints the
    data traversed a different way, then the filtered result is written
    to an xls file.
    """
    # print all in json
    #
    # Column 1 Column 2 Column 3
    # 1        4        7
    # 2        5        8
    # 3        6        9
    reader = SeriesReader(os.path.join(base_dir,"example_series.ods"))
    data = to_dict(reader)
    print(json.dumps(data))
    # output:
    # {"Column 2": [4.0, 5.0, 6.0], "Column 3": [7.0, 8.0, 9.0], "Column 1": [1.0, 2.0, 3.0]}
    # get the column headers
    print(reader.colnames)
    # [u'Column 1', u'Column 2', u'Column 3']
    # get the content in one dimensional array
    data = to_array(reader.enumerate())
    print(data)
    # [1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0]
    # get the content in one dimensional array
    # in reverse order
    data = to_array(reader.reverse())
    print(data)
    # get the content in one dimensional array
    # but iterate it vertically
    data = to_array(reader.vertical())
    print(data)
    # [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
    # get the content in one dimensional array
    # but iterate it vertically in revserse
    # order
    data = to_array(reader.rvertical())
    print(data)
    #[9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]
    # get a two dimensional array
    data = to_array(reader.rows())
    print(data)
    #[[1.0, 4.0, 7.0], [2.0, 5.0, 8.0], [3.0, 6.0, 9.0]]
    # get a two dimensional array in reverse
    # order
    data = to_array(reader.rrows())
    print(data)
    # [[3.0, 6.0, 9.0], [2.0, 5.0, 8.0], [1.0, 4.0, 7.0]]
    # get a two dimensional array but stack columns
    data = to_array(reader.columns())
    print(data)
    # [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
    # get a two dimensional array but stack columns
    # in reverse order
    data = to_array(reader.rcolumns())
    print(data)
    #[[7.0, 8.0, 9.0], [4.0, 5.0, 6.0], [1.0, 2.0, 3.0]]
    # filter out odd rows and even columns
    reader.filter(OddRowFilter())
    reader.filter(EvenColumnFilter())
    data = to_dict(reader)
    print(data)
    # {u'Column 3': [8.0], u'Column 1': [2.0]}
    # and you can write the filtered results
    # into a file
    w = Writer("example_series_filter.xls")
    w.write_reader(reader)
    w.close()
if __name__ == '__main__':
    # Run against the current working directory, which must contain
    # example_series.ods.
    main(os.getcwd())
#!/usr/bin/env python
# Minimal smoke-test script: prints a greeting to stdout.
print("Hello world!")
|
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Ap | ache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distribut | ed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto import LinuxDevice, Parameter
class OdroidXU3LinuxDevice(LinuxDevice):
    # Declarative wlauto device definition; behavior lives in LinuxDevice
    # and the listed core modules.
    name = "odroidxu3_linux"
    description = 'HardKernel Odroid XU3 development board (Ubuntu image).'
    core_modules = [
        'odroidxu3-fan',
    ]
    parameters = [
        # big.LITTLE topology: four A7 cores (cluster 0) and four A15
        # cores (cluster 1).
        Parameter('core_names', default=['a7', 'a7', 'a7', 'a7', 'a15', 'a15', 'a15', 'a15'], override=True),
        Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1, 1, 1], override=True),
    ]
    abi = 'armeabi'
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import | migrations, models
class Migration(migrations.Migration):
    # Meta-only migration: orders Recipient records by last name and sets a
    # human-friendly plural for the admin. No schema change is involved.
    dependencies = [
        ('customers', '0009_recipient_type'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='recipient',
            options={'ordering': ['last_name'], 'verbose_name_plural': 'Recipients'},
        ),
    ]
|
'''
Given: A positive integer N≤100000, a number x between 0 and 1, and a DNA string s of length at most 10 bp.
Return: The probability that if N random DNA strings having the same length as s are constructed with GC-content x
(see “Introduction to Random Strings”), then at least one of the strings equals s.
We allow for the same random string to be created more than once.
'''
# P(at least one of the strings equals to s) = 1 - P(none of the strings equals s)
def random_motif_match(N, x, s):
s_construct = {"A": (1 - x) / 2,
"T": (1 - x) / 2,
"C": x / 2,
"G": x / 2 | }
prob = 1
# probability of exactly equals to s
for b in s:
prob *= s_construct[b]
return 1 - (1 - prob) ** N
if __name__ == "__main__":
    # Input format: first line "N x" (count and GC-content), second line the
    # DNA string s. The result is both written to a file and echoed.
    with open("data/rosalind_rstr.txt", "r") as f:
        lines = f.readlines()
    N = int(lines[0].rstrip().split(" ")[0])
    x = float(lines[0].rstrip().split(" ")[1])
    s = lines[1].rstrip()
    with open("data/output_rstr.txt", "w") as o:
        o.write(str(random_motif_match(N, x, s)))
    print(random_motif_match(N, x, s))
|
#! /usr/bin/env python
"""Show file statistics by extension."""
import os
import sys
class Stats:
def __init__(self):
self.stats = {}
def statargs(self, args):
for arg in args:
if os.path.isdir(arg):
self.statdir(arg)
elif os.path.isfile(arg):
self.statfile(arg)
else:
sys.stderr.write("Can't find %s\n" % file)
self.addstats("<???>", "unknown", 1)
def statdir(self, dir):
self.addstats("<dir>", "dirs", 1)
try:
names = os.listdir(dir)
except os.error, err:
sys.stderr.write("Can't list %s: %s\n" % (file, err))
self.addstats(ext, "unlistable", 1)
return
names.sort()
for name in names:
if name.startswith(".#"):
continue # Skip CVS temp files
if name.ends | with("~"):
continue# Skip Emacs backup files
| full = os.path.join(dir, name)
if os.path.islink(full):
self.addstats("<lnk>", "links", 1)
elif os.path.isdir(full):
self.statdir(full)
else:
self.statfile(full)
def statfile(self, file):
head, ext = os.path.splitext(file)
head, base = os.path.split(file)
if ext == base:
ext = "" # E.g. .cvsignore is deemed not to have an extension
ext = os.path.normcase(ext)
if not ext:
ext = "<none>"
self.addstats(ext, "files", 1)
try:
f = open(file, "rb")
except IOError, err:
sys.stderr.write("Can't open %s: %s\n" % (file, err))
self.addstats(ext, "unopenable", 1)
return
data = f.read()
f.close()
self.addstats(ext, "bytes", len(data))
if '\0' in data:
self.addstats(ext, "binary", 1)
return
if not data:
self.addstats(ext, "empty", 1)
#self.addstats(ext, "chars", len(data))
lines = data.splitlines()
self.addstats(ext, "lines", len(lines))
del lines
words = data.split()
self.addstats(ext, "words", len(words))
def addstats(self, ext, key, n):
d = self.stats.setdefault(ext, {})
d[key] = d.get(key, 0) + n
def report(self):
exts = self.stats.keys()
exts.sort()
# Get the column keys
columns = {}
for ext in exts:
columns.update(self.stats[ext])
cols = columns.keys()
cols.sort()
colwidth = {}
colwidth["ext"] = max([len(ext) for ext in exts])
minwidth = 6
self.stats["TOTAL"] = {}
for col in cols:
total = 0
cw = max(minwidth, len(col))
for ext in exts:
value = self.stats[ext].get(col)
if value is None:
w = 0
else:
w = len("%d" % value)
total += value
cw = max(cw, w)
cw = max(cw, len(str(total)))
colwidth[col] = cw
self.stats["TOTAL"][col] = total
exts.append("TOTAL")
for ext in exts:
self.stats[ext]["ext"] = ext
cols.insert(0, "ext")
def printheader():
for col in cols:
print "%*s" % (colwidth[col], col),
print
printheader()
for ext in exts:
for col in cols:
value = self.stats[ext].get(col, "")
print "%*s" % (colwidth[col], value),
print
printheader() # Another header at the bottom
def main():
    """Entry point: stat the paths given on the command line (default: cwd)."""
    targets = sys.argv[1:] or [os.curdir]
    collector = Stats()
    collector.statargs(targets)
    collector.report()
if __name__ == "__main__":
    main()
|
return list(data)
else:
return [data]
HEX_ELEM = '[0-9A-Fa-f]'
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
HEX_ELEM + '{4}', HEX_ELEM + '{4}',
HEX_ELEM + '{12}'])
# Note: In order to ensure that the MAC address is unicast the first byte
# must be even.
MAC_PATTERN = "^%s[aceACE02468](:%s{2}){5}$" % (HEX_ELEM, HEX_ELEM)
# Dictionary that maintains a list of validation functions
validators = {'type:dict': _validate_dict,
'type:dict_or_none': _validate_dict_or_none,
'type:dict_or_empty': _validate_dict_or_empty,
'type:dict_or_nodata': _validate_dict_or_nodata,
'type:fixed_ips': _validate_fixed_ips,
'type:hostroutes': _validate_hostroutes,
'type:ip_address': _validate_ip_address,
'type:ip_address_or_none': _validate_ip_address_or_none,
'type:ip_pools': _validate_ip_pools,
'type:mac | _address': _validate_mac_address,
'type | :mac_address_or_none': _validate_mac_address_or_none,
'type:nameservers': _validate_nameservers,
'type:non_negative': _validate_non_negative,
'type:range': _validate_range,
'type:regex': _validate_regex,
'type:regex_or_none': _validate_regex_or_none,
'type:string': _validate_string,
'type:string_or_none': _validate_string_or_none,
'type:not_empty_string': _validate_not_empty_string,
'type:not_empty_string_or_none':
_validate_not_empty_string_or_none,
'type:subnet': _validate_subnet,
'type:subnet_list': _validate_subnet_list,
'type:subnet_or_none': _validate_subnet_or_none,
'type:uuid': _validate_uuid,
'type:uuid_or_none': _validate_uuid_or_none,
'type:uuid_list': _validate_uuid_list,
'type:values': _validate_values,
'type:boolean': _validate_boolean}
# Define constants for base resource name
NETWORK = 'network'
NETWORKS = '%ss' % NETWORK
PORT = 'port'
PORTS = '%ss' % PORT
SUBNET = 'subnet'
SUBNETS = '%ss' % SUBNET
# Note: a default of ATTR_NOT_SPECIFIED indicates that an
# attribute is not required, but will be generated by the plugin
# if it is not specified. Particularly, a value of ATTR_NOT_SPECIFIED
# is different from an attribute that has been specified with a value of
# None. For example, if 'gateway_ip' is omitted in a request to
# create a subnet, the plugin will receive ATTR_NOT_SPECIFIED
# and the default gateway_ip will be generated.
# However, if gateway_ip is specified as None, this means that
# the subnet does not have a gateway IP.
# The following is a short reference for understanding attribute info:
# default: default value of the attribute (if missing, the attribute
# becomes mandatory.
# allow_post: the attribute can be used on POST requests.
# allow_put: the attribute can be used on PUT requests.
# validate: specifies rules for validating data in the attribute.
# convert_to: transformation to apply to the value before it is returned
# is_visible: the attribute is returned in GET responses.
# required_by_policy: the attribute is required by the policy engine and
# should therefore be filled by the API layer even if not present in
# request body.
# enforce_policy: the attribute is actively part of the policy enforcing
# mechanism, ie: there might be rules which refer to this attribute.
RESOURCE_ATTRIBUTE_MAP = {
NETWORKS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
'subnets': {'allow_post': False, 'allow_put': False,
'default': [],
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
SHARED: {'allow_post': True,
'allow_put': True,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': True,
'required_by_policy': True,
'enforce_policy': True},
},
PORTS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True, 'default': '',
'validate': {'type:string': None},
'is_visible': True},
'network_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:uuid': None},
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'mac_address': {'allow_post': True, 'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:mac_address': None},
'enforce_policy': True,
'is_visible': True},
'fixed_ips': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'convert_list_to': convert_kvp_list_to_dict,
'validate': {'type:fixed_ips': None},
'enforce_policy': True,
'is_visible': True},
'device_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '',
'is_visible': True},
'device_owner': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '',
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
},
SUBNETS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True, 'default': '',
'validate': {'type:string': None},
'is_visible': True},
'ip_version': {'allow_post': True, 'allow_put': False,
'convert_to': convert_to_int,
'validate': {'type:values': [4, 6]},
'is_visible': True},
'network_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:uuid': None},
'is_visible': True},
'cidr': {'allow_post': True, 'allow_put': False,
'validate': {'type:subnet': None},
'is_visible': True},
'gateway_ip': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:ip_address_or_none': None},
'is_visible': True},
|
#### | NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Return the Intangible draft-schematic object for the cho nor hoola dish."""
    result = Intangible()
    result.template = "object/draft_schematic/food/shared_dish_cho_nor_hoola.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
rg/XML/1998/namespace'}
tag_ns = '{http://www.tei-c.org/ns/1.0}'
xml_ns = '{http://www.w3.org/XML/1998/namespace}'
def __init__(self, file_path):
tree = etree.parse(file_path)
self.__root = xpath(tree.getroot(), './tei:text/tei:body', self.ns)[0]
@classmethod
def _words(self, text_root):
return [w.text for w in xpath(text_root, './/*', self.ns) if
w.tag == self.tag_ns + "w" or w.tag == self.tag_ns + "c"]
@classmethod
def _sents(self, text_root):
return [MTEFileReader._words(s) for s in xpath(text_root, './/tei:s', self.ns)]
@classmethod
def _paras(self, text_root):
return [MTEFileReader._sents(p) for p in xpath(text_root, './/tei:p', self.ns)]
@classmethod
def _lemma_words(self, text_root):
return [(w.text, w.attrib['lemma']) for w in xpath(text_root, './/tei:w', self.ns)]
@classmethod
def _tagged_words(self, text_root, tags=""):
if tags is None or tags == "":
return [(w.text, w.attrib['ana']) for w in xpath(text_root, './/tei:w', self.ns)]
else:
tags = re.compile('^' + re.sub("-",".",tags) + '.*$')
return [(w.text, w.attrib['ana']) for w in xpath(text_root, './/tei:w', self.ns)
if tags.match(w.attrib['ana'])]
@classmethod
def _lemma_sents(self, text_root):
return [MTEFileReader._lemma_words(s) for s in xpath(text_root, './/tei:s', self.ns)]
@classmethod
def _tagged_sents(self, text_root, tags=""):
# double list comprehension to remove empty sentences in case there is a sentence only containing punctuation marks
return [t for t in [MTEFileReader._tagged_words(s, tags) for s in xpath(text_root, './/tei:s', self.ns)] if len(t) > 0]
@classmethod
def _lemma_paras(self, text_root):
return [MTEFileReader._lemma_sents(p) for p in xpath(text_root, './/tei:p', self.ns)]
@classmethod
def _tagged_paras(self, text_root, tags=""):
return [t for t in [MTEFileReader._tagged_sents(p, tags) for p in xpath(text_root, './/tei:p', self.ns)] if len(t) > 0]
    def words(self):
        """Return all word and punctuation tokens of this document."""
        return MTEFileReader._words(self.__root)
    def sents(self):
        """Return the document as a list of sentences (word lists)."""
        return MTEFileReader._sents(self.__root)
    def paras(self):
        """Return the document as a list of paragraphs (sentence lists)."""
        return MTEFileReader._paras(self.__root)
    def lemma_words(self):
        """Return (word, lemma) pairs for the whole document."""
        return MTEFileReader._lemma_words(self.__root)
    def tagged_words(self, tags=""):
        """Return (word, msd-tag) pairs, filtered by the *tags* prefix."""
        return MTEFileReader._tagged_words(self.__root, tags)
    def lemma_sents(self):
        """Return (word, lemma) pairs grouped by sentence."""
        return MTEFileReader._lemma_sents(self.__root)
def tagged_sents(self, tags=""):
return MTEFileReader._tagged_sents(self.__root)
    def lemma_paras(self):
        """Return (word, lemma) pairs grouped by paragraph."""
        return MTEFileReader._lemma_paras(self.__root)
def tagged_paras(self, tags=""):
return MTEFileReader._tagged_paras(self.__root)
class MTETagConverter:
    """
    Class for converting msd tags to universal tags, more conversion
    options are currently not implemented.
    """

    # First character of an MSD tag -> universal POS tag.
    mapping_msd_universal = {
        'A': 'ADJ', 'S': 'ADP', 'R': 'ADV', 'C': 'CONJ',
        'D': 'DET', 'N': 'NOUN', 'M': 'NUM', 'Q': 'PRT',
        'P': 'PRON', 'V': 'VERB', '.': '.', '-': 'X'}

    @staticmethod
    def msd_to_universal(tag):
        """
        This function converts the annotation from the Multex-East to the universal tagset
        as described in Chapter 5 of the NLTK-Book.

        Unknown tags are mapped to X; a leading '#' marker is skipped.
        Robustness fix: an empty tag (or a bare "#") no longer raises
        IndexError -- it maps to 'X' like any other unknown tag.
        """
        if not tag:
            indicator = '-'
        else:
            indicator = tag[1] if tag[0] == "#" and len(tag) > 1 else tag[0]
        if not indicator in MTETagConverter.mapping_msd_universal:
            indicator = '-'
        return MTETagConverter.mapping_msd_universal[indicator]
class MTECorpusReader(TaggedCorpusReader):
"""
Reader for corpora following the TEI-p5 xml scheme, such as MULTEXT-East.
MULTEXT-East contains part-of-speech-tagged words with a quite precise tagging
scheme. These tags can be converted to the Universal tagset
"""
    def __init__(self, root=None, fileids=None, encoding='utf8'):
        """
        Construct a new MTECorpusReader for a set of documents
        located at the given root directory. Example usage:
        >>> root = '/...path to corpus.../'
        >>> reader = MTECorpusReader(root, 'oana-*.xml', 'utf8') # doctest: +SKIP
        :param root: The root directory for this corpus. (default points to location in multext config file)
        :param fileids: A list or regexp specifying the fileids in this corpus. (default is oana-en.xml)
        :param encoding: The encoding of the given files (default is utf8)
        """
        TaggedCorpusReader.__init__(self, root, fileids, encoding)
def __fileids(self, fileids):
if fileids is None: fileids = self._fileids
elif isinstance(fileids, compat.string_types): fileids = [fileids]
# filter wrong userinput
fileids = filter(lambda x : x in self._fileids, fileids)
# filter multext-east sourcefiles that are not compatible to the teip5 specification
fileids = filter(lambda x : x not in ["oana-bg.xml", "oana-mk.xml"], fileids)
if not fileids:
print("No valid multext-east file specified")
return fileids
def readme(self):
"""
Prints some information about this corpus.
:return: the content of the attached README file
:rtype: str
"""
return self.open("00README.txt").read()
def raw(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a single string.
:rtype: str
"""
return concat([self.open(f).read() for f in self.__fileids(fileids)])
def words(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a list of words and punctuation symbols.
:rtype: list(str)
"""
return reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).words() for f in self.__fileids(fileids)], [])
def sents(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a list of sentences or utterances,
each encoded as a list of word strings
:rtype: list(list(str))
"""
return reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).sents() for f in self.__fileids(fileids)], [])
def paras(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a list of paragraphs, each encoded as a list
of sentences, which are in turn encoded as lists of word string
:rtype: list(list(list(str)))
"""
return reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).paras() for f in self.__fileids(fileids)], [])
def lemma_words(self, fileids=None):
"""
:param fileids: A list specifying the fileids that should be used.
:return: the given file(s) as a list of words, the corresponding lemmas
and punctuation symbols, encoded as tuples (word, lemma)
:rtype: list(tuple(str,str))
"""
return reduce(lambda a, b : a + b ,[MTEFileReader(os.path.join(self._root, f)).lemma_words() for f in self.__fileids(fileids)], [])
def tagged_words(self, fileids=None, tagset="msd", tags=None):
"""
:param fileids: A list specifying the fileids that should be used.
:param tagset: The tagset that should be used in the returned object,
either "universal" or "msd", "msd" is the default
:param tags: An MSD Tag that is used to filter all parts of the used corpus
that are not more precise or at least equal to the given tag
:return: the given file(s) as a list of tagged words and punctuation symbols
encoded as tuples (word, tag |
#!/usr/bin/env python
from setuptools import setup
NAME = 'coinshot'
DESCRIPTION = 'simple python module for pushover.net'
# Version and long description are read from files kept next to setup.py so
# they can be updated without editing this script.
VERSION = open('VERSION').read().strip()
LONG_DESC = open('README.rst').read()
LICENSE = "MIT License"
setup(
    name=NAME,
    version=VERSION,
    author='Charles Thomas',
    author_email='ch@rlesthom.as',
    packages=['coinshot'],
    url='https://github.com/charlesthomas/%s' % NAME,
    license=LICENSE,
    description=DESCRIPTION,
    long_description=LONG_DESC,
    long_description_content_type='text/x-rst',
    install_requires=["simplejson >= 3.3.0"],
    scripts=['bin/shoot'],
    classifiers=['Development Status :: 5 - Production/Stable',
                 'Intended Audience :: Developers',
                 'Intended Audience :: End Users/Desktop',
                 'License :: OSI Approved :: MIT License',
                 'Operating System :: MacOS :: MacOS X',
                 'Operating System :: POSIX :: Linux',
                 'Programming Language :: Python :: 2.7',
                 'Programming Language :: Python :: 3',
                 'Topic :: Communications',
                 'Topic :: Software Development :: Libraries :: Python Modules']
)
|
import os
import tweepy
from query_db import query_db, send_user_queries_to_db, read_in_bb_file
from our_keys.twitter_keys import my_keys
from itertools import chain, repeat
u"""Reads in a file of cities and their bounding boxes. Queries the
database to get a list of all unique users who have tweeted from that
city. Queries Twitter api to get 200 tweets from each user, then inserts
200 tweets for up to 100 users per city into a separate database table
called "Tweet200."""
ROOT_DIR = os.path.abspath(os.getcwd())
def get_twitter_api():
u"""Gets twitter keys from key file."""
for our_set, our_keys in my_keys.items():
auth = tweepy.OAuthHandler(
our_keys['consumer_key'],
our_keys['consumer_secret']
)
auth.set_access_token(
our_keys['access_key'],
our_keys['access_secret']
)
print "Hi, I'm the key generator: ", our_keys['access_key']
yield tweepy.API(auth)
def get_unique_handles(vals):
    u"""Takes in a list of tweets from a given city. Returns the unique
    user handles that appear more than twice (the "heavy" users)."""
    counts = {}
    for row in vals:
        handle = row[1]
        counts[handle] = counts.get(handle, 0) + 1
    return [handle for handle in counts if counts[handle] > 2]
def format_tweet_history(history, user, city):
u"""Formats tweets pieces to be fed to sql query.
History is a list-like set of tweets. User is the screen name
as a string. City is the string name of the city we querried for."""
tweet_history = []
for tweet in history:
screen_name = user
text = tweet.text
if len(text) > 150:
print text
created_at = tweet.cr | eated_at.strftime('%m/%d/%Y, %H:%M')
location = tweet.geo
location_lat = None
| location_lng = None
if location:
location_lat = location['coordinates'][0]
location_lng = location['coordinates'][1]
hashtags = []
# if location:
tweet = (
screen_name, text, location_lat, location_lng,
created_at, hashtags, city
)
tweet_history.append(tweet)
return tweet_history
def check_list_low_tweeters():
    # Returns the list of screen names previously recorded as having too few
    # tweets, one name per line.
    # NOTE(review): ROOT_DIR has no trailing separator, so this opens
    # "<cwd>text/stop_names.txt" -- presumably os.path.join was intended.
    # The same pattern appears elsewhere in this module; fix them together.
    with open(ROOT_DIR + "text/stop_names.txt", 'r') as a_file:
        names = a_file.read().split("\n")
    return names
def query_twitter_for_histories(users, city=None, cap=100, data_collection=True):
    u"""Calls function to return a dict of cities and the unique users for each
    city. Iterates over the dict to extract the tweet text/locations/timestamps
    for each tweet, bundles results into DB-friendly tuples. Returns a list of
    lists of tuples."""
    api_generator = get_twitter_api()
    # Repeat the full set of API keys (1000x) so a fresh key can be rotated
    # in whenever the current one errors out (e.g. rate limiting).
    api_generator = chain.from_iterable(repeat(tuple(api_generator), 1000))
    api = api_generator.next()
    city_tweets = []
    user_count = 0
    too_low_count = 0
    for user in users:
        if user_count > cap:
            break
        # Skip users already known to have too few tweets (saves API calls).
        if user in check_list_low_tweeters() and data_collection is True:
            continue
        history = []
        # tweet_history = []
        try:
            history = api.user_timeline(screen_name=user, count=200)
        except tweepy.error.TweepError as err:
            # On any Tweepy error, switch to the next API key and move on.
            print "Tweepy Error: ", err.message
            api = api_generator.next()
            continue
        if len(history) >= 200 or not data_collection:
            user_count += 1
            tweet_history = format_tweet_history(history, user, city)
            # if len(tweet_history):
            city_tweets.append(tweet_history)
            print user_count
        else:
            print "Too few tweets in this user's history."
            # Remember this user so later runs skip them immediately.
            with open(ROOT_DIR + "text/stop_names.txt", 'a') as a_file:
                a_file.write(user)
                a_file.write("\n")
            too_low_count += 1
    total = user_count + too_low_count
    print "total requests: ", total
    return city_tweets
def process_each_city():
    u"""Calls functions to insert user data into Tweet200 table."""
    bb_dict = read_in_bb_file()
    for city, values in bb_dict.items():
        # stop_cities.txt lists cities that were already processed.
        with open(ROOT_DIR + "text/stop_cities.txt", "r") as ffff:
            stop_cities = ffff.read()
        if city not in stop_cities:
            vals = query_db(city, values)
            print "Now checking ", city
            handles = get_unique_handles(vals)
            print city, len(handles)
            # Only bother querying Twitter for cities with enough heavy users.
            if len(handles) >= 200:
                print "Now querying twitter for histories"
                tweets = query_twitter_for_histories(handles, city)
                # Only store cities where at least 100 users qualified.
                if len(tweets) >= 100:
                    send_user_queries_to_db(tweets, city)
                else:
                    print "Not enough users with twitter histories in ", city
if __name__ == "__main__":
while True:
try:
process_each_city()
except Exception:
print "I got an exception"
continue
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Agent that gets the local keyboard input in the act() function.
Applies safety classifier(s) to process user and partner messages.
"""
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
from parlai.core.message import Message
from parlai.utils.misc import display_messages
from parlai.utils.strings import colorize
from parlai.agents.local_human.local_human import LocalHumanAgent
from parlai.utils.safety import OffensiveStringMatcher, OffensiveLanguageClassifier
OFFENSIVE_USER_REPLY = '[ Sorry, could not process that message. Please try again. ]'
OFFENSIVE_BOT_REPLY = (
'[ Unsafe model reply detected. Clearing agent history. Please try again. ]'
)
class SafeLocalHumanAgent(LocalHumanAgent):
    """
    Local human agent whose input and output are screened by safety modules.
    Offensive user messages are rejected and re-prompted; offensive bot
    replies are suppressed (see ``observe``).
    """
    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        """
        Add command-line arguments specifically for this agent.
        """
        agent = parser.add_argument_group('Safe Local Human Arguments')
        agent.add_argument(
            '--safety',
            type=str,
            default='all',
            choices={'none', 'string_matcher', 'classifier', 'all'},
            help='Apply safety filtering to messages',
        )
        super().add_cmdline_args(parser, partial_opt=partial_opt)
        return parser
    def __init__(self, opt, shared=None):
        super().__init__(opt)
        self.id = 'safeLocalHuman'
        self._init_safety(opt)
    def _init_safety(self, opt):
        """
        Initialize safety modules according to the --safety option.
        """
        if opt['safety'] == 'string_matcher' or opt['safety'] == 'all':
            self.offensive_string_matcher = OffensiveStringMatcher()
        if opt['safety'] == 'classifier' or opt['safety'] == 'all':
            self.offensive_classifier = OffensiveLanguageClassifier()
        # True while the human's last message was offensive; while set, bot
        # replies are ignored (see observe()).
        self.self_offensive = False
    def check_offensive(self, text):
        """
        Check if text is offensive using string matcher and classifier.

        Only the modules enabled by --safety exist as attributes, hence the
        hasattr checks.
        """
        if text == '':
            return False
        if (
            hasattr(self, 'offensive_string_matcher')
            and text in self.offensive_string_matcher
        ):
            return True
        if hasattr(self, 'offensive_classifier') and text in self.offensive_classifier:
            return True
        return False
    def observe(self, msg):
        """
        Observe bot reply if and only if it passes.
        """
        if self.self_offensive:
            # User was offensive, they must try again
            return
        # Now check if bot was offensive
        bot_offensive = self.check_offensive(msg.get('text', ''))
        if not bot_offensive:
            # View bot message
            print(
                display_messages(
                    [msg],
                    add_fields=self.opt.get('display_add_fields', ''),
                    prettify=self.opt.get('display_prettify', False),
                    verbose=self.opt.get('verbose', False),
                )
            )
            msg.force_set('bot_offensive', False)
        else:
            msg.force_set('bot_offensive', True)
            print(OFFENSIVE_BOT_REPLY)
    def get_reply(self):
        # Read one line from the terminal; literal "\n" sequences become
        # real newlines so multi-line messages can be typed.
        reply_text = input(colorize('Enter Your Message:', 'field') + ' ')
        reply_text = reply_text.replace('\\n', '\n')
        return reply_text
    def act(self):
        """
        Prompt the user until a non-offensive message is entered; handle the
        [DONE] and [EXIT] control tokens by raising StopIteration.
        """
        # get human reply
        reply = Message(
            {
                'id': self.getID(),
                'label_candidates': self.fixedCands_txt,
                'episode_done': False,
            }
        )
        reply_text = self.get_reply()
        # check if human reply is offensive
        self.self_offensive = self.check_offensive(reply_text)
        while self.self_offensive:
            print(OFFENSIVE_USER_REPLY)
            reply_text = self.get_reply()
            # check if human reply is offensive
            self.self_offensive = self.check_offensive(reply_text)
        # check for episode done
        if '[DONE]' in reply_text or self.opt.get('single_turn', False):
            raise StopIteration
        # set reply text
        reply['text'] = reply_text
        # check if finished
        if '[EXIT]' in reply_text:
            self.finished = True
            raise StopIteration
        return reply
|
ython
import roslib
roslib.load_manifest('human_model')
import rospy
import json
import tf
import numpy
from abc import ABCMeta,abstractmethod
from tf.transformations import quaternion_multiply as quatMult,quaternion_conjugate
from collections import deque,defaultdict,OrderedDict
"""Module for converting tf data to construct a human model"""
def Vec(*args):
    """returns a vector (numpy float array) with the length of number of given arguments"""
    components = tuple(args)
    return numpy.asarray(components, dtype=float)
def normalize(v):
    """returns unit vector or quaternion"""
    magnitude = numpy.linalg.norm(v)
    return v / magnitude
def quatRotatePoint(q,p,o=Vec(0,0,0)):
    """returns point p rotated around quaternion q with the origin o (default (0,0,0))"""
    # Embed the offset point as a pure quaternion (w = 0), conjugate by q,
    # then translate back to the origin o.
    pure = numpy.append(p-o,(0,))
    rotated = quatMult(quatMult(q,pure),quaternion_conjugate(q))
    return rotated[:3]+o
def calculateQuaternion(v1,v2):
    """calculates the quaternion for rotating v1 to v2. Note that both v1 and v2 must be unit vectors"""
    axis = numpy.cross(v1,v2)
    w = 1+numpy.dot(v1,v2)
    return normalize(numpy.append(axis,(w,)))
class AveragePosition(object):
    """Example Position Class
    Calculates the average of the last n positions lazily
    Calculated value can be accessed or changed via pos attribute:
    p=AveragePosition(10)
    p.pos+=Vec(1,2,3)
    print(p.pos)
    If an alternative position class is needed to be defined these functions
    must be defined in class:
    @property
    def pos(self):
        ...
    @pos.setter
    def pos(self,p):
        ...
    def append(self,p):
        ...
    """
    def __init__(self,n=100):
        # Bounded deque: only the last n samples contribute to the average.
        # Seeded with the origin so pos is defined before any sample arrives.
        self.transformations=deque((Vec(0,0,0),),n)
        self.calculated=None
    @property
    def pos(self):
        # Lazily (re)compute the mean; None marks the cached value as stale.
        if self.calculated is None:
            self.calculated=numpy.average(self.transformations,0)
        return self.calculated
    @pos.setter
    def pos(self,p):
        self.calculated=p
    def append(self,p):
        """appends the given position p to the deque, and resets the calculated
        average value"""
        # Fix: this docstring was previously a stray bare-string statement
        # placed between the setter and this method (a no-op), so
        # help(AveragePosition.append) showed nothing.
        self.calculated=None
        self.transformations.append(p)
class JointTree(object):
    """Recursive data structure to define joint tree.It have following attributes:
    length:distance to the parent (if not fixed frame)
    fixedFrame:fixates the point to the fixedFrame+the displacement of the tree
    invert:inverts the rotating axis for connected limb
    displacement:used to preserve the position of the node with respect to its parent(resets on new position)
    limbPos:position of the limb(resets on new position)
    limbRot:orientation of the limb(resets on new position)
    """
    def toDict(self,ordered=False):
        """Converts tree to dictionary which can be exported as JSON,if ordered is true
        it returns an OrderedDict instead of dictionary and preserves the order of attributes"""
        d=OrderedDict if ordered else dict
        return d(
            ((
                self.name,
                d((
                    ('length',self.length),
                    ('invert',self.invert),
                    ('fixedFrame',None if self.fixedFrame is None else tuple(self.fixedFrame)),
                    ('children',tuple(i.toDict(ordered) for i in self.children)),
                ))
            ),))
    @staticmethod
    def fromDict(dictionary,pos):
        """converts a dictionary to JointTree"""
        # The dictionary has a single top-level entry: {name: attributes}.
        (k,v)=next(iter(dictionary.items()))
        return JointTree(k,pos,**v)
    def __init__(self,name,posFunc,**kwargs):
        """gets the name of the node and a function takes no argument,returns a Position class
        (e.g. lambda : AveragePosition(10). It takes these optional arguments with the following default values:
        length=0
        invert=False
        fixedFrame=None
        children=[] (it can contain either dictionary or JointTree)
        """
        self.name=name
        self.currentPos=posFunc()
        self.length=kwargs.get("length",0)
        self.invert=kwargs.get("invert",False)
        fixedFrame=kwargs.get("fixedFrame",None)
        self.fixedFrame=None if fixedFrame is None else Vec(*fixedFrame)
        self.children=[]
        children=kwargs.get("children",[])
        # Children may be given either as plain dicts (from JSON) or as
        # already-built JointTree nodes; the first element decides which.
        # An empty children list simply raises IndexError and is ignored.
        try:
            if isinstance(children[0],dict):
                for i in children:
                    (k,v)=next(iter(i.items()))
                    self.addChild(JointTree(k,posFunc,**v))
            else:
                for i in children:
                    self.addChild(i)
        except IndexError:
            pass
        self.parent=None
        self.__uncalculate()
    def __uncalculate(self):
        # Reset per-frame derived state (displacement and limb pose).
        self.displacement=Vec(0,0,0)
        self.limbPos=Vec(0,0,0)
        self.limbRot=Vec(0,0,0,1)
    def __iter__(self):
        """iterates over tree depth-first order"""
        yield self
        for i in self.children:
            for j in iter(i):
                yield j
    def __getitem__(self,name):
        """returns the node with the given name, it raises a KeyError if there is no match"""
        for c in self:
            if c.name==name:
                return c
        raise KeyError("There is no node in tree with '{}' name".format(name))
    def addChild(self,child):
        """adds new node to the tree"""
        child.parent=self
        self.children.append(child)
    def collectPosition(self,ls):
        """gets the position of the joints from tf.TransformListener ls. It does nothing if there is no sent pose"""
        try:
            (trans,_)=ls.lookupTransform('/world',self.name,rospy.Time(0))
        except tf.Exception as e:
            # No transform published for this joint yet; keep the old pose.
            return
        self.currentPos.append(Vec(*trans))
        self.__uncalculate()
    def setPosition(self):
        """calculates the position of the joint"""
        if self.fixedFrame is not None:
            # Pin this node to its fixed frame; accumulate the correction
            # as displacement so children can follow it.
            self.displacement+=self.fixedFrame-self.currentPos.pos
            self.currentPos.pos+=self.displacement
        elif self.parent is not None:
            # Constrain the node to lie at exactly `length` from its parent,
            # along the direction of its (displaced) measured position.
            n=self.currentPos.pos+self.displacement
            p=self.parent.currentPos.pos
            n=normalize(n-p)*self.length+p
            self.displacement=n-self.currentPos.pos
            self.currentPos.pos=n
        # Propagate this node's displacement down, then clear it.
        for i in self.children:
            i.displacement+=self.displacement
        self.displacement=Vec(0,0,0)
    def connectLimbs(self):
        """calculates the pose of the limbs"""
        p=self.currentPos.pos
        for i in self.children:
            c=i.currentPos.pos
            # Limb midpoint plus the rotation taking the z-axis onto the bone.
            i.limbPos=(p+c)/2
            v2=normalize((p-c) if not i.invert else (c-p))
            i.limbRot=calculateQuaternion(Vec(0,0,1),v2)
    def sendPoses(self,br):
        """sends the pose of joints and limbs to given tf.TransformBroadcaster"""
        br.sendTransform(self.currentPos.pos,(0,0,0,1),rospy.Time.now(),self.name+'_link','/world')
        for i in self.children:
            br.sendTransform(i.limbPos,i.limbRot,rospy.Time.now(),"{}_{}".format(self.name,i.name),'/world')
    def applyDisplacement(self,displacement):
        """applies the given displacement to the parent and all of its children"""
        for i in self:
            i.currentPos.pos+=displacement
            i.limbPos+=displacement
if __name__ == '__main__':
    rospy.init_node('animator')
    # The joint hierarchy is supplied as a JSON-encoded dict on the ROS
    # parameter server; each joint smooths its position over 10 samples.
    treeDict=json.loads(rospy.get_param("/tree"))
    tree=JointTree.fromDict(treeDict,lambda : AveragePosition(10))
    br = tf.TransformBroadcaster()
    ls = tf.TransformListener()
    rate = rospy.Rate(50.0)
    while not rospy.is_shutdown():
        # 1) pull the latest joint positions from tf
        for i in tree:
            i.collectPosition(ls)
        # 2) enforce bone-length constraints, node by node
        for i in tree:
            i.setPosition()
            (o,r,l) = ("SpineShoulder","ShoulderRight","ShoulderLeft")
            #these three are special condition,They are aligned on a straight line
            #Also note that the z value of ShoulderRight and ShoulderLeft equals to that of SpineShoulder
            if i.name==o:
                r=i[r]
                l=i[l]
                cr=r.currentPos.pos+r.displacement
                cl=l.currentPos.pos+l.displacement
                # clamp both shoulders to the SpineShoulder's height ...
                cr[2]=i.currentPos.pos[2]
                cl[2]=i.currentPos.pos[2]
                # ... and recenter them so SpineShoulder stays their midpoint
                k=i.currentPos.pos-(cr+cl)/2
                cr+=k
                cl+=k
                r.displacement=cr-r.currentPos.pos
                l.displacement=cl-l.currentPos.pos
        # 3) derive limb midpoints and orientations
        for i in tree:
            i.connectLimbs()
        #calculates the Orientation of Torso (Upper and Lower) and connected joints
        q1=tree["SpineShoulder"].limbRot
        q2=calculateQuaternion(Vec(0,1,0),normalize(tree["ShoulderRight"].currentPos.pos-tree["ShoulderLeft"].currentPos.pos))
        tree["SpineShoulder"].limbRot=quatMult(q2,q1)
        tree["ShoulderRight"].applyDisplacement(quatRotatePoint(q1,tree["ShoulderRight"].currentPos.pos,tree["SpineShoulder"].currentPos.pos)-tree["ShoulderRight"].currentPos.pos)
        tree["ShoulderLeft"].applyDisplacement(quatRotatePoint(q1,tree["ShoulderLeft"].currentPos.pos,tree["SpineShoulder"].currentPos.pos)-tree["ShoulderLeft"].currentPos.pos)
        v=tree["HipRight"].currentPos.pos-tree["HipLeft"].currentPos.pos
        q2=calculateQuaternion(Vec(0,1,0),normalize(v))
        q=quatMult(q2,q1)
        tree["SpineBase"].limbRot=q
        # NOTE(review): HipRight gets a displacement below but no matching
        # HipLeft line (and no rate.sleep()) is visible here — confirm the
        # end of this loop body was not lost.
        tree["HipRight"].applyDisplacement(quatRotatePoint(q,tree["SpineBase"].currentPos.pos+Vec(0.01,tree["HipRight"].length,-0.05),tree["SpineBase"].currentPos.pos)-tree["HipRight"].currentPos.pos)
|
#!/usr/bin/env python3
#
# This file generates an estimation of window size for the
# two queues for _each_ sample. It will not be exact, and
# it's correctness will vary with the variation of queue delay
# in the queue.
#
# The results are saved to:
# - derived/window
# each line formatted as: <sample id> <window ecn in bits> <window nonecn in bits>
#
# Dependency:
# - calc_queuedelay.py (for per sample queue stats)
import os
import sys
def get_rates(rate_file):
    """Read the per-sample rates (in b/s) from `rate_file`.

    Lines starting with '#' and blank lines are ignored; previously a
    trailing blank line crashed on `line.split()[2]`.
    """
    rates = []
    with open(rate_file, 'r') as f:
        for line in f:
            # skip comments and blank lines
            if line.startswith('#') or not line.strip():
                continue
            # format of rate file:
            # <sample id> <sample time> <rate in b/s>
            rates.append(int(line.split()[2]))
    return rates
def get_rtts_with_queue(queue_file, base_rtt):
    """Build the per-sample RTT list, in seconds.

    `base_rtt` is in milliseconds; the file's queue average is in
    microseconds, with '-' meaning unknown (treated as zero delay).
    Lines starting with '#' and blank lines are ignored; previously a
    blank line crashed on `line.split()[1]`.
    """
    rtts = []
    with open(queue_file, 'r') as f:
        for line in f:
            # skip comments and blank lines
            if line.startswith('#') or not line.strip():
                continue
            # format of queue file:
            # <sample time> <average_in_us> ...
            # the average might be '-' if it is unknown
            queue_avg = line.split()[1]
            queue_avg = 0 if queue_avg == '-' else float(queue_avg)
            # add rtt and normalize to seconds
            # base rtt is in ms
            rtts.append((queue_avg / 1000 + base_rtt) / 1000)
    return rtts
def calc_window(rates, rtts_s):
    """Estimate the window (bits) for each sample as rate * rtt.

    Both inputs must have the same number of samples; the index lookup
    preserves the original IndexError on a length mismatch.
    """
    return [rate * rtts_s[i] for i, rate in enumerate(rates)]
def write_window(file, window_ecn_list, window_nonecn_list):
    """Dump per-sample window estimates for both queues to `file`."""
    with open(file, 'w') as f:
        f.write('#sample_id window_ecn_in_bits window_nonecn_in_bits\n')
        for sample_id, ecn_window in enumerate(window_ecn_list):
            # index lookup keeps the original IndexError on length mismatch
            f.write('%d %d %d\n' % (sample_id, ecn_window, window_nonecn_list[sample_id]))
def process_test(folder, base_rtt_ecn_ms, base_rtt_nonecn_ms):
    """Derive per-sample window estimates for one test folder.

    Reads ta/rate_* and derived/queue_*_samplestats, writes derived/window.
    """
    windows_ecn = calc_window(
        get_rates(folder + '/ta/rate_ecn'),
        get_rtts_with_queue(folder + '/derived/queue_ecn_samplestats', base_rtt_ecn_ms),
    )
    windows_nonecn = calc_window(
        get_rates(folder + '/ta/rate_nonecn'),
        get_rtts_with_queue(folder + '/derived/queue_nonecn_samplestats', base_rtt_nonecn_ms),
    )
    write_window(folder + '/derived/window', windows_ecn, windows_nonecn)
if __name__ == '__main__':
    # Arguments: <test_folder> <rtt_ecn_ms> <rtt_nonecn_ms>
    if len(sys.argv) < 4:
        print('Usage: %s <test_folder> <rtt_ecn_ms> <rtt_nonecn_ms>' % sys.argv[0])
        sys.exit(1)
    folder = sys.argv[1]
    base_rtt_ecn_ms = float(sys.argv[2])
    base_rtt_nonecn_ms = float(sys.argv[3])
    process_test(folder, base_rtt_ecn_ms, base_rtt_nonecn_ms)
    print('Generated win')
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
import copy
import json
import re
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.service import OpenGraphThumbMixin
from svtplay_dl.service import Service
class Vimeo(Service, OpenGraphThumbMixin):
    """Stream extractor for vimeo.com / player.vimeo.com pages."""

    supported_domains = ["vimeo.com", "player.vimeo.com"]

    def get(self):
        """Yield stream fetchers (or ServiceError) for the video at self.url."""
        data = self.get_urldata()
        match_cfg_url = re.search('data-config-url="([^"]+)" data-fallback-url', data)
        match_clip_page_cfg = re.search(r"vimeo\.clip_page_config\s*=\s*({.+?});", data)
        if match_cfg_url:
            # The attribute value is HTML-escaped; restore the raw '&'
            # query separators. (The previous replace("&", "&") was a no-op.)
            player_url = match_cfg_url.group(1).replace("&amp;", "&")
        elif match_clip_page_cfg:
            page_config = json.loads(match_clip_page_cfg.group(1))
            player_url = page_config["player"]["config_url"]
        else:
            yield ServiceError(f"Can't find video file for: {self.url}")
            return
        player_data = self.http.request("get", player_url).text
        if player_data:
            jsondata = json.loads(player_data)
            if ("hls" in jsondata["request"]["files"]) and ("fastly_skyfire" in jsondata["request"]["files"]["hls"]["cdns"]):
                hls_elem = jsondata["request"]["files"]["hls"]["cdns"]["fastly_skyfire"]
                yield from hlsparse(self.config, self.http.request("get", hls_elem["url"]), hls_elem["url"], output=self.output)
            # NOTE(review): assumes "progressive" is always present in the
            # player config; a KeyError here would propagate — confirm
            # against current payloads.
            avail_quality = jsondata["request"]["files"]["progressive"]
            for i in avail_quality:
                yield HTTP(copy.copy(self.config), i["url"], i["height"], output=self.output)
        else:
            yield ServiceError("Can't find any streams.")
            return
|
# Copyright (c) 2013 eBay Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Weighers that weigh hosts by their capacity, including following two
weighers:
1. Capacity Weigher. Weigh hosts by their available capacity.
The default is to spread volumes across all hosts evenly. If you prefer
stacking, you can set the 'capacity_weight_multiplier' option to a negative
number and the weighing has the opposite effect of the default.
2. Allocated Capacity Weigher. Weigh hosts by their allocated capacity.
The default behavior is to place new volume to the host allocated the least
space. This weigher is intended to simulate the behavior of SimpleScheduler.
If you prefer to place volumes to host allocated the most space, you can
set the 'allocated_capacity_weight_multiplier' option to a positive number
and the weighing has the opposite effect of the default.
"""
import math
from oslo.config import cfg
from cinder.openstack.common.scheduler import weights
# Tunables controlling the sign/magnitude of both weighers; registered on
# the global CONF so the multipliers can be set in cinder.conf.
capacity_weight_opts = [
    cfg.FloatOpt('capacity_weight_multiplier',
                 default=1.0,
                 help='Multiplier used for weighing volume capacity. '
                      'Negative numbers mean to stack vs spread.'),
    cfg.FloatOpt('allocated_capacity_weight_multiplier',
                 default=-1.0,
                 help='Multiplier used for weighing volume capacity. '
                      'Negative numbers mean to stack vs spread.'),
]
CONF = cfg.CONF
CONF.register_opts(capacity_weight_opts)
class CapacityWeigher(weights.BaseHostWeigher):
    """Weigh hosts by available capacity.

    With the default positive multiplier, hosts with more free space win
    (spreading); a negative multiplier stacks instead.
    """
    def _weight_multiplier(self):
        """Override the weight multiplier."""
        return CONF.capacity_weight_multiplier

    def _weigh_object(self, host_state, weight_properties):
        """Higher weights win. We want spreading to be the default."""
        reserved = float(host_state.reserved_percentage) / 100
        free_space = host_state.free_capacity_gb
        if free_space in ('infinite', 'unknown'):
            #(zhiteng) 'infinite' and 'unknown' are treated the same
            # here, for sorting purpose.
            free = float('inf')
        else:
            # use the local instead of re-reading the attribute
            free = math.floor(free_space * (1 - reserved))
        return free
class All | ocatedCapacityWeigher(weights.BaseHostWeigher):
def _weight_multiplier(self):
"""Override the weight multiplier."""
return CONF.allocated_capacity_weight_multiplier
def _weigh_object(self, host_state, weight_properties):
# Higher weights win. We want spreading (choose host with lowest
# allocated_capacity first) to be the default.
allocated_space = host_state.allocated_capacity_gb
return allocated_space
|
from PyQt4 import QtCore
import acq4.Manager
import acq4.util.imageAnalysis as imageAnalysis
# Continuously measure a Gaussian spot imaged by the acq4 camera device.
# NOTE: this is a Python 2 script (see the print statements below).
run = True
man = acq4.Manager.getManager()
cam = man.getDevice('Camera')
# Frames received since the last measurement; drained by measure().
frames = []
def collect(frame):
    # Stash every new camera frame for the next measurement pass.
    global frames
    frames.append(frame)
cam.sigNewFrame.connect(collect)
def measure():
    # Fit a 2D Gaussian to the central fifth of the newest frame and print
    # the spot width in micrometers; reschedules itself forever.
    if len(frames) == 0:
        # no frame yet; poll again shortly
        QtCore.QTimer.singleShot(100, measure)
        return
    global run
    if run:
        global frames
        frame = frames[-1]
        frames = []
        img = frame.data()
        w,h = img.shape
        # crop the central 1/5 x 1/5 region (Python 2 integer division)
        img = img[2*w/5:3*w/5, 2*h/5:3*h/5]
        w,h = img.shape
        # initial guess: amplitude 100, centered, sigma = width/4
        fit = imageAnalysis.fitGaussian2D(img, [100, w/2., h/2., w/4., 0])
        # convert sigma to full width at 1/e
        fit[0][3] *= 2 * 2**0.5
        print "WIDTH:", fit[0][3] * frame.info()['pixelSize'][0] * 1e6, "um"
        print "  fit:", fit
    else:
        global frames
        frames = []
    QtCore.QTimer.singleShot(2000, measure)
measure()
|
"""
This version of julian is currently in d | evelopment and is not considered s | table.
""" |
import timeit
import pyximport; pyximport.install()
from mod2 import cysum, cysum2
def pysum(start, step, count):
    """Accumulate `step` onto `start`, `count` times.

    Deliberately a plain Python loop: it is the baseline being benchmarked
    against the Cython variants below.
    """
    total = start
    for _ in range(count):
        total += step
    return total
# Time the three implementations; timeit runs each snippet 1,000,000 times
# (its default) and reports the total seconds.
print('Python',
      timeit.timeit('pysum(0, 1, 100)', 'from __main__ import pysum'))
print('Cython', timeit.timeit('cysum(0, 1, 100)', 'from __main__ import cysum'))
print('Cython with types',
      timeit.timeit('cysum2(0, 1, 100)', 'from __main__ import cysum2'))
|
# Copyright (C) 2008 LibreSoft
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors :
# Carlos Garcia Campos <carlosgc@gsyc.escet.urjc.es>
from pycvsanaly2.Database import (SqliteDatabase, MysqlDatabase,
TableAlreadyExists, statement)
from pycvsanaly2.extensions import (Extension, register_extension,
ExtensionRunError)
from pycvsanaly2.extensions.file_types import guess_file_type
from pycvsanaly2.utils import to_utf8, uri_to_filename
class DBFileType(object):
    """In-memory row for the file_types table.

    Rows created without an explicit id draw one from a class-level counter.
    """
    id_counter = 1

    __insert__ = """INSERT INTO file_types (id, file_id, type)
                values (?, ?, ?)"""

    def __init__(self, id, type, file_id):
        """Store the row, auto-allocating an id when `id` is None."""
        if id is not None:
            self.id = id
        else:
            self.id = DBFileType.id_counter
            DBFileType.id_counter += 1
        self.type = to_utf8(type)
        self.file_id = file_id
class FileTypes(Extension):
    """CVSAnalY extension that guesses and stores a type for every file.

    Creates/fills the file_types table, resuming past the rows already
    present for the repository. (Python 2 code: note the
    `except X, e` syntax.)
    """
    def __init__(self):
        self.db = None
    def __create_table(self, cnn):
        # Create file_types for the backend in use; translates the
        # backend-specific "table exists" error into TableAlreadyExists.
        cursor = cnn.cursor()
        if isinstance(self.db, SqliteDatabase):
            import sqlite3.dbapi2
            try:
                cursor.execute("CREATE TABLE file_types (" +
                               "id integer primary key," +
                               "file_id integer," +
                               "type varchar" +
                               ")")
            except sqlite3.dbapi2.OperationalError:
                cursor.close()
                raise TableAlreadyExists
            except:
                raise
        elif isinstance(self.db, MysqlDatabase):
            import MySQLdb
            try:
                cursor.execute("CREATE TABLE file_types (" +
                               "id INT primary key," +
                               "file_id integer REFERENCES files(id)," +
                               "type mediumtext" +
                               ") CHARACTER SET=utf8")
            except MySQLdb.OperationalError, e:
                # MySQL error 1050: table already exists
                if e.args[0] == 1050:
                    cursor.close()
                    raise TableAlreadyExists
                raise
            except:
                raise
        cnn.commit()
        cursor.close()
    def __create_indices(self, cnn):
        # Best-effort index creation (MySQL only); error 1061 means the
        # index already exists and is ignored.
        cursor = cnn.cursor()
        if isinstance(self.db, MysqlDatabase):
            import MySQLdb
            try:
                cursor.execute("create index parent_id on file_links(parent_id)")
            except MySQLdb.OperationalError, e:
                if e.args[0] != 1061:
                    cursor.close()
                    raise
            try:
                cursor.execute("create index repository_id on files(repository_id)")
            except MySQLdb.OperationalError, e:
                if e.args[0] != 1061:
                    cursor.close()
                    raise
        cursor.close()
    def __get_files_for_repository(self, repo_id, cursor):
        # Return the ids of files that already have a type recorded, so
        # run() can skip them on a resumed execution.
        query = "SELECT ft.file_id from file_types ft, files f " + \
                "WHERE f.id = ft.file_id and f.repository_id = ?"
        cursor.execute(statement(query, self.db.place_holder), (repo_id,))
        files = [res[0] for res in cursor.fetchall()]
        return files
    def run(self, repo, uri, db):
        # Main entry point: guess a type for every leaf file of the
        # repository and batch-insert the results.
        self.db = db
        path = uri_to_filename(uri)
        if path is not None:
            repo_uri = repo.get_uri_for_path(path)
        else:
            repo_uri = uri
        cnn = self.db.connect()
        cursor = cnn.cursor()
        cursor.execute(statement("SELECT id from repositories where uri = ?",
                                 db.place_holder), (repo_uri,))
        repo_id = cursor.fetchone()[0]
        files = []
        try:
            self.__create_table(cnn)
        except TableAlreadyExists:
            # Resume: continue the id sequence and skip known files.
            cursor.execute(statement("SELECT max(id) from file_types",
                                     db.place_holder))
            id = cursor.fetchone()[0]
            if id is not None:
                DBFileType.id_counter = id + 1
            files = self.__get_files_for_repository(repo_id, cursor)
        except Exception, e:
            raise ExtensionRunError(str(e))
        self.__create_indices(cnn)
        # Only leaf files (those that are not a parent in file_links).
        query = """select distinct f.id fid, f.file_name fname
                from files f
                where f.repository_id = ?
                and not exists (select id from file_links where parent_id = f.id)"""
        cursor.execute(statement(query, db.place_holder), (repo_id,))
        write_cursor = cnn.cursor()
        rs = cursor.fetchmany()
        while rs:
            types = []
            for file_id, file_name in rs:
                if file_id in files:
                    continue
                type = guess_file_type(file_name)
                types.append(DBFileType(None, type, file_id))
            if types:
                file_types = [(type.id, type.file_id, type.type) \
                              for type in types]
                write_cursor.executemany(statement(DBFileType.__insert__,
                                                   self.db.place_holder),
                                         file_types)
            rs = cursor.fetchmany()
        cnn.commit()
        write_cursor.close()
        cursor.close()
        cnn.close()
    def backout(self, repo, uri, db):
        # Remove every file_types row belonging to this repository.
        update_statement = """delete from file_types where
                        file_id in (select id from files f
                        where f.repository_id = ?)"""
        self._do_backout(repo, uri, db, update_statement)
# Make the extension discoverable by name.
register_extension("FileTypes", FileTypes)
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
class TestFakeQuantizeOp(OpTest):
    """Checks the fake_quantize op in 'abs_max' mode against a NumPy reference."""

    def setUp(self):
        self.op_type = "fake_quantize"
        bit_length = 8
        window_size = 10000
        self.attrs = {
            'bit_length': bit_length,
            'quantize_type': 'abs_max',
            'window_size': window_size,
        }
        x = np.random.random((10, 10)).astype("float32")
        self.inputs = {
            'X': x,
            'InScales': np.zeros(window_size).astype("float32"),
            'InCurrentIter': np.zeros(1).astype("float32"),
            'InMovingScale': np.zeros(1).astype("float32"),
        }
        # abs_max scale: largest magnitude in X
        abs_max = np.max(np.abs(x)).astype("float32")
        self.scale = {'abs_max': abs_max}
        # quantize to signed ints of `bit_length` bits: max level 2^(b-1)-1
        quant_max = (1 << (bit_length - 1)) - 1
        self.outputs = {
            'Out': np.round(x / abs_max * quant_max),
            'OutScales': np.zeros(window_size).astype("float32"),
            'OutMovingScale': np.array([abs_max]).astype("float32"),
            'OutCurrentIter': np.zeros(1).astype("float32"),
        }

    def test_check_output(self):
        """Run the op and compare its outputs to self.outputs."""
        self.check_output()
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
# -*- | coding: utf-8 -*-
import re
class StringParser(object):
    """Static helpers for cleaning up course-description strings."""

    @staticmethod
    def removeCFU(stringToParse):
        """Strip a trailing single-digit credits annotation (e.g. ' 9 CFU ...')."""
        # raw string: '\s' in a plain literal is an invalid escape sequence
        return re.sub(r'\s?[0-9] CFU.*', '', stringToParse)

    @staticmethod
    def startsWithUpper(stringToParse):
        """Return the string with its first character upper-cased.

        An empty string is returned unchanged (previously raised IndexError).
        """
        if not stringToParse:
            return stringToParse
        return stringToParse[0].upper() + stringToParse[1:]
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-22 14:42
from __future__ impo | rt unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `publish_date` field to the news Event model."""
    dependencies = [
        ('news', '0002_event'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='publish_date',
            # One-off default used to back-fill existing rows;
            # preserve_default=False drops it from the model afterwards.
            field=models.DateTimeField(default='2017-09-22 16:45', verbose_name='publish_date'),
            preserve_default=False,
        ),
    ]
|
st of fractional coordinates of kpoints as arrays or list of
Kpoint objects for BANDS mode calculation (standard path of
high symmetry k-points is automatically set as default)
tmax:
Maximum temperature (K) for calculation (default=1300)
tgrid:
Temperature interval for calculation (default=50)
symprec: 1e-3 is the default in pymatgen. If the kmesh has been
generated using a different symprec, it has to be specified
to avoid a "factorization error" in BoltzTraP calculation.
If a kmesh that spans the whole Brillouin zone has been used,
or to disable all the symmetries, | set symprec to None.
cb_cut: by default 10% of the highest conduction bands are
removed because they are often not accurate.
Tune cb_cut to change the percentage (0-100) of bands
that are removed.
timeout: overall time limit (in seconds): mainly to avoid infinite
l | oop when trying to find Fermi levels.
"""
self.lpfac = lpfac
self._bs = bs
self._nelec = nelec
self.dos_type = dos_type
self.energy_grid = energy_grid
self.error = []
self.run_type = run_type
self.band_nb = band_nb
self.spin = spin
self.cond_band = cond_band
self.tauref = tauref
self.tauexp = tauexp
self.tauen = tauen
self.soc = soc
self.kpt_line = kpt_line
self.cb_cut = cb_cut / 100.0
if isinstance(doping, list) and len(doping) > 0:
self.doping = doping
else:
self.doping = []
for d in [1e16, 1e17, 1e18, 1e19, 1e20, 1e21]:
self.doping.extend([1 * d, 2.5 * d, 5 * d, 7.5 * d])
self.doping.append(1e22)
self.energy_span_around_fermi = energy_span_around_fermi
self.scissor = scissor
self.tmax = tmax
self.tgrid = tgrid
self._symprec = symprec
if self.run_type in ("DOS", "BANDS"):
self._auto_set_energy_range()
self.timeout = timeout
self.start_time = time.time()
    def _auto_set_energy_range(self):
        """
        automatically determine the energy range as min/max eigenvalue
        minus/plus the buffer_in_ev
        """
        # min/max energy of each band at the first k-point (both spins
        # when the band structure is spin polarized)
        emins = [min(e_k[0] for e_k in self._bs.bands[Spin.up])]
        emaxs = [max(e_k[0] for e_k in self._bs.bands[Spin.up])]
        if self._bs.is_spin_polarized:
            emins.append(min(e_k[0] for e_k in self._bs.bands[Spin.down]))
            emaxs.append(max(e_k[0] for e_k in self._bs.bands[Spin.down]))
        # shift to the Fermi level and convert eV -> Ry
        min_eigenval = Energy(min(emins) - self._bs.efermi, "eV").to("Ry")
        max_eigenval = Energy(max(emaxs) - self._bs.efermi, "eV").to("Ry")
        # set energy range to buffer around min/max EV
        # buffer does not increase CPU time but will help get equal
        # energies for spin up/down for band structure
        const = Energy(2, "eV").to("Ry")
        self._ll = min_eigenval - const
        self._hl = max_eigenval + const
        en_range = Energy(max((abs(self._ll), abs(self._hl))), "Ry").to("eV")
        self.energy_span_around_fermi = en_range * 1.01
        print("energy_span_around_fermi = ", self.energy_span_around_fermi)
    @property
    def bs(self):
        """
        :return: The BandStructure this runner was constructed with.
        """
        return self._bs
    @property
    def nelec(self):
        """
        :return: Number of electrons this runner was constructed with.
        """
        return self._nelec
    def write_energy(self, output_file):
        """
        Writes the energy to an output file.

        File layout: a title line, the k-point count, then for each k-point
        its fractional coordinates, the eigenvalue count, and one
        Fermi-shifted eigenvalue (Ry) per line.
        :param output_file: Filename
        """
        with open(output_file, "w") as f:
            f.write("test\n")
            f.write(f"{len(self._bs.kpoints)}\n")
            if self.run_type == "FERMI":
                # single band/spin; sign flips for conduction-band runs
                sign = -1.0 if self.cond_band else 1.0
                for i, kpt in enumerate(self._bs.kpoints):
                    eigs = []
                    eigs.append(
                        Energy(
                            self._bs.bands[Spin(self.spin)][self.band_nb][i] - self._bs.efermi,
                            "eV",
                        ).to("Ry")
                    )
                    f.write(
                        "%12.8f %12.8f %12.8f %d\n"
                        % (
                            kpt.frac_coords[0],
                            kpt.frac_coords[1],
                            kpt.frac_coords[2],
                            len(eigs),
                        )
                    )
                    for e in eigs:
                        f.write("%18.8f\n" % (sign * float(e)))
            else:
                for i, kpt in enumerate(self._bs.kpoints):
                    eigs = []
                    # DOS runs use one chosen spin; others use all spins
                    if self.run_type == "DOS":
                        spin_lst = [self.spin]
                    else:
                        spin_lst = self._bs.bands
                    for spin in spin_lst:
                        # use 90% of bottom bands since highest eigenvalues
                        # are usually incorrect
                        # ask Geoffroy Hautier for more details
                        nb_bands = int(math.floor(self._bs.nb_bands * (1 - self.cb_cut)))
                        for j in range(nb_bands):
                            eigs.append(
                                Energy(
                                    self._bs.bands[Spin(spin)][j][i] - self._bs.efermi,
                                    "eV",
                                ).to("Ry")
                            )
                    eigs.sort()
                    # pad spin-polarized DOS with the buffered range limits
                    if self.run_type == "DOS" and self._bs.is_spin_polarized:
                        eigs.insert(0, self._ll)
                        eigs.append(self._hl)
                    f.write(
                        "%12.8f %12.8f %12.8f %d\n"
                        % (
                            kpt.frac_coords[0],
                            kpt.frac_coords[1],
                            kpt.frac_coords[2],
                            len(eigs),
                        )
                    )
                    for e in eigs:
                        f.write("%18.8f\n" % (float(e)))
    def write_struct(self, output_file):
        """
        Writes the structure to an output file.

        Emits the formula and space group, the lattice matrix in bohr,
        then the rotation operations (identity only when symmetry is
        disabled via symprec=None).
        :param output_file: Filename
        """
        if self._symprec is not None:
            sym = SpacegroupAnalyzer(self._bs.structure, symprec=self._symprec)
        elif self._symprec is None:
            # symmetries disabled; `sym` is deliberately left undefined
            pass
        with open(output_file, "w") as f:
            if self._symprec is not None:
                f.write(
                    "{} {}\n".format(
                        self._bs.structure.composition.formula,
                        sym.get_space_group_symbol(),
                    )
                )
            elif self._symprec is None:
                f.write("{} {}\n".format(self._bs.structure.composition.formula, "symmetries disabled"))
            # lattice vectors, converted angstrom -> bohr
            f.write(
                "{}\n".format(
                    "\n".join(
                        [
                            " ".join(["%.5f" % Length(i, "ang").to("bohr") for i in row])
                            for row in self._bs.structure.lattice.matrix
                        ]
                    )
                )
            )
            if self._symprec is not None:
                ops = sym.get_symmetry_dataset()["rotations"]
            elif self._symprec is None:
                # identity only when all symmetries are disabled
                ops = [[[1, 0, 0], [0, 1, 0], [0, 0, 1]]]
            f.write(f"{len(ops)}\n")
            for c in ops:
                for row in c:
                    f.write("{}\n".format(" ".join(str(i) for i in row)))
def write_def(self, output_file):
"""
Writes the def to an output file.
:param output_file: Filename
"""
# This function is useless in std version of BoltzTraP code
# because x_trans script overwrite BoltzTraP.def
with open(output_file, "w") as f:
|
from seth import versioning
from seth.tests import IntegrationTestBase
from seth.classy.rest import generics
class DefaultVersioningResource(generics.GenericApiView):
    """Resource using the default versioning policy."""

    def get(self, **kwargs):
        """Return an empty payload."""
        return dict()
class NotShowVersionResource(generics.GenericApiView):
    """Resource that suppresses the API-Version response header."""

    display_version = False

    def get(self, **kwargs):
        """Return an empty payload."""
        return dict()
class BaseVersioningTestCase(IntegrationTestBase):
    """Integration tests for the default versioning behaviour."""
    def extend_app_configuration(self, config):
        config.include('seth')
        config.register_resource(DefaultVersioningResource, '/test_basic')
        config.register_resource(NotShowVersionResource, '/test_do_not_display_version')
    def test_default_setup(self):
        # the default policy exposes '1.0' in the API-Version header
        r = self.app.get('/test_basic')
        self.assertEqual(r.status_int, 200)
        self.assertIn('API-Version', r.headers.keys())
        self.assertEqual(r.headers['API-Version'], '1.0')
    def test_do_not_display_version(self):
        # display_version = False hides the header entirely
        r = self.app.get('/test_do_not_display_version')
        self.assertEqual(r.status_int, 200)
        self.assertNotIn('API-Version', r.headers.keys())
class CustomVersioningPoliciesTestCase(IntegrationTestBase):
    """Integration tests for user-defined versioning policies."""
    def extend_app_configuration(self, config):
        config.include('seth')
        # policy without get_version_info: using it must raise
        class NoGetVersionInfoPolicy(versioning.BaseVersioningPolicy):
            default_version = '2.0'
        class NoGetVersionInfonResource(generics.GenericApiView):
            versioning_policy = NoGetVersionInfoPolicy
            def get(self, **kwargs):
                return {}
        config.register_resource(NoGetVersionInfonResource, '/test_no_get_version_info')
        # policy reporting a fixed custom version
        class AnotherVersionPolicy(versioning.BaseVersioningPolicy):
            default_version = '2.0'
            def get_version_info(self, request, *args, **kwargs):
                return '2.0'
        class AnotherVersionResource(generics.GenericApiView):
            versioning_policy = AnotherVersionPolicy
            def get(self, **kwargs):
                return {}
        config.register_resource(AnotherVersionResource, '/test_another_version')
        # policy computing its default version dynamically
        class PredefineVersionPolicy(versioning.BaseVersioningPolicy):
            default_version = None
            def get_default_version(self, request):
                return '666'
            def get_version_info(self, request, *args, **kwargs):
                return '666'
        class PredefineVersionResource(generics.GenericApiView):
            versioning_policy = PredefineVersionPolicy
            def get(self, **kwargs):
                return {}
        config.register_resource(PredefineVersionResource, '/test_predefine')
    def test_raises_NotImplementedError_if_get_version_info_is_not_provided(self):
        self.assertRaises(NotImplementedError, lambda: self.app.get('/test_no_get_version_info'))
    def test_another_version_set(self):
        r = self.app.get('/test_another_version')
        self.assertEqual(r.status_int, 200)
        self.assertIn('API-Version', r.headers.keys())
        self.assertEqual(r.headers['API-Version'], '2.0')
    def test_predefine_version(self):
        r = self.app.get('/test_predefine')
        self.assertEqual(r.status_int, 200)
        self.assertIn('API-Version', r.headers.keys())
        self.assertEqual(r.headers['API-Version'], '666')
class CheckParamsVersionPolicy(IntegrationTestBase):
    """Tests for version negotiation via the `version` query parameter.

    (Despite the name, this is a test case, not a policy.)
    """
    def extend_app_configuration(self, config):
        config.include('seth')
        class CheckQueryParamsResource(generics.GenericApiView):
            versioning_policy = versioning.CheckQueryParamsVersioningPolicy
            def get(self, **kwargs):
                return {}
        config.register_resource(CheckQueryParamsResource, '/test_query_params')
        # policy restricted to a default version plus one allowed version
        class AllowVersionOnePolicy(versioning.CheckQueryParamsVersioningPolicy):
            default_version = '22.0'
            def get_allowed_version(self):
                return ['5.0']
        class CheckQueryParamsResourceSecond(generics.GenericApiView):
            versioning_policy = AllowVersionOnePolicy
            def get(self, **kwargs):
                return {}
        config.register_resource(CheckQueryParamsResourceSecond, '/test_allow_version')
    def test_no_version_in_query_params_all_versions_allowed(self):
        r = self.app.get('/test_query_params')
        self.assertEqual(r.status_int, 200)
    def test_wrong_version_in_query_params_all_versions_allowed(self):
        r = self.app.get('/test_query_params?version=2.0')
        self.assertEqual(r.status_int, 200)
    def test_correct_version_in_query_params_all_versions_allowed(self):
        r = self.app.get('/test_query_params?version=1.0')
        self.assertEqual(r.status_int, 200)
    def test_allow_default_version(self):
        r = self.app.get('/test_allow_version?version=22.0')
        self.assertEqual(r.status_int, 200)
    def test_allowed_versions(self):
        r = self.app.get('/test_allow_version?version=5.0')
        self.assertEqual(r.status_int, 200)
    def test_wrong_version_in_query_params_allowed_are_set(self):
        # unknown versions 404 when an allowed list is configured
        r = self.app.get('/test_allow_version?version=1.0', expect_errors=True)
        self.assertEqual(r.status_int, 404)
    def test_no_version_in_query_params_allowed_are_set(self):
        r = self.app.get('/test_allow_version', expect_errors=True)
        self.assertEqual(r.status_int, 404)
class CheckHeaderVersionPolicy(IntegrationTestBase):
    """Tests for version negotiation via the `Api-Version` request header.

    (Despite the name, this is a test case, not a policy.)
    """
    def extend_app_configuration(self, config):
        config.include('seth')
        class AllowVersionOnePolicy(versioning.CheckHeaderVersioningPolicy):
            default_version = '22.0'
            def get_allowed_version(self):
                return ['5.0']
        class CheckQueryParamsResourceSecond(generics.GenericApiView):
            versioning_policy = AllowVersionOnePolicy
            def get(self, **kwargs):
                return {}
        config.register_resource(CheckQueryParamsResourceSecond, '/test_allow_header')
    def test_allow_default_version(self):
        r = self.app.get('/test_allow_header', headers={'Api-Version': '22.0'})
        self.assertEqual(r.status_int, 200)
    def test_allowed_versions(self):
        r = self.app.get('/test_allow_header', headers={'Api-Version': '5.0'})
        self.assertEqual(r.status_int, 200)
    def test_wrong_version_in_headers(self):
        r = self.app.get('/test_allow_header', headers={'Api-Version': '666.0'}, expect_errors=True)
        self.assertEqual(r.status_int, 404)
    def test_no_header_in_request(self):
        # the header is mandatory when an allowed list is configured
        r = self.app.get('/test_allow_header', expect_errors=True)
        self.assertEqual(r.status_int, 404)
    def test_wrong_header_set(self):
        r = self.app.get('/test_allow_header', headers={'Api-WRONG': '22.0'}, expect_errors=True)
        self.assertEqual(r.status_int, 404)
INATE = 0x8
# Bit flags recording selector state (continues the SEL_* flag series above).
SEL_SCOPE = 0x10
SEL_DIR_LTR = 0x20
SEL_DIR_RTL = 0x40
SEL_IN_RANGE = 0x80
SEL_OUT_OF_RANGE = 0x100
SEL_DEFINED = 0x200
SEL_PLACEHOLDER_SHOWN = 0x400
class Immutable(object):
    """Base for frozen value objects.

    Attributes are fixed at construction time and a hash is precomputed
    from the (type, value) pairs of all keyword arguments.
    """

    __slots__ = ('_hash',)

    def __init__(self, **kwargs):
        """Assign all keyword arguments and freeze the combined hash."""
        parts = []
        for key, value in kwargs.items():
            parts.extend((type(value), value))
            super(Immutable, self).__setattr__(key, value)
        super(Immutable, self).__setattr__('_hash', hash(tuple(parts)))

    @classmethod
    def __base__(cls):
        """Get base class."""
        return cls

    def __eq__(self, other):
        """Equal when `other` is the same kind and every slot matches."""
        if not isinstance(other, self.__base__()):
            return False
        return all(
            getattr(other, key) == getattr(self, key)
            for key in self.__slots__ if key != '_hash'
        )

    def __ne__(self, other):
        """Inverse of equality."""
        if not isinstance(other, self.__base__()):
            return True
        return any(
            getattr(other, key) != getattr(self, key)
            for key in self.__slots__ if key != '_hash'
        )

    def __hash__(self):
        """Hash."""
        return self._hash

    def __setattr__(self, name, value):
        """Prevent mutability."""
        raise AttributeError("'{}' is immutable".format(self.__class__.__name__))

    def __repr__(self):  # pragma: no cover
        """Representation."""
        return "{}({})".format(
            self.__base__(), ', '.join("{}={!r}".format(k, getattr(self, k)) for k in self.__slots__[:-1])
        )

    __str__ = __repr__
class ImmutableDict(Mapping):
    """Hashable, immutable dictionary."""

    def __init__(self, *args, **kwargs):
        """Validate hashability, then freeze the mapping and its hash."""
        source = args[0] if args else kwargs
        if isinstance(source, dict):
            hashable = all(isinstance(v, Hashable) for v in source.values())
        else:
            hashable = all(
                isinstance(k, Hashable) and isinstance(v, Hashable) for k, v in source
            )
        if not hashable:
            raise TypeError('All values must be hashable')
        self._d = dict(*args, **kwargs)
        self._hash = hash(
            tuple((type(k), k, type(v), v) for k, v in sorted(self._d.items()))
        )

    def __iter__(self):
        """Iterate over the keys."""
        return iter(self._d)

    def __len__(self):
        """Number of entries."""
        return len(self._d)

    def __getitem__(self, key):
        """Get item: `namespace['key']`."""
        return self._d[key]

    def __hash__(self):
        """Hash."""
        return self._hash

    def __repr__(self):  # pragma: no cover
        """Representation."""
        return "{!r}".format(self._d)

    __str__ = __repr__
class Namespaces(ImmutableDict):
    """Namespaces."""

    def __init__(self, *args, **kwargs):
        """Validate that every key and value is a Unicode string.

        Inspects the single positional argument (or the keyword arguments
        when none is given), mirroring `ImmutableDict`.
        """
        source = args[0] if args else kwargs
        pairs = source.items() if isinstance(source, dict) else source
        if not all(isinstance(k, str) and isinstance(v, str) for k, v in pairs):
            raise TypeError('Namespace keys and values must be Unicode strings')
        super(Namespaces, self).__init__(*args, **kwargs)
class CustomSelectors(ImmutableDict):
    """Custom selectors."""

    def __init__(self, *args, **kwargs):
        """Validate that every key and value is a Unicode string.

        Inspects the single positional argument (or the keyword arguments
        when none is given), mirroring `ImmutableDict`.
        """
        source = args[0] if args else kwargs
        pairs = source.items() if isinstance(source, dict) else source
        if not all(isinstance(k, str) and isinstance(v, str) for k, v in pairs):
            raise TypeError('CustomSelectors keys and values must be Unicode strings')
        super(CustomSelectors, self).__init__(*args, **kwargs)
class Selector(Immutable):
    """Immutable record of one selector's parts.

    Holds the tag, id/class/attribute constraints, nth rules, nested
    selector lists, combinator relation/type, contains/lang rules and
    match flags — each stored under the matching slot name.
    """
    __slots__ = (
        'tag', 'ids', 'classes', 'attributes', 'nth', 'selectors',
        'relation', 'rel_type', 'contains', 'lang', 'flags', '_hash'
    )
    def __init__(
        self, tag, ids, classes, attributes, nth, selectors,
        relation, rel_type, contains, lang, flags
    ):
        """Initialize by freezing every field via `Immutable`."""
        super(Selector, self).__init__(
            tag=tag,
            ids=ids,
            classes=classes,
            attributes=attributes,
            nth=nth,
            selectors=selectors,
            relation=relation,
            rel_type=rel_type,
            contains=contains,
            lang=lang,
            flags=flags
        )
class SelectorNull(Immutable):
    """Null Selector: a placeholder selector carrying no data."""

    def __init__(self):
        """Initialize with no attributes."""
        super(SelectorNull, self).__init__()
class SelectorTag(Immutable):
    """Type-selector data: tag name plus optional namespace prefix."""

    __slots__ = ("name", "prefix", "_hash")

    def __init__(self, name, prefix):
        """Record the tag name and its namespace prefix."""
        super(SelectorTag, self).__init__(name=name, prefix=prefix)
class SelectorAttribute(Immutable):
    """Attribute-selector rule: name, prefix and compiled match patterns."""

    __slots__ = ("attribute", "prefix", "pattern", "xml_type_pattern", "_hash")

    def __init__(self, attribute, prefix, pattern, xml_type_pattern):
        """Store the attribute name, prefix and the two match patterns."""
        super(SelectorAttribute, self).__init__(
            attribute=attribute, prefix=prefix,
            pattern=pattern, xml_type_pattern=xml_type_pattern
        )
class SelectorContains(Immutable):
    """`:contains()` rule holding the text payload to search for."""

    __slots__ = ("text", "_hash")

    def __init__(self, text):
        """Store the text payload."""
        super(SelectorContains, self).__init__(text=text)
class SelectorNth(Immutable):
    """`:nth-*` pseudo-class data: an+b coefficients plus qualifiers."""

    __slots__ = ("a", "n", "b", "of_type", "last", "selectors", "_hash")

    def __init__(self, a, n, b, of_type, last, selectors):
        """Record the an+b coefficients and the pseudo-class qualifiers."""
        super(SelectorNth, self).__init__(
            a=a, n=n, b=b, of_type=of_type, last=last, selectors=selectors
        )
class SelectorLang(Immutable):
    """`:lang()` rule: an immutable sequence of language ranges."""

    __slots__ = ("languages", "_hash",)

    def __init__(self, languages):
        """Freeze the language ranges into a tuple."""
        super(SelectorLang, self).__init__(languages=tuple(languages))

    def __iter__(self):
        """Iterate over the language ranges."""
        return iter(self.languages)

    def __len__(self):  # pragma: no cover
        """Number of language ranges."""
        return len(self.languages)

    def __getitem__(self, index):  # pragma: no cover
        """Indexed access into the language ranges."""
        return self.languages[index]
class SelectorList(Immutable):
    """Ordered, immutable collection of selectors plus `:not()`/HTML flags."""

    __slots__ = ("selectors", "is_not", "is_html", "_hash")

    def __init__(self, selectors=tuple(), is_not=False, is_html=False):
        """Freeze the selectors into a tuple and record the two flags."""
        super(SelectorList, self).__init__(
            selectors=tuple(selectors), is_not=is_not, is_html=is_html
        )

    def __iter__(self):
        """Iterate over the contained selectors."""
        return iter(self.selectors)

    def __len__(self):
        """Number of contained selectors."""
        return len(self.selectors)

    def __getitem__(self, index):
        """Indexed access into the contained selectors."""
        return self.selectors[index]
def _pickle(p):
    """Pickle helper: (base class, per-slot state tuple), omitting `_hash`."""
    state = tuple(getattr(p, attr) for attr in p.__slots__[:-1])
    return p.__base__(), state
def pickle_register( |
order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_min_samples_and_features_messages():
    """Exact error messages raised by the ensure_min_samples /
    ensure_min_features guards of check_array and check_X_y."""
    # empty list is considered 2D by default:
    msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
    assert_raise_message(ValueError, msg, check_array, [])
    # If considered a 1D collection when ensure_2d=False, then the minimum
    # number of samples will break:
    msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
    assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
    # Invalid edge case when checking the default minimum sample of a scalar
    msg = "Singleton array array(42) cannot be considered a valid collection."
    assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
    # But this works if the input data is forced to look like a 2 array with
    # one sample and one feature:
    X_checked = check_array(42, ensure_2d=True)
    assert_array_equal(np.array([[42]]), X_checked)
    # Simulate a model that would need at least 2 samples to be well defined
    X = np.ones((1, 10))
    y = np.ones(1)
    msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
    assert_raise_message(ValueError, msg, check_X_y, X, y,
                         ensure_min_samples=2)
    # The same message is raised if the data has 2 dimensions even if this is
    # not mandatory
    assert_raise_message(ValueError, msg, check_X_y, X, y,
                         ensure_min_samples=2, ensure_2d=False)
    # Simulate a model that would require at least 3 features (e.g. SelectKBest
    # with k=3)
    X = np.ones((10, 2))
    y = np.ones(2)
    msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
    assert_raise_message(ValueError, msg, check_X_y, X, y,
                         ensure_min_features=3)
    # Only the feature check is enabled whenever the number of dimensions is 2
    # even if allow_nd is enabled:
    assert_raise_message(ValueError, msg, check_X_y, X, y,
                         ensure_min_features=3, allow_nd=True)
    # Simulate a case where a pipeline stage as trimmed all the features of a
    # 2D dataset.
    X = np.empty(0).reshape(10, 0)
    y = np.ones(10)
    msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
    assert_raise_message(ValueError, msg, check_X_y, X, y)
    # nd-data is not checked for any minimum number of features by default:
    X = np.ones((10, 0, 28, 28))
    y = np.ones(10)
    X_checked, y_checked = check_X_y(X, y, allow_nd=True)
    assert_array_equal(X, X_checked)
    assert_array_equal(y, y_checked)
def test_has_fit_parameter():
    """has_fit_parameter detects `sample_weight` on classes and instances."""
    assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
    assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
    assert_true(has_fit_parameter(SVR, "sample_weight"))
    # Works on an instance as well as on the class.
    assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
    """check_symmetric: 1D input raises; asymmetric input warns (or raises
    on request) and is symmetrized while preserving the sparse format."""
    arr_sym = np.array([[0, 1], [1, 2]])
    arr_bad = np.ones(2)
    arr_asym = np.array([[0, 2], [0, 2]])

    converters = [
        ('dense', lambda a: a),
        ('dok', sp.dok_matrix),
        ('csr', sp.csr_matrix),
        ('csc', sp.csc_matrix),
        ('coo', sp.coo_matrix),
        ('lil', sp.lil_matrix),
        ('bsr', sp.bsr_matrix),
    ]

    # check error for bad inputs
    assert_raises(ValueError, check_symmetric, arr_bad)

    # check that asymmetric arrays are properly symmetrized
    for arr_format, convert in converters:
        arr = convert(arr_asym)
        # Check for warnings and errors
        assert_warns(UserWarning, check_symmetric, arr)
        assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
        output = check_symmetric(arr, raise_warning=False)
        if sp.issparse(output):
            assert_equal(output.format, arr_format)
            assert_array_equal(output.toarray(), arr_sym)
        else:
            assert_array_equal(output, arr_sym)
def test_check_is_fitted():
    """check_is_fitted: type errors on bad input, NotFittedError (with
    custom message support) before fitting, and None after fitting."""
    # Check is ValueError raised when non estimator instance passed
    assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
    assert_raises(TypeError, check_is_fitted, "SVR", "support_")
    ard = ARDRegression()
    svr = SVR()
    try:
        assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
        assert_raises(NotFittedError, check_is_fitted, svr, "support_")
    except ValueError:
        assert False, "check_is_fitted failed with ValueError"
    # NotFittedError is a subclass of both ValueError and AttributeError
    try:
        check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
    except ValueError as e:
        assert_equal(str(e), "Random message ARDRegression, ARDRegression")
    try:
        check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
    except AttributeError as e:
        assert_equal(str(e), "Another message SVR, SVR")
    ard.fit(*make_blobs())
    svr.fit(*make_blobs())
    # Fitted estimators pass silently and return None.
    assert_equal(None, check_is_fitted(ard, "coef_"))
    assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), s |
#!/usr/bin/env python
# Copyright (c) 2016 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import os
import unittest
import mock
from click.testing import CliRunner
# Stub out the heavyweight CLI command constructors so that importing
# `cli_base` below does not require a real OpenStack environment.
with mock.patch('functest.cli.commands.cli_testcase.CliTestcase.__init__',
                mock.Mock(return_value=None)), \
        mock.patch('functest.cli.commands.cli_tier.CliTier.__init__',
                   mock.Mock(return_value=None)):
    os.environ['OS_AUTH_URL'] = ''  # neutralize any real auth endpoint
    from functest.cli import cli_base
class CliBaseTesting(unittest.TestCase):
    """Unit tests for the `cli_base` Click commands.

    Every test patches the backing command object, invokes the Click
    command through CliRunner, and checks that the command exits with 0
    and actually delegates to the patched method.  The repeated
    patch/invoke/assert boilerplate is factored into `_check_invoke`.
    """

    def setUp(self):
        self.runner = CliRunner()
        self._openstack = cli_base.OPENSTACK
        self._env = cli_base.ENV
        self._testcase = cli_base.TESTCASE
        self._tier = cli_base.TIER

    def _check_invoke(self, obj, method_name, command, args=None):
        """Invoke `command` (with optional CLI `args`) while `obj.method_name`
        is mocked; assert a clean exit and that the method was called."""
        with mock.patch.object(obj, method_name) as mock_method:
            result = self.runner.invoke(command, args or [])
            self.assertEqual(result.exit_code, 0)
            self.assertTrue(mock_method.called)

    def test_os_check(self):
        self._check_invoke(self._openstack, 'check', cli_base.os_check)

    def test_os_show_credentials(self):
        self._check_invoke(self._openstack, 'show_credentials',
                           cli_base.os_show_credentials)

    def test_env_show(self):
        self._check_invoke(self._env, 'show', cli_base.env_show)

    def test_testcase_list(self):
        self._check_invoke(self._testcase, 'list', cli_base.testcase_list)

    def test_testcase_show(self):
        self._check_invoke(self._testcase, 'show', cli_base.testcase_show,
                           ['testname'])

    def test_testcase_run(self):
        self._check_invoke(self._testcase, 'run', cli_base.testcase_run,
                           ['testname', '--noclean'])

    def test_tier_list(self):
        self._check_invoke(self._tier, 'list', cli_base.tier_list)

    def test_tier_show(self):
        self._check_invoke(self._tier, 'show', cli_base.tier_show,
                           ['tiername'])

    def test_tier_gettests(self):
        self._check_invoke(self._tier, 'gettests', cli_base.tier_gettests,
                           ['tiername'])

    def test_tier_run(self):
        self._check_invoke(self._tier, 'run', cli_base.tier_run,
                           ['tiername', '--noclean'])
if __name__ == "__main__":
    # Silence the CLI's own logging noise while the tests run.
    logging.disable(logging.CRITICAL)
    unittest.main(verbosity=2)
|
from pylayers.antprop.antenna import *
from pylayers.antprop.antvsh import *
import matplotlib.pylab as plt
from numpy import *
import pdb
"""
This test :
1 : loads a measured antenna
2 : applies an electrical delay obtained from data with getdelay method
3 : evaluate the antenna vsh coefficient with a downsampling factor of 2
4 : evaluates the relative error of reconstruction (vsh3) for various values of order l
5 : display the results
"""
# NOTE(review): Python 2 script (print statements below).
filename = 'S1R1.mat'
# Load the same measured antenna twice: A is converted with the new
# s2tos3_new routine, B with the reference threshold-based s2tos3, so
# their reconstruction errors can be compared.
A = Antenna(filename,'ant/UWBAN/Matfile')
B = Antenna(filename,'ant/UWBAN/Matfile')
#plot(freq,angle(A.Ftheta[:,maxPowerInd[1],maxPowerInd[2]]*exp(2j*pi*freq.reshape(len(freq))*electricalDelay)))
# Frequency axis reshaped for broadcasting against the pattern arrays.
freq = A.fa.reshape(104,1,1)
delayCandidates = arange(-10,10,0.001)
electricalDelay = A.getdelay(freq,delayCandidates)
# NOTE(review): `disp` is not provided by the star imports above --
# presumably meant to be print; confirm before running.
disp('Electrical Delay = ' + str(electricalDelay)+' ns')
# Compensate the estimated electrical delay on both polarizations.
A.Ftheta = A.Ftheta*exp(2*1j*pi*freq*electricalDelay)
B.Ftheta = B.Ftheta*exp(2*1j*pi*freq*electricalDelay)
A.Fphi = A.Fphi*exp(2*1j*pi*freq*electricalDelay)
B.Fphi = B.Fphi*exp(2*1j*pi*freq*electricalDelay)
# Vector spherical harmonic decomposition with downsampling factor 2.
dsf = 2
A = vsh(A,dsf)
B = vsh(B,dsf)
tn = []
tet = []
tep = []
te = []
tmse = []
l = 20
A.C.s1tos2(l)
B.C.s1tos2(l)
u = np.shape(A.C.Br.s2)
Nf = u[0]
Nk = u[1]
tr = np.arange(2,Nk)
# A truncated by rank (new routine), B by energy threshold (reference).
A.C.s2tos3_new(Nk)
B.C.s2tos3(1e-6)
# Per-coefficient energy for both conversions.
UA = np.sum(A.C.Cr.s3*np.conj(A.C.Cr.s3),axis=0)
UB = np.sum(B.C.Cr.s3*np.conj(B.C.Cr.s3),axis=0)
ua = A.C.Cr.ind3
ub = B.C.Cr.ind3
da ={}
db ={}
for k in range(Nk):
    da[str(ua[k])]=UA[k]
    db[str(ub[k])]=UB[k]
# Energy difference per coefficient index between the two conversions.
tu = []
for t in sort(da.keys()):
    tu.append(da[t] - db[t])
errelTha,errelPha,errela = A.errel(l,20,dsf,typ='s3')
errelThb,errelPhb,errelb = B.errel(l,20,dsf,typ='s3')
print "a: nok",errela,errelPha,errelTha
print "b: ok ",errelb,errelPhb,errelThb
# Reconstruction error as a function of the truncation rank r.
for r in tr:
    E = A.C.s2tos3_new(r)
    errelTh,errelPh,errel = A.errel(l,20,dsf,typ='s3')
    print 'r : ',r,errel,E
    tet.append(errelTh)
    tep.append(errelPh)
    te.append(errel)
#
line1 = plt.plot(array(tr),10*log10(array(tep)),'b')
line2 = plt.plot(array(tr),10*log10(array(tet)),'r')
line3 = plt.plot(array(tr),10*log10(array(te)),'g')
#
plt.xlabel('order l')
plt.ylabel(u'$\epsilon_{rel}$ (dB)',fontsize=18)
plt.title('Evolution of reconstruction relative error wrt order')
# NOTE(review): the second legend call overrides the first, and the final
# legend call placed after show() has no visible effect.
plt.legend((u'$\epsilon_{rel}^{\phi}$',u'$\epsilon_{rel}^{\\theta}$',u'$\epsilon_{rel}^{total}$'))
plt.legend((line1,line2,line3),('a','b','c'))
plt.show()
plt.legend(('errel_phi','errel_theta','errel'))
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Utilities related to QWebHistory."""
from PyQt5.QtCore import QByteArray, QDataStream, QIODevice, QUrl
from qutebrowser.utils import utils, qtutils
# Version tags written into the serialized history stream; they must match
# the values the QtWebKit restore code expects (see the cpp references in
# the serialization helpers below).
HISTORY_STREAM_VERSION = 2
BACK_FORWARD_TREE_VERSION = 2
class TabHistoryItem:

    """A single item in the tab history.

    Attributes:
        url: The QUrl of this item.
        original_url: The QUrl of this item which was originally requested.
        title: The title as string of this item.
        active: Whether this item is the item currently navigated to.
        user_data: The user data for this item.
    """

    def __init__(self, url, title, *, original_url=None, active=False,
                 user_data=None):
        self.url = url
        # Fall back to the final URL when no distinct original was requested.
        self.original_url = url if original_url is None else original_url
        self.title = title
        self.active = active
        self.user_data = user_data

    def __repr__(self):
        return utils.get_repr(self, constructor=True, url=self.url,
                              original_url=self.original_url,
                              title=self.title, active=self.active,
                              user_data=self.user_data)
def _encode_url(url):
    """Percent-encode a QUrl into an ASCII string for QWebHistory."""
    encoded = QUrl.toPercentEncoding(url.toString(), b':/#?&+=@%*')
    return bytes(encoded).decode('ascii')
def _serialize_item(i, item, stream):
    """Serialize a single WebHistoryItem into a QDataStream.

    Fields are written in the exact order QtWebKit's restore code reads
    them -- do not reorder.

    Args:
        i: The index of the current item.
        item: The WebHistoryItem to write.
        stream: The QDataStream to write to.
    """
    ### Source/WebCore/history/qt/HistoryItemQt.cpp restoreState
    ## urlString
    stream.writeQString(_encode_url(item.url))
    ## title
    stream.writeQString(item.title)
    ## originalURLString
    stream.writeQString(_encode_url(item.original_url))
    ### Source/WebCore/history/HistoryItem.cpp decodeBackForwardTree
    ## backForwardTreeEncodingVersion
    stream.writeUInt32(BACK_FORWARD_TREE_VERSION)
    ## size (recursion stack)
    stream.writeUInt64(0)
    ## node->m_documentSequenceNumber
    # If two HistoryItems have the same document sequence number, then they
    # refer to the same instance of a document. Traversing history from one
    # such HistoryItem to another preserves the document.
    stream.writeInt64(i + 1)
    ## size (node->m_documentState)
    stream.writeUInt64(0)
    ## node->m_formContentType
    # info used to repost form data
    stream.writeQString(None)
    ## hasFormData
    stream.writeBool(False)
    ## node->m_itemSequenceNumber
    # If two HistoryItems have the same item sequence number, then they are
    # clones of one another. Traversing history from one such HistoryItem to
    # another is a no-op. HistoryItem clones are created for parent and
    # sibling frames when only a subframe navigates.
    stream.writeInt64(i + 1)
    ## node->m_referrer
    stream.writeQString(None)
    ## node->m_scrollPoint (x)
    # Scroll position is restored when present in user_data; missing or
    # malformed data falls back to 0.
    try:
        stream.writeInt32(item.user_data['scroll-pos'].x())
    except (KeyError, TypeError):
        stream.writeInt32(0)
    ## node->m_scrollPoint (y)
    try:
        stream.writeInt32(item.user_data['scroll-pos'].y())
    except (KeyError, TypeError):
        stream.writeInt32(0)
    ## node->m_pageScaleFactor
    stream.writeFloat(1)
    ## hasStateObject
    # Support for HTML5 History
    stream.writeBool(False)
    ## node->m_target
    stream.writeQString(None)
    ### Source/WebCore/history/qt/HistoryItemQt.cpp restoreState
    ## validUserData
    # We could restore the user data here, but we prefer to use the
    # QWebHistoryItem API for that.
    stream.writeBool(False)
def serialize(items):
    """Serialize a list of QWebHistoryItems to a data stream.

    Args:
        items: An iterable of WebHistoryItems.

    Return:
        A (stream, data, user_data) tuple.
            stream: The reseted QDataStream.
            data: The QByteArray with the raw data.
            user_data: A list with each item's user data.

    Warning:
        If 'data' goes out of scope, reading from 'stream' will result in a
        segfault!
    """
    data = QByteArray()
    stream = QDataStream(data, QIODevice.ReadWrite)
    user_data = []

    # Exactly one item may be flagged active; it becomes the current index.
    active_positions = [pos for pos, entry in enumerate(items) if entry.active]
    if len(active_positions) > 1:
        raise ValueError("Multiple active items ({} and {}) "
                         "found!".format(active_positions[0],
                                         active_positions[1]))
    if items:
        if not active_positions:
            raise ValueError("No active item found!")
        current_idx = active_positions[0]
    else:
        current_idx = 0

    ### Source/WebKit/qt/Api/qwebhistory.cpp operator<<
    stream.writeInt(HISTORY_STREAM_VERSION)
    stream.writeInt(len(items))
    stream.writeInt(current_idx)

    for pos, entry in enumerate(items):
        _serialize_item(pos, entry, stream)
        user_data.append(entry.user_data)

    stream.device().reset()
    qtutils.check_qdatastream(stream)
    return stream, data, user_data
|
"""
@brief test tree node (time=50s)
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, ExtTestCase
from pyquickhelper.pycode.venv_helper import create_virtual_env
class TestVenvHelper(ExtTestCase):
    """Checks that `create_virtual_env` builds a usable virtual environment."""

    def test_venv_empty(self):
        """Create an empty venv in a temp folder and verify its Scripts dir."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        if __name__ != "__main__":
            # does not accept virtual environment
            # NOTE(review): effectively skips the test unless run as a
            # script -- creating a venv from inside a venv is unsupported.
            return
        temp = get_temp_folder(__file__, "temp_venv_empty")
        out = create_virtual_env(temp, fLOG=fLOG)
        fLOG("-----")
        fLOG(out)
        fLOG("-----")
        # "Scripts" is the Windows venv layout -- assumes Windows. TODO confirm.
        pyt = os.path.join(temp, "Scripts")
        self.assertExists(pyt)
        lo = os.listdir(pyt)
        self.assertNotEmpty(lo)
if __name__ | == "__main__":
unittest.main()
|
ath(root)
self.show_hidden = show_hidden
self.base_path = base_path.rstrip("/")
def propfind(self, path, depth, request_xml):
    """Answer a PROPFIND request: gather properties for `path` (and its
    children when depth == 1) and wrap them in a multi-status response."""
    # TODO implement support for allprop
    paths = self._build_paths(path, depth)
    return multi_status.MultiStatus(self._get_properties(paths, request_xml))
def _build_paths(self, path, depth):
path = path.strip("/")
path = os.path.abspath(os.path.join(self.root, path))
if path.startswith(self.root) and os.path.exists(path):
paths = [path]
if os.path.isdir(path) and depth == 1:
for p in os.listdir(path):
if self._show(p):
paths.append(os.path.join(path, p))
for i, p in enumerate(paths):
if os.path.isdir(p) and p[:-1] != "/":
paths[i] = p + "/"
return paths
raise IOError
def _show(self, filename):
return self.show_hidden or not filename.startswith(".")
def _get_properties(self, paths, request_xml):
    """Build a Response (href + PropStat) for every path, resolving each
    DAV: property requested by the PROPFIND body."""
    result = []
    for p in paths:
        prop_stat = multi_status.PropStat(status.OK)
        try:
            st = os.stat(p)
            fs_st = os.statvfs(p.encode("utf-8"))
        except:
            # NOTE(review): bare except silently drops entries that fail to
            # stat (e.g. removed mid-listing) -- intentional best-effort.
            continue
        name = self._build_displayname(p)
        is_dir = os.path.isdir(p)
        for property_ in request_xml.find("{DAV:}propfind", "{DAV:}prop"):
            if property_ == "{DAV:}resourcetype":
                prop_stat.add_resourcetype(is_dir)
            elif property_ == "{DAV:}creationdate":
                prop_stat.add_creationdate(epoch2iso8601(st.st_ctime))
            elif property_ == "{DAV:}displayname":
                prop_stat.add_displayname(name)
            elif property_ == "{DAV:}getcontentlength":
                # Content length only makes sense for plain files.
                if not is_dir:
                    prop_stat.add_getcontentlength(st.st_size)
            elif property_ == "{DAV:}getcontenttype":
                if not is_dir:
                    ct = mimetypes.guess_type(p)[0] or "application/octet-stream"
                    prop_stat.add_getcontenttype(ct)
            elif property_ == "{DAV:}getetag":
                # ETag derived from name + mtime; md5 is used as a cheap
                # identifier here, not as a security measure.
                prop_stat.add_getetag(md5.new("%s%s" % (name.encode("utf-8"), st.st_mtime)).hexdigest())
            elif property_ == "{DAV:}getlastmodified":
                prop_stat.add_getlastmodified(epoch2iso1123(st.st_mtime))
            elif property_ == "{DAV:}quota-available-bytes":
                prop_stat.add_quota_available_bytes(fs_st.f_bavail * fs_st.f_frsize)
            elif property_ == "{DAV:}quota-used-bytes":
                prop_stat.add_quota_used_bytes((fs_st.f_blocks - fs_st.f_bavail) * fs_st.f_frsize)
            else:
                print "Request for not supported property %s" % property_
        href = self.base_path + p[len(self.root):]
        result.append(multi_status.Response(href, prop_stat))
    return result
def _build_displayname(self, path):
cut = len(self.root)
return os.path.basename(os.path.normpath(path[cut:]))
def head(self, path):
    """HEAD request: same as GET but without a response body."""
    return self.get(path, False)
def get(self, path, with_body=True):
    """GET request: serve a file, or an HTML listing for a directory.

    `with_body=False` (used by HEAD) sends the headers only.
    """
    filename = os.path.abspath(os.path.join(self.root, path.strip("/")))
    if not filename.startswith(self.root):
        # Path escaped the served root (traversal attempt).
        return response.Response(status.FORBIDDEN)
    elif not os.path.exists(filename):
        return response.Response(status.NOT_FOUND)
    if os.path.isdir(filename):
        body = None
        content_length = "0"
        if with_body:
            body = self._get_collection(filename)
            content_length = str(len(body))
        return response.Response(status.OK,
                                 {"Content-Type": "text/html",
                                  "Content-Length": content_length},
                                 [body] if with_body else None)
    else:
        st = os.stat(filename)
        headers = {"Content-Type": mimetypes.guess_type(filename)[0] or "application/octet-stream",
                   "Content-Length": str(st.st_size)}
        # Stream the file lazily instead of loading it into memory.
        return response.Response(status.OK,
                                 headers,
                                 FileIterator(filename) if with_body else None)
def _get_collection(self, path):
    """Render a simple HTML listing of a directory (directories first,
    both groups sorted case-insensitively); returns UTF-8 encoded bytes."""
    filenames = os.listdir(path)
    directories = [f for f in filenames if self._show(f) and os.path.isdir(os.path.join(path, f))]
    files = [f for f in filenames if self._show(f) and os.path.isfile(os.path.join(path, f))]
    directories.sort(key=lambda d: d.lower())
    files.sort(key=lambda f: f.lower())
    filenames = directories + files
    result = u"""\
<html>
<head>
<title>Content of %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
</head>
<body>
<ul style="padding:0;margin:0;list-style-type:none;">
""" % os.path.basename(path)
    # Directories are rendered in [brackets] to distinguish them from files.
    tplDirectory = """<li><a href="%s">[%s]</a></li>\n"""
    tplFile = """<li><a href="%s">%s</a></li>\n"""
    for f in filenames:
        p = os.path.join(path, f)
        href = self.base_path + p[len(self.root):]
        if os.path.isdir(p):
            result += tplDirectory % (href, f)
        else:
            result += tplFile % (href, f)
    result += """\
</ul>
</body>
</html>
"""
    return result.encode("utf-8")
def put(self, path, content_length, body):
    """PUT request: write up to `content_length` bytes from `body` to `path`.

    Returns 201 when the file is newly created, 204 when an existing file
    is overwritten, and the usual WebDAV errors when the target escapes
    the root, is a directory, or its parent collection does not exist.
    """
    filename = os.path.abspath(os.path.join(self.root, path.strip("/")))
    if not filename.startswith(self.root):
        return response.Response(status.FORBIDDEN)
    elif os.path.isdir(filename):
        return response.Response(status.NOT_ALLOWED)
    elif not os.path.isdir(os.path.dirname(filename)):
        return response.Response(status.CONFLICT)
    created = not os.path.exists(filename)
    # FIX: use a context manager so the file handle is closed even when
    # reading from `body` raises mid-transfer (the original leaked it).
    with open(filename, "wb") as f:
        if content_length:
            remaining = content_length
            while remaining > 0:
                buf = body.read(min(remaining, BLOCK_SIZE))
                if len(buf):
                    f.write(buf)
                    remaining -= len(buf)
                else:
                    # Short read: the client sent less than announced.
                    break
    if created:
        return response.Response(status.CREATED)
    else:
        return response.Response(status.NO_CONTENT)
def mkcol(self, path):
    """MKCOL request: create a new collection (directory) under the root."""
    dirname = os.path.abspath(os.path.join(self.root, path.strip("/")))
    if not dirname.startswith(self.root):
        return response.Response(status.FORBIDDEN)
    elif os.path.exists(dirname):
        # RFC 4918: MKCOL on an existing resource is Method Not Allowed.
        return response.Response(status.NOT_ALLOWED)
    elif not os.path.isdir(os.path.dirname(dirname)):
        # The parent collection must already exist.
        return response.Response(status.CONFLICT)
    os.mkdir(dirname)
    return response.Response(status.CREATED, {}, None)
def delete(self, path):
    """DELETE request: remove a file, or a directory tree recursively."""
    filename = os.path.abspath(os.path.join(self.root, path.strip("/")))
    if not filename.startswith(self.root):
        return response.Response(status.FORBIDDEN)
    if not os.path.exists(filename):
        return response.Response(status.NOT_FOUND)
    if os.path.isfile(filename):
        os.remove(filename)
    elif os.path.isdir(filename):
        shutil.rmtree(filename)
    return response.Response(status.NO_CONTENT)
def move(self, src, dst, overwrite):
if not dst.startswith(self.base_path):
return response.Response(status.FORBIDDEN)
source = os.path.join(self.root, src.strip("/"))
source = os.path.abspath(source)
destination = dst[len(self.base_path):]
destination = os.path.join(self.root, destination.strip("/"))
destination = os.path.abspath(destination)
if not source.startswith(self.root) or not destination.startswith(self.root):
return response.Response(status.FORBIDDEN)
|
# Copyright (c) 2012-2015 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import scipy
from ..util.univariate_Gaussian import std_norm_cdf, std_norm_pdf
import scipy as sp
from ..util.misc import safe_exp, safe_square, safe_cube, safe_quad, safe_three_times
class GPTransformation(object):
    """
    Link function class for doing non-Gaussian likelihoods approximation

    :param Y: observed output (Nx1 numpy.darray)

    .. note:: Y values allowed depend on the likelihood_function used

    Subclasses implement ``transf`` (latent space -> output space) and its
    first three derivatives with respect to the latent function f.
    """
    def __init__(self):
        pass

    def transf(self,f):
        """
        Gaussian process tranformation function, latent space -> output space
        """
        raise NotImplementedError

    def dtransf_df(self,f):
        """
        derivative of transf(f) w.r.t. f
        """
        raise NotImplementedError

    def d2transf_df2(self,f):
        """
        second derivative of transf(f) w.r.t. f
        """
        raise NotImplementedError

    def d3transf_df3(self,f):
        """
        third derivative of transf(f) w.r.t. f
        """
        raise NotImplementedError

    def to_dict(self):
        # Subclasses serialize themselves by adding a "class" entry.
        raise NotImplementedError

    def _to_dict(self):
        # Base contributes no state of its own.
        return {}

    @staticmethod
    def from_dict(input_dict):
        """Rebuild a link function from a dict produced by ``to_dict``."""
        import copy
        input_dict = copy.deepcopy(input_dict)
        link_class = input_dict.pop('class')
        import GPy
        # NOTE(review): eval() of the stored class name executes arbitrary
        # code if the dict comes from an untrusted source (e.g. a loaded
        # model file) -- consider a whitelist lookup instead.
        link_class = eval(link_class)
        return link_class._from_dict(link_class, input_dict)

    @staticmethod
    def _from_dict(link_class, input_dict):
        # Default deserializer: remaining entries become constructor kwargs.
        return link_class(**input_dict)
class Identity(GPTransformation):
    """
    Identity link: the latent function is used directly.

    .. math::

        g(f) = f
    """
    def transf(self,f):
        return f

    def dtransf_df(self,f):
        # d(f)/df = 1 everywhere.
        return np.ones_like(f)

    def d2transf_df2(self,f):
        return np.zeros_like(f)

    def d3transf_df3(self,f):
        return np.zeros_like(f)

    def to_dict(self):
        """Serialize: record the fully qualified class name."""
        input_dict = super(Identity, self)._to_dict()
        input_dict["class"] = "GPy.likelihoods.link_functions.Identity"
        return input_dict
class Probit(GPTransformation):
    """
    Probit link.

    .. math::

        g(f) = \\Phi^{-1} (mu)

    ``transf`` evaluates the standard normal CDF :math:`\\Phi(f)`; the
    formula above states the link in its inverse form. TODO confirm naming.
    """
    def transf(self,f):
        return std_norm_cdf(f)

    def dtransf_df(self,f):
        # d/df Phi(f) = phi(f), the standard normal pdf.
        return std_norm_pdf(f)

    def d2transf_df2(self,f):
        # d/df phi(f) = -f * phi(f).
        return -f * std_norm_pdf(f)

    def d3transf_df3(self,f):
        # d2/df2 phi(f) = (f^2 - 1) * phi(f).
        return (safe_square(f)-1.)*std_norm_pdf(f)

    def to_dict(self):
        input_dict = super(Probit, self)._to_dict()
        input_dict["class"] = "GPy.likelihoods.link_functions.Probit"
        return input_dict
class Cloglog(GPTransformation):
    """
    Complementary log-log link
    .. math::

        p(f) = 1 - e^{-e^f}

        or

        f = \log (-\log(1-p))

    NOTE(review): unlike its siblings this class defines no ``to_dict``,
    so instances cannot be round-tripped through from_dict.
    """
    def transf(self,f):
        ef = safe_exp(f)
        return 1-np.exp(-ef)

    def dtransf_df(self,f):
        # d/df (1 - e^{-e^f}) = e^{f - e^f}.
        ef = safe_exp(f)
        return np.exp(f-ef)

    def d2transf_df2(self,f):
        # Second derivative: -e^{f - e^f} (e^f - 1).
        ef = safe_exp(f)
        return -np.exp(f-ef)*(ef-1.)

    def d3transf_df3(self,f):
        # Third derivative: e^{f - e^f} (1 - 3 e^f + e^{2f}).
        ef = safe_exp(f)
        ef2 = safe_square(ef)
        three_times_ef = safe_three_times(ef)
        r_val = np.exp(f-ef)*(1.-three_times_ef + ef2)
        return r_val
class Log(GPTransformation):
    """
    Log link: the latent function models the log of the output.

    .. math::

        g(f) = \\log(\\mu)
    """
    def transf(self,f):
        return safe_exp(f)

    # All derivatives of exp are exp itself.
    def dtransf_df(self,f):
        return safe_exp(f)

    def d2transf_df2(self,f):
        return safe_exp(f)

    def d3transf_df3(self,f):
        return safe_exp(f)
class Log_ex_1(GPTransformation):
    """
    .. math::

        g(f) = \\log(\\exp(\\mu) - 1)

    ``transf`` evaluates the inverse map log(1 + e^f) (the softplus),
    which is always positive.
    """
    def transf(self,f):
        return scipy.special.log1p(safe_exp(f))

    def dtransf_df(self,f):
        # Logistic sigmoid: e^f / (1 + e^f).
        ef = safe_exp(f)
        return ef/(1.+ef)

    def d2transf_df2(self,f):
        # sigmoid'(f) = s (1 - s).
        ef = safe_exp(f)
        aux = ef/(1.+ef)
        return aux*(1.-aux)

    def d3transf_df3(self,f):
        # sigmoid''(f) = s (1 - s) (1 - 2 s).
        ef = safe_exp(f)
        aux = ef/(1.+ef)
        daux_df = aux*(1.-aux)
        return daux_df - (2.*aux*daux_df)
class Reciprocal(GPTransformation):
    """Reciprocal link: g(f) = 1/f, with derivatives (-1)^n n! / f^{n+1}."""
    def transf(self,f):
        return 1./f

    def dtransf_df(self, f):
        f2 = safe_square(f)
        return -1./f2

    def d2transf_df2(self, f):
        f3 = safe_cube(f)
        return 2./f3

    def d3transf_df3(self,f):
        f4 = safe_quad(f)
        return -6./f4
class Heaviside(GPTransformation):
    """
    Heaviside step link (0/1 indicator of the latent sign).

    .. math::

        g(f) = I_{x \\geq 0}

    The step function has no derivatives, so all derivative methods raise.
    """
    def transf(self,f):
        #transformation goes here
        return np.where(f>0, 1, 0)

    def dtransf_df(self,f):
        raise NotImplementedError("This function is not differentiable!")

    def d2transf_df2(self,f):
        raise NotImplementedError("This function is not differentiable!")
|
def dev_nav(active=None):
    """Render the develop-menu navigation bar as an HTML <span>.

    Each entry of ``settings.MENUS_DEVELOP.nav`` becomes a link, except
    the entry whose name equals `active`, which is rendered bold instead.
    """
    from uliweb import settings
    parts = ["<span>"]
    for item in settings.MENUS_DEVELOP.nav:
        if active != item["name"]:
            # FIX: the original emitted a second opening `<a>` instead of
            # the closing `</a>`, producing invalid, ever-nesting markup.
            parts.append("<a href='%s'>%s</a> " % (item["link"], item["title"]))
        else:
            parts.append("<strong>%s</strong> " % (item["title"],))
    parts.append("</span>")
    return "".join(parts)
| |
import os

from traits.api import HasTraits
from traitsui.api import View, Item
from enable.savage.trait_defs.ui.svg_button import SVGButton

# Icon files shipped next to this demo script.
pause_icon = os.path.join(os.path.dirname(__file__), 'player_pause.svg')
resume_icon = os.path.join(os.path.dirname(__file__), 'player_play.svg')


class SVGDemo(HasTraits):
    """Minimal demo: a single toggling SVG pause/resume button."""

    # `toggle=True` makes the button flip between the pause and resume
    # states (icon, label, tooltip) on each click.
    pause = SVGButton('Pause', filename=pause_icon,
                      toggle_filename=resume_icon,
                      toggle_state=True,
                      toggle_label='Resume',
                      toggle_tooltip='Resume',
                      tooltip='Pause', toggle=True)

    trait_view = View(Item('pause'))


SVGDemo().configure_traits()  # blocks: opens the demo window
|
if sock is not None:
sock.close()
def exithook(self):
    "override for specific exit action"
    # Hard exit: deliberately bypasses atexit handlers and finally blocks.
    os._exit(0)
def debug(self, *args):
    """Write a debug line (location, thread name, then args) to the real
    stderr; no-op unless debugging is enabled."""
    if not self.debugging:
        return
    parts = [self.location, str(threading.current_thread().name)]
    parts.extend(str(a) for a in args)
    print(" ".join(parts), file=sys.__stderr__)
def register(self, oid, object):
    """Expose `object` for incoming remote calls under id `oid`."""
    self.objtable[oid] = object
def unregister(self, oid):
    """Remove `oid` from the object table; unknown ids are ignored."""
    self.objtable.pop(oid, None)
    def localcall(self, seq, request):
        """Execute a request against a locally registered object.

        *request* is ("CALL"|"QUEUE", (oid, methodname, args, kwargs)).
        Returns a (how, what) response tuple; exceptions from the called
        method are transported back as ("CALLEXC", exc).
        """
        self.debug("localcall:", request)
        try:
            how, (oid, methodname, args, kwargs) = request
        except TypeError:
            return ("ERROR", "Bad request format")
        if oid not in self.objtable:
            return ("ERROR", "Unknown object id: %r" % (oid,))
        obj = self.objtable[oid]
        # Two introspection pseudo-methods let the peer discover the API.
        if methodname == "__methods__":
            methods = {}
            _getmethods(obj, methods)
            return ("OK", methods)
        if methodname == "__attributes__":
            attributes = {}
            _getattributes(obj, attributes)
            return ("OK", attributes)
        if not hasattr(obj, methodname):
            return ("ERROR", "Unsupported method name: %r" % (methodname,))
        method = getattr(obj, methodname)
        try:
            if how == 'CALL':
                ret = method(*args, **kwargs)
                if isinstance(ret, RemoteObject):
                    # Return a reference instead of pickling the object itself.
                    ret = remoteref(ret)
                return ("OK", ret)
            elif how == 'QUEUE':
                # Deferred execution: the main thread drains request_queue.
                request_queue.put((seq, (method, args, kwargs)))
                return("QUEUED", None)
            else:
                return ("ERROR", "Unsupported message type: %s" % how)
        except SystemExit:
            # Never swallow interpreter-exit or user interrupts or OS errors.
            raise
        except KeyboardInterrupt:
            raise
        except OSError:
            raise
        except Exception as ex:
            # Ordinary exceptions travel back to the caller for re-raising.
            return ("CALLEXC", ex)
        except:
            # Non-Exception BaseExceptions end up here; report and continue.
            msg = "*** Internal Error: rpc.py:SocketIO.localcall()\n\n"\
                  " Object: %s \n Method: %s \n Args: %s\n"
            print(msg % (oid, method, args), file=sys.__stderr__)
            traceback.print_exc(file=sys.__stderr__)
            return ("EXCEPTION", None)
def remotecall(self, oid, methodname, args, kwargs):
self.debug("remotecall:asynccall: ", oid, methodname)
seq = self.asynccall(oid, methodname, args, kwargs)
return self.asyncreturn(seq)
def remotequeue(self, oid, methodname, args, kwargs):
self.debug("remotequeue:asyncqueue: ", oid, methodname)
seq = self.asyncqueue(oid, methodname, args, kwargs)
return self.asyncreturn(seq)
def asynccall(self, oid, methodname, args, kwargs):
request = ("CALL", (oid, methodname, args, kwargs))
seq = self.newseq()
if threading.current_thread() != self.sockthread:
cvar = threading.Condition()
self.cvars[seq] = cvar
self.debug(("asynccall:%d:" % seq), oid, methodname, args, kwargs)
self.putmessage((seq, request))
return seq
def asyncqueue(self, oid, methodname, args, kwargs):
request = ("QUEUE", (oid, methodname, args, kwargs))
seq = self.newseq()
if threading.current_thread() != self.sockthread:
cvar = threading.Condition()
self.cvars[seq] = cvar
self.debug(("asyncqueue:%d:" % seq), oid, methodname, args, kwargs)
self.putmessage((seq, request))
return seq
def asyncreturn(self, seq):
self.debug("asyncreturn | :%d:call getresponse(): " % seq)
response = self.getresponse(seq, wait=0.05)
self.debug(("asyncreturn:%d:response: " % seq), response)
return self.decoderesponse(response)
    def decoderesponse(self, response):
        """Translate a (how, what) response into a return value or an exception."""
        how, what = response
        if how == "OK":
            return what
        if how == "QUEUED":
            # Queued requests produce their result later; nothing to return now.
            return None
        if how == "EXCEPTION":
            self.debug("decoderesponse: EXCEPTION")
            return None
        if how == "EOF":
            self.debug("decoderesponse: EOF")
            # Let a subclass hook decide how to surface the closed connection.
            self.decode_interrupthook()
            return None
        if how == "ERROR":
            self.debug("decoderesponse: Internal ERROR:", what)
            raise RuntimeError(what)
        if how == "CALLEXC":
            # The remote call raised; re-raise the transported exception here.
            self.debug("decoderesponse: Call Exception:", what)
            raise what
        raise SystemError(how, what)
def decode_interrupthook(self):
""
raise EOFError
def mainloop(self):
"""Listen on socket until I/O not ready or EOF
pollresponse() will loop looking for seq number None, which
never comes, and exit on EOFError.
"""
try:
self.getresponse(myseq=None, wait=0.05)
except EOFError:
self.debug("mainloop:return")
return
def getresponse(self, myseq, wait):
response = self._getresponse(myseq, wait)
if response is not None:
how, what = response
if how == "OK":
response = how, self._proxify(what)
return response
def _proxify(self, obj):
if isinstance(obj, RemoteProxy):
return RPCProxy(self, obj.oid)
if isinstance(obj, list):
return list(map(self._proxify, obj))
# XXX Check for other types -- not currently needed
return obj
    def _getresponse(self, myseq, wait):
        """Wait for and return the raw response for *myseq*.

        The socket-owning thread polls the socket directly; any other thread
        sleeps on its per-sequence condition variable until the socket thread
        stores the response and notifies it.
        """
        self.debug("_getresponse:myseq:", myseq)
        if threading.current_thread() is self.sockthread:
            # this thread does all reading of requests or responses
            while 1:
                response = self.pollresponse(myseq, wait)
                if response is not None:
                    return response
        else:
            # wait for notification from socket handling thread
            cvar = self.cvars[myseq]
            cvar.acquire()
            while myseq not in self.responses:
                cvar.wait()
            response = self.responses[myseq]
            self.debug("_getresponse:%s: thread woke up: response: %s" %
                       (myseq, response))
            # Clean up the bookkeeping for this sequence number while still
            # holding the condition variable's lock.
            del self.responses[myseq]
            del self.cvars[myseq]
            cvar.release()
            return response
def newseq(self):
self.nextseq = seq = self.nextseq + 2
return seq
    def putmessage(self, message):
        """Pickle (seq, request) and send it length-prefixed over the socket."""
        self.debug("putmessage:%d:" % message[0])
        try:
            s = dumps(message)
        except pickle.PicklingError:
            print("Cannot pickle:", repr(message), file=sys.__stderr__)
            raise
        # Prefix the payload with its length as a little-endian 4-byte int.
        s = struct.pack("<i", len(s)) + s
        while len(s) > 0:
            try:
                # Block until the socket is writable, then send one chunk.
                r, w, x = select.select([], [self.sock], [])
                n = self.sock.send(s[:BUFSIZE])
            except (AttributeError, TypeError):
                raise OSError("socket no longer exists")
            s = s[n:]
    # Receive-buffer state: buff accumulates raw bytes; bufneed is how many
    # bytes the current stage still needs; bufstate selects the stage.
    buff = b''
    bufneed = 4
    bufstate = 0 # meaning: 0 => reading count; 1 => reading data
    def pollpacket(self, wait):
        """Return one complete packet, or None if no data arrives within *wait* seconds.

        Raises EOFError when the peer has closed the connection.
        """
        self._stage0()
        if len(self.buff) < self.bufneed:
            r, w, x = select.select([self.sock.fileno()], [], [], wait)
            if len(r) == 0:
                return None
            try:
                s = self.sock.recv(BUFSIZE)
            except OSError:
                raise EOFError
            if len(s) == 0:
                # recv returning b'' means the peer closed the socket.
                raise EOFError
            self.buff += s
            self._stage0()
        return self._stage1()
def _stage0(self):
if self.bufstate == 0 and len(self.buff) >= 4:
s = self.buff[:4]
self.buff = self.buff[4:]
self.bufneed = struct.unpack("<i", s)[0]
self.bufstate = 1
def _stage1(self):
if self.bufstate == 1 and len(self.buff) >= self.bufneed:
packet = self.buff[:self.bufneed]
self.buff = self.buff[self.bufneed:]
self.bufneed = 4
self.bufstate = 0
return packet
def pollmessa |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import easyiceconfig as EasyIce
import jderobotComm as comm
import sys, signal
sys.path.append('/usr/local/share/jderobot/python/visualHFSM_py')
import traceback, threading, time
from automatagui import AutomataGui, QtGui, GuiSubautomata
from jderobot import MotorsPrx
from jderobot import LaserPrx
class Automata():
    """Two-state obstacle-avoidance automaton generated by visualHFSM.

    Drives a Kobuki forward until the laser sees an obstacle closer than
    1.0, then backs away (turning) until the way is clear again.
    """
    def __init__(self):
        self.lock = threading.Lock()
        self.displayGui = False  # set to True by --displaygui=true
        self.StatesSub1 = [
            "GoForward",
            "GoBack",
        ]
        self.sub1 = "GoForward"  # active state of subautomaton 1
        self.run1 = True  # keeps the subautomaton loop running
    def calculate_obstacle(self):
        # True when the closest laser reading is nearer than 1.0
        # (presumably metres -- depends on the laser driver's units).
        self.laserData = self.KobukiLaser.getLaserData()
        min_dist = 1000
        for i in range(len(self.laserData.values)):
            if self.laserData.values[i] < min_dist:
                min_dist = self.laserData.values[i]
        if min_dist < 1.0:
            return True
        else:
            return False
    def startThreads(self):
        # Run subautomaton 1 on its own thread.
        self.t1 = threading.Thread(target=self.subautomata1)
        self.t1.start()
    def createAutomata(self):
        # Build the GUI description of the state machine (nodes + transitions).
        guiSubautomataList = []
        # Creating subAutomata1
        guiSubautomata1 = GuiSubautomata(1,0, self.automataGui)
        guiSubautomata1.newGuiNode(1, 0, 69, 163, 1, 'GoForward')
        guiSubautomata1.newGuiNode(2, 0, 255, 117, 0, 'GoBack')
        guiSubautomata1.newGuiTransition((69, 163), (255, 117), (139, 78), 1, 1, 2)
        guiSubautomata1.newGuiTransition((255, 117), (69, 163), (189, 196), 2, 2, 1)
        guiSubautomataList.append(guiSubautomata1)
        return guiSubautomataList
    def shutDown(self):
        # Ask the subautomaton loop to stop.
        self.run1 = False
    def runGui(self):
        # Launch the Qt runtime GUI, then start the automaton threads.
        app = QtGui.QApplication(sys.argv)
        self.automataGui = AutomataGui()
        self.automataGui.setAutomata(self.createAutomata())
        self.automataGui.loadAutomata()
        self.startThreads()
        self.automataGui.show()
        app.exec_()
    def subautomata1(self):
        # Control loop: evaluate transitions, actuate, and pace to ~`cycle` ms.
        self.run1 = True
        cycle = 100
        t_activated = False
        t_fin = 0
        while(self.run1):
            totala = time.time() * 1000000
            # Evaluation if
            if(self.sub1 == "GoForward"):
                if(self.calculate_obstacle()):
                    self.sub1 = "GoBack"
                    if self.displayGui:
                        self.automataGui.notifySetNodeAsActive('GoBack')
            elif(self.sub1 == "GoBack"):
                if(not self.calculate_obstacle()):
                    self.sub1 = "GoForward"
                    if self.displayGui:
                        self.automataGui.notifySetNodeAsActive('GoForward')
            # Actuation if
            if(self.sub1 == "GoForward"):
                self.KobukiMotors.sendV(0.5)
                self.KobukiMotors.sendW(0.0)
            elif(self.sub1 == "GoBack"):
                self.KobukiMotors.sendV(-0.3)
                self.KobukiMotors.sendW(0.2)
            totalb = time.time() * 1000000
            msecs = (totalb - totala) / 1000;
            if(msecs < 0 or msecs > cycle):
                msecs = cycle
            else:
                msecs = cycle - msecs
            time.sleep(msecs / 1000)
            if(msecs < 33 ):
                # NOTE(review): under Python 2 integer division, 33 / 1000 == 0,
                # which makes this sleep a no-op -- confirm intended interpreter.
                time.sleep(33 / 1000);
    def connectToProxys(self):
        # Open ICE communications and create the motor and laser clients.
        self.ic = EasyIce.initialize(sys.argv)
        self.ic,self.node = comm.init(self.ic)
        # Contact to KobukiMotors
        self.KobukiMotors = comm.getMotorsClient(self.ic, 'automata.KobukiMotors')
        if(not self.KobukiMotors):
            raise Exception('could not create client with KobukiMotors')
        print('KobukiMotors connected')
        # Contact to KobukiLaser
        self.KobukiLaser = comm.getLaserClient(self.ic, 'automata.KobukiLaser')
        if(not self.KobukiLaser):
            raise Exception('could not create client with KobukiLaser')
        print('KobukiLaser connected')
    def destroyIc(self):
        # Stop clients and tear down the communication layer.
        self.KobukiMotors.stop()
        self.KobukiLaser.stop()
        comm.destroy(self.ic, self.node)
    def start(self):
        # With the GUI enabled, threads are started from runGui(); otherwise now.
        if self.displayGui:
            self.guiThread = threading.Thread(target=self.runGui)
            self.guiThread.start()
        else:
            self.startThreads()
    def join(self):
        # Wait for the GUI (if any) and the subautomaton thread to finish.
        if self.displayGui:
            self.guiThread.join()
        self.t1.join()
    def readArgs(self):
        # Parse --displaygui=<bool> from the command line.
        for arg in sys.argv:
            splitedArg = arg.split('=')
            if splitedArg[0] == '--displaygui':
                if splitedArg[1] == 'True' or splitedArg[1] == 'true':
                    self.displayGui = True
                    print('runtime gui enabled')
                else:
                    self.displayGui = False
                    print('runtime gui disabled')
if __name__ == '__main__':
    # Restore default Ctrl-C handling so the process can be interrupted.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    machine = Automata()
    try:
        machine.connectToProxys()
        machine.readArgs()
        machine.start()
        machine.join()
        sys.exit(0)
    except:
        # Best-effort cleanup on any failure, then exit non-zero.
        traceback.print_exc()
        machine.destroyIc()
        sys.exit(-1)
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from typing import Optional

from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS IoT Analytics"
prefix = "iotanalytics"
class Action(BaseAction):
    """IAM action for the iotanalytics service.

    :param action: unprefixed action name; ``None`` yields the bare prefix.
    """

    # Fix: the default is None, so the annotation must be Optional[str]
    # rather than the bare `str` used previously.
    def __init__(self, action: Optional[str] = None) -> None:
        super().__init__(prefix, action)
class ARN(BaseARN):
    """ARN builder scoped to the iotanalytics service."""

    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        # Pin the service segment to this module's prefix.
        super().__init__(
            service=prefix, resource=resource, region=region, account=account
        )
B | atchPutMessage = Action("BatchPutMessage")
CancelPipelineReprocessing = Action("CancelPipelineReprocessing")
CreateChannel = Action("CreateChannel")
CreateDataset = Action("CreateDataset")
CreateDatasetContent = Action("CreateDatasetContent")
CreateDatastore = A | ction("CreateDatastore")
CreatePipeline = Action("CreatePipeline")
DeleteChannel = Action("DeleteChannel")
DeleteDataset = Action("DeleteDataset")
DeleteDatasetContent = Action("DeleteDatasetContent")
DeleteDatastore = Action("DeleteDatastore")
DeletePipeline = Action("DeletePipeline")
DescribeChannel = Action("DescribeChannel")
DescribeDataset = Action("DescribeDataset")
DescribeDatastore = Action("DescribeDatastore")
DescribeLoggingOptions = Action("DescribeLoggingOptions")
DescribePipeline = Action("DescribePipeline")
GetDatasetContent = Action("GetDatasetContent")
ListChannels = Action("ListChannels")
ListDatasetContents = Action("ListDatasetContents")
ListDatasets = Action("ListDatasets")
ListDatastores = Action("ListDatastores")
ListPipelines = Action("ListPipelines")
ListTagsForResource = Action("ListTagsForResource")
PutLoggingOptions = Action("PutLoggingOptions")
RunPipelineActivity = Action("RunPipelineActivity")
SampleChannelData = Action("SampleChannelData")
StartPipelineReprocessing = Action("StartPipelineReprocessing")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateChannel = Action("UpdateChannel")
UpdateDataset = Action("UpdateDataset")
UpdateDatastore = Action("UpdateDatastore")
UpdatePipeline = Action("UpdatePipeline")
|
IN | STALLED_APPS= ["django_nose"]
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--with-xunit',
'--xunit-file=jenkins/n | osetests.xml',
]
|
import os
from anchore_engine.analyzers.syft.handlers.common import save_entry_to_findings
from anchore_engine.analyzers.utils import dig
def save_entry(findings, engine_entry, pkg_key=None):
    """Store a python package *engine_entry* into *findings*.

    When no key is supplied, derive one from the entry's location or a
    synthetic site-packages path built from its name and version.
    """
    if not pkg_key:
        name = engine_entry.get("name", "")
        version = engine_entry.get(
            "version", engine_entry.get("latest", "")
        )  # rethink this... ensure it's right
        default_key = "/virtual/pypkg/site-packages/{}-{}".format(name, version)
        pkg_key = engine_entry.get("location", default_key)
    save_entry_to_findings(findings, engine_entry, "pkgs.python", pkg_key)
def translate_and_save_entry(findings, artifact):
    """
    Handler function to map syft results for the python package type into the engine "raw" document format.
    """
    if "python-package-cataloger" not in artifact["foundBy"]:
        # engine only includes python findings for egg and wheel installations (with rich metadata)
        return
    site_pkg_root = artifact["metadata"]["sitePackagesRootPath"]
    name = artifact["name"]
    # anchore engine always uses the name, however, the name may not be a top-level package
    # instead default to the first top-level package unless the name is listed among the
    # top level packages explicitly defined in the metadata. Note that the top-level package
    # is optional!
    pkg_key_names = dig(artifact, "metadata", "topLevelPackages", force_default=[])
    pkg_key_name = None
    for key_name in pkg_key_names:
        if name in key_name:
            # The artifact name is among the top-level packages: prefer it.
            pkg_key_name = name
            break
    if not pkg_key_name:
        # Fix: the loop previously kept overwriting pkg_key_name, so the *last*
        # top-level package won (and an earlier name match could be discarded);
        # the documented intent is to default to the *first* one.
        pkg_key_name = pkg_key_names[0] if pkg_key_names else name
    pkg_key = os.path.join(site_pkg_root, pkg_key_name)
    origin = dig(artifact, "metadata", "author", force_default="")
    email = dig(artifact, "metadata", "authorEmail", default=None)
    if email:
        origin += " <%s>" % email
    files = []
    for file in dig(artifact, "metadata", "files", force_default=[]):
        files.append(os.path.join(site_pkg_root, file["path"]))
    # craft the artifact document
    pkg_value = {
        "name": name,
        "version": artifact["version"],
        "latest": artifact["version"],
        "files": files,
        "origin": origin,
        "license": dig(artifact, "metadata", "license", force_default=""),
        "location": site_pkg_root,
        "type": "python",
        "cpes": artifact.get("cpes", []),
    }
    # inject the artifact document into the "raw" analyzer document
    save_entry(findings, pkg_value, pkg_key)
|
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import curses
import curses.wrapper
from curses.ascii import isprint
from twisted.internet import reactor
class CursesStdIO:
    """Curses terminal UI that doubles as a twisted reactor reader (fd 0)."""
    def __init__(self, stdscr, callback=None):
        self.statusText = "Synapse test app -"
        self.searchText = ""  # text currently typed on the input line
        self.stdscr = stdscr
        self.logLine = ""  # single log line painted at the top of the screen
        self.callback = callback  # receives completed input lines
        self._setup()
    def _setup(self):
        """Configure the curses screen and paint the initial status bar."""
        self.stdscr.nodelay(1)  # Make non blocking
        self.rows, self.cols = self.stdscr.getmaxyx()
        self.lines = []  # history of printed lines, newest last
        curses.use_default_colors()
        self.paintStatus(self.statusText)
        self.stdscr.refresh()
    def set_callback(self, callback):
        # Install the object whose on_line() receives submitted input.
        self.callback = callback
    def fileno(self):
        """We want to select on FD 0"""
        return 0
    def connectionLost(self, reason):
        # twisted reader interface: tear down curses when stdin goes away.
        self.close()
    def print_line(self, text):
        """add a line to the internal list of lines"""
        self.lines.append(text)
        self.redraw()
    def print_log(self, text):
        # Replace the single top log line and repaint.
        self.logLine = text
        self.redraw()
    def redraw(self):
        """method for redisplaying lines based on internal list of lines"""
        self.stdscr.clear()
        self.paintStatus(self.statusText)
        i = 0
        index = len(self.lines) - 1
        # Paint newest lines first, from just above the status bar upwards.
        while i < (self.rows - 3) and index >= 0:
            self.stdscr.addstr(self.rows - 3 - i, 0, self.lines[index], curses.A_NORMAL)
            i = i + 1
            index = index - 1
        self.printLogLine(self.logLine)
        self.stdscr.refresh()
    def paintStatus(self, text):
        """Paint the status bar on the second-to-last row, padded to full width."""
        if len(text) > self.cols:
            raise RuntimeError("TextTooLongError")
        self.stdscr.addstr(
            self.rows - 2, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
        )
    def printLogLine(self, text):
        # Paint the log line on the top row, padded to full width.
        self.stdscr.addstr(
            0, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
        )
    def doRead(self):
        """Input is ready!"""
        curses.noecho()
        c = self.stdscr.getch()  # read a character
        if c == curses.KEY_BACKSPACE:
            self.searchText = self.searchText[:-1]
        elif c == curses.KEY_ENTER or c == 10:
            # Enter submits the current line to the callback and clears it.
            text = self.searchText
            self.searchText = ""
            self.print_line(">> %s" % text)
            try:
                if self.callback:
                    self.callback.on_line(text)
            except Exception as e:
                self.print_line(str(e))
            self.stdscr.refresh()
        elif isprint(c):
            # Cap the input line so it never overruns the screen width.
            if len(self.searchText) == self.cols - 2:
                return
            self.searchText = self.searchText + chr(c)
        self.stdscr.addstr(
            self.rows - 1,
            0,
            self.searchText + (" " * (self.cols - len(self.searchText) - 2)),
        )
        self.paintStatus(self.statusText + " %d" % len(self.searchText))
        self.stdscr.move(self.rows - 1, len(self.searchText))
        self.stdscr.refresh()
    def logPrefix(self):
        # twisted logging interface.
        return "CursesStdIO"
    def close(self):
        """clean up"""
        curses.nocbreak()
        self.stdscr.keypad(0)
        curses.echo()
        curses.endwin()
class Callbac | k:
def __init__(self, stdio):
self.stdio = stdio
def on_line(self, text):
self.stdio.print_line(text)
def main(stdscr):
    """Wire the curses screen into the twisted reactor and run until exit."""
    screen = CursesStdIO(stdscr)  # create Screen object
    screen.set_callback(Callback(screen))
    stdscr.refresh()
    reactor.addReader(screen)
    reactor.run()
    screen.close()
if __name__ == "__main__":
    # curses.wrapper restores the terminal state even if main() raises.
    curses.wrapper(main)
|
TextSearch(self.auth, terms)
else:
# Note: I don't want to bother with `maxNumResults` so we set
# it to a big number.
BIG = 1000000
issues = self.server.jira1.getIssuesFromTextSearchWithProject(
self.auth, project_keys, terms, BIG)
if len(issues) == BIG:
log.warn("*%s* matches returned for %r (projects %s), "
"the result might not include a matches",
BIG, terms, ', '.join(project_keys))
return issues
def issue_types(self, project_key=None):
if project_key:
project = self.project(project_key)
issue_types = self.server.jira1.getIssueTypesForProject(
self.auth, project["id"])
else:
if "issue_types" not in self.cache:
self.cache["issue_types"] = self.server.jira1.getIssueTypes(self.auth)
issue_types = self.cache["issue_types"]
return issue_types
def issue_type(self, issue_id):
assert isinstance(issue_id, str)
for t in self.issue_types():
if t["id"] == issue_id:
return t
else:
raise JiraShellError("unknown issue type: %r" % issue_id)
def components(self, project_key):
if "components" not in self.cache:
self.cache["components"] = {}
if project_key not in self.cache["components"]:
components = self.server.jira1.getComponents(self.auth, project_key)
components.sort(key=operator.itemgetter("name"))
self.cache["components"][project_key] = components
return self.cache["components"][project_key]
def component(self, project_key, component_id):
assert isinstance(component_id, str)
for c in self.components(project_key):
if c["id"] == component_id:
return c
else:
raise JiraShellError("unknown component id: %r" % component_id)
def component_id(self, project_key, name):
"""Return the project component id from the given id, name, or unique
substring match on the name.
"""
componentObj = None
components = self.components(project_key)
name_lower = name.lower()
# - if int, then try id match first
if isinstance(name, int):
for r in components:
if int(r["id"]) == name:
componentObj = r
break
else:
raise JiraShellError("no component with id %r" % name)
if not componentObj:
# - try full name match
for r in components:
if r["name"].lower() == name_lower:
componentObj = r
break
if not componentObj:
# - try substring match
matches = [r for r in components
if name_lower in r["name"].lower()]
if len(matches) == 1:
componentObj = matches[0]
elif len(matches) > 1:
raise JiraShellError(
"'%s' is ambiguous: matching components: \"%s\"" % (
name, '", "'.join([r["name"] for r in matches])))
if not componentObj:
raise JiraShellError("no component found matching %r" % name)
return componentObj["id"]
def versions(self, project_key, exclude_archived=None,
exclude_released=None):
versions = self.server.jira1.getVersions(self.auth, project_key)
if exclude_archived:
versions = [v for v in versions if v["archived"] != "true"]
if exclude_released:
versions = [v for v in versions if v["released"] != "true"]
versions.sort(key=lambda v: int(v["sequence"]))
return versions
    def version(self, version_id):
        # NOTE(review): self.versions() requires a project_key argument, so this
        # call raises TypeError as written -- confirm the intended signature.
        assert isinstance(version_id, str)
        for v in self.versions():
            if v["id"] == version_id:
                return v
        else:
            raise JiraShellError("unknown version: %r" % version_id)
def resolutions(self):
if "resolutions" not in self.cache:
self.cache["resolutions"] = self.server.jira1.getResolutions(self.auth)
return self.cache["resolutions"]
def resolution_id(self, name):
"""Return the resolution id from the given id, name, or unique
substring match on the name.
"""
resolutionObj = None
resolutions = self.resolutions()
name_lower = name.lower()
# - if int, then try id match first
if isinstance(name, int):
for r in resolutions:
if int(r["id"]) == name:
resolutionObj = r
break
else:
raise JiraShellError("no resolution with id %r" % name)
if not resolutionObj:
# - try full name match
for r in resolutions:
if r["name"].lower() == name_lower:
resolutionObj = r
break
if not resolutionObj:
# - try substring match
matches = [r for r in resolutions
if name_lower in r["name"].lower()]
if len(matches) == 1:
resolutionObj = matches[0]
elif len(mat | ches) > 1:
raise JiraShellError(
"'%s' is ambiguous: matching resolutions: \"%s\"" % (
name, '", "'.join([r["name"] for r in matches])))
if | not resolutionObj:
raise JiraShellError("no resolution found matching %r" % name)
return resolutionObj["id"]
def resolve(self, key):
"""Resolve the given issue.
TODO: what is the result when the workflow change is illegal?
"""
# 5 === "Resolved". Is that true for all Jiras?
res = self._jira_soap_call("progressWorkflowAction", [key, "5"])
def statuses(self):
if "statuses" not in self.cache:
self.cache["statuses"] = self.server.jira1.getStatuses(self.auth)
return self.cache["statuses"]
def status(self, status_id):
assert isinstance(status_id, str)
for s in self.statuses():
if s["id"] == status_id:
return s
else:
raise JiraShellError("unknown status: %r" % status_id)
def status_id(self, name):
"""Get the id of the status matching the given name.
@param name {str} Case-insensitive status name.
"""
statuses = self.statuses()
name_lower = name.lower()
for s in statuses:
if name_lower == s["name"].lower():
return s["id"]
else:
raise JiraShellError("unknown status name: %r" % name)
def create_issue(self, data):
return self.server.jira1.createIssue(self.auth, data)
def update_issue(self, key, data):
# Actual helpful docs on updateIssue():
# https://jira.atlassian.com/browse/JRA-10588
if log.isEnabledFor(logging.DEBUG):
log.debug("calling updateIssue(%r, %s)", key, json.dumps(data))
return self.server.jira1.updateIssue(self.auth, key, data)
#---- JiraShell
class JiraShell(cmdln.Cmdln):
    # cmdln-based interactive shell; `name` is the program name shown in help.
    name = "jirash"
    # Resolved from --jira-url or the config file before commands run.
    jira_url = None
    def get_optparser(self):
        """Extend the base cmdln option parser with jirash-wide options."""
        parser = cmdln.Cmdln.get_optparser(self)
        parser.add_option("--version", action="store_true",
            help="show version and exit")
        parser.add_option("-d", "--debug", action="store_true",
            help="debug logging")
        parser.add_option("-J", "--jira-url", dest="jira_url",
            help="Jira base URL. Otherwise defaults to 'jira_url' value from config file.")
        return parser
def _generate_cfg(self, cfg_path):
url = raw_input("Jira URL: ")
username = raw_input("Username: ")
password = getpass.getpass("Password: ")
# TODO Attempt login to validate before saving
config = {
'jira_url': url,
url: {
'username': username,
'password': password,
|
#!/usr/bin/env python
# runTests.py -- Portage Unit Test Functionality
# Copyright 2006-2017 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import os, sys
import os.path as osp
import grp
import platform
import pwd
import signal
def debug_signal(signum, frame):
    """Signal handler that drops into pdb for live debugging."""
    import pdb
    pdb.set_trace()
# Jython reserves SIGUSR1, so use SIGUSR2 there (bug #424259).
if platform.python_implementation() == 'Jython':
	debug_signum = signal.SIGUSR2 # bug #424259
else:
	debug_signum = signal.SIGUSR1
signal.signal(debug_signum, debug_signal)
# Pretend that the current user's uid/gid are the 'portage' uid/gid,
# so things go smoothly regardless of the current user and global
# user/group configuration.
os.environ["PORTAGE_USERNAME"] = pwd.getpwuid(os.getuid()).pw_name
os.environ["PORTAGE_GRPNAME"] = grp.getgrgid(os.getgid()).gr_name
# Insert our parent dir so we can do shiny import "tests"
# This line courtesy of Marienz and Pkgcore ;)
repoman_pym = osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__))))
sys.path.insert(0, repoman_pym)
# Add in the parent portage python modules
portage_pym = osp.dirname(osp.dirname(repoman_pym))+'/pym'
sys.path.insert(0, portage_pym)
# import our centrally initialized portage instance
from repoman._portage import portage
portage._internal_caller = True
# Ensure that we don't instantiate portage.settings, so that tests should
# work the same regardless of global configuration file state/existence.
portage._disable_legacy_globals()
if os.environ.get('NOCOLOR') in ('yes', 'true'):
	portage.output.nocolor()
import repoman.tests as tests
from portage.const import PORTAGE_BIN_PATH
# Put the portage helper-script directory first on PATH (deduplicated below).
path = os.environ.get("PATH", "").split(":")
path = [x for x in path if x]
insert_bin_path = True
try:
	insert_bin_path = not path or \
		not os.path.samefile(path[0], PORTAGE_BIN_PATH)
except OSError:
	pass
if insert_bin_path:
	path.insert(0, PORTAGE_BIN_PATH)
	os.environ["PATH"] = ":".join(path)
if __name__ == "__main__":
	sys.exit(tests.main())
|
import os
import | py
import pytest
import numpy as np
import openpnm as op
from openpnm.models.misc import from_ne | ighbor_pores
@pytest.mark.skip(reason="'netgen' is only available on conda")
class STLTest:
    """Round-trip tests for STL export of a small cubic network."""

    def setup_class(self):
        # Fixed seed keeps the generated pore diameters reproducible.
        np.random.seed(10)
        self.net = op.network.Cubic(shape=[2, 2, 2])
        self.net["pore.diameter"] = 0.5 + np.random.rand(self.net.Np) * 0.5
        # Throat diameter = half the neighbouring pore diameter.
        self.net["throat.diameter"] = (
            from_neighbor_pores(target=self.net, prop="pore.diameter") * 0.5
        )
        self.net["throat.length"] = 1.0

    def teardown_class(self):
        # Remove the files written by the export test.
        os.remove(f"{self.net.name}.stl")
        os.remove("custom_stl.stl")

    def test_export_data_stl(self):
        op.io.to_stl(network=self.net)
        assert os.path.isfile(f"{self.net.name}.stl")
        op.io.to_stl(network=self.net, filename="custom_stl")
        assert os.path.isfile("custom_stl.stl")
if __name__ == '__main__':
    # All the tests in this file can be run with 'playing' this file
    t = STLTest()
    self = t  # For interacting with the tests at the command line
    t.setup_class()
    for item in t.__dir__():
        if item.startswith('test'):
            print(f'Running test: {item}')
            try:
                t.__getattribute__(item)()
            except TypeError:
                # Some tests expect a tmpdir fixture; supply a local one.
                t.__getattribute__(item)(tmpdir=py.path.local())
    t.teardown_class()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 Anso Labs, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nova` -- Cloud IaaS Platform
===================== | ==============
.. automodule:: nova
:platform: Unix
:synopsis: Infrastructure-as-a-Service Cloud platform.
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: | Devin Carlen <devin.carlen@gmail.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
from exception import * |
"""
EtcUdevRules - file ``/etc/udev/rules.d/``
==========================================
This module is similar to the :py:mod:`insights.parsers.udev_rules`
but parses ``.rules`` files under the ``/etc/udev/rules.d/`` directory instead.
The parsers included in this module are:
UdevRules40Redhat - file ``/etc/udev/rules.d/40-redhat.rules``
--------------------------------------------------------------
"""
from insights import parser
from insights.core import LogFileOutput
from insights.specs import Specs
from insights.util import deprecated
@parser(Specs.etc_udev_40_redhat_rules)
class UdevRules40Redhat(LogFileOutput):
    """
    .. warning::
        This parser is deprecated, please use
        :py:class:`insights.parsers.udev_rules.UdevRules40Redhat` instead.
    Read the content of ``/etc/udev/rules.d/40-redhat.rules`` file.
    .. note::
        The syntax of the `.rules` file is complex, and no rules require to
        get the serialized parsed result currently.  The only existing rule
        is supposed to check the syntax of some specific line, so here the
        :class:`insights.core.LogFileOutput` is the base class.
    Sample input::
        # do not edit this file, it will be overwritten on update
        # CPU hotadd request
        SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online}="1"
        # Memory hotadd request
        SUBSYSTEM!="memory", ACTION!="add", GOTO="memory_hotplug_end"
        PROGRAM="/bin/uname -p", RESULT=="s390*", GOTO="memory_hotplug_end"
        LABEL="memory_hotplug_end"
    Examples:
        >>> 'LABEL="memory_hotplug_end"' in udev_rules.lines
        True
    """
    def __init__(self, *args, **kwargs):
        # Fix: the user-facing deprecation message misspelled "instead"
        # as "instread".
        deprecated(UdevRules40Redhat, "Import UdevRules40Redhat from insights.parsers.udev_rules instead.")
        super(UdevRules40Redhat, self).__init__(*args, **kwargs)
|
import numpy as np
import theano
from theano import tensor as T
from generateTrainDataonText import createTrain
from neuralmodels.utils import permute
from neuralmodels.loadcheckpoint import *
from neuralmodels.costs import softmax_loss
from neuralmodels.models import *
from neuralmodels.predictions import OutputMaxProb, OutputSampleFromDiscrete
from neuralmodels.layers import *
def text_prediction(class_ids_reverse, p_labels):
    """Convert predicted class-id sequences into strings.

    class_ids_reverse: mapping from class id to character.
    p_labels: array of shape (T, N) -- T timesteps for each of N samples.
    Returns a list of N strings, each of length T.
    """
    # Fix: the old code bound the timestep count to a local named `T`,
    # shadowing the module's `theano.tensor as T` alias -- and never used it.
    num_samples = p_labels.shape[1]
    text_output = []
    for i in range(num_samples):
        # join() builds each string in one pass instead of quadratic +=.
        text_output.append(''.join(class_ids_reverse[j] for j in p_labels[:, i]))
    return text_output
if __name__ == '__main__':
    # Hyper-parameters for the character-level language model.
    num_samples = 10000
    num_validation = 100
    num_train = num_samples - num_validation
    len_samples = 300
    epochs = 30
    batch_size = 100
    learning_rate_decay = 0.97
    decay_after=5
    [X,Y,num_classes,class_ids_reverse] = createTrain('shakespeare_input.txt',num_samples,len_samples)
    inputD = num_classes
    outputD = num_classes
    # Shuffle samples, then split into train/validation along axis 1 (samples).
    permutation = permute(num_samples)
    X = X[:,permutation]
    Y = Y[:,permutation]
    X_tr = X[:,:num_train]
    Y_tr = Y[:,:num_train]
    X_valid = X[:,num_train:]
    Y_valid = Y[:,num_train:]
    # Creating network layers
    layers = [OneHot(num_classes),LSTM(),LSTM(),LSTM(),softmax(num_classes)]
    trY = T.lmatrix()
    # Initializing network
    rnn = RNN(layers,softmax_loss,trY,1e-3)
    # Fitting model
    rnn.fitModel(X_tr,Y_tr,1,'checkpoints/',epochs,batch_size,learning_rate_decay,decay_after)
    # Printing a generated sentence
    out = rnn.predict_language_model(X_valid[:,:1],1000,OutputSampleFromDiscrete)
    # Print the sentence here
    text_produced = text_prediction(class_ids_reverse,out)
|
import getpass
import json
import getopt
from genericpath import isfile
from os.path import sep
from pingdumb.main_module import url_type
def read_config():
    """Load ./pingdumb.json, creating it with defaults on first use.

    Returns the configuration dict in both cases.
    """
    f_path = "." + sep + "pingdumb.json"
    if not isfile(f_path):
        conf = {
            "url": "jellyms.kr",
            "smtpServer": "smtp.gmail.com:587",
            "smtpUser": "",
            "toEmail": "",
            "interval": 300,
        }
        # Fix: use context managers so the handles are closed even on errors
        # (the previous code leaked the handle if write() raised).
        with open(f_path, 'w') as f:
            f.write(json.dumps(conf))
        return conf
    with open(f_path, 'r+b') as f:
        conf = json.loads(f.read().decode('utf-8'))
    return conf
def write_config(conf):
    """Persist *conf* to ./pingdumb.json.

    The SMTP password is deliberately stripped so it is never written to disk.
    """
    conf.pop('smtpPw', None)
    f_path = "." + sep + "pingdumb.json"
    # Fix: close the handle via a context manager; the explicit truncate()
    # was redundant because mode 'w' already truncates.
    with open(f_path, 'w') as f:
        f.write(json.dumps(conf))
def input_conf(message, default):
    """Prompt with *message*; return *default* when the user enters nothing."""
    return input(message) or default
def set_config():
    """Interactively prompt for every setting, seeded from the stored
    configuration, and return the updated configuration dict.

    The SMTP password is collected with getpass so it is not echoed; it is
    kept only in the returned dict (write_config strips it before saving).
    """
    configure = read_config()
    url_for_test = input_conf(
        "URL to test? (" + configure["url"] + ")", configure["url"]
    )
    url_for_test = url_type(url_for_test)
    recv_mail = input_conf(
        "Receive mail? (" + configure["toEmail"] + ")",
        configure["toEmail"]
    )
    s_server = input_conf(
        "SMTP server? (" + configure["smtpServer"] + ")",
        configure["smtpServer"]
    )
    s_user = input_conf(
        "SMTP Server username? (" + configure["smtpUser"] + ")",
        configure["smtpUser"]
    )
    # BUG FIX: getpass.getpass(prompt, stream) — the second positional
    # argument is an *output stream*, not a default value; the previous
    # call passed "" and only worked because "" is falsy
    s_pw = getpass.getpass("SMTP Server password?")
    interval = input_conf(
        "interval of seconds? (" + str(configure["interval"]) + ")",
        configure["interval"]
    )
    interval = int(interval)
    configure["url"] = url_for_test
    configure["toEmail"] = recv_mail
    configure["smtpServer"] = s_server
    configure["smtpUser"] = s_user
    configure["smtpPw"] = s_pw
    configure["interval"] = interval
    return configure
def configure_to_tuple():
    """Return (url, smtpServer, smtpUser, toEmail, interval) from the
    stored configuration file."""
    conf = read_config()
    keys = ("url", "smtpServer", "smtpUser", "toEmail", "interval")
    return tuple(conf[key] for key in keys)
def extract_password_with_argv(argv):
    """Prompt for the SMTP password when '-p' is present in *argv*.

    :param argv: command-line argument list (without the program name)
    :return: the password string, or None when '-p' was not given
    """
    opts, args = getopt.getopt(argv, 'p')
    for opt, _ in opts:
        if opt == "-p":
            # BUG FIX: getpass's second parameter is an output stream, not
            # a default value — prompt without it
            return getpass.getpass("SMTP Server password")
    # Explicit None instead of falling off the end of the function
    return None
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import difflib
import os
import sys
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import easy_template
class EasyTemplateTestCase(unittest.TestCase):
  """End-to-end tests for easy_template.RunTemplate.

  Python 2 module: templates are rendered from/into cStringIO buffers.
  {{expr}} interpolates an expression; [[stmt]] embeds a statement.
  """

  def _RunTest(self, template, expected, template_dict):
    # Render |template| against |template_dict| and fail with a unified
    # diff when the rendered output differs from |expected|.
    src = cStringIO.StringIO(template)
    dst = cStringIO.StringIO()
    easy_template.RunTemplate(src, dst, template_dict)
    if dst.getvalue() != expected:
      expected_lines = expected.splitlines(1)
      actual_lines = dst.getvalue().splitlines(1)
      diff = ''.join(difflib.unified_diff(
          expected_lines, actual_lines,
          fromfile='expected', tofile='actual'))
      self.fail('Unexpected output:\n' + diff)
  def testEmpty(self):
    self._RunTest('', '', {})
  def testNewlines(self):
    self._RunTest('\n\n', '\n\n', {})
  def testNoInterpolation(self):
    # Single braces and square brackets are not template syntax and must
    # pass through untouched.
    template = """I love paris in the
the springtime [don't you?]
{this is not interpolation}.
"""
    self._RunTest(template, template, {})
  def testSimpleInterpolation(self):
    self._RunTest(
        '{{foo}} is my favorite number',
        '42 is my favorite number',
        {'foo': 42})
  def testLineContinuations(self):
    # NOTE: the trailing "" below is an empty string literal concatenated
    # to the template — harmless, but easy to misread as a triple quote.
    template = "Line 1 \\\nLine 2\n"""
    self._RunTest(template, template, {})
  def testIfStatement(self):
    template = r"""
[[if foo:]]
  foo
[[else:]]
  not foo
[[]]"""
    self._RunTest(template, "\n  foo\n", {'foo': True})
    self._RunTest(template, "\n  not foo\n", {'foo': False})
  def testForStatement(self):
    template = r"""[[for beers in [99, 98, 1]:]]
{{beers}} bottle{{(beers != 1) and 's' or ''}} of beer on the wall...
[[]]"""
    expected = r"""99 bottles of beer on the wall...
98 bottles of beer on the wall...
1 bottle of beer on the wall...
"""
    self._RunTest(template, expected, {})
  def testListVariables(self):
    template = r"""
[[for i, item in enumerate(my_list):]]
{{i+1}}: {{item}}
[[]]
"""
    self._RunTest(template, "\n1: Banana\n2: Grapes\n3: Kumquat\n",
                  {'my_list': ['Banana', 'Grapes', 'Kumquat']})
  def testListInterpolation(self):
    template = "{{', '.join(growing[0:-1]) + ' and ' + growing[-1]}} grow..."
    self._RunTest(template, "Oats, peas, beans and barley grow...",
                  {'growing': ['Oats', 'peas', 'beans', 'barley']})
    self._RunTest(template, "Love and laughter grow...",
                  {'growing': ['Love', 'laughter']})
  def testComplex(self):
    # Nested [[for]]/[[if]] with indentation carried inside the [[ ]] markers.
    template = r"""
struct {{name}} {
[[for field in fields:]]
[[  if field['type'] == 'array':]]
  {{field['basetype']}} {{field['name']}}[{{field['size']}}];
[[  else:]]
  {{field['type']}} {{field['name']}};
[[  ]]
[[]]
};"""
    expected = r"""
struct Foo {
  std::string name;
  int problems[99];
};"""
    self._RunTest(template, expected, {
      'name': 'Foo',
      'fields': [
        {'name': 'name', 'type': 'std::string'},
        {'name': 'problems', 'type': 'array', 'basetype': 'int', 'size': 99}]})
  def testModulo(self):
    # Literal % must survive both outside and inside {{ }} expressions.
    self._RunTest('No expression %', 'No expression %', {})
    self._RunTest('% before {{3 + 4}}', '% before 7', {})
    self._RunTest('{{2**8}} % after', '256 % after', {})
    self._RunTest('inside {{8 % 3}}', 'inside 2', {})
    self._RunTest('Everywhere % {{8 % 3}} %', 'Everywhere % 2 %', {})
# Allow running this test file directly: python easy_template_test.py
if __name__ == '__main__':
  unittest.main()
|
"""
WSGI config for Tuteria-Application-Test project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to t | he Django one. For | example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
# setdefault: an externally supplied DJANGO_SETTINGS_MODULE wins over this.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
from sklearn.cluster import KMeans
# Python 2 demo script (print statement below). `digits` and `plt` must be
# defined by earlier code — presumably sklearn's load_digits() and
# matplotlib.pyplot; confirm before running.
kmeans = KMeans(n_clusters=10)
clusters = kmeans.fit_predict(digits.data)
print kmeans.cluster_centers_.shape
#------------------------------------------------------------
# visualize the cluster centers (each center is a 64-dim vector reshaped
# back into the 8x8 digit image)
fig = plt.figure(figsize=(8, 3))
for i in range(10):
    ax = fig.add_subplot(2, 5, 1 + i)
    ax.imshow(kmeans.cluster_centers_[i].reshape((8, 8)),
              cmap=plt.cm.binary)
from sklearn.manifold import Isomap
X_iso = Isomap(n_neighbors=10).fit_transform(digits.data)
#------------------------------------------------------------
# visualize the projected data: cluster assignment (left) vs true labels
# (right) in the 2-D Isomap embedding
fig, ax = plt.subplots(1, 2, figsize=(8, 4))
ax[0].scatter(X_iso[:, 0], X_iso[:, 1], c=clusters)
ax[1].scatter(X_iso[:, 0], X_iso[:, 1], c=digits.target)
|
from typing import List
from collections import defaultdict, Counter
class Solution:
    def shortestCompletingWordV1(self, licensePlate: str, words: List[str]) -> str:
        """Return the shortest word that contains every letter of the plate
        (case-insensitive, counting multiplicity); first such word wins ties.
        """
        # Required letter -> count, taken from the plate (digits/spaces ignored)
        needed = defaultdict(int)
        for ch in licensePlate.upper():
            if ch.isalpha():
                needed[ch] += 1
        best = ''
        for candidate in words:
            remaining = needed.copy()
            for ch in candidate:
                upper = ch.upper()
                if upper not in remaining:
                    continue
                remaining[upper] -= 1
                if remaining[upper] == 0:
                    del remaining[upper]
                # All requirements met: keep the candidate if it is the
                # first match or strictly shorter than the current best
                if not remaining and (best == '' or len(candidate) < len(best)):
                    best = candidate
                    break
        return best
    def shortestCompletingWordV2(self, licensePlate: str, words: List[str]) -> str:
        """
        Counter-based variant: build the letter Counter of the plate, then a
        word is completing exactly when intersecting its own Counter with the
        plate's (operator &) gives the plate's Counter back — i.e. the word
        covers every required letter with at least the required multiplicity.
        min(..., key=len) picks the first shortest completing word.
        This is slower than V1 though.
        """
        plate_counts = Counter(c for c in licensePlate.lower() if c.isalpha())
        completing = [w for w in words if Counter(w) & plate_counts == plate_counts]
        return min(completing, key=len)
# TESTS
# Each case: a license plate, candidate words, and the expected shortest
# completing word.
tests = [
    {
        'licensePlate': "1s3 PSt",
        'words': ["step", "steps", "stripe", "stepple"],
        'expected': "steps"
    },
    {
        'licensePlate': "1s3 456",
        'words': ["looks", "pest", "stew", "show"],
        'expected': "pest"
    },
    {
        'licensePlate': "AN87005",
        'words': ["participant","individual","start","exist","above","already","easy","attack","player","important"],
        'expected': "important"
    }
]
for t in tests:
    sol = Solution()
    actual = sol.shortestCompletingWordV2(t['licensePlate'], t['words'])
    # BUG FIX: progress message said 'Shorted' instead of 'Shortest'
    print('Shortest completing word matching', t['licensePlate'], 'in', t['words'], '->', actual)
    assert(actual == t['expected'])
    assert(t['expected'] == sol.shortestCompletingWordV2(t['licensePlate'], t['words']))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-28 15:02
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Mi | gration):
dependencies = [
('base', '0106_auto_20170428_111 | 9'),
]
operations = [
migrations.AddField(
model_name='learningunit',
name='learning_container',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='base.LearningContainer'),
),
]
|
ne,
)
)
log_str = (
"\n"
+ "=" * 80
+ "\n"
+ "DAG File Processing Stats\n\n"
+ tabulate(formatted_rows, headers=headers)
+ "\n"
+ "=" * 80
)
self.log.info(log_str)
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unico | de
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:re | turn: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""Sleeps until all the processors are done."""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
    def _collect_results_from_processor(self, processor) -> None:
        # Record per-file stats (dag count, import errors, duration, run
        # count) for a processor that has just finished.
        self.log.debug("Processor for %s finished", processor.file_path)
        Stats.decr('dag_processing.processes')
        # Capture the finish time up front so last_duration measures to here
        last_finish_time = timezone.utcnow()
        if processor.result is not None:
            num_dags, count_import_errors = processor.result
        else:
            # A missing result means the child exited abnormally
            self.log.error(
                "Processor for %s exited with return code %s.", processor.file_path, processor.exit_code
            )
            # -1 import errors marks "crashed before reporting anything"
            count_import_errors = -1
            num_dags = 0
        stat = DagFileStat(
            num_dags=num_dags,
            import_errors=count_import_errors,
            last_finish_time=last_finish_time,
            last_duration=(last_finish_time - processor.start_time).total_seconds(),
            run_count=self.get_run_count(processor.file_path) + 1,
        )
        self._file_stats[processor.file_path] = stat
    def collect_results(self) -> None:
        """Collect the result from any finished DAG processors"""
        # Non-blocking poll (timeout=0) over every waitable except the
        # scheduler signal pipe
        ready = multiprocessing.connection.wait(self.waitables.keys() - [self._signal_conn], timeout=0)
        for sentinel in ready:
            if sentinel is self._signal_conn:
                continue
            processor = cast(AbstractDagFileProcessorProcess, self.waitables[sentinel])
            # Remove bookkeeping entries before harvesting stats so the same
            # path can be re-queued with a fresh processor
            self.waitables.pop(processor.waitable_handle)
            self._processors.pop(processor.file_path)
            self._collect_results_from_processor(processor)
        self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism)
        self.log.debug("%s file paths queued for processing", len(self._file_path_queue))
    def start_new_processes(self):
        """Start more processors if we have enough slots and files to process"""
        while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
            file_path = self._file_path_queue.pop(0)
            # Stop creating duplicate processor i.e. processor with the same filepath
            if file_path in self._processors.keys():
                continue
            callback_to_execute_for_file = self._callback_to_execute[file_path]
            processor = self._processor_factory(
                file_path, callback_to_execute_for_file, self._dag_ids, self._pickle_dags
            )
            # Each queued callback is consumed exactly once
            del self._callback_to_execute[file_path]
            Stats.incr('dag_processing.processes')
            processor.start()
            self.log.debug("Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path)
            self._processors[file_path] = processor
            self.waitables[processor.waitable_handle] = processor
def prepare_file_path_queue(self):
"""Generate more file paths to process. Result are saved in _file_path_queue."""
self._parsing_start_time = time.perf_counter()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
# Sort the file paths by the parsing order mode
list_mode = conf.get("scheduler", "file_parsing_sort_mode")
files_with_mtime = {}
file_paths = []
is_mtime_mode = list_mode == "modified_time"
file_paths_recently_processed = []
for file_path in self._file_paths:
if is_mtime_mode:
files_with_mtime[file_path] = os.path.getmtime(file_path)
file_modified_time = timezone.make_aware(datetime.fromtimestamp(files_with_mtime[file_p |
import socket
import fcntl
import struct
def get_ip_address(ifname):
    """Return the IPv4 address bound to network interface *ifname*.

    Uses the SIOCGIFADDR ioctl on a throwaway UDP socket, so it works even
    when the interface has no default route. Unix-only (requires fcntl).

    BUG FIX: the usage examples below previously sat at module level as
    bare ``>>>`` lines, which is a SyntaxError; they are now doctests.
    NOTE(review): on Python 3, '256s' requires bytes — pass b'lo'-style
    names or encode ifname first; confirm the target interpreter version.

    >>> get_ip_address('lo')
    '127.0.0.1'
    >>> get_ip_address('eth0')
    '38.113.228.130'
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # SIOCGIFADDR fills a 256-byte ifreq buffer that starts with the
    # interface name (max 15 chars); the address lives at bytes 20..24
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915,  # SIOCGIFADDR
        struct.pack('256s', ifname[:15])
    )[20:24])
|
],
24,
background="#555555"
),
left=bar.Gap(16),
right=bar.Gap(20),
x=0, y=0, width=600, height=480
),
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.WindowName(),
widget.Clock()
],
30,
),
bottom=bar.Gap(24),
left=bar.Gap(12),
x=600, y=0, width=300, height=580
),
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.WindowName(),
widget.Clock()
],
30,
),
bottom=bar.Gap(16),
right=bar.Gap(40),
x=0, y=480, width=500, height=400
),
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.WindowName(),
widget.Clock()
],
30,
),
left=bar.Gap(20),
right=bar.Gap(24),
x=500, y=580, width=400, height=400
),
]
screens = fake_screens
# Headless Xephyr session sized to enclose all four fake screens defined
# in the config class above (600+300 wide, 480+400+extra tall).
xephyr_config = {
    "xinerama": False,
    "two_screens": False,
    "width": 900,
    "height": 980
}
# Decorator that runs a test inside the fake-screen qtile session.
fakescreen_config = pytest.mark.parametrize("xephyr, qtile", [(xephyr_config, FakeScreenConfig)], indirect=True)
@fakescreen_config
def test_basic(qtile):
    # One window per screen; to_screen must report exactly the geometry
    # configured for each fake screen.
    qtile.testWindow("zero")
    assert qtile.c.layout.info()["clients"] == ["zero"]
    assert qtile.c.screen.info() == {
        'y': 0, 'x': 0, 'index': 0, 'width': 600, 'height': 480}
    qtile.c.to_screen(1)
    qtile.testWindow("one")
    assert qtile.c.layout.info()["clients"] == ["one"]
    assert qtile.c.screen.info() == {
        'y': 0, 'x': 600, 'index': 1, 'width': 300, 'height': 580}
    qtile.c.to_screen(2)
    qtile.testXeyes()
    assert qtile.c.screen.info() == {
        'y': 480, 'x': 0, 'index': 2, 'width': 500, 'height': 400}
    qtile.c.to_screen(3)
    qtile.testXclock()
    assert qtile.c.screen.info() == {
        'y': 580, 'x': 500, 'index': 3, 'width': 400, 'height': 400}
@fakescreen_config
def test_gaps(qtile):
    # Gap tuples are (x, y, width, height) in absolute (multi-screen)
    # coordinates, derived from the bar/Gap sizes in the config above.
    g = qtile.c.screens()[0]["gaps"]
    assert g["bottom"] == (0, 456, 600, 24)
    assert g["left"] == (0, 0, 16, 456)
    assert g["right"] == (580, 0, 20, 456)
    g = qtile.c.screens()[1]["gaps"]
    assert g["top"] == (600, 0, 300, 30)
    assert g["bottom"] == (600, 556, 300, 24)
    assert g["left"] == (600, 30, 12, 526)
    g = qtile.c.screens()[2]["gaps"]
    assert g["top"] == (0, 480, 500, 30)
    assert g["bottom"] == (0, 864, 500, 16)
    assert g["right"] == (460, 510, 40, 354)
    g = qtile.c.screens()[3]["gaps"]
    assert g["top"] == (500, 580, 400, 30)
    assert g["left"] == (500, 610, 20, 370)
    assert g["right"] == (876, 610, 24, 370)
@fakescreen_config
def test_maximize_with_move_to_screen(qtile):
    """Ensure that maximize respects bars"""
    qtile.testXclock()
    qtile.c.window.toggle_maximize()
    # Screen 0 is 600x480 with 16px left gap, 20px right gap, 24px bottom bar
    assert qtile.c.window.info()['width'] == 564
    assert qtile.c.window.info()['height'] == 456
    assert qtile.c.window.info()['x'] == 16
    assert qtile.c.window.info()['y'] == 0
    assert qtile.c.window.info()['group'] == 'a'
    # go to second screen
    qtile.c.to_screen(1)
    assert qtile.c.screen.info() == {
        'y': 0, 'x': 600, 'index': 1, 'width': 300, 'height': 580}
    assert qtile.c.group.info()['name'] == 'b'
    qtile.c.group['a'].toscreen()
    # Re-maximized against screen 1's bars/gaps (30px top bar, 24px bottom,
    # 12px left gap)
    assert qtile.c.window.info()['width'] == 288
    assert qtile.c.window.info()['height'] == 526
    assert qtile.c.window.info()['x'] == 612
    assert qtile.c.window.info()['y'] == 30
    assert qtile.c.window.info()['group'] == 'a'
@fakescreen_config
def test_float_first_on_second_screen(qtile):
    qtile.c.to_screen(1)
    assert qtile.c.screen.info() == {
        'y': 0, 'x': 600, 'index': 1, 'width': 300, 'height': 580}
    qtile.testXclock()
    # I don't know where y=30, x=12 comes from...
    assert qtile.c.window.info()['float_info'] == {
        'y': 30, 'x': 12, 'width': 164, 'height': 164
    }
    qtile.c.window.toggle_floating()
    # Floating geometry is the float_info offset translated to the screen's
    # absolute origin (600 + 12, 0 + 30)
    assert qtile.c.window.info()['width'] == 164
    assert qtile.c.window.info()['height'] == 164
    assert qtile.c.window.info()['x'] == 612
    assert qtile.c.window.info()['y'] == 30
    assert qtile.c.window.info()['group'] == 'b'
    assert qtile.c.window.info()['float_info'] == {
        'y': 30, 'x': 12, 'width': 164, 'height': 164
    }
@fakescreen_config
def test_float_change_screens(qtile):
    # A floating window must keep its size when its group moves between
    # screens, and its position must translate by each screen's origin.
    # add some eyes, and float clock
    qtile.testXeyes()
    qtile.testXclock()
    qtile.c.window.toggle_floating()
    assert set(qtile.c.group.info()['windows']) == set(('xeyes', 'xclock'))
    assert qtile.c.group.info()['floating_info']['clients'] == ['xclock']
    assert qtile.c.window.info()['width'] == 164
    assert qtile.c.window.info()['height'] == 164
    # 16 is given by the left gap width
    assert qtile.c.window.info()['x'] == 16
    assert qtile.c.window.info()['y'] == 0
    assert qtile.c.window.info()['group'] == 'a'
    # put on group b
    assert qtile.c.screen.info() == {
        'y': 0, 'x': 0, 'index': 0, 'width': 600, 'height': 480}
    assert qtile.c.group.info()['name'] == 'a'
    qtile.c.to_screen(1)
    assert qtile.c.group.info()['name'] == 'b'
    assert qtile.c.screen.info() == {
        'y': 0, 'x': 600, 'index': 1, 'width': 300, 'height': 580}
    qtile.c.group['a'].toscreen()
    assert qtile.c.group.info()['name'] == 'a'
    assert set(qtile.c.group.info()['windows']) == set(('xeyes', 'xclock'))
    assert qtile.c.window.info()['name'] == 'xclock'
    # width/height unchanged
    assert qtile.c.window.info()['width'] == 164
    assert qtile.c.window.info()['height'] == 164
    # x is shifted by 600, y is shifted by 0
    assert qtile.c.window.info()['x'] == 616
    assert qtile.c.window.info()['y'] == 0
    assert qtile.c.window.info()['group'] == 'a'
    assert qtile.c.group.info()['floating_info']['clients'] == ['xclock']
    # move to screen 3
    qtile.c.to_screen(2)
    assert qtile.c.screen.info() == {
        'y': 480, 'x': 0, 'index': 2, 'width': 500, 'height': 400}
    assert qtile.c.group.info()['name'] == 'c'
    qtile.c.group['a'].toscreen()
    assert qtile.c.group.info()['name'] == 'a'
    assert set(qtile.c.group.info()['windows']) == set(('xeyes', 'xclock'))
    assert qtile.c.window.info()['name'] == 'xclock'
    # width/height unchanged
    assert qtile.c.window.info()['width'] == 164
    assert qtile.c.window.info()['height'] == 164
    # x is shifted by 0, y is shifted by 480
    assert qtile.c.window.info()['x'] == 16
    assert qtile.c.window.info()['y'] == 480
    # now screen 4 for fun
    qtile.c.to_screen(3)
    assert qtile.c.screen.info() == {
        'y': 580, 'x': 500, 'index': 3, 'width': 400, 'height': 400}
    assert qtile.c.group.info()['name'] == 'd'
    qtile.c.group['a'].toscreen()
    assert qtile.c.group.info()['name'] == 'a'
    assert set(qtile.c.group.info()['windows']) == set(('xeyes', 'xclock'))
    assert qtile.c.window.info()['name'] == 'xclock'
    # width/height unchanged
    assert qtile.c.window.info()['width'] == 164
    assert qtile.c.window.info()['height'] == 164
    # x is shifted by 500, y is shifted by 580
    assert qtile.c.window.info()['x'] == 516
    assert qtile.c.window.info()['y'] == 580
    # and back to one
    qtile.c.to_screen(0)
    assert qtile.c.screen.info() == {
        'y': 0, 'x': 0, 'index': 0, 'width': 600, 'height': 480}
    assert qtile.c.group.info()['name'] == 'b'
    qtile.c.group['a'].toscreen()
    assert qtile.c.group.info()['name'] == 'a'
    assert set(qtile.c.group.info()['windows']) == set(('xeyes', 'xclock'))
    assert qtile.c.window.info()['name'] == 'xclock'
    # back to the original location
    assert qtile.c.window.info()['width'] == 164
    assert qtile.c.window.info()['height'] == 164
    assert qtile.c.window.info()['x'] == 16
    assert qtile.c.window.info()['y'] == 0
@fakescreen_config
def test_flo |
# Copyright (C) 2015 https://github.com/thof
#
# This file is part of decapromolist.
#
# decapromolist is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import urllib2
from lxml import html
from utils import Utils
class GetSubcategories:
    """Scrapes Decathlon.pl category/subcategory listings.

    Python 2 module (urllib2). All methods perform live HTTP requests and
    rely on Utils.getConfig() for 'siteURL' and 'subcatFile'.
    """
    def getCategories2(self):
        # Fetch the sub-category menu fragment for a fixed category id and
        # extract each link's numeric id from its URL (the commented-out
        # 'data-secondary-category-id' attribute was apparently unreliable).
        dataCat = []
        headers = {'User-agent': 'Mozilla/5.0'}
        req = urllib2.Request('https://www.decathlon.pl/pl/menu-load-sub-categories?categoryId=394904', None, headers)
        req = urllib2.urlopen(req)
        content = req.read().decode('UTF-8')
        response = html.fromstring(content)
        for cat in response.xpath('//a'):
            url = cat.attrib['href']
            # The id is the token between the first two '-' characters
            start = url.find('-')+1
            subId = url[start:url.find('-', start)]
            # subId = cat.attrib['data-secondary-category-id']
            subName = cat.text
            data = {'subId': int(subId), 'url': Utils.getConfig()['siteURL'] + url, 'subName': subName}
            dataCat.append(data)
        return dataCat
    def getCategories(self):
        # Collect the distinct primary category ids from the home page and
        # build the per-category sub-navigation JSON URLs.
        categories = []
        catUrl = []
        content = urllib2.urlopen(Utils.getConfig()['siteURL']).read()
        response = html.fromstring(content)
        for cat in response.xpath('//li/@primarycategoryid'):
            if cat not in categories:
                categories.append(cat)
        for cat in categories:
            url = "{}/pl/getSubNavigationMenu?primaryCategoryId={}".format(Utils.getConfig()['siteURL'], cat)
            catUrl.append(url)
        return catUrl
    def getSubcategories(self, catUrl):
        # Flatten the two-level category JSON into one record per subcategory.
        dataCat = []
        for url in catUrl:
            content = urllib2.urlopen(url).read()
            jsonData = json.loads(content)
            for cat in jsonData['category']['categories']:
                for subcat in cat['categories']:
                    data = {'id': int(cat['id']), 'name': cat['label'], 'subId': int(subcat['id']),
                            'subName': subcat['label'], 'url': Utils.getConfig()['siteURL'] + subcat['uri']}
                    dataCat.append(data)
        return dataCat
    @staticmethod
    def getThirdLevelCat(catUrl):
        # One record per third-level category; absolute URIs are kept as-is,
        # relative ones are prefixed with siteURL. The subId is re-derived
        # from the "C-<id>-" segment of the URI.
        dataCat = []
        for url in catUrl:
            content = urllib2.urlopen(url).read()
            jsonData = json.loads(content)
            for cat in jsonData['category']['categories']:
                data = {'id': int(jsonData['category']['id']), 'name': jsonData['category']['label'],
                        'subId': int(cat['id']), 'subName': cat['label']}
                if cat['uri'].find(Utils.getConfig()['siteURL']) == -1:
                    data['url'] = Utils.getConfig()['siteURL'] + cat['uri']
                else:
                    data['url'] = cat['uri']
                data['subId'] = int(cat['uri'][cat['uri'].find("C-")+2:cat['uri'].find("-", cat['uri'].find("C-")+2)])
                dataCat.append(data)
        return dataCat
    def saveSubcategories(self, dataCat):
        # Keep a backup of the previous file, then write the fresh data.
        Utils.renameFile(Utils.getConfig()['subcatFile'])
        Utils.saveJsonFile(Utils.getConfig()['subcatFile'], dataCat)
# Script entry point: scrape the fixed-category listing (getCategories2)
# and persist it. Python 2 (print statement below).
if __name__ == "__main__":
    proc = GetSubcategories()
    # catUrl = proc.getCategories()
    # dataCat = proc.getSubcategories(catUrl)
    dataCat = proc.getCategories2()
    proc.saveSubcategories(dataCat)
    print "Done"
|
f | rom django.db.backends import BaseDatabaseIntrospection
class DatabaseIntrospection(BaseDatabaseIntrospection):
    def get_table_list(self, cursor):
        """Return a list of table names in the current database."""
        cursor.execute("SHOW TABLES")
        tables = []
        for row in cursor.fetchall():
            tables.append(row[0])
        return tables
|
"""
2x2 plotting analysis for 2 datasets plugin
| ------------- | ------------- |
| contours from | pcolor from |
| both datasets | dataset 1 | |
| ------------- | ------------- |
| pcolor diff | pcolor from |
| both datasets | dataset 2 |
| ------------- | ------------- |
colorbar location = bottom
"""
class SampleException(Exception):
    """Raised when the plugin is given an invalid set of datasets."""
# Exactly two datasets (one case + one compare) feed the 4-panel figure.
_NDATASETS = 2
_NPANNELS = 4
def run(cases, compares, domain, **kwargs):
    """plugin run function

    :param cases: mapping of case name -> dataset
    :param compares: mapping of comparison name -> dataset
    :param domain: spatial/temporal domain selection (unused so far)
    :raises SampleException: when cases+compares do not supply exactly
        _NDATASETS datasets
    """
    case_names = cases.keys()
    compare_names = compares.keys()
    # BUG FIX: dict views cannot be concatenated with '+' on Python 3;
    # wrapping in list() works on both Python 2 and 3
    dsets = list(cases.values()) + list(compares.values())
    if len(dsets) != _NDATASETS:
        raise SampleException('Incorrect number of datasets provided')
    # get_monthly_means(*dsets)
    # get_seasonal_means()
    # get_annual_means()
    # get_full_means()
    return
def __plot():
    """Placeholder for the 2x2 panel rendering; not implemented yet."""
    return None
|
# import logging
from ast.visit import visit as v
from ast.node import Node
from ast.body.methoddeclaration import MethodDeclaration
from ast.stmt.minrepeatstmt import MinrepeatStmt
class Desugar(object):
    """AST visitor that rewrites sugared statements.

    Dispatch is driven by the @v.on/@v.when decorators from ast.visit, so
    the repeated `visit` method names below are intentional — each @v.when
    registers a handler for one node type.
    """
    def __init__(self):
        # Method currently being traversed, for diagnostics/rewrites
        self._cur_mtd = None
    @v.on("node")
    def visit(self, node):
        """
        This is the generic method to initialize the dynamic dispatcher
        """
    @v.when(Node)
    def visit(self, node):
        # Default handler: depth-first traversal into all children
        for c in node.childrenNodes: c.accept(self)
    @v.when(MethodDeclaration)
    def visit(self, node):
        # Remember the enclosing method, then keep traversing
        self._cur_mtd = node
        for c in node.childrenNodes: c.accept(self)

    @v.when(MinrepeatStmt)
    def visit(self, node):
        # Desugaring of MINREPEAT not ported yet — see the commented-out
        # implementation below for the old Statement-based approach.
        raise NotImplementedError
# Old impl
# @v.when(Statement)
# def visit(self, node):
# if node.kind == C.S.MINREPEAT:
# b = '\n'.join(map(str, node.b))
# body = u""
# for i in xrange(9): # TODO: parameterize
# body += u" | ""
# if (??) {{ {} }}
# """.format(b)
# logging.debug(
# "desugaring minrepeat @ {}".format(self._cur_mtd.name))
# return to_statements(self._cur_mtd, body)
# return [node]
|
# -*- coding: utf-8 -*-
# Simple script to test sending UTF8 text with the GrowlNotifier class
import loggi | ng
logging.basicConfig(level=logging.DEBUG)
from gntp.notifier import GrowlNotifier
import platform
growl = GrowlNotifier(notifications=['Testing'],password='password',hostname='ayu')
growl.s | ubscribe(platform.node(),platform.node(),12345)
|
from __future__ import absolute_import
from unittest import TestCase, skip
from ..goodman_ccd import get_args, MainApp
class MainAppTest(TestCase):
    """Smoke tests: MainApp must exit via SystemExit in both cases."""

    def setUp(self):
        # Fresh application instance for every test
        self.main_app = MainApp()

    def test___call__(self):
        # Invoking with no arguments must terminate the process
        with self.assertRaises(SystemExit):
            self.main_app()

    def test___call___show_version(self):
        # --version prints version info and exits
        version_args = get_args(arguments=['--version'])
        with self.assertRaises(SystemExit):
            self.main_app(version_args)
|
#!flask/bin/python
from gb import app
# Development entry point only: debug=True enables the reloader and the
# interactive debugger — never use in production.
app.run(debug=True)
_function
__metaclass__ = type
# Ansible module boilerplate: declares maturity ('preview') and support level.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: postgresql_schema
short_description: Add or remove PostgreSQL schema from a remote host
description:
- Add or remove PostgreSQL schema from a remote host.
version_added: "2.3"
options:
name:
description:
- Name of the schema to add or remove.
required: true
database:
description:
- Name of the database to connect to.
default: postgres
login_user:
description:
- The username used to authenticate with.
login_password:
description:
- The password used to authenticate with.
login_host:
description:
- Host running the database.
default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
owner:
description:
- Name of the role to set as owner of the schema.
port:
description:
- Database port to connect to.
default: 5432
session_role:
version_added: "2.8"
description: |
Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
state:
description:
- The schema state.
default: present
choices: [ "present", "absent" ]
cascade_drop:
description:
- Drop schema with CASCADE to remove child objects
type: bool
default: false
version_added: '2.8'
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection
will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for
more information on the modes.
- Default of C(prefer) matches libpq default.
default: prefer
choices: ["disable", "allow", "prefer", "require", "verify-ca", "verify-full"]
version_added: '2.8'
ssl_rootcert:
description:
- Specifies the name of a file containing SSL certificate authority (CA)
certificate(s). If the file exists, the server's certificate will be
verified to be signed by one of these authorities.
version_added: '2.8'
notes:
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed
on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before
using this module.
requirements: [ psycopg2 ]
author:
- Flavien Chantelot (@Dorn-) <contact@flavien.io>
- Thomas O'Donnell (@andytom)
'''
# Playbook examples rendered by ansible-doc.
# BUG FIX: the drop example used a nonexistent option `ensure:`; the module's
# documented option (see DOCUMENTATION choices) is `state`.
EXAMPLES = '''
# Create a new schema with name "acme"
- postgresql_schema:
    name: acme
# Create a new schema "acme" with a user "bob" who will own it
- postgresql_schema:
    name: acme
    owner: bob
# Drop schema "acme" with cascade
- postgresql_schema:
    name: acme
    state: absent
    cascade_drop: yes
'''
RETURN = '''
schema:
    description: Name of the schema
    returned: success, changed
    type: str
    sample: "acme"
'''
import traceback
# psycopg2 is optional at import time; the flag below lets the module report
# a friendly "library missing" error instead of crashing on import.
PSYCOPG2_IMP_ERR = None
try:
    import psycopg2
    import psycopg2.extras
except ImportError:
    PSYCOPG2_IMP_ERR = traceback.format_exc()
    postgresqldb_found = False
else:
    postgresqldb_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
    """Module-specific error type.

    Not raised anywhere in the code visible here; presumably signals an
    operation the connected PostgreSQL server cannot support — TODO confirm
    against the rest of the module.
    """
    pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, schema, owner):
    """Reassign ownership of ``schema`` to ``owner``.

    Always returns True, i.e. unconditionally reports a change.
    """
    quoted_schema = pg_quote_identifier(schema, 'schema')
    quoted_owner = pg_quote_identifier(owner, 'role')
    cursor.execute("ALTER SCHEMA %s OWNER TO %s" % (quoted_schema, quoted_owner))
    return True
def get_schema_info(cursor, schema):
    """Fetch the owner of ``schema`` from information_schema.

    Returns the single matching row (with an ``owner`` column) or None when
    the schema does not exist.
    """
    sql = """
    SELECT schema_owner AS owner
    FROM information_schema.schemata
    WHERE schema_name = %(schema)s
    """
    params = {'schema': schema}
    cursor.execute(sql, params)
    return cursor.fetchone()
def schema_exists(cursor, schema):
    """Return True when exactly one schema named ``schema`` exists."""
    cursor.execute(
        "SELECT schema_name FROM information_schema.schemata WHERE schema_name = %(schema)s",
        {'schema': schema})
    return cursor.rowcount == 1
def schema_delete(cursor, schema, cascade):
    """Drop ``schema`` when present, optionally with CASCADE.

    Returns True when a DROP was issued, False when the schema was absent.
    """
    if not schema_exists(cursor, schema):
        return False

    statement = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
    if cascade:
        statement += " CASCADE"
    cursor.execute(statement)
    return True
def schema_create(cursor, schema, owner):
    """Ensure ``schema`` exists, optionally owned by ``owner``.

    Creates the schema when missing; otherwise only reconciles ownership.
    Returns True when anything changed.
    """
    if schema_exists(cursor, schema):
        # Already present: the only reconcilable difference is the owner.
        current = get_schema_info(cursor, schema)
        if owner and owner != current['owner']:
            return set_owner(cursor, schema, owner)
        return False

    parts = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
    if owner:
        parts.append('AUTHORIZATION %s' % pg_quote_identifier(owner, 'role'))
    cursor.execute(' '.join(parts))
    return True
def schema_matches(cursor, schema, owner):
    """Return True when ``schema`` exists and, if ``owner`` is given, is
    owned by ``owner``."""
    if not schema_exists(cursor, schema):
        return False
    info = get_schema_info(cursor, schema)
    return not (owner and owner != info['owner'])
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default="", no_log=True),
login_host=dict(default=""),
login_unix_socket=dict(default=""),
port=dict(default="5432"),
schema=dict(required=True, aliases=['name']),
owner=dict(default=""),
database=dict(default="postgres"),
cascade_drop=dict(type="bool", default=False),
state=dict(default="present", choices=["absent", "present"]),
ssl_mode=dict(default='prefer', choices=[
'disable', 'allow', 'prefer', 'require', 'verify-ca', 'verify-full']),
ssl_rootcert=dict(default=None),
session_role=dict(),
),
supports_check_mode=True
)
if not postgresqldb_found:
module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
schema = module.params["schema"]
owner = module.params["owner"]
state = module.params["state"]
sslrootcert = module.params["ssl_rootcert"]
cascade_drop = module.params["cascade_drop"]
session_role = module.params["session_role"]
changed = False
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host": "host",
"login_user": "user",
"login_password": "password",
"port": "port",
"database": "database",
"ssl_mode": "sslmode",
"ssl_rootcert": "sslrootcert"
}
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != "" and v is not None)
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
module.fail_json(
msg='psycopg2 must be at least 2.4.3 in order to user the ss |
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"directory": ".",
"type": "static"
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
},
"url": "ws://127.0.0.1:8080/ws"
}
}
]
}
]
}
myapp = """#!/usr/bin/env python
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
class MySession(ApplicationSession):
log = Logger()
def onJoin(self, details):
self.log.info("Loaded the component!")
"""
self._start_run(config, myapp, expected_stdout, expected_stderr,
_check)
    def test_start_run_guest(self):
        """
        A basic start of a guest.
        """
        # Success criteria: the router enters its event loop and the guest
        # prints its startup marker; nothing is expected on stderr.
        expected_stdout = [
            "Entering reactor event loop", "Loaded the component!"
        ]
        expected_stderr = []

        def _check(lc, reactor):
            # Polled periodically: once the guest's marker appears in the
            # captured stdout, stop polling and shut the reactor down (it may
            # already be stopping, hence the blanket try/except).
            if "Loaded the component!" in self.stdout.getvalue():
                lc.stop()
                try:
                    reactor.stop()
                except:
                    pass

        # Node configuration: one router worker serving a static web root and
        # a WebSocket endpoint on port 8080, plus one guest worker that runs
        # myapp.py (defined below) with the current Python interpreter.
        config = {
            "controller": {
            },
            "workers": [
                {
                    "type": "router",
                    "options": {
                        "pythonpath": ["."]
                    },
                    "realms": [
                        {
                            "name": "realm1",
                            "roles": [
                                {
                                    "name": "anonymous",
                                    # Anonymous clients get every permission on
                                    # every URI — fine for a local test router.
                                    "permissions": [
                                        {
                                            "uri": "*",
                                            "publish": True,
                                            "subscribe": True,
                                            "call": True,
                                            "register": True
                                        }
                                    ]
                                }
                            ]
                        }
                    ],
                    "transports": [
                        {
                            "type": "web",
                            "endpoint": {
                                "type": "tcp",
                                "port": 8080
                            },
                            "paths": {
                                "/": {
                                    "directory": ".",
                                    "type": "static"
                                },
                                "ws": {
                                    "type": "websocket"
                                }
                            }
                        }
                    ]
                },
                {
                    "type": "guest",
                    "executable": sys.executable,
                    "arguments": [os.path.join(self.code_location, "myapp.py")]
                }
            ]
        }

        # Minimal guest program: just print the marker _check() waits for.
        myapp = """#!/usr/bin/env python
print("Loaded the component!")
"""
        self._start_run(config, myapp, expected_stdout, expected_stderr,
                        _check)
def test_start_utf8_logging(self):
"""
Logging things that are UTF8 but not Unicode should work fine.
"""
expected_stdout = [
"Entering reactor event loop", u"\u2603"
]
expected_stderr = []
def _check(lc, reactor):
if u"\u2603" in self.stdout.getvalue():
lc.stop()
try:
reactor.stop()
except:
pass
config = {
"controller": {
},
"workers": [
{
"type": "router",
"options": {
"pythonpath": ["."]
},
"realms": [
{
"name": "realm1",
"roles": [
{
"name": "anonymous",
"permissions": [
{
"uri": "*",
"publish": True,
"subscribe": True,
"call": True,
"register": True
}
]
}
]
}
],
"transports": [
{
"type": "web",
"endpoint": {
"type": "tcp",
"port": 8080
},
"paths": {
"/": {
"directory": ".",
"type": "static"
},
"ws": {
"type": "websocket"
}
}
}
]
},
{
"type": "container",
"options": {
"pythonpath": [self.code_location]
},
"components": [
{
"type": "class",
"classname": "myapp.MySession",
"realm": "realm1",
"transport": {
"type": "websocket",
"endpoint": {
"type": "tcp",
"host": "127.0.0.1",
"port": 8080
|
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import inspect

import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
class APIBase(wtypes.Base):
    """Common base type for API objects, carrying audit timestamps."""

    created_at = wsme.wsattr(datetime.datetime, readonly=True)
    """The time in UTC at which the object is created"""

    updated_at = wsme.wsattr(datetime.datetime, readonly=True)
    """The time in UTC at which the object is updated"""

    def as_dict(self):
        """Render this object as a dict of its fields."""
        return {field: getattr(self, field)
                for field in self.fields
                if hasattr(self, field) and getattr(self, field) != wsme.Unset}
class CyborgController(rest.RestController):
    """Base REST controller adding PATCH routing for ``_custom_actions``."""

    def _handle_patch(self, method, remainder, request=None):
        """Routes ``PATCH`` _custom_actions.

        ``remainder`` holds the URL path segments left after this controller;
        returns a ``(controller, remainder)`` pair for pecan to dispatch, or
        aborts with 404/405 when nothing matches.
        """
        # route to a patch_all or get if no additional parts are available
        if not remainder or remainder == ['']:
            controller = self._find_controller('patch_all', 'patch')
            if controller:
                return controller, []
            pecan.abort(404)
        # An attribute that is not a method is presumably a nested
        # sub-controller: hand the rest of the path to pecan's generic lookup.
        # NOTE(review): relies on the module-level ``import inspect``.
        controller = getattr(self, remainder[0], None)
        if controller and not inspect.ismethod(controller):
            return pecan.routing.lookup_controller(controller, remainder[1:])
        # route to custom_action
        match = self._handle_custom_action(method, remainder, request)
        if match:
            return match
        # finally, check for the regular patch_one/patch requests
        controller = self._find_controller('patch_one', 'patch')
        if controller:
            return controller, remainder
        # No handler matched: method not allowed on this resource.
        pecan.abort(405)
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(ker | nel):
result = Intangible()
result.template = "object/draft_schematic/space/chassis/shared_hu | tt_medium_s02.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
# note: we use 127.0.0.1 rather than localhost because on some platforms
# localhost might not be correctly configured as an alias for the loopback
# address. (ahem: Windows)
# Host part of the local Tern server's address (see _GetServerAddress).
SERVER_HOST = '127.0.0.1'
def ShouldEnableTernCompleter():
  """Return True when both a node binary and the Tern bundle are available.

  Tern counts as 'installed' when its binary exists in the submodule, which
  happens when the user passed '--tern-completer' at install time or manually
  ran 'npm install' in the Tern submodule directory."""
  if not PATH_TO_NODE:
    _logger.warning( 'Not using Tern completer: unable to find node' )
    return False

  _logger.info( 'Using node binary from: ' + PATH_TO_NODE )

  if not os.path.exists( PATH_TO_TERN_BINARY ):
    _logger.info( 'Not using Tern completer: not installed at ' +
                  PATH_TO_TERN_BINARY )
    return False

  return True
def GlobalConfigExists( tern_config ):
  """Return True when the global Tern config file at |tern_config| exists.

  Kept as a separate function primarily for testability; it is a thin wrapper
  over os.path.exists."""
  config_present = os.path.exists( tern_config )
  return config_present
def FindTernProjectFile( starting_directory ):
  """Return the path of the nearest Tern configuration, or None.

  Walks from |starting_directory| up through its parent folders looking for a
  .tern-project file, then falls back to the user-global ~/.tern-config."""
  for folder in utils.PathsToAllParentFolders( starting_directory ):
    candidate = os.path.join( folder, '.tern-project' )
    if os.path.exists( candidate ):
      return candidate

  # Tern also honours a global .tern-config file (see
  # http://ternjs.net/doc/manual.html#server). It can provide meaningful
  # defaults for libs and require paths, so a user who set one up has done so
  # deliberately and should not be warned about a missing .tern-project.
  global_config = os.path.expanduser( '~/.tern-config' )
  if GlobalConfigExists( global_config ):
    return global_config

  return None
class TernCompleter( Completer ):
"""Completer for JavaScript using tern.js: http://ternjs.net.
The protocol is defined here: http://ternjs.net/doc/manual.html#protocol"""
  def __init__( self, user_options ):
    # Record user preferences, then bring the Tern server up. Server state is
    # initialised and started under the mutex so a concurrent request cannot
    # observe a half-initialised server.
    super( TernCompleter, self ).__init__( user_options )

    self._server_keep_logfiles = user_options[ 'server_keep_logfiles' ]

    # Used to ensure that starting/stopping of the server is synchronised
    self._server_state_mutex = threading.RLock()

    # Flag consumed by _WarnIfMissingTernProject; set when a one-shot
    # .tern-project check should run (False until armed elsewhere).
    self._do_tern_project_check = False

    with self._server_state_mutex:
      self._server_stdout = None
      self._server_stderr = None
      self._Reset()
      self._StartServer()
def _WarnIfMissingTernProject( self ):
# The Tern server will operate without a .tern-project file. However, it
# does not operate optimally, and will likely lead to issues reported that
# JavaScript completion is not working properly. So we raise a warning if we
# aren't able to detect some semblance of manual Tern configuration.
# We do this check after the server has started because the server does
# have nonzero use without a project file, however limited. We only do this
# check once, though because the server can only handle one project at a
# time. This doesn't catch opening a file which is not part of the project
# or any of those things, but we can only do so much. We'd like to enhance
# ycmd to handle this better, but that is a FIXME for now.
if self._ServerIsRunning() and self._do_tern_project_check:
self._do_tern_project_check = False
tern_project = FindTernProjectFile( os.getcwd() )
if not tern_project:
_logger.warning( 'No .tern-project file detected: ' + os.getcwd() )
raise RuntimeError( 'Warning: Unable to detect a .tern-project file '
'in the hierarchy before ' + os.getcwd() +
' and no global .tern-config file was found. '
'This is required for accurate JavaScript '
'completion. Please see the User Guide for '
'details.' )
else:
_logger.info( 'Detected .tern-project file at: ' + tern_project )
def _GetServerAddress( self ):
return 'http://' + SERVER_HOST + ':' + str( self._server_port )
def ComputeCandidatesInner( self, request_data ):
query = {
'type': 'completions',
'types': True,
'docs': True,
'filter': False,
'caseInsensitive': True,
'guess': False,
'sort': False,
'includeKeywords': False,
'expandWordForward': False,
'omitObjectPrototype': False
}
completions = self._GetResponse( query,
request_data[ 'start_codepoint' ],
request_data ).get( 'completions', [] )
def BuildDoc( completion ):
doc = completion.get( 'type', 'Unknown type' )
if 'doc' in completion:
doc = doc + '\n' + completion[ 'doc' ]
return doc
return [ responses.BuildCompletionData( completion[ 'name' ],
completion.get( 'type', '?' ),
BuildDoc( completion ) )
for completion in completions ]
def OnFileReadyToParse( self, request_data ):
self._WarnIfMissingTernProject()
# Keep tern server up to date with the file data. We do this by sending an
# empty request just containing the file data
try:
self._PostRequest( {}, request_data )
except:
# The server might not be ready yet or the server might not be running.
# in any case, just ignore this we'll hopefully get another parse request
# soon.
pass
  def GetSubcommandsMap( self ):
    # Table of ycmd subcommand names -> handlers. Each lambda takes the
    # completer instance explicitly as its first argument (note the ``self``
    # parameter), so entries are presumably invoked unbound by the Completer
    # base class — TODO confirm.
    return {
      'RestartServer':  ( lambda self, request_data, args:
                                         self._RestartServer() ),
      'StopServer':     ( lambda self, request_data, args:
                                         self._StopServer() ),
      'GoToDefinition': ( lambda self, request_data, args:
                                         self._GoToDefinition( request_data ) ),
      'GoTo':           ( lambda self, request_data, args:
                                         self._GoToDefinition( request_data ) ),
      'GoToReferences': ( lambda self, request_data, args:
                                         self._GoToReferences( request_data ) ),
      'GetType':        ( lambda self, request_data, args:
                                         self._GetType( request_data) ),
      'GetDoc':         ( lambda self, request_data, args:
                                         self._GetDoc( request_data) ),
      'RefactorRename': ( lambda self, request_data, args:
                                         self._Rename( request_data, args ) ),
    }
def SupportedFiletypes( self ):
return [ 'javascript' ]
def DebugInfo( self, request_data ):
with self._server_state_mutex:
if self._ServerIsRunning():
return ( 'JavaScript completer debug information:\n'
' Tern running at: {0}\n'
' Tern process ID: {1}\n'
' Tern executable: {2}\n'
' Tern logfiles:\n'
' {3}\n'
' {4}'.format( self._GetServerAddress(),
self._server_handle.pid,
PATH_TO_TERN_BINARY,
self._server_stdout,
self._server_stderr ) )
if self._server_stdout and self._server_stderr:
return ( 'JavaScript completer debug information:\n'
' Tern no longer running\n'
' Tern executable: {0}\n'
' Tern logfiles:\n'
' {1}\n'
' {2}\n'.format( PATH_TO_TERN_BINARY,
self._server_stdout,
self._server_stderr ) )
return ( 'JavaScript completer debug information:\n'
' Tern is not running\n'
|
import csv
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from scipy.optimize import curve_fit
def countKey(key, listDataDicts):
    """Tally how many rows share each distinct value of ``key``.

    Returns a dict mapping value -> occurrence count. A row missing ``key``
    raises KeyError, matching the original behaviour.
    """
    counts = {}
    for row in listDataDicts:
        value = row[key]
        counts[value] = counts.get(value, 0) + 1
    return counts
def avgUse30Days(key, listDataDicts):
    """Average value of ``key`` over respondents reporting at most 30 days.

    Values of 31 or more (presumably survey sentinel codes — see the 93 code
    handled in avgUse30DaysWithZeros) are excluded entirely. Raises
    ZeroDivisionError when no row qualifies.
    """
    qualifying = [int(person[key]) for person in listDataDicts
                  if int(person[key]) < 31]
    return 1.0 * sum(qualifying) / len(qualifying)
def avgUse30DaysWithZeros(key, listDataDicts):
    """Average value of ``key``, counting code-93 respondents as zero days.

    Values below 31 contribute their day count; the value 93 (presumably the
    survey's "did not use" code — TODO confirm in the codebook) adds a user
    with zero days; everything else is ignored. Raises ZeroDivisionError
    when no row qualifies.
    """
    totalDays = 0
    numberUsers = 0
    for person in listDataDicts:
        days = int(person[key])
        if days < 31:
            totalDays += days
            numberUsers += 1
        elif days == 93:
            numberUsers += 1
    return 1.0 * totalDays / numberUsers
def powerLaw(x, a, b):
    """Power-law model a * x**(-b), used as a curve_fit target."""
    exponent = -b
    return a * (x ** exponent)
def expDecay(x, a, b):
    """Exponential model a * exp(b*x); decays when b is negative."""
    growth = np.exp(b * x)
    return a * growth
# Load the survey rows as dicts keyed by column name.
# NOTE: binary mode ('rb') is the Python 2 csv convention this script was
# written for; under Python 3 this open would need mode 'r' with newline=''.
listDataDicts = []
with open('34933-0001-Data.tsv', 'rb') as tsvFile:
    tsvReader = csv.DictReader(tsvFile, delimiter='\t')
    for row in tsvReader:
        listDataDicts.append(row)

# Paired columns, one pair per substance: age of first use and days of use in
# the last 30 days.
ageFirstUseKeys = ['CIGTRY', 'SNUFTRY', 'CHEWTRY', 'CIGARTRY', 'ALCTRY', 'MJAGE', 'COCAGE', 'HERAGE', 'HALLAGE', 'INHAGE', 'ANALAGE', 'TRANAGE', 'STIMAGE', 'SEDAGE']
useLast30Keys = ['CIG30USE', 'SNF30USE', 'CHW30USE', 'CGR30USE', 'ALCDAYS', 'MJDAY30A', 'COCUS30A', 'HER30USE', 'HAL30USE', 'INHDY30A', 'PRDAYPMO', 'TRDAYPMO', 'STDAYPMO', 'SVDAYPMO']

# Collect (first-use age, last-30-days use) points, skipping survey sentinel
# codes (ages >= 900 and day counts >= 31). Each int() conversion is done
# once per field instead of twice as before.
xdata = []
ydata = []
for person in listDataDicts:
    for ageKey, useKey in zip(ageFirstUseKeys, useLast30Keys):
        age = int(person[ageKey])
        days = int(person[useKey])
        if age < 900 and days < 31:
            xdata.append(age)
            ydata.append(days)

slope, intercept, rValue, pValue, stdErr = stats.linregress(xdata, ydata)
# print() with a single argument is valid in both Python 2 and 3.
print("Drug First Use Age vs Usage Frequency Linear Regression")
print("Slope: %f, Intercept: %f, RSQ-Value: %f, P-Value: %f, Standard Error: %f,\n 95%% Confidence Interval: %f +- %f\n" % (slope, intercept, rValue * rValue, pValue, stdErr, slope, 1.96 * stdErr))

'''# Curve fit with a power law
xfit = range(90)
popt1, pcov1 = curve_fit(powerLaw, xdata, ydata)
print "Power Law Curve fit: ",popt1,np.sqrt(np.diag(pcov1)),"\n"
fitLiney1 = np.zeros(len(xfit))
for i in range(len(xfit)):
    fitLiney1[i] = powerLaw( xfit[i], popt1[0], popt1[1] )
'''

# Points for drawing the fitted regression line across the age range.
xdata2 = [x for x in range(89)]
ydata2 = [(x * slope + intercept) for x in range(89)]

plt.plot(xdata, ydata, 'b.', xdata2, ydata2, 'r-')
plt.title("Age of First Use vs Usage in the Last 30 Days")
plt.xlabel("Age of First Use")
plt.ylabel("Usage in the Past 30 Days")  # fixed stray ')' in the label text
plt.legend(["Data", "Linear Fit"])
plt.xlim(0, 90)
plt.ylim(0, 31)
plt.tight_layout()
plt.show()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.