src
stringlengths 721
1.04M
|
|---|
import logging
import struct
from .utility import get_word
# Install a default console handler only when the embedding application has
# not configured logging itself. NOTE(review): this probes the private
# logging._handlerList — consider logging.getLogger().hasHandlers() instead.
if len(logging._handlerList) == 0:
    logging.basicConfig(level=logging.INFO, format="%(asctime)-15s %(message)s")
LOG = logging.getLogger(__name__)
class PeTools(object):
    """Minimal helpers for parsing PE (Portable Executable) binaries.

    Only the few header fields needed for bitness detection and for mapping
    the raw file image into its virtual-address layout are read. All PE
    header fields are little-endian by specification, so every struct format
    uses an explicit "<" prefix (the previous native-order formats would
    misparse on big-endian hosts).
    """

    # IMAGE_FILE_HEADER.Machine value -> bitness (0x14c: i386, 0x8664: AMD64).
    BITNESS_MAP = {0x14c: 32, 0x8664: 64}

    @staticmethod
    def mapBinary(binary):
        """Map a raw PE file image to its virtual memory layout.

        Each section's raw data is copied to its virtual offset; the file
        header area is copied verbatim. Returns the mapped image as bytes,
        or b"" when no parsable PE structure is found.

        This is a pretty rough implementation but does the job for now.
        """
        mapped_binary = bytearray()
        pe_offset = PeTools.getPeOffset(binary)
        if pe_offset:
            num_sections = 0
            section_infos = []
            # Offset from the PE signature to the first section header:
            # 0xF8 for PE32, 0x108 for PE32+ (larger optional header).
            optional_header_size = 0xF8
            if len(binary) >= pe_offset + 0x8:
                # NumberOfSections lives at COFF header offset 0x6.
                num_sections = struct.unpack("<H", binary[pe_offset + 0x6:pe_offset + 0x8])[0]
                if PeTools.getBitness(binary) == 64:
                    optional_header_size = 0x108
            if num_sections and len(binary) >= pe_offset + optional_header_size + num_sections * 0x28:
                for section_index in range(num_sections):
                    # Section headers are 0x28 bytes each; VirtualSize,
                    # VirtualAddress, SizeOfRawData and PointerToRawData are
                    # four consecutive DWORDs starting at offset 0x8.
                    section_offset = section_index * 0x28
                    slice_start = pe_offset + optional_header_size + section_offset + 0x8
                    slice_end = slice_start + 0x10
                    virt_size, virt_offset, raw_size, raw_offset = struct.unpack("<IIII", binary[slice_start:slice_end])
                    section_infos.append({
                        "section_index": section_index,
                        "virt_size": virt_size,
                        "virt_offset": virt_offset,
                        "raw_size": raw_size,
                        "raw_offset": raw_offset,
                    })
            max_virt_section_offset = 0
            min_raw_section_offset = 0xFFFFFFFF
            for section_info in section_infos:
                max_virt_section_offset = max(max_virt_section_offset, section_info["virt_size"] + section_info["virt_offset"])
                max_virt_section_offset = max(max_virt_section_offset, section_info["raw_size"] + section_info["virt_offset"])
                # Only sections whose raw data starts beyond the typical header
                # area bound how much of the file head is copied verbatim.
                if section_info["raw_offset"] > 0x200:
                    min_raw_section_offset = min(min_raw_section_offset, section_info["raw_offset"])
            if max_virt_section_offset:
                mapped_binary = bytearray(max_virt_section_offset)
                mapped_binary[0:min_raw_section_offset] = binary[0:min_raw_section_offset]
                for section_info in section_infos:
                    mapped_binary[section_info["virt_offset"]:section_info["virt_offset"] + section_info["raw_size"]] = binary[section_info["raw_offset"]:section_info["raw_offset"] + section_info["raw_size"]]
                    LOG.debug("Mapping %d: raw 0x%x (0x%x bytes) -> virtual 0x%x (0x%x bytes)", section_info["section_index"], section_info["raw_offset"], section_info["raw_size"], section_info["virt_offset"], section_info["virt_size"])
            LOG.debug("Mapped binary of size %d bytes (%d sections) to memory view of size %d bytes", len(binary), num_sections, len(mapped_binary))
        return bytes(mapped_binary)

    @staticmethod
    def getBitness(binary):
        """Return 32 or 64 based on the COFF Machine field, or 0 if unknown."""
        bitness_id = 0
        pe_offset = PeTools.getPeOffset(binary)
        if pe_offset and len(binary) >= pe_offset + 0x6:
            bitness_id = struct.unpack("<H", binary[pe_offset + 0x4:pe_offset + 0x6])[0]
        return PeTools.BITNESS_MAP.get(bitness_id, 0)

    @staticmethod
    def getBaseAddressFromPeHeader(binary):
        """Return the ImageBase from the optional header, or 0 on failure.

        NOTE(review): reads a 4-byte ImageBase at pe_offset+0x34, which is the
        PE32 layout; PE32+ stores an 8-byte ImageBase at a different offset —
        confirm whether 64-bit inputs are expected here.
        """
        pe_offset = PeTools.getPeOffset(binary)
        if pe_offset and len(binary) >= pe_offset + 0x38:
            base_addr = struct.unpack("<I", binary[pe_offset + 0x34:pe_offset + 0x38])[0]
            LOG.debug("Changing base address from 0 to: 0x%x for inference of reference counts (based on PE header)", base_addr)
            return base_addr
        return 0

    @staticmethod
    def getPeOffset(binary):
        """Return e_lfanew (offset of the PE signature) from the DOS header, or 0."""
        if len(binary) >= 0x40:
            return get_word(binary, 0x3c)
        return 0

    @staticmethod
    def checkPe(binary):
        """Return True when the binary has a PE header with a known Machine value."""
        pe_offset = PeTools.getPeOffset(binary)
        if pe_offset and len(binary) >= pe_offset + 6:
            bitness = get_word(binary, pe_offset + 4)
            return bitness in PeTools.BITNESS_MAP
        return False
|
from __future__ import unicode_literals, with_statement
import re
import os
import subprocess
from django.utils.encoding import smart_str
from django.core.files.temp import NamedTemporaryFile
from sorl.thumbnail.base import EXTENSIONS
from sorl.thumbnail.compat import b
from sorl.thumbnail.conf import settings
from sorl.thumbnail.engines.base import EngineBase
from sorl.thumbnail.compat import OrderedDict
# Parses `identify` output of the form "NAME FORMAT WxH ..." into the
# named width ("x") and height ("y") groups.
size_re = re.compile(r'^(?:.+) (?:[A-Z]+) (?P<x>\d+)x(?P<y>\d+)')
class Engine(EngineBase):
    """
    ImageMagick / GraphicsMagick thumbnail engine.

    The backend "image" object is a dict holding the source file path, an
    ordered dict of command-line options to pass to `convert`, and a cached
    (width, height) size.
    """
    def write(self, image, options, thumbnail):
        """
        Writes the thumbnail image
        """
        if options['format'] == 'JPEG' and options.get(
                'progressive', settings.THUMBNAIL_PROGRESSIVE):
            image['options']['interlace'] = 'line'
        image['options']['quality'] = options['quality']
        args = settings.THUMBNAIL_CONVERT.split(' ')
        # "[0]" selects the first frame/page of multi-frame inputs.
        args.append(image['source'] + '[0]')
        for k in image['options']:
            v = image['options'][k]
            args.append('-%s' % k)
            if v is not None:
                args.append('%s' % v)
        flatten = "on"
        if 'flatten' in options:
            flatten = options['flatten']
        if settings.THUMBNAIL_FLATTEN and not flatten == "off":
            args.append('-flatten')
        suffix = '.%s' % EXTENSIONS[options['format']]
        with NamedTemporaryFile(suffix=suffix, mode='rb') as fp:
            args.append(fp.name)
            # Materialize the list (map() is lazy on Python 3).
            args = [smart_str(arg) for arg in args]
            p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # communicate() drains both pipes; wait()-before-read can deadlock
            # when the child fills a pipe buffer.
            out, err = p.communicate()
            if err:
                raise Exception(err)
            thumbnail.write(fp.read())

    def cleanup(self, image):
        os.remove(image['source'])  # we should not need this now

    def get_image(self, source):
        """
        Returns the backend image objects from a ImageFile instance
        """
        # delete=False: the file must outlive this call so `convert` /
        # `identify` can read it later; cleanup() removes it.
        with NamedTemporaryFile(mode='wb', delete=False) as fp:
            fp.write(source.read())
            return {'source': fp.name, 'options': OrderedDict(), 'size': None}

    def get_image_size(self, image):
        """
        Returns the image width and height as a tuple
        """
        if image['size'] is None:
            args = settings.THUMBNAIL_IDENTIFY.split(' ')
            args.append(image['source'])
            p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate()
            # NOTE: raises AttributeError if identify output does not match.
            m = size_re.match(str(out))
            image['size'] = int(m.group('x')), int(m.group('y'))
        return image['size']

    def is_valid_image(self, raw_data):
        """
        This is not very good for imagemagick because it will say anything is
        valid that it can use as input.
        """
        with NamedTemporaryFile(mode='wb') as fp:
            fp.write(raw_data)
            fp.flush()
            args = settings.THUMBNAIL_IDENTIFY.split(' ')
            args.append(fp.name)
            p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            p.communicate()
            return p.returncode == 0

    def _orientation(self, image):
        # XXX need to get the dimensions right after a transpose.
        if settings.THUMBNAIL_CONVERT.endswith('gm convert'):
            # GraphicsMagick path: read the EXIF orientation tag and emulate
            # auto-orientation with explicit rotate/flip/flop options.
            args = settings.THUMBNAIL_IDENTIFY.split()
            args.extend(['-format', '%[exif:orientation]', image['source']])
            p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate()
            result = out.strip()
            if result and result != b('unknown'):
                result = int(result)
                options = image['options']
                if result == 2:
                    options['flop'] = None
                elif result == 3:
                    options['rotate'] = '180'
                elif result == 4:
                    options['flip'] = None
                elif result == 5:
                    options['rotate'] = '90'
                    options['flop'] = None
                elif result == 6:
                    options['rotate'] = '90'
                elif result == 7:
                    options['rotate'] = '-90'
                    options['flop'] = None
                elif result == 8:
                    options['rotate'] = '-90'
        else:
            # ImageMagick also corrects the orientation exif data for
            # destination
            image['options']['auto-orient'] = None
        return image

    def _colorspace(self, image, colorspace):
        """
        `Valid colorspaces
        <http://www.graphicsmagick.org/GraphicsMagick.html#details-colorspace>`_.
        Backends need to implement the following::
            RGB, GRAY
        """
        image['options']['colorspace'] = colorspace
        return image

    def _crop(self, image, width, height, x_offset, y_offset):
        """
        Crops the image
        """
        image['options']['crop'] = '%sx%s+%s+%s' % (width, height, x_offset, y_offset)
        image['size'] = (width, height)  # update image size
        return image

    def _scale(self, image, width, height):
        """
        Does the resizing of the image
        """
        image['options']['scale'] = '%sx%s!' % (width, height)
        image['size'] = (width, height)  # update image size
        return image

    def _padding(self, image, geometry, options):
        """
        Pads the image
        """
        # The order is important. The gravity option should come before extent.
        image['options']['background'] = options.get('padding_color')
        image['options']['gravity'] = 'center'
        image['options']['extent'] = '%sx%s' % (geometry[0], geometry[1])
        return image
|
import shutil
from typing import List, Tuple, Optional
import attr
import click
from furl import furl # type: ignore
from pyffdl.__version__ import __version__
from pyffdl.sites import (
AdultFanFictionStory,
ArchiveOfOurOwnStory,
FanFictionNetStory,
HTMLStory,
TwistingTheHellmouthStory,
TGStorytimeStory,
)
from pyffdl.utilities import get_url_from_file, list2text
# Registrable domain of a story URL -> site-specific Story parser class.
# download() matches the last two host labels of a URL against these keys.
AVAILABLE_SITES = {
    "fanfiction.net": FanFictionNetStory,
    "fictionpress.com": FanFictionNetStory,
    "adult-fanfiction.org": AdultFanFictionStory,
    "archiveofourown.org": ArchiveOfOurOwnStory,
    "tthfanfic.org": TwistingTheHellmouthStory,
    "tgstorytime.com": TGStorytimeStory,
}
@attr.s()
class URL:
    # Parsed URL of the story to download.
    url: furl = attr.ib()
    # Optional path of an existing file to write to; when None, the story
    # parser's own filename is left untouched.
    file: Optional[str] = attr.ib(default=None)
def download(urls: List[URL], verbose: bool = False, force: bool = False) -> None:
    """Download (or refresh) every story in *urls*.

    Args:
        urls: Story URLs, each optionally paired with a target filename.
        verbose: Forwarded to the site parser for chatty output.
        force: Forwarded to the site parser to force a full re-download.
    """
    for url in urls:
        if not url.url:
            continue
        # Reduce e.g. "www.fanfiction.net" to the registrable "fanfiction.net".
        host = ".".join(url.url.host.split(".")[-2:])
        site = AVAILABLE_SITES.get(host)
        if not site:
            click.echo(
                f"{__file__} is currently only able to download from {list2text(list(AVAILABLE_SITES.keys()))}."
            )
            return
        story = site.parse(url.url, verbose, force)
        if url.file:
            story.filename = url.file
        story.run()
# Root click command group; the "download", "html" and "update" subcommands
# below attach themselves via @cli.command.
@click.group()
@click.version_option(version=__version__)
def cli() -> None:
    pass
@cli.command(  # noqa: unused-function
    "download", help="Download a new fanfiction story."
)
@click.option(
    "-f",
    "--from",
    "from_file",
    type=click.File(),
    help="Load a list of URLs from a plaintext file.",
)
@click.option("-v", "--verbose", is_flag=True)
@click.argument("url_list", nargs=-1)
def cli_download(
    from_file: click.File, url_list: Tuple[str, ...], verbose: bool = False
) -> None:
    # Command-line URLs first, then URLs from the optional file, skipping
    # comment lines that start with "#".
    collected = [URL(furl(address)) for address in url_list]
    if from_file:
        for line in from_file.readlines():
            if line.startswith("#"):
                continue
            collected.append(URL(furl(line.strip("\n"))))
    download(collected, verbose)
@cli.command(  # noqa: unused-function
    "html", help="Download a single story, using a list of chapter URLs."
)
@click.option(
    "-f",
    "--from",
    "from_file",
    type=click.File(),
    help="Load a list of URLs from a plaintext file.",
)
@click.option("-a", "--author", help="Name of the author", type=str, required=True)
@click.option("-t", "--title", help="Title of the story", type=str, required=True)
@click.option("-v", "--verbose", is_flag=True)
@click.argument("url_list", nargs=-1)
def cli_html(
    from_file: click.File,
    author: str,
    title: str,
    url_list: Tuple[str, ...],
    verbose: bool = False,
):
    # Gather chapter URLs: command-line arguments first, then any from the
    # optional file, skipping "#"-prefixed comment lines.
    collected = [URL(furl(address)) for address in url_list]
    if from_file:
        for line in from_file.readlines():
            if line.startswith("#"):
                continue
            collected.append(URL(furl(line.strip("\n"))))
    if not collected:
        click.echo("You must provide at least one URL to download.")
        return
    story = HTMLStory(
        chapters=[entry.url.tostr() for entry in collected],
        author=author,
        title=title,
        url=furl("http://httpbin.org/status/200"),
    )
    story.verbose = verbose
    story.run()
@cli.command(  # noqa: unused-function
    "update", help="Update an existing .epub fanfiction file."
)
@click.option(
    "-f",
    "--force",
    is_flag=True,
    default=False,
    help="Completely refresh the ebook file.",
)
@click.option(
    "-b", "--backup", is_flag=True, default=False, help="Backup the original file."
)
@click.option("-v", "--verbose", is_flag=True)
@click.argument("filenames", type=click.Path(dir_okay=False, exists=True), nargs=-1)
def cli_update(
    force: bool, backup: bool, filenames: List[click.Path], verbose: bool = False
) -> None:
    """Update each given .epub file, optionally backing it up first."""
    if backup:
        for filename in filenames:
            # Copy the original next to itself before modifying it in place.
            # (Fix: the previous code copied a hard-coded placeholder path
            # instead of the actual file.)
            shutil.copy(f"{filename}", f"{filename}.bck")
    # With --force the target filename is dropped so the story is rebuilt
    # from scratch instead of updated in place.
    stories = [
        URL(get_url_from_file(x), str(x) if not force else None) for x in filenames
    ]
    download(stories, verbose, force)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from sure import scenario
from pyeqs import QuerySet
from tests.helpers import prepare_data, cleanup_data, add_document
@scenario(prepare_data, cleanup_data)
def test_simple_search_with_host_string(context):
    """
    Connect with host string
    """
    # When I create a queryset
    t = QuerySet("localhost", index="foo")
    # And there are records
    add_document("foo", {"bar": "baz"})
    # And I do a search
    results = t[0:1]
    # Then I get the expected results
    len(results).should.equal(1)
    results[0]['_source'].should.equal({"bar": "baz"})
@scenario(prepare_data, cleanup_data)
def test_simple_search_with_host_dict(context):
    """
    Connect with host dict
    """
    # When I create a queryset
    connection_info = {"host": "localhost", "port": 9200}
    t = QuerySet(connection_info, index="foo")
    # And there are records
    add_document("foo", {"bar": "baz"})
    # And I do a search
    results = t[0:1]
    # Then I get the expected results
    len(results).should.equal(1)
    results[0]['_source'].should.equal({"bar": "baz"})
@scenario(prepare_data, cleanup_data)
def test_simple_search_with_host_list(context):
    """
    Connect with host list
    """
    # When I create a queryset
    connection_info = [{"host": "localhost", "port": 9200}]
    t = QuerySet(connection_info, index="foo")
    # And there are records
    add_document("foo", {"bar": "baz"})
    # And I do a search
    results = t[0:1]
    # Then I get the expected results
    len(results).should.equal(1)
    results[0]['_source'].should.equal({"bar": "baz"})
|
# encoding: utf-8
from woo.dem import *
from woo.fem import *
import woo.core, woo.dem, woo.pyderived, woo.models, woo.config
import math
from minieigen import *
class CylDepot(woo.core.Preprocessor,woo.pyderived.PyWooObject):
    '''Deposition of particles inside cylindrical tube. This preprocessor was created for pre-generation of dense packing inside cylindrical chamber, which is then exported to STL serving as input to `OpenFOAM <http://openfoam.org>`__ for computing permeability (pressure loss) of the layering.
    The target specimen (particle packing) dimensions are :obj:`htDiam` (cylinder height and diameter); the packing is inside cylindrical wall, with inlet at the bottom and outlet at the top; this cylinder may be longer than the specimen, which is set by :obj:`extraHt`.
    The height of compact packing is not known in advance precisely (it is a function of PSD, material, layering etc); the estimate is set by :obj:`relSettle`, which is used to compute :obj:`ht0`, the height of loose packing (where particles are initially generated), which then settles down in gravity.
    The packing is settled when :obj:`unbalanced energy <woo.utils.unbalancedEnergy>` (the ratio of kinetic to elastic energy) drops below :obj:`unbE`.
    Once settled, particles are clipped to the required height. The actual value of settlement is computed and printed (so that it can be used iteratively as input for the next simulation).
    Layering specification uses the functionality of :obj:`woo.dem.LayeredAxialBias`, which distributed fractions along some axis in particle generation space. Suppose that we are to model the following two scenarios (which correspond to :obj:`preCooked` variants ``Brisbane 1`` and ``Brisbane 2``, which have spherical particles with piecewise-linear PSD distributed in layered fractions:
    .. image:: fig/depot-brisbane.*
    This arrangement is achieved with the following settings:
    1. ``Brisbane 1``:
    PSD is defined (via :obj:`PsdSphereGenerator.psdPts <woo.dem.PsdSphereGenerator.psdPts>`) to match relative height (and thus mass) of fractions (on the right), which is 0.1777 for the coarser fraction (12.5-20mm) and 0.8222 for the finer fraction. This we set by::
    psdPts=[(6.3e-3,0),(12.5e-3,.82222),(20e-3,1)]
    Layering is achieved by assigning :obj:`~woo.dem.LayeredAxialBias` to :obj:`bias`. The layers are distributed along the normalized height by setting :obj:`~woo.dem.LayeredAxialBias.layerSpec` to::
    [VectorX([12.5e-3,20e-3,0,.1777]),VectorX([0,12.5e-3,.1777,1]
    where each ``VectorX`` contains first minimum and maximum diameter, and at least one axial height range (in normalized coordinates).
    2. ``Brisbane 2``:
    PSD: set :obj:`~woo.dem.PsdSphereGenerator.psdPts` to::
    psdPts=[(6.3e-3,0),(12.5e-3,.4111),(20e-3,1)]
    Layering (:obj:`~woo.dem.LayeredAxialBias.layerSpec`) is set as::
    layerSpec=[VectorX([12.5e-3,20e-3, 0,.1777,.5888,1]),VectorX([0,12.5e-3, .177777,.58888]
    where the coarse fraction is distributed uniformly over both intervals in 0-0.1777 *and* 0.5888-1.0.
    Resulting heights of fractions vitally depend on :obj:`relSettle`, so it may take some experimentation to get the result right:
    .. image:: fig/depot-brisbane-3d.png
    '''
    _classTraits=None
    _PAT=woo.pyderived.PyAttrTrait # less typing
    #defaultPsd=[(.007,0),(.01,.4),(.012,.7),(.02,1)]
    defaultPsd=[(5e-3,.0),(6.3e-3,.12),(8e-3,.53),(10e-3,.8),(12.5e-3,.94),(20e-3,1)]
    def postLoad(self,I):
        # Triggered after attribute changes; I is the id() of the changed
        # attribute (None when called explicitly for consistency).
        if self.preCooked and (I==None or I==id(self.preCooked)):
            # Apply one of the named "pre-cooked" configurations, then clear
            # preCooked so the choice itself is not persisted.
            print 'Applying pre-cooked configuration "%s".'%self.preCooked
            if self.preCooked=='Brisbane 1':
                self.gen=woo.dem.PsdSphereGenerator(psdPts=[(6.3e-3,0),(12.5e-3,.82222),(20e-3,1)],discrete=False)
                self.bias=woo.dem.LayeredAxialBias(axis=2,fuzz=0,layerSpec=[VectorX([12.5e-3,1,0,.177777]),VectorX([0,12.5e-3,.177777,1])])
                self.relSettle=.38
            elif self.preCooked=='Brisbane 2':
                self.gen=woo.dem.PsdSphereGenerator(psdPts=[(6.3e-3,0),(12.5e-3,.41111),(20e-3,1)],discrete=False)
                self.bias=woo.dem.LayeredAxialBias(axis=2,fuzz=0,layerSpec=[VectorX([12.5e-3,1,0,.177777,.58888,1]),VectorX([0,12.5e-3,.177777,.58888])])
                self.relSettle=.37
            else: raise RuntimeError('Unknown precooked configuration "%s"'%self.preCooked)
            self.preCooked=''
        # Loose-packing height derived from the target height and the
        # estimated relative settlement.
        self.ht0=self.htDiam[0]/self.relSettle
        # if I==id(self.estSettle):
    _attrTraits=[
        _PAT(str,'preCooked','',noDump=True,noGui=False,startGroup='General',choice=['','Brisbane 1','Brisbane 2'],triggerPostLoad=True,doc='Apply pre-cooked configuration (i.e. change other parameters); this option is not saved.'),
        _PAT(Vector2,'htDiam',(.45,.1),unit='m',doc='Height and diameter of the resulting cylinder; the initial cylinder has the height of :obj:`ht0`, and particles are, after stabilization, clipped to :obj:`htDiam`, the resulting height.'),
        _PAT(float,'relSettle',.3,triggerPostLoad=True,doc='Estimated relative height after deposition (e.g. 0.4 means that the sample will settle around 0.4 times the original height). This value has to be guessed, as there is no exact relation to predict the amount of settling; 0.3 is a good initial guess, but it may depend on the PSD.'),
        _PAT(float,'ht0',.9,guiReadonly=True,noDump=True,doc='Initial height (for loose sample), computed automatically from :obj:`relSettle` and :obj:`htDiam`.'),
        _PAT(woo.dem.ParticleGenerator,'gen',woo.dem.PsdSphereGenerator(psdPts=defaultPsd,discrete=False),'Object for particle generation'),
        _PAT(woo.dem.SpatialBias,'bias',woo.dem.PsdAxialBias(psdPts=defaultPsd,axis=2,fuzz=.1,discrete=True),doc='Uneven distribution of particles in space, depending on their radius. Use axis=2 for altering the distribution along the cylinder axis.'),
        _PAT(woo.models.ContactModelSelector,'model',woo.models.ContactModelSelector(name='linear',damping=.4,numMat=(1,1),matDesc=['everything'],mats=[woo.dem.FrictMat(density=2e3,young=2e5,tanPhi=0)]),doc='Contact model and materials.'),
        _PAT(int,'cylDiv',40,'Fineness of cylinder division'),
        _PAT(float,'unbE',0.005,':obj:`Unbalanced energy <woo._utils2.unbalancedEnergy>` as criterion to consider the particles settled.'),
        # STL output
        _PAT(str,'stlOut','',startGroup='STL output',filename=True,doc='Output file with triangulated particles (not the boundary); if empty, nothing will be exported at the end.'),
        _PAT(float,'stlTol',.2e-3,unit='m',doc='Tolerance for STL export (maximum distance between ideal shape and triangulation; passed to :obj:`_triangulated.spheroidsToStl`)'),
        _PAT(Vector2,'extraHt',(.5,.5),unit='m',doc='Extra height to be added to bottom and top of the resulting packing, when the new STL-exported cylinder is created.'),
        _PAT(float,'cylAxDiv',-1.,'Fineness of division of the STL cylinder; see :obj:`woo.triangulated.cylinder` ``axDiv``. The defaults create nearly-square triangulation'),
    ]
    def __init__(self,**kw):
        woo.core.Preprocessor.__init__(self)
        self.wooPyInit(self.__class__,woo.core.Preprocessor,**kw)
    def __call__(self):
        # Build and return the Scene: a capped cylinder, gravity, a one-shot
        # CylinderInlet that places all particles at once, and a PyRunner
        # that polls checkProgress() every 100 steps.
        pre=self
        self.postLoad(None) # ensure consistency
        mat=pre.model.mats[0]
        S=woo.core.Scene(
            pre=self.deepcopy(),
            trackEnergy=True, # for unbalanced energy tracking
            dtSafety=.9,
            fields=[
                DemField(
                    gravity=(0,0,-10),
                    par=woo.triangulated.cylinder(Vector3(0,0,0),Vector3(0,0,pre.ht0),radius=pre.htDiam[1]/2.,div=pre.cylDiv,capA=True,capB=False,wallCaps=True,mat=mat)
                )
            ],
            engines=DemField.minimalEngines(model=pre.model)+[
                woo.dem.CylinderInlet(
                    node=woo.core.Node((0,0,0),Quaternion((0,1,0),-math.pi/2)),
                    height=pre.ht0,
                    radius=pre.htDiam[1]/2.,
                    generator=pre.gen,
                    spatialBias=pre.bias,
                    maxMass=-1,maxNum=-1,massRate=0,maxAttempts=2000,materials=pre.model.mats,glColor=float('nan'),
                    nDo=1 # place all particles at once, then let settle it all down
                ),
                woo.core.PyRunner(100,'import woo.pre.depot; S.pre.checkProgress(S)'),
            ],
        )
        if 'opengl' in woo.config.features: S.gl.demField.colorBy='radius'
        return S
    def checkProgress(self,S):
        # Periodic callback: once the unbalanced-energy criterion is met,
        # clip the packing, rebuild the boundary cylinder and export STL.
        u=woo.utils.unbalancedEnergy(S)
        print("unbalanced E: %g/%g"%(u,S.pre.unbE))
        if not u<S.pre.unbE: return
        r,h=S.pre.htDiam[1]/2.,S.pre.htDiam[0]
        # check how much was the settlement
        zz=woo.utils.contactCoordQuantiles(S.dem,[.999])
        print 'Compaction done, settlement from %g (loose) to %g (dense); rel. %g, relSettle was %g.'%(S.pre.ht0,zz[0],zz[0]/S.pre.ht0,S.pre.relSettle)
        # delete everything outside the target box; run this engine just once, explicitly
        woo.dem.BoxOutlet(box=((-r,-r,0),(r,r,h)))(S,S.dem)
        S.stop()
        # delete the triangulated cylinder
        for p in S.dem.par:
            if isinstance(p.shape,Facet): S.dem.par.remove(p.id)
        # create a new (CFD-suitable) cylinder
        # bits for marking the mesh parts
        S.lab.cylBits=8,16,32
        cylMasks=[DemField.defaultBoundaryMask | b for b in S.lab.cylBits]
        S.dem.par.add(woo.triangulated.cylinder(Vector3(0,0,-S.pre.extraHt[0]),Vector3(0,0,S.pre.htDiam[0]+S.pre.extraHt[1]),radius=S.pre.htDiam[1]/2.,div=S.pre.cylDiv,axDiv=S.pre.cylAxDiv,capA=True,capB=True,wallCaps=False,masks=cylMasks,mat=S.pre.model.mats[0]))
        if S.pre.stlOut:
            n=woo.triangulated.spheroidsToSTL(S.pre.stlOut,S.dem,tol=S.pre.stlTol,solid="particles")
            n+=woo.triangulated.facetsToSTL(S.pre.stlOut,S.dem,append=True,mask=S.lab.cylBits[0],solid="lateral")
            n+=woo.triangulated.facetsToSTL(S.pre.stlOut,S.dem,append=True,mask=S.lab.cylBits[1],solid="bottom")
            n+=woo.triangulated.facetsToSTL(S.pre.stlOut,S.dem,append=True,mask=S.lab.cylBits[2],solid="top")
            print 'Exported %d facets to %s'%(n,S.pre.stlOut)
        else:
            print 'Not running STL export (stlOut empty)'
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function to build box predictor from configuration."""
import collections
import tensorflow.compat.v1 as tf
from object_detection.predictors import convolutional_box_predictor
from object_detection.predictors import convolutional_keras_box_predictor
from object_detection.predictors import mask_rcnn_box_predictor
from object_detection.predictors import mask_rcnn_keras_box_predictor
from object_detection.predictors import rfcn_box_predictor
from object_detection.predictors import rfcn_keras_box_predictor
from object_detection.predictors.heads import box_head
from object_detection.predictors.heads import class_head
from object_detection.predictors.heads import keras_box_head
from object_detection.predictors.heads import keras_class_head
from object_detection.predictors.heads import keras_mask_head
from object_detection.predictors.heads import mask_head
from object_detection.protos import box_predictor_pb2
def build_convolutional_box_predictor(is_training,
                                      num_classes,
                                      conv_hyperparams_fn,
                                      min_depth,
                                      max_depth,
                                      num_layers_before_predictor,
                                      use_dropout,
                                      dropout_keep_prob,
                                      kernel_size,
                                      box_code_size,
                                      apply_sigmoid_to_scores=False,
                                      add_background_class=True,
                                      class_prediction_bias_init=0.0,
                                      use_depthwise=False,
                                      box_encodings_clip_range=None):
  """Builds a ConvolutionalBoxPredictor with box and class heads.

  Args:
    is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: Number of classes, *excluding* the implicit background
      category (groundtruth labels in {0, 1, ..., K-1} mean num_classes=K).
    conv_hyperparams_fn: A function to generate tf-slim arg_scope with
      hyperparameters for convolution ops.
    min_depth: Minimum feature depth prior to predicting box encodings and
      class predictions.
    max_depth: Maximum feature depth prior to predicting box encodings and
      class predictions; when 0, no additional feature map is inserted
      before location and class predictions.
    num_layers_before_predictor: Number of additional conv layers before the
      predictor.
    use_dropout: Whether to apply a single dropout op prior to both box and
      class predictions.
    dropout_keep_prob: Keep probability for dropout; only used when
      use_dropout is True.
    kernel_size: Size of the final convolution kernel; automatically reduced
      to min(feature_width, feature_height) when the feature map is smaller
      than the kernel.
    box_code_size: Size of the encoding for each box.
    apply_sigmoid_to_scores: If True, apply a sigmoid to the output class
      predictions.
    add_background_class: Whether to add an implicit background class.
    class_prediction_bias_init: Constant value to initialize the bias of the
      last conv2d layer before class prediction.
    class_prediction_bias_init and use_depthwise default to 0.0 / False.
    use_depthwise: Whether to use depthwise convolutions for prediction steps.
    box_encodings_clip_range: Min and max values for clipping box encodings.

  Returns:
    A ConvolutionalBoxPredictor instance.
  """
  # Reserve one extra class slot for the implicit background class.
  num_class_slots = (
      num_classes + 1 if add_background_class else num_classes)
  return convolutional_box_predictor.ConvolutionalBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      box_prediction_head=box_head.ConvolutionalBoxHead(
          is_training=is_training,
          box_code_size=box_code_size,
          kernel_size=kernel_size,
          use_depthwise=use_depthwise,
          box_encodings_clip_range=box_encodings_clip_range),
      class_prediction_head=class_head.ConvolutionalClassHead(
          is_training=is_training,
          num_class_slots=num_class_slots,
          use_dropout=use_dropout,
          dropout_keep_prob=dropout_keep_prob,
          kernel_size=kernel_size,
          apply_sigmoid_to_scores=apply_sigmoid_to_scores,
          class_prediction_bias_init=class_prediction_bias_init,
          use_depthwise=use_depthwise),
      other_heads={},
      conv_hyperparams_fn=conv_hyperparams_fn,
      num_layers_before_predictor=num_layers_before_predictor,
      min_depth=min_depth,
      max_depth=max_depth)
def build_convolutional_keras_box_predictor(is_training,
                                            num_classes,
                                            conv_hyperparams,
                                            freeze_batchnorm,
                                            inplace_batchnorm_update,
                                            num_predictions_per_location_list,
                                            min_depth,
                                            max_depth,
                                            num_layers_before_predictor,
                                            use_dropout,
                                            dropout_keep_prob,
                                            kernel_size,
                                            box_code_size,
                                            add_background_class=True,
                                            class_prediction_bias_init=0.0,
                                            use_depthwise=False,
                                            box_encodings_clip_range=None,
                                            name='BoxPredictor'):
  """Builds a Keras ConvolutionalBoxPredictor with per-feature-map heads.

  Args:
    is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: Number of classes, *excluding* the implicit background
      category (groundtruth labels in {0, 1, ..., K-1} mean num_classes=K).
    conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
      containing hyperparameters for convolution ops.
    freeze_batchnorm: Whether to freeze batch norm parameters during
      training; desirable with small batch sizes (e.g. 1) so pretrained
      batch norm params are used.
    inplace_batchnorm_update: Whether to update batch norm moving averages
      inplace; when False the train op must depend on
      tf.graphkeys.UPDATE_OPS to update batch norm statistics.
    num_predictions_per_location_list: A list of integers, one per feature
      map, giving the number of box predictions per spatial location.
    min_depth: Minimum feature depth prior to predicting box encodings and
      class predictions.
    max_depth: Maximum feature depth prior to predicting box encodings and
      class predictions; when 0, no additional feature map is inserted.
    num_layers_before_predictor: Number of additional conv layers before the
      predictor.
    use_dropout: Whether to apply a single dropout op prior to both box and
      class predictions.
    dropout_keep_prob: Keep probability for dropout; only used when
      use_dropout is True.
    kernel_size: Size of the final convolution kernel; automatically reduced
      to min(feature_width, feature_height) for small feature maps.
    box_code_size: Size of the encoding for each box.
    add_background_class: Whether to add an implicit background class.
    class_prediction_bias_init: Constant value to initialize the bias of the
      last conv2d layer before class prediction.
    use_depthwise: Whether to use depthwise convolutions for prediction steps.
    box_encodings_clip_range: Min and max values for clipping box encodings.
    name: A string name scope for the box predictor; if `None`, Keras
      auto-generates one from the class name.

  Returns:
    A Keras ConvolutionalBoxPredictor instance.
  """
  # Reserve one extra class slot for the implicit background class.
  num_class_slots = (
      num_classes + 1 if add_background_class else num_classes)
  box_prediction_heads = []
  class_prediction_heads = []
  # One box head and one class head per feature map, constructed in the same
  # interleaved order as before and named by their stack position.
  for head_index, predictions_per_location in enumerate(
      num_predictions_per_location_list):
    box_prediction_heads.append(
        keras_box_head.ConvolutionalBoxHead(
            is_training=is_training,
            box_code_size=box_code_size,
            kernel_size=kernel_size,
            conv_hyperparams=conv_hyperparams,
            freeze_batchnorm=freeze_batchnorm,
            num_predictions_per_location=predictions_per_location,
            use_depthwise=use_depthwise,
            box_encodings_clip_range=box_encodings_clip_range,
            name='ConvolutionalBoxHead_%d' % head_index))
    class_prediction_heads.append(
        keras_class_head.ConvolutionalClassHead(
            is_training=is_training,
            num_class_slots=num_class_slots,
            use_dropout=use_dropout,
            dropout_keep_prob=dropout_keep_prob,
            kernel_size=kernel_size,
            conv_hyperparams=conv_hyperparams,
            freeze_batchnorm=freeze_batchnorm,
            num_predictions_per_location=predictions_per_location,
            class_prediction_bias_init=class_prediction_bias_init,
            use_depthwise=use_depthwise,
            name='ConvolutionalClassHead_%d' % head_index))
  return convolutional_keras_box_predictor.ConvolutionalBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      box_prediction_heads=box_prediction_heads,
      class_prediction_heads=class_prediction_heads,
      other_heads={},
      conv_hyperparams=conv_hyperparams,
      num_layers_before_predictor=num_layers_before_predictor,
      min_depth=min_depth,
      max_depth=max_depth,
      freeze_batchnorm=freeze_batchnorm,
      inplace_batchnorm_update=inplace_batchnorm_update,
      name=name)
def build_weight_shared_convolutional_box_predictor(
    is_training,
    num_classes,
    conv_hyperparams_fn,
    depth,
    num_layers_before_predictor,
    box_code_size,
    kernel_size=3,
    add_background_class=True,
    class_prediction_bias_init=0.0,
    use_dropout=False,
    dropout_keep_prob=0.8,
    share_prediction_tower=False,
    apply_batch_norm=True,
    use_depthwise=False,
    score_converter_fn=tf.identity,
    box_encodings_clip_range=None,
    keyword_args=None):
  """Builds and returns a WeightSharedConvolutionalBoxPredictor.

  Args:
    is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: number of classes, *excluding* the implicit background
      category (groundtruth labels take values in {0, ..., K-1} for
      num_classes=K).
    conv_hyperparams_fn: A function to generate tf-slim arg_scope with
      hyperparameters for convolution ops.
    depth: depth of conv layers.
    num_layers_before_predictor: Number of the additional conv layers before
      the predictor.
    box_code_size: Size of encoding for each box.
    kernel_size: Size of final convolution kernel.
    add_background_class: Whether to add an implicit background class.
    class_prediction_bias_init: constant value to initialize bias of the last
      conv2d layer before class prediction.
    use_dropout: Whether to apply dropout to class prediction head.
    dropout_keep_prob: Probability of keeping activations.
    share_prediction_tower: Whether to share the multi-layer tower between box
      prediction and class prediction heads.
    apply_batch_norm: Whether to apply batch normalization to conv layers in
      this predictor.
    use_depthwise: Whether to use depthwise separable conv2d instead of
      conv2d.
    score_converter_fn: Callable score converter to perform elementwise op on
      class scores.
    box_encodings_clip_range: Min and max values for clipping the
      box_encodings.
    keyword_args: A dictionary with additional args (unused by this builder).

  Returns:
    A WeightSharedConvolutionalBoxPredictor class.
  """
  # Reserve one extra class slot for the implicit background category when
  # requested.
  num_class_slots = num_classes + 1 if add_background_class else num_classes
  class_prediction_head = class_head.WeightSharedConvolutionalClassHead(
      num_class_slots=num_class_slots,
      kernel_size=kernel_size,
      class_prediction_bias_init=class_prediction_bias_init,
      use_dropout=use_dropout,
      dropout_keep_prob=dropout_keep_prob,
      use_depthwise=use_depthwise,
      score_converter_fn=score_converter_fn)
  box_prediction_head = box_head.WeightSharedConvolutionalBoxHead(
      box_code_size=box_code_size,
      kernel_size=kernel_size,
      use_depthwise=use_depthwise,
      box_encodings_clip_range=box_encodings_clip_range)
  return convolutional_box_predictor.WeightSharedConvolutionalBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      box_prediction_head=box_prediction_head,
      class_prediction_head=class_prediction_head,
      other_heads={},
      conv_hyperparams_fn=conv_hyperparams_fn,
      depth=depth,
      num_layers_before_predictor=num_layers_before_predictor,
      kernel_size=kernel_size,
      apply_batch_norm=apply_batch_norm,
      share_prediction_tower=share_prediction_tower,
      use_depthwise=use_depthwise)
def build_weight_shared_convolutional_keras_box_predictor(
    is_training,
    num_classes,
    conv_hyperparams,
    freeze_batchnorm,
    inplace_batchnorm_update,
    num_predictions_per_location_list,
    depth,
    num_layers_before_predictor,
    box_code_size,
    kernel_size=3,
    add_background_class=True,
    class_prediction_bias_init=0.0,
    use_dropout=False,
    dropout_keep_prob=0.8,
    share_prediction_tower=False,
    apply_batch_norm=True,
    use_depthwise=False,
    apply_conv_hyperparams_to_heads=False,
    apply_conv_hyperparams_pointwise=False,
    score_converter_fn=tf.identity,
    box_encodings_clip_range=None,
    name='WeightSharedConvolutionalBoxPredictor',
    keyword_args=None):
  """Builds the Keras WeightSharedConvolutionalBoxPredictor from the arguments.

  Args:
    is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: number of classes. Note that num_classes *does not*
      include the background category, so if groundtruth labels take values
      in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
      assigned classification targets can range from {0,... K}).
    conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
      containing hyperparameters for convolution ops.
    freeze_batchnorm: Whether to freeze batch norm parameters during
      training or not. When training with a small batch size (e.g. 1), it is
      desirable to freeze batch norm update and use pretrained batch norm
      params.
    inplace_batchnorm_update: Whether to update batch norm moving average
      values inplace. When this is false train op must add a control
      dependency on tf.graphkeys.UPDATE_OPS collection in order to update
      batch norm statistics.
    num_predictions_per_location_list: A list of integers representing the
      number of box predictions to be made per spatial location for each
      feature map. Must be non-empty and all entries must be equal, since
      the predictor shares weights across feature maps.
    depth: depth of conv layers.
    num_layers_before_predictor: Number of the additional conv layers before
      the predictor.
    box_code_size: Size of encoding for each box.
    kernel_size: Size of final convolution kernel.
    add_background_class: Whether to add an implicit background class.
    class_prediction_bias_init: constant value to initialize bias of the last
      conv2d layer before class prediction.
    use_dropout: Whether to apply dropout to class prediction head.
    dropout_keep_prob: Probability of keeping activations.
    share_prediction_tower: Whether to share the multi-layer tower between box
      prediction and class prediction heads.
    apply_batch_norm: Whether to apply batch normalization to conv layers in
      this predictor.
    use_depthwise: Whether to use depthwise separable conv2d instead of
      conv2d.
    apply_conv_hyperparams_to_heads: Whether to apply conv_hyperparams to
      depthwise seperable convolution layers in the box and class heads. By
      default, the conv_hyperparams are only applied to layers in the
      predictor tower when using depthwise separable convolutions.
    apply_conv_hyperparams_pointwise: Whether to apply the conv_hyperparams to
      the pointwise_initializer and pointwise_regularizer when using depthwise
      separable convolutions. By default, conv_hyperparams are only applied to
      the depthwise initializer and regularizer when use_depthwise is true.
    score_converter_fn: Callable score converter to perform elementwise op on
      class scores.
    box_encodings_clip_range: Min and max values for clipping the
      box_encodings.
    name: A string name scope to assign to the box predictor. If `None`, Keras
      will auto-generate one from the class name.
    keyword_args: A dictionary with additional args.

  Returns:
    A Keras WeightSharedConvolutionalBoxPredictor class.

  Raises:
    ValueError: If `num_predictions_per_location_list` is empty or its
      entries are not all identical.
  """
  # Guard against an empty list; previously this surfaced as an opaque
  # IndexError at the `[0]` access below.
  if not num_predictions_per_location_list:
    raise ValueError('num_predictions_per_location_list must not be empty.')
  if len(set(num_predictions_per_location_list)) > 1:
    # BUGFIX: the original adjacent string literals concatenated to
    # "...for allfeature maps"; keep the separating space.
    raise ValueError('num predictions per location must be same for all '
                     'feature maps, found: {}'.format(
                         num_predictions_per_location_list))
  num_predictions_per_location = num_predictions_per_location_list[0]
  box_prediction_head = keras_box_head.WeightSharedConvolutionalBoxHead(
      box_code_size=box_code_size,
      kernel_size=kernel_size,
      conv_hyperparams=conv_hyperparams,
      num_predictions_per_location=num_predictions_per_location,
      use_depthwise=use_depthwise,
      apply_conv_hyperparams_to_heads=apply_conv_hyperparams_to_heads,
      box_encodings_clip_range=box_encodings_clip_range,
      name='WeightSharedConvolutionalBoxHead')
  class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
      num_class_slots=(
          num_classes + 1 if add_background_class else num_classes),
      use_dropout=use_dropout,
      dropout_keep_prob=dropout_keep_prob,
      kernel_size=kernel_size,
      conv_hyperparams=conv_hyperparams,
      num_predictions_per_location=num_predictions_per_location,
      class_prediction_bias_init=class_prediction_bias_init,
      use_depthwise=use_depthwise,
      apply_conv_hyperparams_to_heads=apply_conv_hyperparams_to_heads,
      score_converter_fn=score_converter_fn,
      name='WeightSharedConvolutionalClassHead')
  other_heads = {}
  return (
      convolutional_keras_box_predictor.WeightSharedConvolutionalBoxPredictor(
          is_training=is_training,
          num_classes=num_classes,
          box_prediction_head=box_prediction_head,
          class_prediction_head=class_prediction_head,
          other_heads=other_heads,
          conv_hyperparams=conv_hyperparams,
          depth=depth,
          num_layers_before_predictor=num_layers_before_predictor,
          freeze_batchnorm=freeze_batchnorm,
          inplace_batchnorm_update=inplace_batchnorm_update,
          kernel_size=kernel_size,
          apply_batch_norm=apply_batch_norm,
          share_prediction_tower=share_prediction_tower,
          use_depthwise=use_depthwise,
          apply_conv_hyperparams_pointwise=apply_conv_hyperparams_pointwise,
          name=name))
def build_mask_rcnn_keras_box_predictor(is_training,
                                        num_classes,
                                        fc_hyperparams,
                                        freeze_batchnorm,
                                        use_dropout,
                                        dropout_keep_prob,
                                        box_code_size,
                                        add_background_class=True,
                                        share_box_across_classes=False,
                                        predict_instance_masks=False,
                                        conv_hyperparams=None,
                                        mask_height=14,
                                        mask_width=14,
                                        mask_prediction_num_conv_layers=2,
                                        mask_prediction_conv_depth=256,
                                        masks_are_class_agnostic=False,
                                        convolve_then_upsample_masks=False):
  """Builds and returns a MaskRCNNKerasBoxPredictor.

  Args:
    is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: number of classes, *excluding* the implicit background
      category (groundtruth labels take values in {0, ..., K-1} for
      num_classes=K).
    fc_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
      containing hyperparameters for fully connected dense ops.
    freeze_batchnorm: Whether to freeze batch norm parameters during
      training or not; useful with very small batch sizes (e.g. 1).
    use_dropout: Option to use dropout or not. A single dropout op is applied
      prior to both box and class predictions.
    dropout_keep_prob: Keep probability for dropout, only used when
      use_dropout is True.
    box_code_size: Size of encoding for each box.
    add_background_class: Whether to add an implicit background class.
    share_box_across_classes: Whether to share boxes across classes rather
      than use a different box for each class.
    predict_instance_masks: If True, attaches a third-stage mask prediction
      head to the returned predictor.
    conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
      containing hyperparameters for convolution ops (used by the mask head).
    mask_height: Desired output mask height. The default value is 14.
    mask_width: Desired output mask width. The default value is 14.
    mask_prediction_num_conv_layers: Number of convolution layers applied to
      the image_features in mask prediction branch.
    mask_prediction_conv_depth: The depth for the first conv2d_transpose op
      applied to the image_features in the mask prediction branch. If 0, the
      depth is chosen automatically from the number of classes and channels.
    masks_are_class_agnostic: Boolean determining if the mask-head is
      class-agnostic or not.
    convolve_then_upsample_masks: Whether to apply convolutions on mask
      features before upsampling using nearest neighbor resizing; otherwise
      mask features are bilinearly resized first.

  Returns:
    A MaskRCNNKerasBoxPredictor class.
  """
  # Reserve one extra class slot for the implicit background category when
  # requested.
  num_class_slots = num_classes + 1 if add_background_class else num_classes
  class_prediction_head = keras_class_head.MaskRCNNClassHead(
      is_training=is_training,
      num_class_slots=num_class_slots,
      fc_hyperparams=fc_hyperparams,
      freeze_batchnorm=freeze_batchnorm,
      use_dropout=use_dropout,
      dropout_keep_prob=dropout_keep_prob)
  box_prediction_head = keras_box_head.MaskRCNNBoxHead(
      is_training=is_training,
      num_classes=num_classes,
      fc_hyperparams=fc_hyperparams,
      freeze_batchnorm=freeze_batchnorm,
      use_dropout=use_dropout,
      dropout_keep_prob=dropout_keep_prob,
      box_code_size=box_code_size,
      share_box_across_classes=share_box_across_classes)
  third_stage_heads = {}
  if predict_instance_masks:
    instance_mask_head = keras_mask_head.MaskRCNNMaskHead(
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams=conv_hyperparams,
        freeze_batchnorm=freeze_batchnorm,
        mask_height=mask_height,
        mask_width=mask_width,
        mask_prediction_num_conv_layers=mask_prediction_num_conv_layers,
        mask_prediction_conv_depth=mask_prediction_conv_depth,
        masks_are_class_agnostic=masks_are_class_agnostic,
        convolve_then_upsample=convolve_then_upsample_masks)
    third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS] = (
        instance_mask_head)
  return mask_rcnn_keras_box_predictor.MaskRCNNKerasBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      freeze_batchnorm=freeze_batchnorm,
      box_prediction_head=box_prediction_head,
      class_prediction_head=class_prediction_head,
      third_stage_heads=third_stage_heads)
def build_mask_rcnn_box_predictor(is_training,
                                  num_classes,
                                  fc_hyperparams_fn,
                                  use_dropout,
                                  dropout_keep_prob,
                                  box_code_size,
                                  add_background_class=True,
                                  share_box_across_classes=False,
                                  predict_instance_masks=False,
                                  conv_hyperparams_fn=None,
                                  mask_height=14,
                                  mask_width=14,
                                  mask_prediction_num_conv_layers=2,
                                  mask_prediction_conv_depth=256,
                                  masks_are_class_agnostic=False,
                                  convolve_then_upsample_masks=False):
  """Builds and returns a MaskRCNNBoxPredictor.

  Args:
    is_training: Indicates whether the BoxPredictor is in training mode.
    num_classes: number of classes, *excluding* the implicit background
      category (groundtruth labels take values in {0, ..., K-1} for
      num_classes=K).
    fc_hyperparams_fn: A function to generate tf-slim arg_scope with
      hyperparameters for fully connected ops.
    use_dropout: Option to use dropout or not. A single dropout op is applied
      prior to both box and class predictions.
    dropout_keep_prob: Keep probability for dropout, only used when
      use_dropout is True.
    box_code_size: Size of encoding for each box.
    add_background_class: Whether to add an implicit background class.
    share_box_across_classes: Whether to share boxes across classes rather
      than use a different box for each class.
    predict_instance_masks: If True, attaches a third-stage mask prediction
      head to the returned predictor.
    conv_hyperparams_fn: A function to generate tf-slim arg_scope with
      hyperparameters for convolution ops (used by the mask head).
    mask_height: Desired output mask height. The default value is 14.
    mask_width: Desired output mask width. The default value is 14.
    mask_prediction_num_conv_layers: Number of convolution layers applied to
      the image_features in mask prediction branch.
    mask_prediction_conv_depth: The depth for the first conv2d_transpose op
      applied to the image_features in the mask prediction branch. If 0, the
      depth is chosen automatically from the number of classes and channels.
    masks_are_class_agnostic: Boolean determining if the mask-head is
      class-agnostic or not.
    convolve_then_upsample_masks: Whether to apply convolutions on mask
      features before upsampling using nearest neighbor resizing; otherwise
      mask features are bilinearly resized first.

  Returns:
    A MaskRCNNBoxPredictor class.
  """
  # Reserve one extra class slot for the implicit background category when
  # requested.
  num_class_slots = num_classes + 1 if add_background_class else num_classes
  class_prediction_head = class_head.MaskRCNNClassHead(
      is_training=is_training,
      num_class_slots=num_class_slots,
      fc_hyperparams_fn=fc_hyperparams_fn,
      use_dropout=use_dropout,
      dropout_keep_prob=dropout_keep_prob)
  box_prediction_head = box_head.MaskRCNNBoxHead(
      is_training=is_training,
      num_classes=num_classes,
      fc_hyperparams_fn=fc_hyperparams_fn,
      use_dropout=use_dropout,
      dropout_keep_prob=dropout_keep_prob,
      box_code_size=box_code_size,
      share_box_across_classes=share_box_across_classes)
  third_stage_heads = {}
  if predict_instance_masks:
    third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS] = (
        mask_head.MaskRCNNMaskHead(
            num_classes=num_classes,
            conv_hyperparams_fn=conv_hyperparams_fn,
            mask_height=mask_height,
            mask_width=mask_width,
            mask_prediction_num_conv_layers=mask_prediction_num_conv_layers,
            mask_prediction_conv_depth=mask_prediction_conv_depth,
            masks_are_class_agnostic=masks_are_class_agnostic,
            convolve_then_upsample=convolve_then_upsample_masks))
  return mask_rcnn_box_predictor.MaskRCNNBoxPredictor(
      is_training=is_training,
      num_classes=num_classes,
      box_prediction_head=box_prediction_head,
      class_prediction_head=class_prediction_head,
      third_stage_heads=third_stage_heads)
def build_score_converter(score_converter_config, is_training):
  """Builds a score converter callable based on the config.

  Returns one of [tf.identity, tf.sigmoid] depending on the configured
  converter and on whether the BoxPredictor is for training or inference.

  Args:
    score_converter_config:
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.score_converter.
    is_training: Indicates whether the BoxPredictor is in training mode.

  Returns:
    Callable score converter op.

  Raises:
    ValueError: On unknown score converter.
  """
  converter_enum = box_predictor_pb2.WeightSharedConvolutionalBoxPredictor
  if score_converter_config == converter_enum.IDENTITY:
    return tf.identity
  if score_converter_config == converter_enum.SIGMOID:
    # Training consumes raw logits for the loss; sigmoid is deferred to
    # inference time.
    return tf.identity if is_training else tf.sigmoid
  raise ValueError('Unknown score converter.')
# (min, max) pair used to clip predicted box encodings; populated from the
# `box_encodings_clip_range` proto field in the build functions below.
BoxEncodingsClipRange = collections.namedtuple('BoxEncodingsClipRange',
                                               ['min', 'max'])
def build(argscope_fn, box_predictor_config, is_training, num_classes,
          add_background_class=True):
  """Builds a tf-slim box predictor based on the configuration.

  See box_predictor.proto for configurable options and box_predictor.py for
  more details.

  Args:
    argscope_fn: A function that takes the following inputs:
        * hyperparams_pb2.Hyperparams proto
        * a boolean indicating if the model is in training mode.
      and returns a tf slim argscope for Conv and FC hyperparameters.
    box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
      configuration.
    is_training: Whether the models is in training mode.
    num_classes: Number of classes to predict.
    add_background_class: Whether to add an implicit background class.

  Returns:
    box_predictor: box_predictor.BoxPredictor object.

  Raises:
    ValueError: On unknown box predictor.
  """
  if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
    raise ValueError('box_predictor_config not of type '
                     'box_predictor_pb2.BoxPredictor.')

  def _clip_range(cfg):
    """Returns the optional BoxEncodingsClipRange from cfg, or None."""
    if cfg.HasField('box_encodings_clip_range'):
      return BoxEncodingsClipRange(
          min=cfg.box_encodings_clip_range.min,
          max=cfg.box_encodings_clip_range.max)
    return None

  box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')

  if box_predictor_oneof == 'convolutional_box_predictor':
    cfg = box_predictor_config.convolutional_box_predictor
    conv_hyperparams_fn = argscope_fn(cfg.conv_hyperparams, is_training)
    return build_convolutional_box_predictor(
        is_training=is_training,
        num_classes=num_classes,
        add_background_class=add_background_class,
        conv_hyperparams_fn=conv_hyperparams_fn,
        use_dropout=cfg.use_dropout,
        dropout_keep_prob=cfg.dropout_keep_probability,
        box_code_size=cfg.box_code_size,
        kernel_size=cfg.kernel_size,
        num_layers_before_predictor=cfg.num_layers_before_predictor,
        min_depth=cfg.min_depth,
        max_depth=cfg.max_depth,
        apply_sigmoid_to_scores=cfg.apply_sigmoid_to_scores,
        class_prediction_bias_init=cfg.class_prediction_bias_init,
        use_depthwise=cfg.use_depthwise,
        box_encodings_clip_range=_clip_range(cfg))

  if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
    cfg = box_predictor_config.weight_shared_convolutional_box_predictor
    conv_hyperparams_fn = argscope_fn(cfg.conv_hyperparams, is_training)
    apply_batch_norm = cfg.conv_hyperparams.HasField('batch_norm')
    # During training phase, logits are used to compute the loss. Only apply
    # sigmoid at inference to make the inference graph TPU friendly.
    score_converter_fn = build_score_converter(cfg.score_converter,
                                               is_training)
    return build_weight_shared_convolutional_box_predictor(
        is_training=is_training,
        num_classes=num_classes,
        add_background_class=add_background_class,
        conv_hyperparams_fn=conv_hyperparams_fn,
        depth=cfg.depth,
        num_layers_before_predictor=cfg.num_layers_before_predictor,
        box_code_size=cfg.box_code_size,
        kernel_size=cfg.kernel_size,
        class_prediction_bias_init=cfg.class_prediction_bias_init,
        use_dropout=cfg.use_dropout,
        dropout_keep_prob=cfg.dropout_keep_probability,
        share_prediction_tower=cfg.share_prediction_tower,
        apply_batch_norm=apply_batch_norm,
        use_depthwise=cfg.use_depthwise,
        score_converter_fn=score_converter_fn,
        box_encodings_clip_range=_clip_range(cfg),
        keyword_args=None)

  if box_predictor_oneof == 'mask_rcnn_box_predictor':
    cfg = box_predictor_config.mask_rcnn_box_predictor
    fc_hyperparams_fn = argscope_fn(cfg.fc_hyperparams, is_training)
    conv_hyperparams_fn = None
    if cfg.HasField('conv_hyperparams'):
      conv_hyperparams_fn = argscope_fn(cfg.conv_hyperparams, is_training)
    return build_mask_rcnn_box_predictor(
        is_training=is_training,
        num_classes=num_classes,
        add_background_class=add_background_class,
        fc_hyperparams_fn=fc_hyperparams_fn,
        use_dropout=cfg.use_dropout,
        dropout_keep_prob=cfg.dropout_keep_probability,
        box_code_size=cfg.box_code_size,
        share_box_across_classes=cfg.share_box_across_classes,
        predict_instance_masks=cfg.predict_instance_masks,
        conv_hyperparams_fn=conv_hyperparams_fn,
        mask_height=cfg.mask_height,
        mask_width=cfg.mask_width,
        mask_prediction_num_conv_layers=cfg.mask_prediction_num_conv_layers,
        mask_prediction_conv_depth=cfg.mask_prediction_conv_depth,
        masks_are_class_agnostic=cfg.masks_are_class_agnostic,
        convolve_then_upsample_masks=cfg.convolve_then_upsample_masks)

  if box_predictor_oneof == 'rfcn_box_predictor':
    cfg = box_predictor_config.rfcn_box_predictor
    conv_hyperparams_fn = argscope_fn(cfg.conv_hyperparams, is_training)
    return rfcn_box_predictor.RfcnBoxPredictor(
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams_fn=conv_hyperparams_fn,
        crop_size=[cfg.crop_height, cfg.crop_width],
        num_spatial_bins=[cfg.num_spatial_bins_height,
                          cfg.num_spatial_bins_width],
        depth=cfg.depth,
        box_code_size=cfg.box_code_size)

  raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof))
def build_keras(hyperparams_fn, freeze_batchnorm, inplace_batchnorm_update,
                num_predictions_per_location_list, box_predictor_config,
                is_training, num_classes, add_background_class=True):
  """Builds a Keras-based box predictor based on the configuration.

  See box_predictor.proto for configurable options and box_predictor.py for
  more details.

  Args:
    hyperparams_fn: A function that takes a hyperparams_pb2.Hyperparams
      proto and returns a `hyperparams_builder.KerasLayerHyperparams`
      for Conv or FC hyperparameters.
    freeze_batchnorm: Whether to freeze batch norm parameters during
      training or not. When training with a small batch size (e.g. 1), it is
      desirable to freeze batch norm update and use pretrained batch norm
      params.
    inplace_batchnorm_update: Whether to update batch norm moving average
      values inplace. When this is false train op must add a control
      dependency on tf.graphkeys.UPDATE_OPS collection in order to update
      batch norm statistics.
    num_predictions_per_location_list: A list of integers representing the
      number of box predictions to be made per spatial location for each
      feature map.
    box_predictor_config: box_predictor_pb2.BoxPredictor proto containing
      configuration.
    is_training: Whether the models is in training mode.
    num_classes: Number of classes to predict.
    add_background_class: Whether to add an implicit background class.

  Returns:
    box_predictor: box_predictor.KerasBoxPredictor object.

  Raises:
    ValueError: On unknown box predictor, or one with no Keras box predictor.
  """
  if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
    raise ValueError('box_predictor_config not of type '
                     'box_predictor_pb2.BoxPredictor.')

  def _clip_range(cfg):
    """Returns the optional BoxEncodingsClipRange from cfg, or None."""
    if cfg.HasField('box_encodings_clip_range'):
      return BoxEncodingsClipRange(
          min=cfg.box_encodings_clip_range.min,
          max=cfg.box_encodings_clip_range.max)
    return None

  box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')

  if box_predictor_oneof == 'convolutional_box_predictor':
    cfg = box_predictor_config.convolutional_box_predictor
    conv_hyperparams = hyperparams_fn(cfg.conv_hyperparams)
    return build_convolutional_keras_box_predictor(
        is_training=is_training,
        num_classes=num_classes,
        add_background_class=add_background_class,
        conv_hyperparams=conv_hyperparams,
        freeze_batchnorm=freeze_batchnorm,
        inplace_batchnorm_update=inplace_batchnorm_update,
        num_predictions_per_location_list=num_predictions_per_location_list,
        use_dropout=cfg.use_dropout,
        dropout_keep_prob=cfg.dropout_keep_probability,
        box_code_size=cfg.box_code_size,
        kernel_size=cfg.kernel_size,
        num_layers_before_predictor=cfg.num_layers_before_predictor,
        min_depth=cfg.min_depth,
        max_depth=cfg.max_depth,
        class_prediction_bias_init=cfg.class_prediction_bias_init,
        use_depthwise=cfg.use_depthwise,
        box_encodings_clip_range=_clip_range(cfg))

  if box_predictor_oneof == 'weight_shared_convolutional_box_predictor':
    cfg = box_predictor_config.weight_shared_convolutional_box_predictor
    conv_hyperparams = hyperparams_fn(cfg.conv_hyperparams)
    apply_batch_norm = cfg.conv_hyperparams.HasField('batch_norm')
    # During training phase, logits are used to compute the loss. Only apply
    # sigmoid at inference to make the inference graph TPU friendly. This is
    # required because during TPU inference, model.postprocess is not called.
    score_converter_fn = build_score_converter(cfg.score_converter,
                                               is_training)
    return build_weight_shared_convolutional_keras_box_predictor(
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams=conv_hyperparams,
        freeze_batchnorm=freeze_batchnorm,
        inplace_batchnorm_update=inplace_batchnorm_update,
        num_predictions_per_location_list=num_predictions_per_location_list,
        depth=cfg.depth,
        num_layers_before_predictor=cfg.num_layers_before_predictor,
        box_code_size=cfg.box_code_size,
        kernel_size=cfg.kernel_size,
        add_background_class=add_background_class,
        class_prediction_bias_init=cfg.class_prediction_bias_init,
        use_dropout=cfg.use_dropout,
        dropout_keep_prob=cfg.dropout_keep_probability,
        share_prediction_tower=cfg.share_prediction_tower,
        apply_batch_norm=apply_batch_norm,
        use_depthwise=cfg.use_depthwise,
        apply_conv_hyperparams_to_heads=cfg.apply_conv_hyperparams_to_heads,
        apply_conv_hyperparams_pointwise=(
            cfg.apply_conv_hyperparams_pointwise),
        score_converter_fn=score_converter_fn,
        box_encodings_clip_range=_clip_range(cfg),
        keyword_args=None)

  if box_predictor_oneof == 'mask_rcnn_box_predictor':
    cfg = box_predictor_config.mask_rcnn_box_predictor
    fc_hyperparams = hyperparams_fn(cfg.fc_hyperparams)
    conv_hyperparams = None
    if cfg.HasField('conv_hyperparams'):
      conv_hyperparams = hyperparams_fn(cfg.conv_hyperparams)
    return build_mask_rcnn_keras_box_predictor(
        is_training=is_training,
        num_classes=num_classes,
        add_background_class=add_background_class,
        fc_hyperparams=fc_hyperparams,
        freeze_batchnorm=freeze_batchnorm,
        use_dropout=cfg.use_dropout,
        dropout_keep_prob=cfg.dropout_keep_probability,
        box_code_size=cfg.box_code_size,
        share_box_across_classes=cfg.share_box_across_classes,
        predict_instance_masks=cfg.predict_instance_masks,
        conv_hyperparams=conv_hyperparams,
        mask_height=cfg.mask_height,
        mask_width=cfg.mask_width,
        mask_prediction_num_conv_layers=cfg.mask_prediction_num_conv_layers,
        mask_prediction_conv_depth=cfg.mask_prediction_conv_depth,
        masks_are_class_agnostic=cfg.masks_are_class_agnostic,
        convolve_then_upsample_masks=cfg.convolve_then_upsample_masks)

  if box_predictor_oneof == 'rfcn_box_predictor':
    cfg = box_predictor_config.rfcn_box_predictor
    conv_hyperparams = hyperparams_fn(cfg.conv_hyperparams)
    return rfcn_keras_box_predictor.RfcnKerasBoxPredictor(
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams=conv_hyperparams,
        freeze_batchnorm=freeze_batchnorm,
        crop_size=[cfg.crop_height, cfg.crop_width],
        num_spatial_bins=[cfg.num_spatial_bins_height,
                          cfg.num_spatial_bins_width],
        depth=cfg.depth,
        box_code_size=cfg.box_code_size)

  raise ValueError(
      'Unknown box predictor for Keras: {}'.format(box_predictor_oneof))
# ---- file boundary: the following is a separate plotting script ----
# Copyright (c) 2014, Georgios Is. Detorakis (gdetor@gmail.com) and
# Nicolas P. Rougier (nicolas.rougier@inria.fr)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompany the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# This script illustrated the bivariate plot presented in [1].
import math
import numpy as np
import matplotlib
matplotlib.use('macosx')
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
import matplotlib.patheffects as PathEffects
# Global Matplotlib style: outward-pointing ticks with fixed major/minor
# sizes, LaTeX text rendering, and a Times serif font.
matplotlib.rc('xtick', direction = 'out')
matplotlib.rc('ytick', direction = 'out')
matplotlib.rc('xtick.major', size = 8, width=1)
matplotlib.rc('xtick.minor', size = 4, width=1)
matplotlib.rc('ytick.major', size = 8, width=1)
matplotlib.rc('ytick.minor', size = 4, width=1)
matplotlib.rc('text', usetex=True )
matplotlib.rc('font', serif='Times')
# (row, col) grid coordinates of the units highlighted in the figure;
# converted below to flat indices via a*32 + b (the map is 32x32 = 1024 units).
#indices = [(3, 18) , (26, 18) , (10, 7) , (25, 11) , (3, 21) , (8, 11) , (21, 14) , (20, 16) , (8, 19) , (16, 5) , (0, 9) , (17, 15) , (7, 20) , (20, 0) , (27, 19) , (4, 24) ]
indices = [(10, 21) , (29, 16) , (28, 14) , (20, 17) , (13, 19) , (3, 15) , (23, 18) , (0, 18) , (8, 31) , (16, 11) , (0, 20) , (24, 13) , (11, 2) , (1, 1) , (19, 20) , (2, 21)]
if __name__ == '__main__':
    # Load per-unit excitatory/inhibitory receptive-field areas
    # (one row per unit; column 0 = excitatory, column 1 = inhibitory).
    Z = np.load('areas-ref.npy')
    X, Y = Z[:, 0], Z[:, 1]

    fig = plt.figure(figsize=(8, 8), facecolor="white")
    ax = plt.subplot(1, 1, 1, aspect=1)

    # Scatter all units; the small offset keeps points off the axes lines.
    plt.scatter(X + 0.01, Y + 0.01, s=3, edgecolor='k', facecolor='k')

    # Highlight the hand-picked subset of units and label them A, B, C, ...
    # (`indices` holds (row, col) coordinates on the 32x32 map).
    I = [a * 32 + b for (a, b) in indices]
    plt.scatter(X[I], Y[I], s=5, color='k')
    for i in range(len(I)):
        # Fixed: the loop previously computed unused locals `x, y = X[i], Y[i]`
        # and `letter` from the wrong index; only X[I[i]], Y[I[i]] are used.
        plt.scatter(X[I[i]], Y[I[i]], s=40, facecolor='None', edgecolor='k')
        plt.annotate(" %c" % (chr(ord('A') + i)), (X[I[i]] + .25, Y[I[i]] + .25), weight='bold')

    # (The interactive point-picking helper that produced `indices` was
    # commented out here; removed — see repository history if needed.)

    plt.xlabel(r'Excitatory area (mm2)')
    # Fixed: label was missing its closing parenthesis ('(mm2').
    plt.ylabel(r'Inhibitory area (mm2)')
    plt.xscale('log')
    plt.yscale('log')
    plt.xticks([5, 10, 30], ['5', '10', '30'])
    plt.yticks([5, 10, 30], ['5', '10', '30'])
    plt.xlim(5, 30)
    plt.ylim(5, 30)
    plt.text(5.5, 26, "n = 1024")
    # Identity line: units above it have larger inhibitory than excitatory area.
    plt.plot([1, 100], [1, 100], ls='--', color='k')
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    plt.savefig('bivariate.pdf', dpi=72)
    plt.show()
|
#!/usr/bin/env python
# -*- encoding: utf-8; indent-tabs-mode: nil -*-
"""
crawler
~~~~~~~
desc
:copyright: (c) 2015 Menglong TAN.
"""
import os
import sys
import re
import urllib2
import time
import BeautifulSoup
import logging
# Module logger: DEBUG level, emitting to stdout with a timestamped format.
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
fmt = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(fmt)
logger.addHandler(ch)
class Star(object):
    """One celebrity record scraped from a listing page."""

    def __init__(self):
        # All fields default to the empty string until filled by the parser.
        self.name = ""
        self.gender = ""
        self.nation = ""
        self.birth = ""
        self.horoscope = ""
        self.height = ""

    def __repr__(self):
        # Tab-separated line, ready to be written to the output data file.
        fields = (self.name, self.gender, self.nation,
                  self.birth, self.horoscope, self.height)
        return "\t".join(fields)
def extract_list(url):
    """Fetch one listing page and parse it into a list of Star objects.

    Returns None (not an empty list) when the HTTP request fails, so the
    caller must handle a None result.
    """
    # Spoof a browser User-Agent; the site may reject unknown clients.
    headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5"}
    req = urllib2.Request(url, headers=headers)
    resp = None
    try:
        resp = urllib2.urlopen(req)
    except urllib2.HTTPError, e:
        print "Error Code:", e.code
        return
    except urllib2.URLError, e:
        print "Error Reason:", e.reason
        return
    soup = BeautifulSoup.BeautifulSoup(resp.read())
    stars = []
    # Each star lives in a <div class="item-intro left"> block.
    for star in soup.findAll("div", attrs={"class":"item-intro left"}):
        s = Star()
        s.name = str(star.find("a", attrs={"style":"overflow: hidden;text-overflow: ellipsis;white-space: nowrap;width:140px;"}).contents[0]).strip()
        # Each <p> starts with a labelled <span>; the (Chinese) label decides
        # which field the value belongs to: 性别=gender, 国籍=nationality,
        # 出生日期=birth date, 星座=horoscope, 身高=height.
        for p in star.findAll("p"):
            if str(p.contents[0]).startswith("<span class=\"txt\">性别:</span>"):
                s.gender = str(p.contents[1]).strip()
            elif str(p.contents[0]).startswith("<span class=\"txt\">国籍:</span>"):
                # NOTE(review): nationality reads contents[2] while the other
                # fields read contents[1] — presumably the markup differs here;
                # confirm against a live page.
                s.nation = str(p.contents[2]).strip()
            elif str(p.contents[0]).startswith("<span class=\"txt\">出生日期:</span>"):
                s.birth = str(p.contents[1]).strip()
            elif str(p.contents[0]).startswith("<span class=\"txt\">星座:</span>"):
                s.horoscope = str(p.contents[1].contents[0]).strip()
            elif str(p.contents[0]).startswith("<span class=\"txt\">身高:</span>"):
                s.height = str(p.contents[1]).strip()
        stars.append(s)
    return stars
if __name__ == "__main__":
    # Crawl every listing page and append the parsed records to stars.dat.
    list_url = "http://ku.ent.sina.com.cn/star/search&page_no="
    total_page = 1068
    f = open("stars.dat", "w+")
    try:
        for i in range(total_page):
            logger.info("progress: %d/%d", i + 1, total_page)
            # Fixed: extract_list() returns None when the HTTP request fails;
            # treat that as an empty page instead of crashing on iteration.
            stars = extract_list(list_url + str(i + 1)) or []
            for star in stars:
                f.write(str(star) + "\n")
            f.flush()
            # Be polite to the server between page fetches.
            time.sleep(2)
    finally:
        # Fixed: ensure the output file is closed even if a page raises.
        f.close()
|
import json
import logging
import sys
from functools import wraps
from django.conf import settings
from django.core.cache import caches
from django.core.validators import ValidationError, validate_email
from django.views.decorators.csrf import requires_csrf_token
from django.views.defaults import server_error
from django.http import (Http404, HttpResponse, HttpResponseNotAllowed,
HttpResponseServerError)
import dogstats_wrapper as dog_stats_api
from edxmako.shortcuts import render_to_response
import zendesk
from microsite_configuration import microsite
import calc
import track.views
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
log = logging.getLogger(__name__)
def ensure_valid_course_key(view_func):
    """
    Validate the course key argument before running the wrapped view.

    Intended only for views taking either ``course_key_string`` (studio)
    or ``course_id`` (lms); raises Http404 when the key fails to parse.
    """
    @wraps(view_func)
    def inner(request, *args, **kwargs):
        key_candidate = kwargs.get('course_key_string') or kwargs.get('course_id')
        if key_candidate is not None:
            try:
                CourseKey.from_string(key_candidate)
            except InvalidKeyError:
                raise Http404
        return view_func(request, *args, **kwargs)
    return inner
@requires_csrf_token
def jsonable_server_error(request, template_name='500.html'):
    """
    500 error handler: serves a JSON body to AJAX callers and proxies to
    Django's default `server_error` view for everyone else.
    """
    if not request.is_ajax():
        return server_error(request, template_name=template_name)
    payload = json.dumps({"error": "The edX servers encountered an error"})
    return HttpResponseServerError(payload)
def handle_500(template_path, context=None, test_func=None):
    """
    Decorator for view specific 500 error handling.
    Custom handling will be skipped only if test_func is passed and it returns False
    Usage:
        @handle_500(
            template_path='certificates/server-error.html',
            context={'error-info': 'Internal Server Error'},
            test_func=lambda request: request.GET.get('preview', None)
        )
        def my_view(request):
            # Any unhandled exception in this view would be handled by the handle_500 decorator
            # ...
    """
    def decorator(func):
        """
        Decorator to render custom html template in case of uncaught exception in wrapped function
        """
        @wraps(func)
        def inner(request, *args, **kwargs):
            """
            Execute the function in try..except block and return custom server-error page in case of unhandled exception
            """
            try:
                return func(request, *args, **kwargs)
            except Exception:  # pylint: disable=broad-except
                if settings.DEBUG:
                    # In debug mode let django process the 500 errors and display debug info for the developer
                    raise
                elif test_func is None or test_func(request):
                    # Display custom 500 page if either
                    #   1. test_func is None (meaning nothing to test)
                    #   2. or test_func(request) returns True
                    log.exception("Error in django view.")
                    return render_to_response(template_path, context)
                else:
                    # Do not show custom 500 error when test fails
                    raise
        return inner
    return decorator
def calculate(request):
    """
    Calculator in footer of every page.

    Evaluates the `equation` GET parameter and returns a JSON body
    `{"result": ...}`; evaluation errors are tracked and reported as
    'Invalid syntax' rather than raised.
    """
    equation = request.GET['equation']
    try:
        result = calc.evaluator({}, {}, equation)
    # Fixed: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed; any evaluation error is still best-effort.
    except Exception:  # pylint: disable=broad-except
        # Fixed: wrap map() in list() so the event stays JSON-serializable
        # on Python 3 (map returns an iterator there).
        event = {'error': list(map(str, sys.exc_info())),
                 'equation': equation}
        track.views.server_track(request, 'error:calc', event, page='calc')
        return HttpResponse(json.dumps({'result': 'Invalid syntax'}))
    return HttpResponse(json.dumps({'result': str(result)}))
class _ZendeskApi(object):
    """Thin wrapper around the zendesk package with group-lookup caching."""

    # Key prefix for cached group lookups in the default Django cache.
    CACHE_PREFIX = 'ZENDESK_API_CACHE'
    # Cache lifetime for group lookups: one hour, in seconds.
    CACHE_TIMEOUT = 60 * 60

    def __init__(self):
        """
        Instantiate the Zendesk API.
        All of `ZENDESK_URL`, `ZENDESK_USER`, and `ZENDESK_API_KEY` must be set
        in `django.conf.settings`.
        """
        self._zendesk_instance = zendesk.Zendesk(
            settings.ZENDESK_URL,
            settings.ZENDESK_USER,
            settings.ZENDESK_API_KEY,
            use_api_token=True,
            api_version=2,
            # As of 2012-05-08, Zendesk is using a CA that is not
            # installed on our servers
            client_args={"disable_ssl_certificate_validation": True}
        )

    def create_ticket(self, ticket):
        """
        Create the given `ticket` in Zendesk.
        The ticket should have the format specified by the zendesk package.
        Returns the new ticket's numeric ID (extracted from its URL).
        """
        ticket_url = self._zendesk_instance.create_ticket(data=ticket)
        return zendesk.get_id_from_url(ticket_url)

    def update_ticket(self, ticket_id, update):
        """
        Update the Zendesk ticket with id `ticket_id` using the given `update`.
        The update should have the format specified by the zendesk package.
        """
        self._zendesk_instance.update_ticket(ticket_id=ticket_id, data=update)

    def get_group(self, name):
        """
        Find the Zendesk group named `name`. Groups are cached for
        CACHE_TIMEOUT seconds.
        If a matching group exists, it is returned as a dictionary
        with the format specifed by the zendesk package.
        Otherwise, returns None.
        """
        cache = caches['default']
        cache_key = '{prefix}_group_{name}'.format(prefix=self.CACHE_PREFIX, name=name)
        cached = cache.get(cache_key)
        if cached:
            return cached
        # Cache miss: scan the full group list for a name match.
        groups = self._zendesk_instance.list_groups()['groups']
        for group in groups:
            if group['name'] == name:
                cache.set(cache_key, group, self.CACHE_TIMEOUT)
                return group
        # No matching group; note the miss is NOT cached, so lookups of
        # unknown names hit the API every time.
        return None
def _record_feedback_in_zendesk(
        realname,
        email,
        subject,
        details,
        tags,
        additional_info,
        group_name=None,
        require_update=False
):
    """
    Create a new user-requested Zendesk ticket.
    Once created, the ticket will be updated with a private comment containing
    additional information from the browser and server, such as HTTP headers
    and user state. Returns a boolean value indicating whether ticket creation
    was successful, regardless of whether the private comment update succeeded.
    If `group_name` is provided, attaches the ticket to the matching Zendesk group.
    If `require_update` is provided, returns False when the update does not
    succeed. This allows using the private comment to add necessary information
    which the user will not see in followup emails from support.
    """
    zendesk_api = _ZendeskApi()
    additional_info_string = (
        u"Additional information:\n\n" +
        u"\n".join(u"%s: %s" % (key, value) for (key, value) in additional_info.items() if value is not None)
    )
    # Tag all issues with LMS to distinguish channel in Zendesk; requested by student support team
    zendesk_tags = list(tags.values()) + ["LMS"]
    # Per edX support, we would like to be able to route white label feedback items
    # via tagging
    white_label_org = microsite.get_value('course_org_filter')
    if white_label_org:
        zendesk_tags = zendesk_tags + ["whitelabel_{org}".format(org=white_label_org)]
    new_ticket = {
        "ticket": {
            "requester": {"name": realname, "email": email},
            "subject": subject,
            "comment": {"body": details},
            "tags": zendesk_tags
        }
    }
    group = None
    if group_name is not None:
        group = zendesk_api.get_group(group_name)
        if group is not None:
            new_ticket['ticket']['group_id'] = group['id']
    try:
        ticket_id = zendesk_api.create_ticket(new_ticket)
        # Fixed: only warn when a group was actually requested but could not
        # be found; previously this warned ("named None") for every ticket
        # created without a group_name.
        if group_name is not None and group is None:
            # Support uses Zendesk groups to track tickets. In case we
            # haven't been able to correctly group this ticket, log its ID
            # so it can be found later.
            log.warning('Unable to find group named %s for Zendesk ticket with ID %s.', group_name, ticket_id)
    except zendesk.ZendeskError:
        log.exception("Error creating Zendesk ticket")
        return False
    # Additional information is provided as a private update so the information
    # is not visible to the user.
    ticket_update = {"ticket": {"comment": {"public": False, "body": additional_info_string}}}
    try:
        zendesk_api.update_ticket(ticket_id, ticket_update)
    except zendesk.ZendeskError:
        log.exception("Error updating Zendesk ticket with ID %s.", ticket_id)
        # The update is not strictly necessary, so do not indicate
        # failure to the user unless it has been requested with
        # `require_update`.
        if require_update:
            return False
    return True
DATADOG_FEEDBACK_METRIC = "lms_feedback_submissions"
def _record_feedback_in_datadog(tags):
    """Emit one Datadog increment for a feedback submission, tagged with `tags`."""
    formatted = [u"{k}:{v}".format(k=key, v=value) for key, value in tags.items()]
    dog_stats_api.increment(DATADOG_FEEDBACK_METRIC, tags=formatted)
def submit_feedback(request):
    """
    Create a new user-requested ticket, currently implemented with Zendesk.
    If feedback submission is not enabled, any request will raise `Http404`.
    If any configuration parameter (`ZENDESK_URL`, `ZENDESK_USER`, or
    `ZENDESK_API_KEY`) is missing, any request will raise an `Exception`.
    The request must be a POST request specifying `subject` and `details`.
    If the user is not authenticated, the request must also specify `name` and
    `email`. If the user is authenticated, the `name` and `email` will be
    populated from the user's information. If any required parameter is
    missing, a 400 error will be returned indicating which field is missing and
    providing an error message. If Zendesk ticket creation fails, 500 error
    will be returned with no body; if ticket creation succeeds, an empty
    successful response (200) will be returned.
    """
    if not settings.FEATURES.get('ENABLE_FEEDBACK_SUBMISSION', False):
        raise Http404()
    if request.method != "POST":
        return HttpResponseNotAllowed(["POST"])
    if (
        not settings.ZENDESK_URL or
        not settings.ZENDESK_USER or
        not settings.ZENDESK_API_KEY
    ):
        raise Exception("Zendesk enabled but not configured")
    def build_error_response(status_code, field, err_msg):
        # Uniform JSON error body naming the offending field.
        return HttpResponse(json.dumps({"field": field, "error": err_msg}), status=status_code)
    additional_info = {}
    required_fields = ["subject", "details"]
    # Anonymous users must also supply their name and e-mail address.
    if not request.user.is_authenticated():
        required_fields += ["name", "email"]
    required_field_errs = {
        "subject": "Please provide a subject.",
        "details": "Please provide details.",
        "name": "Please provide your name.",
        "email": "Please provide a valid e-mail.",
    }
    # Reject the first missing or empty required field with a 400.
    for field in required_fields:
        if field not in request.POST or not request.POST[field]:
            return build_error_response(400, field, required_field_errs[field])
    subject = request.POST["subject"]
    details = request.POST["details"]
    # Optional classification tags forwarded to Zendesk/Datadog.
    tags = dict(
        [(tag, request.POST[tag]) for tag in ["issue_type", "course_id"] if tag in request.POST]
    )
    if request.user.is_authenticated():
        realname = request.user.profile.name
        email = request.user.email
        additional_info["username"] = request.user.username
    else:
        realname = request.POST["name"]
        email = request.POST["email"]
        try:
            validate_email(email)
        except ValidationError:
            return build_error_response(400, "email", required_field_errs["email"])
    # Capture request metadata for the private Zendesk comment.
    for header, pretty in [
        ("HTTP_REFERER", "Page"),
        ("HTTP_USER_AGENT", "Browser"),
        ("REMOTE_ADDR", "Client IP"),
        ("SERVER_NAME", "Host")
    ]:
        additional_info[pretty] = request.META.get(header)
    success = _record_feedback_in_zendesk(realname, email, subject, details, tags, additional_info)
    _record_feedback_in_datadog(tags)
    return HttpResponse(status=(200 if success else 500))
def info(request):
    """Render the info page linked from the main header."""
    context = {}
    return render_to_response("info.html", context)
# From http://djangosnippets.org/snippets/1042/
def parse_accept_header(accept):
    """Parse the Accept header *accept*, returning a list of triples
    (media_type, media_params, q_value), ordered by descending q values.
    """
    result = []
    for media_range in accept.split(","):
        parts = media_range.split(";")
        media_type = parts.pop(0)
        media_params = []
        # Per RFC, q defaults to 1.0 when no q parameter is given.
        q = 1.0
        for part in parts:
            (key, value) = part.lstrip().split("=", 1)
            if key == "q":
                q = float(value)
            else:
                media_params.append((key, value))
        result.append((media_type, tuple(media_params), q))
    # Fixed: `list.sort(cmp_function)` and the `cmp` builtin are Python 2
    # only; sort by q value descending with a key instead (same ordering,
    # both sorts are stable).
    result.sort(key=lambda item: item[2], reverse=True)
    return result
def accepts(request, media_type):
    """Return whether this request has an Accept header that matches type"""
    header = request.META.get("HTTP_ACCEPT", "")
    accepted_types = [entry[0] for entry in parse_accept_header(header)]
    return media_type in accepted_types
|
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
# MIMICS THE requests API (http://docs.python-requests.org/en/latest/)
# DEMANDS data IS A JSON-SERIALIZABLE STRUCTURE
# WITH ADDED default_headers THAT CAN BE SET USING mo_logs.settings
# EG
# {"debug.constants":{
# "pyLibrary.env.http.default_headers":{"From":"klahnakoski@mozilla.com"}
# }}
from __future__ import absolute_import
from __future__ import division
from contextlib import closing
from copy import copy
from mmap import mmap
from numbers import Number
from tempfile import TemporaryFile
from requests import sessions, Response
from jx_python import jx
from mo_dots import Data, coalesce, wrap, set_default, unwrap, Null
from mo_future import text_type, PY2
from mo_json import value2json, json2value
from mo_logs import Log
from mo_logs.strings import utf82unicode, unicode2utf8
from mo_logs.exceptions import Except
from mo_math import Math
from mo_threads import Lock
from mo_threads import Till
from mo_times.durations import Duration
from pyLibrary import convert
from pyLibrary.env.big_data import safe_size, ibytes2ilines, icompressed2ibytes
DEBUG = False
FILE_SIZE_LIMIT = 100 * 1024 * 1024  # bytes; not referenced in this module chunk
MIN_READ_SIZE = 8 * 1024  # bytes pulled from the stream per chunk by Generator_usingStream
ZIP_REQUEST = False  # default for request(zip=...) when the caller does not specify
default_headers = Data()  # TODO: MAKE THIS VARIABLE A SPECIAL TYPE OF EXPECTED MODULE PARAMETER SO IT COMPLAINS IF NOT SET
default_timeout = 600  # seconds; used when the caller supplies no timeout
_warning_sent = False  # ensures the "default_headers not set" warning fires at most once
def request(method, url, zip=None, retry=None, **kwargs):
    """
    JUST LIKE requests.request() BUT WITH DEFAULT HEADERS AND FIXES
    DEMANDS data IS ONE OF:
    * A JSON-SERIALIZABLE STRUCTURE, OR
    * LIST OF JSON-SERIALIZABLE STRUCTURES, OR
    * None
    Parameters
    * zip - ZIP THE REQUEST BODY, IF BIG ENOUGH
    * json - JSON-SERIALIZABLE STRUCTURE
    * retry - {"times": x, "sleep": y} STRUCTURE
    THE BYTE_STRINGS (b"") ARE NECESSARY TO PREVENT httplib.py FROM **FREAKING OUT**
    IT APPEARS requests AND httplib.py SIMPLY CONCATENATE STRINGS BLINDLY, WHICH
    INCLUDES url AND headers
    """
    global _warning_sent
    # One-time nudge to configure the module-level default_headers.
    if not default_headers and not _warning_sent:
        _warning_sent = True
        Log.warning(
            "The pyLibrary.env.http module was meant to add extra "
            "default headers to all requests, specifically the 'Referer' "
            "header with a URL to the project. Use the `pyLibrary.debug.constants.set()` "
            "function to set `pyLibrary.env.http.default_headers`"
        )
    if isinstance(url, list):
        # TRY MANY URLS
        # Try each URL in turn; return the first response whose status does
        # not round to 400/500 (nearest-hundred bucketing), or the last
        # response when all URLs are exhausted.
        failures = []
        for remaining, u in jx.countdown(url):
            try:
                response = request(method, u, zip=zip, retry=retry, **kwargs)
                if Math.round(response.status_code, decimal=-2) not in [400, 500]:
                    return response
                if not remaining:
                    return response
            except Exception as e:
                e = Except.wrap(e)
                failures.append(e)
        Log.error(u"Tried {{num}} urls", num=len(url), cause=failures)
    if 'session' in kwargs:
        # Caller owns the session; `sess` is Null so closing() below is a no-op.
        session = kwargs['session']
        del kwargs['session']
        sess = Null
    else:
        sess = session = sessions.Session()
    session.headers.update(default_headers)
    with closing(sess):
        if zip is None:
            zip = ZIP_REQUEST
        if isinstance(url, text_type):
            # httplib.py WILL **FREAK OUT** IF IT SEES ANY UNICODE
            url = url.encode('ascii')
        _to_ascii_dict(kwargs)
        timeout = kwargs['timeout'] = coalesce(kwargs.get('timeout'), default_timeout)
        # Normalize `retry` to a Data with `times` and `sleep` (seconds).
        if retry == None:
            retry = Data(times=1, sleep=0)
        elif isinstance(retry, Number):
            retry = Data(times=retry, sleep=1)
        else:
            retry = wrap(retry)
            if isinstance(retry.sleep, Duration):
                retry.sleep = retry.sleep.seconds
            set_default(retry, {"times": 1, "sleep": 0})
        if 'json' in kwargs:
            # Serialize the `json` parameter into the request body.
            kwargs['data'] = value2json(kwargs['json']).encode('utf8')
            del kwargs['json']
        try:
            headers = kwargs['headers'] = unwrap(coalesce(kwargs.get('headers'), {}))
            set_default(headers, {'Accept-Encoding': 'compress, gzip'})
            if zip and len(coalesce(kwargs.get('data'))) > 1000:
                # Compress bodies over ~1KB when zipping is enabled.
                compressed = convert.bytes2zip(kwargs['data'])
                headers['content-encoding'] = 'gzip'
                kwargs['data'] = compressed
                _to_ascii_dict(headers)
            else:
                _to_ascii_dict(headers)
        except Exception as e:
            Log.error(u"Request setup failure on {{url}}", url=url, cause=e)
        errors = []
        for r in range(retry.times):
            if r:
                # Sleep between attempts (not before the first one).
                Till(seconds=retry.sleep).wait()
            try:
                if DEBUG:
                    Log.note(u"http {{method}} to {{url}}", method=method, url=url)
                return session.request(method=method, url=url, **kwargs)
            except Exception as e:
                errors.append(Except.wrap(e))
        # NOTE(review): errors[0] is an Except instance; this substring test
        # relies on Except supporting the `in` operator over its message
        # text — confirm against mo_logs.exceptions.
        if " Read timed out." in errors[0]:
            Log.error(u"Tried {{times}} times: Timeout failure (timeout was {{timeout}}", timeout=timeout, times=retry.times, cause=errors[0])
        else:
            Log.error(u"Tried {{times}} times: Request failure of {{url}}", url=url, times=retry.times, cause=errors[0])
if PY2:
    def _to_ascii_dict(headers):
        # Python 2: httplib concatenates header strings blindly, so any
        # unicode key/value poisons the request; coerce every key and value
        # to ascii byte strings, mutating `headers` in place.
        if headers is None:
            return
        for k, v in copy(headers).items():
            if isinstance(k, text_type):
                del headers[k]
                if isinstance(v, text_type):
                    headers[k.encode('ascii')] = v.encode('ascii')
                else:
                    headers[k.encode('ascii')] = v
            elif isinstance(v, text_type):
                headers[k] = v.encode('ascii')
else:
    def _to_ascii_dict(headers):
        # Python 3: text headers are handled downstream; nothing to do.
        pass
def get(url, **kwargs):
    """GET `url`; follows redirects and streams the response by default."""
    for key, default in (('allow_redirects', True), ('stream', True)):
        kwargs.setdefault(key, default)
    resp = request('get', url, **kwargs)
    return HttpResponse(resp)
def get_json(url, **kwargs):
    """
    GET `url` and decode the response body as JSON.

    On a status code that rounds to 400/500, or on undecodable JSON,
    reports through Log.error (presumably fatal per mo_logs — confirm).
    """
    response = get(url, **kwargs)
    try:
        c = response.all_content
        return json2value(utf82unicode(c))
    except Exception as e:
        # Round the status to the nearest hundred to bucket 4xx/5xx.
        if Math.round(response.status_code, decimal=-2) in [400, 500]:
            Log.error(u"Bad GET response: {{code}}", code=response.status_code)
        else:
            Log.error(u"Good GET requests, but bad JSON", cause=e)
def options(url, **kwargs):
    """OPTIONS request; follows redirects and streams the response by default."""
    for key, default in (('allow_redirects', True), ('stream', True)):
        kwargs.setdefault(key, default)
    return HttpResponse(request('options', url, **kwargs))
def head(url, **kwargs):
    """HEAD request; redirects are NOT followed; streams by default."""
    for key, default in (('allow_redirects', False), ('stream', True)):
        kwargs.setdefault(key, default)
    return HttpResponse(request('head', url, **kwargs))
def post(url, **kwargs):
    """POST request; streams the response by default."""
    kwargs.setdefault('stream', True)
    resp = request('post', url, **kwargs)
    return HttpResponse(resp)
def delete(url, **kwargs):
    # NOTE(review): this definition is shadowed by the second `delete`
    # defined further down (which also sets stream=False); this version
    # is never reachable and one of the two should be removed.
    return HttpResponse(request('delete', url, **kwargs))
def post_json(url, **kwargs):
    """
    POST `url` with a JSON body and decode the JSON response.

    Accepts the payload as either `json` or `data`; reports through
    Log.error when neither is given, when the response body is not JSON,
    or when the status code is not 200/201.
    """
    if 'json' in kwargs:
        kwargs['data'] = unicode2utf8(value2json(kwargs['json']))
    elif 'data' in kwargs:
        # `data` is serialized too; it is treated as a structure, not raw bytes.
        kwargs['data'] = unicode2utf8(value2json(kwargs['data']))
    else:
        Log.error(u"Expecting `json` parameter")
    response = post(url, **kwargs)
    c = response.content
    try:
        details = json2value(utf82unicode(c))
    except Exception as e:
        Log.error(u"Unexpected return value {{content}}", content=c, cause=e)
    if response.status_code not in [200, 201]:
        # The decoded body is assumed to describe the failure.
        Log.error(u"Bad response", cause=Except.wrap(details))
    return details
def put(url, **kwargs):
    """PUT request; no extra defaults are applied."""
    resp = request('put', url, **kwargs)
    return HttpResponse(resp)
def patch(url, **kwargs):
    """PATCH request; streams the response by default."""
    kwargs.setdefault('stream', True)
    resp = request('patch', url, **kwargs)
    return HttpResponse(resp)
def delete(url, **kwargs):
    # NOTE(review): duplicate definition — this later `delete` wins over
    # the earlier one above; unlike it, this one defaults to a
    # non-streaming response.
    kwargs.setdefault('stream', False)
    return HttpResponse(request('delete', url, **kwargs))
class HttpResponse(Response):
    # Wrapper that re-brands an existing requests.Response IN PLACE
    # (no data is copied) to add disk-backed content buffering.

    def __new__(cls, resp):
        # Mutate the class of the existing response rather than copying it.
        resp.__class__ = HttpResponse
        return resp

    def __init__(self, resp):
        # `resp` is ignored: __new__ already returned the re-classed
        # response object. (The stray `pass` is a no-op left in place.)
        pass
        self._cached_content = None

    @property
    def all_content(self):
        # response.content WILL LEAK MEMORY (?BECAUSE OF PYPY"S POOR HANDLING OF GENERATORS?)
        # THE TIGHT, SIMPLE, LOOP TO FILL blocks PREVENTS THAT LEAK
        if self._content is not False:
            # Body already materialized by requests; reuse it.
            self._cached_content = self._content
        elif self._cached_content is None:
            def read(size):
                # Pull decoded bytes straight from urllib3; returns None
                # once the underlying file object is gone (stream drained).
                if self.raw._fp.fp is not None:
                    return self.raw.read(amt=size, decode_content=True)
                else:
                    self.close()
                    return None
            # safe_size may return bytes or a file-like object for large
            # bodies — presumably spilling to disk; confirm in big_data.
            self._cached_content = safe_size(Data(read=read))
        if hasattr(self._cached_content, "read"):
            # File-like cache: rewind so every caller reads from the start.
            self._cached_content.seek(0)
        return self._cached_content

    @property
    def all_lines(self):
        return self.get_all_lines()

    def get_all_lines(self, encoding='utf8', flexible=False):
        # Yield decoded lines from the raw stream, transparently
        # decompressing gzip/zip payloads (by header or .gz URL suffix).
        try:
            iterator = self.raw.stream(4096, decode_content=False)
            if self.headers.get('content-encoding') == 'gzip':
                return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
            elif self.headers.get('content-type') == 'application/zip':
                return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
            elif self.url.endswith('.gz'):
                return ibytes2ilines(icompressed2ibytes(iterator), encoding=encoding, flexible=flexible)
            else:
                return ibytes2ilines(iterator, encoding=encoding, flexible=flexible, closer=self.close)
        except Exception as e:
            Log.error(u"Can not read content", cause=e)
class Generator_usingStream(object):
    """
    A BYTE GENERATOR USING A STREAM, AND BUFFERING IT FOR RE-PLAY

    The first instance owns the shared state (temp file + mmap buffer);
    each copy produced by __iter__() shares it via `_shared`. The last
    instance to close() releases the stream, buffer and file.
    """

    def __init__(self, stream, length, _shared=None):
        """
        :param stream: THE STREAM WE WILL GET THE BYTES FROM
        :param length: THE MAX NUMBER OF BYTESS WE ARE EXPECTING
        :param _shared: FOR INTERNAL USE TO SHARE THE BUFFER
        :return:
        """
        self.position = 0
        if not _shared:
            # Fixed: the TemporaryFile was previously created even for
            # shared copies, leaking one open file handle per iterator.
            file_ = TemporaryFile()
            self.shared = Data(
                length=length,
                locker=Lock(),
                stream=stream,
                done_read=0,
                file=file_,
                buffer=mmap(file_.fileno(), length),
                # Fixed: ref_count was never initialized for the owner, so
                # close() decremented a missing value and a sole owner's
                # resources were never released. Owner counts as 1.
                ref_count=1
            )
        else:
            self.shared = _shared
            self.shared.ref_count += 1

    def __iter__(self):
        return Generator_usingStream(None, self.shared.length, self.shared)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def next(self):
        if self.position >= self.shared.length:
            raise StopIteration
        end = min(self.position + MIN_READ_SIZE, self.shared.length)
        s = self.shared
        with s.locker:
            # Pull more bytes from the stream until the buffer covers `end`.
            while end > s.done_read:
                data = s.stream.read(MIN_READ_SIZE)
                s.buffer.write(data)
                s.done_read += MIN_READ_SIZE
                if s.done_read >= s.length:
                    s.done_read = s.length
                    s.stream.close()
        try:
            return s.buffer[self.position:end]
        finally:
            self.position = end

    # Python 3 iterator protocol; harmless alias under Python 2.
    __next__ = next

    def close(self):
        # Fixed: guard against repeated close() (e.g. explicit close followed
        # by __del__), which previously dereferenced self.shared == None.
        if self.shared is None:
            return
        with self.shared.locker:
            if self.shared:
                s, self.shared = self.shared, None
                s.ref_count -= 1
                if s.ref_count == 0:
                    # Last user: release the stream, mmap buffer and temp file.
                    try:
                        s.stream.close()
                    except Exception:
                        pass
                    try:
                        s.buffer.close()
                    except Exception:
                        pass
                    try:
                        s.file.close()
                    except Exception:
                        pass

    def __del__(self):
        self.close()
|
import pytest
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from poliastro.bodies import (
Sun, Mercury, Venus, Earth, Moon, Mars,
Jupiter, Saturn, Uranus, Neptune, Pluto,
Body
)
from poliastro.patched_conics import compute_soi
def test_compute_soi():
    # Data from Table A.2., Curtis "Orbital Mechanics for Engineering Students"
    data = [
        # body, SOI radius (m); None means "no reference value" and is skipped
        (Sun, None),
        (Mercury, 1.12e8),
        (Venus, 6.16e8),
        (Earth, 9.25e8),
        # (Moon, 6.61e7),
        (Mars, 5.77e8),
        (Jupiter, 4.82e10),
        (Saturn, 5.48e10),
        (Uranus, 5.18e10),
        (Neptune, 8.66e10),
        # (Pluto, 3.08e9)
    ]
    for body, expected_r_SOI in data:
        if expected_r_SOI is None:
            continue
        assert_quantity_allclose(compute_soi(body), expected_r_SOI * u.m, rtol=1e-1)
@pytest.mark.parametrize("missing_body", [Moon, Pluto])
def test_compute_missing_body_soi_raises_error(missing_body):
    # compute_soi must refuse bodies whose semimajor axis it cannot derive.
    with pytest.raises(RuntimeError) as excinfo:
        compute_soi(missing_body)
    expected = "To compute the semimajor axis for Moon and Pluto use the JPL ephemeris"
    assert expected in excinfo.exconly()
def test_compute_soi_given_a():
    # With identical gravitational parameters for parent and body, the
    # explicitly supplied semimajor axis must come back unchanged.
    k = 1 * u.km ** 3 / u.s ** 2
    parent = Body(None, k, "Parent")
    child = Body(parent, k, "Body")
    assert compute_soi(child, 1 * u.km) == 1 * u.km
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
from module_info import get_function, get_method_attr, get_function_doc, get_function_short_description
from module_info import create_header_str
from pandas_info import get_pandas_modules, init_pandas_logging
from sdc_info import get_sdc_modules, init_sdc_logging
from texttable import Texttable
import os
PANDAS_API_STR = 'Pandas API: '  # This substring prepends Pandas API name in the documentation
APIREF_RELPATH = r'./_api_ref/'  # Relative path to API Reference folder

# Maps a hand-written RST file name to the Pandas (sub)modules it documents;
# modules listed here are skipped by the docstring-based generator.
RST_MODULES = {
    'api_reference.rst': ['pandas'],
    'io.rst': ['pandas.io.api', 'pandas.io.clipboards', 'pandas.io.common', 'pandas.io.excel',
               'pandas.io.feather_format', 'pandas.io.formats.console', 'pandas.io.formats.format',
               'pandas.io.formats.printing', 'pandas.io.gbq', 'pandas.io.html', 'pandas.io.json',
               'pandas.io.msgpack', 'pandas.io.msgpack.exceptions', 'pandas.io.packers', 'pandas.io.parquet',
               'pandas.io.parsers', 'pandas.io.pickle', 'pandas.io.pytables', 'pandas.io.sas',
               'pandas.io.sas.sasreader', 'pandas.io.spss', 'pandas.io.sql', 'pandas.io.stata'],
    'series.rst': ['pandas.Series'],
    'dataframe.rst': ['pandas.DataFrame'],
    # Fixed: a stray '' literal on its own line was being implicitly
    # concatenated with the following key; removed for clarity (the
    # resulting key is unchanged).
    'general_functions.rst': [],
}

pandas_modules = []  # List of Pandas submodules along with its functions and classes
sdc_modules = []  # List of Intel SDC submodules along with its functions and classes
def generate_module_doc(the_module):
    """
    Build reST documentation text for `the_module` from its docstring, or
    return None when the module is already covered by one of the
    hand-written RST files listed in RST_MODULES.
    """
    module_name = the_module['module_name']

    # First, look up if there is an RST file documenting this particular module.
    for rst in RST_MODULES:
        if module_name in RST_MODULES[rst]:
            return None  # Hand-written documentation exists; nothing to generate

    # No RST file: create the documentation based on the module's docstring.
    module_obj = the_module['module_object']
    module_description = get_function_short_description(module_obj)
    # Fixed: .strip() was previously applied BEFORE the None check, so a
    # missing description raised AttributeError instead of falling back to ''.
    if module_description is None:
        module_description = ''
    else:
        module_description = module_description.strip()
    return (module_description + '\n\nFor details please refer to Pandas API Reference for :py:mod:`' +
            module_name + '`\n\n')
def generate_api_index_for_module(the_module):
    """
    Build the RST fragment describing *the_module*.

    Functions are rendered first as a Texttable of (name, short description)
    rows, then each class with its attributes and methods. Returns an empty
    string when the module has nothing to document.

    :param the_module: dict with 'module_name', 'module_object', 'functions'
        and 'classes' keys
    :return: RST text (possibly empty)
    """
    module_description = generate_module_doc(the_module)
    if module_description is None:
        module_description = ''
    module_doc = ''
    module_header_flag = False
    # Document functions first, if any
    tab = Texttable()
    for func in the_module['functions']:  # Iterate through the module functions
        name = func['function_name']
        obj = getattr(the_module['module_object'], name)  # Retrieve the function object
        description = get_function_short_description(obj).strip()
        tab.add_rows([[name, description]], header=False)
    module_name = ''
    func_doc = tab.draw()
    if func_doc and func_doc != '':  # If the function list is not empty then add module name to the document
        module_name = the_module['module_name']
        module_doc += create_header_str(module_name, '~') + '\n\n' + module_description + '\n\n' + \
            create_header_str('Functions:', '-') + \
            '\n\n' + func_doc + '\n\n'
        module_header_flag = True
    # Document classes
    classes_header_flag = False
    for the_class in the_module['classes']:  # Iterate through the module classes
        tab.reset()
        class_name = the_class['class_name']
        class_obj = the_class['class_object']
        class_description = class_obj.__doc__
        if not class_description:
            class_description = ''
        class_doc = ''
        class_header_flag = False
        # Document class attributes first, if any
        for attr in the_class['class_attributes']:  # Iterate through the class attributes
            name = attr
            obj = getattr(the_class['class_object'], name)  # Retrieve the attribute object
            description = get_function_short_description(obj).strip()
            tab.add_rows([[name, description]], header=False)
        attr_doc = tab.draw()
        if attr_doc and attr_doc != '':  # If the attribute list is not empty then add class name to the document
            class_header_flag = True
            class_doc += create_header_str(class_name, '^') + '\n\n' + class_description + '\n\n' + \
                create_header_str('Attributes:', '+') + \
                '\n\n' + attr_doc + '\n\n'
        # Document class methods, if any
        # NOTE(review): the table is NOT reset after drawing the attributes,
        # so method_doc below also includes the attribute rows — confirm this
        # is intended before changing it.
        for method in the_class['class_methods']:  # Iterate through the class methods
            name = method
            obj = getattr(the_class['class_object'], name)  # Retrieve the method object
            description = get_function_short_description(obj).strip()
            tab.add_rows([[name, description]], header=False)
        method_doc = tab.draw()
        if method_doc and method_doc != '':  # If the method list is not empty then add class name to the document
            if not class_header_flag:
                class_doc += create_header_str(class_name, '^') + '\n\n' + class_description + '\n\n' + \
                    create_header_str('Methods:', '+') + \
                    '\n\n' + method_doc + '\n\n'
                class_header_flag = True
            else:
                class_doc += create_header_str('Methods:', '+') + \
                    '\n\n' + method_doc + '\n\n'
        if not module_header_flag:  # There is no module header yet
            if class_header_flag:  # There were methods/attributes for the class
                module_doc += create_header_str(module_name, '~') + '\n\n' + module_description + '\n\n' + \
                    create_header_str('Classes:', '-') + \
                    '\n\n' + class_doc + '\n\n'
                module_header_flag = True
                classes_header_flag = True
        else:  # The module header has been added
            if class_header_flag:  # There are new methods/attributes for the class
                if not classes_header_flag:  # First class of the module description
                    module_doc += create_header_str('Classes:', '-') + '\n\n'
                module_doc += '\n\n' + class_doc + '\n\n'
    return module_doc
def get_module_rst_fname(the_module):
    """Return the RST file path for a module (dots become subdirectories)."""
    dotted_name = the_module['module_name']
    return APIREF_RELPATH + dotted_name.replace('.', '/') + '.rst'
def generate_api_index():
    """Write one RST file per documented module and return the toctree index."""
    doc = '.. _apireference::\n\nAPI Reference\n*************\n\n' \
        '.. toctree::\n :maxdepth: 1\n\n'
    for the_module in pandas_modules:  # One entry per Pandas submodule
        module_doc = generate_api_index_for_module(the_module)
        if module_doc:  # Skip modules that produced no documentation
            fname = get_module_rst_fname(the_module)
            write_rst(fname, module_doc)
            doc += ' ' + fname + '\n'
    return doc
def generate_sdc_object_doc(sdc_func):
    """
    Create the RST file name and combined docstring for one SDC function.

    The document merges the Pandas docstring (description, Parameters,
    Returns, Raises) with the SDC docstring's See Also, Limitations and
    Examples sections.

    :param sdc_func: SDC function object whose docstring names the Pandas API
    :return: tuple (file_name, docstring)
    :raises NameError: when the referenced Pandas API cannot be found
    """
    sdc_titled_sections = get_function_doc(sdc_func, True)
    sdc_see_also_text = next((sec['text'] for sec in sdc_titled_sections
                              if sec['title'].lower().strip() == 'see also'), '')
    sdc_limitations_text = next((sec['text'] for sec in sdc_titled_sections
                                 if sec['title'].lower().strip() == 'limitations'), '')
    sdc_examples_text = next((sec['text'] for sec in sdc_titled_sections
                              if sec['title'].lower().strip() == 'examples'), '')
    # Get respective Pandas API name
    pandas_name = sdc_titled_sections[0]['text'].strip()
    pandas_name = pandas_name.replace(PANDAS_API_STR, '')
    pandas_name = pandas_name.replace('\n', '')
    # Find respective Pandas API
    doc_object = get_method_attr(pandas_name, pandas_modules)
    if not doc_object:
        doc_object = get_function(pandas_name, pandas_modules)
    if not doc_object:
        # Bug fix: the original message lacked spaces around the API name
        raise NameError('Pandas API: ' + pandas_name + ' does not exist')
    # Extract Pandas API docstring as the list of sections.
    # (The previous `if doc_object:` guard was dead code: we either raised
    # above or doc_object is truthy here.)
    pandas_titled_sections = get_function_doc(doc_object, False)
    # Form final docstring which is a combination of Pandas docstring for the description, Parameters section,
    # Raises section, Returns section. See Also, Limitations and Examples sections (if any) are taken from SDC docstring
    short_description_section = pandas_titled_sections[0]['text'] + '\n\n'
    pandas_titled_sections.pop(0)
    long_description_section = ''
    # Bug fix: guard against exhausting the list while collecting untitled
    # description sections (the original could raise IndexError).
    while pandas_titled_sections and pandas_titled_sections[0]['title'] == '':
        long_description_section += pandas_titled_sections[0]['text'] + '\n\n'
        pandas_titled_sections.pop(0)
    raises_section = parameters_section = returns_section = see_also_section = \
        limitations_section = examples_section = ''
    for section in pandas_titled_sections:
        title = section['title'].lower().strip()
        if title == 'raises':
            raises_section = 'Raises\n------\n\n' + section['text'] + '\n\n'
        elif title == 'parameters':
            parameters_section = 'Parameters\n----------\n\n' + section['text'] + '\n\n'
        elif title == 'return' or title == 'returns':
            returns_section = 'Returns\n-------\n\n' + section['text'] + '\n\n'
    if sdc_see_also_text:
        see_also_section = '\n.. seealso::\n\n' + sdc_see_also_text + '\n\n'
    if sdc_limitations_text:
        limitations_section = 'Limitations\n-----------\n\n' + sdc_limitations_text + '\n\n'
    if sdc_examples_text:
        examples_section = 'Examples\n-----------\n\n' + sdc_examples_text + '\n\n'
    rst_label = pandas_name.replace('.', '_')
    n = len(pandas_name)
    docstring = \
        '.. _' + rst_label + ':\n\n' + \
        pandas_name + '\n' + '*'*n + '\n' + \
        short_description_section + \
        long_description_section + \
        parameters_section + \
        returns_section + \
        raises_section + \
        limitations_section + \
        examples_section + \
        see_also_section
    file_name = rst_label + '.rst'
    return file_name, docstring
def write_rst(file_name, docstring):
    """
    Write *docstring* into *file_name*, creating parent directories as needed.

    :param file_name: destination path (may include not-yet-existing dirs)
    :param docstring: text to write
    """
    directory = os.path.dirname(file_name)
    if len(directory) > 0 and not os.path.exists(directory):
        os.makedirs(directory)
    # Bug fix: use a context manager so the handle is closed even when
    # write() raises (the original left the file open on error).
    with open(file_name, 'w') as out:
        out.write(docstring)
if __name__ == "__main__":
    # Collect Pandas and SDC module metadata, then generate the docs:
    # one RST file per SDC Series function plus the overall API index.
    init_pandas_logging()
    pandas_modules = get_pandas_modules()
    init_sdc_logging()
    sdc_modules = get_sdc_modules()
    # Only the Series functions module is documented function-by-function here
    for the_module in sdc_modules:
        if the_module['module_name'] == 'sdc.datatypes.hpat_pandas_series_functions':
            for func in the_module['functions']:
                file_name, doc = generate_sdc_object_doc(func['function_object'])
                write_rst(APIREF_RELPATH + file_name, doc)
    doc = generate_api_index()
    write_rst('apireference.rst', doc)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# written for python 3 but also run on python 2
from __future__ import absolute_import, division, print_function, unicode_literals
"""
make index json file from text file tree.
Expects Python 3
https://github.com/sukuba/js-py-document-search
"""
import argparse
import os
import shutil
import datetime
import jsngram.jsngram
import jsngram.dir2
def remove_entries(dest):
    """
    Delete every file and subdirectory directly under *dest*.
    """
    for name in os.listdir(dest):
        path = os.path.join(dest, name)
        if os.path.isfile(path):
            os.remove(path)
        else:
            shutil.rmtree(path)
def make_index_by_files_inc(n, shorter, src, dest, flat, ignore, files_at_once, verbose_print):
    """
    Index every text file under *src*, committing JSON in batches.

    :param n: N-gram character length
    :param shorter: also create indexes shorter than n
    :param src: source text directory
    :param dest: index output directory
    :param flat: create file-type (flat) indexes instead of a directory tree
    :param ignore: regex of characters excluded from the index
    :param files_at_once: batch size for add_files_to_json
    :param verbose_print: print verbose progress information
    :return: the JsNgram index object
    """
    ix = jsngram.jsngram.JsNgram(n, shorter, src, dest, flat, ignore)
    entries = jsngram.dir2.list_files(src)
    # Bug fix (shadowing): the original reassigned `n`, clobbering the
    # N-gram size parameter; use a separate name for the file count.
    total = len(entries)
    for files in (entries[i:i+files_at_once] for i in range(0, total, files_at_once)):
        ix.add_files_to_json(files, verbose_print)
        print('%d indexes in %d files' % (len(ix.db), len(files)))
        for f in files:
            print(' ' + f)
    print('%d files processed.' % len(entries))
    return ix
def make_index(args):
    """
    Make index json files from a text file tree.

    Expected args: src, dest, size, noshorter, flat, once, ignore, verbose.
    """
    # Bug fix: jsngram.json2 is used below but was never imported at module
    # level; import it here so json_end() resolves.
    import jsngram.json2
    start_time = datetime.datetime.now()
    print('Start: ', start_time)
    print('Removing current index files ...')
    remove_entries(args.dest)
    print('Building index files ...')
    make_index_by_files_inc(args.size, not args.noshorter, args.src, args.dest,
                            args.flat, args.ignore, args.once, args.verbose)
    print('Adjusting index files ...')
    entries = jsngram.dir2.list_files(args.dest)
    for entry in entries:
        fullpath = os.path.join(args.dest, entry)
        jsngram.json2.json_end(fullpath)
    print('%d indexes' % len(entries))
    print('Done.')
    end_time = datetime.datetime.now()
    span = end_time - start_time
    # Bug fix: report whole hours of the full runtime. The original used
    # span.days * 24, which printed "0 hours" for any run shorter than a
    # day, and span.seconds, which ignores full days.
    total_seconds = int(span.total_seconds())
    if total_seconds < 3600:
        sspan = '%d seconds' % total_seconds
    else:
        sspan = '%d hours' % (total_seconds // 3600)
    print('End: ', end_time, ' / runtime: ', sspan)
def main():
    r"""
    Build index files from a tree of normalized text files.

    Usage:
        make_index.py E:\scratch txt idx

    Positional arguments:
        1: base directory (full path)
        2: source text directory (relative to the base)
        3: index output directory (relative to the base)

    Options:
        --size: character length of the N-grams (default 2)
        --noshorter: do not create indexes shorter than the N-gram length
            (default False)
        --flat: create file-type indexes instead of directory-type ones
            (default False)
        --once: number of files to index at a time (default 100)
        --ignore: character pattern (regex) treated as word separators and
            excluded from the index (default [\s,.,.、。]+)
        --verbose: print verbose information

    Input: normalized utf-8 text files laid out as a tree under a single
    directory. Output: N-gram index files written as a tree.
    """
    parser = argparse.ArgumentParser(description='正規化済みのテキスト群からインデックスファイルを作る')
    parser.add_argument('base', help='基準ディレクトリ(フルパス)')
    parser.add_argument('src', help='変換元テキストディレクトリ(基準からの相対パス)')
    parser.add_argument('dest', help='インデックス出力先ディレクトリ(基準からの相対パス)')
    parser.add_argument('-s', '--size', type=int, default=2, help='N-gramの文字長')
    parser.add_argument('-n', '--noshorter', action='store_true', help='文字長より短いインデックスは作成しない')
    parser.add_argument('-f', '--flat', action='store_true', help='ディレクトリ型でなく、ファイル型のインデックスを作成する')
    parser.add_argument('-o', '--once', type=int, default=100, help='一度にインデックスを作成するファイル数')
    parser.add_argument('-i', '--ignore', type=str, default=r'[\s,.,.、。]+', help='単語区切りとして、インデックスから除外する文字パターン')
    parser.add_argument('-v', '--verbose', action='store_true', help='冗長な情報を出力する')
    args = parser.parse_args()
    # src/dest are given relative to the base directory; resolve them here
    args.src = os.path.join(args.base, args.src)
    args.dest = os.path.join(args.base, args.dest)
    if args.verbose:
        print(args)
    make_index(args)
|
# Basic infrastructure for Bubble Shooter
import simplegui
import random
import math
# Global constants
WIDTH = 800
HEIGHT = 600
FIRING_POSITION = [WIDTH // 2, HEIGHT]  # launcher sits at the bottom center
FIRING_LINE_LENGTH = 60  # length of the drawn aiming line
FIRING_ANGLE_VEL_INC = 0.02  # angular velocity change per arrow-key event
BUBBLE_RADIUS = 20
COLOR_LIST = ["Red", "Green", "Blue", "White"]
# global variables
firing_angle = math.pi / 2  # aim straight up initially
firing_angle_vel = 0  # current rotation speed of the aiming line
bubble_stuck = True  # True while no bubble is in flight
# firing sound
firing_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/Collision8-Bit.ogg")
# helper functions to handle transformations
def angle_to_vector(ang):
    """Return the unit vector [cos, sin] for angle *ang* in radians."""
    x, y = math.cos(ang), math.sin(ang)
    return [x, y]
def dist(p, q):
    """Euclidean distance between 2D points *p* and *q*."""
    dx = p[0] - q[0]
    dy = p[1] - q[1]
    return math.sqrt(dx ** 2 + dy ** 2)
# class definition for Bubbles
class Bubble:
    """A single bubble: spawned at the launcher, fired, bounces off walls."""

    def __init__(self, sound = None):
        self.pos = list(FIRING_POSITION)  # start at the launcher
        self.vel = [0, 0]                 # at rest until fired
        self.color = random.choice(COLOR_LIST)
        self.sound = sound

    def update(self):
        """Advance one frame; reflect horizontal velocity at the side walls."""
        self.pos[0] += self.vel[0]
        self.pos[1] += self.vel[1]
        x = self.pos[0]
        if x <= BUBBLE_RADIUS or x >= WIDTH - BUBBLE_RADIUS:
            self.vel[0] = -self.vel[0]

    def fire_bubble(self, vel):
        """Launch with velocity *vel*, playing the firing sound if set."""
        self.vel = vel
        if self.sound:
            self.sound.play()

    def is_stuck(self, stuck_bubbles):
        """True when touching the ceiling or any already-stuck bubble."""
        if self.pos[1] <= BUBBLE_RADIUS:
            return True
        return any(self.collide(other) for other in stuck_bubbles)

    def collide(self, bubble):
        """True when this bubble overlaps *bubble*."""
        return dist(self.pos, bubble.pos) <= 2 * BUBBLE_RADIUS

    def draw(self, canvas):
        """Render as a filled circle with a white outline."""
        canvas.draw_circle(self.pos, BUBBLE_RADIUS, 1, "White", self.color)
# define keyhandlers to control firing_angle
def keydown(key):
    """Space fires the current bubble; arrow keys start rotating the aim."""
    global a_bubble, firing_angle_vel, bubble_stuck
    if key == simplegui.KEY_MAP["space"]:
        bubble_stuck = False
        direction = angle_to_vector(firing_angle)
        # Canvas y grows downward, hence the negated vertical component
        a_bubble.fire_bubble([4 * direction[0], -4 * direction[1]])
    elif key == simplegui.KEY_MAP["left"]:
        firing_angle_vel += FIRING_ANGLE_VEL_INC
    elif key == simplegui.KEY_MAP["right"]:
        firing_angle_vel -= FIRING_ANGLE_VEL_INC
def keyup(key):
    """Releasing an arrow key cancels the rotation it started."""
    global firing_angle_vel
    if key == simplegui.KEY_MAP["left"]:
        firing_angle_vel -= FIRING_ANGLE_VEL_INC
    elif key == simplegui.KEY_MAP["right"]:
        firing_angle_vel += FIRING_ANGLE_VEL_INC
# define draw handler
def draw(canvas):
    """Per-frame handler: rotate the aim line, move the bubble, draw all."""
    global firing_angle, a_bubble, bubble_stuck
    firing_angle += firing_angle_vel
    # Aiming line from the launcher along the current firing direction
    orient = angle_to_vector(firing_angle)
    tip = [FIRING_POSITION[0] + FIRING_LINE_LENGTH * orient[0],
           FIRING_POSITION[1] - FIRING_LINE_LENGTH * orient[1]]
    canvas.draw_line(FIRING_POSITION, tip, 4, "White")
    # Move the flying bubble; once it sticks, freeze it and spawn a new one
    a_bubble.update()
    if a_bubble.is_stuck(stuck_bubbles):
        bubble_stuck = True
        stuck_bubbles.add(a_bubble)
        a_bubble = Bubble(firing_sound)
    # Render the active bubble and everything already stuck
    a_bubble.draw(canvas)
    for stuck in stuck_bubbles:
        stuck.draw(canvas)
# create frame and register handlers
frame = simplegui.create_frame("Bubble Shooter", WIDTH, HEIGHT)
frame.set_keydown_handler(keydown)
frame.set_keyup_handler(keyup)
frame.set_draw_handler(draw)
# create initial bubble and start frame
a_bubble = Bubble(firing_sound)
stuck_bubbles = set([])  # bubbles that have stuck in place so far
frame.start()
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ACS_preProcessing1Dialog
A QGIS plugin
Transfer stream data
-------------------
begin : 2017-07-26
git sha : $Format:%H$
copyright : (C) 2017 by Laura Bienstein
email : laura.bienstein@rub.de
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from PyQt4 import QtGui, uic
# Build the dialog's base class from the Qt Designer .ui file that sits
# next to this module; FORM_CLASS provides setupUi() used by the dialog.
FORM_CLASS, _ = uic.loadUiType(os.path.join(
    os.path.dirname(__file__), 'acs_preProcessing1_dialog_base.ui'))
class ACS_preProcessing1Dialog(QtGui.QDialog, FORM_CLASS):
    # QGIS plugin dialog; all widgets come from the Designer-generated
    # FORM_CLASS base.
    def __init__(self, parent=None):
        """Constructor.

        :param parent: optional parent widget for this dialog
        """
        super(ACS_preProcessing1Dialog, self).__init__(parent)
        # Set up the user interface from Designer.
        # After setupUI you can access any designer object by doing
        # self.<objectname>, and you can use autoconnect slots - see
        # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
        # #widgets-and-dialogs-with-auto-connect
        self.setupUi(self)
|
import os
import sys
import tempfile
from pathlib import Path
import arcpy
import pandas as pd
import numpy as np
import geopandas as gpd
#constants
# WGS_1984 geographic coordinate system as an ArcGIS well-known-text string,
# followed by the XY/Z/M domain, resolution and tolerance settings expected
# by arcpy spatial-reference parameters.
WGS_1984 = \
    "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984', "+\
    "SPHEROID['WGS_1984',6378137.0,298.257223563]], "+\
    "PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]; "+\
    "-400 -400 1000000000;-100000 10000;-100000 10000; "+\
    "8.98315284119522E-09;0.001;0.001;IsHighPrecision"
#functions
def gdb_path(in_fc):
    """
    Resolve where a GIS dataset lives.

    :param in_fc: path (or catalog path) of a feature class / table
    :return: tuple (GDB, workspace, dirname, fc_name) where GDB is the
        geodatabase path ('' when the data is in a plain folder), workspace
        is the dataType of the containing directory, dirname is that
        directory, and fc_name is the dataset name.
    """
    if arcpy.Exists(in_fc):
        desc = arcpy.Describe(in_fc)
        in_fc = desc.catalogPath  # normalize to the full catalog path
        fc_name = desc.name
    else:
        fc_name = os.path.basename(in_fc)
    dirname = os.path.dirname(in_fc)
    workspace = arcpy.Describe(dirname).dataType
    if workspace == 'FeatureDataset':
        GDB = os.path.dirname(dirname)  # the .gdb is one level up
    elif workspace == 'Workspace':
        GDB = dirname
    else:
        # 'Folder' or anything else: the data is not inside a geodatabase.
        # (The original had separate 'Folder' and catch-all branches that
        # both returned ''; they are merged here.)
        GDB = ''
    return GDB, workspace, dirname, fc_name
def get_fields(in_fc, output_type = 'list'):
    """Return the fields of *in_fc* as a list of names ('list'), a
    name->type dict ('dict'), or '' for any other output_type."""
    fields = arcpy.ListFields(in_fc)
    if output_type == 'list':
        return [field.name for field in fields]
    if output_type == 'dict':
        return {field.name: field.type for field in fields}
    return ''
#pandas convertor for ArcGIS
def gdf_to_fc(gdf, fc):
    """
    converts a geopandas dataframe to a layer in a ESRI file geodatabase.

    :param gdf: geopandas dataframe; must have a 'geometry' column
    :param fc: destination feature class path
    :return: result of arcpy.CopyFeatures_management

    Notes:
    - gdf have to have geometry field.
    """
    # Nothing to export without geometry; abort the whole process.
    if 'geometry' not in gdf.columns.values:
        sys.exit()
    GDB, workspace, dirname, fc_name = gdb_path(fc)
    # write the dataframe as a shapefile in a temporary directory
    tmp_dir = tempfile.TemporaryDirectory()
    p = Path(tmp_dir.name)
    n = fc_name + '.shp'
    gdf.to_file(str(p/n))
    # skip the first two shapefile fields (presumably FID and the geometry
    # field — TODO confirm) and keep only the data columns
    fc_cols = get_fields(str(p/n))[2:]
    #copy the file into a feature class
    fc = arcpy.CopyFeatures_management(str(p/n), fc)
    gdf_cols = gdf.columns.tolist()
    gdf_cols.remove('geometry')
    #fixing the columns
    # NOTE(review): this maps shapefile field names back to the original
    # dataframe column names positionally — presumably to undo shapefile
    # name mangling/truncation; verify the ordering assumption holds.
    if gdf_cols:
        col_dict = {col: gdf_cols[indx] for indx, col in enumerate(fc_cols) }
        for col in col_dict:
            if col_dict[col] != col:
                arcpy.AlterField_management(fc, col, col_dict[col], clear_field_alias="true")
    # Delete temporary directory
    tmp_dir.cleanup()
    return fc
def gdf_to_tbl(gdf, tbl):
    """
    Write the non-geometry columns of *gdf* into an ArcGIS table.

    :param gdf: (geo)pandas dataframe; a 'geometry' column, if present,
        is dropped
    :param tbl: destination table path
    :return: the table path *tbl*
    """
    gdf_cols = gdf.columns.values.tolist()
    if 'geometry' in gdf_cols:
        gdf_cols.remove('geometry')
        gdf = gdf[gdf_cols].copy()
    # Convert to a NumPy record array, which arcpy.da can consume directly
    x = np.array(np.rec.fromrecords(gdf.values))
    names = gdf.dtypes.index.tolist()
    # Sanitize column names into valid ArcGIS field names
    names = [str(arcpy.ValidateTableName(name)) for name in names]
    x.dtype.names = tuple(names)
    arcpy.da.NumPyArrayToTable(x, tbl)
    return tbl
def fc_to_gdf(fc):
    """Read an ArcGIS feature class into a GeoDataFrame."""
    GDB, workspace, dirname, fc_name = gdb_path(fc)
    if GDB != '':
        # Inside a geodatabase: read the named layer from the .gdb
        return gpd.read_file(GDB, layer = fc_name)
    # Plain folder data: read straight from the catalog path
    fc_path = arcpy.Describe(fc).catalogPath
    return gpd.read_file(fc_path)
def tbl_to_gdf(tbl, fieldnames = None):
    """
    Read an ArcGIS table into a GeoDataFrame, optionally restricted to
    *fieldnames* (names not present in the table are dropped silently).

    :param tbl: table path
    :param fieldnames: optional iterable of column names to keep
    :return: a copy of the dataframe with the selected columns
    """
    gdf = fc_to_gdf(tbl)
    if fieldnames is not None:
        # Bug fix: DataFrame.columns is a property, not a method; the
        # original called gdf.columns(), which raises TypeError.
        fieldnames = [f for f in fieldnames if f in gdf.columns]
    else:
        fieldnames = get_fields(tbl)[1:]
    return gdf[fieldnames].copy()
|
'''This module provides a class for Reference calls to the CC API'''
from currencycloud.http import Http
from currencycloud.resources import BeneficiaryRequiredDetails, ConversionDates, Currency, SettlementAccount, PayerRequiredDetails, PaymentPurposeCode, BankDetails, PaymentFeeRule
class Reference(Http):
    '''Interface to the Reference endpoints of the CC API.'''

    def beneficiary_required_details(self, **kwargs):
        '''Returns required beneficiary details and their basic validation formats.'''
        details = self.get('/v2/reference/beneficiary_required_details', query=kwargs)['details']
        return [BeneficiaryRequiredDetails(self, **entry) for entry in details]

    def conversion_dates(self, **kwargs):
        '''Returns the dates on which this currency pair can not be traded.'''
        data = self.get('/v2/reference/conversion_dates', query=kwargs)
        return ConversionDates(self, **data)

    def currencies(self):
        '''Returns a list of all the currencies that are tradeable.'''
        listing = self.get('/v2/reference/currencies')['currencies']
        return [Currency(self, **entry) for entry in listing]

    def payment_dates(self, **kwargs):
        '''
        Returns a list of dates that are invalid when making payments of a
        specific currency.
        '''
        return self.get('/v2/reference/payment_dates', query=kwargs)

    def settlement_accounts(self, **kwargs):
        '''Returns settlement account information, detailing where funds need to be sent to.'''
        accounts = self.get('/v2/reference/settlement_accounts', query=kwargs)['settlement_accounts']
        return [SettlementAccount(self, **entry) for entry in accounts]

    def payer_required_details(self, **kwargs):
        '''Returns required payer details and their basic validation formats.'''
        details = self.get('/v2/reference/payer_required_details', query=kwargs)['details']
        return [PayerRequiredDetails(self, **entry) for entry in details]

    def payment_purpose_codes(self, **kwargs):
        '''Returns a list of valid purpose codes for the specified currency.'''
        codes = self.get('/v2/reference/payment_purpose_codes', query=kwargs)['purpose_codes']
        return [PaymentPurposeCode(self, **entry) for entry in codes]

    def bank_details(self, **kwargs):
        '''Returns the details of the bank related to the specified identifier.'''
        data = self.get('/v2/reference/bank_details', query=kwargs)
        return BankDetails(self, **data)

    def payment_fee_rules(self, **kwargs):
        '''Returns a list of payment fee rules.'''
        rules = self.get('/v2/reference/payment_fee_rules', query=kwargs)['payment_fee_rules']
        return [PaymentFeeRule(self, **entry) for entry in rules]
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
from weblate import appsettings
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.core.cache import cache
import traceback
import multiprocessing
from weblate.trans.checks import CHECKS
from weblate.trans.models.source import Source
from weblate.trans.models.unitdata import Check, Comment, Suggestion
from weblate.trans.models.changes import Change
from weblate.trans.search import update_index_unit, fulltext_search, more_like
from weblate.accounts.models import (
notify_new_contributor, notify_new_translation
)
from weblate.trans.filelock import FileLockException
from weblate.trans.mixins import LoggerMixin
from weblate.trans.util import (
is_plural, split_plural, join_plural, get_distinct_translations,
calculate_checksum,
)
# Simple unit-state filters: maps a filter name to the queryset kwargs it
# expands to; consumed by UnitManager.filter_type.
SIMPLE_FILTERS = {
    'fuzzy': {'fuzzy': True},
    'untranslated': {'translated': False},
    'translated': {'translated': True},
    'suggestions': {'has_suggestion': True},
    'comments': {'has_comment': True},
}
# Unit fields that exact/substring search (UnitManager.search) can target.
SEARCH_FILTERS = ('source', 'target', 'context', 'location', 'comment')
def more_like_queue(checksum, source, top, queue):
    """
    Multiprocess wrapper around more_like: push the result onto *queue*.
    """
    queue.put(more_like(checksum, source, top))
class UnitManager(models.Manager):
    """
    Manager with helpers for importing units from translate-toolkit files
    and for filtering/searching them (state filters, check filters,
    fulltext search and similarity lookups).
    """
    # pylint: disable=W0232

    def update_from_unit(self, translation, unit, pos):
        """
        Process translation toolkit unit and stores/updates database entry.

        Returns a tuple (dbunit, created).
        """
        # Get basic unit data
        src = unit.get_source()
        ctx = unit.get_context()
        checksum = unit.get_checksum()

        # Try getting existing unit
        # (bug fix: `created = False` was assigned twice; the duplicate
        # dead store has been removed)
        created = False
        try:
            dbunit = translation.unit_set.get(checksum=checksum)
        except Unit.MultipleObjectsReturned:
            # Some inconsistency (possibly race condition), try to recover
            translation.unit_set.filter(checksum=checksum).delete()
            dbunit = None
        except Unit.DoesNotExist:
            dbunit = None

        # Create unit if it does not exist
        if dbunit is None:
            dbunit = Unit(
                translation=translation,
                checksum=checksum,
                source=src,
                context=ctx
            )
            created = True

        # Update all details
        dbunit.update_from_unit(unit, pos, created)

        # Return result
        return dbunit, created

    def filter_checks(self, rqtype, translation, ignored=False):
        """
        Filtering for checks.
        """
        if translation is None:
            return self.all()

        # Filter checks for current project
        checks = Check.objects.filter(
            project=translation.subproject.project,
            ignore=ignored
        )

        filter_translated = True

        # Filter by language
        if rqtype == 'allchecks':
            return self.filter(has_failing_check=True)
        elif rqtype == 'sourcechecks':
            checks = checks.filter(language=None)
            filter_translated = False
        elif CHECKS[rqtype].source:
            checks = checks.filter(language=None)
            filter_translated = False
        elif CHECKS[rqtype].target:
            checks = checks.filter(language=translation.language)

        # Filter by check type
        if rqtype not in ('allchecks', 'sourcechecks'):
            checks = checks.filter(check=rqtype)

        # Checks are shared per content, hence the contentsum join
        checks = checks.values_list('contentsum', flat=True)
        ret = self.filter(contentsum__in=checks)
        if filter_translated:
            ret = ret.filter(translated=True)
        return ret

    def filter_type(self, rqtype, translation, ignored=False):
        """
        Basic filtering based on unit state or failed checks.
        """
        if rqtype in SIMPLE_FILTERS:
            return self.filter(**SIMPLE_FILTERS[rqtype])
        elif rqtype == 'sourcecomments' and translation is not None:
            coms = Comment.objects.filter(
                language=None,
                project=translation.subproject.project
            )
            coms = coms.values_list('contentsum', flat=True)
            return self.filter(contentsum__in=coms)
        elif rqtype in CHECKS or rqtype in ['allchecks', 'sourcechecks']:
            return self.filter_checks(rqtype, translation, ignored)
        else:
            # Catch anything not matching including 'all'
            return self.all()

    def count_type(self, rqtype, translation):
        """
        Cached counting of failing checks (and other stats).
        """
        # Try to get value from cache
        cache_key = 'counts-%s-%s-%s' % (
            translation.subproject.get_full_slug(),
            translation.language.code,
            rqtype
        )
        ret = cache.get(cache_key)
        if ret is not None:
            return ret

        # Actually count units
        ret = self.filter_type(rqtype, translation).count()

        # Update cache
        cache.set(cache_key, ret)
        return ret

    def review(self, date, user):
        """
        Returns units touched by other users since given time.
        """
        if user.is_anonymous():
            return self.none()
        try:
            sample = self.all()[0]
        except IndexError:
            return self.none()
        changes = Change.objects.content().filter(
            translation=sample.translation,
            timestamp__gte=date
        ).exclude(user=user)
        return self.filter(id__in=changes.values_list('unit__id', flat=True))

    def search(self, translation, params):
        """
        High level wrapper for searching.
        """
        base = self.all()
        if params['type'] != 'all':
            base = self.filter_type(
                params['type'],
                translation,
                params['ignored']
            )

        if 'lang' in params and params['lang']:
            base = base.filter(translation__language__code=params['lang'])

        if not params['q']:
            return base

        if params['search'] in ('exact', 'substring'):
            queries = []

            modifier = ''
            if params['search'] != 'exact':
                modifier = '__icontains'

            for param in SEARCH_FILTERS:
                if params[param]:
                    queries.append(param)

            # OR together one Q() per selected search field.
            # NOTE: relies on the Python 2 builtin `reduce`; under Python 3
            # this needs functools.reduce.
            query = reduce(
                lambda q, value:
                q | Q(**{'%s%s' % (value, modifier): params['q']}),
                queries,
                Q()
            )

            return base.filter(query)
        else:
            lang = self.all()[0].translation.language.code
            return base.filter(
                checksum__in=fulltext_search(
                    params['q'],
                    lang,
                    params
                )
            )

    def same_source(self, unit):
        """
        Finds units with same source.
        """
        checksums = fulltext_search(
            unit.get_source_plurals()[0],
            unit.translation.language.code,
            {'source': True}
        )

        return self.filter(
            checksum__in=checksums,
            translation__language=unit.translation.language,
            translated=True
        ).exclude(
            pk=unit.id
        )

    def more_like_this(self, unit, top=5):
        """
        Finds closely similar units.

        When MT_WEBLATE_LIMIT is non-negative, the similarity lookup runs in
        a subprocess so it can be killed after the configured timeout.
        """
        if appsettings.MT_WEBLATE_LIMIT >= 0:
            queue = multiprocessing.Queue()
            proc = multiprocessing.Process(
                target=more_like_queue,
                args=(unit.checksum, unit.source, top, queue)
            )
            proc.start()
            proc.join(appsettings.MT_WEBLATE_LIMIT)

            if proc.is_alive():
                proc.terminate()

            if queue.empty():
                raise Exception('Request timed out.')

            more_results = queue.get()
        else:
            more_results = more_like(unit.checksum, unit.source, top)

        # Exclude units that share the very same source text
        same_results = fulltext_search(
            unit.get_source_plurals()[0],
            unit.translation.language.code,
            {'source': True}
        )

        checksums = more_results - same_results

        return self.filter(
            checksum__in=checksums,
            translation__language=unit.translation.language,
            translated=True
        ).exclude(
            pk=unit.id
        )

    def same(self, unit):
        """
        Units with same source within same project.
        """
        project = unit.translation.subproject.project
        return self.filter(
            contentsum=unit.contentsum,
            translation__subproject__project=project,
            translation__language=unit.translation.language
        ).exclude(
            pk=unit.id
        )
class Unit(models.Model, LoggerMixin):
    # One translatable message within a Translation file.
    translation = models.ForeignKey('Translation')
    # Hash identifying the unit within its file; used for lookups
    checksum = models.CharField(max_length=40, db_index=True)
    # Hash of the unit content; checks/comments are shared via this key
    contentsum = models.CharField(max_length=40, db_index=True)
    location = models.TextField(default='', blank=True)
    context = models.TextField(default='', blank=True)
    comment = models.TextField(default='', blank=True)
    flags = models.TextField(default='', blank=True)
    source = models.TextField()
    # Source text before the last change (monolingual change tracking)
    previous_source = models.TextField(default='', blank=True)
    target = models.TextField(default='', blank=True)
    fuzzy = models.BooleanField(default=False, db_index=True)
    translated = models.BooleanField(default=False, db_index=True)
    # Position of the unit within its file
    position = models.IntegerField(db_index=True)
    # Denormalized flags refreshed by the update_has_* helpers
    has_suggestion = models.BooleanField(default=False, db_index=True)
    has_comment = models.BooleanField(default=False, db_index=True)
    has_failing_check = models.BooleanField(default=False, db_index=True)
    num_words = models.IntegerField(default=0)
    priority = models.IntegerField(default=100, db_index=True)
    objects = UnitManager()
    class Meta(object):
        permissions = (
            ('save_translation', "Can save translation"),
            ('save_template', "Can save template"),
        )
        ordering = ['priority', 'position']
        app_label = 'trans'
def __init__(self, *args, **kwargs):
"""
Constructor to initialize some cache properties.
"""
super(Unit, self).__init__(*args, **kwargs)
self._all_flags = None
self._source_info = None
self._suggestions = None
self.old_translated = self.translated
self.old_fuzzy = self.fuzzy
def has_acl(self, user):
"""
Checks whether current user is allowed to access this
subproject.
"""
return self.translation.subproject.project.has_acl(user)
def check_acl(self, request):
"""
Raises an error if user is not allowed to access this project.
"""
self.translation.subproject.project.check_acl(request)
def __unicode__(self):
return '%s on %s' % (
self.checksum,
self.translation,
)
@property
def log_prefix(self):
return '{0}/{1}/{2}[{3}]: '.format(
self.translation.subproject.project.slug,
self.translation.subproject.slug,
self.translation.language.code,
self.pk
)
def get_absolute_url(self):
return '%s?checksum=%s' % (
self.translation.get_translate_url(), self.checksum
)
    def update_from_unit(self, unit, pos, created):
        """
        Updates Unit from ttkit unit.

        :param unit: translate-toolkit unit wrapper to copy state from
        :param pos: position of the unit within the file
        :param created: whether the database row was just created
        """
        # Store current values for use in Translation.check_sync
        self.old_fuzzy = self.fuzzy
        self.old_translated = self.translated
        # Get unit attributes
        location = unit.get_locations()
        flags = unit.get_flags()
        target = unit.get_target()
        source = unit.get_source()
        comment = unit.get_comments()
        fuzzy = unit.is_fuzzy()
        translated = unit.is_translated()
        previous_source = unit.get_previous_source()
        contentsum = unit.get_contentsum()
        # Monolingual files handling (without target change)
        if unit.template is not None and target == self.target:
            if source != self.source and translated:
                # Store previous source and fuzzy flag for monolingual files
                if previous_source == '':
                    previous_source = self.source
                fuzzy = True
                translated = False
            else:
                # We should keep calculated flags if translation was
                # not changed outside
                previous_source = self.previous_source
                fuzzy = self.fuzzy
                translated = self.translated
        # Update checks on fuzzy update or on content change
        same_content = (
            target == self.target and source == self.source
        )
        same_state = (
            fuzzy == self.fuzzy and
            translated == self.translated and
            not created
        )
        # Check if we actually need to change anything; bail out early to
        # avoid a pointless database write
        if (not created and
                location == self.location and
                flags == self.flags and
                same_content and same_state and
                translated == self.translated and
                comment == self.comment and
                pos == self.position and
                contentsum == self.contentsum and
                previous_source == self.previous_source):
            return
        # Ensure we track source string
        source_info, source_created = Source.objects.get_or_create(
            checksum=self.checksum,
            subproject=self.translation.subproject
        )
        contentsum_changed = self.contentsum != contentsum
        # Store updated values
        self.position = pos
        self.location = location
        self.flags = flags
        self.source = source
        self.target = target
        self.fuzzy = fuzzy
        self.translated = translated
        self.comment = comment
        self.contentsum = contentsum
        self.previous_source = previous_source
        self.priority = source_info.priority
        self.save(
            force_insert=created,
            backend=True,
            same_content=same_content,
            same_state=same_state
        )
        # Create change object for new source string
        if source_created:
            Change.objects.create(
                translation=self.translation,
                action=Change.ACTION_NEW_SOURCE,
                unit=self,
            )
        if contentsum_changed:
            # Content hash changed: refresh the per-content denormalized flags
            self.update_has_failing_check(recurse=False, update_stats=False)
            self.update_has_comment(update_stats=False)
            self.update_has_suggestion(update_stats=False)
def is_plural(self):
"""
Checks whether message is plural.
"""
return is_plural(self.source)
def get_source_plurals(self):
"""
Returns source plurals in array.
"""
return split_plural(self.source)
def get_target_plurals(self):
"""
Returns target plurals in array.
"""
# Is this plural?
if not self.is_plural():
return [self.target]
# Split plurals
ret = split_plural(self.target)
# Check if we have expected number of them
plurals = self.translation.language.nplurals
if len(ret) == plurals:
return ret
# Pad with empty translations
while len(ret) < plurals:
ret.append('')
# Delete extra plurals
while len(ret) > plurals:
del ret[-1]
return ret
def propagate(self, request, change_action=None):
"""
Propagates current translation to all others.
"""
allunits = Unit.objects.same(self).filter(
translation__subproject__allow_translation_propagation=True
)
for unit in allunits:
unit.target = self.target
unit.fuzzy = self.fuzzy
unit.save_backend(request, False, change_action=change_action)
    def save_backend(self, request, propagate=True, gen_change=True,
                     change_action=None, user=None):
        """
        Stores unit to backend.

        Optional user parameters defines authorship of a change.

        Returns True when the unit was actually stored, False when nothing
        changed or the backend could not be updated.
        """
        # Update lock timestamp
        self.translation.update_lock(request)

        # For case when authorship specified, use user from request
        if user is None:
            user = request.user

        # Store to backend
        try:
            (saved, pounit) = self.translation.update_unit(self, request, user)
        except FileLockException:
            self.log_error('failed to lock backend for %s!', self)
            messages.error(
                request,
                _(
                    'Failed to store message in the backend, '
                    'lock timeout occurred!'
                )
            )
            return False

        # Handle situation when backend did not find the message
        if pounit is None:
            self.log_error('message %s disappeared!', self)
            messages.error(
                request,
                _(
                    'Message not found in backend storage, '
                    'it is probably corrupted.'
                )
            )
            # Try reloading from backend
            self.translation.check_sync(True)
            return False

        # Get old unit from database (for notifications)
        oldunit = Unit.objects.get(id=self.id)

        # Return if there was no change
        # We have to explicitly check for fuzzy flag change on monolingual
        # files, where we handle it ourselves without storing to backend
        if (not saved and
                oldunit.fuzzy == self.fuzzy and
                oldunit.target == self.target):
            # Propagate if we should
            if propagate:
                self.propagate(request, change_action)
            return False

        # Update translated flag
        self.translated = pounit.is_translated()

        # Update comments as they might have been changed (eg, fuzzy flag
        # removed)
        # NOTE(review): despite the comment above, this refreshes flags,
        # not comments — confirm the comment, not the code, is stale.
        self.flags = pounit.get_flags()

        # Save updated unit to database
        self.save(backend=True)

        # Update translation stats
        old_translated = self.translation.translated
        self.translation.update_stats()

        # Notify subscribed users about new translation
        notify_new_translation(self, oldunit, request.user)

        # Update user stats
        user.profile.translated += 1
        user.profile.save()

        # Generate Change object for this change
        if gen_change:
            self.generate_change(request, user, oldunit, change_action)

        # Force commiting on completing translation
        if (old_translated < self.translation.translated and
                self.translation.translated == self.translation.total):
            self.translation.commit_pending(request)
            Change.objects.create(
                translation=self.translation,
                action=Change.ACTION_COMPLETE,
                user=request.user,
                author=user
            )

        # Update related source strings if working on a template
        if self.translation.is_template():
            self.update_source_units()

        # Propagate to other projects
        if propagate:
            self.propagate(request, change_action)

        return True
    def update_source_units(self):
        """Updates source for units within the same component.

        This is needed when editing template translation for monolingual
        formats.
        """
        # Find relevant units (same component and context, any language)
        same_source = Unit.objects.filter(
            translation__subproject=self.translation.subproject,
            context=self.context,
        )
        # Update source and contentsum
        previous_source = same_source[0].source
        # NOTE(review): the new source text is self.target, yet contentsum
        # is computed from self.source — confirm this is intentional; it
        # looks like it should hash the updated text instead.
        same_source.update(
            source=self.target,
            contentsum=calculate_checksum(self.source, self.context),
        )
        # Invalidate existing translations of the changed source: mark
        # them fuzzy and remember the text they were translated against.
        same_source.filter(
            translated=True
        ).exclude(
            id=self.id
        ).update(
            translated=False,
            fuzzy=True,
            previous_source=previous_source,
        )
        # Update source index, it's enough to do it for one as we index by
        # checksum which is same for all
        update_index_unit(self, True)
def generate_change(self, request, author, oldunit, change_action):
"""
Creates Change entry for saving unit.
"""
# Notify about new contributor
user_changes = Change.objects.filter(
translation=self.translation,
user=request.user
)
if not user_changes.exists():
notify_new_contributor(self, request.user)
# Action type to store
if change_action is not None:
action = change_action
elif oldunit.translated:
action = Change.ACTION_CHANGE
else:
action = Change.ACTION_NEW
# Should we store history of edits?
if self.translation.subproject.save_history:
history_target = self.target
else:
history_target = ''
# Create change object
Change.objects.create(
unit=self,
translation=self.translation,
action=action,
user=request.user,
author=author,
target=history_target
)
def save(self, *args, **kwargs):
"""
Wrapper around save to warn when save did not come from
git backend (eg. commit or by parsing file).
"""
# Warn if request is not coming from backend
if 'backend' not in kwargs:
self.log_error(
'Unit.save called without backend sync: %s',
''.join(traceback.format_stack())
)
else:
del kwargs['backend']
# Pop parameter indicating that we don't have to process content
same_content = kwargs.pop('same_content', False)
same_state = kwargs.pop('same_state', False)
# Keep the force_insert for parent save
force_insert = kwargs.get('force_insert', False)
# Store number of words
if not same_content or not self.num_words:
self.num_words = len(self.get_source_plurals()[0].split())
# Actually save the unit
super(Unit, self).save(*args, **kwargs)
# Update checks if content or fuzzy flag has changed
if not same_content or not same_state:
self.run_checks(same_state, same_content, force_insert)
# Update fulltext index if content has changed or this is a new unit
if force_insert or not same_content:
update_index_unit(self, force_insert)
def suggestions(self):
"""
Returns all suggestions for this unit.
"""
if self._suggestions is None:
self._suggestions = Suggestion.objects.filter(
contentsum=self.contentsum,
project=self.translation.subproject.project,
language=self.translation.language
)
return self._suggestions
def cleanup_checks(self, source, target):
"""
Cleanups listed source and target checks.
"""
if len(source) == 0 and len(target) == 0:
return False
todelete = Check.objects.filter(
contentsum=self.contentsum,
project=self.translation.subproject.project
).filter(
(Q(language=self.translation.language) & Q(check__in=target)) |
(Q(language=None) & Q(check__in=source))
)
if todelete.exists():
todelete.delete()
return True
return False
def checks(self):
"""
Returns all checks for this unit (even ignored).
"""
return Check.objects.filter(
contentsum=self.contentsum,
project=self.translation.subproject.project,
language=self.translation.language
)
def source_checks(self):
"""
Returns all source checks for this unit (even ignored).
"""
return Check.objects.filter(
contentsum=self.contentsum,
project=self.translation.subproject.project,
language=None
)
def active_checks(self):
"""
Returns all active (not ignored) checks for this unit.
"""
return Check.objects.filter(
contentsum=self.contentsum,
project=self.translation.subproject.project,
language=self.translation.language,
ignore=False
)
def active_source_checks(self):
"""
Returns all active (not ignored) source checks for this unit.
"""
return Check.objects.filter(
contentsum=self.contentsum,
project=self.translation.subproject.project,
language=None,
ignore=False
)
def get_comments(self):
"""
Returns list of target comments.
"""
return Comment.objects.filter(
contentsum=self.contentsum,
project=self.translation.subproject.project,
).filter(
Q(language=self.translation.language) | Q(language=None),
)
def get_source_comments(self):
"""
Returns list of target comments.
"""
return Comment.objects.filter(
contentsum=self.contentsum,
project=self.translation.subproject.project,
language=None,
)
    def get_checks_to_run(self, same_state, is_new):
        """
        Returns list of checks to run on state change.

        Returns tuple of checks to run and whether to do cleanup.

        For translated units all checks run and stale checks are cleaned;
        for untranslated units only the cross-unit consistency check and
        source checks run, and cleanup is skipped.
        """
        checks_to_run = CHECKS
        cleanup_checks = True

        if (not same_state or is_new) and not self.translated:
            # Check whether there is any message with same source
            project = self.translation.subproject.project
            same_source = Unit.objects.filter(
                translation__language=self.translation.language,
                translation__subproject__project=project,
                contentsum=self.contentsum,
                translated=True,
            ).exclude(
                id=self.id,
                translation__subproject__allow_translation_propagation=False,
            )

            # We run only checks which span across more units
            checks_to_run = {}

            # Delete all checks if only message with this source is fuzzy
            if not same_source.exists():
                checks = self.checks()
                if checks.exists():
                    checks.delete()
                    self.update_has_failing_check(True)
            elif 'inconsistent' in CHECKS:
                # Consistency check checks across more translations
                checks_to_run['inconsistent'] = CHECKS['inconsistent']

            # Run source checks as well
            for check in CHECKS:
                if CHECKS[check].source:
                    checks_to_run[CHECKS[check].check_id] = CHECKS[check]

            # Partial check run must not delete checks it did not evaluate
            cleanup_checks = False

        return checks_to_run, cleanup_checks
    def run_checks(self, same_state=True, same_content=True, is_new=False):
        """
        Updates checks for this unit.

        Runs the applicable quality checks, creates Check records for new
        failures, removes checks that now pass (when cleanup is allowed)
        and refreshes the has_failing_check flag.
        """
        was_change = False

        checks_to_run, cleanup_checks = self.get_checks_to_run(
            same_state, is_new
        )

        if len(checks_to_run) == 0:
            return

        src = self.get_source_plurals()
        tgt = self.get_target_plurals()

        # Remember currently stored checks; whatever is left in these sets
        # after the run is no longer failing and may be cleaned up.
        old_target_checks = set(
            self.checks().values_list('check', flat=True)
        )
        old_source_checks = set(
            self.source_checks().values_list('check', flat=True)
        )

        # Run all checks
        for check in checks_to_run:
            check_obj = CHECKS[check]
            # Target check
            if check_obj.target and check_obj.check_target(src, tgt, self):
                if check in old_target_checks:
                    # We already have this check
                    old_target_checks.remove(check)
                else:
                    # Create new check
                    Check.objects.create(
                        contentsum=self.contentsum,
                        project=self.translation.subproject.project,
                        language=self.translation.language,
                        ignore=False,
                        check=check,
                        for_unit=self.pk
                    )
                    was_change = True
            # Source check
            if check_obj.source and check_obj.check_source(src, self):
                if check in old_source_checks:
                    # We already have this check
                    old_source_checks.remove(check)
                else:
                    # Create new check
                    Check.objects.create(
                        contentsum=self.contentsum,
                        project=self.translation.subproject.project,
                        language=None,
                        ignore=False,
                        check=check
                    )
                    was_change = True

        # Delete no longer failing checks
        if cleanup_checks:
            was_change |= self.cleanup_checks(
                old_source_checks, old_target_checks
            )

        # Update failing checks flag
        if was_change or is_new or not same_content:
            self.update_has_failing_check(was_change)
    def update_has_failing_check(self, recurse=False, update_stats=True):
        """
        Updates flag counting failing checks.

        When recurse is set, the flag is also refreshed on all matching
        units in other components.
        """
        has_failing_check = self.translated and self.active_checks().exists()

        # Change attribute if it has changed
        if has_failing_check != self.has_failing_check:
            self.has_failing_check = has_failing_check
            self.save(backend=True, same_content=True, same_state=True)

            # Update translation stats
            if update_stats:
                self.translation.update_stats()

        # Invalidate checks cache if there was any change
        # (above code cares only about whether there is failing check
        # while here we care about any changed in checks)
        self.translation.invalidate_cache()

        if recurse:
            for unit in Unit.objects.same(self):
                unit.update_has_failing_check(False)
def update_has_suggestion(self, update_stats=True):
"""
Updates flag counting suggestions.
"""
has_suggestion = len(self.suggestions()) > 0
if has_suggestion != self.has_suggestion:
self.has_suggestion = has_suggestion
self.save(backend=True, same_content=True, same_state=True)
# Update translation stats
if update_stats:
self.translation.update_stats()
def update_has_comment(self, update_stats=True):
"""
Updates flag counting comments.
"""
has_comment = len(self.get_comments()) > 0
if has_comment != self.has_comment:
self.has_comment = has_comment
self.save(backend=True, same_content=True, same_state=True)
# Update translation stats
if update_stats:
self.translation.update_stats()
def nearby(self):
"""
Returns list of nearby messages based on location.
"""
return Unit.objects.filter(
translation=self.translation,
position__gte=self.position - appsettings.NEARBY_MESSAGES,
position__lte=self.position + appsettings.NEARBY_MESSAGES,
).select_related()
def translate(self, request, new_target, new_fuzzy):
"""
Stores new translation of a unit.
"""
# Update unit and save it
self.target = join_plural(new_target)
self.fuzzy = new_fuzzy
saved = self.save_backend(request)
return saved
@property
def all_flags(self):
"""
Returns union of own and subproject flags.
"""
if self._all_flags is None:
self._all_flags = set(
self.flags.split(',') +
self.source_info.check_flags.split(',') +
self.translation.subproject.all_flags
)
self._all_flags.discard('')
return self._all_flags
@property
def source_info(self):
"""
Returns related source string object.
"""
if self._source_info is None:
self._source_info = Source.objects.get(
checksum=self.checksum,
subproject=self.translation.subproject
)
return self._source_info
def get_secondary_units(self, user):
'''
Returns list of secondary units.
'''
secondary_langs = user.profile.secondary_languages.exclude(
id=self.translation.language.id
)
project = self.translation.subproject.project
return get_distinct_translations(
Unit.objects.filter(
checksum=self.checksum,
translated=True,
translation__subproject__project=project,
translation__language__in=secondary_langs,
)
)
|
import os
import sys
import termios
import fcntl
from blessings import Terminal
def getch():
    """Read one character from stdin without waiting for Enter.

    Temporarily puts the terminal into non-canonical, no-echo mode and
    the fd into non-blocking mode, then spins until a character arrives.
    Terminal attributes and fd flags are always restored on exit.
    """
    fd = sys.stdin.fileno()

    oldterm = termios.tcgetattr(fd)
    newattr = termios.tcgetattr(fd)
    # Disable line buffering (ICANON) and local echo (ECHO)
    newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
    termios.tcsetattr(fd, termios.TCSANOW, newattr)

    oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)

    c = None
    try:
        while 1:
            try:
                c = sys.stdin.read(1)
                break
            except IOError:
                # Non-blocking read had no data yet; keep spinning
                pass
    finally:
        # Restore the original terminal attributes and fd flags
        termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
        fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
    return c
# ANSI CSI prefix (ESC '[') emitted by the arrow keys
prefix = '\x1b\x5b'
# Full escape sequences produced by the four arrow keys
lookup = {
    '\x1b\x5b\x41': 'up',
    '\x1b\x5b\x42': 'down',
    '\x1b\x5b\x44': 'left',
    '\x1b\x5b\x43': 'right',
}
def get_arrow_key_or_character():
    """Return 'up'/'down'/'left'/'right' for an arrow key, else the raw input.

    Accumulates characters until the buffer is either a complete known
    escape sequence or can no longer be the start of one.
    """
    buf = ''
    while True:
        buf += getch()
        if buf in lookup:
            return lookup[buf]
        # No longer a (partial) arrow-key sequence: hand it back verbatim
        if buf and not prefix.startswith(buf):
            return buf
def menu(menu_items):
    """Render an interactive menu at the bottom of the terminal.

    Arrow keys move the highlight, Enter confirms.  Returns the selected
    item, or None when menu_items is empty.  (Python 2 print syntax.)
    """
    if not menu_items:
        return None
    # hide cursor
    sys.stdout.write("\033[?25l")
    sys.stdout.flush()
    try:
        term = Terminal()
        # Reserve enough lines at the bottom of the screen for the menu
        print '\n' * (len(menu_items) - 2)
        focus = 0
        while True:
            # Redraw every menu line, highlighting the focused one
            for i, line in enumerate(menu_items):
                with term.location(0, term.height - len(menu_items) + i):
                    if i == focus:
                        print term.on_red(term.bright_white(line)),
                    else:
                        print line,
            k = get_arrow_key_or_character()
            if k == 'down':
                focus += 1
            elif k == 'up':
                focus -= 1
            elif k == '\n':
                break
            # make sure we don't go outside menu
            if focus < 0:
                focus = 0
            if focus == len(menu_items):
                focus = len(menu_items) - 1
    finally:
        # show cursor again
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
        print ''  # Write a newline to avoid next output writing over the last line of the menu
    return menu_items[focus]
# Demo: let the user pick an item and echo the choice (Python 2 print)
m = menu(['foo 1', 'foo 2', 'foo 3', 'foo 4', 'foo 5', 'foo 6'])
print 'chosen:', m
|
from __future__ import unicode_literals
from django.core.management.base import NoArgsCommand
from doc.actions import parse_doc
from optparse import make_option
from docutil.commands_util import recocommand
from docutil.str_util import smart_decode
class Command(NoArgsCommand):
    """Management command that parses the document model of a project release."""

    option_list = NoArgsCommand.option_list + (
        make_option('--pname', action='store', dest='pname',
                    default='-1', help='Project unix name'),
        make_option('--dname', action='store', dest='dname',
                    default='-1', help='Document name'),
        make_option('--release', action='store', dest='release',
                    default='-1', help='Project Release'),
        make_option('--skip_refs', action='store_true', dest='skip_refs',
                    default=False, help='Skip code reference identification'),
    )
    help = "Parse document model"

    @recocommand
    def handle_noargs(self, **options):
        """Decode CLI options and delegate to parse_doc.

        parse_doc's last argument enables code reference parsing, hence
        the negation of --skip_refs.
        """
        pname = smart_decode(options.get('pname'))
        dname = smart_decode(options.get('dname'))
        release = smart_decode(options.get('release'))
        skip = options.get('skip_refs')
        parse_doc(pname, dname, release, not skip)
|
import unittest2 as unittest
from nose.plugins.attrib import attr
from mock import patch, MagicMock, call
from jnpr.junos.exception import FactLoopError
from jnpr.junos import Device
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
__author__ = "Stacy Smith"
__credits__ = "Jeremy Schulman, Nitin Kumar"
@attr('unit')
class TestFactCache(unittest.TestCase):
    """Unit tests for the on-demand fact cache of jnpr.junos.Device.

    setUp opens a Device against a fully mocked ncclient manager; the
    individual tests then swap in the module-level fact callbacks below
    to drive lookup, refresh and error paths.
    """

    @patch('ncclient.manager.connect')
    def setUp(self, mock_connect):
        # Device.open() goes through the mocked ncclient connect
        mock_connect.side_effect = self._mock_manager_setup
        self.dev = Device(host='1.1.1.1', user='rick', password='password123')
        self.dev.open()

    def test_factcache_unknown_fact(self):
        # A fact with no registered callback raises KeyError
        with self.assertRaises(KeyError):
            unknown = self.dev.facts['unknown']

    def test_factcache_fact_loop(self):
        # The personality fact calls the
        # model fact.
        # Change the callback for the model
        # fact to be the same as the personality fact
        # in order to induce a fact loop.
        self.dev.facts._callbacks['model'] = \
            self.dev.facts._callbacks['personality']
        # Now, trying to fetch the personality
        # fact should cause a FactLoopError
        with self.assertRaises(FactLoopError):
            personality = self.dev.facts['personality']

    def test_factcache_return_unexpected_fact(self):
        # Create a callback for the foo fact.
        self.dev.facts._callbacks['foo'] = get_foo_bar_fact
        # Now, trying to access the foo fact should cause a
        # RunTimeError because the bar fact is also unexpectedly provided
        with self.assertRaises(RuntimeError):
            foo = self.dev.facts['foo']

    @patch('jnpr.junos.factcache.warnings')
    def test_factcache_nonmatching_old_and_new_fact(self, mock_warn):
        # Set fact style to 'both'
        self.dev._fact_style = 'both'
        # Create a callback for the foo fact.
        self.dev.facts._callbacks['foo'] = get_foo_fact
        # Cache the new-style foo fact
        self.dev.facts._cache['foo'] = 'foo'
        # Set the old-style foo fact to a different value
        self.dev._ofacts['foo'] = 'bar'
        # Now, trying to access the foo fact should cause a
        # RunTimeWarning because the values of the new and old-style facts
        # do not match
        foo = self.dev.facts['foo']
        mock_warn.assert_has_calls([call.warn(
            'New and old-style facts do not match for the foo fact.\n'
            ' New-style value: foo\n Old-style value: bar\n',
            RuntimeWarning)])

    def test_factcache_fail_to_return_expected_fact(self):
        # Create a callback for the foo fact.
        self.dev.facts._callbacks['foo'] = get_bar_fact
        self.dev.facts._callbacks['bar'] = get_bar_fact
        # Now, trying to access the foo fact should cause a
        # RunTimeError because the foo fact is not provided
        with self.assertRaises(RuntimeError):
            foo = self.dev.facts['foo']

    def test_factcache_delete_fact(self):
        # Create a callback for the foo fact.
        self.dev.facts._callbacks['foo'] = get_foo_fact
        foo = self.dev.facts['foo']
        # Now, trying to delete the foo fact should cause a
        # RunTimeError
        with self.assertRaises(RuntimeError):
            self.dev.facts.pop('foo', None)

    def test_factcache_set_fact(self):
        # Create a callback for the foo fact.
        self.dev.facts._callbacks['foo'] = get_foo_fact
        foo = self.dev.facts['foo']
        # Now, trying to set the foo fact should cause a
        # RunTimeError
        with self.assertRaises(RuntimeError):
            self.dev.facts['foo'] = 'bar'

    def test_factcache_iter_facts(self):
        # Override the callbacks; facts starting with '_' are hidden
        self.dev.facts._callbacks = {'foo': get_foo_fact,
                                     'bar': get_bar_fact,
                                     '_hidden': get_foo_bar_fact}
        # Now, get the length of the facts
        self.assertEqual(len(list(self.dev.facts)), 2)

    def test_factcache_len_facts(self):
        # Override the callbacks
        self.dev.facts._callbacks = {'foo': get_foo_fact,
                                     'bar': get_bar_fact}
        # Now, get the length of the facts
        self.assertEqual(len(self.dev.facts), 2)

    def test_factcache_string_repr(self):
        # Override the callbacks to only support foo and bar facts.
        self.dev.facts._callbacks = {'foo': get_foo_fact,
                                     'bar': get_bar_fact}
        # Set values for foo and bar facts
        self.dev.facts._cache['foo'] = 'foo'
        self.dev.facts._cache['bar'] = {'bar': 'bar'}
        # Now, get the string (pretty) representation of the facts
        self.assertEqual(str(self.dev.facts), "{'bar': {'bar': 'bar'}, "
                                              "'foo': 'foo'}")

    def test_factcache_repr_facts(self):
        # Override the callbacks
        self.dev.facts._callbacks = {'foo': get_foo_fact,
                                     'bar': get_bar_fact}
        # Now, get the length of the facts
        self.assertEqual(str(self.dev.facts), "{'bar': 'bar', 'foo': 'foo'}")

    def test_factcache_refresh_single_key(self):
        # Override the callbacks
        self.dev.facts._callbacks = {'foo': get_foo_fact,
                                     'bar': get_bar_fact,
                                     '_hidden': get_hidden_fact}
        # Populate the cache
        self.dev.facts._cache['foo'] = 'before'
        self.dev.facts._cache['bar'] = 'before'
        self.dev.facts._cache['_hidden'] = 'before'
        # Confirm the cached values
        self.assertEqual(self.dev.facts['foo'], 'before')
        self.assertEqual(self.dev.facts['bar'], 'before')
        self.assertEqual(self.dev.facts['_hidden'], 'before')
        # Refresh just the foo fact
        self.dev.facts._refresh(keys='foo')
        # Confirm the values now
        self.assertEqual(self.dev.facts['foo'], 'foo')
        self.assertEqual(self.dev.facts['bar'], 'before')
        self.assertEqual(self.dev.facts['_hidden'], 'before')

    def test_factcache_refresh_two_keys(self):
        # Override the callbacks
        self.dev.facts._callbacks = {'foo': get_foo_fact,
                                     'bar': get_bar_fact,
                                     '_hidden': get_hidden_fact}
        # Populate the cache
        self.dev.facts._cache['foo'] = 'before'
        self.dev.facts._cache['bar'] = 'before'
        self.dev.facts._cache['_hidden'] = 'before'
        # Confirm the cached values
        self.assertEqual(self.dev.facts['foo'], 'before')
        self.assertEqual(self.dev.facts['bar'], 'before')
        self.assertEqual(self.dev.facts['_hidden'], 'before')
        # Refresh the foo and _hidden facts
        self.dev.facts._refresh(keys=('foo', '_hidden'))
        # Confirm the values now
        self.assertEqual(self.dev.facts['foo'], 'foo')
        self.assertEqual(self.dev.facts['bar'], 'before')
        self.assertEqual(self.dev.facts['_hidden'], True)

    def test_factcache_refresh_unknown_fact(self):
        # Override the callbacks
        self.dev.facts._callbacks = {'foo': get_foo_fact,
                                     '_hidden': get_hidden_fact}
        # Populate the cache
        self.dev.facts._cache['foo'] = 'before'
        self.dev.facts._cache['_hidden'] = 'before'
        # Confirm the cached values
        self.assertEqual(self.dev.facts['foo'], 'before')
        self.assertEqual(self.dev.facts['_hidden'], 'before')
        # Refresh just the unknown bar fact which should raise a RuntimeError
        with self.assertRaises(RuntimeError):
            self.dev.facts._refresh(keys=('bar'))

    def test_factcache_refresh_all_facts(self):
        # Override the callbacks
        self.dev.facts._callbacks = {'foo': get_foo_fact,
                                     'bar': get_bar_fact,
                                     '_hidden': get_hidden_fact}
        # Populate the cache
        self.dev.facts._cache['foo'] = 'before'
        self.dev.facts._cache['bar'] = 'before'
        self.dev.facts._cache['_hidden'] = 'before'
        # Confirm the cached values
        self.assertEqual(self.dev.facts['foo'], 'before')
        self.assertEqual(self.dev.facts['bar'], 'before')
        self.assertEqual(self.dev.facts['_hidden'], 'before')
        # Refresh all facts
        self.dev.facts._refresh()
        # Confirm the values now
        self.assertEqual(self.dev.facts['foo'], 'foo')
        self.assertEqual(self.dev.facts['bar'], 'bar')
        self.assertEqual(self.dev.facts['_hidden'], True)

    @patch('jnpr.junos.device.warnings')
    def test_factcache_refresh_exception_on_failure(self, mock_warn):
        with self.assertRaises(ValueError):
            # Refresh all facts with exception on failure
            self.dev.facts._refresh(exception_on_failure=True)

    @patch('jnpr.junos.device.warnings')
    @patch('jnpr.junos.factcache.warnings')
    def test_factcache_refresh_warnings_on_failure(self,
                                                   mock_warn,
                                                   mock_device_warn):
        # Refresh all facts with warnings on failure
        self.dev.facts._refresh(warnings_on_failure=True)
        mock_warn.assert_has_calls([call.warn(
            'Facts gathering is incomplete. To know the reason call '
            '"dev.facts_refresh(exception_on_failure=True)"',
            RuntimeWarning)])
        # mock_warn.assert_called_once('Facts gathering is incomplete. '
        #                              'To know the reason call '
        #                              '"dev.facts_refresh('
        #                              'exception_on_failure=True)"',
        #                              RuntimeWarning)

    def _mock_manager_setup(self, *args, **kwargs):
        # Stand-in for ncclient.manager.connect used by setUp
        if kwargs:
            device_params = kwargs['device_params']
            device_handler = make_device_handler(device_params)
            session = SSHSession(device_handler)
            return Manager(session, device_handler)
def get_foo_fact(device):
    """Fact callback that provides only the foo fact."""
    return dict(foo='foo')
def get_foo_bar_fact(device):
    """Fact callback that provides both the foo and bar facts."""
    return dict(foo='foo', bar='bar')
def get_bar_fact(device):
    """Fact callback that provides only the bar fact."""
    return dict(bar='bar')
def get_hidden_fact(device):
    """Fact callback that provides the hidden _hidden fact."""
    return dict(_hidden=True)
|
#!/usr/bin/python2.7
import sqlalchemy as sqla
import codecs, re
# Path to the game's UI string XML dump (adjust per installation)
uistring = '/mnt/500G/Games/dragonnest/extract/resource/uistring/uistring.xml'
# Matches one <message mid="..."><![CDATA[...]]></message> element;
# DOTALL lets the CDATA payload span embedded newlines.
message_re = re.compile(r'<message mid="(\d+)"><!\[CDATA\[(.+)\]\]></message>', re.UNICODE|re.DOTALL)
def readlines(f, bufsize):
    """Yield '\\r\\n'-separated lines from file object *f*.

    Reads *bufsize* characters at a time so arbitrarily large files can
    be streamed.  The trailing fragment after the last separator (which
    may be an empty string) is yielded as the final item.
    """
    pending = u''
    while True:
        chunk = f.read(bufsize)
        if not chunk:
            break
        pending += chunk
        pieces = pending.split('\r\n')
        # Keep the (possibly partial) last piece for the next chunk
        pending = pieces.pop()
        for piece in pieces:
            yield piece
    yield pending
# Parse every <message> element out of the XML dump and bulk-insert the
# results into an SQLite table named UISTRING.
messages = []
with codecs.open(uistring, encoding='utf-8', mode='r') as f:
    for line in readlines(f, 524288):
        match = message_re.match(line)
        if match:
            messages.append({ 'id' : int(match.group(1)), '_Message' : match.group(2) })

# Create the target table (if missing) and load all rows in one execute
engine = sqla.create_engine('sqlite:///dnt.db', echo=False)
metadata = sqla.MetaData()
table = sqla.Table('UISTRING', metadata,
                   sqla.Column('id', sqla.Integer, primary_key=True),
                   sqla.Column('_Message', sqla.Text))
metadata.create_all(engine)
engine.connect().execute(table.insert(), messages)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
# Long description shown on PyPI is the README plus the changelog
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

# No runtime dependencies; pytest is needed only for the test suite
requirements = []
setup_requirements = ['pytest-runner']
test_requirements = ['pytest']

setup(
    name='yamicache',
    version='0.6.0',
    description="Yet another in-memory caching package",
    long_description=readme + '\n\n' + history,
    author="Timothy McFadden",
    author_email='tim@timandjamie.com',
    url='https://github.com/mtik00/yamicache',
    packages=find_packages(include=['yamicache']),
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=True,
    keywords='yamicache',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    setup_requires=setup_requirements,
)
|
# Copyright 2006-2014 Mark Diekhans
import unittest, sys, cPickle
if __name__ == '__main__':
sys.path.extend(["../../..", "../../../.."])
from pycbio.sys.symEnum import SymEnum, SymEnumValue
from pycbio.sys.testCaseBase import TestCaseBase
class Color(SymEnum):
    """Simple symbolic enumeration used by the basic tests."""
    red = 1
    green = 2
    blue = 3
class GeneFeature(SymEnum):
    """Enumeration with external display names and an alias (coding -> cds)."""
    promoter = 1
    utr5 = SymEnumValue(2, "5'UTR")
    cds = SymEnumValue(3, "CDS")
    utr3 = SymEnumValue(4, "3'UTR")
    coding = cds
class SymEnumTests(TestCaseBase):
    """Tests for SymEnum: lookup, aliases, set membership, pickling and
    external display names (Python 2 cPickle)."""

    def testBasics(self):
        self.assertEqual(Color.red.name, "red")
        self.assertEqual(Color.green.name, "green")
        self.assertEqual(Color.blue.name, "blue")
        self.assertTrue(Color.red < Color.blue)
        self.assertTrue(Color.red == Color.red)
        self.assertTrue(Color.red != Color.blue)
        self.assertTrue(Color.red is not None)
        self.assertTrue(None != Color.red)

    def testLookup(self):
        # Construction from the member name returns the member itself
        self.assertTrue(Color.red == Color("red"))
        self.assertTrue(Color.green == Color("green"))
        self.assertTrue(Color.green != Color("red"))

    def testStrings(self):
        self.assertTrue(str(Color.red) == "red")
        self.assertTrue(str(Color.green) == "green")
        self.assertTrue(sorted([str(c) for c in Color]), ["red", "green", "blue"])

    def testAliases(self):
        # Aliases resolve to the canonical member and do not add entries
        class Name(SymEnum):
            Fred = 1
            Rick = 2
            Richard = Dick = HeyYou = Rick
            Bill = 3
        self.assertTrue(Name("Richard") is Name.Rick)
        self.assertEqual(Name("Dick"), Name.Rick)
        self.assertTrue(Name("Dick") is Name.Rick)
        self.assertTrue(Name("Rick") == Name.Rick)
        self.assertTrue(Name("HeyYou") == Name.Rick)
        self.assertTrue(Name("Fred") == Name.Fred)
        self.assertTrue(Name("Fred") is Name.Fred)
        self.assertEqual([n for n in Name], [Name.Fred, Name.Rick, Name.Bill])

    def testSetOps(self):
        colSet = set([Color.blue, Color.green])
        self.assertTrue(Color.green in colSet)
        self.assertFalse(Color.red in colSet)

    def testNumberDef(self):
        # Negative, zero and positive values round-trip through .value
        class NumDef(SymEnum):
            neg = -2
            zero = 0
            pos= 2
            big = 3
        values = [(v.name, v.value) for v in NumDef]
        self.assertEqual(values, [('neg', -2), ('zero', 0), ('pos', 2), ('big', 3)])
        self.assertEqual(NumDef(2), NumDef.pos)

    def __testColorPickleProtocol(self, protocol):
        # Round-trip both the class and members through the given protocol
        stuff = {Color.red: "red one",
                 Color.green: "green one"}
        world = cPickle.dumps((Color, stuff,), protocol)
        color, stuff2 = cPickle.loads(world)
        self.assertTrue(Color.red in stuff2)
        self.assertTrue(Color.green in stuff2)

    def testColorPickle2(self):
        self.assertTrue(cPickle.HIGHEST_PROTOCOL == 2)
        self.__testColorPickleProtocol(2)

    def testColorPickle1(self):
        self.__testColorPickleProtocol(1)

    def testColorPickle0(self):
        self.__testColorPickleProtocol(0)

    def testExtNameLookup(self):
        # Members are constructible from both the member name and the
        # external display name
        self.assertEqual(GeneFeature.promoter, GeneFeature("promoter"))
        self.assertEqual(GeneFeature.utr5, GeneFeature("5'UTR"))
        self.assertEqual(GeneFeature.utr5, GeneFeature("utr5"))
        self.assertEqual(GeneFeature.cds, GeneFeature("CDS"))
        self.assertEqual(GeneFeature.utr3, GeneFeature("3'UTR"))
        self.assertEqual(GeneFeature.utr3, GeneFeature("utr3"))
        self.assertEqual(GeneFeature.cds, GeneFeature("coding"))

    def testExtNameStrings(self):
        # str() renders the external display name when one was given
        self.assertEqual(str(GeneFeature.promoter), "promoter")
        self.assertEqual(str(GeneFeature.utr5), "5'UTR")
        self.assertEqual(str(GeneFeature.cds), "CDS")
        self.assertEqual(str(GeneFeature.utr3), "3'UTR")
        self.assertNotEqual(str(GeneFeature.utr3), "utr3")
        self.assertEqual(str(GeneFeature.coding), "CDS")
        self.assertEqual(sorted([str(c) for c in GeneFeature]), ["3'UTR", "5'UTR", "CDS", "promoter"])

    def __testGeneFeaturePickleProtocol(self, protocol):
        stuff = {GeneFeature.utr3: "UTR'3 one",
                 GeneFeature.cds: "CDS one"}
        world = cPickle.dumps((GeneFeature, stuff,), protocol)
        geneFeature, stuff2 = cPickle.loads(world)
        self.assertTrue(GeneFeature.utr3 in stuff2)
        self.assertTrue(GeneFeature.cds in stuff2)

    def testGeneFeaturePickle2(self):
        self.assertTrue(cPickle.HIGHEST_PROTOCOL == 2)
        self.__testGeneFeaturePickleProtocol(2)

    def testGeneFeaturePickle1(self):
        self.__testGeneFeaturePickleProtocol(1)

    def testGeneFeaturePickle0(self):
        self.__testGeneFeaturePickleProtocol(0)
def suite():
    """Build the test suite containing all SymEnum tests."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(SymEnumTests))
    return tests
if __name__ == '__main__':
    # Allow running this module directly as a test script
    unittest.main()
|
import uuid
import math
import time
import logging
import redis as engine
from zencore.errors import WrongParameterTypeError
from .types import smart_force_to_string
logger = logging.getLogger(__name__)
class RedisLock(object):
    """Distributed lock built on redis SETNX plus a blocking signal list.

    The lock key stores the owner's ``app_name``; waiters block on BLPOP
    of a companion "signal" list which the owner pushes to on release.
    """

    def __init__(self, url, name=None, app_name=None, expire=None, prefix="zencore:lock:", tick=5, **kwargs):
        # url:      redis connection URL
        # name:     optional lock name; key names are computed now if given
        # app_name: identity of this owner (random UUID by default)
        # expire:   optional TTL in seconds applied to the lock key
        # prefix:   namespace prefix for all redis keys used by the lock
        # tick:     maximum seconds to block on one BLPOP while waiting
        self.url = url
        self.connection = engine.Redis.from_url(url, **kwargs)
        self.app_name = app_name or str(uuid.uuid4())
        self.prefix = prefix
        self.expire = expire
        self.tick = tick
        if name:
            self.setup(name)

    def setup(self, name):
        """Compute the redis key names for the given lock name."""
        self.lock_name = ":".join([self.prefix, name])
        self.signal_name = ":".join([self.prefix, name, "signal"])

    def acquire(self, blocking=True, timeout=-1):
        """Acquire the lock; return True on success.

        blocking=False tries exactly once; timeout < 0 waits forever;
        timeout >= 0 bounds the total wall-clock wait in seconds.
        """
        # BUGFIX: time.clock() measured CPU time on Unix and was removed
        # in Python 3.8; a timeout needs wall-clock, so use monotonic().
        stime = time.monotonic()
        while True:
            if self.acquire_nowait():
                return True
            if not blocking:
                return False
            if timeout == 0:
                return False
            if timeout > 0:
                delta = math.ceil(timeout - (time.monotonic() - stime))
                # BUGFIX: delta == 0 previously reached blpop(timeout=0),
                # which blocks forever in redis; treat it as expired too.
                if delta <= 0:
                    return False
                if delta > self.tick:
                    delta = self.tick
            else:
                delta = self.tick
            # Wait up to `delta` seconds for a release signal, then retry.
            # BUGFIX: a BLPOP timeout used to abort even an unbounded wait;
            # retrying lets waiters recover a lock whose holder died after
            # the key's TTL expired. The overall timeout is still enforced
            # by the delta computation above.
            self.connection.blpop(self.signal_name, timeout=delta)

    def acquire_nowait(self):
        """Try to take the lock once without waiting; True on success."""
        # SETNX is atomic: only one client can create the key.
        if not self.connection.setnx(self.lock_name, self.app_name):
            return False
        if self.expire:
            # NOTE(review): setnx + expire is not atomic — a crash between
            # the two leaves a lock with no TTL; redis SET(nx=True, ex=...)
            # would do both in one command.
            self.connection.expire(self.lock_name, self.expire)
        # Drop stale release signals so old pushes cannot satisfy waiters
        self.connection.delete(self.signal_name)
        return True

    def release(self):
        """Release the lock if we own it and wake one waiter."""
        if self.is_lock_owner():
            self.connection.delete(self.lock_name)
            self.connection.rpush(self.signal_name, 1)

    def force_clean(self):
        """Unconditionally delete the lock and wake one waiter."""
        self.connection.delete(self.lock_name)
        self.connection.rpush(self.signal_name, 1)

    def get_current_lock_owner(self):
        """Return the app_name stored in the lock key (None-safe string)."""
        return smart_force_to_string(self.connection.get(self.lock_name))

    def is_lock_owner(self):
        """Tell whether this instance currently owns the lock."""
        return self.get_current_lock_owner() == self.app_name
class Counter(object):
    """Namespaced integer counters backed by a redis-compatible connection.

    Counters are stored under keys of the form "<namespace>:<name>".
    """

    def __init__(self, connection, namespace):
        self.connection = connection
        self.namespace = namespace

    def incr(self, name):
        """Increment counter *name* (redis creates it at 0 if absent)."""
        key = self.make_counter_key(name)
        self.connection.incr(key)

    def get(self, name):
        """Return the current value of *name*, or 0 if it was never incremented."""
        key = self.make_counter_key(name)
        value = self.connection.get(key)
        # BUG fixed: int(None) raised TypeError for missing counters.
        return int(value) if value is not None else 0

    def getall(self):
        """Return {full_key: value} for every counter in this namespace."""
        keys = self.connection.keys(self.make_counter_pattern())
        if not keys:
            return {}
        keys = [key.decode("utf-8") for key in keys]
        values = [int(value) for value in self.connection.mget(*keys)]
        return dict(zip(keys, values))

    def make_counter_key(self, name):
        """Build the namespaced redis key for counter *name*."""
        return "{}:{}".format(self.namespace, name)

    def make_counter_pattern(self):
        """Glob pattern matching every counter key in this namespace."""
        return "{}:*".format(self.namespace)
def get_redis(config):
    """Build a redis client from *config*.

    Accepts an existing client instance (returned unchanged), a URL
    string, or a dict carrying either a "url" key (with optional "db"
    and "options") or Redis keyword arguments including "host".
    Raises WrongParameterTypeError for anything else.
    """
    if isinstance(config, engine.StrictRedis):
        # Already a client -- pass it through untouched.
        return config
    if isinstance(config, str):
        return engine.Redis.from_url(config)
    if isinstance(config, dict):
        url = config.get("url")
        if url:
            options = config.get("options", {})
            return engine.Redis.from_url(url, config.get("db", None), **options)
        if config.get("host"):
            return engine.Redis(**config)
    logger.error("get_redis got wrong parameter type error.")
    raise WrongParameterTypeError()
# ###########################################################################
# Deprecated / duplicate names kept for backwards compatibility
# ###########################################################################
make_redis_instance = get_redis
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities that should not be distributed with source."""
__author__ = 'thmiller@google.com (Tom Miller)'
import atom
import inspect
# Types treated as leaves: printed but not recursed into.
dull_types = [str, unicode, dict, list, type(None)]
def walk_attributes(myobject, object_name, tabitem='=', step=True, tablevel=0):
  """Walk through attributes of an instance.

  Just flat out prints varying values of dir() for instances and their
  attributes, recursing into any attribute that is not a "dull" type.

  Args:
    myobject: instance to walk through
    object_name: Name of the instance being walked through
    tabitem: String to show depth into myobject. Set to '' to disable.
    step: bool Use raw_input('') after printing each attribute
    tablevel: Depth into myobject (starts at 0)

  Returns:
    Nothing.
  """
  print tabitem*tablevel + 'Object: ' + object_name
  print tabitem*tablevel + 'Type: ' + str(type(myobject))
  # Public, non-method attributes only.
  attr_list = [attr for attr in dir(myobject)
               if not attr.startswith('_') and
               not inspect.ismethod(getattr(myobject, attr))]
  print tabitem*tablevel + 'Attributes: '
  print tabitem*tablevel + str(attr_list)
  dull_attr = [attr for attr in attr_list
               if type(getattr(myobject, attr)) in dull_types]
  if dull_attr:
    print tabitem*tablevel + '(basic attributes: ' + str(dull_attr) + ')'
  # Everything that is not a dull type gets recursed into.
  loopable_attr = [attr for attr in attr_list
                   if not type(getattr(myobject, attr)) in dull_types]
  for attr_name in loopable_attr:
    new_object = getattr(myobject, attr_name)
    if step:
      # Pause so the user can read the output before descending.
      raw_input('')
    walk_attributes(new_object, attr_name, tablevel=tablevel+1)
|
#!/usr/bin/env python
# Copyright (c) 2011, Development Seed, Inc.
# 2011, Andrew Harvey <andrew.harvey4@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Development Seed, Inc. nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from sys import path
from os.path import join
import argparse
#################################
## argparse
# Command-line options: postgres connection parameters, projection, and
# the location of the shapefile layers used by the coastline layers.
parser = argparse.ArgumentParser(description='Configure an MML file with datasource settings')
parser.add_argument('--host', default='localhost')
parser.add_argument('--port', default='5432')
parser.add_argument('--dbname', default='abs')
parser.add_argument('--user', default='abs')
parser.add_argument('--password', default='abs')
parser.add_argument('--srs', default='+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over')
parser.add_argument('--shapedir', default='./layers/')
# Increase performance if you are only rendering a particular area by
# specifying a bounding box to restrict queries. Format is "XMIN,YMIN,XMAX,YMAX" in the
# same units as the database (probably spherical mercator meters). The
# whole world is "-20037508.34,-20037508.34,20037508.34,20037508.34".
# Leave blank to let Mapnik estimate.
parser.add_argument('--extent', default='12570320.00,-5403474.50,17711958.00,-1636391.88')
parser.add_argument('--mml', required=True)
args = parser.parse_args()
#################################
## configure mml
# Path to the project's .mml file: <script_dir>/<name>/<name>.mml
mml = join(path[0], args.mml + '/' + args.mml + '.mml')
shoreline_300 = args.shapedir.rstrip('/') + '/shoreline_300.shp'
processed_p = args.shapedir.rstrip('/') + '/processed_p.shp'
# Read the whole MML (a JSON document) into memory before rewriting it.
# BUG fixed: the stray "f.closed" statements were no-op attribute reads;
# the with-blocks already close the files.
with open(mml, 'r') as f:
    newf = json.loads(f.read())
with open(mml, 'w') as f:
    for layer in newf["Layer"]:
        if "Datasource" in layer:
            ds_type = layer["Datasource"].get("type")
            if ds_type and ds_type == "postgis":
                # Point every postgis layer at the configured database.
                layer["Datasource"]["host"] = args.host
                layer["Datasource"]["port"] = args.port
                layer["Datasource"]["dbname"] = args.dbname
                layer["Datasource"]["user"] = args.user
                layer["Datasource"]["password"] = args.password
                layer["Datasource"]["extent"] = args.extent
                layer["srs"] = args.srs
        else:
            # Coastline layers have no Datasource yet; attach the local
            # shapefiles in spherical-mercator projection.
            if layer["id"] == "shoreline_300":
                layer["Datasource"] = {}
                layer["Datasource"]["file"] = shoreline_300
                layer["Datasource"]["type"] = 'shape'
                layer["geometry"] = 'polygon'
                layer["srs"] = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over'
            elif layer["id"] == "processed_p":
                layer["Datasource"] = {}
                layer["Datasource"]["file"] = processed_p
                layer["Datasource"]["type"] = 'shape'
                layer["geometry"] = 'polygon'
                layer["srs"] = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over'
    f.write(json.dumps(newf, indent=2))
|
import sys
reload(sys)
# NOTE(review): sys.setdefaultencoding is a Python 2 hack (the reload()
# resurrects the API that site.py deletes); it forces UTF-8 for implicit
# str/unicode conversions. It has no Python 3 equivalent.
sys.setdefaultencoding("utf-8")
from flask import Flask, request, render_template
import requests
from bs4 import BeautifulSoup
import os
app = Flask(__name__)
# Sent with every outgoing scrape request so the target can identify us.
header = {"User-Agent":"instadown", "e-mail":"contato@contato.com"}
def get_data(url):
    """Scrape Open Graph metadata (title plus video or image URL) from *url*.

    Returns a dict with 'title' and either 'video' (preferred, when the
    page declares og:video) or 'image'; returns None when the page could
    not be fetched (non-200 status).
    """
    r = requests.get(url, headers=header)
    # BUG fixed: the title and image variables started out unbound, so a
    # page missing og:title or og:image raised UnboundLocalError below.
    _content_title = ''
    _url_video = ''
    _url_image = ''
    if r.status_code == 200:
        sopa = BeautifulSoup(r.content)
        for meta in sopa.findAll("meta"):
            content = meta.get("content")
            if content is None:
                continue
            prop = meta.get("property")
            if prop == "og:title":
                _content_title = content
            elif prop == "og:video":
                _url_video = content
            elif prop == "og:image":
                _url_image = content
        if _url_video == '':
            return dict(title=_content_title, image=_url_image)
        return dict(title=_content_title, video=_url_video)
    return None
@app.route('/', methods=['GET', 'POST'])
def post():
    # On POST, scrape the submitted URL and render the result; on GET,
    # render the empty form.
    if request.method == 'POST':
        _url = request.form['url']
        data = get_data(_url)
        print data
        return render_template('home.html', content_dow=data)
    return render_template('home.html')
if __name__ == '__main__':
    # PORT is provided by the hosting platform (e.g. Heroku); default 5000.
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port)
|
"""
Provides useful functions and classes. Most useful are probably
printTreeDocs and printTreeSpec.
:copyright: Copyright since 2006 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE_BSD_Simple.txt for details.
"""
import sys
__all__ = ('printImported', 'Callback')
def printImported():
    """Output an alphabetical list of pubsub modules imported so far."""
    # 'in' is the idiomatic (and faster) form of name.find(...) >= 0;
    # sorted() builds the ordered list in one step.
    modules = sorted(name for name in sys.modules.keys() if 'pubsub' in name)
    print('\n'.join(modules))
class Callback:
    """Wrap a free function so it can be stored as instance data.

    Assigning a plain function to an attribute (``self.a = func``) makes
    Python treat later calls ``obj.a()`` as method calls on the instance.
    Wrapping it (``self.a = Callback(func)``) keeps it callable as a
    free function while stored on the instance.
    """

    def __init__(self, callable_obj):
        self._wrapped = callable_obj

    def __call__(self, *args, **kwargs):
        # Forward everything verbatim to the wrapped callable.
        return self._wrapped(*args, **kwargs)
|
from django.template import RequestContext
from django.core.context_processors import csrf
from django.shortcuts import render_to_response
from django.http import Http404, HttpResponse, HttpResponseRedirect
import json
from urllib import urlopen
from subprocess import call
def test(request):
    # Smoke-test view: renders the static test template with no context.
    c = {}
    return render_to_response('test.html', c)
def list_spiders(request):
    # Proxy scrapyd's listspiders endpoint for the "default" project.
    response = urlopen("http://127.0.0.1:6800/listspiders.json?project=default")
    return HttpResponse(response.read(), content_type="application/json")
def list_jobs(request):
    # Proxy scrapyd's listjobs endpoint for the "default" project.
    response = urlopen("http://127.0.0.1:6800/listjobs.json?project=default")
    return HttpResponse(response.read(), content_type="application/json")
def list_items(request, spider, dump):
    # Fetch scraped items from scrapyd's items feed. With an empty spider
    # name the project-level listing is returned; otherwise the specific
    # spider/dump file is fetched.
    BASE = "http://127.0.0.1:6800/items/default"
    if spider == '':
        url = BASE
    else:
        print "spider: ", spider
        print "dump: ", dump
        url = BASE + '/' + spider + '/' + dump
    print "url: ", url
    response = urlopen(url)
    return HttpResponse(response.read())
def deploy(request, spider):
    """Schedule a crawl of *spider* on the local scrapyd (project "default")."""
    # NOTE(review): shelling out to curl just to POST a form is fragile;
    # urlopen with a data payload would avoid the external binary.
    url = "curl http://127.0.0.1:6800/schedule.json -d project=default -d spider=%s" % (spider)
    try:
        call(url.split())
        return HttpResponse("Done!")
    # BUG fixed: the bare "except:" also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to ordinary errors.
    except Exception:
        return HttpResponse("Failed!")
def render(request, spider, job):
    """Fetch a job's scraped items (.jl feed) and return them as a JSON array.

    Scrapyd serves items in JSON Lines format (one JSON object per line);
    a textual 404 page is passed through verbatim.
    """
    url = "http://127.0.0.1:6800/items/default/%s/%s.jl" % (spider, job)
    response = urlopen(url).read()
    if "404 - No Such Resource" in response:
        return HttpResponse(response)
    # BUG fixed: the original removed blank lines via an exception-driven
    # while/remove loop with a bare except; filtering while parsing is
    # equivalent and cannot mask unrelated errors.
    items = [json.loads(line) for line in response.split('\n') if line]
    return HttpResponse(json.dumps(items), content_type="application/json")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The experiment with 10 Hz/5Hz, wisp, attention, 70, cA 5, delta, theta, alpha low, alpha high, beta low, beta high, batch size = 10 and
balanced data set
@author: yaric
"""
import experiment as ex
import config
from time import time
# Autoencoder size and training batch size for this experiment run.
n_hidden = 5
batch_size = 10
experiment_name = 'cA_%d_%d_dt-th-a_l-a_h-b_l-b_h' % (n_hidden, batch_size) # will be used as parent dir for analyzer results
# The sample records identifiers
signal_ids = ['IO_10_2', 'IO_TXT', 'IO_SKY', 'KS_10_2', 'RO_10_2']
noise_ids = ['noise']
# Setup analyzer configuration
analyzer_config = ex.defaultAnalyzerConfig()
analyzer_config['batch_size'] = batch_size
analyzer_config['learning_rate'] = 0.1
analyzer_config['n_hidden'] = n_hidden
analyzer_config['training_epochs'] = 50000
analyzer_config['encoder'] = 'cA'
analyzer_config['bands'] = 'delta,theta,alpha_l,alpha_h,beta_l,beta_h'
start = time()
#
# Run analyzer over both signal and noise records with the same settings.
#
print("\nStart analysis with parameters:\n%s\n" % analyzer_config)
print("Start analysis for signal records: %s" % signal_ids)
ex.runEEGAnalyzerWithIDs(ids_list=signal_ids,
                        experiment_name=experiment_name,
                        a_config=analyzer_config)
print("Start analysis for noise records: %s" % noise_ids)
ex.runEEGAnalyzerWithIDs(ids_list=noise_ids,
                        experiment_name=experiment_name,
                        a_config=analyzer_config)
#
# Run classifiers over the analyzer output directories produced above.
#
signal_dir = "%s/%s" % (config.analyzer_out_dir, experiment_name)
noise_dir = "%s/%s/%s" % (config.analyzer_out_dir, experiment_name, noise_ids[0])
out_suffix = experiment_name
print("Run classifiers over analyzed records. \nSignal dir: %s\nNoise dir: %s"
      % (signal_dir, noise_dir))
ex.runClassifier(signal_dir=signal_dir,
                 signal_records=signal_ids,
                 noise_dir=noise_dir,
                 out_suffix=out_suffix)
print("\n\nExperiment %s took %.2f seconds.\n"
      % (experiment_name, time() - start))
|
# -*- coding: utf-8 -*-
#
# django-tellafriend documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 6 20:14:06 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-tellafriend'
copyright = u'2010, Philipp Bosch'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-tellafrienddoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-tellafriend.tex', u'django-tellafriend Documentation',
u'Philipp Bosch', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-tellafriend', u'django-tellafriend Documentation',
[u'Philipp Bosch'], 1)
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import threading
# Rough shape check for an IPv4 address: digits and dots only
# (does not validate octet count or ranges).
pattern = r'^[0-9.]+$'
try:
    # NOTE(review): raw_input is Python 2 only, matching the rest of this
    # script; use input() under Python 3.
    addr = raw_input('input ip address: \n')
    if not re.match(pattern, addr):
        # Fall back to a default address when the input looks wrong.
        print('A incorrect address')
        addr = '192.168.1.1'
except ValueError:
    print('A incorrect address')
# Look the address up in the ipset lists on each remote host.
command = 'ipset -L | grep ' + addr
os.system('ssh -24xCt tech@base221 sudo {0}'.format(command))
os.system('ssh -24xCt tech@base222 sudo {0}'.format(command))
os.system('ssh -24xCt tech@base241 sudo {0}'.format(command))
def check(command):
    # Run the same lookup on an additional host under a different account.
    os.system('ssh -24xCt denis@{0} {1} {2}'.format('echo21','sudo',command))
# BUG fixed: Thread args must be a sequence; passing the bare string made
# Thread splat it character-by-character into check(), raising TypeError.
# (threading was also used without being imported -- see imports above.)
p1 = threading.Thread(target=check, name="t1", args=(command,))
p1.start()
#def check_ip(server, command):
#    os.system('ssh -24xCt denis@{0} {1} {2}'.format(server,'sudo',command))
#check1 = threading.Thread(target=check_ip, args = ('echo21', command))
#check1 = threading.Thread(target=check_ip, args = ('echo22', command))
#check1 = threading.Thread(target=check_ip, args = ('echo41', command))
#check1 = threading.Thread(target=check_ip, args = ('delta91', command))
#check1 = threading.Thread(target=check_ip, args = ('delta92', command))
#check1 = threading.Thread(target=check_ip, args = ('delta8', command))
|
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
# ==========================================================================
# Copyright (C) 2016 Dr. Alejandro Pina Ortega
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==========================================================================
"""
Class for slots of type 1
"""
# ==========================================================================
# Program: type1.py
# Author: ajpina
# Date: 12/23/16
# Version: 0.1.1
#
# Revision History:
# Date Version Author Description
# - 12/23/16: 0.1.1 ajpina Defines mandatory methods and properties
#
# ==========================================================================
__author__ = 'ajpina'
import numpy as np
from uffema.slots import Slot
from uffema.misc.constants import *
class Type1(Slot):
    """Stator slot of type 1 (trapezoidal slot body with a plain opening).

    Geometry methods return (points, lines) pairs: points map a label to
    [x, y, z] coordinates; lines map a label to a list of point labels
    (a middle '1' appears to denote an arc through the origin-centred
    circle, and a negative label reuses an existing line reversed --
    NOTE(review): inferred from usage, confirm against the mesh builder).
    All dimensions are metres; angles are radians.
    """

    @property
    def h0(self):
        # Slot opening height.
        return self._h0

    @h0.setter
    def h0(self, value):
        self._h0 = value

    @property
    def h2(self):
        # Slot body height.
        return self._h2

    @h2.setter
    def h2(self, value):
        self._h2 = value

    @property
    def w0(self):
        # Slot opening width.
        return self._w0

    @w0.setter
    def w0(self, value):
        self._w0 = value

    @property
    def w1(self):
        # Slot body width at the top (opening side).
        return self._w1

    @w1.setter
    def w1(self, value):
        self._w1 = value

    @property
    def w2(self):
        # Slot body width at the base.
        return self._w2

    @w2.setter
    def w2(self, value):
        self._w2 = value

    @property
    def so_position(self):
        # Angular position of the slot opening.
        return self._so_position

    @so_position.setter
    def so_position(self, value):
        self._so_position = value

    @property
    def s_position(self):
        # Angular position of the slot.
        return self._s_position

    @s_position.setter
    def s_position(self, value):
        self._s_position = value

    @property
    def liner_thickness(self):
        # Insulation liner thickness (metres).
        return self._liner_thickness

    @liner_thickness.setter
    def liner_thickness(self, value):
        self._liner_thickness = value

    @property
    def type(self):
        # Type string accumulated along the class hierarchy.
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def __init__(self, slot_settings, stator_mode):
        super(Type1, self).__init__(slot_settings)
        self.h0 = slot_settings['h0']
        self.h2 = slot_settings['h2']
        self.w0 = slot_settings['w0']
        self.w1 = slot_settings['w1']
        self.w2 = slot_settings['w2']
        self.so_position = slot_settings['SOpos']
        self.s_position = slot_settings['Spos']
        # It is assumed an insulation liner of 0.5mm thickness
        self.liner_thickness = 0.5e-3
        self.type = self.type + 'Type1'

    def get_slot_center(self):
        """Radial offset of the slot centre (2/5 into the slot body)."""
        return self.h0 + (2.0/5.0)*self.h2

    def get_type(self):
        return 'Type1'

    def get_area(self):
        # NOTE(review): stub -- the slot cross-section area is not computed.
        return 0

    def get_slot_total_height(self):
        """Opening height plus body height."""
        return self.h0 + self.h2

    def get_conductor_area_width(self):
        """Mean of top and base widths of the slot body."""
        return (self.w1 + self.w2) / 2.0

    def get_conductor_area_height(self):
        return self.h2

    def get_coil_area_base_point(self, inner_radius):
        """Radius at which the coil area starts (above the opening)."""
        return inner_radius + self.h0

    def get_slot_opening_geometry(self, inner_radius):
        # BUG fixed (operator precedence): the original computed
        # arcsin(-(w0/2)/inner_radius + h0), adding the length h0 to a
        # dimensionless sine argument. The radius at the opening bottom is
        # (inner_radius + h0), matching point '4' below and the analogous
        # formula in get_backiron_geometry.
        angle_slot_opening_bottom = np.arcsin(-(self.w0 / 2.0) / (inner_radius + self.h0))
        angle_slot_opening_top = np.arcsin(-(self.w0 / 2.0) / inner_radius)
        # NOTE(review): point '5' sits at inner_radius but uses the bottom
        # angle; angle_slot_opening_top is currently unused -- verify intent.
        points = {
            '2': [inner_radius, 0, 0],
            '3': [inner_radius + self.h0, 0, 0],
            '4': [(inner_radius + self.h0)*np.cos(angle_slot_opening_bottom), (inner_radius + self.h0)*np.sin(angle_slot_opening_bottom) , 0],
            '5': [(inner_radius)*np.cos(angle_slot_opening_bottom), (inner_radius)*np.sin(angle_slot_opening_bottom) , 0]
        }
        lines = {
            '1': [2, 3],
            '2': [3, 4],
            '3': [4, 5],
            '4': [5, 2]
        }
        return points, lines

    def get_slot_wedge_geometry(self, inner_radius):
        # Type 1 slots have no wedge.
        points = None
        lines = None
        return points, lines

    def get_backiron_geometry(self, inner_radius, outer_radius, slot_number):
        slot_pitch = 360 * DEG2RAD / slot_number
        angle_slot_base = np.arcsin(-(self.w2 / 2.0) / (inner_radius + self.h2))
        points = {
            '6': [inner_radius + self.h2, 0, 0],
            '7': [outer_radius, 0, 0],
            '8': [outer_radius * np.cos( -slot_pitch/2.0 ), outer_radius * np.sin( -slot_pitch/2.0 ), 0],
            '9': [(inner_radius + self.h0 + self.h2) * np.cos( -slot_pitch/2.0 ),
                  (inner_radius + self.h0 + self.h2) * np.sin( -slot_pitch/2.0 ) , 0],
            '10': [(inner_radius + self.h2) * np.cos(angle_slot_base),
                   (inner_radius + self.h2) * np.sin(angle_slot_base), 0]
        }
        lines = {
            '5': [6, 7],
            '6': [7, 1, 8],
            '7': [8, 9],
            '8': [9, 10],
            '9': [10, 1, 6]
        }
        return points, lines

    def get_tooth_geometry(self, inner_radius, slot_number):
        slot_pitch = 360 * DEG2RAD / slot_number
        angle_slot_top = np.arcsin(-(self.w1 / 2.0) / (inner_radius + self.h0))
        points = {
            '11': [(inner_radius + self.h0 ) * np.cos( -slot_pitch/2.0 ),
                   (inner_radius + self.h0 ) * np.sin( -slot_pitch/2.0 ) , 0],
            '12': [(inner_radius + self.h0) * np.cos(angle_slot_top), (inner_radius + self.h0) * np.sin(angle_slot_top),
                   0]
        }
        lines = {
            '10': [9, 11],
            '11': [11, 1, 12],
            '12': [12, 10],
            '-8': [0]
        }
        return points, lines

    def get_coil_area_geometry(self, inner_radius):
        # The coil area is bounded entirely by lines already defined above.
        points = None
        lines = {
            '13': [12, 1, 4],
            '-2': [0],
            '14': [3, 6],
            '-9': [0],
            '-12': [0]
        }
        return points, lines

    def get_toothtip_geometry(self, inner_radius, slot_number):
        slot_pitch = 360 * DEG2RAD / slot_number
        points = {
            '14': [inner_radius * np.cos( -slot_pitch/2.0 ), inner_radius * np.sin( -slot_pitch/2.0 ) , 0]
        }
        lines = {
            '15': [11, 14],
            '16': [14, 1, 5],
            '-3': [0],
            '-13': [0],
            '-11': [0]
        }
        return points, lines

    def get_stator_airgap_geometry(self, airgap_radius, slot_number):
        slot_pitch = 360 * DEG2RAD / slot_number
        points = {
            '15': [airgap_radius * np.cos( -slot_pitch/2.0 ), airgap_radius * np.sin( -slot_pitch/2.0 ) , 0],
            '16': [airgap_radius, 0, 0]
        }
        lines = {
            '17': [14, 15],
            '18': [15, 1, 16],
            '19': [16, 2],
            '-4': [0],
            '-16': [0]
        }
        return points, lines

    def get_stator_airgap_boundary(self):
        return {'18': [15, 1, 16]}

    def get_outer_stator_boundary(self):
        return [6]

    def get_master_boundary(self):
        return [7, 10, 15, 17]
|
# -*- coding: utf-8 -*-
"""
This file is part of Radar.
Radar is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Radar is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Lesser GNU General Public License for more details.
You should have received a copy of the Lesser GNU General Public License
along with Radar. If not, see <http://www.gnu.org/licenses/>.
Copyright 2015 Lucas Liendo.
"""
from errno import EINTR
from abc import ABCMeta
from argparse import ArgumentParser
from ..logger import RadarLogger
from ..platform_setup import Platform
class CLIError(Exception):
    """Raised when a requested command-line option does not exist."""
    pass
class RadarLauncherError(Exception):
    """Raised when the launcher cannot configure the current platform."""
    pass
class CLI(object):
    """Command-line interface wrapper exposing parsed options as attributes."""

    def __init__(self, default_main_config_path, program_name='', version=''):
        # Program metadata is kept for parser construction / --version.
        self._program_name = program_name
        self._version = version
        arg_parser = self._build_parser(default_main_config_path)
        self._options = arg_parser.parse_args()

    def __getattr__(self, option):
        # Delegate unknown attribute lookups to the parsed options namespace.
        try:
            return getattr(self._options, option)
        except AttributeError:
            raise CLIError('Error - Option: \'{:}\' does not exist.'.format(option))

    def _build_parser(self, default_main_config_path):
        # Two options: the main configuration file path and --version.
        arg_parser = ArgumentParser(prog=self._program_name)
        arg_parser.add_argument(
            '-c', '--config',
            dest='main_config',
            action='store',
            default=default_main_config_path,
            required=False,
        )
        arg_parser.add_argument('-v', '--version', action='version', version=self._version)
        return arg_parser
class RadarLauncher(object):
    """Abstract launcher: parses the CLI, configures the platform, runs threads.

    Subclasses are expected to populate AVAILABLE_PLATFORMS, set
    PROGRAM_NAME / PROGRAM_VERSION, create self._threads and implement
    _start_and_join_threads() -- none of these are defined here.
    """
    __metaclass__ = ABCMeta
    PROGRAM_NAME = ''
    PROGRAM_VERSION = ''
    # Seconds between join() attempts while waiting for worker threads.
    THREAD_POLLING_TIME = 0.2
    AVAILABLE_PLATFORMS = {}
    def __init__(self):
        cli = CLI(self._get_default_main_config_path(), program_name=self.PROGRAM_NAME, version=self.PROGRAM_VERSION)
        self._platform_setup = self._setup_platform(cli.main_config)
    def _get_default_main_config_path(self):
        # Default config path depends on the platform we run on.
        return self.AVAILABLE_PLATFORMS[Platform.get_platform_type()].MAIN_CONFIG_PATH
    def _setup_platform(self, path):
        platform = Platform.get_platform_type()
        try:
            PlatformSetup = self.AVAILABLE_PLATFORMS[platform]
            platform_setup = PlatformSetup(path).configure(self).build()
        except KeyError:
            raise RadarLauncherError('Error - Platform : \'{:}\' is not available.'.format(platform))
        return platform_setup
    def _start_threads(self, threads):
        [t.start() for t in threads]
    def _join_threads(self):
        # Poll-join so signal handlers still get a chance to run.
        while any([t.is_alive() for t in self._threads]):
            [t.join(self.THREAD_POLLING_TIME) for t in self._threads if t.is_alive()]
    def stop(self, *args):
        # Signal every worker thread to terminate.
        # NOTE(review): the original comment ("re-join the threads one more
        # time") suggests a _join_threads() call was intended here as well
        # -- confirm; currently only _resume_interrupted_call re-joins.
        [t.stop_event.set() for t in self._threads]
    def _resume_interrupted_call(self, error):
        # EINTR means a signal interrupted the join -- resume it; anything
        # else is a genuine I/O error.
        if error.errno != EINTR:
            raise error
        self._join_threads()
    def run(self):
        try:
            RadarLogger.log('Starting {:}.'.format(self.PROGRAM_NAME))
            self._start_and_join_threads()
        except IOError as e:
            self._resume_interrupted_call(e)
        except Exception as e:
            RadarLogger.log('Error - {:} raised an error. Details : {:}.'.format(self.__class__.__name__, e))
        finally:
            RadarLogger.log('Shutting down {:}.'.format(self.PROGRAM_NAME))
            self._platform_setup.tear_down()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kafka.tools.protocol.responses import BaseResponse
class LeaderAndIsrV0Response(BaseResponse):
    """Schema for version 0 of the Kafka LeaderAndIsr response.

    A top-level error code followed by one (topic, partition, error) entry
    per partition.
    """
    # Declarative wire schema consumed by BaseResponse.
    schema = [
        {'name': 'error', 'type': 'int16'},
        {'name': 'partitions',
         'type': 'array',
         'item_type': [
             {'name': 'topic', 'type': 'string'},
             {'name': 'partition', 'type': 'int32'},
             {'name': 'error', 'type': 'int16'},
         ]},
    ]
|
"""
Test CuisineCore module
"""
import unittest
from unittest.mock import patch, PropertyMock
import copy
from JumpScale import j
@patch('JumpScale.core.redis.Redis.hget')
@patch('JumpScale.core.redis.Redis.hset')
class TestCuisineCore(unittest.TestCase):
    """Unit tests for j.tools.cuisine.local.core (CuisineCore).

    The class-level patches stub out the Redis-backed cache; since mock.patch
    decorators are applied bottom-up, every test method receives
    cache_set_mock (hset) then cache_get_mock (hget) as trailing arguments.
    """
    def setUp(self):
        # Minimal environment dict returned by the mocked getenv below.
        self.dump_env = {
            'HOME': '/root',
            'HOSTNAME': 'js8-core',
            'JSBASE': '/js/path',
        }
        self.core = j.tools.cuisine.local.core
        # Expected dir_paths when JSBASE is set on a Linux host.
        self.dir_paths = {'appDir': '/js/path/apps',
                          'base': '/js/path',
                          'binDir': '/js/path/bin',
                          'cfgDir': '/optvar//cfg',
                          'codeDir': '/opt/code/',
                          'goDir': '/optvar/go/',
                          'homeDir': '/root',
                          'hrdDir': '/optvar//hrd',
                          'jsLibDir': '/js/path/lib/JumpScale/',
                          'libDir': '/js/path/lib/',
                          'logDir': '/optvar//log',
                          'optDir': '/opt/',
                          'pidDir': '/optvar//pid',
                          'tmpDir': '/optvar//tmp',
                          'tmplsDir': '/js/path/templates',
                          'varDir': '/optvar/'
                          }
    def tearDown(self):
        pass
    def test_isJS8Sandbox_property(self, cache_set_mock, cache_get_mock):
        """
        Test accessing the isJS8Sandbox property
        """
        # Cache miss forces the property to compute its value.
        cache_get_mock.return_value = None
        self.assertIsNotNone(self.core.isJS8Sandbox)
    @patch('JumpScale.j.tools.cuisine.local.core.getenv')
    def test_dir_paths_property_if_JSBASE_and_linux(self, getenv_mock, cache_set_mock, cache_get_mock):
        """
        Happy Path: Test accessing the dir_paths property if JSBASE in env
        """
        cache_get_mock.return_value = None
        getenv_mock.return_value = self.dump_env
        result = self.core.dir_paths
        self.assertEqual(result, self.dir_paths)
    @patch('JumpScale.j.tools.cuisine.local.core.getenv')
    def test_dir_paths_property_if_linux(self, getenv_mock, cache_set_mock, cache_get_mock):
        """
        Happy Path: Test accessing the dir_paths property if JSBASE not found in env
        """
        cache_get_mock.return_value = None
        # remove JSBASE from dump_env
        dump_env = copy.deepcopy(self.dump_env)
        del dump_env['JSBASE']
        getenv_mock.return_value = dump_env
        # Without JSBASE the default /opt/jumpscale8/ prefix is expected.
        expected_result = {
            'appDir': '/opt/jumpscale8//apps',
            'base': '/opt/jumpscale8/',
            'binDir': '/opt/jumpscale8//bin',
            'cfgDir': '/optvar//cfg',
            'codeDir': '/opt/code/',
            'goDir': '/optvar/go/',
            'homeDir': '/root',
            'hrdDir': '/optvar//hrd',
            'jsLibDir': '/opt/jumpscale8//lib/JumpScale/',
            'libDir': '/opt/jumpscale8//lib/',
            'logDir': '/optvar//log',
            'optDir': '/opt/',
            'pidDir': '/optvar//pid',
            'tmpDir': '/optvar//tmp',
            'tmplsDir': '/opt/jumpscale8//templates',
            'varDir': '/optvar/'
        }
        result = self.core.dir_paths
        self.assertEqual(result, expected_result)
    @patch('JumpScale.tools.cuisine.CuisineCore.CuisineCore.isMac', new_callable=PropertyMock)
    @patch('JumpScale.j.tools.cuisine.local.core.getenv')
    def test_dir_paths_property_if_not_linux(self, getenv_mock, mac_mock, cache_set_mock, cache_get_mock):
        """
        Happy Path: Test accessing the dir_paths property if JSBASE not found in env and not linux
        """
        cache_get_mock.return_value = None
        mac_mock.return_value = True
        # remove JSBASE from dump_env
        dump_env = copy.deepcopy(self.dump_env)
        del dump_env['JSBASE']
        getenv_mock.return_value = dump_env
        # On Mac every path is rooted under $HOME.
        expected_result = {
            'appDir': '/root/opt/jumpscale8//apps',
            'base': '/root/opt/jumpscale8/',
            'binDir': '/root/opt/jumpscale8//bin',
            'cfgDir': '/root/optvar//cfg',
            'codeDir': '/root/opt/code/',
            'goDir': '/root/optvar/go/',
            'homeDir': '/root',
            'hrdDir': '/root/optvar//hrd',
            'jsLibDir': '/root/opt/jumpscale8//lib/JumpScale/',
            'libDir': '/root/opt/jumpscale8//lib/',
            'logDir': '/root/optvar//log',
            'optDir': '/root/opt/',
            'pidDir': '/root/optvar//pid',
            'tmpDir': '/root/optvar//tmp',
            'tmplsDir': '/root/opt/jumpscale8//templates',
            'varDir': '/root/optvar/'
        }
        result = self.core.dir_paths
        self.assertEqual(result, expected_result)
        self.assertEqual(mac_mock.call_count, 2)
    @patch('JumpScale.tools.cuisine.CuisineCore.CuisineCore.isMac', new_callable=PropertyMock)
    @patch('JumpScale.j.tools.cuisine.local.core.getenv')
    def test_dir_paths_property_if_JSBASE_and_not_linux(self, getenv_mock, mac_mock, cache_set_mock, cache_get_mock):
        """
        Happy Path: Test accessing the dir_paths property if JSBASE in env and not linux
        """
        cache_get_mock.return_value = None
        mac_mock.return_value = True
        getenv_mock.return_value = self.dump_env
        expected_result = {
            'appDir': '/js/path/apps',
            'base': '/js/path',
            'binDir': '/js/path/bin',
            'cfgDir': '/root/optvar//cfg',
            'codeDir': '/root/opt/code/',
            'goDir': '/root/optvar/go/',
            'homeDir': '/root',
            'hrdDir': '/root/optvar//hrd',
            'jsLibDir': '/js/path/lib/JumpScale/',
            'libDir': '/js/path/lib/',
            'logDir': '/root/optvar//log',
            'optDir': '/root/opt/',
            'pidDir': '/root/optvar//pid',
            'tmpDir': '/root/optvar//tmp',
            'tmplsDir': '/js/path/templates',
            'varDir': '/root/optvar/'
        }
        result = self.core.dir_paths
        self.assertEqual(result, expected_result)
        mac_mock.assert_called_once_with()
    @unittest.skip("Needs fixing")
    def test_args_replace(self):
        """
        Test args replace
        """
        # NOTE(review): MagicMock is used below but never imported in this
        # module — add it to the unittest.mock import before un-skipping.
        with patch("JumpScale.j") as j_mock:
            from JumpScale import j
            import JumpScale.tools.cuisine.CuisineCore
            JumpScale.tools.cuisine.CuisineCore.j = j
            from JumpScale.tools.cuisine.CuisineCore import CuisineCore
            executor_mock = MagicMock()
            j.tools.executor.getLocal.return_value = executor_mock
            executor = j.tools.executor.getLocal()
            cuisine = j.tools.cuisine.local
            cuisine_core = CuisineCore(executor, cuisine)
            cuisine_core.getenv = MagicMock()
            cuisine_core.getenv.return_value = self.dump_env
            cuisine_core.run = MagicMock()
            cuisine_core.run.return_value = (0, 'hostname', '')
            input_text = "$base:$appDir:$tmplsDir:$varDir:$binDir:$codeDir:$cfgDir:$homeDir:$jsLibDir:$libDir:$logDir:$pidDir:$tmpDir:$hostname"
            expected_output = "/opt/jumpscale8/:/opt/jumpscale8//apps:/opt/jumpscale8//templates:/optvar/:/opt/jumpscale8//bin:/opt/code/:/optvar//cfg:/root:/opt/jumpscale8//lib/JumpScale/:/opt/jumpscale8//lib/:/optvar//log:/optvar//pid:/optvar//tmp:hostname"
            actual_output = cuisine_core.args_replace(input_text)
            self.assertEqual(expected_output, actual_output)
    @unittest.skip("Needs fixing")
    def test_file_get_tmp_path(self):
        """
        Test file get tmp path
        """
        # NOTE(review): this test references the 'mock' module, which is not
        # imported here (only unittest.mock names are) — fix before un-skipping.
        with mock.patch("JumpScale.j") as j_mock:
            from JumpScale import j
            import JumpScale.tools.cuisine.CuisineCore
            JumpScale.tools.cuisine.CuisineCore.j = j
            from JumpScale.tools.cuisine.CuisineCore import CuisineCore
            executor_mock = mock.MagicMock()
            j.tools.executor.getLocal.return_value = executor_mock
            executor = j.tools.executor.getLocal()
            cuisine = j.tools.cuisine.local
            cuisine_core = CuisineCore(executor, cuisine)
            cuisine_core.run = mock.MagicMock()
            cuisine_core.run.return_value = (0, 'hostname', '')
            cuisine_core.getenv = mock.MagicMock()
            cuisine_core.getenv.return_value = self.dump_env
            j.data.idgenerator.generateXCharID.return_value = 10 * 'a'
            expected_output = '/optvar//tmp/aaaaaaaaaa'
            actual_output = cuisine_core.file_get_tmp_path()
            # NOTE(review): assertEquals is a deprecated alias of assertEqual.
            self.assertEquals(expected_output, actual_output)
            expected_output = '/optvar//tmp/path'
            actual_output = cuisine_core.file_get_tmp_path(basepath="path")
            self.assertEquals(expected_output, actual_output)
    @unittest.skip("Needs fixing")
    def test_file_download(self):
        """
        Test file download
        """
        # NOTE(review): also depends on the un-imported 'mock' module.
        with mock.patch("JumpScale.j") as j_mock:
            from JumpScale import j
            import JumpScale.tools.cuisine.CuisineCore
            JumpScale.tools.cuisine.CuisineCore.j = j
            from JumpScale.tools.cuisine.CuisineCore import CuisineCore
            executor_mock = mock.MagicMock()
            j.tools.executor.getLocal.return_value = executor_mock
            executor = j.tools.executor.getLocal()
            cuisine = j.tools.cuisine.local
            cuisine_core = CuisineCore(executor, cuisine)
            url = 'http://hallo.com/downloadme.txt'
            to = '/tmp/path'
            cuisine_core.file_exists = mock.MagicMock()
            cuisine_core.file_exists.return_value = False
            cuisine_core.createDir = mock.MagicMock()
            cuisine_core.file_unlink = mock.MagicMock()
            cuisine_core.run = mock.MagicMock()
            # First run() fails with exit code 33 (retryable), second succeeds.
            cuisine_core.run.side_effect = [(33, '', 'err'), (0, 'Ok', '')]
            cuisine_core.touch = mock.MagicMock()
            cuisine_core.file_download(url, to)
            self.assertTrue(cuisine_core.touch.called)
            self.assertFalse(j.sal.fs.getBaseName.called)
    @unittest.skip("Needs fixing")
    def test_file_download_fail(self):
        """
        Test file download wth failure
        """
        with mock.patch("JumpScale.j") as j_mock:
            from JumpScale import j
            import JumpScale.tools.cuisine.CuisineCore
            JumpScale.tools.cuisine.CuisineCore.j = j
            from JumpScale.tools.cuisine.CuisineCore import CuisineCore
            from JumpScale.core.errorhandling import JSExceptions
            executor_mock = mock.MagicMock()
            j.tools.executor.getLocal.return_value = executor_mock
            executor = j.tools.executor.getLocal()
            cuisine = j.tools.cuisine.local
            cuisine_core = CuisineCore(executor, cuisine)
            url = 'http://hallo.com/downloadme.txt'
            to = '/tmp/path'
            cuisine_core.file_exists = mock.MagicMock()
            cuisine_core.file_exists.return_value = False
            cuisine_core.createDir = mock.MagicMock()
            cuisine_core.file_unlink = mock.MagicMock()
            cuisine_core.run = mock.MagicMock()
            # Exit code 32 is treated as a fatal download error.
            cuisine_core.run.side_effect = [(32, '', 'err'), (0, 'Ok', '')]
            cuisine_core.touch = mock.MagicMock()
            j.exceptions.RuntimeError = JSExceptions.RuntimeError
            self.assertRaises(JSExceptions.RuntimeError, cuisine_core.file_download, url, to)
    @unittest.skip("Needs fixing")
    def test_file_expand(self):
        """
        Test file expand
        """
        with mock.patch("JumpScale.j") as j_mock:
            from JumpScale import j
            import JumpScale.tools.cuisine.CuisineCore
            JumpScale.tools.cuisine.CuisineCore.j = j
            from JumpScale.tools.cuisine.CuisineCore import CuisineCore
            executor_mock = mock.MagicMock()
            j.tools.executor.getLocal.return_value = executor_mock
            executor = j.tools.executor.getLocal()
            cuisine = j.tools.cuisine.local
            cuisine_core = CuisineCore(executor, cuisine)
            path = '/tmp/file.tgz'
            to = '/tmp/dest'
            cuisine_core.run = mock.MagicMock()
            cuisine_core.args_replace = mock.MagicMock()
            cuisine_core.file_expand(path, to)
    @unittest.skip("Needs fixing")
    def test_file_expand_fail(self):
        """
        Test file expand failure case
        """
        with mock.patch("JumpScale.j") as j_mock:
            from JumpScale import j
            import JumpScale.tools.cuisine.CuisineCore
            JumpScale.tools.cuisine.CuisineCore.j = j
            from JumpScale.tools.cuisine.CuisineCore import CuisineCore
            from JumpScale.core.errorhandling import JSExceptions
            executor_mock = mock.MagicMock()
            j.tools.executor.getLocal.return_value = executor_mock
            executor = j.tools.executor.getLocal()
            cuisine = j.tools.cuisine.local
            cuisine_core = CuisineCore(executor, cuisine)
            # A .txt file is not an archive, so expanding it must raise.
            path = '/tmp/file.txt'
            to = '/tmp/dest'
            cuisine_core.run = mock.MagicMock()
            cuisine_core.args_replace = mock.MagicMock()
            cuisine_core.args_replace.side_effect = (path, to)
            j.exceptions.RuntimeError = JSExceptions.RuntimeError
            self.assertRaises(JSExceptions.RuntimeError, cuisine_core.file_expand, path, to)
    @unittest.skip("Needs fixing")
    def test_touch(self):
        """
        Test touch
        """
        with mock.patch("JumpScale.j") as j_mock:
            from JumpScale import j
            import JumpScale.tools.cuisine.CuisineCore
            JumpScale.tools.cuisine.CuisineCore.j = j
            from JumpScale.tools.cuisine.CuisineCore import CuisineCore
            executor_mock = mock.MagicMock()
            j.tools.executor.getLocal.return_value = executor_mock
            executor = j.tools.executor.getLocal()
            cuisine = j.tools.cuisine.local
            cuisine_core = CuisineCore(executor, cuisine)
            cuisine_core.run = mock.MagicMock()
            cuisine_core.args_replace = mock.MagicMock()
            cuisine_core.file_write = mock.MagicMock()
            self.assertIsNone(cuisine_core.touch('/tmp/hello'))
            self.assertTrue(cuisine_core.file_write.called)
    # Untested CuisineCore API surface (candidates for future tests):
    # def file_upload_binary(self, local, remote):
    # def file_upload_local(self, local, remote):
    # def file_download_binary(self, local, remote):
    # def file_download_local(self,remote, local):
    # def file_copy(self, source, dest, recursive=False, overwrite=False):
    # def file_move(self, source, dest, recursive=False):
    # def joinpaths(self, *args):
    # def fs_find(self,path,recursive=True,pattern="",findstatement="",type="",contentsearch="",extendinfo=False):
    # def sudo(self, cmd, die=True,showout=True):
    # def run(self,cmd,die=True,debug=None,checkok=False,showout=True,profile=False,replaceArgs=True,check_is_ok=False):
    # def run_script(self,content,die=True,profile=False):
    # def command_location(self,command):
    # def tmux_execute_jumpscript(self,script,sessionname="ssh", screenname="js"):
    # def execute_jumpscript(self,script):
    # def execute_jumpscript(self,script):
|
# -*- coding: utf8 -*-
import unittest
import urllib
from django.utils import translation
from mock import Mock
from nose.tools import eq_
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.urlresolvers import reverse
from amo.tests.test_helpers import render
from addons.models import Addon
from devhub import helpers
from files.models import File, Platform
from versions.models import Version
def test_dev_page_title():
    """dev_page_title() composes ' :: Developer Hub' titles like page_title()."""
    translation.activate('en-US')
    request = Mock()
    request.APP = None
    addon = Mock()
    addon.name = 'name'
    ctx = {'request': request, 'addon': addon}
    title = 'Oh hai!'
    # With an explicit title, the Developer Hub suffix is appended.
    s1 = render('{{ dev_page_title("%s") }}' % title, ctx)
    s2 = render('{{ page_title("%s :: Developer Hub") }}' % title, ctx)
    eq_(s1, s2)
    # With no title at all, only 'Developer Hub' is used.
    s1 = render('{{ dev_page_title() }}', ctx)
    s2 = render('{{ page_title("Developer Hub") }}', ctx)
    eq_(s1, s2)
    # With an addon, the addon name replaces the Developer Hub suffix.
    s1 = render('{{ dev_page_title("%s", addon) }}' % title, ctx)
    s2 = render('{{ page_title("%s :: %s") }}' % (title, addon.name), ctx)
    eq_(s1, s2)
class TestDevBreadcrumbs(unittest.TestCase):
    """Tests for the dev_breadcrumbs() template helper.

    Covers the default trail, the optional 'Add-ons' root crumb, extra
    (url, text) items, and the addon-specific trail.
    """
    def setUp(self):
        self.request = Mock()
        self.request.APP = None
    def test_no_args(self):
        # Bare call: 'Developer Hub' plus an unlinked 'My Add-ons' crumb.
        s = render('{{ dev_breadcrumbs() }}', {'request': self.request})
        doc = pq(s)
        crumbs = doc('li')
        eq_(len(crumbs), 2)
        eq_(crumbs.text(), 'Developer Hub My Add-ons')
        eq_(crumbs.eq(1).children('a'), [])
    def test_no_args_with_default(self):
        # add_default=True prepends the site-wide 'Add-ons' crumb.
        s = render('{{ dev_breadcrumbs(add_default=True) }}',
                   {'request': self.request})
        doc = pq(s)
        crumbs = doc('li')
        eq_(crumbs.text(), 'Add-ons Developer Hub My Add-ons')
        eq_(crumbs.eq(1).children('a').attr('href'), reverse('devhub.index'))
        eq_(crumbs.eq(2).children('a'), [])
    def test_with_items(self):
        # Extra (url, text) pairs are appended, each as a linked crumb.
        s = render("""{{ dev_breadcrumbs(items=[('/foo', 'foo'),
                                                ('/bar', 'bar')]) }}'""",
                   {'request': self.request})
        doc = pq(s)
        crumbs = doc('li>a')
        eq_(len(crumbs), 4)
        eq_(crumbs.eq(2).text(), 'foo')
        eq_(crumbs.eq(2).attr('href'), '/foo')
        eq_(crumbs.eq(3).text(), 'bar')
        eq_(crumbs.eq(3).attr('href'), '/bar')
    def test_with_addon(self):
        addon = Mock()
        addon.name = 'Firebug'
        addon.id = 1843
        s = render("""{{ dev_breadcrumbs(addon) }}""",
                   {'request': self.request, 'addon': addon})
        doc = pq(s)
        crumbs = doc('li')
        eq_(crumbs.text(), 'Developer Hub My Add-ons Firebug')
        eq_(crumbs.eq(1).text(), 'My Add-ons')
        eq_(crumbs.eq(1).children('a').attr('href'), reverse('devhub.addons'))
        # The trailing addon crumb is the current page, so it is unlinked.
        eq_(crumbs.eq(2).text(), 'Firebug')
        eq_(crumbs.eq(2).children('a'), [])
    def test_with_addon_and_items(self):
        addon = Mock()
        addon.name = 'Firebug'
        addon.id = 1843
        addon.slug = 'fbug'
        # With extra items, the addon crumb becomes a link to its edit page.
        s = render("""{{ dev_breadcrumbs(addon,
                                         items=[('/foo', 'foo'),
                                                ('/bar', 'bar')]) }}""",
                   {'request': self.request, 'addon': addon})
        doc = pq(s)
        crumbs = doc('li')
        eq_(len(crumbs), 5)
        eq_(crumbs.eq(2).text(), 'Firebug')
        eq_(crumbs.eq(2).children('a').attr('href'),
            reverse('devhub.addons.edit', args=[addon.slug]))
        eq_(crumbs.eq(3).text(), 'foo')
        eq_(crumbs.eq(3).children('a').attr('href'), '/foo')
        eq_(crumbs.eq(4).text(), 'bar')
        eq_(crumbs.eq(4).children('a').attr('href'), '/bar')
def test_summarize_validation():
    """summarize_validation() pluralizes error/warning counts correctly."""
    v = Mock()
    v.errors = 1
    v.warnings = 1
    eq_(render('{{ summarize_validation(validation) }}',
               {'validation': v}),
        u'1 error, 1 warning')
    v.errors = 2
    eq_(render('{{ summarize_validation(validation) }}',
               {'validation': v}),
        u'2 errors, 1 warning')
    v.warnings = 2
    eq_(render('{{ summarize_validation(validation) }}',
               {'validation': v}),
        u'2 errors, 2 warnings')
def test_log_action_class():
    """log_action_class() renders each log entry's action_class as an
    'action-*' CSS class, or an empty string when it has none."""
    # The original code created a throwaway Mock() bound to 'v' that the loop
    # variable immediately shadowed; it is removed. Only the dict values are
    # used, so iterate itervalues() instead of iteritems().
    for entry in amo.LOG_BY_ID.itervalues():
        if entry.action_class is not None:
            cls = 'action-' + entry.action_class
        else:
            cls = ''
        eq_(render('{{ log_action_class(id) }}', {'id': entry.id}), cls)
class TestDisplayUrl(unittest.TestCase):
    """The display_url filter decodes percent-encoded URLs back to readable
    unicode, regardless of the original byte encoding.

    NOTE: Python 2 module — relies on str.decode() and the unicode() builtin.
    """
    def setUp(self):
        # A URL containing non-ASCII (Japanese) characters.
        self.raw_url = u'http://host/%s' % 'フォクすけといっしょ'.decode('utf8')
    def test_utf8(self):
        url = urllib.quote(self.raw_url.encode('utf8'))
        eq_(render('{{ url|display_url }}', {'url': url}),
            self.raw_url)
    def test_unicode(self):
        # Same as test_utf8 but the quoted URL is passed as a unicode object.
        url = urllib.quote(self.raw_url.encode('utf8'))
        url = unicode(url, 'utf8')
        eq_(render('{{ url|display_url }}', {'url': url}),
            self.raw_url)
    def test_euc_jp(self):
        # Legacy EUC-JP encoding must also round-trip to the same unicode.
        url = urllib.quote(self.raw_url.encode('euc_jp'))
        eq_(render('{{ url|display_url }}', {'url': url}),
            self.raw_url)
class TestDevFilesStatus(amo.tests.TestCase):
    """dev_files_status() maps a file's status (in the context of its addon's
    status) to the user-facing status label."""
    def setUp(self):
        platform = Platform.objects.create(id=amo.PLATFORM_ALL.id)
        self.addon = Addon.objects.create(type=1, status=amo.STATUS_UNREVIEWED)
        self.version = Version.objects.create(addon=self.addon)
        self.file = File.objects.create(version=self.version,
                                        platform=platform,
                                        status=amo.STATUS_UNREVIEWED)
    def expect(self, expected):
        # Helper: the single file must map to exactly one (count, label) pair.
        cnt, msg = helpers.dev_files_status([self.file], self.addon)[0]
        eq_(cnt, 1)
        eq_(msg, expected)
    def test_unreviewed_lite(self):
        self.addon.status = amo.STATUS_LITE
        self.file.status = amo.STATUS_UNREVIEWED
        self.expect(amo.STATUS_CHOICES[amo.STATUS_UNREVIEWED])
    def test_unreviewed_public(self):
        # An unreviewed file on a public addon is shown as awaiting review.
        self.addon.status = amo.STATUS_PUBLIC
        self.file.status = amo.STATUS_UNREVIEWED
        self.expect(amo.STATUS_CHOICES[amo.STATUS_NOMINATED])
    def test_unreviewed_nominated(self):
        self.addon.status = amo.STATUS_NOMINATED
        self.file.status = amo.STATUS_UNREVIEWED
        self.expect(amo.STATUS_CHOICES[amo.STATUS_NOMINATED])
    def test_unreviewed_lite_and_nominated(self):
        self.addon.status = amo.STATUS_LITE_AND_NOMINATED
        self.file.status = amo.STATUS_UNREVIEWED
        self.expect(amo.STATUS_CHOICES[amo.STATUS_NOMINATED])
    def test_reviewed_lite(self):
        self.addon.status = amo.STATUS_LITE
        self.file.status = amo.STATUS_LITE
        self.expect(amo.STATUS_CHOICES[amo.STATUS_LITE])
    def test_reviewed_public(self):
        self.addon.status = amo.STATUS_PUBLIC
        self.file.status = amo.STATUS_PUBLIC
        self.expect(amo.STATUS_CHOICES[amo.STATUS_PUBLIC])
    def test_disabled(self):
        self.addon.status = amo.STATUS_PUBLIC
        self.file.status = amo.STATUS_DISABLED
        self.expect(amo.STATUS_CHOICES[amo.STATUS_DISABLED])
|
import random
import os
from math import sqrt
from multiprocessing import Pool
import simulation.Networks as Network
import simulation.Contagion as Contagion
import simulation.System as System
import simulation.MCMC as MCMC
# Fixed seed so every run of the simulation is reproducible.
random.seed(int("54e22d", 16))
# Monte-Carlo steps per temperature point.
steps = 1000000
# Square lattice of side x side spins.
side = 100
spins = side * side
def observablesWrap(openedFile):
    """Wrap observables() so each measurement is also appended to openedFile
    as one CSV row: energy, energy^2, magnetization, magnetization^2."""
    def record(system, observations):
        # Update the observations dict in place, then persist the new values.
        observables(system, observations)
        row = "{},{},{},{}".format(observations['lastEnergy'],
                                   observations['lastSqEnergy'],
                                   observations['lastMag'],
                                   observations['lastSqMag'])
        openedFile.write(row + '\n')
    return record
def observables(system, observations):
    """Measure total energy and magnetization of the current spin state and
    store them (plus their squares) in the observations dict."""
    network = system.wolffIsing.network
    nodes = network.nodes
    energy = 0.0
    magnetization = 0.0
    for site in range(network.numNodes):
        spin = nodes[site]['spin']
        magnetization += spin
        # Each bond is visited from both endpoints, so every pair interaction
        # is counted twice (matches the original accounting).
        neighbor_sum = sum(nodes[neighbor]['spin'] for neighbor in network.edges[site])
        energy -= spin * neighbor_sum
    observations['lastEnergy'] = energy
    observations['lastSqEnergy'] = energy ** 2
    observations['lastMag'] = magnetization
    observations['lastSqMag'] = magnetization ** 2
def randomSpin(x, y):
    """Node initializer for the lattice: coordinates are ignored and the spin
    is drawn uniformly from {-1, +1}."""
    value = random.choice([-1, 1])
    return {'spin': value}
def experimentSingleNetwork(network):
    """Bind 'network' and return a one-argument callable (temperature ->
    experiment result), suitable for map() over a temperature sweep."""
    def run_at(temperature):
        return experiment(temperature, network)
    return run_at
def experiment(temperature, network):
    """Run one Wolff-Ising Monte-Carlo simulation at 'temperature' on
    'network', streaming observables to a per-temperature CSV file.

    Returns the observations dict holding the last measured values.
    """
    print("starting temperature", temperature)
    filename = 'data/ising.random.100.' + \
        str.format("{}", temperature) + ".csv"
    # Bug fix: the file was opened and closed manually, leaking the handle if
    # the simulation raised. A context manager guarantees it is closed.
    with open(filename, 'w') as openedFile:
        # beta = 1/T is the inverse temperature for the Wolff algorithm.
        contagion = Contagion.WolffIsing(network, 1/float(temperature), random.random)
        system = System.WolffIsing(contagion, random.randint)
        montecarlo = MCMC.MCMC(system, random.random)
        observations = {}
        observations['lastEnergy'] = 0
        observations['lastMag'] = 0
        observations['lastSqEnergy'] = 0
        observations['lastSqMag'] = 0
        montecarlo.simulate(observablesWrap(openedFile), observations, steps)
    return observations
if __name__ == '__main__':
    # Build a side x side lattice with uniformly random initial spins.
    network = Network.Lattice2DNP(side, side, {}, {})
    network.build(randomSpin)
    # Sweep temperatures 0.1 .. 4.9, hottest first.
    temperatures = [float(x)/10 for x in range(1, 50)]
    temperatures.reverse()
    # list(...) both materializes the (possibly lazy) map and replaces the
    # redundant [result for result in experiments] copy.
    results = list(map(experimentSingleNetwork(network), temperatures))
|
from coinpy.lib.vm.stack_valtype import cast_to_number, valtype_from_number
from coinpy.lib.vm.opcode_impl.flow import op_verify
import functools
def arithmetic_op(vm, func, arity):
    """Pop 'arity' values from the VM stack as numbers, apply 'func', and
    push the encoded result back.

    Operands reach 'func' in push order (bottom-most first): the pops yield
    them top-first, and reversed() restores the original order.

    Raises:
        Exception: if the stack holds fewer than 'arity' items.
    """
    if len(vm.stack) < arity:
        # Typo fix: message previously read "Not enought arguments".
        raise Exception("Not enough arguments")
    args = [cast_to_number(vm.stack.pop()) for _ in range(arity)]
    result = func(*reversed(args))
    vm.stack.append(valtype_from_number(result))
# Pre-bound helpers for the common arities used by the opcode handlers below.
arithmetic_unary_op = functools.partial(arithmetic_op, arity=1)
arithmetic_binary_op = functools.partial(arithmetic_op, arity=2)
arithmetic_ternary_op = functools.partial(arithmetic_op, arity=3)
"""
OP_1ADD: a -> a+1
1 is added to a.
"""
def op_1add(vm, instr):
arithmetic_unary_op(vm, lambda a: a + 1)
"""
OP_1SUB: a -> a - 1
1 is substracted from a.
"""
def op_1sub(vm, instr):
arithmetic_unary_op(vm, lambda a: a - 1)
"""
OP_2MUL: a -> a * 2
a is multiplied by 2.
"""
def op_2mul(vm, instr):
arithmetic_unary_op(vm, lambda a: a * 2)
"""
OP_2DIV: a -> a / 2
a is divided by 2.
"""
def op_2div(vm, instr):
arithmetic_unary_op(vm, lambda a: a / 2)
"""
OP_0NOTEQUAL: a -> a != 0 ? 1 : 0
if a is not equal to 0, return 1, otherwise return 0.
"""
def op_0notequal(vm, instr):
arithmetic_unary_op(vm, lambda x: 1 if (x != 0) else 0)
"""
OP_NEGATE: a -> -a
return the opposite of a.
"""
def op_negate(vm, instr):
arithmetic_unary_op(vm, lambda a: -a)
"""
OP_ABS: a -> (a>0) ? a : -a
Return the absolute value of a.
"""
def op_abs(vm, instr):
arithmetic_unary_op(vm, lambda a: abs(a))
"""
OP_NOT: a -> (a==0) ? 1 : -0
if a equals 0 return 1, otherwise return 0.
"""
def op_not(vm, instr):
arithmetic_unary_op(vm, lambda a: 1 if a == 0 else 0)
"""
OP_0NOTEQUAL: a -> (a!=0) ? 1 : 0
if a is different than 0 return 1, otherwise return 0.
"""
def op_0noteequal(vm, instr):
arithmetic_unary_op(vm, lambda a: 0 if a == 0 else 1)
"""
OP_ADD: a b -> a+b
a is added to b.
"""
def op_add(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: x1 + x2)
"""
OP_SUB: a b -> a-b
b is subtracted from a.
"""
def op_sub(vm, instr):
arithmetic_binary_op(vm, lambda a, b: a - b)
"""
OP_MUL: a b -> a*b
a is multiplied by b.
"""
def op_mul(vm, instr):
arithmetic_binary_op(vm, lambda a, b: a * b)
"""
OP_DIV: a b -> a/b
a is divided by b.
"""
def op_div(vm, instr):
arithmetic_binary_op(vm, lambda a, b: a / b)
"""
OP_MOD: a b -> a%b
Returns the remainder after dividing a by b.
"""
def op_mod(vm, instr):
arithmetic_binary_op(vm, lambda a, b: a % b)
"""
OP_LSHIFT: a b -> a<<b
Shifts a left b bits, preserving sign.
"""
def op_lshift(vm, instr):
arithmetic_binary_op(vm, lambda a, b: a << b)
"""
OP_RSHIFT: a b -> a >> b
Shifts a right b bits, preserving sign.
"""
def op_rshift(vm, instr):
arithmetic_binary_op(vm, lambda a, b: a >> b)
"""
OP_BOOLAND: a b -> a&b
If both a and b are not 0, the output is 1. Otherwise 0.
"""
def op_booland(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 != 0 and x2 != 0) and 1 or 0)
"""
OP_BOOLAND: a b -> a|b
If both a and b are not 0, the output is 1. Otherwise 0.
"""
def op_boolor(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 != 0 or x2 != 0) and 1 or 0)
"""
OP_NUMEQUAL : a b -> (a==b) ? 1 : 0
Returns 1 if the numbers are equal, 0 otherwise.
"""
def op_numequal(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 == x2) and 1 or 0)
"""
OP_NUMEQUALVERIFY: a b -> (a==b) ? 1 : 0
Same as OP_NUMEQUAL, but runs OP_VERIFY afterward.
"""
def op_numequalverify(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 == x2) and 1 or 0)
op_verify(vm, instr)
"""
OP_NUMEQUAL : a b -> (a!=b) ? 1 : 0
Returns 1 if the numbers are equal, 0 otherwise.
"""
def op_numnotequal(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 != x2) and 1 or 0)
"""
OP_LESSTHAN : a b -> (a<b) ? 1 : 0
Returns 1 if a is less than b, 0 otherwise.
"""
def op_lessthan(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 < x2) and 1 or 0)
"""
OP_GREATERTHAN : a b -> (a>b) ? 1 : 0
Returns 1 if a is less than b, 0 otherwise.
"""
def op_greaterthan(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 > x2) and 1 or 0)
"""
OP_LESSTHANOREQUAL : a b -> (a<=b) ? 1 : 0
Returns 1 if a is less than or equal to b, 0 otherwise.
"""
def op_lessthanorequal(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 <= x2) and 1 or 0)
"""
OP_GREATERTHANOREQUAL: a b -> (a>=b) ? 1 : 0
Returns 1 if a is greater than or equal to b, 0 otherwise.
"""
def op_greaterthanorequal(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: (x1 >= x2) and 1 or 0)
"""
OP_MIN: a b -> min(a, b)
Returns the smaller of a and b.
"""
def op_min(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: min(x1, x2))
"""
OP_MAX: a b -> max(a, b)
Returns the smaller of a and b.
"""
def op_max(vm, instr):
arithmetic_binary_op(vm, lambda x1,x2: max(x1, x2))
"""
OP_WITHIN: x min max -> (min <= x < max) ? 1 : 0
Returns 1 if x is within the specified range (left-inclusive), 0 otherwise.
"""
def op_within(vm, instr):
arithmetic_ternary_op(vm, lambda x, min, max: 1 if (min <= x < max) else 0)
|
#!/usr/bin/env python
#!coding:utf-8
import unittest
import sys
import set_path
from nfa import NFA
class TestNFA(unittest.TestCase):
    """Tests for the NFA over alphabet {0, 1} with epsilon transitions.

    States q0..q2, start/accept state q0; ttable[state][symbol] gives the set
    of successor states (row order follows 'states', column order 'sigma').

    NOTE(review): 'nfa' is a single class-level instance shared by every test
    method, so tests that mutate its current-state set are order-coupled --
    consider building a fresh NFA in setUp().
    """
    EPSILON = 'epsilon'
    start = 'q0'
    final = {'q0'}
    sigma = ['0', '1', 'epsilon']
    states = ['q0', 'q1', 'q2']
    ttable = [[{}, {'q1'}, {'q2'}],
              [{'q1', 'q2'}, {'q2'}, {}],
              [{'q0'}, {}, {}]]
    nfa = NFA(states, sigma, ttable, start, final)
    def test_e_transition(self):
        # Epsilon from q0 reaches q2, then epsilon-closure keeps q0 reachable.
        self.nfa.transition('q0', self.EPSILON)
        self.assertEqual(self.nfa.is_accepted(), True)
    def test_1_transition(self):
        self.nfa.transition('q0', '1')
        self.assertEqual(self.nfa.is_accepted(), False)
    def test_multi_transition(self):
        # Transition from a set of states at once.
        self.nfa.transitions({'q0', 'q2'}, '1')
        self.assertEqual(self.nfa.is_accepted(), False)
        self.assertSetEqual(self.nfa.current, {'q1'})
    def test_sequence_110(self):
        self.nfa.reset()
        self.nfa.handle('110')
        self.assertEqual(self.nfa.is_accepted(), True)
        self.assertSetEqual(self.nfa.current, {'q0', 'q2'})
    def test_sequence_100(self):
        self.nfa.reset()
        self.nfa.handle('100')
        self.assertEqual(self.nfa.is_accepted(), True)
        self.assertSetEqual(self.nfa.current, {'q0', 'q1', 'q2'})
    def test_sequence_000(self):
        self.nfa.reset()
        self.nfa.handle('000')
        self.assertEqual(self.nfa.is_accepted(), True)
        self.assertSetEqual(self.nfa.current, {'q0', 'q2'})
    def test_sequence_111(self):
        # Three 1s exhaust all transitions: the NFA dies (empty state set).
        self.nfa.reset()
        self.nfa.handle('111')
        self.assertEqual(self.nfa.is_accepted(), False)
        self.assertSetEqual(self.nfa.current, set())
if __name__ == "__main__":
unittest.main()
|
# From Python 2.5.1
# tempfile.py unit tests.
import tempfile
import os
import sys
import re
import errno
import warnings
import unittest
from test import test_support
warnings.filterwarnings("ignore",
category=RuntimeWarning,
message="mktemp", module=__name__)
if hasattr(os, 'stat'):
import stat
has_stat = 1
else:
has_stat = 0
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
if sys.platform == 'mac':
TEST_FILES = 32
elif sys.platform in ('openbsd3', 'openbsd4'):
TEST_FILES = 48
else:
TEST_FILES = 100
# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file. Testing which requires
# threads is not done here.
# Common functionality.
class TC(unittest.TestCase):
    """Shared base class for the tempfile tests: failure reporting and
    generated-filename validation helpers."""
    # The random part of a tempfile name: exactly six [a-zA-Z0-9_-] chars.
    str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")
    def failOnException(self, what, ei=None):
        # Report the current (or given) exception as a test failure.
        if ei is None:
            ei = sys.exc_info()
        self.fail("%s raised %s: %s" % (what, ei[0], ei[1]))
    def nameCheck(self, name, dir, pre, suf):
        """Assert that 'name' lives in 'dir' and is pre + 6 random chars + suf."""
        (ndir, nbase) = os.path.split(name)
        npre = nbase[:len(pre)]
        nsuf = nbase[len(nbase)-len(suf):]
        # check for equality of the absolute paths!
        self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
                         "file '%s' not in directory '%s'" % (name, dir))
        self.assertEqual(npre, pre,
                         "file '%s' does not begin with '%s'" % (nbase, pre))
        self.assertEqual(nsuf, suf,
                         "file '%s' does not end with '%s'" % (nbase, suf))
        nbase = nbase[len(pre):len(nbase)-len(suf)]
        self.assert_(self.str_check.match(nbase),
                     "random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
                     % nbase)
test_classes = []  # each test class below appends itself to this registry
class test_exports(TC):
    def test_exports(self):
        # There are no surprising symbols in the tempfile module
        dict = tempfile.__dict__
        # The module's expected public API surface.
        expected = {
            "NamedTemporaryFile" : 1,
            "TemporaryFile" : 1,
            "mkstemp" : 1,
            "mkdtemp" : 1,
            "mktemp" : 1,
            "TMP_MAX" : 1,
            "gettempprefix" : 1,
            "gettempdir" : 1,
            "tempdir" : 1,
            "template" : 1
        }
        unexp = []
        # Any public (non-underscore) name outside 'expected' is a failure.
        for key in dict:
            if key[0] != '_' and key not in expected:
                unexp.append(key)
        self.failUnless(len(unexp) == 0,
                        "unexpected keys: %s" % unexp)
test_classes.append(test_exports)
class test__RandomNameSequence(TC):
    """Test the internal iterator object _RandomNameSequence."""
    def setUp(self):
        self.r = tempfile._RandomNameSequence()
    def test_get_six_char_str(self):
        # _RandomNameSequence returns a six-character string
        s = self.r.next()
        self.nameCheck(s, '', '', '')
    def test_many(self):
        # _RandomNameSequence returns no duplicate strings (stochastic)
        dict = {}
        r = self.r
        for i in xrange(TEST_FILES):
            s = r.next()
            self.nameCheck(s, '', '', '')
            self.failIf(s in dict)
            dict[s] = 1
    def test_supports_iter(self):
        # _RandomNameSequence supports the iterator protocol
        i = 0
        r = self.r
        try:
            for s in r:
                i += 1
                if i == 20:
                    break
        except:
            # Bug fix: this was a bare `failOnException("iteration")`, which
            # raised NameError instead of reporting the failure -- the helper
            # is a method on TC and must be called through self.
            self.failOnException("iteration")
test_classes.append(test__RandomNameSequence)
class test__candidate_tempdir_list(TC):
    """Test the internal function _candidate_tempdir_list."""
    def test_nonempty_list(self):
        # _candidate_tempdir_list returns a nonempty list of strings
        cand = tempfile._candidate_tempdir_list()
        self.failIf(len(cand) == 0)
        for c in cand:
            self.assert_(isinstance(c, basestring),
                         "%s is not a string" % c)
    def test_wanted_dirs(self):
        # _candidate_tempdir_list contains the expected directories
        # Make sure the interesting environment variables are all set.
        added = []
        try:
            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                if not dirname:
                    os.environ[envname] = os.path.abspath(envname)
                    added.append(envname)
            cand = tempfile._candidate_tempdir_list()
            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                if not dirname: raise ValueError
                self.assert_(dirname in cand)
            try:
                dirname = os.getcwd()
            except (AttributeError, os.error):
                dirname = os.curdir
            self.assert_(dirname in cand)
            # Not practical to try to verify the presence of OS-specific
            # paths in this list.
        finally:
            # Undo any environment variables this test added.
            for p in added:
                del os.environ[p]
test_classes.append(test__candidate_tempdir_list)
# We test _get_default_tempdir by testing gettempdir.
class test__get_candidate_names(TC):
    """Test the internal function _get_candidate_names."""
    def test_retval(self):
        # _get_candidate_names returns a _RandomNameSequence object
        obj = tempfile._get_candidate_names()
        self.assert_(isinstance(obj, tempfile._RandomNameSequence))
    def test_same_thing(self):
        # _get_candidate_names always returns the same object
        a = tempfile._get_candidate_names()
        b = tempfile._get_candidate_names()
        self.assert_(a is b)
test_classes.append(test__get_candidate_names)
class test__mkstemp_inner(TC):
    """Test the internal function _mkstemp_inner."""

    class mkstemped:
        # Helper wrapper: creates a file via _mkstemp_inner and cleans it up.
        _bflags = tempfile._bin_openflags
        _tflags = tempfile._text_openflags

        def __init__(self, dir, pre, suf, bin):
            if bin: flags = self._bflags
            else:   flags = self._tflags
            # XXX: CPython assigns _close/_unlink as class vars but this
            # would rebind Jython's close/unlink (to be classmethods)
            # because they're not built-in functions (unfortunately
            # built-in functions act differently when binding:
            # http://mail.python.org/pipermail/python-dev/2003-April/034749.html)
            self._close = os.close
            self._unlink = os.unlink
            (self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)

        def write(self, str):
            os.write(self.fd, str)
            # XXX: self.test_choose_directory expects the file to have been deleted
            # (via __del__) by the time it's called, which is CPython specific
            # garbage collection behavior. We need to delete it now in Jython
            self._close(self.fd)
            self._unlink(self.name)

        def __del__(self):
            self._close(self.fd)
            if os.path.exists(self.name):
                self._unlink(self.name)

    def do_create(self, dir=None, pre="", suf="", bin=1):
        # Create one temp file and sanity-check its name; fail the test on error.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = self.mkstemped(dir, pre, suf, bin)
        except:
            self.failOnException("_mkstemp_inner")
        self.nameCheck(file.name, dir, pre, suf)
        return file

    def test_basic(self):
        # _mkstemp_inner can create files
        self.do_create().write("blat")
        self.do_create(pre="a").write("blat")
        self.do_create(suf="b").write("blat")
        self.do_create(pre="a", suf="b").write("blat")
        self.do_create(pre="aa", suf=".txt").write("blat")

    def test_basic_many(self):
        # _mkstemp_inner can create many files (stochastic)
        extant = range(TEST_FILES)
        for i in extant:
            extant[i] = self.do_create(pre="aa")
        # XXX: Ensure mkstemped files are deleted (can't rely on Java's
        # GC)
        for i in extant:
            i.__del__()

    def test_choose_directory(self):
        # _mkstemp_inner can create files in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            self.do_create(dir=dir).write("blat")
        finally:
            os.rmdir(dir)

    # XXX: Jython can't set the write mode yet
    def _test_file_mode(self):
        # _mkstemp_inner creates files with the proper mode
        if not has_stat:
            return            # ugh, can't use TestSkipped.
        file = self.do_create()
        mode = stat.S_IMODE(os.stat(file.name).st_mode)
        expected = 0600
        if sys.platform in ('win32', 'os2emx', 'mac'):
            # There's no distinction among 'user', 'group' and 'world';
            # replicate the 'user' bits.
            user = expected >> 6
            expected = user * (1 + 8 + 64)
        self.assertEqual(mode, expected)

    def test_noinherit(self):
        # _mkstemp_inner file handles are not inherited by child processes
        if not has_spawnl:
            return            # ugh, can't use TestSkipped.
        if test_support.verbose:
            v="v"
        else:
            v="q"
        file = self.do_create()
        fd = "%d" % file.fd
        try:
            me = __file__
        except NameError:
            me = sys.argv[0]
        # We have to exec something, so that FD_CLOEXEC will take
        # effect. The core of this test is therefore in
        # tf_inherit_check.py, which see.
        tester = os.path.join(os.path.dirname(os.path.abspath(me)),
                              "tf_inherit_check.py")
        # On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
        # but an arg with embedded spaces should be decorated with double
        # quotes on each end
        if sys.platform in ('win32'):
            decorated = '"%s"' % sys.executable
            tester = '"%s"' % tester
        else:
            decorated = sys.executable
        retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
        self.failIf(retval < 0,
                    "child process caught fatal signal %d" % -retval)
        self.failIf(retval > 0, "child process reports failure %d"%retval)

    def test_textmode(self):
        # _mkstemp_inner can create files in text mode
        if not has_textmode:
            return            # ugh, can't use TestSkipped.
        self.do_create(bin=0).write("blat\n")
        # XXX should test that the file really is a text file

test_classes.append(test__mkstemp_inner)
class test_gettempprefix(TC):
    """Test gettempprefix()."""

    def test_sane_template(self):
        # The prefix must be a non-empty string.
        prefix = tempfile.gettempprefix()
        self.assert_(isinstance(prefix, basestring))
        self.assert_(len(prefix) > 0)

    def test_usable_template(self):
        # The prefix must be usable to build a creatable file name.
        # Make a scratch directory (avoiding use of the prefix itself),
        # then create a file named prefix + 'xxxxxx.xxx' inside it.
        candidate = tempfile.gettempprefix() + "xxxxxx.xxx"
        scratch = tempfile.mkdtemp(prefix="")
        try:
            candidate = os.path.join(scratch, candidate)
            try:
                handle = os.open(candidate, os.O_RDWR | os.O_CREAT)
            except:
                self.failOnException("os.open")
            os.close(handle)
            os.unlink(candidate)
        finally:
            os.rmdir(scratch)

test_classes.append(test_gettempprefix)
class test_gettempdir(TC):
    """Test gettempdir()."""

    def test_directory_exists(self):
        # The returned path must name an existing directory.
        tmpdir = tempfile.gettempdir()
        self.assert_(os.path.isabs(tmpdir) or tmpdir == os.curdir,
                     "%s is not an absolute path" % tmpdir)
        self.assert_(os.path.isdir(tmpdir),
                     "%s is not a directory" % tmpdir)

    def test_directory_writable(self):
        # Sneaky writability check: NamedTemporaryFile defaults to
        # writing into the directory returned by gettempdir, so simply
        # creating one proves the directory is writable.
        try:
            scratch = tempfile.NamedTemporaryFile()
            scratch.write("blat")
            scratch.close()
        except:
            self.failOnException("create file in %s" % tempfile.gettempdir())

    def test_same_thing(self):
        # Repeated calls must hand back the very same object.
        first = tempfile.gettempdir()
        second = tempfile.gettempdir()
        self.assert_(first is second)

test_classes.append(test_gettempdir)
class test_mkstemp(TC):
    """Test mkstemp()."""

    def do_create(self, dir=None, pre="", suf=""):
        # Create one temp file, check where it landed, then clean it up.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            (handle, path) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
            (parent, _base) = os.path.split(path)
            expected = os.path.abspath(dir)
            self.assertEqual(expected, parent,
                "Directory '%s' incorrectly returned as '%s'" % (expected, parent))
        except:
            self.failOnException("mkstemp")
        try:
            self.nameCheck(path, dir, pre, suf)
        finally:
            os.close(handle)
            os.unlink(path)

    def test_basic(self):
        # mkstemp works for every prefix/suffix/dir combination.
        for kwargs in ({}, dict(pre="a"), dict(suf="b"),
                       dict(pre="a", suf="b"), dict(pre="aa", suf=".txt"),
                       dict(dir=".")):
            self.do_create(**kwargs)

    def test_choose_directory(self):
        # mkstemp honours a caller-chosen directory.
        target = tempfile.mkdtemp()
        try:
            self.do_create(dir=target)
        finally:
            os.rmdir(target)

test_classes.append(test_mkstemp)
class test_mkdtemp(TC):
    """Test mkdtemp()."""

    def do_create(self, dir=None, pre="", suf=""):
        # Create one directory and verify its name; the caller removes it.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("mkdtemp")
        try:
            self.nameCheck(name, dir, pre, suf)
            return name
        except:
            os.rmdir(name)
            raise

    def test_basic(self):
        # mkdtemp can create directories
        os.rmdir(self.do_create())
        os.rmdir(self.do_create(pre="a"))
        os.rmdir(self.do_create(suf="b"))
        os.rmdir(self.do_create(pre="a", suf="b"))
        os.rmdir(self.do_create(pre="aa", suf=".txt"))

    def test_basic_many(self):
        # mkdtemp can create many directories (stochastic)
        extant = range(TEST_FILES)
        try:
            for i in extant:
                extant[i] = self.do_create(pre="aa")
        finally:
            # Only entries that were replaced by a path need removal.
            for i in extant:
                if(isinstance(i, basestring)):
                    os.rmdir(i)

    def test_choose_directory(self):
        # mkdtemp can create directories in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            os.rmdir(self.do_create(dir=dir))
        finally:
            os.rmdir(dir)

    def test_mode(self):
        # mkdtemp creates directories with the proper mode
        if not has_stat:
            return            # ugh, can't use TestSkipped.
        if os.name == 'java':
            # Java doesn't support stating files for permissions
            return
        dir = self.do_create()
        try:
            mode = stat.S_IMODE(os.stat(dir).st_mode)
            mode &= 0777 # Mask off sticky bits inherited from /tmp
            expected = 0700
            if sys.platform in ('win32', 'os2emx', 'mac'):
                # There's no distinction among 'user', 'group' and 'world';
                # replicate the 'user' bits.
                user = expected >> 6
                expected = user * (1 + 8 + 64)
            self.assertEqual(mode, expected)
        finally:
            os.rmdir(dir)

test_classes.append(test_mkdtemp)
class test_mktemp(TC):
    """Test mktemp()."""

    # For safety, all use of mktemp must occur in a private directory.
    # We must also suppress the RuntimeWarning it generates.
    def setUp(self):
        self.dir = tempfile.mkdtemp()

    def tearDown(self):
        if self.dir:
            os.rmdir(self.dir)
            self.dir = None

    class mktemped:
        # Helper: pick a name via mktemp, create the file, then remove it.
        _bflags = tempfile._bin_openflags

        def __init__(self, dir, pre, suf):
            # XXX: Assign _unlink here, instead of as a class var. See
            # mkstemped.__init__ for an explanation
            self._unlink = os.unlink
            self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
            # Create the file. This will raise an exception if it's
            # mysteriously appeared in the meanwhile.
            os.close(os.open(self.name, self._bflags, 0600))
            # XXX: test_mktemp.tearDown expects the file to have been deleted
            # (via __del__) by the time it's called, which is CPython specific
            # garbage collection behavior. We need to delete it now in Jython
            self._unlink(self.name)

        #def __del__(self):
        #    self._unlink(self.name)

    def do_create(self, pre="", suf=""):
        # Create one file inside the private directory and check its name.
        try:
            file = self.mktemped(self.dir, pre, suf)
        except:
            self.failOnException("mktemp")
        self.nameCheck(file.name, self.dir, pre, suf)
        return file

    def test_basic(self):
        # mktemp can choose usable file names
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")

    def test_many(self):
        # mktemp can choose many usable file names (stochastic)
        extant = range(TEST_FILES)
        for i in extant:
            extant[i] = self.do_create(pre="aa")

##     def test_warning(self):
##         # mktemp issues a warning when used
##         warnings.filterwarnings("error",
##                                 category=RuntimeWarning,
##                                 message="mktemp")
##         self.assertRaises(RuntimeWarning,
##                           tempfile.mktemp, dir=self.dir)

test_classes.append(test_mktemp)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
class test_NamedTemporaryFile(TC):
    """Test NamedTemporaryFile()."""

    def do_create(self, dir=None, pre="", suf=""):
        # Build one NamedTemporaryFile and validate its generated name.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            handle = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("NamedTemporaryFile")
        self.nameCheck(handle.name, dir, pre, suf)
        return handle

    def test_basic(self):
        # Every prefix/suffix combination must work.
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")

    def test_creates_named(self):
        # The file must exist on disk under its reported name.
        handle = tempfile.NamedTemporaryFile()
        self.failUnless(os.path.exists(handle.name),
                        "NamedTemporaryFile %s does not exist" % handle.name)

    def test_del_on_close(self):
        # Closing must delete the file from disk.
        parent = tempfile.mkdtemp()
        try:
            handle = tempfile.NamedTemporaryFile(dir=parent)
            handle.write('blat')
            handle.close()
            self.failIf(os.path.exists(handle.name),
                        "NamedTemporaryFile %s exists after close" % handle.name)
        finally:
            os.rmdir(parent)

    def test_multiple_close(self):
        # Redundant close() calls must not raise.
        handle = tempfile.NamedTemporaryFile()
        handle.write('abc\n')
        handle.close()
        try:
            handle.close()
            handle.close()
        except:
            self.failOnException("close")

    # How to test the mode and bufsize parameters?

test_classes.append(test_NamedTemporaryFile)
class test_TemporaryFile(TC):
    """Test TemporaryFile()."""

    def test_basic(self):
        # Creation must simply succeed; there is no name to check
        # because the file is anonymous.
        try:
            tempfile.TemporaryFile()
        except:
            self.failOnException("TemporaryFile")

    def test_has_no_name(self):
        # An anonymous file must not pin its containing directory:
        # since it has no name, removing the directory should succeed
        # while the file is still open.
        parent = tempfile.mkdtemp()
        handle = tempfile.TemporaryFile(dir=parent)
        handle.write('blat')
        try:
            os.rmdir(parent)
        except:
            exc_info = sys.exc_info()
            # cleanup
            handle.close()
            os.rmdir(parent)
            self.failOnException("rmdir", exc_info)

    def test_multiple_close(self):
        # Redundant close() calls must not raise.
        handle = tempfile.TemporaryFile()
        handle.write('abc\n')
        handle.close()
        try:
            handle.close()
            handle.close()
        except:
            self.failOnException("close")

    # How to test the mode and bufsize parameters?

if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
    test_classes.append(test_TemporaryFile)
def test_main():
    """Run every accumulated tempfile test class."""
    test_support.run_unittest(*test_classes)
if __name__ == "__main__":
    test_main()
    # XXX: Nudge Java's GC in an attempt to trigger any temp file's
    # __del__ (cause them to be deleted) that hasn't been called
    # (this file targets Jython, hence the java.lang import).
    from java.lang import System
    System.gc()
|
"""
The nth term of the sequence of triangle numbers is given by, tn = ½n(n+1); so
the first ten triangle numbers are:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
By converting each letter in a word to a number corresponding to its
alphabetical position and adding these values we form a word value. For example,
the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value is a
triangle number then we shall call the word a triangle word.
Using words.txt (right click and 'Save Link/Target As...'), a 16K text file
containing nearly two-thousand common English words, how many are triangle
words?
"""
import os
# Precompute the first 100 triangular numbers: t_n = n * (n + 1) / 2.
TRIANGULAR_NUMBERS = [number * (number + 1) // 2 for number in range(1, 101)]
def solution():
    """
    Finds the amount of triangular words in the words file.

    >>> solution()
    162
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    with open(words_file_path) as words_file:
        line = words_file.readline()
    # The file is one comma-separated line of double-quoted words.
    words = [word.strip('"') for word in line.strip("\r\n").split(",")]
    # Set membership is O(1) versus O(n) for the precomputed list.
    triangular_numbers = set(TRIANGULAR_NUMBERS)
    # A word's value is the sum of its letters' alphabetical positions;
    # input words are uppercase, hence the ord(ch) - 64 offset ('A' == 65).
    word_values = (sum(ord(ch) - 64 for ch in word) for word in words)
    return sum(1 for value in word_values if value in triangular_numbers)
if __name__ == "__main__":
    # Script entry point: print the number of triangle words.
    print(solution())
|
## 1. Overview ##
# Lesson-style script: column arithmetic and normalization on food_info.csv.
import pandas as pd
food_info = pd.read_csv('food_info.csv')
col_names = food_info.columns.tolist()
print(food_info.head(3))

## 2. Transforming a Column ##
# Element-wise arithmetic on a single Series.
div_1000 = food_info["Iron_(mg)"] / 1000
add_100 = food_info["Iron_(mg)"] + 100
sub_100 = food_info["Iron_(mg)"] - 100
mult_2 = food_info["Iron_(mg)"]*2
sodium_grams = food_info["Sodium_(mg)"] / 1000
sugar_milligrams = food_info["Sugar_Tot_(g)"] * 1000

## 3. Performing Math with Multiple Columns ##
# Element-wise arithmetic between two Series (aligned by index).
water_energy = food_info["Water_(g)"] * food_info["Energ_Kcal"]
print(water_energy[0:5])
grams_of_protein_per_gram_of_water = food_info['Protein_(g)']/ food_info['Water_(g)']
milligrams_of_calcium_and_iron = food_info['Calcium_(mg)']+ food_info['Iron_(mg)']

## 4. Create a Nutritional Index ##
# Weighted combination: protein counts for, fat counts against.
weighted_protein = 2 * food_info['Protein_(g)']
weighted_fat = -0.75 * food_info['Lipid_Tot_(g)']
initial_rating = weighted_protein + weighted_fat

## 5. Normalizing Columns in a Data Set ##
# Scale each column into [0, 1] by dividing by its maximum.
print(food_info["Protein_(g)"][0:5])
max_protein = food_info["Protein_(g)"].max()
normalized_protein = food_info["Protein_(g)"]/food_info["Protein_(g)"].max()
normalized_fat = food_info["Lipid_Tot_(g)"]/food_info["Lipid_Tot_(g)"].max()

## 6. Creating a New Column ##
food_info['Normalized_Protein'] = normalized_protein
food_info['Normalized_Fat'] = normalized_fat

## 7. Create a Normalized Nutritional Index ##
food_info["Normalized_Protein"] = food_info["Protein_(g)"] / food_info["Protein_(g)"].max()
food_info["Normalized_Fat"] = food_info["Lipid_Tot_(g)"] / food_info["Lipid_Tot_(g)"].max()
food_info['Norm_Nutr_Index'] = 2 * food_info["Normalized_Protein"] - 0.75 * food_info["Normalized_Fat"]

## 8. Sorting a DataFrame by a Column ##
food_info["Normalized_Protein"] = food_info["Protein_(g)"] / food_info["Protein_(g)"].max()
food_info["Normalized_Fat"] = food_info["Lipid_Tot_(g)"] / food_info["Lipid_Tot_(g)"].max()
food_info["Norm_Nutr_Index"] = 2*food_info["Normalized_Protein"] + (-0.75*food_info["Normalized_Fat"])
# Sort best-rated foods first, in place.
food_info.sort_values("Norm_Nutr_Index",inplace =True,ascending = False)
|
import os
from prettytable import PrettyTable
from counterpartycli import wallet, util
# TODO: inelegant
def get_view(view_name, args):
    """Fetch the data backing a named CLI view; None for unknown names."""
    if view_name == 'balances':
        return wallet.balances(args.address)
    if view_name == 'asset':
        return wallet.asset(args.asset)
    if view_name == 'wallet':
        return wallet.wallet()
    if view_name == 'pending':
        return wallet.pending()
    if view_name == 'getinfo':
        return util.api('get_running_info')
    if view_name == 'getrows':
        method = 'get_{}'.format(args.table)
        filters = [tuple(f) for f in args.filter] if args.filter else []
        params = {
            'filters': filters,
            'filterop': args.filter_op,
            'order_by': args.order_by,
            'order_dir': args.order_dir,
            'start_block': args.start_block,
            'end_block': args.end_block,
            'status': args.status,
            'limit': args.limit,
            'offset': args.offset
        }
        return util.api(method, params)
def print_balances(balances):
    """Pretty-print an asset -> amount mapping."""
    table = PrettyTable(['Asset', 'Amount'])
    for asset in balances:
        table.add_row([asset, balances[asset]])
    output = ['', 'Balances', table.get_string(), '']
    print(os.linesep.join(output))
def print_asset(asset):
    # Render one asset's details, then (when present) its holder
    # balances and its send history.
    lines = []
    lines.append('')
    lines.append('Informations')
    table = PrettyTable(header=False, align='l')
    table.add_row(['Asset Name:', asset['asset']])
    table.add_row(['Asset ID:', asset['asset_id']])
    table.add_row(['Divisible:', asset['divisible']])
    table.add_row(['Locked:', asset['locked']])
    table.add_row(['Supply:', asset['supply']])
    table.add_row(['Issuer:', asset['issuer']])
    table.add_row(['Description:', '‘' + asset['description'] + '’'])
    table.add_row(['Balance:', asset['balance']])
    lines.append(table.get_string())
    if asset['addresses']:
        lines.append('')
        lines.append('Addresses')
        table = PrettyTable(['Address', 'Balance'])
        for address in asset['addresses']:
            balance = asset['addresses'][address]
            table.add_row([address, balance])
        lines.append(table.get_string())
    if asset['sends']:
        lines.append('')
        lines.append('Sends')
        table = PrettyTable(['Type', 'Quantity', 'Source', 'Destination'])
        for send in asset['sends']:
            table.add_row([send['type'], send['quantity'], send['source'], send['destination']])
        lines.append(table.get_string())
    lines.append('')
    print(os.linesep.join(lines))
def print_wallet(wallet):
    """Pretty-print per-address balances followed by wallet totals."""
    lines = []
    for address in wallet['addresses']:
        per_address = PrettyTable(['Asset', 'Balance'])
        for asset in wallet['addresses'][address]:
            per_address.add_row([asset, wallet['addresses'][address][asset]])
        lines.extend([address, per_address.get_string(), ''])
    totals = PrettyTable(['Asset', 'Balance'])
    for asset in wallet['assets']:
        totals.add_row([asset, wallet['assets'][asset]])
    lines.extend(['TOTAL', totals.get_string(), ''])
    print(os.linesep.join(lines))
def print_pending(awaiting_btcs):
    # Render matched orders that are awaiting BTC payment.
    table = PrettyTable(['Matched Order ID', 'Time Left'])
    for order_match in awaiting_btcs:
        # NOTE(review): format_order_match is not defined or imported in the
        # visible portion of this module — presumably provided elsewhere in
        # the file; confirm it returns a [id, time_left] row.
        order_match = format_order_match(order_match)
        table.add_row(order_match)
    print(table)
def print_getrows(rows):
    """Pretty-print a list of row dicts, or a placeholder when empty.

    Column headers come from the first row's keys; all rows are assumed
    to share the same key order.
    """
    # Guard clause + truthiness instead of `len(rows) > 0` nesting.
    if not rows:
        print("No result.")
        return
    table = PrettyTable(list(rows[0].keys()))
    for row in rows:
        table.add_row(list(row.values()))
    print(table)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
# vim: set fileencoding=utf-8 sw=2 ts=2 et :
from __future__ import absolute_import
from __future__ import with_statement
from logging import getLogger
import networkx as NX
import yaml
from systems.collector import Aggregate, CResource
from systems.registry import get_registry
from systems.typesystem import EResource, Transition, ResourceRef
__all__ = ('Realizer', )
# Module-level logger, named after this module.
LOGGER = getLogger(__name__)

# Maximum length of a node description (used for graph drawing labels).
DESC_LIMIT = 64

def describe(thing):
    """Return str(thing) truncated to DESC_LIMIT characters."""
    # The previous "'%s' % ..." wrapping was a no-op around an
    # already-stringified value; slicing alone suffices.
    return str(thing)[:DESC_LIMIT]
class CycleError(Exception):
    """Raised when an operation would break the acyclic graph invariant."""
    pass
class Node(object):
    """Abstract graph node; only subclasses may be instantiated."""

    def __init__(self):
        if type(self) == Node:
            raise TypeError

    def __repr__(self):
        return '<' + str(self) + '>'

    def __str__(self):
        return self.__class__.__name__
class CheckPointNode(Node):
    # Plain marker node; created by ResourceGraph.add_checkpoint to
    # anchor dependencies without carrying a payload.
    pass
class ExpandableNode(Node):
    """Abstract base for the sentinel nodes that bracket a resource."""
    def __init__(self, res):
        super(ExpandableNode, self).__init__()
        if type(self) == ExpandableNode:
            # Abstract class
            raise TypeError
        self._res = res  # the resource this sentinel stands for
class BeforeExpandableNode(ExpandableNode):
    """Sentinel placed before a resource's expanded subgraph."""
    def __str__(self):
        return 'Before %s' % self._res
class AfterExpandableNode(ExpandableNode):
    """Sentinel placed after a resource's expanded subgraph."""
    def __str__(self):
        return 'After %s' % self._res
class GraphFirstNode(Node, yaml.YAMLObject):
    # Unique source sentinel: every node gets an edge from it
    # (see ResourceGraph._add_node).
    yaml_tag = u'GraphFirstNode'
class GraphLastNode(Node, yaml.YAMLObject):
    # Unique sink sentinel: every node gets an edge to it
    # (see ResourceGraph._add_node).
    yaml_tag = u'GraphLastNode'
# Every node type allowed to appear in a ResourceGraph.
node_types = (CheckPointNode, BeforeExpandableNode, AfterExpandableNode,
              GraphFirstNode, GraphLastNode,
              Transition, Aggregate, CResource, EResource, ResourceRef)
class ResourceGraph(yaml.YAMLObject):
    """
    A graph of resources and transitions linked by dependencies.

    Resources are positioned as two sentinels in the transition graph.

    Invariant: directed, acyclic.

    NOTE(review): the networkx calls used throughout (nodes_iter,
    predecessors_iter, delete_edge, to_agraph(...) with extra args) match
    the old 1.x API — confirm the pinned networkx version before upgrading.
    """

    def __init__(self, top=None):
        self._graph = NX.DiGraph()
        self._first = GraphFirstNode()
        self._last = GraphLastNode()
        self._graph.add_edge(self._first, self._last)
        # Contains CResource and EResource, despite the name.
        # Used to enforce max one resource per id.
        self.__expandables = {}
        # Received references, by name.
        self.__received_refs = {}
        # What nodes were processed (meaning expanding or collecting)
        self.__processed = set()
        # Pre-bound args passed by ref. Allow putting extra depends on them.
        if top is not None:
            if not isinstance(top, ResourceGraph):
                raise TypeError(top, ResourceGraph)
            self.__top = top
        else:
            self.__top = self

    yaml_tag = u'!ResourceGraph'

    @classmethod
    def from_yaml(cls, loader, ynode):
        # Rebuild a graph from its serialized node/depends relations.
        rg = cls()
        # Deep because of aliases and anchors, I think.
        mp = loader.construct_mapping(ynode, deep=True)
        pred_rels = mp['nodes']
        for rel in pred_rels:
            rg._add_node(rel['node'], depends=rel['depends'])
        return rg

    @classmethod
    def to_yaml(cls, dumper, rg):
        # This is incomplete.
        pred_rels = [{'node': node, 'depends': list(depends), }
                     for (node, depends) in rg._iter_pred_rels()]
        return dumper.represent_mapping(cls.yaml_tag, {
            'nodes': pred_rels,
        })

    def _iter_node_preds(self, node0):
        # Predecessors of node0, excluding the two sentinels.
        return (node
                for node in self._graph.predecessors_iter(node0)
                if node not in (self._first, self._last))

    def _iter_pred_rels(self):
        # (node, predecessors) pairs in topological order, sans sentinels.
        return ((node, self._iter_node_preds(node))
                for node in self.sorted_nodes()
                if node not in (self._first, self._last))

    def sorted_nodes(self):
        return NX.topological_sort(self._graph)

    def sorted_transitions(self):
        return [n for n in self.sorted_nodes()
                if isinstance(n, Transition)]

    def iter_uncollected_resources(self):
        for nod in self._graph.nodes_iter():
            if isinstance(nod, CResource):
                if not nod in self.__processed:
                    yield nod

    def iter_unexpanded_resources(self):
        for nod in self._graph.nodes_iter():
            if isinstance(nod, EResource):
                if not nod in self.__processed:
                    yield nod

    def iter_unexpanded_aggregates(self):
        for agg in self._graph.nodes_iter():
            if isinstance(agg, Aggregate):
                if not agg in self.__processed:
                    yield agg

    def iter_unprocessed(self):
        # Union of the three "still to be handled" iterators above.
        for nod in self.iter_uncollected_resources():
            yield nod
        for nod in self.iter_unexpanded_resources():
            yield nod
        for nod in self.iter_unexpanded_aggregates():
            yield nod

    def has_unprocessed(self):
        l = list(self.iter_unprocessed())
        return bool(l) # Tests for non-emptiness

    def require_acyclic(self):
        # Raise CycleError if the acyclic invariant has been broken.
        if not NX.is_directed_acyclic_graph(self._graph):
            # XXX NX doesn't have a 1-line method for listing those cycles
            raise CycleError

    def _add_node(self, node, depends=()):
        # Insert node between the sentinels and wire up its dependencies.
        if not isinstance(node, node_types):
            raise TypeError(node, node_types)
        self._graph.add_node(node)
        self._graph.add_edge(self._first, node)
        self._graph.add_edge(node, self._last)
        for dep in depends:
            depn = self._intern(dep)
            self._add_node_dep(depn, node)
        return node

    def add_checkpoint(self, depends=()):
        return self._add_node(CheckPointNode(), depends)

    def add_transition(self, transition, depends=()):
        if not isinstance(transition, Transition):
            raise TypeError(transition, Transition)
        return self._add_node(transition, depends)

    def _add_aggregate(self, aggregate, depends=()):
        if not isinstance(aggregate, Aggregate):
            raise TypeError(aggregate, Aggregate)
        return self._add_node(aggregate, depends)

    def add_resource(self, resource, depends=()):
        """
        Add a resource.

        If an identical resource exists, it is returned.
        """
        if not isinstance(resource, (CResource, EResource)):
            raise TypeError(resource, (CResource, EResource))
        if resource.identity in self.__expandables:
            # We have this id already.
            # Either it's the exact same resource, or a KeyError is thrown.
            resource = self._intern(resource)
            # XXX Need to bypass _intern for already expanded.
            # XXX When we use add_to_top, we sometimes have to deal
            # with a resource that's already been expanded.
            # Those are not in the graph anymore. How do we refer to them?
        else:
            self.__expandables[resource.identity] = resource
        # Even if already there, we need to add the depends.
        resource = self._add_node(resource, depends)
        # If already there, notice we aliase it.
        return self.make_ref(resource)

    def make_ref(self, res, depends=()):
        # Wrap a resource in a ResourceRef that depends on it.
        res = self._intern(res)
        if not isinstance(res, (CResource, EResource)):
            raise TypeError(res, (CResource, EResource))
        depends = list(depends)
        depends.append(res)
        return self._add_node(ResourceRef(res), depends)

    def make_alias_ref(self, ref, depends=()):
        # Create a second ref to the resource an existing ref points at.
        ref = self._intern(ref)
        if not isinstance(ref, ResourceRef):
            raise TypeError(ref, ResourceRef)
        depends = list(depends)
        depends.append(ref)
        return self._add_node(ResourceRef(ref.unref), depends)

    def add_to_top(self, res):
        """
        Add a resource to the top ResourceGraph.

        Use it to put things that you don't necessarily
        want to be after the outside dependencies the current graph has.
        """
        ref = self.__top.add_resource(res)
        return self._add_node(ref)

    def _add_node_dep(self, node0, node1):
        # Add edge node0 -> node1, preserving the acyclic invariant.
        if not isinstance(node0, node_types):
            raise TypeError(node0, node_types)
        if not isinstance(node1, node_types):
            raise TypeError(node1, node_types)
        if not self._graph.has_node(node0):
            raise KeyError(node0)
        if not self._graph.has_node(node1):
            raise KeyError(node1)
        if self._graph.has_edge(node0, node1):
            return False
        if node0 == node1:
            # Disallow self-loops to keep acyclic invariant.
            # Also they don't make sense.
            raise ValueError(node0)
        # Invariant check
        rev_path = NX.shortest_path(self._graph, node1, node0)
        if rev_path is not False:
            raise CycleError(rev_path)
        self._graph.add_edge(node0, node1)
        return True

    def _intern(self, thing):
        # Return the graph-resident instance of thing, or raise.
        if not isinstance(thing, node_types):
            raise TypeError
        if thing not in self._graph:
            raise KeyError(thing)
        return thing

    def add_dependency(self, elem0, elem1):
        node0 = self._intern(elem0)
        node1 = self._intern(elem1)
        return self._add_node_dep(node0, node1)

    def _is_direct_rconnect(self, r0, r1):
        s0 = self._intern(r0)
        s1 = self._intern(r1)
        # shortest_path is also a test for connectedness.
        return bool(NX.shortest_path(self._graph, s0, s1))

    def resources_connected(self, r0, r1):
        # Connected in either direction.
        return self._is_direct_rconnect(r0, r1) \
                or self._is_direct_rconnect(r1, r0)

    def draw(self, fname):
        return self.draw_agraph(fname)

    def draw_agraph(self, fname):
        # Write .dot, .svg and .yaml renderings of the graph.
        # XXX pygraphviz has steep dependencies (x11 libs)
        # and recommends (texlive) for a headless box.
        # We duplicate the graph, otherwise networkx / pygraphviz
        # would make a lossy conversion (sometimes refusing to convert), by adding
        # nodes as their string representation. Madness, I know.
        gr2 = NX.create_empty_copy(self._graph, False)
        for node in self._graph.nodes_iter():
            gr2.add_node(id(node))
        for (n0, n1) in self._graph.edges_iter():
            gr2.add_edge(id(n0), id(n1))
        names = dict((id(node), { 'label': describe(node)})
            for node in self._graph.nodes_iter())
        gr2.delete_node(id(self._first))
        gr2.delete_node(id(self._last))
        g = NX.to_agraph(gr2, {
                'graph': {
                    'nodesep': '0.2',
                    'rankdir': 'TB',
                    'ranksep': '0.5',
                    },
                'node': {
                    'shape': 'box',
                    },
                },
            names)
        g.write(fname + '.dot')
        # Dot is good for DAGs.
        g.layout(prog='dot')
        g.draw(fname + '.svg')
        with open(fname + '.yaml', 'w') as f:
            yaml.dump(self, f)
        # Fails with the expanded graph, due to instancemethod
        #yaml.load(yaml.dump(self))

    def draw_matplotlib(self, fname):
        # Pyplot is stateful and awkward to use.
        import matplotlib.pyplot as P
        # Disable hold or it definitely won't work (probably a bug).
        P.hold(False)
        NX.draw(self._graph)
        P.savefig(fname)

    def collect_resources(self, r0s, r1):
        """
        Replace an iterable of resources with one new resource.

        May break the acyclic invariant, caveat emptor.
        """
        # The invariant is kept iff the r0s don't have paths linking them.
        # For our use case (collectors), we could allow paths provided they are
        # internal to r0s. This introduces self-loops that we would then remove.
        for r0 in r0s:
            r0 = self._intern(r0)
            if r0 in self.__processed:
                raise RuntimeError
        if r1 in self._graph:
            raise ValueError(r1)
        r1 = self._add_aggregate(r1)
        for r0 in r0s:
            r0 = self._intern(r0)
            self._move_edges(r0, r1)
            self.__processed.add(r0)
        self.require_acyclic()

    def _move_edges(self, n0, n1):
        # Redirect every edge touching n0 onto n1, then drop n0.
        if n0 == n1:
            raise RuntimeError
        n0 = self._intern(n0)
        n1 = self._intern(n1)
        # list is used as a temporary
        # add after delete in case of same.
        for pred in list(self._graph.predecessors_iter(n0)):
            self._graph.delete_edge(pred, n0)
            self._graph.add_edge(pred, n1)
        for succ in list(self._graph.successors_iter(n0)):
            self._graph.delete_edge(n0, succ)
            self._graph.add_edge(n1, succ)
        self._graph.delete_node(n0)
        # Can't undo. Invariant will stay broken.

    def _split_node(self, res):
        # Replace res by a Before/After sentinel pair, rewiring edges.
        res = self._intern(res)
        before = self._add_node(BeforeExpandableNode(res))
        after = self._add_node(AfterExpandableNode(res))
        self._graph.add_edge(before, after)
        for pred in list(self._graph.predecessors_iter(res)):
            self._graph.delete_edge(pred, res)
            self._graph.add_edge(pred, before)
        for succ in list(self._graph.successors_iter(res)):
            self._graph.delete_edge(res, succ)
            self._graph.add_edge(after, succ)
        self._graph.delete_node(res)
        return before, after

    def _receive_by_ref(self, name, ref):
        # Register an incoming ref under name; one registration per name.
        if name in self.__received_refs:
            raise RuntimeError(name, ref)
        ref = self._add_node(ref)
        self.__received_refs[name] = ref
        return ref

    def _pass_by_ref(self, subgraph, name, ref):
        # The origin/value distinction is important
        # for aliased arguments (two refs, same val).
        ref = self._intern(ref)
        if not isinstance(ref, ResourceRef):
            raise TypeError(ref, ResourceRef)
        subgraph._receive_by_ref(name, ref)

    def expand_resource(self, res):
        """
        Replace res by a small resource graph.

        The resource_graph is inserted in the main graph
        between the sentinels that represent the resource.
        """
        res = self._intern(res)
        # We're processing from the outside in.
        if res in self.__processed:
            raise RuntimeError
        resource_graph = ResourceGraph(self.__top)
        if isinstance(res, EResource):
            for (name, ref) in res.iter_passed_by_ref():
                # ref will be present in both graphs.
                self._pass_by_ref(resource_graph, name, ref)
        elif isinstance(res, Aggregate):
            pass
        else:
            raise TypeError(res)
        res.expand_into(resource_graph)
        # We expand from the outside in
        if bool(resource_graph.__processed):
            raise RuntimeError
        # Do not skip sentinels.
        for n in resource_graph._graph.nodes_iter():
            self._add_node(n)
        for (n0, n1) in resource_graph._graph.edges_iter():
            self._add_node_dep(n0, n1)
        for (id1, res1) in resource_graph.__expandables.iteritems():
            # We expand from the outside in.
            assert res1 not in self.__processed
            if id1 in self.__expandables:
                # Pass by reference if you must use the same resource
                # in different contexts.
                raise RuntimeError('ResourceBase collision.', res, res1)
            else:
                self.__expandables[id1] = res1
        before, after = self._split_node(res)
        self.__processed.add(res)
        self._move_edges(resource_graph._first, before)
        self._move_edges(resource_graph._last, after)
        # What may break the invariant:
        # Passing a ref to res, and making res depend on ref.
        # ref ends up on both sides of ref.before.
        self.require_acyclic()
class Realizer(object):
    """
    A graph of realizables linked by dependencies.

    Drives an expandable through three phases: expansion (recursively
    expand nested resources into the graph), collection (merge
    compatible resources), and realization (run the resulting
    transitions in dependency order).
    """
    def __init__(self, expandable):
        self.__resources = ResourceGraph()
        self.__expandable = expandable
        # Lifecycle: 'init' -> 'frozen' -> 'realized'.
        self.__state = 'init'
    def require_state(self, state):
        """
        Raise an exception if we are not in the required state.
        """
        if self.__state != state:
            raise RuntimeError(u'Realizer state should be «%s»' % state)
    def ensure_frozen(self):
        """
        Build the finished dependency graph.
        Merge identical realizables, collect what can be.
        """
        if self.__state == 'frozen':
            return
        # Order is important
        self.require_state('init')
        self.__expandable.expand_into(self.__resources)
        #self.__resources.draw('/tmp/freezing')
        self._expand()
        #self.__resources.draw('/tmp/pre-collect')
        self._collect()
        self._expand_aggregates()
        assert not bool(list(self.__resources.iter_unprocessed()))
        self.__state = 'frozen'
        #self.__resources.draw('/tmp/frozen')
    def _collect(self):
        # Collects compatible nodes into merged nodes.
        def can_merge(part0, part1):
            # Two parts may merge only if no pair of their members is
            # already connected through the dependency graph (a merge
            # would otherwise create a cycle).
            for n0 in part0:
                for n1 in part1:
                    if self.__resources.resources_connected(n0, n1):
                        return False
            return True
        def possibly_merge(partition):
            # Merge once if possible. Return True if a merge happened.
            parts = list(partition)
            count = len(parts)
            # Loop over the triangle of unordered pairs
            for i in xrange(count):
                for j in xrange(i + 1, count):
                    part0, part1 = parts[i], parts[j]
                    if can_merge(part0, part1):
                        partition.add(part0.union(part1))
                        partition.remove(part0)
                        partition.remove(part1)
                        return True
            return False
        reg = get_registry()
        for collector in reg.collectors:
            # Pre-partition is made of parts acceptable for the collector.
            pre_partition = collector.partition(
                    [r for r in self.__resources.iter_uncollected_resources()
                        if collector.filter(r)])
            for part in pre_partition:
                # Each collector part is split into singletons, which are
                # then re-merged wherever dependencies allow.
                # Not a particularly efficient algorithm, just simple.
                # Gives one solution among many possibilities.
                # NOTE: the original comprehension also iterated
                # pre_partition a second time; set() deduplicated the
                # copies, so dropping that clause preserves behaviour
                # while removing the redundant work.
                partition = set(frozenset((r, )) for r in part)
                while possibly_merge(partition):
                    pass
                # Let the collector handle the rest
                for sub_part in partition:
                    if not sub_part:
                        # Test for emptiness.
                        # Aggregate even singletons.
                        continue
                    merged = collector.collect(sub_part)
                    self.__resources.collect_resources(sub_part, merged)
        assert not bool(list(self.__resources.iter_uncollected_resources()))
    def _expand(self):
        # Poor man's recursion: iterate to a fixed point, since expanding
        # a resource may introduce new unexpanded resources.
        while True:
            fresh = set(r
                    for r in self.__resources.iter_unexpanded_resources())
            if not fresh:  # Test for emptiness
                break
            for r in fresh:
                self.__resources.expand_resource(r)
        assert not bool(list(self.__resources.iter_unexpanded_resources()))
    def _expand_aggregates(self):
        for a in list(self.__resources.iter_unexpanded_aggregates()):
            self.__resources.expand_resource(a)
        assert not bool(list(self.__resources.iter_unexpanded_aggregates()))
        # Enforce the rule that aggregates can only expand into transitions.
        if self.__resources.has_unprocessed():
            raise RuntimeError(list(self.__resources.iter_unprocessed()))
    def realize(self):
        """
        Realize all realizables and transitions in dependency order.
        """
        self.ensure_frozen()
        for t in self.__resources.sorted_transitions():
            t.realize()
        self.__state = 'realized'
|
# -*- coding: utf-8 -*-
#
# BrodyHopfield.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Spike synchronization through subthreshold oscillation
------------------------------------------------------------
This script reproduces the spike synchronization behavior
of integrate-and-fire neurons in response to a subthreshold
oscillation. This phenomenon is shown in Fig. 1 of [1]_
Neurons receive a weak 35 Hz oscillation, a gaussian noise current
and an increasing DC. The time-locking capability is shown to
depend on the input current given. The result is then plotted using
pylab. All parameters are taken from the above paper.
References
~~~~~~~~~~~~~
.. [1] Brody CD and Hopfield JJ (2003). Simple networks for
spike-timing-based computation, with application to olfactory
processing. Neuron 37, 843-852.
"""
#################################################################################
# First, we import all necessary modules for simulation, analysis, and plotting.
import nest
import nest.raster_plot
###############################################################################
# Second, the simulation parameters are assigned to variables.
N = 1000 # number of neurons
bias_begin = 140. # minimal value for the bias current injection [pA]
bias_end = 200. # maximal value for the bias current injection [pA]
T = 600 # simulation time (ms)
# parameters for the alternative-current generator
# (per NEST convention amplitude is in pA and frequency in Hz)
driveparams = {'amplitude': 50., 'frequency': 35.}
# parameters for the noise generator
noiseparams = {'mean': 0.0, 'std': 200.}
neuronparams = {'tau_m': 20., # membrane time constant
                'V_th': 20., # threshold potential
                'E_L': 10., # membrane resting potential
                't_ref': 2., # refractory period
                'V_reset': 0., # reset potential
                'C_m': 200., # membrane capacitance
                'V_m': 0.} # initial membrane potential
###############################################################################
# Third, the nodes are created using ``Create``. We store the returned handles
# in variables for later reference.
neurons = nest.Create('iaf_psc_alpha', N)
sd = nest.Create('spike_detector')
noise = nest.Create('noise_generator')
drive = nest.Create('ac_generator')
###############################################################################
# Set the parameters specified above for the generators using ``SetStatus``.
nest.SetStatus(drive, driveparams)
nest.SetStatus(noise, noiseparams)
###############################################################################
# Set the parameters specified above for the neurons. Neurons get an internal
# current. The first neuron additionally receives the current with amplitude
# `bias_begin`, the last neuron with amplitude `bias_end`.
# The bias current I_e ramps linearly with the neuron index n.
nest.SetStatus(neurons, neuronparams)
nest.SetStatus(neurons, [{'I_e':
                          (n * (bias_end - bias_begin) / N + bias_begin)}
                         for n in neurons])
###############################################################################
# Set the parameters for the ``spike_detector``: recorded data should include
# the information about global IDs of spiking neurons and the time of
# individual spikes.
nest.SetStatus(sd, {"withgid": True, "withtime": True})
###############################################################################
# Connect alternative current and noise generators as well as
# spike detectors to neurons
nest.Connect(drive, neurons)
nest.Connect(noise, neurons)
nest.Connect(neurons, sd)
###############################################################################
# Simulate the network for time `T`.
nest.Simulate(T)
###############################################################################
# Plot the raster plot of the neuronal spiking activity.
nest.raster_plot.from_device(sd, hist=True)
|
from __future__ import with_statement
from app import app
from flask import request, jsonify
from werkzeug.exceptions import BadRequest
import threading
import subprocess
import os
import sys
import traceback
import datetime
import uuid
import time
from newman_es.es_search import initialize_email_addr_cache
from utils.file import spit
from newman_es.config.newman_config import index_creator_prefix
# Filesystem anchors derived from this module's location.
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
BASE_DIR = os.path.abspath("{}/../".format(SITE_ROOT))
WORK_DIR = os.path.abspath("{}/../work_dir/".format(SITE_ROOT))
# Root directory scanned for <case>/<type>/<dataset> ingest layouts.
ingest_parent_dir = "/vagrant/newman-ingester/"
# Single-ingest-at-a-time guard; the Condition wraps the same Lock so
# route handlers can probe busyness via _INGESTER_LOCK.locked().
_INGESTER_LOCK=threading.Lock()
_INGESTER_CONDITION=threading.Condition(_INGESTER_LOCK)
# Status codes reported by /ingester/status.
INGESTER_AVAILABLE=0
INGESTER_BUSY=1
def fmtNow():
    """Return the current UTC time as a compact YYYYMMDDHHMMSS string."""
    utc_now = datetime.datetime.utcnow()
    return utc_now.strftime('%Y%m%d%H%M%S')
# TODO need to add an ingest id for monitoring specific ingests
@app.route('/ingester/status')
def ingest_status(*args, **kwargs):
    """Report whether the ingester is idle or currently processing."""
    busy = _INGESTER_LOCK.locked()
    if busy:
        return jsonify({"status_code" : INGESTER_BUSY, "status_message" : "Currently ingesting, please see logs for detailing information."})
    return jsonify({"status_code" : INGESTER_AVAILABLE, "status_message" : "Ingester available."})
@app.route('/ingester/ingest_id')
def get_ingest_id():
    '''
    Create a time-based UUID1 to serve as an ingest id.

    The creation time can be recovered later from uuid.time:
    0x01b21dd213814000 is the number of 100-ns intervals between the
    UUID epoch 1582-10-15 00:00:00 and the Unix epoch
    1970-01-01 00:00:00, which is how the datetime below is derived
    from the UUID's 100-ns timestamp.

    :return: json containing the id and its creation datetime
    '''
    # NOTE: 'long' and the trailing-L literal make this Python 2 only.
    u = uuid.uuid1(clock_seq=long(time.time()*1e9))
    dt = datetime.datetime.fromtimestamp((u.time - 0x01b21dd213814000L)*100/1e9)
    str_time = dt.strftime('%Y-%m-%dT%H:%M:%S')
    return jsonify({"ingest_id" : str(u), "datetime" : str_time})
@app.route('/ingester/cases')
def list_cases():
    """List ingestable cases and their datasets, grouped by ingest type.

    Walks ingest_parent_dir looking for <case>/<type>/<dataset>
    directory layouts, where <type> is one of emls/mbox/pst.

    :return: json of the form {"cases": {case: {type: [datasets]}}}
    """
    path = os.path.normpath(ingest_parent_dir)
    # Bug fix: normpath of a non-empty literal is never falsy, so the
    # original 'if not path' guard could never fire; actually probe the
    # filesystem instead.
    if not os.path.isdir(path):
        return jsonify({"message" : "Ensure parent directory exists and is readable by user: " + ingest_parent_dir })
    cases = {}
    for case in os.listdir(path):
        case_path = os.path.join(path, case)
        if not os.path.isdir(case_path):
            continue
        cases.setdefault(case, {})
        for ingest_type in os.listdir(case_path):
            type_dir = os.path.join(case_path, ingest_type)
            if ingest_type in ["emls", "mbox", "pst"] and os.path.isdir(type_dir):
                # A dataset is any directory nested under the type directory.
                cases[case][ingest_type] = [
                    ds for ds in os.listdir(type_dir)
                    if os.path.isdir(os.path.join(type_dir, ds))]
    return jsonify({"cases" : cases})
@app.route('/ingester/extract', methods=['POST'])
def extract():
    '''
    Kick off an ingest in a background thread and return immediately.

    Expected JSON body:
    case-id - used to group multiple ingests
    ingest-id - id for a single execution of ingest
    alternate-id - product_id or external id reference
    label - user label for ingest
    file - name of file to ingest
    type - type of ingest pst|mbox|eml
    {"case_id" : "email@x.y_case", "ingest_id" : "<AUTOGENERATED>", "alt_ref_id" : "email@x.y_ref", "label":"email@x.y_label", "type":"mbox", "force_language":"en"}
    '''
    global _INGESTER_CONDITION
    params = request.get_json()
    app.logger.info(params)
    try:
        case_id = params["case_id"]
        ingest_id = params["ingest_id"]
        alt_ref_id = params["alt_ref_id"]
        label = params["label"]
        type = params["type"]
        force_language = params.get("force_language", "en")
    except KeyError as ke:
        raise BadRequest("Request is missing param key/value for '{}'".format(ke.message))
    # path = "{}/{}".format(ingest_parent_dir, type)
    if not ingest_id or not type:
        raise TypeError("Encountered a 'None' value for 'email', 'type''")
    # Add the prefix for the newman indexes
    ingest_id = index_creator_prefix() + ingest_id
    logname = "{}_{}_{}_{}".format(case_id,type,label, fmtNow())
    ingester_log = "{}/{}.ingester.log".format(WORK_DIR, logname)
    # errfile = "{}/{}.err.log".format(work_dir, logname)
    service_status_log = "{}/{}.status.log".format(WORK_DIR, logname)
    spit(service_status_log, "[Start] email address={}\n".format(ingest_id), True)
    def extract_thread():
        # Non-blocking acquire; bail out without touching the condition if
        # another ingest is already running.  Bug fix: the original code
        # released the condition in a finally block even on this
        # early-return path, which raises RuntimeError for releasing an
        # un-acquired lock.
        if not _INGESTER_CONDITION.acquire(False):
            spit(service_status_log, "Ingester is currently processing data, you must wait until current ingest is completed before ingesting again. If you believe this is an error check the ingester logs.")
            return
        try:
            args = ["./bin/ingest.sh", ingest_id, ingest_parent_dir, type, case_id, alt_ref_id, label, force_language]
            app.logger.info("Running ingest: {}".format(" ".join(args)))
            spit(service_status_log, "[Running] {} \n".format(" ".join(args)))
            # stdout and stderr are both redirected into the ingester log.
            with open(ingester_log, 'w') as t:
                kwargs = {'stdout': t, 'stderr': t, 'cwd': BASE_DIR, 'bufsize' : 1 }
                subp = subprocess.Popen(args, **kwargs)
                out, err = subp.communicate()
                rtn = subp.returncode
            if rtn != 0:
                app.logger.error("Ingester return with non-zero code: {} \n".format(rtn))
                spit(service_status_log, "[Error] Ingester return with non-zero code: {} \n".format(rtn))
            else:
                app.logger.info("Done Ingesting data. Reloading the email_addr cache.")
                spit(service_status_log, "[Done Ingesting data. Reloading the email_addr cache.]")
                initialize_email_addr_cache(ingest_id, update=True)
                spit(service_status_log, "[Complete.]")
        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            spit(service_status_log, "[Error] <{}>\n".format(e))
            tb = traceback.extract_tb(exc_traceback)
            spit(service_status_log,"[Error] <{}>\n".format(tb))
        finally:
            # Pairs with the successful acquire above.
            _INGESTER_CONDITION.release()
    if not _INGESTER_LOCK.locked():
        thr = threading.Thread(target=extract_thread, args=())
        thr.start()
        return jsonify({'log' : logname })
    return jsonify({'log' : logname, 'status' : "Ingester is currently processing data, you must wait until current ingest is completed before ingesting again. If you believe this is an error check the ingester logs." })
|
#!/usr/bin/env python
import sys
import vtk
from PyQt4 import QtCore, QtGui
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
class MainWindow(QtGui.QMainWindow):
    """Qt main window embedding a VTK render view that shows a sphere."""
    def __init__(self, parent=None):
        QtGui.QMainWindow.__init__(self, parent)
        # Host frame and layout for the VTK widget.
        self.frame = QtGui.QFrame()
        self.vl = QtGui.QVBoxLayout()
        self.vtkWidget = QVTKRenderWindowInteractor(self.frame)
        self.vl.addWidget(self.vtkWidget)
        # Renderer and interactor wiring.
        self.ren = vtk.vtkRenderer()
        self.vtkWidget.GetRenderWindow().AddRenderer(self.ren)
        self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()
        # Build the rendering pipeline: source -> mapper -> actor.
        sphere = vtk.vtkSphereSource()
        sphere.SetCenter(0, 0, 0)
        sphere.SetRadius(5.0)
        sphere_mapper = vtk.vtkPolyDataMapper()
        sphere_mapper.SetInputConnection(sphere.GetOutputPort())
        sphere_actor = vtk.vtkActor()
        sphere_actor.SetMapper(sphere_mapper)
        self.ren.AddActor(sphere_actor)
        self.ren.ResetCamera()
        # Assemble the widget hierarchy and start the interactor.
        self.frame.setLayout(self.vl)
        self.setCentralWidget(self.frame)
        self.show()
        self.iren.Initialize()
if __name__ == "__main__":
    # Create the Qt application, show the window, and exit with the
    # event loop's return code.
    app = QtGui.QApplication(sys.argv)
    window = MainWindow()
    sys.exit(app.exec_())
|
#!/usr/bin/python
import sys
import json
from client import client
from colors import colors
from mpos_osx import get_mouse_pos, Point
from time import sleep
from clone import clone_from
PORT = 8888
# Substitution tokens expanded into commands before they are sent to the
# remote host.  Keys are literal substrings, values their replacements.
variables = {'@win': 'C:\\Windows',
             '@sys': 'C:\\Windows\\system32',
             '@yas': '10.71.36',
             ' ' : '&',
             '_' : ' ',
             '`' : '{BACKSPACE}'}
# Snapshot of the defaults so 'vreset' can restore them even after the
# live mapping has been mutated.  Bug fix: the original aliased the same
# dict (BACKUPVARS = variables), so the backup tracked every mutation.
BACKUPVARS = dict(variables)
def save_vars():
    """Persist the current variable map to vars.json."""
    with open('vars.json', 'w') as out:
        json.dump(variables, out)
def load_vars():
    """Replace the global variable map with the contents of vars.json.

    Bug fix: the original assigned to a function-local 'variables',
    silently discarding the loaded data; it must rebind the module-level
    name.
    """
    global variables
    with open('vars.json', 'r') as f:
        variables = json.load(f)
def set_title(title):
    """Set the terminal window title via an xterm escape sequence."""
    escape_seq = "\x1b]2;%s\x07" % title
    sys.stdout.write(escape_seq)
def set_var_args(cmd):
    """Register the '&'-separated arguments of cmd as @1..@n variables."""
    parts = cmd.split('&')
    for index, value in enumerate(parts[1:], start=1):
        variables['@%d' % index] = value
def add_var(cmd):
    """Handle a 'let <key> = <value>' command; return a status message."""
    parts = cmd.split(' ')
    if len(parts) != 4:
        return 'usage: let "@var" = "value"'
    key = parts[1]
    # Substitute existing variables into the new value before storing it.
    resolved = swap_vars(parts[3])
    variables[key] = resolved
    return 'added var %s to %s' % (key, resolved)
def swap_vars(cmd):
    """Substitute every known variable token in cmd with its value."""
    result = cmd
    # NOTE: iteritems keeps this Python 2 compatible like the rest of
    # the module.
    for token, replacement in variables.iteritems():
        result = result.replace(token, replacement)
    return result
def print_vars():
    """Pretty-print the variable map, one entry per line, in green."""
    formatted = repr(variables).replace(',', '\n')
    print(colors.OKGREEN + formatted + colors.ENDC)
def main():
    """Interactive command loop against a single remote host.

    Returns 0 when the user asked to exit the whole program, 1 on
    connection failure, and None when the session merely ended.
    """
    global variables
    ip = raw_input(colors.HEADER + 'ip: ' + colors.ENDC)
    if ip == 'exit':
        return 0
    ip = swap_vars(ip)
    s = client(ip, PORT, 5)
    try:
        if s.connect() == False:
            print(colors.FAIL + ('Failed to connect to: %s' % ip) + colors.ENDC)
            return 1
    except:
        print(colors.FAIL + ('Failed to connect to: %s' % ip) + colors.ENDC)
        return 1
    # First exchange fetches the remote user name for the prompt.
    s.send('usr')
    user = s.recv()
    #swallow
    s.recv()
    set_title(user)
    print(colors.OKGREEN + ('connected to %s' % ip) + colors.ENDC)
    while 1:
        cmd = raw_input(colors.HEADER + user + ': ' + colors.ENDC)
        if cmd == 'exit':
            break
        elif cmd.startswith('let'):
            print(colors.WARNING + add_var(cmd) + colors.ENDC)
            continue
        elif cmd == 'vars':
            print_vars()
            continue
        elif cmd == 'vload':
            load_vars()
            continue
        elif cmd == 'vsave':
            save_vars()
            continue
        elif cmd == 'vreset':
            # Bug fix: the original assigned to a function-local
            # 'variables', which silently did nothing; rebind the module
            # global from the backup snapshot instead.
            variables = dict(BACKUPVARS)
            continue
        cmd = swap_vars(cmd)
        set_var_args(cmd)
        s.send(cmd)
        response = s.recv()
        if response == 'waiting':
            continue
        else:
            print(colors.OKBLUE + response + colors.ENDC)
            #swallow waiting message
            s.recv()
    if s.connected == True:
        s.disconnect()
def scan_in_range(domain, start, stop):
    """Probe domain.start through domain.(stop-1) and return a list of
    (ip, user) pairs for every host that accepted a connection."""
    open_ip = []
    for suffix in range(int(start), int(stop)):
        ip = '%s.%d' % (domain, suffix)
        sys.stdout.write('\rscanning %s' % ip)
        sys.stdout.flush()
        probe = client(ip, PORT, 1)
        if probe.connect() == True:
            # Ask for the remote user name, then drain the extra reply.
            probe.send('usr')
            usr = probe.recv()
            #swallow
            probe.recv()
            probe.disconnect()
            open_ip.append((ip, usr))
    print('\n')
    return open_ip
def connect_mode():
    """Run interactive sessions until main() signals program exit (0)."""
    set_title('Connect')
    while main() != 0:
        set_title('Connect')
    set_title('')
def scan_mode():
    """Prompt for a domain and ip range, scan it, and report open hosts."""
    set_title('Scan')
    print(colors.WARNING + 'scan'+ colors.ENDC)
    domain = raw_input(colors.HEADER + 'domain: ' + colors.ENDC)
    start = raw_input(colors.HEADER + 'start: ' + colors.ENDC)
    end = raw_input(colors.HEADER + 'end: ' + colors.ENDC)
    print('scanning...')
    set_title('Scanning')
    hosts = scan_in_range(swap_vars(domain), start, end)
    if not hosts:
        print('no hosts found :(')
    else:
        print(colors.HEADER + 'found: ')
        for host in hosts:
            print(colors.OKGREEN + repr(host))
        print(colors.ENDC)
    set_title('Scan Complete')
def capture_mode():
    """Stream the local mouse position to the remote host until ^C."""
    set_title('Connect')
    target = raw_input(colors.HEADER + 'ip: ' + colors.ENDC)
    if target == 'exit':
        return
    target = swap_vars(target)
    conn = client(target, PORT, 5)
    try:
        connected = conn.connect()
    except:
        connected = False
    if connected == False:
        print(colors.FAIL + ('Failed to connect to: %s' % target) + colors.ENDC)
        return
    set_title('Capturing')
    print(colors.OKGREEN + 'capturing mouse position: control+c to exit' + colors.ENDC)
    while 1:
        pos = get_mouse_pos()
        #translate y coord for windows
        pos.Y = abs(900 - pos.Y)
        conn.send('mpos&%d&%d' % (pos.X, pos.Y))
        #swallow
        conn.recv()
def clone_mode():
    """Connect to a host and clone a directory tree onto it."""
    set_title('Clone')
    target = raw_input(colors.HEADER + 'ip: ' + colors.ENDC)
    if target == 'exit':
        return
    target = swap_vars(target)
    conn = client(target, PORT, 5)
    try:
        connected = conn.connect()
    except:
        connected = False
    if connected == False:
        print(colors.FAIL + ('Failed to connect to: %s' % target) + colors.ENDC)
        return
    print('connected')
    top = raw_input(colors.HEADER + 'top: ' + colors.ENDC)
    set_title('Cloning')
    clone_from(top, conn)
    print('done!')
    set_title('Clone Complete')
if __name__ == '__main__':
    # The operating mode is chosen by the single command-line argument.
    mode = ''
    if len(sys.argv) == 2:
        mode = sys.argv[1]
    else:
        print(colors.WARNING + 'usage: python rat.py <connect/scan/ver>' + colors.ENDC)
        sys.exit()
    # Show the ASCII-art banner stored in the local 'header' file.
    with open('header', 'r') as f:
        print(colors.HEADER + f.read() + colors.ENDC)
    try:
        if mode == 'connect':
            connect_mode()
        elif mode == 'scan':
            scan_mode()
        elif mode == 'mouse':
            capture_mode()
        elif mode == 'clone':
            clone_mode()
        elif mode == 'ver':
            print(colors.OKGREEN + 'R.A.T v4.1 "The Rat and the Python" (osx)\n' + colors.ENDC)
    except KeyboardInterrupt:
        print(colors.FAIL + 'Keyboard Interrupt.. Exiting...' + colors.ENDC)
    set_title('Exited')
|
# Copyright (c) 2013 Calin Crisan
# This file is part of motionEye.
#
# motionEye is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import fcntl
import logging
import os.path
import re
import stat
import subprocess
import time
# Per-device caches, cleared whenever list_devices() refreshes the
# device list.
_resolutions_cache = {}  # device -> [(width, height), ...]
_ctrls_cache = {}  # device -> {control: {property: value}}
_ctrl_values_cache = {}  # device -> {control: last percentage set}
# Directory of persistent (by-id) symlinks to video device nodes.
_DEV_V4L_BY_ID = '/dev/v4l/by-id/'
def find_v4l2_ctl():
    """Return the path to the v4l2-ctl binary, or None when not installed."""
    try:
        located = subprocess.check_output('which v4l2-ctl', shell=True)
    except subprocess.CalledProcessError: # not found
        return None
    return located.strip()
def list_devices():
    """Run `v4l2-ctl --list-devices` and return [(device_path, name), ...].

    Output is read in non-blocking mode, bounded both by size (10k) and
    wall time (3s), so a wedged v4l2-ctl cannot hang the caller.  All
    per-device caches are reset as a side effect.
    """
    global _resolutions_cache, _ctrls_cache, _ctrl_values_cache
    logging.debug('listing v4l devices...')
    try:
        output = ''
        started = time.time()
        p = subprocess.Popen('v4l2-ctl --list-devices', shell=True, stdout=subprocess.PIPE, bufsize=1)
        fd = p.stdout.fileno()
        # Switch the pipe to non-blocking so read() never stalls.
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        while True:
            try:
                data = p.stdout.read(1024)
                if not data:
                    break
            except IOError:
                # Nothing buffered yet: back off briefly and retry.
                data = ''
                time.sleep(0.01)
            output += data
            if len(output) > 10240:
                logging.warn('v4l2-ctl command returned more than 10k of output')
                break
            if time.time() - started > 3:
                logging.warn('v4l2-ctl command ran for more than 3 seconds')
                break
    except subprocess.CalledProcessError:
        logging.debug('failed to list devices (probably no devices installed)')
        return []
    try:
        # try to kill the v4l2-ctl subprocess
        p.kill()
    except:
        pass # nevermind
    name = None
    devices = []
    # Output format: an unindented card-name line followed by
    # tab-indented device node lines belonging to that card.
    for line in output.split('\n'):
        if line.startswith('\t'):
            device = line.strip()
            device = find_persistent_device(device)
            devices.append((device, name))
            logging.debug('found device %(name)s: %(device)s' % {
                    'name': name, 'device': device})
        else:
            name = line.split('(')[0].strip()
    # clear the cache
    _resolutions_cache = {}
    _ctrls_cache = {}
    _ctrl_values_cache = {}
    return devices
def list_resolutions(device):
    """Return the sorted list of (width, height) frame sizes for a device.

    Results are cached per device.  Falls back to a list of common
    default resolutions when v4l2-ctl reports nothing usable.
    """
    global _resolutions_cache
    if device in _resolutions_cache:
        return _resolutions_cache[device]
    logging.debug('listing resolutions of device %(device)s...' % {'device': device})
    resolutions = set()
    output = ''
    started = time.time()
    # The grep pipeline extracts discrete "WxH" tokens; stepwise
    # (continuous) ranges are filtered out, and `|| true` keeps the
    # shell exit status at 0 when nothing matches.
    p = subprocess.Popen('v4l2-ctl -d %(device)s --list-formats-ext | grep -vi stepwise | grep -oE "[0-9]+x[0-9]+" || true' % {
            'device': device}, shell=True, stdout=subprocess.PIPE, bufsize=1)
    fd = p.stdout.fileno()
    # Non-blocking read loop bounded by output size and wall time.
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    while True:
        try:
            data = p.stdout.read(1024)
            if not data:
                break
        except IOError:
            # Nothing buffered yet: back off briefly and retry.
            data = ''
            time.sleep(0.01)
        output += data
        if len(output) > 10240:
            logging.warn('v4l2-ctl command returned more than 10k of output')
            break
        if time.time() - started > 3:
            logging.warn('v4l2-ctl command ran for more than 3 seconds')
            break
    try:
        # try to kill the v4l2-ctl subprocess
        p.kill()
    except:
        pass # nevermind
    for pair in output.split('\n'):
        pair = pair.strip()
        if not pair:
            continue
        width, height = pair.split('x')
        width = int(width)
        height = int(height)
        if (width, height) in resolutions:
            continue # duplicate resolution
        if width < 96 or height < 96: # some reasonable minimal values
            continue
        if width % 16 or height % 16: # ignore non-modulo 16 resolutions
            continue
        resolutions.add((width, height))
        logging.debug('found resolution %(width)sx%(height)s for device %(device)s' % {
                'device': device, 'width': width, 'height': height})
    if not resolutions:
        logging.debug('no resolutions found for device %(device)s, adding the defaults' % {'device': device})
        # no resolution returned by v4l2-ctl call, add common default resolutions
        resolutions.add((320, 240))
        resolutions.add((640, 480))
        resolutions.add((800, 480))
        resolutions.add((1024, 576))
        resolutions.add((1024, 768))
        resolutions.add((1280, 720))
        resolutions.add((1280, 800))
        resolutions.add((1280, 960))
        resolutions.add((1280, 1024))
        resolutions.add((1440, 960))
        resolutions.add((1440, 1024))
        resolutions.add((1600, 1200))
    resolutions = list(sorted(resolutions, key=lambda r: (r[0], r[1])))
    _resolutions_cache[device] = resolutions
    return resolutions
def device_present(device):
    """Return True if device exists and is a character device node."""
    try:
        st = os.stat(device)
    except (OSError, ValueError, TypeError):
        # Missing path or malformed argument: treat as "not present"
        # instead of crashing (narrowed from the original bare except,
        # which also swallowed KeyboardInterrupt/SystemExit).
        return False
    return stat.S_ISCHR(st.st_mode)
def find_persistent_device(device):
    """Map a /dev/videoN path to its stable /dev/v4l/by-id symlink.

    Falls back to the original path when the by-id directory is missing
    or no symlink resolves to the device.
    """
    try:
        candidates = os.listdir(_DEV_V4L_BY_ID)
    except OSError:
        return device
    for entry in candidates:
        link = os.path.join(_DEV_V4L_BY_ID, entry)
        if os.path.realpath(link) == device:
            return link
    return device
def get_brightness(device):
    """Return brightness as a 0-100 percentage (or None if unavailable)."""
    return _get_ctrl(device, 'brightness')
def set_brightness(device, value):
    """Set brightness from a 0-100 percentage."""
    _set_ctrl(device, 'brightness', value)
def get_contrast(device):
    """Return contrast as a 0-100 percentage (or None if unavailable)."""
    return _get_ctrl(device, 'contrast')
def set_contrast(device, value):
    """Set contrast from a 0-100 percentage."""
    _set_ctrl(device, 'contrast', value)
def get_saturation(device):
    """Return saturation as a 0-100 percentage (or None if unavailable)."""
    return _get_ctrl(device, 'saturation')
def set_saturation(device, value):
    """Set saturation from a 0-100 percentage."""
    _set_ctrl(device, 'saturation', value)
def get_hue(device):
    """Return hue as a 0-100 percentage (or None if unavailable)."""
    return _get_ctrl(device, 'hue')
def set_hue(device, value):
    """Set hue from a 0-100 percentage."""
    _set_ctrl(device, 'hue', value)
def _get_ctrl(device, control):
    """Read a v4l2 control value, rescaled to a 0-100 percentage.

    Returns None when the device is absent or the control is unknown.
    Served from _ctrl_values_cache when a value was previously set.
    """
    global _ctrl_values_cache
    if not device_present(device):
        return None
    cached = _ctrl_values_cache.get(device, {})
    if control in cached:
        return cached[control]
    properties = _list_ctrls(device).get(control)
    if properties is None:
        logging.warn('control %(control)s not found for device %(device)s' % {
                'control': control, 'device': device})
        return None
    value = int(properties['value'])
    # Rescale from the control's native [min, max] range to [0, 100].
    if 'min' in properties and 'max' in properties:
        lo = int(properties['min'])
        hi = int(properties['max'])
        value = int(round((value - lo) * 100.0 / (hi - lo)))
    else:
        logging.warn('min and max values not found for control %(control)s of device %(device)s' % {
                'control': control, 'device': device})
    logging.debug('control %(control)s of device %(device)s is %(value)s%%' % {
            'control': control, 'device': device, 'value': value})
    return value
def _set_ctrl(device, control, value):
    """Set a v4l2 control from a 0-100 percentage value.

    The percentage is mapped back to the control's native [min, max]
    range and applied via v4l2-ctl; the requested percentage is cached
    so _get_ctrl can return it without re-querying the device.
    """
    global _ctrl_values_cache
    if not device_present(device):
        return
    controls = _list_ctrls(device)
    properties = controls.get(control)
    if properties is None:
        logging.warn('control %(control)s not found for device %(device)s' % {
                'control': control, 'device': device})
        return
    # Remember the requested percentage (not the raw value) for reads.
    _ctrl_values_cache.setdefault(device, {})[control] = value
    # adjust the value range
    if 'min' in properties and 'max' in properties:
        min_value = int(properties['min'])
        max_value = int(properties['max'])
        value = int(round(min_value + value * (max_value - min_value) / 100.0))
    else:
        logging.warn('min and max values not found for control %(control)s of device %(device)s' % {
                'control': control, 'device': device})
    logging.debug('setting control %(control)s of device %(device)s to %(value)s' % {
            'control': control, 'device': device, 'value': value})
    output = ''
    started = time.time()
    p = subprocess.Popen('v4l2-ctl -d %(device)s --set-ctrl %(control)s=%(value)s' % {
            'device': device, 'control': control, 'value': value}, shell=True, stdout=subprocess.PIPE, bufsize=1)
    fd = p.stdout.fileno()
    # Non-blocking read loop bounded by output size and wall time.
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    while True:
        try:
            data = p.stdout.read(1024)
            if not data:
                break
        except IOError:
            # Nothing buffered yet: back off briefly and retry.
            data = ''
            time.sleep(0.01)
        output += data
        if len(output) > 10240:
            logging.warn('v4l2-ctl command returned more than 10k of output')
            break
        if time.time() - started > 3:
            logging.warn('v4l2-ctl command ran for more than 3 seconds')
            break
    try:
        # try to kill the v4l2-ctl subprocess
        p.kill()
    except:
        pass # nevermind
def _list_ctrls(device):
    """Return {control_name: {property: value}} for a device's v4l2 controls.

    Runs `v4l2-ctl --list-ctrls` with a non-blocking read loop bounded
    in both output size (10k) and wall time (3s), parses lines of the
    form "name (type) : key=value ...", and caches the result per
    device.
    """
    global _ctrls_cache
    if device in _ctrls_cache:
        return _ctrls_cache[device]
    output = ''
    started = time.time()
    p = subprocess.Popen('v4l2-ctl -d %(device)s --list-ctrls' % {
            'device': device}, shell=True, stdout=subprocess.PIPE, bufsize=1)
    fd = p.stdout.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    while True:
        try:
            data = p.stdout.read(1024)
            if not data:
                break
        except IOError:
            # Nothing buffered yet: back off briefly and retry.
            data = ''
            time.sleep(0.01)
        output += data
        if len(output) > 10240:
            logging.warn('v4l2-ctl command returned more than 10k of output')
            break
        if time.time() - started > 3:
            logging.warn('v4l2-ctl command ran for more than 3 seconds')
            break
    try:
        # try to kill the v4l2-ctl subprocess
        p.kill()
    except:
        pass # nevermind
    controls = {}
    for line in output.split('\n'):
        if not line:
            continue
        # Fix: use a raw string — the original non-raw pattern contained
        # invalid escape sequences (\s, \w, \() that modern Python flags
        # as a SyntaxWarning.
        match = re.match(r'^\s*(\w+)\s+\(\w+\)\s+\:\s*(.+)', line)
        if not match:
            continue
        (control, properties) = match.groups()
        properties = dict([v.split('=', 1) for v in properties.split(' ') if v.count('=')])
        controls[control] = properties
    _ctrls_cache[device] = controls
    return controls
|
""""
Introduction Adventure
Author: Ignacio Avas (iavas@sophilabs.com)
"""
import codecs
import io
import sys
import unittest
from story.adventures import AdventureVerificationError, BaseAdventure
from story.translation import gettext as _
class TestOutput(unittest.TestCase):
"""Variables Adventure test"""
def __init__(self, candidate_code, file_name='<inline>'):
"""Init the test"""
super(TestOutput, self).__init__()
self.candidate_code = candidate_code
self.file_name = file_name
def setUp(self):
self.__old_stdout = sys.stdout
sys.stdout = self.__mockstdout = io.StringIO()
def tearDown(self):
sys.stdout = self.__old_stdout
self.__mockstdout.close()
def runTest(self):
"""Makes a simple test of the output"""
code = compile(self.candidate_code, self.file_name, 'exec', optimize=0)
self.assertIn('languages',
code.co_names,
'Should have defined languages variable')
exec(code)
lines = self.__mockstdout.getvalue().split('\n')
self.assertEqual([str(["ADA", "Pascal", "Fortran", "Smalltalk"]), ''],
lines,
'Should have same output'
)
class Adventure(BaseAdventure):
    """Lists Adventure"""
    title = _('Lists')
    @classmethod
    def test(cls, sourcefile):
        """Test against the provided file.

        :raises AdventureVerificationError: when the candidate program
            does not produce the expected output.
        """
        # Fix: close the source file deterministically — the original
        # leaked the handle returned by codecs.open.
        with codecs.open(sourcefile) as source:
            raw_program = source.read()
        suite = unittest.TestSuite()
        suite.addTest(TestOutput(raw_program, sourcefile))
        result = unittest.TextTestRunner().run(suite)
        if not result.wasSuccessful():
            raise AdventureVerificationError()
|
class Field:
    """ A field keeps track of all positions on a field and the content of
    these positions.

    :param width: The width (x dimension) of the field.
    :param height: The height (y dimension) of the field.
    """
    def __init__(self, width=10, height=10):
        # Bug fix: the constructor previously ignored its arguments and
        # hard-coded both dimensions to 10.
        self.width = width
        self.height = height
        self.field = self.create_field()
    def create_field(self):
        """ Creates a field based on the dimensions.

        :return: Dict filled with (x, y) tuples as keys, all values None.
        """
        # Bug fix: the y axis previously iterated over the width.
        return {(x, y): None for x in range(self.width)
                for y in range(self.height)}
    def set_cell_content(self, x, y, content):
        """ Set content of a cell.

        :param x: The x coordinate of the cell.
        :param y: The y coordinate of the cell.
        :param content: The content for the cell.
        :raises: KeyError when coordinates are invalid.
        """
        cell = (x, y)
        if cell not in self.field:
            raise KeyError(cell)
        self.field[cell] = content
    def get_cell_content(self, x, y):
        """ Return content of a cell.

        :param x: The x coordinate of the cell.
        :param y: The y coordinate of the cell.
        :raises: KeyError when coordinates are invalid.
        """
        return self.field[(x, y)]
|
import datetime
import hmac
import base64
import hashlib
import asyncio
from xml.etree.ElementTree import fromstring as parse_xml
from xml.etree.ElementTree import tostring as xml_tostring
from xml.etree.ElementTree import Element, SubElement
from functools import partial
from urllib.parse import quote
import aiohttp
from . import errors
# Percent-encoders matching AWS's canonical URI rules ('~' is unreserved).
amz_uriencode = partial(quote, safe='~')
# Variant that also leaves '/' intact, for encoding resource paths.
amz_uriencode_slash = partial(quote, safe='~/')
# XML namespace used by S3 listing responses.
S3_NS = 'http://s3.amazonaws.com/doc/2006-03-01/'
NS = {'s3': S3_NS}
# Registry of signing callables keyed by signature version.
_SIGNATURES = {}
SIGNATURE_V4 = 'v4'
class Key(object):
    """A single object entry from an S3 bucket listing."""
    def __init__(self, *, key, last_modified, etag, size, storage_class):
        self.key = key
        self.last_modified = last_modified
        self.etag = etag
        self.size = size
        self.storage_class = storage_class
    @classmethod
    def from_xml(cls, el):
        """Build a Key from an s3:Contents element of a ListBucket reply."""
        def text_of(tag):
            return el.find(tag, namespaces=NS).text
        return cls(
            key=text_of('s3:Key'),
            last_modified=datetime.datetime.strptime(
                text_of('s3:LastModified'), '%Y-%m-%dT%H:%M:%S.000Z'),
            etag=text_of('s3:ETag'),
            size=int(text_of('s3:Size')),
            storage_class=text_of('s3:StorageClass'))
    def __repr__(self):
        return '<Key {}:{}>'.format(self.key, self.size)
class Request(object):
    """A canonicalized S3 HTTP request awaiting signing."""
    def __init__(self, verb, resource, query, headers, payload):
        self.verb = verb
        self.resource = amz_uriencode_slash(resource)
        self.params = query
        # Encode each pair first, then sort by the encoded form, as the
        # canonical query string requires.
        encoded_pairs = sorted((amz_uriencode(k), amz_uriencode(v))
                               for k, v in query.items())
        self.query_string = '&'.join(k + '=' + v for k, v in encoded_pairs)
        self.headers = headers
        self.payload = payload
        self.content_md5 = ''
    @property
    def url(self):
        return 'https://{0.headers[HOST]}{0.resource}?{0.query_string}' \
            .format(self)
def _hmac(key, val):
return hmac.new(key, val, hashlib.sha256).digest()
def _signkey(key, date, region, service):
    """Derive the AWS SigV4 signing key for a date/region/service scope.

    Chains HMAC-SHA256 over "AWS4"+secret, then date, region, service,
    and the literal b'aws4_request', per the SigV4 key-derivation spec.
    """
    derived = _hmac(("AWS4" + key).encode('ascii'), date.encode('ascii'))
    derived = _hmac(derived, region.encode('ascii'))
    derived = _hmac(derived, service.encode('ascii'))
    return _hmac(derived, b'aws4_request')
@partial(_SIGNATURES.setdefault, SIGNATURE_V4)
def sign_v4(req, *,
        aws_key, aws_secret, aws_token, aws_service='s3', aws_region='us-east-1', **_):
    """Sign a Request in place using AWS Signature Version 4.

    Adds the x-amz-date, x-amz-content-sha256, optional
    x-amz-security-token, and Authorization headers.  Statement order is
    significant: every header present before the canonical request is
    built below becomes part of the signed header set.
    """
    time = datetime.datetime.utcnow()
    date = time.strftime('%Y%m%d')
    timestr = time.strftime("%Y%m%dT%H%M%SZ")
    req.headers['x-amz-date'] = timestr
    # Streaming payloads use the literal sentinel instead of a hash.
    if isinstance(req.payload, bytes):
        payloadhash = hashlib.sha256(req.payload).hexdigest()
    else:
        payloadhash = 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
    req.headers['x-amz-content-sha256'] = payloadhash
    if aws_token:
        req.headers['x-amz-security-token'] = aws_token
    signing_key = _signkey(aws_secret, date, aws_region, aws_service)
    headernames = ';'.join(k.lower() for k in sorted(req.headers))
    # Canonical request per the SigV4 specification.
    creq = (
        "{req.verb}\n"
        "{req.resource}\n"
        "{req.query_string}\n"
        "{headers}\n\n"
        "{headernames}\n"
        "{payloadhash}".format(
            req=req,
            headers='\n'.join(k.lower() + ':' + req.headers[k].strip()
                for k in sorted(req.headers)),
            headernames=headernames,
            payloadhash=payloadhash
            ))
    string_to_sign = (
        "AWS4-HMAC-SHA256\n{ts}\n"
        "{date}/{region}/{service}/aws4_request\n"
        "{reqhash}".format(
            ts=timestr,
            date=date,
            region=aws_region,
            service=aws_service,
            reqhash=hashlib.sha256(creq.encode('ascii')).hexdigest(),
            ))
    sig = hmac.new(signing_key, string_to_sign.encode('ascii'),
        hashlib.sha256).hexdigest()
    ahdr = ('AWS4-HMAC-SHA256 '
        'Credential={key}/{date}/{region}/{service}/aws4_request, '
        'SignedHeaders={headers}, Signature={sig}'.format(
            key=aws_key, date=date, region=aws_region, service=aws_service,
            headers=headernames,
            sig=sig,
            ))
    req.headers['Authorization'] = ahdr
def _hmac_old(key, val):
return hmac.new(key, val, hashlib.sha1).digest()
class MultipartUpload(object):
    """State of one in-progress S3 multipart upload.

    Collects part ETags into a CompleteMultipartUpload XML body as
    chunks are uploaded, then either commits or aborts the upload.
    """

    def __init__(self, bucket, key, upload_id):
        self.bucket = bucket
        self.key = key
        self.upload_id = upload_id
        # XML body sent on commit(); one <Part> appended per add_chunk().
        self.xml = Element('CompleteMultipartUpload')
        self.parts = 0
        self._done = False
        self._uri = '/' + self.key + '?uploadId=' + self.upload_id

    @asyncio.coroutine
    def add_chunk(self, data):
        """Upload one part; parts are numbered from 1 in call order."""
        assert isinstance(data, (bytes, memoryview, bytearray)), data
        # figure out how to check chunk size, all but last one
        # assert len(data) > 5 << 30, "Chunk must be at least 5Mb"
        if self._done:
            raise RuntimeError("Can't add_chunk after commit or close")
        self.parts += 1
        result = yield from self.bucket._request(Request("PUT",
            '/' + self.key, {
                'uploadId': self.upload_id,
                'partNumber': str(self.parts),
            }, headers={
                'CONTENT-LENGTH': str(len(data)),
                'HOST': self.bucket._host,
                # next one aiohttp adds for us anyway, so we must put it here
                # so it's added into signature
                'CONTENT-TYPE': 'application/octed-stream',
            }, payload=data))
        try:
            if result.status != 200:
                xml = yield from result.read()
                raise errors.AWSException.from_bytes(result.status, xml)
            etag = result.headers['ETAG']
        finally:
            result.close()
        # Record the part only after the PUT succeeded.
        chunk = SubElement(self.xml, 'Part')
        SubElement(chunk, 'PartNumber').text = str(self.parts)
        SubElement(chunk, 'ETag').text = etag

    @asyncio.coroutine
    def commit(self):
        """Complete the multipart upload.

        Returns the s3:ETag element found in the response.
        NOTE(review): this returns the XML Element object, not its text
        -- confirm whether callers expect the ETag string instead.
        """
        if self._done:
            raise RuntimeError("Can't commit twice or after close")
        self._done = True
        data = xml_tostring(self.xml)
        result = yield from self.bucket._request(Request("POST",
            '/' + self.key, {
                'uploadId': self.upload_id,
            }, headers={
                'CONTENT-LENGTH': str(len(data)),
                'HOST': self.bucket._host,
                'CONTENT-TYPE': 'application/xml',
            }, payload=data))
        try:
            xml = yield from result.read()
            if result.status != 200:
                raise errors.AWSException.from_bytes(result.status, xml)
            xml = parse_xml(xml)
            return xml.find('s3:ETag', namespaces=NS)
        finally:
            result.close()

    @asyncio.coroutine
    def close(self):
        """Abort the upload; no-op if already committed or closed."""
        if self._done:
            return
        self._done = True
        result = yield from self.bucket._request(Request("DELETE",
            '/' + self.key, {
                'uploadId': self.upload_id,
            }, headers={'HOST': self.bucket._host}, payload=b''))
        try:
            xml = yield from result.read()
            if result.status != 204:
                raise errors.AWSException.from_bytes(result.status, xml)
        finally:
            result.close()
class Bucket(object):
    """Coroutine-based client for a single S3 bucket.

    All operations go through ``_request``, which signs the request with
    the configured signature scheme and dispatches it via aiohttp.
    Non-success HTTP statuses raise errors.AWSException.
    """

    def __init__(self, name, *,
            port=80,
            aws_key, aws_secret, aws_token,
            aws_region='us-east-1',
            aws_endpoint='s3.amazonaws.com',
            signature=SIGNATURE_V4,
            connector=None):
        self._name = name
        # BUG FIX: the connector argument was accepted but discarded
        # (self._connector was always reset to None), so custom aiohttp
        # connectors never took effect.  Honor the caller's value.
        self._connector = connector
        self._aws_sign_data = {
            'aws_key': aws_key,
            'aws_secret': aws_secret,
            'aws_token': aws_token,
            'aws_region': aws_region,
            'aws_service': 's3',
            'aws_bucket': name,
        }
        # Virtual-host style addressing: bucket name becomes part of host.
        self._host = self._name + '.' + aws_endpoint
        if port != 80:
            self._host = self._host + ':' + str(port)
        self._signature = signature

    @asyncio.coroutine
    def exists(self, prefix=''):
        """Return True if at least one key with *prefix* exists."""
        result = yield from self._request(Request(
            "GET",
            "/",
            # NOTE(review): the S3 ListObjects API calls this parameter
            # 'delimiter', not 'separator' -- confirm against the API docs.
            {'prefix': prefix,
             'separator': '/',
             'max-keys': '1'},
            {'HOST': self._host},
            b'',
            ))
        data = (yield from result.read())
        if result.status != 200:
            raise errors.AWSException.from_bytes(result.status, data)
        x = parse_xml(data)
        return any(map(Key.from_xml,
                       x.findall('s3:Contents', namespaces=NS)))

    @asyncio.coroutine
    def list(self, prefix='', max_keys=1000):
        """List up to *max_keys* keys with *prefix* as Key objects.

        Raises AssertionError if the listing is truncated.
        """
        result = yield from self._request(Request(
            "GET",
            "/",
            {'prefix': prefix,
             'max-keys': str(max_keys)},
            {'HOST': self._host},
            b'',
            ))
        data = (yield from result.read())
        if result.status != 200:
            raise errors.AWSException.from_bytes(result.status, data)
        x = parse_xml(data)
        if x.find('s3:IsTruncated', namespaces=NS).text != 'false':
            raise AssertionError(
                "File list is truncated, use bigger max_keys")
        return list(map(Key.from_xml,
                        x.findall('s3:Contents', namespaces=NS)))

    def list_by_chunks(self, prefix='', max_keys=1000, after_filename=None):
        """Generator yielding coroutines, each resolving to one page of
        Key objects; iteration stops after the final (non-truncated) page.
        """
        final = False
        if after_filename:
            marker = after_filename
        else:
            marker = ''

        @asyncio.coroutine
        def read_next():
            nonlocal final, marker
            result = yield from self._request(Request(
                "GET",
                "/",
                {'prefix': prefix,
                 'max-keys': str(max_keys),
                 'marker': marker},
                {'HOST': self._host},
                b'',
                ))
            data = (yield from result.read())
            if result.status != 200:
                raise errors.AWSException.from_bytes(result.status, data)
            x = parse_xml(data)
            result = list(map(Key.from_xml,
                              x.findall('s3:Contents', namespaces=NS)))
            if(x.find('s3:IsTruncated', namespaces=NS).text == 'false' or
                    len(result) == 0):
                final = True
            else:
                # Continue the listing after the last key of this page.
                marker = result[-1].key
            return result

        while not final:
            yield read_next()

    @asyncio.coroutine
    def download(self, key):
        """GET the object and return the open aiohttp response
        (caller reads the body and closes it)."""
        if isinstance(key, Key):
            key = key.key
        result = yield from self._request(Request(
            "GET", '/' + key, {}, {'HOST': self._host}, b''))
        if result.status != 200:
            raise errors.AWSException.from_bytes(
                result.status, (yield from result.read()))
        return result

    @asyncio.coroutine
    def upload(self, key, data,
            content_length=None,
            content_type='application/octed-stream',
            last_modified=None):
        """Upload file to S3
        The `data` might be a generator or stream.
        the `content_length` is unchecked so it's responsibility of user to
        ensure that it matches data.
        Note: Riak CS doesn't allow to upload files without content_length.
        """
        # NOTE(review): 'octed-stream' is a long-standing typo for
        # 'octet-stream'; kept unchanged for wire-compatibility.
        if isinstance(key, Key):
            key = key.key
        if isinstance(data, str):
            data = data.encode('utf-8')
        headers = {
            'HOST': self._host,
            'CONTENT-TYPE': content_type,
            # Server-side encryption is always requested.  (A redundant
            # duplicate update of this same header was removed.)
            "x-amz-server-side-encryption": "AES256",
        }
        if content_length is not None:
            headers['CONTENT-LENGTH'] = str(content_length)
        if last_modified:
            headers.update({"x-amz-last-modified": last_modified})
        result = yield from self._request(Request("PUT", '/' + key, {},
            headers=headers, payload=data))
        try:
            if result.status != 200:
                xml = yield from result.read()
                raise errors.AWSException.from_bytes(result.status, xml)
            return result
        finally:
            result.close()

    @asyncio.coroutine
    def delete(self, key):
        """DELETE the object; S3 answers 204 on success."""
        if isinstance(key, Key):
            key = key.key
        result = yield from self._request(Request("DELETE", '/' + key, {},
            {'HOST': self._host}, b''))
        try:
            if result.status != 204:
                xml = yield from result.read()
                raise errors.AWSException.from_bytes(result.status, xml)
            return result
        finally:
            result.close()

    @asyncio.coroutine
    def get(self, key):
        """GET the object and return its full body as bytes
        (unlike download(), which returns the open response)."""
        if isinstance(key, Key):
            key = key.key
        result = yield from self._request(Request(
            "GET", '/' + key, {}, {'HOST': self._host}, b''))
        if result.status != 200:
            raise errors.AWSException.from_bytes(
                result.status, (yield from result.read()))
        data = yield from result.read()
        return data

    @asyncio.coroutine
    def _request(self, req):
        """Sign *req* with the configured scheme and send it."""
        _SIGNATURES[self._signature](req, **self._aws_sign_data)
        if isinstance(req.payload, bytes):
            req.headers['CONTENT-LENGTH'] = str(len(req.payload))
        return (yield from aiohttp.request(req.verb, req.url,
            chunked='CONTENT-LENGTH' not in req.headers,
            headers=req.headers,
            data=req.payload,
            connector=self._connector))

    @asyncio.coroutine
    def upload_multipart(self, key,
            content_type='application/octed-stream',
            MultipartUpload=MultipartUpload):
        """Upload file to S3 by uploading multiple chunks"""
        if isinstance(key, Key):
            key = key.key
        result = yield from self._request(Request("POST",
            '/' + key, {'uploads': ''}, {
                'HOST': self._host,
                'CONTENT-TYPE': content_type,
            }, payload=b''))
        try:
            if result.status != 200:
                xml = yield from result.read()
                raise errors.AWSException.from_bytes(result.status, xml)
            xml = yield from result.read()
            upload_id = parse_xml(xml).find('s3:UploadId',
                                            namespaces=NS).text
            assert upload_id, xml
            return MultipartUpload(self, key, upload_id)
        finally:
            result.close()
|
# ---------------------------------------------------------------------------- #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# ---------------------------------------------------------------------------- #
from .core.datesres import *
from game import current
class GameTime:
    """Immutable snapshot of the in-game clock: weekday index, hour,
    minute and day, as produced by current.time_tuple()."""

    def __init__(self, wd, h, m, d):
        self._wd = wd
        self._h = h
        self._m = m
        self._d = d

    @classmethod
    def now(cls):
        """Build a GameTime from the live game clock."""
        return cls(*current.time_tuple())

    @property
    def weekday(self): return self._wd

    @property
    def hour(self): return self._h

    @property
    def minute(self): return self._m

    @property
    def day(self): return self._d

    def todays_name(self):
        """Name of the current weekday."""
        return WEEKDAYS[self._wd]

    def tomorrows_name(self):
        # BUG FIX: the original had no `return` and called
        # tomorrows_weekday_idx() as a bare global, so the method either
        # raised NameError or returned None.  Look up via self and return.
        return WEEKDAYS[self.tomorrows_weekday_idx()]

    def tomorrows_weekday_idx(self):
        """Index of tomorrow's weekday, wrapping 5 -> 0.

        NOTE(review): the wrap at 6 implies a six-day week (indices 0-5);
        confirm against the WEEKDAYS table in core.datesres.
        """
        if self._wd + 1 == 6: return 0
        return self._wd + 1
def gametime():
    """Return the raw (weekday, hour, minute, day) game-clock tuple."""
    return current.time_tuple()

def _clock_field(index):
    # Build an accessor that reads one component of the live time tuple.
    def read():
        return current.time_tuple()[index]
    return read

# Convenience accessors exposed as attributes of gametime.
gametime.weekday = _clock_field(0)
gametime.hour = _clock_field(1)
gametime.min = _clock_field(2)
gametime.day = _clock_field(3)
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the SessionBlockTweet model and its
    'sessions' many-to-many join table."""

    def forwards(self, orm):
        """Apply: create twit_sessionblocktweet and its sessions M2M table."""
        # Adding model 'SessionBlockTweet'
        db.create_table(u'twit_sessionblocktweet', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('sent_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('timeslot', self.gf('django.db.models.fields.DateTimeField')()),
            ('event', self.gf('django.db.models.fields.related.ForeignKey')(related_name='session_tweets', to=orm['sked.Event'])),
            ('next', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='previous', unique=True, null=True, to=orm['twit.SessionBlockTweet'])),
        ))
        db.send_create_signal(u'twit', ['SessionBlockTweet'])

        # Adding M2M table for field sessions on 'SessionBlockTweet'
        db.create_table(u'twit_sessionblocktweet_sessions', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('sessionblocktweet', models.ForeignKey(orm[u'twit.sessionblocktweet'], null=False)),
            ('session', models.ForeignKey(orm[u'sked.session'], null=False))
        ))
        db.create_unique(u'twit_sessionblocktweet_sessions', ['sessionblocktweet_id', 'session_id'])

    def backwards(self, orm):
        """Reverse: drop both tables created by forwards()."""
        # Deleting model 'SessionBlockTweet'
        db.delete_table(u'twit_sessionblocktweet')

        # Removing M2M table for field sessions on 'SessionBlockTweet'
        db.delete_table('twit_sessionblocktweet_sessions')

    # Frozen ORM state (auto-generated by South); used to build the `orm`
    # object passed to forwards()/backwards().  Do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'sked.event': {
            'Meta': {'ordering': "('-start_date',)", 'object_name': 'Event'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sked_events'", 'to': u"orm['auth.User']"}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'label': ('django.db.models.fields.CharField', [], {'default': "'event'", 'max_length': '64'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'registration_is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'registration_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'session_label': ('django.db.models.fields.CharField', [], {'default': "'session'", 'max_length': '64'}),
            'session_length': ('timedelta.fields.TimedeltaField', [], {}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'start_date': ('django.db.models.fields.DateField', [], {}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'sked.location': {
            'Meta': {'ordering': "('-event__start_date', 'name')", 'object_name': 'Location'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'locations'", 'to': u"orm['sked.Event']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_official': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'sked.session': {
            'Meta': {'ordering': "('-event__start_date', 'start_time')", 'unique_together': "(('event', 'slug'),)", 'object_name': 'Session'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'to': u"orm['sked.Event']"}),
            'extra_data': ('jsonfield.fields.JSONField', [], {'default': "'{}'", 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sked.Location']", 'null': 'True', 'blank': 'True'}),
            'published_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'approved_sked_sessions'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'speakers': ('jsonfield.fields.JSONField', [], {'default': "'[]'"}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
        },
        u'twit.sessionblocktweet': {
            'Meta': {'ordering': "('-timeslot', 'sent_at')", 'object_name': 'SessionBlockTweet'},
            'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'session_tweets'", 'to': u"orm['sked.Event']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'next': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'previous'", 'unique': 'True', 'null': 'True', 'to': u"orm['twit.SessionBlockTweet']"}),
            'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'sessions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sked.Session']", 'symmetrical': 'False'}),
            'timeslot': ('django.db.models.fields.DateTimeField', [], {})
        }
    }

    complete_apps = ['twit']
|
#!/bin/env python3
import sys
import collections
def main():
    """Scan each file named on the command line for http:// URLs and
    report which files mention which sites."""
    found = collections.defaultdict(set)
    for name in sys.argv[1:]:
        try:
            with open(name) as handle:
                for text_line in handle:
                    add_sites(text_line, found, name)
        except EnvironmentError as err:
            print(err)
    print_sites(found)
def add_sites(line, sites, filename):
    """Find every "http://" URL in *line* and record *filename* against
    the site (host) part in *sites*.

    *sites* maps site -> set of filenames and is mutated in place.
    The host ends at the first character that is not alphanumeric,
    '.' or '-'.
    """
    i = 0
    size = len(line)
    while True:
        site = None
        i = line.find("http://", i)
        if i == -1:
            break
        i += 7
        for j in range(i, size):
            if not line[j].isalnum() and line[j] not in ".-":
                site = line[i:j]
                break
        else:
            # BUG FIX: a URL running to the end of the line (no trailing
            # delimiter, e.g. a file whose last line has no newline) was
            # silently dropped because `site` stayed None; capture it.
            site = line[i:]
        if site:
            sites[site].add(filename)
def print_sites(sites):
    """Print each site (sorted) followed by the files referring to it,
    sorted case-insensitively."""
    for site in sorted(sites):
        print("{} is referred to in:".format(site))
        names = sorted(sites[site], key=str.lower)
        for name in names:
            print("\t{}".format(name))
# Script entry point: scan the files given as command-line arguments.
if __name__ == '__main__':
    main()
|
import functools
import itertools
import types
import typing as tp # NOQA
import unittest
import numpy
import six
from chainer.testing import _bundle
from chainer import utils
def _param_to_str(obj):
if isinstance(obj, type):
return obj.__name__
return repr(obj)
def _shorten(s, maxlen):
# Shortens the string down to maxlen, by replacing the middle part with
# a 3-dots string '...'.
ellipsis = '...'
if len(s) <= maxlen:
return s
n1 = (maxlen - len(ellipsis)) // 2
n2 = maxlen - len(ellipsis) - n1
s = s[:n1] + ellipsis + s[-n2:]
assert len(s) == maxlen
return s
def _make_class_name(base_class_name, i_param, param):
    # Build a readable, length-bounded class name for one combination of
    # parameters.
    single_maxlen = 100   # length limit for a single parameter value
    total_maxlen = 5000   # length limit for the joined parameter list
    pieces = [
        '{}={}'.format(key, _shorten(_param_to_str(value), single_maxlen))
        for key, value in sorted(param.items())]
    joined = _shorten(', '.join(pieces), total_maxlen)
    return '{}_param_{}_{{{}}}'.format(base_class_name, i_param, joined)
def _parameterize_test_case_generator(base, params):
    # Yield one generated test-case description per parameter combination.
    for index, param in enumerate(params):
        yield _parameterize_test_case(base, index, param)
def _parameterize_test_case(base, i, param):
    """Build the (class name, member dict, method wrapper) triple
    describing one parameterized variant of the *base* test case."""
    cls_name = _make_class_name(base.__name__, i, param)

    def __str__(self):
        # Append the parameter dict to the base test's description.
        name = base.__str__(self)
        return '%s parameter: %s' % (name, param)

    mb = {'__str__': __str__}
    for k, v in sorted(param.items()):
        if isinstance(v, types.FunctionType):
            # Plain functions become methods that drop `self`; the extra
            # factory closure re-binds v per iteration to avoid the
            # late-binding-closure pitfall.
            def create_new_v():
                f = v

                def new_v(self, *args, **kwargs):
                    return f(*args, **kwargs)
                return new_v
            mb[k] = create_new_v()
        else:
            mb[k] = v

    def method_generator(base_method):
        # Generates a wrapped test method
        @functools.wraps(base_method)
        def new_method(self, *args, **kwargs):
            try:
                return base_method(self, *args, **kwargs)
            except unittest.SkipTest:
                raise
            except Exception as e:
                # Re-raise with the parameter values appended so the
                # failing combination shows up in the test report.
                s = six.StringIO()
                s.write('Parameterized test failed.\n\n')
                s.write('Base test method: {}.{}\n'.format(
                    base.__name__, base_method.__name__))
                s.write('Test parameters:\n')
                for k, v in sorted(param.items()):
                    s.write('  {}: {}\n'.format(k, v))
                utils._raise_from(e.__class__, s.getvalue(), e)
        return new_method

    return (cls_name, mb, method_generator)
def parameterize(*params):
    # TODO(niboshi): Add documentation
    def generate(base):
        return _parameterize_test_case_generator(base, params)
    return _bundle.make_decorator(generate)
def _values_to_dicts(names, values):
    """Expand a comma-separated name string plus a list of value tuples
    into a list of parameter dicts."""
    assert isinstance(names, six.string_types)
    assert isinstance(values, (tuple, list))

    split_names = names.split(',')

    def pair_up(ns, vs):
        # With a single name, each entry of *values* is the value itself
        # rather than a tuple of per-name values.
        if len(ns) == 1:
            return [(ns[0], vs)]
        assert isinstance(vs, (tuple, list)) and len(ns) == len(vs)
        return zip(ns, vs)

    return [dict(pair_up(split_names, vals)) for vals in values]
def from_pytest_parameterize(names, values):
    # Pytest-style parameterization.
    # TODO(niboshi): Add documentation
    param_dicts = _values_to_dicts(names, values)
    return param_dicts
def parameterize_pytest(names, values):
    # Pytest-style parameterization.
    # TODO(niboshi): Add documentation
    param_dicts = from_pytest_parameterize(names, values)
    return parameterize(*param_dicts)
def product(parameter):
    # TODO(niboshi): Add documentation
    if isinstance(parameter, dict):
        # dict form: each entry maps name(s) -> list of values; take the
        # cross product over entries (sorted for determinism).
        dict_lists = [
            _values_to_dicts(names, values)
            for names, values in sorted(parameter.items())]
        return product_dict(*dict_lists)
    if isinstance(parameter, list):
        # list of lists of dicts
        if not all(isinstance(item, list) for item in parameter):
            raise TypeError('parameter must be list of lists of dicts')
        if not all(isinstance(d, dict) for sub in parameter for d in sub):
            raise TypeError('parameter must be list of lists of dicts')
        return product_dict(*parameter)
    raise TypeError(
        'parameter must be either dict or list. Actual: {}'.format(
            type(parameter)))
def product_dict(*parameters):
    # TODO(niboshi): Add documentation
    combined = []
    for dicts in itertools.product(*parameters):
        merged = {}
        for dic in dicts:
            for key, value in six.iteritems(dic):
                merged[key] = value
        combined.append(merged)
    return combined
# TODO(kataoka): product_dict is patched by tests/conftest.py while tests are
# collected if CHAINER_TEST_PAIRWISE_PARAMETERIZATION is configured
# accordingly. Also used in
# tests/chainer_tests/testing_tests/test_parameterized.py
# Keep a reference to the unpatched implementation so the pairwise variant
# below can still fall back to the exhaustive product.
_product_dict_orig = product_dict
def _pairwise_product_dict(*parameters):
    if len(parameters) > 2:
        return list(_pairwise_product_dict_iter(*parameters))
    # With two or fewer groups, the full product already equals pairwise
    # coverage, so use the exhaustive (unpatched) implementation.
    return _product_dict_orig(*parameters)
def _pairwise_product_dict_iter(
        *parameters: tp.Iterable[tp.Dict[str, tp.Any]]
) -> tp.Iterator[tp.Dict[str, tp.Any]]:
    """Generate combinations that cover all pairs.
    The argument is the same as `chainer.testing.product_dict`.
    """
    groups = [list(dicts) for dicts in parameters]  # type: tp.List[tp.List[tp.Dict[str, tp.Any]]] # NOQA
    shape = [len(group) for group in groups]
    for nd_index in sorted(_nd_indices_to_cover_each_2d(shape)):
        merged = {}  # type: tp.Dict[str, tp.Any]
        for idx, group in zip(nd_index, groups):
            merged.update(group[idx])
        yield merged
def _nd_indices_to_cover_each_2d(
shape: tp.Sequence[int]
) -> tp.Iterator[tp.Tuple[int, ...]]:
rs = numpy.random.RandomState(seed=0)
n = len(shape)
indices = [list(range(length)) for length in shape] # type: tp.List[tp.List[int]] # NOQA
# `(k_i, k_j) in uncovered[(i, j)]` iff it has not been yielded
# `nd_index` such that `(nd_index[i], nd_inde[j]) == (k_i, k_j)`.
uncovered = {} # type: tp.Dict[tp.Tuple[int, int], tp.Set[tp.Tuple[int, int]]] # NOQA
for i, j in itertools.combinations(range(n), 2):
uncovered[(i, j)] = set(itertools.product(indices[i], indices[j]))
nd_indices = list(itertools.product(*indices)) # type: tp.List[tp.Tuple[int, ...]] # NOQA
rs.shuffle(nd_indices)
for nd_index in nd_indices:
count = 0
for i, j in itertools.combinations(range(n), 2):
try:
uncovered[(i, j)].remove((nd_index[i], nd_index[j]))
except KeyError:
pass
else:
count += 1
if count > 0:
yield nd_index
|
# Copyright (C) 2011 Statoil ASA, Norway.
#
# The file '__init__.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
ert - Ensemble Reservoir Tool - a package for reservoir modeling.
The ert package itself has no code, but contains several subpackages:
ert.ecl: Package for working with ECLIPSE files. The far most mature
package in ert.
ert.job_queue:
ert.util:
The ert package is based on wrapping the libraries from the ERT C
code with ctypes; an essential part of ctypes approach is to load the
shared libraries with the ctypes.CDLL() function. The ctypes.CDLL()
function uses the standard methods of the operating system,
i.e. standard locations configured with ld.so.conf and the environment
variable LD_LIBRARY_PATH.
To avoid conflict with other application using the ert libraries the
Python code should be able to locate the shared libraries without
(necessarily) using the LD_LIBRARY_PATH variable. The default
behaviour is to try to load from the library ../../lib64, but by using
the environment variable ERT_LIBRARY_PATH you can alter how ert looks
for shared libraries. This module will set the ert_lib_path of the
ert.cwrap.clib module; the actual loading will take place in that
module.
1. By default the code will try to load the shared libraries from
'../../lib64' relative to the location of this file.
2. Depending on the value of ERT_LIBRARY_PATH two different
behaviours can be imposed:
Existing path: the package will look in the path pointed to
by ERT_LIBRARY_PATH for shared libraries.
Arbitrary value: the package will use standard load order for
the operating system.
If the fixed path, given by the default ../../lib64 or ERT_LIBRARY_PATH
alternative fails, the loader will try the default load behaviour
before giving up completely.
"""
import os.path
import cwrap.clib
import sys
import warnings
try:
import ert_site_init
except ImportError:
pass
# Minimum supported interpreter: Python 2.6 (hex version encoding).
required_version_hex = 0x02060000

# 1. Start by initializing the ert_lib_path variable to None
ert_lib_path = None

# 2. Try to load the __ert_lib_path module; this module has been
#    configured by cmake during the build configuration process. The
#    module should contain the variable lib_path pointing to the
#    directory with shared object files.
try:
    import __ert_lib_path
    ert_lib_path = __ert_lib_path.lib_path
except ImportError:
    pass

# 3. Using the environment variable ERT_LIBRARY_PATH it is possible to
#    override the default algorithms. If the ERT_LIBRARY_PATH is set
#    to a non existing directory a warning will go to stderr and the
#    setting will be ignored.
env_lib_path = os.getenv("ERT_LIBRARY_PATH")
if env_lib_path:
    if os.path.isdir( env_lib_path ):
        ert_lib_path = os.getenv("ERT_LIBRARY_PATH")
    else:
        sys.stderr.write("Warning: Environment variable ERT_LIBRARY_PATH points to nonexisting directory:%s - ignored" % env_lib_path)

# Check that the final ert_lib_path setting corresponds to an existing
# directory; otherwise fall back to the OS default library search order.
if ert_lib_path:
    if not os.path.exists( ert_lib_path ):
        ert_lib_path = None

# Set the module variable ert_lib_path of the ert.cwrap.clib module;
# this is where the actual loading will be performed.
cwrap.clib.ert_lib_path = ert_lib_path

if sys.hexversion < required_version_hex:
    raise Exception("ERT Python requires at least version 2.6 of Python")
|
'''
Given an array S of n integers, are there elements a, b, c, and d in S such that a + b + c + d = target? Find all
unique quadruplets in the array which gives the sum of target.
Link: https://leetcode.com/problems/4sum/#/description
Example:
For example, given array S = [1, 0, -1, 0, -2, 2], and target = 0.
A solution set is:
[
[-1, 0, 0, 1],
[-2, -1, 1, 2],
[-2, 0, 0, 2]
]
Solution:
Here I use dic to store the indices for two sum values, then using set to handle the duplicate results and
convert answers into list when finished.
Source: None
'''
import collections
import itertools
class Solution(object):
    def fourSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]

        Index every pair sum, then join complementary pair-sum buckets,
        keeping only joins whose four indices are distinct; a set of
        sorted value tuples removes duplicate quadruplets.
        """
        ordered = sorted(nums)
        pair_sums = collections.defaultdict(list)
        for (i, first), (j, second) in itertools.combinations(
                enumerate(ordered), 2):
            pair_sums[first + second].append([i, j])
        quads = set()
        for left_sum in pair_sums:
            right_sum = target - left_sum
            if right_sum not in pair_sums:
                continue
            for head in pair_sums[left_sum]:
                for tail in pair_sums[right_sum]:
                    if len(set(head + tail)) == 4:
                        quads.add(tuple(
                            sorted(ordered[k] for k in head + tail)))
        return list(map(list, quads))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# config.py
#
# Copyright 2016 Andrei Tumbar <atuser@Kronos>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import os, sys
import platform
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('GtkSource', '3.0')
from gi.repository import Gtk, GObject, GLib, GtkSource, Pango, Gdk
os.chdir ( os.path.dirname ( os.path.realpath ( __file__ ) ) )
import filetab, filemanager, builderset, project, configitem, configfile
class Config:
    """GTK editor for a simple ``var = value`` configuration file.

    Parses the file into ``var_dict``/``var_list``, shows scalar variables
    in an editable two-column ``Gtk.TreeView`` and delegates the
    ``input_files`` / ``output_files`` lists to two
    ``configitem.ConfigItem`` widgets.
    """

    # NOTE(review): these are class-level (shared) attributes; new_config()
    # rebinds most of them per instance, but mutating them before that point
    # affects every Config instance — confirm only one Config exists at a time.
    config_file_relative = ""
    config_file_full = ""
    __file_lines = None
    __file = None
    notebook = None
    open_dialogue = None
    var_dict = {}
    var_list = []
    list_vars = ["output_files", "input_files"]
    conf_vars = ["title", "css", "js"]
    variables_box = Gtk.Box()
    configitems = []
    rows = []
    row_raw = []
    current_file = {}
    current = None

    def __init__(self, curr_dir, config, notebook, open_dialogue):
        """Load *config* (relative to *curr_dir*) and build the widgets."""
        self.open_dialogue = open_dialogue
        self.dir = curr_dir
        self.notebook = notebook
        self.new_config(config)

    def remove_config(self):
        """Destroy the widgets and reset all per-config state."""
        self.input.destroy()
        self.output.destroy()
        # Bug fix: the tree view was destroyed twice before; once is enough.
        self.treeview.destroy()
        self.var_store = None
        self.var_rend = None
        self.val_rend = None
        self.var_dict = {}
        self.var_list = []
        self.list_vars = ["output_files", "input_files"]
        self.conf_vars = ["title", "css", "js"]
        self.variables_box = Gtk.Box()
        self.configitems = []
        self.current_file = {}
        self.current = None

    def new_config(self, config):
        """Parse *config* into var_dict/var_list and populate the widgets."""
        self.config_file_relative = config
        self.config_file_full = self.get_path(config)
        # Bug fix: close the handle instead of leaking it.
        with open(self.config_file_relative, "r") as conf_file:
            self.__file_lines = conf_file.readlines()
        self.input = configitem.ConfigItem()
        self.output = configitem.ConfigItem()
        self.input.connect("new_config", self.get_new)
        self.output.connect("new_config", self.get_new)
        self.input.connect("remove_item", self.get_remove)
        self.output.connect("remove_item", self.get_remove)
        for line in self.__file_lines:
            # Skip comments and blank lines.
            if line[0] == "#" or line == "" or line == "\n":
                continue
            # Bug fix: split only on the first '=' so values may contain '='.
            var, val = line.split("=", 1)
            var = var.strip()
            val = val.strip()
            self.var_dict[var] = val
            self.var_list.append(var)
            # File lists are stored comma-separated.
            if var in self.list_vars:
                self.var_dict[var] = val.split(",")
        for var in self.list_vars:
            if not var:
                continue
            buff = self.var_dict[var]
            # getattr() replaces the previous exec()-built attribute access
            # ("input_files" -> self.input, "output_files" -> self.output).
            item = getattr(self, var.replace("_files", ""))
            item.set_notebook(self.notebook)
            item.set_dialogue(self.open_dialogue)
            item.add_items(buff)
        self.__init_vars__()
        # Only scalar (non-list) variables appear in the tree view.
        for var in self.var_list:
            if not isinstance(self.var_dict[var], list):
                self.add_var(var)

    def get_remove(self, buff_cfg, buff_item):
        """Drop *buff_item*'s path from the matching *_files list."""
        curr = "input" if buff_cfg == self.input else "output"
        self.var_dict[curr + "_files"].remove(buff_item.full_path)

    def get_path(self, _in):
        """Join *_in* onto the config directory without doubling slashes."""
        if self.dir[-1] == "/":
            return self.dir + _in
        return self.dir + "/" + _in

    def get_new(self, a, confitem):
        """Remember which ConfigItem ('input'/'output') requested new files."""
        self.current = "input" if confitem == self.input else "output"

    def add(self, __files):
        """Add files chosen in the open dialogue to the current ConfigItem."""
        if platform.system() == "Windows":
            # NOTE(review): strips a leading character from the first path on
            # Windows — presumably a leading '/' from a file URI; confirm.
            __files[0] = __files[0][1:]
        if self.current == "input":
            self.input.add_items(__files, remove=False)
            self.var_dict["input_files"].append(__files[0])
        else:
            self.output.add_items(__files, remove=False)
            self.var_dict["output_files"].append(__files[0])

    def update_file(self, var, val):
        """Record a per-file setting for the current file."""
        self.current_file[var] = val

    def __init_vars__(self):
        """Build the two-column (variable, value) tree view."""
        self.var_store = Gtk.ListStore(str, str)
        self.treeview = Gtk.TreeView.new_with_model(self.var_store)
        self.var_rend = Gtk.CellRendererText()
        self.val_rend = Gtk.CellRendererText()
        self.val_rend.set_property('editable', True)
        column_1 = Gtk.TreeViewColumn("Variables", self.var_rend, text=0)
        column_2 = Gtk.TreeViewColumn("Value", self.val_rend, text=1)
        self.treeview.append_column(column_1)
        self.treeview.append_column(column_2)
        self.val_rend.connect("edited", self.vars_changes)

    def vars_changes(self, renderer, path, new_text):
        """Sync an in-view cell edit back into var_dict."""
        tree_iter = self.var_store.get_iter(path)
        self.var_store.set(tree_iter, 1, new_text)
        self.var_dict[self.var_store.get_value(tree_iter, 0)] = new_text

    def add_var(self, var, add_to_list=False):
        """Show *var* in the tree view, optionally registering it as new."""
        if add_to_list:
            self.var_list.append(var)
            self.var_dict[var] = ""
        self.var_store.append([var, self.var_dict[var]])

    def open_file(self, path):
        """Load *path*'s lines for editing.

        Bug fix: the previous implementation additionally opened the file in
        "w" mode — truncating it on open and destroying its contents — and
        called readlines() on the write handle; that handle was never used,
        so it is no longer created.
        """
        with open(path, "r") as fh:
            self.__file_lines = fh.readlines()

    def remove_var(self):
        """Delete the variable currently selected in the tree view."""
        model, treeiter = self.treeview.get_selection().get_selected()
        name = model[treeiter][0]
        self.var_dict.pop(name, None)
        self.var_list.remove(name)
        print(self.var_list)
        self.var_store.remove(treeiter)

    def get_conf_out(self):
        """Serialise the current variables back into config-file lines."""
        out_buff = []
        for var in self.var_list:
            buff = self.var_dict[var]
            if isinstance(buff, list):
                # List values keep a trailing comma, matching the on-disk format.
                buff = ",".join(buff) + ","
            out_buff.append(var + " = " + buff)
        return out_buff
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import time
import datetime
import re
import threading
import os.path, json, ast, traceback
import shutil
import signal
import importlib
try:
importlib.import_module("PyQt4")
except Exception:
sys.exit("Error: Could not import PyQt4 on Linux systems, you may try 'sudo apt-get install python-qt4'")
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
from electrum_xvg.i18n import _, set_language
from electrum_xvg.util import print_error, print_msg
from electrum_xvg.plugins import run_hook, always_hook
from electrum_xvg import WalletStorage, Wallet
from electrum_xvg.bitcoin import MIN_RELAY_TX_FEE
try:
import icons_rc
except Exception:
sys.exit("Error: Could not import icons_rc.py, please generate it with: 'pyrcc4 icons.qrc -o gui/qt/icons_rc.py'")
from util import *
from main_window import ElectrumWindow
class OpenFileEventFilter(QObject):
    """Qt event filter routing FileOpen events (e.g. bitcoin: URIs opened
    via the OS) to the first registered window's pay_to_URI handler.

    *windows* is held by reference, so windows appended after construction
    are seen by the filter.
    """

    def __init__(self, windows):
        # Bug fix: initialise the QObject base *before* touching instance
        # state, as required for QObject subclasses.
        super(OpenFileEventFilter, self).__init__()
        self.windows = windows

    def eventFilter(self, obj, event):
        """Consume FileOpen events; let everything else propagate."""
        if event.type() == QtCore.QEvent.FileOpen:
            if len(self.windows) >= 1:
                self.windows[0].pay_to_URI(event.url().toEncoded())
                return True
        return False
class ElectrumGui:
    """Top-level Qt GUI controller: owns the QApplication, the system tray
    icon, the main ("Classic") window and the optional Lite window, and
    drives wallet opening/creation before entering the event loop."""

    def __init__(self, config, network, app=None):
        """Store config/network and set up the QApplication + event filter.

        NOTE(review): when *app* is passed in, ``self.app`` is never
        assigned, so the installEventFilter call below would fail — confirm
        callers always pass ``app=None``.
        """
        set_language(config.get('language'))
        self.network = network
        self.config = config
        self.windows = []
        # The filter keeps a live reference to self.windows, so windows
        # appended later are visible to it.
        self.efilter = OpenFileEventFilter(self.windows)
        if app is None:
            self.app = QApplication(sys.argv)
        self.app.installEventFilter(self.efilter)

    def build_tray_menu(self):
        """(Re)build the tray icon's context menu."""
        m = QMenu()
        m.addAction(_("Show/Hide"), self.show_or_hide)
        m.addAction(_("Dark/Light"), self.toggle_tray_icon)
        m.addSeparator()
        m.addAction(_("Exit Electrum-XVG"), self.close)
        self.tray.setContextMenu(m)

    def toggle_tray_icon(self):
        """Flip between the dark and light tray icons and persist the choice."""
        self.dark_icon = not self.dark_icon
        self.config.set_key("dark_icon", self.dark_icon, True)
        icon = QIcon(":icons/electrum_dark_icon.png") if self.dark_icon else QIcon(':icons/electrum_light_icon.png')
        self.tray.setIcon(icon)

    def show_or_hide(self):
        """Menu action: behave exactly like a tray double-click."""
        self.tray_activated(QSystemTrayIcon.DoubleClick)

    def tray_activated(self, reason):
        """Toggle visibility of the current window on tray double-click."""
        if reason == QSystemTrayIcon.DoubleClick:
            if self.current_window.isMinimized() or self.current_window.isHidden():
                self.current_window.show()
                self.current_window.raise_()
            else:
                self.current_window.hide()

    def close(self):
        """Close the currently visible window (ends the app via its closeEvent)."""
        self.current_window.close()

    def go_full(self):
        """Switch from the Lite window to the Classic (main) window."""
        self.config.set_key('lite_mode', False, True)
        self.lite_window.hide()
        self.main_window.show()
        self.main_window.raise_()
        self.current_window = self.main_window

    def go_lite(self):
        """Switch from the Classic window to the Lite window."""
        self.config.set_key('lite_mode', True, True)
        self.main_window.hide()
        self.lite_window.show()
        self.lite_window.raise_()
        self.current_window = self.lite_window

    def init_lite(self):
        """Construct the Lite window, or disable lite_mode if Qt is too old.

        Requires Qt >= 4.7; when unavailable while lite_mode is set, warns,
        rewrites the config to the Classic GUI and exits.
        """
        import lite_window
        if not self.check_qt_version():
            if self.config.get('lite_mode') is True:
                msg = "Electrum was unable to load the 'Lite GUI' because it needs Qt version >= 4.7.\nChanging your config to use the 'Classic' GUI"
                QMessageBox.warning(None, "Could not start Lite GUI.", msg)
                self.config.set_key('lite_mode', False, True)
                sys.exit(0)
            self.lite_window = None
            return
        actuator = lite_window.MiniActuator(self.main_window)
        actuator.load_theme()
        self.lite_window = lite_window.MiniWindow(actuator, self.go_full, self.config)
        # NOTE(review): 'driver' is not stored on self; presumably kept alive
        # through Qt signal connections — confirm.
        driver = lite_window.MiniDriver(self.main_window, self.lite_window)

    def check_qt_version(self):
        """Return True when the runtime Qt is at least 4.7.

        NOTE(review): inspects single characters of the version string, so a
        minor version >= 10 (e.g. "4.10.x") reads its first digit only and
        fails the check — confirm against the Qt versions actually supported.
        """
        qtVersion = qVersion()
        return int(qtVersion[0]) >= 4 and int(qtVersion[2]) >= 7

    def set_url(self, uri):
        """Forward a payment URI to the visible window."""
        self.current_window.pay_to_URI(uri)

    def run_wizard(self, storage, action):
        """Run the install wizard for *storage*; return the wallet or None.

        For an incomplete existing wallet file, asks whether to finish its
        creation, offering to delete the file otherwise.
        """
        import installwizard
        if storage.file_exists and action != 'new':
            msg = _("The file '%s' contains an incompletely created wallet.")%storage.path + '\n'\
                  + _("Do you want to complete its creation now?")
            if not util.question(msg):
                if util.question(_("Do you want to delete '%s'?")%storage.path):
                    os.remove(storage.path)
                    QMessageBox.information(None, _('Warning'), _('The file was removed'), _('OK'))
                    return
                return
        wizard = installwizard.InstallWizard(self.config, self.network, storage, self.app)
        wizard.show()
        if action == 'new':
            action, wallet_type = wizard.restore_or_create()
        else:
            wallet_type = None
        try:
            wallet = wizard.run(action, wallet_type)
        except BaseException as e:
            traceback.print_exc(file=sys.stdout)
            QMessageBox.information(None, _('Error'), str(e), _('OK'))
            return
        return wallet

    def main(self, url):
        """Open or create a wallet, build tray + windows, run the event loop.

        *url* is an optional payment URI to open once the GUI is up.
        """
        # Reuse the last wallet if no explicit path was given.
        last_wallet = self.config.get('gui_last_wallet')
        if last_wallet is not None and self.config.get('wallet_path') is None:
            if os.path.exists(last_wallet):
                self.config.cmdline_options['default_wallet_path'] = last_wallet
        try:
            storage = WalletStorage(self.config.get_wallet_path())
        except BaseException as e:
            QMessageBox.warning(None, _('Warning'), str(e), _('OK'))
            self.config.set_key('gui_last_wallet', None)
            return
        if storage.file_exists:
            try:
                wallet = Wallet(storage)
            except BaseException as e:
                QMessageBox.warning(None, _('Warning'), str(e), _('OK'))
                return
            action = wallet.get_action()
        else:
            action = 'new'
        # An action means the wallet needs the wizard (creation/restore/etc.);
        # otherwise it is ready and its network threads can start.
        if action is not None:
            wallet = self.run_wizard(storage, action)
            if not wallet:
                return
        else:
            wallet.start_threads(self.network)
        # init tray
        self.dark_icon = self.config.get("dark_icon", False)
        icon = QIcon(":icons/electrum_dark_icon.png") if self.dark_icon else QIcon(':icons/electrum_light_icon.png')
        self.tray = QSystemTrayIcon(icon, None)
        self.tray.setToolTip('Electrum-XVG')
        self.tray.activated.connect(self.tray_activated)
        self.build_tray_menu()
        self.tray.show()
        # main window
        self.main_window = w = ElectrumWindow(self.config, self.network, self)
        self.current_window = self.main_window
        #lite window
        self.init_lite()
        # plugins interact with main window
        run_hook('init_qt', self)
        w.load_wallet(wallet)
        # initial configuration
        if self.config.get('hide_gui') is True and self.tray.isVisible():
            self.main_window.hide()
            self.lite_window.hide()
        else:
            if self.config.get('lite_mode') is True:
                self.go_lite()
            else:
                self.go_full()
        s = Timer()
        s.start()
        self.windows.append(w)
        if url:
            self.set_url(url)
        w.connect_slots(s)
        # Let Ctrl-C quit the Qt event loop.
        signal.signal(signal.SIGINT, lambda *args: self.app.quit())
        self.app.exec_()
        if self.tray:
            self.tray.hide()
        # clipboard persistence
        # see http://www.mail-archive.com/pyqt@riverbankcomputing.com/msg17328.html
        event = QtCore.QEvent(QtCore.QEvent.Clipboard)
        self.app.sendEvent(self.app.clipboard(), event)
        w.close_wallet()
|
# encoding: utf-8
"""
origin.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from exabgp.bgp.message.update.attribute.attribute import Attribute
# =================================================================== Origin (1)
@Attribute.register()
class Origin(Attribute):
    """BGP ORIGIN path attribute (type code 1): IGP, EGP or INCOMPLETE."""

    ID = Attribute.CODE.ORIGIN
    FLAG = Attribute.Flag.TRANSITIVE
    CACHING = True

    IGP = 0x00
    EGP = 0x01
    INCOMPLETE = 0x02

    def __init__(self, origin, packed=None):
        self.origin = origin
        # Reuse the supplied wire bytes when given, otherwise encode the value.
        self._packed = self._attribute(packed if packed else bytes([origin]))

    def __eq__(self, other):
        return (
            self.ID == other.ID
            and self.FLAG == other.FLAG
            and self.origin == other.origin
        )

    def __ne__(self, other):
        return not self == other

    def pack(self, negotiated=None):
        return self._packed

    def __len__(self):
        return len(self._packed)

    def __repr__(self):
        names = {self.IGP: 'igp', self.EGP: 'egp', self.INCOMPLETE: 'incomplete'}
        return names.get(self.origin, 'invalid')

    @classmethod
    def unpack(cls, data, direction, negotiated):
        return cls(data[0], data)

    @classmethod
    def setCache(cls):
        # Only three origin values exist, so prebuild and cache them all.
        for value in (cls.IGP, cls.EGP, cls.INCOMPLETE):
            instance = cls(value)
            cls.cache[Attribute.CODE.ORIGIN][instance.pack()] = instance


Origin.setCache()
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
import fixtures
import git
import testtools
LOREM_IPSUM = """\
Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy
nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi
enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis
nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in
hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu
feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui
blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla
facilisi.
Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit
lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure
dolor in hendrerit in vulputate velit esse molestie consequat, vel illum
dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio
dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te
feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing
elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam
erat volutpat.
Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie
consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et
accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit
augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet,
consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut
laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis
nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea
commodo consequat."""
class DiveDir(fixtures.Fixture):
    """Change into *path* for the fixture's lifetime, restoring the previous
    working directory on cleanup.

    :ivar path: The target directory.
    """

    def __init__(self, path):
        self.path = path

    def setUp(self):
        super(DiveDir, self).setUp()
        previous = os.getcwd()
        self.addCleanup(os.chdir, previous)
        os.chdir(self.path)
class GitRepo(fixtures.Fixture):
    """Create an empty git repo in which to operate."""

    def __init__(self):
        self.repo = None
        self.path = ''
        # Names of files already added, to guarantee uniqueness across branches.
        self._file_list = set()

    def setUp(self):
        """Initialise the repo in a temp dir and seed it with one commit."""
        super(GitRepo, self).setUp()
        tempdir = fixtures.TempDir()
        self.addCleanup(tempdir.cleanUp)
        tempdir.setUp()
        self.path = os.path.join(tempdir.path, 'git')
        os.mkdir(self.path)
        g = git.Git(self.path)
        g.init()
        self.repo = git.Repo(self.path)
        self.repo.git.config('user.email', 'user@example.com')
        self.repo.git.config('user.name', 'Example User')
        self._create_file_commit()

    def _create_file(self, contents=None):
        """Write a uniquely named file in the working tree; return its name.

        Files must be unique no matter which branch they are added to, as
        otherwise there may be conflicts caused by replaying local changes
        and performing merges.
        """
        if not contents:
            contents = LOREM_IPSUM
        while True:
            tmpfile = tempfile.NamedTemporaryFile(dir=self.repo.working_dir,
                                                  delete=False)
            if tmpfile.name not in self._file_list:
                self._file_list.add(tmpfile.name)
                break
            # Duplicate name: discard it and try again.
            tmpfile.close()
            # Bug fix: this was os.remote(), which raises AttributeError.
            os.remove(tmpfile.name)
        if isinstance(contents, str):
            # NamedTemporaryFile defaults to binary mode; encode text input.
            contents = contents.encode('utf-8')
        tmpfile.write(contents)
        tmpfile.close()
        return tmpfile.name

    def _create_file_commit(self, change_id=None):
        """Commit a freshly generated file, optionally with a Change-Id."""
        filename = self._create_file()
        self.repo.git.add(filename)
        message = "Adding %s" % os.path.basename(filename)
        if change_id:
            message = message + "\n\nChange-Id: %s" % change_id
        self.repo.git.commit(m=message)

    def add_commits(self, num=1, ref="HEAD", change_ids=None):
        """Create the given number of commits using generated files.

        Bug fix: the default ``change_ids=None`` previously crashed on
        ``len(None)``; it now behaves like an empty list.
        """
        if ref != "HEAD":
            self.repo.git.checkout(ref)
        change_ids = list(change_ids) if change_ids else []
        num = max(num, len(change_ids))
        ids = change_ids + [None] * (num - len(change_ids))
        for x in range(num):
            self._create_file_commit(ids[x])
class BaseTestCase(testtools.TestCase):
    """Base Test Case for all tests: provides a throw-away git repository
    and makes it the working directory."""

    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.testrepo = self.useFixture(GitRepo())
        self.useFixture(DiveDir(self.testrepo.path))
        self.repo = self.testrepo.repo
|
from django.dispatch.dispatcher import Signal
from location.models import LocationSnapshot
location_updated = Signal(providing_args=['user', 'from_', 'to'])
location_changed = Signal(providing_args=['user', 'from_', 'to'])
class watch_location(object):
    """Context manager that fires signals when the user's most recent
    LocationSnapshot changed while the ``with`` block ran.

    ``location_updated`` fires whenever the latest snapshot differs;
    ``location_changed`` additionally requires the snapshot's resolved
    ``location`` to differ.
    """

    def __init__(self, user):
        self.user = user

    def _get_current_location(self):
        # Raises IndexError when the user has no snapshots at all.
        return LocationSnapshot.objects.filter(
            source__user=self.user,
        ).order_by('-date')[0]

    def __enter__(self):
        self.original_location = None
        try:
            self.original_location = self._get_current_location()
        except IndexError:
            pass
        return self

    def __exit__(self, *args):
        try:
            current_location = self._get_current_location()
        except IndexError:
            # Bug fix: previously an IndexError escaped here when the user
            # had no snapshots; with no snapshots nothing can have changed,
            # so simply emit nothing (mirrors the guard in __enter__).
            return
        if self.original_location != current_location:
            location_updated.send(
                sender=self,
                user=self.user,
                from_=self.original_location,
                to=current_location,
            )
        if (
            self.original_location and
            self.original_location.location
            != current_location.location
        ):
            location_changed.send(
                sender=self,
                user=self.user,
                from_=self.original_location,
                to=current_location,
            )
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# armoni.py
#
# Copyright 2012-2014 Ángel Coto <codiasw@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details (http://www.gnu.org/licenses/gpl.txt)
#
# Descripcion:
# Este programa verifica periódicamente los archivos que se defina en archivo de
# configuración y emite alertas si han cambiado o no. Las alerta que se generan
# por defecto son ante cambio de los archivos, pero opcionalmente puede indicarse
# por parámetro de línea de comando que emita alertas ante no cambio. Las alertas
# son registradas en log también son enviadas por correo.
# Historial de versión
# 1.0.1: Incorpora los destinatarios en el mensaje que se guarda en log de eventos,
# relativo a la notificación de cumplimiento de regla
# 1.1.0: * Simplifica el método de comparación
# * Actualiza el listado de archivos cada vez que se hace ronda de monitoreo.
# Esto funciona en modalidad de directorio.
import os
import smtplib
from artamiz import calcsum, enllavado
from ConfigParser import SafeConfigParser
from time import sleep, localtime, strftime
from sys import argv
from getpass import getpass
from base64 import standard_b64decode, standard_b64encode
from email.mime.text import MIMEText
from email.Header import Header
from socket import gethostname
### Define la versión del programa
Programa = 'armoni'
Ver = '1.1.0 (beta)'
Copyright = 'Copyright (c) 2012-2014 Angel Coto <codiasw@gmail.com>'
Maquina = gethostname()
### Inicializa variables de mensajes
Error1 = "* Error 1: Error al leer archivo '{0}'."
Error2 = "* Error 2: El campo '{0}' no tiene formáto válido."
Error3 = "* Error 3: '{0}' no es directorio."
Error4 = "* Error 4: '{0}' no es un archivo."
Error5 = "* Error 5: '{0}' no es valor esperado para '{1}'."
MensajeLog1 = "{0}\t{1}\t{2}\t{3}" #Mensaje que se graba en el log de monitoreo
ErrorLog1 = "{0}\tERROR\tError en la comunicación o autenticación con el servidor de correo"
ErrorLog2 = "{0}\tERROR\tError al intentar enviar el mensaje luego de contactar exitosamente al servidor de correo"
ErrorLog3 = "{0}\tERROR\t{1} finalizó debido a errores en archivo de ini"
ErrorLog4 = "{0}\tERROR\t{1} finalizó porque ninguno de los archivos se puede analizar"
ErrorLog5 = "{0}\tERROR\tNo se pudo verificar archivos\t{1}"
EventoLog0 = "{0}\tINFORMATIVO\t{1} inició con éxito con parámetros\t{2}\t{3}\t{4}\t{5}"
EventoLog1 = "{0}\tINFORMATIVO\tSe notificó cumplimiento de la regla\t{1}\t{2}"
EventoLog2 = "{0}\tINFORMATIVO\tNo fue posible notificar cumplimiento de la regla\t{1}"
EventoLog3 = "{0}\tINFORMATIVO\tSe notificó el inicio de {1}\t{2}"
EventoLog4 = "{0}\tINFORMATIVO\tNo fue posible notificar el inicio de {1}"
EventoLog5 = "{0}\tINFORMATIVO\tSe excluyen archivos del monitoreo\t{1}"
EventoLog6 = "{0}\tINFORMATIVO\tInicio de ciclo de verificación"
EventoLog7 = "{0}\tINFORMATIVO\tFin de ciclo de verificación"
EventoLog100 = "{0}\tINFORMATIVO\t{1} fue detenido"
class Correo:
    """Build and send plain-text notification e-mails over SMTP."""

    def __init__(self, Servidor, Puerto, Cuenta, Pwd=None):
        self.Cuenta = Cuenta
        self.Pwd = Pwd
        self.Servidor = Servidor
        self.Puerto = Puerto
        self.Asunto = ''
        self.Mensaje = ''

    def CreaMensaje(self, Mensaje):
        # Generic setter for any pre-built message body.
        self.Mensaje = Mensaje

    def CreaAsunto(self, Asunto):
        # Generic setter for any pre-built subject line.
        self.Asunto = Asunto

    def CreaAsuntoLog(self, CausaAlerta):
        # Alert-specific subject; flavour depends on the monitored rule.
        if CausaAlerta == 'cambio':
            sufijo = ': ** Reportando cambios en archivos'
        else:
            sufijo = ': ** Reportando archivos que no han cambiado'
        self.Asunto = Programa + '@' + Maquina + sufijo

    def CreaMensajeLog(self, Archivos, CausaAlerta, Intervalo, Hora):
        # Alert-specific body listing the files matching the rule.
        encabezado = ('------------ Reporte de ' + Programa + '@' + Maquina +
                      ' en fecha ' + Hora + ' ------------\n\n')
        if CausaAlerta == 'cambio':
            detalle = ('Se detectó que los siguientes archivos se modificaron en los últimos ' +
                       str(Intervalo) + ' minutos:\n\n')
        else:
            detalle = ('Se detectó que los siguientes archivos no han cambiado en los últimos ' +
                       str(Intervalo) + ' minutos:\n\n')
        lineas = ''
        for Archivo in Archivos:
            lineas = lineas + ' * ' + Archivo + '\n'
        self.Mensaje = encabezado + detalle + lineas + '\n' + Programa + '-' + Ver

    def EnviarCorreo(self, Remitente, Destinatarios):
        """Send the stored message.

        Returns 0 on success, 1 on connection/authentication failure, and 2
        when the server was reached but sending failed.
        """
        # Build a simple text message (no attachments).
        asunto = Header(self.Asunto.decode('utf-8'), 'utf-8')
        correo = MIMEText(self.Mensaje, 'plain', 'utf-8')
        correo['From'] = Remitente
        # NOTE(review): the To header is set to the sender, not to
        # Destinatarios — possibly intentional (hides recipients); confirm.
        correo['To'] = Remitente
        correo['Subject'] = asunto
        texto = correo.as_string()
        # Connect; Gmail additionally requires STARTTLS + login.
        try:
            servidor = smtplib.SMTP(self.Servidor, self.Puerto)
            if self.Servidor == 'smtp.gmail.com':
                servidor.starttls()
                servidor.login(self.Cuenta, standard_b64decode(self.Pwd))
        except:
            return 1
        try:
            servidor.sendmail(Remitente, Destinatarios, texto)
            return 0
        except:
            return 2
        finally:
            servidor.quit()
class Log:
    """Append-only log file with size-based rotation."""

    def __init__(self, Archivo):
        self.Archivo = Archivo
        # Rotate once the file reaches 1 MiB.
        self.TamanoMaximo = 1048576

    def GrabaRegistroLog(self, Registro):
        """Append one record as a line and rotate the file if it grew too big."""
        # Bug fix: use a context manager instead of leaving the handle open
        # on an exception.
        with open(self.Archivo, 'a') as archivo_log:
            archivo_log.write(Registro + '\n')
        if self.VerificaTamano():
            self.RenombraLog()

    def VerificaTamano(self):
        """Return True when the log reached the rotation threshold."""
        return os.path.getsize(self.Archivo) >= self.TamanoMaximo

    def RenombraLog(self):
        """Rename the log to ``<name>_<YYYYmmdd_HHMMSS><ext>``.

        Bug fix: the rotated name previously had no directory component, so
        logs living outside the working directory were moved into the CWD;
        the timestamped file now stays next to the original.
        """
        base = os.path.basename(self.Archivo)
        parte1, extension = os.path.splitext(base)
        complemento = strftime("_%Y%m%d_%H%M%S", localtime())
        nuevo_nombre = os.path.join(os.path.dirname(self.Archivo),
                                    parte1 + complemento + extension)
        os.rename(self.Archivo, nuevo_nombre)
class Parametros:
    """Read and validate the program's .ini configuration.

    Every reader returns False and sets ``self.Error`` on a bad value, so
    the caller can check a single flag after construction.
    """

    def __init__(self, Ini, TipoObjeto):
        """Load all options from *Ini*; *TipoObjeto* selects whether the
        monitored objects are directories or individual files."""
        self.ArchivoIni = Ini
        self.Error = False
        if os.path.isfile(self.ArchivoIni):
            if TipoObjeto == 'directorio':
                self.Directorios = self.LeeLista('datos_monitoreo', 'directorios')
                if self.Directorios != False:
                    self.ValidaDirectorios()
            else:
                self.Archivos = self.LeeLista('datos_monitoreo', 'archivos')
                if self.Archivos != False:
                    self.ValidaArchivos()
            self.MinutosIntervalo = self.LeeNumerico('datos_monitoreo', 'minutos_intervalo')
            self.Intervalo = self.MinutosIntervalo * 60
            self.Servidor = self.LeeString('datos_servidor_correo', 'servidor')
            self.RequiereAutenticacion = self.LeeString('datos_servidor_correo', 'requiere_autenticacion', ['si', 'no'])
            self.Puerto = self.LeeNumerico('datos_servidor_correo', 'puerto')
            self.Cuenta = self.LeeString('datos_servidor_correo', 'cuenta')
            self.De = self.LeeString('datos_correo', 'de')
            self.Para = self.LeeLista('datos_correo', 'para')
            self.ParaAdmin = self.LeeLista('datos_correo', 'para_admin')
        else:
            # Bug fix: this referenced the undefined name 'error1'
            # (NameError at runtime); the message constant is 'Error1'.
            print(Error1.format(self.ArchivoIni))
            self.Error = True

    def ValidaDirectorios(self):
        """Check that every configured directory exists; flag self.Error."""
        for Directorio in self.Directorios:
            if not os.path.isdir(Directorio):
                print(Error3.format(Directorio))
                self.Error = True
        return not self.Error

    def ValidaArchivos(self):
        """Check that every configured file exists; flag self.Error."""
        for Archivo in self.Archivos:
            if not os.path.isfile(Archivo):
                print(Error4.format(Archivo))
                self.Error = True
        return not self.Error

    def LeeLista(self, seccion, opcion):
        """Return the ';'-separated list at *seccion*/*opcion*, or False
        (flagging self.Error) when the option is empty."""
        parser = SafeConfigParser()
        parser.read(self.ArchivoIni)
        valor = parser.get(seccion, opcion).strip()
        if valor != '':
            # Equivalent to the previous character-by-character scan: split
            # on ';' and strip each element (a trailing ';' still yields an
            # empty final element, as before).
            return [parte.strip() for parte in valor.split(';')]
        print(Error2.format(opcion))
        self.Error = True
        return False

    def LeeString(self, seccion, opcion, valores=None):
        """Return the string at *seccion*/*opcion*; when *valores* is given
        the value must be one of them. Returns False on failure."""
        parser = SafeConfigParser()
        parser.read(self.ArchivoIni)
        MiString = parser.get(seccion, opcion).strip()
        if MiString == '':
            print(Error2.format(opcion))
            self.Error = True
            return False
        if valores is not None and MiString not in valores:
            print(Error5.format(MiString, opcion))
            self.Error = True
            return False
        return MiString

    def LeeNumerico(self, seccion, opcion):
        """Return the integer at *seccion*/*opcion*, or False on failure."""
        parser = SafeConfigParser()
        parser.read(self.ArchivoIni)
        try:
            return int(parser.get(seccion, opcion))
        except:
            # Bare except kept deliberately: missing options and unparsable
            # values are both treated as a bad configuration entry.
            print(Error2.format(opcion))
            self.Error = True
            return False
class Monitor:
    """Track a set of files by SHA-1 hash and detect (non-)changes."""

    def __init__(self):
        # [[path, sha1], ...] for the files being monitored.
        self.Archivos = []
        # [[path, reason], ...] for files that could not be processed.
        self.ArchivosError = []

    def ArchivoVerificable(self, Archivo):
        """Return True when *Archivo* exists, is readable and is not locked;
        otherwise record the reason in self.ArchivosError."""
        if not os.path.isfile(Archivo):
            self.ArchivosError.append([Archivo, 'noexiste'])
            return False
        if not os.access(Archivo, os.R_OK):
            self.ArchivosError.append([Archivo, 'sinpermisolectura'])
            return False
        # enllavado() (from artamiz) reports the file as locked.
        if enllavado(Archivo):
            self.ArchivosError.append([Archivo, 'enllavado'])
            return False
        return True

    def CargaArchivos(self, TipoObjeto, Objetos):
        """Initial load: hash every verifiable file in *Objetos*.

        *TipoObjeto* is unused but kept for interface compatibility.
        Returns True when at least one file could be loaded.
        """
        self.Archivos = []
        for Archivo in Objetos:
            # Consistency fix: reuse ArchivoVerificable, so nonexistent files
            # are now recorded as 'noexiste' instead of silently skipped.
            if self.ArchivoVerificable(Archivo):
                self.Archivos.append([Archivo, calcsum(Archivo, 'a', 'sha1')])
        return bool(self.Archivos)

    def VerificaArchivos(self, CausaAlerta):
        """Re-hash every tracked file and collect those matching the rule.

        *CausaAlerta* is 'cambio' (alert on changed hashes) or 'nocambio'
        (alert on unchanged hashes). Returns (Alerta, Alertas).
        """
        Alerta = False
        Alertas = []
        self.ArchivosError = []
        # enumerate keeps the stored-hash slot aligned with the file being
        # examined even when some files are skipped as unverifiable.
        for Indice, Archivo in enumerate(self.Archivos):
            if self.ArchivoVerificable(Archivo[0]):
                NuevoHash = calcsum(Archivo[0], 'a', 'sha1')
                if CausaAlerta == 'nocambio' and Archivo[1] == NuevoHash:
                    Alerta = True
                    Alertas.append(Archivo[0])
                elif CausaAlerta == 'cambio' and Archivo[1] != NuevoHash:
                    Alerta = True
                    Alertas.append(Archivo[0])
                # Remember the latest hash for the next round.
                self.Archivos[Indice] = [Archivo[0], NuevoHash]
        return Alerta, Alertas
def main():
def HintDeUso():
print(' Monitorea la variación de archivos.\n')
print(' Uso: python {0} [?,-nC, -a]\n'.format(Programa))
print(' Opciones:')
print(' <ninguna>: Alerta si hay cambios en directorios.')
print(' -nC: Alerta cuando no hay cambios en los objetos monitoreados.')
print(' -a: Monitorea archivos en lugar de directorios completos.')
print(' ?: Muestra esta ayuda.\n')
print(' Este programa es software libre bajo licencia GPLv3.\n')
def PantallaInicial():
if os.name == 'posix':
os.system('clear')
elif os.name == 'nt':
os.system('cls')
else:
None
print('{0} {1}. {2}\n'.format(Programa,Ver,Copyright))
def LeeParametrosLc():
CausaAlerta = 'cambio'
TipoObjeto = 'directorio'
ParametroOk = True
try:
ar1 = argv[1]
if argv[1] == '-nC':
CausaAlerta = 'nocambio'
elif argv[1] == '-a':
TipoObjeto = 'archivo'
else:
ParametroOk = False
except:
None
if ParametroOk:
try:
ar2 = argv[2]
if ar2 == '-nC':
CausaAlerta = 'nocambio'
elif ar2 == '-a':
TipoObjeto = 'archivo'
else:
ParametroOk = False
except:
None
return ParametroOk, CausaAlerta, TipoObjeto
def HoraTexto():
return strftime('%Y-%m-%d %H:%M:%S', localtime())
def ImprimeLinea():
print('------------------------------------------------------------------------------')
def CargaInicial():
if TipoObjeto == 'directorio':
Archivos = []
for Directorio in ParametrosIni.Directorios:
ListaArchivos = os.listdir(Directorio)
for Archivo in ListaArchivos:
Archivos.append(os.path.join(Directorio, Archivo))
ResultadoCarga = MiMonitor.CargaArchivos(TipoObjeto, Archivos)
else:
ResultadoCarga = MiMonitor.CargaArchivos(TipoObjeto, ParametrosIni.Archivos)
if MiMonitor.ArchivosError:
PreparaRegistroErr(EventoLog5.format(HoraTexto(),MiMonitor.ArchivosError))
return ResultadoCarga
def PreparaRegistroErr(Registro):
LogServicio.GrabaRegistroLog(Registro)
print(Registro)
def PreparaRegistroLog(Archivo, Hora, Causa):
RegistroLog = MensajeLog1.format(Hora,Causa,Archivo,ParametrosIni.MinutosIntervalo)
LogMonitoreo.GrabaRegistroLog(RegistroLog)
print(RegistroLog)
def PreparaCorreoLog(Alertas, CausaAlerta, Hora):
    """Send the alert e-mail for *Alertas* and log the delivery outcome.

    EnviarCorreo result codes: 0 = sent, 1 = connection/login failure,
    anything else = other send error.
    """
    MiCorreo.CreaAsuntoLog(CausaAlerta)
    MiCorreo.CreaMensajeLog(Alertas, CausaAlerta, ParametrosIni.MinutosIntervalo, Hora)
    ResultadoEnvio = MiCorreo.EnviarCorreo(ParametrosIni.De, ParametrosIni.Para)
    # Refresh the timestamp so the event entry reflects when the send finished.
    HoraEvento = HoraTexto()
    if ResultadoEnvio == 0:
        PreparaRegistroErr(EventoLog1.format(HoraEvento,CausaAlerta,ParametrosIni.Para))
        return
    # Delivery failed: log the generic failure event plus the specific cause.
    PreparaRegistroErr(EventoLog2.format(HoraEvento,CausaAlerta))
    if ResultadoEnvio == 1:
        PreparaRegistroErr(ErrorLog1.format(HoraEvento))
    else:
        PreparaRegistroErr(ErrorLog2.format(HoraEvento))
def InformaInicio(Hora):
    """Log the service start-up and e-mail the administrators about it.

    Builds a start-up summary (host, time, rule, object type and monitored
    objects), sends it to ParametrosIni.ParaAdmin and logs the delivery
    outcome (0 = sent, 1 = connection failure, other = send error).
    """
    if TipoObjeto == 'directorio':
        Objetos = str(ParametrosIni.Directorios)
    else:
        Objetos = str(ParametrosIni.Archivos)
    PreparaRegistroErr(EventoLog0.format(Hora,Programa,CausaAlerta,ParametrosIni.MinutosIntervalo,TipoObjeto,Objetos))
    Texto = Programa + '@' + Maquina + ': ** Se inició el servicio'
    MiCorreo.CreaAsunto(Texto)
    # Assemble the e-mail body line by line.
    Texto = 'El servicio ' + Programa + '-' + Ver + ' inició.\n\n'
    Texto = Texto + 'Equipo : ' + Maquina + '\n'
    Texto = Texto + 'Hora : ' + Hora + '\n'
    Texto = Texto + 'Regla : ' + CausaAlerta + '\n'
    Texto = Texto + 'Tipo objeto: ' + TipoObjeto + '\n'
    if TipoObjeto == 'directorio':
        Texto = Texto + 'Directorios: ' + str(ParametrosIni.Directorios) + '\n\n'
    else:
        Texto = Texto + 'Archivos : ' + str(ParametrosIni.Archivos) + '\n\n'
    Texto = Texto + 'La actividad del monitoreo se puede consultar en los log del servicio.'
    MiCorreo.CreaMensaje(Texto)
    ResultadoEnvio = MiCorreo.EnviarCorreo(ParametrosIni.De, ParametrosIni.ParaAdmin)
    Hora = HoraTexto() # Refresh the time for the event-log entry
    if ResultadoEnvio == 0:
        PreparaRegistroErr(EventoLog3.format(Hora, Programa, ParametrosIni.ParaAdmin))
    elif ResultadoEnvio == 1:
        PreparaRegistroErr(EventoLog4.format(Hora, Programa))
        PreparaRegistroErr(ErrorLog1.format(Hora))
    else:
        PreparaRegistroErr(EventoLog4.format(Hora, Programa))
        PreparaRegistroErr(ErrorLog2.format(Hora))
def MonitoreaArchivos():
    """Run one monitoring pass: verify the files, log alerts and access
    errors, and send the alert e-mail when the rule fires."""
    PreparaRegistroErr(EventoLog6.format(HoraTexto()))
    HayAlerta, Alertas = MiMonitor.VerificaArchivos(CausaAlerta)
    HoraPasada = HoraTexto()
    # Per-file access errors first, each with its own cause text.
    for ErrorInfo in MiMonitor.ArchivosError:
        PreparaRegistroLog(ErrorInfo[0], HoraPasada, ErrorInfo[1])
    if HayAlerta:
        for NombreArchivo in Alertas:
            PreparaRegistroLog(NombreArchivo, HoraPasada, CausaAlerta)
        PreparaCorreoLog(Alertas, CausaAlerta, HoraPasada)
    PreparaRegistroErr(EventoLog7.format(HoraTexto()))
    ImprimeLinea()
# Entry point: set up the service objects from armoni.ini and run the
# monitoring loop until interrupted with Ctrl-C or SystemExit.
try:
    PantallaInicial()
    ParametrosLcOk, CausaAlerta, TipoObjeto = LeeParametrosLc()
    if ParametrosLcOk:
        ParametrosIni = Parametros('armoni.ini', TipoObjeto) # Create the parameters object
        LogServicio = Log('armoni.err') # Records service events
        if not ParametrosIni.Error:
            LogMonitoreo = Log('armoni.log') # Records monitoring activity
            MiMonitor = Monitor() # Create the monitor object
            if ParametrosIni.RequiereAutenticacion == 'si':
                Pwd = standard_b64encode(getpass("Password de '" + ParametrosIni.Cuenta + "': "))
                MiCorreo = Correo(ParametrosIni.Servidor, ParametrosIni.Puerto, ParametrosIni.Cuenta, Pwd)
            else:
                MiCorreo = Correo(ParametrosIni.Servidor, ParametrosIni.Puerto, ParametrosIni.Cuenta)
            print("\nIniciando el servicio de verificación archivos con la regla '"+ CausaAlerta + "'...")
            if CargaInicial():
                print("\nServicio iniciado")
                ImprimeLinea()
                InformaInicio(HoraTexto())
                ImprimeLinea()
                Error = False
                sleep(ParametrosIni.Intervalo)
                while not Error:
                    MonitoreaArchivos()
                    if TipoObjeto == 'directorio':
                        # Reload the directory contents so newly created
                        # files are monitored on the next pass; a failed
                        # reload is deliberately ignored (best effort).
                        if not CargaInicial():
                            None
                            #Error = True
                    sleep(ParametrosIni.Intervalo)
            else:
                PreparaRegistroErr(ErrorLog4.format(HoraTexto(),Programa))
        else:
            PreparaRegistroErr(ErrorLog3.format(HoraTexto(),Programa))
    else:
        HintDeUso()
except(KeyboardInterrupt, SystemExit):
    pass
PreparaRegistroErr(EventoLog100.format(HoraTexto(), Programa))
if __name__ == '__main__':
    # NOTE(review): no main() is defined anywhere visible in this file; the
    # script body above already runs at import time, so this call raises
    # NameError once the loop ends -- confirm intent.
    main()
else:
    None
|
#!/usr/bin/env python3
import json, sys, time as dt, pprint, math
import urllib
import imgur_config
from Imgur.Factory import Factory
from Imgur.Auth.Expired import Expired
try:
from urllib.request import urlopen as UrlLibOpen
from urllib.request import HTTPError
except ImportError:
from urllib2 import urlopen as UrlLibOpen
from urllib2 import HTTPError
def center_pad(s, length):
    """Center *s* in a field of *length* characters, padded with '=' runs.

    When the padding cannot be split evenly, the right-hand run gets the
    extra '=' character.
    """
    padding = float(length - len(s) - 2) / 2
    left = '=' * math.floor(padding)
    right = '=' * math.ceil(padding)
    return '{0} {1} {2}'.format(left, s, right)
def two_column_with_period(left, right, length):
    """Lay out *left* and *right* on one line of *length* characters,
    filling the gap between them with '.' characters."""
    fill = length - (len(left) + len(right) + 2)
    return '{0} {1} {2}'.format(left, '.' * fill, right)
def upload(image, name):
    """Upload *image* to the imgur v3 image endpoint under *name*.

    Returns the open HTTP response object; callers read/parse the body
    themselves.
    NOTE(review): the request is sent without a Client-ID / OAuth header,
    which the imgur v3 API normally requires -- confirm against the caller.
    """
    # urllib.urlencode/urllib.urlopen are Python 2 APIs and do not exist
    # under the python3 shebang of this script; use urllib.parse.urlencode
    # and the version-agnostic UrlLibOpen alias imported at the top.
    from urllib.parse import urlencode
    data = urlencode({"image": image, "name": name}).encode("utf-8")
    return UrlLibOpen("https://api.imgur.com/3/image", data)
|
"""
Functions about shapes.
"""
from typing import Optional, List, Dict, TYPE_CHECKING
import pandas as pd
from pandas import DataFrame
import numpy as np
import utm
import shapely.geometry as sg
from . import constants as cs
from . import helpers as hp
# Help mypy but avoid circular imports
if TYPE_CHECKING:
from .feed import Feed
def build_geometry_by_shape(
    feed: "Feed",
    shape_ids: Optional[List[str]] = None,
    *,
    use_utm: bool = False,
) -> Dict:
    """
    Map each shape ID in ``feed.shapes`` to a Shapely LineString of its
    points.

    Parameters
    ----------
    feed : Feed
    shape_ids : list
        IDs of shapes in ``feed.shapes`` to restrict output to; use
        all shapes if ``None``.
    use_utm : boolean
        If ``True``, then use local UTM coordinates; otherwise, use
        WGS84 coordinates.

    Returns
    -------
    dictionary
        Has the structure shape_id -> Shapely LineString of shape.
        The empty dictionary if ``feed.shapes is None``.
    """
    if feed.shapes is None:
        return {}

    shapes = feed.shapes.copy()
    if shape_ids is not None:
        shapes = shapes[shapes["shape_id"].isin(shape_ids)]

    def to_linestring(group):
        lons = group["shape_pt_lon"].values
        lats = group["shape_pt_lat"].values
        if use_utm:
            # utm.from_latlon returns (easting, northing, zone, letter),
            # e.g. utm.from_latlon(47.9941214, 7.8509671) ->
            # (414278, 5316285, 32, 'T'); keep only the planar pair.
            coords = [
                utm.from_latlon(lat, lon)[:2] for lat, lon in zip(lats, lons)
            ]
        else:
            coords = zip(lons, lats)
        return sg.LineString(coords)

    return {
        shape_id: to_linestring(group)
        for shape_id, group in shapes.groupby("shape_id")
    }
def shapes_to_geojson(
    feed: "Feed", shape_ids: Optional[List[str]] = None
) -> Dict:
    """
    Return a (decoded) GeoJSON FeatureCollection of LineString features
    representing ``feed.shapes``, one feature per shape, each carrying a
    ``shape_id`` property.  Coordinates are in WGS84, the GeoJSON default.

    If ``shape_ids`` is given, only the corresponding LineString features
    are included.  Return the empty dictionary if ``feed.shapes is None``.
    """
    geometry_by_shape = feed.build_geometry_by_shape(shape_ids=shape_ids)
    if not geometry_by_shape:
        return {}

    features = []
    for shape_id, linestring in geometry_by_shape.items():
        features.append(
            {
                "properties": {"shape_id": shape_id},
                "type": "Feature",
                "geometry": sg.mapping(linestring),
            }
        )
    return {"type": "FeatureCollection", "features": features}
def get_shapes_intersecting_geometry(
    feed: "Feed", geometry, geo_shapes=None, *, geometrized: bool = False
) -> DataFrame:
    """
    Return the slice of ``feed.shapes`` containing all shapes that
    intersect the given Shapely geometry, e.g. a Polygon or LineString.

    Parameters
    ----------
    feed : Feed
    geometry : Shapely geometry, e.g. a Polygon
        Specified in WGS84 coordinates
    geo_shapes : GeoPandas GeoDataFrame
        The output of :func:`geometrize_shapes`
    geometrized : boolean
        If ``True``, then return the result as a GeoDataFrame of the form
        output by :func:`geometrize_shapes`

    Returns
    -------
    DataFrame or GeoDataFrame

    Notes
    -----
    - Requires GeoPandas
    - Supplying ``geo_shapes`` skips the geometrizing step
    - Assume ``feed.shapes`` is not ``None`` when ``geo_shapes`` is not given
    """
    f = geo_shapes if geo_shapes is not None else geometrize_shapes(feed.shapes)
    # Boolean mask of shapes whose geometry touches/crosses the given one.
    mask = f["geometry"].intersects(geometry)
    hits = f.loc[mask]
    if geometrized:
        return hits
    return ungeometrize_shapes(hits)
def append_dist_to_shapes(feed: "Feed") -> "Feed":
    """
    Calculate and append the optional ``shape_dist_traveled`` field in
    ``feed.shapes`` in terms of the distance units ``feed.dist_units``.
    Return the resulting Feed.

    Raises ValueError if ``feed.shapes`` is ``None``.

    Notes
    -----
    - As a benchmark, using this function on `this Portland feed
      <https://transitfeeds.com/p/trimet/43/1400947517>`_
      produces a ``shape_dist_traveled`` column that differs by at most
      0.016 km in absolute value from of the original values
    - Assume the following feed attributes are not ``None``:
        * ``feed.shapes``
    """
    if feed.shapes is None:
        raise ValueError(
            "This function requires the feed to have a shapes.txt file"
        )
    feed = feed.copy()
    f = feed.shapes
    # Distances are computed in meters (UTM); convert to the feed's units.
    m_to_dist = hp.get_convert_dist("m", feed.dist_units)
    def compute_dist(group):
        # Compute the distances of the stops along this trip
        group = group.sort_values("shape_pt_sequence")
        shape = group["shape_id"].iat[0]
        if not isinstance(shape, str):
            # Non-string shape ID: no meaningful shape, mark distances NaN.
            group["shape_dist_traveled"] = np.nan
            return group
        # Project points to planar UTM coordinates so Shapely distances
        # are in meters.
        points = [
            sg.Point(utm.from_latlon(lat, lon)[:2])
            for lon, lat in group[["shape_pt_lon", "shape_pt_lat"]].values
        ]
        # Cumulative point-to-point distance along the shape.
        p_prev = points[0]
        d = 0
        distances = [0]
        for p in points[1:]:
            d += p.distance(p_prev)
            distances.append(d)
            p_prev = p
        group["shape_dist_traveled"] = distances
        return group
    g = f.groupby("shape_id", group_keys=False).apply(compute_dist)
    # Convert from meters
    g["shape_dist_traveled"] = g["shape_dist_traveled"].map(m_to_dist)
    feed.shapes = g
    return feed
def geometrize_shapes(
    shapes: DataFrame, *, use_utm: bool = False
) -> DataFrame:
    """
    Given a GTFS shapes DataFrame, convert it to a GeoPandas
    GeoDataFrame and return the result.

    The result has a ``'geometry'`` column of WGS84 LineStrings
    instead of the columns ``'shape_pt_sequence'``, ``'shape_pt_lon'``,
    ``'shape_pt_lat'``, and ``'shape_dist_traveled'``.
    If ``use_utm``, then use local UTM coordinates for the geometries.

    Notes
    ------
    Requires GeoPandas.
    """
    import geopandas as gpd

    f = shapes.copy().sort_values(["shape_id", "shape_pt_sequence"])

    def my_agg(group):
        # One LineString per shape from its (lon, lat) points, already
        # ordered by shape_pt_sequence.
        d = {}
        d["geometry"] = sg.LineString(
            group[["shape_pt_lon", "shape_pt_lat"]].values
        )
        return pd.Series(d)

    g = f.groupby("shape_id").apply(my_agg).reset_index()
    g = gpd.GeoDataFrame(g, crs=cs.WGS84)

    if use_utm:
        # Pick the UTM zone from the first point *positionally*.  The
        # previous ``f.loc[0, ...]`` assumed the index contains the label 0,
        # which fails on filtered/reindexed shapes frames.
        lat, lon = f.iloc[0][["shape_pt_lat", "shape_pt_lon"]].values
        crs = hp.get_utm_crs(lat, lon)
        g = g.to_crs(crs)

    return g
def ungeometrize_shapes(geo_shapes) -> DataFrame:
    """
    The inverse of :func:`geometrize_shapes`.

    Produces the columns:

    - ``'shape_id'``
    - ``'shape_pt_sequence'``
    - ``'shape_pt_lon'``
    - ``'shape_pt_lat'``

    If ``geo_shapes`` is in UTM coordinates (has a UTM CRS property),
    then convert those UTM coordinates back to WGS84 coordinates,
    which is the standard for a GTFS shapes table.
    """
    geo_shapes = geo_shapes.to_crs(cs.WGS84)
    # Explode every LineString back into one row per point, numbering the
    # points consecutively within each shape.
    records = [
        [row["shape_id"], i, x, y]
        for _, row in geo_shapes.iterrows()
        for i, (x, y) in enumerate(row["geometry"].coords)
    ]
    return pd.DataFrame(
        records,
        columns=[
            "shape_id",
            "shape_pt_sequence",
            "shape_pt_lon",
            "shape_pt_lat",
        ],
    )
|
# coding=utf-8
from emft.core import constant
from emft.core.logging import make_logger
from emft.gui.base import GridLayout, HSpacer, Label, VLayout, VSpacer
from emft.gui.main_ui_tab_widget import MainUiTabChild
LOGGER = make_logger(__name__)
class TabChildAbout(MainUiTabChild):
    """'About' tab showing clickable links to the project repository and
    changelog."""

    def tab_clicked(self):
        # Static content; nothing to refresh when the tab gains focus.
        pass

    @property
    def tab_title(self) -> str:
        return 'About'

    def __init__(self, parent=None):
        super(TabChildAbout, self).__init__(parent)

        def make_link_label(link):
            # Render the URL as a hyperlink that opens in the browser.
            label = Label('''<a href='{link}'>{link}</a>'''.format(link=link))
            label.setOpenExternalLinks(True)
            return label

        repo_label = make_link_label(constant.LINK_REPO)
        changelog_label = make_link_label(constant.LINK_CHANGELOG)

        self.setLayout(
            VLayout(
                [
                    GridLayout(
                        [
                            [Label('Github repository: '), repo_label, HSpacer()],
                            [Label('Changelog: '), changelog_label, HSpacer()],
                        ],
                        [0, 0, 1]
                    ),
                    VSpacer(),
                ]
            )
        )
|
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from dace.util import getSite
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.view import BasicView
from lac.content.processes.services_processes.behaviors import (
SeeModerationService, SeeModerationUnitService)
from lac.content.service import (
ModerationService, ModerationServiceUnit)
from lac.utilities.utils import (
ObjectRemovedException, generate_navbars)
@view_config(
    name='seemoderationservice',
    context=ModerationService,
    renderer='pontus:templates/views_templates/grid.pt',
    )
class SeeModerationServiceView(BasicView):
    """Grid view that renders a moderation service together with its
    navigation bars."""
    title = ''
    name = 'seemoderationservice'
    behaviors = [SeeModerationService]
    template = 'lac:views/services_processes/moderation_service/templates/see_moderation_service.pt'
    viewid = 'seemoderationservice'

    def update(self):
        self.execute(None)
        try:
            navbars = generate_navbars(self, self.context, self.request)
        except ObjectRemovedException:
            # The object disappeared while rendering: go back to the root.
            return HTTPFound(self.request.resource_url(getSite(), ''))

        body = self.content(
            args={'object': self.context,
                  'navbar_body': navbars['navbar_body']},
            template=self.template)['body']
        item = self.adapt_item(body, self.viewid)
        item['messages'] = navbars['messages']
        item['isactive'] = navbars['isactive']

        result = dict(navbars['resources'])
        result['coordinates'] = {self.coordinates: [item]}
        return result
@view_config(
    name='seemoderationserviceunit',
    context=ModerationServiceUnit,
    renderer='pontus:templates/views_templates/grid.pt',
    )
class SeeModerationServiceUnitView(SeeModerationServiceView):
    """Same rendering as the service view, but registered for a single
    moderation unit and driven by the unit-level behavior."""
    title = ''
    name = 'seemoderationserviceunit'
    behaviors = [SeeModerationUnitService]
    # Reuses the service template; only context/behavior/viewid differ.
    template = 'lac:views/services_processes/moderation_service/templates/see_moderation_service.pt'
    viewid = 'seemoderationserviceunit'
# Register each behavior with the view that renders it.
DEFAULTMAPPING_ACTIONS_VIEWS.update({
    SeeModerationService: SeeModerationServiceView,
    SeeModerationUnitService: SeeModerationServiceUnitView,
})
|
"""Meanshift clustering.
Authors: Conrad Lee conradlee@gmail.com
Alexandre Gramfort alexandre.gramfort@inria.fr
Gael Varoquaux gael.varoquaux@normalesup.org
"""
from collections import defaultdict
import numpy as np
from ..utils import extmath, check_random_state
from ..base import BaseEstimator
from ..ball_tree import BallTree
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
    """Estimate the bandwidth to use with the MeanShift algorithm.

    Parameters
    ----------
    X: array [n_samples, n_features]
        Input points
    quantile: float, default 0.3
        Should be between [0, 1]; 0.5 means the median of all pairwise
        distances is used.
    n_samples: int
        The number of samples to use. If None, all samples are used.
    random_state: int or RandomState
        Pseudo number generator state used for random sampling.

    Returns
    -------
    bandwidth: float
        The bandwidth parameter
    """
    random_state = check_random_state(random_state)
    if n_samples is not None:
        # Work on a random subsample to keep the neighbor queries affordable.
        subsample = random_state.permutation(X.shape[0])[:n_samples]
        X = X[subsample]
    n_neighbors = int(X.shape[0] * quantile)
    distances, _ = BallTree(X).query(X, n_neighbors, return_distance=True)
    # Average, over all points, of the distance to the k-th nearest neighbor.
    return np.mean(np.max(distances, axis=1))
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
               cluster_all=True, max_iterations=300):
    """Perform MeanShift Clustering of data using a flat kernel
    Seed using a binning technique for scalability.
    Parameters
    ----------
    X : array [n_samples, n_features]
        Input points
    bandwidth : float, optional
        kernel bandwidth
        If bandwidth is not defined, it is set using
        a heuristic given by the median of all pairwise distances
    seeds: array [n_seeds, n_features]
        point used as initial kernel locations
    bin_seeding: boolean
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        default value: False
        Ignored if seeds argument is not None
    cluster_all: boolean, default True
        If true, every point is assigned to the nearest cluster center;
        otherwise points farther than ``bandwidth`` from every center
        receive the label -1.
    max_iterations: int, default 300
        Maximum number of gradient-ascent iterations per seed.
    Returns
    -------
    cluster_centers : array [n_clusters, n_features]
        Coordinates of cluster centers
    labels : array [n_samples]
        cluster labels for each point
    Notes
    -----
    See examples/plot_meanshift.py for an example.
    """
    if bandwidth is None:
        bandwidth = estimate_bandwidth(X)
    if seeds is None:
        if bin_seeding:
            seeds = get_bin_seeds(X, bandwidth)
        else:
            seeds = X
    n_points, n_features = X.shape
    stop_thresh = 1e-3 * bandwidth  # when mean has converged
    center_intensity_dict = {}
    ball_tree = BallTree(X)  # to efficiently look up nearby points
    # For each seed, climb gradient until convergence or max_iterations
    for my_mean in seeds:
        completed_iterations = 0
        while True:
            # Find mean of points within bandwidth
            points_within = X[ball_tree.query_radius([my_mean], bandwidth)[0]]
            if len(points_within) == 0:
                break  # Depending on seeding strategy this condition may occur
            my_old_mean = my_mean  # save the old mean
            my_mean = np.mean(points_within, axis=0)
            # If converged or at max_iterations, add the cluster
            if extmath.norm(my_mean - my_old_mean) < stop_thresh or \
                   completed_iterations == max_iterations:
                center_intensity_dict[tuple(my_mean)] = len(points_within)
                break
            completed_iterations += 1
    # POST PROCESSING: remove near duplicate points
    # If the distance between two kernels is less than the bandwidth,
    # then we have to remove one because it is a duplicate. Remove the
    # one with fewer points.
    sorted_by_intensity = sorted(center_intensity_dict.items(),
                                 key=lambda tup: tup[1], reverse=True)
    sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
    # Builtin ``bool``/``int`` dtypes: the ``np.bool``/``np.int`` aliases
    # were deprecated and removed in NumPy 1.24 (they were plain aliases,
    # so behavior is unchanged).
    unique = np.ones(len(sorted_centers), dtype=bool)
    cc_tree = BallTree(sorted_centers)
    for i, center in enumerate(sorted_centers):
        if unique[i]:
            neighbor_idxs = cc_tree.query_radius([center], bandwidth)[0]
            unique[neighbor_idxs] = 0
            unique[i] = 1  # leave the current point as unique
    cluster_centers = sorted_centers[unique]
    # ASSIGN LABELS: a point belongs to the cluster that it is closest to
    centers_tree = BallTree(cluster_centers)
    labels = np.zeros(n_points, dtype=int)
    distances, idxs = centers_tree.query(X, 1)
    if cluster_all:
        labels = idxs.flatten()
    else:
        labels[:] = -1
        bool_selector = distances.flatten() <= bandwidth
        labels[bool_selector] = idxs.flatten()[bool_selector]
    return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
    """Finds seeds for mean_shift
    Finds seeds by first binning data onto a grid whose lines are
    spaced bin_size apart, and then choosing those bins with at least
    min_bin_freq points.
    Parameters
    ----------
    X : array [n_samples, n_features]
        Input points, the same points that will be used in mean_shift
    bin_size: float
        Controls the coarseness of the binning. Smaller values lead
        to more seeding (which is computationally more expensive). If you're
        not sure how to set this, set it to the value of the bandwidth used
        in clustering.mean_shift
    min_bin_freq: integer, default 1
        Only bins with at least min_bin_freq will be selected as seeds.
        Raising this value decreases the number of seeds found, which
        makes mean_shift computationally cheaper.
    Returns
    -------
    bin_seeds : array [n_samples, n_features]
        points used as initial kernel positions in clustering.mean_shift
    """
    # Bin points: integer-divide every coordinate by bin_size.
    # ``astype`` replaces ``np.cast[np.int32]`` (removed in NumPy 2.0);
    # both truncate towards zero.
    bin_sizes = defaultdict(int)
    for point in X:
        binned_point = (point / bin_size).astype(np.int32)
        bin_sizes[tuple(binned_point)] += 1
    # Select only those bins as seeds which have enough members.
    # ``dict.items()`` replaces the Python-2-only ``iteritems()``.
    bin_seeds = np.array([point for point, freq in bin_sizes.items()
                          if freq >= min_bin_freq], dtype=np.float32)
    bin_seeds = bin_seeds * bin_size
    return bin_seeds
class MeanShift(BaseEstimator):
    """MeanShift clustering using a flat kernel.

    Parameters
    ----------
    bandwidth: float, optional
        Bandwidth used in the RBF kernel.  If not set, it is estimated;
        see clustering.estimate_bandwidth.
    seeds: array [n_samples, n_features], optional
        Seeds used to initialize kernels. If not set, the seeds are
        calculated by clustering.get_bin_seeds with bandwidth as the grid
        size and default values for other parameters.
    cluster_all: boolean, default True
        If true, then all points are clustered, even those orphans that
        are not within any kernel (orphans are assigned to the nearest
        kernel).  If false, orphans are given cluster label -1.

    Methods
    -------
    fit(X):
        Compute MeanShift clustering

    Attributes
    ----------
    cluster_centers_: array, [n_clusters, n_features]
        Coordinates of cluster centers
    labels_:
        Labels of each point

    Notes
    -----
    Reference:
    Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
    feature space analysis". IEEE Transactions on Pattern Analysis and
    Machine Intelligence. 2002. pp. 603-619.

    Scalability:
    With a flat kernel and a Ball Tree to look up kernel members, the
    complexity is roughly O(T*n*log(n)) in lower dimensions (n samples,
    T seed points), tending towards O(T*n^2) in higher dimensions.
    Scalability can be boosted by using fewer seeds, for example by using
    a higher value of min_bin_freq in the get_bin_seeds function.  Note
    that the estimate_bandwidth function is much less scalable than the
    mean shift algorithm and will be the bottleneck if it is used.
    """

    def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
                 cluster_all=True):
        self.bandwidth = bandwidth
        self.seeds = seeds
        self.bin_seeding = bin_seeding
        self.cluster_all = cluster_all
        # Populated by fit().
        self.cluster_centers_ = None
        self.labels_ = None

    def fit(self, X):
        """Compute MeanShift clustering on the points in X.

        Parameters
        -----------
        X : array [n_samples, n_features]
            Input points
        """
        centers, labels = mean_shift(X,
                                     bandwidth=self.bandwidth,
                                     seeds=self.seeds,
                                     bin_seeding=self.bin_seeding,
                                     cluster_all=self.cluster_all)
        self.cluster_centers_ = centers
        self.labels_ = labels
        return self
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The experiment with 10 Hz/5Hz, wisp, attention, 70, cA 7, delta, theta, alpha low, alpha high, beta low, beta high, gamm low, batch size = 5 and
balanced data set
@author: yaric
"""
import experiment as ex
import config
from time import time
# Experiment hyper-parameters (see the module docstring for the setup).
n_hidden = 7
batch_size = 5
experiment_name = 'cA_%d_dt-th-a_l-a_h-b_l-b_h-g_l' % (n_hidden) # will be used as parent dir for analyzer results
# The sample records identifiers
signal_ids = ['IO_10_2', 'IO_TXT', 'IO_SKY', 'KS_10_2', 'RO_10_2']
noise_ids = ['noise']
# Setup analyzer configuration
analyzer_config = ex.defaultAnalyzerConfig()
analyzer_config['batch_size'] = batch_size
analyzer_config['learning_rate'] = 0.1
analyzer_config['n_hidden'] = n_hidden
analyzer_config['training_epochs'] = 50000
analyzer_config['encoder'] = 'cA'
# Frequency bands fed to the analyzer (comma-separated, no gamma_h).
analyzer_config['bands'] = 'delta,theta,alpha_l,alpha_h,beta_l,beta_h,gamma_l'
start = time()
#
# Run analyzer
#
print("\nStart analysis with parameters:\n%s\n" % analyzer_config)
print("Start analysis for signal records: %s" % signal_ids)
ex.runEEGAnalyzerWithIDs(ids_list=signal_ids,
                         experiment_name=experiment_name,
                         a_config=analyzer_config)
print("Start analysis for noise records: %s" % noise_ids)
ex.runEEGAnalyzerWithIDs(ids_list=noise_ids,
                         experiment_name=experiment_name,
                         a_config=analyzer_config)
#
# Run classifiers
#
# Output layout: analyzer_out_dir/<experiment>/ for signal,
# analyzer_out_dir/<experiment>/<noise id>/ for noise.
signal_dir = "%s/%s" % (config.analyzer_out_dir, experiment_name)
noise_dir = "%s/%s/%s" % (config.analyzer_out_dir, experiment_name, noise_ids[0])
out_suffix = experiment_name
print("Run classifiers over analyzed records. \nSignal dir: %s\nNoise dir: %s"
      % (signal_dir, noise_dir))
ex.runClassifier(signal_dir=signal_dir,
                 signal_records=signal_ids,
                 noise_dir=noise_dir,
                 out_suffix=out_suffix)
print("\n\nExperiment %s took %.2f seconds.\n"
      % (experiment_name, time() - start))
|
from typing import Tuple, Union, Generator
from .program_members import (Attribute, Subroutine, Uniform, UniformBlock,
Varying)
__all__ = ['Program', 'detect_format']
class Program:
    '''
    A Program object represents fully processed executable code
    in the OpenGL Shading Language, for one or more Shader stages.
    In ModernGL, a Program object can be assigned to :py:class:`VertexArray` objects.
    The VertexArray object is capable of binding the Program object once the
    :py:meth:`VertexArray.render` or :py:meth:`VertexArray.transform` is called.
    Program objects has no method called ``use()``, VertexArrays encapsulate this mechanism.
    A Program object cannot be instantiated directly, it requires a context.
    Use :py:meth:`Context.program` to create one.
    Uniform buffers can be bound using :py:meth:`Buffer.bind_to_uniform_block`
    or can be set individually. For more complex binding yielding higher
    performance consider using :py:class:`moderngl.Scope`.
    '''
    __slots__ = ['mglo', '_members', '_subroutines', '_geom', '_glo', 'ctx', 'extra']
    def __init__(self):
        self.mglo = None  #: Internal representation for debug purposes only.
        self._members = {}  # name -> Uniform/UniformBlock/Subroutine/Attribute/Varying
        self._subroutines = None
        self._geom = (None, None, None)  # (input prim, output prim, max vertices)
        self._glo = None
        self.ctx = None  #: The context this object belongs to
        self.extra = None  #: Any - Attribute for storing user defined objects
        # Direct instantiation is forbidden; use Context.program() instead.
        raise TypeError()
    def __repr__(self):
        return '<Program: %d>' % self._glo
    def __eq__(self, other) -> bool:
        """Compares two programs opengl names (mglo).
        Returns:
            bool: If the programs have the same opengl name
        Example::
            # True if the internal opengl name is the same
            program_1 == program_2
        """
        # Identity comparison of the internal objects, not value equality.
        return type(self) is type(other) and self.mglo is other.mglo
    def __getitem__(self, key) -> Union[Uniform, UniformBlock, Subroutine, Attribute, Varying]:
        """Get a member such as uniforms, uniform blocks, subroutines,
        attributes and varyings by name.
        Raises KeyError if no member with that name exists.
        .. code-block:: python
            # Get a uniform
            uniform = program['color']
            # Uniform values can be set on the returned object
            # or the `__setitem__` shortcut can be used.
            program['color'].value = 1.0, 1.0, 1.0, 1.0
            # Still when writing byte data we need to use the `write()` method
            program['color'].write(buffer)
        """
        return self._members[key]
    def __setitem__(self, key, value):
        """Set a value of uniform or uniform block
        .. code-block:: python
            # Set a vec4 uniform
            uniform['color'] = 1.0, 1.0, 1.0, 1.0
            # Optionally we can store references to a member and set the value directly
            uniform = program['color']
            uniform.value = 1.0, 0.0, 0.0, 0.0
            uniform = program['cameraMatrix']
            uniform.write(camera_matrix)
        """
        self._members[key].value = value
    def __iter__(self) -> Generator[str, None, None]:
        """Yields the internal members names as strings.
        This includes all members such as uniforms, attributes etc.
        Example::
            # Print member information
            for name in program:
                member = program[name]
                print(name, type(member), member)
        Output::
            vert <class 'moderngl.program_members.attribute.Attribute'> <Attribute: 0>
            vert_color <class 'moderngl.program_members.attribute.Attribute'> <Attribute: 1>
            gl_InstanceID <class 'moderngl.program_members.attribute.Attribute'> <Attribute: -1>
            rotation <class 'moderngl.program_members.uniform.Uniform'> <Uniform: 0>
            scale <class 'moderngl.program_members.uniform.Uniform'> <Uniform: 1>
        We can filter on member type if needed::
            for name in prog:
                member = prog[name]
                if isinstance(member, moderngl.Uniform):
                    print("Uniform", name, member)
        or a less verbose version using dict comprehensions::
            uniforms = {name: self.prog[name] for name in self.prog
                        if isinstance(self.prog[name], moderngl.Uniform)}
            print(uniforms)
        Output::
            {'rotation': <Uniform: 0>, 'scale': <Uniform: 1>}
        """
        yield from self._members
    @property
    def geometry_input(self) -> int:
        '''
        int: The geometry input primitive.
        The GeometryShader's input primitive if the GeometryShader exists.
        The geometry input primitive will be used for validation.
        '''
        return self._geom[0]
    @property
    def geometry_output(self) -> int:
        '''
        int: The geometry output primitive.
        The GeometryShader's output primitive if the GeometryShader exists.
        '''
        return self._geom[1]
    @property
    def geometry_vertices(self) -> int:
        '''
        int: The maximum number of vertices that
        the geometry shader will output.
        '''
        return self._geom[2]
    @property
    def subroutines(self) -> Tuple[str, ...]:
        '''
        tuple: The subroutine uniforms.
        '''
        return self._subroutines
    @property
    def glo(self) -> int:
        '''
        int: The internal OpenGL object.
        This values is provided for debug purposes only.
        '''
        return self._glo
    def get(self, key, default) -> Union[Uniform, UniformBlock, Subroutine, Attribute, Varying]:
        '''
        Returns a Uniform, UniformBlock, Subroutine, Attribute or Varying.
        Args:
            default: This is the value to be returned in case key does not exist.
        Returns:
            :py:class:`Uniform`, :py:class:`UniformBlock`, :py:class:`Subroutine`,
            :py:class:`Attribute` or :py:class:`Varying`
        '''
        return self._members.get(key, default)
    def release(self) -> None:
        '''
        Release the ModernGL object.
        '''
        # Frees the underlying OpenGL resources via the internal object.
        self.mglo.release()
def detect_format(program, attributes, mode='mgl') -> str:
    '''
    Detect the vertex attribute format (without padding) for the given
    program attributes.
    Args:
        program (Program): The program.
        attributes (list): A list of attribute names.
    Returns:
        str
    '''
    # Translate shape codes that differ between the two output modes.
    mgl_shape_map = {
        'd': 'f8',
        'I': 'u',
    }

    def describe(attribute):
        '''For internal use only: (component count, format code).'''
        count = attribute.array_length * attribute.dimension
        if mode == 'mgl':
            # moderngl attribute format uses f, i and u
            return count, mgl_shape_map.get(attribute.shape) or attribute.shape
        if mode == 'struct':
            # struct attribute format uses f, d, i and I
            return count, attribute.shape
        raise ValueError('invalid format mode: {}'.format(mode))

    return ' '.join('%d%s' % describe(program[name]) for name in attributes)
|
# This code demonstrates how the eBot can be used to detect an obstacle and turn left/right/stop.
# All sonar values are reported during the exercise.
# Copyright (c) 2014, Erik Wilhelm
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by Edgebotix.
# 4. Neither the name of the SUTD nor Edgebotix nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY ERIK WILHELM ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ERIK WILHELM BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
# Hard-coded path to the local eBot-API checkout (Windows-specific).
sys.path.insert(0, 'C:\Users\Erik Wilhelm\Documents\GitHub\eBot-API')
from eBot import *
#Create new instance of eBot - connects to first eBot the computer is connected to
myEBot = eBot()
myEBot.connect()
# NOTE(review): `myvalue` is initialised but never used below — confirm intent.
myvalue = [0, 0, 0, 0, 0, 0]
myEBot.halt()
# wait before entering loop
# NOTE(review): `sleep` is presumably re-exported by the eBot wildcard import
# (it is not imported explicitly here) — verify.
sleep(1)
t_run=300 #number of loop iterations (not seconds) to run for
thresh=0.300 #30 cm threshold for turning
myEBot.wheels(1, 1) #set the robot in motion, full speed ahead!
# Main control loop: read sonars each iteration and steer away from obstacles.
for i in range(1, t_run, 1):
    sonars = myEBot.robot_uS()
    if sonars[2] < thresh: #obstacle detected, turn!!!
        sleep(1) #wait a moment
        myEBot.wheels(-1, 1) #turn left until no more obstacle
        #myEBot.wheels(-1, 1) #turn right until no more obstacle
    else:
        myEBot.wheels(1, 1)
    # Report all sonar readings every iteration (Python 2 print statement).
    print sonars
# Stop the motors, give the robot time to settle, then drop the connection.
myEBot.halt()
sleep(4)
myEBot.close()
|
# [h] set advance width of selected glyphs
### options `split difference` and `relative split`
### suggested and funded by Bas Jacobs / Underware
# imports
from mojo.roboFont import CurrentFont, CurrentGlyph
from vanilla import *
from hTools2 import hDialog
from hTools2.dialogs.misc import Spinner
from hTools2.modules.fontutils import get_glyphs
from hTools2.modules.glyphutils import center_glyph
from hTools2.modules.messages import no_font_open, no_glyph_selected
# objects
class setWidthDialog(hDialog):
    """A dialog to set the advance width of the selected glyphs.

    The width can be set to an absolute value, or increased/decreased by a
    given amount. Optionally the glyph outlines are re-centered, or the
    width difference is split between the side margins (absolutely or
    proportionally to the existing margins).

    .. image:: imgs/glyphs/width-set.png
    """
    # attributes
    # default value shown in the width spinner
    _width_ = 400
    # human-readable labels for the three width modes ('=', '+', '-')
    _modes = [ 'set equal to', 'increase by', 'decrease by' ]
    # index into ``_modes``, selected via the RadioGroup
    _mode = 0
    # methods
    def __init__(self):
        """Build the floating window with all controls and open it."""
        self.title = 'width'
        self.height = self.button_height + (self.text_height * 5) + self.nudge_button + (self.padding_y * 6)
        self.w = FloatingWindow((self.width, self.height), self.title)
        # left
        x = self.padding_x
        y = self.padding_y
        # mode
        self.w.width_mode = RadioGroup(
                    (x, y,
                    -self.padding_x,
                    self.text_height),
                    ['=', '+', '-'],
                    sizeStyle=self.size_style,
                    callback=self.mode_callback,
                    isVertical=False)
        self.w.width_mode.set(0)
        # width value
        x = 0
        y += (self.text_height + self.padding_y)
        self.w.spinner = Spinner(
                    (x, y),
                    default=self._width_,
                    integer=True,
                    label='width')
        # center
        x = self.padding_x
        y += self.w.spinner.getPosSize()[3]
        self.w.center_checkbox = CheckBox(
                    (x, y,
                    -self.padding_x,
                    self.text_height),
                    "center glyphs",
                    value=False,
                    sizeStyle=self.size_style,
                    callback=self.center_callback)
        # split difference
        y += self.text_height
        self.w.split_checkbox = CheckBox(
                    (x, y,
                    -self.padding_x,
                    self.text_height),
                    "split difference",
                    value=False,
                    sizeStyle=self.size_style,
                    callback=self.split_callback)
        # split relative
        y += self.text_height
        self.w.split_relative_checkbox = CheckBox(
                    (x, y,
                    -self.padding_x,
                    self.text_height),
                    "relative split",
                    value=False,
                    sizeStyle=self.size_style,
                    callback=self.split_relative_callback)
        # apply button
        x = self.padding_x
        y += (self.text_height + self.padding_y)
        self.w.button_apply = SquareButton(
                    (x, y,
                    -self.padding_x,
                    self.button_height),
                    "apply",
                    callback=self.apply_callback,
                    sizeStyle=self.size_style)
        # open window
        self.w.open()
    # callbacks
    def mode_callback(self, sender):
        """Store the selected width mode (0: '=', 1: '+', 2: '-')."""
        self._mode = self.w.width_mode.get()
    def center_callback(self, sender):
        """'center glyphs' is mutually exclusive with both split options."""
        if sender.get():
            if self.w.split_checkbox.get():
                self.w.split_checkbox.set(False)
            if self.w.split_relative_checkbox.get():
                self.w.split_relative_checkbox.set(False)
    def split_callback(self, sender):
        """'split difference' is mutually exclusive with the other options."""
        if sender.get():
            if self.w.center_checkbox.get():
                self.w.center_checkbox.set(False)
            if self.w.split_relative_checkbox.get():
                self.w.split_relative_checkbox.set(False)
    def split_relative_callback(self, sender):
        """'relative split' is mutually exclusive with the other options."""
        if sender.get():
            if self.w.center_checkbox.get():
                self.w.center_checkbox.set(False)
            if self.w.split_checkbox.get():
                self.w.split_checkbox.set(False)
    # apply
    def set_width(self, glyph, width, mode=None):
        """Apply the new advance width to one glyph, honoring the current
        +/-/= mode and the optional centering/splitting mode, inside an
        undo step."""
        # store old values
        old_left = glyph.leftMargin
        old_right = glyph.rightMargin
        old_width = glyph.width
        glyph_width = old_width - (old_left + old_right)
        # save undo state
        glyph.prepareUndo('set glyph width')
        # add value
        if self._mode == 1:
            new_width = old_width + width
        # subtract value
        elif self._mode == 2:
            new_width = old_width - width
        # equal to value
        else:
            new_width = width
        # center glyph
        if mode == 'center':
            glyph.width = new_width
            center_glyph(glyph)
        # split difference
        elif mode == 'split difference':
            # calculate new left margin
            # NOTE(review): the bare except presumably guards against
            # margins being None for empty glyphs — confirm.
            try:
                diff = new_width - old_width
                new_left = old_left + (diff / 2)
            except:
                new_left = 0
            # set margins
            glyph.leftMargin = new_left
            glyph.width = new_width
        # split relative
        elif mode == 'split relative':
            # calculate new left margin
            # NOTE(review): under Python 2, `/` truncates when both margins
            # are ints; the except also swallows ZeroDivisionError when
            # old_left == 0 — confirm both are intended.
            try:
                whitespace = new_width - glyph_width
                new_left = whitespace / ( 1 + (old_right / old_left) )
            except:
                new_left = 0
            # set margins
            glyph.leftMargin = new_left
            glyph.width = new_width
        # set width
        else:
            glyph.width = new_width
        # done!
        glyph.update()
        glyph.performUndo()
    def apply_callback(self, sender):
        """Read the dialog state and apply the width change to every
        selected glyph in the current font."""
        f = CurrentFont()
        if f is not None:
            # iterate over glyphs
            glyph_names = get_glyphs(f)
            if len(glyph_names) > 0:
                # get parameters
                width = int(self.w.spinner.value.get())
                center = self.w.center_checkbox.get()
                split = self.w.split_checkbox.get()
                split_relative = self.w.split_relative_checkbox.get()
                # NOTE(review): `boolstring` is never used below — confirm.
                boolstring = ( False, True )
                # set sidebearings mode
                if center:
                    w_mode = 'center'
                elif split:
                    w_mode = 'split difference'
                elif split_relative:
                    w_mode = 'split relative'
                else:
                    w_mode = None
                # print info
                print 'setting character widths...\n'
                print '\t%s %s' % (self._modes[self._mode], width)
                print '\tmode: %s' % w_mode
                print
                print '\t',
                for glyph_name in glyph_names:
                    print glyph_name,
                    self.set_width(f[glyph_name], width, w_mode)
                f.update()
                print
                print '\n...done.\n'
            # no glyph selected
            else:
                print no_glyph_selected
        # no font open
        else:
            print no_font_open
|
import unittest
import os
from happana.template import SafeTester
from happana.template import RiskyTester
class SafeDBTester(SafeTester):
    """ General template for safe "DB" modules testing """

    def __init__(self, test_name):
        # Delegate all setup to the generic safe-test template.
        SafeTester.__init__(self, test_name)

    def set_dir(self):
        """Point the working/data directories at per-test locations.

        Layout (relative to this file):
          - working dir: tmp/<test_class>/<test_function>
          - data dir:    data/<test_class>
        """
        # os.path.join is variadic, so the nested two-argument calls of the
        # original collapse into single calls with identical results.
        base = os.path.dirname(__file__)
        self.working_dir = os.path.join(base, 'tmp',
                                        self.test_class,
                                        self.test_function)
        self.data_dir = os.path.join(base, 'data', self.test_class)
#class RiskyDBTester(RiskyTester):
# """ General template for risky "DB" modules testing """
#
# def __init__(self, test_name):
# RiskyTester.__init__(self, test_name)
#
# def set_dir(self):
# self.working_dir = happana_settings.happana_WORKING_DIR
# self.data_dir = os.path.join(os.path.join(os.path.dirname(__file__),
# 'big_data'),
# self.test_class)
|
from __future__ import print_function
import time
import os
from graph import Graph
from graph import make
from graph import GraphException
from graph import Matrix
def run(name):
    """Time one depth-first and one breadth-first traversal of the graph
    built from *name*.

    Returns a two-element list ``[bfs_seconds, dfs_seconds]``.
    """
    g = make(name)
    timings = [0, 0]
    t0 = time.time()
    g.dfs(0)
    timings[1] = time.time() - t0
    t0 = time.time()
    g.bfs(0)
    timings[0] = time.time() - t0
    return timings
def go():
    """Benchmark BFS vs DFS on every graph file under ./graphs and report
    which traversal was faster for each graph.

    Bug fix: the original printed "dfs is faster" when ``d > b`` — but the
    values are elapsed times, so a LARGER dfs time means dfs was SLOWER.
    The comparison is now ``d < b``. Also removes the unused formatted
    strings (`second`, `third`) that fed only a commented-out print.
    """
    names = list()
    bfs = list()
    dfs = list()
    for name in os.listdir("./graphs"):
        names.append(name)
        res = run("./graphs/" + name)
        bfs.append(res[0])
        dfs.append(res[1])
    # Report per-graph results; iterate the three parallel lists in lockstep.
    for name, b, d in zip(names, bfs, dfs):
        first = "%s" % str(name).ljust(30, " ")
        print("dfs: " + str(d) + " bfs: " + str(b))
        if d < b:
            print("dfs is faster on " + first + " by " + str(abs(b-d)) + " seconds")
        else:
            print("bfs is faster on " + first + " by " + str(abs(b-d)) + " seconds")
go()
|
"""Callback management utilities"""
import collections
from ctypes import * # pylint: disable=wildcard-import,unused-wildcard-import
import logging
class CallbackManager(object):
    """Tracks ctypes callback hooks installed into a DLL.

    References to installed callables are retained on the manager so the
    garbage collector cannot reclaim them while the DLL may still call
    through the stored function pointer.
    """

    def __init__(self, dll):
        self.dll = dll
        self.hooks = collections.defaultdict(dict)
        self.logger = logging.getLogger(__name__)

    def install(self, name, func):
        """
        Install a callback function ensuring a reference is kept.
        :param name: name of function to install
        :param func: callback function to install
        """
        self.logger.debug('installing callback for %s in %s', name, self.dll)
        self._install(name, func)

    def uninstall(self, name):
        """
        Remove an installed callback function.
        :param name: name of function to uninstall
        """
        self.logger.debug('uninstalling callback for %s in %s', name, self.dll)
        self._install(name)

    def _install(self, name, func=None):
        """Write *func*'s address (NULL when ``func`` is None) into the
        DLL's exported pointer slot *name*, and retain the callable."""
        # pylint: disable=no-member
        slot = c_void_p.in_dll(self.dll, name)
        slot.value = cast(func, c_void_p).value
        # Keep a reference so the callback isn't garbage-collected while
        # the DLL might still invoke it.
        self.hooks[name] = func
|
import gzip
import logging
from overrides import overrides
import numpy
import torch
from torch.nn.functional import embedding
import h5py
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.modules.time_distributed import TimeDistributed
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@TokenEmbedder.register("embedding")
class Embedding(TokenEmbedder):
    """
    A more featureful embedding module than the default in Pytorch. Adds the ability to:
    1. embed higher-order inputs
    2. pre-specify the weight matrix
    3. use a non-trainable embedding
    4. project the resultant embeddings to some other dimension (which only makes sense with
    non-trainable embeddings).
    5. build all of this easily ``from_params``
    Note that if you are using our data API and are trying to embed a
    :class:`~allennlp.data.fields.TextField`, you should use a
    :class:`~allennlp.modules.TextFieldEmbedder` instead of using this directly.
    Parameters
    ----------
    num_embeddings : int
        Size of the dictionary of embeddings (vocabulary size).
    embedding_dim : int
        The size of each embedding vector.
    projection_dim : int, (optional, default=None)
        If given, we add a projection layer after the embedding layer.  This really only makes
        sense if ``trainable`` is ``False``.
    weight : torch.FloatTensor, (optional, default=None)
        A pre-initialised weight matrix for the embedding lookup, allowing the use of
        pretrained vectors.
    padding_index : int, (optional, default=None)
        If given, pads the output with zeros whenever it encounters the index.
    trainable : bool, (optional, default=True)
        Whether or not to optimize the embedding parameters.
    max_norm : float, (optional, default=None)
        If given, will renormalize the embeddings to always have a norm lesser than this
    norm_type : float, (optional, default=2)
        The p of the p-norm to compute for the max_norm option
    scale_grad_by_freq : boolean, (optional, default=False)
        If given, this will scale gradients by the frequency of the words in the mini-batch.
    sparse : bool, (optional, default=False)
        Whether or not the Pytorch backend should use a sparse representation of the embedding weight.
    Returns
    -------
    An Embedding module.
    """
    def __init__(self,
                 num_embeddings: int,
                 embedding_dim: int,
                 projection_dim: int = None,
                 weight: torch.FloatTensor = None,
                 padding_index: int = None,
                 trainable: bool = True,
                 max_norm: float = None,
                 norm_type: float = 2.,
                 scale_grad_by_freq: bool = False,
                 sparse: bool = False) -> None:
        super(Embedding, self).__init__()
        self.num_embeddings = num_embeddings
        self.padding_index = padding_index
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self.sparse = sparse
        self.output_dim = projection_dim or embedding_dim
        if weight is None:
            # No pretrained weights supplied: initialise from N(0, 1).
            weight = torch.FloatTensor(num_embeddings, embedding_dim)
            self.weight = torch.nn.Parameter(weight, requires_grad=trainable)
            self.weight.data.normal_(0, 1)
        else:
            if weight.size() != (num_embeddings, embedding_dim):
                raise ConfigurationError("A weight matrix was passed with contradictory embedding shapes.")
            self.weight = torch.nn.Parameter(weight, requires_grad=trainable)
        if self.padding_index is not None:
            # The padding row is zeroed so padding tokens embed to zeros.
            self.weight.data[self.padding_index].fill_(0)
        if projection_dim:
            self._projection = torch.nn.Linear(embedding_dim, projection_dim)
        else:
            self._projection = None
    @overrides
    def get_output_dim(self) -> int:
        return self.output_dim
    @overrides
    def forward(self, inputs):  # pylint: disable=arguments-differ
        original_inputs = inputs
        if original_inputs.dim() > 2:
            # Flatten higher-order inputs to 2D for the embedding lookup;
            # the extra dimensions are restored below.
            inputs = inputs.view(-1, inputs.size(-1))
        embedded = embedding(inputs, self.weight,
                             max_norm=self.max_norm,
                             norm_type=self.norm_type,
                             scale_grad_by_freq=self.scale_grad_by_freq,
                             sparse=self.sparse)
        if original_inputs.dim() > 2:
            view_args = list(original_inputs.size()) + [embedded.size(-1)]
            embedded = embedded.view(*view_args)
        if self._projection:
            projection = self._projection
            # Wrap the projection in TimeDistributed once per extra
            # dimension so it applies over the trailing embedding dim.
            for _ in range(embedded.dim() - 2):
                projection = TimeDistributed(projection)
            embedded = projection(embedded)
        return embedded
    @classmethod
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'Embedding':
        """
        We need the vocabulary here to know how many items we need to embed, and we look for a
        ``vocab_namespace`` key in the parameter dictionary to know which vocabulary to use.  If
        you know beforehand exactly how many embeddings you need, or aren't using a vocabulary
        mapping for the things getting embedded here, then you can pass in the ``num_embeddings``
        key directly, and the vocabulary will be ignored.
        """
        num_embeddings = params.pop('num_embeddings', None)
        vocab_namespace = params.pop("vocab_namespace", "tokens")
        if num_embeddings is None:
            num_embeddings = vocab.get_vocab_size(vocab_namespace)
        embedding_dim = params.pop('embedding_dim')
        pretrained_file = params.pop("pretrained_file", None)
        projection_dim = params.pop("projection_dim", None)
        trainable = params.pop("trainable", True)
        padding_index = params.pop('padding_index', None)
        max_norm = params.pop('max_norm', None)
        norm_type = params.pop('norm_type', 2.)
        scale_grad_by_freq = params.pop('scale_grad_by_freq', False)
        sparse = params.pop('sparse', False)
        params.assert_empty(cls.__name__)
        if pretrained_file:
            # If we're loading a saved model, we don't want to actually read a pre-trained
            # embedding file - the embeddings will just be in our saved weights, and we might not
            # have the original embedding file anymore, anyway.
            weight = _read_pretrained_embedding_file(pretrained_file,
                                                     embedding_dim,
                                                     vocab,
                                                     vocab_namespace)
        else:
            weight = None
        return cls(num_embeddings=num_embeddings,
                   embedding_dim=embedding_dim,
                   projection_dim=projection_dim,
                   weight=weight,
                   padding_index=padding_index,
                   trainable=trainable,
                   max_norm=max_norm,
                   norm_type=norm_type,
                   scale_grad_by_freq=scale_grad_by_freq,
                   sparse=sparse)
def _read_pretrained_embedding_file(embeddings_filename: str,
                                    embedding_dim: int,
                                    vocab: Vocabulary,
                                    namespace: str = "tokens") -> torch.FloatTensor:
    """
    Reads a pre-trained embedding file and generates a weight matrix with
    embeddings initialized to the pre-trained embedding values.
    We use the ``Vocabulary`` to map from the word strings in the embeddings file to the indices
    that we need, and to know which words from the embeddings file we can safely ignore.
    Parameters
    ----------
    embeddings_filename : str, required.
        The path to a file containing pretrained embeddings. We support two file formats,
        gzipped-word2vec and hdf5. If the filename ends with '.hdf5' or '.h5' then we load from
        hdf5, otherwise assume gzipped-word2vec format.
    embedding_dim : int, required.
        The dimensionality of the embeddings in the file.
    vocab : Vocabulary, required.
        A Vocabulary object.
    namespace : str, (optional, default=tokens)
        The namespace of the vocabulary to find pretrained embeddings for.
    Returns
    -------
    A weight matrix with embeddings initialized from the read file.  The matrix has shape
    ``(vocab.get_vocab_size(namespace), embedding_dim)``, where the indices of words appearing in
    the pretrained embedding file are initialized to the pretrained embedding value.
    """
    # str.endswith accepts a tuple of suffixes: one call replaces the
    # original pair of slice comparisons. (The docstring's stale
    # ``trainable`` parameter, which this function never accepted, is gone.)
    if embeddings_filename.endswith(('.h5', '.hdf5')):
        return _read_pretrained_hdf5_format_embedding_file(embeddings_filename, embedding_dim,
                                                           vocab, namespace)
    # default to word2vec
    return _read_pretrained_word2vec_format_embedding_file(embeddings_filename, embedding_dim,
                                                           vocab, namespace)
def _read_pretrained_word2vec_format_embedding_file(embeddings_filename: str, # pylint: disable=invalid-name
                                                    embedding_dim: int,
                                                    vocab: Vocabulary,
                                                    namespace: str = "tokens") -> torch.FloatTensor:
    """
    Read from a gzipped-word2vec format file.  The embeddings file is assumed to be gzipped and
    space delimited, e.g. [word] [dim 1] [dim 2] ...
    The remainder of the docstring is identical to ``_read_pretrained_embedding_file``.
    """
    words_to_keep = set(vocab.get_index_to_token_vocabulary(namespace).values())
    vocab_size = vocab.get_vocab_size(namespace)
    embeddings = {}
    # First we read the embeddings from the file, only keeping vectors for the words we need.
    logger.info("Reading embeddings from file")
    with gzip.open(cached_path(embeddings_filename), 'rb') as embeddings_file:
        for line in embeddings_file:
            # The file is opened in binary mode, so each line is bytes and
            # must be decoded before splitting.
            fields = line.decode('utf-8').strip().split(' ')
            if len(fields) - 1 != embedding_dim:
                # Sometimes there are funny unicode parsing problems that lead to different
                # fields lengths (e.g., a word with a unicode space character that splits
                # into more than one column).  We skip those lines.  Note that if you have
                # some kind of long header, this could result in all of your lines getting
                # skipped.  It's hard to check for that here; you just have to look in the
                # embedding_misses_file and at the model summary to make sure things look
                # like they are supposed to.
                logger.warning("Found line with wrong number of dimensions (expected %d, was %d): %s",
                               embedding_dim, len(fields) - 1, line)
                continue
            word = fields[0]
            if word in words_to_keep:
                vector = numpy.asarray(fields[1:], dtype='float32')
                embeddings[word] = vector
    if not embeddings:
        raise ConfigurationError("No embeddings of correct dimension found; you probably "
                                 "misspecified your embedding_dim parameter, or didn't "
                                 "pre-populate your Vocabulary")
    # The mean/std of the pretrained vectors drive the random initialisation
    # of any vocabulary words missing from the file.
    all_embeddings = numpy.asarray(list(embeddings.values()))
    embeddings_mean = float(numpy.mean(all_embeddings))
    embeddings_std = float(numpy.std(all_embeddings))
    # Now we initialize the weight matrix for an embedding layer, starting with random vectors,
    # then filling in the word vectors we just read.
    logger.info("Initializing pre-trained embedding layer")
    embedding_matrix = torch.FloatTensor(vocab_size, embedding_dim).normal_(embeddings_mean,
                                                                            embeddings_std)
    for i in range(0, vocab_size):
        word = vocab.get_token_from_index(i, namespace)
        # If we don't have a pre-trained vector for this word, we'll just leave this row alone,
        # so the word has a random initialization.
        if word in embeddings:
            embedding_matrix[i] = torch.FloatTensor(embeddings[word])
        else:
            logger.debug("Word %s was not found in the embedding file. Initialising randomly.", word)
    # The weight matrix is initialized, so we construct and return the actual Embedding.
    return embedding_matrix
def _read_pretrained_hdf5_format_embedding_file(embeddings_filename: str, # pylint: disable=invalid-name
                                                embedding_dim: int,
                                                vocab: Vocabulary,
                                                namespace: str = "tokens") -> torch.FloatTensor:
    """
    Load a full embedding matrix from an hdf5 file. The matrix is expected
    under the 'embedding' key with shape ``(num_tokens, embedding_dim)``.
    """
    with h5py.File(embeddings_filename, 'r') as fin:
        embeddings = fin['embedding'][...]
    # Validate against the vocabulary before handing the matrix to torch.
    expected_shape = [vocab.get_vocab_size(namespace), embedding_dim]
    if list(embeddings.shape) != expected_shape:
        raise ConfigurationError(
                "Read shape {0} embeddings from the file, but expected {1}".format(
                        list(embeddings.shape), expected_shape))
    return torch.FloatTensor(embeddings)
|
# -*- test-case-name: txweb2.test.test_server -*-
##
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# Copyright (c) 2010-2014 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##
"""
This is a web-server which integrates with the twisted.internet
infrastructure.
"""
from __future__ import print_function
import cgi, time, urlparse
from urllib import quote, unquote
from urlparse import urlsplit
import weakref
from zope.interface import implements
from twisted.internet import defer
from twisted.python import failure
from twext.python.log import Logger
from txweb2 import http, iweb, fileupload, responsecode
from txweb2 import http_headers
from txweb2.filter.range import rangefilter
from txweb2 import error
from txweb2 import __version__ as web2_version
from twisted import __version__ as twisted_version
VERSION = "Twisted/%s TwistedWeb/%s" % (twisted_version, web2_version)
_errorMarker = object()
log = Logger()
def defaultHeadersFilter(request, response):
    """Fill in the 'server' and 'date' response headers when absent."""
    headers = response.headers
    if not headers.hasHeader('server'):
        headers.setHeader('server', VERSION)
    if not headers.hasHeader('date'):
        headers.setHeader('date', time.time())
    return response
# Apply this filter even to error responses.
defaultHeadersFilter.handleErrors = True
def preconditionfilter(request, response):
    """Check HTTP preconditions (If-Match etc.) for safe request methods."""
    if request.method == "GET" or request.method == "HEAD":
        http.checkPreconditions(request, response)
    return response
def doTrace(request):
    """Implement the TRACE method: echo the request line and all raw headers
    back to the client as a message/http response body."""
    request = iweb.IRequest(request)
    txt = "%s %s HTTP/%d.%d\r\n" % (request.method, request.uri,
                                    request.clientproto[0], request.clientproto[1])
    # Comprehension + single join replaces the original append loop with the
    # ambiguous variable name `l` (flagged by E741).
    headerLines = ["%s: %s\r\n" % (name, value)
                   for name, valuelist in request.headers.getAllRawHeaders()
                   for value in valuelist]
    txt += ''.join(headerLines)
    return http.Response(
        responsecode.OK,
        {'content-type': http_headers.MimeType('message', 'http')},
        txt)
def parsePOSTData(request, maxMem=100*1024, maxFields=1024,
                  maxSize=10*1024*1024):
    """
    Parse data of a POST request.
    @param request: the request to parse.
    @type request: L{txweb2.http.Request}.
    @param maxMem: maximum memory used during the parsing of the data.
    @type maxMem: C{int}
    @param maxFields: maximum number of form fields allowed.
    @type maxFields: C{int}
    @param maxSize: maximum size of file upload allowed.
    @type maxSize: C{int}
    @return: a deferred that will fire when the parsing is done. The deferred
        itself doesn't hold a return value, the request is modified directly.
    @rtype: C{defer.Deferred}
    """
    if request.stream.length == 0:
        return defer.succeed(None)
    ctype = request.headers.getHeader('content-type')
    if ctype is None:
        return defer.succeed(None)
    def updateArgs(data):
        # urlencoded bodies produce arguments only.
        args = data
        request.args.update(args)
    def updateArgsAndFiles(data):
        # multipart bodies produce both arguments and uploaded files.
        args, files = data
        request.args.update(args)
        request.files.update(files)
    def mimeError(f):
        # Renamed from `error`: the original name shadowed the module-level
        # `txweb2.error` import. Translates malformed MIME into a 400.
        f.trap(fileupload.MimeFormatError)
        raise http.HTTPError(
            http.StatusResponse(responsecode.BAD_REQUEST, str(f.value)))
    if (ctype.mediaType == 'application'
        and ctype.mediaSubtype == 'x-www-form-urlencoded'):
        d = fileupload.parse_urlencoded(request.stream)
        d.addCallbacks(updateArgs, mimeError)
        return d
    elif (ctype.mediaType == 'multipart'
          and ctype.mediaSubtype == 'form-data'):
        boundary = ctype.params.get('boundary')
        if boundary is None:
            return defer.fail(http.HTTPError(
                http.StatusResponse(
                    responsecode.BAD_REQUEST,
                    "Boundary not specified in Content-Type.")))
        d = fileupload.parseMultipartFormData(request.stream, boundary,
                                              maxMem, maxFields, maxSize)
        d.addCallbacks(updateArgsAndFiles, mimeError)
        return d
    else:
        return defer.fail(http.HTTPError(
            http.StatusResponse(
                responsecode.BAD_REQUEST,
                "Invalid content-type: %s/%s" % (
                    ctype.mediaType, ctype.mediaSubtype))))
class StopTraversal(object):
    """
    Sentinel returned as the path element of a locateChild result to
    indicate to Request._handleSegment that it should stop handling
    path segments and render the current resource.
    """
    pass
class Request(http.Request):
"""
vars:
site
remoteAddr
scheme
host
port
path
params
querystring
args
files
prepath
postpath
@ivar path: The path only (arguments not included).
@ivar args: All of the arguments, including URL and POST arguments.
@type args: A mapping of strings (the argument names) to lists of values.
i.e., ?foo=bar&foo=baz&quux=spam results in
{'foo': ['bar', 'baz'], 'quux': ['spam']}.
"""
implements(iweb.IRequest)
site = None
_initialprepath = None
responseFilters = [rangefilter, preconditionfilter,
error.defaultErrorHandler, defaultHeadersFilter]
def __init__(self, *args, **kw):
self.timeStamps = [("t", time.time(),)]
if kw.has_key('site'):
self.site = kw['site']
del kw['site']
if kw.has_key('prepathuri'):
self._initialprepath = kw['prepathuri']
del kw['prepathuri']
self._resourcesByURL = {}
self._urlsByResource = {}
# Copy response filters from the class
self.responseFilters = self.responseFilters[:]
self.files = {}
self.resources = []
http.Request.__init__(self, *args, **kw)
try:
self.serverInstance = self.chanRequest.channel.transport.server.port
except AttributeError:
self.serverInstance = "Unknown"
    def timeStamp(self, tag):
        # Record a named timing checkpoint for this request.
        self.timeStamps.append((tag, time.time(),))
def addResponseFilter(self, filter, atEnd=False, onlyOnce=False):
"""
Add a response filter to this request.
Response filters are applied to the response to this request in order.
@param filter: a callable which takes an response argument and returns
a response object.
@param atEnd: if C{True}, C{filter} is added at the end of the list of
response filters; if C{False}, it is added to the beginning.
@param onlyOnce: if C{True}, C{filter} is not added to the list of
response filters if it already in the list.
"""
if onlyOnce and filter in self.responseFilters:
return
if atEnd:
self.responseFilters.append(filter)
else:
self.responseFilters.insert(0, filter)
def unparseURL(self, scheme=None, host=None, port=None,
path=None, params=None, querystring=None, fragment=None):
"""Turn the request path into a url string. For any pieces of
the url that are not specified, use the value from the
request. The arguments have the same meaning as the same named
attributes of Request."""
if scheme is None: scheme = self.scheme
if host is None: host = self.host
if port is None: port = self.port
if path is None: path = self.path
if params is None: params = self.params
if querystring is None: querystring = self.querystring
if fragment is None: fragment = ''
if port == http.defaultPortForScheme.get(scheme, 0):
hostport = host
else:
hostport = host + ':' + str(port)
return urlparse.urlunparse((
scheme, hostport, path,
params, querystring, fragment))
    def _parseURL(self):
        """Split self.uri into scheme/host/path/params/querystring and
        compute self.args, self.prepath and self.postpath."""
        if self.uri[0] == '/':
            # Can't use urlparse for request_uri because urlparse
            # wants to be given an absolute or relative URI, not just
            # an abs_path, and thus gets '//foo' wrong.
            self.scheme = self.host = self.path = self.params = self.querystring = ''
            if '?' in self.uri:
                self.path, self.querystring = self.uri.split('?', 1)
            else:
                self.path = self.uri
            if ';' in self.path:
                self.path, self.params = self.path.split(';', 1)
        else:
            # It is an absolute uri, use standard urlparse
            (self.scheme, self.host, self.path,
             self.params, self.querystring, fragment) = urlparse.urlparse(self.uri)
        if self.querystring:
            # Second argument True keeps blank values ("?a=&b") in the dict.
            self.args = cgi.parse_qs(self.querystring, True)
        else:
            self.args = {}
        # NOTE: Python 2 map() returns a list; the slicing/comparison below
        # relies on that.
        path = map(unquote, self.path[1:].split('/'))
        if self._initialprepath:
            # We were given an initial prepath -- this is for supporting
            # CGI-ish applications where part of the path has already
            # been processed
            prepath = map(unquote, self._initialprepath[1:].split('/'))
            if path[:len(prepath)] == prepath:
                self.prepath = prepath
                self.postpath = path[len(prepath):]
            else:
                self.prepath = []
                self.postpath = path
        else:
            self.prepath = []
            self.postpath = path
        #print("_parseURL", self.uri, (self.uri, self.scheme, self.host, self.path, self.params, self.querystring))
def _schemeFromPort(self, port):
"""
Try to determine the scheme matching the supplied server port. This is needed in case
where a device in front of the server is changing the scheme (e.g. decoding SSL) but not
rewriting the scheme in URIs returned in responses (e.g. in Location headers). This could trick
clients into using an inappropriate scheme for subsequent requests. What we should do is
take the port number from the Host header or request-URI and map that to the scheme that
matches the service we configured to listen on that port.
@param port: the port number to test
@type port: C{int}
@return: C{True} if scheme is https (secure), C{False} otherwise
@rtype: C{bool}
"""
#from twistedcaldav.config import config
if hasattr(self.site, "EnableSSL") and self.site.EnableSSL:
if port == self.site.SSLPort:
return True
elif port in self.site.BindSSLPorts:
return True
return False
    def _fixupURLParts(self):
        """Fill in scheme/host/port that the request line did not provide,
        using the channel's host info and the Host header; raise BAD_REQUEST
        for HTTP/1.1 requests with no host information at all."""
        hostaddr, secure = self.chanRequest.getHostInfo()
        if not self.scheme:
            self.scheme = ('http', 'https')[secure]
        if self.host:
            self.host, self.port = http.splitHostPort(self.scheme, self.host)
            # Re-derive the scheme from the configured port, in case a
            # front-end device terminated SSL (see _schemeFromPort).
            self.scheme = ('http', 'https')[self._schemeFromPort(self.port)]
        else:
            # If GET line wasn't an absolute URL
            host = self.headers.getHeader('host')
            if host:
                self.host, self.port = http.splitHostPort(self.scheme, host)
                self.scheme = ('http', 'https')[self._schemeFromPort(self.port)]
            else:
                # When no hostname specified anywhere, either raise an
                # error, or use the interface hostname, depending on
                # protocol version
                if self.clientproto >= (1,1):
                    raise http.HTTPError(responsecode.BAD_REQUEST)
                self.host = hostaddr.host
                self.port = hostaddr.port
    def process(self):
        "Process a request."
        log.info("%s %s %s" % (
            self.method,
            self.uri,
            "HTTP/%s.%s" % self.clientproto
        ))
        try:
            self.checkExpect()
            resp = self.preprocessRequest()
            if resp is not None:
                # Short-circuit: preprocessRequest produced a complete
                # response (e.g. for "OPTIONS *").
                self._cbFinishRender(resp).addErrback(self._processingFailed)
                return
            self._parseURL()
            self._fixupURLParts()
            self.remoteAddr = self.chanRequest.getRemoteHost()
        except:
            # Deliberately broad: any failure during early processing is
            # routed to the standard error-response machinery.
            self._processingFailed(failure.Failure())
            return
        # Resolve the resource for postpath, remember its URL, time-stamp,
        # render it, then finish (or report failure on) the response.
        d = defer.Deferred()
        d.addCallback(self._getChild, self.site.resource, self.postpath)
        d.addCallback(self._rememberResource, "/" + "/".join(quote(s) for s in self.postpath))
        d.addCallback(self._processTimeStamp)
        d.addCallback(lambda res, req: res.renderHTTP(req), self)
        d.addCallback(self._cbFinishRender)
        d.addErrback(self._processingFailed)
        d.callback(None)
        return d
    def _processTimeStamp(self, res):
        # Callback shim: record the "resource located" checkpoint, then
        # pass the resolved resource through unchanged.
        self.timeStamp("t-req-proc")
        return res
def preprocessRequest(self):
"""Do any request processing that doesn't follow the normal
resource lookup procedure. "OPTIONS *" is handled here, for
example. This would also be the place to do any CONNECT
processing."""
if self.method == "OPTIONS" and self.uri == "*":
response = http.Response(responsecode.OK)
response.headers.setHeader('allow', ('GET', 'HEAD', 'OPTIONS', 'TRACE'))
return response
elif self.method == "POST":
# Allow other methods to tunnel through using POST and a request header.
# See http://code.google.com/apis/gdata/docs/2.0/basics.html
if self.headers.hasHeader("X-HTTP-Method-Override"):
intendedMethod = self.headers.getRawHeaders("X-HTTP-Method-Override")[0];
if intendedMethod:
self.originalMethod = self.method
self.method = intendedMethod
# This is where CONNECT would go if we wanted it
return None
def _getChild(self, _, res, path, updatepaths=True):
"""Call res.locateChild, and pass the result on to _handleSegment."""
self.resources.append(res)
if not path:
return res
result = res.locateChild(self, path)
if isinstance(result, defer.Deferred):
return result.addCallback(self._handleSegment, res, path, updatepaths)
else:
return self._handleSegment(result, res, path, updatepaths)
def _handleSegment(self, result, res, path, updatepaths):
    """Handle the result of a locateChild call done in _getChild.

    C{result} is the (child, remaining-path) pair returned by
    C{res.locateChild}; traversal continues recursively until the
    remaining path is exhausted or C{StopTraversal} is returned.
    """
    newres, newpath = result
    # If the child resource is None then display a error page
    if newres is None:
        raise http.HTTPError(responsecode.NOT_FOUND)
    # If we got a deferred then we need to call back later, once the
    # child is actually available.
    if isinstance(newres, defer.Deferred):
        return newres.addCallback(
            lambda actualRes: self._handleSegment(
                (actualRes, newpath), res, path, updatepaths)
        )
    if path:
        url = quote("/" + "/".join(path))
    else:
        url = "/"
    if newpath is StopTraversal:
        # We need to rethink how to do this.
        #if newres is res:
        return res
        #else:
        #    raise ValueError("locateChild must not return StopTraversal with a resource other than self.")
    newres = iweb.IResource(newres)
    if newres is res:
        # A resource returning itself must consume at least one segment,
        # otherwise traversal would loop forever.
        assert not newpath is path, "URL traversal cycle detected when attempting to locateChild %r from resource %r." % (path, res)
        assert len(newpath) < len(path), "Infinite loop impending..."
    if updatepaths:
        # We found a Resource... update the request.prepath and postpath
        for x in xrange(len(path) - len(newpath)):  # NB: xrange -> Python 2 code
            self.prepath.append(self.postpath.pop(0))
        url = quote("/" + "/".join(self.prepath) + ("/" if self.prepath and self.prepath[-1] else ""))
        self._rememberResource(newres, url)
    else:
        # Out-of-band lookup (locateResource): record the child's URL
        # relative to its parent without touching prepath/postpath.
        try:
            previousURL = self.urlForResource(res)
            url = quote(previousURL + path[0] + ("/" if path[0] and len(path) > 1 else ""))
            self._rememberResource(newres, url)
        except NoURLForResourceError:
            pass
    # Recurse to resolve the remaining path segments.
    child = self._getChild(None, newres, newpath, updatepaths=updatepaths)
    return child
# Map of visited resource -> URL; weak keys so entries disappear once a
# resource is garbage collected.  NOTE(review): as a class attribute this
# map is shared by all Request instances — confirm that is intended.
_urlsByResource = weakref.WeakKeyDictionary()
def _rememberResource(self, resource, url):
"""
Remember the URL of a visited resource.
"""
self._resourcesByURL[url] = resource
self._urlsByResource[resource] = url
return resource
def _forgetResource(self, resource, url):
    """
    Forget the URL of a previously visited resource, removing it from
    both lookup maps.
    """
    del self._resourcesByURL[url]
    del self._urlsByResource[resource]
def urlForResource(self, resource):
    """
    Looks up the URL of the given resource if this resource was found while
    processing this request (i.e. the requested resource itself, or one
    looked up via L{locateResource}).

    A resource may be found at multiple URIs; if so, one of those URLs is
    returned, but which one is not defined, nor whether subsequent calls
    return the same URL.

    @param resource: the resource to find a URI for.  Must have been
        obtained from the request (via its C{uri} attribute, or through
        its C{locateResource} or C{locateChildResource} methods).
    @return: a valid URL for C{resource} in this request.
    @raise NoURLForResourceError: if C{resource} has no URL in this
        request (because it was not obtained from the request).
    """
    try:
        return self._urlsByResource[resource]
    except KeyError:
        raise NoURLForResourceError(resource)
def locateResource(self, url):
    """
    Looks up the resource with the given URL.
    @param url: The URL of the desired resource.
    @return: a L{Deferred} resulting in the L{IResource} at the
        given URL or C{None} if no such resource can be located.
    @raise HTTPError: If C{url} is not a URL on the site that this
        request is being applied to. The contained response will
        have a status code of L{responsecode.BAD_GATEWAY}.
    @raise HTTPError: If C{url} contains a query or fragment.
        The contained response will have a status code of
        L{responsecode.BAD_REQUEST}.
    """
    if url is None:
        return defer.succeed(None)
    #
    # Parse the URL
    #
    (scheme, host, path, query, fragment) = urlsplit(url)
    if query or fragment:
        raise http.HTTPError(http.StatusResponse(
            responsecode.BAD_REQUEST,
            "URL may not contain a query or fragment: %s" % (url,)
        ))
    # Look for cached value
    cached = self._resourcesByURL.get(path, None)
    if cached is not None:
        return defer.succeed(cached)
    segments = unquote(path).split("/")
    assert segments[0] == "", "URL path didn't begin with '/': %s" % (path,)
    # Walk the segments up to see if we can find a cached resource to start from
    preSegments = segments[:-1]
    postSegments = segments[-1:]
    cachedParent = None
    while(len(preSegments)):
        parentPath = "/".join(preSegments) + "/"
        cachedParent = self._resourcesByURL.get(parentPath, None)
        if cachedParent is not None:
            break
        else:
            # No cache hit at this depth; move one segment from the
            # "known" prefix to the part still to be traversed.
            postSegments.insert(0, preSegments.pop())
    if cachedParent is None:
        # Nothing cached at all: traverse from the site root.
        cachedParent = self.site.resource
        postSegments = segments[1:]
    def notFound(f):
        # Translate a 404 raised during traversal into a None result;
        # any other HTTP error keeps propagating.
        f.trap(http.HTTPError)
        if f.value.response.code != responsecode.NOT_FOUND:
            return f
        return None
    d = defer.maybeDeferred(self._getChild, None, cachedParent, postSegments, updatepaths=False)
    d.addCallback(self._rememberResource, path)
    d.addErrback(notFound)
    return d
def locateChildResource(self, parent, childName):
    """
    Looks up the child resource with the given name given the parent
    resource. This is similar to locateResource(), but doesn't have to
    start the lookup from the root resource, so it is potentially faster.
    @param parent: the parent of the resource being looked up. This resource
        must have been obtained from the request (i.e. via its C{uri} attribute,
        or through its C{locateResource} or C{locateChildResource} methods).
    @param childName: the name of the child of C{parent} to look up.
    @return: a L{Deferred} resulting in the L{IResource} at the
        given URL or C{None} if no such resource can be located.
    @raise NoURLForResourceError: if C{parent} was not obtained from the
        request.
    """
    if parent is None or childName is None:
        return None
    assert "/" not in childName, "Child name may not contain '/': %s" % (childName,)
    parentURL = self.urlForResource(parent)
    if not parentURL.endswith("/"):
        parentURL += "/"
    url = parentURL + quote(childName)
    segment = childName
    def notFound(f):
        # Translate a 404 from traversal into a None result; any other
        # HTTP error keeps propagating.
        f.trap(http.HTTPError)
        if f.value.response.code != responsecode.NOT_FOUND:
            return f
        return None
    d = defer.maybeDeferred(self._getChild, None, parent, [segment], updatepaths=False)
    d.addCallback(self._rememberResource, url)
    d.addErrback(notFound)
    return d
def _processingFailed(self, reason):
    """
    Convert a failure raised during request processing into a response.

    An HTTPError already carries its response; any other exception is
    handed to an ICanHandleException adapter so it can render an error
    page.  Either way the result is rendered, and should that rendering
    itself fail, C{_processingReallyFailed} takes over.
    """
    def _renderException(failure):
        # Give an ICanHandleException implementer a chance to render
        # the error page for this unexpected exception.
        handler = iweb.ICanHandleException(self, self)
        return handler.renderHTTP_exception(self, failure)

    if reason.check(http.HTTPError) is not None:
        # If the exception was an HTTPError, leave it alone
        d = defer.succeed(reason.value.response)
    else:
        d = defer.maybeDeferred(_renderException, reason)
    d.addCallback(self._cbFinishRender)
    d.addErrback(self._processingReallyFailed, reason)
    return d
def _processingReallyFailed(self, reason, origReason):
    """
    An error occurred when attempting to report an error to the HTTP
    client: log both failures and send a canned 500 page; if even that
    fails, abort the connection.
    """
    log.failure("Exception rendering error page", reason)
    log.failure("Original exception", origReason)
    try:
        body = (
            "<html><head><title>Internal Server Error</title></head>"
            "<body><h1>Internal Server Error</h1>"
            "An error occurred rendering the requested page. "
            "Additionally, an error occurred rendering the error page."
            "</body></html>"
        )
        response = http.Response(
            responsecode.INTERNAL_SERVER_ERROR,
            {'content-type': http_headers.MimeType('text','html')},
            body
        )
        self.writeResponse(response)
    except:
        # Deliberate bare except: this is the last line of defense, so
        # swallow absolutely everything and just drop the connection.
        log.failure(
            "An error occurred. We tried to report that error. "
            "Reporting that error caused an error. "
            "In the process of reporting the error-reporting error to "
            "the client, there was *yet another* error. Here it is. "
            "I give up."
        )
        self.chanRequest.abortConnection()
def _cbFinishRender(self, result):
    """
    Finish rendering: if *result* adapts to a response, apply the
    request's response filters and write it out; if it adapts to a
    resource, render it and recurse on what the rendering produced.

    @raise TypeError: if *result* is neither a response nor a resource.
    """
    def filterit(response, f):
        # Filters normally apply to 2xx responses only, unless the
        # filter opts into error handling via a 'handleErrors' attribute.
        if (hasattr(f, 'handleErrors') or
            (response.code >= 200 and response.code < 300)):
            return f(self, response)
        else:
            return response
    response = iweb.IResponse(result, None)
    if response:
        d = defer.Deferred()
        for f in self.responseFilters:
            d.addCallback(filterit, f)
        d.addCallback(self.writeResponse)
        d.callback(response)
        return d
    resource = iweb.IResource(result, None)
    if resource:
        self.resources.append(resource)
        d = defer.maybeDeferred(resource.renderHTTP, self)
        d.addCallback(self._cbFinishRender)
        return d
    raise TypeError("html is not a resource or a response")
def renderHTTP_exception(self, req, reason):
    """Log an unhandled rendering exception and return a generic 500 page."""
    log.failure("Exception rendering request: {request}", reason, request=req)
    body = ("<html><head><title>Internal Server Error</title></head>"
            "<body><h1>Internal Server Error</h1>An error occurred rendering the requested page. More information is available in the server log.</body></html>")
    return http.Response(
        responsecode.INTERNAL_SERVER_ERROR,
        {'content-type': http_headers.MimeType('text','html')},
        body)
class Site(object):
    """Ties a root resource to the requests served under it."""

    def __init__(self, resource):
        """Initialize with *resource* (adapted to IResource) as the site root.
        """
        self.resource = iweb.IResource(resource)

    def __call__(self, *args, **kwargs):
        # Used as a request factory: build a Request bound to this site.
        return Request(site=self, *args, **kwargs)
class NoURLForResourceError(RuntimeError):
    """Raised when a resource was not obtained via this request and
    therefore has no URL associated with it."""

    def __init__(self, resource):
        message = "Resource %r has no URL in this request." % (resource,)
        RuntimeError.__init__(self, message)
        # Keep the offending resource available to the caller.
        self.resource = resource
__all__ = ['Request', 'Site', 'StopTraversal', 'VERSION', 'defaultHeadersFilter', 'doTrace', 'parsePOSTData', 'preconditionfilter', 'NoURLForResourceError']
|
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import essentia
import essentia.standard as standard
import essentia.streaming as streaming
import numpy
from postprocess import postProcess
def tonalPoolCleaning(pool, namespace=None):
    """Collapse the per-frame tuning frequency to its final value and
    drop the temporary high-resolution HPCP descriptor from *pool*."""
    prefix = (namespace + '.tonal.') if namespace else 'tonal.'
    # Keep only the last (most refined) tuning-frequency estimate.
    finalTuning = pool[prefix + 'tuning_frequency'][-1]
    pool.remove(prefix + 'tuning_frequency')
    pool.set(prefix + 'tuning_frequency', finalTuning)
    # hpcp_highres was only needed for intermediate computation.
    pool.remove(prefix + 'hpcp_highres')
def normalize(array):
    """Scale *array* by its maximum so the largest value becomes 1.0.

    @param array: non-empty sequence of numbers with a non-zero maximum
        (otherwise numpy.max / the division below fail).
    @return: a plain list of floats.
    """
    # Fix: the original bound the peak to a local named `max`, shadowing
    # the builtin.  Compute it once, under a non-shadowing name.
    peak = float(numpy.max(array))
    return [float(val) / peak for val in array]
def tuningSystemFeatures(pool, namespace=''):
    """Compute tuning-system descriptors from the tonal descriptors
    already present in *pool* (hpcp and hpcp_highres)."""
    # expects tonal descriptors and tuning features to be in pool
    tonalspace = 'tonal.'
    if namespace: tonalspace = namespace + '.tonal.'
    # 1- diatonic strength: key strength of the time-averaged
    # high-resolution HPCP matched against a diatonic profile.
    hpcp_highres = normalize(numpy.mean(pool[tonalspace + 'hpcp_highres'], 0))
    key,scale,strength,_ = standard.Key(profileType='diatonic')(hpcp_highres)
    pool.set(tonalspace + 'tuning_diatonic_strength', strength)
    # 2- high resolution features
    eqTempDeviation, ntEnergy,_ = standard.HighResolutionFeatures()(hpcp_highres)
    pool.set(tonalspace+'tuning_equal_tempered_deviation', eqTempDeviation)
    pool.set(tonalspace+'tuning_nontempered_energy_ratio', ntEnergy)
    # 3- THPCP: the averaged HPCP rotated left by the index of its
    # maximum, so the peak bin comes first (transposition-invariant).
    hpcp = normalize(numpy.mean(pool[tonalspace + 'hpcp'], 0))
    hpcp_copy = hpcp[:]
    idx = numpy.argmax(hpcp)
    offset = len(hpcp)-idx
    hpcp[:offset] = hpcp_copy[idx:offset+idx]
    hpcp[offset:offset+idx] = hpcp_copy[0:idx]
    pool.set(tonalspace+'thpcp', essentia.array(hpcp))
def sfxPitch(pool, namespace=''):
    """Compute SFX pitch descriptors from the lowlevel pitch curve.

    Runs the pitch envelope through a small streaming network
    (max/min position ratios and after-max/before-max energy ratio)
    and adds the pitch centroid.
    """
    sfxspace = 'sfx.'
    llspace = 'lowlevel.'
    if namespace:
        sfxspace = namespace + '.sfx.'
        llspace = namespace + '.lowlevel.'
    pitch = pool[llspace+'pitch']
    gen = streaming.VectorInput(pitch)
    maxtt = streaming.MaxToTotal()
    mintt = streaming.MinToTotal()
    amt = streaming.AfterMaxToBeforeMaxEnergyRatio()
    # Wire the pitch curve into the three streaming descriptors and
    # route their outputs straight into the pool.
    gen.data >> maxtt.envelope
    gen.data >> mintt.envelope
    gen.data >> amt.pitch
    maxtt.maxToTotal >> (pool, sfxspace+'pitch_max_to_total')
    mintt.minToTotal >> (pool, sfxspace+'pitch_min_to_total')
    amt.afterMaxToBeforeMaxEnergyRatio >> (pool, sfxspace+'pitch_after_max_to_before_max_energy_ratio')
    essentia.run(gen)
    # Centroid of the pitch curve over its duration in frames.
    pc = standard.Centroid(range=len(pitch)-1)(pitch)
    pool.set(sfxspace+'pitch_centroid', pc)
def compute(pool, namespace=''):
    """Fifth analysis pass: high-level descriptors that depend on other
    descriptors, but no longer need to stream the audio."""
    # Average Level
    from level import levelAverage
    levelAverage(pool, namespace)
    # SFX Descriptors
    sfxPitch(pool, namespace)
    # Tuning System Features
    tuningSystemFeatures(pool, namespace)
    # Pool Cleaning (removing temporary descriptors):
    tonalPoolCleaning(pool, namespace)
    # Add missing descriptors which are not computed yet, but will be for the
    # final release or during the 1.x cycle. However, the schema need to be
    # complete before that, so just put default values for these.
    postProcess(pool, namespace)
|
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.db import models
from django.utils import translation
from templatefield.fields import TemplateTextField
from templatefield.managers import RenderTemplateManager
class FlatContent(models.Model):
    """
    A named chunk of template-renderable content, optionally scoped to a
    Site.  Rendered content is cached per (site, slug, language); the
    cache entry is invalidated whenever the row is saved or deleted.
    """
    slug = models.SlugField(max_length=255, unique=False,
                            help_text='The name by which the template author '
                                      'retrieves this content.')
    site = models.ForeignKey(Site, blank=True, null=True)
    content = TemplateTextField()

    # Plain manager, plus one that renders the template field on access.
    default = models.Manager()
    objects = RenderTemplateManager()

    class Meta:
        unique_together = ('slug', 'site',)
        verbose_name_plural = 'flat content'

    def __unicode__(self):
        return self.slug

    def save(self, *args, **kwargs):
        """Save the row, then drop the (now stale) cached rendering."""
        super(FlatContent, self).save(*args, **kwargs)
        cache.delete(self.key_from_slug(
            self.slug, site_id=self.site.id if self.site else None))

    def delete(self):
        """Drop the cached rendering, then delete the row."""
        cache.delete(self.key_from_slug(
            self.slug, site_id=self.site.id if self.site else None))
        super(FlatContent, self).delete()

    @staticmethod
    def key_from_slug(slug, site_id=None):
        """Build the cache key for (site_id, slug) in the active language."""
        lang = translation.get_language()
        return 'flatcontent_%s_%s_%s' % (site_id, slug, lang)

    @classmethod
    def get(cls, slug, site_id=None, context=None):
        """
        Return the rendered content for *slug*, preferring a site-specific
        entry and falling back to the site-independent one.  Results are
        cached unless a rendering *context* was supplied (context-dependent
        output must not be shared).  Returns '' when no content exists.
        """
        key, cache_value = cls._get_cached(slug, site_id, context)
        if cache_value:
            return cache_value
        manager = cls.objects.with_context(context or {})
        try:
            fc = manager.get(slug=slug, site=site_id)
        except cls.DoesNotExist:
            try:
                # Fallback to the non-site specific flatcontent
                key, cache_value = cls._get_cached(slug, context=context)
                if cache_value:
                    return cache_value
                fc = manager.get(slug=slug, site=None)
            except (cls.DoesNotExist, cls.MultipleObjectsReturned):
                # Was a bare `except:` — narrowed so that genuine DB or
                # programming errors are no longer silently swallowed.
                return ''
        if not context:
            cache.set(key, fc.content)
        return fc.content

    @classmethod
    def _get_cached(cls, slug, site_id=None, context=None):
        """Return (cache_key, cached_value); the cache is skipped when a
        rendering context is in play."""
        key = cls.key_from_slug(slug, site_id=site_id)
        cache_value = None if context else cache.get(key)
        return key, cache_value
|
"""
Stores global access to draw to the window.
The graphics context is the embodiment of what will be drawn. Whenever something wants draw to the screen, it must
do so via what is provided here. In particular, most drawing is done by providing sprites and data about how to
position, rotate, and scale the sprite.
"""
screen_width = 1920
"""
Width of the screen in pixels.
"""
screen_height = 1080
"""
Height of the screen in pixels.
"""
# Draw order: background first, then below_player, same_as_player
# (including the player itself), and finally above_player.
sprite_buffer = {
    'background': {},
    'below_player': {},
    'same_as_player': {},
    'above_player': {}
}
"""
The sprite buffer is a dictionary divided into layers corresponding to the order that the sprites will be drawn. Within
each layer, there is no guarantee of order (which is why the layers exist). As the names imply, the ``background``
is drawn first, with ``below_player`` on top, followed by ``same_as_player`` (which will include the player), then
``above_player``.
It's up to the user to make sure that there is no overlapping of sprites on the same layer (as such may have
inconsistent effects).
Each layer is a dictionary. The reason for this is to ensure that the user can not only add sprites to the buffer, but
also remove them. The buffer is not cleared automatically, but rather the user must clear it (either by manually
removing things we don't want to draw anymore or by calling ``clear_buffer()``. This is useful as there's going to be
many cases in which the sprites do not change.
"""
def clear_buffer():
    """
    Empty every layer of the sprite buffer.

    Most useful on mode switches, but usable any time.  On its own it
    should result in nothing being drawn, and thus a black screen.
    """
    for layer_name in sprite_buffer:
        sprite_buffer[layer_name].clear()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
###############################################################################
# Solid dryer equipment dialog
###############################################################################
from functools import partial
from PyQt5 import QtWidgets
from equipment.gas_solid_liquid import Dryer
from lib import unidades
from equipment.parents import UI_equip
from UI.widgets import Entrada_con_unidades
class UI_equipment(UI_equip):
    """Solid dryer equipment edition dialog.

    Builds the input, calculate and output tabs for a Dryer; every
    editable widget pushes its value into the equipment kwargs via
    changeParams.
    """
    Equipment = Dryer()

    def __init__(self, equipment=None, parent=None):
        """
        equipment: Initial equipment instance to model
        """
        super(UI_equipment, self).__init__(Dryer, parent=parent)

        # Input tab: humid solid stream and (psychrometric) air stream.
        self.addEntrada(QtWidgets.QApplication.translate("pychemqt", "Humid Solid"),
                        "entradaSolido")
        self.addEntrada(QtWidgets.QApplication.translate("pychemqt", "Air"),
                        "entradaAire", psychro=True)

        # Calculate tab: calculation mode plus the user-specified targets.
        lyt = QtWidgets.QGridLayout(self.tabCalculo)
        lyt.addWidget(QtWidgets.QLabel(
            QtWidgets.QApplication.translate("pychemqt", "Mode")), 1, 1)
        self.mode = QtWidgets.QComboBox()
        for txt in self.Equipment.TEXT_MODE:
            self.mode.addItem(txt)
        self.mode.currentIndexChanged.connect(
            partial(self.changeParams, "mode"))
        lyt.addWidget(self.mode, 1, 2, 1, 4)
        lyt.addItem(QtWidgets.QSpacerItem(
            20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed),
            2, 1, 1, 6)
        lyt.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
            "pychemqt", "Air Relative Humidity")), 3, 1)
        # Humidities are fractions, hence max=1.
        self.HumedadAire = Entrada_con_unidades(float, max=1, spinbox=True,
                                                step=0.01)
        self.HumedadAire.valueChanged.connect(partial(self.changeParams, "HR"))
        lyt.addWidget(self.HumedadAire, 3, 2)
        lyt.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
            "pychemqt", "Product moisture fraction")), 4, 1)
        self.HumedadSolido = Entrada_con_unidades(float, max=1., spinbox=True, step=0.01, textounidad=unidades.Mass(None).text()+"/"+unidades.Mass(None).text())
        self.HumedadSolido.valueChanged.connect(
            partial(self.changeParams, "HumedadResidual"))
        lyt.addWidget(self.HumedadSolido, 4, 2)
        lyt.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
            "pychemqt", "Output Solid Temperature")), 5, 1)
        self.temperatura = Entrada_con_unidades(unidades.Temperature)
        self.temperatura.valueChanged.connect(
            partial(self.changeParams, "TemperaturaSolid"))
        lyt.addWidget(self.temperatura, 5, 2)
        lyt.addWidget(QtWidgets.QLabel(
            QtWidgets.QApplication.translate("pychemqt", "Heat Duty")), 6, 1)
        self.Heat = Entrada_con_unidades(unidades.Power)
        self.Heat.valueChanged.connect(partial(self.changeParams, "Heat"))
        lyt.addWidget(self.Heat, 6, 2)
        lyt.addWidget(QtWidgets.QLabel(
            QtWidgets.QApplication.translate("pychemqt", "Pressure Drop")), 7, 1)
        self.DeltaP = Entrada_con_unidades(unidades.Pressure)
        self.DeltaP.valueChanged.connect(partial(self.changeParams, "DeltaP"))
        lyt.addWidget(self.DeltaP, 7, 2)
        lyt.addItem(QtWidgets.QSpacerItem(
            20, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding),
            8, 1, 1, 6)

        # Read-only results group.
        group = QtWidgets.QGroupBox(
            QtWidgets.QApplication.translate("pychemqt", "Results"))
        lyt.addWidget(group, 9, 1, 1, 5)
        layout = QtWidgets.QGridLayout(group)
        layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
            "pychemqt", "Output Temperature")), 1, 1)
        self.temperaturaCalculada = Entrada_con_unidades(unidades.Temperature, retornar=False, readOnly=True)
        layout.addWidget(self.temperaturaCalculada, 1, 2)
        layout.addWidget(QtWidgets.QLabel(
            QtWidgets.QApplication.translate("pychemqt", "Air Flow")), 2, 1)
        self.caudalVolumetrico = Entrada_con_unidades(unidades.VolFlow, "QGas", retornar=False, readOnly=True)
        layout.addWidget(self.caudalVolumetrico, 2, 2)
        layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
            "pychemqt", "Output Air Relative Humidity")), 3, 1)
        self.HumedadCalculada = Entrada_con_unidades(float, readOnly=True, textounidad="%")
        layout.addWidget(self.HumedadCalculada, 3, 2)
        lyt.addItem(QtWidgets.QSpacerItem(
            20, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding),
            11, 1, 1, 6)

        # Output Tab
        self.addSalida(QtWidgets.QApplication.translate("pychemqt", "Air"), psychro=True)
        self.addSalida(QtWidgets.QApplication.translate("pychemqt", "Dry solid"))

        if equipment:
            self.setEquipment(equipment)

    # def rellenar(self):
    #     self.EntradaAire.setCorriente(self.Equipment.kwargs["entradaAire"])
    #     self.EntradaSolido.setCorriente(self.Equipment.kwargs["entradaSolido"])
    #     if self.Equipment.status:
    #         self.temperaturaCalculada.setValue(self.Equipment.SalidaSolido.T)
    #         self.caudalVolumetrico.setValue(self.Equipment.entradaAire.corriente.Q)
    #         self.HumedadCalculada.setValue(self.Equipment.SalidaAire.Xw*100)
    #         self.SalidaAire.setCorriente(self.Equipment.SalidaAire)
    #         self.SalidaSolido.setCorriente(self.Equipment.SalidaSolido)
    #         if self.Equipment.kwargs["mode"]==1:
    #             self.EntradaAire.setCorriente(self.Equipment.entradaAire)
if __name__ == "__main__":
    # Manual test entry point: show the dialog with no equipment bound.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    # Sample streams for exercising a fully-specified dryer, kept for
    # reference:
    # from lib.corriente import Mezcla, Corriente, Solid, PsyStream
    # from lib.psycrometry import PsychroState
    # diametros=[96.5, 105, 110, 118, 125, 130, 140, 150, 170]
    # fraccion=[0.02, 0.05, 0.1, 0.15, 0.25, 0.2, 0.15, 0.05, 0.03]
    # solido=Solid(caudalSolido=[5000], distribucion_fraccion=fraccion, distribucion_diametro=diametros)
    # Solido=Corriente(T=300, P=101325., caudalMasico=50, ids=[62], fraccionMolar=[1], solido=solido)
    # Aire=PsyStream(caudalMasico=100, tdb=300, HR=50)
    # secador=Dryer(entradaSolido=Solido, entradaAire=Aire)
    dialogo = UI_equipment()
    dialogo.show()
    sys.exit(app.exec_())
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
calicoctl status
Description:
Print current status information regarding calico-node container
and the BIRD routing daemon.
"""
import re
from utils import docker_client
def status(arguments):
    """
    Main dispatcher for status commands. Calls the corresponding helper
    function.
    :param arguments: A dictionary of arguments already processed through
    this file's docstring with docopt
    :return: None
    """
    # Python 2 code (print statements): filter() returns a list here, so
    # the len() test below is valid.
    calico_node_info = filter(lambda container: "/calico-node" in
                              container["Names"],
                              docker_client.containers())
    if len(calico_node_info) == 0:
        print "calico-node container not running"
    else:
        print "calico-node container is running. Status: %s" % \
              calico_node_info[0]["Status"]

        # Report the installed felix version by querying apt inside the
        # calico-node container.
        apt_cmd = docker_client.exec_create("calico-node", ["/bin/bash", "-c",
                                            "apt-cache policy calico-felix"])
        result = re.search(r"Installed: (.*?)\s", docker_client.exec_start(apt_cmd))
        if result is not None:
            print "Running felix version %s" % result.group(1)

        # Dump BIRD BGP protocol status for IPv4 and IPv6.
        print "IPv4 Bird (BGP) status"
        bird_cmd = docker_client.exec_create("calico-node",
                                             ["/bin/bash", "-c",
                                              "echo show protocols | "
                                              "birdc -s /etc/service/bird/bird.ctl"])
        print docker_client.exec_start(bird_cmd)
        print "IPv6 Bird (BGP) status"
        bird6_cmd = docker_client.exec_create("calico-node",
                                              ["/bin/bash", "-c",
                                               "echo show protocols | "
                                               "birdc6 -s "
                                               "/etc/service/bird6/bird6.ctl"])
        print docker_client.exec_start(bird6_cmd)
|
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import os, sys
import logging
import warnings
from optparse import OptionParser
from zeroinstall import _, SafeException
from zeroinstall.injector import requirements
from zeroinstall.injector.driver import Driver
from zeroinstall.injector.config import load_config
from zeroinstall.support import tasks
# Blocker used to signal the mainloop that a recalculation is wanted.
_recalculate = tasks.Blocker('recalculate')

def recalculate():
    """Ask the mainloop to recalculate. If we're already recalculating, wait for that to finish
    and then do it again."""
    global _recalculate
    # Wake everyone waiting on the current blocker, then install a fresh
    # one so the next recalculate() request can be signalled too.
    _recalculate.trigger()
    _recalculate = tasks.Blocker('recalculate')
def run_gui(args):
    """Entry point for 0launch-gui: parse options, bring up GTK (or exit
    with status 100 if no GUI is available), and run the main window's
    solve/refresh loop for the requested interface URI."""
    parser = OptionParser(usage=_("usage: %prog [options] interface"))
    parser.add_option("", "--before", help=_("choose a version before this"), metavar='VERSION')
    parser.add_option("", "--cpu", help=_("target CPU type"), metavar='CPU')
    parser.add_option("", "--command", help=_("command to select"), metavar='COMMAND')
    parser.add_option("-d", "--download-only", help=_("fetch but don't run"), action='store_true')
    parser.add_option("-g", "--force-gui", help=_("display an error if there's no GUI"), action='store_true')
    parser.add_option("", "--message", help=_("message to display when interacting with user"))
    parser.add_option("", "--not-before", help=_("minimum version to choose"), metavar='VERSION')
    parser.add_option("", "--os", help=_("target operation system type"), metavar='OS')
    parser.add_option("-r", "--refresh", help=_("check for updates of all interfaces"), action='store_true')
    parser.add_option("", "--select-only", help=_("only download the feeds"), action='store_true')
    parser.add_option("-s", "--source", help=_("select source code"), action='store_true')
    parser.add_option("", "--systray", help=_("download in the background"), action='store_true')
    parser.add_option("-v", "--verbose", help=_("more verbose output"), action='count')
    parser.add_option("-V", "--version", help=_("display version information"), action='store_true')
    parser.add_option("", "--with-store", help=_("add an implementation cache"), action='append', metavar='DIR')
    parser.disable_interspersed_args()
    (options, args) = parser.parse_args(args)

    # -v => INFO, -vv (or more) => DEBUG.
    if options.verbose:
        logger = logging.getLogger()
        if options.verbose == 1:
            logger.setLevel(logging.INFO)
        else:
            logger.setLevel(logging.DEBUG)

    if options.version:
        import gui
        print("0launch-gui (zero-install) " + gui.version)
        print("Copyright (C) 2010 Thomas Leonard")
        print(_("This program comes with ABSOLUTELY NO WARRANTY,"
                "\nto the extent permitted by law."
                "\nYou may redistribute copies of this program"
                "\nunder the terms of the GNU Lesser General Public License."
                "\nFor more information about these matters, see the file named COPYING."))
        sys.exit(0)

    def nogui(ex):
        # Exit with status 100 when the GUI cannot start; log level
        # depends on whether the user explicitly demanded a GUI.
        if options.force_gui:
            fn = logging.warn
        else:
            fn = logging.info
        fn("No GUI available", exc_info = ex)
        sys.exit(100)

    # Import GTK (pygtk on Python 2, pygtkcompat/GTK3 on Python 3),
    # silencing warnings unless --force-gui was given.
    with warnings.catch_warnings():
        if not options.force_gui:
            warnings.filterwarnings("ignore")
        if sys.version_info[0] < 3:
            try:
                import pygtk; pygtk.require('2.0')
            except ImportError as ex:
                nogui(ex)
        import gui
        try:
            if sys.version_info[0] > 2:
                from zeroinstall.gtkui import pygtkcompat
                pygtkcompat.enable()
                pygtkcompat.enable_gtk(version = '3.0')
            import gtk
        except (ImportError, ValueError) as ex:
            nogui(ex)
        if gtk.gdk.get_display() is None:
            try:
                raise SafeException("Failed to connect to display.")
            except SafeException as ex:
                nogui(ex)  # logging needs this as a raised exception

    handler = gui.GUIHandler()
    config = load_config(handler)

    if options.with_store:
        from zeroinstall import zerostore
        for x in options.with_store:
            config.stores.stores.append(zerostore.Store(os.path.abspath(x)))

    # No interface URI given: just show the preferences dialog and exit.
    if len(args) < 1:
        @tasks.async
        def prefs_main():
            import preferences
            box = preferences.show_preferences(config)
            done = tasks.Blocker('close preferences')
            box.connect('destroy', lambda w: done.trigger())
            yield done
        tasks.wait_for_blocker(prefs_main())
        sys.exit(0)

    interface_uri = args[0]
    if len(args) > 1:
        parser.print_help()
        sys.exit(1)

    import mainwindow, dialog

    r = requirements.Requirements(interface_uri)
    r.parse_options(options)

    widgets = dialog.Template('main')

    driver = Driver(config = config, requirements = r)
    root_iface = config.iface_cache.get_interface(interface_uri)
    driver.solver.record_details = True

    window = mainwindow.MainWindow(driver, widgets, download_only = bool(options.download_only), select_only = bool(options.select_only))
    handler.mainwindow = window

    if options.message:
        window.set_message(options.message)

    root = config.iface_cache.get_interface(r.interface_uri)
    window.browser.set_root(root)

    window.window.connect('destroy', lambda w: handler.abort_all_downloads())

    if options.systray:
        window.use_systray_icon()

    @tasks.async
    def main():
        # Solve/refresh loop: re-runs whenever the refresh button is
        # clicked or recalculate() triggers the module-level blocker.
        force_refresh = bool(options.refresh)
        while True:
            window.refresh_button.set_sensitive(False)
            window.browser.set_update_icons(force_refresh)
            solved = driver.solve_with_downloads(force = force_refresh, update_local = True)
            if not window.systray_icon:
                window.show()
            yield solved
            try:
                window.refresh_button.set_sensitive(True)
                window.browser.highlight_problems()
                tasks.check(solved)
            except Exception as ex:
                window.report_exception(ex)
            if window.systray_icon and window.systray_icon.get_visible() and \
               window.systray_icon.is_embedded():
                if driver.solver.ready:
                    window.systray_icon.set_tooltip(_('Downloading updates for %s') % root_iface.get_name())
                    window.run_button.set_active(True)
                else:
                    # Should already be reporting an error, but
                    # blink it again just in case
                    window.systray_icon.set_blinking(True)
            refresh_clicked = dialog.ButtonClickedBlocker(window.refresh_button)
            yield refresh_clicked, _recalculate
            if refresh_clicked.happened:
                force_refresh = True
    tasks.wait_for_blocker(main())
|
from gi.repository import Gtk
from pychess import Variants
from pychess.Utils.const import NORMALCHESS, ATOMICCHESS, BUGHOUSECHESS, CRAZYHOUSECHESS, \
LOSERSCHESS, SUICIDECHESS, FISCHERRANDOMCHESS, WILDCASTLESHUFFLECHESS, \
SHUFFLECHESS, RANDOMCHESS, ASYMMETRICRANDOMCHESS, WILDCASTLECHESS, UPSIDEDOWNCHESS, \
PAWNSPUSHEDCHESS, PAWNSPASSEDCHESS, GIVEAWAYCHESS, THREECHECKCHESS
# Connection state of the ICS session.
IC_CONNECTED, IC_DISCONNECTED = range(2)

# Fixed values of my relation to this game
# http://www.freechess.org/Help/HelpFiles/style12.html
IC_POS_INITIAL, IC_POS_ISOLATED, IC_POS_OBSERVING_EXAMINATION, IC_POS_OP_TO_MOVE, \
    IC_POS_OBSERVING, IC_POS_ME_TO_MOVE, IC_POS_EXAMINATING = range(-4, 3)

# RatingType
TYPE_BLITZ, TYPE_STANDARD, TYPE_LIGHTNING, TYPE_WILD, \
    TYPE_BUGHOUSE, TYPE_CRAZYHOUSE, TYPE_SUICIDE, TYPE_LOSERS, TYPE_ATOMIC, \
    TYPE_BULLET, TYPE_ONE_MINUTE, TYPE_THREE_MINUTE, TYPE_FIVE_MINUTE, \
    TYPE_FIFTEEN_MINUTE, TYPE_FORTYFIVE_MINUTE, TYPE_CHESS960, \
    TYPE_UNTIMED, TYPE_EXAMINED, TYPE_OTHER = range(19)

# Rating types that are tracked/displayed (TYPE_EXAMINED and TYPE_OTHER
# are not in this tuple).
RATING_TYPES = (TYPE_BLITZ,
                TYPE_STANDARD,
                TYPE_LIGHTNING,
                TYPE_BULLET,
                TYPE_ONE_MINUTE,
                TYPE_THREE_MINUTE,
                TYPE_FIVE_MINUTE,
                TYPE_FIFTEEN_MINUTE,
                TYPE_FORTYFIVE_MINUTE,
                TYPE_ATOMIC,
                TYPE_BUGHOUSE,
                TYPE_CRAZYHOUSE,
                TYPE_LOSERS,
                TYPE_SUICIDE,
                TYPE_WILD,
                TYPE_CHESS960,
                TYPE_UNTIMED,
                )

# Rating deviations
DEVIATION_NONE, DEVIATION_ESTIMATED, DEVIATION_PROVISIONAL = range(3)

IC_STATUS_PLAYING, IC_STATUS_ACTIVE, IC_STATUS_BUSY, IC_STATUS_OFFLINE, \
    IC_STATUS_AVAILABLE, IC_STATUS_NOT_AVAILABLE, IC_STATUS_EXAMINING, \
    IC_STATUS_IDLE, IC_STATUS_IN_TOURNAMENT, IC_STATUS_RUNNING_SIMUL_MATCH, \
    IC_STATUS_UNKNOWN = range(11)

# Regex fragments for FICS player listings: optional title tags like
# "(GM)(*)" and plain alphabetic handles.
TITLES_RE = r"(?:\([A-Z*]+\))*"
NAMES_RE = "[A-Za-z]+"

# Single-character rating-deviation codes as sent by FICS.
DEVIATION = {
    "E": DEVIATION_ESTIMATED,
    "P": DEVIATION_PROVISIONAL,
    " ": DEVIATION_NONE,
    "": DEVIATION_NONE,
}

# Single-character player-status codes as sent by FICS.
STATUS = {
    "^": IC_STATUS_PLAYING,
    " ": IC_STATUS_AVAILABLE,
    ".": IC_STATUS_IDLE,
    "#": IC_STATUS_EXAMINING,
    ":": IC_STATUS_NOT_AVAILABLE,
    "~": IC_STATUS_RUNNING_SIMUL_MATCH,
    "&": IC_STATUS_IN_TOURNAMENT,
}
class GameType:
    """Base description of a FICS/ICC game type.

    A game type couples the server-side name ("blitz", "wild/fr", ...)
    with its one-letter seek code, its rating category and the variant it
    is played with.  Subclasses either pass an explicit ``display_text``
    (NormalGameType) or derive it from the variant (VariantGameType
    exposes ``display_text`` as a read-only property).
    """
    def __init__(self,
                 fics_name,
                 short_fics_name,
                 rating_type,
                 display_text=None,
                 variant_type=NORMALCHESS):
        self.fics_name = fics_name
        self.short_fics_name = short_fics_name
        self.rating_type = rating_type
        # Only assign when a text was given: VariantGameType defines
        # display_text as a getter-only property, so an unconditional
        # instance-attribute assignment would raise AttributeError there.
        if display_text:
            self.display_text = display_text
        self.variant_type = variant_type
    @property
    def variant(self):
        # Variant class registered for this game type's variant code.
        return Variants.variants[self.variant_type]
    def __repr__(self):
        s = "<GameType "
        s += "fics_name='%s', " % self.fics_name
        s += "display_text='%s'>" % self.display_text
        return s
class NormalGameType(GameType):
    """Non-variant game type with an explicit, fixed display name."""

    def __init__(self, fics_name, short_fics_name, rating_type, display_text):
        super().__init__(fics_name, short_fics_name, rating_type,
                         display_text=display_text)
class VariantGameType(GameType):
    """Game type backed by a chess variant.

    The display name is not stored; it is looked up from the variant
    registry on demand.
    """

    def __init__(self, fics_name, short_fics_name, rating_type, variant_type):
        super().__init__(fics_name, short_fics_name, rating_type,
                         variant_type=variant_type)

    @property
    def display_text(self):
        """Human-readable name taken from the variant registry."""
        assert self.variant_type is not None
        return Variants.variants[self.variant_type].name

    @property
    def seek_text(self):
        """Form of the name usable in a seek command ("wild/fr" -> "wild fr")."""
        return self.fics_name.replace("/", " ") if "/" in self.fics_name \
            else self.fics_name
class WildGameType(VariantGameType):
    """Wild-family variant game type; every instance is registered so the
    full set of wild types can be enumerated later."""

    _instances = []

    def __init__(self, fics_name, variant_type):
        super().__init__(fics_name, "w", TYPE_WILD, variant_type=variant_type)
        WildGameType._instances.append(self)

    @classmethod
    def instances(cls):
        """Return the list of all WildGameType objects created so far."""
        return cls._instances
# FICS game types
# Master table: FICS/ICC game-type name -> GameType object.
GAME_TYPES = {
    "blitz": NormalGameType("blitz", "b", TYPE_BLITZ, _("Blitz")),
    "standard": NormalGameType("standard", "s", TYPE_STANDARD, _("Standard")),
    "lightning": NormalGameType("lightning", "l", TYPE_LIGHTNING, _("Lightning")),
    "1-minute": NormalGameType("1-minute", "o", TYPE_ONE_MINUTE, _("1-minute")),
    "3-minute": NormalGameType("3-minute", "M", TYPE_THREE_MINUTE, _("3-minute")),
    "5-minute": NormalGameType("5-minute", "f", TYPE_FIVE_MINUTE, _("5-minute")),
    "15-minute": NormalGameType("15-minute", "F", TYPE_FIFTEEN_MINUTE, _("15-minute")),
    "45-minute": NormalGameType("45-minute", "J", TYPE_FORTYFIVE_MINUTE, _("45-minute")),
    "chess960": NormalGameType("chess960", "K", TYPE_CHESS960, _("Chess960")),
    "untimed": NormalGameType("untimed", "u", TYPE_UNTIMED, _("Untimed")),
    "examined": NormalGameType("examined", "e", TYPE_EXAMINED, _("Examined")),
    "nonstandard": NormalGameType("nonstandard", "n", TYPE_OTHER, _("Other")),
    "w20": NormalGameType("loaded", "w20", TYPE_OTHER, _("Other")),  # loadfen/loadgame
    "w21": NormalGameType("loaded", "w21", TYPE_OTHER, _("Other")),  # thematic tournaments
    "atomic": VariantGameType("atomic", "x", TYPE_ATOMIC, ATOMICCHESS),
    "bughouse": VariantGameType("bughouse", "B", TYPE_BUGHOUSE, BUGHOUSECHESS),
    "crazyhouse": VariantGameType("crazyhouse", "z", TYPE_CRAZYHOUSE, CRAZYHOUSECHESS),
    "losers": VariantGameType("losers", "L", TYPE_LOSERS, LOSERSCHESS),
    "suicide": VariantGameType("suicide", "S", TYPE_SUICIDE, SUICIDECHESS),
    # FICS http://www.freechess.org/Help/HelpFiles/wild.html
    "wild/fr": WildGameType("wild/fr", FISCHERRANDOMCHESS),
    "wild/0": WildGameType("wild/0", WILDCASTLECHESS),
    "wild/1": WildGameType("wild/1", WILDCASTLESHUFFLECHESS),
    "wild/2": WildGameType("wild/2", SHUFFLECHESS),
    "wild/3": WildGameType("wild/3", RANDOMCHESS),
    "wild/4": WildGameType("wild/4", ASYMMETRICRANDOMCHESS),
    "wild/5": WildGameType("wild/5", UPSIDEDOWNCHESS),
    "wild/8": WildGameType("wild/8", PAWNSPUSHEDCHESS),
    "wild/8a": WildGameType("wild/8a", PAWNSPASSEDCHESS),
    # ICC https://www.chessclub.com/user/helpcenter/tips/wild.html
    "w17": WildGameType("w17", LOSERSCHESS),
    "w26": WildGameType("w26", GIVEAWAYCHESS),
    "w24": WildGameType("w24", BUGHOUSECHESS),
    "w23": WildGameType("w23", CRAZYHOUSECHESS),
    # "w16": WildGameType("w16", KRIEGSPIELCHESS),
    "w27": WildGameType("w27", ATOMICCHESS),
    # "w28": WildGameType("w28", SHATRANJCHESS),
    "w25": WildGameType("w25", THREECHECKCHESS),
    "w1": WildGameType("w1", WILDCASTLESHUFFLECHESS),
    "w2": WildGameType("w2", SHUFFLECHESS),
    "w3": WildGameType("w3", RANDOMCHESS),
    "w4": WildGameType("w4", ASYMMETRICRANDOMCHESS),
    "w22": WildGameType("w22", FISCHERRANDOMCHESS),
    "w5": WildGameType("w5", UPSIDEDOWNCHESS),
    # "w7": WildGameType("w7", THREEPAWNSCHESS),
    "w8": WildGameType("w8", PAWNSPUSHEDCHESS),
    # "w9": WildGameType("w9", TWOKINGSCHESS),
    # "w18": WildGameType("w18", EIGHTQUEENSCHESS),
    # "w19": WildGameType("w19", KNNKPCHESS),
}
# Reverse lookup: variant code -> the VariantGameType that plays it.
VARIANT_GAME_TYPES = {}
for key in GAME_TYPES:
    if isinstance(GAME_TYPES[key], VariantGameType):
        VARIANT_GAME_TYPES[GAME_TYPES[key].variant_type] = GAME_TYPES[key]
# The following 3 GAME_TYPES_* data structures don't have any real entries
# for the WildGameType's in GAME_TYPES above, and instead use
# a dummy type for the all-encompassing "Wild" FICS rating for wild/* games
GAME_TYPES_BY_SHORT_FICS_NAME = {
    "w": GameType("wild",
                  "w",
                  TYPE_WILD,
                  display_text=_("Wild"))
}
for key in GAME_TYPES:
    if not isinstance(GAME_TYPES[key], WildGameType):
        GAME_TYPES_BY_SHORT_FICS_NAME[GAME_TYPES[key].short_fics_name] = \
            GAME_TYPES[key]
GAME_TYPES_BY_RATING_TYPE = {}
for key in GAME_TYPES_BY_SHORT_FICS_NAME:
    GAME_TYPES_BY_RATING_TYPE[GAME_TYPES_BY_SHORT_FICS_NAME[key].rating_type] = \
        GAME_TYPES_BY_SHORT_FICS_NAME[key]
GAME_TYPES_BY_FICS_NAME = {}
for key in GAME_TYPES_BY_SHORT_FICS_NAME:
    GAME_TYPES_BY_FICS_NAME[GAME_TYPES_BY_SHORT_FICS_NAME[key].fics_name] = \
        GAME_TYPES_BY_SHORT_FICS_NAME[key]
# Finally add conflicting ICC game type
# (ICC's "bullet" reuses the short code "B" that FICS gives to bughouse.)
GAME_TYPES["bullet"] = NormalGameType("bullet", "B", TYPE_BULLET, _("Bullet"))
GAME_TYPES_BY_FICS_NAME["bullet"] = GAME_TYPES["bullet"]
GAME_TYPES_BY_RATING_TYPE[TYPE_BULLET] = GAME_TYPES["bullet"]
# GAME_TYPES_BY_SHORT_FICS_NAME["B"] will be fixed in FICSConnections.py
# and VARIANT_GAME_TYPES[FISCHERRANDOMCHESS] also
def type_to_display_text(typename):
    """Map a FICS/ICC game-type name to a human-readable display string.

    Known names are resolved through GAME_TYPES; unknown slash-separated
    names such as "eco/A00" are capitalised word-by-word; anything else
    is returned with its first letter upper-cased.
    """
    if "loaded from" in typename.lower():
        typename = typename.split()[-1]
    if typename in GAME_TYPES:
        return GAME_TYPES[typename].display_text
    # Default solution for eco/A00 and a few others
    if "/" in typename:
        # maxsplit=1 keeps names containing several slashes from raising
        # ValueError on unpacking; [:1] tolerates an empty half.
        first, second = typename.split("/", 1)
        first = first[:1].upper() + first[1:]
        second = second[:1].upper() + second[1:]
        return first + " " + second
    # Otherwise just capitalise; [:1] avoids IndexError on "".
    return typename[:1].upper() + typename[1:]
def time_control_to_gametype(minutes, gain):
    """Classify a time control into untimed/standard/blitz/lightning.

    Expected game length is estimated as the base time plus roughly a
    game's worth of increment ((gain * 60) - 1 seconds), matching the
    thresholds FICS uses (>= 15 min standard, >= 3 min blitz).
    """
    assert isinstance(minutes, int) and isinstance(gain, int)
    assert minutes >= 0 and gain >= 0
    # Replaced the legacy ``cond and a or b`` idiom with a real
    # conditional expression; the computed values are identical.
    gainminutes = (gain * 60) - 1 if gain > 0 else 0
    if minutes == 0 and gain == 0:
        return GAME_TYPES["untimed"]
    elif (minutes * 60) + gainminutes >= (15 * 60):
        return GAME_TYPES["standard"]
    elif (minutes * 60) + gainminutes >= (3 * 60):
        return GAME_TYPES["blitz"]
    else:
        return GAME_TYPES["lightning"]
# Player title/account-type codes (20 of them; both display tuples below
# are indexed by these values and must stay parallel).
TYPE_ADMINISTRATOR, TYPE_BLINDFOLD, TYPE_COMPUTER, \
    TYPE_TEAM, TYPE_UNREGISTERED, TYPE_CHESS_ADVISOR, \
    TYPE_SERVICE_REPRESENTATIVE, TYPE_TOURNAMENT_DIRECTOR, TYPE_MAMER_MANAGER, \
    TYPE_GRAND_MASTER, TYPE_INTERNATIONAL_MASTER, TYPE_FIDE_MASTER, \
    TYPE_WOMAN_GRAND_MASTER, TYPE_WOMAN_INTERNATIONAL_MASTER, TYPE_WOMAN_FIDE_MASTER,\
    TYPE_DUMMY_ACCOUNT, TYPE_CANDIDATE_MASTER, TYPE_FIDE_ARBEITER, TYPE_NATIONAL_MASTER, \
    TYPE_DISPLAY_MASTER = range(20)
# Long display names, indexed by TYPE_* (20 entries).
TITLE_TYPE_DISPLAY_TEXTS = (_("Administrator"),
                            _("Blindfold Account"),
                            _("Computer"),
                            _("Team Account"),
                            _("Unregistered"),
                            _("Chess Advisor"),
                            _("Service Representative"),
                            _("Tournament Director"),
                            _("Mamer Manager"),
                            _("Grand Master"),
                            _("International Master"),
                            _("FIDE Master"),
                            _("Woman Grand Master"),
                            _("Woman International Master"),
                            _("Woman FIDE Master"),
                            _("Dummy Account"),
                            _("Candidate Master"),
                            _("FIDE Arbeiter"),
                            _("National Master"),
                            _("Display Master"),
                            )
# Short abbreviations, indexed by TYPE_* (20 entries).  A stray _("H")
# entry after _("D") used to shift every abbreviation from
# TYPE_CANDIDATE_MASTER onward off by one relative to the tuple above;
# "H" is only an input alias for Service Representative (see TITLES).
TITLE_TYPE_DISPLAY_TEXTS_SHORT = (
    _("*"), _("B"), _("C"), _("T"), _("U"), _("CA"), _("SR"), _("TD"), _("TM"),
    _("GM"), _("IM"), _("FM"), _("WGM"), _("WIM"), _("WFM"), _("D"),
    _("CM"), _("FA"), _("NM"), _("DM"))
# Title abbreviation (as parsed from server output) -> TYPE_* code.
TITLES = {  # From FICS 'help who'
    "*": TYPE_ADMINISTRATOR,
    "B": TYPE_BLINDFOLD,
    "C": TYPE_COMPUTER,
    "T": TYPE_TEAM,
    "U": TYPE_UNREGISTERED,
    "CA": TYPE_CHESS_ADVISOR,
    "SR": TYPE_SERVICE_REPRESENTATIVE,
    "TD": TYPE_TOURNAMENT_DIRECTOR,
    "TM": TYPE_MAMER_MANAGER,
    "GM": TYPE_GRAND_MASTER,
    "IM": TYPE_INTERNATIONAL_MASTER,
    "FM": TYPE_FIDE_MASTER,
    "WFM": TYPE_WOMAN_FIDE_MASTER,
    "WIM": TYPE_WOMAN_INTERNATIONAL_MASTER,
    "WGM": TYPE_WOMAN_GRAND_MASTER,
    "D": TYPE_DUMMY_ACCOUNT,
    "H": TYPE_SERVICE_REPRESENTATIVE,
    "CM": TYPE_CANDIDATE_MASTER,
    "FA": TYPE_FIDE_ARBEITER,
    "NM": TYPE_NATIONAL_MASTER,
    "DM": TYPE_DISPLAY_MASTER,
}
# Bit flags of the hex titles field -> TYPE_* code (see parse_title_hex).
HEX_TO_TITLE = {
    0x1: TYPE_UNREGISTERED,
    0x2: TYPE_COMPUTER,
    0x4: TYPE_GRAND_MASTER,
    0x8: TYPE_INTERNATIONAL_MASTER,
    0x10: TYPE_FIDE_MASTER,
    0x20: TYPE_WOMAN_GRAND_MASTER,
    0x40: TYPE_WOMAN_INTERNATIONAL_MASTER,
    0x80: TYPE_WOMAN_FIDE_MASTER,
}
def parse_title_hex(titlehex):
    """Decode a hex titles bitmask string into a set of TYPE_* codes.

    Only the flags listed in HEX_TO_TITLE are recognised; unknown bits
    are ignored.
    """
    # Parse the hex string once instead of once per flag.
    mask = int(titlehex, 16)
    return {title for bit, title in HEX_TO_TITLE.items() if mask & bit}
def parseRating(rating):
    """Parse a server rating field such as " 1234P" into an int.

    Surrounding whitespace and a trailing provisional/estimated letter
    are stripped.  Non-numeric fields (e.g. "----" or "") yield 0
    instead of raising (the original crashed on an empty string).
    """
    rating = rating.strip()
    if rating and rating[-1].isalpha():
        rating = rating[:-1]
    return int(rating) if rating.isdigit() else 0
def get_infobarmessage_content(player, text, gametype=None):
    """Build an HBox holding the player's icon, name markup and a message."""
    box = Gtk.HBox()
    avatar = Gtk.Image()
    avatar.set_from_pixbuf(player.getIcon(size=32, gametype=gametype))
    box.pack_start(avatar, False, False, 4)
    name_label = Gtk.Label()
    name_label.set_markup(player.getMarkup(gametype=gametype))
    box.pack_start(name_label, False, False, 0)
    message_label = Gtk.Label()
    message_label.set_markup(text)
    box.pack_start(message_label, False, False, 0)
    return box
def get_infobarmessage_content2(player,
                                heading_text,
                                message_text,
                                gametype=None):
    """Build a VBox with an icon+heading row above a wrapped message body."""
    heading_row = Gtk.HBox()
    avatar = Gtk.Image()
    avatar.set_from_pixbuf(player.getIcon(size=24, gametype=gametype))
    heading_row.pack_start(avatar, False, False, 0)
    heading_label = Gtk.Label()
    heading_label.set_markup(
        player.getMarkup(gametype=gametype, long_titles=False) + heading_text)
    heading_row.pack_start(heading_label, False, False, 0)
    container = Gtk.VBox()
    container.pack_start(heading_row, False, False, 0)
    body_label = Gtk.Label()
    body_label.props.xalign = 0
    body_label.props.xpad = 4
    body_label.props.justify = Gtk.Justification.LEFT
    body_label.props.wrap = True
    body_label.set_width_chars(70)
    body_label.set_text(message_text)
    container.pack_start(body_label, False, False, 5)
    return container
"""
Internal command codes used in FICS block mode
(see "help block_codes" and "help iv_block").
Used mostly by internal library functions.
BLOCK_ variables are message boundary markers.
BLKCMD_ variables are command codes.
"""
BLOCK_START = chr(21) # \U
BLOCK_SEPARATOR = chr(22) # \V
BLOCK_END = chr(23) # \W
BLOCK_POSE_START = chr(24) # \X
BLOCK_POSE_END = chr(25) # \Y
BLKCMD_NULL = 0
BLKCMD_GAME_MOVE = 1
BLKCMD_ABORT = 10
BLKCMD_ACCEPT = 11
BLKCMD_ADDLIST = 12
BLKCMD_ADJOURN = 13
BLKCMD_ALLOBSERVERS = 14
BLKCMD_ASSESS = 15
BLKCMD_BACKWARD = 16
BLKCMD_BELL = 17
BLKCMD_BEST = 18
BLKCMD_BNAME = 19
BLKCMD_BOARDS = 20
BLKCMD_BSETUP = 21
BLKCMD_BUGWHO = 22
BLKCMD_CBEST = 23
BLKCMD_CLEARMESSAGES = 24
BLKCMD_CLRSQUARE = 25
BLKCMD_CONVERT_BCF = 26
BLKCMD_CONVERT_ELO = 27
BLKCMD_CONVERT_USCF = 28
BLKCMD_COPYGAME = 29
BLKCMD_CRANK = 30
BLKCMD_CSHOUT = 31
BLKCMD_DATE = 32
BLKCMD_DECLINE = 33
BLKCMD_DRAW = 34
BLKCMD_ECO = 35
BLKCMD_EXAMINE = 36
BLKCMD_FINGER = 37
BLKCMD_FLAG = 38
BLKCMD_FLIP = 39
BLKCMD_FMESSAGE = 40
BLKCMD_FOLLOW = 41
BLKCMD_FORWARD = 42
BLKCMD_GAMES = 43
BLKCMD_GETGI = 44
BLKCMD_GETPI = 45
BLKCMD_GINFO = 46
BLKCMD_GOBOARD = 47
BLKCMD_HANDLES = 48
BLKCMD_HBEST = 49
BLKCMD_HELP = 50
BLKCMD_HISTORY = 51
BLKCMD_HRANK = 52
BLKCMD_INCHANNEL = 53
BLKCMD_INDEX = 54
BLKCMD_INFO = 55
BLKCMD_ISET = 56
BLKCMD_IT = 57
BLKCMD_IVARIABLES = 58
BLKCMD_JKILL = 59
BLKCMD_JOURNAL = 60
BLKCMD_JSAVE = 61
BLKCMD_KIBITZ = 62
BLKCMD_LIMITS = 63
BLKCMD_LINE = 64 # Not on FICS
BLKCMD_LLOGONS = 65
BLKCMD_LOGONS = 66
BLKCMD_MAILHELP = 67
BLKCMD_MAILMESS = 68
BLKCMD_MAILMOVES = 69
BLKCMD_MAILOLDMOVES = 70
BLKCMD_MAILSOURCE = 71
BLKCMD_MAILSTORED = 72
BLKCMD_MATCH = 73
BLKCMD_MESSAGES = 74
BLKCMD_MEXAMINE = 75
BLKCMD_MORETIME = 76
BLKCMD_MOVES = 77
BLKCMD_NEWS = 78
BLKCMD_NEXT = 79
BLKCMD_OBSERVE = 80
BLKCMD_OLDMOVES = 81
BLKCMD_OLDSTORED = 82
BLKCMD_OPEN = 83
BLKCMD_PARTNER = 84
BLKCMD_PASSWORD = 85
BLKCMD_PAUSE = 86
BLKCMD_PENDING = 87
BLKCMD_PFOLLOW = 88
BLKCMD_POBSERVE = 89
BLKCMD_PREFRESH = 90
BLKCMD_PRIMARY = 91
BLKCMD_PROMOTE = 92
BLKCMD_PSTAT = 93
BLKCMD_PTELL = 94
BLKCMD_PTIME = 95
BLKCMD_QTELL = 96
BLKCMD_QUIT = 97
BLKCMD_RANK = 98
BLKCMD_RCOPYGAME = 99
BLKCMD_RFOLLOW = 100
BLKCMD_REFRESH = 101
BLKCMD_REMATCH = 102
BLKCMD_RESIGN = 103
BLKCMD_RESUME = 104
BLKCMD_REVERT = 105
BLKCMD_ROBSERVE = 106
BLKCMD_SAY = 107
BLKCMD_SERVERS = 108
BLKCMD_SET = 109
BLKCMD_SHOUT = 110
BLKCMD_SHOWLIST = 111
BLKCMD_SIMABORT = 112
BLKCMD_SIMALLABORT = 113
BLKCMD_SIMADJOURN = 114
BLKCMD_SIMALLADJOURN = 115
BLKCMD_SIMGAMES = 116
BLKCMD_SIMMATCH = 117
BLKCMD_SIMNEXT = 118
BLKCMD_SIMOBSERVE = 119
BLKCMD_SIMOPEN = 120
BLKCMD_SIMPASS = 121
BLKCMD_SIMPREV = 122
BLKCMD_SMOVES = 123
BLKCMD_SMPOSITION = 124
BLKCMD_SPOSITION = 125
BLKCMD_STATISTICS = 126
BLKCMD_STORED = 127
BLKCMD_STYLE = 128
BLKCMD_SWITCH = 130
BLKCMD_TAKEBACK = 131
BLKCMD_TELL = 132
BLKCMD_TIME = 133
BLKCMD_TOMOVE = 134
BLKCMD_TOURNSET = 135
BLKCMD_UNALIAS = 136
BLKCMD_UNEXAMINE = 137
BLKCMD_UNOBSERVE = 138
BLKCMD_UNPAUSE = 139
BLKCMD_UPTIME = 140
BLKCMD_USCF = 141
BLKCMD_USTAT = 142
BLKCMD_VARIABLES = 143
BLKCMD_WHENSHUT = 144
BLKCMD_WHISPER = 145
BLKCMD_WHO = 146
BLKCMD_WITHDRAW = 147
BLKCMD_WNAME = 148
BLKCMD_XKIBITZ = 149
BLKCMD_XTELL = 150
BLKCMD_XWHISPER = 151
BLKCMD_ZNOTIFY = 152
BLKCMD_REPLY = 153 # Not on FICS
BLKCMD_SUMMON = 154
BLKCMD_SEEK = 155
BLKCMD_UNSEEK = 156
BLKCMD_SOUGHT = 157
BLKCMD_PLAY = 158
BLKCMD_ALIAS = 159
BLKCMD_NEWBIES = 160
BLKCMD_SR = 161
BLKCMD_CA = 162
BLKCMD_TM = 163
BLKCMD_GETGAME = 164
BLKCMD_CCNEWSE = 165
BLKCMD_CCNEWSF = 166
BLKCMD_CCNEWSI = 167
BLKCMD_CCNEWSP = 168
BLKCMD_CCNEWST = 169
BLKCMD_CSNEWSE = 170
BLKCMD_CSNEWSF = 171
BLKCMD_CSNEWSI = 172
BLKCMD_CSNEWSP = 173
BLKCMD_CSNEWST = 174
BLKCMD_CTNEWSE = 175
BLKCMD_CTNEWSF = 176
BLKCMD_CTNEWSI = 177
BLKCMD_CTNEWSP = 178
BLKCMD_CTNEWST = 179
BLKCMD_CNEWS = 180
BLKCMD_SNEWS = 181
BLKCMD_TNEWS = 182
BLKCMD_RMATCH = 183
BLKCMD_RSTAT = 184
BLKCMD_CRSTAT = 185
BLKCMD_HRSTAT = 186
BLKCMD_GSTAT = 187
# Note admin codes start from 300.
BLKCMD_ERROR_BADCOMMAND = 512
BLKCMD_ERROR_BADPARAMS = 513
BLKCMD_ERROR_AMBIGUOUS = 514
BLKCMD_ERROR_RIGHTS = 515
BLKCMD_ERROR_OBSOLETE = 516
BLKCMD_ERROR_REMOVED = 517
BLKCMD_ERROR_NOTPLAYING = 518
BLKCMD_ERROR_NOSEQUENCE = 519
BLKCMD_ERROR_LENGTH = 520
LIMIT_BLKCMD_ERRORS = 500
FICS_COMMANDS = [
'abort', 'accept', 'addlist', 'adjourn', 'alias', 'allobservers', 'assess',
'backward', 'bell', 'best', 'boards', 'bsetup', 'bugwho', 'cbest',
'clearmessages', 'convert_bcf', 'convert_elo', 'convert_uscf', 'copygame',
'crank', 'cshout', 'date', 'decline', 'draw', 'examine', 'finger', 'flag',
'flip', 'fmessage', 'follow', 'forward', 'games', 'gnotify', 'goboard',
'handles', 'hbest', 'help', 'history', 'hrank', 'inchannel', 'index',
'info', 'it', 'jkill', 'jsave', 'kibitz', 'limits', 'llogons', 'logons',
'mailhelp', 'mailmess', 'mailmoves', 'mailoldmoves', 'mailsource',
'mailstored', 'match', 'messages', 'mexamine', 'moretime', 'moves', 'news',
'next', 'observe', 'oldmoves', 'open', 'password', 'pause', 'pending',
'pfollow', 'play', 'pobserve', 'promote', 'pstat', 'qtell', 'quit', 'rank',
'refresh', 'resign', 'resume', 'revert', 'say', 'seek', 'servers', 'set',
'shout', 'showlist', 'simabort', 'simallabort', 'simadjourn',
'simalladjourn', 'simgames', 'simmatch', 'simnext', 'simobserve',
'simopen', 'simpass', 'simprev', 'smoves', 'smposition', 'sought',
'sposition', 'statistics', 'stored', 'style', 'sublist', 'switch',
'takeback', 'tell', 'time', 'unalias', 'unexamine', 'unobserve', 'unpause',
'unseek', 'uptime', 'ustat', 'variables', 'whisper', 'who', 'withdraw',
'xkibitz', 'xtell', 'xwhisper', 'znotify']
FICS_HELP = [
'_index', 'abort', 'abuse', 'academy', 'accept', 'addlist', 'addresses',
'adjourn', 'adjournments', 'adjudicate', 'adjudication', 'adm_app',
'adm_info', 'adm_new', 'admins', 'alias', 'allobservers', 'assess',
'atomic', 'audiochat', 'avail_vars', 'backward', 'bclock', 'bell', 'best',
'blind', 'blindfold', 'blindh', 'blitz', 'block_codes', 'bname', 'boards',
'brating', 'bsetup', 'bughouse', 'bughouse_strat', 'bugreport', 'bugwho',
'busy', 'ca', 'category', 'cbest', 'censor', 'chan_1', 'chan_4', 'channel',
'channel_list', 'channels', 'chess_adviser', 'chess_advisor',
'clearmessage', 'clearmessages', 'clock', 'clocks', 'clrsquare', 'cls',
'cls_info', 'command', 'commands', 'commit', 'computer_app',
'computer_list', 'computers', 'confidentiality', 'convert_bcf',
'convert_elo', 'convert_uscf', 'copygame', 'crank', 'crazyhouse',
'crazyhouse_strat', 'credit', 'crstat', 'cshout', 'csnewse', 'csnewsf',
'csnewsi', 'csnewsp', 'csnewst', 'date', 'decline', 'disclaimer',
'disconnection', 'draw', 'eco', 'eggo', 'email', 'etime', 'examine', 'exl',
'fen', 'fics_faq', 'fics_lingo', 'finger', 'flag', 'flip', 'fmessage',
'follow', 'formula', 'forward', 'fr', 'fr_rules', 'ftp_hints', 'games',
'games', 'getgame', 'getgi', 'getpi', 'ginfo', 'glicko', 'gnotify',
'goboard', 'handle', 'handles', 'hbest', 'help', 'highlight', 'history',
'hrank', 'hrstat', 'hstat', 'icsdrone', 'idlenotify', 'inchannel', 'index',
'indexfile', 'inetchesslib', 'info', 'intellegence', 'interfaces',
'intro_analysis', 'intro_basics', 'intro_general', 'intro_information',
'intro_moving', 'intro_playing', 'intro_settings', 'intro_talking',
'intro_welcome', 'irc_help', 'iset', 'it', 'iv_allresults', 'iv_atomic',
'iv_audiochat', 'iv_block', 'iv_boardinfo', 'iv_compressmove',
'iv_crazyhouse', 'iv_defprompt', 'iv_extascii', 'iv_extuserinfo', 'iv_fr',
'iv_gameinfo', 'iv_graph', 'iv_list', 'iv_lock', 'iv_pendinfo',
'iv_seekinfo', 'iv_seekremove', 'iv_startpos', 'ivariables', 'jkill',
'journal', 'jsave', 'kibitz', 'kiblevel', 'lag', 'lecture1', 'lessons',
'lightning', 'limits', 'links', 'lists', 'llogons', 'logons', 'losers',
'losers_chess', 'mailhelp', 'mailmess', 'mailmoves', 'mailoldmoves',
'mailstored', 'mamer', 'manual_usage', 'manual_vars', 'match',
'meeting_1_followup', 'meeting_1_long', 'meeting_1_short',
'meetings_index', 'messages', 'mexamine', 'moretime', 'motd', 'motd_fri',
'motd_help', 'motd_mon', 'motd_sat', 'motd_sun', 'motd_thu', 'motd_tue',
'motd_wed', 'moves', 'mule', 'new_features', 'newbie', 'news', 'next',
'noescape', 'noplay', 'notes', 'notify', 'observe', 'odds', 'oldmoves',
'oldpstat', 'open', 'partner', 'password', 'pause', 'pending', 'pfollow',
'pgn', 'ping', 'play', 'pobserve', 'powericsfaq', 'prefresh', 'primary',
'private', 'promote', 'pstat', 'ptell', 'ptime', 'qtell', 'quit', 'rank',
'rating_changes', 'ratings', 'rcopygame', 'rd', 'refresh', 'register',
'relay', 'relay_operator', 'rematch', 'replay', 'resign', 'result',
'resume', 'revert', 'rfollow', 'rmatch', 'robofics', 'robserve', 'rstat',
'sabort', 'say', 'sdraw', 'seek', 'servers', 'set', 'setup', 'shout',
'shout_quota', 'showadmins', 'showlist', 'showsrs', 'simabort',
'simadjourn', 'simallabort', 'simalladjourn', 'simgames', 'simmatch',
'simnext', 'simobserve', 'simopen', 'simpass', 'simprev', 'simuls',
'skype', 'smoves', 'smposition', 'sought', 'spending', 'sposition', 'sr',
'sr_info', 'standard', 'statistics', 'stats', 'stc', 'stored', 'style',
'style12', 'sublist', 'suicide_chess', 'summon', 'switch', 'system_alias',
'takeback', 'team', 'teamgames', 'tell', 'time', 'timeseal',
'timeseal_mac', 'timeseal_os2', 'timeseal_unix', 'timeseal_windows',
'timezones', 'tm', 'tomove', 'totals', 'totals_info', 'tournset',
'town_meetings', 'townmtg1', 'unalias', 'unexamine', 'unobserve',
'unpause', 'unseek', 'untimed', 'uptime', 'uscf', 'uscf_faq', 'ustat',
'v_autoflag', 'v_automail', 'v_availinfo', 'v_availmax', 'v_availmin',
'v_bell', 'v_bugopen', 'v_chanoff', 'v_cshout', 'v_ctell', 'v_echo',
'v_flip', 'v_formula', 'v_gin', 'v_height', 'v_highlight', 'v_inc',
'v_interface', 'v_jprivate', 'v_kibitz', 'v_kiblevel', 'v_language',
'v_mailmess', 'v_messreply', 'v_notakeback', 'v_notifiedby', 'v_open',
'v_pgn', 'v_pin', 'v_private', 'v_prompt', 'v_provshow', 'v_ptime',
'v_rated', 'v_ropen', 'v_seek', 'v_shout', 'v_silence', 'v_simopen',
'v_style', 'v_tell', 'v_time', 'v_tolerance', 'v_tourney', 'v_tzone',
'v_unobserve', 'v_width', 'variables', 'wclock', 'webpage', 'whenshut',
'whisper', 'who', 'wild', 'withdraw', 'wname', 'wrating', 'xkibitz',
'xtell', 'xwhisper', 'zhouse', 'znotify']
|
# This file is part of Radicale Server - Calendar Server
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
# Copyright © 2008-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Custom authentication.
Only the username is checked (the hard-coded test user "tmp"); intended for testing.
"""
from radicale import auth
class Auth(auth.BaseAuth):
    """Test-only authentication backend accepting a single fixed user."""

    def login(self, login, password):
        """Return the login name for the test user "tmp"; reject everyone else."""
        return login if login == "tmp" else ""
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The Project U-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os
import random
# Deterministic fuzzing: the SEED environment variable (hex string) must
# be set by the harness; int(None, 16) would raise if it is missing.
random.seed(int(os.getenv("SEED"), 16))
from utils import util
from prjuray.db import Database
def gen_sites():
    """Yield (tile_name, site_name) for the first site of every CLEM_R tile."""
    database = Database(util.get_db_root(), util.get_part())
    grid = database.grid()
    for tile in sorted(grid.tiles()):
        info = grid.gridinfo_at_loc(grid.loc_of_tilename(tile))
        if info.tile_type in ['CLEM_R']:
            yield tile, sorted(info.sites.keys())[0]
def write_params(params):
    """Write the fuzzer parameters to params.csv.

    params maps tile name -> (site, value); rows are written sorted by
    tile under a "tile,val,site" header.  A context manager is used so
    the file handle is always closed (the original leaked it).
    """
    rows = ['tile,val,site\n']
    for tile, (site, val) in sorted(params.items()):
        rows.append('%s,%s,%s\n' % (tile, val, site))
    with open('params.csv', 'w') as f:
        f.write(''.join(rows))
def run():
    """Emit a Verilog top module with one self-looped, pin-locked LUT6 per
    CLEM_R site, recording each site's fuzz state in params.csv."""
    print('''
module top();
''')
    params = {}
    sites = list(gen_sites())
    # Pair every site with a fuzz state bit; params records the choice so
    # the result can be correlated with the bitstream later.
    for (tile_name, site_name), isone in zip(sites,
                                             util.gen_fuzz_states(len(sites))):
        params[tile_name] = (site_name, isone)
        print('''
    (* KEEP, DONT_TOUCH, LOC = "{loc}", LOCK_PINS="I0:A1 I1:A2 I2:A3 I3:A4 I4:A5 I5:A6" *)
    wire loop_{loc};
    LUT6 #(.INIT(64'b{isone}) ) lut_{loc} (
            .I0(loop_{loc}),
            .I1(1),
            .I2(1),
            .I3(1),
            .I4(1),
            .I5(1),
            .O(loop_{loc})
            );
    '''.format(
        loc=site_name,
        isone=isone,
    ))
    print("endmodule")
    write_params(params)
# Script entry point.
if __name__ == '__main__':
    run()
|
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import StringIO
import unittest
import os
import uuid
from swift.common.direct_client import direct_get_suffix_hashes
from swift.common.exceptions import DiskFileDeleted
from swift.common.internal_client import UnexpectedResponse
from swift.container.backend import ContainerBroker
from swift.common import utils
from swiftclient import client
from swift.common.ring import Ring
from swift.common.utils import Timestamp, get_logger, hash_path
from swift.obj.diskfile import DiskFileManager
from swift.common.storage_policy import POLICIES
from test.probe.brain import BrainSplitter
from test.probe.common import ReplProbeTest
class Test(ReplProbeTest):
    def setUp(self):
        """
        Reset all environment and start all servers.
        """
        super(Test, self).setUp()
        # Unique names per run so probes never collide with leftovers.
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        # BrainSplitter can stop/start the primary and handoff halves of
        # the object's nodes to deliberately create inconsistencies.
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name, 'object',
                                   policy=self.policy)
        self.int_client = self.make_internal_client(object_post_as_copy=False)
def tearDown(self):
super(Test, self).tearDown()
    def _get_object_info(self, account, container, obj, number):
        """Read the object's on-disk metadata from object server *number*.

        Returns the metadata dict, or None when no ring node maps onto
        that server's bind port.  May raise DiskFileDeleted if the
        object has been deleted on that server.
        """
        obj_conf = self.configs['object-server']
        config_path = obj_conf[number]
        options = utils.readconf(config_path, 'app:object-server')
        swift_dir = options.get('swift_dir', '/etc/swift')
        ring = POLICIES.get_object_ring(int(self.policy), swift_dir)
        part, nodes = ring.get_nodes(account, container, obj)
        for node in nodes:
            # assumes one to one mapping
            if node['port'] == int(options.get('bind_port')):
                device = node['device']
                break
        else:
            # No primary node lives on this server.
            return None
        mgr = DiskFileManager(options, get_logger(options))
        disk_file = mgr.get_diskfile(device, part, account, container, obj,
                                     self.policy)
        info = disk_file.read_metadata()
        return info
def _assert_consistent_object_metadata(self):
obj_info = []
for i in range(1, 5):
info_i = self._get_object_info(self.account, self.container_name,
self.object_name, i)
if info_i:
obj_info.append(info_i)
self.assertTrue(len(obj_info) > 1)
for other in obj_info[1:]:
self.assertDictEqual(obj_info[0], other)
    def _assert_consistent_deleted_object(self):
        """Assert no object server still has readable disk file metadata.

        DiskFileDeleted is the expected outcome on every server; finding
        actual metadata (or None) means the deletion did not replicate.
        """
        for i in range(1, 5):
            try:
                info = self._get_object_info(self.account, self.container_name,
                                             self.object_name, i)
                if info is not None:
                    self.fail('Expected no disk file info but found %s' % info)
            except DiskFileDeleted:
                # Deletion visible on this server, as expected.
                pass
    def _get_db_info(self, account, container, number):
        """Read the container db info dict from container server *number*.

        Returns ContainerBroker.get_info() for the db replica on that
        server, or None when no ring node maps onto its bind port.
        """
        server_type = 'container'
        obj_conf = self.configs['%s-server' % server_type]
        config_path = obj_conf[number]
        options = utils.readconf(config_path, 'app:container-server')
        root = options.get('devices')
        swift_dir = options.get('swift_dir', '/etc/swift')
        ring = Ring(swift_dir, ring_name=server_type)
        part, nodes = ring.get_nodes(account, container)
        for node in nodes:
            # assumes one to one mapping
            if node['port'] == int(options.get('bind_port')):
                device = node['device']
                break
        else:
            # No primary node lives on this server.
            return None
        # Rebuild the on-disk db path the same way the server does.
        path_hash = utils.hash_path(account, container)
        _dir = utils.storage_directory('%ss' % server_type, part, path_hash)
        db_dir = os.path.join(root, device, _dir)
        db_file = os.path.join(db_dir, '%s.db' % path_hash)
        db = ContainerBroker(db_file)
        return db.get_info()
def _assert_consistent_container_dbs(self):
db_info = []
for i in range(1, 5):
info_i = self._get_db_info(self.account, self.container_name, i)
if info_i:
db_info.append(info_i)
self.assertTrue(len(db_info) > 1)
for other in db_info[1:]:
self.assertEqual(db_info[0]['hash'], other['hash'],
'Container db hash mismatch: %s != %s'
% (db_info[0]['hash'], other['hash']))
def _assert_object_metadata_matches_listing(self, listing, metadata):
self.assertEqual(listing['bytes'], int(metadata['content-length']))
self.assertEqual(listing['hash'], metadata['etag'])
self.assertEqual(listing['content_type'], metadata['content-type'])
modified = Timestamp(metadata['x-timestamp']).isoformat
self.assertEqual(listing['last_modified'], modified)
def _put_object(self, headers=None, body=u'stuff'):
headers = headers or {}
self.int_client.upload_object(StringIO(body), self.account,
self.container_name,
self.object_name, headers)
    def _post_object(self, headers):
        """POST the given metadata headers to the test object."""
        self.int_client.set_object_metadata(self.account, self.container_name,
                                            self.object_name, headers)
    def _delete_object(self):
        """Delete the test object through the internal client."""
        self.int_client.delete_object(self.account, self.container_name,
                                      self.object_name)
    def _get_object(self, headers=None, expect_statuses=(2,)):
        """GET the test object, accepting only the given status families
        (2 means 2xx; pass (4,) to expect e.g. a 404 after deletion)."""
        return self.int_client.get_object(self.account,
                                          self.container_name,
                                          self.object_name,
                                          headers,
                                          acceptable_statuses=expect_statuses)
    def _get_object_metadata(self):
        """Return the test object's current metadata headers."""
        return self.int_client.get_object_metadata(self.account,
                                                   self.container_name,
                                                   self.object_name)
def _assert_consistent_suffix_hashes(self):
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
name_hash = hash_path(
self.account, self.container_name, self.object_name)
results = []
for node in onodes:
results.append(
(node,
direct_get_suffix_hashes(node, opart, [name_hash[-3:]])))
for (node, hashes) in results[1:]:
self.assertEqual(results[0][1], hashes,
'Inconsistent suffix hashes found: %s' % results)
    def test_object_delete_is_replicated(self):
        """A DELETE issued while half the nodes hold a newer object must
        win replication, and a later PUT must then win over the delete."""
        self.brain.put_container(policy_index=int(self.policy))
        # put object
        self._put_object()
        # put newer object with sysmeta to first server subset
        self.brain.stop_primary_half()
        self._put_object()
        self.brain.start_primary_half()
        # delete object on second server subset
        self.brain.stop_handoff_half()
        self._delete_object()
        self.brain.start_handoff_half()
        # run replicator
        self.get_to_final_state()
        # check object deletion has been replicated on first server set
        self.brain.stop_primary_half()
        self._get_object(expect_statuses=(4,))
        self.brain.start_primary_half()
        # check object deletion persists on second server set
        self.brain.stop_handoff_half()
        self._get_object(expect_statuses=(4,))
        # put newer object to second server set
        self._put_object()
        self.brain.start_handoff_half()
        # run replicator
        self.get_to_final_state()
        # check new object has been replicated on first server set
        self.brain.stop_primary_half()
        self._get_object()
        self.brain.start_primary_half()
        # check new object persists on second server set
        self.brain.stop_handoff_half()
        self._get_object()
    def test_object_after_replication_with_subsequent_post(self):
        """A POST after a split-brain PUT must not resurrect the older
        object: replication must spread the newer data plus the POSTed
        user metadata to all nodes."""
        self.brain.put_container(policy_index=0)
        # put object
        self._put_object(headers={'Content-Type': 'foo'}, body=u'older')
        # put newer object to first server subset
        self.brain.stop_primary_half()
        self._put_object(headers={'Content-Type': 'bar'}, body=u'newer')
        metadata = self._get_object_metadata()
        etag = metadata['etag']
        self.brain.start_primary_half()
        # post some user meta to all servers
        self._post_object({'x-object-meta-bar': 'meta-bar'})
        # run replicator
        self.get_to_final_state()
        # check that newer data has been replicated to second server subset
        self.brain.stop_handoff_half()
        metadata = self._get_object_metadata()
        self.assertEqual(etag, metadata['etag'])
        self.assertEqual('bar', metadata['content-type'])
        self.assertEqual('meta-bar', metadata['x-object-meta-bar'])
        self.brain.start_handoff_half()
        self._assert_consistent_object_metadata()
        self._assert_consistent_container_dbs()
        self._assert_consistent_suffix_hashes()
    def test_sysmeta_after_replication_with_subsequent_put(self):
        """When two PUTs race on different node halves, the newer PUT's
        sysmeta must survive replication everywhere, along with user
        metadata POSTed on top of it."""
        sysmeta = {'x-object-sysmeta-foo': 'older'}
        sysmeta2 = {'x-object-sysmeta-foo': 'newer'}
        usermeta = {'x-object-meta-bar': 'meta-bar'}
        self.brain.put_container(policy_index=0)
        # put object with sysmeta to first server subset
        self.brain.stop_primary_half()
        self._put_object(headers=sysmeta)
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        self.brain.start_primary_half()
        # put object with updated sysmeta to second server subset
        self.brain.stop_handoff_half()
        self._put_object(headers=sysmeta2)
        metadata = self._get_object_metadata()
        for key in sysmeta2:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta2[key])
        self._post_object(usermeta)
        metadata = self._get_object_metadata()
        # POST must add user meta without disturbing the newer sysmeta.
        for key in usermeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], usermeta[key])
        for key in sysmeta2:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta2[key])
        self.brain.start_handoff_half()
        # run replicator
        self.get_to_final_state()
        # check sysmeta has been replicated to first server subset
        self.brain.stop_primary_half()
        metadata = self._get_object_metadata()
        for key in usermeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], usermeta[key])
        for key in sysmeta2.keys():
            self.assertTrue(key in metadata, key)
            self.assertEqual(metadata[key], sysmeta2[key])
        self.brain.start_primary_half()
        # check user sysmeta ok on second server subset
        self.brain.stop_handoff_half()
        metadata = self._get_object_metadata()
        for key in usermeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], usermeta[key])
        for key in sysmeta2.keys():
            self.assertTrue(key in metadata, key)
            self.assertEqual(metadata[key], sysmeta2[key])
        self.brain.start_handoff_half()
        self._assert_consistent_object_metadata()
        self._assert_consistent_container_dbs()
        self._assert_consistent_suffix_hashes()
    def test_sysmeta_after_replication_with_subsequent_post(self):
        """User meta POSTed to stale nodes must merge with newer sysmeta.

        A newer PUT with sysmeta lands on one half of the nodes while a
        POST of user meta lands on the other (stale) half; after
        replication all nodes must serve both the sysmeta and the user
        meta.
        """
        sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
        usermeta = {'x-object-meta-bar': 'meta-bar'}
        self.brain.put_container(policy_index=int(self.policy))
        # put object
        self._put_object()
        # put newer object with sysmeta to first server subset
        self.brain.stop_primary_half()
        self._put_object(headers=sysmeta)
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        self.brain.start_primary_half()
        # post some user meta to second server subset
        self.brain.stop_handoff_half()
        self._post_object(usermeta)
        metadata = self._get_object_metadata()
        for key in usermeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], usermeta[key])
        # this half only has the original PUT, so no sysmeta visible yet
        for key in sysmeta:
            self.assertFalse(key in metadata)
        self.brain.start_handoff_half()
        # run replicator
        self.get_to_final_state()
        # check user metadata has been replicated to first server subset
        # and sysmeta is unchanged
        self.brain.stop_primary_half()
        metadata = self._get_object_metadata()
        expected = dict(sysmeta)
        expected.update(usermeta)
        for key in expected.keys():
            self.assertTrue(key in metadata, key)
            self.assertEqual(metadata[key], expected[key])
        self.brain.start_primary_half()
        # check user metadata and sysmeta both on second server subset
        self.brain.stop_handoff_half()
        metadata = self._get_object_metadata()
        for key in expected.keys():
            self.assertTrue(key in metadata, key)
            self.assertEqual(metadata[key], expected[key])
        self.brain.start_handoff_half()
        self._assert_consistent_object_metadata()
        self._assert_consistent_container_dbs()
        self._assert_consistent_suffix_hashes()
    def test_sysmeta_after_replication_with_prior_post(self):
        """User meta POSTed before a newer PUT must be discarded everywhere.

        A POST of user meta lands on one half, then a newer PUT with
        sysmeta lands on the other half; after replication the newer
        PUT's sysmeta must be on all nodes and the stale user meta on
        none.
        """
        sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
        usermeta = {'x-object-meta-bar': 'meta-bar'}
        self.brain.put_container(policy_index=int(self.policy))
        # put object
        self._put_object()
        # put user meta to first server subset
        self.brain.stop_handoff_half()
        self._post_object(headers=usermeta)
        metadata = self._get_object_metadata()
        for key in usermeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], usermeta[key])
        self.brain.start_handoff_half()
        # put newer object with sysmeta to second server subset
        self.brain.stop_primary_half()
        self._put_object(headers=sysmeta)
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        self.brain.start_primary_half()
        # run replicator
        self.get_to_final_state()
        # check stale user metadata is not replicated to first server subset
        # and sysmeta is unchanged
        self.brain.stop_primary_half()
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        for key in usermeta:
            self.assertFalse(key in metadata)
        self.brain.start_primary_half()
        # check stale user metadata is removed from second server subset
        # and sysmeta is replicated
        self.brain.stop_handoff_half()
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        for key in usermeta:
            self.assertFalse(key in metadata)
        self.brain.start_handoff_half()
        self._assert_consistent_object_metadata()
        self._assert_consistent_container_dbs()
        self._assert_consistent_suffix_hashes()
def test_post_ctype_replicated_when_previous_incomplete_puts(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta: ctype = baz
#
# ...run replicator and expect...
#
# t1.data:
# t2.meta: ctype = baz
self.brain.put_container(policy_index=0)
# incomplete write to primary half
self.brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'foo'})
self.brain.start_handoff_half()
# handoff write
self.brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar'})
self.brain.start_primary_half()
# content-type update to primary half
self.brain.stop_handoff_half()
self._post_object(headers={'Content-Type': 'baz'})
self.brain.start_handoff_half()
self.get_to_final_state()
# check object metadata
metadata = client.head_object(self.url, self.token,
self.container_name,
self.object_name)
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
expected = 'baz'
self.assertEqual(obj['content_type'], expected)
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
    def test_put_ctype_replicated_when_subsequent_post(self):
        """The newest PUT's content-type must survive a later plain POST."""
        # primary half       handoff half
        # ------------       ------------
        # t0.data: ctype = foo
        #                    t1.data: ctype = bar
        # t2.meta:
        #
        # ...run replicator and expect...
        #
        # t1.data: ctype = bar
        # t2.meta:
        self.brain.put_container(policy_index=0)
        # incomplete write
        self.brain.stop_handoff_half()
        self._put_object(headers={'Content-Type': 'foo'})
        self.brain.start_handoff_half()
        # handoff write
        self.brain.stop_primary_half()
        self._put_object(headers={'Content-Type': 'bar'})
        self.brain.start_primary_half()
        # metadata update with newest data unavailable
        self.brain.stop_handoff_half()
        self._post_object(headers={'X-Object-Meta-Color': 'Blue'})
        self.brain.start_handoff_half()
        self.get_to_final_state()
        # check object metadata
        metadata = client.head_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)
        # check container listing metadata
        container_metadata, objs = client.get_container(self.url, self.token,
                                                        self.container_name)
        for obj in objs:
            if obj['name'] == self.object_name:
                break
        else:
            self.fail('obj not found in container listing')
        expected = 'bar'
        self.assertEqual(obj['content_type'], expected)
        self.assertEqual(metadata['x-object-meta-color'], 'Blue')
        self._assert_object_metadata_matches_listing(obj, metadata)
        self._assert_consistent_container_dbs()
        self._assert_consistent_object_metadata()
        self._assert_consistent_suffix_hashes()
def test_post_ctype_replicated_when_subsequent_post_without_ctype(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta: ctype = bif
# t3.data: ctype = baz, color = 'Red'
# t4.meta: color = Blue
#
# ...run replicator and expect...
#
# t1.data:
# t4-delta.meta: ctype = baz, color = Blue
self.brain.put_container(policy_index=0)
# incomplete write
self.brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'foo',
'X-Object-Sysmeta-Test': 'older'})
self.brain.start_handoff_half()
# handoff write
self.brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar',
'X-Object-Sysmeta-Test': 'newer'})
self.brain.start_primary_half()
# incomplete post with content type
self.brain.stop_handoff_half()
self._post_object(headers={'Content-Type': 'bif'})
self.brain.start_handoff_half()
# incomplete post to handoff with content type
self.brain.stop_primary_half()
self._post_object(headers={'Content-Type': 'baz',
'X-Object-Meta-Color': 'Red'})
self.brain.start_primary_half()
# complete post with no content type
self._post_object(headers={'X-Object-Meta-Color': 'Blue',
'X-Object-Sysmeta-Test': 'ignored'})
# 'baz' wins over 'bar' but 'Blue' wins over 'Red'
self.get_to_final_state()
# check object metadata
metadata = self._get_object_metadata()
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
expected = 'baz'
self.assertEqual(obj['content_type'], expected)
self.assertEqual(metadata['x-object-meta-color'], 'Blue')
self.assertEqual(metadata['x-object-sysmeta-test'], 'newer')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_put_ctype_replicated_when_subsequent_posts_without_ctype(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta:
# t3.meta
#
# ...run replicator and expect...
#
# t1.data: ctype = bar
# t3.meta
self.brain.put_container(policy_index=0)
self._put_object(headers={'Content-Type': 'foo',
'X-Object-Sysmeta-Test': 'older'})
# incomplete write to handoff half
self.brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar',
'X-Object-Sysmeta-Test': 'newer'})
self.brain.start_primary_half()
# incomplete post with no content type to primary half
self.brain.stop_handoff_half()
self._post_object(headers={'X-Object-Meta-Color': 'Red',
'X-Object-Sysmeta-Test': 'ignored'})
self.brain.start_handoff_half()
# incomplete post with no content type to handoff half
self.brain.stop_primary_half()
self._post_object(headers={'X-Object-Meta-Color': 'Blue'})
self.brain.start_primary_half()
self.get_to_final_state()
# check object metadata
metadata = self._get_object_metadata()
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
expected = 'bar'
self.assertEqual(obj['content_type'], expected)
self._assert_object_metadata_matches_listing(obj, metadata)
self.assertEqual(metadata['x-object-meta-color'], 'Blue')
self.assertEqual(metadata['x-object-sysmeta-test'], 'newer')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_posted_metadata_only_persists_after_prior_put(self):
# newer metadata posted to subset of nodes should persist after an
# earlier put on other nodes, but older content-type on that subset
# should not persist
self.brain.put_container(policy_index=0)
# incomplete put to handoff
self.brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'oldest',
'X-Object-Sysmeta-Test': 'oldest',
'X-Object-Meta-Test': 'oldest'})
self.brain.start_primary_half()
# incomplete put to primary
self.brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'oldest',
'X-Object-Sysmeta-Test': 'oldest',
'X-Object-Meta-Test': 'oldest'})
self.brain.start_handoff_half()
# incomplete post with content-type to handoff
self.brain.stop_primary_half()
self._post_object(headers={'Content-Type': 'newer',
'X-Object-Meta-Test': 'newer'})
self.brain.start_primary_half()
# incomplete put to primary
self.brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'newest',
'X-Object-Sysmeta-Test': 'newest',
'X-Object-Meta-Test': 'newer'})
self.brain.start_handoff_half()
# incomplete post with no content-type to handoff which still has
# out of date content-type
self.brain.stop_primary_half()
self._post_object(headers={'X-Object-Meta-Test': 'newest'})
metadata = self._get_object_metadata()
self.assertEqual(metadata['x-object-meta-test'], 'newest')
self.assertEqual(metadata['content-type'], 'newer')
self.brain.start_primary_half()
self.get_to_final_state()
# check object metadata
metadata = self._get_object_metadata()
self.assertEqual(metadata['x-object-meta-test'], 'newest')
self.assertEqual(metadata['x-object-sysmeta-test'], 'newest')
self.assertEqual(metadata['content-type'], 'newest')
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
self.assertEqual(obj['content_type'], 'newest')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
    def test_post_trumped_by_prior_delete(self):
        """A DELETE must win over a later POST once replication completes.

        New metadata and content-type posted to a subset of nodes should
        not cause the object to persist after replication of an earlier
        delete on the other nodes.
        """
        self.brain.put_container(policy_index=0)
        # incomplete put
        self.brain.stop_primary_half()
        self._put_object(headers={'Content-Type': 'oldest',
                                  'X-Object-Sysmeta-Test': 'oldest',
                                  'X-Object-Meta-Test': 'oldest'})
        self.brain.start_primary_half()
        # incomplete put then delete
        self.brain.stop_handoff_half()
        self._put_object(headers={'Content-Type': 'oldest',
                                  'X-Object-Sysmeta-Test': 'oldest',
                                  'X-Object-Meta-Test': 'oldest'})
        self._delete_object()
        self.brain.start_handoff_half()
        # handoff post
        self.brain.stop_primary_half()
        self._post_object(headers={'Content-Type': 'newest',
                                   'X-Object-Sysmeta-Test': 'ignored',
                                   'X-Object-Meta-Test': 'newest'})
        # check object metadata: the POST updates user meta and ctype on
        # the nodes that never saw the delete, but cannot touch sysmeta
        metadata = self._get_object_metadata()
        self.assertEqual(metadata['x-object-sysmeta-test'], 'oldest')
        self.assertEqual(metadata['x-object-meta-test'], 'newest')
        self.assertEqual(metadata['content-type'], 'newest')
        self.brain.start_primary_half()
        # delete trumps later post
        self.get_to_final_state()
        # check object is now deleted
        self.assertRaises(UnexpectedResponse, self._get_object_metadata)
        container_metadata, objs = client.get_container(self.url, self.token,
                                                        self.container_name)
        self.assertEqual(0, len(objs))
        self._assert_consistent_container_dbs()
        self._assert_consistent_deleted_object()
        self._assert_consistent_suffix_hashes()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Minimal path in a 80x80 matrix, from top left node to bottom right node.
Moving up, down, left, or right directions.
'''
from __future__ import print_function
import timeit
import os

# Python 2 compatibility: use the lazy range.
try:
    range = xrange
except NameError:
    pass

# Fix: str.strip() removes any of the listed *characters* from both ends,
# not the literal suffix, so os.getcwd().strip('py_solutions_81-90') could
# also eat unrelated leading/trailing characters of the path.  Remove the
# directory suffix explicitly instead.
_SUFFIX = 'py_solutions_81-90'
path = os.getcwd()
if path.endswith(_SUFFIX):
    path = path[:-len(_SUFFIX)]

with open(path + 'euler_txt/matrix.txt') as f:
    # edges[y][x] is the cost of entering cell (y, x).
    edges = [list(map(int, v.split(','))) for v in f]

# traveled[y][x] is the best known path cost to (y, x); the string 'inf'
# marks "not visited yet" (compared with == throughout this module).
traveled = [['inf'] * 80 for _ in range(80)]
def euler_83():
    """Return the minimal path sum from (0, 0) to (79, 79).

    Repeatedly drains a work queue of cells whose neighbours may need
    relaxing (see traverse).  Uses collections.deque because
    list.pop(0) is O(n) per dequeue; push_heap's extend() works on a
    deque unchanged.
    """
    from collections import deque
    x = y = 0
    queue = deque([[y, x]])
    while queue:
        y, x = queue.popleft()
        traverse(y, x, queue)
    return traveled[79][79]
def traverse(y, x, heap):
    """Relax the four neighbours of (y, x).

    Recurses into the cheapest improved neighbour and queues the
    remaining improved neighbours on ``heap`` for later processing.

    Fix: the original collected moves in a dict *keyed by cost*, so when
    two neighbours received the same tentative cost one of them was
    silently dropped (never queued, never traversed).  A list of
    (cost, name) pairs keeps every improved move.
    """
    bounds = 80
    if traveled[y][x] == 'inf':
        traveled[y][x] = curr = edges[y][x]
    else:
        curr = traveled[y][x]
    # Bottom-right corner: nothing further to relax.
    if x + 1 >= bounds and y + 1 >= bounds:
        return
    moves = []
    if y + 1 < bounds:
        cost = d_edge(y, x, curr)
        if cost is not False:
            moves.append((cost, 'd_vertex'))
    if x + 1 < bounds:
        cost = r_edge(y, x, curr)
        if cost is not False:
            moves.append((cost, 'r_vertex'))
    if y - 1 >= 0:
        cost = u_edge(y, x, curr)
        if cost is not False:
            moves.append((cost, 'u_vertex'))
    if x - 1 >= 0:
        cost = l_edge(y, x, curr)
        if cost is not False:
            moves.append((cost, 'l_vertex'))
    if moves:
        _, next_mv = min(moves)
        # Defer every improved move except the one we follow now.
        push_heap(y, x, heap, [name for _, name in moves if name != next_mv])
        if next_mv == 'd_vertex':
            traverse(y + 1, x, heap)
        elif next_mv == 'r_vertex':
            traverse(y, x + 1, heap)
        elif next_mv == 'u_vertex':
            traverse(y - 1, x, heap)
        else:
            traverse(y, x - 1, heap)
def d_edge(y, x, curr):
    """Relax the cell below (y, x).

    Returns the new path cost when it improves (or first reaches) the
    cell, otherwise False.
    """
    candidate = curr + edges[y + 1][x]
    known = traveled[y + 1][x]
    if known == 'inf' or candidate < known:
        traveled[y + 1][x] = candidate
        return candidate
    return False
def r_edge(y, x, curr):
    """Relax the cell to the right of (y, x).

    Returns the new path cost when it improves (or first reaches) the
    cell, otherwise False.
    """
    candidate = curr + edges[y][x + 1]
    known = traveled[y][x + 1]
    if known == 'inf' or candidate < known:
        traveled[y][x + 1] = candidate
        return candidate
    return False
def u_edge(y, x, curr):
    """Relax the cell above (y, x).

    Returns the new path cost when it improves (or first reaches) the
    cell, otherwise False.
    """
    candidate = curr + edges[y - 1][x]
    known = traveled[y - 1][x]
    if known == 'inf' or candidate < known:
        traveled[y - 1][x] = candidate
        return candidate
    return False
def l_edge(y, x, curr):
    """Relax the cell to the left of (y, x).

    Returns the new path cost when it improves (or first reaches) the
    cell, otherwise False.
    """
    candidate = curr + edges[y][x - 1]
    known = traveled[y][x - 1]
    if known == 'inf' or candidate < known:
        traveled[y][x - 1] = candidate
        return candidate
    return False
def push_heap(y, x, heap, heap_mv):
    """Append the coordinates of each deferred move name to the queue."""
    coords = {
        'd_vertex': [y + 1, x],
        'r_vertex': [y, x + 1],
        'u_vertex': [y - 1, x],
        'l_vertex': [y, x - 1],
    }
    for name in heap_mv:
        heap.append(coords[name])
if __name__ == '__main__':
    # Time the solver and report the answer with wall-clock duration.
    start = timeit.default_timer()
    print('Answer: {}'.format(euler_83()))
    stop = timeit.default_timer()
    print('Time: {0:9.5f}'.format(stop - start))
|
"""Registry of available TrueType font files
XXX Currently two copies of exactly the same font
will likely confuse the registry because the
specificFonts set will only have one of the
metrics sets. Nothing breaks at the moment
because of this, but it's not ideal.
"""
from ttfquery import describe, findsystem
import cPickle, time, traceback, os, sys
import logging
log =logging.getLogger( __name__ )
FILENAME, MODIFIERS, SPECIFICNAME, FONTNAME, FAMILY = range(5)
class Registry(object):
    """Object providing centralized registration of TTF files
    Attributes:
        families -- mapping from TrueType font families
            to sub-families and then to general fonts
            (as a set of font-names).
        fonts -- mapping from general fonts to modifiers to
            specific font instances
        specificFonts -- mapping from specific font names
            to the entire "metrics" set for the particular
            font.
        files -- mapping from (absolute) filenames to
            specific font names
        shortFiles -- mapping from font filename basenames
            to font-file-lists
        DIRTY -- flag indicating whether the registry has
            had a new font registered (i.e. whether it should
            be saved out to disk).
    """
    # NOTE(review): this class is Python 2 only (cPickle, dict.has_key,
    # "except X, err" syntax).
    DIRTY = 0
    # Default pickle filename; set by load() when given a path.
    filename = ""

    def __init__(self):
        """Initialize the Registry"""
        self.families = {}
        self.fonts = {}
        self.specificFonts = {}
        self.files = {}
        self.shortFiles = {}

    def clear( self ):
        """Clear out the all tables and mark unchanged"""
        self.families.clear()
        self.fonts.clear()
        self.specificFonts.clear()
        self.files.clear()
        self.shortFiles.clear()
        self.dirty(0)

    def dirty(self, dirty = 1):
        """Mark the registry as changed/unchanged"""
        self.DIRTY = dirty

    def metadata(
        self,
        filename,
        force = 0
    ):
        """Retrieve metadata from font file
        filename -- fully specified path to the font file
        force -- if false, and the metadata is already
            available for this file, do not access the
            font file to retrieve, just return the existing
            metadata.
        return value:
            tuple of:
                filename -- fully specified absolute path
                modifiers -- (weightInteger, italicsFlag)
                specificName -- specific name of the particular
                    font stored in the given file, the name of
                    the "modified" font
                fontName -- name of the general font which
                    the modifiers are specialising
                specifier -- family specifier, two-tuple of
                    high-level and sub-level font classifications
                    based on font characteristics as encoded
                    in the font file.
        """
        filename = os.path.abspath( filename )
        if self.files.has_key( filename ) and not force:
            return self.specificFonts.get( self.files[filename] )
        # NOTE(review): the font object opened here is never explicitly
        # closed -- presumably relies on garbage collection; confirm
        # against describe.openFont's contract.
        font = describe.openFont(filename)
        try:
            modifiers = describe.modifiers( font )
        except (KeyError,AttributeError), err:
            # Fonts missing weight/italic tables still get registered.
            modifiers = (None,None)
        specificName, fontName = describe.shortName( font )
        specifier = describe.family(font)
        return (
            filename,
            modifiers,
            specificName,
            fontName,
            specifier,
        )

    def register(
        self,
        filename,
        modifiers = None,
        specificName = None,
        fontName = None,
        familySpecifier = None,
        force = 0,
    ):
        """Do the actual registration of a filename & metadata
        See metadata function for description of the various
        arguments.  If modifiers == None then the metadata function
        will be used to scan for the metadata.
        force -- if true, force re-reading font-file even if we already
            have the meta-data for the file loaded.
        """
        filename = os.path.abspath( filename )
        if self.files.has_key( filename ) and not force:
            return self.specificFonts.get( self.files[filename] )
        self.dirty(1)
        if modifiers == None:
            (filename, modifiers, specificName, fontName, familySpecifier) = self.metadata(filename, force = force)
        description = (filename, modifiers, specificName, fontName, familySpecifier)
        try:
            self.files[filename] = specificName
            major,minor = familySpecifier
            # Nested dicts used as sets: family -> sub-family -> {font: 1}
            self.families.setdefault(major,{}).setdefault(minor,{})[fontName] = 1
            self.fonts.setdefault(fontName, {}).setdefault(modifiers,[]).append(specificName)
            self.specificFonts[ specificName ] = description
            self.shortFiles.setdefault(os.path.basename(filename), []).append( filename )
        except Exception:
            # Roll back the files entry so a half-registered font does
            # not shadow a later retry.
            if self.files.has_key(filename):
                del self.files[filename]
            raise
        return description

    def familyMembers( self, major, minor=None ):
        """Get all (general) fonts for a given family"""
        major = major.upper()
        if not minor:
            result = []
            for key,set in self.families.get(major,{}).items():
                result.extend( set.keys())
            return result
        minor = minor.upper()
        return self.families.get( major, {}).get(minor,{}).keys()

    def fontMembers( self, fontName, weight=None, italics=None ):
        """Get specific font names for given generic font name
        weight -- if specified, only members with the given weight
        italics -- if specified, only members where the flag matches
        returns list of specific font names
        """
        table = self.fonts.get( fontName, {})
        items = table.items()
        items.sort()
        if weight is not None:
            weight = describe.weightNumber( weight )
            items = [item for item in items if item[0][0]==weight]
        if italics is not None:
            items = [item for item in items if item[0][1]==italics]
        result = []
        for item in items:
            result.extend( item[1])
        return result

    def fontForms( self, fontName ):
        """Retrieve the set of font-forms (weight,italics) available in a font"""
        return self.fonts.get( fontName, {}).keys()

    def fontFile( self, specificName ):
        """Return the absolute path-name for a given specific font"""
        description = self.specificFonts.get( specificName )
        if description:
            return description[0]
        else:
            raise KeyError( """Couldn't find font with specificName %r, can't retrieve filename for it"""%( specificName,))

    def matchName( self, name, single=0 ):
        """Try to find a general font based on a name"""
        # Tries progressively looser matches: exact general name, exact
        # specific name, family name, then substring matches.  With
        # single true, returns the first hit; otherwise accumulates all
        # hits in a dict used as a set.
        result = {}
        if self.fonts.has_key( name ):
            v = name
            if single:
                return v
            else:
                result[v] = 1
        if self.specificFonts.has_key( name ):
            v = self.specificFonts[name][FONTNAME]
            if single:
                return v
            else:
                result[v] = 1
        if self.families.has_key( name.upper() ):
            for general in self.familyMembers( name ):
                if single:
                    return general
                result[general] = 1
        testname = name.lower()
        for specific in self.specificFonts.keys():
            if specific.lower().find( testname ) > -1:
                if single:
                    return specific
                result[ self.specificFonts[specific][FONTNAME]]=1
        for majorFamily in self.families.keys():
            if majorFamily.lower().find( testname ) > -1:
                for item in self.familyMembers( majorFamily ):
                    if single:
                        return item
                    result[item] = 1
            else:
                # if previous was true, we already included everything
                # that could be included here...
                for minorFamily in self.families[majorFamily].keys():
                    if minorFamily.lower().find( testname ) > -1:
                        for item in self.familyMembers( majorFamily, minorFamily ):
                            if single:
                                return item
                            result[item] = 1
        if not result:
            raise KeyError( """Couldn't find a font with name %r"""%(name,))
        return result.keys()

    def save( self, file= None, force=0 ):
        """Attempt to save the font metadata to a pickled file
        file -- a file open in binary write mode or a filename
        force -- if not true and DIRTY false, then don't actually
            save anything
        returns number of records saved
        """
        if not force and not self.DIRTY:
            return 0
        file = file or self.filename
        if not file:
            raise TypeError( """Attempted to save %r to default file, no default file specified"""% (self,))
        if not hasattr( file, 'write'):
            file = open( file, 'wb' )
        cPickle.dump( self.specificFonts.values(), file, 1 )
        return len(self.specificFonts)

    def load( self, file, clearFirst=1 ):
        """Attempt to load the font metadata from a pickled file
        file -- a file open in binary read mode or a filename
        clearFirst -- if true, clear tables first, and reset DIRTY
            to 0 after finished
        """
        if clearFirst:
            self.clear()
        if not hasattr( file, 'read'):
            self.filename = file
            file = open( file, 'rb' )
        table = cPickle.load( file )
        for filename, modifiers, specificName, fontName, familySpecifier in table:
            ## Minimal sanity check...
            if os.path.isfile( filename ):
                self.register(filename, modifiers, specificName, fontName, familySpecifier)
        if clearFirst:
            self.dirty(0)
        return len(table)

    def scan( self, paths=None, printErrors=0, force = 0 ):
        """Scan the given paths registering each found font"""
        new, failed = [],[]
        for filename in findsystem.findFonts(paths):
            try:
                self.register( filename, force = force )
            except Exception, err:
                log.info( 'Failure scanning %s', filename )
                if printErrors:
                    log.warn( "%s", traceback.format_exc())
                failed.append( filename )
            else:
                new.append( filename )
        return new, failed
def load(*arguments, **named):
    """Construct a Registry from a saved metadata file.

    All arguments are forwarded to Registry.load; the populated
    registry is returned.
    """
    result = Registry()
    result.load(*arguments, **named)
    return result
def main():
    """Command-line entry point: build or refresh a font cache.

    Usage: ttffiles [registryFile [directories]]
    Updates registryFile (default "font.cache") by scanning the given
    directories, or the system font directories by default.
    Returns the process exit status (always 0).

    Fixes: drop the useless `exit` local that shadowed the builtin,
    the redundant function-level `import sys` (the module already
    imports sys at the top), and the unused `usage` string (folded
    into this docstring).
    """
    if sys.argv[1:2]:
        testFilename = sys.argv[1]
        if sys.argv[2:]:
            directories = sys.argv[2:]
        else:
            directories = None
    else:
        testFilename = "font.cache"
        directories = None
    if os.path.isfile( testFilename ):
        registry = load( testFilename )
    else:
        registry = Registry()
    new, failed = registry.scan( directories, printErrors = False, force = 0)
    log.info( '%s fonts available', len(new) )
    registry.save(testFilename)
    return 0
# Allow running the registry builder as a script.
if __name__ == "__main__":
    main()
|
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manage hosts in the current zone.
"""
import UserDict
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.scheduler import filters
# Scheduler configuration options; registered on FLAGS below.
host_manager_opts = [
    cfg.IntOpt('reserved_host_disk_mb',
               default=0,
               help='Amount of disk in MB to reserve for host/dom0'),
    cfg.IntOpt('reserved_host_memory_mb',
               default=512,
               help='Amount of memory in MB to reserve for host/dom0'),
    cfg.MultiStrOpt('scheduler_available_filters',
                    default=['nova.scheduler.filters.standard_filters'],
                    help='Filter classes available to the scheduler which may '
                         'be specified more than once. An entry of '
                         '"nova.scheduler.filters.standard_filters" '
                         'maps to all filters included with nova.'),
    cfg.ListOpt('scheduler_default_filters',
                default=[
                    'RetryFilter',
                    'AvailabilityZoneFilter',
                    'RamFilter',
                    'ComputeFilter',
                    'ComputeCapabilitiesFilter'
                ],
                help='Which filter class names to use for filtering hosts '
                     'when not specified in the request.'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(host_manager_opts)

LOG = logging.getLogger(__name__)
class ReadOnlyDict(UserDict.IterableUserDict):
    """A read-only dict.

    All mutating operations raise TypeError; update() is only useful
    for the initial population performed by __init__.

    Fix: update() now *copies* the source mapping instead of aliasing
    it (``self.data = source``), so a later mutation of the caller's
    dict can no longer change this supposedly read-only dict behind
    our back.
    """
    def __init__(self, source=None):
        self.data = {}
        self.update(source)

    def __setitem__(self, key, item):
        raise TypeError

    def __delitem__(self, key):
        raise TypeError

    def clear(self):
        raise TypeError

    def pop(self, key, *args):
        raise TypeError

    def popitem(self):
        raise TypeError

    def update(self, source=None):
        if source is None:
            return
        elif isinstance(source, UserDict.UserDict):
            self.data = dict(source.data)
        elif isinstance(source, dict):
            self.data = dict(source)
        else:
            raise TypeError
class HostState(object):
    """Mutable and immutable information tracked for a host.

    This is an attempt to remove the ad-hoc data structures
    previously used and lock down access.
    """

    def __init__(self, host, topic, capabilities=None, service=None):
        self.host = host
        self.topic = topic
        # Capability and service records are exposed read-only.
        caps = {} if capabilities is None else capabilities
        self.capabilities = ReadOnlyDict(caps.get(topic, None))
        self.service = ReadOnlyDict({} if service is None else service)
        # Mutable available resources; virtually "consumed" as
        # instances are scheduled onto this host.
        self.free_ram_mb = 0
        self.free_disk_mb = 0
        self.vcpus_total = 0
        self.vcpus_used = 0

    def update_from_compute_node(self, compute):
        """Update information about a host from its compute_node info."""
        disk_mb = compute['local_gb'] * 1024
        ram_mb = compute['memory_mb']
        # Hold back the configured host/dom0 reservations.
        if FLAGS.reserved_host_disk_mb > 0:
            disk_mb -= FLAGS.reserved_host_disk_mb
        if FLAGS.reserved_host_memory_mb > 0:
            ram_mb -= FLAGS.reserved_host_memory_mb
        #NOTE(jogo) free_ram_mb can be negative
        self.free_ram_mb = ram_mb
        self.total_usable_ram_mb = ram_mb
        self.free_disk_mb = disk_mb
        self.vcpus_total = compute['vcpus']

    def consume_from_instance(self, instance):
        """Update information about a host from instance info."""
        self.free_ram_mb -= instance['memory_mb']
        self.free_disk_mb -= (instance['root_gb'] +
                              instance['ephemeral_gb']) * 1024
        self.vcpus_used += instance['vcpus']

    def passes_filters(self, filter_fns, filter_properties):
        """Return whether or not this host passes filters."""
        if self.host in filter_properties.get('ignore_hosts', []):
            LOG.debug(_('Host filter fails for ignored host %(host)s'),
                      {'host': self.host})
            return False
        force_hosts = filter_properties.get('force_hosts', [])
        if force_hosts:
            # Forced hosts bypass the filter functions entirely.
            if self.host not in force_hosts:
                LOG.debug(_('Host filter fails for non-forced host %(host)s'),
                          {'host': self.host})
            return self.host in force_hosts
        for filter_fn in filter_fns:
            if not filter_fn(self, filter_properties):
                LOG.debug(_('Host filter function %(func)s failed for '
                            '%(host)s'),
                          {'func': repr(filter_fn),
                           'host': self.host})
                return False
        LOG.debug(_('Host filter passes for %(host)s'), {'host': self.host})
        return True

    def __repr__(self):
        return ("host '%s': free_ram_mb:%s free_disk_mb:%s" %
                (self.host, self.free_ram_mb, self.free_disk_mb))
class HostManager(object):
    """Base HostManager class."""

    # Can be overriden in a subclass
    host_state_cls = HostState

    def __init__(self):
        self.service_states = {}  # { <host> : { <service> : { cap k : v }}}
        # All filter classes available per scheduler_available_filters;
        # _choose_host_filters narrows this down per request.
        self.filter_classes = filters.get_filter_classes(
                FLAGS.scheduler_available_filters)

    def _choose_host_filters(self, filters):
        """Since the caller may specify which filters to use we need
        to have an authoritative list of what is permissible. This
        function checks the filter names against a predefined set
        of acceptable filters.

        Returns a list of bound host_passes functions for the named
        filters; raises SchedulerHostFilterNotFound listing every
        unknown filter name.
        """
        if filters is None:
            filters = FLAGS.scheduler_default_filters
        if not isinstance(filters, (list, tuple)):
            filters = [filters]
        good_filters = []
        bad_filters = []
        for filter_name in filters:
            found_class = False
            for cls in self.filter_classes:
                if cls.__name__ == filter_name:
                    found_class = True
                    filter_instance = cls()
                    # Get the filter function
                    filter_func = getattr(filter_instance,
                            'host_passes', None)
                    if filter_func:
                        good_filters.append(filter_func)
                    break
            if not found_class:
                bad_filters.append(filter_name)
        if bad_filters:
            msg = ", ".join(bad_filters)
            raise exception.SchedulerHostFilterNotFound(filter_name=msg)
        return good_filters

    def filter_hosts(self, hosts, filter_properties, filters=None):
        """Filter hosts and return only ones passing all filters"""
        filtered_hosts = []
        filter_fns = self._choose_host_filters(filters)
        for host in hosts:
            if host.passes_filters(filter_fns, filter_properties):
                filtered_hosts.append(host)
        return filtered_hosts

    def update_service_capabilities(self, service_name, host, capabilities):
        """Update the per-service capabilities based on this notification."""
        LOG.debug(_("Received %(service_name)s service update from "
                    "%(host)s.") % locals())
        service_caps = self.service_states.get(host, {})
        # Copy the capabilities, so we don't modify the original dict
        capab_copy = dict(capabilities)
        capab_copy["timestamp"] = timeutils.utcnow()  # Reported time
        service_caps[service_name] = capab_copy
        self.service_states[host] = service_caps

    def get_all_host_states(self, context, topic):
        """Returns a dict of all the hosts the HostManager
        knows about. Also, each of the consumable resources in HostState
        are pre-populated and adjusted based on data in the db.

        For example:
        {'192.168.1.100': HostState(), ...}

        Note: this can be very slow with a lot of instances.
        InstanceType table isn't required since a copy is stored
        with the instance (in case the InstanceType changed since the
        instance was created)."""

        if topic != 'compute':
            raise NotImplementedError(_(
                "host_manager only implemented for 'compute'"))

        host_state_map = {}

        # Make a compute node dict with the bare essential metrics.
        compute_nodes = db.compute_node_get_all(context)
        for compute in compute_nodes:
            service = compute['service']
            if not service:
                LOG.warn(_("No service for compute ID %s") % compute['id'])
                continue
            host = service['host']
            capabilities = self.service_states.get(host, None)
            host_state = self.host_state_cls(host, topic,
                    capabilities=capabilities,
                    service=dict(service.iteritems()))
            host_state.update_from_compute_node(compute)
            host_state_map[host] = host_state

        # "Consume" resources from the host the instance resides on.
        instances = db.instance_get_all(context,
                columns_to_join=['instance_type'])
        for instance in instances:
            host = instance['host']
            if not host:
                continue
            # Instances on hosts with no compute_node record are skipped.
            host_state = host_state_map.get(host, None)
            if not host_state:
                continue
            host_state.consume_from_instance(instance)
        return host_state_map
|
'''
Created on Nov 5, 2015
@author: David Zwicker <dzwicker@seas.harvard.edu>
'''
from __future__ import division
import numpy as np
class Cuboid(object):
    """An axis-aligned box in n dimensions, stored as lower corner + extent."""
    def __init__(self, pos, size):
        """Create a cuboid with lower corner `pos` and edge lengths `size`."""
        self.pos = np.asarray(pos)
        self.size = np.asarray(size)
        assert len(self.pos) == len(self.size)
    @classmethod
    def from_points(cls, p1, p2):
        """Return the cuboid spanned by two opposite corner points."""
        p1 = np.asarray(p1)
        p2 = np.asarray(p2)
        lower = np.minimum(p1, p2)
        extent = np.abs(p1 - p2)
        return cls(lower, extent)
    @classmethod
    def from_centerpoint(cls, centerpoint, size):
        """Return a cuboid of edge lengths `size` centred on `centerpoint`."""
        centerpoint = np.asarray(centerpoint)
        size = np.asarray(size)
        return cls(centerpoint - size/2, size)
    def copy(self):
        """Return a new cuboid with the same corner and extent."""
        return self.__class__(self.pos, self.size)
    def __repr__(self):
        return "%s(pos=%s, size=%s)" % (self.__class__.__name__, self.pos,
                                        self.size)
    def set_corners(self, p1, p2):
        """Redefine the cuboid from two opposite corner points."""
        p1 = np.asarray(p1)
        p2 = np.asarray(p2)
        self.pos = np.minimum(p1, p2)
        self.size = np.abs(p1 - p2)
    @property
    def bounds(self):
        """Per-axis (lower, upper) pairs."""
        return [(lo, lo + ext) for lo, ext in zip(self.pos, self.size)]
    @property
    def corners(self):
        """The lower and upper corner points."""
        return self.pos, self.pos + self.size
    @corners.setter
    def corners(self, ps):
        self.set_corners(ps[0], ps[1])
    @property
    def dimension(self):
        """Number of spatial dimensions."""
        return len(self.pos)
    @property
    def slices(self):
        """Integer `slice` objects covering the cuboid, one per axis."""
        return [slice(int(lo), int(lo + ext))
                for lo, ext in zip(self.pos, self.size)]
    @property
    def centroid(self):
        """Geometric centre, one coordinate per axis."""
        return [lo + ext/2 for lo, ext in zip(self.pos, self.size)]
    @property
    def volume(self):
        """Product of all edge lengths."""
        return np.prod(self.size)
    def translate(self, distance=0, inplace=True):
        """Shift the cuboid by `distance` along every axis."""
        distance = np.asarray(distance)
        if not inplace:
            return self.__class__(self.pos + distance, self.size)
        self.pos += distance
        return self
    def buffer(self, amount=0, inplace=True):
        """Grow the cuboid by `amount` on every side."""
        amount = np.asarray(amount)
        if not inplace:
            return self.__class__(self.pos - amount, self.size + 2*amount)
        self.pos -= amount
        self.size += 2*amount
        return self
    def scale(self, factor=1, inplace=True):
        """Multiply both the corner position and the size by `factor`."""
        factor = np.asarray(factor)
        if not inplace:
            return self.__class__(self.pos * factor, self.size * factor)
        self.pos *= factor
        self.size *= factor
        return self
|
import requests
import sys, getopt, os
import time
import datetime
CHUNK_SIZE = 1024    # bytes pulled from the response stream per iteration
MB_SIZE = 1048576    # bytes per megabyte, for human-readable output

links = None         # module-level defaults (main() uses its own locals)
outputdir = None


def main():
    """Download every URL listed in the -f file into the -o directory.

    Renders a 25-character progress bar with current speed and ETA while
    each file downloads, then prints an elapsed-time/average-speed summary.
    Exits with status 2 on bad or missing arguments.
    """
    # BUGFIX: the assignments in the option loop make these names locals,
    # so they must be initialised here; previously the `is None` checks
    # raised UnboundLocalError when -f/-o were omitted instead of printing
    # the friendly "Missing ..." messages.
    links = None
    outputdir = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hf:o:", ["file=", "outdir="])
    except getopt.GetoptError:
        print('usage: bulk-downloader.py -f <link.txt> -o <output_dir>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('usage: bulk-downloader.py -f <link.txt> -o <output_dir>')
            sys.exit()
        elif opt in ("-f", "--file"):
            links = arg
        elif opt in ("-o", "--outdir"):
            outputdir = arg
    if links is None:
        print('Missing links.txt parameter.')
        sys.exit(2)
    if outputdir is None:
        print('Missing output_dir parameter.')
        sys.exit(2)
    print('Output dir: ' + outputdir)
    if not os.path.exists(outputdir):
        print(outputdir + " does not exists... creating...")
        os.makedirs(outputdir)
        print(outputdir + " created!")
    print('Opening ' + links + "...")
    with open(links) as links_file:
        for url in links_file.readlines():
            url = url.replace('\n', '')
            # File name is everything after the last '/' of the URL.
            last_slash_index = url.rindex('/')
            file_name = url[last_slash_index + 1:]
            res = requests.get(url, stream=True)
            # BUGFIX: a missing Content-Length header used to crash on
            # int(None); fall back to 0 and skip the percentage math.
            total_length = int(res.headers.get('content-length') or 0)
            print("downloading " + file_name)
            dl = 0
            loops = 0
            speeds = 0
            elapsed_time = 1  # summary fallback if the response body is empty
            with open(outputdir + "/" + file_name, 'wb') as file:
                total_length_mb = total_length / MB_SIZE
                start_time = time.mktime(time.localtime())
                for chunk in res.iter_content(CHUNK_SIZE):
                    file.write(chunk)
                    elapsed_time = time.mktime(time.localtime()) - start_time
                    if elapsed_time == 0:
                        elapsed_time = 1
                    dl = dl + len(chunk)
                    done = int(25 * dl / total_length) if total_length else 0
                    total_mb_downloaded = float(dl / MB_SIZE)
                    remaining_size = total_length_mb - total_mb_downloaded
                    speed = float(total_mb_downloaded / elapsed_time)
                    speeds = speeds + speed
                    loops = loops + 1
                    # BUGFIX: guard the ETA division; speed is 0.0 right
                    # after the first (tiny) chunks.
                    eta = int(remaining_size / speed) if speed > 0 else 0
                    sys.stdout.write('\r[%s%s] %.2f Mb of %.2f Mb %.2f Mb/s ETA: %s' %
                                     (
                                         '=' * done, ' ' * (25 - done),
                                         total_mb_downloaded,
                                         float(total_length_mb),
                                         speed,
                                         str(datetime.timedelta(seconds=eta))
                                     )
                                     )
                    sys.stdout.flush()
                sys.stdout.write("\n")
            sys.stdout.write("\n")
            sys.stdout.flush()
            # BUGFIX: avoid ZeroDivisionError when the body was empty.
            avg_speed = float(speeds / loops) if loops else 0.0
            print("Elapsed time: %s, Avg Speed: %.2f Mb/s" %
                  (
                      str(datetime.timedelta(seconds=elapsed_time)), avg_speed)
                  )
            print(file_name + " saved to " + outputdir + " folder")


if __name__ == "__main__":
    main()
|
#############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms and
## conditions see http://www.qt.io/terms-conditions. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
# Pull in the shared Squish helpers (startApplication, openQmakeProject, ...).
source("../../shared/qtcreator.py")
# Squish object name of Qt Creator's C++ editor widget, used by all helpers.
cppEditorStr = ":Qt Creator_CppEditor::Internal::CPPEditorWidget"
def main():
    """Open the simplePlainCPP project and run both macro-rename scenarios."""
    global cppEditorStr
    folder = prepareTemplate(os.path.abspath(os.path.join(os.getcwd(), "..", "shared",
                                                          "simplePlainCPP")))
    if folder == None:
        test.fatal("Could not prepare test files - leaving test")
        return
    proFile = os.path.join(folder, "testfiles.pro")
    startApplication("qtcreator" + SettingsPath)
    if not startedWithoutPluginError():
        return
    openQmakeProject(proFile)
    if not testRenameMacroAfterSourceModification():
        return
    # A second header is added so the next scenario can move the macro
    # definition into it.
    headerName = "anothertestfile.h"
    addCPlusPlusFileToCurrentProject(headerName, "C++ Header File",
                                     expectedHeaderName=headerName)
    if not testRenameMacroAfterSourceMoving():
        return
    invokeMenuItem("File", "Save All")
    invokeMenuItem("File", "Exit")
def testRenameMacroAfterSourceModification():
    """Scenario 1: modify a source file, then rename SOME_MACRO_NAME.

    Returns True on success, False if a document could not be opened.
    """
    def __deleteAnyClass__():
        # Delete the AnyClass declaration: current line plus 5 lines below.
        global cppEditorStr
        if platform.system() == 'Darwin':
            type(cppEditorStr, "<Meta+Left>")
        else:
            type(cppEditorStr, "<Home>")
        markText(cppEditorStr, "Down", 5)
        type(cppEditorStr, "<Delete>")
    test.log("Testing rename macro after modifying source.")
    # Remember original document texts so changes can be verified/reverted.
    formerTexts = {}
    content = openDocumentPlaceCursor("testfiles.Headers.testfile\\.h",
                                      "class AnyClass", __deleteAnyClass__)
    if not content:
        return False
    formerTexts["testfiles.Headers.testfile\\.h"] = content
    content = openDocumentPlaceCursor("testfiles.Sources.testfile\\.cpp", "SOME_MACRO_NAME(a)")
    if not content:
        return False
    formerTexts["testfiles.Sources.testfile\\.cpp"] = content
    performMacroRenaming('SOME_OTHER_MACRO_NAME')
    verifyChangedContent(formerTexts, "SOME_MACRO_NAME", "SOME_OTHER_MACRO_NAME")
    revertChanges(formerTexts)
    return True
def testRenameMacroAfterSourceMoving():
    """Scenario 2: move the macro definition into another header, then
    rename it from its usage site.

    Returns True on success, False if any document could not be opened.
    """
    def __cut__():
        # Cut the macro definition: current line plus 4 lines below.
        global cppEditorStr
        if platform.system() == 'Darwin':
            type(cppEditorStr, "<Meta+Left>")
        else:
            type(cppEditorStr, "<Home>")
        markText(cppEditorStr, "Down", 4)
        invokeMenuItem("Edit", "Cut")
    def __paste__():
        # Paste the cut definition on a fresh line below the cursor.
        global cppEditorStr
        type(cppEditorStr, "<Return>")
        invokeMenuItem("Edit", "Paste")
    def __insertInclude__():
        # Make the moved macro visible from the source file again.
        global cppEditorStr
        typeLines(cppEditorStr, ['', '#include "anothertestfile.h"'])
    test.log("Testing rename macro after moving source.")
    formerTexts = {}
    content = openDocumentPlaceCursor("testfiles.Headers.testfile\\.h",
                                      "#define SOME_MACRO_NAME( X )\\", __cut__)
    if not content:
        return False
    formerTexts["testfiles.Headers.testfile\\.h"] = content
    content = openDocumentPlaceCursor("testfiles.Headers.anothertestfile\\.h",
                                      "#define ANOTHERTESTFILE_H", __paste__)
    if not content:
        return False
    formerTexts["testfiles.Headers.anothertestfile\\.h"] = content
    content = openDocumentPlaceCursor('testfiles.Sources.testfile\\.cpp',
                                      '#include "testfile.h"', __insertInclude__)
    if not content:
        return False
    formerTexts["testfiles.Sources.testfile\\.cpp"] = content
    placeCursorToLine(cppEditorStr, "SOME_MACRO_NAME(a)")
    performMacroRenaming("COMPLETELY_DIFFERENT_MACRO_NAME")
    verifyChangedContent(formerTexts, "SOME_MACRO_NAME", "COMPLETELY_DIFFERENT_MACRO_NAME")
    revertChanges(formerTexts)
    return True
def performMacroRenaming(newMacroName):
    """Rename the macro near the cursor to *newMacroName* via the
    'Rename Symbol Under Cursor' refactoring dialog.

    The cursor is first moved 10 characters left so it ends up inside the
    macro identifier rather than behind it.
    """
    for i in range(10):
        type(cppEditorStr, "<Left>")
    invokeContextMenuItem(waitForObject(cppEditorStr), "Refactor",
                          "Rename Symbol Under Cursor")
    waitForSearchResults()
    # Exactly two occurrences are expected in the search results
    # (presumably the definition and its single use — TODO confirm).
    validateSearchResult(2)
    replaceLineEdit = waitForObject("{leftWidget={text='Replace with:' type='QLabel' "
                                    "unnamed='1' visible='1'} "
                                    "type='Core::Internal::WideEnoughLineEdit' unnamed='1' "
                                    "visible='1' "
                                    "window=':Qt Creator_Core::Internal::MainWindow'}")
    replaceEditorContent(replaceLineEdit, newMacroName)
    clickButton(waitForObject("{text='Replace' type='QToolButton' unnamed='1' visible='1' "
                              "window=':Qt Creator_Core::Internal::MainWindow'}"))
def verifyChangedContent(origTexts, replacedSymbol, replacement):
    """Compare each document against its expected post-rename content.

    origTexts maps document names to their pre-rename text; the expected
    text is that content with *replacedSymbol* replaced by *replacement*.
    """
    global cppEditorStr
    successfullyCompared = []
    # NOTE: iteritems() keeps this script Python 2 only.
    for fileName,text in origTexts.iteritems():
        if openDocument(fileName):
            successfullyCompared.append(test.compare(waitForObject(cppEditorStr).plainText,
                                                     text.replace(replacedSymbol, replacement),
                                                     "Verifying content of %s" %
                                                     simpleFileName(fileName)))
        else:
            successfullyCompared.append(False)
            test.fail("Failed to open document %s" % simpleFileName(fileName))
    if successfullyCompared.count(True) == len(origTexts):
        test.passes("Successfully compared %d changed files" % len(origTexts))
    else:
        test.fail("Verified %d files - %d have been successfully changed and %d failed to "
                  "change correctly." % (len(origTexts), successfullyCompared.count(True),
                                         successfullyCompared.count(False)))
def revertChanges(files):
    """Revert each given document to its saved state via the File menu.

    A file that was never modified makes the Revert menu item unavailable;
    that case is reported as a warning, not a failure.
    """
    for f in files:
        simpleName = simpleFileName(f)
        if openDocument(f):
            try:
                invokeMenuItem('File', 'Revert "%s" to Saved' % simpleName)
                clickButton(waitForObject(":Revert to Saved.Proceed_QPushButton"))
                test.log("Reverted changes inside %s" % simpleName)
            except:
                test.warning("File '%s' cannot be reverted." % simpleName,
                             "Maybe it has not been changed at all.")
        else:
            test.fail("Could not open %s for reverting changes" % simpleName)
|
import os
import unittest
from vsg.rules import process
from vsg.rules import architecture
from vsg import vhdlFile
from vsg.tests import utils
# Read in the shared VHDL test input once at import time; every test case
# below analyzes this same parsed file object.
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(os.path.dirname(__file__), 'next_line_code_tag_test_input.vhd'))
oFile = vhdlFile.vhdlFile(lFile)
class testCodeTags(unittest.TestCase):
    """Verify that next-line code tags produce the expected rule violations
    for next_line_code_tag_test_input.vhd."""
    def setUp(self):
        # The shared test input must have parsed without errors.
        self.assertIsNone(eError)
    def _assert_violation_lines(self, oRule, lExpected):
        """Analyze the shared file with *oRule* and compare the reported
        violation line numbers against *lExpected*."""
        oRule.analyze(oFile)
        self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
    def test_rule_process_016(self):
        self._assert_violation_lines(process.rule_016(), [13, 25])
    def test_rule_process_018(self):
        self._assert_violation_lines(process.rule_018(), [15, 27])
    def test_rule_process_014(self):
        self._assert_violation_lines(process.rule_014(), [19])
    def test_rule_architecture_024(self):
        self._assert_violation_lines(architecture.rule_024(), [])
    def test_rule_process_002(self):
        self._assert_violation_lines(process.rule_002(), [])
|
# Copyright 2005 Mark Rose <mkrose@users.sourceforge.net>
# All rights reserved.
from NodeConstructor import *
RegisterNetwork(globals())
#######################################################################
# pitch control laws
# Lead-filtered pitch rate, converted to degrees for the laws below.
class filtered_pitch_rate(LeadFilter):
    a = 1.0
    input = "pitch_rate"
    gain = RadiansToDegrees
# Right/left elevator commands: the pitch command plus an opposite-signed
# 25% share of "da_program" — presumably differential-tail roll mixing
# (TODO confirm against the aileron channel).
class de_right(Adder):
    input_a = "elevator_pitch_deflection_command_mpo"
    input_b = "da_program"
    gain_b = 0.25
class de_left(Adder):
    input_a = "elevator_pitch_deflection_command_mpo"
    input_b = "da_program"
    gain_b = -0.25
# Symmetric elevator command (no differential share).
class de(Scale):
    input = "elevator_pitch_deflection_command_mpo"
# Lag filters (a = 20.2) on the surface commands, converted back to radians.
class de_left_f(LagFilter):
    input = "de_left"
    gain = DegreesToRadians
    a = 20.2
class de_right_f(LagFilter):
    input = "de_right"
    gain = DegreesToRadians
    a = 20.2
class de_f(LagFilter):
    input = "de"
    gain = DegreesToRadians
    a = 20.2
# Stick input scheduled into a commanded load factor (g).
class g_command(Schedule1):
    """NASA-79-tp1538 page 212"""
    schedule = table(breaks="-180.0 -80.0 -30.0 -10.0 10.0 40.0 180.0", values="-4.0 -4.0 -0.8 0.0 0.0 0.5 10.8", spacing=10.0)
    gain = 180.0
    input = "pitch_control"
class g_trim(Constant):
    value = 0.0
class trimmed_g_command(Adder):
    input_a = "g_command"
    input_b = "g_trim"
# Positive-g path, clamped to [0, 8].
class positive_g_limit(Scale):
    clamp_lo = 0.0
    clamp_hi = 8.0
    input = "trimmed_g_command"
# Negative-g path, floored by a qbar-dependent schedule.
class negative_g_limit(Greater):
    input_a = "trimmed_g_command"
    input_b = "negative_g_schedule"
    clamp_hi = 0.0
class negative_g_schedule(Scale):
    clamp_lo = -4.0
    clamp_hi = -1.0
    input = "qbar"
    gain = -0.000444
class limited_g_command(Adder):
    input_a = "negative_g_limit"
    input_b = "positive_g_limit"
class limited_g_command_f(LagFilter):
    a = 8.3
    input = "limited_g_command"
# Difference between limiter output and the filtered g command.
class elevator_deviation(Adder):
    input_a = "limited_g_command_f"
    input_b = "pitch_limiter"
    gain_a = -1.0
# Deviation gain tapers from 1.0 to 0.4 as dynamic pressure (qbar) grows.
class reduced_elevator_deviation(Schedule2):
    input_a = "elevator_deviation"
    input_b = "qbar"
    gain_a = 1.5
    gain_b = 0.001
    schedule = table('0 12.0 44.0 200.0', '1.0 1.0 0.4 0.4', spacing=4.0)
class elevator_pitch_deflection_command(Adder3):
    input_a = "reduced_elevator_deviation"
    input_b = "elevator_deviation_integrator"
    input_c = "alpha_f"
    gain_c = 0.5
class elevator_pitch_deflection_command_mpo(BooleanSwitch):
    """
    When the MPO is activated, the input command drives the elevators directly,
    bypassing both the g/aoa limiter and integrator.
    """
    channel = "F16.ManualPitchOverrideActive"
    input_a = "pitch_control"
    input_b = "elevator_pitch_deflection_command"
    gain_a = -25.0
class compensated_elevator_deviation(Adder):
    input_a = "reduced_elevator_deviation"
    input_b = "elevator_compensation"
    gain_b = -5.0
# Compensation is zero inside +/-25 of commanded deflection and ramps
# outside that dead zone — presumably anti-windup for the integrator
# below (TODO confirm).
class elevator_compensation(Schedule1):
    input = "elevator_pitch_deflection_command"
    schedule = table('-125.0 -25.0 25.0 125.0', '-100.0 0.0 0.0 100.0', spacing=25.0)
class elevator_deviation_integrator(Integrator):
    input = "compensated_elevator_deviation"
    gain = 5.0
    clamp_lo = -25.0
    clamp_hi = 25.0
# G/AoA limiter ###################################################################
class PitchLimiterControl(Node): pass
class pitch_limiter(PitchLimiterControl):
    """F16 G/AoA limiter circuit."""
    filtered_alpha = "alpha_f"
    filtered_g_command = "limited_g_command_f"
    alpha_break1 = 15.0
    alpha_break2 = 20.4
    pitch_rate_schedule = table('0.0 5000.0 15000.0 200000.0', '1.0 1.0 0.35 0.35', spacing=5000.0)
|
#-------------------------------------------------------------------------------
# PROJECT: VHDL Code Generator
# NAME: System
#
# LICENSE: GNU-GPL V3
#-------------------------------------------------------------------------------
__author__ = "BlakeTeam"
import lib.signature
from lib import *
from .Block import Block as _Block
from lib.Connection import Connection as _Connection
# Port direction flags used throughout the generator (see System.__getitem__).
IN = 1
OUT = 0
class System:
    def __init__(self,name,input_info,output_info):
        """ Structure that handles an abstract system

        :String name: Name of the system (Name of the project)
        :Int[] input_info: List of (name, size) pairs for the input ports of the system
        :Int[] output_info: List of (name, size) pairs for the output ports of the system
        """
        self.name = name # The name of the system
        self.block_name = set() # The name of all blocks on the system
        self.conn_name = set() # The name of all connections on the system
        self.block = [] # Block list of the system
        self.connections = {} # Connection dictionary of the system <Abstract Connection: QGraphicsLineItem>
        # Pseudo-block whose output ports expose the system's inputs.
        self.system_input = _Block((),[size for name,size in input_info],self)
        # Setting names to input ports
        for i in range(len(input_info)):
            self.system_input.output_ports[i].name = input_info[i][0]
        self.system_input.screenPos = (-50,0)
        self.system_input.setName("SystemInput")
        # Pseudo-block whose input ports expose the system's outputs.
        self.system_output = _Block([size for name,size in output_info],(),self)
        # Setting names to output ports
        for i in range(len(output_info)):
            self.system_output.input_ports[i].name = output_info[i][0]
        self.system_output.screenPos = (50,0)
        self.system_output.setName("SystemOutput")
        self.input_info = input_info
        self.output_info = output_info
        self.input_names = [name for name,size in input_info]
        self.output_names = [name for name,size in output_info]
        self.includedLibrary = ["ieee.std_logic_1164.all"] # TODO: review this, it needs to be modified
    def buildVHDLCode(self):
        """ Building the code that will be generated.

        Returns the complete VHDL source text for the system: entity ports,
        per-block signal declarations, inter-block wiring, each block's
        implementation and the final output wiring.
        """
        fileText = lib.signature.signature()
        # Including libraries
        fileText += "-- Including libraries\nLIBRARY ieee;\n"
        for i in self.includedLibrary:
            fileText += "USE %s;\n"%i
        fileText += "\n"
        fileText += "ENTITY %s IS\n"%self.name
        fileText += "-- Generating ports\n"
        fileText += "PORT (\n"
        # Generating input ports
        for i in self.system_input.output_ports:
            fileText += "%s: IN std_logic%s;\n"%(i.name,"" if i.size == 1 else "_vector(%d downto 0)"%(i.size - 1)) # TODO: changed here
        # Generating output ports
        for i in self.system_output.input_ports:
            fileText += "%s: OUT std_logic%s;\n"%(i.name,"" if i.size == 1 else "_vector(%d downto 0)"%(i.size - 1)) # TODO: changed here
        # Drop the trailing ";\n" left by the last port declaration.
        fileText = fileText[:-2]
        fileText += ");\n"
        fileText += "END %s;\n"%self.name
        # Architecture Implementation
        fileText += "\n-- Architecture Implementation\n"
        fileText += "ARCHITECTURE Arq_%s OF %s IS\n"%(self.name,self.name)
        fileText += "BEGIN\n"
        # Port declaration
        fileText += "-- Port declaration\n"
        # TODO: Overrated RAM
        for i in self.block:
            # Split each block's signals by direction; signal names are
            # namespaced as <block>__<port>.
            signals = i.getSignals()
            inputSig = []
            outputSig = []
            tempSig = []
            for name,size,mode in signals:
                if mode == IN:
                    inputSig.append((name,size))
                elif mode == OUT:
                    outputSig.append((name,size))
                else:
                    tempSig.append((name,size))
            fileText += "\n-- Declaring %s's ports%s\n"%(i.name," & temporary signals" if len(tempSig) != 0 else "") # TODO: changed here and moved this line
            fileText += "-- Input ports\n"
            for name,size in inputSig:
                fileText += "signal %s__%s: std_logic%s;\n"%(i.name,name,"" if size == 1 else "_vector(%d downto 0)"%(size - 1)) # TODO: changed here
            fileText += "\n-- Output ports\n"
            for name,size in outputSig:
                fileText += "signal %s__%s: std_logic%s;\n"%(i.name,name,"" if size == 1 else "_vector(%d downto 0)"%(size - 1)) # TODO: changed here
            if len(tempSig) != 0: # TODO: changed here
                fileText += "\n-- Temporary signals\n"
                for name,size in tempSig:
                    fileText += "signal %s__%s: std_logic%s;\n"%(i.name,name,"" if size == 1 else "_vector(%d downto 0)"%(size - 1)) # TODO: changed here
        # Defining connections
        fileText += "\n-- Defining connections\n"
        for i in self.block:
            for port_inp in i.input_ports:
                receiver = i.name + "__" + port_inp.name
                # Ports fed directly by the system input use the bare port
                # name; all others use the namespaced <block>__<port> form.
                if self.system_input == port_inp.connection.out_block:
                    sender = port_inp.connection.out_block.output_ports[port_inp.connection.ind_output].name
                else:
                    sender = port_inp.connection.out_block.name + "__" + port_inp.connection.out_block.output_ports[port_inp.connection.ind_output].name
                fileText += "%s <= %s;\n"%(receiver, sender)
            fileText += "\n"
        # Block implementations
        fileText += "\n-- Blocks implementation\n"
        for i in self.block:
            fileText += "-- Implementation of %s block\n"%i.name
            fileText += i.generate()
            fileText += "\n"
        # Connecting outputs
        fileText += "-- Connecting outputs\n"
        for i in self.system_output.input_ports:
            fileText += "%s <= %s__%s;\n"%(i.name,i.connection.out_block.name,i.connection.out_block.output_ports[i.connection.ind_output].name)
        fileText += "END Arq_%s;\n"%self.name
        # print("\nGENERATED CODE\n")
        # print(fileText)
        return fileText
    def __getitem__(self, name):
        """ Find a port by its name.

        Input ports are searched first, then output ports.
        If the port exists, returns a tuple (index, mode) where mode is
        IN or OUT; otherwise returns -1.

        :String name: The name of the wanted port
        """
        try:
            pos = self.input_names.index(name)
            return pos,IN
        except ValueError:
            try:
                pos = self.output_names.index(name)
                return pos,OUT
            except ValueError:
                return -1
    def connect(self,output_block,ind_output,input_block,ind_input,visualConnection = None):
        """ Create a connection from an output port to an input port.

        :param output_block: block producing the signal
        :param ind_output: index of the output port on output_block
        :param input_block: block consuming the signal
        :param ind_input: index of the input port on input_block
        :param visualConnection: optional graphics item shown in the UI
        """
        conn = _Connection(output_block,ind_output,input_block,ind_input,self) # Creating the connection between 2 blocks
        output_block.output_ports[ind_output].connection.append(conn) # Linking the connection with the output block
        input_block.input_ports[ind_input].connection = conn # Linking the connection with the input block
        self.connections.update({conn:visualConnection}) # Adding the connection to the connection list (on the system)
        return conn
|
from CommonServerPython import *
''' IMPORTS '''
from typing import Dict, Tuple, List, AnyStr, Optional, Union
import urllib3
# Disable insecure warnings
urllib3.disable_warnings()
"""GLOBALS/PARAMS
Attributes:
INTEGRATION_NAME:
Name of the integration as shown in the integration UI, for example: Microsoft Graph User.
    INTEGRATION_NAME_COMMAND:
        Command names should be written in all lower-case letters,
        and each word separated with a hyphen, for example: msgraph-user.
INTEGRATION_CONTEXT_NAME:
Context output names should be written in camel case, for example: MSGraphUser.
"""
INTEGRATION_NAME = 'Case Management Integration'
INTEGRATION_NAME_COMMAND = 'case-management'
INTEGRATION_CONTEXT_NAME = 'CaseManagement'
# Timestamp format shared by API parameters and fetch bookkeeping.
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
# Default look-back window for the first fetch-incidents run.
DEFAULT_FETCH_TIME = '3 days'
"""Helper function"""
def build_raw_tickets_to_context(tickets: Union[dict, list]):
    """Map raw API ticket entries onto Demisto context keys.

    Lists are converted element-wise; missing fields become None.
    """
    if isinstance(tickets, list):
        return [build_raw_tickets_to_context(entry) for entry in tickets]
    assignees = []
    for assignee in tickets.get('assignee', []):
        assignees.append({
            'ID': assignee.get('id'),
            'Name': assignee.get('name'),
        })
    return {
        'ID': tickets.get('id'),
        'Name': tickets.get('name'),
        'Category': tickets.get('category'),
        'Description': tickets.get('description'),
        'Timestamp': tickets.get('timestamp'),
        'IsOpen': tickets.get('isOpen'),
        'Assignee': assignees,
    }
def build_raw_users_to_context(users: Union[list, dict]):
    """Map raw API user entries onto Demisto context keys (lists element-wise)."""
    if isinstance(users, list):
        return [build_raw_users_to_context(entry) for entry in users]
    return {
        'ID': users.get('id'),
        'Username': users.get('username'),
    }
class Client(BaseClient):
    """HTTP client for the case-management API.

    Adds the integration's endpoints on top of BaseClient, plus a default
    result limit applied by list_tickets when callers pass none.
    """
    def __init__(self, base_url, limit=50, *args, **kwargs):
        # Default page size used by list_tickets when no explicit limit is given.
        self._limit = limit
        super().__init__(base_url, *args, **kwargs)

    def test_module(self) -> Dict:
        """Performs a basic GET request against the version endpoint.

        Returns:
            Raw response JSON (truthy if the request succeeded).
        """
        return self._http_request('GET', 'version')

    def list_tickets(self, ticket_id: Optional[AnyStr] = None,
                     limit: Optional[AnyStr] = None, from_time: Optional[datetime] = None
                     ) -> dict:
        """Gets tickets from the API, optionally filtered.

        Args:
            ticket_id: return only this ticket.
            limit: maximum number of tickets; falls back to the client default.
            from_time: only return tickets from this time onwards.

        Returns:
            Raw response JSON.
        """
        suffix = 'ticket'
        params = dict()
        if limit:
            params['limit'] = limit
        elif self._limit:
            # BUGFIX: this branch previously assigned `limit` (always falsy
            # here) instead of the configured default, so the client-level
            # limit was silently dropped from the request.
            params['limit'] = self._limit
        params.update(
            assign_params(
                id=ticket_id,
                fromTime=from_time.strftime(TIME_FORMAT) if from_time else None
            ))
        return self._http_request('GET', suffix, params=params)

    def close_ticket(self, ticket_id: AnyStr) -> dict:
        """Closes the given ticket.

        Args:
            ticket_id: ticket to close

        Returns:
            Raw response JSON
        """
        # The service endpoint to request from
        suffix: str = 'ticket/close'
        # Dictionary of params for the request
        params = {
            'id': ticket_id
        }
        return self._http_request('POST', suffix, params=params)

    def reopen_ticket(self, ticket_id: AnyStr) -> dict:
        """Reopens the given ticket.

        Args:
            ticket_id: ticket to reopen

        Returns:
            Raw response JSON
        """
        # The service endpoint to request from
        suffix = 'ticket/open'
        # Dictionary of params for the request
        params = {
            'id': ticket_id
        }
        return self._http_request('POST', suffix, params=params)

    def reset_ticket(self, ticket_id: str) -> dict:
        """Resets the given ticket.

        Args:
            ticket_id: ticket to reset

        Returns:
            Raw response JSON
        """
        # The service endpoint to request from
        suffix = 'ticket/reset'
        # Dictionary of params for the request
        params = {
            'id': ticket_id
        }
        return self._http_request('POST', suffix, params=params)

    def assign_ticket(self, ticket_id: str, users: List[str]) -> dict:
        """Assigns users to a ticket.

        Args:
            ticket_id: ticket to assign users to
            users: A list of users' id

        Returns:
            Raw response JSON
        """
        suffix = 'ticket/assign'
        params = {'id': ticket_id}
        body = {'users': users}
        return self._http_request('POST', suffix, params=params, json_data=body)

    def create_ticket(
            self, name: str = None, category: str = None, description: str = None,
            assignee: list = None, timestamp: str = None, is_open: bool = None
    ):
        """Creates a new ticket; timestamp defaults to "now" when omitted."""
        suffix = 'ticket'
        body = {'ticket': assign_params(
            name=name,
            category=category,
            description=description,
            assignee=assignee,
            timestamp=timestamp if timestamp else datetime.now().strftime(TIME_FORMAT),
            isOpen=is_open
        )}
        return self._http_request('POST', suffix, json_data=body)

    def list_users(self):
        """Lists all users known to the service."""
        suffix = 'user'
        return self._http_request('GET', suffix)
''' COMMANDS '''
@logger
def test_module_command(client: Client, *_) -> str:
    """Verify connectivity by querying the service's version endpoint."""
    raw_response = client.test_module()
    if not raw_response:
        raise DemistoException(f'{INTEGRATION_NAME} - Unexpected response from service: {raw_response}')
    return 'ok'
@logger
def get_ticket_command(client: Client, args: dict) -> Tuple[str, dict, dict]:
    """Retrieve a single ticket by its ID.

    Args:
        client: Client object
        args: Usually demisto.args()

    Returns:
        Outputs
    """
    ticket_to_get = args.get('ticket_id')
    raw_response = client.list_tickets(ticket_id=ticket_to_get)
    tickets = raw_response.get('ticket')
    if not tickets:
        return f'{INTEGRATION_NAME} - Could not find ticket ID: `{ticket_to_get}`', {}, {}
    context_entry = build_raw_tickets_to_context(tickets)
    context = {
        f'{INTEGRATION_CONTEXT_NAME}.Ticket(val.ID && val.ID === obj.ID)': context_entry
    }
    human_readable = tableToMarkdown(
        f'{INTEGRATION_NAME} - Ticket ID: `{ticket_to_get}`.',
        context_entry, headers=['ID', 'Name', 'Timestamp', 'Description', 'Assignee']
    )
    return human_readable, context, raw_response
@logger
def create_ticket_command(client: Client, args: dict) -> Tuple[str, dict, dict]:
    """Create a new ticket from the user-supplied arguments.

    Args:
        client: Client object
        args: Usually demisto.args()

    Returns:
        Outputs
    """
    raw_response = client.create_ticket(
        name=args.get('name'),
        category=args.get('category'),
        description=args.get('description'),
        assignee=argToList(args.get('assignee')),
        timestamp=args.get('timestamp'),
    )
    tickets = raw_response.get('ticket')
    if not tickets:
        raise DemistoException(f'{INTEGRATION_NAME} - Could not create new ticket!\n Response: {raw_response}')
    context_entry = build_raw_tickets_to_context(tickets)
    context = {
        f'{INTEGRATION_CONTEXT_NAME}.Ticket(val.ID && val.ID === obj.ID)': context_entry
    }
    title: str = f'{INTEGRATION_NAME} - Ticket has been successfully created.'
    human_readable = tableToMarkdown(
        title, context_entry, headers=['ID', 'Name', 'Timestamp', 'Description', 'Assignee']
    )
    return human_readable, context, raw_response
@logger
def assign_users_command(client: Client, args: dict) -> Tuple[str, dict, dict]:
    """Assign one or more users to a ticket.

    Args:
        client: Client object
        args: Usually demisto.args()

    Returns:
        Outputs
    """
    ticket_id = args.get('ticket_id')
    users = argToList(args.get('users'))
    raw_response = client.assign_ticket(ticket_id, users)  # type: ignore # [assignment]
    tickets = raw_response.get('ticket')
    if not tickets:
        return f'{INTEGRATION_NAME} - Could not assign users to ticket ID: {ticket_id}', {}, raw_response
    context_entry = build_raw_tickets_to_context(tickets)
    context = {
        f'{INTEGRATION_CONTEXT_NAME}.Ticket(val.ID && val.ID === obj.ID)': context_entry
    }
    human_readable = tableToMarkdown(
        f'{INTEGRATION_NAME} - Users has been assigned to {ticket_id}.',
        context_entry, headers=['ID', 'Name', 'Timestamp', 'Description', 'Assignee']
    )
    return human_readable, context, raw_response
@logger
def list_users_command(client: Client, *_) -> Tuple[str, dict, dict]:
    """List every user known to the service."""
    raw_response = client.list_users()
    if not raw_response:
        return f'{INTEGRATION_NAME} - Could not find any users.', {}, {}
    context_entry = build_raw_users_to_context(raw_response.get('user', []))
    context = {
        f'{INTEGRATION_CONTEXT_NAME}.User(val.ID && val.ID === obj.ID)': context_entry
    }
    human_readable = tableToMarkdown(f'{INTEGRATION_NAME} - Users list:',
                                     context_entry, headers=['Username', 'ID'])
    return human_readable, context, raw_response
@logger
def close_ticket_command(client: Client, args: dict) -> Tuple[str, dict, dict]:
    """Close a ticket by ID and confirm the service reports it closed.

    Args:
        client: Client object
        args: Usually demisto.args()

    Returns:
        Outputs
    """
    ticket_to_lock = args.get('ticket_id', '')
    raw_response = client.close_ticket(ticket_to_lock)
    tickets = raw_response.get('ticket')
    # Success requires the echoed ticket to match the requested ID and be closed.
    closed_ok = bool(tickets) and tickets[0].get('id') == ticket_to_lock and not tickets[0].get('isOpen')
    if not closed_ok:
        raise DemistoException(f'{INTEGRATION_NAME} - Could not close'
                               f' ticket `{ticket_to_lock}`.\nResponse: {raw_response}')
    ticket_obj = tickets[0]
    ticket_id = ticket_obj.get('id')
    context_entry = build_raw_tickets_to_context(ticket_obj)
    context = {
        f'{INTEGRATION_CONTEXT_NAME}.Ticket(val.ID && val.ID === obj.ID)': context_entry
    }
    human_readable = tableToMarkdown(
        f'{INTEGRATION_NAME} - Ticket `{ticket_id}` has been closed.',
        context_entry, headers=['ID', 'Name', 'Timestamp', 'Description', 'Assignee']
    )
    return human_readable, context, raw_response
@logger
def list_tickets_command(client: Client, args: dict) -> Tuple[str, dict, dict]:
    """List tickets, optionally capped by the 'limit' argument.

    Args:
        client: Client object
        args: Usually demisto.args(); may contain 'limit'

    Returns:
        human readable (markdown table), context outputs, raw API response
    """
    limit = args.get('limit')
    raw_response = client.list_tickets(limit=limit)
    tickets = raw_response.get('ticket')
    if tickets:
        title = f'{INTEGRATION_NAME} - Tickets list:'
        context_entry = build_raw_tickets_to_context(tickets)
        # BUGFIX: the DT key previously read `val.Name ==== obj.ID` — an
        # invalid `====` operator and the wrong field — so ticket entries
        # never deduplicated in context. Use the same key as every other
        # command in this integration.
        context = {
            f'{INTEGRATION_CONTEXT_NAME}.Ticket(val.ID && val.ID === obj.ID)': context_entry
        }
        human_readable = tableToMarkdown(title, context_entry)
        return human_readable, context, raw_response
    else:
        return f'{INTEGRATION_NAME} - Could not find any tickets.', {}, raw_response
''' COMMANDS MANAGER / SWITCH PANEL '''
@logger
def fetch_incidents_command(client: Client, last_fetch: dict, fetch_time: str) -> Tuple[list, dict]:
    """Fetch tickets created since the previous run and build incidents.

    Args:
        client: Client object
        last_fetch: previous run's lastRun dict (may hold 'timestamp')
        fetch_time: first-fetch look-back window, e.g. '3 days'

    Returns:
        list of incident dicts, and the new lastRun dict
    """
    if last_fetch:
        start_time = datetime.strptime(last_fetch.get('timestamp', ''), TIME_FORMAT)
    else:
        # First run: derive the starting point from the configured window.
        start_time, _ = parse_date_range(fetch_time if fetch_time else DEFAULT_FETCH_TIME)
    raw_response = client.list_tickets(from_time=start_time)
    incidents: list = []
    for ticket in raw_response.get('ticket') or []:
        incidents.append({
            'name': f'{INTEGRATION_NAME} - ticket number: {ticket.get("id")}',
            'rawJSON': json.dumps(ticket)
        })
        # Advance the checkpoint to the newest ticket timestamp seen.
        ticket_time = datetime.strptime(ticket.get('timestamp'), TIME_FORMAT)
        if start_time < ticket_time:
            start_time = ticket_time
    return incidents, {'timestamp': start_time.strftime(TIME_FORMAT)}
def main():
    """Read integration params, build the client and dispatch the command."""
    params = demisto.params()
    server = params.get('url')
    use_ssl = not params.get('insecure', False)
    use_proxy = params.get('proxy') == 'true'
    client = Client(server, use_ssl=use_ssl, proxy=use_proxy)
    command = demisto.command()
    demisto.debug(f'Command being called is {command}')
    # fetch-incidents is intentionally NOT in this table: it takes
    # (client, last_run, fetch_time) rather than (client, args).
    commands = {
        'test-module': test_module_command,
        f'{INTEGRATION_NAME_COMMAND}-list-tickets': list_tickets_command,
        f'{INTEGRATION_NAME_COMMAND}-get-ticket': get_ticket_command,
        f'{INTEGRATION_NAME_COMMAND}-create-ticket': create_ticket_command,
        f'{INTEGRATION_NAME_COMMAND}-close-ticket': close_ticket_command,
        f'{INTEGRATION_NAME_COMMAND}-assign-user': assign_users_command,
        f'{INTEGRATION_NAME_COMMAND}-list-users': list_users_command,
    }
    try:
        if command == 'fetch-incidents':
            incidents, last_run = fetch_incidents_command(client, demisto.getLastRun(), params.get('fetch_time'))
            demisto.incidents(incidents)
            demisto.setLastRun(last_run)
        # BUGFIX: this was a second `if` and fetch-incidents was also in the
        # commands dict, so every fetch re-invoked fetch_incidents_command
        # with the wrong arguments (client, demisto.args()).
        elif command in commands:
            return_outputs(*commands[command](client, demisto.args()))
    # Log exceptions
    except Exception as e:
        # BUGFIX: error message hard-coded 'AuthenticationExample' (copy-paste
        # from the example integration); report this integration's name.
        err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
        return_error(err_msg, error=e)
# Standard XSOAR entry guard: 'builtins' covers the py3 server runtime,
# '__builtin__' the py2 runtime, and '__main__' direct local execution.
# The original checked only 'builtins', which skips main() when the file
# is run locally or under the py2 runtime.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.