| code (string, lengths 3–1.05M) | repo_name (string, lengths 5–104) | path (string, lengths 4–251) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class BellMediaIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?
(?P<domain>
(?:
ctv|
tsn|
bnn|
thecomedynetwork|
discovery|
discoveryvelocity|
sciencechannel|
investigationdiscovery|
animalplanet|
bravo|
mtv|
space
)\.ca|
much\.com
)/.*?(?:\bvid=|-vid|~|%7E|/(?:episode)?)(?P<id>[0-9]{6})'''
_TESTS = [{
'url': 'http://www.ctv.ca/video/player?vid=706966',
'md5': 'ff2ebbeae0aa2dcc32a830c3fd69b7b0',
'info_dict': {
'id': '706966',
'ext': 'mp4',
'title': 'Larry Day and Richard Jutras on the TIFF red carpet of \'Stonewall\'',
'description': 'etalk catches up with Larry Day and Richard Jutras on the TIFF red carpet of "Stonewall”.',
'upload_date': '20150919',
'timestamp': 1442624700,
},
'expected_warnings': ['HTTP Error 404'],
}, {
'url': 'http://www.thecomedynetwork.ca/video/player?vid=923582',
'only_matching': True,
}, {
'url': 'http://www.tsn.ca/video/expectations-high-for-milos-raonic-at-us-open~939549',
'only_matching': True,
}, {
'url': 'http://www.bnn.ca/video/berman-s-call-part-two-viewer-questions~939654',
'only_matching': True,
}, {
'url': 'http://www.ctv.ca/YourMorning/Video/S1E6-Monday-August-29-2016-vid938009',
'only_matching': True,
}, {
'url': 'http://www.much.com/shows/atmidnight/episode948007/tuesday-september-13-2016',
'only_matching': True,
}, {
'url': 'http://www.much.com/shows/the-almost-impossible-gameshow/928979/episode-6',
'only_matching': True,
}]
_DOMAINS = {
'thecomedynetwork': 'comedy',
'discoveryvelocity': 'discvel',
'sciencechannel': 'discsci',
'investigationdiscovery': 'invdisc',
'animalplanet': 'aniplan',
}
def _real_extract(self, url):
domain, video_id = re.match(self._VALID_URL, url).groups()
domain = domain.split('.')[0]
return {
'_type': 'url_transparent',
'id': video_id,
'url': '9c9media:%s_web:%s' % (self._DOMAINS.get(domain, domain), video_id),
'ie_key': 'NineCNineMedia',
}
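# Worked example of the delegation: for
# http://www.thecomedynetwork.ca/video/player?vid=923582 the regex captures
# domain='thecomedynetwork.ca' and id='923582'; after the split and the
# _DOMAINS lookup, the resulting URL is '9c9media:comedy_web:923582'.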
|
mxamin/youtube-dl
|
youtube_dl/extractor/bellmedia.py
|
Python
|
unlicense
| 2,574
|
# Work around mbcs bug in distutils.
# http://bugs.python.org/issue10945
import codecs
try:
codecs.lookup('mbcs')
except LookupError:
ascii = codecs.lookup('ascii')
func = lambda name, enc=ascii: {True: enc}.get(name=='mbcs')
codecs.register(func)
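# Effect of the shim: on platforms lacking a native 'mbcs' codec,
# codecs.lookup('mbcs') now resolves to the ascii codec, while func returns
# None for every other name so other lookups keep their normal behaviour.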
from distutils.core import setup, Extension
import glob, os, shutil, fnmatch, platform
version = '1.1.61'
from generator import mavgen, mavparse
# path to message_definitions directory
mdef_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'message_definitions')
dialects_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'dialects')
v09_dialects = glob.glob(os.path.join(mdef_path, 'v0.9', '*.xml'))
v10_dialects = glob.glob(os.path.join(mdef_path, 'v1.0', '*.xml'))
if not "NOGEN" in os.environ:
for xml in v09_dialects:
shutil.copy(xml, os.path.join(dialects_path, 'v09'))
for xml in v10_dialects:
shutil.copy(xml, os.path.join(dialects_path, 'v10'))
for xml in v09_dialects:
dialect = os.path.basename(xml)[:-4]
wildcard = os.getenv("MAVLINK_DIALECT",'*')
if not fnmatch.fnmatch(dialect, wildcard):
continue
print("Building %s" % xml)
mavgen.mavgen_python_dialect(dialect, mavparse.PROTOCOL_0_9)
for xml in v10_dialects:
dialect = os.path.basename(xml)[:-4]
wildcard = os.getenv("MAVLINK_DIALECT",'*')
if not fnmatch.fnmatch(dialect, wildcard):
continue
print("Building %s" % xml)
mavgen.mavgen_python_dialect(dialect, mavparse.PROTOCOL_1_0)
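# Illustrative: setting MAVLINK_DIALECT restricts the loops above via
# fnmatch, e.g. MAVLINK_DIALECT=ardupilotmega python setup.py install builds
# only that dialect; the default wildcard '*' builds every XML found.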
extensions = [] # Assume we might be unable to build native code
if platform.system() != 'Windows':
extensions = [ Extension('mavnative',
sources = ['mavnative/mavnative.c'],
include_dirs = [
'generator/C/include_v1.0',
'mavnative'
]
) ]
else:
print("Skipping mavnative due to Windows possibly missing a compiler...")
setup (name = 'pymavlink',
version = version,
description = 'Python MAVLink code',
long_description = '''A Python library for handling MAVLink protocol streams and log files. This allows for the creation of simple scripts to analyse telemetry logs from autopilots such as ArduPilot which use the MAVLink protocol. See the scripts that come with the package for examples of small, useful scripts that use pymavlink. For more information about the MAVLink protocol see http://qgroundcontrol.org/mavlink/''',
url = 'http://github.com/mavlink/mavlink',
classifiers=['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering'
],
license='LGPLv3',
package_dir = { 'pymavlink' : '.' },
package_data = { 'pymavlink.dialects.v09' : ['*.xml'],
'pymavlink.dialects.v10' : ['*.xml'],
'pymavlink.generator' : [ '*.xsd',
'java/lib/*.*',
'java/lib/Messages/*.*',
'C/include_v0.9/*.h',
'C/include_v1.0/*.h',
'C/include_v1.0/*.hpp' ],
'pymavlink.generator.lib.minixsv': [ '*.xsd' ],
'pymavlink' : ['mavnative/*.h'] },
packages = ['pymavlink',
'pymavlink.generator',
'pymavlink.generator.lib',
'pymavlink.generator.lib.genxmlif',
'pymavlink.generator.lib.minixsv',
'pymavlink.dialects',
'pymavlink.dialects.v09',
'pymavlink.dialects.v10'],
scripts = [ 'tools/magfit_delta.py', 'tools/mavextract.py',
'tools/mavgraph.py', 'tools/mavparmdiff.py',
'tools/mavtogpx.py', 'tools/magfit_gps.py',
'tools/mavflightmodes.py', 'tools/mavlogdump.py',
'tools/mavparms.py', 'tools/magfit_motors.py',
'tools/mavflighttime.py', 'tools/mavloss.py',
'tools/mavplayback.py', 'tools/magfit.py',
'tools/mavgpslock.py',
'tools/mavmission.py',
'tools/mavsigloss.py',
'tools/mavsearch.py',
'tools/mavtomfile.py',
'tools/mavgen.py',
'tools/mavkml.py',
'tools/mavfft.py',
'tools/mavsummarize.py',
'tools/MPU6KSearch.py'],
ext_modules = extensions
)
|
GUBotDev/mavlink
|
pymavlink/setup.py
|
Python
|
lgpl-3.0
| 5,125
|
TYPE_OUTPUT_HTML = "HTML"
TYPE_OUTPUT_PDF = "PDF"
TYPE_OUTPUT_XLS = "XLS"
TYPE_OUTPUT_RTF = "RTF"
TYPE_OUTPUT_CSV = "CSV"
TYPE_OUTPUT_ODS = "ODS"
TYPE_OUTPUT_ODT = "ODT"
TYPE_OUTPUT_DOCX = "DOCX"
TYPE_OUTPUT_XLSX = "XLSX"
TYPE_OUTPUT_JPRINT = "JPRINT"
TYPE_OUTPUT_XML = "XML"
|
saguas/jasperserverlib
|
jasperserverlib/core/ReportOutputFormat.py
|
Python
|
mit
| 277
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import subprocess
import sys
from test_util import TestFailedError, prepareForIncrParse, run_command, \
serializeIncrParseMarkupFile
def testWithParserLib(test_file, test_case, pre_edit_file, post_edit_file,
after_roundtrip_file, swiftsyntax_lit_test_helper):
# =========================================================================
# First generate the pre-edit and post-edit Swift file and gather the edits
# and expected reparse regions. This is the parser for the special edit
# markup for testing incremental parsing
# =========================================================================
# Gather command line arguments for swift-syntax-test specifying the
# performed edits in this list
incremental_edit_args = []
reparse_args = []
try:
prepareForIncrParse(test_file=test_file, test_case=test_case,
pre_edit_file=pre_edit_file,
post_edit_file=post_edit_file,
incremental_edit_args=incremental_edit_args,
reparse_args=reparse_args)
except TestFailedError as e:
print('Test case "%s" of %s FAILed' % (test_case, test_file),
file=sys.stderr)
print(e.message, file=sys.stderr)
sys.exit(1)
try:
run_command([swiftsyntax_lit_test_helper, '-parse-incremental'] +
['-old-source-file', pre_edit_file] +
['-source-file', post_edit_file] +
incremental_edit_args + reparse_args +
['-out', after_roundtrip_file])
except subprocess.CalledProcessError as e:
print('Test case "%s" of %s FAILed' % (test_case, test_file),
file=sys.stderr)
print('Parsing the swift file failed:\n', file=sys.stderr)
print(e.output, file=sys.stderr)
sys.exit(1)
# Check if the two syntax trees are the same
try:
run_command(
[
'diff', '-u',
post_edit_file,
after_roundtrip_file
])
except subprocess.CalledProcessError as e:
print('Test case "%s" of %s FAILed' % (test_case, test_file),
file=sys.stderr)
print('Source file after incrementally parsing '
'does not match post-edit source file:\n\n',
file=sys.stderr)
print(e.output, file=sys.stderr)
sys.exit(1)
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Utility for testing incremental syntax parsing',
epilog='''
Based on a single template the utility generates a pre-edit and a post-edit
file. It then verifies that incrementally parsing the post-edit file based
on the pre-edit file results in the same syntax tree as reparsing the
post-edit file from scratch.
To generate the pre-edit and the post-edit file from the template, it
operates on markers of the form:
<<test_case<pre|||post>>>
These placeholders are replaced by:
- 'pre' if a different test case than 'test_case' is run
- 'pre' for the pre-edit version of 'test_case'
- 'post' for the post-edit version of 'test_case'
''')
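# Illustrative marker expansion (names are made up): a template line
#   foo(<<rename_arg<x|||y>>>)
# produces 'foo(x)' in the pre-edit file and, when the 'rename_arg' test
# case runs, 'foo(y)' in the post-edit file.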
parser.add_argument(
'file', type=argparse.FileType(),
help='The template file to test')
parser.add_argument(
'--test-case', default='',
help='The test case to execute. If no test case is specified all \
unnamed substitutions are applied')
parser.add_argument(
'--temp-dir', required=True,
help='A temporary directory where pre-edit and post-edit files can be \
saved')
parser.add_argument(
'--swift-syntax-test', required=False,
help='The path to swift-syntax-test')
parser.add_argument(
'--swiftsyntax-lit-test-helper', required=True,
help='The path to the lit-test-helper binary of SwiftSyntax')
parser.add_argument(
'--serialization-format', choices=['json', 'byteTree'],
default='json', help='''
The format that shall be used to transfer the syntax tree
''')
args = parser.parse_args(sys.argv[1:])
test_file = args.file.name
test_file_name = os.path.basename(test_file)
test_case = args.test_case
temp_dir = args.temp_dir
swift_syntax_test = args.swift_syntax_test
swiftsyntax_lit_test_helper = args.swiftsyntax_lit_test_helper
serialization_format = args.serialization_format
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
# FIXME: This check is transitional, once SwiftSyntax starts using the
# parser library it will become self-contained and not need
# swift-syntax-test for its lit tests. Contents of testWithParserLib()
# function will move here and replace what comes below.
if not swift_syntax_test:
pre_edit_file = temp_dir + '/' + test_file_name + '.' + test_case + \
'.pre.swift'
post_edit_file = temp_dir + '/' + test_file_name + '.' + test_case + \
'.post.swift'
after_roundtrip_file = temp_dir + '/' + test_file_name + '.' \
+ test_case + '.post_after_roundtrip.swift'
return testWithParserLib(
test_file=test_file,
test_case=test_case,
pre_edit_file=pre_edit_file,
post_edit_file=post_edit_file,
after_roundtrip_file=after_roundtrip_file,
swiftsyntax_lit_test_helper=swiftsyntax_lit_test_helper)
treeFileExtension = serialization_format
pre_edit_tree_file = temp_dir + '/' + test_file_name + '.' \
+ test_case + '.pre.' + treeFileExtension
incremental_tree_file = temp_dir + '/' + test_file_name + '.' \
+ test_case + '.incr.' + treeFileExtension
post_edit_source_file = temp_dir + '/' + test_file_name + '.' \
+ test_case + '.post.swift'
after_roundtrip_source_file = temp_dir + '/' + test_file_name + '.' \
+ test_case + '.post_after_roundtrip.swift'
# Generate the syntax tree once incrementally and once from scratch
try:
serializeIncrParseMarkupFile(test_file=test_file,
test_case=test_case,
mode='pre-edit',
serialization_mode='full',
serialization_format=serialization_format,
omit_node_ids=False,
output_file=pre_edit_tree_file,
temp_dir=temp_dir,
swift_syntax_test=swift_syntax_test,
print_visual_reuse_info=False)
serializeIncrParseMarkupFile(test_file=test_file,
test_case=test_case,
mode='incremental',
serialization_mode='incremental',
serialization_format=serialization_format,
omit_node_ids=False,
output_file=incremental_tree_file,
temp_dir=temp_dir,
swift_syntax_test=swift_syntax_test,
print_visual_reuse_info=False)
except TestFailedError as e:
print('Test case "%s" of %s FAILed' % (test_case, test_file),
file=sys.stderr)
print(e.message, file=sys.stderr)
sys.exit(1)
try:
run_command([swiftsyntax_lit_test_helper, '-deserialize-incremental'] +
['-serialization-format', serialization_format] +
['-pre-edit-tree', pre_edit_tree_file] +
['-incr-tree', incremental_tree_file] +
['-out', after_roundtrip_source_file])
except subprocess.CalledProcessError as e:
print('Test case "%s" of %s FAILed' % (test_case, test_file),
file=sys.stderr)
print('Deserializing the swift file failed:\n', file=sys.stderr)
print(e.output, file=sys.stderr)
sys.exit(1)
# Check if the two syntax trees are the same
try:
run_command(
[
'diff', '-u',
post_edit_source_file,
after_roundtrip_source_file
])
except subprocess.CalledProcessError as e:
print('Test case "%s" of %s FAILed' % (test_case, test_file),
file=sys.stderr)
print('Source file after incrementally transferring the syntax tree '
'to swiftSyntax does not match post-edit source file:\n\n',
file=sys.stderr)
print(e.output, file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
|
sschiau/swift
|
utils/incrparse/incr_transfer_round_trip.py
|
Python
|
apache-2.0
| 9,051
|
from common.challenge import MatasanoChallenge
from common.key_exchange.protocols.srp import SecureRemotePassword,\
SecureRemotePasswordClient,\
SecureRemotePasswordServer
class SRPAuthBypassWithZeroKey(SecureRemotePasswordClient):
def __init__(self):
# Initialize with empty email/password (we don't need them).
SecureRemotePasswordClient.__init__(self, str(), str())
def _init_state(self):
# Initializing A from any multiple of the underlying prime will have
# the same effect: the key computed by the server will be zero.
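# Concretely: the server computes S = (A * v**u) ** b % N, so sending
# A = 0 (or any multiple of N) forces S = 0 regardless of the password.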
self.A = 0
def _compute_key(self):
S = self._from_int(0)
self._set_key_from(S)
class Set5Challenge37(MatasanoChallenge):
EMAIL = 'foo@bar.baz'
PASSWORD = 'at4r0rrep'
def validate(self):
client = SRPAuthBypassWithZeroKey()
server = SecureRemotePasswordServer(self.EMAIL, self.PASSWORD)
server.start()
client.start()
client.stop()
server.stop()
return client.get_status() == SecureRemotePassword.STATUS_OK and\
server.get_status() == SecureRemotePassword.STATUS_OK
|
lukius/mts
|
set5/challenge37.py
|
Python
|
mit
| 1,274
|
import os
import gevent.monkey # noqa: I100, gevent must be imported here
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dmoj.settings')
gevent.monkey.patch_all()
# noinspection PyUnresolvedReferences
import dmoj_install_pymysql # noqa: E402, F401, I100, I202, imported for side effect
import django # noqa: E402, F401, I100, I202, django must be imported here
django.setup()
# noinspection PyUnresolvedReferences
import django_2_2_pymysql_patch # noqa: E402, I100, F401, I202, imported for side effect
from judge.bridge.daemon import judge_daemon # noqa: E402, I100, I202, django code must be imported here
if __name__ == '__main__':
judge_daemon()
|
DMOJ/site
|
dmoj_bridge_async.py
|
Python
|
agpl-3.0
| 668
|
'''
module: clusters.py
use: contains functions associated with clustering / unsupervised learning
'''
import numpy as np
from kmeans import kplusplus
from utils import getSimilarityArray
def getDegreeArray(sim_array):  # convert array W into its degree array: Dii = sum(j=1 to n) Wij
'''
Purpose:
Computes the Degree array 'D' in the spectral clustering process from the similarity array
Dii = \sum_{j=1}^n Wij, i.e. the sum of row i of the similarity array
Inputs:
sim_array - Similarity array Wij retrieved from getSimilarityArray()
Outputs:
D - degree array (described in Purpose)
'''
D = np.zeros((sim_array.shape[0],sim_array.shape[0]))
for i in range(0,sim_array.shape[0]):
D[i,i] = np.sum(sim_array[i,:])
return D
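# Worked example: for W = [[0., 1.], [1., 0.]] the row sums give
# D = [[1., 0.], [0., 1.]], i.e. each node has degree 1.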
def getLaplacian(W,D):
'''
Purpose:
Returns the Laplacian of the similarity array W and the degree array D
For use with spectral clustering
Inputs:
W - similarity array from getSimilarityArray()
D - degree array from getDegreeArray
Outputs:
L = D-W, the laplacian
'''
return D-W
def getLaplacianBasis(features,similarity_method='exp',k_nn=5):
'''
Purpose:
Returns orthogonal basis for Laplacian embedding of features. Essentially the full spectral clustering algorithm before the actual clustering
Inputs:
features - n examples by k features ndarray (n>k preferred)
similarity_method - method to use for computing the similarity array:
--'exp' computes W[i,j] = exp(-||xi - xj||^2 / 2)
--'norm' computes W[i,j] = ||xi - xj||^2
--'chain' is specifically for the 'chain' generateData type
k_nn - number of nearest neighbors to consider in similarity array
Outputs:
U - orthogonal basis returned by the svd of the laplacian with columns corresponding to the most significant singular values at the lowest indices
'''
W = getSimilarityArray(features,similarity_method,k_nn)
D = getDegreeArray(W)
L = getLaplacian(W,D)
U,s,V = np.linalg.svd(L,full_matrices=0)
return U
def spectralClustering(features,similarity_method='exp',k_nn=5,basis_dim=2,num_clusters=2):
'''
Purpose:
Performs spectral clustering into 'num_clusters' clusters on data defined in the ndarray 'features'
Inputs:
features - n examples by k features ndarray (n>k preferred)
similarity_method - method to use for computing the similarity array:
--'exp' computes W[i,j] = exp(-||xi - xj||^2 / 2)
--'norm' computes W[i,j] = ||xi - xj||^2
--'chain' is specifically for the 'chain' generateData type
k_nn - number of nearest neighbors to consider in similarity array
basis_dim - number of svd basis vectors to consider for input to kmeans++ algorithm
num_clusters - number of clusters for kmeans++ to sort the data into
Outputs:
labels - 1 by n array of assigned cluster labels for each feature example
centers - cluster centers array (basis_dim by num_clusters) representing each of the k cluster centers
'''
#W = getSimilarityArray(features,similarity_method,k_nn)
#D = getDegreeArray(W)
#L = getLaplacian(W,D)
#U,s,V = np.linalg.svd(L,full_matrices=0)
U = getLaplacianBasis(features,similarity_method=similarity_method,k_nn=k_nn)
U = U[:,-1*basis_dim:]
labels, centers = kplusplus(U.T,num_clusters)
return labels, centers, U
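# Usage sketch (illustrative data; relies on the kmeans/utils helpers
# imported above):
#   import numpy as np
#   pts = np.vstack([np.random.randn(50, 2), np.random.randn(50, 2) + 5.0])
#   labels, centers, U = spectralClustering(pts, similarity_method='exp',
#                                           k_nn=5, basis_dim=2, num_clusters=2)
#   # 'labels' assigns each of the 100 points to one of the two clusters.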
|
jvahala/lucid-robotics
|
code/python-modules/clusters.py
|
Python
|
apache-2.0
| 3,293
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PagedComposeApplicationStatusInfoList(Model):
"""The list of compose applications in the cluster. The list is paged when
all of the results cannot fit in a single message. The next set of
results can be obtained by executing the same query with the continuation
token provided in this list.
:param continuation_token:
:type continuation_token: str
:param items:
:type items: list of :class:`ComposeApplicationStatusInfo
<azure.servicefabric.models.ComposeApplicationStatusInfo>`
"""
_attribute_map = {
'continuation_token': {'key': 'ContinuationToken', 'type': 'str'},
'items': {'key': 'Items', 'type': '[ComposeApplicationStatusInfo]'},
}
def __init__(self, continuation_token=None, items=None):
self.continuation_token = continuation_token
self.items = items
|
SUSE/azure-sdk-for-python
|
azure-servicefabric/azure/servicefabric/models/paged_compose_application_status_info_list.py
|
Python
|
mit
| 1,371
|
"""
Advertising means registering a function to the 'functions'
domain. From a user's standpoint in that domain lives a dictionary of
'name' -> *fn-obj* that is used by the front end to provide functions.
"""
from context import Context
from default import DEFAULTS_DOMAIN
from functions import MetaAdvert
from skin import Skin, DictSkinConfig
import types
def set(domain, value, function=False):
Context.get_skin(function=function)[domain] = value
def get(domain, function=False, **kwargs):
"""
Get a single piece of data. function needs to be true if you want
a callable.
"""
return Context.get_skin(function=function).get(domain, **kwargs)
def append(domain, value, function=False, **kwargs):
return Context.get_skin(function=function).append(domain, value, **kwargs)
def defaults_decorator(fn):
"""
The decorated function takes its default arguments from what is
stored in DEFAULTS_DOMAIN of the context.
"""
def wrap(*args, **kwargs):
# Convert all positional arguments to kwargs
argdic = dict(zip(fn.__code__.co_varnames, args))
kw = (Context.get_skin(function=False).get(DEFAULTS_DOMAIN) or {}).copy()
kw.update(kwargs)
kw.update(argdic)
return fn(**kw)
return wrap
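# Precedence inside wrap (most specific wins): skin defaults from
# DEFAULTS_DOMAIN are overridden by explicit keyword arguments, which are
# in turn overridden by positional arguments. Illustrative: with defaults
# {'domain': 'functions'}, get_fn('foo') resolves domain='functions' unless
# the caller passes domain= explicitly.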
@defaults_decorator
def get_fn(name, domain=None, **kw):
"""
Access functions in a domain.
"""
d = Context.get_skin(function=True)[domain or name]
try:
return d[name]
except TypeError:
return d
def setdict(dic):
"""
Creates a new skin with config dict.
"""
Context.set_skin(Skin(DictSkinConfig(dic)))
def domaincall(domain, name, *args, **kwargs):
return get_fn(name, domain=domain)(*args, **kwargs)
def freecall(name, *args, **kwargs):
"""
Call a function saved in a 'name' domain.
"""
return get_fn(name, domain=None)(*args, **kwargs)
def call(name, *args, **kwargs):
"""
Call a function from the 'functions' domain.
"""
return get_fn(name)(*args, **kwargs)
@defaults_decorator
def advertise_fn(func, **kwargs):
Context.register_function(func, **kwargs)
return func
@defaults_decorator
def advertise(name=None, domain=None, append=None, **kw):
"""
Decorator for advertising functions using their name as key; you
may also provide a name and decorate with parameters. Default
parameters are in DEFAULTS_DOMAIN of the context. You may see what
params you can pass by looking at `Context.register_function`.
To decorate methods of a class it needs to subclass
`Advertisable`; this decorator also implies `@staticmethod`.
Provide a domain and no name to put the vanilla function in the
slot.
"""
def real_dec(fn): return advertise_fn(fn, name=name,
domain=domain,
append=append, **kw)
return real_dec
def jsondump():
return Context.get_skin(function=False).dump()
def attribute_resolvers():
"""
Get a list of the attribute resolvers available.
"""
Context.get_skin(function=True)["resolvers"]
class Advertisable(object):
"""
Subclassing this will make your methods advertisable.
"""
__metaclass__ = MetaAdvert
|
fakedrake/WikipediaBase-skinz
|
wikipediabase/api.py
|
Python
|
bsd-3-clause
| 3,257
|
from dolfin import *
from nanopores import *
from nanopores.physics.simplepnps import *
geo_name = "H_geo"
nm = 1e-9
geo_params = dict(
x0 = None,
boxfields = True,
#Rx = 300*nm,
#Ry = 30*nm,
)
phys_params = dict(
Membraneqs = -0.0,
bulkcon = 3e2,
bV = -.1,
dnaqsdamp = .25
)
generate_mesh(.5, geo_name, **geo_params)
geo = geo_from_name(geo_name, **geo_params)
phys = Physics("pore", geo, **phys_params)
plot(geo.subdomains)
plot(geo.boundaries)
print geo
if geo.parameter("x0") is None:
exec("from nanopores.geometries.%s.subdomains import synonymes" %geo_name)
geo.import_synonymes({"moleculeb":set()})
geo.import_synonymes(synonymes)
pnps = NonlinearPDE(geo, SimplePNPProblem, phys=phys) #, cyl=True)
pnps.imax = 20
pnps.newtondamp = 1.
pnps.maxcells = 5e4
t = Timer("solve")
pnps.solve(refinement=False)
print "CPU time (solve): %s [s]" % (t.stop(),)
"""
tol = 1e-2
damp = 1.
S = pnps.solvers.values()[0]
S.newtondamp = damp
for i in range(20):
#plot(pnps.functions.values()[0].sub(0)) # for debugging
#interactive()
S.solve()
print 'Relative L2 Newton error:',S.relerror()
if S.convergence(tol):
print 'linf Norm of Newton update:', \
norm(S.problem.u.vector(),'linf'), \
'<=', tol ,' \n ==> break loop \n'
break
print "Newton iterations:",i+1
print 'Relative L2 Newton error:',S.relerror()
"""
pnps.print_results()
#pnps.estimators["est"].plot(rate=-.5)
plot(geo.boundaries)
pnps.visualize()
|
mitschabaude/nanopores
|
scripts/test_SimplePNPS.py
|
Python
|
mit
| 1,498
|
import time
import json
import random
from flask import Flask, request, current_app, abort
from functools import wraps
from cloudbrain.utils.metadata_info import (map_metric_name_to_num_channels,
get_supported_devices,
get_metrics_names)
from cloudbrain.settings import WEBSERVER_PORT
_API_VERSION = "v1.0"
app = Flask(__name__)
app.config['PROPAGATE_EXCEPTIONS'] = True
from cloudbrain.datastore.CassandraDAO import CassandraDAO
dao = CassandraDAO()
dao.connect()
def support_jsonp(f):
"""Wraps JSONified output for JSONP"""
@wraps(f)
def decorated_function(*args, **kwargs):
callback = request.args.get('callback', False)
if callback:
content = str(callback) + '(' + str(f()) + ')'
return current_app.response_class(content,
mimetype='application/json')
else:
return f(*args, **kwargs)
return decorated_function
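# Illustrative: a request such as /data?device_name=...&callback=handle
# gets its JSON body wrapped as handle({...}) with an application/json
# mimetype, enabling cross-origin loading via a <script> tag (JSONP).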
@app.route('/data', methods=['GET'])
@support_jsonp
def data():
"""
GET metric data
:return:
"""
# default to a start time of 5 microseconds ago if 'start' is not specified.
default_start_timestamp = int(time.time() * 1000000 - 5)
device_id = request.args.get('device_id', None)
device_name = request.args.get('device_name', None)
metric = request.args.get('metric', None)
start = int(request.args.get('start', default_start_timestamp))
if not device_name:
return "missing param: device_name", 500
if not metric:
return "missing param: metric", 500
if not device_id:
return "missing param: device_id", 500
# data_records = _get_mock_data(device_name, metric)
data_records = dao.get_data(device_name, device_id, metric, start)
return json.dumps(data_records)
def _get_mock_data(device_name, metric):
metric_to_num_channels = map_metric_name_to_num_channels(device_name)
num_channels = metric_to_num_channels[metric]
now = int(time.time() * 1000000 - 5) # micro seconds
data_records = []
for i in xrange(5):
record = {'timestamp': now + i}
for j in xrange(num_channels):
channel_name = 'channel_%s' % j
record[channel_name] = random.random() * 10
data_records.append(record)
return data_records
@app.route('/metadata/devices', methods=['GET'])
@support_jsonp
def get_device_names():
""" Returns the device names from the metadata file """
return json.dumps(get_supported_devices())
@app.route('/registered_devices', methods=['GET'])
@support_jsonp
def get_registered_devices():
""" Get the registered devices IDs """
registered_devices = dao.get_registered_devices()
return json.dumps(registered_devices)
""" Tags """
def _generate_mock_tags(user_id, tag_name):
if tag_name is None:
tag_names = ["Facebook", "Netflix", "TechCrunch"]
else:
tag_names = [tag_name]
tags = []
for tag_name in tag_names:
tags.append(
{"tag_id": "c1f6e1f2-c964-48c0-8cdd-fafe8336190b",
"user_id": user_id,
"tag_name": tag_name,
"metadata": {},
"start": int(time.time() * 1000) - 10,
"end": int(time.time() * 1000)
})
return tags
def generate_mock_tag(user_id, tag_id):
tag = {"tag_id": tag_id,
"user_id": user_id,
"tag_name": "label_1",
"metadata": {},
"start": int(time.time() * 1000) - 10,
"end": int(time.time() * 1000)
}
return tag
@app.route('/api/%s/users/<string:user_id>/tags' % _API_VERSION,
methods=['GET'])
@support_jsonp
def get_tags(user_id):
"""Retrieve all tags for a specific user """
tag_name = request.args.get('tag_name', None)
#tags = _generate_mock_tags(user_id, tag_name)
tags = dao.get_tags(user_id, tag_name)
return json.dumps(tags), 200
@app.route('/api/%s/users/<string:user_id>/tags/<string:tag_id>' % _API_VERSION,
methods=['GET'])
@support_jsonp
def get_tag(user_id, tag_id):
"""Retrieve a specific tag for a specific user """
#tag = dao.get_mock_tag(user_id, tag_id)
tag = dao.get_tag(user_id, tag_id)
return json.dumps(tag), 200
@app.route('/api/%s/users/<string:user_id>/tags' % _API_VERSION,
methods=['POST'])
@support_jsonp
def create_tag(user_id):
if (not request.json
or not 'tag_name' in request.json
or not 'start' in request.json):
abort(400)
tag_name = request.json.get("tag_name")
metadata = request.json.get("metadata")
start = request.json.get("start")
end = request.json.get("end")
#tag_id = "c1f6e1f2-c964-48c0-8cdd-fafe8336190b"
tag_id = dao.create_tag(user_id, tag_name, metadata, start, end)
return json.dumps({"tag_id": tag_id}), 500
""" Tag aggregates"""
def _generate_mock_tag_aggregates(user_id, tag_id, device_type, metrics):
aggregates = []
for metric in metrics:
aggregates.append(
{
"aggregate_id": "c1f6e1f2-c964-48c0-8cdd-fafe83361977",
"user_id": user_id,
"tag_id": tag_id,
"aggregate_type": "avg",
"device_type": device_type,
"aggregate_value": random.random() * 10,
"metric": metric,
"start": int(time.time() * 1000) - 10,
"end": int(time.time() * 1000)
})
return aggregates
@app.route(('/api/%s/users/<string:user_id>/tags/<string:tag_id>/aggregates'
% _API_VERSION), methods=['GET'])
@support_jsonp
def get_tag_aggregate(user_id, tag_id):
"""Retrieve all aggregates for a specific tag and user"""
device_type = request.args.get('device_type', None)
metrics = request.args.getlist('metrics', None)
if device_type is None and len(metrics) == 0:
device_types = get_supported_devices()
for device_type in device_types:
metrics.extend(get_metrics_names(device_type))
elif len(metrics) == 0 and device_type is not None:
metrics = get_metrics_names(device_type)
elif len(metrics) > 0 and device_type is None:
return "parameter 'device_type' is required to filter on `metrics`", 500
#aggregates = _generate_mock_tag_aggregates(user_id, tag_id, device_type, metrics)
aggregates = dao.get_aggregates(user_id, tag_id, device_type, metrics)
return json.dumps(aggregates), 200
if __name__ == "__main__":
app.run(host="0.0.0.0", port=WEBSERVER_PORT)
|
andyh616/cloudbrain
|
cloudbrain/apiservice/rest_api_server.py
|
Python
|
agpl-3.0
| 6,641
|
from pictureflow.core import Image, Node
import cv2
class Scale(Node):
"""
Scale an image
Args:
parent (Node): Parent image node
scale_factor (Node): Scale factor
id (str): ID of the node
Attributes:
Input Types: [ :py:class:`Image`, :py:class:`float` ]
Output Type: :py:class:`Image`
"""
_input_types = [Image, float]
_output_type = Image
def __init__(self, parent, scale_factor, id='scale'):
super().__init__(id, parent, scale_factor)
def apply(self, image, scaling):
image.id += f'-{self.id}({scaling})'
height, width = image.img_mat.shape[:2]
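# Note: ndarray.shape is (height, width) while cv2.resize takes its dsize
# argument as (width, height), hence the swapped order below.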
image.img_mat = cv2.resize(image.img_mat, (int(scaling * width), int(scaling * height)))
yield image
|
mentum/pictureflow
|
pictureflow/transform/scale.py
|
Python
|
mit
| 777
|
# -*- coding: utf-8 -*-
#################################################################################
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <sh@sourcecode.de>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#################################################################################
import xmlrpclib
class FreeIPA(object):
def __init__(self, rpcurl=None):
self._rpcurl = rpcurl
self._proxy = xmlrpclib.ServerProxy(self._rpcurl, allow_none=True)
def remove_otp(self, fqdn=None):
if fqdn is not None:
result = self._proxy.dc2.freeipa.hosts.delete_ipa_otp(fqdn)
return result
return False
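# Usage sketch (hypothetical endpoint URL; depends on the DC2 deployment):
#   ipa = FreeIPA(rpcurl='https://dc2.example.com/RPC2')
#   ipa.remove_otp(fqdn='host.example.com')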
|
sadig/DC2
|
components/dc2-client/dc2/client/api/dc2/addons/freeipa/ipa.py
|
Python
|
gpl-2.0
| 1,423
|
import os
from argparse import ArgumentParser
from xml.etree.ElementTree import tostring
from tqdm import tqdm
from ucca import convert
from ucca.ioutil import write_passage, external_write_mode
from ucca_db.api import get_by_xids, get_most_recent_passage_by_uid
desc = "Download passages from old UCCA annotation app"
def get_by_method(method, id_field, passage_id=None, **kwargs):
if method == "xid":
return get_by_xids(xids=id_field, **kwargs)[0]
elif method == "uid":
return get_most_recent_passage_by_uid(id_field, passage_id, **kwargs)
raise ValueError("Unknown method: '%s'" % method)
def main(args):
os.makedirs(args.outdir, exist_ok=True)
with open(args.filename, encoding="utf-8") as f:
t = list(map(str.split, f))
if not args.verbose:
t = tqdm(t, desc="Downloading", unit=" passages")
for passage_id, id_field in t:
if not args.verbose:
t.set_postfix({"passage_id": passage_id, args.method: id_field})
if args.verbose:
with external_write_mode():
print("Getting passage " + passage_id + " with " + args.method + "=" + id_field, end="\t")
xml_root = get_by_method(id_field=id_field.split(","), passage_id=passage_id, **vars(args))
if xml_root is None:
continue
if args.write_site:
site_filename = passage_id + "_site_download.xml"
with open(site_filename, "w", encoding="utf-8") as fsite:
print(tostring(xml_root).decode(), file=fsite)
if args.verbose:
with external_write_mode():
print("Wrote '%s'" % site_filename)
if args.write:
write_passage(convert.from_site(xml_root), outdir=args.outdir, verbose=args.verbose)
if __name__ == "__main__":
argparser = ArgumentParser(description=desc)
argparser.add_argument("filename", help="specification filename with (passage ID, xid OR uid) per passage")
argparser.add_argument("-m", "--method", default="uid", choices=("xid", "uid"), help="by xid or latest by paid,uid")
argparser.add_argument("-d", "--db-name", default="work", help="database name")
argparser.add_argument("-H", "--host-name", default="pgserver", help="host name")
argparser.add_argument("-o", "--outdir", default=".", help="directory to write created XML IDs to")
argparser.add_argument("-s", "--write-site", action="store_true", help="write site format, too, for debugging")
argparser.add_argument("-n", "--no-write", dest="write", action="store_false", help="do not really write any files")
argparser.add_argument("-x", "--write-xids", help="file to write xids to (for `uid' method)")
argparser.add_argument("-S", "--strict", action="store_true", help="fail if no result is found")
argparser.add_argument("-v", "--verbose", action="store_true", help="print tagged text for each passage")
main(argparser.parse_args())
|
danielhers/ucca
|
ucca_db/download.py
|
Python
|
gpl-3.0
| 3,038
|
"""
Python Blueprint
================
Does not install Python itself, only development and setup tools.
Contains a pip helper for other blueprints to use.
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.python
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from . import debian
__all__ = ['setup']
pip_log_file = '/tmp/pip.log'
@task
def setup():
"""
Install python develop tools
"""
install()
def install():
with sudo():
info('Install python dependencies')
debian.apt_get('install', 'python-dev', 'python-setuptools')
run('easy_install pip')
run('touch {}'.format(pip_log_file))
debian.chmod(pip_log_file, mode=777)
pip('install', 'setuptools', '--upgrade')
def pip(command, *options):
info('Running pip {}', command)
run('pip {0} {1} -v --log={2} --log-file={2}'.format(command, ' '.join(options), pip_log_file))
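# Usage sketch from another blueprint (illustrative package name):
#   from blues import python
#   python.pip('install', 'requests')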
|
gelbander/blues
|
blues/python.py
|
Python
|
mit
| 997
|
def get_thermodynamic_information_minimum(system, database, minimum, commit=True):
m = minimum
changed = False
if m.pgorder is None:
changed = True
m.pgorder = system.get_pgorder(m.coords)
if m.fvib is None:
changed = True
print "computing fvib for minima", m._id, m.energy
m.fvib = system.get_log_product_normalmode_freq(m.coords)
if commit:
database.session.commit()
return changed
def get_thermodynamic_information(system, database):
"""
compute thermodynamic information for all minima in a database
Parameters
----------
system : pygmin System class
database : a Database object
Notes
-----
The information that is computed is the point group order (m.pgorder) and the
log product of the squared normal mode frequencies (m.fvib).
"""
changed = False
try:
for m in database.minima():
c = get_thermodynamic_information_minimum(system, database, m, commit=False)
if c: changed = True
except KeyboardInterrupt:
if changed:
database.session.commit()
raise
if changed:
database.session.commit()
|
js850/PyGMIN
|
pygmin/thermodynamics/_utils.py
|
Python
|
gpl-3.0
| 1,227
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Ponzoni, Nelson"
__copyright__ = "Copyright 2015"
__credits__ = ["Ponzoni Nelson"]
__maintainer__ = "Ponzoni Nelson"
__contact__ = "npcuadra@gmail.com"
__email__ = "npcuadra@gmail.com"
__license__ = "GPL"
__version__ = "1.0.0"
__status__ = "Production"
"""
"""
import numpy
import theano
from cupydle.dnn.funciones import sigmoideaTheano
from cupydle.dnn.funciones import linealRectificadaTheano
from warnings import warn
class Capa(object):
def __init__(self, unidadesEntrada, unidadesSalida, entrada, rng,
funcionActivacion, W=None, b=None):
# pick the activation function implementation from the given string
if funcionActivacion == 'sigmoidea':
funcionActivacion_tmp = sigmoideaTheano()
elif funcionActivacion == 'linealRectificada':
funcionActivacion_tmp = linealRectificadaTheano()
else:
funcionActivacion_tmp = None
self.funcionActivacion = funcionActivacion_tmp
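# Weights default to the Glorot/Xavier uniform initialization below:
# U(-sqrt(6/(n_in+n_out)), +sqrt(6/(n_in+n_out))), scaled by 4 for sigmoid
# units as suggested by Glorot & Bengio (2010).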
if W is None:
W_values = numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6. / (unidadesEntrada + unidadesSalida)),
high=numpy.sqrt(6. / (unidadesEntrada + unidadesSalida)),
size=(unidadesEntrada, unidadesSalida)
),
dtype=theano.config.floatX
)
if type(self.funcionActivacion) == type(sigmoideaTheano()):
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
del W_values
else:
if type(W).__module__ != numpy.__name__:
assert False, "Solo acepto del tipo numpy.ndarray"
else:
W = theano.shared(value=W, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((unidadesSalida,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
del b_values
else:
if type(b).__module__ != numpy.__name__:
assert False, "Solo acepto del tipo numpy.ndarray"
else:
b = theano.shared(value=b, name='b', borrow=True)
self.W = W
self.b = b
# parameters of the model
#self.params = [self.W, self.b]
self.x = entrada
def activate(self):
lin_output = theano.tensor.dot(self.x, self.W) + self.b
#output = (lin_output if self.funcionActivacion is None else self.funcionActivacion(lin_output))
output = self.funcionActivacion(lin_output)
return output
# intrinsic properties of the layers
def __str__(self):
return str("Capa: " + str(type(self))
+ "\n W[" + str(self.W) + "]: "
+ str(self.W.get_value(borrow=True).shape)
+ " " + str(type(self.W))
+ "\n bias[" + str(self.b) + "]:"
+ str(type(self.b.get_value(borrow=True).shape))
+ " " + str(type(self.b)))
# functions for retrieving values
def get_weights(self):
warn("No se deberia utilizar mas, <<getW>>")
return self.W
def get_bias(self):
warn("No se deberia utilizar mas, <<getB>>")
return self.b
@property
def getW(self):
return self.W.get_value(borrow=True)
@property
def getB(self):
return self.b.get_value(borrow=True)
def set_weights(self, w):
if isinstance(w, theano.TensorType):
self.W.set_value(w)
else:
assert False
def set_bias(self, b):
if isinstance(b, theano.TensorType):
self.b.set_value(b)
else:
assert False
class CapaClasificacion(Capa):
def __init__(self, unidadesEntrada, unidadesSalida, entrada, W=None, b=None):
# initialize with 0 the weights W as a matrix of shape (unidadesEntrada, unidadesSalida)
if W is None:
W_values = numpy.zeros((unidadesEntrada, unidadesSalida), dtype=theano.config.floatX)
W = theano.shared(value=W_values, name='W', borrow=True)
del W_values
else:
if type(W).__module__ != numpy.__name__:
assert False, "Solo acepto del tipo numpy.ndarray"
else:
W = theano.shared(value=W, name='W', borrow=True)
# initialize the biases b as a vector of unidadesSalida 0s
if b is None:
b_values = numpy.zeros((unidadesSalida,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
del b_values
else:
if type(b).__module__ != numpy.__name__:
assert False, "Solo acepto del tipo numpy.ndarray"
else:
b = theano.shared(value=b, name='b', borrow=True)
self.W = W
self.b = b
# parameters of the model
#self.params = [self.W, self.b]
self.x = entrada
def activate(self):
# symbolic expression for computing the matrix of class-membership
# probabilities
# Where:
# W is a matrix where column-k represent the separation hyperplane for
# class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represent the free parameter of
# hyperplane-k
return theano.tensor.nnet.softmax(theano.tensor.dot(self.x, self.W) + self.b)
def predict(self):
# symbolic description of how to compute prediction as class whose
# probability is maximal
return theano.tensor.argmax(self.activate(), axis=1)
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -theano.tensor.mean(theano.tensor.log(self.activate())[theano.tensor.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.predict().ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.predict().type)
)
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return theano.tensor.mean(theano.tensor.neq(self.predict(), y))
else:
raise NotImplementedError()
if __name__ == '__main__':
assert False, "Este modulo no es ejecutable!!!"
|
lerker/cupydle
|
cupydle/dnn/capas.py
|
Python
|
apache-2.0
| 8,329
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Package init."""
# Semantic Versioning 2.0.0 https://semver.org/spec/v2.0.0.html
__version__ = "0.5.0"
|
carlosperate/ubitflashtool
|
ubittool/__init__.py
|
Python
|
mit
| 154
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import os
import StringIO
import sys
import time
import urlparse
import uuid
import weakref
import exceptions
from django.core import urlresolvers
from django.db import IntegrityError, transaction
from smartform import descriptor as smartdescriptor
from mint import urltypes
from mint.django_rest.rbuilder import errors
from mint.django_rest.rbuilder import modellib
from mint.django_rest.rbuilder.manager import basemanager
from mint.django_rest.rbuilder.images import models as imagemodels
from mint.django_rest.rbuilder.jobs import models
from mint.django_rest.rbuilder.inventory import models as inventorymodels
from mint.django_rest.rbuilder.targets import models as targetmodels
from mint.logerror import logErrorAndEmail
exposed = basemanager.exposed
import logging
log = logging.getLogger(__name__)
class JobManager(basemanager.BaseManager):
@exposed
def getJobs(self):
return self._jobsFromIterator(models.Job.objects.all())
@exposed
def getJob(self, job_uuid):
return models.Job.objects.get(job_uuid=job_uuid)
@exposed
def updateJob(self, job_uuid, job):
if not job.pk:
raise errors.ResourceNotFound()
factory = JobHandlerRegistry.getHandlerFactory(job.job_type.name)
if factory is None:
return job
jhandler = factory(self)
jhandler.processResults(job)
return job
@exposed
def addJob(self, job, **extraArgs):
"""
extraArgs can be used for passing additional information that ties
the job to a particular resource (or verifies it). For instance,
the identity of the related resource may be present both in the
job URL and in the descriptor URL, and they should match
"""
job.created_by = job.modified_by = extraArgs.get('forUser', self.user)
typename = job.job_type.name
factory = JobHandlerRegistry.getHandlerFactory(typename)
if factory is None:
raise errors.InvalidData(msg="no factory for job type: %s" % typename)
jhandler = factory(self)
jhandler.create(job, extraArgs)
for system_job in job.systems.all():
system_job.system.updateDerivedData()
return job
@exposed
def deleteJob(self, jobId):
job = models.Job.objects.get(pk=jobId)
systems = job.systems.all()
job.delete()
for system_job in systems:
system_job.job.updateDerivedData()
@exposed
def getJobStates(self):
jobStates = models.JobStates()
jobStates.job_state = models.JobState.objects.all()
return jobStates
@exposed
def getJobStateByName(self, name):
return modellib.Cache.get(models.JobState, name=name)
@exposed
def getJobState(self, jobStateId):
jobState = models.JobState.objects.get(pk=jobStateId)
return jobState
@exposed
def getJobsByJobState(self, job_state_id):
jobState = models.JobState.objects.get(pk=job_state_id)
return self._jobsFromIterator(models.Job.objects.filter(
job_state=jobState))
@exposed
def getSystemJobsByState(self, system_id, job_state_id):
system = inventorymodels.System.objects.get(pk=system_id)
jobState = models.JobState.objects.get(pk=job_state_id)
return self._jobsFromIterator(system.jobs.filter(job_state=jobState))
@exposed
def getSystemJobs(self, system_id):
system = inventorymodels.System.objects.get(pk=system_id)
return self._jobsFromIterator(system.jobs.all())
@exposed
def waitForRmakeJob(self, jobUuid, timeout=10, interval=1):
cli = self.mgr.repeaterMgr.repeaterClient
end = time.time() + timeout
while time.time() < end:
job = cli.getJob(jobUuid)
if job.status.final:
return job
time.sleep(interval)
# Even if we timed out, we'll still return the job, it's up to
# the caller to decide what to do
return job
@classmethod
def _jobsFromIterator(cls, iterator):
jobs = models.Jobs()
for job in iterator:
jobs.job.append(job)
return jobs
@classmethod
def systemModelForSystem(cls, system, topLevelItems):
systemModelLines = []
systemModelLines.extend("install %s" % x.strip() for x in topLevelItems)
return "\n".join(systemModelLines)
@exposed
def finishJob(self, job):
return self.updateJobState(job, stateName=models.JobState.COMPLETED,
statusText="Completed", statusCode=200)
@exposed
def updateJobState(self, job, stateName=models.JobState.COMPLETED,
statusText="Completed", statusCode=200):
job.update(job_state = self.getJobStateByName(stateName),
status_text = statusText,
status_code = statusCode)
class AbstractHandler(object):
__slots__ = [ 'mgrRef', 'extraArgs', ]
def __init__(self, mgr):
self.mgrRef = weakref.ref(mgr)
self.extraArgs = {}
self._init()
def _init(self):
pass
@property
def mgr(self):
return self.mgrRef()
class HandlerRegistry(object):
"""
Generic registry for factories.
"""
__slots__ = []
class __metaclass__(type):
_registry = {}
def __new__(mcs, name, bases, attributes):
if '__slots__' not in attributes:
attributes.update(__slots__=[])
cls = type.__new__(mcs, name, bases, attributes)
baseHandlerClass = cls.BaseHandlerClass
if baseHandlerClass is None:
return cls
for fname, fval in attributes.items():
if fname == 'BaseHandlerClass':
continue
if inspect.isclass(fval) and issubclass(fval, baseHandlerClass):
mcs._registry[fval.jobType] = fval
return cls
BaseHandlerClass = None
@classmethod
def getHandlerFactory(cls, jobType):
return cls.__metaclass__._registry.get(jobType)
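# Registration sketch: the metaclass's __new__ scans each subclass body, so a
# nested handler such as
#   class TargetRefreshImages(_TargetDescriptorJobHandler):
#       jobType = models.EventType.TARGET_REFRESH_IMAGES
# is registered automatically and later fetched with
# JobHandlerRegistry.getHandlerFactory(models.EventType.TARGET_REFRESH_IMAGES).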
class BaseJobHandler(AbstractHandler):
__slots__ = []
def create(self, job, extraArgs=None):
self.extraArgs.update(extraArgs or {})
# Tentatively supply a jobUuid, to make sure we have a stable
# URL back to the job
job.job_uuid = str(uuid.uuid4())
uuid_, rmakeJob = self.createRmakeJob(job)
job.job_uuid = str(uuid_)
job.setDefaultValues()
if rmakeJob is not None:
jobToken = rmakeJob.data.getObject().data.get('authToken')
if jobToken:
job.job_token = str(jobToken)
job.save()
# Blank out the descriptor data, we don't need it in the return
# value
job.descriptor_data = None
self.linkRelatedResource(job)
self.postCreateJob(job)
def createRmakeJob(self, job):
cli = self.mgr.mgr.repeaterMgr.repeaterClient
method = self.getRepeaterMethod(cli, job)
methodArgs, methodKwargs = self.getRepeaterMethodArgs(cli, job)
methodKwargs.update(uuid=job.job_uuid)
return method(*methodArgs, **methodKwargs)
def getRepeaterMethodArgs(self, cli, job):
return (), {}
def linkRelatedResource(self, job):
pass
def postCreateJob(self, job):
pass
class ResultsProcessingMixIn(object):
__slots__ = []
ResultsTag = None
# Results processing API
def _init(self):
self.results = None
def processResults(self, job):
if job.oldModel is None:
# We won't allow job creation to happen here
raise errors.InvalidData(msg="no model")
# Flush job state to the DB, it is needed by processJobResults
models.Job.objects.filter(job_id=job.job_id).update(
job_state=job.job_state)
self.results = self.getJobResults(job)
self.validateJobResults(job)
self.processJobResults(job)
for system_job in job.systems.all():
system_job.system.updateDerivedData()
job.save()
def getJobResults(self, job):
if job.results is None:
return None
return job.results.find(self.ResultsTag)
def validateJobResults(self, job):
jobState = modellib.Cache.get(models.JobState, pk=job.job_state_id)
if self.results is None and jobState.name == jobState.COMPLETED:
raise errors.InvalidData(msg = "missing results")
def loadDescriptorData(self, job):
descriptor = smartdescriptor.ConfigurationDescriptor(fromStream=job._descriptor)
descriptorData = smartdescriptor.DescriptorData(
fromStream=job._descriptor_data, descriptor=descriptor)
return descriptorData
def processJobResults(self, job):
jobState = modellib.Cache.get(models.JobState, pk=job.job_state_id)
if jobState.name != jobState.COMPLETED:
job.results = None
return None
tsid = transaction.savepoint()
try:
resources = self._processJobResults(job)
except Exception, e:
transaction.savepoint_rollback(tsid)
e_type, e_value, e_tb = sys.exc_info()
log.error("Error processing job %s %s",
job.job_uuid, e)
try:
handled = self.handleError(job, e)
except exceptions.AttributeError:
handled = False
if handled:
return None
logErrorAndEmail(self.mgr.cfg, e_type, e_value, e_tb,
'jobs handler', dict(), doEmail=True)
self.handleErrorDefault(job, e)
return None
# save the results from rmake to the DB
if not isinstance(resources, list):
resources = [ resources ]
for resource in resources:
tag = resource._xobj.tag
# XXX this is ugly. We should have a more extensible way to
# handle this
if tag == 'image':
models.JobImageArtifact(job=job, image=resource).save()
elif tag not in set(['target', 'system']):
raise Exception("internal error, don't know how to save resource: %s" % tag)
job.results = models.JobResults()
job.results.result = [ modellib.HrefFieldFromModel(x) for x in resources ]
return resources[0]
def _createTargetConfiguration(self, job, targetType):
descriptorData = self.loadDescriptorData(job)
driverClass = self.mgr.mgr.targetsManager.getDriverClass(targetType)
cloudName = driverClass.getCloudNameFromDescriptorData(descriptorData)
config = driverClass.getTargetConfigFromDescriptorData(descriptorData)
return targetType, cloudName, config
def handleErrorDefault(self, job, exc):
job.status_text = "Unknown exception, please check logs"
job.status_code = 500
class DescriptorJobHandler(BaseJobHandler, ResultsProcessingMixIn):
__slots__ = [ 'results', 'descriptor', 'descriptorData', ]
def _init(self):
ResultsProcessingMixIn._init(self)
self.descriptor = self.descriptorData = None
def extractDescriptorData(self, job):
"Executed when the job is created"
descriptor = None
descriptorDataObj = None
descriptorId = 1
descriptorDataXml = ''
if isinstance(job.descriptor, smartdescriptor.ConfigurationDescriptor):
# path for direct python API usage, such as target system import
# not yet patched up for supplying descriptor data
descriptor = job.descriptor
descriptorDataObj = None
descriptor = self.getDescriptor(job.descriptor.id)
else:
descriptorId = job.descriptor.attrib['id']
# Strip the server-side portion
descriptorId = urlparse.urlsplit(descriptorId).path
descriptor = self.getDescriptor(descriptorId)
descriptorDataXml = modellib.Etree.tostring(job.descriptor_data,
xmlDeclaration=True, prettyPrint=False)
# Save the original URL for the descriptor
self._setDescriptorId(descriptorId, descriptor)
# Related resources are linked to jobs through a many-to-many
# relationship
job._relatedResource = self.getRelatedResource(descriptor)
job._relatedThroughModel = self.getRelatedThroughModel(descriptor)
try:
descriptorDataObj = self._processDescriptor(descriptor, descriptorDataXml)
except smartdescriptor.errors.ConstraintsValidationError, e:
raise errors.InvalidData(msg=str(e))
descrXml = self._serializeDescriptor(descriptor)
job._descriptor = descrXml
if hasattr(descriptorDataObj, 'toxml'):
# Re-serialize descriptor data to make sure extra fields get
# filtered out
descriptorDataXml = descriptorDataObj.toxml()
job._descriptor_data = descriptorDataXml
return descriptor, descriptorDataObj
def _setDescriptorId(self, descriptorId, descriptor):
descriptor.setId(descriptorId)
def _processDescriptor(self, descriptor, descriptorDataXml):
descriptor.setRootElement("descriptor_data")
# This will also validate the descriptor data
descriptorDataObj = smartdescriptor.DescriptorData(
fromStream=descriptorDataXml,
descriptor=descriptor)
return descriptorDataObj
def _serializeDescriptor(self, descriptor):
# Serialize descriptor for the job
sio = StringIO.StringIO()
descriptor.serialize(sio)
return sio.getvalue()
def getRelatedResource(self, descriptor):
descriptorId = descriptor.getId()
try:
match = self.splitResourceId(descriptorId)
except errors.InvalidData:
return None
return match.func.get(**match.kwargs)
def getDescriptor(self, descriptorId):
raise NotImplementedError()
def linkRelatedResource(self, job):
if job._relatedResource is None:
return
# It's possible to link multiple resources to a job
relatedResources = job._relatedResource
if not isinstance(relatedResources, list):
relatedResources = [ relatedResources ]
relatedClass = relatedResources[0].__class__
for relatedResource in relatedResources:
model = job._relatedThroughModel(job=job)
# Find the name of the related field
relatedFields = [ x for x in job._relatedThroughModel._meta.fields
if x.rel and x.rel.to == relatedClass ]
if not relatedFields:
return
relatedFieldName = relatedFields[0].name
setattr(model, relatedFieldName, relatedResource)
self.postprocessRelatedResource(job, model)
model.save()
def postprocessRelatedResource(self, job, model):
pass
@classmethod
def splitResourceId(cls, resourceId):
try:
match = urlresolvers.resolve(resourceId)
except urlresolvers.Resolver404:
raise errors.InvalidData(msg="unable to resolve resource id: %s" % resourceId)
return match
class _TargetDescriptorJobHandler(DescriptorJobHandler):
__slots__ = [ 'target', ]
def _init(self):
DescriptorJobHandler._init(self)
self.target = None
def getDescriptor(self, descriptorId):
match = self.splitResourceId(descriptorId)
targetId = int(match.kwargs['target_id'])
self._setTarget(targetId)
descr = self._getDescriptorMethod()(targetId)
return descr
def _setTarget(self, targetId):
target = self.mgr.mgr.getTargetById(targetId)
self.target = target
def getRelatedResource(self, descriptor):
return self.target
def getRelatedThroughModel(self, descriptor):
return targetmodels.JobTarget
def _buildTargetConfigurationFromDb(self, cli):
targetData = self.mgr.mgr.getTargetConfiguration(self.target)
targetTypeName = modellib.Cache.get(targetmodels.TargetType,
pk=self.target.target_type_id).name
targetConfiguration = cli.targets.TargetConfiguration(
targetTypeName, self.target.name, targetData.get('alias'),
targetData)
return targetConfiguration
def _buildTargetCredentialsFromDb(self, cli, job):
creds = self.mgr.mgr.getTargetCredentialsForCurrentUser(self.target)
if creds is None:
raise errors.InvalidData(msg="missing credentials")
return self._buildTargetCredentials(cli, job, creds)
def _buildTargetCredentials(self, cli, job, creds):
rbUser = self.mgr.auth.username
rbUserId = self.mgr.auth.userId
isAdmin = self.mgr.auth.admin
userCredentials = cli.targets.TargetUserCredentials(
credentials=creds,
rbUser=rbUser,
rbUserId=rbUserId,
isAdmin=isAdmin)
return userCredentials
class JobHandlerRegistry(HandlerRegistry):
BaseHandlerClass = BaseJobHandler
class TargetRefreshImages(_TargetDescriptorJobHandler):
__slots__ = []
jobType = models.EventType.TARGET_REFRESH_IMAGES
ResultsTag = 'images'
def _getDescriptorMethod(self):
return self.mgr.mgr.getDescriptorRefreshImages
def _configureTargetMethod(self, cli, job):
targetConfiguration = self._buildTargetConfigurationFromDb(cli)
targetUserCredentials = self._buildTargetCredentialsFromDb(cli, job)
zone = self.mgr.mgr.getTargetZone(self.target)
cli.targets.configure(zone.name, targetConfiguration,
targetUserCredentials)
def getRepeaterMethod(self, cli, job):
self.descriptor, self.descriptorData = self.extractDescriptorData(job)
self._configureTargetMethod(cli, job)
return cli.targets.listImages
def _processJobResults(self, job):
targetId = job.target_jobs.all()[0].target_id
self._setTarget(targetId)
images = list(self.results.iterchildren('image'))
self.mgr.mgr.updateTargetImages(self.target, images)
return self.target
def _setTargetUserCredentials(self, job):
targetId = job.target_jobs.all()[0].target_id
self._setTarget(targetId)
descriptorData = self.loadDescriptorData(job)
creds = dict((k.getName(), k.getValue())
for k in descriptorData.getFields())
self.mgr.mgr.setTargetUserCredentials(self.target, creds)
return self.target
class TargetRefreshSystems(TargetRefreshImages):
__slots__ = []
jobType = models.EventType.TARGET_REFRESH_SYSTEMS
ResultsTag = 'instances'
def _getDescriptorMethod(self):
return self.mgr.mgr.getDescriptorRefreshSystems
def _buildAllUserCredentialsFromDb(self, cli, job):
credsList = self.mgr.mgr.getTargetAllUserCredentials(self.target)
ret = []
for credId, creds in credsList:
userCredentials = cli.targets.TargetUserCredentials(
credentials=creds,
rbUser=None,
rbUserId=None,
isAdmin=False,
opaqueCredentialsId=credId)
ret.append(userCredentials)
return ret
def _configureTargetMethod(self, cli, job):
targetConfiguration = self._buildTargetConfigurationFromDb(cli)
targetAllUserCredentials = self._buildAllUserCredentialsFromDb(cli, job)
zone = self.mgr.mgr.getTargetZone(self.target)
cli.targets.configure(zone.name, targetConfiguration,
None, targetAllUserCredentials)
def getRepeaterMethod(self, cli, job):
super(JobHandlerRegistry.TargetRefreshSystems, self).getRepeaterMethod(cli, job)
return cli.targets.listInstances
def _processJobResults(self, job):
targetId = job.target_jobs.all()[0].target_id
self._setTarget(targetId)
systems = list(self.results.iterchildren('instance'))
self.mgr.mgr.updateTargetSystems(self.target, systems)
return self.target
class TargetDeployImage(_TargetDescriptorJobHandler):
__slots__ = ['image', 'image_file', ]
jobType = models.EventType.TARGET_DEPLOY_IMAGE
ResultsTag = 'image'
def getDescriptor(self, descriptorId):
match = self.splitResourceId(descriptorId)
targetId = int(match.kwargs['target_id'])
fileId = int(match.kwargs['file_id'])
self._setTarget(targetId)
self._setImageFromFileId(fileId)
return descriptorId
def _setDescriptorId(self, descriptorId, descriptor):
pass
def _serializeDescriptor(self, descriptor):
descriptorXml = '<descriptor id="%s"/>' % descriptor
return descriptorXml
def _setImageFromFileId(self, fileId):
self.image_file = imagemodels.BuildFile.objects.get(file_id=fileId)
self.image = self.image_file.image
def _processDescriptor(self, descriptor, descriptorDataXml):
return descriptorDataXml
def _processJobResults(self, job):
# Nothing to be done, there is another call that posts the
# image
images = list(job.images.all())
if not images:
raise ImageDeletedError("Image was deleted during deployment")
self.image = images[0].image
return self.image
def handleError(self, job, exc):
if isinstance(exc, ImageDeletedError):
job.status_text = str(exc)
job.status_code = 404
return True
return False
def getRepeaterMethod(self, cli, job):
self.extractDescriptorData(job)
targetConfiguration = self._buildTargetConfigurationFromDb(cli)
targetUserCredentials = self._buildTargetCredentialsFromDb(cli, job)
zone = self.mgr.mgr.getTargetZone(self.target)
cli.targets.configure(zone.name, targetConfiguration,
targetUserCredentials)
return cli.targets.deployImage
def _getImageBaseFileName(self):
vals = self.image.image_data.filter(name='baseFileName').values('value')
if not vals:
return None
return vals[0]['value']
def getRepeaterMethodArgs(self, cli, job):
imageDownloadUrl = self.mgr.mgr.restDb.imageMgr.getDownloadUrl(self.image_file.file_id)
hostname = self.image.project.short_name
baseFileName = self._getImageBaseFileName()
troveFlavor = (self.image.trove_flavor or '').encode('ascii')
baseFileName = self.mgr.mgr.restDb.imageMgr._getBaseFileName(
baseFileName, hostname, self.image.trove_name,
self.image.trove_version, troveFlavor,
)
urls = self.image_file.urls_map.filter(
url__url_type=urltypes.LOCAL).values('url__url')
imageFileInfo = dict(
architecture=self.image.architecture,
size=self.image_file.size,
sha1=self.image_file.sha1,
fileId=self.image_file.file_id,
baseFileName=baseFileName,
)
if urls:
imageFileInfo['name'] = os.path.basename(urls[0]['url__url'])
targetImageIdList = [ x.target_image_id
for x in self.image_file.targetimagesdeployed_set.all() ]
params = dict(
descriptorData=job._descriptor_data,
imageFileInfo=imageFileInfo,
imageDownloadUrl=imageDownloadUrl,
targetImageXmlTemplate=self._targetImageXmlTemplate(),
imageFileUpdateUrl='http://localhost/api/v1/images/%s/build_files/%s' % (
self.image.image_id, self.image_file.file_id),
targetImageIdList=targetImageIdList,
imageData = self.mgr.mgr.imagesManager.getImageData(self.image),
)
return (params, ), {}
def getRelatedThroughModel(self, descriptor):
return imagemodels.JobImage
def getRelatedResource(self, descriptor):
imageId = self.extraArgs['imageId']
relatedResources = [ self.image ]
if imageId != str(self.image.image_id):
# We have a base image
relatedResources.append(
imagemodels.Image.objects.get(image_id=imageId))
return relatedResources
def _targetImageXmlTemplate(self):
tmpl = """\
<file>
<target_images>
<target_image>
<target id="/api/v1/targets/%(targetId)s"/>
%%(image)s
</target_image>
</target_images>
</file>"""
return tmpl % dict(targetId=self.target.target_id)
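# Note on the template above (added for clarity): the doubled %% in
# %%(image)s survives the first interpolation, so the string returned by
# _targetImageXmlTemplate still carries a literal %(image)s placeholder
# that is filled in with the per-target image XML later.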
class TargetLaunchSystem(TargetDeployImage):
__slots__ = []
jobType = models.EventType.TARGET_LAUNCH_SYSTEM
ResultsTag = 'systems'
def getRepeaterMethod(self, cli, job):
JobHandlerRegistry.TargetDeployImage.getRepeaterMethod(self, cli, job)
return cli.targets.launchSystem
def getRepeaterMethodArgs(self, cli, job):
args, kwargs = JobHandlerRegistry.TargetDeployImage.getRepeaterMethodArgs(self, cli, job)
params = args[0]
# Use the original image id, which should be the non-base
# image
params.update(systemsCreateUrl =
"http://localhost/api/v1/jobs/%s/systems" % (job.job_uuid, ))
return args, kwargs
def _processJobResults(self, job):
# Nothing to be done, there is another call that posts the
# image
images = list(job.images.all())
if not images:
raise ImageDeletedError("Image was deleted during deployment")
self.image = images[0].image
systems = self.results.iterchildren('system')
results = []
for targetSystem in systems:
# System XML does not contain a target id, hence the duplicate
# lookup; we should fix this
targetName = modellib.Etree.findBasicChild(
targetSystem, 'targetName')
targetSystemId = modellib.Etree.findBasicChild(
targetSystem, 'target_system_id')
target = targetmodels.Target.objects.get(name=targetName)
realSystem = inventorymodels.System.objects.get(
target = target,
target_system_id = targetSystemId,
)
# The system may not have network info yet, so don't try
# to do anything clever here (Mingle #1785)
results.append(realSystem)
return results
def getRelatedResource(self, descriptor):
imageId = self.extraArgs['imageId']
if imageId != str(self.image.image_id):
# image ID in url corresponds to the deferred image
return [ imagemodels.Image.objects.get(image_id=imageId) ]
return [ self.image ]
class TargetCreator(DescriptorJobHandler):
__slots__ = [ 'targetType', ]
jobType = models.EventType.TARGET_CREATE
ResultsTag = 'target'
def _init(self):
DescriptorJobHandler._init(self)
self.targetType = None
def getDescriptor(self, descriptorId):
match = self.splitResourceId(descriptorId)
targetTypeId = int(match.kwargs['target_type_id'])
self._setTargetType(targetTypeId)
descr = self.mgr.mgr.getDescriptorCreateTargetByTargetType(targetTypeId)
return descr
def _setTargetType(self, targetTypeId):
self.targetType = modellib.Cache.get(targetmodels.TargetType,
pk=targetTypeId)
def _getTargetType(self, job):
if self.targetType is None:
targetTypeId = job.jobtargettype_set.all()[0].target_type_id
self._setTargetType(targetTypeId)
return self.targetType
def getRepeaterMethod(self, cli, job):
self.descriptor, self.descriptorData = self.extractDescriptorData(job)
targetType, targetName, targetData = self._createTargetConfiguration(job, self.targetType)
zone = targetData.pop('zone')
targetConfiguration = cli.targets.TargetConfiguration(targetType.name,
targetName, targetData.get('alias'), targetData)
userCredentials = None
cli.targets.configure(zone, targetConfiguration, userCredentials)
return cli.targets.checkCreate
def getRelatedResource(self, descriptor):
return self.targetType
def getRelatedThroughModel(self, descriptor):
return targetmodels.JobTargetType
def _processJobResults(self, job):
targetType = self._getTargetType(job)
targetType, targetName, targetData = self._createTargetConfiguration(job, targetType)
target = self._createTarget(targetType, targetName, targetData)
return target
def handleError(self, job, exc):
if isinstance(exc, (IntegrityError, errors.Conflict)):
job.job_state = self.mgr.getJobStateByName(models.JobState.FAILED)
job.status_text = "Duplicate Target"
job.status_code = 409
return True
return False
def _createTarget(self, targetType, targetName, config):
return self.mgr.mgr.createTarget(targetType, targetName, config)
class TargetConfigurator(_TargetDescriptorJobHandler):
__slots__ = []
jobType = models.EventType.TARGET_CONFIGURE
ResultsTag = 'target'
def _getDescriptorMethod(self):
return self.mgr.mgr.getDescriptorTargetConfiguration
def getRepeaterMethod(self, cli, job):
self.descriptor, self.descriptorData = self.extractDescriptorData(job)
targetType, targetName, targetData = self._createTargetConfiguration(job, self.target.target_type)
zone = targetData.pop('zone')
targetConfiguration = cli.targets.TargetConfiguration(targetType.name,
targetName, targetData.get('alias'), targetData)
userCredentials = None
cli.targets.configure(zone, targetConfiguration, userCredentials)
return cli.targets.checkCreate
def getRelatedResource(self, descriptor):
return self.target
def _processJobResults(self, job):
targetId = job.target_jobs.all()[0].target_id
self._setTarget(targetId)
targetType, targetName, targetData = self._createTargetConfiguration(job, self.target.target_type)
target = self._createTarget(targetType, targetName, targetData)
return target
def _createTarget(self, targetType, targetName, config):
# We don't allow for the type to change
return self.mgr.mgr.updateTargetConfiguration(self.target,
targetName, config)
def handleError(self, job, exc):
if isinstance(exc, (IntegrityError, errors.Conflict)):
job.job_state = self.mgr.getJobStateByName(models.JobState.FAILED)
job.status_text = "Duplicate Target"
job.status_code = 409
return True
return False
class TargetCredentialsConfigurator(_TargetDescriptorJobHandler):
__slots__ = []
jobType = models.EventType.TARGET_CONFIGURE_CREDENTIALS
ResultsTag = 'target'
def _getDescriptorMethod(self):
return self.mgr.mgr.getDescriptorConfigureCredentials
def getRepeaterMethod(self, cli, job):
self.descriptor, self.descriptorData = self.extractDescriptorData(job)
creds = dict((k.getName(), k.getValue())
for k in self.descriptorData.getFields())
targetConfiguration = self._buildTargetConfigurationFromDb(cli)
targetUserCredentials = self._buildTargetCredentials(cli, job, creds)
zone = self.mgr.mgr.getTargetZone(self.target)
cli.targets.configure(zone.name, targetConfiguration,
targetUserCredentials)
return cli.targets.checkCredentials
def _processJobResults(self, job):
return self._setTargetUserCredentials(job)
def _setTargetUserCredentials(self, job):
targetId = job.target_jobs.all()[0].target_id
self._setTarget(targetId)
descriptorData = self.loadDescriptorData(job)
creds = dict((k.getName(), k.getValue())
for k in descriptorData.getFields())
self.mgr.mgr.setTargetUserCredentials(self.target, creds)
return self.target
class ImageBuildCancellation(DescriptorJobHandler):
__slots__ = [ 'image', ]
jobType = models.EventType.IMAGE_CANCEL_BUILD
ResultsTag = 'image'
def createRmakeJob(self, job):
self.extractDescriptorData(job)
return job.job_uuid, None
def getDescriptor(self, descriptorId):
match = self.splitResourceId(descriptorId)
imageId = int(match.kwargs['image_id'])
if str(imageId) != str(self.extraArgs.get('imageId')):
raise errors.InvalidData(msg = "image id does not match")
self._setImage(imageId)
return self.mgr.mgr.imagesManager.getImageDescriptorCancelBuild(imageId)
def getRelatedResource(self, descriptor):
return self.image
def getRelatedThroughModel(self, descriptor):
return imagemodels.JobImage
def _setImage(self, imageId):
image = imagemodels.Image.objects.get(image_id=imageId)
self.image = image
def postCreateJob(self, job):
self.mgr.mgr.cancelImageBuild(self.image, job)
class TargetLaunchProfileHandler(_TargetDescriptorJobHandler):
jobType = models.EventType.TARGET_CREATE_LAUNCH_PROFILE
ResultsTag = 'launch_profile'
def createRmakeJob(self, job):
self.descriptor, self.descriptorData = self.extractDescriptorData(job)
return job.job_uuid, None
def _getDescriptorMethod(self):
return self.mgr.mgr.getDescriptorCreateLaunchProfile
def postCreateJob(self, job):
try:
self.mgr.mgr.createTargetLaunchProfile(self.target, job, self.descriptorData)
except (IntegrityError, errors.Conflict), e:
self.mgr.mgr.rollback()
self.mgr.updateJobState(job,
stateName=models.JobState.FAILED,
statusText=str(e),
statusCode=409)
raise errors.Conflict(msg=job.status_text)
self.mgr.finishJob(job)
def handleError(self, job, exc):
if isinstance(exc, (IntegrityError, errors.Conflict)):
job.job_state = self.mgr.getJobStateByName(models.JobState.FAILED)
job.status_text = "Duplicate Launch Profile"
job.status_code = 409
return True
return False
class ImageDeletedError(Exception):
pass
|
sassoftware/mint
|
mint/django_rest/rbuilder/jobs/manager.py
|
Python
|
apache-2.0
| 36,536
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Download and build the data if it does not exist.
from parlai.core.build_data import DownloadableFile
import parlai.tasks.dbll_babi.build as dbll_babi_build
import parlai.tasks.wikimovies.build as wikimovies_build
RESOURCES = [
DownloadableFile(
'http://parl.ai/downloads/dbll/dbll.tgz',
'dbll.tgz',
'd8c727dac498b652c7f5de6f72155dce711ff46c88401a303399d3fad4db1e68',
)
]
def build(opt):
# Depends upon another dataset, wikimovies, build that first.
wikimovies_build.build(opt)
dbll_babi_build.build(opt)
|
facebookresearch/ParlAI
|
parlai/tasks/dbll_movie/build.py
|
Python
|
mit
| 756
|
# -*- coding: utf-8 -*-
import unittest
from outwiker.actions.globalsearch import GlobalSearchAction
from test.basetestcases import BaseOutWikerGUIMixin
class GlobalSearchActionTest(unittest.TestCase, BaseOutWikerGUIMixin):
"""
Tests for GlobalSearchAction
"""
def setUp(self):
self.initApplication()
self.wikiroot = self.createWiki()
def tearDown(self):
self.destroyApplication()
self.destroyWiki(self.wikiroot)
def testNoneWiki(self):
self.application.wikiroot = None
self.application.actionController.getAction(GlobalSearchAction.stringId).run(None)
def testEmptyWiki(self):
self.application.wikiroot = self.wikiroot
self.assertEqual(len(self.application.wikiroot.children), 0)
self.application.actionController.getAction(GlobalSearchAction.stringId).run(None)
self.assertEqual(len(self.application.wikiroot.children), 1)
self.assertEqual(self.application.selectedPage, self.application.wikiroot.children[0])
def testReadOnly(self):
self.application.wikiroot = self.wikiroot
self.application.wikiroot.readonly = True
self.application.mainWindow.toaster.counter.clear()
self.application.actionController.getAction(GlobalSearchAction.stringId).run(None)
self.assertEqual(len(self.application.wikiroot.children), 0)
self.assertEqual(
self.application.mainWindow.toaster.counter.showErrorCount,
1)
def testExecSeveralTimes(self):
self.application.wikiroot = self.wikiroot
self.application.actionController.getAction(GlobalSearchAction.stringId).run(None)
self.application.actionController.getAction(GlobalSearchAction.stringId).run(None)
self.application.actionController.getAction(GlobalSearchAction.stringId).run(None)
self.assertEqual(len(self.application.wikiroot.children), 1)
|
unreal666/outwiker
|
src/test/actions/test_globalsearch.py
|
Python
|
gpl-3.0
| 1,932
|
# This file is part of mididump.
#
# mididump is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mididump is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mididump. If not, see <http://www.gnu.org/licenses/>.
class MIDIMessage:
LENGTH = 3
def __init__(self, data):
self._data = data
self._check()
def _check(self):
pass
def __str__(self):
raise NotImplementedError("Subclasses must implement")
class MIDINoteOffMessage(MIDIMessage):
def _check(self):
assert self._data[1] >> 7 == 0, "Invalid data byte #1"
assert self._data[2] >> 7 == 0, "Invalid data byte #2"
def __str__(self):
# & binds looser than +, so mask the channel nibble before adding 1
return "Note Off - channel %d - note number %d - note velocity %d" \
% ((self._data[0] & 0xf) + 1, self._data[1] & 0x7f, self._data[2] & 0x7f)
class MIDINoteOnMessage(MIDIMessage):
def _check(self):
assert self._data[1] >> 7 == 0, "Invalid data byte #1"
assert self._data[2] >> 7 == 0, "Invalid data byte #2"
def __str__(self):
return "Note On - channel %d - note number %d - note velocity %d" \
% ((self._data[0] & 0xf) + 1, self._data[1] & 0x7f, self._data[2] & 0x7f)
class MIDIPolyphonicAftertouchMessage(MIDIMessage):
def _check(self):
assert self._data[1] >> 7 == 0, "Invalid data byte #1"
assert self._data[2] >> 7 == 0, "Invalid data byte #2"
def __str__(self):
return "Polyphonic Aftertouch - channel %d - note number %d - pressure %d" \
% ((self._data[0] & 0xf) + 1, self._data[1] & 0x7f, self._data[2] & 0x7f)
class MIDIControlModeChangeMessage(MIDIMessage):
def _check(self):
assert self._data[1] >> 7 == 0, "Invalid data byte #1"
assert self._data[2] >> 7 == 0, "Invalid data byte #2"
def __str__(self):
return "Control/Mode Change - channel %d - control number %d - control value %d" \
% ((self._data[0] & 0xf) + 1, self._data[1] & 0x7f, self._data[2] & 0x7f)
class MIDIProgramChangeMessage(MIDIMessage):
LENGTH = 2
def _check(self):
assert self._data[1] >> 7 == 0, "Invalid data byte #1"
def __str__(self):
# the data byte is already an int, so no ord() is needed
return "Program Change - channel %d - program number %d" \
% ((self._data[0] & 0xf) + 1, self._data[1] & 0x7f)
class MIDIChannelAftertouchMessage(MIDIMessage):
LENGTH = 2
def _check(self):
assert self._data[1] >> 7 == 0, "Invalid data byte #1"
def __str__(self):
return "Channel Aftertouch - channel %d - pressure value %d" \
% ((self._data[0] & 0xf) + 1, self._data[1] & 0x7f)
class MIDIPitchWheelControlMessage(MIDIMessage):
def _check(self):
assert self._data[1] >> 7 == 0, "Invalid data byte #1"
assert self._data[2] >> 7 == 0, "Invalid data byte #2"
def __str__(self):
return "Pitch Wheel Control - channel %d - LSB %d - MSB %d" \
% ((self._data[0] & 0xf) + 1, self._data[1] & 0x7f, self._data[2] & 0x7f)
class MIDISystemExclusiveMessage(MIDIMessage):
def __str__(self):
return "System Exclusive"
class MIDITimeCodeQuarterFrameMessage(MIDIMessage):
def _check(self):
assert self._data[1] >> 7 == 0, "Invalid data byte #1"
def __str__(self):
return "Time Code Quarter Frame - message type %d - values %d" \
% ((self._data[1] & 0x70) >> 4, self._data[1] & 0xf)
class MIDISongPositionPointerMessage(MIDIMessage):
def _check(self):
assert self._data[1] >> 7 == 0, "Invalid data byte #1"
assert self._data[2] >> 7 == 0, "Invalid data byte #2"
def __str__(self):
return "Song Position Pointer - LSB %d - MSB %d" \
% (self._data[1] & 0x7f, self._data[2] & 0x7f)
class MIDISongSelectMessage(MIDIMessage):
LENGTH = 2
def _check(self):
assert self._data[1] >> 7 == 0, "Invalid data byte #1"
def __str__(self):
return "Song Select - selected sequence/song %d" \
% (self._data[1] & 0x7f)
class MIDIUndefinedMessage(MIDIMessage):
def __str__(self):
return "Undefined (Reserved)"
class MIDITuneRequestMessage(MIDIMessage):
LENGTH = 1
def __str__(self):
return "Tune Request"
class MIDIEndOfSystemExclusiveMessage(MIDIMessage):
LENGTH = 1
def __str__(self):
return "End Of System Exclusive"
class MIDITimingClockMessage(MIDIMessage):
LENGTH = 1
def __str__(self):
return "Timing Clock"
class MIDIStartMessage(MIDIMessage):
LENGTH = 1
def __str__(self):
return "Start"
class MIDIContinueMessage(MIDIMessage):
LENGTH = 1
def __str__(self):
return "Continue"
class MIDIStopMessage(MIDIMessage):
LENGTH = 1
def __str__(self):
return "Stop"
class MIDIActiveSensingMessage(MIDIMessage):
LENGTH = 1
def __str__(self):
return "Active Sensing"
class MIDISystemResetMessage(MIDIMessage):
LENGTH = 1
def __str__(self):
return "MIDISystemResetMessage"
_messages_per_status_byte = {
0b1000: MIDINoteOffMessage,
0b1001: MIDINoteOnMessage,
0b1010: MIDIPolyphonicAftertouchMessage,
0b1011: MIDIControlModeChangeMessage,
0b1100: MIDIProgramChangeMessage,
0b1101: MIDIChannelAftertouchMessage,
0b1110: MIDIPitchWheelControlMessage,
0b1111: {
0b0000: MIDISystemExclusiveMessage,
0b0001: MIDITimeCodeQuarterFrameMessage,
0b0010: MIDISongPositionPointerMessage,
0b0011: MIDISongSelectMessage,
0b0100: MIDIUndefinedMessage,
0b0101: MIDIUndefinedMessage,
0b0110: MIDITuneRequestMessage,
0b0111: MIDIEndOfSystemExclusiveMessage,
0b1000: MIDITimingClockMessage,
0b1001: MIDIUndefinedMessage,
0b1010: MIDIStartMessage,
0b1011: MIDIContinueMessage,
0b1100: MIDIStopMessage,
0b1101: MIDIUndefinedMessage,
0b1110: MIDIActiveSensingMessage,
0b1111: MIDISystemResetMessage,
},
}
class MessageDecoder:
@staticmethod
def get(buf):
status_byte = buf[0]
# High nibble selects the message family; low nibble is the channel
# (or the sub-type for system messages, whose high nibble is 0b1111)
first_quartet, second_quartet = status_byte >> 4, status_byte & 0x0f
assert first_quartet in _messages_per_status_byte, "Unknown message based on first quartet of the status byte (`%s` = `%s`)" \
% (bin(first_quartet), hex(first_quartet))
entry = _messages_per_status_byte[first_quartet]
if type(entry) is dict:
assert second_quartet in entry, "Unknown message based on second quartet of the status byte (`%s` = `%s`, first quartet `%s`)" \
% (bin(second_quartet), hex(second_quartet), bin(first_quartet))
message_class = entry[second_quartet]
return message_class(buf[:message_class.LENGTH])
return entry(buf[:entry.LENGTH])
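# Illustrative usage sketch (not part of the original module; the byte
# values below are hypothetical). 0x93 has high nibble 0b1001 (Note On)
# and low nibble 3, i.e. channel 4 after the +1 offset.
if __name__ == '__main__':
demo = MessageDecoder.get(b'\x93\x40\x64')
print(demo)  # Note On - channel 4 - note number 64 - note velocity 100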
|
zdobersek/mididump
|
messages.py
|
Python
|
gpl-3.0
| 7,352
|
# Copyright 2021 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs the Spec JBB 2015 benchmark https://www.spec.org/jbb2015/.
User guide: https://www.spec.org/jbb2015/docs/userguide.pdf.
"""
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_packages import openjdk_neoverse
BENCHMARK_NAME = 'specjbb2015'
BENCHMARK_CONFIG = """
specjbb2015:
description: Run specjbb2015
vm_groups:
default:
vm_spec: *default_single_core
disk_spec: *default_50_gb
"""
FLAGS = flags.FLAGS
_DEFAULT_OPEN_JDK_VERSION = '11'
_FOUR_HOURS = 60 * 60 * 4
# Customer's JVM args.
_DEFAULT_JVM_ARGS = ('-XX:+AlwaysPreTouch -XX:-UseAdaptiveSizePolicy '
'-XX:MaxTenuringThreshold=15 -XX:-UseBiasedLocking '
'-XX:SurvivorRatio=10 '
'-XX:TargetSurvivorRatio=90 '
'-XX:+UseParallelOldGC -XX:+PrintGCDetails ')
_DEFAULT_JVM_CONT_TXI_ARGS = ('-Xms2g -Xmx2g -Xmn1536m '
'-XX:+AlwaysPreTouch '
'-XX:ParallelGCThreads=2')
_DEFAULT_COMPOSITE_MEMORY_RATIO = 0.8
_DEFAULT_WORKERS_RATIO = 1
_DEFAULT_NUM_GROUPS = 4
_RAM_MB_PER_CORE = 1500
_SPEC_JBB_2015_ISO = 'SPECjbb2015-1_03.iso'
_SPEC_DIR = 'spec'
_LOG_FILE = '~/specjbb2015.log'
_JAR_FILE = 'specjbb2015.jar'
_PROPS_FILE = 'config/specjbb2015.props'
BENCHMARK_DATA = {
_SPEC_JBB_2015_ISO:
'524bc1588a579ddf35cfada5e07a408c78b5939e72ee5f02b05422d5c0d214bd'
}
BACKEND_MODE = 'backend'
MULTIJVM_MODE = 'MultiJVM'
COMPOSITE_MODE = 'COMPOSITE'
MULTICONTROLLER_MODE = 'multicontroller'
TXINJECTOR_MODE = 'txinjector'
NEW_MAX_RATIO = 0.94 # Taken from customer script
flags.DEFINE_float('specjbb_workers_ratio', _DEFAULT_WORKERS_RATIO,
'A number indicating number of workers per vCPU.')
flags.DEFINE_enum('specjbb_run_mode', MULTIJVM_MODE,
[MULTIJVM_MODE, COMPOSITE_MODE],
'String representing run mode. COMPOSITE or MultiJVM.')
flags.DEFINE_integer('specjbb_num_groups', _DEFAULT_NUM_GROUPS,
'Used in MultiJVM, number of groups.')
flags.DEFINE_bool('build_openjdk_neoverse', False,
'Whether to build OpenJDK optimized for ARM Neoverse.'
'Requires Ubuntu 1804 and OpenJDK 11.')
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def _PrepareSpec(vm):
"""Prepares a SPEC client by copying SPEC to the VM."""
mount_dir = 'spec_mnt'
vm.RemoteCommand(f'mkdir -p {mount_dir} {_SPEC_DIR}')
vm.InstallPreprovisionedBenchmarkData(BENCHMARK_NAME, [_SPEC_JBB_2015_ISO],
'~/')
vm.RemoteCommand(
f'sudo mount -t iso9660 -o loop {_SPEC_JBB_2015_ISO} {mount_dir}')
vm.RemoteCommand(f'cp -r {mount_dir}/* {_SPEC_DIR}')
vm.RemoteCommand(f'sudo umount {mount_dir} && sudo rm -rf {mount_dir}')
def Prepare(benchmark_spec):
"""Install Specjbb2015 on the target vm.
Args:
benchmark_spec: The benchmark specification.
"""
vm = benchmark_spec.vms[0]
_PrepareSpec(vm)
if not FLAGS.openjdk_version:
FLAGS.openjdk_version = _DEFAULT_OPEN_JDK_VERSION
vm.Install('openjdk')
# Used on m6g (AWS Graviton 2) machines for optimal performance
if FLAGS.build_openjdk_neoverse:
openjdk_neoverse.InstallNeoverseCompiledOpenJDK(vm, FLAGS.openjdk_version)
vm.InstallPackages('numactl')
# swap only if necessary; free local node memory and avoid remote memory;
# reset caches; set stack size to unlimited
# Also consider setting enable_transparent_hugepages flag to true
cmd = ('echo 1 | sudo tee /proc/sys/vm/swappiness && '
'echo 1 | sudo tee /proc/sys/vm/zone_reclaim_mode && '
'sync ; echo 3 | sudo tee /proc/sys/vm/drop_caches && '
'ulimit -s unlimited')
vm.RemoteCommand(cmd)
def _MaxHeapMB(vm, mode):
"""Returns max heap size in MB as an int."""
if mode == BACKEND_MODE:
return int(
vm.NumCpusForBenchmark() // _DEFAULT_NUM_GROUPS) * _RAM_MB_PER_CORE
elif mode == COMPOSITE_MODE:
return int(vm.total_memory_kb * _DEFAULT_COMPOSITE_MEMORY_RATIO / 1024)
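# Worked example (illustrative): on a hypothetical 16-vCPU VM with the
# default 4 groups, each backend heap is (16 // 4) * 1500 MB = 6000 MB;
# in COMPOSITE mode the heap is 80% of total VM memory instead.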
def _JVMArgs(vm, mode):
"""Determines JVM args and returns them as a string."""
if mode in (TXINJECTOR_MODE, MULTICONTROLLER_MODE):
return _DEFAULT_JVM_CONT_TXI_ARGS
gc_size = int(vm.NumCpusForBenchmark() / _DEFAULT_NUM_GROUPS)
jvm_backend_gc_arg = f'-XX:ParallelGCThreads={gc_size}'
# Determine max/new heap arguments. max per group = 3/8 * vCPU GB.
jvm_backend_mem_arg = '-Xms{max_}m -Xmx{max_}m -Xmn{new_}m '.format(
max_=_MaxHeapMB(vm, BACKEND_MODE),
new_=int(_MaxHeapMB(vm, BACKEND_MODE) * NEW_MAX_RATIO))
jvm_composite_mem_arg = '-Xms{max_}m -Xmx{max_}m -Xmn{new_}m '.format(
max_=_MaxHeapMB(vm, COMPOSITE_MODE),
new_=int(_MaxHeapMB(vm, COMPOSITE_MODE) * NEW_MAX_RATIO))
if mode == BACKEND_MODE:
return ' '.join(
[jvm_backend_gc_arg, jvm_backend_mem_arg, _DEFAULT_JVM_ARGS])
elif mode == COMPOSITE_MODE:
return ' '.join([jvm_composite_mem_arg, _DEFAULT_JVM_ARGS])
else:
raise errors.Benchmarks.RunError('Invalid specjbb mode!')
def _SpecArgs(vm, mode):
"""Determines Spec args and returns them as a string."""
num_workers = vm.NumCpusForBenchmark() * FLAGS.specjbb_workers_ratio
spec_num_workers_arg = f' -Dspecjbb.forkjoin.workers={int(num_workers)}'
spec_num_groups_arg = f' -Dspecjbb.group.count={FLAGS.specjbb_num_groups}'
spec_rt_curve_arg = '-Dspecjbb.controller.rtcurve.warmup.step=0.5'
spec_mr_arg = f'-Dspecjbb.mapreducer.pool.size={_DEFAULT_NUM_GROUPS * 2}'
if mode == TXINJECTOR_MODE:
return ''
elif mode == MULTICONTROLLER_MODE:
return ' '.join([
spec_rt_curve_arg, spec_mr_arg, spec_num_workers_arg,
spec_num_groups_arg
])
elif mode == BACKEND_MODE:
return ''
elif mode == COMPOSITE_MODE:
return spec_num_workers_arg
else:
raise errors.Benchmarks.RunError('Invalid specjbb mode!')
def _CollectSLAMetrics(vm):
"""Gathers SLA metrics from specjbb output files."""
# The log file reports the location of the report.html file. Since date/time
# are part of the report filename, we must determine it at runtime. The .raw
# file is easier to parse than the .html file, so parse that instead.
grep_stdout, _ = vm.RemoteCommand(
'grep -oE \'[^ ]+html\' ~/specjbb2015.log', ignore_failure=True)
file_prefix = grep_stdout.split('.')[0]
filename = f'spec/{file_prefix}.raw'
cmd = f'cat {filename} | grep SLA-'
sla_stdout, _ = vm.RemoteCommand(cmd, ignore_failure=True)
return sla_stdout
def ParseJbbOutput(stdout, metadata):
"""Generates samples from the RUN RESULT string."""
samples = []
regex = re.compile(r'RUN\sRESULT:.*?max\-jOPS\s=\s(?P<maxjops>\d+),\s+'
r'critical-jOPS\s=\s(?P<crjops>\d+)')
jops = regex.search(stdout)
if jops:
samples.append(
sample.Sample('max_jOPS', int(jops.group('maxjops')), 'jops', metadata))
samples.append(
sample.Sample('critical_jOPS', int(jops.group('crjops')), 'jops',
metadata))
else:
raise errors.Benchmarks.RunError('No specjbb results found!')
return samples
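# Illustrative example (hypothetical output line, not from a real run):
#   stdout = 'RUN RESULT: hbIR (max attempted) = 4000, hbIR (settled) = '
#            '3500, max-jOPS = 3000, critical-jOPS = 1500'
# ParseJbbOutput(stdout, {}) then yields a max_jOPS sample of 3000 and a
# critical_jOPS sample of 1500.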
def _RunBackgroundNumaPinnedCommand(vm, cmd_list, node_id):
"""In a shell session, cd and run a numa pinned background command.
Args:
vm: VM to run the command on
cmd_list: list of commands to be joined together
node_id: NUMA node to pin command on.
"""
# Persist the nohup command past the ssh session, and numa pin:
# "sh -c 'cd /wherever; nohup ./whatever > /dev/null 2>&1 &'"
# "numactl --cpunodebind 0 --membind 0 cmd"
cmd = ('sh -c \'cd {dir} && nohup numactl --cpunodebind {node_id} '
'--membind {node_id} {cmd} 2>&1 &\'').format(
node_id=node_id, dir=_SPEC_DIR, cmd=' '.join(cmd_list))
vm.RemoteCommand(cmd)
def Run(benchmark_spec):
"""Runs Specjbb2015 on the target vm.
Args:
benchmark_spec: The benchmark specification.
Returns:
A list of sample.Sample objects with the performance results.
Raises:
Benchmarks.RunError: If no results are found.
"""
vm = benchmark_spec.vms[0]
if FLAGS.specjbb_run_mode == MULTIJVM_MODE:
numactl_stdout, _ = vm.RemoteCommand('numactl -H | grep cpus | wc -l')
numa_zones = int(numactl_stdout)
# Run backends and txinjectors as background commands
# java -jar specjbb2015.jar -m txinjector -G GRP1 -J JVM1 > grp1jvm1.log
# java -jar specjbb2015.jar -m backend -G GRP1 -J JVM1 > grp1jvm2.log
for group in range(1, FLAGS.specjbb_num_groups + 1):
node_id = group % numa_zones
txinjector_cmd = [
'java',
_JVMArgs(vm,
TXINJECTOR_MODE), '-jar', _JAR_FILE, '-m', TXINJECTOR_MODE,
'-G', f'GRP{group}', '-J', 'JVM1', '>', f'grp{group}jvm1.log'
]
_RunBackgroundNumaPinnedCommand(vm, txinjector_cmd, node_id)
backend_cmd = [
'java',
_JVMArgs(vm, BACKEND_MODE), '-jar', _JAR_FILE, '-m', BACKEND_MODE,
'-G', f'GRP{group}', '-J', 'JVM2', '>', f'grp{group}jvm2.log'
]
_RunBackgroundNumaPinnedCommand(vm, backend_cmd, node_id)
# Run multicontroller as a foreground command
controller_cmd = [
'java',
_JVMArgs(vm, MULTICONTROLLER_MODE),
_SpecArgs(vm, MULTICONTROLLER_MODE), '-jar', _JAR_FILE, '-m',
MULTICONTROLLER_MODE, '-p', _PROPS_FILE
]
run_cmd = ('cd {dir} && {cmd} 2>&1 | tee {log_file}').format(
dir=_SPEC_DIR, cmd=' '.join(controller_cmd), log_file=_LOG_FILE)
stdout, _ = vm.RobustRemoteCommand(run_cmd)
max_heap_size_gb = _MaxHeapMB(vm, BACKEND_MODE) / 1000.0 # for metadata
else: # COMPOSITE mode
run_cmd = [
'java',
_JVMArgs(vm, COMPOSITE_MODE),
_SpecArgs(vm, COMPOSITE_MODE), '-jar', _JAR_FILE, '-m', COMPOSITE_MODE,
'-p', _PROPS_FILE
]
cmd = ('cd {dir} && {cmd} 2>&1 | tee {log_file}').format(
dir=_SPEC_DIR, cmd=' '.join(run_cmd), log_file=_LOG_FILE)
stdout, _ = vm.RemoteCommand(cmd, timeout=_FOUR_HOURS)
max_heap_size_gb = _MaxHeapMB(vm, COMPOSITE_MODE) / 1000.0 # for metadata
jdk_metadata = FLAGS.openjdk_version or _DEFAULT_OPEN_JDK_VERSION
if FLAGS.build_openjdk_neoverse:
jdk_metadata += '_neoverse_optimized'
metadata = {
'OpenJDK_version': jdk_metadata,
'iso_hash': BENCHMARK_DATA[_SPEC_JBB_2015_ISO],
'num_workers': vm.NumCpusForBenchmark() * FLAGS.specjbb_workers_ratio,
'num_groups': FLAGS.specjbb_num_groups,
'worker_ratio': FLAGS.specjbb_workers_ratio,
'max_heap_size': f'{max_heap_size_gb}g',
'specjbb_mode': FLAGS.specjbb_run_mode,
'sla_metrics': _CollectSLAMetrics(vm),
}
return ParseJbbOutput(stdout, metadata)
def Cleanup(benchmark_spec):
"""Cleanup Specjbb2015 on the target vm.
Args:
benchmark_spec: The benchmark specification.
"""
vm = benchmark_spec.vms[0]
vm.RemoteCommand(f'sudo umount {_SPEC_DIR}', ignore_failure=True)
vm.RemoteCommand(
f'rm -rf {_SPEC_DIR} {_SPEC_JBB_2015_ISO}', ignore_failure=True)
|
GoogleCloudPlatform/PerfKitBenchmarker
|
perfkitbenchmarker/linux_benchmarks/specjbb2015_benchmark.py
|
Python
|
apache-2.0
| 11,831
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from flask import g, has_request_context, jsonify, render_template, request, session
from markupsafe import Markup
from indico.util.i18n import _
from indico.web.flask.templating import get_template_module
def inject_js(js):
"""Injects JavaScript into the current page.
:param js: Code wrapped in a ``<script>`` tag.
"""
if 'injected_js' not in g:
g.injected_js = []
g.injected_js.append(Markup(js))
def _pop_injected_js():
js = None
if 'injected_js' in g:
js = g.injected_js
del g.injected_js
return js
def jsonify_form(form, fields=None, submit=None, back=None, back_url=None, back_button=True, disabled_until_change=True,
disabled_fields=(), form_header_kwargs=None, skip_labels=False, save_reminder=False,
footer_align_right=False, disable_if_locked=True):
"""Returns a json response containing a rendered WTForm.
This is a shortcut to the ``simple_form`` jinja macro to avoid
adding new templates that do nothing besides importing and
calling this macro.
:param form: A WTForms `Form` instance
:param fields: A list of fields to be displayed on the form
:param submit: The title of the submit button
:param back: The title of the back button
:param back_url: The URL the back button redirects to
:param back_button: Whether to show a back button
:param disabled_until_change: Whether to disable form submission
until a field is changed
:param disabled_fields: List of field names to disable
:param form_header_kwargs: Keyword arguments passed to the
``form_header`` macro
:param skip_labels: Whether to skip showing labels on the fields
:param save_reminder: Whether to show a message when the form has
been modified and the save button is not
visible
:param footer_align_right: Whether the buttons in the event footer
should be aligned to the right.
:param disable_if_locked: Whether the form should be disabled when
the associated event is locked (based on
a CSS class in the DOM structure)
"""
if submit is None:
submit = _('Save')
if back is None:
back = _('Cancel')
if form_header_kwargs is None:
form_header_kwargs = {}
tpl = get_template_module('forms/_form.html')
html = tpl.simple_form(form, fields=fields, submit=submit, back=back, back_url=back_url, back_button=back_button,
disabled_until_change=disabled_until_change, disabled_fields=disabled_fields,
form_header_kwargs=form_header_kwargs, skip_labels=skip_labels, save_reminder=save_reminder,
footer_align_right=footer_align_right, disable_if_locked=disable_if_locked)
return jsonify(html=html, js=_pop_injected_js())
def jsonify_template(template, _render_func=render_template, _success=None, **context):
"""Returns a json response containing a rendered template"""
html = _render_func(template, **context)
jsonify_kw = {}
if _success is not None:
jsonify_kw['success'] = _success
return jsonify(html=html, js=_pop_injected_js(), **jsonify_kw)
def jsonify_data(flash=True, **json_data):
"""Returns a json response with some default fields.
This behaves similarly to :func:`~flask.jsonify`, but includes
``success=True`` and flashed messages by default.
:param flash: if the json data should contain flashed messages
:param json_data: the data to include in the json response
"""
json_data.setdefault('success', True)
if flash:
json_data['flashed_messages'] = render_template('flashed_messages.html')
return jsonify(**json_data)
def _format_request_data(data, hide_passwords=False):
if not hasattr(data, 'iterlists'):
data = ((k, [v]) for k, v in data.iteritems())
else:
data = data.iterlists()
rv = {}
for key, values in data:
if hide_passwords and 'password' in key:
values = [v if not v else '<{} chars hidden>'.format(len(v)) for v in values]
rv[key] = values if len(values) != 1 else values[0]
return rv
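# Illustrative example (hypothetical values):
#   _format_request_data({'user': 'alice', 'password': 'hunter2'},
#                        hide_passwords=True)
# returns {'user': 'alice', 'password': '<7 chars hidden>'}; keys with
# multiple values keep the full list instead of a single value.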
def get_request_info(hide_passwords=True):
"""Gets various information about the current HTTP request.
This is especially useful for logging purposes where you want
as much information as possible.
:param hide_passwords: Hides the actual value of POST fields
if their name contains ``password``.
:return: a dictionary containing request information, or ``None``
when called outside a request context
"""
if not has_request_context():
return None
try:
user_info = {
'id': session.user.id,
'name': session.user.full_name,
'email': session.user.email
} if session.user else None
except Exception as exc:
user_info = 'ERROR: {}'.format(exc)
return {
'id': request.id,
'time': datetime.now().isoformat(),
'url': request.url,
'endpoint': request.url_rule.endpoint if request.url_rule else None,
'method': request.method,
'rh': g.rh.__class__.__name__ if 'rh' in g else None,
'user': user_info,
'ip': request.remote_addr,
'user_agent': unicode(request.user_agent),
'referrer': request.referrer,
'data': {
'url': _format_request_data(request.view_args) if request.view_args is not None else None,
'get': _format_request_data(request.args),
'post': _format_request_data(request.form, hide_passwords=hide_passwords),
'json': request.get_json(silent=True),
'headers': _format_request_data(request.headers, False),
}
}
def url_for_index(_external=False, _anchor=None):
from indico.web.flask.util import url_for
return url_for('categories.display', _external=_external, _anchor=_anchor)
|
eliasdesousa/indico
|
indico/web/util.py
|
Python
|
gpl-3.0
| 6,933
|
import random
import sys
import pygame
import string
import re
import xml.dom.minidom
from pygame.locals import *
from gamedata import *
from menu import Menu
class CreateCharacter:
"""Creates a new character for Gods & Monsters based on the rules
defined in the Rule Book beginning on page 6.
"""
def __init__(self):
self.display = Display()
self.gamedata = GameData()
self.chardata = CharacterData().chardata
def createcharacter(self, screen):
"""Initiates the creation of a new character."""
self.screen = screen
# Set new character's level to 1
self.chardata["Level"] = 1
self.sheet = DisplayCharacter()
self.generateabilities(screen)
self.assignabilities(screen)
self.selectspecies(screen)
self.setspeciesabilities()
self.selectgender(screen)
self.selectarchetype(screen)
self.selectmoralcode(screen)
self.setexperience()
self.setskillpoints()
self.setsurvival()
self.setweapons()
self.setinitialgold()
self.setsaves()
self.setsurprise()
self.setadvantage()
self.setdefense()
self.setattackbonus()
self.setphysicaltraits()
self.setmovement()
self.setmojo()
self.setname(screen)
# self.chooseskills(screen)
self.sheet.printcharactersheet(self.chardata, self.screen)
while True:
event = pygame.event.wait()
if event.type == KEYDOWN:
if event.key == K_q:
sys.exit()
def generateabilities(self, screen):
"""Rolls six scores at 4d6, discarding the lowest die roll and
checks to see that at least one is a 9 or higher. If none are
at least 9, passes the scores on to give the player the option
of rolling six more or changing lowest to 18.
"""
scores = []
# Generate six ability scores
for i in range(6):
scores.append(self.rollability())
# Checks to ensure at least one is 9 or higher.
# Allows player to roll 6 more or assign 18 if not.
if max(scores) < 9:
scores = self.changeprime(scores, screen)
# Attached modifiers to ability scores for later reference.
# self.chardata[ability][-1] is set to original value to
# account for temporary increases or decreases (curses,
# magic, etc).
i = 0
for score in scores:
scores[i] = [score,
self.gamedata.ABIL_MODIFIERS[score][0],
self.gamedata.ABIL_MODIFIERS[score][1],
self.gamedata.ABIL_MODIFIERS[score][2],
score
]
i += 1
# Assigns scores (temporarily) to abilities
i = 0
for ability in self.gamedata.ABIL_NAMES:
self.chardata[ability] = scores[i]
i += 1
def rollability(self):
"""Rolls one score at 4d6, discarding lowest and passing it back
to calling function.
"""
roll = []
for i in range(4):
roll.append(random.randint(1, 6))
return sum(roll) - min(roll)
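# Worked example (illustrative): a roll of [3, 5, 2, 6] sums to 16;
# dropping the lowest die (2) gives an ability score of 14, so
# rollability() always returns a value in the 3-18 range.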
def changeprime(self, scores, screen):
"""If no abilities are at least 9, gives the player the option
to roll six more scores, taking the highest of the twelve, or to
raise the lowest of the six scores to 18. Then passes them back
to the calling function.
--Page 11--
"""
prompt = ["Your character's ability scores are",
"too low for an archetype selection.",
"You may roll six more and take the",
"highest of all twelve rolls, or",
"increase your lowest score to 18."
]
choices = ["Roll", "Increase"]
bg = pygame.image.load(self.display.BG_FULL).convert()
screen.blit(bg, (0, 0))
element = "ABILITY SCORES:"
value = str(scores[0]) + ", " + str(scores[1]) + ", " + \
str(scores[2]) + ", " + str(scores[3]) + ", " + \
str(scores[4]) + ", " + str(scores[5])
row = 14
col = 2
text = self.display.FONT.render(element, True,
self.display.WHITE)
screen.blit(text, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
text = self.display.FONT.render(value, True,
self.display.BRIGHT_GREEN)
screen.blit(text, ((col + 16) * self.display.CH_SPACE,
row * self.display.CH_SPACE))
row += 3
for line in prompt:
text = self.display.FONT.render(line.upper(), True,
self.display.BRIGHT_GREEN)
screen.blit(text, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
row += 1
row = 24
col = 0
for item in choices:
ch = self.display.FONT.render(item[0].upper(), True,
self.display.WHITE)
screen.blit(ch, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
text = self.display.FONT.render(item[1:].upper(), True,
self.display.BRIGHT_GREEN)
screen.blit(text, ((col + 1) * self.display.CH_SPACE,
row * self.display.CH_SPACE))
col += len(item) + 1
pygame.display.update()
while True:
event = pygame.event.wait()
if event.type == KEYDOWN:
if event.key == K_r:
for i in range(6):
scores.append(self.rollability())
for i in range(6):
scores.remove(min(scores))
return scores
elif event.key == K_i:
lowest = scores.index(min(scores))
scores[lowest] = 18
return scores
def assignabilities(self, screen):
"""Initiates assignment of ability scores and swaps
scores a player request.
"""
while True:
prompta = ["You may customize your character's",
"abilities.",
"",
"Select the first ability to swap, or",
"'f' to finish."
]
promptb = ["You may customize your character's",
"abilities.",
"",
"Select the second ability to swap,",
"or 'f' to finish."
]
while True:
bg = pygame.image.load(self.display.BG_FULL).convert()
screen.blit(bg, (0, 0))
self.sheet.selectabilities(self.chardata, self.screen)
row = 24
col = 0
ch = self.display.FONT.render("F", True,
self.display.WHITE)
screen.blit(ch, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
text = self.display.FONT.render("INISH", True,
self.display.BRIGHT_GREEN)
screen.blit(text, ((col + 1) * self.display.CH_SPACE,
row * self.display.CH_SPACE))
row = 17
col = 2
for line in prompta:
text = self.display.FONT.render(line.upper(), True,
self.display.BRIGHT_GREEN)
screen.blit(text, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
row += 1
pygame.display.update()
event = pygame.event.wait()
if event.type == KEYDOWN:
if event.key == K_a:
a = "Agility"
break
elif event.key == K_c:
a = "Charisma"
break
elif event.key == K_e:
a = "Endurance"
break
elif event.key == K_i:
a = "Intelligence"
break
elif event.key == K_w:
a = "Wisdom"
break
elif event.key == K_s:
a = "Strength"
break
elif event.key == K_f:
return
while True:
bg = pygame.image.load(self.display.BG_FULL).convert()
screen.blit(bg, (0, 0))
self.sheet.selectabilities(self.chardata, self.screen, a)
row = 24
col = 0
ch = self.display.FONT.render("F", True,
self.display.WHITE)
screen.blit(ch, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
text = self.display.FONT.render("INISH", True,
self.display.BRIGHT_GREEN)
screen.blit(text, ((col + 1) * self.display.CH_SPACE,
row * self.display.CH_SPACE))
row = 17
col = 2
for line in promptb:
text = self.display.FONT.render(line.upper(), True,
self.display.BRIGHT_GREEN)
screen.blit(text, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
row += 1
pygame.display.update()
event = pygame.event.wait()
if event.type == KEYDOWN:
if event.key == K_a:
b = "Agility"
break
elif event.key == K_c:
b = "Charisma"
break
elif event.key == K_e:
b = "Endurance"
break
elif event.key == K_i:
b = "Intelligence"
break
elif event.key == K_w:
b = "Wisdom"
break
elif event.key == K_s:
b = "Strength"
break
elif event.key == K_f:
return
self.chardata[a], self.chardata[b] = \
self.chardata[b], self.chardata[a]
def selectspecies(self, screen):
"""Allows the player to select a species for their character.
This deviates somewhat from the rule set, as this would be
selected as a 'specialty'. As it is here, this will 'cost' the
player their specialty, thus only humans will get to select a
specialty to begin with.
"""
bg = pygame.image.load(self.display.BG_FULL).convert()
screen.blit(bg, (0, 0))
menu = Menu()
self.chardata["Species"] = menu.singlelist(self.gamedata.SPECIES_NAMES,
2, 2, screen)
def setspeciesabilities(self):
"""Modifies ability scores based on selected species. Must be
done prior to archetype selection in order for proper filtering
to occur.
"""
species = self.chardata["Species"]
for i in range(len(self.gamedata.ABIL_NAMES)):
ability = self.gamedata.ABIL_NAMES[i]
score = self.chardata[ability][0]
modifier = self.gamedata.SPECIES[species][0][i]
self.chardata[ability][0] = self.chardata[ability][-1] = \
score + modifier
def selectgender(self, screen):
"""Allows the player to select a gender for their character."""
bg = pygame.image.load(self.display.BG_FULL).convert()
screen.blit(bg, (0, 0))
menu = Menu()
gender = ["Female", "Male"]
self.chardata["Gender"] = menu.singlelist(gender, 2, 2,screen)
def selectarchetype(self, screen):
"""Checks ability scores and allows player to select from
available archetype based on primary ability.
--Page 14--
"""
bg = pygame.image.load(self.display.BG_FULL).convert()
screen.blit(bg, (0, 0))
choices = []
# Checks character ability scores against archetype prime
# ability and appends to the available list if prime is 9 or
# greater for that archetype
for archetype in self.gamedata.ARCH:
prime = self.gamedata.ARCH_ATTRIBUTES[archetype][0]
if self.chardata[prime][0] > 8:
choices.append(archetype)
menu = Menu()
self.chardata["Archetype"] = menu.singlelist(choices, 2, 2, screen)
if self.chardata["Archetype"] == "Thief":
self.chardata["Thief Skill Points"] = 12
def selectmoralcode(self, screen):
"""Checks ability scores and allows player to select from
available archetype based on primary ability.
--Page 14--
"""
bg = pygame.image.load(self.display.BG_FULL).convert()
screen.blit(bg, (0, 0))
menu = Menu()
self.chardata["Moral Code"] = menu.singlelist(self.gamedata.MORAL_CODES,
2, 2, screen)
def setexperience(self):
"""If archetypal ability is 16 or greater, assigns 200 starting
experience points. Assigns 0 if not.
--Page 14--
"""
prime = self.gamedata.ARCH_ATTRIBUTES[self.chardata["Archetype"]][0]
if self.chardata[prime][0] > 15:
self.chardata["Experience"] = 200
else:
self.chardata["Experience"] = 0
def setsurvival(self):
"""Checks archetype for base survival points and then adds that
to Endurance Major Modifier.
--Page 14 & 35--
"""
survival = self.gamedata.ARCH_ATTRIBUTES[self.chardata["Archetype"]][2]
modifier = self.chardata["Endurance"][1]
self.chardata["Survival"] = survival + modifier
def setweapons(self):
"""Checks archetype for initial weapons and type, adding Charisma
Minor modifier and assigns.
--Page 14 & 33--
"""
weapons = self.gamedata.ARCH_ATTRIBUTES[self.chardata["Archetype"]][5]
modifier = self.chardata["Charisma"][2]
self.chardata["Weapon Slots"] = weapons + modifier
self.chardata["Weapon Type"] = \
self.gamedata.ARCH_ATTRIBUTES[self.chardata["Archetype"]][6]
def setskillpoints(self):
"""Checks archetype for initial skills and adds Intelligence
Major, Wisdom Minor and Charisma Minor modifiers. Assigns the
total to available skill points.
--Page 14 & 33--
"""
skills = self.gamedata.ARCH_ATTRIBUTES[self.chardata["Archetype"]][4]
modifier = self.chardata["Intelligence"][1] + \
self.chardata["Wisdom"][2] + \
self.chardata["Charisma"][2]
self.chardata["Skill Points"] = skills + modifier
def setinitialgold(self):
"""Checks archetype for number of dice to roll and bonus (+10
for Monks). Multiplies dice by 10 and adds bonus plus
Intelligence, Wisdom and Charisma Major modifiers.
--Page 14 (also archetype description)--
"""
archetype = self.chardata["Archetype"]
dice = self.gamedata.GOLD_START[archetype][0]
bonus = self.gamedata.GOLD_START[archetype][1]
modifier = self.chardata["Intelligence"][1] + \
self.chardata["Wisdom"][1] + \
self.chardata["Charisma"][1]
roll = []
for i in range(dice):
roll.append(random.randint(1, 6))
gold = (sum(roll) * 10) + bonus + modifier
self.chardata["Gold"] = gold
def setsaves(self):
"""Assigns Saving Roll values using 4 as a base and adding Major
modifier, Minor modifier, Archetype bonus and Species modifiers
where appropriate.
--Page 35--
"""
for save in self.gamedata.SAVES:
base = 4
majorattribute = self.gamedata.SAVES_ATTRIBUTES[save][0]
major = self.chardata[majorattribute][1]
minorattribute = self.gamedata.SAVES_ATTRIBUTES[save][1]
minor = self.chardata[minorattribute][2]
archetype = self.gamedata.SAVES_ATTRIBUTES[save][2]
if archetype == self.chardata["Archetype"]:
bonus = 1
else:
bonus = 0
species = self.chardata["Species"]
index = self.gamedata.SAVES.index(save)
specmod = self.gamedata.SPECIES[species][6][index]
roll = base + major + minor + bonus + specmod
self.chardata[save] = roll
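# Worked example (illustrative, hypothetical modifiers): with a +2 major
# modifier, a +1 minor modifier, a matching archetype (+1) and no
# species modifier, the saving roll is 4 + 2 + 1 + 1 + 0 = 8.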
def setsurprise(self):
"""Assigns Surprise using Perception plus Agility Minor
modifier.
--Page 36--
"""
perception = self.chardata["Perception"]
minor = self.chardata["Agility"][2]
self.chardata["Surprise"] = perception + minor
def setadvantage(self):
"""Assigns Advantage as a sum of Agility Major and Charisma
Minor modifiers.
--Page 36--
"""
major = self.chardata["Agility"][1]
minor = self.chardata["Charisma"][2]
self.chardata["Advantage"] = major + minor
def setdefense(self):
"""Assigns Defense as Agility Major modifier.
--Page 36--
"""
self.chardata["Defense"] = self.chardata["Agility"][1]
def setattackbonus(self):
"""Close Combat Bonus (Hand Atk) assigned as Strength Minor;
damage bonus is Strength Major. Thrown Attack (Thrown Atk) is
Agility Minor; damage bonus is Strength Minor; range penalty is
reduced by Strength Minor. Propelled Attack (Prop Atk) is
Agility Minor, with no damage bonus.
--Page 36--
"""
str_major = self.chardata["Strength"][1]
str_minor = self.chardata["Strength"][2]
agi_minor = self.chardata["Agility"][2]
# Close Combat Attack: {"Hand Atk": [Attack, Damage]}
self.chardata["Hand Atk"] = [str_minor, str_major]
# Thrown Attack: {"Thrown Atk": [Attack, Damage, Range]}
self.chardata["Thrown Atk"] = [agi_minor, str_minor, str_minor]
# Propelled Attack: {"Prop Atk": Attack}
self.chardata["Prop Atk"] = agi_minor
def setphysicaltraits(self):
"""Assigns physical attributes:
Age = 15 * species modifier plus 1d6 rolled for mod value
Height = species base + species dice + Str Maj + End Min
Weight = species base + ((5d6 + Str Maj + End Min) *
species modifier)
If Age >= 20 then bonus skill points are applied per
SKILLAGEBONUS.
--Page 36--
"""
# Age
species = self.chardata["Species"]
specmod = self.gamedata.SPECIES[species][3]
if species == "Half-Orc":
# round after applying the species modifier, not before
age = int(round((15 + random.randint(1, 6)) * specmod))
self.chardata["Age"] = age
else:
base = 15 * specmod
dice = specmod
rolls = 0
for i in range(dice):
rolls += random.randint(1, 6)
age = base + rolls
self.chardata["Age"] = age
bonus = 8
for skillage in self.gamedata.SKILLAGEBONUS:
if age < skillage:
bonus -= 1
else:
break
self.chardata["Skill Points"] += bonus
# Height
base = self.gamedata.SPECIES[species][2][0]
dice = self.gamedata.SPECIES[species][2][2]
rolls = 0
for i in range(dice):
rolls += random.randint(1, 6)
height = base + rolls + self.chardata["Strength"][1] + \
self.chardata["Endurance"][2]
self.chardata["Height"] = height
# Weight
base = self.gamedata.SPECIES[species][2][1]
dice = 5
specmod = self.gamedata.SPECIES[species][2][3]
rolls = 0
for i in range(dice):
rolls += random.randint(1, 6)
weight = base + ((rolls + self.chardata["Endurance"][1] + \
self.chardata["Strength"][2]) * specmod)
self.chardata["Weight"] = weight
def setmovement(self):
"""Assigns movement rate based on species base move modified by
Agility Major and Strength Minor. Also assigns Lift and Carry
according to the character's Strength and multiplying their
weight against the Lift and Carry values in the table on page
37.
--Page 37--
"""
# Movement rate
species = self.chardata["Species"]
base = self.gamedata.SPECIES[species][4]
str_major = self.chardata["Strength"][1]
agi_minor = self.chardata["Agility"][2]
self.chardata["Movement"] = base + str_major + agi_minor
# Lift and Carry
weight = self.chardata["Weight"]
strength = self.chardata["Strength"][0]
end_major = self.chardata["Endurance"][1]
self.chardata["Lift"] = \
int(round(weight * self.gamedata.LIFTANDCARRY[strength][0]))
self.chardata["Carry"] = \
int(round(weight * self.gamedata.LIFTANDCARRY[strength][1] + \
(end_major * 10)))
def setmojo(self):
"""Assigns mojo as 10 + Level."""
level = self.chardata["Level"]
mojo = 10 + level
self.chardata["Mojo"] = mojo
def setname(self, screen):
namegen = NameGenerator()
name = namegen.generatename(self.chardata)
prompt = "NAME:"
while True:
bg = pygame.image.load(self.display.BG_FULL).convert()
screen.blit(bg, (0, 0))
row = 2
col = 2
            text = self.display.FONT.render(prompt, True, self.display.WHITE)
            screen.blit(text, (col * self.display.CH_SPACE,
                               row * self.display.CH_SPACE))
            col = col + len(prompt) + 1
            text = self.display.FONT.render(name.upper(), True,
                                            self.display.BRIGHT_GREEN)
            screen.blit(text, (col * self.display.CH_SPACE,
                               row * self.display.CH_SPACE))
row = 24
col = 0
ch = self.display.FONT.render("K", True,
self.display.WHITE)
screen.blit(ch, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
text = self.display.FONT.render("EEP", True,
self.display.BRIGHT_GREEN)
screen.blit(text, ((col + 1) * self.display.CH_SPACE,
row * self.display.CH_SPACE))
col = 5
ch = self.display.FONT.render("N", True,
self.display.WHITE)
screen.blit(ch, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
text = self.display.FONT.render("EW", True,
self.display.BRIGHT_GREEN)
screen.blit(text, ((col + 1) * self.display.CH_SPACE,
row * self.display.CH_SPACE))
col = 9
ch = self.display.FONT.render("C", True,
self.display.WHITE)
screen.blit(ch, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
text = self.display.FONT.render("USTOM", True,
self.display.BRIGHT_GREEN)
screen.blit(text, ((col + 1) * self.display.CH_SPACE,
row * self.display.CH_SPACE))
pygame.display.update()
event = pygame.event.wait()
if event.type == KEYDOWN:
if event.key == K_k:
self.chardata["Name"] = name
break
elif event.key == K_n:
name = namegen.generatename(self.chardata)
elif event.key == K_c:
prompt = "CHARACTER NAME:"
nameinput = Menu()
                    name = nameinput.textinput(prompt, screen)
self.chardata["Name"] = name
break
def chooseskills(self, screen):
"""Allows player to choose skills for the character.
--Page 35--
"""
        selectskills = Skills()
        skills, bonus, points = selectskills.chooseskills(self.chardata,
                                                          screen)
        self.chardata["Skills"] = skills
        self.chardata["Bonus Skill"] = bonus
        self.chardata["Skill Points"] = points
        self.sheet.printcharactersheet(self.chardata, screen)
class NameGenerator:
"""Generates a name based on the character's species and gender."""
def __init__(self):
self.NONTERMINAL = re.compile(r"<(\w+)>")
self.ELF = {"name": ["<start><middle><end>"],
"start": ["An", "Bel", "Cel", "El", "Elr", "Elv", "Eow",
"Ear", "F", "G", "Gal", "Gl", "Is", "Leg", "Lom",
"N", "S", "T", "Thr", "Tin"],
"middle": ["a", "adrie", "ara", "e", "ebri", "i", "io",
"ithra", "ilma", "il-Ga", "o", "orfi", "u", "y"],
"end": ["l", "", "las", "lad", "ldor", "ldur", "linde",
"lith", "mir", "n", "nd", "ndel", "ndil", "ndir",
"nduil", "ng", "mbor", "r", "rith", "ril", "riand",
"rion", "thien", "viel", "wen", "wyn"]
}
self.HALFORC = {"name": ["<start><middle><end>"],
"start": ["B", "C", "D", "Er", "F", "G", "Gr", "H", "K",
"L", "M", "N", "P", "Pr", "R", "S", "T", "V",
"Vr"],
"middle": ["a", "i", "o", "u"],
"end": ["dak", "dash", "dish", "dush", "gak", "gar",
"gor", "gdush", "hai", "l", "lo", "lok",
"gdish", "k", "kar", "kor", "lg", "mak", "nak",
"nai", "ng", "nk", "rag", "rbag", "rg", "rk",
"rt", "ruk", "shnak"]
}
self.GOBLIN = {"name": ["<start><end>"],
"start": ["Big", "Bo", "Dof", "Gim", "Gof", "It", "Kim",
"Leb", "Lib", "Luk", "Mor", "Nif", "Nog",
"Nuf", "Rat", "Rub", "Shek", "Shim", "Skar",
"Tid", "Tip", "Tob", "Top", "Zib", "Zig"],
"end": ["bez", "bit", "ess", "fen", "gash", "gin", "git",
"glum", "ink", "itz", "iz", "let", "lid", "lik",
"lob", "mink", "rak", "rut", "sham", "snik",
"sub", "sus", "wig", "zag", "zib"]
}
self.DWARF = {"name": ["<start><middle><end>"],
"start": ["B", "D", "F", "G", "Gl", "H", "K", "L", "M",
"N", "R", "S", "T", "V"],
"middle": ["a", "e", "i", "o", "oi", "u"],
"end": ["bur", "fur", "gan", "gnus", "gnar", "li", "lin",
"lir", "mli", "nar", "nus", "rin", "ran", "sin",
"sil", "sur"]
}
self.GNOME = {"name": ["<start><middle><end>"],
"start": ["Aeth", "Addr", "Bl", "C", "Car", "D", "G",
"Gl", "Gw", "L", "M", "Ow", "R", "Rh", "S", "T",
"V", "Yr"],
"middle": ["a", "ae", "e", "eo", "i", "o", "u", "y"],
"end": ["bryn", "c", "cyn", "dd", "ddry", "ddyn", "doc",
"dry", "gwyn", "llyn", "myr", "n", "nnyn", "nry",
"nvan", "nyc", "r", "rcyn", "rraent", "ran",
"ryn"]
}
self.HUMAN_M2 = {"name": ["<start><end>"],
"start": ["A", "Ab", "Ac", "Ad", "Af", "Agr", "Ast",
"As", "Al", "Adw", "Adr", "Ar", "B", "Br",
"C", "C", "C", "Cr", "Ch", "Cad", "D", "Dr",
"Dw", "Ed", "Eth", "Et", "Er", "El", "Eow",
"F", "Fr", "G", "Gr", "Gw", "Gw", "Gal",
"Gl", "H", "Ha", "Ib", "Jer", "K", "Ka",
"Ked", "L", "Loth", "Lar", "Leg", "M", "Mir",
"N", "Nyd", "Ol", "Oc", "On", "P", "Pr", "R",
"Rh", "S", "Sev", "T", "Tr", "Th", "Th", "V",
"Y", "Yb", "Z", "W", "W", "Wic"],
"end": ["a", "ae", "ae", "au", "ao", "are", "ale",
"ali", "ay", "ardo", "e", "ei", "ea", "ea",
"eri", "era", "ela", "eli", "enda", "erra",
"i", "ia", "ie", "ire", "ira", "ila", "ili",
"ira", "igo", "o", "oa", "oi", "oe", "ore",
"u", "y"]
}
self.HUMAN_M3 = {"name": ["<start><middle><end>"],
"start": ["A", "Ab", "Ac", "Ad", "Af", "Agr", "Ast",
"As", "Al", "Adw", "Adr", "Ar", "B", "Br",
"C", "C", "C", "Cr", "Ch", "Cad", "D", "Dr",
"Dw", "Ed", "Eth", "Et", "Er", "El", "Eow",
"F", "Fr", "G", "Gr", "Gw", "Gw", "Gal",
"Gl", "H", "Ha", "Ib", "Jer", "K", "Ka",
"Ked", "L", "Loth", "Lar", "Leg", "M", "Mir",
"N", "Nyd", "Ol", "Oc", "On", "P", "Pr", "R",
"Rh", "S", "Sev", "T", "Tr", "Th", "Th", "V",
"Y", "Yb", "Z", "W", "W", "Wic"],
"middle": ["a", "ae", "ae", "au", "ao", "are", "ale",
"ali", "ay", "ardo", "e", "ei", "ea", "ea",
"eri", "era", "ela", "eli", "enda", "erra",
"i", "ia", "ie", "ire", "ira", "ila", "ili",
"ira", "igo", "o", "oa", "oi", "oe", "ore",
"u", "y"],
"end": ["a", "and", "b", "bwyn", "baen", "bard", "c",
"ctred", "cred", "ch", "can", "d", "dan",
"don", "der", "dric", "dfrid", "dus", "f", "g",
"gord", "gan", "l", "li", "lgrin", "lin",
"lith", "lath", "loth", "ld", "ldric", "ldan",
"m", "mas", "mos", "mar", "mond", "n", "nydd",
"nidd", "nnon", "nwan", "nyth", "nad", "nn",
"nnor", "nd", "p", "r", "ron", "rd", "s", "sh",
"seth", "sean", "t", "th", "th", "tha", "tlan",
"trem", "tram", "v", "vudd", "w", "wan", "win",
"win", "wyn", "wyn", "wyr", "wyr", "wyth"]
}
self.HUMAN_F2 = {"name": ["<start><end>"],
"start": ["A", "Ab", "Ac", "Ad", "Af", "Agr", "Ast",
"As", "Al", "Adw", "Adr", "Ar", "B", "Br",
"C", "C", "C", "Cr", "Ch", "Cad", "D", "Dr",
"Dw", "Ed", "Eth", "Et", "Er", "El", "Eow",
"F", "Fr", "G", "Gr", "Gw", "Gw", "Gal",
"Gl", "H", "Ha", "Ib", "Jer", "K", "Ka",
"Ked", "L", "Loth", "Lar", "Leg", "M", "Mir",
"N", "Nyd", "Ol", "Oc", "On", "P", "Pr", "Q",
"R", "Rh", "S", "Sev", "T", "Tr", "Th", "Th",
"Ul", "Um", "Un", "V", "Y", "Yb", "Z", "W",
"W", "Wic"],
"end": ["a", "a", "a", "ae", "ae", "au", "ao", "are",
"ale", "ali", "ay", "ardo", "e", "e", "e",
"ei", "ea", "ea", "eri", "era", "ela", "eli",
"enda", "erra", "i", "i", "i", "ia", "ie",
"ire", "ira", "ila", "ili", "ira", "igo", "o",
"oa", "oi", "oe", "ore", "u", "y"]
}
self.HUMAN_F3 = {"name": ["<start><middle><end>"],
"start": ["A", "Ab", "Ac", "Ad", "Af", "Agr", "Ast",
"As", "Al", "Adw", "Adr", "Ar", "B", "Br",
"C", "C", "C", "Cr", "Ch", "Cad", "D", "Dr",
"Dw", "Ed", "Eth", "Et", "Er", "El", "Eow",
"F", "Fr", "G", "Gr", "Gw", "Gw", "Gal",
"Gl", "H", "Ha", "Ib", "Jer", "K", "Ka",
"Ked", "L", "Loth", "Lar", "Leg", "M", "Mir",
"N", "Nyd", "Ol", "Oc", "On", "P", "Pr", "Q",
"R", "Rh", "S", "Sev", "T", "Tr", "Th", "Th",
"Ul", "Um", "Un", "V", "Y", "Yb", "Z", "W",
"W", "Wic"],
"middle": ["a", "a", "a", "ae", "ae", "au", "ao",
"are", "ale", "ali", "ay", "ardo", "e", "e",
"e", "ei", "ea", "ea", "eri", "era", "ela",
"eli", "enda", "erra", "i", "i", "i", "ia",
"ie", "ire", "ira", "ila", "ili", "ira",
"igo", "o", "oa", "oi", "oe", "ore", "u",
"y"],
"end": ["beth", "cia", "cien", "clya", "de", "dia",
"dda", "dien", "dith", "dia", "lind", "lith",
"lia", "lian", "lla", "llan", "lle", "ma",
"mma", "mwen", "meth", "n", "n", "n", "nna",
"ndra", "ng", "ni", "nia", "niel", "rith",
"rien", "ria", "ri", "rwen", "sa", "sien",
"ssa", "ssi", "swen", "thien", "thiel", "viel",
"via", "ven", "veth", "wen", "wen", "wen",
"wen", "wia", "weth", "wien", "wiel"]
}
self.HALFLING_M = {"name": ["<start><middle><end>"],
"start": ["B", "Dr", "Fr", "Mer", "Per", "R", "S"],
"middle": ["a", "e", "i", "ia", "o", "oi", "u"],
"end": ["bo", "do", "doc", "go", "grin", "m", "ppi",
"rry"]
}
self.HALFLING_F = {"name": ["<start><middle><end>"],
"start": ["Al", "Br", "C", "Cl", "D", "El", "Gw",
"J", "L", "M", "N", "Mer", "S", "R", "Ys"],
"middle": ["a", "ae", "e", "ea", "i", "o", "u", "y",
"w"],
"end": ["brylla", "cla", "dda", "ll", "lla", "llyra",
"lonna", "lyan", "na", "ngwen", "niver",
"noic", "ra", "rka", "ryan", "ssa", "vyan"]
}
def generatename(self, chardata):
species = chardata["Species"]
gender = chardata["Gender"]
namegrammar = self.definegrammar(species, gender)
namestr = random.choice(namegrammar["name"])
matchnonterminal = self.NONTERMINAL.search(namestr)
while matchnonterminal:
substr = random.choice(namegrammar[matchnonterminal.group(1)])
namestr = self.NONTERMINAL.sub(substr, namestr, 1)
matchnonterminal = self.NONTERMINAL.search(namestr)
return namestr
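    # Illustrative expansion: "<start><middle><end>" might resolve to
    # "El" + "adrie" + "ndil" -> "Eladriendil" (actual picks are random).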
def definegrammar(self, species, gender):
if species == "Dwarf":
return self.DWARF
elif species == "Elf":
return self.ELF
elif species == "Gnome":
return self.GNOME
elif species == "Goblin":
return self.GOBLIN
elif species == "Halfling" and gender == "Female":
return self.HALFLING_F
elif species == "Halfling" and gender == "Male":
return self.HALFLING_M
elif species == "Half-Elf":
roll = random.randint(1, 100)
if roll < 50 and gender == "Female":
return self.namehumanfemale()
elif roll < 50 and gender == "Male":
return self.namehumanmale()
else:
return self.ELF
elif species == "Half-Orc":
return self.HALFORC
elif species == "Human" and gender == "Female":
return self.namehumanfemale()
elif species == "Human" and gender == "Male":
return self.namehumanmale()
def namehumanfemale(self):
roll = random.randint(1, 100)
if roll < 50:
namegrammar = self.HUMAN_F2
else:
namegrammar = self.HUMAN_F3
return namegrammar
def namehumanmale(self):
roll = random.randint(1, 100)
if roll < 50:
namegrammar = self.HUMAN_M2
else:
namegrammar = self.HUMAN_M3
return namegrammar
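# Usage sketch (hypothetical chardata; the real dict is built during
# character creation above):
#   namegen = NameGenerator()
#   name = namegen.generatename({"Species": "Dwarf", "Gender": "Male"})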
class Skills:
"""
This section to be removed
"""
def __init__(self):
self.gamedata = GameData()
self.display = Display()
self.menu = Menu()
self.row = 2
self.col = 2
self.bonusset = 0
    def chooseskills(self, chardata, screen):
        self.archetype = chardata["Archetype"]
        self.species = chardata["Species"]
        self.points = chardata["Skill Points"]
        self.initialskills = chardata["Skills"]
        self.bonus = chardata["Bonus Skill"]
        if self.bonus != "":
            self.bonus = 1
        self.setarchetypeskills()
        # Stub: return the current values unchanged so callers can unpack
        # a 3-tuple until the selection flow is implemented.
        return self.initialskills, self.bonus, self.points
    def setarchetypeskills(self):
        """Builds the archetype skill list (not yet implemented)."""
class DisplayCharacter:
"""Displays the various character sheet screens"""
def __init__(self):
self.display = Display()
self.gamedata = GameData()
def printcharactersheet(self, chardata, screen):
bg = pygame.image.load(self.display.BG_CHAR).convert()
screen.blit(bg, (0, 0))
# Personal data
row = 2
col = 2
elements = [chardata["Name"],
chardata["Gender"] + " " + chardata["Species"] + " AGE " + \
str(chardata["Age"]),
chardata["Moral Code"],
"LEVEL " + str(chardata["Level"]) + " " + \
chardata["Archetype"],
"EXP " + str(chardata["Experience"])
]
for element in elements:
            element = element.upper()
text = self.display.FONT.render(element, True,
self.display.WHITE)
screen.blit(text, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
row += 1
# Ability scores
elements = []
values = []
for ability in self.gamedata.ABIL_NAMES:
elements.append(ability)
values.append(chardata[ability][-1])
self.printscores(elements, values, (8, 2), 13, 2, screen)
# Saving throws
elements = []
values = []
for save in self.gamedata.SAVES:
elements.append(save)
values.append(chardata[save])
self.printscores(elements, values, (15, 2), 13, 2, screen)
# Gold and Mojo
elements = ["Gold", "Mojo"]
values = []
for element in elements:
values.append(chardata[element])
self.printscores(elements, values, (2, 28), 5, 5, screen)
# Combat scores
elements = ["Survival", "Defense", "Advantage", "Surprise"]
values = []
for element in elements:
values.append(chardata[element])
elements.append("Hand Atk")
values.append(str(chardata["Hand Atk"][0]) + "/" + \
str(chardata["Hand Atk"][1]))
elements.append("Thrown Atk")
values.append(str(chardata["Thrown Atk"][0]) + "/" + \
str(chardata["Thrown Atk"][1]) + "/" + \
str(chardata["Thrown Atk"][2]))
elements.append("Prop Atk")
values.append(str(chardata["Prop Atk"]))
self.printscores(elements, values, (8, 19), 11, 8, screen)
# Movement
elements = ["Movement", "Height", "Weight", "Lift", "Carry"]
values = []
for element in elements:
values.append(chardata[element])
self.printscores(elements, values, (16, 19), 11, 8, screen)
pygame.display.update()
def printscores(self, elements, values, coords, offset, justify, screen):
"""Prints scores block such as ability scores. (labels) is
a list of the labels for each of the scores and should match the
keys contained in the character data. (scores) is a list of the
values corresponding to each element. (coords) is a tuple
containing (row, col) of the first character placement of the
block. (offset) is the column offset that will dictate where the
first character of the score should be placed. (justify) is the
columns to right justify the scores.
"""
row = coords[0]
col = coords[1]
for i in range(len(elements)):
            element = elements[i].upper()
value = str(values[i]).rjust(justify)
text = self.display.FONT.render(element, True,
self.display.WHITE)
screen.blit(text, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
text = self.display.FONT.render(value, True,
self.display.BRIGHT_GREEN)
screen.blit(text, ((col + offset) * self.display.CH_SPACE,
row * self.display.CH_SPACE))
row += 1
def printabilities(self, chardata, screen):
"""Prints ability scores alone. Requires (chardata) to be passed
as well as the (screen).
"""
elements = []
values = []
for ability in self.gamedata.ABIL_NAMES:
elements.append(ability)
values.append(chardata[ability][0])
self.printscores(elements, values, (8, 2), 13, 2, screen)
    def selectabilities(self, chardata, screen, select=""):
        """Prints ability scores with hotkey highlighting. Requires
        (chardata) to be passed as well as the (screen). (select) is
        optional and fully highlights the named ability if passed.
        """
elements = []
values = []
for ability in self.gamedata.ABIL_NAMES:
elements.append(ability)
values.append(chardata[ability][0])
row = 8
col = 2
for i in range(len(elements)):
            element = elements[i].upper()
value = str(values[i]).rjust(2)
if select == elements[i]:
text = self.display.FONT.render(element, True,
self.display.WHITE)
screen.blit(text, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
else:
ch = self.display.FONT.render(element[0], True,
self.display.WHITE)
screen.blit(ch, (col * self.display.CH_SPACE,
row * self.display.CH_SPACE))
text = self.display.FONT.render(element[1:], True,
self.display.BRIGHT_MAGENTA)
screen.blit(text, ((col + 1) * self.display.CH_SPACE,
row * self.display.CH_SPACE))
text = self.display.FONT.render(value, True,
self.display.BRIGHT_MAGENTA)
screen.blit(text, ((col + 13) * self.display.CH_SPACE,
row * self.display.CH_SPACE))
row += 1
|
jmuckian/GodsAndMonsters
|
bin/char.py
|
Python
|
gpl-3.0
| 46,928
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import bz2
import errno
import filecmp
import logging
import shutil
import unittest
from collections import OrderedDict
from gzip import GzipFile
from itertools import product
from tempfile import NamedTemporaryFile, mkdtemp
import mock
from airflow.exceptions import AirflowException
from airflow.providers.apache.hive.operators.s3_to_hive import S3ToHiveTransferOperator
try:
import boto3
from moto import mock_s3
except ImportError:
mock_s3 = None
class TestS3ToHiveTransfer(unittest.TestCase):
def setUp(self):
self.file_names = {}
self.task_id = 'S3ToHiveTransferTest'
self.s3_key = 'S32hive_test_file'
self.field_dict = OrderedDict([('Sno', 'BIGINT'), ('Some,Text', 'STRING')])
self.hive_table = 'S32hive_test_table'
self.delimiter = '\t'
self.create = True
self.recreate = True
self.partition = {'ds': 'STRING'}
self.headers = True
self.check_headers = True
self.wildcard_match = False
self.input_compressed = False
self.kwargs = {'task_id': self.task_id,
's3_key': self.s3_key,
'field_dict': self.field_dict,
'hive_table': self.hive_table,
'delimiter': self.delimiter,
'create': self.create,
'recreate': self.recreate,
'partition': self.partition,
'headers': self.headers,
'check_headers': self.check_headers,
'wildcard_match': self.wildcard_match,
'input_compressed': self.input_compressed
}
try:
header = b"Sno\tSome,Text \n"
line1 = b"1\tAirflow Test\n"
line2 = b"2\tS32HiveTransfer\n"
self.tmp_dir = mkdtemp(prefix='test_tmps32hive_')
# create sample txt, gz and bz2 with and without headers
with NamedTemporaryFile(mode='wb+',
dir=self.tmp_dir,
delete=False) as f_txt_h:
self._set_fn(f_txt_h.name, '.txt', True)
f_txt_h.writelines([header, line1, line2])
fn_gz = self._get_fn('.txt', True) + ".gz"
with GzipFile(filename=fn_gz, mode="wb") as f_gz_h:
self._set_fn(fn_gz, '.gz', True)
f_gz_h.writelines([header, line1, line2])
fn_gz_upper = self._get_fn('.txt', True) + ".GZ"
with GzipFile(filename=fn_gz_upper, mode="wb") as f_gz_upper_h:
self._set_fn(fn_gz_upper, '.GZ', True)
f_gz_upper_h.writelines([header, line1, line2])
fn_bz2 = self._get_fn('.txt', True) + '.bz2'
with bz2.BZ2File(filename=fn_bz2, mode="wb") as f_bz2_h:
self._set_fn(fn_bz2, '.bz2', True)
f_bz2_h.writelines([header, line1, line2])
            # create sample txt, gz and bz2 without header
with NamedTemporaryFile(mode='wb+', dir=self.tmp_dir, delete=False) as f_txt_nh:
self._set_fn(f_txt_nh.name, '.txt', False)
f_txt_nh.writelines([line1, line2])
fn_gz = self._get_fn('.txt', False) + ".gz"
with GzipFile(filename=fn_gz, mode="wb") as f_gz_nh:
self._set_fn(fn_gz, '.gz', False)
f_gz_nh.writelines([line1, line2])
fn_gz_upper = self._get_fn('.txt', False) + ".GZ"
with GzipFile(filename=fn_gz_upper, mode="wb") as f_gz_upper_nh:
self._set_fn(fn_gz_upper, '.GZ', False)
f_gz_upper_nh.writelines([line1, line2])
fn_bz2 = self._get_fn('.txt', False) + '.bz2'
with bz2.BZ2File(filename=fn_bz2, mode="wb") as f_bz2_nh:
self._set_fn(fn_bz2, '.bz2', False)
f_bz2_nh.writelines([line1, line2])
# Base Exception so it catches Keyboard Interrupt
except BaseException as e:
logging.error(e)
self.tearDown()
def tearDown(self):
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
# ENOENT - no such file or directory
if e.errno != errno.ENOENT:
raise e
# Helper method to create a dictionary of file names and
# file types (file extension and header)
def _set_fn(self, fn, ext, header):
key = self._get_key(ext, header)
self.file_names[key] = fn
# Helper method to fetch a file of a
# certain format (file extension and header)
def _get_fn(self, ext, header):
key = self._get_key(ext, header)
return self.file_names[key]
@staticmethod
def _get_key(ext, header):
key = ext + "_" + ('h' if header else 'nh')
return key
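    # Example: _get_key('.gz', True) -> '.gz_h'; _get_key('.txt', False) -> '.txt_nh'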
@staticmethod
def _check_file_equality(fn_1, fn_2, ext):
# gz files contain mtime and filename in the header that
# causes filecmp to return False even if contents are identical
# Hence decompress to test for equality
if ext.lower() == '.gz':
with GzipFile(fn_1, 'rb') as f_1, NamedTemporaryFile(mode='wb') as f_txt_1:
with GzipFile(fn_2, 'rb') as f_2, NamedTemporaryFile(mode='wb') as f_txt_2:
shutil.copyfileobj(f_1, f_txt_1)
shutil.copyfileobj(f_2, f_txt_2)
f_txt_1.flush()
f_txt_2.flush()
return filecmp.cmp(f_txt_1.name, f_txt_2.name, shallow=False)
else:
return filecmp.cmp(fn_1, fn_2, shallow=False)
def test_bad_parameters(self):
self.kwargs['check_headers'] = True
self.kwargs['headers'] = False
self.assertRaisesRegex(AirflowException, "To check_headers.*",
S3ToHiveTransferOperator, **self.kwargs)
def test__get_top_row_as_list(self):
self.kwargs['delimiter'] = '\t'
fn_txt = self._get_fn('.txt', True)
header_list = S3ToHiveTransferOperator(**self.kwargs). \
_get_top_row_as_list(fn_txt)
        self.assertEqual(header_list, ['Sno', 'Some,Text'],
                         msg="Top row from file doesn't match expected value")
self.kwargs['delimiter'] = ','
header_list = S3ToHiveTransferOperator(**self.kwargs). \
_get_top_row_as_list(fn_txt)
        self.assertEqual(header_list, ['Sno\tSome', 'Text'],
                         msg="Top row from file doesn't match expected value")
def test__match_headers(self):
self.kwargs['field_dict'] = OrderedDict([('Sno', 'BIGINT'),
('Some,Text', 'STRING')])
self.assertTrue(S3ToHiveTransferOperator(**self.kwargs).
_match_headers(['Sno', 'Some,Text']),
msg="Header row doesnt match expected value")
# Testing with different column order
self.assertFalse(S3ToHiveTransferOperator(**self.kwargs).
_match_headers(['Some,Text', 'Sno']),
msg="Header row doesnt match expected value")
# Testing with extra column in header
self.assertFalse(S3ToHiveTransferOperator(**self.kwargs).
_match_headers(['Sno', 'Some,Text', 'ExtraColumn']),
msg="Header row doesnt match expected value")
def test__delete_top_row_and_compress(self):
s32hive = S3ToHiveTransferOperator(**self.kwargs)
# Testing gz file type
fn_txt = self._get_fn('.txt', True)
gz_txt_nh = s32hive._delete_top_row_and_compress(fn_txt,
'.gz',
self.tmp_dir)
fn_gz = self._get_fn('.gz', False)
self.assertTrue(self._check_file_equality(gz_txt_nh, fn_gz, '.gz'),
msg="gz Compressed file not as expected")
# Testing bz2 file type
bz2_txt_nh = s32hive._delete_top_row_and_compress(fn_txt,
'.bz2',
self.tmp_dir)
fn_bz2 = self._get_fn('.bz2', False)
self.assertTrue(self._check_file_equality(bz2_txt_nh, fn_bz2, '.bz2'),
msg="bz2 Compressed file not as expected")
@unittest.skipIf(mock is None, 'mock package not present')
@unittest.skipIf(mock_s3 is None, 'moto package not present')
@mock.patch('airflow.providers.apache.hive.operators.s3_to_hive.HiveCliHook')
@mock_s3
def test_execute(self, mock_hiveclihook):
conn = boto3.client('s3')
conn.create_bucket(Bucket='bucket')
        # Testing txt, gz, GZ and bz2 files with and without header row
for (ext, has_header) in product(['.txt', '.gz', '.bz2', '.GZ'], [True, False]):
self.kwargs['headers'] = has_header
self.kwargs['check_headers'] = has_header
logging.info("Testing %s format %s header", ext, 'with' if has_header else 'without')
self.kwargs['input_compressed'] = ext.lower() != '.txt'
self.kwargs['s3_key'] = 's3://bucket/' + self.s3_key + ext
ip_fn = self._get_fn(ext, self.kwargs['headers'])
op_fn = self._get_fn(ext, False)
# Upload the file into the Mocked S3 bucket
conn.upload_file(ip_fn, 'bucket', self.s3_key + ext)
# file parameter to HiveCliHook.load_file is compared
# against expected file output
mock_hiveclihook().load_file.side_effect = \
lambda *args, **kwargs: self.assertTrue(
self._check_file_equality(args[0], op_fn, ext),
msg='{0} output file not as expected'.format(ext))
# Execute S3ToHiveTransfer
s32hive = S3ToHiveTransferOperator(**self.kwargs)
s32hive.execute(None)
@unittest.skipIf(mock is None, 'mock package not present')
@unittest.skipIf(mock_s3 is None, 'moto package not present')
@mock.patch('airflow.providers.apache.hive.operators.s3_to_hive.HiveCliHook')
@mock_s3
def test_execute_with_select_expression(self, mock_hiveclihook):
conn = boto3.client('s3')
conn.create_bucket(Bucket='bucket')
select_expression = "SELECT * FROM S3Object s"
bucket = 'bucket'
# Only testing S3ToHiveTransfer calls S3Hook.select_key with
# the right parameters and its execute method succeeds here,
# since Moto doesn't support select_object_content as of 1.3.2.
for (ext, has_header) in product(['.txt', '.gz', '.GZ'], [True, False]):
input_compressed = ext.lower() != '.txt'
key = self.s3_key + ext
self.kwargs['check_headers'] = False
self.kwargs['headers'] = has_header
self.kwargs['input_compressed'] = input_compressed
self.kwargs['select_expression'] = select_expression
self.kwargs['s3_key'] = 's3://{0}/{1}'.format(bucket, key)
ip_fn = self._get_fn(ext, has_header)
# Upload the file into the Mocked S3 bucket
conn.upload_file(ip_fn, bucket, key)
input_serialization = {
'CSV': {'FieldDelimiter': self.delimiter}
}
if input_compressed:
input_serialization['CompressionType'] = 'GZIP'
if has_header:
input_serialization['CSV']['FileHeaderInfo'] = 'USE'
# Confirm that select_key was called with the right params
with mock.patch('airflow.providers.amazon.aws.hooks.s3.S3Hook.select_key',
return_value="") as mock_select_key:
# Execute S3ToHiveTransfer
s32hive = S3ToHiveTransferOperator(**self.kwargs)
s32hive.execute(None)
mock_select_key.assert_called_once_with(
bucket_name=bucket, key=key,
expression=select_expression,
input_serialization=input_serialization
)
|
wooga/airflow
|
tests/providers/apache/hive/operators/test_s3_to_hive.py
|
Python
|
apache-2.0
| 13,017
|
#!/usr/bin/python
def characterPictureGrid(grid):
    # Print each row on one line; the trailing comma makes Python 2's
    # print statement emit a space instead of a newline after each cell.
    for row in grid:
        for cell in row:
            print cell,
        print
grid = [['.', '.', '.', '.', '.', '.'],
['.', 'O', 'O', '.', '.', '.'],
['O', 'O', 'O', 'O', '.', '.'],
['O', 'O', 'O', 'O', 'O', '.'],
['.', 'O', 'O', 'O', 'O', 'O'],
['O', 'O', 'O', 'O', 'O', '.'],
['O', 'O', 'O', 'O', '.', '.'],
['.', 'O', 'O', '.', '.', '.'],
['.', '.', '.', '.', '.', '.']]
characterPictureGrid(grid)
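# Expected output: one grid row per line, e.g. the first three rows are
#   . . . . . .
#   . O O . . .
#   O O O O . .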
|
ajitabhpandey/learn-programming
|
python/characterPictureGrid.py
|
Python
|
gpl-2.0
| 597
|
class PingResult:
def __init__(self, unique_id=1, destination="", ttl=0, time_stamp=0.0, round_trip_time=0, size=0,
successful=False):
"""
        :param unique_id: unique id of this ping result
        :type unique_id: int
        :param destination: target of ping
        :type destination: str
        :param ttl: the remaining ttl of the packet
        :type ttl: int
        :param time_stamp: timestamp on the packet
        :type time_stamp: float
        :param round_trip_time: round trip time in ms
:type round_trip_time: float
:param size: size of packet in bytes
:type size: int
:param successful: did ping get a response?
:type successful: bool
"""
self.Unique_Id = unique_id
self.Destination = destination
self.Ttl = ttl
self.Time_Stamp = time_stamp
self.Round_Trip_Time = round_trip_time
self.Size = size
self.Successful = successful
def __str__(self):
return "Dst: {0} Ttl: {1} Time: {2} Rtt: {3} Size: {4} Successful: {5}".format(self.Destination,
self.Ttl,
self.Time_Stamp,
self.Round_Trip_Time,
self.Size,
str(self.Successful))
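# Usage sketch (illustrative values):
#   result = PingResult(unique_id=1, destination="8.8.8.8", ttl=56,
#                       time_stamp=1500000000.0, round_trip_time=23.4,
#                       size=64, successful=True)
#   print(result)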
|
nowackie/networkMonitor
|
networkMonitor/PingResult.py
|
Python
|
mit
| 1,628
|
from subprocess import check_output
from xml.etree import ElementTree as etree
from sys import argv
from os import getenv
from os import path as os_path
# Resolve the emulator binary from the 'emu_path' environment variable
# (renamed to avoid shadowing the imported os.path module).
emu_path = os_path.expandvars(getenv('emu_path'))
emulators = check_output([emu_path, "-list-avds"]).decode("utf-8").rstrip().split('\n')
def build_item(title):
item_el = etree.Element('item')
item_el.attrib = {'arg': title, 'type': 'file'}
title_el = etree.Element('title')
title_el.text = title
item_el.append(title_el)
return item_el
root = etree.Element('items')
tree = etree.ElementTree(root)
for emu in emulators:
if len(argv) == 1 or emu.startswith(argv[1]):
root.append(build_item(emu))
# tostring() returns bytes when an encoding is given; decode before printing.
print(etree.tostring(root, encoding='utf8', method='xml').decode('utf-8'))
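# Example emitted XML for a single AVD named "Pixel_3" (illustrative):
#   <items><item arg="Pixel_3" type="file"><title>Pixel_3</title></item></items>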
|
nassendelft/alfred-android-emulator
|
emulist.py
|
Python
|
gpl-3.0
| 729
|
#!/usr/bin/env python
import os
import sys
import binascii
"""
Shark the Ripper Tool
For packet capture CTF problems:
Follow TCP Stream > Hex Dump > (Select Client/Server Chat) > Save As
Then input the file, followed by offset(s) where you want to cut.
-mandy
"""
if len(sys.argv) < 2:
print "Oh ffs, seriously?"
print "Usage: " + sys.argv[0] + " pasted_wireshark_hex_dump.txt START_OFFSET END_OFFSET"
sys.exit()
if os.path.isfile( sys.argv[1] ):
with open( sys.argv[1] ) as f:
filecontents = f.read()
if len( sys.argv ) > 2:
if len( sys.argv ) == 4:
start = sys.argv[2]
end = sys.argv[3]
else:
start = sys.argv[2]
end = "FFFFFFFF"
cut = True
if len( start ) != 8 or len( end ) != 8:
print "Invalid offset size"
sys.exit()
else:
cut = False
output = ""
if cut:
start_cutting = False
for row in filecontents.split("\n"):
if row != "":
if row[:8] == start:
start_cutting = True
if row[:8] == end:
start_cutting = False
            if start_cutting:
output += row[10:][:48].replace(" ", "")
else:
for row in filecontents.split("\n"):
if row != "":
output += row[10:][:48].replace(" ", "")
output = binascii.unhexlify(output)
with open( sys.argv[1] + ".out", 'w') as output_file:
output_file.write( output )
|
mandatoryprogrammer/ctf_tools
|
shark_the_ripper.py
|
Python
|
mit
| 1,595
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/1/19 0:38
# @Author : TOM.LEE
# @Site : https://github.com/amlyj/pythonStudy
# @File : study_Counter.py
# @Software: PyCharm
from collections import Counter
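# The original file ends at the import; a minimal Counter demo for study
# purposes (added illustration):
if __name__ == '__main__':
    words = ['a', 'b', 'a', 'c', 'a', 'b']
    counts = Counter(words)
    print(counts)                 # Counter({'a': 3, 'b': 2, 'c': 1})
    print(counts.most_common(2))  # [('a', 3), ('b', 2)]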
|
amlyj/pythonStudy
|
2.7/standard_library/collections0/study_Counter.py
|
Python
|
mit
| 229
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for typical Neural Network TensorFlow layers.
Additionally it maintains a collection with update_ops that need to be
updated after the ops have been computed, for example to update moving means
and moving variances of batch_norm.
Ops that have different behavior during training or eval have an is_training
parameter. Additionally, ops that contain variables.variable have a trainable
parameter, which controls whether the op's variables are trainable or not.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.training import moving_averages
from slim import losses
from slim import scopes
from slim import variables
# Used to keep the update ops done by batch_norm.
UPDATE_OPS_COLLECTION = '_update_ops_'
def _two_element_tuple(int_or_tuple):
"""Converts `int_or_tuple` to height, width.
Several of the functions that follow accept arguments as either
a tuple of 2 integers or a single integer. A single integer
indicates that the 2 values of the tuple are the same.
  This function normalizes the input value by always returning a tuple.
Args:
int_or_tuple: A list of 2 ints, a single int or a tf.TensorShape.
Returns:
A tuple with 2 values.
Raises:
    ValueError: If `int_or_tuple` is not well formed.
"""
if isinstance(int_or_tuple, (list, tuple)):
if len(int_or_tuple) != 2:
raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple)
return int(int_or_tuple[0]), int(int_or_tuple[1])
if isinstance(int_or_tuple, int):
return int(int_or_tuple), int(int_or_tuple)
if isinstance(int_or_tuple, tf.TensorShape):
if len(int_or_tuple) == 2:
return int_or_tuple[0], int_or_tuple[1]
raise ValueError('Must be an int, a list with 2 elements or a TensorShape of '
'length 2')
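# Examples: _two_element_tuple(3) -> (3, 3); _two_element_tuple([3, 5]) -> (3, 5)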
@scopes.add_arg_scope
def conv2d(inputs,
num_filters_out,
kernel_size,
stride=1,
padding='SAME',
activation=tf.nn.relu,
stddev=0.01,
bias=0.0,
weight_decay=0,
batch_norm_params=False,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a 2D convolution followed by an optional batch_norm layer.
conv2d creates a variable called 'weights', representing the convolutional
  kernel, that is convolved with the input. If `batch_norm_params` is not
  set, a second variable called 'biases' is added to the result of the
  convolution operation.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_filters_out: the number of output filters.
    kernel_size: a list of length 2: [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: one of 'VALID' or 'SAME'.
activation: activation function.
    stddev: standard deviation of the truncated gaussian weight distribution.
bias: the initial value of the biases.
weight_decay: the weight decay.
    batch_norm_params: if True, apply batch_norm after the convolution
      instead of a bias term.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
a tensor representing the output of the operation.
"""
with tf.variable_scope(scope, 'Conv', [inputs], reuse=reuse):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
num_filters_in = inputs.get_shape()[-1]
weights_shape = [kernel_h, kernel_w,
num_filters_in, num_filters_out]
weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
l2_regularizer = None
if weight_decay and weight_decay > 0:
l2_regularizer = losses.l2_regularizer(weight_decay)
weights = variables.variable('weights',
shape=weights_shape,
initializer=weights_initializer,
regularizer=l2_regularizer,
trainable=trainable,
restore=restore)
conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
padding=padding)
if batch_norm_params is True:
outputs = tf.contrib.layers.batch_norm(conv, decay=0.999, epsilon=0.001, center=False, scale=False, is_training=True)
else:
bias_shape = [num_filters_out,]
bias_initializer = tf.constant_initializer(bias)
biases = variables.variable('biases',
shape=bias_shape,
initializer=bias_initializer,
trainable=trainable,
restore=restore)
outputs = tf.nn.bias_add(conv, biases)
if activation:
outputs = activation(outputs)
return outputs
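# Usage sketch (illustrative; assumes `images` is a [batch, H, W, C] float tensor):
#   net = conv2d(images, num_filters_out=64, kernel_size=[3, 3], scope='conv1')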
@scopes.add_arg_scope
def fc(inputs,
num_units_out,
activation=tf.nn.relu,
stddev=0.01,
bias=0.0,
weight_decay=0,
batch_norm_params=False,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a fully connected layer followed by an optional batch_norm layer.
FC creates a variable called 'weights', representing the fully connected
  weight matrix, that is multiplied by the input. If `batch_norm_params` is
  not set, a second variable called 'biases' is added to the result of the
  initial vector-matrix multiplication.
Args:
inputs: a [B x N] tensor where B is the batch size and N is the number of
input units in the layer.
num_units_out: the number of output units in the layer.
activation: activation function.
stddev: the standard deviation for the weights.
bias: the initial value of the biases.
weight_decay: the weight decay.
    batch_norm_params: if True, apply batch_norm after the matrix multiply
      instead of a bias term.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
the tensor variable representing the result of the series of operations.
"""
with tf.variable_scope(scope, 'FC', [inputs], reuse=reuse):
num_units_in = inputs.get_shape()[1]
weights_shape = [num_units_in, num_units_out]
weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
l2_regularizer = None
if weight_decay and weight_decay > 0:
l2_regularizer = losses.l2_regularizer(weight_decay)
weights = variables.variable('weights',
shape=weights_shape,
initializer=weights_initializer,
regularizer=l2_regularizer,
trainable=trainable,
restore=restore)
if batch_norm_params is True:
outputs = tf.matmul(inputs, weights)
outputs = tf.contrib.layers.batch_norm(outputs, decay=0.999, epsilon=0.001, center=False, scale=False, is_training=True)
else:
bias_shape = [num_units_out,]
bias_initializer = tf.constant_initializer(bias)
biases = variables.variable('biases',
shape=bias_shape,
initializer=bias_initializer,
trainable=trainable,
restore=restore)
outputs = tf.nn.xw_plus_b(inputs, weights, biases)
if activation:
outputs = activation(outputs)
return outputs
def one_hot_encoding(labels, num_classes, scope=None):
"""Transform numeric labels into onehot_labels.
Args:
labels: [batch_size] target labels.
num_classes: total number of classes.
scope: Optional scope for op_scope.
Returns:
one hot encoding of the labels.
"""
with tf.name_scope(scope, 'OneHotEncoding', [labels]):
batch_size = labels.get_shape()[0]
indices = tf.expand_dims(tf.range(0, batch_size), 1)
labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(
concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
onehot_labels.set_shape([batch_size, num_classes])
return onehot_labels
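# Example: labels [0, 2] with num_classes=3 become [[1., 0., 0.], [0., 0., 1.]]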
@scopes.add_arg_scope
def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Max Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
Raises:
ValueError: if 'kernel_size' is not a 2-D list
"""
with tf.name_scope(scope, 'MaxPool', [inputs]):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
return tf.nn.max_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
@scopes.add_arg_scope
def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Avg Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
"""
with tf.name_scope(scope, 'AvgPool', [inputs]):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
return tf.nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
@scopes.add_arg_scope
def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
"""Returns a dropout layer applied to the input.
Args:
inputs: the tensor to pass to the Dropout layer.
keep_prob: the probability of keeping each input unit.
is_training: whether or not the model is in training mode. If so, dropout is
applied and values scaled. Otherwise, inputs is returned.
scope: Optional scope for op_scope.
Returns:
a tensor representing the output of the operation.
"""
if is_training and keep_prob > 0:
with tf.name_scope(scope, 'Dropout', [inputs]):
return tf.nn.dropout(inputs, keep_prob)
else:
return inputs
def flatten(inputs, scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for op_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
"""
if len(inputs.get_shape()) < 2:
    raise ValueError('Inputs must have at least 2 dimensions')
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with tf.name_scope(scope, 'Flatten', [inputs]):
return tf.reshape(inputs, [-1, k])
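# Example: a [32, 7, 7, 64] tensor flattens to [32, 3136]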
def repeat_op(repetitions, inputs, op, *args, **kwargs):
"""Build a sequential Tower starting from inputs by using an op repeatedly.
It creates new scopes for each operation by increasing the counter.
Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
it will repeat the given op under the following variable_scopes:
conv1/Conv
conv1/Conv_1
conv1/Conv_2
Args:
    repetitions: number of repetitions.
inputs: a tensor of size [batch_size, height, width, channels].
op: an operation.
*args: args for the op.
**kwargs: kwargs for the op.
Returns:
    a tensor resulting from applying the operation op, `repetitions` times.
Raises:
ValueError: if the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
with tf.variable_scope(scope, 'RepeatOp', [inputs]):
tower = inputs
for _ in range(repetitions):
tower = op(tower, *args, **kwargs)
return tower
|
Gljivius/SemSegmentacijaUtakmice
|
SemSegmentacija/slim/ops.py
|
Python
|
bsd-3-clause
| 14,460
|
"""
Test the API of the symtable module.
"""
import symtable
import unittest
from test import support
TEST_CODE = """
import sys
glob = 42
class Mine:
instance_var = 24
def a_method(p1, p2):
pass
def spam(a, b, *var, **kw):
global bar
bar = 47
x = 23
glob
def internal():
return x
return internal
def foo():
pass
def namespace_test(): pass
def namespace_test(): pass
"""
def find_block(block, name):
for ch in block.get_children():
if ch.get_name() == name:
return ch
class SymtableTest(unittest.TestCase):
top = symtable.symtable(TEST_CODE, "?", "exec")
# These correspond to scopes in TEST_CODE
Mine = find_block(top, "Mine")
a_method = find_block(Mine, "a_method")
spam = find_block(top, "spam")
internal = find_block(spam, "internal")
foo = find_block(top, "foo")
def test_type(self):
self.assertEqual(self.top.get_type(), "module")
self.assertEqual(self.Mine.get_type(), "class")
self.assertEqual(self.a_method.get_type(), "function")
self.assertEqual(self.spam.get_type(), "function")
self.assertEqual(self.internal.get_type(), "function")
def test_optimized(self):
self.assertFalse(self.top.is_optimized())
self.assertFalse(self.top.has_exec())
self.assertTrue(self.spam.is_optimized())
def test_nested(self):
self.assertFalse(self.top.is_nested())
self.assertFalse(self.Mine.is_nested())
self.assertFalse(self.spam.is_nested())
self.assertTrue(self.internal.is_nested())
def test_children(self):
self.assertTrue(self.top.has_children())
self.assertTrue(self.Mine.has_children())
self.assertFalse(self.foo.has_children())
def test_lineno(self):
self.assertEqual(self.top.get_lineno(), 0)
self.assertEqual(self.spam.get_lineno(), 11)
def test_function_info(self):
func = self.spam
self.assertEqual(sorted(func.get_parameters()), ["a", "b", "kw", "var"])
expected = ["a", "b", "internal", "kw", "var", "x"]
self.assertEqual(sorted(func.get_locals()), expected)
self.assertEqual(sorted(func.get_globals()), ["bar", "glob"])
self.assertEqual(self.internal.get_frees(), ("x",))
def test_globals(self):
self.assertTrue(self.spam.lookup("glob").is_global())
self.assertFalse(self.spam.lookup("glob").is_declared_global())
self.assertTrue(self.spam.lookup("bar").is_global())
self.assertTrue(self.spam.lookup("bar").is_declared_global())
self.assertFalse(self.internal.lookup("x").is_global())
self.assertFalse(self.Mine.lookup("instance_var").is_global())
def test_local(self):
self.assertTrue(self.spam.lookup("x").is_local())
self.assertFalse(self.internal.lookup("x").is_local())
def test_referenced(self):
self.assertTrue(self.internal.lookup("x").is_referenced())
self.assertTrue(self.spam.lookup("internal").is_referenced())
self.assertFalse(self.spam.lookup("x").is_referenced())
def test_parameters(self):
for sym in ("a", "var", "kw"):
self.assertTrue(self.spam.lookup(sym).is_parameter())
self.assertFalse(self.spam.lookup("x").is_parameter())
def test_symbol_lookup(self):
self.assertEqual(len(self.top.get_identifiers()),
len(self.top.get_symbols()))
self.assertRaises(KeyError, self.top.lookup, "not_here")
def test_namespaces(self):
self.assertTrue(self.top.lookup("Mine").is_namespace())
self.assertTrue(self.Mine.lookup("a_method").is_namespace())
self.assertTrue(self.top.lookup("spam").is_namespace())
self.assertTrue(self.spam.lookup("internal").is_namespace())
self.assertTrue(self.top.lookup("namespace_test").is_namespace())
self.assertFalse(self.spam.lookup("x").is_namespace())
self.assertTrue(self.top.lookup("spam").get_namespace() is self.spam)
ns_test = self.top.lookup("namespace_test")
self.assertEqual(len(ns_test.get_namespaces()), 2)
self.assertRaises(ValueError, ns_test.get_namespace)
def test_assigned(self):
self.assertTrue(self.spam.lookup("x").is_assigned())
self.assertTrue(self.spam.lookup("bar").is_assigned())
self.assertTrue(self.top.lookup("spam").is_assigned())
self.assertTrue(self.Mine.lookup("a_method").is_assigned())
self.assertFalse(self.internal.lookup("x").is_assigned())
def test_imported(self):
self.assertTrue(self.top.lookup("sys").is_imported())
def test_name(self):
self.assertEqual(self.top.get_name(), "top")
self.assertEqual(self.spam.get_name(), "spam")
self.assertEqual(self.spam.lookup("x").get_name(), "x")
self.assertEqual(self.Mine.get_name(), "Mine")
def test_class_info(self):
self.assertEqual(self.Mine.get_methods(), ('a_method',))
def test_filename_correct(self):
### Bug tickler: SyntaxError file name correct whether error raised
### while parsing or building symbol table.
def checkfilename(brokencode):
try:
symtable.symtable(brokencode, "spam", "exec")
except SyntaxError as e:
self.assertEqual(e.filename, "spam")
else:
self.fail("no SyntaxError for %r" % (brokencode,))
checkfilename("def f(x): foo)(") # parse-time
checkfilename("def f(x): global x") # symtable-build-time
def test_eval(self):
symbols = symtable.symtable("42", "?", "eval")
def test_single(self):
symbols = symtable.symtable("42", "?", "single")
def test_exec(self):
symbols = symtable.symtable("def f(x): return x", "?", "exec")
def test_main():
support.run_unittest(SymtableTest)
if __name__ == '__main__':
test_main()
|
Orav/kbengine
|
kbe/src/lib/python/Lib/test/test_symtable.py
|
Python
|
lgpl-3.0
| 6,136
|
import time
from typing import Optional, Sequence
import orjson
from django.http import HttpRequest, HttpResponse
from django.utils.translation import gettext as _
from zerver.decorator import internal_notify_view, process_client
from zerver.lib.exceptions import JsonableError
from zerver.lib.request import REQ, RequestNotes, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import (
check_bool,
check_int,
check_list,
check_string,
to_non_negative_int,
)
from zerver.models import Client, UserProfile, get_client, get_user_profile_by_id
from zerver.tornado.event_queue import fetch_events, get_client_descriptor, process_notification
from zerver.tornado.exceptions import BadEventQueueIdError
@internal_notify_view(True)
def notify(request: HttpRequest) -> HttpResponse:
process_notification(orjson.loads(request.POST["data"]))
return json_success(request)
@has_request_variables
def cleanup_event_queue(
request: HttpRequest, user_profile: UserProfile, queue_id: str = REQ()
) -> HttpResponse:
client = get_client_descriptor(str(queue_id))
if client is None:
raise BadEventQueueIdError(queue_id)
if user_profile.id != client.user_profile_id:
raise JsonableError(_("You are not authorized to access this queue"))
log_data = RequestNotes.get_notes(request).log_data
assert log_data is not None
log_data["extra"] = f"[{queue_id}]"
client.cleanup()
return json_success(request)
@internal_notify_view(True)
@has_request_variables
def get_events_internal(
request: HttpRequest, user_profile_id: int = REQ(json_validator=check_int)
) -> HttpResponse:
user_profile = get_user_profile_by_id(user_profile_id)
RequestNotes.get_notes(request).requestor_for_logs = user_profile.format_requestor_for_logs()
process_client(request, user_profile, client_name="internal")
return get_events_backend(request, user_profile)
def get_events(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
return get_events_backend(request, user_profile)
@has_request_variables
def get_events_backend(
request: HttpRequest,
user_profile: UserProfile,
# user_client is intended only for internal Django=>Tornado requests
# and thus shouldn't be documented for external use.
user_client: Optional[Client] = REQ(
converter=get_client, default=None, intentionally_undocumented=True
),
last_event_id: Optional[int] = REQ(converter=int, default=None),
queue_id: Optional[str] = REQ(default=None),
# apply_markdown, client_gravatar, all_public_streams, and various
# other parameters are only used when registering a new queue via this
# endpoint. This is a feature used primarily by get_events_internal
# and not expected to be used by third-party clients.
apply_markdown: bool = REQ(
default=False, json_validator=check_bool, intentionally_undocumented=True
),
client_gravatar: bool = REQ(
default=False, json_validator=check_bool, intentionally_undocumented=True
),
slim_presence: bool = REQ(
default=False, json_validator=check_bool, intentionally_undocumented=True
),
all_public_streams: bool = REQ(
default=False, json_validator=check_bool, intentionally_undocumented=True
),
event_types: Optional[Sequence[str]] = REQ(
default=None, json_validator=check_list(check_string), intentionally_undocumented=True
),
dont_block: bool = REQ(default=False, json_validator=check_bool),
narrow: Sequence[Sequence[str]] = REQ(
default=[],
json_validator=check_list(check_list(check_string)),
intentionally_undocumented=True,
),
lifespan_secs: int = REQ(
default=0, converter=to_non_negative_int, intentionally_undocumented=True
),
bulk_message_deletion: bool = REQ(
default=False, json_validator=check_bool, intentionally_undocumented=True
),
stream_typing_notifications: bool = REQ(
default=False, json_validator=check_bool, intentionally_undocumented=True
),
user_settings_object: bool = REQ(
default=False, json_validator=check_bool, intentionally_undocumented=True
),
) -> HttpResponse:
if all_public_streams and not user_profile.can_access_public_streams():
raise JsonableError(_("User not authorized for this query"))
# Extract the Tornado handler from the request
tornado_handler = RequestNotes.get_notes(request).tornado_handler
assert tornado_handler is not None
handler = tornado_handler()
assert handler is not None
if user_client is None:
valid_user_client = RequestNotes.get_notes(request).client
assert valid_user_client is not None
else:
valid_user_client = user_client
events_query = dict(
user_profile_id=user_profile.id,
queue_id=queue_id,
last_event_id=last_event_id,
event_types=event_types,
client_type_name=valid_user_client.name,
all_public_streams=all_public_streams,
lifespan_secs=lifespan_secs,
narrow=narrow,
dont_block=dont_block,
handler_id=handler.handler_id,
)
if queue_id is None:
events_query["new_queue_data"] = dict(
user_profile_id=user_profile.id,
realm_id=user_profile.realm_id,
event_types=event_types,
client_type_name=valid_user_client.name,
apply_markdown=apply_markdown,
client_gravatar=client_gravatar,
slim_presence=slim_presence,
all_public_streams=all_public_streams,
queue_timeout=lifespan_secs,
last_connection_time=time.time(),
narrow=narrow,
bulk_message_deletion=bulk_message_deletion,
stream_typing_notifications=stream_typing_notifications,
user_settings_object=user_settings_object,
)
result = fetch_events(events_query)
if "extra_log_data" in result:
log_data = RequestNotes.get_notes(request).log_data
assert log_data is not None
log_data["extra"] = result["extra_log_data"]
if result["type"] == "async":
# Mark this response with .asynchronous; this will result in
# Tornado discarding the response and instead long-polling the
# request. See zulip_finish for more design details.
handler._request = request
response = json_success(request)
response.asynchronous = True
return response
if result["type"] == "error":
raise result["exception"]
return json_success(request, data=result["response"])
|
zulip/zulip
|
zerver/tornado/views.py
|
Python
|
apache-2.0
| 6,676
|
from graph import *
import re
class Reader(object):
"""
Interpreter for AI source data in original notation
"""
def __init__(self, registry):
super(Reader, self).__init__()
self.registry = registry
self.relExp = re.compile("(`.+`(?:\*\d+)?)\s+(<|\=)(.+)(\=|>)\s+(`.+`(?:\*\d+)?)")
self.nodeExp = re.compile("`([^`]+)`(?:\*(\d+))?")
def eval(self,exp):
exp = exp.split(" ")
if exp[0] == 'node':
self.registry.add(' '.join(exp[1:]))
elif exp[0] == 'rel':
tkns = self.relExp.findall(' '.join(exp[1:]))[0]
if len(tkns) == 5:
rel = tkns[2].split('=')
self.registry.relate(
[(n[0],(int)(n[1] or 1)) for n in self.nodeExp.findall(tkns[0])],
[(n[0],(int)(n[1] or 1)) for n in self.nodeExp.findall(tkns[4])],
"" if tkns[1] != "<" else rel[0],
"" if tkns[3] != ">" else rel[-1]
)
    def read(self, filename):
        with open(filename, 'r') as f:
            for line in f:
                self.eval(line)
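# Example source lines this reader accepts (illustrative, inferred from the
# regular expressions above):
#   node cat
#   rel `cat`*2 <chases=flees> `mouse`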
|
AlexArendsen/pylog
|
reader.py
|
Python
|
gpl-2.0
| 927
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=too-many-lines
"""Weight updating functions."""
import math
import pickle
import warnings
import numpy
from .base import py_str
from .ndarray import (NDArray, zeros, clip, sqrt, cast, maximum, abs as NDabs)
from .ndarray import (sgd_update, sgd_mom_update, adam_update, rmsprop_update, rmspropalex_update,
mp_sgd_update, mp_sgd_mom_update, square, ftrl_update, ftml_update,
signsgd_update, signum_update)
from .ndarray import _internal
from .ndarray import op
from .ndarray import sparse
from .random import normal
class Optimizer(object):
"""The base class inherited by all optimizers.
Parameters
----------
rescale_grad : float, optional
Multiply the gradient with `rescale_grad` before updating. Often
choose to be ``1.0/batch_size``.
param_idx2name : dict from int to string, optional
A dictionary that maps int index to string name.
clip_gradient : float, optional
Clip the gradient by projecting onto the box ``[-clip_gradient, clip_gradient]``.
learning_rate : float, optional
The initial learning rate.
lr_scheduler : LRScheduler, optional
The learning rate scheduler.
wd : float, optional
The weight decay (or L2 regularization) coefficient. Modifies objective
by adding a penalty for having large weights.
sym: Symbol, optional
The Symbol this optimizer is applying to.
begin_num_update : int, optional
The initial number of updates.
multi_precision : bool, optional
Flag to control the internal precision of the optimizer.
``False`` results in using the same precision as the weights (default),
``True`` makes internal 32-bit copy of the weights and applies gradients
in 32-bit precision even if actual weights used in the model have lower precision.
Turning this on can improve convergence and accuracy when training with float16.
Properties
----------
learning_rate : float
The current learning rate of the optimizer. Given an Optimizer object
optimizer, its learning rate can be accessed as optimizer.learning_rate.
"""
def __init__(self, rescale_grad=1., param_idx2name=None, wd=0.,
clip_gradient=None, learning_rate=0.01,
lr_scheduler=None, sym=None, begin_num_update=0,
multi_precision=False, param_dict=None):
self.rescale_grad = rescale_grad
self.lr = learning_rate
self.lr_scheduler = lr_scheduler
if lr_scheduler is not None:
self.lr_scheduler.base_lr = learning_rate
self.wd = wd
self.lr_mult = {}
self.wd_mult = {}
self.begin_num_update = begin_num_update
self.num_update = begin_num_update
self._index_update_count = {}
self.clip_gradient = clip_gradient
self.multi_precision = multi_precision
if param_idx2name is None:
param_idx2name = {}
assert isinstance(param_idx2name, dict), \
'param_idx2name should be a dict of param indexes to names.'
self.idx2name = param_idx2name.copy()
self.sym_info = (sym.attr_dict(), sym.list_arguments()) if sym is not None else ()
self.param_dict = param_dict if param_dict else {}
self.set_lr_mult({})
self.set_wd_mult({})
opt_registry = {}
@staticmethod
def register(klass):
"""Registers a new optimizer.
Once an optimizer is registered, we can create an instance of this
optimizer with `create_optimizer` later.
Examples
--------
>>> @mx.optimizer.Optimizer.register
... class MyOptimizer(mx.optimizer.Optimizer):
... pass
>>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer')
>>> print(type(optim))
<class '__main__.MyOptimizer'>
"""
assert(isinstance(klass, type))
name = klass.__name__.lower()
if name in Optimizer.opt_registry:
            warnings.warn('New optimizer %s.%s is overriding existing '
                          'optimizer %s.%s' % (
                              klass.__module__, klass.__name__,
                              Optimizer.opt_registry[name].__module__,
                              Optimizer.opt_registry[name].__name__))
Optimizer.opt_registry[name] = klass
return klass
@staticmethod
def create_optimizer(name, **kwargs):
"""Instantiates an optimizer with a given name and kwargs.
.. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.
Parameters
----------
name: str
Name of the optimizer. Should be the name
of a subclass of Optimizer. Case insensitive.
kwargs: dict
Parameters for the optimizer.
Returns
-------
Optimizer
An instantiated optimizer.
Examples
--------
>>> sgd = mx.optimizer.Optimizer.create_optimizer('sgd')
>>> type(sgd)
<class 'mxnet.optimizer.SGD'>
>>> adam = mx.optimizer.create('adam', learning_rate=.1)
>>> type(adam)
<class 'mxnet.optimizer.Adam'>
"""
if name.lower() in Optimizer.opt_registry:
return Optimizer.opt_registry[name.lower()](**kwargs)
else:
raise ValueError('Cannot find optimizer %s' % name)
@property
def learning_rate(self):
if self.lr_scheduler is not None:
return self.lr_scheduler(self.num_update)
else:
return self.lr
def create_state(self, index, weight):
"""Creates auxiliary state for a given weight.
        Some optimizers require additional state, e.g. momentum, in addition
to gradients in order to update weights. This function creates state
for a given weight which will be used in `update`. This function is
called only once for each weight.
Parameters
----------
index : int
            A unique index to identify the weight.
weight : NDArray
The weight.
Returns
-------
state : any obj
The state associated with the weight.
"""
def create_state_multi_precision(self, index, weight):
"""Creates auxiliary state for a given weight, including FP32 high
precision copy if original weight is FP16.
This method is provided to perform automatic mixed precision training
for optimizers that do not support it themselves.
Parameters
----------
index : int
            A unique index to identify the weight.
weight : NDArray
The weight.
Returns
-------
state : any obj
The state associated with the weight.
"""
weight_master_copy = None
if self.multi_precision and weight.dtype == numpy.float16:
weight_master_copy = weight.astype(numpy.float32)
return (weight_master_copy,) + (self.create_state(index, weight_master_copy),)
if weight.dtype == numpy.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
"poor accuracy or slow convergence. "
"Consider using multi_precision=True option of the "
"optimizer")
return self.create_state(index, weight)
def update(self, index, weight, grad, state):
"""Updates the given parameter using the corresponding gradient and state.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
"""
raise NotImplementedError()
def update_multi_precision(self, index, weight, grad, state):
"""Updates the given parameter using the corresponding gradient and state.
Mixed precision version.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
"""
if self.multi_precision and weight.dtype == numpy.float16:
# Wrapper for mixed precision
weight_master_copy = state[0]
original_state = state[1]
grad32 = grad.astype(numpy.float32)
self.update(index, weight_master_copy, grad32, original_state)
cast(weight_master_copy, dtype=weight.dtype, out=weight)
else:
self.update(index, weight, grad, state)
def set_learning_rate(self, lr):
"""Sets a new learning rate of the optimizer.
Parameters
----------
lr : float
The new learning rate of the optimizer.
"""
if self.lr_scheduler is not None:
raise UserWarning("LRScheduler of the optimizer has already been "
"defined. Note that set_learning_rate can mutate "
"the value of the learning rate of the optimizer "
"only when the LRScheduler of the optimizer is "
"undefined.")
else:
self.lr = lr
def set_lr_scale(self, args_lrscale): # pylint: disable=unused-argument
"""[DEPRECATED] Sets lr scale. Use set_lr_mult instead."""
raise DeprecationWarning
def set_lr_mult(self, args_lr_mult):
"""Sets an individual learning rate multiplier for each parameter.
If you specify a learning rate multiplier for a parameter, then
the learning rate for the parameter will be set as the product of
the global learning rate `self.lr` and its multiplier.
.. note:: The default learning rate multiplier of a `Variable`
can be set with `lr_mult` argument in the constructor.
Parameters
----------
args_lr_mult : dict of str/int to float
            For each of its key-value entries, the learning rate multiplier for the
parameter specified in the key will be set as the given value.
You can specify the parameter with either its name or its index.
If you use the name, you should pass `sym` in the constructor,
and the name you specified in the key of `args_lr_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
compatibility, and we recommend to use the name instead.
"""
self.lr_mult = {}
if self.sym_info:
attr, arg_names = self.sym_info
for name in arg_names:
if name in attr and '__lr_mult__' in attr[name]:
self.lr_mult[name] = float(attr[name]['__lr_mult__'])
self.lr_mult.update(args_lr_mult)
def set_wd_mult(self, args_wd_mult):
"""Sets an individual weight decay multiplier for each parameter.
By default, if `param_idx2name` was provided in the
        constructor, the weight decay multiplier is set to 0 for all
        parameters whose names don't end with ``_weight`` or
``_gamma``.
.. note:: The default weight decay multiplier for a `Variable`
can be set with its `wd_mult` argument in the constructor.
Parameters
----------
args_wd_mult : dict of string/int to float
            For each of its key-value entries, the weight decay multiplier for the
            parameter specified in the key will be set as the given value.
            You can specify the parameter with either its name or its index.
            If you use the name, you should pass `sym` in the constructor,
            and the name you specified in the key of `args_wd_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
compatibility, and we recommend to use the name instead.
"""
self.wd_mult = {}
for n in self.idx2name.values():
if not (n.endswith('_weight') or n.endswith('_gamma')):
self.wd_mult[n] = 0.0
if self.sym_info:
attr, arg_names = self.sym_info
for name in arg_names:
if name in attr and '__wd_mult__' in attr[name]:
self.wd_mult[name] = float(attr[name]['__wd_mult__'])
self.wd_mult.update(args_wd_mult)
def _update_count(self, index):
"""Updates num_update.
Parameters
----------
index : int
The index to be updated.
"""
if index not in self._index_update_count:
self._index_update_count[index] = self.begin_num_update
self._index_update_count[index] += 1
self.num_update = max(self._index_update_count[index], self.num_update)
def _get_lr(self, index):
"""Gets the learning rate given the index of the weight.
Parameters
----------
index : int
The index corresponding to the weight.
Returns
-------
lr : float
Learning rate for this index.
"""
if self.lr_scheduler is not None:
lr = self.lr_scheduler(self.num_update)
else:
lr = self.lr
if index in self.param_dict:
lr *= self.param_dict[index].lr_mult
elif index in self.lr_mult:
lr *= self.lr_mult[index]
elif index in self.idx2name:
lr *= self.lr_mult.get(self.idx2name[index], 1.0)
return lr
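    # Example: with lr=0.1, idx2name={0: 'fc_weight'} and
    # lr_mult={'fc_weight': 2.0}, _get_lr(0) returns 0.2 (assuming no
    # lr_scheduler is set and index 0 has no param_dict entry).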
def _get_wd(self, index):
"""Gets weight decay for index.
        Returns 0 for non-weight parameters if parameter names were provided to `__init__`.
Parameters
----------
index : int
The index for weight.
Returns
-------
wd : float
Weight decay for this index.
"""
wd = self.wd
if index in self.param_dict:
wd *= self.param_dict[index].wd_mult
elif index in self.wd_mult:
wd *= self.wd_mult[index]
elif index in self.idx2name:
wd *= self.wd_mult.get(self.idx2name[index], 1.0)
return wd
# convenience wrapper for Optimizer.Register
register = Optimizer.register # pylint: disable=invalid-name
# pylint: disable=line-too-long
@register
class SGD(Optimizer):
"""The SGD optimizer with momentum and weight decay.
If the storage types of weight and grad are both ``row_sparse``, and ``lazy_update`` is True, \
**lazy updates** are applied by::
for row in grad.indices:
rescaled_grad[row] = lr * rescale_grad * clip(grad[row], clip_gradient) + wd * weight[row]
state[row] = momentum[row] * state[row] + rescaled_grad[row]
weight[row] = weight[row] - state[row]
The sparse update only updates the momentum for the weights whose row_sparse
gradient indices appear in the current batch, rather than updating it for all
indices. Compared with the original update, it can provide large
improvements in model training throughput for some applications. However, it
provides slightly different semantics than the original update, and
may lead to different empirical results.
Otherwise, **standard updates** are applied by::
rescaled_grad = lr * rescale_grad * clip(grad, clip_gradient) + wd * weight
state = momentum * state + rescaled_grad
weight = weight - state
For details of the update algorithm see
:class:`~mxnet.ndarray.sgd_update` and :class:`~mxnet.ndarray.sgd_mom_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
lazy_update : bool, optional
Default is True. If True, lazy updates are applied \
if the storage types of weight and grad are both ``row_sparse``.
multi_precision: bool, optional
Flag to control the internal precision of the optimizer.
``False`` results in using the same precision as the weights (default),
``True`` makes internal 32-bit copy of the weights and applies gradients \
in 32-bit precision even if actual weights used in the model have lower precision.\
Turning this on can improve convergence and accuracy when training with float16.
"""
def __init__(self, momentum=0.0, lazy_update=True, **kwargs):
super(SGD, self).__init__(**kwargs)
self.momentum = momentum
self.lazy_update = lazy_update
def create_state_multi_precision(self, index, weight):
weight_master_copy = None
if self.multi_precision and weight.dtype == numpy.float16:
weight_master_copy = weight.astype(numpy.float32)
return (self.create_state(index, weight_master_copy), weight_master_copy)
if weight.dtype == numpy.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
"poor accuracy or slow convergence. "
"Consider using multi_precision=True option of the "
"SGD optimizer")
return self.create_state(index, weight)
def create_state(self, index, weight):
momentum = None
stype = weight.stype if self.lazy_update else 'default'
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=weight.dtype, stype=stype)
return momentum
def _update_impl(self, index, weight, grad, state, multi_precision=False):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
kwargs = {'rescale_grad': self.rescale_grad}
if self.momentum > 0:
kwargs['momentum'] = self.momentum
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
if not multi_precision:
if state is not None:
sgd_mom_update(weight, grad, state, out=weight,
lr=lr, wd=wd, **kwargs)
else:
sgd_update(weight, grad, out=weight,
lr=lr, wd=wd, **kwargs)
else:
if state[0] is not None:
mp_sgd_mom_update(weight, grad, state[0], state[1], out=weight,
lr=lr, wd=wd, **kwargs)
else:
mp_sgd_update(weight, grad, state[1], out=weight,
lr=lr, wd=wd, **kwargs)
def update(self, index, weight, grad, state):
self._update_impl(index, weight, grad, state, multi_precision=False)
def update_multi_precision(self, index, weight, grad, state):
use_multi_precision = self.multi_precision and weight.dtype == numpy.float16
self._update_impl(index, weight, grad, state,
multi_precision=use_multi_precision)
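# A minimal usage sketch for the classes defined in this module (the
# parameter values and the `grad`/`weight` arrays are illustrative):
#
#   opt = SGD(learning_rate=0.1, momentum=0.9, wd=1e-4)
#   updater = get_updater(opt)
#   updater(0, grad, weight)   # applies one SGD step to `weight` in place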
@register
class Signum(Optimizer):
"""The Signum optimizer that takes the sign of gradient or momentum.
The optimizer updates the weight by:
rescaled_grad = rescale_grad * clip(grad, clip_gradient) + wd * weight
state = momentum * state + (1-momentum)*rescaled_grad
weight = (1 - lr * wd_lh) * weight - lr * sign(state)
See the original paper at: https://jeremybernste.in/projects/amazon/signum.pdf
For details of the update algorithm see
:class:`~mxnet.ndarray.signsgd_update` and :class:`~mxnet.ndarray.signum_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
wd_lh : float, optional
The amount of decoupled weight decay regularization, see details in the original paper at:\
https://arxiv.org/abs/1711.05101
"""
def __init__(self, learning_rate=0.01, momentum=0.9, wd_lh=0.0, **kwargs):
super(Signum, self).__init__(learning_rate=learning_rate, **kwargs)
self.momentum = momentum
self.wd_lh = wd_lh
def create_state(self, index, weight):
momentum = None
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=weight.dtype, stype=weight.stype)
return momentum
def _update_impl(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
kwargs = {'rescale_grad': self.rescale_grad}
if self.momentum > 0:
kwargs['momentum'] = self.momentum
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
if self.wd_lh:
kwargs['wd_lh'] = self.wd_lh
if state is not None:
signum_update(weight, grad, state, out=weight,
lr=lr, wd=wd, **kwargs)
else:
signsgd_update(weight, grad, out=weight,
lr=lr, wd=wd, **kwargs)
def update(self, index, weight, grad, state):
self._update_impl(index, weight, grad, state)
@register
class FTML(Optimizer):
"""The FTML optimizer.
This class implements the optimizer described in
*FTML - Follow the Moving Leader in Deep Learning*,
available at http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
beta1 : float, optional
0 < beta1 < 1. Generally close to 0.5.
beta2 : float, optional
0 < beta2 < 1. Generally close to 1.
epsilon : float, optional
Small value to avoid division by 0.
"""
def __init__(self, beta1=0.6, beta2=0.999, epsilon=1e-8, **kwargs):
super(FTML, self).__init__(**kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # d_0
zeros(weight.shape, weight.context, dtype=weight.dtype), # v_0
zeros(weight.shape, weight.context, dtype=weight.dtype)) # z_0
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
t = self._index_update_count[index]
kwargs = {'beta1': self.beta1, 'beta2': self.beta2, 'epsilon': self.epsilon,
'rescale_grad': self.rescale_grad, 't': t}
if self.clip_gradient:
kwargs['clip_grad'] = self.clip_gradient
prev_d, prev_v, prev_z = state
ftml_update(weight, grad, prev_d, prev_v, prev_z, out=weight,
lr=lr, wd=wd, **kwargs)
# pylint: enable=line-too-long
@register
class DCASGD(Optimizer):
"""The DCASGD optimizer.
This class implements the optimizer described in *Asynchronous Stochastic Gradient Descent
with Delay Compensation for Distributed Deep Learning*,
available at https://arxiv.org/abs/1609.08326.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
lamda : float, optional
Scale DC value.
"""
def __init__(self, momentum=0.0, lamda=0.04, **kwargs):
super(DCASGD, self).__init__(**kwargs)
self.momentum = momentum
self.weight_previous = {}
self.lamda = lamda
def create_state(self, index, weight):
if self.momentum == 0.0:
return (None,
weight.copy()) # previous weight
else:
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # momentum
weight.copy()) # previous weight
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
mom, previous_weight = state
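        # Delay compensation: the lamda * grad * grad * (weight - previous_weight)
        # term below approximates a Hessian correction for stale gradients, as
        # described in the DC-ASGD paper referenced in the class docstring.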
if mom:
mom[:] *= self.momentum
mom[:] += -lr * (grad + wd * weight + self.lamda \
* grad * grad * (weight - previous_weight))
else:
assert(self.momentum == 0.0)
mom = -lr * (grad + wd * weight + self.lamda \
* grad * grad * (weight - previous_weight))
previous_weight[:] = weight
weight[:] += mom
@register
class NAG(SGD):
"""Nesterov accelerated SGD.
This optimizer updates each weight by::
state = momentum * state + grad + wd * weight
weight = weight - (lr * (grad + momentum * state))
This optimizer accepts the same arguments as :class:`.SGD`.
"""
def __init__(self, **kwargs):
super(NAG, self).__init__(**kwargs)
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
if state is not None:
mom = state
mom[:] *= self.momentum
grad += wd * weight
mom[:] += grad
grad[:] += self.momentum * mom
weight[:] += -lr * grad
else:
assert self.momentum == 0.0
weight[:] += -lr * (grad + wd * weight)
@register
class SGLD(Optimizer):
"""Stochastic Gradient Riemannian Langevin Dynamics.
This class implements the optimizer described in the paper *Stochastic Gradient
Riemannian Langevin Dynamics on the Probability Simplex*, available at
https://papers.nips.cc/paper/4883-stochastic-gradient-riemannian-langevin-dynamics-on-the-probability-simplex.pdf.
"""
def __init__(self, **kwargs):
super(SGLD, self).__init__(**kwargs)
def create_state(self, index, weight):
return None
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
weight[:] += - lr/2 * (grad + wd * weight) + normal(0, math.sqrt(lr),
weight.shape, weight.context)
@register # pylint: disable=invalid-name
class ccSGD(SGD):
"""[DEPRECATED] Same as `SGD`. Left here for backward compatibility."""
def __init__(self, *args, **kwargs):
super(ccSGD, self).__init__(*args, **kwargs)
@register
class Adam(Optimizer):
"""The Adam optimizer.
This class implements the optimizer described in *Adam: A Method for
Stochastic Optimization*, available at http://arxiv.org/abs/1412.6980.
The optimizer updates the weight by::
rescaled_grad = clip(grad * rescale_grad + wd * weight, clip_gradient)
m = beta1 * m + (1 - beta1) * rescaled_grad
v = beta2 * v + (1 - beta2) * (rescaled_grad**2)
w = w - learning_rate * m / (sqrt(v) + epsilon)
If the storage types of weight, state and grad are all ``row_sparse``, \
**sparse updates** are applied by::
for row in grad.indices:
rescaled_grad[row] = clip(grad[row] * rescale_grad + wd * weight[row], clip_gradient)
m[row] = beta1 * m[row] + (1 - beta1) * rescaled_grad[row]
v[row] = beta2 * v[row] + (1 - beta2) * (rescaled_grad[row]**2)
w[row] = w[row] - learning_rate * m[row] / (sqrt(v[row]) + epsilon)
The sparse update only updates the mean and var for the weights whose row_sparse
gradient indices appear in the current batch, rather than updating it for all indices.
Compared with the original update, it can provide large improvements in model training
throughput for some applications. However, it provides slightly different semantics than
the original update, and may lead to different empirical results.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
For details of the update algorithm, see :class:`~mxnet.ndarray.adam_update`.
Parameters
----------
beta1 : float, optional
Exponential decay rate for the first moment estimates.
beta2 : float, optional
Exponential decay rate for the second moment estimates.
epsilon : float, optional
Small value to avoid division by 0.
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
**kwargs):
super(Adam, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, dtype=weight.dtype,
stype=weight.stype), # mean
zeros(weight.shape, weight.context, dtype=weight.dtype,
stype=weight.stype)) # variance
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
t = self._index_update_count[index]
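        # Fold Adam's bias correction into the learning rate: scaling lr by
        # sqrt(1 - beta2**t) / (1 - beta1**t) and using the raw m and v is
        # equivalent to using the bias-corrected m_hat and v_hat (up to how
        # epsilon is scaled).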
coef1 = 1. - self.beta1**t
coef2 = 1. - self.beta2**t
lr *= math.sqrt(coef2)/coef1
kwargs = {'beta1': self.beta1, 'beta2': self.beta2, 'epsilon': self.epsilon,
'rescale_grad': self.rescale_grad}
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
mean, var = state
adam_update(weight, grad, mean, var, out=weight,
lr=lr, wd=wd, **kwargs)
@register
class AdaGrad(Optimizer):
"""AdaGrad optimizer.
This class implements the AdaGrad optimizer described in *Adaptive Subgradient
Methods for Online Learning and Stochastic Optimization*, and available at
http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
eps: float, optional
Small value to avoid division by 0.
"""
def __init__(self, eps=1e-7, **kwargs):
super(AdaGrad, self).__init__(**kwargs)
self.float_stable_eps = eps
def create_state(self, index, weight):
return zeros(weight.shape, weight.context, stype=weight.stype) # history
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
        is_sparse = weight.stype == 'row_sparse' and grad.stype == 'row_sparse'
        if is_sparse:
grad_indices_count = len(grad.indices)
grad = grad * self.rescale_grad
        if is_sparse:
grad_indices = grad.indices
# Make sure that the scalar multiply still has a sparse result
assert grad_indices_count == len(grad_indices)
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
history = state
save_history_stype = history.stype
if is_sparse:
history[:] = sparse.elemwise_add(sparse.square(grad),
sparse.retain(history, grad_indices))
history_indices = history.indices
assert len(history_indices) == grad_indices_count
adjusted_add = _internal._scatter_plus_scalar(history, self.float_stable_eps)
srt = op.sqrt(adjusted_add)
div = _internal._scatter_elemwise_div(grad, srt)
retained_weight = sparse.retain(weight, grad.indices)
to_add = sparse.elemwise_add(div, _internal._mul_scalar(retained_weight, float(wd)))
assert len(to_add.indices) == grad_indices_count
weight[:] = sparse.elemwise_add(weight, _internal._mul_scalar(to_add, float(-lr)))
state[:] = history
assert state.stype == save_history_stype
assert len(history_indices) == grad_indices_count
else:
history[:] += square(grad)
div = grad / sqrt(history + self.float_stable_eps)
weight[:] += (div + weight * wd) * -lr
@register
class RMSProp(Optimizer):
"""The RMSProp optimizer.
Two versions of RMSProp are implemented:
If ``centered=False``, we follow
http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf by
Tieleman & Hinton, 2012.
For details of the update algorithm see :class:`~mxnet.ndarray.rmsprop_update`.
If ``centered=True``, we follow http://arxiv.org/pdf/1308.0850v5.pdf (38)-(45)
by Alex Graves, 2013.
For details of the update algorithm see :class:`~mxnet.ndarray.rmspropalex_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
gamma1: float, optional
A decay factor of moving average over past squared gradient.
gamma2: float, optional
A "momentum" factor. Only used if `centered`=``True``.
epsilon : float, optional
Small value to avoid division by 0.
centered : bool, optional
Flag to control which version of RMSProp to use.
``True`` will use Graves's version of `RMSProp`,
``False`` will use Tieleman & Hinton's version of `RMSProp`.
clip_weights : float, optional
Clips weights into range ``[-clip_weights, clip_weights]``.
"""
def __init__(self, learning_rate=0.001, gamma1=0.9, gamma2=0.9,
epsilon=1e-8, centered=False, clip_weights=None, **kwargs):
super(RMSProp, self).__init__(learning_rate=learning_rate, **kwargs)
self.gamma1 = gamma1
self.gamma2 = gamma2
self.centered = centered
self.epsilon = epsilon
self.clip_weights = clip_weights
def create_state(self, index, weight):
if self.centered:
return (
zeros(weight.shape, weight.context, stype=weight.stype), # n
zeros(weight.shape, weight.context, stype=weight.stype), # g
zeros(weight.shape, weight.context, stype=weight.stype)) # delta
else:
return (zeros(weight.shape, weight.context, stype=weight.stype),) # n
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
kwargs = {'gamma1': self.gamma1, 'epsilon': self.epsilon,
'rescale_grad': self.rescale_grad}
if self.centered:
kwargs['gamma2'] = self.gamma2
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
if self.clip_weights:
kwargs['clip_weights'] = self.clip_weights
if not self.centered:
(n, ) = state
rmsprop_update(
weight, grad, n, out=weight, lr=lr, wd=wd, **kwargs)
else:
n, g, delta = state
rmspropalex_update(weight, grad, n, g, delta, out=weight,
lr=lr, wd=wd, **kwargs)
@register
class AdaDelta(Optimizer):
"""The AdaDelta optimizer.
This class implements AdaDelta, an optimizer described in *ADADELTA: An adaptive
learning rate method*, available at https://arxiv.org/abs/1212.5701.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
rho: float
Decay rate for both squared gradients and delta.
epsilon : float
Small value to avoid division by 0.
"""
def __init__(self, rho=0.90, epsilon=1e-5, **kwargs):
super(AdaDelta, self).__init__(**kwargs)
self.rho = rho
self.epsilon = epsilon
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context), # accumulated g
zeros(weight.shape, weight.context)) # accumulated delta
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
wd = self._get_wd(index)
self._update_count(index)
# preprocess grad
grad *= self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
        # accumulated g and delta initialization
acc_g, acc_delta = state
# update g, delta
acc_g[:] = self.rho * acc_g + (1. - self.rho) * grad * grad
current_delta = sqrt(acc_delta + self.epsilon) / sqrt(acc_g + self.epsilon) * grad
acc_delta[:] = self.rho * acc_delta + (1. - self.rho) * current_delta * current_delta
# update weight
weight[:] -= current_delta + wd * weight
#pylint: disable=invalid-name
#pylint: disable=line-too-long
@register
class Ftrl(Optimizer):
"""The Ftrl optimizer.
Referenced from *Ad Click Prediction: a View from the Trenches*, available at
http://dl.acm.org/citation.cfm?id=2488200.
eta :
.. math::
\\eta_{t,i} = \\frac{learningrate}{\\beta+\\sqrt{\\sum_{s=1}^tg_{s,i}^2}}
The optimizer updates the weight by::
rescaled_grad = clip(grad * rescale_grad, clip_gradient)
z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate
n += rescaled_grad**2
w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) > lamda1)
If the storage types of weight, state and grad are all ``row_sparse``, \
**sparse updates** are applied by::
for row in grad.indices:
rescaled_grad[row] = clip(grad[row] * rescale_grad, clip_gradient)
z[row] += rescaled_grad[row] - (sqrt(n[row] + rescaled_grad[row]**2) - sqrt(n[row])) * weight[row] / learning_rate
n[row] += rescaled_grad[row]**2
w[row] = (sign(z[row]) * lamda1 - z[row]) / ((beta + sqrt(n[row])) / learning_rate + wd) * (abs(z[row]) > lamda1)
The sparse update only updates the z and n for the weights whose row_sparse
gradient indices appear in the current batch, rather than updating it for all
indices. Compared with the original update, it can provide large
improvements in model training throughput for some applications. However, it
provides slightly different semantics than the original update, and
may lead to different empirical results.
For details of the update algorithm, see :class:`~mxnet.ndarray.ftrl_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
lamda1 : float, optional
L1 regularization coefficient.
learning_rate : float, optional
The initial learning rate.
beta : float, optional
Per-coordinate learning rate correlation parameter.
"""
def __init__(self, lamda1=0.01, learning_rate=0.1, beta=1, **kwargs):
super(Ftrl, self).__init__(**kwargs)
self.lamda1 = lamda1
self.beta = beta
self.lr = learning_rate
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, stype=weight.stype), # z
zeros(weight.shape, weight.context, stype=weight.stype)) # n
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
wd = self._get_wd(index)
lr = self._get_lr(index)
kwargs = {'lamda1': self.lamda1, 'beta': self.beta, 'rescale_grad': self.rescale_grad}
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
        # accumulated z and n state
z, n = state
ftrl_update(weight, grad, z, n, out=weight,
lr=lr, wd=wd, **kwargs)
# pylint: enable=line-too-long
@register
class Adamax(Optimizer):
"""The AdaMax optimizer.
It is a variant of Adam based on the infinity norm
available at http://arxiv.org/abs/1412.6980 Section 7.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
beta1 : float, optional
Exponential decay rate for the first moment estimates.
beta2 : float, optional
Exponential decay rate for the second moment estimates.
"""
def __init__(self, learning_rate=0.002, beta1=0.9, beta2=0.999, **kwargs):
super(Adamax, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
t = self._index_update_count[index]
lr /= (1. - self.beta1**t)
# preprocess grad
grad = grad * self.rescale_grad + wd * weight
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
# update m_t and u_t
m_t, u_t = state
m_t[:] = self.beta1 * m_t + (1. - self.beta1) * grad
u_t[:] = maximum(self.beta2 * u_t, NDabs(grad))
# update weight
weight[:] -= lr * m_t / u_t
@register
class Nadam(Optimizer):
"""The Nesterov Adam optimizer.
Much like Adam is essentially RMSprop with momentum,
    Nadam is Adam with Nesterov momentum, described
    at http://cs229.stanford.edu/proj2015/054_report.pdf.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
beta1 : float, optional
Exponential decay rate for the first moment estimates.
beta2 : float, optional
Exponential decay rate for the second moment estimates.
epsilon : float, optional
Small value to avoid division by 0.
schedule_decay : float, optional
Exponential decay rate for the momentum schedule
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
schedule_decay=0.004, **kwargs):
super(Nadam, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.schedule_decay = schedule_decay
self.m_schedule = 1.
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
t = self._index_update_count[index]
# preprocess grad
grad = grad * self.rescale_grad + wd * weight
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
# warming momentum schedule
momentum_t = self.beta1 * (1. - 0.5 * (pow(0.96, t * self.schedule_decay)))
momentum_t_1 = self.beta1 * (1. - 0.5 * (pow(0.96, (t + 1) * self.schedule_decay)))
self.m_schedule = self.m_schedule * momentum_t
m_schedule_next = self.m_schedule * momentum_t_1
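        # self.m_schedule is the running product of momentum_t over all updates
        # so far; m_schedule_next additionally folds in the next step's
        # momentum_t_1. Both are used below to bias-correct the momentum terms.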
# update m_t and v_t
m_t, v_t = state
m_t[:] = self.beta1 * m_t + (1. - self.beta1) * grad
v_t[:] = self.beta2 * v_t + (1. - self.beta2) * grad * grad
grad_prime = grad / (1. - self.m_schedule)
m_t_prime = m_t / (1. - m_schedule_next)
v_t_prime = v_t / (1. - pow(self.beta2, t))
m_t_bar = (1. - momentum_t) * grad_prime + momentum_t_1 * m_t_prime
# update weight
weight[:] -= lr * m_t_bar / (sqrt(v_t_prime) + self.epsilon)
@register
class Test(Optimizer):
"""The Test optimizer"""
def __init__(self, **kwargs):
super(Test, self).__init__(**kwargs)
def create_state(self, index, weight):
"""Creates a state to duplicate weight."""
return zeros(weight.shape, weight.context)
def update(self, index, weight, grad, state):
"""Performs w += rescale_grad * grad."""
weight[:] += grad * self.rescale_grad
state[:] = weight
# backward compatibility wrapper for Optimizer.CreateOptimizer
create = Optimizer.create_optimizer # pylint: disable=invalid-name
class Updater(object):
"""Updater for kvstore."""
def __init__(self, optimizer):
self.optimizer = optimizer
self.states = {}
self.states_synced = {}
def __call__(self, index, grad, weight):
"""Updates weight given gradient and index."""
# convert ctypes.char_p.value back to python str if needed
if isinstance(index, bytes):
index = py_str(index)
if index not in self.states:
self.states[index] = self.optimizer.create_state_multi_precision(index, weight)
self.states_synced[index] = True
elif not self.states_synced[index]:
self.states[index] = \
self.sync_state_context(self.states[index], weight.context)
self.states_synced[index] = True
self.optimizer.update_multi_precision(index, weight, grad, self.states[index])
def sync_state_context(self, state, context):
if isinstance(state, NDArray):
return state.as_in_context(context)
elif isinstance(state, (tuple, list)):
synced_state = (self.sync_state_context(i, context) for i in state)
if isinstance(state, tuple):
return tuple(synced_state)
else:
return list(synced_state)
else:
return state
def set_states(self, states):
"""Sets updater states."""
states = pickle.loads(states)
if isinstance(states, tuple) and len(states) == 2:
self.states, self.optimizer = states
else:
self.states = states
self.states_synced = dict.fromkeys(self.states.keys(), False)
def get_states(self, dump_optimizer=False):
"""Gets updater states.
Parameters
----------
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
"""
return pickle.dumps((self.states, self.optimizer) if dump_optimizer else self.states)
def get_updater(optimizer):
"""Returns a closure of the updater needed for kvstore.
Parameters
----------
optimizer: Optimizer
The optimizer.
Returns
-------
updater: function
The closure of the updater.
"""
return Updater(optimizer)
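# Example (a sketch): round-tripping updater state with the methods above.
# The optimizer name and the gradient/weight arrays are placeholders.
#
#   updater = get_updater(create('sgd', learning_rate=0.1))
#   updater(0, grad, weight)                       # one update step
#   blob = updater.get_states(dump_optimizer=False)
#   updater.set_states(blob)                       # states re-synced lazily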
|
weleen/mxnet
|
python/mxnet/optimizer.py
|
Python
|
apache-2.0
| 50,474
|
parameter_lists_copy = [m for m in parameter_lists]
for <caret>m in parameter_lists_copy:
if param_index >= len(m.GetParameters()):
parameter_lists.remove(m)
|
asedunov/intellij-community
|
python/testData/refactoring/rename/renameLocalWithComprehension.py
|
Python
|
apache-2.0
| 170
|
#-*-coding:utf-8 -*-
import multiprocessing
import collections
class MapReduce(object):
def __init__(self,mapper,reducer):
self.mapper = mapper
self.reducer = reducer
self.pool = multiprocessing.Pool()
def partition(self,mapped_value):
result = []
for item in mapped_value:
result.extend(item)
partition_data = collections.defaultdict(list)
for key, value in result:
partition_data[key].append(value)
return partition_data.items()
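    # Example: partition([[('a', 1), ('b', 1)], [('a', 1)]]) yields the
    # grouped items [('a', [1, 1]), ('b', [1])] (dict ordering unspecified).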
def __call__(self,inputs):
mapped_result = self.pool.map(self.mapper,inputs,chunksize=1)
mapped_value = self.partition(mapped_result)
reduced_value = self.pool.map(self.reducer,mapped_value)
return reduced_value
def mapper(logfile):
    mapped_value = []
    with open(logfile,'r') as f:
        for line in f:
            fields = line.split()
            try:
                # emit (cookie, 1) for the first whitespace-separated field
                mapped_value.append((fields[0],1))
            except IndexError,e:
                # skip blank or malformed lines instead of emitting an empty
                # tuple, which would break the reducer's unpacking
                print str(e)
    return mapped_value
def reducer(item):
cookie,occurances = item
return (cookie,sum(occurances))
if __name__ == "__main__":
mapreduce = MapReduce(mapper,reducer)
import os
import glob
logpath = os.path.join(os.environ.get("SPIDERPATH"),'logs')
    result = mapreduce(glob.glob(os.path.join(logpath,'*')))
print result
|
haipersist/webspider
|
da/MapReduce.py
|
Python
|
mit
| 1,493
|
#
# Copyright (c) 2015 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
from email.utils import parseaddr
def generate_recipients(sender, to, ccs, current_user):
result = {'single': None, 'all': {'to-field': [], 'cc-field': []}}
to.append(sender)
to = remove_duplicates(to)
ccs = remove_duplicates(ccs)
result['single'] = swap_recipient_if_needed(sender, remove_address(to, current_user), current_user)
result['all']['to-field'] = remove_address(to, current_user) if len(to) > 1 else to
result['all']['cc-field'] = remove_address(ccs, current_user) if len(ccs) > 1 else ccs
return result
def remove_duplicates(recipients):
return list(set(recipients))
def remove_address(recipients, current_user):
return [recipient for recipient in recipients if not parsed_mail_matches(recipient, current_user)]
def parsed_mail_matches(to_parse, expected):
return parseaddr(to_parse)[1] == expected
def swap_recipient_if_needed(sender, recipients, current_user):
if len(recipients) == 1 and parsed_mail_matches(sender, current_user):
return recipients[0]
return sender
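# Worked example (illustrative addresses): replying as me@x to a mail from
# alice@x that was addressed to [bob@x, me@x] with no ccs:
#
#   generate_recipients('alice@x', ['bob@x', 'me@x'], [], 'me@x')
#
# yields single='alice@x', a to-field containing alice@x and bob@x (order is
# unspecified because duplicates are removed via set()), and cc-field=[].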
|
pixelated-project/pixelated-user-agent
|
service/pixelated/support/replier.py
|
Python
|
agpl-3.0
| 1,744
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1SupplementalGroupsStrategyOptions(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, ranges=None, rule=None):
"""
V1beta1SupplementalGroupsStrategyOptions - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'ranges': 'list[V1beta1IDRange]',
'rule': 'str'
}
self.attribute_map = {
'ranges': 'ranges',
'rule': 'rule'
}
self._ranges = ranges
self._rule = rule
@property
def ranges(self):
"""
Gets the ranges of this V1beta1SupplementalGroupsStrategyOptions.
Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.
:return: The ranges of this V1beta1SupplementalGroupsStrategyOptions.
:rtype: list[V1beta1IDRange]
"""
return self._ranges
@ranges.setter
def ranges(self, ranges):
"""
Sets the ranges of this V1beta1SupplementalGroupsStrategyOptions.
Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.
:param ranges: The ranges of this V1beta1SupplementalGroupsStrategyOptions.
:type: list[V1beta1IDRange]
"""
self._ranges = ranges
@property
def rule(self):
"""
Gets the rule of this V1beta1SupplementalGroupsStrategyOptions.
Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.
:return: The rule of this V1beta1SupplementalGroupsStrategyOptions.
:rtype: str
"""
return self._rule
@rule.setter
def rule(self, rule):
"""
Sets the rule of this V1beta1SupplementalGroupsStrategyOptions.
Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.
:param rule: The rule of this V1beta1SupplementalGroupsStrategyOptions.
:type: str
"""
self._rule = rule
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1SupplementalGroupsStrategyOptions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
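# Example (a sketch): constructing the model and serializing it with the
# helpers above. 'RunAsAny' is one of the rule values Kubernetes accepts.
#
#   opts = V1beta1SupplementalGroupsStrategyOptions(rule='RunAsAny')
#   opts.to_dict()   # -> {'ranges': None, 'rule': 'RunAsAny'}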
|
djkonro/client-python
|
kubernetes/client/models/v1beta1_supplemental_groups_strategy_options.py
|
Python
|
apache-2.0
| 4,328
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2022 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from PyQt5 import QtCore, QtGui, QtWidgets
import l5r.widgets as widgets
import l5r.api as api
import l5r.api.character.rankadv
class NextRankDlg(QtWidgets.QDialog):
def __init__(self, pc, parent=None):
super(NextRankDlg, self).__init__(parent)
self.pc = pc
self.build_ui()
self.connect_signals()
# self.setWindowFlags(QtCore.Qt.Tool)
self.setWindowTitle(self.tr("L5R: CM - Advance Rank"))
def build_ui(self):
vbox = QtWidgets.QVBoxLayout(self)
vbox.addWidget(QtWidgets.QLabel(self.tr("""\
You can now advance your Rank,
what would you like to do?
""")))
self.bt_go_on = QtWidgets.QPushButton(
self.tr("Advance in my current school")
)
self.bt_new_school = QtWidgets.QPushButton(
self.tr("Join a new school"))
for bt in [self.bt_go_on, self.bt_new_school]:
bt.setMinimumSize(QtCore.QSize(0, 38))
vbox.addWidget(self.bt_go_on)
vbox.addWidget(self.bt_new_school)
vbox.setSpacing(12)
is_path = api.data.schools.is_path(
api.character.schools.get_current()
)
former_school_adv = api.character.rankadv.get_former_school()
former_school = api.data.schools.get(former_school_adv.school) if former_school_adv else None
# check if the PC is following an alternate path
if is_path:
            # offer to go back
if former_school:
self.bt_go_on.setText(self.tr("Continue ") + former_school.name)
else:
self.bt_go_on.setText(self.tr("Go back to your old school"))
            self.bt_go_on.setEnabled(former_school is not None)
def connect_signals(self):
self.bt_go_on.clicked.connect(self.simply_go_on)
self.bt_new_school.clicked.connect(self.join_new_school)
def join_new_school(self):
dlg = widgets.SchoolChooserDialog(self)
if dlg.exec_() == QtWidgets.QDialog.Rejected:
return
self.accept()
def simply_go_on(self):
is_path = api.data.schools.is_path(
api.character.schools.get_current()
)
# check if the PC is following an alternate path
if is_path:
# the PC want to go back to the old school.
# find the first school that is not a path
api.character.rankadv.leave_path()
else:
api.character.rankadv.advance_rank()
self.accept()
def test():
import sys
app = QtWidgets.QApplication(sys.argv)
dlg = NextRankDlg(None, None)
dlg.show()
sys.exit(app.exec_())
if __name__ == '__main__':
test()
|
OpenNingia/l5r-character-manager-3
|
l5r/dialogs/newrankdlg.py
|
Python
|
gpl-3.0
| 3,473
|
#!/usr/bin/python
## Copyright (C) 2008, 2010 Red Hat, Inc.
## Authors:
## Tim Waugh <twaugh@redhat.com>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
import traceback
_debug=False
def debugprint (x):
if _debug:
try:
sys.stderr.write (x + "\n")
sys.stderr.flush ()
except:
pass
def get_debugging ():
return _debug
def set_debugging (d):
global _debug
_debug = d
def fatalException (exitcode=1):
nonfatalException (type="fatal", end="Exiting")
sys.exit (exitcode)
def nonfatalException (type="non-fatal", end="Continuing anyway.."):
d = get_debugging ()
set_debugging (True)
debugprint ("Caught %s exception. Traceback:" % type)
(type, value, tb) = sys.exc_info ()
extxt = traceback.format_exception_only (type, value)
for line in traceback.format_tb(tb):
debugprint (line.strip ())
debugprint (extxt[0].strip ())
debugprint (end)
set_debugging (d)
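# Example usage (a sketch):
#
#   set_debugging (True)
#   try:
#       1 / 0
#   except ZeroDivisionError:
#       nonfatalException ()   # prints the traceback via debugprint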
|
ruibarreira/linuxtrail
|
usr/lib/python2.7/dist-packages/cupshelpers/debug.py
|
Python
|
gpl-3.0
| 1,658
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tests for the qubit_operator_transforms module'''
import unittest
import numpy
from openfermion.ops.operators import QubitOperator, FermionOperator
from openfermion.transforms.repconversions import (project_onto_sector,
projection_error,
rotate_qubit_by_pauli)
from openfermion.utils import count_qubits
class ProjectionTest(unittest.TestCase):
def setUp(self):
pass
def test_function_errors(self):
"""Test main function errors."""
operator = (QubitOperator('Z0 X1', 1.0) + QubitOperator('X1', 2.0))
sector1 = [0]
sector2 = [1]
qbt_list = [0]
with self.assertRaises(TypeError):
project_onto_sector(operator=1.0, qubits=qbt_list, sectors=sector1)
with self.assertRaises(TypeError):
projection_error(operator=1.0, qubits=qbt_list, sectors=sector1)
with self.assertRaises(TypeError):
project_onto_sector(operator=operator, qubits=0.0, sectors=sector2)
with self.assertRaises(TypeError):
projection_error(operator=operator, qubits=0.0, sectors=sector2)
with self.assertRaises(TypeError):
project_onto_sector(operator=operator,
qubits=qbt_list,
sectors=operator)
with self.assertRaises(TypeError):
projection_error(operator=operator,
qubits=qbt_list,
sectors=operator)
with self.assertRaises(ValueError):
project_onto_sector(operator=operator,
qubits=[0, 1],
sectors=sector1)
with self.assertRaises(ValueError):
projection_error(operator=operator, qubits=[0, 1], sectors=sector1)
with self.assertRaises(ValueError):
project_onto_sector(operator=operator,
qubits=qbt_list,
sectors=[0, 0])
with self.assertRaises(ValueError):
projection_error(operator=operator, qubits=qbt_list, sectors=[0, 0])
with self.assertRaises(ValueError):
project_onto_sector(operator=operator,
qubits=qbt_list,
sectors=[-1])
with self.assertRaises(ValueError):
projection_error(operator=operator, qubits=qbt_list, sectors=[-1])
def test_projection(self):
coefficient = 0.5
opstring = ((0, 'X'), (1, 'X'), (2, 'Z'))
opstring2 = ((0, 'X'), (2, 'Z'), (3, 'Z'))
operator = QubitOperator(opstring, coefficient)
operator += QubitOperator(opstring2, coefficient)
new_operator = project_onto_sector(operator,
qubits=[2, 3],
sectors=[0, 1])
error = projection_error(operator, qubits=[2, 3], sectors=[0, 1])
self.assertEqual(count_qubits(new_operator), 2)
self.assertEqual(error, 0)
self.assertTrue(((0, 'X'), (1, 'X')) in new_operator.terms)
self.assertEqual(new_operator.terms[((0, 'X'), (1, 'X'))], 0.5)
self.assertTrue(((0, 'X'),) in new_operator.terms)
self.assertEqual(new_operator.terms[((0, 'X'),)], -0.5)
def test_projection_error(self):
coefficient = 0.5
opstring = ((0, 'X'), (1, 'X'), (2, 'Z'))
opstring2 = ((0, 'X'), (2, 'Z'), (3, 'Z'))
operator = QubitOperator(opstring, coefficient)
operator += QubitOperator(opstring2, coefficient)
new_operator = project_onto_sector(operator, qubits=[1], sectors=[0])
error = projection_error(operator, qubits=[1], sectors=[0])
self.assertEqual(count_qubits(new_operator), 3)
self.assertTrue(((0, 'X'), (1, 'Z'), (2, 'Z')) in new_operator.terms)
self.assertEqual(new_operator.terms[((0, 'X'), (1, 'Z'), (2, 'Z'))],
0.5)
self.assertEqual(error, 0.5)
class UnitaryRotationsTest(unittest.TestCase):
    def setUp(self):
pass
def test_rotation(self):
qop = QubitOperator('X0 X1', 1)
qop += QubitOperator('Z0 Z1', 1)
rot_op = QubitOperator('Z1', 1)
rotated_qop = rotate_qubit_by_pauli(qop, rot_op, numpy.pi / 4)
comp_op = QubitOperator('Z0 Z1', 1)
comp_op += QubitOperator('X0 Y1', 1)
self.assertEqual(comp_op, rotated_qop)
def test_exception_Pauli(self):
qop = QubitOperator('X0 X1', 1)
qop += QubitOperator('Z0 Z1', 1)
rot_op = QubitOperator('Z1', 1)
rot_op += QubitOperator('Z0', 1)
rot_op2 = QubitOperator('Z1', 1)
ferm_op = FermionOperator('1^ 2', 1)
with self.assertRaises(TypeError):
rotate_qubit_by_pauli(qop, rot_op, numpy.pi / 4)
with self.assertRaises(TypeError):
rotate_qubit_by_pauli(ferm_op, rot_op2, numpy.pi / 4)
|
quantumlib/OpenFermion
|
src/openfermion/transforms/repconversions/qubit_operator_transforms_test.py
|
Python
|
apache-2.0
| 5,595
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Proposal.slug'
db.add_column('proposal_proposal', 'slug',
self.gf('django_extensions.db.fields.AutoSlugField')(allow_duplicates=False, max_length=50, separator=u'-', blank=True, default='', unique=True, populate_from=('title',), overwrite=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Proposal.slug'
db.delete_column('proposal_proposal', 'slug')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'proposal.audiencelevel': {
'Meta': {'object_name': 'AudienceLevel'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'proposal.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'proposal.proposal': {
'Meta': {'ordering': "['-created']", 'object_name': 'Proposal'},
'abstract': ('django.db.models.fields.TextField', [], {}),
'audience': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['proposal.AudienceLevel']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['proposal.Category']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'duration': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_extreme': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "('title',)", 'overwrite': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '10'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['proposal.ProposalType']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposals'", 'to': "orm['auth.User']"})
},
'proposal.proposaltype': {
'Meta': {'object_name': 'ProposalType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['proposal']
|
arscariosus/django-mango
|
mango/apps/proposal/migrations/0007_auto__add_field_proposal_slug.py
|
Python
|
isc
| 6,319
|
from SliderDialog.Slider import Slider_Dialog
from ProgressDialog.Progress import ProgressBar_Dialog
import sys
from PyQt5.QtWidgets import QApplication
if __name__ == '__main__':
app = QApplication(sys.argv)
sd = Slider_Dialog()
pb = ProgressBar_Dialog()
# Making the connection
pb.make_connection(sd)
sys.exit(app.exec_())
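# A minimal sketch of the connection assumed above (the real signal and
# slot names live in the imported dialog classes; the ones below are
# hypothetical):
#
#   class ProgressBar_Dialog(QDialog):
#       def make_connection(self, slider_object):
#           # hook the slider dialog's custom signal to a local slot so
#           # the progress bar tracks the slider's value
#           slider_object.changedValue.connect(self.on_slider_changed)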
|
manashmndl/LearningPyQt
|
Signal_Slot_Example/main.py
|
Python
|
mit
| 362
|
"""Utility for creating multiple dependencies with synchronized save/restore."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.checkpointable import base as checkpointable
class _CallbackSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
"""Wraps save and restore callbacks as a `SaveableObject`."""
def __init__(self, name, dtype, save_callback, restore_callback):
self._restore_callback = restore_callback
spec = saver_lib.BaseSaverBuilder.SaveSpec(
tensor=save_callback,
slice_spec="",
name=name,
dtype=dtype)
super(_CallbackSaveable, self).__init__(
save_callback, [spec], name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return self._restore_callback(tensor)
class _SplitDependency(checkpointable.Checkpointable):
"""Looks like a regular variable while synchronizing save/restores."""
def __init__(self, save_buffer, restore_buffer, name, dtype, num_components,
fill_save_buffer_fn, consume_restore_buffer_fn):
self._save_buffer = save_buffer
self._restore_buffer = restore_buffer
self._name = name
self._dtype = dtype
self._num_components = num_components
self._fill_save_buffer_fn = fill_save_buffer_fn
self._consume_restore_buffer_fn = consume_restore_buffer_fn
def _save(self):
"""Pull from the shared buffer, populating it if necessary."""
if self._name not in self._save_buffer:
if self._save_buffer:
raise AssertionError(
("Split dependency %s (%s) unsynchronized. Split dependencies must "
"be saved together.") % (self._name, self))
self._fill_save_buffer_fn(self._save_buffer)
return self._save_buffer.pop(self._name)
def _restore(self, tensor):
"""Push into the shared buffer, flushing it if necessary."""
if self._name in self._restore_buffer:
raise AssertionError(
("Split dependency %s (%s) unsynchronized. Split dependencies must "
"be restored together.") % (self._name, self))
self._restore_buffer[self._name] = tensor
if len(self._restore_buffer) == self._num_components:
op = self._consume_restore_buffer_fn(self._restore_buffer)
self._restore_buffer.clear()
return op
else:
return control_flow_ops.no_op()
def _gather_saveables_for_checkpoint(self):
"""Looks to Checkpointable like a regular variable."""
return {
checkpointable.VARIABLE_VALUE_KEY:
functools.partial(_CallbackSaveable,
dtype=self._dtype,
save_callback=self._save,
restore_callback=self._restore)
}
def split_dependency(component_names, component_dtypes,
fill_save_buffer_fn, consume_restore_buffer_fn):
"""Creates multiple dependencies with a synchronized save/restore.
Useful when a single op produces `Tensor`s which should each be saved under
different objects, or when `Tensor`s saved with many different objects need to
be restored together as inputs to a single op (i.e. an object which uses a
single fused op may be swapped out for a subgraph of objects, and these two
programs are checkpoint compatible).
Args:
component_names: A sequence of names for the split
dependencies. `fill_save_buffer_fn` must add these keys to the dictionary
it is passed, and `consume_restore_buffer_fn` will receive a dictionary
with these keys.
component_dtypes: Data types for the `Tensor`s being saved and restored, a
sequence corresponding to `component_names`.
fill_save_buffer_fn: A function which takes an empty dictionary as an
argument and adds `Tensor`s with `component_names` as keys. These
`Tensor`s will be saved as if they were individual variables.
consume_restore_buffer_fn: A function which takes a dictionary with
`component_names` as keys mapping to restored individual `Tensor`s and
returns a restore op (or if executing eagerly, runs the restoration and
may return `None`).
Returns:
A dictionary mapping from names to Checkpointable objects. If one is
reachable from an object as a dependency, the others should be too; adding
dependencies on some but not all of the objects will result in errors.
"""
save_buffer = {}
restore_buffer = {}
split_dependencies = {}
for name, dtype in zip(component_names, component_dtypes):
split_dependencies[name] = _SplitDependency(
save_buffer=save_buffer,
restore_buffer=restore_buffer,
name=name,
dtype=dtype,
num_components=len(component_names),
fill_save_buffer_fn=fill_save_buffer_fn,
consume_restore_buffer_fn=consume_restore_buffer_fn)
return split_dependencies
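# A minimal usage sketch (hypothetical names, not part of this module):
# a single fused variable is saved as two synchronized checkpoint
# dependencies and reassembled on restore.
#
#   def _fill(save_buffer):
#       first, second = array_ops.split(fused_variable.read_value(), 2)
#       save_buffer["first"] = first
#       save_buffer["second"] = second
#
#   def _consume(restore_buffer):
#       restored = array_ops.concat(
#           [restore_buffer["first"], restore_buffer["second"]], axis=0)
#       return fused_variable.assign(restored)
#
#   deps = split_dependency(
#       component_names=["first", "second"],
#       component_dtypes=[dtypes.float32, dtypes.float32],
#       fill_save_buffer_fn=_fill,
#       consume_restore_buffer_fn=_consume)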
|
jendap/tensorflow
|
tensorflow/contrib/checkpoint/python/split_dependency.py
|
Python
|
apache-2.0
| 5,769
|
import validator.testcases.langpack as langpack
from validator.errorbundler import ErrorBundle
from helper import _do_test, MockXPI, chrome_manifest
def test_langpack_valid():
'Tests that a language pack has a valid chrome manifest file.'
_do_test('tests/resources/langpack/pass.xpi',
langpack.test_langpack_manifest,
False)
def test_langpack_bad_subject():
"""Tests that a language pack has an invalid subject in the
chrome.manifest file."""
_do_test('tests/resources/langpack/fail.xpi',
langpack.test_langpack_manifest)
def test_langpack_bad_nested_subject():
"""
Test that when a subject in a sub-manifest is not valid, it gets reported.
"""
_do_test('tests/resources/langpack/nested.xpi',
langpack.test_langpack_manifest)
def test_langpack_bad_uri_pred():
"""Tests that a language pack has an invalid URI specified for its
'override' predicates."""
_do_test('tests/resources/langpack/fail_uri_pred.xpi',
langpack.test_langpack_manifest)
def test_langpack_bad_uri_obj():
"""Tests that a language pack has an invalid URI specified for its
'override' objects."""
_do_test('tests/resources/langpack/fail_uri_obj.xpi',
langpack.test_langpack_manifest)
def test_unsafe_html():
    'Tests for unsafe HTML in abstract files.'
err = ErrorBundle(None, True)
langpack.test_unsafe_html(err, None, """
This is an <b>innocent</b> file.
Nothing to <a href="#anchor">suspect</a> here.
<img src="chrome://asdf/locale/asdf" />
<tag href="#" />""")
langpack.test_unsafe_html(err, None, "<tag href='foo' />")
langpack.test_unsafe_html(err, None, "<tag src='foo' />")
langpack.test_unsafe_html(err, None, "<tag src='/foo/bar' />")
assert not err.failed()
langpack.test_unsafe_html(err, 'asdf', """
This is not an <script>innocent</script> file.""")
assert err.failed()
err = ErrorBundle()
langpack.test_unsafe_html(err, 'asdf', """
Nothing to <a href="http://foo.bar/">suspect</a> here.""")
assert err.failed()
err = ErrorBundle()
langpack.test_unsafe_html(err, 'asdf', "src='data:foobar")
assert err.failed()
err = ErrorBundle()
langpack.test_unsafe_html(err, 'asdf', "src='//remote/resource")
assert err.failed()
err = ErrorBundle()
langpack.test_unsafe_html(err, 'asdf', 'href="ftp://foo.bar/')
assert err.failed()
def test_has_chrome_manifest():
"""Makes sure the module fails when a chrome.manifest file is not
available."""
assert langpack.test_langpack_manifest(ErrorBundle(),
None) is None
def test_valid_chrome_manifest():
'Chrome manifests must only contain certain elements'
err = ErrorBundle()
err.save_resource('chrome.manifest', chrome_manifest('locale foo bar'))
langpack.test_langpack_manifest(err, MockXPI())
assert not err.failed()
err.save_resource('chrome.manifest', chrome_manifest('foo bar asdf'))
langpack.test_langpack_manifest(err, MockXPI())
assert err.failed()
|
kmaglione/amo-validator
|
tests/test_langpack.py
|
Python
|
bsd-3-clause
| 3,133
|
# -*- coding: utf-8 -*-
from __future__ import division
import time
from collections import OrderedDict
import itertools
import ast
import numpy as np
import numpy.ma as ma
#import Splines
#from Splines import spline1d
from scipy.interpolate import interp1d
from scipy.optimize import minimize, curve_fit, leastsq
from scipy.stats import gaussian_kde # do this for some lists
import pandas as pd
#import matplotlib.pyplot as plt
# TODO: Use Bayes to refine offset estimates given slip rate constraints
def flatten(nested_iterator):
return list(itertools.chain(*nested_iterator))
def tspline_interpolate():
pass
def fit_history_spline(age_array, offset_array):
return interp1d(age_array, offset_array)
def sample_slip_history(age_array, offset_array, time_array,
extend_time=False):
history_spline = fit_history_spline(age_array, offset_array)
if extend_time == False:
time_array = time_array[time_array <= np.max(age_array)]
elif extend_time == True:
raise Exception('extrapolating not supported yet')
return history_spline(time_array)
def inverse_transform_sample(vals, probs, n_samps, n_interp=1000, seed=False,
seed_val=69):
pdf_range, pdf_probs = make_pdf(vals, probs, n_interp)
cdf_range, cdf_probs = make_cdf(pdf_range, pdf_probs)
cdf_interp = interp1d(cdf_probs, cdf_range, bounds_error=False,
fill_value=0.)
if seed == True:
np.random.seed(seed_val)
samps = np.random.rand(n_samps)
return cdf_interp(samps)
def make_pdf(vals, probs, n_interp=1000):
val_min = np.min(vals)
val_max = np.max(vals)
pdf_range = np.linspace(val_min, val_max, n_interp)
pmf = interp1d(vals, probs)
pmf_samples = pmf(pdf_range)
pdf_probs = pmf_samples / np.sum(pmf_samples)
return pdf_range, pdf_probs
def make_cdf(pdf_range, pdf_probs):
return (pdf_range, np.cumsum(pdf_probs))
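# A brief worked example of the inverse-transform chain above
# (illustrative values): a triangular PMF on [0, 2] is interpolated to a
# normalized PDF, accumulated to a CDF, and uniform draws are mapped
# through the inverted CDF to yield samples from the target distribution.
#
#   vals = np.array([0., 1., 2.])
#   probs = np.array([0., 1., 0.])
#   samps = inverse_transform_sample(vals, probs, n_samps=1000)
#   # samps cluster near 1. and taper toward 0. and 2.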
class OffsetMarker:
"""Represents an offset geologic marker.
Attributes:
offsets: list of possible offset distances for the given marker.
If offset_type = normal, offsets = [mean, sd]
offset_probs: list of probabilities of corresponding offset distances
offset_dist_type: offset prob distribution (normal, uniform, arbitrary)
ages: list of possible ages for the given marker
age_probs: list of probabilities of corresponding ages
age_dist_type: age prob. distribution (normal, uniform, arbitrary)
source: Source for information (e.g., what article, field campaign)
"""
# TODO: Need to make a random.choice setting for large arrays of vals
def __init__(self, offsets=np.array([]), offset_probs=None,
offset_vals=None, offset_mean=None, offset_median=None,
offset_sd=None, offset_mad=None,
offset_min=None, offset_max=None,
offset_seed=None,
offset_dist_type='unspecified', offset_units='unspecified',
ages=np.array([]),
age_probs=None, age_vals=None,
age_mean=None, age_median=None, age_sd=None, age_mad=None,
age_min=None, age_max=None,
age_seed=None,
age_dist_type='unspecified', age_units='unspecified',
source='None'):
self.offsets = offsets
self.offset_probs = offset_probs
self.offset_vals = offset_vals
self.offset_mean = offset_mean
self.offset_median = offset_median
self.offset_sd = offset_sd
self.offset_mad = offset_mad
self.offset_min = offset_min
self.offset_max = offset_max
self.offset_units = offset_units
if offset_dist_type != 'unspecified':
self.offset_dist_type = offset_dist_type
elif offset_dist_type == 'unspecified':
if offset_mean is not None and offset_sd is not None:
self.offset_dist_type = 'normal'
elif (offset_min is not None and offset_max is not None
and offset_sd == None):
self.offset_dist_type = 'uniform'
elif offset_probs is not None and offset_vals is not None:
self.offset_dist_type = 'arbitrary'
self.ages = ages
self.age_probs = age_probs
self.age_vals = age_vals
self.age_mean = age_mean
self.age_median = age_median
self.age_sd = age_sd
self.age_mad = age_mad
self.age_min = age_min
self.age_max = age_max
self.age_units = age_units
if age_dist_type != 'unspecified':
self.age_dist_type = age_dist_type
elif age_dist_type == 'unspecified':
if age_mean is not None and age_sd is not None:
self.age_dist_type = 'normal'
elif (age_min is not None and age_max is not None
and age_sd == None):
self.age_dist_type = 'uniform'
elif age_probs is not None and age_vals is not None:
self.age_dist_type = 'arbitrary'
self.source = source
def sample_offset_from_normal(self, n):
"""Generates n-length sample from normal distribution of offsets"""
return sample_from_bounded_normal(self.offset_mean, self.offset_sd, n,
self.offset_min, self.offset_max)
def sample_offset_from_uniform(self, n):
"""Generates n-length sample from uniform distribution of ages"""
return np.random.uniform(self.offset_min, self.offset_max, n)
def sample_offset_from_arbitrary(self, n):
"""not supported yet"""
offset_sample = inverse_transform_sample(self.offset_vals,
self.offset_probs, n)
return offset_sample
def sample_offset(self, n):
"""Generates n-length array of samples from distribution"""
if self.offset_dist_type == 'normal':
offset_sample = self.sample_offset_from_normal(n)
elif self.offset_dist_type == 'uniform':
offset_sample = self.sample_offset_from_uniform(n)
elif self.offset_dist_type == 'arbitrary':
offset_sample = self.sample_offset_from_arbitrary(n)
else:
            raise Exception('Unknown offset distribution type: {}'
                            .format(self.offset_dist_type))
return offset_sample
def sample_age_from_normal(self, n):
"""Generates n-length sample from normal distribution of ages"""
if self.age_min:
age_min = self.age_min
else:
age_min = 0.
age_sample = sample_from_bounded_normal(self.age_mean, self.age_sd, n,
age_min, self.age_max)
return age_sample
def sample_age_from_uniform(self, n):
"""Generates n-length sample from uniform distribution of ages"""
return np.random.uniform(self.age_min, self.age_max, n)
def sample_age_from_arbitrary(self, n):
"""not supported yet"""
return inverse_transform_sample(self.age_vals, self.age_probs, n)
def sample_age(self, n):
"""Generates n-length array of samples from distribution"""
if self.age_dist_type == 'normal':
age_sample = self.sample_age_from_normal(n)
elif self.age_dist_type == 'uniform':
age_sample = self.sample_age_from_uniform(n)
elif self.age_dist_type == 'arbitrary':
age_sample = self.sample_age_from_arbitrary(n)
else:
            raise Exception('Unknown age distribution type: {}'
                            .format(self.age_dist_type))
return age_sample
def sample(self, n):
age_sample = self.sample_age(n)
offset_sample = self.sample_offset(n)
asl = len(age_sample)
osl = len(offset_sample)
if asl > osl:
age_sample = age_sample[0:osl]
elif osl > asl:
offset_sample = offset_sample[0:asl]
return age_sample, offset_sample
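# A minimal usage sketch (illustrative values): an offset of 10 +/- 2 m
# on a feature dated at 5 +/- 1 ka, both treated as (bounded) normal
# distributions by the constructor's inference rules above.
#
#   marker = OffsetMarker(offset_mean=10., offset_sd=2.,
#                         offset_min=0., offset_max=20., offset_units='m',
#                         age_mean=5., age_sd=1., age_max=10.,
#                         age_units='ka')
#   ages, offsets = marker.sample(1000)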
def offset_list_from_gui(tabledata, table_header):
offsets_d = offset_markers_from_gui(tabledata, table_header)
return list(offsets_d.values())
def offset_markers_from_gui(tabledata, table_header):
offsets_d = OrderedDict()
for row in tabledata:
off_mark_d = offset_marker_dict_from_row(row, table_header)
offsets_d[off_mark_d['Name']] = offset_marker_from_dict(off_mark_d)
return offsets_d
def offset_marker_dict_from_row(row, table_header):
# header_table should be passed from gui
off_mark_d = OrderedDict()
for i, key in enumerate(table_header):
try:
off_mark_d[key] = ast.literal_eval(row[i])
except ValueError:
off_mark_d[key] = row[i]
for key, val in off_mark_d.items():
if key in ['Age', 'Age_Err', 'Offset', 'Offset_Err']:
if not isinstance(val, (list, tuple, np.ndarray)):
if not isinstance( val, (int, float, complex)):
raise Exception(
('Error in {}: value for {} is not numeric. Maybe '
+'a string?').format(off_mark_d['Name'], key) )
else:
for item in val:
if not isinstance( item, (int, float, complex)):
raise Exception(
('Error in {}: value for {} is not numeric. Maybe '
+'a string?').format(off_mark_d['Name'], key) )
return off_mark_d
def offset_marker_from_dict(off_row_d):
or_d = off_row_d
args = {'offset_units': or_d['Offset_Units'],
'age_units': or_d['Age_Units']}
# get offset arguments
if or_d['Offset_Type'] == 'mean':
if not np.isscalar(or_d['Offset']):
raise Exception('Mean Offset has to be a scalar!')
args['offset_mean'] = or_d['Offset']
elif or_d['Offset_Type'] == 'median':
if not np.isscalar(or_d['Offset']):
raise Exception('Median Offset has to be a scalar!')
args['offset_median'] = or_d['Offset']
elif or_d['Offset_Type'] == 'list':
if len(or_d['Offset']) < 2:
raise Exception('List Offsets have to be longer than 1!')
args['offset_vals'] = or_d['Offset']
else:
raise Exception('Offset_Type must be mean, median or list!')
# get offset err arguments
# TODO: More consistency checking between arg types
if or_d['Offset_Err_Type'] == 'sd':
if not np.isscalar(or_d['Offset_Err']):
raise Exception('sd Offset_Err must be a scalar!')
args['offset_sd'] = or_d['Offset_Err']
elif or_d['Offset_Err_Type'] == 'mad':
if not np.isscalar(or_d['Offset_Err']):
raise Exception('mad Offset_Err must be a scalar!')
args['offset_mad'] = or_d['Offset_Err']
elif or_d['Offset_Err_Type'] == 'minmax':
if not np.isscalar(or_d['Offset_Err']):
raise Exception('minmax Offset_Err must be a scalar!')
if not np.isscalar(or_d['Offset']):
raise Exception('Mean Offset has to be a scalar!')
args['offset_min'] = or_d['Offset'] - or_d['Offset_Err']
args['offset_max'] = or_d['Offset'] + or_d['Offset_Err']
args['offset_sd'] = None # just to make sure the class inits right
elif or_d['Offset_Err_Type'] == 'probs':
if len(or_d['Offset_Err']) < 2:
raise Exception('probs Offset_Err have to be longer than 1!')
args['offset_probs'] = or_d['Offset_Err']
# check to make sure offset vals are set too?
elif or_d['Offset_Err_Type'] == 'kde':
if len(or_d['Offset_Err']) < 2:
raise Exception('kde Offset_Err have to be longer than 1!')
args['offset_probs'] = kde(or_d['Offset'])
else:
raise Exception('Offset_Err_Type must be sd, mad, minmax, probs, '
+'or kde!')
# get age arguments
if or_d['Age_Type'] == 'mean':
if not np.isscalar(or_d['Age']):
raise Exception('Mean Age has to be a scalar!')
args['age_mean'] = or_d['Age']
elif or_d['Age_Type'] == 'median':
if not np.isscalar(or_d['Age']):
raise Exception('Median Age has to be a scalar!')
args['age_median'] = or_d['Age']
elif or_d['Age_Type'] == 'list':
if len(or_d['Age']) < 2:
raise Exception('List Ages have to be longer than 1!')
args['age_vals'] = or_d['Age']
else:
raise Exception('Age_Type must be mean, median or list!')
# get age err arguments
# TODO: More consistency checking between arg types
if or_d['Age_Err_Type'] == 'sd':
if not np.isscalar(or_d['Age_Err']):
raise Exception('sd Age_Err must be a scalar!')
args['age_sd'] = or_d['Age_Err']
elif or_d['Age_Err_Type'] == 'mad':
if not np.isscalar(or_d['Age_Err']):
raise Exception('mad Age_Err must be a scalar!')
args['age_mad'] = or_d['Age_Err']
elif or_d['Age_Err_Type'] == 'minmax':
if not np.isscalar(or_d['Age_Err']):
raise Exception('minmax Age_Err must be a scalar!')
if not np.isscalar(or_d['Age']):
raise Exception('Mean Age has to be a scalar!')
args['age_min'] = or_d['Age'] - or_d['Age_Err']
args['age_max'] = or_d['Age'] + or_d['Age_Err']
args['age_sd'] = None # just to make sure the class inits right
elif or_d['Age_Err_Type'] == 'probs':
if len(or_d['Age_Err']) < 2:
raise Exception('probs Age_Err have to be longer than 1!')
args['age_probs'] = or_d['Age_Err']
# check to make sure age vals are set too?
elif or_d['Age_Err_Type'] == 'kde':
if len(or_d['Age_Err']) < 2:
raise Exception('kde Age_Err have to be longer than 1!')
args['age_probs'] = kde(or_d['Age'])
else:
raise Exception('Age_Err_Type must be sd, mad, minmax, probs, '
+'or kde!')
return OffsetMarker(**args)
def kde(vals):
# not sure how to do this yet
# need to match input length? or just resample? pass resampling to class?
# will need to re-do vals too!
raise Exception('Not Implemented Yet')
def sample_from_bounded_normal(mean, sd, n, sample_min=None, sample_max=None):
sample = np.random.normal(mean, sd, n)
sample = trim_distribution(sample, sample_min=sample_min,
sample_max=sample_max)
while len(sample) < n:
next_sample = np.random.normal(mean, sd, n)
next_sample = trim_distribution(next_sample, sample_min, sample_max)
sample = np.hstack([sample, next_sample])
return sample[:n]
def trim_distribution(sample, sample_min=None, sample_max=None):
if sample_min is not None and sample_max is not None:
if sample_min >= sample_max:
raise Exception('min must be less than max!')
if sample_min is not None:
sample = sample[sample >= sample_min]
if sample_max is not None:
sample = sample[sample <= sample_max]
return sample
def check_monot_increasing(in_array):
"""Checks to see if array is monotonically increasing, returns bool value
"""
dx = np.diff(in_array)
return np.all(dx >= 0)
def check_unit_consistency(offset_list):
off_unit_list = [om.offset_units for om in offset_list]
age_unit_list = [om.age_units for om in offset_list]
for off_u in off_unit_list:
if off_u != off_unit_list[0]:
raise Exception('OffsetMarker units not consistent.')
for age_u in age_unit_list:
if age_u != age_unit_list[0]:
raise Exception('OffsetMarker units not consistent.')
return
def get_log_pts(p_min, p_max, n_pts=50, base=np.e):
"""Generates n_pts length array of logarithmically spaced points"""
if p_min == 0:
pts_array = np.hstack([0, np.logspace(np.log(1e-5), np.log(p_max),
num=n_pts-1, base=base)])
else:
        pts_array = np.logspace(np.log(p_min), np.log(p_max), num=n_pts,
                                base=base)
return pts_array
def make_age_offset_arrays(offset_list, n, force_increasing=False,
zero_offset_age=0., seed=False, seed_value=None,
sample_chunks=1):
# TODO: implement sample chunking (using n samples per marker per fit)
if seed == True:
np.random.seed(seed_value)
age_array = np.zeros((n, len(offset_list)+1 * sample_chunks))
off_array = np.zeros((n, len(offset_list)+1 * sample_chunks))
age_array[:,0] = zero_offset_age
for i, off_mark in enumerate(offset_list):
col = i+1
age_array[:,col], off_array[:,col] = off_mark.sample(n)
if force_increasing == True:
def make_inc_bool(age_array, off_array, n):
inc_bool = np.ones((age_array.shape[0]), dtype=int)
for row in range(n):
age_inc = check_monot_increasing(age_array[row,:])
off_inc = check_monot_increasing(off_array[row,:])
if not (age_inc and off_inc):
inc_bool[row] = 0
inc_bool = np.array(inc_bool, dtype=bool)
return inc_bool
inc_bool = make_inc_bool(age_array, off_array, n)
age_array = age_array[inc_bool, :]
off_array = off_array[inc_bool, :]
while age_array.shape[0] < n:
next_age_array, next_off_array = make_age_offset_arrays(
offset_list, n,
force_increasing=False,
zero_offset_age=zero_offset_age)
next_inc_bool = make_inc_bool(next_age_array, next_off_array, n)
next_age_array = next_age_array[next_inc_bool, :]
next_off_array = next_off_array[next_inc_bool, :]
off_array = np.vstack([off_array, next_off_array])
age_array = np.vstack([age_array, next_age_array])
return age_array[:n,:], off_array[:n,:]
####
# Piecewise linear fitting. Multiple methods here, pick your poison.
###
def fit_piecewise_linear_w_breakpts(x_data, y_data, breakpts):
'''
Modified from an email by Josef Perktold on the
StatsModels mailing list.
'''
# make breakpts into list, so we can prepend 0
if hasattr(breakpts, 'shape'):
breakpts = breakpts.tolist()
else:
breakpts = list(breakpts)
breakpts.insert(0,0) # slope over entire array
# make exog array
A = np.column_stack([np.maximum(0, x_data - knot) for knot in breakpts])
# returned slopes are in difference from last slope where slope1 is from 0
# don't know how to make exog array otherwise
slopes, sum_sq_err = np.linalg.lstsq(A, y_data)[0:2]
return np.cumsum(slopes), sum_sq_err # cumsum makes each slope the real one
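# A brief note on the design above: the columns max(0, x - knot) form a
# truncated (hinge) basis, so least squares returns slope *increments*
# at each knot and np.cumsum converts them into each segment's actual
# slope. Illustrative check (hypothetical data):
#
#   x = np.linspace(0., 10., 21)
#   y = np.where(x < 5., 1. * x, 5. + 3. * (x - 5.))  # slopes 1 then 3
#   slopes, err = fit_piecewise_linear_w_breakpts(x, y, [5.])
#   # slopes ~= array([1., 3.])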
def piecewise_linear_breakpt_search(x_data, y_data, n_pieces=2, n_iters=20,
penalize_rate_changes=False,
weight_pen=0.2,
allow_slip_reversals=False):
    '''
    Randomized breakpoint search for an n-piece piecewise-linear fit.
    Draws `n_iters` candidate breakpoint sets uniformly over the data
    range, fits each with `fit_piecewise_linear_w_breakpts`, optionally
    penalizes rate changes and rejects slip reversals, and returns a
    flattened list [slopes..., breakpoints..., sum_sq_err] for the best
    (minimum-error) candidate.
    '''
x_d = x_data - x_data[0] # adjust for zero_offset_age
n_breaks = n_pieces - 1
breakpt_samples = np.random.uniform(0., x_d.max(), (n_iters, n_breaks))
breakpt_samples = np.sort(breakpt_samples, axis=1)
slopes = {}
if allow_slip_reversals == False: # 1 means no reversal, 0 means reversal
monotonic_index = np.zeros(len(breakpt_samples), dtype=int)
# make this huge so failures won't be selected as min
sum_sq_errs = np.ones(n_iters) * np.inf
if penalize_rate_changes == True:
pen_sum_sq = sum_sq_errs.copy()
for i, breakpt in enumerate(breakpt_samples):
try:
slopes[i], sum_sq_errs[i] = fit_piecewise_linear_w_breakpts(x_d,
y_data,
breakpt)
if penalize_rate_changes == True:
pen_sum_sq[i] = sum_sq_errs[i] * rate_change_penalization(
slopes[i],
weight_pen)
except ValueError: # returned when least_sqs problem ill-conditioned
pass
        if allow_slip_reversals == False and i in slopes:
            monotonic_index[i] = check_slip_monotonicity(slopes[i])
    if allow_slip_reversals == False: # give slip reversals inf err, keep inds
        rev_index = np.bool_(1 - monotonic_index)
        sum_sq_errs[rev_index] = np.inf
        if penalize_rate_changes == True:
            pen_sum_sq[rev_index] = np.inf
if penalize_rate_changes == True:
min_i = np.argmin(pen_sum_sq)
else:
min_i = np.argmin(sum_sq_errs)
return flatten([slopes[min_i], breakpt_samples[min_i] + x_data[0],
[sum_sq_errs[min_i]]])
def check_slip_monotonicity(rates):
'''
Arguments: 'rates', a sequence of slip rates.
Checks for monotonic slip, i.e. no changes in sign of slip rates.
    If no slip reversals are found, returns 1; otherwise returns 0.
'''
if all(rate >= 0. for rate in rates) or all(rate <= 0. for rate in rates):
return 1
else:
return 0
def rate_change_penalization(slopes, weight_pen):
return 1 + np.abs(np.diff(slopes)) * weight_pen
###
# Older, not currently used piecewise fitting stuff
###
def piece_lin_objective(params, x_data, y_data):
    '''Sum of squared residuals for a one-breakpoint piecewise linear fit.
Modified from a function by Andreas Hillboll on the StatsModels
mailing list.
'''
y1 = 0.
y2, y3, x2 = params
x1, x3 = x_data[0], x_data[-1]
Xbefore = y1 + (x_data - x1) * (y2 - y1) / (x2 - x1)
Xafter = y2 + (x_data - x2) * (y3 - y2) / (x3 - x2)
Xbreak = np.where(x_data <= x2, Xbefore, Xafter)
return (ma.masked_invalid(Xbreak - y_data)**2).sum()
def piece_lin_opt(x_data, y_data):
init_guesses = (np.mean(y_data), np.mean(y_data), np.mean(x_data))
    bounds = ((0., np.max(y_data)), (0., np.max(y_data)), (0., np.max(x_data)))
res = minimize(piece_lin_objective, init_guesses, (x_data, y_data),
method="TNC", bounds=bounds)
sum_sq_err = piece_lin_objective(res.x, x_data, y_data)
y2, y3, x2 = res.x
slope1 = y2 / x2
slope2 = ((y3 - y2) / (np.max(x_data) - x2))
breakpoint = x2
return slope1, slope2, breakpoint, sum_sq_err
def piecewise_linear(x, breakpt, m1, m2):
return np.piecewise(x, [x < breakpt], [lambda x: m1 * x,
lambda x: m2 * x + (m1 * breakpt) - m2 * breakpt])
def piecewise_linear_objective(params, x_data, y_data):
return ( (y_data - piecewise_linear(x_data, *params))**2).sum()
def penalized_piecewise_linear_objective(params, x_data, y_data, weight=0.1):
breakpt, m1, m2 = params
resids = np.array( (y_data - piecewise_linear(x_data, *params)) )
rate_change_penalization = np.sum(np.abs(resids)) * np.abs(m1 - m2) * weight
new_resids = np.append(resids, rate_change_penalization)
return new_resids
def piecewise_linear_opt(x_data, y_data):
breakpt_guess = np.median(x_data)
m1_guess = x_data.max() / y_data.max()
m2_guess = x_data.max() / y_data.max()
init_vals = [breakpt_guess, m1_guess, m2_guess]
try:
params, cov_matrix = curve_fit(piecewise_linear, x_data, y_data,
init_vals)
except RuntimeError:
results = minimize(piecewise_linear_objective, init_vals,
(x_data, y_data), method='SLSQP')
#print('slsqp')
params = results.x
# params =
breakpt, m1, m2 = params
errs = y_data - piecewise_linear(x_data, breakpt, m1, m2)
sum_sq_err = np.sum(errs**2)
return m1, m2, breakpt, sum_sq_err
def penalized_piecewise_linear_opt(x_data, y_data, weight=0.3):
breakpt_guess = np.median(x_data)
m1_guess = x_data.max() / y_data.max()
m2_guess = x_data.max() / y_data.max()
init_vals = (breakpt_guess, m1_guess, m2_guess)
params, success = leastsq(penalized_piecewise_linear_objective, init_vals,
args=(x_data, y_data, weight))
breakpt, m1, m2 = params
errs = y_data - piecewise_linear(x_data, breakpt, m1, m2)
sum_sq_err = np.sum(errs**2)
return m1, m2, breakpt, sum_sq_err
####
# Other fitting stuff
####
def lin_fit(x_data, y_data):
x = x_data[:,np.newaxis] - x_data[0]
# to solve for y = mx + b:
#x = np.vstack([x_data, np.ones(len(x_data))]).T
m, _, _, _ = np.linalg.lstsq(x, y_data)
m = m[0]
    sum_sq_err = ((y_data - m * (x_data - x_data[0]))**2).sum()
return m, sum_sq_err
def make_linear_results_columns(fit_type=None, n_linear_pieces=None):
# TODO: fix for arbitrary breakpts
results_columns = ['m', 'sumsq1']
if fit_type == 'piecewise':
m_cols = ['m{}'.format(num + 1) for num in range(n_linear_pieces)]
results_columns = flatten([m_cols, ['breakpt', 'sumsq2'],
results_columns])
return results_columns
def do_linear_fits(age_arr, off_arr, fit_type=None, trim_results=True,
n_linear_pieces=None, allow_slip_reversals=False):
n_iters = age_arr.shape[0]
results_columns = make_linear_results_columns(fit_type, n_linear_pieces)
results_arr = np.zeros( (n_iters, len(results_columns) ) )
if fit_type == 'linear':
for i in range(n_iters):
xd = age_arr[i,:]
yd = off_arr[i,:]
results_arr[i,:] = lin_fit(xd, yd)
elif fit_type == 'piecewise':
for i in range(n_iters):
xd = age_arr[i,:]
yd = off_arr[i,:]
results_arr[i, 4:6] = lin_fit(xd, yd)
#results_arr[i, 0:4] = piece_lin_opt(xd, yd)
#results_arr[i, 0:4] = piecewise_linear_opt(xd, yd)
#results_arr[i, 0:4] = penalized_piecewise_linear_opt(xd, yd)
results_arr[i, 0:4] = piecewise_linear_breakpt_search(xd, yd,
n_pieces=n_linear_pieces,
penalize_rate_changes=True, weight_pen=0.2,
allow_slip_reversals=allow_slip_reversals)
        if allow_slip_reversals==False: # extra filter to catch wily minnows
            # NOTE: mon_inds is computed but not applied here; monotonic
            # slip is enforced downstream in trim_results_df()
            mon_inds = np.bool_([check_slip_monotonicity((results_arr[i,0:2]))
                                 for i in range(n_iters)])
results_df = pd.DataFrame(results_arr, columns=results_columns)
if fit_type == 'piecewise':
if trim_results==True:
# option will be set in the GUI
results_df = trim_results_df(results_df, age_arr,
allow_slip_reversals=allow_slip_reversals)
return results_df
def trim_results_df(results_df, age_arr, trim_mag=5,
allow_slip_reversals=False):
results_df = results_df[(results_df.breakpt > age_arr[:,0])
&(results_df.breakpt < age_arr[:,-1])]
m1_75 = results_df.m1.describe()['75%']
m2_75 = results_df.m2.describe()['75%']
m1_25 = results_df.m1.describe()['25%']
m2_25 = results_df.m2.describe()['25%']
m1_inter_quart_range = m1_75 - m1_25
m2_inter_quart_range = m2_75 - m2_25
m1_range = trim_mag * m1_inter_quart_range
m2_range = trim_mag * m2_inter_quart_range
results_df = results_df[(np.abs(results_df.m1 - results_df.m1.median())
< m1_range)]
results_df = results_df[(np.abs(results_df.m2 - results_df.m2.median())
< m2_range)]
if allow_slip_reversals == False:
pos_slip = ((results_df.m1 >= 0.) & (results_df.m2 >= 0.))
neg_slip = ((results_df.m1 <= 0.) & (results_df.m2 <= 0.))
results_df = results_df[(pos_slip) ^ (neg_slip)]
return results_df
def log_likelihood(sum_sq, n):
return -n / 2 * np.log(sum_sq)
def BIC(log_likelihood, n, p):
return log_likelihood - ( 0.5 * p * np.log(n / 2 * np.pi))
def AIC(log_likelihood, n, p):
    '''Akaike's Information Criterion. Uses same function call as BIC(),
though *n* is not used.'''
return 2 * p - 2 * log_likelihood
def AICc(log_likelihood, n, p):
aic = AIC(log_likelihood, n, p)
correction_numerator = 2 * p * (p + 1)
correction_denominator = (n - p - 1)
if correction_denominator == 0:
correction = np.inf
else:
correction = correction_numerator / correction_denominator
return aic + correction
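# A brief worked example of the criteria above (illustrative numbers):
# comparing a 1-piece fit (p1 = 3 parameters) against a 2-piece fit
# (p2 = 5) for n = 6 offset markers.
#
#   ll_1 = log_likelihood(4.0, 6)  # -6/2 * ln(4.0) ~= -4.16
#   ll_2 = log_likelihood(1.0, 6)  # -6/2 * ln(1.0) = 0.0
#   AIC(ll_1, 6, 3)  # ~= 14.32
#   AIC(ll_2, 6, 5)  # = 10.0 -> lower AIC, so the 2-piece fit wins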
def find_nearest_index(array, value):
idx = (np.abs(array-value)).argmin()
return idx
def rate_change_test(results_df, n_offsets, print_res=False):
results_df['log_like_2'] = log_likelihood(results_df.sumsq2, n_offsets)
n_iters_out = results_df.shape[0]
# pn = num params, incl. sum_sq_err and fixed intercept
p1 = 3 #1 # number of parameters for single linear fit
p2 = 5 #3 # number of parameters for 2 part piecewise fit
#if n_offsets > 46:
# results_df['bic_1'] = BIC(results_df.log_like_1, n_offsets, p1)
# results_df['bic_2'] = BIC(results_df.log_like_2, n_offsets, p2)
#else:
# results_df['bic_1'] = AICc(results_df.log_like_1, n_offsets, p1)
# results_df['bic_2'] = AICc(results_df.log_like_2, n_offsets, p2)
#num_1_count = results_df[results_df.bic_1 > results_df.bic_2].shape[0]
#num_2_count = n_iters_out - num_1_count
#num_1_odds = num_1_count / n_iters_out
#num_2_odds = num_2_count / n_iters_out
#if num_1_odds > num_2_odds:
# n_pieces_best = 1
#else:
# n_pieces_best = 2
results_df['bic_1'] = AIC(results_df.log_like_1, n_offsets, p1)
results_df['bic_2'] = AIC(results_df.log_like_2, n_offsets, p2)
num_1_count = results_df[results_df.bic_1 < results_df.bic_2].shape[0]
num_2_count = n_iters_out - num_1_count
num_1_odds = num_1_count / n_iters_out
num_2_odds = num_2_count / n_iters_out
if num_1_count > num_2_count:
n_pieces_best = 1
else:
n_pieces_best = 2
if print_res==True:
if n_pieces_best == 1:
print('1 line fits best. {}/{} ({}% chance)'.format(num_1_count,
n_iters_out,
num_1_odds*100))
print('\nbest fit slip rate results:')
print(results_df.m.describe())
else:
print('2 lines fit best. {}/{} ({}% chance)'.format(num_2_count,
n_iters_out,
num_2_odds*100))
print('\nbest fit slip rate results:')
print('rate 1 (younger):')
print(results_df.m1.describe())
print('rate change timing:')
print(results_df.breakpt.describe())
print('rate 2 (older):')
print(results_df.m2.describe())
print('rate_change:')
print((results_df.m2 - results_df.m1).describe())
return n_pieces_best
def linear_rate_interp(rate, run_time_max, sim_time_max, zero_offset_age=0.,
num_pts=1000):
''' Makes a history array of slip rates. In this case, the slip
rate is a constant from zero_offset_age to run_time_max, and is
zero outside of those boundaries. Returns a Pandas Series.
Arguments:
rate (float): slip rate.
run_time_max (float): Maximum age of slip rate for this MC iteration,
i.e. age of oldest offset feature. Times older
than this will have zero slip rate.
sim_time_max (float): Maximum age of oldest feature in the whole MC
simulation. This determines the length of the
array.
zero_offset_age (float): Youngest age of faulting. Times younger than
this time will have zero rate.
num_pts (int): Number of points in the array.
'''
times = np.linspace(zero_offset_age, sim_time_max, num_pts)
slip_rate_history = pd.Series(index=times, data=np.zeros(num_pts))
slip_rate_history.ix[zero_offset_age : run_time_max] = rate
return slip_rate_history
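# A minimal usage sketch (illustrative values): a constant rate of 2
# (offset units per time unit) active from 0 to 10 ka inside a 15 ka
# simulation window.
#
#   hist = linear_rate_interp(rate=2., run_time_max=10., sim_time_max=15.)
#   # hist is a pandas Series over ages 0..15; ~2.0 up to age 10,
#   # 0.0 for older ages.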
def piecewise_rate_interp(rate1, rate2, breakpt, run_time_max, sim_time_max,
zero_offset_age=0., num_pts=1000):
times = np.linspace(zero_offset_age, sim_time_max, num_pts)
slip_rate_history = np.zeros(num_pts)
zero_offset_idx = find_nearest_index(times, zero_offset_age)
run_time_max_idx = find_nearest_index(times, run_time_max)
breakpt_idx = find_nearest_index(times, breakpt)
slip_rate_history[zero_offset_idx : breakpt_idx] = rate1
slip_rate_history[breakpt_idx : run_time_max_idx] = rate2
return slip_rate_history
def make_rate_hist_array(results_df, age_arr, n_segments=1, num_pts=1000,
zero_offset_age=0., return_array=False,
sim_time_max='mc_age_max'):
if sim_time_max == 'mc_age_max':
sim_time_max = np.max(age_arr)
times = np.linspace(zero_offset_age, sim_time_max, num_pts)
    rate_hist_ar = np.zeros((len(results_df.index), num_pts))
    if n_segments == 1:
        for i, row in enumerate(results_df.index):
            rate = results_df.ix[row, 'm']
            run_time_max = age_arr[row, -1]
            # linear_rate_interp returns a pandas Series; store its
            # values in the shared rate history array
            rate_hist_ar[i, :] = linear_rate_interp(rate, run_time_max,
                                                    sim_time_max,
                                                    zero_offset_age,
                                                    num_pts).values
elif n_segments == 2:
for i, row in enumerate(results_df.index):
rate1 = results_df.ix[row, 'm1']
rate2 = results_df.ix[row, 'm2']
breakpt = results_df.ix[row, 'breakpt']
run_time_max = age_arr[row, -1]
rate_hist_ar[i, :] = piecewise_rate_interp(rate1, rate2,
breakpt, run_time_max,
sim_time_max,
zero_offset_age,
num_pts)
else:
raise Exception('Only 1 or 2 rates supported now.')
return rate_hist_ar
def make_cum_hist_array(rate_hist_array):
return np.cumsum(rate_hist_array, axis=0)
def run_interp_from_gui(offset_list, run_config_dict):
t0 = time.time()
rc = run_config_dict
check_unit_consistency(offset_list)
n_offsets = len(offset_list) + 1
print('sampling offset markers')
age_arr, off_arr = make_age_offset_arrays(offset_list, rc['n_iters'],
force_increasing=rc['force_increasing'],
zero_offset_age=rc['zero_offset_age'],
seed=rc['random_seed'],
seed_value=rc['random_seed_value'])
print('doing fits')
if rc['fit_type'] in ['linear', 'piecewise']:
results_df = do_linear_fits(age_arr, off_arr, fit_type=rc['fit_type'],
n_linear_pieces=rc['n_linear_pieces'],
allow_slip_reversals=rc['slip_reversals'])
else:
raise Exception('fit type not implemented yet')
results_df['log_like_1'] = log_likelihood(results_df.sumsq1, n_offsets)
if rc['fit_type'] == 'linear':
print(results_df.m.describe())
n_pieces_best = 1
elif rc['fit_type'] == 'piecewise':
n_pieces_best = rate_change_test(results_df, n_offsets, print_res=True)
print("\ndone in {:.2f} seconds".format(time.time() - t0))
return results_df, age_arr, off_arr, n_pieces_best
def trim_age_offset_arrays(res_df, age_arr, off_arr=None):
"""
Trims age and offset arrays based on retained values from the results_df.
"""
good_inds = res_df.index.values
age_arr_trim = age_arr[good_inds, :]
if off_arr is not None:
off_arr_trim = off_arr[good_inds, :]
return age_arr_trim, off_arr_trim
else:
return age_arr_trim
def cumulative_offsets(prev_age, prev_rate, new_age, new_rate):
return prev_age * prev_rate + (new_age - prev_age) * new_rate
|
cossatot/slip_rate_calculator
|
slip_rate_tools/slip_rate_tools.py
|
Python
|
mit
| 38,647
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @file czech-transcription.py
# @author Jaxxer <jaxxer@aeternum.cz>
#
# @section LICENSE
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
class Suffixes:
oneSuffix = None
twoToFourSuffix = None
fiveMoreSuffix = None
def __init__(self, oneSuffix, twoToFourSuffix,
fiveMoreSuffix):
self.oneSuffix = oneSuffix
self.twoToFourSuffix = twoToFourSuffix
self.fiveMoreSuffix = fiveMoreSuffix
class _AbstractGroup:
digits = ""
menForm = True
def __init__(self, digits, menForm=True):
if type(digits) is str:
self.digits = self.filterDigits(digits)
else:
raise TypeError("Given number is not 'str'.")
self.menForm = menForm
def get(self):
if not self.digits:
return u""
baseGroup = BaseGroup(self.digits, self.menForm)
wordsList = baseGroup.get()
groupSuffixes = self.getGroupSuffixes()
digitsToInt = int(self.digits)
        if digitsToInt == 1:
return [groupSuffixes.oneSuffix]
elif 1 < digitsToInt and digitsToInt < 5:
wordsList.append(groupSuffixes.twoToFourSuffix)
elif digitsToInt >= 5:
wordsList.append(groupSuffixes.fiveMoreSuffix)
return wordsList
    def filterDigits(self, digits):
        raise NotImplementedError("This method is abstract.")
    def getGroupSuffixes(self):
        raise NotImplementedError("This method is abstract.")
class BillionGroup(_AbstractGroup):
def get(self):
self.menForm = False
return _AbstractGroup.get(self)
def filterDigits(self, digits):
return digits[-12:-9]
    def getGroupSuffixes(self):
return Suffixes(u"jedna miliarda", u"miliardy", u"miliard")
class MillionGroup(_AbstractGroup):
def filterDigits(self, digits):
return digits[-9:-6]
    def getGroupSuffixes(self):
return Suffixes(u"jeden milión", u"milióny", u"miliónů")
class ThousandGroup(_AbstractGroup):
def filterDigits(self, digits):
return digits[-6:-3]
    def getGroupSuffixes(self):
return Suffixes(u"jeden tisíc", u"tisíce", u"tisíc")
class BaseGroup(_AbstractGroup):
def get(self):
wordsList = []
n = Hundreds(self.digits, self.menForm)
wordsList.append(n.getWord())
n = Tens(self.digits, self.menForm)
wordsList.append(n.getWord())
n = Units(self.digits, self.menForm)
wordsList.append(n.getWord())
return wordsList
def filterDigits(self, digits):
return digits[-3:]
class _AbstractBase:
digits = ""
menForm = True
def __init__(self, digits, menForm=True):
self.digits = digits
self.menForm = menForm
def getWord(self):
index = int(self.getIndex())
words = self.getWords()
if index < 0:
return u""
if index < len(words):
return words[index]
else:
            raise IndexError("Index must be less than the length of the words list.")
    def getIndex(self):
        raise NotImplementedError("This method is abstract.")
    def getWords(self):
        raise NotImplementedError("This method is abstract.")
class Units(_AbstractBase):
wordsMenForm = [u"", u"jedna", u"dva", u"tři",
u"čtyři", u"pět", u"šest", u"sedm", u"osm",
u"devět"]
wordsWomenForm = [u"", u"jedna", u"dvě", u"tři",
u"čtyři", u"pět", u"šest", u"sedm", u"osm",
u"devět"]
def getWords(self):
return (self.wordsMenForm if self.menForm
else self.wordsWomenForm)
    def getIndex(self):
        if not self.digits or (len(self.digits) > 1
                               and self.digits[-2] == "1"):
            return -1
        return self.digits[-1]
class Tens(_AbstractBase):
tenToTwenty = [u"deset", u"jedenáct", u"dvanáct",
u"třináct", u"čtrnáct", u"patnáct",
u"šestnáct", u"sedmnáct", u"osmnáct",
u"devatenáct"]
tens = [u"", u"", u"dvacet", u"třicet", u"čtyřicet",
u"padesát", u"šedesát", u"sedmdesát",
u"osmdesát", u"devadesát"]
    def isLessThenTwenty(self):
        return (len(self.digits) > 1
                and self.digits[-2] == "1")
def getWords(self):
return (self.tenToTwenty
if self.isLessThenTwenty()
else self.tens)
def getIndex(self):
if len(self.digits) < 2:
return -1
return (self.digits[-1]
if self.isLessThenTwenty()
else self.digits[-2])
class Hundreds(_AbstractBase):
words = [u"", u"sto", u"dvě stě", u"tři sta",
u"čtyři sta", u"pět set", u"šest set",
u"sedm set", u"osm set", u"devět set"]
def getWords(self):
        return self.words
def getIndex(self):
if len(self.digits) < 3:
return -1
return self.digits[-3]
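# A minimal usage sketch (assuming the classes above): transcribing 123.
#
#   words = [w for w in BaseGroup("123").get() if w]
#   print(u" ".join(words))  # -> u"sto dvacet tři"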
def main():
return 0
if __name__ == '__main__':
main()
|
jaxxer/numberTranscription
|
czech-transcription.py
|
Python
|
gpl-3.0
| 5,120
|
#!/usr/bin/env python2
#
# Copyright (c) 2014, 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006 The Regents of The University of Michigan
# Copyright (c) 2007,2011 The Hewlett-Packard Development Company
# Copyright (c) 2016 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Steve Reinhardt
# Andreas Sandberg
from abc import ABCMeta, abstractmethod
from difflib import SequenceMatcher
import inspect
import os
import re
import sys
import style
import sort_includes
from region import *
from file_types import lang_type
def safefix(fix_func):
""" Decorator for the fix functions of the Verifier class.
This function wraps the fix function and creates a backup file
just in case there is an error.
"""
def safefix_wrapper(*args, **kwargs):
# Check to be sure that this is decorating a function we expect:
# a class method with filename as the first argument (after self)
assert(os.path.exists(args[1]))
self = args[0]
assert(is_verifier(self.__class__))
filename = args[1]
# Now, Let's make a backup file.
from shutil import copyfile
backup_name = filename+'.bak'
copyfile(filename, backup_name)
# Try to apply the fix. If it fails, then we revert the file
# Either way, we need to clean up our backup file
try:
fix_func(*args, **kwargs)
except Exception as e:
# Restore the original file to the backup file
self.ui.write("Error! Restoring the original file.\n")
copyfile(backup_name, filename)
raise
finally:
# Clean up the backup file
os.remove(backup_name)
return safefix_wrapper
def _modified_regions(old, new):
try:
m = SequenceMatcher(a=old, b=new, autojunk=False)
except TypeError:
# autojunk was introduced in Python 2.7. We need a fallback
# mechanism to support old Python versions.
m = SequenceMatcher(a=old, b=new)
regions = Regions()
for tag, i1, i2, j1, j2 in m.get_opcodes():
if tag != "equal":
regions.extend(Region(i1, i2))
return regions
class Verifier(object):
"""Base class for style verifiers
Verifiers check for style violations and optionally fix such
violations. Implementations should either inherit from this class
(Verifier) if they need to work on entire files or LineVerifier if
they operate on a line-by-line basis.
Subclasses must define these class attributes:
languages = set of strings identifying applicable languages
test_name = long descriptive name of test, will be used in
messages such as "error in <foo>" or "invalid <foo>"
opt_name = short name used to generate command-line options to
control the test (--fix-<foo>, --ignore-<foo>, etc.)
"""
__metaclass__ = ABCMeta
def __init__(self, ui, opts, base=None):
self.ui = ui
self.base = base
# opt_name must be defined as a class attribute of derived classes.
# Check test-specific opts first as these have precedence.
self.opt_fix = opts.get('fix_' + self.opt_name, False)
self.opt_ignore = opts.get('ignore_' + self.opt_name, False)
self.opt_skip = opts.get('skip_' + self.opt_name, False)
# If no test-specific opts were set, then set based on "-all" opts.
if not (self.opt_fix or self.opt_ignore or self.opt_skip):
self.opt_fix = opts.get('fix_all', False)
self.opt_ignore = opts.get('ignore_all', False)
self.opt_skip = opts.get('skip_all', False)
def normalize_filename(self, name):
abs_name = os.path.abspath(name)
if self.base is None:
return abs_name
abs_base = os.path.abspath(self.base)
return os.path.relpath(abs_name, start=abs_base)
def open(self, filename, mode):
try:
f = file(filename, mode)
except OSError, msg:
print 'could not open file %s: %s' % (filename, msg)
return None
return f
def skip(self, filename):
# We never want to handle symlinks, so always skip them: If the
# location pointed to is a directory, skip it. If the location is a
# file inside the gem5 directory, it will be checked as a file, so
# symlink can be skipped. If the location is a file outside gem5, we
# don't want to check it anyway.
if os.path.islink(filename):
return True
return lang_type(filename) not in self.languages
def apply(self, filename, regions=all_regions):
"""Possibly apply to specified regions of file 'filename'.
Verifier is skipped if --skip-<test> option was provided or if
file is not of an applicable type. Otherwise file is checked
and error messages printed. Errors are fixed or ignored if
the corresponding --fix-<test> or --ignore-<test> options were
provided. If neither, the user is prompted for an action.
Returns True to abort, False otherwise.
"""
if not (self.opt_skip or self.skip(filename)):
errors = self.check(filename, regions)
if errors and not self.opt_ignore:
if self.opt_fix:
self.fix(filename, regions)
else:
result = self.ui.prompt("(a)bort, (i)gnore, or (f)ix?",
'aif', 'a')
if result == 'f':
self.fix(filename, regions)
elif result == 'a':
return True # abort
return False
@abstractmethod
def check(self, filename, regions=all_regions, fobj=None, silent=False):
"""Check specified regions of file 'filename'.
Given that it is possible that the current contents of the file
differ from the file as 'staged to commit', for those cases, and
maybe others, the argument fobj should be a file object open and reset
with the contents matching what the file would look like after the
        commit. This is needed to keep the messages using 'filename' meaningful.
The argument silent is useful to prevent output when we run check in
the staged file vs the actual file to detect if the user forgot
staging fixes to the commit. This way, we prevent reporting errors
twice in stderr.
Line-by-line checks can simply provide a check_line() method
that returns True if the line is OK and False if it has an
error. Verifiers that need a multi-line view (like
SortedIncludes) must override this entire function.
Returns a count of errors (0 if none), though actual non-zero
count value is not currently used anywhere.
"""
pass
@abstractmethod
def fix(self, filename, regions=all_regions):
"""Fix specified regions of file 'filename'.
Line-by-line fixes can simply provide a fix_line() method that
returns the fixed line. Verifiers that need a multi-line view
(like SortedIncludes) must override this entire function.
"""
pass
class LineVerifier(Verifier):
def check(self, filename, regions=all_regions, fobj=None, silent=False):
close = False
if fobj is None:
fobj = self.open(filename, 'r')
close = True
lang = lang_type(filename)
assert lang in self.languages
errors = 0
for num,line in enumerate(fobj):
if num not in regions:
continue
line = line.rstrip('\n')
if not self.check_line(line, language=lang):
if not silent:
self.ui.write("invalid %s in %s:%d\n" % \
(self.test_name, filename, num + 1))
if self.ui.verbose:
self.ui.write(">>%s<<\n" % line[:-1])
errors += 1
if close:
fobj.close()
return errors
@safefix
def fix(self, filename, regions=all_regions):
f = self.open(filename, 'r+')
lang = lang_type(filename)
assert lang in self.languages
lines = list(f)
f.seek(0)
f.truncate()
for i,line in enumerate(lines):
line = line.rstrip('\n')
if i in regions:
line = self.fix_line(line, language=lang)
f.write(line)
f.write("\n")
f.close()
self.current_language = None
@abstractmethod
def check_line(self, line, **kwargs):
pass
@abstractmethod
def fix_line(self, line, **kwargs):
pass
class Whitespace(LineVerifier):
"""Check whitespace.
Specifically:
- No tabs used for indent
- No trailing whitespace
"""
languages = set(('C', 'C++', 'swig', 'python', 'asm', 'isa', 'scons',
'make', 'dts'))
trail_only = set(('make', 'dts'))
test_name = 'whitespace'
opt_name = 'white'
_lead = re.compile(r'^([ \t]+)')
_trail = re.compile(r'([ \t]+)$')
def skip_lead(self, language):
return language in Whitespace.trail_only
def check_line(self, line, language):
if not self.skip_lead(language):
match = Whitespace._lead.search(line)
if match and match.group(1).find('\t') != -1:
return False
match = Whitespace._trail.search(line)
if match:
return False
return True
def fix_line(self, line, language):
if not self.skip_lead(language) and Whitespace._lead.search(line):
newline = ''
for i,c in enumerate(line):
if c == ' ':
newline += ' '
elif c == '\t':
newline += ' ' * (style.tabsize - \
len(newline) % style.tabsize)
else:
newline += line[i:]
break
line = newline
return line.rstrip()
class SortedIncludes(Verifier):
"""Check for proper sorting of include statements"""
languages = sort_includes.default_languages
test_name = 'include file order'
opt_name = 'include'
def __init__(self, *args, **kwargs):
super(SortedIncludes, self).__init__(*args, **kwargs)
self.sort_includes = sort_includes.SortIncludes()
def check(self, filename, regions=all_regions, fobj=None, silent=False):
close = False
if fobj is None:
fobj = self.open(filename, 'r')
close = True
norm_fname = self.normalize_filename(filename)
        old = [ l.rstrip('\n') for l in fobj ]
if close:
fobj.close()
if len(old) == 0:
return 0
language = lang_type(filename, old[0])
new = list(self.sort_includes(old, norm_fname, language))
modified = _modified_regions(old, new) & regions
if modified:
if not silent:
self.ui.write("invalid sorting of includes in %s\n"
% (filename))
if self.ui.verbose:
for start, end in modified.regions:
self.ui.write("bad region [%d, %d)\n" % (start, end))
return 1
return 0
@safefix
def fix(self, filename, regions=all_regions):
f = self.open(filename, 'r+')
        lines = [ l.rstrip('\n') for l in f ]
        if not lines:
            f.close()
            return
        language = lang_type(filename, lines[0])
        sort_lines = list(self.sort_includes(lines, filename, language))
f.seek(0)
f.truncate()
for i,line in enumerate(sort_lines):
f.write(line)
f.write('\n')
f.close()
class ControlSpace(LineVerifier):
"""Check for exactly one space after if/while/for"""
languages = set(('C', 'C++'))
test_name = 'spacing after if/while/for'
opt_name = 'control'
_any_control = re.compile(r'\b(if|while|for)([ \t]*)\(')
def check_line(self, line, **kwargs):
match = ControlSpace._any_control.search(line)
return not (match and match.group(2) != " ")
def fix_line(self, line, **kwargs):
new_line = ControlSpace._any_control.sub(r'\1 (', line)
return new_line
class LineLength(LineVerifier):
languages = set(('C', 'C++', 'swig', 'python', 'asm', 'isa', 'scons'))
test_name = 'line length'
opt_name = 'length'
def check_line(self, line, **kwargs):
return style.normalized_len(line) <= 79
def fix(self, filename, regions=all_regions, **kwargs):
self.ui.write("Warning: cannot automatically fix overly long lines.\n")
    def fix_line(self, line, **kwargs):
        pass
class ControlCharacters(LineVerifier):
languages = set(('C', 'C++', 'swig', 'python', 'asm', 'isa', 'scons'))
test_name = 'control character'
opt_name = 'ascii'
valid = ('\n', '\t')
invalid = "".join([chr(i) for i in range(0, 0x20) if chr(i) not in valid])
def check_line(self, line, **kwargs):
return self.fix_line(line) == line
def fix_line(self, line, **kwargs):
return line.translate(None, ControlCharacters.invalid)
class BoolCompare(LineVerifier):
languages = set(('C', 'C++', 'python'))
test_name = 'boolean comparison'
opt_name = 'boolcomp'
regex = re.compile(r'\s*==\s*([Tt]rue|[Ff]alse)\b')
def check_line(self, line, **kwargs):
        return self.regex.search(line) is None
def fix_line(self, line, **kwargs):
match = self.regex.search(line)
if match:
if match.group(1) in ('true', 'True'):
line = self.regex.sub('', line)
else:
self.ui.write("Warning: cannot automatically fix "
"comparisons with false/False.\n")
return line
def is_verifier(cls):
"""Determine if a class is a Verifier that can be instantiated"""
return inspect.isclass(cls) and issubclass(cls, Verifier) and \
not inspect.isabstract(cls)
# list of all verifier classes
all_verifiers = [ v for n, v in \
inspect.getmembers(sys.modules[__name__], is_verifier) ]
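# Illustrative sketch (not part of the original file): the minimal surface
# a new line-based verifier must provide. Note it is defined *after* the
# all_verifiers scan above, so it is not auto-registered; a real verifier
# would be placed before the scan so is_verifier() picks it up.
class ExampleNoTrailingSemicolon(LineVerifier):
    """Hypothetical check: flag Python lines that end in a semicolon."""
    languages = set(('python',))
    test_name = 'trailing semicolon'
    opt_name = 'semi'
    def check_line(self, line, **kwargs):
        return not line.rstrip().endswith(';')
    def fix_line(self, line, **kwargs):
        return line.rstrip().rstrip(';')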
|
Weil0ng/gem5
|
util/style/verifiers.py
|
Python
|
bsd-3-clause
| 16,542
|
import os
from functools import partial
from PyQt4.QtGui import QWidget
from PyQt4.QtCore import Qt
from qgis.core import QgsMapLayer
from qgis.gui import QgsExpressionBuilderDialog
from roam.api.utils import layer_by_name
from configmanager.models import QgsLayerModel, QgsFieldModel
from configmanager.editorwidgets.core import ConfigWidget
from configmanager.editorwidgets.uifiles.ui_listwidget_config import Ui_Form
class ListWidgetConfig(Ui_Form, ConfigWidget):
description = 'Select an item from a predefined list'
def __init__(self, parent=None):
super(ListWidgetConfig, self).__init__(parent)
self.setupUi(self)
self.allownull = False
self.orderby = False
self.orderbyCheck.hide()
self.layerRadio.clicked.connect(partial(self.stackedWidget.setCurrentIndex, 0))
self.listRadio.clicked.connect(partial(self.stackedWidget.setCurrentIndex, 1))
self.layermodel = QgsLayerModel(watchregistry=False)
self.layermodel.layerfilter = [QgsMapLayer.VectorLayer]
self.fieldmodel = QgsFieldModel()
self.blockSignals(True)
self.layerCombo.setModel(self.layermodel)
self.keyCombo.setModel(self.fieldmodel)
self.valueCombo.setModel(self.fieldmodel)
self.filterButton.pressed.connect(self.define_filter)
self.fieldmodel.setLayerFilter(self.layerCombo.view().selectionModel())
self.reset()
self.blockSignals(False)
def define_filter(self):
layer = self.layerCombo.currentText()
if not layer:
return
layer = layer_by_name(layer)
dlg = QgsExpressionBuilderDialog(layer, "List filter", self)
text = self.filterText.toPlainText()
dlg.setExpressionText(text)
if dlg.exec_():
self.filterText.setPlainText(dlg.expressionText())
def reset(self):
self.listtype = 'layer'
self.listText.setPlainText('')
self.orderby = False
self.allownull = False
self.filterText.setPlainText('')
self.layerCombo.setCurrentIndex(-1)
self.keyCombo.setCurrentIndex(-1)
self.valueCombo.setCurrentIndex(-1)
def widgetchanged(self):
self.widgetdirty.emit(self.getconfig())
@property
def allownull(self):
return self.allownullCheck.isChecked()
@allownull.setter
def allownull(self, value):
self.allownullCheck.setChecked(value)
@property
def orderby(self):
return self.orderbyCheck.isChecked()
@orderby.setter
def orderby(self, value):
self.orderbyCheck.setChecked(value)
@property
def list(self):
        return self.listText.toPlainText().split('\n')
@property
def filter(self):
return self.filterText.toPlainText()
@property
def layer(self):
return self.layerCombo.currentText()
@property
def key(self):
index_key = self.fieldmodel.index(self.keyCombo.currentIndex(), 0)
fieldname_key = self.fieldmodel.data(index_key, QgsFieldModel.FieldNameRole)
return fieldname_key
@property
def value(self):
index_value = self.fieldmodel.index(self.valueCombo.currentIndex(), 0)
return self.fieldmodel.data(index_value, QgsFieldModel.FieldNameRole)
def getconfig(self):
config = {}
config['allownull'] = self.allownull
config['orderbyvalue'] = self.orderby
if self.layerRadio.isChecked():
subconfig = {}
# TODO Grab the data here and not just the text
subconfig['layer'] = self.layer
subconfig['key'] = self.key
subconfig['value'] = self.value
subconfig['filter'] = self.filter
config['layer'] = subconfig
else:
config['list'] = {}
config['list']['items'] = self.list
return config
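    # Illustrative shape of the dict produced above (values hypothetical):
    #   {'allownull': True,
    #    'orderbyvalue': False,
    #    'layer': {'layer': 'Roads', 'key': 'id', 'value': 'name',
    #              'filter': '"type" = \'highway\''}}
    # or, when the static list radio is checked:
    #   {'allownull': True,
    #    'orderbyvalue': False,
    #    'list': {'items': ['A', 'B', 'C']}}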
    def blockSignals(self, block):
        for child in self.findChildren(QWidget):
            child.blockSignals(block)
        super(ListWidgetConfig, self).blockSignals(block)
def setconfig(self, config):
self.blockSignals(True)
self.allownull = config.get('allownull', True)
self.orderby = config.get('orderbyvalue', False)
#Clear the widgets
self.listText.setPlainText('')
self.keyCombo.clear()
self.valueCombo.clear()
self.filterText.clear()
self.layermodel.refresh()
# Rebind all the values
if 'list' in config:
subconfig = config.get('list', {})
self.listRadio.setChecked(True)
self.stackedWidget.setCurrentIndex(1)
listitems = subconfig.get('items', [])
itemtext = '\n'.join(listitems)
self.listText.setPlainText(itemtext)
else:
self.layerRadio.setChecked(True)
self.stackedWidget.setCurrentIndex(0)
subconfig = config.get('layer', {})
layer = subconfig.get('layer', '') or ''
key = subconfig.get('key', '') or ''
value = subconfig.get('value', '') or ''
            filtertext = subconfig.get('filter', '') or ''
index = self.layerCombo.findData(layer, Qt.DisplayRole)
if index > -1:
self.layerCombo.setCurrentIndex(index)
index = self.layermodel.index(index, 0)
self.fieldmodel.updateLayer(index, None)
keyindex = self.keyCombo.findData(key.lower(), QgsFieldModel.FieldNameRole)
if keyindex > -1:
self.keyCombo.setCurrentIndex(keyindex)
valueindex = self.valueCombo.findData(value.lower(), QgsFieldModel.FieldNameRole)
if valueindex > -1:
self.valueCombo.setCurrentIndex(valueindex)
            self.filterText.setPlainText(filtertext)
self.allownullCheck.setChecked(self.allownull)
self.orderbyCheck.setChecked(self.orderby)
self.blockSignals(False)
|
lmotta/Roam
|
src/configmanager/editorwidgets/listwidget.py
|
Python
|
gpl-2.0
| 5,997
|
# Phatch - Photo Batch Processor
# Copyright (C) 2007-2008 www.stani.be
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# Phatch recommends SPE (http://pythonide.stani.be) for editing python.
# Embedded icon is taken from www.openclipart.org (public domain)
# Follows PEP8
from core import models
from lib.reverse_translation import _t
from lib.imtools import convert_safe_mode
def init():
#lazily import
global Image
from PIL import Image
global generate_layer
from lib.imtools import generate_layer
def watermark(image, mark, horizontal_offset=None, vertical_offset=None,
horizontal_justification=None, vertical_justification=None,
orientation=None, method=None, opacity=100):
"""Adds a watermark to an image."""
if image.mode == 'P':
image = convert_safe_mode(image)
layer = generate_layer(image.size, mark, method,
horizontal_offset, vertical_offset,
horizontal_justification, vertical_justification,
orientation, opacity)
return Image.composite(layer, image, layer)
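# Illustrative usage sketch (not from the original file; the file names and
# the 'method' value are hypothetical -- valid values are whatever
# lib.imtools.generate_layer accepts):
#   init()
#   base = Image.open('photo.png')
#   mark = Image.open('logo.png')
#   marked = watermark(base, mark, method='tile', opacity=50)
#   marked.save('photo_marked.png')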
class Action(models.StampMixin, models.Action):
"""Apply a watermark with tiling, scaling and opacity"""
label = _t('Watermark')
author = 'Stani'
email = 'spe.stani.be@gmail.com'
init = staticmethod(init)
pil = staticmethod(watermark)
version = '0.1'
tags = [_t('default'), _t('filter')]
__doc__ = _t('Apply with tiling, scaling and opacity')
icon = \
'x\xda\x01\x95\nj\xf5\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x000\x00\
\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\x00\x00\x00\x04sBIT\x08\x08\x08\
\x08|\x08d\x88\x00\x00\nLIDATh\x81\xd5\x98[L\\\xc7\x19\xc7\x7fsv\xcf\xee\xd9\
\xb3\xbb`p\x80\xf8\x02\xacq\x02\xb5\xe36\xb1\xad\xa4)(VmZ)i\xdaD\xceC\x926\
\xedK\x13\xa5\xae\x1f\xaa\xbeTj\x9f\xda\xa7F\xeaC\x1b\xc5Ul\xc9\x97\x07G\x89\
\xda\xe2T\xc4v\xdc<\x94\xd8\xa2\x89\x8d\x12n\xbep\xf1\x05X\xc0W.\xe1\xba\xf7\
=g\xfap\xf6,\x0b,\x04\xbc[Y\x1d\xe9\x08\x16fg\xfe\xbfo\xfe\xdf73GH)\xf9\x7fn\
\xca\x83\x16\x90ks\xe6{\xc0\x1e!v\x99\xf0\x9a\x84Z\x01\xeb\x01\x1d\xb8)\xe1\
\x9a\x02\x1f\xe8\xf0a@\xcah\xbe\xe6\x13\xf9\xb2P\x9b\x10\xaa\x06o\x0bE\xd9_T\
[\x8b\xfe\xc8#\xb8\xcb\xcap\xf8|\xc4GG\x89\x8f\x8c0\xd1\xd2B$\x18\xec2\xe0G\
\xdf\x92r \x1f\xf3\xe6\x05\xe0\x8a\x10e\x02>\\\xf3\xdcsukw\xee\xc4]T\x04B\
\xa4f\x10\x19\xb3\t\xa6;:\xb8s\xe2\xc4\xb8\x0c\x85^\xda"es\xaes\xe7\x05\xa0K\
\x88\x7fm8x\xf0\xd95\xfb\xf6!\xef\xde%y\xecXjtK|\x1a!\xf59\xd2\xdf\xcf\xf0\
\xb1cQ\x0c\xa3f\xab\x94C\xb9\xcc\x9ds\x12_\x11\xe2y\xe0Y\x7f}\xbd\xa5q\xedZ\
\x8cD\x82d$B2\x1c&\x19\x0e\x93\x88DHF\xa3$#\x11\x8cH\x04\xd7\xbau\x14\xd5\
\xd5i&\xfc1\xd7\xf9sNb\x01\xbfW\x8b\x8b\x89\x7f\xfc1\xea\x9e=$/_&1=\r\x80\\\
\xb0\x02"\xe3\xb3VQ\x81\x80\x9ft\x0b\xf1\xd6V)\xbb\xeew\xfe\x9c\x00\xda\x84\
\xd05\xd8\xae\x16\x15\x11\xef\xed%\xd6\xdb\x8b\x04\xa4\xa5\x16\x84\xc06\xa8\
\x10"\r\xa2\x08\x81\xe2\xf5\x02\x08\x13\xbe\x07<\x18\x00\x17<\x0c8\xe3\xa3\
\xa3$B!K\xbc\x10\xd6\x03\xf3\xc4\x83\x15y\x1b$16\x86\t(\xb0-\x17\r9\x01|\x13\
\x82\xdd\x10\x89MO{\xa2##(^/\xa6\x10\x98X \x82\xb9\xd5\xb0\x85\x0bK4\xb3\xfd\
\xfd\x18\x80\x03\x86\x1f\x18\x00R\x9aR\x88n\x13v\x8e\xb5\xb4P\xf8\xd4SH\x87#\
\r\x90\xaeoBX\xd5"\xf53\xf1\xd5WL\xf5\xf4\xe0\x04\x04\xf4>8\x00\xc0\x80OL\
\xd8\x99\x98\x9ef\xb4\xb5\x15\xef\xe6\xcd\xa0ii+\xa5\x9b\x108\x80\xd8\xd8\
\x18\xe1\xeb\xd7\x11R\xa2\x80\x11\x86\x96\\\xe6\xcfy\x1f\xb8$\xc4\xa6\x18\
\xdcH\x82\x12\x07\x92B\xe0\\\xbb\x16\xa7\xcf\x87\xa2i\x08U\xc5\x88\xc50\xa3Q\
\x12\xe3\xe3\x98\xa1\x10N\xac\xc8\xa9\xf0\xc9\xb7\xa5|\xee\x81\x02\x00|)\xc4\
\xa7Q\xd8\x9d\x00\x0c \x99z\xcc\x8c>J\xeaqf<.x\xf1;R\x9e\xcce\xee\xbc\x9cF\
\x93p\xd4\xc4\x12l2\'\xde\x00\x12\xcc\x01%S\x7f3\x01QRbh_|1\x99\xeb\xdcy\x01\
\xb8\x0c\xff\x00\xeeH\xe6\x04&\xb1\xc4/\\\x11\xfb\xff\xae\x97^r\x98\x8a\xf2i\
kk\xeb\x1f\x1a\x1a\x1a\x1c\xf7;w\xdeN\xa3\xe7\x84\xf8m\x0c\xde\x8a3\x17u#\
\xf5d\xdaG\x05\xdc~?%g\xce\x80\xc7c\x7f\xfd?\xa6i\xbe\xf6\xe4\x93O\xae\xba\
\xa4\xe6\xf3B\xf3\xb6\x84q;\xcav\xe43\x7f\xda\xd1\xd7\xf7\xee\xcd\x14\x0f\
\xf0\x8c\xa2(\x17\xdb\xdb\xdb\xf7\xaev\xd2\xbc\x01|W\xca\xa8c\xfb\xf6\x0b\
\xb6\xc8L\xebd\x8a\x17^/\xfa\x1bod\x1b\xa2HJ\xf9\xcf\xb6\xb6\xb6\x83\x17.\\\
\xf0d\xeb\x90\xad\xe5\x05\xe0\xd4\xa9SzCC\xc3\x07\xb7\xdf|\xf3\x87x<H\xe6\
\x12\xda`>@\xe1\x0b/ t}\xb9\xe1\xf6\xb9\\\xae/[[[Wt\xc4\xc8\x19\xa0\xa5\xa5e\
\x8b\xdb\xedn\x05~\x9cTU&_~\x19\x1cVNfZ\t\xc0SQA\xc1\xfe\xfd+\x19\xf61!\xc4\
\x17mmm\xfb\xbe\xaecN\x00\xed\xed\xed?UU\xf5K\x8f\xc7\xb3E\xd34\\.\x17\xa1\
\x9d;\x19~\xf5U\xa2~?BQ0\x00\xa7\xcb\x85RS\xc3CG\x8f.\xf4\xfer\xcd\x03\x1clo\
oo\x1c\x19\x19\t,\xd5\xe9\xbe\x8e\x12\x17.\\\xf0\xb8\\\xae\x03\xc0\xeb\x00\
\xba\xae#\xa5$\x99L\x92L&\tm\xdbF3P\xec\xf7\xb3\xce\xe5"\xb9i\x13%%%\xa8\xd6\
\x11z\xc9&\x84\xc0\xe1p\xe0p8p:\x9d\xb8\xddn\xca\xca\xca^\xbcv\xed\xda\x96\
\xd2\xd2\xd2\x9a\xbc\x00tttl\xf5z\xbd\x1f*\x8a\xf2\r)%RJL\xd3\xc4\xe7\xf3\
\xe1HYgjj\n\x9f\xcf\xc7\xfa\xcaJ*++\xf1\xfb\xfd\xcb\nu:\x9d\xa8\xaa\x8a\xc3\
\xe1H\x1f\xbd5MC\xd7uN\x9f>MWW\xd7\xc4\x13O<\x91U\xcf\xaa\x00\xba\xba\xba\
\xf6\xf9\xfd\xfe\xbf\x08!4[\xb8i\x9a$\x93I\x12\x89\x04\x9a\xa6\xb1~\xfdz\xa2\
\xd1(7n\xdc\xb0\x8e\xd0\xf6]@\x08\\.\x17\x05\x05\x058\x9dN\x1c\x0e\x07\x8a\
\xa2\xa0(J\xba\x9f\xdd\xd7\xeb\xf5\x12\x8b\xc58t\xe8\x10\xd3\xd6\xedN\x08!\
\x84\xcc\xb2i\xad\x08\xa0\xad\xadM/..n(..\xfe\x01\x80i\x9a\xc4b1\xe2\xf18\
\xf1x\x1c\xc30\xe6\x06t:y\xf4\xd1G\x19\x1a\x1aBQ\x94\xf4\xaa\xe8\xba\x8e\xae\
\xeb\xb8\xdd\xeey\xe23\x01\x14EA\xd7uzzz\xf8\xe8\xa3\x8f\xd2cJ)\x05\xe0\x10B\
\x18\x0b!\xbe\x16\xa0\xaf\xaf\xef\xa9\xf2\xf2\xf2\xd3\xaa\xaa\x96\x84\xc3afg\
g\x89D"\xf3D/lB\x08\xea\xeb\xeb\x99\x98\x98\xc0\xe1p\xa0\xeb:\x85\x85\x85\
\xf8\xfd\xfe\xb4ul\x08[\xbcm\xa5S\xa7N\xd1\xd55\xff\x86)\xa5T\xb06q)\x8403!\
\x96\x05\xe8\xeb\xeb\xfb\x93\xdb\xed\xfe\xf5\xe4\xe4\xa4:;;\xcbj\x8e\x1dB\
\x08\x8a\x8b\x8b\xf1\xf9|\xd8\x15j\xa1x\x1b\xc0\xe1p\x10\x0e\x879|\xf80SSS\
\x8b\xc62MS\x01\\XU9\xf3\xb6\xba4\xc0\x91#G6\xed\xd9\xb3\xe77B\x08fffV,<\xb3\
9\x9d\xcetr\xdb\xbe\xcf\xb6\x02\xdd\xdd\xdd466f\x1d#\x14\n1<<\x9c:}\x13\x07\
\x8c\xcc|X\x12\xe0\xd6\xad[\x89\xf7\xde{\x8f\xba\xba:\xb6o\xdf\xce\xad[\xb7H\
$\x12\xab\x02\xd0u=m\r\x1b 3\x81\r\xc3\xe0\xe4\xc9\x93\\\xb9r%\xeb\xf7\xef\
\xdd\xbb\xc7\x9d;wZ\xcf\x9e=\xfb\x0b\xac=K,\xec\x93u#\x13B\x88P(\xa4\x02|\
\xfe\xf9\xe7\x9c8q\x82u\xeb\xd6QXX\xb8*\x00\x9f\xcf\x87\xaa\xaa\xf3 \x9cN\'B\
\x08\xa6\xa6\xa6x\xf7\xddw\xb3\x8a7\x0c\x83\xbe\xbe\xbeDOO\xcf\xdf\x1b\x1b\
\x1b\x7f755\x15a\x81u\x96\x04\x10V-Sfgg]\xf6\xdf\xee\xde\xbd\xcb\xa1C\x87\
\x88\xc7\xe3l\xd8\xb0!]\xee\x96k\x0b}oW$)%\xdd\xdd\xdd\x1c8p \xab\xdf\xc3\
\xe10\xdd\xdd\xdd\xd3\xe7\xcf\x9f\xff\xf3\xb9s\xe7\xfe\x06L\x03\x11,\xfb\x98\
\x80\xfc\xba$\x16\x80#\x12\x89\xcc\xdb\xf3M\xd3\xa4\xa1\xa1\x81\xc7\x1f\x7f\
\x9c]\xbbvq\xfb\xf6mb\xb1\xd8\x92\x00>\x9foQ\x894M\x93\xd3\xa7O/i\x99\x91\
\x91\x11\x82\xc1`_SS\xd3\x81X,6\x04\xdc\x03&\x80P\n\xc0N\xe2t\x9b\x07`G\x1fP\
gff\xfcR\xcaE\xd1\xbex\xf1"\xc1`\x90W^y\x85X,\xc6\xf8\xf8\xf8\xe2\x08\x08\
\x81\xd7\xebM\x7f\xd7\xae2\xc7\x8f\x1fgrr\xf1-\xd20\x0c\x06\x06\x06\x92\xc1`\
\xb0\xe9\xb3\xcf>kH\t\x1f\x01\xc6\xb1V \x8cuO2W\xb2\x0f\x08\xc0944$5M\x9b\
\x0e\x04\x02\x05\xaa\xaa\xce\xeb\x10\x0e\x87y\xff\xfd\xf7\xa9\xaf\xaf\xa7\
\xb2\xb2\x92\x9b7o\xce\xdb\x17t]OG^\xd34z{{ill\xccZ\x86\xc3\xe10\xfd\xfd\xfd\
3\x9d\x9d\x9dGo\xdc\xb8\xd1\x96\x12?\x8a\x15\xf9Y,\xfb$\x00CJi.\xfc\xfe\xbc+\
\xa5\x10\xc2\xbe\xf9\xe9@q \x10xl\xc7\x8e\x1d\xef\x94\x97\x97\x07\x8a\x8a\
\x8a\xd2\xfd4M\xc3\xedv\xe3v\xbb\t\x04\x02<\xfd\xf4\xd3\xdc\xbbw\x8fP(\x04@I\
I\t\x85\x85\x85\xb8\xddn\xce\x9c9\xc3\xe5\xcb\x97\xb3\xc4)m\x99\xfe\xa6\xa6\
\xa6wb\xb1\xd8\xadT\xd4\xc7\x80),\xdb\xc42"\xbfH|6\x00\x91\x02\xf0\x00k\x802\
`c}}\xfd\xaf***v\x95\x97\x97+\x8a\xa2\xe0v\xbb\xf1x<h\x9a\x86\xa6ix\xbd^jkk\
\x91R266FUU\x15\x89D\x82\xe3\xc7\x8fgMT\xc30\x08\x06\x83\xc9\xc1\xc1\xc1\xa6\
\xe6\xe6\xe6\x13\xc0\xddT\xd4\xbfb\xce2qR\xef\x02\xb2\x9d\x81\x96\x03p`m\x1a\
>`m\nb\xfd\x96-[\x9e\xd9\xb6m\xdb\xcf\xaa\xaa\xaa\xbc>\x9f/}\xb6\xf1x<i\x98@\
@MM\rW\xaf^]\xd62}}}3\x9d\x9d\x9dG\xfb\xfa\xfal\xcb\x8caYf\x06\x88\x92\x91\
\xb0\xcb\x89_\x04\x90\x82\xb0m\xa4\xa5 \x8a\x80\x12\xe0a\xaf\xd7[\xb1{\xf7\
\xee_VTT\x046n\xdc\x88\xd7\xeb\xc5\xe3\xf1\xa4at]\'\x1a\x8d\xd2\xd9\xd9\x99u\
\xb2\x91\x91\x11\x06\x07\x07\x07\xce\x9d;w \x14\n\r3\x97\xa8\x93\xac\xd02+\
\x01\xb0+\x91\x13p\x03^\xa0\x10k5J\x81\xb2\xda\xda\xda\xbdUUU\xbbkjj\xd4\x82\
\x82\x82y\xab\x11\x89D\xb8~\xfd\xfa\xbc1M\xd3$\x18\x0c&\x83\xc1\xe0\xa7\xcd\
\xcd\xcd\r\xe4`\x99\x85mQ\x15\x92RJ!\x84\xfdn\xca\xbe\x9f\xdb/\xd9b@\xf4\xfc\
\xf9\xf3\x1f\x0c\x0f\x0f_\t\x87\xc3?\xaf\xae\xae^SVV\x86a\x18$\x12\t\xe2\xf1\
\xf8\xbc\xf1\xc2\xe10\x03\x03\x03\xb3\x1d\x1d\x1dG2\xaa\xcc}[fa[\xf2\xc5Vj%\
\xec\x9cP\x99\xb3T!\xf0\x10P\xaa\xaa\xea\xfa\xfa\xfa\xfa\xd7+++\xb7n\xde\xbc\
Yh\x9a\x86\x94\x92\x89\x89\t\x00FGG\x19\x1c\x1c\x1c8{\xf6\xec_C\xa1\xd0\x10y\
\xb0\xcc\x8a\x012@2_\xac\xb9\xb1Jl\x01P\x8c\x95\x1be;v\xec\xf8~uu\xf5\x0b\
\xd5\xd5\xd5n\x8f\xc7C8\x1cfpp0\x19\x0c\x06\xcf677\xff\x83<Zf\xd5\x00)\x08;/\
\xec\n\xa5\x01~\xac\x04\x7f\x08()--\xddTWW\xb7\xbf\xac\xacl\xdd\xf8\xf8\xf8\
\xf4\xa5K\x97\x0e_\xbdz\xb5=%|\x14+\xea9[\xe6\xbe\x002 lK\xd9U\xcaN\xf0"\xac\
}\xc3\xb7{\xf7\xee\xe7[[[\xff=333\x8e\xe5\xf3\tV\xb11\xfd\xcf\x00\x16\x80dZ\
\xca\x93\x02\xf1\xa5~wbY#\x82u\x14\xb0\x8f\x03y\xb1\xcc"=\xf73V\x06\x84}Wu\
\xa7\x1e\x15k\x852\xab\x96\x1d\xf5\xbcXfa\xfb/\xb0\xe2\xc5j\xcb\x8b\xb4\xe9\
\x00\x00\x00\x00IEND\xaeB`\x82O\xe6\x0f\xa5'
|
tibor95/phatch-python2.7
|
phatch/actions/watermark.py
|
Python
|
gpl-3.0
| 9,920
|
# lrucache.py -- a simple LRU (Least-Recently-Used) cache class
# Copyright 2004 Evan Prodromou <evan@bad.dynu.ca>
# Licensed under the Academic Free License 2.1
# arch-tag: LRU cache main module
"""a simple LRU (Least-Recently-Used) cache module
This module provides very simple LRU (Least-Recently-Used) cache
functionality.
An *in-memory cache* is useful for storing the results of an
'expensive' process (one that takes a lot of time or resources) for
later re-use. Typical examples are accessing data from the filesystem,
a database, or a network location. If you know you'll need to re-read
the data again, it can help to keep it in a cache.
You *can* use a Python dictionary as a cache for some purposes.
However, if the results you're caching are large, or you have a lot of
possible results, this can be impractical memory-wise.
An *LRU cache*, on the other hand, only keeps _some_ of the results in
memory, which keeps you from overusing resources. The cache is bounded
by a maximum size; if you try to add more values to the cache, it will
automatically discard the values that you haven't read or written to
in the longest time. In other words, the least-recently-used items are
discarded. [1]_
.. [1]: 'Discarded' here means 'removed from the cache'.
"""
import time
from heapq import heappush, heappop, heapify
__version__ = "0.2"
__all__ = ['CacheKeyError', 'LRUCache', 'DEFAULT_SIZE']
__docformat__ = 'reStructuredText en'
DEFAULT_SIZE = 16
"""Default size of a new LRUCache object, if no 'size' argument is given."""
class CacheKeyError(KeyError):
"""Error raised when cache requests fail
When a cache record is accessed which no longer exists (or never did),
this error is raised. To avoid it, you may want to check for the existence
of a cache record before reading or deleting it."""
pass
class LRUCache:
class __Node:
"""Record of a cached value. Not for public consumption."""
def __init__(self, key, obj, timestamp):
object.__init__(self)
self.key = key
self.obj = obj
self.atime = timestamp
self.mtime = self.atime
def __cmp__(self, other):
return cmp(self.atime, other.atime)
def __repr__(self):
return "<%s %s => %s (%s)>" % \
(self.__class__, self.key, self.obj,
time.asctime(time.localtime(self.atime)))
def __init__(self, size=DEFAULT_SIZE):
# Check arguments
        if not isinstance(size, int):
            raise TypeError(size)
        if size <= 0:
            raise ValueError(size)
object.__init__(self)
self.__heap = []
self.__dict = {}
self.size = size
"""Maximum size of the cache.
If more than 'size' elements are added to the cache,
the least-recently-used ones will be discarded."""
def __len__(self):
return len(self.__heap)
def __contains__(self, key):
return key in self.__dict
def __setitem__(self, key, obj):
if key in self.__dict:
node = self.__dict[key]
node.obj = obj
node.atime = time.time()
node.mtime = node.atime
heapify(self.__heap)
else:
# size may have been reset, so we loop
while len(self.__heap) >= self.size:
lru = heappop(self.__heap)
del self.__dict[lru.key]
node = self.__Node(key, obj, time.time())
self.__dict[key] = node
heappush(self.__heap, node)
def __getitem__(self, key):
if key not in self.__dict:
raise CacheKeyError(key)
else:
node = self.__dict[key]
node.atime = time.time()
heapify(self.__heap)
return node.obj
def __delitem__(self, key):
if key not in self.__dict:
raise CacheKeyError(key)
else:
node = self.__dict[key]
del self.__dict[key]
self.__heap.remove(node)
heapify(self.__heap)
return node.obj
def __iter__(self):
copy = self.__heap[:]
while len(copy) > 0:
node = heappop(copy)
yield node.key
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
# automagically shrink heap on resize
if name == 'size':
while len(self.__heap) > value:
lru = heappop(self.__heap)
del self.__dict[lru.key]
def __repr__(self):
return "<%s (%d elements)>" % (str(self.__class__), len(self.__heap))
def mtime(self, key):
"""Return the last modification time for the cache record with key.
May be useful for cache instances where the stored values can get
'stale', such as caching file or network resource contents."""
if key not in self.__dict:
raise CacheKeyError(key)
else:
node = self.__dict[key]
return node.mtime
if __name__ == "__main__":
cache = LRUCache(25)
print(cache)
for i in range(50):
cache[i] = str(i)
print(cache)
if 46 in cache:
del cache[46]
print(cache)
cache.size = 10
print(cache)
cache[46] = '46'
print(cache)
print((len(cache)))
for c in cache:
print(c)
print(cache)
print((cache.mtime(46)))
for c in cache:
print(c)
|
Jumpscale/jumpscale_core8
|
lib/JumpScale/tools/cachelru/LRUCache.py
|
Python
|
apache-2.0
| 5,508
|
#! /usr/bin/env python2
import sys
shift = 1  # when true, the next character starts a new word (high byte)
chars = []  # packed 16-bit words, two ASCII characters per word
print 'Enter an empty line to exit'
while 1:
str = raw_input(">")
if str=='': break
for c in str:
n = ord(c)
if shift:
chars.append(n << 8)
shift = 0
else:
shift = 1
chars[-1] |= n
#add a newline where the enter key was pressed
if shift:
chars.append(0x0A << 8)
shift = 0
else:
shift = 1
chars[-1] |= 0x0A
sys.stdout.write('DAT ')
#but remove the very last newline because it actually shouldn't be there
if shift: chars[-1] &= 0xFF00
else: chars[-1] = 0x0000
last = ''
for n in chars:
if (last !=''): sys.stdout.write(last+', ')
last = hex(n)
print last
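# Worked example (illustrative): entering "Hey" packs two characters per
# 16-bit word, first character in the high byte:
#   'H' (0x48), 'e' (0x65) -> 0x4865 ; 'y' (0x79) -> 0x7900
# so the script prints: DAT 0x4865, 0x7900
# (the low byte of the final word is zeroed where the synthetic trailing
# newline is stripped back out)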
|
cubeOS/cubeOS-alpha
|
packascii.py
|
Python
|
mit
| 645
|
# coding=utf-8
from django.shortcuts import render, get_object_or_404
from django.views import generic
from django.db import models
from watson import search as watson
from .models import Product, Category
from django.views.decorators.cache import cache_page
class ProductListView(generic.ListView):
template_name = 'catalog/product_list.html'
context_object_name = 'products'
paginate_by = 12
def get_queryset(self):
queryset = Product.objects.all()
q = self.request.GET.get('q','')
if q:
'''
queryset = queryset.filter(
models.Q(name__icontains=q) | models.Q(category__name__icontains=q) \
| models.Q(description__icontains=q)
)
'''
            # search with the watson library
queryset = watson.filter(queryset, q)
return queryset
product_list = ProductListView.as_view()
class CategoryListView(generic.ListView):
template_name = 'catalog/category.html'
context_object_name = 'product_list'
paginate_by = 3
def get_queryset(self):
return Product.objects.filter(category__slug=self.kwargs['slug'])
def get_context_data(self, **kwargs):
context = super(CategoryListView, self).get_context_data(**kwargs)
context['current_category'] = get_object_or_404(Category, slug=self.kwargs['slug'])
return context
category = CategoryListView.as_view()
#@cache_page(60)
def product(request, slug):
product = Product.objects.get(slug=slug)
context = {
'product': product
}
return render(request, 'catalog/product.html', context)
|
lucaslamounier/django-ecommerce
|
catalog/views.py
|
Python
|
cc0-1.0
| 1,645
|
import functools
import numpy as np
import pandas as pd
from scipy.stats import norm as ndist
import regreg.api as rr
from selection.tests.instance import gaussian_instance
from selection.learning.utils import (partial_model_inference,
pivot_plot,
lee_inference)
from selection.learning.core import normal_sampler, keras_fit
from selection.learning.learners import sparse_mixture_learner
def simulate(n=2000, p=500, s=20, signal=(3 / np.sqrt(2000), 4 / np.sqrt(2000)), sigma=2, alpha=0.1, B=10000):
# description of statistical problem
X, y, truth = gaussian_instance(n=n,
p=p,
s=s,
equicorrelated=False,
rho=0.5,
sigma=sigma,
signal=signal,
random_signs=True,
scale=False)[:3]
print(np.linalg.norm(truth))
dispersion = sigma**2
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
smooth_sampler = normal_sampler(S, covS)
def meta_algorithm(XTX, XTXi, lam, sampler):
p = XTX.shape[0]
success = np.zeros(p)
loss = rr.quadratic_loss((p,), Q=XTX)
pen = rr.l1norm(p, lagrange=lam)
scale = 0.
noisy_S = sampler(scale=scale)
loss.quadratic = rr.identity_quadratic(0, 0, -noisy_S, 0)
problem = rr.simple_problem(loss, pen)
soln = problem.solve(max_its=300, tol=1.e-10)
success += soln != 0
return tuple(sorted(np.nonzero(success)[0]))
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
lam = 4. * np.sqrt(n)
selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, lam)
# run selection algorithm
df = partial_model_inference(X,
y,
truth,
selection_algorithm,
smooth_sampler,
fit_probability=keras_fit,
fit_args={'epochs':30, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'},
success_params=(1, 1),
B=B,
alpha=alpha,
learner_klass=sparse_mixture_learner)
lee_df = lee_inference(X,
y,
lam,
dispersion,
truth,
alpha=alpha)
return pd.merge(df, lee_df, on='variable')
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
U = np.linspace(0, 1, 101)
plt.clf()
for i in range(500):
df = simulate(B=10000)
csvfile = 'lee_multi_500.csv'
outbase = csvfile[:-4]
if df is not None and i > 0:
try: # concatenate to disk
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, length_ax = pivot_plot(df, outbase)
#pivot_ax.plot(U, sm.distributions.ECDF(df['lee_pivot'][~np.isnan(df['lee_pivot'])])(U), 'g', label='Lee', linewidth=3)
pivot_ax.figure.savefig(outbase + '.pdf')
length_ax.scatter(df['naive_length'], df['lee_length'])
length_ax.figure.savefig(outbase + '_lengths.pdf')
|
selective-inference/selective-inference
|
doc/learning_examples/multi_target/lee_multi_500.py
|
Python
|
bsd-3-clause
| 3,820
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from ....datehandler import parser
from ....display.place import displayer as place_displayer
from ....lib.eventtype import EventType
from ....lib.eventroletype import EventRoleType
from .. import Rule
#-------------------------------------------------------------------------
#
# HasBirth
#
#-------------------------------------------------------------------------
class HasBirth(Rule):
"""Rule that checks for a person with a birth of a particular value"""
labels = [ _('Date:'), _('Place:'), _('Description:') ]
name = _('People with the <birth data>')
description = _("Matches people with birth data of a particular value")
category = _('Event filters')
allow_regex = True
def prepare(self, db):
if self.list[0]:
self.date = parser.parse(self.list[0])
else:
self.date = None
    def apply(self, db, person):
for event_ref in person.get_event_ref_list():
if not event_ref:
continue
elif event_ref.role != EventRoleType.PRIMARY:
# Only match primaries, no witnesses
continue
event = db.get_event_from_handle(event_ref.ref)
if event.get_type() != EventType.BIRTH:
# No match: wrong type
continue
if not self.match_substring(2, event.get_description()):
# No match: wrong description
continue
if self.date:
if not event.get_date_object().match(self.date):
# No match: wrong date
continue
if self.list[1]:
place_id = event.get_place_handle()
if place_id:
place = db.get_place_from_handle(place_id)
place_title = place_displayer.display(db, place)
if not self.match_substring(1, place_title):
# No match: wrong place
continue
else:
# No match: event has no place, but place specified
continue
# This event matched: exit positive
return True
# Nothing matched: exit negative
return False
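# Illustrative use (hypothetical; normally the filter framework drives this,
# and the constructor signature comes from the Rule base class -- argument
# order follows 'labels' above: date, place, description):
#   rule = HasBirth(['before 1900', 'London', ''])
#   rule.prepare(db)
#   born_in_london = [p for p in people if rule.apply(db, p)]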
|
sam-m888/gprime
|
gprime/filters/rules/person/_hasbirth.py
|
Python
|
gpl-2.0
| 3,492
|
from syncloudlib import logger
from syncloud_platform.insider.config import Port
from syncloud_platform.insider.manual import ManualPortMapper
from syncloud_platform.insider.port_prober import PortProber, NoneProber
from syncloud_platform.insider.util import port_to_protocol, is_web_port
from IPy import IP
class PortDrill:
def __init__(self, port_config, port_mapper, port_prober):
self.port_prober = port_prober
self.logger = logger.get_logger('PortDrill')
self.port_config = port_config
self.port_mapper = port_mapper
def remove_all(self):
for mapping in self.list():
self.remove(mapping.local_port, mapping.protocol)
self.port_config.remove_all()
def get(self, local_port, protocol):
return self.port_config.get(local_port, protocol)
def list(self):
return self.port_config.load()
def external_ip(self):
return self.port_mapper.external_ip()
def remove(self, local_port, protocol):
mapping = self.port_config.get(local_port, protocol)
if mapping:
self.port_mapper.remove_mapping(mapping.local_port, mapping.external_port, protocol)
self.port_config.remove(local_port, protocol)
def sync_new_port(self, local_port, protocol):
self.logger.info('Sync one mapping: {0}'.format(local_port))
port_to_try = local_port
lower_limit = 10000
found_external_port = None
retries = 10
message = 'no message from dns service'
        for i in range(retries):
self.logger.info('Trying {0}'.format(port_to_try))
external_port = self.port_mapper.add_mapping(local_port, port_to_try, protocol)
if not is_web_port(local_port):
self.logger.info('not probing non http(s) ports')
found_external_port = external_port
break
external_ip = self.port_mapper.external_ip()
if external_ip is not None:
ip_version = IP(external_ip).version()
if ip_version == 6:
self.logger.info('probing of IPv6 is not supported yet')
found_external_port = external_port
break
probe_success, message = self.port_prober.probe_port(
external_port, port_to_protocol(local_port), external_ip)
if probe_success:
found_external_port = external_port
break
self.port_mapper.remove_mapping(local_port, external_port, protocol)
if port_to_try == local_port:
port_to_try = lower_limit
else:
self.logger.info('external port: {0}'.format(external_port))
port_to_try = external_port + 1
if not found_external_port:
raise Exception('Unable to verify open ports, {0}'.format(message))
mapping = Port(local_port, found_external_port, protocol)
self.port_config.add_or_update(mapping)
return mapping
def sync_existing_ports(self):
for mapping in self.list():
self.logger.info('syncing existing port mapping: {0}'.format(mapping))
self.port_mapper.add_mapping(mapping.local_port, mapping.external_port, mapping.protocol)
def available(self):
return self.port_mapper is not None
class NonePortDrill:
def __init__(self):
self.logger = logger.get_logger('NonePortDrill')
def remove_all(self):
pass
def get(self, local_port, protocol):
return Port(local_port, None, protocol)
def list(self):
return []
def external_ip(self):
return None
def remove(self, local_port, protocol):
pass
def sync_one_mapping(self, local_port, protocol):
pass
def sync_new_port(self, local_port, protocol):
self.logger.info('port drill is not enabled, not adding {0} {1} mapping'.format(local_port, protocol))
def sync(self):
pass
def available(self):
return False
def sync_existing_ports(self):
pass
class PortDrillFactory:
def __init__(self, user_platform_config, port_config, port_mapper_factory):
self.port_config = port_config
self.user_platform_config = user_platform_config
self.port_mapper_factory = port_mapper_factory
def get_drill(self, upnp_enabled, external_access, manual_public_ip, manual_certificate_port, manual_access_port):
if not external_access:
return NonePortDrill()
drill = None
if upnp_enabled:
mapper = self.port_mapper_factory.provide_mapper()
else:
mapper = ManualPortMapper(manual_public_ip, manual_certificate_port, manual_access_port)
if mapper:
prober = self._get_port_prober()
drill = PortDrill(self.port_config, mapper, prober)
return drill
def _get_port_prober(self):
if self.user_platform_config.is_redirect_enabled():
return PortProber(
self.user_platform_config.get_redirect_api_url(),
self.user_platform_config.get_domain_update_token())
else:
return NoneProber()
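# Illustrative wiring sketch (not part of the original file; the config and
# factory objects are hypothetical stand-ins provided elsewhere in the
# platform):
#   factory = PortDrillFactory(user_platform_config, port_config,
#                              port_mapper_factory)
#   drill = factory.get_drill(upnp_enabled=True, external_access=True,
#                             manual_public_ip=None,
#                             manual_certificate_port=None,
#                             manual_access_port=None)
#   if drill.available():
#       mapping = drill.sync_new_port(443, 'TCP')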
|
syncloud/platform
|
src/syncloud_platform/insider/port_drill.py
|
Python
|
gpl-3.0
| 5,284
|
import pytest
from twisted.internet import defer
from webmonitor.monitor import WebMonitor
def defer_with_content(content):
'''
return given content via deferred
'''
d = defer.Deferred()
d.callback(content)
return d
def defer_raises(exc_instance):
'''
raises exception inside a deferred chain, use this for checking your
errbacks
'''
def _raise(ignored):
raise exc_instance
d = defer.Deferred()
d.addCallback(_raise)
d.callback(None)
return d
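def _example_errback_usage():
    '''
    Illustrative helper (not an actual test): shows how defer_raises
    exercises an errback. Because the deferred has already fired, the
    Failure is delivered synchronously when the errback is added.
    '''
    failures = []
    d = defer_raises(ValueError('boom'))
    d.addErrback(failures.append)
    assert failures and failures[0].check(ValueError)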
@pytest.fixture(scope='function')
def monitor(request):
monitor = WebMonitor('http://foo.com', 'lorem', 1)
return monitor
|
eddwardo/webmonitor
|
tests/conftest.py
|
Python
|
gpl-3.0
| 646
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Verify the command modules by install them using PIP"""
import sys
import os.path
import subprocess
import glob
import filecmp
import logging
import unittest
from pkg_resources import working_set
import automation.utilities.path as automation_path
from automation.utilities.const import COMMAND_MODULE_PREFIX
logger = logging.getLogger('azdev.verify.package')
# The package verifications are organized as unittests so as to gather better
# output and error handling. This also ensures that all items are run and all
# errors are collected.
class PackageVerifyTests(unittest.TestCase):
def __init__(self, method_name, **kwargs):
super(PackageVerifyTests, self).__init__(method_name)
self.test_data = kwargs
def test_azure_cli_module_manifest_and_azure_bdist(self):
path = self.test_data['module_path']
self.assertTrue(os.path.isdir(path), msg='Path {} does not exist'.format(path))
manifest_file = os.path.join(path, 'MANIFEST.in')
self.assertTrue(os.path.isfile(manifest_file), msg='Manifest file {} missing'.format(manifest_file))
# Check azure_bdist_wheel.py file for module.
# Assumption is that core has the correct file always so compare against that.
core_azure_bdist_wheel = os.path.join(automation_path.get_repo_root(), 'src', 'azure-cli-core', 'azure_bdist_wheel.py')
mod_azure_bdist_wheel = os.path.join(path, 'azure_bdist_wheel.py')
if os.path.isfile(mod_azure_bdist_wheel):
self.assertTrue(filecmp.cmp(core_azure_bdist_wheel, mod_azure_bdist_wheel), "Make sure {} is correct. It should look like {}".format(mod_azure_bdist_wheel, core_azure_bdist_wheel))
def test_azure_cli_installation(self):
az_output = subprocess.check_output(['az', '--debug'], stderr=subprocess.STDOUT, universal_newlines=True)
self.assertNotIn('Error loading command module', az_output, msg='Module loading error message showed up.')
def test_azure_cli_module_installation(self):
expected_modules = set([n for n, _ in automation_path.get_command_modules_paths(include_prefix=True)])
installed_command_modules = [dist.key for dist in list(working_set) if dist.key.startswith(COMMAND_MODULE_PREFIX)]
logger.info('Installed command modules %s', installed_command_modules)
missing_modules = expected_modules - set(installed_command_modules)
self.assertFalse(missing_modules,
msg='Following modules are not installed successfully: {}'.format(', '.join(missing_modules)))
def init(root):
parser = root.add_parser('package', help='Verify the basic requirements for command module packages.')
parser.add_argument('build_folder', help='The path to the folder contains all wheel files.')
parser.set_defaults(func=run_verifications)
def run_verifications(args):
suite = unittest.TestSuite()
suite.addTest(PackageVerifyTests('test_azure_cli_installation'))
suite.addTest(PackageVerifyTests('test_azure_cli_module_installation'))
for _, path in automation_path.get_all_module_paths():
suite.addTest(PackageVerifyTests('test_azure_cli_module_manifest_and_azure_bdist', module_path=path))
runner = unittest.TextTestRunner(verbosity=2)
result = runner.run(suite)
sys.exit(not result.wasSuccessful())
|
yugangw-msft/azure-cli
|
tools/automation/verify/verify_packages.py
|
Python
|
mit
| 3,692
|
"""Test praw.models.list.base."""
import pytest
from praw.models.list.base import BaseList
class TestBaseList(object):
def setup(self):
self._prev_child_attribute = BaseList.CHILD_ATTRIBUTE
self._prev_convert = BaseList._convert
def teardown(self):
BaseList.CHILD_ATTRIBUTE = self._prev_child_attribute
BaseList._convert = staticmethod(self._prev_convert)
def test__init__CHILD_ATTRIBUTE_not_set(self):
with pytest.raises(NotImplementedError):
BaseList(None, None)
def test__init___convert_not_extended(self):
BaseList.CHILD_ATTRIBUTE = 'praw'
with pytest.raises(NotImplementedError):
BaseList(None, {'praw': [1]})
def test__contains__(self):
BaseList._convert = staticmethod(lambda _a, _b: None)
BaseList.CHILD_ATTRIBUTE = 'praw'
items = ['foo', 1, {'a': 'b'}]
base_list = BaseList(None, {'praw': items})
for item in items:
assert item in base_list
def test__getitem__(self):
BaseList._convert = staticmethod(lambda _a, _b: None)
BaseList.CHILD_ATTRIBUTE = 'praw'
items = ['foo', 1, {'a': 'b'}]
base_list = BaseList(None, {'praw': items})
for i, item in enumerate(items):
assert item == base_list[i]
def test__iter__(self):
BaseList._convert = staticmethod(lambda _a, _b: None)
BaseList.CHILD_ATTRIBUTE = 'praw'
items = ['foo', 1, {'a': 'b'}]
base_list = BaseList(None, {'praw': items})
for i, item in enumerate(base_list):
assert items[i] == item
def test__str__(self):
BaseList._convert = staticmethod(lambda _a, _b: None)
BaseList.CHILD_ATTRIBUTE = 'praw'
items = ['foo', 1, {'a': 'b'}]
base_list = BaseList(None, {'praw': items})
assert str(items) == str(base_list)
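# Illustrative sketch (hypothetical, not from praw itself) of the contract
# the tests above exercise: a concrete subclass sets CHILD_ATTRIBUTE and
# supplies a _convert(reddit, data) hook.
class _ExampleList(BaseList):
    CHILD_ATTRIBUTE = 'children'
    @staticmethod
    def _convert(reddit, data):
        # real subclasses wrap 'data' in a model class
        return data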
|
RGood/praw
|
tests/unit/models/list/test_base.py
|
Python
|
bsd-2-clause
| 1,889
|
import urllib
from url_shortener import URLShortener
class XedccShortener (URLShortener):
    def __init__(self, *args, **kwargs):
        self.name = "Xed.cc"
        super(XedccShortener, self).__init__(*args, **kwargs)
    def _shorten(self, url):
        answer = url
        api = urllib.urlopen("http://xed.cc/yourls-api.php?action=shorturl&format=simple&url=" + urllib.quote(url))
        if api.getcode() == 200:
            answer = api.read()
        api.close()
        return answer
    def created_url(self, url):
        return 'xed.cc' in url.lower()
|
codeofdusk/ProjectMagenta
|
src/url_shortener/shorteners/xedcc.py
|
Python
|
gpl-2.0
| 508
|
__author__ = 'Chao'
import numpy as np
from sklearn import svm, cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
activity_label = {'1': 'WALKING',
'2': 'WALKING_UPSTAIRS',
'3': 'WALKING_DOWNSTAIRS',
'4': 'SITTING',
'5': 'STANDING',
'6': 'LAYING'}
# ############################# Open data set ###############################
X = []
y = []
X_fin = []
y_fin = []
print "Opening dataset..."
try:
    with open("X_train.txt", 'rU') as f:
        for line in f:
            pair = line.strip("\n").split(" ")
            while "" in pair:
                pair.remove("")
            X.append([float(v) for v in pair])
    with open("y_train.txt", 'rU') as f:
        for line in f:
            y.append(int(line.strip("\n")[0]))
except Exception:
    print "Error in reading the train set file."
    exit()
try:
    with open("X_test.txt", 'rU') as f:
        for line in f:
            pair = line.strip("\n").split(" ")
            while "" in pair:
                pair.remove("")
            X_fin.append([float(v) for v in pair])
    with open("y_test.txt", 'rU') as f:
        for line in f:
            y_fin.append(int(line.strip("\n")[0]))
except Exception:
    print "Error in reading the test set file."
    exit()
print "Dataset opened."
X = np.array(X)
y = np.array(y)
###### Separate data set into 70% training set and 30% test set
print "Separating data into 70% training set & 30% test set..."
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.3)
print "Dataset separated."
###### Get best parameters ######
############################### Kernel=Linear ###############################
print "######## SVM, Kernel = Linear #########"
#C_linear = [0.1, 1, 10, 100]
C_linear = [3]
result_linear = []
print "C value chosen from: ", C_linear
print "Calculating accuracy with K-fold..."
for C in C_linear:
svc_linear = svm.SVC(kernel='linear', C=C)
scores = cross_validation.cross_val_score(svc_linear, X_train, y_train, scoring='accuracy', cv=6)
result_linear.append(scores.mean())
print "result:", result_linear
#Result with different C are equal, so here choose C=1 directly as the best parameter.
best_param_linear = {"C": 3}
#linear_test_score = svm.SVC(kernel='linear', C=best_param_linear.get("C")).fit(X_test, y_test).score(X_test, y_test)
#rbf_test_score = svm.SVC(kernel='rbf', C=best_param_rbf.get("C"), gamma=best_param_rbf.get("gamma")).fit(X_test, y_test).score(X_test, y_test)
#poly_test_score = svm.SVC(kernel='poly', C=best_param_poly.get("C"), degree=best_param_poly.get("degree")).fit(X_test, y_test).score(X_test, y_test)
linear_test = svm.SVC(kernel='linear', C=best_param_linear.get("C")).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(len(X_fin)):
count2 += 1
a = linear_test.predict(X_fin[i])
b = y_fin[i]
if a == [b]:
count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
#print "Linear Kernel test score: ", linear_test_score
#print "RBF Kernel test score: ", rbf_test_score
#print "Poly Kernel test score: ", poly_test_score
################################### Random Forests ####################################
print "##### Random Forest ######"
n_estimators_list = range(1, 16, 1)
result_random_forests = []
max_score_rf = float("-inf")
best_param_rf = None
for n_estimators in n_estimators_list:
print "Testing n_estimators = ", n_estimators
rf_clf = RandomForestClassifier(n_estimators=n_estimators, max_depth=None, min_samples_split=1, random_state=0)
scores = cross_validation.cross_val_score(rf_clf, X_train, y_train, scoring="accuracy", cv=6)
result_random_forests.append(scores.mean())
if scores.mean() > max_score_rf:
max_score_rf = scores.mean()
best_param_rf = {"n_estimators": n_estimators}
print "number of trees: ", n_estimators_list
print "results: ", result_random_forests
print "best accuracy: ", max_score_rf
print "best parameter: ", best_param_rf
rf_clf_test_score = RandomForestClassifier(n_estimators=best_param_rf.get("n_estimators"), max_depth=None,
min_samples_split=1, random_state=0).fit(X_test, y_test).score(X_test,
y_test)
print "Test set accuracy: ", rf_clf_test_score
rf_clf = RandomForestClassifier(n_estimators=best_param_rf.get("n_estimators"), max_depth=None, min_samples_split=1,
random_state=0).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(len(X_fin)):
count2 += 1
a = rf_clf.predict(X_fin[i])
b = y_fin[i]
print "+ ", a[0],
print "- ", b
if a == [b]:
count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
################################### K Nearest Neighbors ####################################
print "##### K Nearest Neighbors ######"
n_neighbors_list = range(1, 6, 1)
result_n_neighbors = []
max_score_knn = float("-inf")
best_param_knn = None
for n_neighbors in n_neighbors_list:
print "Testing n_neighbors = ", n_neighbors
neigh = KNeighborsClassifier(n_neighbors=n_neighbors)
scores = cross_validation.cross_val_score(neigh, X_train, y_train, scoring="accuracy", cv=6)
result_n_neighbors.append(scores.mean())
if scores.mean() > max_score_knn:
max_score_knn = scores.mean()
best_param_knn = {"n_neighbors": n_neighbors}
print "number of neighbors: ", n_neighbors_list
print "results: ", result_n_neighbors
print "best accuracy: ", max_score_knn
print "best parameter: ", best_param_knn
neigh_test_score = KNeighborsClassifier(best_param_knn.get("n_neighbors")).fit(X_test, y_test).score(X_test, y_test)
print "Test set accuracy: ", neigh_test_score
neigh = KNeighborsClassifier(best_param_knn.get("n_neighbors")).fit(X, y)
count1 = 0
count2 = 0
for i in xrange(len(X_fin)):
count2 += 1
a = neigh.predict(X_fin[i])
b = y_fin[i]
if a == [b]:
count1 += 1
print "Total cases: ", count2
print "Correct Prediction: ", count1
print "Correct Rate: ", float(count1) / count2
|
Sapphirine/Human-Activity-Monitoring-and-Prediction
|
analysis.py
|
Python
|
apache-2.0
| 6,718
|
#! /usr/bin/env python
import struct
from asciipixel import AsciiPixel
class Screen:
"""
stores a screen full of ascii characters
and their respective RGB values
i think the client should tell the server what size
screen they want (number of tiles)
"""
# units are in ascii pixels, not pixels
DEFAULT_WIDTH = 5
DEFAULT_HEIGHT = 5
def __init__(self, screen=None):
# note that this makes the list row-major
# i.e. y,x
        if screen is None:
self.width = Screen.DEFAULT_WIDTH
self.height = Screen.DEFAULT_HEIGHT
# then initially populate with dummy cells
# cool! i almost never use list comprehensions!
#FIXME: seems like too much processing work for something
# that is just going to be replaced anyway
self.screen = [[AsciiPixel() for j in range(self.width)]
for i in range(self.height)]
else:
self.screen = screen
self.height = len(screen)
self.width = len(screen[0])
def setCell(self, cell, x, y):
self.screen[y][x] = cell
def getCell(self, x, y):
return self.screen[y][x]
def setScreen(self, screen):
self.screen = screen
def getScreen(self):
return self.screen
def __repr__(self):
ret = ""
for row in self.screen:
rowStr = ""
for cell in row:
rowStr += repr(cell)
ret += rowStr + '\n'
return ret
def __str__(self):
"""
at least a little confusing that string does
something completely unlike repr in
this case. maybe use a different
naming convention? i mean, this
is really just a custom pickle job
"""
ret = ""
for row in self.screen:
rowStr = ""
for cell in row:
rowStr += str(cell)
rowStr += "|" # ascii-pixel separator
ret += rowStr + '\n'
return ret
# note: not part of class
def destr(screenString):
"""
should return a valid screen object
as defined by input string
(think depickling)
"""
#print "making screen from this received string: %s" % screenString
rowList = []
curRow = []
curAsciiStr = ""
curStr = ""
for ch in screenString:
if ch == '\n':
# then we are done with the row and append it
# and start a new row
rowList.append(curRow)
curRow = []
elif ch == '|':
# then we're ready to make our current asciipixel
            curAsciiPixel = AsciiPixel(int(curAsciiStr), int(curStr))
            curAsciiStr = ""
curRow.append(curAsciiPixel)
curStr = ""
elif ch == ',':
# then we're now building the color string
curAsciiStr = curStr[:]
curStr = ""
else:
curStr += ch
ret = Screen(rowList)
return ret
def byte(screen):
msg = bytearray()
msg.extend(struct.pack("BB", screen.height, screen.width))
for row in screen.screen:
for asciiPixel in row:
msg.extend(struct.pack(
"BB", asciiPixel.ascii, AsciiPixel.getColorCode(
asciiPixel.color[0], asciiPixel.bgColor[0])))
return msg
def unbyte(screenBytes):
msg = bytearray()
msg.extend(screenBytes)
width = 0
height = 0
asciiPixels = []
curRow = []
height, width = struct.unpack("BB", str(msg[:2]))
for i in range(0, height):
# height and width are counting in ASCII PIXELS not bytes
for j in range(0, width):
curPos = 2 * (i * width + j) + 2 # 2=size of header
symbol, color = struct.unpack("BB",
str(msg[curPos:(curPos + 2)]))
asciiPixel = AsciiPixel(symbol, color)
curRow.append(asciiPixel)
asciiPixels.append(curRow)
curRow = []
screen = Screen(asciiPixels)
return screen
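# Round-trip sketch (illustrative): byte() and unbyte() are inverses over
# the packed wire format (a 2-byte height/width header followed by one
# (ascii, colorcode) byte pair per ascii pixel), modulo how AsciiPixel
# encodes its colors:
#   packed = byte(Screen())
#   restored = unbyte(packed)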
if __name__=="__main__":
#unit test
screen = Screen()
newCell = AsciiPixel(ord('a'), 255, 0, 0)
screen.setCell(newCell, 2, 0)
print "repr of screen:"
print repr(screen)
print "str of screen:"
print str(screen)
|
kendase3/every
|
common/screen.py
|
Python
|
bsd-2-clause
| 3,676
|
from ecl import EclPrototype
import sys
import os
def installAbortSignals():
if sys.version_info.major < 3 and not os.getenv('ECL_SKIP_SIGNAL'):
install_signals = EclPrototype("void util_install_signals()")
install_signals()
def updateAbortSignals():
"""
Will install the util_abort_signal for all UNMODIFIED signals.
"""
if sys.version_info.major < 3 and not os.getenv('ECL_SKIP_SIGNAL'):
update_signals = EclPrototype("void util_update_signals()")
update_signals()
|
Statoil/libecl
|
python/ecl/util/util/install_abort_signals.py
|
Python
|
gpl-3.0
| 522
|
# -*- coding: utf-8 -*-
# © 2015 Antiun Ingeniería, S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Legal terms per event",
"summary": "Make attendees to accept legal terms per event",
"version": "8.0.1.0.0",
"category": "Marketing",
"website": "http://www.antiun.com",
"author": "Antiun Ingeniería S.L., Odoo Community Association (OCA)",
"license": "AGPL-3",
"application": False,
"installable": True,
"auto_install": True,
"depends": [
"website_event_sale",
"website_sale_product_legal",
],
"data": [
"views/event_event_view.xml",
"views/legal_term_view.xml",
"views/templates.xml",
],
}
|
Endika/event
|
website_event_sale_legal/__openerp__.py
|
Python
|
agpl-3.0
| 742
|
import socket
import math
import random
UDP_IP = "192.168.16.195"
UDP_PORT = 5005
MESSAGE = "50,50,"
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
print "message:", MESSAGE
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for i in range(0,1000):
#angle = math.radians((i % 360 - 180))
angle = math.radians((random.uniform(44, 50)))
msg = MESSAGE + "{:0.2f}".format(angle)
sock.sendto(msg, (UDP_IP, UDP_PORT))
print msg
|
yannicl/raspi-robot
|
v1/pi/camera/learning/test_learn_from_basic_rotation.py
|
Python
|
mit
| 475
|
# Copyright (C) 2021 Open Source Integrators
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class MrpWorkcenterProductivity(models.Model):
_inherit = "mrp.workcenter.productivity"
def _prepare_mrp_workorder_analytic_item(self):
"""
Prepare additional values for Analytic Items created.
For compatibility with analytic_activity_cost
"""
self.ensure_one()
return {
"name": "{} / {}".format(self.production_id.name, self.workorder_id.name),
"account_id": self.production_id.analytic_account_id.id,
"date": fields.Date.today(),
"company_id": self.company_id.id,
"manufacturing_order_id": self.production_id.id,
"workorder_id": self.workorder_id.id,
"unit_amount": self.duration / 60, # convert minutes to hours
"amount": -self.duration / 60 * self.workcenter_id.costs_hour,
}
def generate_mrp_work_analytic_line(self):
AnalyticLine = self.env["account.analytic.line"].sudo()
for timelog in self:
line_vals = timelog._prepare_mrp_workorder_analytic_item()
analytic_line = AnalyticLine.create(line_vals)
analytic_line.on_change_unit_amount()
@api.model
def create(self, vals):
timelog = super().create(vals)
if vals.get("date_end"):
timelog.generate_mrp_work_analytic_line()
return timelog
def write(self, vals):
res = super().write(vals)
if vals.get("date_end"):
self.generate_mrp_work_analytic_line()
return res
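# Illustrative flow (hypothetical field values): closing a productivity
# time log by writing 'date_end' triggers generate_mrp_work_analytic_line(),
# which posts a negative analytic amount of duration / 60 * costs_hour
# against the production order's analytic account:
#   self.env["mrp.workcenter.productivity"].create({
#       "workcenter_id": workcenter.id,
#       "workorder_id": workorder.id,
#       "date_start": "2021-01-01 08:00:00",
#       "date_end": "2021-01-01 09:30:00",  # triggers the analytic item
#       "loss_id": loss.id,
#   })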
|
OCA/manufacture
|
mrp_account_analytic/models/mrp_workorder.py
|
Python
|
agpl-3.0
| 1,671
|
# -*- coding: utf-8 -*-
#
#Copyright (C) 2009 kingzero, RaNaN
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 3 of the License,
#or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
###
from __future__ import with_statement
import os
from os.path import join
from os.path import abspath
import logging
import subprocess
#import tempfile
import Image
import TiffImagePlugin
import PngImagePlugin
import GifImagePlugin
import JpegImagePlugin
class OCR(object):
__name__ = "OCR"
__type__ = "ocr"
__version__ = "0.1"
__description__ = """OCR base plugin"""
__author_name__ = "pyLoad Team"
__author_mail__ = "admin@pyload.org"
def __init__(self):
self.logger = logging.getLogger("log")
def load_image(self, image):
self.image = Image.open(image)
self.pixels = self.image.load()
self.result_captcha = ''
def unload(self):
"""delete all tmp images"""
pass
def threshold(self, value):
self.image = self.image.point(lambda a: a * value + 10)
def run(self, command):
"""Run a command"""
        popen = subprocess.Popen(command, bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
popen.wait()
output = popen.stdout.read() +" | "+ popen.stderr.read()
popen.stdout.close()
popen.stderr.close()
self.logger.debug("Tesseract ReturnCode %s Output: %s" % (popen.returncode, output))
def run_tesser(self, subset=False, digits=True, lowercase=True, uppercase=True):
#self.logger.debug("create tmp tif")
#tmp = tempfile.NamedTemporaryFile(suffix=".tif")
tmp = open(join("tmp", "tmpTif_%s.tif" % self.__name__), "wb")
tmp.close()
#self.logger.debug("create tmp txt")
#tmpTxt = tempfile.NamedTemporaryFile(suffix=".txt")
tmpTxt = open(join("tmp", "tmpTxt_%s.txt" % self.__name__), "wb")
tmpTxt.close()
self.logger.debug("save tiff")
self.image.save(tmp.name, 'TIFF')
        if os.name == "nt":
            # 'pypath' is a global injected by the pyLoad core at runtime
            tessparams = [join(pypath, "tesseract", "tesseract.exe")]
else:
tessparams = ["tesseract"]
tessparams.extend( [abspath(tmp.name), abspath(tmpTxt.name).replace(".txt", "")] )
if subset and (digits or lowercase or uppercase):
#self.logger.debug("create temp subset config")
#tmpSub = tempfile.NamedTemporaryFile(suffix=".subset")
tmpSub = open(join("tmp", "tmpSub_%s.subset" % self.__name__), "wb")
tmpSub.write("tessedit_char_whitelist ")
if digits:
tmpSub.write("0123456789")
if lowercase:
tmpSub.write("abcdefghijklmnopqrstuvwxyz")
if uppercase:
tmpSub.write("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
tmpSub.write("\n")
tessparams.append("nobatch")
tessparams.append(abspath(tmpSub.name))
tmpSub.close()
self.logger.debug("run tesseract")
self.run(tessparams)
self.logger.debug("read txt")
try:
with open(tmpTxt.name, 'r') as f:
self.result_captcha = f.read().replace("\n", "")
        except IOError:
self.result_captcha = ""
self.logger.debug(self.result_captcha)
try:
os.remove(tmp.name)
os.remove(tmpTxt.name)
if subset and (digits or lowercase or uppercase):
os.remove(tmpSub.name)
        except OSError:
pass
def get_captcha(self, name):
raise NotImplementedError
def to_greyscale(self):
if self.image.mode != 'L':
self.image = self.image.convert('L')
self.pixels = self.image.load()
def eval_black_white(self, limit):
self.pixels = self.image.load()
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if self.pixels[x, y] > limit:
self.pixels[x, y] = 255
else:
self.pixels[x, y] = 0
def clean(self, allowed):
pixels = self.pixels
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 255:
continue
                # No point in processing white pixels since we only want to remove black pixels
count = 0
try:
if pixels[x-1, y-1] != 255:
count += 1
if pixels[x-1, y] != 255:
count += 1
if pixels[x-1, y + 1] != 255:
count += 1
if pixels[x, y + 1] != 255:
count += 1
if pixels[x + 1, y + 1] != 255:
count += 1
if pixels[x + 1, y] != 255:
count += 1
if pixels[x + 1, y-1] != 255:
count += 1
if pixels[x, y-1] != 255:
count += 1
                except IndexError:
                    # border pixels lack a full 8-neighbourhood
                    pass
# not enough neighbors are dark pixels so mark this pixel
# to be changed to white
if count < allowed:
pixels[x, y] = 1
# second pass: this time set all 1's to 255 (white)
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 1:
pixels[x, y] = 255
self.pixels = pixels
def derotate_by_average(self):
"""rotate by checking each angle and guess most suitable"""
w, h = self.image.size
pixels = self.pixels
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 155
highest = {}
counts = {}
for angle in xrange(-45, 45):
tmpimage = self.image.rotate(angle)
pixels = tmpimage.load()
w, h = self.image.size
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 255
count = {}
for x in xrange(w):
count[x] = 0
for y in xrange(h):
if pixels[x, y] == 155:
count[x] += 1
            total = 0
            cnt = 0
            for x in count.values():
                if x != 0:
                    total += x
                    cnt += 1
            avg = total / cnt if cnt else 0
counts[angle] = cnt
highest[angle] = 0
for x in count.values():
if x > highest[angle]:
highest[angle] = x
highest[angle] = highest[angle] - avg
hkey = 0
hvalue = 0
for key, value in highest.iteritems():
if value > hvalue:
hkey = key
hvalue = value
self.image = self.image.rotate(hkey)
pixels = self.image.load()
for x in xrange(w):
for y in xrange(h):
if pixels[x, y] == 0:
pixels[x, y] = 255
if pixels[x, y] == 155:
pixels[x, y] = 0
self.pixels = pixels
def split_captcha_letters(self):
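        # Scan columns left to right: any column containing a non-white
        # pixel extends the current letter's bounding box; a fully white
        # column closes it. Crops of 5 px or less in either dimension are
        # discarded as noise.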
captcha = self.image
started = False
letters = []
width, height = captcha.size
bottomY, topY = 0, height
pixels = captcha.load()
for x in xrange(width):
black_pixel_in_col = False
for y in xrange(height):
if pixels[x, y] != 255:
if not started:
started = True
firstX = x
lastX = x
if y > bottomY:
bottomY = y
if y < topY:
topY = y
if x > lastX:
lastX = x
black_pixel_in_col = True
            if not black_pixel_in_col and started:
rect = (firstX, topY, lastX, bottomY)
new_captcha = captcha.crop(rect)
w, h = new_captcha.size
if w > 5 and h > 5:
letters.append(new_captcha)
started = False
bottomY, topY = 0, height
return letters
def correct(self, values, var=None):
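        # e.g. correct({('l', '|'): 'i', '0': 'O'}) replaces every 'l' or '|'
        # with 'i' and every '0' with 'O'; string keys map one substring,
        # iterable keys map several to the same replacement.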
if var:
result = var
else:
result = self.result_captcha
for key, item in values.iteritems():
if key.__class__ == str:
result = result.replace(key, item)
else:
for expr in key:
result = result.replace(expr, item)
if var:
return result
else:
self.result_captcha = result
if __name__ == '__main__':
ocr = OCR()
ocr.load_image("B.jpg")
ocr.to_greyscale()
ocr.eval_black_white(140)
ocr.derotate_by_average()
ocr.run_tesser()
print "Tesseract", ocr.result_captcha
ocr.image.save("derotated.jpg")
|
estaban/pyload
|
module/plugins/captcha/captcha.py
|
Python
|
gpl-3.0
| 9,726
|
from p4a.calendar import interfaces
from Acquisition import aq_inner, aq_parent
def update_catalog(obj, evt):
"""Reindex the object in the catalog.
"""
obj.reindexObject()
def vevent_demarshalled(obj, evt):
container = aq_parent(aq_inner(obj))
config = interfaces.ICalendarConfig(container, None)
if config is not None and not config.calendar_activated:
config.calendar_activated = True
|
cynapse/cynin
|
products/Plone4ArtistsCalendar/pythonlib/p4a/plonecalendar/__init__.py
|
Python
|
gpl-3.0
| 422
|
from setuptools import setup, find_packages
setup(
name='ewave',
version='0.0',
description='ewave',
long_description='',
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'clld>=8',
'clldmpg>=4.2',
'sqlalchemy',
'waitress',
],
extras_require={
'dev': [
'flake8',
'tox'
],
'test': [
'mock',
'psycopg2',
'pytest>=5.4',
'pytest-clld',
'pytest-mock',
'pytest-cov',
'coverage>=4.2',
'selenium',
'zope.component>=3.11.0',
],
},
test_suite="ewave",
entry_points={
'paste.app_factory': [
'main = ewave:main',
],
})
|
clld/ewave
|
setup.py
|
Python
|
apache-2.0
| 1,119
|
__author__ = 'Sharon Lev'
__email__ = 'sharon_lev@yahoo.com'
__date__ = '11/21/16'
from unittest import TestCase, TestLoader, TextTestRunner, TestSuite
from src.unittestextras import DataSet, DataProvider
from StringIO import StringIO
class test_DataProvider(TestCase):
setup_count = 0
teardown_count = 0
class DataProviderInner(TestCase):
"""
"""
data_dict = DataSet(
dict(x=10, y=20, label='set a'),
dict(x=5, y=7, label='set b'),
dict(x=100, y=5, label='set c'),
dict(x=100, y="st", label='set d')
)
data_list = DataSet(
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[4, 4, 0]
)
data_strings = DataSet(
"string_2",
1,
0.5,
((1, 2, 3), )
)
@DataProvider(data_list)
def test_me_list(self, x=1, y=1, z=1):
print self.id(), x
induce_divided_by_zero_error = x/z
self.assertEquals(x+y, z)
@DataProvider(data_list, id_index=2)
def test_me_l_indexed(self, x=1, y=1, z=1):
print self.id(), x
induce_divided_by_zero_error = x/z
self.assertEquals(x+y, z)
@DataProvider(data_dict)
def test_me_dict(self, x=1, y=1, z=1, label=None):
print self.id(), x
if type(x) != type(y): raise StandardError("not same type")
self.assertGreater(x, y)
@DataProvider(data_dict, id_key='y')
def test_me_d_key(self, x=1, y=1, z=1, label=None):
print self.id(), x
if type(x) != type(y): raise StandardError("not same type")
self.assertGreater(x, y)
@DataProvider(data_strings)
def test_me_primitives(self, x=1, y=1, z=1):
print self.id(), x
if isinstance(x, tuple):
import InduceImportError
self.assertIsInstance(x, str)
def setUp(self):
test_DataProvider.setup_count += 1
def tearDown(self):
test_DataProvider.teardown_count += 1
def setUp(self):
self.__class__.teardown_count = 0
self.__class__.setup_count = 0
self.suite = TestLoader().loadTestsFromTestCase(self.DataProviderInner)
def tearDown(self):
pass
def _subsuite(self, suite, pattern):
subsuite = TestSuite()
for test in suite:
if pattern in test._testMethodName:
subsuite.addTest(test)
return subsuite
def test_provided_primitives(self):
self.assertEqual(self.setup_count, 0)
self.assertEqual(self.teardown_count, 0)
results = TextTestRunner(stream=StringIO()).run(self._subsuite(self.suite, 'primitive'))
print results
self.assertEqual(self.setup_count, 4)
self.assertEqual(self.teardown_count, 4)
self.assertEqual(results.testsRun, 4)
self.assertEqual(len(results.failures), 2)
self.assertEqual(len(results.errors), 1)
def test_provided_list(self):
self.assertEqual(self.setup_count, 0)
self.assertEqual(self.teardown_count, 0)
results = TextTestRunner(stream=StringIO()).run(self._subsuite(self.suite, 'list'))
print results
self.assertEqual(self.setup_count, 4)
self.assertEqual(self.teardown_count, 4)
self.assertEqual(results.testsRun, 4)
self.assertEqual(len(results.failures), 2)
self.assertEqual(len(results.errors), 1)
def test_provided_list_indexed(self):
self.assertEqual(self.setup_count, 0)
self.assertEqual(self.teardown_count, 0)
results = TextTestRunner(stream=StringIO()).run(self._subsuite(self.suite, 'l_index'))
print results
self.assertEqual(self.setup_count, 4)
self.assertEqual(self.teardown_count, 4)
self.assertEqual(results.testsRun, 4)
self.assertEqual(len(results.failures), 2)
self.assertEqual(len(results.errors), 1)
def test_provided_dict(self):
self.assertEqual(self.setup_count, 0)
self.assertEqual(self.teardown_count, 0)
results = TextTestRunner(stream=StringIO()).run(self._subsuite(self.suite, 'dict'))
print results
self.assertEqual(self.setup_count, 4)
self.assertEqual(self.teardown_count, 4)
self.assertEqual(results.testsRun, 4)
self.assertEqual(len(results.failures), 2)
self.assertEqual(len(results.errors), 1)
def test_provided_dict_keyed(self):
self.assertEqual(self.setup_count, 0)
self.assertEqual(self.teardown_count, 0)
results = TextTestRunner(stream=StringIO()).run(self._subsuite(self.suite, 'd_key'))
print results
self.assertEqual(self.setup_count, 4)
self.assertEqual(self.teardown_count, 4)
self.assertEqual(results.testsRun, 4)
self.assertEqual(len(results.failures), 2)
self.assertEqual(len(results.errors), 1)
|
sharonlev/pyUnittestExtras
|
test/test_DataProvider.py
|
Python
|
gpl-3.0
| 4,520
|
from datetime import datetime
from flask_script import Manager
from app import app, db
from app.models import User, Post
manager = Manager(app)
@manager.command
def init():
dropdb()
initdb()
filldb()
@manager.command
def initdb():
print('Initializing database...'),
db.create_all()
print('done!')
@manager.command
def filldb():
print('Filling database...'),
admin = User(u'Aishee', u'24111408')
db.session.add(admin)
db.session.commit()
post = Post(
title=u'Hello, world!',
markup=POST_1,
author_id=admin.id,
visible=True,
)
db.session.add(post)
post.created = datetime(2011, 6, 13)
post.update(post.title, post.markup, True)
post = Post(
title=u'Random Words 1',
markup=POST_4,
author_id=admin.id,
visible=True,
)
db.session.add(post)
post.created = datetime(2012, 8, 15)
post.update(post.title, post.markup, True)
post = Post(
title=u'Random Words 2',
markup=POST_2,
author_id=admin.id,
visible=True,
)
db.session.add(post)
post.created = datetime(2012, 12, 24)
post.update(post.title, post.markup, True)
post = Post(
title=u'Commander Riker!',
markup=POST_3,
author_id=admin.id,
visible=True,
)
db.session.add(post)
post = Post(
title=u'Random Words 3',
markup=POST_4,
author_id=admin.id,
visible=True,
)
db.session.add(post)
post = Post(
title=u'Getting started with Flask',
markup=POST_5,
author_id=admin.id,
visible=True,
)
db.session.add(post)
db.session.commit()
print('done!')
@manager.command
def dropdb():
print('Dropping database...'),
db.drop_all()
print('done!')
POST_1 = u"""
First blog post.
Nam quis urna est. Duis vel tincidunt quam. Vivamus odio tortor, suscipit vel
pretium quis, imperdiet quis dolor. Integer molestie enim nec risus malesuada
imperdiet. Donec pellentesque justo id sem tempor varius. Etiam ut tincidunt
lorem. Nullam a tellus sem.
### Golden Axe + Metal
<iframe width="560" height="315" src="//www.youtube.com/embed/sIrUcJ2JS3w"
frameborder="0" allowfullscreen></iframe>
Vestibulum a neque sed quam pharetra interdum. Quisque euismod dictum ipsum.
Vivamus tincidunt mi at tellus pharetra placerat. Sed sed sem nisi, sit amet
ultrices neque. Quisque eget turpis et sapien luctus auctor in ac magna.
"""
POST_2 = u"""
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean vel ipsum
lectus. Pellentesque tempus enim sed leo imperdiet non lobortis nulla
sollicitudin. Maecenas arcu orci, interdum eu rhoncus ut, blandit id felis.
Mauris consectetur dui at felis ultricies tempus. Quisque molestie convallis
lectus vitae viverra. Duis lobortis ultrices turpis, nec eleifend est
venenatis nec. Sed sed lorem quis metus eleifend ullamcorper. Ut semper
nulla a arcu ornare **condimentum**.
Aliquam neque metus, posuere vitae condimentum ut, fermentum quis diam.
*Nulla facilisi*. Proin sapien felis, tristique eu venenatis at,
**accumsan** non dui. Vestibulum ante ipsum primis in faucibus orci luctus et
ultrices posuere cubilia.
"""
POST_3 = u"""
Maecenas ut gravida nisi. Aenean feugiat orci non quam vehicula accumsan.
Nullam scelerisque elementum sollicitudin. Sed vel tellus nisi, non tincidunt
augue. Aliquam at nulla ut sem mollis tincidunt.

Nam quis urna est. Duis vel tincidunt quam. Vivamus odio tortor, suscipit vel
pretium quis, imperdiet quis dolor. Integer molestie enim nec risus malesuada
imperdiet. Donec pellentesque justo id sem tempor varius. Etiam ut tincidunt
lorem. Nullam a tellus sem.
Vestibulum a neque sed quam pharetra interdum. Quisque euismod dictum ipsum.
Vivamus tincidunt mi at tellus pharetra placerat. Sed sed sem nisi, sit amet
ultrices neque. Quisque eget turpis et sapien luctus auctor in ac magna.
Etiam rhoncus commodo molestie.
"""
POST_4 = u"""
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean vel ipsum
lectus. Pellentesque tempus enim sed leo imperdiet non lobortis nulla
sollicitudin. Maecenas arcu orci, interdum eu rhoncus ut, blandit id felis.
Mauris consectetur dui at felis ultricies tempus. Quisque molestie convallis
lectus vitae viverra. Duis lobortis ultrices turpis, nec eleifend est
venenatis nec.
+ Quisque
+ Venenatis
Sed sed lorem quis metus eleifend ullamcorper. Ut semper nulla a arcu ornare
condimentum. Ut et lacus ac lacus pulvinar accumsan quis eget lacus. Integer
id nibh non eros tincidunt bibendum. Aenean diam lectus, tempus sed consequat
consectetur, posuere non ipsum. Donec vitae eleifend est. Donec at elit mi.
Maecenas tempor nulla gravida quam volutpat varius.
Vivamus malesuada viverra mauris sed dapibus. Aliquam erat volutpat. Aliquam
neque metus, posuere vitae condimentum ut, fermentum quis diam. Nulla
facilisi. Proin sapien felis, tristique eu venenatis at, accumsan non dui.
Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere
cubilia.
"""
POST_5 = u"""
How to run a *Hello World* program, code from [Flask](http://flask.pocoo.org).
```python
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
if __name__ == "__main__":
app.run()
```
"""
if __name__ == '__main__':
manager.run()
|
aishee/aisheeblog
|
manage.py
|
Python
|
gpl-2.0
| 5,429
|
# Copyright (C) 2013-2015 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi, Reed Koser, Christopher Reffett, and Fox Wilson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from helpers.command import Command
@Command('guarded', ['handler'])
def cmd(send, msg, args):
"""Shows the currently guarded nicks.
Syntax: !guarded
"""
guarded = args['handler'].guarded
if not guarded:
send("Nobody is guarded.")
else:
send(", ".join(guarded))
|
jwoglom/ionbot
|
commands/guarded.py
|
Python
|
gpl-2.0
| 1,148
|
"""
ESSArch is an open source archiving and digital preservation system
ESSArch
Copyright (C) 2005-2019 ES Solutions AB
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Contact information:
Web - http://www.essolutions.se
Email - essarch@essolutions.se
"""
# Generated by Django 1.10 on 2016-11-09 10:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ip', '0017_remove_informationpackage_profiles'),
]
operations = [
migrations.RemoveField(
model_name='informationpackage',
name='ObjectNumItems',
),
migrations.RemoveField(
model_name='informationpackage',
name='ObjectSize',
),
]
|
ESSolutions/ESSArch_Core
|
ESSArch_Core/ip/migrations/0018_auto_20161109_1114.py
|
Python
|
gpl-3.0
| 1,388
|
# modu
# Copyright (c) 2006-2010 Phil Christensen
# http://modu.bubblehouse.org
#
#
# See LICENSE for details
"""
Datatypes to manage foreign key relationships.
"""
import time
from zope.interface import implements
from modu import assets
from modu.editable import IDatatype, define
from modu.util import form, tags, OrderedDict
from modu.persist import sql
from modu.persist.sql import escape_dot_syntax as q
class ForeignLabelField(define.definition):
"""
Display a value from a foreign table based on this field's value.
"""
implements(IDatatype)
def get_element(self, req, style, storable):
"""
@see: L{modu.editable.define.definition.get_element()}
"""
store = storable.get_store()
value = self['fvalue']
label = self['flabel']
table = self['ftable']
where = self.get('fwhere', 'WHERE %s = %%s' % q(value))
args = [getattr(storable, self.get_column_name(), None)]
if(callable(where)):
where = where(req, storable)
args = []
if(isinstance(where, dict)):
where = sql.build_where(where)
args = []
foreign_label_query = "SELECT %s, %s FROM %s %s" % (q(value), q(label), q(table), where)
foreign_label_query = sql.interp(foreign_label_query, *args)
results = store.pool.runQuery(foreign_label_query)
frm = form.FormNode(self.name)
frm(type='label')
if(results):
frm(value=results[0][label])
return frm
class ItemTitleField(ForeignLabelField):
"""
Display the item title for this record.
Given the item_id and item_table fields available in some records,
this field will display the proper title.
"""
def get_element(self, req, style, storable):
"""
@see: L{modu.editable.define.definition.get_element()}
"""
store = storable.get_store()
label = None
value = None
label_col = self.get('flabel', 'title')
value_col = 'id'
table = getattr(storable, 'item_table', None)
if not(table):
table = self.get('ftable')
item_value = getattr(storable, self.get_column_name(), None)
if(table is None or item_value is None):
results = None
else:
# We select * in case the particular item doesn't have a title field
foreign_label_query = "SELECT * FROM %s WHERE %s = %%s" % (table, value_col)
foreign_label_query = sql.interp(foreign_label_query, [item_value])
results = store.pool.runQuery(foreign_label_query)
if(results):
value = results[0][value_col]
label = results[0].get(label_col, '(label not found)')
frm = form.FormNode(self.name)
suffix = ''
prefix = ''
if(style == 'listing'):
frm(type='hidden', value=value)
if(table and value):
label = tags.a(href=req.get_path(req.prepath, 'detail', table, value))[label]
frm(type='label', value=label)
else:
if not(label):
label = '(no link available)'
frm(type='hidden', value=value)
if(table and value):
prefix = tags.a(href=req.get_path(req.prepath, 'detail', table, value))[label]
else:
prefix = label
frm(prefix=prefix, suffix=suffix)
return frm
def update_storable(self, req, form, storable):
"""
No operation.
@see: L{modu.editable.define.definition.update_storable()}
"""
pass
class ForeignSelectField(define.definition):
"""
Allow selection of a foreign value.
"""
implements(IDatatype)
inherited_attributes = ['size']
def get_element(self, req, style, storable):
"""
@see: L{modu.editable.define.definition.get_element()}
"""
store = storable.get_store()
value = self['fvalue']
label = self['flabel']
table = self['ftable']
where = self.get('fwhere', '')
order_by = self.get('order_by', None)
if(callable(where)):
where = where(req, storable)
if(isinstance(where, dict)):
where = sql.build_where(where)
foreign_query = 'SELECT %s, %s FROM %s ' % (q(value), q(label), q(table))
if(where):
foreign_query += where
if(order_by):
foreign_query += 'ORDER BY %s' % order_by
results = store.pool.runQuery(foreign_query)
options = OrderedDict([(item[value], item[label]) for item in results])
frm = form.FormNode(self.name)
if(style == 'listing' or self.get('read_only', False)):
foreign_value = getattr(storable, self.get_column_name(), None)
if(foreign_value in options):
frm(type='label', value=options[foreign_value])
else:
frm(type='label', value='')
else:
frm(type='select', value=getattr(storable, self.get_column_name(), None), options=options)
return frm
class ForeignAutocompleteField(define.definition):
"""
Allow selection of a foreign value by autocomplete field.
"""
implements(IDatatype)
def get_element(self, req, style, storable):
"""
@see: L{modu.editable.define.definition.get_element()}
"""
form_name = '%s-form' % storable.get_table()
ac_id = '%s-%s-autocomplete' % (form_name, self.name)
ac_cb_id = '%s-%s-ac-callback' % (form_name, self.name)
ac_url = req.get_path(req.prepath, 'autocomplete', storable.get_table(), self.name)
prefs = """
autoFill:1,
selectFirst:1,
matchSubset:0,
selectOnly:1,
formatItem:formatItem,
extraParams:{t:%d}, minChars:%d""" % (int(time.time()), self.get('min_chars', 3))
#ac_javascript = '$("#%s").autocomplete("%s", '
#ac_javascript += '{onItemSelect:select_item("%s"), %s});'
#ac_javascript = ac_javascript % (ac_id, ac_url, ac_cb_id, prefs)
ac_javascript = '$("#%s").autocomplete("%s", {%s});' % (ac_id, ac_url, prefs)
ac_javascript += '$("#%s").result(select_item_handler("%s"));' % (ac_id, ac_cb_id)
ac_javascript = tags.script(type='text/javascript')[ac_javascript]
ac_field = form.FormNode('%s-autocomplete' % self.name)
ac_field(type='textfield', weight=0, attributes={'id':ac_id}, suffix=ac_javascript)
value_field = form.FormNode(self.name)
value_field(type='hidden', weight=2, value=getattr(storable, self.get_column_name(), None), attributes={'id':ac_cb_id})
store = storable.get_store()
value = self['fvalue']
label = self['flabel']
table = self['ftable']
if(hasattr(storable, self.get_column_name())):
query = 'SELECT %s FROM %s WHERE %s = %%s' % (q(label), q(table), q(value))
field_value = getattr(storable, self.get_column_name())
if(field_value is not None):
results = store.pool.runQuery(sql.interp(query, field_value))
if(results):
ac_field(value=results[0][label])
else:
value_field(value=0)
else:
value_field(value=0)
if(style == 'listing' or self.get('read_only', False)):
return form.FormNode(self.name)(type='label', value=ac_field.attr('value', ''))
req.content.report('header', tags.style(type="text/css")[
"""@import '%s';""" % req.get_path('/assets/jquery/jquery.autocomplete.css')])
req.content.report('header', tags.script(type="text/javascript")[
"""
function formatItem(item, index, totalItems){
                return item[0].replace('<', '&lt;').replace('>', '&gt;')
}
"""
])
assets.activate_jquery(req)
req.content.report('header', tags.script(type="text/javascript",
src=req.get_path("/assets/jquery/jquery.autocomplete.js"))[''])
req.content.report('header', tags.script(type="text/javascript",
src=req.get_path("/assets/editable-autocomplete.js"))[''])
frm = form.FormNode('%s-ac-fieldset' % self.name)(type='fieldset', style='brief')
frm[ac_field.name] = ac_field
frm[value_field.name] = value_field
return frm
def update_storable(self, req, frm, storable):
"""
@see: L{modu.editable.define.definition.get_element()}
"""
form_name = '%s-form' % storable.get_table()
if(form_name in req.data):
form_data = req.data[form_name]
            if not(getattr(form_data.get(self.name, {}).get('%s-autocomplete' % self.name), 'value', None)):
setattr(storable, self.get_column_name(), None)
elif(self.name in form_data and self.name in form_data[self.name]):
setattr(storable, self.get_column_name(), form_data[self.name][self.name].value)
return True
class ForeignMultipleSelectField(define.definition):
"""
Allow management of an n2m relationship with a foreign table.
"""
implements(IDatatype)
def get_element(self, req, style, storable):
"""
@see: L{modu.editable.define.definition.get_element()}
"""
mlabel = self.get('flabel', '')
if(mlabel.find('.') == -1):
mlabel = 'm.%s' % q(mlabel)
mlabel = self.get('flabel_sql', mlabel)
where = self.get('fwhere', '')
if(callable(where)):
where = where(req, storable)
if(isinstance(where, dict)):
where = sql.build_where(where)
ntom_query = """SELECT m.%s AS value, %s AS label, COALESCE(n2m.%s, n2m.%s = 1, 0) AS selected
FROM %s m
LEFT JOIN %s n2m ON m.%s = n2m.%s AND n2m.%s = %%s
%s
ORDER BY label""" % (self['fvalue'], mlabel, self['ntof_f_id'], self['ntof_f_id'],
q(self['ftable']),
q(self['ntof']), self.get('fvalue', 'id'),
self['ntof_f_id'], self['ntof_n_id'],
where)
store = storable.get_store()
results = store.pool.runQuery(sql.interp(ntom_query, storable.get_id()))
if(style == 'listing' or self.get('read_only', False)):
def _default_formatter(req_ignored, style_ignored, storable_ignored, result):
return ', '.join([item['label'] for item in result if item['selected']])
formatter = self.get('formatter', _default_formatter)
label_value = formatter(req, style, storable, results)
return form.FormNode(self.name)(type='label', value=label_value)
values = [item['value'] for item in results if item['selected']]
options = OrderedDict([(item['value'], item['label']) for item in results])
frm = form.FormNode(self.name)
frm(type='select', multiple=True, value=values, options=options)
return frm
def update_storable(self, req, form, storable):
"""
@see: L{modu.editable.define.definition.get_element()}
"""
form_data = req.data[form.name]
store = storable.get_store()
item_id = storable.get_id()
delete_query = sql.build_delete(self['ntof'], {self['ntof_n_id']:item_id})
store.pool.runOperation(delete_query)
if(self.name in form_data):
values = form_data[self.name].value
if(isinstance(values, dict)):
values = values[self.name + '-autocomplete']
if not(isinstance(values, list)):
values = [values]
data = [{self['ntof_n_id']:item_id, self['ntof_f_id']:getattr(val, 'value', val)} for val in values]
insert_query = sql.build_insert(self['ntof'], data, **self.get('ntof_extras', {}))
store.pool.runOperation(insert_query)
elif(self.get('required', False)):
# A conundrum...
# It's got to be a postwrite field, because a new record would
# have to be saved before we could insert a record elsewhere with
            # a foreign key (supposing for a minute we weren't using MySQL, argh)
#
# This means that it's impossible for this field to stop the writing
# of the record at this point, thus 'required' is currently meaningless.
#
# Should there be a way for a postwrite field to validate separately,
# before the write?
#
# I think the way it was supposed to work in Procuro was that if you
# are using GUIDs, you can fill the field at creation time, otherwise
# you saw a field that told you to save before editing (lame).
return False
return True
def is_postwrite_field(self):
"""
@see: L{modu.editable.define.definition.get_element()}
"""
return True
class ForeignMultipleAutocompleteField(ForeignMultipleSelectField):
"""
Allow management of an n2m relationship with a foreign table by using an autocomplete field.
"""
def get_element(self, req, style, storable):
"""
@see: L{modu.editable.define.definition.get_element()}
"""
mlabel = self.get('flabel', '')
if(mlabel.find('.') == -1):
mlabel = 'm.%s' % mlabel
mlabel = self.get('flabel_sql', mlabel)
where = self.get('fwhere', '')
        if(callable(where)):
            where = where(req, storable)
        if(isinstance(where, dict)):
            where = sql.build_where(where)
limit = 'LIMIT %d' % self.get('limit_choices', 20)
ntom_query = """SELECT m.%s AS value, %s AS label
FROM %s m
INNER JOIN %s n2m ON m.%s = n2m.%s AND n2m.%s = %%s
%s
ORDER BY label
%s""" % (self['fvalue'], q(mlabel),
q(self['ftable']),
q(self['ntof']), self.get('fvalue', 'id'),
self['ntof_f_id'], self['ntof_n_id'],
where, limit)
store = storable.get_store()
results = store.pool.runQuery(sql.interp(ntom_query, storable.get_id()))
if(style == 'listing' or self.get('read_only', False)):
label_value = ', '.join([result['label'] for result in results])
return form.FormNode(self.name)(type='label', value=label_value)
options = dict([(str(result['value']), result['label']) for result in results])
form_name = '%s-form' % storable.get_table()
ac_id = '%s-%s-autocomplete' % (form_name, self.name)
select_id = '%s-foreign-select' % self.name
ac_url = req.get_path(req.prepath, 'autocomplete', storable.get_table(), self.name) + '?time=' + str(time.time())
hidden_options = ''
for value in options:
hidden_options += tags.input(type='hidden', name='%s[%s]' % (form_name, self.name), value=value)
select_frm = form.FormNode('%s-select-view' % self.name)
select_frm(type='select', options=options, size=self.get('size', 5),
multiple=None, suffix=hidden_options + '<br/>', attributes={'id':select_id})
prefs = 'autoFill:1, selectFirst:1, matchSubset:0, selectOnly:1, extraParams:{t:%d}, minChars:%d' % (int(time.time()), self.get('min_chars', 3))
# ac_js = '$(document).ready(function(){$("#%s").autocomplete("%s", {onItemSelect:add_foreign_item("%s", "%s"), %s});});' % (ac_id, ac_url, form_name, self.name, prefs)
ac_js = """
$(document).ready(function(){
$("#%s").autocomplete("%s", {%s});
$("#%s").result(add_foreign_item("%s", "%s"));
});
""" % (ac_id, ac_url, prefs, ac_id, form_name, self.name)
ac_controls = tags.script(type='text/javascript')[ac_js]
ac_field = form.FormNode('%s-autocomplete' % self.name)
ac_field(type='textfield', weight=10, attributes={'id':ac_id}, suffix=ac_controls)
req.content.report('header', tags.style(type="text/css")[
"""@import '%s';""" % req.get_path('/assets/jquery/jquery.autocomplete.css')])
assets.activate_jquery(req)
req.content.report('header', tags.script(type="text/javascript",
src=req.get_path("/assets/jquery/jquery.autocomplete.js"))[''])
req.content.report('header', tags.script(type="text/javascript",
src=req.get_path("/assets/editable-autocomplete.js"))[''])
frm = form.FormNode('%s-ac-fieldset' % self.name)(type='fieldset', style='brief')
frm[select_frm.name] = select_frm
frm[ac_field.name] = ac_field
return frm
|
philchristensen/modu
|
src/modu/editable/datatypes/relational.py
|
Python
|
mit
| 14,701
|
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
from mock import patch
from cloudify import ctx as ctx_proxy
from cloudify import manager
from cloudify import decorators
from cloudify.decorators import operation, workflow
from cloudify import context
from cloudify.exceptions import NonRecoverableError, ProcessExecutionError
from cloudify.workflows import workflow_context
import cloudify.tests.mocks.mock_rest_client as rest_client_mock
class MockNotPicklableException(Exception):
"""Non-picklable exception"""
def __init__(self, custom_error):
self.message = custom_error
def __str__(self):
return self.message
class MockPicklableException(Exception):
"""Non-picklable exception"""
def __init__(self, custom_error):
super(Exception, self).__init__(custom_error)
@operation
def acquire_context(a, b, ctx, **kwargs):
return ctx
@operation
def some_operation(**kwargs):
from cloudify import ctx
return ctx
@workflow
def error_workflow(ctx, picklable=False, **_):
if picklable:
raise MockPicklableException('hello world!')
raise MockNotPicklableException('hello world!')
class OperationTest(testtools.TestCase):
def test_empty_ctx(self):
ctx = acquire_context(0, 0)
self.assertIsInstance(ctx, context.CloudifyContext)
def test_provided_ctx(self):
ctx = {'node_id': '1234'}
kwargs = {'__cloudify_context': ctx}
ctx = acquire_context(0, 0, **kwargs)
self.assertIsInstance(ctx, context.CloudifyContext)
self.assertEquals('1234', ctx.instance.id)
def test_proxied_ctx(self):
self.assertRaises(RuntimeError,
lambda: ctx_proxy.instance.id)
@operation
def test_op(ctx, **kwargs):
self.assertEqual(ctx, ctx_proxy)
test_op()
self.assertRaises(RuntimeError,
lambda: ctx_proxy.instance.id)
def test_provided_capabilities(self):
ctx = {
'node_id': '5678',
}
# using a mock rest client
manager.get_rest_client = \
lambda: rest_client_mock.MockRestclient()
rest_client_mock.put_node_instance(
'5678',
relationships=[{'target_id': 'some_node',
'target_name': 'some_node'}])
rest_client_mock.put_node_instance('some_node',
runtime_properties={'k': 'v'})
kwargs = {'__cloudify_context': ctx}
ctx = acquire_context(0, 0, **kwargs)
self.assertIn('k', ctx.capabilities)
self.assertEquals('v', ctx.capabilities['k'])
def test_capabilities_clash(self):
ctx = {
'node_id': '5678',
}
# using a mock rest client
manager.get_rest_client = \
lambda: rest_client_mock.MockRestclient()
rest_client_mock.put_node_instance(
'5678',
relationships=[{'target_id': 'node1',
'target_name': 'node1'},
{'target_id': 'node2',
'target_name': 'node2'}])
rest_client_mock.put_node_instance('node1',
runtime_properties={'k': 'v1'})
rest_client_mock.put_node_instance('node2',
runtime_properties={'k': 'v2'})
kwargs = {'__cloudify_context': ctx}
ctx = acquire_context(0, 0, **kwargs)
self.assertRaises(NonRecoverableError, ctx.capabilities.__contains__,
'k')
def test_workflow_error_delegation(self):
try:
workflow_context.get_rest_client = \
lambda: rest_client_mock.MockRestclient()
decorators.get_rest_client = \
lambda: rest_client_mock.MockRestclient()
manager.get_rest_client = \
lambda: rest_client_mock.MockRestclient()
kwargs = {'__cloudify_context': {}}
try:
error_workflow(picklable=False, **kwargs)
self.fail('Expected exception')
except ProcessExecutionError as e:
self.assertTrue('hello world!' in e.message)
self.assertTrue('test_decorators.py' in e.traceback)
self.assertTrue(MockNotPicklableException.__name__ in
e.error_type)
try:
error_workflow(picklable=True, **kwargs)
self.fail('Expected exception')
except ProcessExecutionError as e:
self.assertTrue('hello world!' in e.message)
self.assertTrue('test_decorators.py' in e.traceback)
self.assertTrue(MockPicklableException.__name__ in
e.error_type)
finally:
from cloudify.workflows import api
api.ctx = None
api.pipe = None
def test_instance_update(self):
with patch.object(context.NodeInstanceContext,
'update') as mock_update:
kwargs = {'__cloudify_context': {
'node_id': '5678'
}}
some_operation(**kwargs)
mock_update.assert_called_once_with()
def test_source_target_update_in_relationship(self):
with patch.object(context.NodeInstanceContext,
'update') as mock_update:
kwargs = {'__cloudify_context': {
'node_id': '5678',
'relationships': ['1111'],
'related': {
'node_id': '1111',
'is_target': True
}
}}
some_operation(**kwargs)
self.assertEqual(2, mock_update.call_count)
|
xdegenne/cloudify-plugins-common
|
cloudify/tests/test_decorators.py
|
Python
|
apache-2.0
| 6,442
|
import re
class CustomYaml(object):
"""
Custom YAML dumper that fits the PlanB config export needs exactly.
The regular YAML dumper would add lots of tags that we don't need.
This one is just right for this particular output.
The ugly backslash (\\b) hack signifies that we prefer the data to
be on the previous line.
"""
# No need for double quotes around these:
_yaml_safe_re = re.compile(r'^[a-z/_.][a-z0-9/_.-]*$')
def __init__(self, obj):
self._parsed = self._to_string(obj)
def __str__(self):
return '\n'.join(self._parsed)
def _to_string(self, obj):
return self._from_dict(obj, root=True)
def _from_obj(self, obj):
if isinstance(obj, (dict, list, tuple)):
if len(obj) == 0:
if isinstance(obj, (dict,)):
return ['\b', '{}']
else:
return ['\b', '[]']
if isinstance(obj, dict):
return self._from_dict(obj)
return self._from_list(obj)
# |<LF>preformatted string<LF>
if isinstance(obj, str) and '\n' in obj:
obj = obj.rstrip() # no need for trailing LFs here
return ['\b', '|'] + [' {}'.format(i) for i in obj.split('\n')]
return ['\b', self._from_atom(obj)]
def _from_list(self, list_):
ret = []
for item in list_:
if isinstance(item, (list, tuple)):
raise NotImplementedError('list in list')
subret = self._from_obj(item)
if subret[0] == '\b':
ret.append('- {}'.format(subret[1]))
ret.extend([' {}'.format(i) for i in subret[2:]])
else:
assert subret[0].startswith(' ')
subret[0] = '- {}'.format(subret[0][2:])
ret.extend(subret)
return [' {}'.format(i) for i in ret]
def _from_dict(self, dict_, root=False):
ret = []
for key, value in dict_.items():
subret = self._from_obj(value)
if subret[0] == '\b':
ret.append('{}: {}'.format(
self._from_atom(key), subret[1]))
ret.extend(subret[2:])
else:
ret.append('{}:'.format(self._from_atom(key)))
ret.extend(subret)
if not root:
return [' {}'.format(i) for i in ret]
return ret
def _from_atom(self, atom):
if isinstance(atom, str):
return self._from_string(atom)
if atom is None:
return 'null' # or '~'
if atom is True:
return 'true'
if atom is False:
return 'false'
if isinstance(atom, (int, float)):
return str(atom)
return self._from_string(str(atom))
def _from_string(self, string):
assert isinstance(string, str), string
if string.lower() in ('null', 'true', 'false'):
return '"{}"'.format(string)
if self._yaml_safe_re.match(string):
return string
if '\n' in string:
raise NotImplementedError('did not expect LF here')
return '"{}"'.format(
str(string).replace('\\', '\\\\')
.replace('"', '\\"'))
|
ossobv/planb
|
planb/common/customyaml.py
|
Python
|
gpl-3.0
| 3,288
|
# -*- coding: utf-8 -*-
configs = {
'db': {
}
}
|
longfan3/contact
|
www/config_override.py
|
Python
|
apache-2.0
| 57
|
import couchdb
import glob
import os
couch = couchdb.Server('http://127.0.0.1:5984')
db = couch['paullaroid']
"events_gen = glob.iglob(os.path.join('_data','*')) #it's a
for event in events_gen:
doc = { '_id' : os.path.basename(event), 'type_doc':'event'}
db.save(doc)
#
#pict_gen = glob.iglob(os.path.join(event,'*THSF_2017.jpg'))
#for pict in pict_gen:
# picture = { '_id' : os.path.basename(pict), 'type_doc':'image',
# 'datetime': ' '.join(os.path.basename(pict).split('_')[:-1]),
# 'event_id': os.path.basename(event)}
#
# db.save(picture)
# with open(pict, 'rb') as current_pict_full:
# db.put_attachment(picture, current_pict_full, filename='full',
# content_type='image/jpeg')
#
#
# with open(pict+'.thumbnail.jpg', 'rb') as current_pict_thumb:
# db.put_attachment(picture, current_pict_thumb, filename='thumb',
# content_type='image/jpeg')
|
paulla/photomaton
|
populate_db.py
|
Python
|
mit
| 1,008
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
import unittest
from pytz import UTC, timezone
from influxdb import line_protocol
class TestLineProtocol(unittest.TestCase):
def test_make_lines(self):
data = {
"tags": {
"empty_tag": "",
"none_tag": None,
"integer_tag": 2,
"string_tag": "hello"
},
"points": [
{
"measurement": "test",
"fields": {
"string_val": "hello!",
"int_val": 1,
"float_val": 1.1,
"none_field": None,
"bool_val": True,
}
}
]
}
self.assertEqual(
line_protocol.make_lines(data),
'test,integer_tag=2,string_tag=hello '
'bool_val=True,float_val=1.1,int_val=1i,string_val="hello!"\n'
)
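        # Shape of the line protocol exercised above:
        #   <measurement>[,<tag>=<value>...] <field>=<value>[,...] [timestamp]
        # Empty or None tags are dropped, and integer fields get an "i" suffix.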
def test_timezone(self):
dt = datetime(2009, 11, 10, 23, 0, 0, 123456)
utc = UTC.localize(dt)
berlin = timezone('Europe/Berlin').localize(dt)
eastern = berlin.astimezone(timezone('US/Eastern'))
data = {
"points": [
{"measurement": "A", "fields": {"val": 1},
"time": 0},
{"measurement": "A", "fields": {"val": 1},
"time": "2009-11-10T23:00:00.123456Z"},
{"measurement": "A", "fields": {"val": 1}, "time": dt},
{"measurement": "A", "fields": {"val": 1}, "time": utc},
{"measurement": "A", "fields": {"val": 1}, "time": berlin},
{"measurement": "A", "fields": {"val": 1}, "time": eastern},
]
}
self.assertEqual(
line_protocol.make_lines(data),
'\n'.join([
'A val=1i 0',
'A val=1i 1257894000123456000',
'A val=1i 1257894000123456000',
'A val=1i 1257894000123456000',
'A val=1i 1257890400123456000',
'A val=1i 1257890400123456000',
]) + '\n'
)
def test_string_val_newline(self):
data = {
"points": [
{
"measurement": "m1",
"fields": {
"multi_line": "line1\nline1\nline3"
}
}
]
}
self.assertEqual(
line_protocol.make_lines(data),
'm1 multi_line="line1\\nline1\\nline3"\n'
)
def test_make_lines_unicode(self):
data = {
"tags": {
"unicode_tag": "\'Привет!\'" # Hello! in Russian
},
"points": [
{
"measurement": "test",
"fields": {
"unicode_val": "Привет!", # Hello! in Russian
}
}
]
}
self.assertEqual(
line_protocol.make_lines(data),
'test,unicode_tag=\'Привет!\' unicode_val="Привет!"\n'
)
def test_quote_ident(self):
self.assertEqual(
line_protocol.quote_ident(r"""\foo ' bar " Örf"""),
r'''"\\foo ' bar \" Örf"'''
)
def test_quote_literal(self):
self.assertEqual(
line_protocol.quote_literal(r"""\foo ' bar " Örf"""),
r"""'\\foo \' bar " Örf'"""
)
|
Asimmetric/influxdb-python
|
influxdb/tests/test_line_protocol.py
|
Python
|
mit
| 3,723
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('visas', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='visa',
name='end_date',
field=models.DateField(verbose_name=b'End Date', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='visa',
name='start_date',
field=models.DateField(default=datetime.date(2015, 3, 17), verbose_name=b'Start Date'),
preserve_default=True,
),
]
|
sfu-fas/coursys
|
visas/migrations/0002_auto_20150317_1306.py
|
Python
|
gpl-3.0
| 663
|
# -*- coding: utf-8 -*-
from django.conf import settings
from wsgiref.headers import Headers
from wsgiref.handlers import format_date_time
from time import time
from logging import getLogger
log = getLogger(__name__)
class ExpiresMiddleware(object):
    """WSGI middleware that adds an ``Expires`` header to responses that
    lack one, based on their Content-Type and the per-type delays (in
    seconds) given in ``expire_seconds``.
    """
def __init__(self, application, expire_seconds):
self.application = application
self.expire_seconds = expire_seconds
@property
def debug(self):
return settings.DEBUG
def make_expire_time_for(self, mime):
expire_stamp = time() + self.expire_seconds[mime]
return format_date_time(expire_stamp)
def start_response_with_expiration(self, start_response):
def patched_start_response(status, headers, exc_info=None):
wsgi_headers = Headers(headers)
# If we're debugging, or the response already has an expires
# header, just skip this.
if not self.debug and 'Expires' not in wsgi_headers:
mime = wsgi_headers.get('Content-Type', '*').split(';')[0]
# If the mime type is explicitly called out, use the expire
# delay specified.
if mime in self.expire_seconds:
expire_time = self.make_expire_time_for(mime)
# If there's a catch-all wildcard delay, use that.
elif '*' in self.expire_seconds:
expire_time = self.make_expire_time_for('*')
# Otherwise, don't set the header.
else:
expire_time = None
if expire_time is not None:
log.debug('Adding expires header value: ' + expire_time)
headers.append(('Expires', expire_time))
return start_response(status, headers, exc_info)
return patched_start_response
def __call__(self, environ, start_response):
return self.application(environ, self.start_response_with_expiration(start_response))
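# A hypothetical wiring sketch (the mime map and `application` variable are
# illustrative, not part of this module):
#
#     application = ExpiresMiddleware(application, {
#         'image/png': 86400,  # cache PNGs for a day
#         '*': 3600,           # catch-all: one hour
#     })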
|
codeforsanjose/MobilityMapApi
|
src/project/twinkie.py
|
Python
|
gpl-3.0
| 2,176
|
"""Serialization module.
.. codeauthor:: Tomas Krizek <tomas.krizek1@tul.cz>
"""
import copy
from enum import Enum
class Serializable:
"""Class defines special operations during the serialization process.
It can:
- exclude certain keys from serialization
- delete key from serialized file (same as excluded, but used for deprecated / removed keys)
- set default values for keys if they are not specified in the source data
- allow serialization of nested objects
Serialization of nested objects.
Define nested keys in the composite dictionary. As a value, pass in the class to be instanced.
If the class needs to reference itself, you can define __serializable__ after the class
definition. See testing/gm_base/test_serializable.py for example.
"""
def __init__(self, excluded=None, deleted=None, default=None, composite=None):
self.excluded = excluded if excluded is not None else []
self.deleted = deleted if deleted is not None else []
self.default = default if default is not None else {}
self.composite = composite if composite is not None else {}
@staticmethod
def load(data, cls=None):
"""Create object data structure from native dict."""
if cls is not None:
if hasattr(cls, '__serializable__'):
rules = cls.__serializable__
else:
rules = Serializable()
else:
# nothing to do, no rules defined
return data
if isinstance(data, list):
deserialized = []
for item in data:
deserialized.append(Serializable.load(item, cls))
return deserialized
if data is None:
return cls()
elif not isinstance(data, dict):
return cls(data)
for exclude in (rules.excluded + rules.deleted):
if exclude in data:
del data[exclude]
for key, value in rules.default.items():
if key not in data:
data[key] = value
# __all__: set default composite
composite = {}
if '__all__' in rules.composite:
default_type = rules.composite['__all__']
composite = {key: default_type for key in data}
# override default composite
composite.update(rules.composite)
# recursively resolve composite
for key, class_ in composite.items():
if key in data:
subdata = data[key]
data[key] = Serializable.load(subdata, class_)
# finally, construct the class
return cls(**data)
@staticmethod
def dump(data):
"""Create serializable data structure from provided data."""
if hasattr(data, '__serializable__'):
rules = data.__serializable__
elif hasattr(data, '__dict__'):
rules = Serializable()
elif isinstance(data, list):
serialized = []
for item in data:
serialized.append(Serializable.dump(item))
return serialized
else:
# nothing to do, no rules defined
return data
# different serialization for dict, enum and class
if isinstance(data, dict):
out = dict(copy.copy(data))
elif isinstance(data, Enum):
return data.value
else:
out = copy.copy(data.__dict__)
for exclude in (rules.excluded + rules.deleted):
if exclude in out:
del out[exclude]
# __all__: set default composite
composite = {}
if '__all__' in rules.composite:
default_type = rules.composite['__all__']
composite = {key: default_type for key in out}
# override default composite
composite.update(rules.composite)
# recursively resolve composite
for key, class_ in composite.items():
if key in out:
subdata = out[key]
out[key] = Serializable.dump(subdata)
return out
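# A minimal round-trip sketch (the Point class is illustrative, not part of
# this module):
#
#     class Point:
#         def __init__(self, x=0, y=0):
#             self.x = x
#             self.y = y
#
#     data = Serializable.dump(Point(1, 2))   # -> {'x': 1, 'y': 2}
#     point = Serializable.load(data, Point)  # -> Point instance
#
# Nested objects are declared via `composite`, e.g.
# Serializable(composite={'start': Point, 'end': Point}) assigned to the
# owning class's __serializable__ attribute.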
|
GeoMop/GeoMop
|
src/gm_base/geomop_util/serializable.py
|
Python
|
gpl-3.0
| 4,097
|
"""
SUPPRESS-GO-AHEAD
This supports suppressing or activating the telnet GO-AHEAD
operation after every server reply.
If the client sends no explicit DONT SUPPRESS-GO-AHEAD,
Evennia will default to suppressing it, since many clients
fail to use it or have no knowledge of this standard.
It is set as the NOGOAHEAD protocol_flag option.
http://www.faqs.org/rfcs/rfc858.html
"""
SUPPRESS_GA = bytes([3]) # b"\x03"
# option code 3, taken from the telnet specification (RFC 858)
class SuppressGA(object):
"""
    Implements the SUPPRESS-GO-AHEAD protocol. Add this to a variable on the telnet
protocol to set it up.
"""
def __init__(self, protocol):
"""
Initialize suppression of GO-AHEADs.
Args:
protocol (Protocol): The active protocol instance.
"""
self.protocol = protocol
self.protocol.protocol_flags["NOGOAHEAD"] = True
self.protocol.protocol_flags[
"NOPROMPTGOAHEAD"
] = True # Used to send a GA after a prompt line only, set in TTYPE (per client)
# tell the client that we prefer to suppress GA ...
self.protocol.will(SUPPRESS_GA).addCallbacks(self.will_suppress_ga, self.wont_suppress_ga)
def wont_suppress_ga(self, option):
"""
Called when client requests to not suppress GA.
Args:
option (Option): Not used.
"""
self.protocol.protocol_flags["NOGOAHEAD"] = False
self.protocol.handshake_done()
def will_suppress_ga(self, option):
"""
Client will suppress GA
Args:
option (Option): Not used.
"""
self.protocol.protocol_flags["NOGOAHEAD"] = True
self.protocol.handshake_done()
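# Hypothetical wiring sketch (mirroring how other telnet option handlers are
# attached; not verified against the full protocol code):
#
#     self.suppress_ga = SuppressGA(self)  # e.g. in the protocol's connectionMade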
|
jamesbeebop/evennia
|
evennia/server/portal/suppress_ga.py
|
Python
|
bsd-3-clause
| 1,792
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
"""
A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
pairID: (Optional) string. Unique identifier for the pair of sentences.
"""
guid: str
text_a: str
text_b: Optional[str] = None
label: Optional[str] = None
pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
"""
A single set of features of data.
Property names are the same names as the corresponding inputs to a model.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
token_type_ids: (Optional) Segment token indices to indicate first and second
portions of the inputs. Only some models use them.
label: (Optional) Label corresponding to the input. Int for classification problems,
float for regression problems.
pairID: (Optional) Unique identifier for the pair of sentences.
"""
input_ids: List[int]
attention_mask: Optional[List[int]] = None
token_type_ids: Optional[List[int]] = None
label: Optional[Union[int, float]] = None
pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
features: List[InputFeatures]
def __init__(
self,
data_dir: str,
tokenizer: PreTrainedTokenizer,
task: str,
max_seq_length: Optional[int] = None,
overwrite_cache=False,
evaluate: bool = False,
):
processor = hans_processors[task]()
cached_features_file = os.path.join(
data_dir,
"cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train",
tokenizer.__class__.__name__,
str(max_seq_length),
task,
),
)
label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}")
self.features = torch.load(cached_features_file)
else:
logger.info(f"Creating features from dataset file at {data_dir}")
examples = (
processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
)
logger.info("Training examples: %s", len(examples))
self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(self.features, cached_features_file)
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def get_labels(self):
return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
"""
This will be superseded by a framework-agnostic approach
soon.
"""
features: List[InputFeatures]
def __init__(
self,
data_dir: str,
tokenizer: PreTrainedTokenizer,
task: str,
max_seq_length: Optional[int] = 128,
overwrite_cache=False,
evaluate: bool = False,
):
processor = hans_processors[task]()
label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
def gen():
for (ex_index, ex) in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
self.dataset = tf.data.Dataset.from_generator(
gen,
(
{
"example_id": tf.int32,
"input_ids": tf.int32,
"attention_mask": tf.int32,
"token_type_ids": tf.int32,
},
tf.int64,
),
(
{
"example_id": tf.TensorShape([]),
"input_ids": tf.TensorShape([None, None]),
"attention_mask": tf.TensorShape([None, None]),
"token_type_ids": tf.TensorShape([None, None]),
},
tf.TensorShape([]),
),
)
def get_dataset(self):
return self.dataset
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def get_labels(self):
return self.label_list
class HansProcessor(DataProcessor):
"""Processor for the HANS data set."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")
def get_labels(self):
"""See base class.
Note that we follow the standard three labels for MNLI
(see :class:`~transformers.data.processors.utils.MnliProcessor`)
but the HANS evaluation groups `contradiction` and `neutral` into `non-entailment` (label 0) while
`entailment` is label 1."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[5]
text_b = line[6]
pairID = line[7][2:] if line[7].startswith("ex") else line[7]
label = line[0]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
return examples
def hans_convert_examples_to_features(
examples: List[InputExample],
label_list: List[str],
max_length: int,
tokenizer: PreTrainedTokenizer,
):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` containing the examples.
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method.
max_length: Maximum example length.
tokenizer: Instance of a tokenizer that will tokenize the examples.
Returns:
A list of task-specific ``InputFeatures`` which can be fed to the model.
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % (ex_index))
inputs = tokenizer(
example.text_a,
example.text_b,
add_special_tokens=True,
max_length=max_length,
padding="max_length",
truncation=True,
return_overflowing_tokens=True,
)
label = label_map[example.label] if example.label in label_map else 0
pairID = int(example.pairID)
features.append(InputFeatures(**inputs, label=label, pairID=pairID))
for i, example in enumerate(examples[:5]):
logger.info("*** Example ***")
logger.info(f"guid: {example}")
logger.info(f"features: {features[i]}")
return features
hans_tasks_num_labels = {
"hans": 3,
}
hans_processors = {
"hans": HansProcessor,
}
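# A minimal usage sketch (added for illustration; the tokenizer name and the
# data directory below are assumptions, not part of the original file):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     processor = hans_processors["hans"]()
#     examples = processor.get_dev_examples("hans/")  # expects heuristics_evaluation_set.txt
#     features = hans_convert_examples_to_features(
#         examples, processor.get_labels(), max_length=128, tokenizer=tokenizer
#     )
#     print(features[0].label, features[0].pairID)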
|
huggingface/transformers
|
examples/research_projects/adversarial/utils_hans.py
|
Python
|
apache-2.0
| 11,767
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# run as:
# python web2py.py -S eden -M -R applications/eden/static/scripts/tools/build.sahana.py
# or
# python web2py.py -S eden -M -R applications/eden/static/scripts/tools/build.sahana.py -A gis
#
#
# Built with code/inspiration from MapFish, OpenLayers & Michael Crute
#
try:
theme = settings.get_theme()
except:
print "ERROR: File now needs to be run in the web2py environment in order to pick up which theme to build"
exit()
import os
import sys
import shutil
SCRIPTPATH = os.path.join(request.folder, "static", "scripts", "tools")
os.chdir(SCRIPTPATH)
sys.path.append("./")
# For JS
import getopt
import jsmin, mergejs
# For CSS
import re
## Untested as libsass failing to run for me:
# For SCSS
#try:
# import sass
#except:
# print "Unable to import libsass: so if your theme includes SCSS sources, these won't be rebuilt"
def mergeCSS(inputFilenames, outputFilename):
output = ""
for inputFilename in inputFilenames:
output += open(inputFilename, "r").read()
open(outputFilename, "w").write(output)
return outputFilename
def cleanline(theLine):
""" Kills line breaks, tabs, and double spaces """
p = re.compile("(\n|\r|\t|\f|\v)+")
m = p.sub("", theLine)
# Kills double spaces
p = re.compile("( )+")
m = p.sub(" ", m)
# Removes last semicolon before }
p = re.compile("(; }|;})+")
m = p.sub("}", m)
# Removes space before {
p = re.compile("({ )+")
m = p.sub("{", m)
# Removes all comments
p = re.compile("/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/")
m = p.sub("", m)
# Strip off the Charset
p = re.compile("@CHARSET .*;")
m = p.sub("", m)
# Strip spaces before the {
p = re.compile(" {")
m = p.sub("{", m)
# Strip space after :
p = re.compile(": ")
m = p.sub(":", m)
# Strip space after ,
p = re.compile(", ")
m = p.sub(",", m)
# Strip space after ;
p = re.compile("; ")
m = p.sub(";", m)
return m
def compressCSS(inputFilename, outputFilename):
theFile = open(inputFilename, "r").read()
output = ""
for line in theFile:
output = output + cleanline(line)
# Once more, clean the entire file string
_output = cleanline(output)
open(outputFilename, "w").write(_output)
return
def dojs(dogis = False, warnings = True):
""" Minifies the JavaScript """
# Do we have local version of the Closure Compiler available?
use_compressor = "jsmin" # Fallback
try:
import closure
use_compressor = "closure"
print "using local Closure Compiler"
except Exception, E:
print "No closure (%s)" % E
print "Download from http://closure-compiler.googlecode.com/files/compiler-latest.zip"
try:
import closure_ws
use_compressor = "closure_ws"
print "Using Closure via Web Service - limited to files < 1Mb!"
except ImportError:
print "No closure_ws"
if use_compressor == "closure":
if not warnings:
closure.extra_params = "--warning_level QUIET"
minimize = closure.minimize
elif use_compressor == "closure_ws":
minimize = closure_ws.minimize
elif use_compressor == "jsmin":
minimize = jsmin.jsmin
sourceDirectory = ".."
configFilename = "sahana.js.cfg"
outputFilename = "S3.min.js"
# Merge JS files
print "Merging Core libraries."
merged = mergejs.run(sourceDirectory, None, configFilename)
# Compress JS files
print "Compressing - JS"
minimized = minimize(merged)
# Add license
print "Adding license file."
minimized = open("license.txt").read() + minimized
# Print to output files
print "Writing to %s." % outputFilename
open(outputFilename, "w").write(minimized)
# Remove old JS files
print "Deleting %s." % outputFilename
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
# Move new JS files
print "Moving new JS files"
shutil.move(outputFilename, "../S3")
# Bootstrap
# print "Compressing Bootstrap"
# sourceDirectoryBootstrap = ".."
# configFilenameBootstrap = "sahana.js.bootstrap.cfg"
# outputFilenameBootstrap = "bootstrap.min.js"
# mergedBootstrap = mergejs.run(sourceDirectoryBootstrap,
# None,
# configFilenameBootstrap)
# minimizedBootstrap = minimize(mergedBootstrap)
# open(outputFilenameBootstrap, "w").write(minimizedBootstrap)
# try:
# os.remove("../%s" % outputFilenameBootstrap)
# except:
# pass
# shutil.move(outputFilenameBootstrap, "..")
# Calendar
print "Compressing calendar"
sourceDirectory = ".."
configFilename = "sahana.js.calendar.cfg"
outputFilename = "s3.ui.calendar.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# dataLists
print "Compressing dataLists"
sourceDirectory = ".."
configFilename = "sahana.js.dataLists.cfg"
outputFilename = "s3.dataLists.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# dataTables
print "Compressing dataTables"
sourceDirectory = ".."
configFilename = "sahana.js.dataTables.cfg"
outputFilename = "s3.dataTables.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
configFilename = "sahana.js.dataTables_multi.cfg"
outputFilename = "s3.dataTables.multi.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# pivotTables
print "Compressing pivotTables"
sourceDirectory = ".."
configFilename = "sahana.js.pivotTables.cfg"
outputFilename = "s3.pivotTables.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# timeplot
print "Compressing timeplot"
sourceDirectory = ".."
configFilename = "sahana.js.timeplot.cfg"
outputFilename = "s3.timeplot.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# groupedItems
print "Compressing groupedItems"
sourceDirectory = ".."
configFilename = "sahana.js.groupeditems.cfg"
outputFilename = "s3.groupeditems.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# ImageCrop
print "Compressing ImageCrop"
sourceDirectory = ".."
configFilename = "sahana.js.imageCrop.cfg"
outputFilename = "s3.imagecrop.widget.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# JSTree
print "Compressing JSTree"
sourceDirectory = ".."
configFilename = "sahana.js.jstree.cfg"
outputFilename = "s3.jstree.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# Chat
print "Compressing Chat"
sourceDirectory = ".."
configFilename = "sahana.js.chat.cfg"
outputFilename = "s3.chat.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# Guided Tour
print "Compressing Guided Tour"
sourceDirectory = ".."
configFilename = "sahana.js.guidedTour.cfg"
outputFilename = "s3.guidedtour.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# Single scripts
for filename in ("add_person",
"cap",
"gis",
"gis.feature_crud",
"gis.fullscreen",
"gis.latlon",
"gis.loader",
"gis.pois",
"locationselector.widget",
"msg",
"popup",
"register_validation",
"select_person",
"timeline",
"ui.contacts",
"ui.embeddedcomponent",
"ui.locationselector",
):
print "Compressing s3.%s.js" % filename
inputFilename = os.path.join("..", "S3", "s3.%s.js" % filename)
outputFilename = "s3.%s.min.js" % filename
input = open(inputFilename, "r").read()
minimized = minimize(input)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# Enable when needed
full = False
if full:
for filename in ("spectrum",
"tag-it",
):
print "Compressing %s.js" % filename
in_f = os.path.join("..", filename + ".js")
out_f = os.path.join("..", filename + ".min.js")
with open(in_f, "r") as inp:
with open(out_f, "w") as out:
out.write(minimize(inp.read()))
# Vulnerability
print "Compressing Vulnerability"
sourceDirectory = "../.."
configFilename = "sahana.js.vulnerability.cfg"
outputFilename = "s3.vulnerability.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../../themes/Vulnerability/js/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../../themes/Vulnerability/js")
print "Compressing Vulnerability GIS"
sourceDirectory = "../.."
configFilename = "sahana.js.vulnerability_gis.cfg"
outputFilename = "OpenLayers.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../../themes/Vulnerability/js/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../../themes/Vulnerability/js")
if dogis:
sourceDirectoryOpenLayers = "../gis/openlayers/lib"
sourceDirectoryMGRS = "../gis"
sourceDirectoryGeoExt = "../gis/GeoExt/lib"
sourceDirectoryGxp = "../gis/gxp"
configFilenameOpenLayers = "sahana.js.ol.cfg"
configFilenameMGRS = "sahana.js.mgrs.cfg"
configFilenameGeoExt = "sahana.js.geoext.cfg"
configFilenameGxpMin = "sahana.js.gxp.cfg"
configFilenameGxp2 = "sahana.js.gxp2.cfg"
configFilenameGxpFull = "sahana.js.gxpfull.cfg"
outputFilenameOpenLayers = "OpenLayers.js"
outputFilenameMGRS = "MGRS.min.js"
outputFilenameGeoExt = "GeoExt.js"
outputFilenameGxp = "gxp.js"
outputFilenameGxp2 = "gxp_upload.js"
# Merge GIS JS Files
print "Merging OpenLayers libraries."
mergedOpenLayers = mergejs.run(sourceDirectoryOpenLayers,
None,
configFilenameOpenLayers)
print "Merging MGRS libraries."
mergedMGRS = mergejs.run(sourceDirectoryMGRS,
None,
configFilenameMGRS)
print "Merging GeoExt libraries."
mergedGeoExt = mergejs.run(sourceDirectoryGeoExt,
None,
configFilenameGeoExt)
print "Merging gxp libraries."
mergedGxpMin = mergejs.run(sourceDirectoryGxp,
None,
configFilenameGxpMin)
mergedGxp2 = mergejs.run(sourceDirectoryGxp,
None,
configFilenameGxp2)
mergedGxpFull = mergejs.run(sourceDirectoryGxp,
None,
configFilenameGxpFull)
# Compress JS files
print "Compressing - OpenLayers JS"
if use_compressor == "closure_ws":
# Limited to files < 1Mb!
minimizedOpenLayers = jsmin.jsmin(mergedOpenLayers)
#minimizedOpenLayers = jsmin.jsmin("%s\n%s" % (mergedOpenLayers,
# mergedOpenLayersExten))
else:
minimizedOpenLayers = minimize(mergedOpenLayers)
#minimizedOpenLayers = minimize("%s\n%s" % (mergedOpenLayers,
# mergedOpenLayersExten))
# OpenLayers extensions
for filename in ["OWM.OpenLayers",
]:
inputFilename = os.path.join("..", "gis", "%s.js" % filename)
outputFilename = "%s.min.js" % filename
input = open(inputFilename, "r").read()
minimized = minimize(input)
open(outputFilename, "w").write(minimized)
try:
os.remove("../gis/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../gis")
print "Compressing - MGRS JS"
minimizedMGRS = minimize(mergedMGRS)
print "Compressing - GeoExt JS"
minimizedGeoExt = minimize("%s\n%s" % (mergedGeoExt,
#mergedGeoExtux,
mergedGxpMin))
# GeoNamesSearchCombo
inputFilename = os.path.join("..", "gis", "GeoExt", "ux", "GeoNamesSearchCombo.js")
outputFilename = "GeoNamesSearchCombo.min.js"
input = open(inputFilename, "r").read()
minimized = minimize(input)
open(outputFilename, "w").write(minimized)
try:
os.remove("../gis/GeoExt/ux/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../gis/GeoExt/ux")
print "Compressing - gxp JS"
minimizedGxp = minimize(mergedGxpFull)
minimizedGxp2 = minimize(mergedGxp2)
for filename in ("WMSGetFeatureInfo",
):
inputFilename = os.path.join("..", "gis", "gxp", "plugins", "%s.js" % filename)
outputFilename = "%s.min.js" % filename
input = open(inputFilename, "r").read()
minimized = minimize(input)
open(outputFilename, "w").write(minimized)
try:
os.remove("../gis/gxp/plugins/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../gis/gxp/plugins")
for filename in ("GoogleEarthPanel",
"GoogleStreetViewPanel",
):
inputFilename = os.path.join("..", "gis", "gxp", "widgets", "%s.js" % filename)
outputFilename = "%s.min.js" % filename
input = open(inputFilename, "r").read()
minimized = minimize(input)
open(outputFilename, "w").write(minimized)
try:
os.remove("../gis/gxp/widgets/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../gis/gxp/widgets")
# Add license
#minimizedGIS = open("license.gis.txt").read() + minimizedGIS
# Print to output files
print "Writing to %s." % outputFilenameOpenLayers
open(outputFilenameOpenLayers, "w").write(minimizedOpenLayers)
print "Writing to %s." % outputFilenameMGRS
open(outputFilenameMGRS, "w").write(minimizedMGRS)
print "Writing to %s." % outputFilenameGeoExt
open(outputFilenameGeoExt, "w").write(minimizedGeoExt)
print "Writing to %s." % outputFilenameGxp
open(outputFilenameGxp, "w").write(minimizedGxp)
print "Writing to %s." % outputFilenameGxp2
open(outputFilenameGxp2, "w").write(minimizedGxp2)
# Move new JS files
print "Deleting %s." % outputFilenameOpenLayers
try:
os.remove("../gis/%s" % outputFilenameOpenLayers)
except:
pass
print "Moving new OpenLayers JS files"
shutil.move(outputFilenameOpenLayers, "../gis")
print "Deleting %s." % outputFilenameMGRS
try:
os.remove("../gis/%s" % outputFilenameMGRS)
except:
pass
print "Moving new MGRS JS files"
shutil.move(outputFilenameMGRS, "../gis")
print "Deleting %s." % outputFilenameGeoExt
try:
os.remove("../gis/%s" % outputFilenameGeoExt)
except:
pass
print "Moving new GeoExt JS files"
shutil.move(outputFilenameGeoExt, "../gis")
print "Deleting %s." % outputFilenameGxp
try:
os.remove("../gis/%s" % outputFilenameGxp)
except:
pass
print "Moving new gxp JS files"
shutil.move(outputFilenameGxp, "../gis")
print "Deleting %s." % outputFilenameGxp2
try:
os.remove("../gis/%s" % outputFilenameGxp2)
except:
pass
print "Moving new gxp2 JS files"
shutil.move(outputFilenameGxp2, "../gis")
def docss():
""" Compresses the CSS files """
# Theme
theme = settings.get_theme()
location = settings.get_template_location()
print "Using theme %s" % theme
css_cfg = os.path.join("..", "..", "..", location, "templates", theme, "css.cfg")
f = open(css_cfg, "r")
files = f.readlines()
f.close()
listCSS = []
for file in files[:-1]:
if file[0] != "#":
# Real line, not a comment
if file[:5] == "SCSS ":
# Compile the SCSS first
file = file[5:]
filename = file.split("/")[-1].split(".")[0]
sourcePath = os.path.join("..", "..", "..", location, "templates", theme, "scss")
sourceFilename = os.path.join(sourcePath, "%s.scss" % filename)
sourceFile = open(sourceFilename, "r")
source = sourceFile.read()
sourceFile.close()
os.chdir(sourcePath)
outputText = sass.compile(source)
os.chdir(SCRIPTPATH)
outputFile = open(file, "w")
outputFile.write(outputText)
outputFile.close()
p = re.compile("(\n|\r|\t|\f|\v)+")
file = p.sub("", file)
listCSS.append("../../styles/%s" % file)
outputFilenameCSS = "eden.min.css"
# Merge CSS files
print "Merging Core styles."
mergedCSS = mergeCSS(listCSS, outputFilenameCSS)
# Compress CSS files
print "Writing to %s." % outputFilenameCSS
compressCSS(mergedCSS, outputFilenameCSS)
# Move files to correct locations
print "Deleting %s." % outputFilenameCSS
try:
os.remove("../../themes/%s/%s" % (theme, outputFilenameCSS))
except:
pass
print "Moving new %s." % outputFilenameCSS
shutil.move(outputFilenameCSS, "../../themes/%s" % theme)
# Enable when needed
full = False
if full:
for filename in ("joyride",
"jstree",
"spectrum",
):
print "Merging %s styles." % filename
listCSS = ("../../styles/plugins/%s.css" % filename,)
outputFilenameCSS = "%s.min.css" % filename
mergedCSS = mergeCSS(listCSS, outputFilenameCSS)
print "Writing to %s." % outputFilenameCSS
compressCSS(mergedCSS, outputFilenameCSS)
# Move files to correct locations
print "Deleting %s." % outputFilenameCSS
try:
os.remove("../../styles/plugins/%s" % outputFilenameCSS)
except:
pass
print "Moving new %s." % outputFilenameCSS
shutil.move(outputFilenameCSS, "../../styles/plugins")
# Bootstrap
print "Bootstrap CSS"
listCSS = []
for file in ["bootstrap.css",
"bootstrap-responsive.css",
"font-awesome.css",
#"bootstrap-multiselect.css",
]:
listCSS.append("../../styles/bootstrap/%s" % file)
outputFilenameCSS = "bootstrap-combined.min.css"
# Merge CSS files
print "Merging Bootstrap styles."
mergedCSS = mergeCSS(listCSS, outputFilenameCSS)
# Compress CSS files
print "Writing to %s." % outputFilenameCSS
compressCSS(mergedCSS, outputFilenameCSS)
# Move files to correct locations
print "Deleting %s." % outputFilenameCSS
try:
os.remove("../../styles/bootstrap/%s" % outputFilenameCSS)
except:
pass
print "Moving new %s." % outputFilenameCSS
shutil.move(outputFilenameCSS, "../../styles/bootstrap")
# Ext
print "Ext Gray CSS"
listCSS = []
for file in ["ext-all-notheme.css",
"xtheme-gray.css",
]:
listCSS.append("../ext/resources/css/%s" % file)
outputFilenameCSS = "ext-gray.min.css"
# Merge CSS files
print "Merging Ext styles."
mergedCSS = mergeCSS(listCSS, outputFilenameCSS)
# Compress CSS file
print "Writing to %s." % outputFilenameCSS
compressCSS(mergedCSS, outputFilenameCSS)
# Move files to correct locations
print "Deleting %s." % outputFilenameCSS
try:
os.remove("../ext/resources/css/%s" % outputFilenameCSS)
except:
pass
print "Moving new %s." % outputFilenameCSS
shutil.move(outputFilenameCSS, "../ext/resources/css")
print "Ext no-Theme CSS"
outputFilenameCSS = "ext-notheme.min.css"
# Compress CSS file
print "Writing to %s." % outputFilenameCSS
compressCSS("../ext/resources/css/ext-all-notheme.css", outputFilenameCSS)
# Move files to correct locations
print "Deleting %s." % outputFilenameCSS
try:
os.remove("../ext/resources/css/%s" % outputFilenameCSS)
except:
pass
print "Moving new %s." % outputFilenameCSS
shutil.move(outputFilenameCSS, "../ext/resources/css")
print "Ext Themes CSS"
outputFilenameCSS = "xtheme-ifrc.min.css"
# Compress CSS file
print "Writing to %s." % outputFilenameCSS
compressCSS("../../themes/IFRC/xtheme-ifrc.css", outputFilenameCSS)
# Move files to correct locations
print "Deleting %s." % outputFilenameCSS
try:
os.remove("../../themes/IFRC/%s" % outputFilenameCSS)
except:
pass
print "Moving new %s." % outputFilenameCSS
shutil.move(outputFilenameCSS, "../../themes/IFRC")
def main(argv):
if len(argv) > 0:
parameter1 = argv[0]
else:
parameter1 = "ALL"
if len(argv) > 1:
if(argv[1] == "DOGIS"):
parameter2 = True
else:
parameter2 = False
else:
parameter2 = True
closure_warnings = True
if "NOWARN" in argv:
closure_warnings = False
if parameter1 in ("ALL", "NOWARN"):
dojs(warnings=closure_warnings)
docss()
else:
if parameter1 in ("CSS", "css"):
docss()
else:
dojs(parameter2, warnings=closure_warnings)
docss()
print "Done."
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
schlos/eden
|
static/scripts/tools/build.sahana.py
|
Python
|
mit
| 26,289
|
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorators for Chromium port of Rietveld."""
import mimetypes
import sha
from google.appengine.api import memcache
from django.http import HttpResponseForbidden
from . import decorators as deco
from . import models_chromium
from . import responses
def binary_required(func):
"""Decorator that processes the content argument.
Attributes set on the request:
content: a Content entity.
"""
@deco.patch_required
def binary_wrapper(request, content_type, *args, **kwds):
if content_type == "0":
content_key = request.patch.content_key
elif content_type == "1":
content_key = request.patch.patched_content_key
if not content_key or not content_key.get().data:
# The file was not modified. It was likely moved without modification.
# Return the original file.
content_key = request.patch.content_key
else:
# Other values are erroneous so request.content won't be set.
return responses.HttpTextResponse(
'Invalid content type: %s, expected 0 or 1' % content_type,
status=404)
request.mime_type = mimetypes.guess_type(request.patch.filename)[0]
request.content = content_key.get()
return func(request, *args, **kwds)
return binary_wrapper
def key_required(func):
"""Decorator that insists that you are using a specific key."""
@deco.require_methods('POST')
def key_wrapper(request, *args, **kwds):
key = request.POST.get('password')
if request.user or not key:
            return HttpResponseForbidden('You must be logged in as admin for this function')
value = memcache.get('key_required')
if not value:
obj = models_chromium.Key.query().get()
if not obj:
# Create a dummy value so it can be edited from the datastore admin.
obj = models_chromium.Key(hash='invalid hash')
obj.put()
value = obj.hash
memcache.add('key_required', value, 60)
if sha.new(key).hexdigest() != value:
            return HttpResponseForbidden('You must be logged in as admin for this function')
return func(request, *args, **kwds)
return key_wrapper
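# Hedged usage sketch (added; `download_content` is a hypothetical view name,
# not part of this module). The decorator sets request.content and
# request.mime_type before the wrapped view runs:
#
#     @binary_required
#     def download_content(request, *args, **kwds):
#         # request.content was resolved by binary_required above
#         return responses.HttpTextResponse(request.content.data)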
|
nicko96/Chrome-Infra
|
appengine/chromium_rietveld/codereview/decorators_chromium.py
|
Python
|
bsd-3-clause
| 2,669
|
"""
Implementation of cursor for iterating over results.
Backed by pymongo cursor.
"""
from sacredboard.app.data.datastorage import Cursor
class MongoDbCursor(Cursor):
"""Implements Cursor for mongodb."""
def __init__(self, mongodb_cursor):
"""Initialize a MongoDB cursor."""
self.mongodb_cursor = mongodb_cursor
def count(self):
"""Return the number of items in this cursor."""
return self.mongodb_cursor.count()
def __iter__(self):
"""Iterate over runs."""
return self.mongodb_cursor
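# Hedged usage sketch (added; the client, database and collection names are
# illustrative assumptions, not part of this module):
#
#     from pymongo import MongoClient
#     raw = MongoClient()["sacred"]["runs"].find()
#     cursor = MongoDbCursor(raw)
#     print(cursor.count())
#     for run in cursor:
#         print(run["_id"])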
|
chovanecm/sacredboard
|
sacredboard/app/data/pymongo/mongocursor.py
|
Python
|
mit
| 556
|
#!/usr/bin/env python
#encoding: utf-8
import numpy as np
from pylab import *
dt=0.01 # msec
tau=40.0 # msec
tmax=1000 # msec
V_spk=-20
V_thres=-50.0
V_reset=-70.0
E_leak=V_reset
R_m=10.0 # MΩ
tt=np.arange(0, tmax, dt) #0:dt:tmax
Nt=len(tt) #length(tt)
V=np.zeros((Nt,))
V2=np.zeros((Nt,))
S=np.zeros((Nt,))
S2=np.zeros((Nt,))
#I0=np.zeros((Nt,))
# Plot characteristics
Vlim=E_leak-10,V_spk+10
# tlim=0,1000 #msec
tlim=200,800 #msec
nrows=4
LW=2
colors=[]
cmap = cm.hsv
# Solved Dayan & Abbott (2001) Ch.5 Eq. 5.12 for I_e using r_isi = 7 Hz:
theta_freq = 7
def I_e(f):
tau_isi = 1000.0/f
return -(1/R_m) * (E_leak + (V_reset - V_thres*exp(tau_isi/tau))/(exp(tau_isi/tau) - 1))
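# Worked derivation (added comment): the LIF membrane equation
#     tau * dV/dt = E_leak - V + R_m*I_e
# integrates from V_reset to
#     V(t) = E_inf + (V_reset - E_inf)*exp(-t/tau),  with E_inf = E_leak + R_m*I_e.
# Requiring V(tau_isi) = V_thres and solving for I_e gives
#     I_e = (1/R_m) * ((V_thres*exp(tau_isi/tau) - V_reset) / (exp(tau_isi/tau) - 1) - E_leak),
# which is algebraically identical to the expression returned above.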
I_const=I_e(theta_freq) # 2.0578580 # 2.1 # constant current
print 'I_const = %.4f nA'%I_const
Dt=25 # msec: STDP half window
n=int(Dt/dt)
hPlus=1.0*I_const # max height
hMinus=2.0*hPlus
dI=np.r_[np.linspace(0,hPlus,n),0,np.linspace(-hMinus,0,n)]
## first simulation
V[0]=V_reset
for i in xrange(1, Nt): #=2:Nt
V[i]=((tau-dt)/tau)*V[i-1]+(dt/tau)*(E_leak+R_m*I_const)
if V[i]>=V_thres:
V[i]=V_reset
S[i]=1
k=np.nonzero(S>0)[0]
Nspk=len(k)
ioff()
figure(1, figsize=(10.0, 14.7625))
clf()
subplot(nrows,1,1)
plot(tt,V,'k-',lw=LW)
# hold(True)
# plot([[k*dt,k*dt]*Nspk,[V_reset,V_spk],'b-',lw=LW)
title('control')
xlim(tlim)
ylim(Vlim)
## second simulation
T=(k[2]-k[1])*dt # period
Nsuper=5 # number of super-cycle for testing different timing
timeList=np.linspace((-T/2), T/2,Nsuper)
phaseList=np.zeros((Nsuper,))
plot_spikes =True
for i_super in xrange(Nsuper): #=1:Nsuper
k0=k[2]+int(timeList[i_super]/dt)
I=np.zeros((Nt,))
I[k0-n:k0+n+1]=dI
V2[0]=V_reset
S2=np.zeros((Nt,))
for i in xrange(1, Nt): #=2:Nt
V2[i]=((tau-dt)/tau)*V2[i-1]+(dt/tau)*(E_leak+R_m*(I_const+I[i]))
if V2[i]>=V_thres:
V2[i]=V_reset
S2[i]=1
k2=np.nonzero(S2>0)[0]
Nspk2=len(k2)
subplot(nrows,1,2)
color = cmap(i_super/float(Nsuper))
colors.append(color)
plot(tt,V2,'-',zorder=-Nsuper+i_super,lw=LW,c=color)
if plot_spikes:
hold(True)
plot([k2*dt]*2, [V_reset,V_spk], '-',zorder=-Nsuper+i_super,c=color,lw=LW)
title('Adding input')
subplot(nrows,1,3)
plot(tt,I,c=color,lw=LW,zorder=-Nsuper+i_super)
draw()
# Wrap new phase around half-cycles
newphase=(k2[4]-k[4])*2*dt/T
if newphase<-1:
newphase+=2
elif newphase >=1:
newphase-=2
phaseList[i_super]=newphase
subplot(nrows,1,2)
plot([k*dt]*2, [V_reset,V_spk], 'k-',lw=LW,zorder=-50)
xlim(tlim)
ylim(Vlim)
ylabel('V')
subplot(nrows,1,3)
xlim(tlim)
ylim(-25, 25)
ylabel(r'$I_e$ (pA)')
# plot(timeList/T, phaseList,'o-')
# xlabel('Pulse timing (Period)')
# ylabel('Phase reset (degree)')
# grid(True)
subplot(nrows,2,7)
X=2*timeList/T
Y=phaseList+0.0
# Unwrap phases
jump_ix = np.argmax(np.abs(np.diff(Y)))+1
X = r_[X[jump_ix:]-2, X[:jump_ix]]
Y = r_[Y[jump_ix:], Y[:jump_ix]]
colors = colors[jump_ix:] + colors[:jump_ix]
midX = X[int(Nsuper/2)+1]
for i_super in xrange(Nsuper):
plot(X[i_super],Y[i_super],'o',mec='k',
mfc=colors[i_super],ms=6,mew=1,zorder=i_super)
print X[i_super],Y[i_super]
# p=np.polyfit(x,y,1)
# yp=np.polyval(p,x)
# plot(x,yp,'r-',zorder=0)
# plot(X,Y,'b-',lw=1,zorder=0)
ylabel(r'Phase Reset ($\pi$)')
ax = gca()
ax.set_xticks(linspace(-1, 1, 5))
ax.set_yticks(linspace(-1, 1, 5))
axis('equal')
axis('image')
xlim(midX-1.2, midX+1.2)
ylim(-1.2, 1.2)
ion()
show()
|
jdmonaco/vmo-feedback-model
|
src/spike_reset.py
|
Python
|
mit
| 3,535
|
from tensorflow.keras import Input
from tensorflow.keras import Model
from tensorflow.keras.layers import Layer, Activation, BatchNormalization, Convolution2D, Dense, Flatten, MaxPooling2D, AveragePooling2D, \
add
from tensorflow.keras.models import Sequential
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import plot_model
from models.TrainingConfiguration import TrainingConfiguration
class ResNet3SmallWithLocalization(TrainingConfiguration):
""" A network with residual modules """
def __init__(self, optimizer: str, width: int, height: int, training_minibatch_size: int, number_of_classes: int):
super().__init__(optimizer=optimizer, data_shape=(height, width, 3),
training_minibatch_size=training_minibatch_size, number_of_classes=number_of_classes)
def classifier(self) -> Sequential:
""" Returns the model of this configuration """
input = Input(shape=self.data_shape)
layer = self.add_convolution(input, 16, 3)
layer = self.add_res_net_block(layer, 16, 3, False)
layer = MaxPooling2D()(layer)
layer = self.add_res_net_block(layer, 32, 3, True)
layer = self.add_res_net_block(layer, 32, 3, False)
layer = MaxPooling2D()(layer)
layer = self.add_res_net_block(layer, 64, 3, True)
layer = self.add_res_net_block(layer, 64, 3, False)
layer = self.add_res_net_block(layer, 64, 3, False)
layer = MaxPooling2D()(layer)
layer = self.add_res_net_block(layer, 128, 3, True)
layer = self.add_res_net_block(layer, 128, 3, False)
layer = self.add_res_net_block(layer, 128, 3, False)
layer = MaxPooling2D()(layer)
layer = self.add_res_net_block(layer, 256, 3, True)
layer = self.add_res_net_block(layer, 256, 3, False)
layer = self.add_res_net_block(layer, 256, 3, False)
layer = AveragePooling2D()(layer)
feature_vector = Flatten()(layer)
        number_of_output_classes = self.number_of_classes
        classification_head = Dense(units=number_of_output_classes, kernel_regularizer=l2(self.weight_decay),
activation='softmax', name='output_class')(feature_vector)
number_of_output_variables = 4 # Four values of the bounding-box: origin-x, origin-y, width and height
regression_head = Dense(units=number_of_output_variables, kernel_regularizer=l2(self.weight_decay),
activation='linear', name='output_bounding_box')(feature_vector)
model = Model(inputs=[input], outputs=[classification_head, regression_head])
model.compile(self.get_optimizer(),
loss={'output_class': 'categorical_crossentropy', 'output_bounding_box': 'mse'},
loss_weights={'output_class': 0.998, 'output_bounding_box': 0.002},
metrics=["accuracy"])
return model
def add_convolution(self, previous_layer: Layer, filters: int, kernel_size: int):
layer = Convolution2D(filters, kernel_size, padding='same', kernel_regularizer=l2(self.weight_decay))(
previous_layer)
layer = BatchNormalization()(layer)
layer = Activation('relu')(layer)
return layer
def add_res_net_block(self, previous_layer: Layer, filters, kernel_size, shortcut_is_conv) -> Layer:
layer = Convolution2D(filters, kernel_size, padding='same', kernel_regularizer=l2(self.weight_decay))(
previous_layer)
layer = BatchNormalization()(layer)
layer = Activation('relu')(layer)
layer = Convolution2D(filters, kernel_size, padding='same', kernel_regularizer=l2(self.weight_decay))(layer)
layer = BatchNormalization()(layer)
shortcut = previous_layer
if shortcut_is_conv:
shortcut = Convolution2D(filters, kernel_size, padding='same', kernel_regularizer=l2(self.weight_decay))(
previous_layer)
merge = add([layer, shortcut])
layer = Activation('relu')(merge)
return layer
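    # Note (added comment): the convolutional shortcut above is needed whenever
    # the filter count changes between blocks, so that the shortcut tensor and
    # the residual branch have matching shapes for add().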
def name(self) -> str:
""" Returns the name of this configuration """
return "res_net_3_small_with_localization"
def performs_localization(self) -> bool:
return True
if __name__ == "__main__":
configuration = ResNet3SmallWithLocalization("Adadelta", 112, 112, 16, 32)
classifier = configuration.classifier()
classifier.summary()
plot_model(classifier, to_file="res_net_3.png")
print(configuration.summary())
|
apacha/MusicSymbolClassifier
|
ModelTrainer/models/ResNet3SmallWithLocalization.py
|
Python
|
mit
| 4,567
|
import unittest
from datetime import datetime, timezone, timedelta
from crontab import CronTab
from unittest.mock import Mock
from ..scheduler import Scheduler
class TestScheduler(unittest.TestCase):
def setUp(self):
c = CronTab(user=True)
c.remove_all()
c.write()
def tearDown(self):
c = CronTab(user=True)
c.remove_all()
c.write()
def test_scheduleAddedCorrectly(self):
expectedSchedule = '0 7 1 1 * /bin/sh /replylater/src/core/runmessage.sh --id=1 --data=sqllite # 1'
tz = timezone(timedelta(hours=5, minutes=30))
d = datetime(year=2022, month=1, day=1, hour=12, minute=30, tzinfo=tz)
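        # Note (added comment): 12:30 at UTC+05:30 is 07:00 UTC, which is why
        # the expected cron expression above reads '0 7 1 1 *'.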
Scheduler.scheduleReply(1, d)
c = CronTab(user=True)
iter = c.find_comment('1')
jobs = [i for i in iter]
self.assertEqual(len(jobs), 1)
self.assertEqual(str(jobs[0]), expectedSchedule)
def test_scheduleUpdatedCorrectly(self):
expectedSchedule = '0 7 1 1 * /bin/sh /replylater/src/core/runmessage.sh --id=1 --data=sqllite # 1'
tz = timezone(timedelta(hours=5, minutes=30))
d = datetime(year=2022, month=1, day=1, hour=12, minute=30, tzinfo=tz)
Scheduler.scheduleReply(1, d)
c = CronTab(user=True)
iter = c.find_comment('1')
jobs = [i for i in iter]
self.assertEqual(len(jobs), 1)
self.assertEqual(str(jobs[0]), expectedSchedule)
d = datetime(year=2022, month=2, day=1, hour=12, minute=30, tzinfo=tz)
expectedSchedule = '0 7 1 2 * /bin/sh /replylater/src/core/runmessage.sh --id=1 --data=sqllite # 1'
Scheduler.updateReply(1, d)
c = CronTab(user=True)
iter = c.find_comment('1')
jobs = [i for i in iter]
self.assertEqual(len(jobs), 1)
self.assertEqual(str(jobs[0]), expectedSchedule)
tz = timezone(timedelta(hours=4, minutes=0))
d = datetime(year=2022, month=2, day=1, hour=12, minute=30, tzinfo=tz)
expectedSchedule = '30 8 1 2 * /bin/sh /replylater/src/core/runmessage.sh --id=1 --data=sqllite # 1'
Scheduler.updateReply(1, d)
c = CronTab(user=True)
iter = c.find_comment('1')
jobs = [i for i in iter]
self.assertEqual(len(jobs), 1)
self.assertEqual(str(jobs[0]), expectedSchedule)
def test_scheduleRemovedCorrectly(self):
expectedSchedule = '0 7 1 1 * /bin/sh /replylater/src/core/runmessage.sh --id=1 --data=sqllite # 1'
tz = timezone(timedelta(hours=5, minutes=30))
d = datetime(year=2022, month=1, day=1, hour=12, minute=30, tzinfo=tz)
Scheduler.scheduleReply(1, d)
c = CronTab(user=True)
iter = c.find_comment('1')
jobs = [i for i in iter]
self.assertEqual(len(jobs), 1)
self.assertEqual(str(jobs[0]), expectedSchedule)
Scheduler.removeReply(1)
c = CronTab(user=True)
iter = c.find_comment('1')
jobs = [i for i in iter]
self.assertEqual(len(jobs), 0)
if __name__ == "__main__":
unittest.main()
|
kiriappeee/reply-later
|
src/core/tests/TestScheduler.py
|
Python
|
mit
| 3,070
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UploadedFile'
db.create_table('lizard_progress_uploadedfile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_progress.Project'])),
('contractor', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_progress.Contractor'])),
('uploaded_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('uploaded_at', self.gf('django.db.models.fields.DateTimeField')()),
('path', self.gf('django.db.models.fields.CharField')(max_length=255)),
('ready', self.gf('django.db.models.fields.BooleanField')(default=False)),
('success', self.gf('django.db.models.fields.BooleanField')(default=False)),
('linelike', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('lizard_progress', ['UploadedFile'])
# Adding model 'UploadedFileError'
db.create_table('lizard_progress_uploadedfileerror', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('uploaded_file', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_progress.UploadedFile'])),
('line', self.gf('django.db.models.fields.IntegerField')(default=0)),
('error_code', self.gf('django.db.models.fields.CharField')(max_length=10)),
('error_message', self.gf('django.db.models.fields.CharField')(max_length=300)),
))
db.send_create_signal('lizard_progress', ['UploadedFileError'])
# Changing field 'Location.information'
db.alter_column('lizard_progress_location', 'information', self.gf('jsonfield.fields.JSONField')(null=True))
# Changing field 'Measurement.data'
db.alter_column('lizard_progress_measurement', 'data', self.gf('jsonfield.fields.JSONField')(null=True))
def backwards(self, orm):
# Deleting model 'UploadedFile'
db.delete_table('lizard_progress_uploadedfile')
# Deleting model 'UploadedFileError'
db.delete_table('lizard_progress_uploadedfileerror')
# Changing field 'Location.information'
db.alter_column('lizard_progress_location', 'information', self.gf('jsonfield.JSONField')(null=True))
# Changing field 'Measurement.data'
db.alter_column('lizard_progress_measurement', 'data', self.gf('jsonfield.JSONField')(null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_progress.area': {
'Meta': {'object_name': 'Area'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.Project']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'lizard_progress.availablemeasurementtype': {
'Meta': {'object_name': 'AvailableMeasurementType'},
'can_be_displayed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_icon_complete': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'default_icon_missing': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'needs_predefined_locations': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'needs_scheduled_measurements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'lizard_progress.contractor': {
'Meta': {'unique_together': "(('project', 'slug'),)", 'object_name': 'Contractor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'lizard_progress.hydrovak': {
'Meta': {'unique_together': "(('project', 'br_ident'),)", 'object_name': 'Hydrovak'},
'br_ident': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.Project']"}),
'the_geom': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': '28992'})
},
'lizard_progress.location': {
'Meta': {'unique_together': "(('location_code', 'project'),)", 'object_name': 'Location'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.Area']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'information': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'location_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.Project']"}),
'the_geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '28992', 'null': 'True'})
},
'lizard_progress.measurement': {
'Meta': {'object_name': 'Measurement'},
'data': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scheduled': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.ScheduledMeasurement']"}),
'the_geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '28992', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'lizard_progress.measurementtype': {
'Meta': {'unique_together': "(('project', 'mtype'),)", 'object_name': 'MeasurementType'},
'icon_complete': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'icon_missing': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mtype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.AvailableMeasurementType']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.Project']"})
},
'lizard_progress.project': {
'Meta': {'object_name': 'Project'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'superuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'lizard_progress.scheduledmeasurement': {
'Meta': {'unique_together': "(('project', 'contractor', 'measurement_type', 'location'),)", 'object_name': 'ScheduledMeasurement'},
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contractor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.Contractor']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.Location']"}),
'measurement_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.MeasurementType']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.Project']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'lizard_progress.uploadedfile': {
'Meta': {'object_name': 'UploadedFile'},
'contractor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.Contractor']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'linelike': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.Project']"}),
'ready': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'lizard_progress.uploadedfileerror': {
'Meta': {'object_name': 'UploadedFileError'},
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'error_message': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'uploaded_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_progress.UploadedFile']"})
}
}
complete_apps = ['lizard_progress']
|
pombredanne/lizard-progress
|
lizard_progress/migrations/0003_auto__add_uploadedfile__add_uploadedfileerror__chg_field_location_info.py
|
Python
|
gpl-3.0
| 14,099
|
#!/usr/bin/env python
"""This script generates release notes for each merged pull request from
git merge-commit messages.
Usage:
`python release.py <start_commit> <end_commit> [--filepath FILEPATH]`
For example, if you wanted to find the diff between version 1.0 and 1.2,
and write the output to the release notes file, you would type the
following:
`python release.py 1.0 1.2 -f CHANGELOG.md`
Source:
http://mattdeboard.net/2014/01/14/automatic-changelog-generation-with-git/
"""
import datetime
import os.path as op
import re
import subprocess
from collections import deque
PROJECT_URI = "https://github.com/brady-vitrano/django-starter-project/pull"
def commit_msgs(start_commit, end_commit):
"""Run the git command that outputs the merge commits (both subject
and body) to stdout, and return the output.
"""
fmt_string = ("'%s%n* [#{pr_num}]"
"(" + PROJECT_URI + "/{pr_num}) - %b'")
return subprocess.check_output([
"git",
"log",
"--pretty=format:%s" % fmt_string,
"--merges", "%s..%s" % (start_commit, end_commit)])
def release_note_lines(msgs):
"""Parse the lines from git output and format the strings using the
pull request number.
"""
ptn = r"Merge pull request #(\d+).*\n([^\n]*)'$"
pairs = re.findall(ptn, msgs, re.MULTILINE)
return deque(body.format(pr_num=pr_num) for pr_num, body in pairs)
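# Illustrative example (added): for git output such as
#     'Merge pull request #42 from user/branch
#     * [#{pr_num}](https://github.com/brady-vitrano/django-starter-project/pull/{pr_num}) - Fix the widget'
# the pattern captures ('42', '* [#{pr_num}](...) - Fix the widget'), and
# body.format(pr_num=pr_num) fills in the number, yielding
#     '* [#42](https://github.com/brady-vitrano/django-starter-project/pull/42) - Fix the widget'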
def release_header_line(version, release_date=None):
release_date = release_date or datetime.date.today().strftime('%Y/%m/%d')
return "## %s - %s" % (version, release_date)
def prepend(filename, lines, release_header=False):
"""Write `lines` (i.e. release notes) to file `filename`."""
if op.exists(filename):
with open(filename, 'r+') as f:
first_line = f.read()
f.seek(0, 0)
f.write('\n\n'.join([lines, first_line]))
else:
with open(filename, 'w') as f:
f.write(lines)
f.write('\n')
if __name__ == "__main__":
    import argparse
parser = argparse.ArgumentParser()
parser.add_argument('start_commit', metavar='START_COMMIT_OR_TAG')
parser.add_argument('end_commit', metavar='END_COMMIT_OR_TAG')
parser.add_argument('--filepath', '-f',
help="Absolute path to output file.")
parser.add_argument('--tag', '-t', metavar='NEW_TAG')
parser.add_argument(
'--date', '-d', metavar='RELEASE_DATE',
help="Date of release for listed patch notes. Use yyyy/mm/dd format.")
args = parser.parse_args()
start, end = args.start_commit, args.end_commit
lines = release_note_lines(commit_msgs(start, end))
if args.tag:
lines.appendleft(release_header_line(args.tag, args.date))
lines = '\n'.join(lines)
if args.filepath:
filename = op.abspath(args.filepath)
prepend(filename, lines)
else:
print lines
|
brady-vitrano/full-stack-django-kit
|
release.py
|
Python
|
mit
| 2,926
|
import importlib
from random import randint
from PIL import Image, ImageOps
from datetime import datetime
from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
from utils.utils import Master
from django.utils.translation import ugettext_lazy as _
from sorl.thumbnail import get_thumbnail
try:
from rol import settings_name
except ImportError:
settings_name = "settings"
settings_var = "imagemap."+settings_name
settings = importlib.import_module(settings_var)
GENDER_CHOICES = (
(0, _("masculino")),
(1, _("femenino")),
)
class DocumentType(Master):
name = models.CharField(max_length=90, verbose_name=_('name'))
abbr = models.CharField(max_length=10, verbose_name=_('abbr'))
def __unicode__(self):
return self.abbr
class Meta:
verbose_name = _('document_type')
verbose_name_plural = _('document_types')
# function to return the correct UPLOAD_TO variable for the image field.
# All the images are storage in the folder with the name of the profile
def avatar_image_name(instance, filename):
filename = filename.split('.')
filename = str(instance.pk)+datetime.now().strftime("-%Y-%m-%d-%H-%M-%S")+str('.')+str(filename[-1])
return '/'.join(['img', 'avatars', slugify(instance.user.username), filename])
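# Illustrative example (added; values assumed): for a profile with pk=7 whose
# user has username "janedoe", uploading "photo.jpg" on 2014-01-14 12:00:00
# returns roughly:
#     img/avatars/janedoe/7-2014-01-14-12-00-00.jpg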
class UserProfile(Master):
user = models.OneToOneField(User, related_name='profile', blank=True, null=True)
avatar = models.ImageField(blank=True, null=True, upload_to=avatar_image_name, verbose_name=_("avatar"))
about_me = models.TextField(max_length=220, blank=True, null=True, verbose_name=_("about_me"))
document_id = models.IntegerField(null=True, blank=True, verbose_name=_("document_id"))
document_type = models.ForeignKey(DocumentType, null=True, blank=True, verbose_name=_("document_type"))
gender = models.SmallIntegerField(null=True, blank=True, choices=GENDER_CHOICES,
verbose_name=_("gender"))
telephone = models.IntegerField(null=True, blank=True, verbose_name=_("telephone"))
cellphone = models.BigIntegerField(null=True, blank=True, verbose_name=_("cellphone"))
address = models.TextField(null=True, blank=True, verbose_name=_("address"))
birth_date = models.DateField(null=True, blank=True, verbose_name=_("birth_date"))
class Meta:
verbose_name = _('user_profile')
verbose_name_plural = _('user_profiles')
def show_thumb(self, x, y):
im = get_thumbnail(self.avatar, '%sx%s' % (x, y), crop='center', quality=99, format='JPEG')
return im.url
@property
def hexagon_avatar(self):
if self.avatar:
return self.show_thumb(150, 150)
return settings.STATIC_URL+"ghosttown/img/fantasma-usuario-46.svg"
@property
def get_full_name(self):
return self.user.get_full_name()
@property
def get_short_name(self):
return self.user.get_short_name()
@property
def email(self):
return self.user.email
@property
def first_name(self):
return self.user.first_name
@property
def last_name(self):
return self.user.last_name
def __unicode__(self):
return self.user.get_full_name()
@property
def gender_unicode(self):
if self.gender is not None:
return dict(GENDER_CHOICES)[self.gender].decode()
|
beren5000/ghosttown
|
imagemap/applications/user_profiles/models.py
|
Python
|
mit
| 3,409
|
"""
Tests for L{xmantissa.test.rendertools}.
"""
from twisted.trial.unittest import TestCase
from nevow.athena import LiveFragment, LiveElement
from nevow.loaders import stan
from nevow.tags import p, directive
from xmantissa.test.rendertools import renderLiveFragment
class LivePageRendererTestCase(TestCase):
"""
Test utility function L{render} to make sure it can render various kinds of
fragments.
"""
message = 'Hello, world.'
def docFactory(self, renderer, message):
return stan(p(render=directive(renderer))[message])
def testRenderLiveFragment(self):
"""
Test that L{render} spits out the right thing for a L{LiveFragment}.
"""
docFactory = self.docFactory('liveFragment', self.message)
self.assertIn(
self.message,
renderLiveFragment(LiveFragment(docFactory=docFactory)))
def testRenderLiveElement(self):
"""
Test that L{render} spits out the right thing for a L{LiveElement}.
"""
docFactory = self.docFactory('liveElement', self.message)
self.assertIn(
self.message,
renderLiveFragment(LiveElement(docFactory=docFactory)))
|
twisted/mantissa
|
xmantissa/test/test_rendertools.py
|
Python
|
mit
| 1,212
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields
class adhoc_base_configuration(models.TransientModel):
_inherit = 'adhoc.base.config.settings'
# Fixes
module_purchase_multic_fix = fields.Boolean(
        'Fix purchase in multi-company father/son environment',
help="""Installs the purchase_multic_fix module.""")
# Purchase modules
module_purchase_double_validation_imp = fields.Boolean(
'Adds a button for confirmed orders so that you can print the purchase order.',
help="""Installs the purchase_double_validation_imp module.""")
module_purchase_usability_extension = fields.Boolean(
'Display Invoices and Incoming Shipments on Purchase Order form view (in dedicated tabs).',
help="""Installs the purchase_usability_extension module.""")
module_purchase_discount = fields.Boolean(
        'Manage discounts on purchases',
help="""Installs the purchase_discount module.""")
module_account_analytic_purchase_contract = fields.Boolean(
'Manage contracts on Purchase.',
help="""Installs the account_analytic_purchase_contract module.""")
module_purchase_uom_prices_uoms = fields.Boolean(
'Restrict purchase uom to the product uom, purchase product uom and uoms defined in UOM Prices.',
help="""Installs the purchase_uom_prices_uoms.""")
module_purchase_line_defaults = fields.Boolean(
'Set defaults values on purchase orders in order to facilitate file import.',
help="""Installs the purchase_line_defaults.""")
module_partner_products_shortcut = fields.Boolean(
'Adds a shortcut on supplier partner form to the products supplied by this partner.',
help="""Installs the partner_products_shortcut module.""")
module_purchase_prices_update = fields.Boolean(
'Adds a button on purchase order view to update prices for the order lines.',
help="""Installs the purchase_prices_update module.""")
|
jorsea/odoo-addons
|
adhoc_base_purchase/res_config.py
|
Python
|
agpl-3.0
| 2,441
|
# -*- coding: utf-8 -*-
import os
import tempfile
from scout.commands import cli
from scout.server.extensions import store
def test_load_gene_fusion_report_research(mock_app):
"""Test command line function that load a gene fusion research report for an existing case"""
# GIVEN a database with an existing case
case_obj = store.case_collection.find_one()
case_id = case_obj["_id"]
# GIVEN that this case has no gene fusion research report
assert case_obj.get("gene_fusion_report_research") is None
runner = mock_app.test_cli_runner()
    # WHEN the update_gene_fusion command is executed providing a new gene fusion research report
with tempfile.NamedTemporaryFile(suffix=".pdf") as tf:
research_gene_fusion_report_path = os.path.dirname(tf.name)
result = runner.invoke(
cli,
[
"load",
"gene-fusion-report",
case_id,
research_gene_fusion_report_path,
"--research",
],
)
    # THEN the command should be successful
assert result.exit_code == 0
# And the gene fusion research report should have been updated
updated_case = store.case_collection.find_one()
    assert updated_case["gene_fusion_report_research"]


def test_load_gene_fusion_report_update(mock_app):
    """Test the command line function that updates the gene fusion report for an existing case"""
# GIVEN a database with an existing case
case_obj = store.case_collection.find_one()
# GIVEN that this case has an old gene fusion report
old_report = case_obj.get("gene_fusion_report")
assert old_report
case_id = case_obj["_id"]
runner = mock_app.test_cli_runner()
    # WHEN the update_gene_fusion command is executed providing a new gene fusion report
    with tempfile.NamedTemporaryFile(suffix=".pdf") as tf:
        new_report_path = tf.name  # the report file itself, not its parent directory
result = runner.invoke(
cli, ["load", "gene-fusion-report", case_id, new_report_path, "--update"]
)
    # THEN the command should be successful
assert result.exit_code == 0
# And the gene fusion report should have been updated
updated_case = store.case_collection.find_one()
assert updated_case["gene_fusion_report"] != old_report
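# Note: both tests fetch the case with store.case_collection.find_one() and no
# filter; this assumes the mocked database holds exactly one case document, so
# the same case is read before and after the CLI invocation.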
|
Clinical-Genomics/scout
|
tests/commands/load/test_load_report_cmd.py
|
Python
|
bsd-3-clause
| 2,361
|
import click
import os
import subprocess
import sys
# import utils
sys.path.append(os.path.join('/'.join(__file__.split('/')[:-1]), '../src'))
from kubeconfig.kubectl_actions import *


def _get_output_cli(command):
    """
    Run a shell command line and return its standard output.
    """
    c = command.split(' ')
    p = subprocess.Popen(c, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    return out


@click.group(invoke_without_command=False)
def cli():
    print "Welcome to teleport python-side !"


@cli.command('ports')
@click.option('--filter', help='show "used" or "available" ports', default='used')
@click.option('--docker', help='name of the docker, i.e. the short name of the server')
def ports(filter, docker):
    """Return ports"""
    print kubectl_used_ports(docker) if filter == 'used' else kubectl_available_ports(docker)


@cli.command('register')
@click.argument('servicepath')
def register(servicepath):
    """Register a service into the database"""
    kubectl_register(servicepath)


@cli.command('status')
@click.option('--ressources', help='list of resources to display', default='rc, pods')
@click.option('--all-namespaces', help='look at all namespaces', default=True)
def status(ressources, all_namespaces):
    """Print the status of all services"""
    print kubectl_status(ressources, all_namespaces)


@cli.command('logs')
@click.argument('servicename')
@click.option('-f', is_flag=True, help='follow logs, like tail -f', default=False)
def logs(servicename, f):
    """Get the full log of a service"""
    print kubectl_logs(servicename, f)


@cli.command('restart')
@click.argument('servicename')
def restart(servicename):
    """Restart a service"""
    kubectl_stop(servicename)
    kubectl_start(servicename)


@cli.command('stop')
@click.argument('servicename')
def stop(servicename):
    """Stop a service"""
    kubectl_stop(servicename)


@cli.command('start')
@click.argument('servicename')
def start(servicename):
    """Start a service"""
    kubectl_start(servicename)


@cli.command('connect')
@click.argument('servicename')
def connect(servicename):
    """Connect to a running service container"""
    kubectl_connect(servicename)


@cli.command('inspect')
@click.argument('servicename')
def inspect(servicename):
    """Get the running configuration of a service container"""
    print kubectl_describe(servicename)


if __name__ == '__main__':
    cli()
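# A few illustrative invocations, assuming the script is run directly; the
# service and docker names below are hypothetical placeholders:
#
#     python index.py ports --filter used --docker web1
#     python index.py logs my-service -f
#     python index.py restart my-service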
|
snipsco/teleport
|
bin/index.py
|
Python
|
mit
| 2,447
|
class Corpus:
    def __init__(self, id, title, contents, tags=None, tokenized_contents=None):
        self.id = id
        self.title = title
        self.contents = contents
        # Avoid a mutable default argument: a shared default list would leak
        # tags across Corpus instances.
        self.tags = tags if tags is not None else []
        self.tokenized_contents = tokenized_contents

    def to_dict(self):
        return {
            'title': self.title,
            'tags': self.tags,
            'contents': self.contents,
            'tokenized_contents': self.tokenized_contents,
            'id': self.id,
        }
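# A minimal usage sketch (all values below are made up for illustration):
#
#     doc = Corpus('42', 'Example title', 'Some raw text', tags=['demo'])
#     doc.to_dict()
#     # -> {'title': 'Example title', 'tags': ['demo'], 'contents': 'Some raw text',
#     #     'tokenized_contents': None, 'id': '42'}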
|
kmp3325/linguine-python
|
linguine/corpus.py
|
Python
|
mit
| 427
|