text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from command import PagedCommand
from color import Coloring
from error import NoSuchProjectError
from git_refs import R_M
class _Coloring(Coloring):
    """Coloring bound to the 'status' section of the user's git config."""

    def __init__(self, config):
        Coloring.__init__(self, config, "status")
class Info(PagedCommand):
    """repo subcommand printing manifest details and per-project status.

    Default mode prints a per-project summary (optionally with the
    local/remote commit diff, -d); --overview instead lists the local
    commits of every uploadable branch.
    """

    common = True
    helpSummary = "Get info on the manifest branch, current branch or unmerged branches"
    helpUsage = "%prog [-dl] [-o [-b]] [<project>...]"

    def _Options(self, p):
        # Register this command's optparse flags.
        p.add_option('-d', '--diff',
                     dest='all', action='store_true',
                     help="show full info and commit diff including remote branches")
        p.add_option('-o', '--overview',
                     dest='overview', action='store_true',
                     help='show overview of all local commits')
        p.add_option('-b', '--current-branch',
                     dest="current_branch", action="store_true",
                     help="consider only checked out branches")
        p.add_option('-l', '--local-only',
                     dest="local", action="store_true",
                     help="Disable all remote operations")

    def Execute(self, opt, args):
        # Build the colored printers once; color configuration comes from
        # the user's global git config ("status" color section).
        self.out = _Coloring(self.manifest.globalConfig)
        self.heading = self.out.printer('heading', attr = 'bold')
        self.headtext = self.out.printer('headtext', fg = 'yellow')
        self.redtext = self.out.printer('redtext', fg = 'red')
        self.sha = self.out.printer("sha", fg = 'yellow')
        self.text = self.out.nofmt_printer('text')
        self.dimtext = self.out.printer('dimtext', attr = 'dim')

        self.opt = opt

        # Manifest-level facts: the merge branch of the "default" branch
        # and the active group selection (repo's default is all,-notdefault).
        manifestConfig = self.manifest.manifestProject.config
        mergeBranch = manifestConfig.GetBranch("default").merge
        manifestGroups = (manifestConfig.GetString('manifest.groups')
                          or 'all,-notdefault')

        self.heading("Manifest branch: ")
        if self.manifest.default.revisionExpr:
            self.headtext(self.manifest.default.revisionExpr)
        self.out.nl()
        self.heading("Manifest merge branch: ")
        self.headtext(mergeBranch)
        self.out.nl()
        self.heading("Manifest groups: ")
        self.headtext(manifestGroups)
        self.out.nl()

        self.printSeparator()

        # Default mode is the per-project diff report; --overview switches
        # to the cross-project commit overview.
        if not opt.overview:
            self.printDiffInfo(args)
        else:
            self.printCommitOverview(args)

    def printSeparator(self):
        # Visual divider between report sections.
        self.text("----------------------------")
        self.out.nl()

    def printDiffInfo(self, args):
        """Print name/path/revision/branch details for each selected project."""
        try:
            projs = self.GetProjects(args)
        except NoSuchProjectError:
            # Unknown project names: print nothing and return quietly.
            return

        for p in projs:
            self.heading("Project: ")
            self.headtext(p.name)
            self.out.nl()

            # git-flow metadata is specific to this fork; only shown when
            # the project carries a flow configuration.
            if p.flow:
                self.heading("Git-flow: ")
                self.headtext("branch("+p.flow.prefix_all+p.flow.branch_develop+",")
                self.headtext(p.flow.prefix_all+p.flow.branch_master+"), prefix(")
                self.headtext(p.flow.prefix_all+p.flow.prefix_feature+",")
                self.headtext(p.flow.prefix_all+p.flow.prefix_release+",")
                self.headtext(p.flow.prefix_all+p.flow.prefix_hotfix+",")
                self.headtext(p.flow.prefix_all+p.flow.prefix_support+",tag:'")
                self.headtext(p.flow.prefix_versiontag+"')")
                self.out.nl()

            self.heading("Mount path: ")
            self.headtext(p.worktree)
            self.out.nl()

            self.heading("Current revision: ")
            self.headtext(p.revisionExpr)
            self.out.nl()

            localBranches = p.GetBranches().keys()
            self.heading("Local Branches: ")
            self.redtext(str(len(localBranches)))
            if len(localBranches) > 0:
                self.text(" [")
                self.text(", ".join(localBranches))
                self.text("]")
            self.out.nl()

            # -d/--diff: also show commits that differ from the remote.
            if self.opt.all:
                self.findRemoteLocalDiff(p)

            self.printSeparator()

    def findRemoteLocalDiff(self, project):
        """Print commits only present locally and only present remotely."""
        # Fetch all the latest commits (skipped with -l/--local-only).
        if not self.opt.local:
            project.Sync_NetworkHalf(quiet=True, current_branch_only=True)

        # Compare against the manifest merge branch ref (R_M prefix +
        # the "default" branch's merge target).
        logTarget = R_M + self.manifest.manifestProject.config.GetBranch("default").merge

        # Temporarily flip the git wrapper out of bare mode so the
        # open-ended revision ranges resolve against the work tree's HEAD;
        # restored afterwards.
        bareTmp = project.bare_git._bare
        project.bare_git._bare = False
        localCommits = project.bare_git.rev_list(
            '--abbrev=8',
            '--abbrev-commit',
            '--pretty=oneline',
            logTarget + "..",
            '--')

        originCommits = project.bare_git.rev_list(
            '--abbrev=8',
            '--abbrev-commit',
            '--pretty=oneline',
            ".." + logTarget,
            '--')
        project.bare_git._bare = bareTmp

        self.heading("Local Commits: ")
        self.redtext(str(len(localCommits)))
        self.dimtext(" (on current branch)")
        self.out.nl()

        # Each rev-list line is "<sha> <subject>"; color the sha separately.
        for c in localCommits:
            split = c.split()
            self.sha(split[0] + " ")
            self.text(" ".join(split[1:]))
            self.out.nl()

        self.printSeparator()

        self.heading("Remote Commits: ")
        self.redtext(str(len(originCommits)))
        self.out.nl()

        for c in originCommits:
            split = c.split()
            self.sha(split[0] + " ")
            self.text(" ".join(split[1:]))
            self.out.nl()

    def printCommitOverview(self, args):
        """Print a per-project overview of all uploadable local branches."""
        all_branches = []
        for project in self.GetProjects(args):
            br = [project.GetUploadableBranch(x)
                  for x in project.GetBranches()]
            br = [x for x in br if x]
            # -b/--current-branch: restrict to the checked-out branch.
            if self.opt.current_branch:
                br = [x for x in br if x.name == project.CurrentBranch]
            all_branches.extend(br)

        if not all_branches:
            return

        self.out.nl()
        self.heading('Projects Overview')
        project = None

        for branch in all_branches:
            # Emit the project header once per project (branches arrive
            # grouped by project from the loop above).
            if project != branch.project:
                project = branch.project
                self.out.nl()
                self.headtext(project.relpath)
                self.out.nl()

            commits = branch.commits
            date = branch.date
            # '*' marks the currently checked-out branch.
            self.text('%s %-33s (%2d commit%s, %s)' % (
                branch.name == project.CurrentBranch and '*' or ' ',
                branch.name,
                len(commits),
                len(commits) != 1 and 's' or '',
                date))
            self.out.nl()

            for commit in commits:
                split = commit.split()
                self.text('{0:38}{1} '.format('','-'))
                self.sha(split[0] + " ")
                self.text(" ".join(split[1:]))
                self.out.nl()
|
{
"content_hash": "e8f511d1c2ece720967ae8ee5f57475a",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 86,
"avg_line_length": 30.538071065989847,
"alnum_prop": 0.5967420212765957,
"repo_name": "GatorQue/git-repo-flow",
"id": "649096236fade4f0a68340a8c7cf844f5c796b8d",
"size": "6619",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "subcmds/info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "405326"
},
{
"name": "Shell",
"bytes": "6043"
}
],
"symlink_target": ""
}
|
import unittest
from billy.sunlightapi import SunlightAPI
from billy.sunlightparsers import MemberParser
class TestCustomAPICalls(unittest.TestCase):
    """Integration tests for SunlightAPI.

    NOTE(review): these hit the live Sunlight service, so they require
    network access and may break if the upstream data changes.
    """

    def setUp(self):
        self.api = SunlightAPI()

    def test_get_member_recent_votes(self):
        # Test with Elizabeth Warren
        # ('W000817' is presumably her Bioguide ID -- confirm).
        results = self.api.get_member_recent_votes('W000817')
        self.assertTrue(len(results) > 0)

    def test_get_roll_call_vote(self):
        # Test with Elizabeth Warren
        # A (roll-call id, member id) pair should match exactly one record.
        results = self.api.get_roll_call_vote('s38-2017', 'W000817')
        self.assertTrue(len(results) == 1)
        self.assertTrue('voters' in results[0].keys())
class TestMemberParser(unittest.TestCase):
    """Tests for MemberParser lookups.

    NOTE(review): expected counts (100 senators, two senators per zip)
    depend on live congressional data and may drift over time.
    """

    def setUp(self):
        # MemberParser is used directly as a class, not instantiated.
        self.parser = MemberParser

    def test_find_member_by_zip(self):
        # 02052 is a Massachusetts zip, so every match should be from MA.
        results = self.parser.find_member_by_zip('02052')
        self.assertTrue(all([member['state'] == 'MA' for member in results]))

    def test_look_up_all_members_of_congress(self):
        # The Senate has 100 members.
        results = self.parser.lookup_members('senate')
        self.assertEqual(len(results), 100)
        # Everyone else in MEMBERS_OF_CONGRESS belongs to the House.
        results = self.parser.lookup_members('house')
        expected_results = len(self.parser.MEMBERS_OF_CONGRESS) - 100
        self.assertEqual(len(results), expected_results)

    def test_look_up_members_of_congress_from_specified_list(self):
        # A single zip code yields exactly two senators.
        items = self.parser.find_member_by_zip('02052')
        results = self.parser.lookup_members('senate', items)
        self.assertEqual(len(results), 2)

    def test_formalize_name(self):
        items = self.parser.find_member_by_zip('02052')
        results = self.parser.lookup_members('Warren', items)
        self.assertEqual(len(results), 1)
        member = results[0]
        formalized_name = self.parser.formalize_name(member)
        self.assertEqual(formalized_name, 'Sen. Elizabeth Warren (D-MA)')

    def test_summarize_bio(self):
        items = self.parser.find_member_by_zip('02052')
        members = self.parser.lookup_members('senate', items)
        results = self.parser.summarize_bio(members[0])
        # summarize_bio returns a (str, dict) pair.
        self.assertEqual(len(results), 2)
        self.assertEqual(type(results[0]), str)
        self.assertEqual(type(results[1]), dict)

    def test_find_members(self):
        # Free-text query without a zip matches many; with a zip, just two.
        results = self.parser.find_members('senate D')
        self.assertTrue(len(results) > 2)
        results = self.parser.find_members('senate D', '02052')
        self.assertEqual(len(results), 2)
|
{
"content_hash": "38fb800b67461b185592c8b9b3813b27",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 77,
"avg_line_length": 36.71212121212121,
"alnum_prop": 0.658687577383409,
"repo_name": "mosegontar/billybot",
"id": "056ae7583efbe40e5af497e91426b8ed9768fe3a",
"size": "2423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "billy/tests/test_sunlight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26669"
}
],
"symlink_target": ""
}
|
import os
class GitException(Exception):
    """Base class for gitpy errors; carries a human-readable message.

    str() and repr() both render as "<ClassName>: <msg>".
    """

    def __init__(self, msg):
        super(GitException, self).__init__()
        self.msg = msg

    def __repr__(self):
        cls_name = type(self).__name__
        return "{0}: {1}".format(cls_name, self.msg)

    __str__ = __repr__
class CannotFindRepository(GitException):
    """Raised when a path does not belong to a git repository."""
    pass
class MergeConflict(GitException):
    """Raised when a merge stops on conflicting changes."""

    def __init__(self, msg='Merge Conflict'):
        # Forward the (possibly default) message to the GitException base.
        super(MergeConflict, self).__init__(msg)
class GitCommandFailedException(GitException):
    """Raised when a spawned git command fails.

    Captures the command, the absolute working directory, and the child
    process's stdout/stderr for diagnostics.
    """

    def __init__(self, directory, command, popen):
        # msg is filled in below, once the child's output has been read.
        super(GitCommandFailedException, self).__init__(None)
        self.command = command
        self.directory = os.path.abspath(directory)
        # NOTE(review): reading both pipes assumes the process has already
        # terminated (returncode is consulted below); .read() could block
        # otherwise -- confirm callers raise this only after wait()/poll().
        self.stderr = popen.stderr.read()
        self.stdout = popen.stdout.read()
        self.popen = popen
        self.msg = "Command %r failed in %s (%s):\n%s\n%s" % (command, self.directory, popen.returncode,
                                                              self.stderr, self.stdout)
class NonexistentRefException(GitException):
    """Raised when a requested ref (branch/tag/commit) does not exist."""
    pass
|
{
"content_hash": "81b55a5103760370cbbc069f72e0c410",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 104,
"avg_line_length": 34.51724137931034,
"alnum_prop": 0.6183816183816184,
"repo_name": "t-amerssonis/okami",
"id": "4c4185cb71a30f42f64810de8a49a7a99a56356a",
"size": "2527",
"binary": false,
"copies": "122",
"ref": "refs/heads/master",
"path": "src/Okami/third-parts/gitpy/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""A libusb1-based ADB reimplementation.
ADB was giving us trouble with its client/server architecture, which is great
for users and developers, but not so great for reliable scripting. This will
allow us to more easily catch errors as Python exceptions instead of checking
random exit codes, and all the other great benefits from not going through
subprocess and a network socket.
All timeouts are in milliseconds.
"""
import cStringIO
import os
import socket
import adb_protocol
import common
import filesync_protocol
# USB interface identifiers for the ADB protocol.
# From adb.h
CLASS = 0xFF
SUBCLASS = 0x42
PROTOCOL = 0x01
# pylint: disable=invalid-name
# Matcher used to recognize USB devices that expose an ADB interface.
DeviceIsAvailable = common.InterfaceMatcher(CLASS, SUBCLASS, PROTOCOL)

try:
    # Imported locally to keep compatibility with previous code.
    from sign_m2crypto import M2CryptoSigner
except ImportError:
    # Ignore this error when M2Crypto is not installed, there are other options.
    pass
class AdbCommands(object):
    """Exposes adb-like methods for use.

    Some methods are more-pythonic and/or have more options.

    Note: this module targets Python 2 (cStringIO, basestring).
    """
    # Handlers are class attributes so subclasses can swap implementations.
    protocol_handler = adb_protocol.AdbMessage
    filesync_handler = filesync_protocol.FilesyncProtocol

    @classmethod
    def ConnectDevice(
            cls, port_path=None, serial=None, default_timeout_ms=None, **kwargs):
        """Convenience function to get an adb device from usb path or serial.

        Args:
            port_path: The filename of usb port to use.
            serial: The serial number of the device to use.
            default_timeout_ms: The default timeout in milliseconds to use.

        If serial specifies a TCP address:port, then a TCP connection is
        used instead of a USB connection.
        """
        if serial and ':' in serial:
            handle = common.TcpHandle(serial)
        else:
            handle = common.UsbHandle.FindAndOpen(
                DeviceIsAvailable, port_path=port_path, serial=serial,
                timeout_ms=default_timeout_ms)
        return cls.Connect(handle, **kwargs)

    def __init__(self, handle, device_state):
        # handle: an open UsbHandle/TcpHandle; device_state: e.g. 'device'.
        self.handle = handle
        self._device_state = device_state

    def Close(self):
        """Release the underlying USB/TCP handle."""
        self.handle.Close()

    @classmethod
    def Connect(cls, usb, banner=None, **kwargs):
        """Connect to the device.

        Args:
            usb: UsbHandle or TcpHandle instance to use.
            banner: See protocol_handler.Connect.
            **kwargs: See protocol_handler.Connect for kwargs. Includes rsa_keys,
                and auth_timeout_ms.

        Returns:
            An instance of this class if the device connected successfully.
        """
        if not banner:
            banner = socket.gethostname()
        device_state = cls.protocol_handler.Connect(usb, banner=banner, **kwargs)
        # Remove banner and colons after device state (state::banner)
        device_state = device_state.split(':')[0]
        return cls(usb, device_state)

    @classmethod
    def Devices(cls):
        """Get a generator of UsbHandle for devices available."""
        return common.UsbHandle.FindDevices(DeviceIsAvailable)

    def GetState(self):
        """Return the device state captured at Connect time."""
        return self._device_state

    def Install(self, apk_path, destination_dir='', timeout_ms=None):
        """Install an apk to the device.

        Doesn't support verifier file, instead allows destination directory to be
        overridden.

        Args:
            apk_path: Local path to apk to install.
            destination_dir: Optional destination directory. Use /system/app/ for
                persistent applications.
            timeout_ms: Expected timeout for pushing and installing.

        Returns:
            The pm install output.
        """
        if not destination_dir:
            destination_dir = '/data/local/tmp/'
        basename = os.path.basename(apk_path)
        destination_path = destination_dir + basename
        # Push first, then install from the on-device path (-r reinstalls).
        self.Push(apk_path, destination_path, timeout_ms=timeout_ms)
        return self.Shell('pm install -r "%s"' % destination_path,
                          timeout_ms=timeout_ms)

    def Push(self, source_file, device_filename, mtime='0', timeout_ms=None):
        """Push a file or directory to the device.

        Args:
            source_file: Either a filename, a directory or file-like object to push to
                the device.
            device_filename: Destination on the device to write to.
            mtime: Optional, modification time to set on the file.
            timeout_ms: Expected timeout for any part of the push.
        """
        if isinstance(source_file, basestring):
            if os.path.isdir(source_file):
                # Recursively push directory contents one entry at a time.
                self.Shell("mkdir " + device_filename)
                for f in os.listdir(source_file):
                    self.Push(os.path.join(source_file, f), device_filename + '/' + f)
                return
            source_file = open(source_file)
        connection = self.protocol_handler.Open(
            self.handle, destination='sync:', timeout_ms=timeout_ms)
        self.filesync_handler.Push(connection, source_file, device_filename,
                                   mtime=int(mtime))
        connection.Close()

    def Pull(self, device_filename, dest_file='', timeout_ms=None):
        """Pull a file from the device.

        Args:
            device_filename: Filename on the device to pull.
            dest_file: If set, a filename or writable file-like object.
            timeout_ms: Expected timeout for any part of the pull.

        Returns:
            The file data if dest_file is not set.
        """
        if not dest_file:
            dest_file = cStringIO.StringIO()
        elif isinstance(dest_file, basestring):
            dest_file = open(dest_file, 'w')
        connection = self.protocol_handler.Open(
            self.handle, destination='sync:',
            timeout_ms=timeout_ms)
        self.filesync_handler.Pull(connection, device_filename, dest_file)
        connection.Close()
        # An empty call to cStringIO.StringIO returns an instance of
        # cStringIO.OutputType.
        if isinstance(dest_file, cStringIO.OutputType):
            return dest_file.getvalue()

    def Stat(self, device_filename):
        """Get a file's stat() information."""
        connection = self.protocol_handler.Open(self.handle, destination='sync:')
        mode, size, mtime = self.filesync_handler.Stat(
            connection, device_filename)
        connection.Close()
        return mode, size, mtime

    def List(self, device_path):
        """Return a directory listing of the given path.

        Args:
            device_path: Directory to list.
        """
        connection = self.protocol_handler.Open(self.handle, destination='sync:')
        listing = self.filesync_handler.List(connection, device_path)
        connection.Close()
        return listing

    def Reboot(self, destination=''):
        """Reboot the device.

        Args:
            destination: Specify 'bootloader' for fastboot.
        """
        self.protocol_handler.Open(self.handle, 'reboot:%s' % destination)

    def RebootBootloader(self):
        """Reboot device into fastboot."""
        self.Reboot('bootloader')

    def Remount(self):
        """Remount / as read-write."""
        return self.protocol_handler.Command(self.handle, service='remount')

    def Root(self):
        """Restart adbd as root on the device."""
        return self.protocol_handler.Command(self.handle, service='root')

    def Shell(self, command, timeout_ms=None):
        """Run command on the device, returning the output."""
        return self.protocol_handler.Command(
            self.handle, service='shell', command=command,
            timeout_ms=timeout_ms)

    def StreamingShell(self, command, timeout_ms=None):
        """Run command on the device, yielding each line of output.

        Args:
            command: Command to run on the target.
            timeout_ms: Maximum time to allow the command to run.

        Yields:
            The responses from the shell command.
        """
        return self.protocol_handler.StreamingCommand(
            self.handle, service='shell', command=command,
            timeout_ms=timeout_ms)

    def Logcat(self, options, timeout_ms=None):
        """Run 'shell logcat' and stream the output to stdout.

        Args:
            options: Arguments to pass to 'logcat'.
            timeout_ms: Maximum time to allow the command to run.
        """
        # Fix: this class defines StreamingShell, not StreamingCommand;
        # the original call raised AttributeError at runtime.
        return self.StreamingShell('logcat %s' % options, timeout_ms)
|
{
"content_hash": "f2a1af1d5181123d932ca76c09f50d27",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 80,
"avg_line_length": 32.90677966101695,
"alnum_prop": 0.6834921452485192,
"repo_name": "maruel/python-adb",
"id": "f57645fdfd37a2c6a48079d21cb6c2f3615ebd08",
"size": "8362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adb/adb_commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "86505"
}
],
"symlink_target": ""
}
|
import abc
import numpy as np
from pySDC.Collocation import CollBase
from pySDC.Level import level
from future.utils import with_metaclass
class sweeper(with_metaclass(abc.ABCMeta)):
    """
    Base abstract sweeper class

    Attributes:
        __level: current level
        coll: collocation object
    """

    def __init__(self, params):
        """
        Initialization routine for the base sweeper

        Args:
            params: parameter object (dict); must contain 'collocation_class'
                and 'num_nodes'; all other entries become attributes of
                self.params.
        """

        # short helper class to add params as attributes
        class pars():
            def __init__(self, params):
                # apply defaults first, then let user-supplied params override
                defaults = dict()
                defaults['do_LU'] = False
                for k, v in defaults.items():
                    setattr(self, k, v)
                for k, v in params.items():
                    # 'collocation_class' is consumed by the sweeper itself,
                    # not stored as a parameter.  (Fix: the original used
                    # "not k is '...'", an identity comparison with a string
                    # literal, which is implementation-dependent; use !=.)
                    if k != 'collocation_class':
                        setattr(self, k, v)

        self.params = pars(params)

        # Instantiate the collocation object on the unit interval [0, 1].
        coll = params['collocation_class'](params['num_nodes'], 0, 1)
        assert isinstance(coll, CollBase)

        # This will be set as soon as the sweeper is instantiated at the level
        self.__level = None

        # collocation object
        self.coll = coll

    def __set_level(self, L):
        """
        Sets a reference to the current level (done in the initialization of
        the level)

        Args:
            L: current level
        """
        assert isinstance(L, level)
        self.__level = L

    @property
    def level(self):
        """
        Returns the current level
        """
        return self.__level

    def predict(self):
        """
        Predictor to fill values at nodes before first sweep

        Default prediction for the sweepers, only copies the values to all
        collocation nodes and evaluates the RHS of the ODE there
        """
        # get current level and problem description
        L = self.level
        P = L.prob

        # evaluate RHS at left point
        L.f[0] = P.eval_f(L.u[0], L.time)

        # copy u[0] to all collocation nodes, evaluate RHS
        for m in range(1, self.coll.num_nodes + 1):
            L.u[m] = P.dtype_u(L.u[0])
            L.f[m] = P.eval_f(L.u[m], L.time + L.dt * self.coll.nodes[m - 1])

        # indicate that this level is now ready for sweeps
        L.status.unlocked = True

        return None

    def compute_residual(self):
        """
        Computation of the residual using the collocation matrix Q
        """
        # get current level and problem description
        L = self.level
        P = L.prob

        # check if there are new values (e.g. from a sweep)
        assert L.status.updated

        # compute the residual for each node
        # build QF(u)
        res_norm = []
        res = self.integrate()
        for m in range(self.coll.num_nodes):
            # add u0 and subtract u at current node
            res[m] += L.u[0] - L.u[m + 1]
            # add tau if associated
            if L.tau is not None:
                res[m] += L.tau[m]
            # use abs function from data type here
            res_norm.append(abs(res[m]))

        # find maximal residual over the nodes
        L.status.residual = max(res_norm)

        # indicate that the residual has seen the new values
        L.status.updated = False

        return None

    @abc.abstractmethod
    def compute_end_point(self):
        """
        Abstract interface to end-node computation
        """
        return None

    @abc.abstractmethod
    def integrate(self):
        """
        Abstract interface to right-hand side integration
        """
        return None

    @abc.abstractmethod
    def update_nodes(self):
        """
        Abstract interface to node update
        """
        return None
|
{
"content_hash": "0eef7306dd851ba967dab0fcc7019127",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 92,
"avg_line_length": 24.376623376623378,
"alnum_prop": 0.5506126798082046,
"repo_name": "kidaa/pySDC",
"id": "fdba6341f196d4b8f77cdc8669dea4ec0d92d989",
"size": "3754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pySDC/Sweeper.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "158308"
}
],
"symlink_target": ""
}
|
'''
A simple Hy (hylang) kernel for IPython.
'''
from __future__ import print_function
import __future__ # NOQA
import ast
import re
from IPython.kernel.zmq.ipkernel import IPythonKernel
from IPython.utils.py3compat import PY3
import astor
from hy.version import __version__ as hy_version
from hy.macros import _hy_macros, load_macros
from hy.lex import tokenize
from hy.compiler import hy_compile, _compile_table, load_stdlib
from hy.core import language
from .version import __version__
# Cells starting with %%% are treated as raw (non-code) magic cells.
CELL_MAGIC_RAW = r'^%%%'
class HyKernel(IPythonKernel):
    '''
    This may not be the recommended way to create a kernel, but seems to bring
    the most features along for free.

    Seeking a better solution!
    '''
    implementation = 'hy'
    implementation_version = __version__
    language = 'hy'
    language_version = hy_version
    banner = 'Hy is a wonderful dialect of Lisp that’s embedded in Python.'
    language_info = {
        'name': 'hy',
        'mimetype': 'text/x-hylang',
        'codemirror_mode': {
            'name': 'hy'
        },
        # TODO: port CM to pygments?
        'pygments_lexer': 'ipython3'
    }

    def __init__(self, *args, **kwargs):
        '''
        Create the hy environment
        '''
        super(HyKernel, self).__init__(*args, **kwargs)
        load_stdlib()
        # Pull the core hy macros into this kernel's macro tables.
        for m in ['hy.core', 'hy.macros']:
            load_macros(m)
        self._cell_magic_warned = False
        self._line_magic_warned = False

    def _forward_input(self, *args, **kwargs):
        """Forward raw_input and getpass to the current frontend.

        via input_request
        """
        super(HyKernel, self)._forward_input(*args, **kwargs)
        if PY3:
            language.input = self.raw_input
        else:
            language.raw_input = self.raw_input
            # Python 2 input() evaluates what it reads; mirror that here.
            language.input = lambda prompt='': eval(self.raw_input(prompt))

    def do_execute(self, code, *args, **kwargs):
        '''
        Generate python code, and have IPythonKernel run it, or show why we
        couldn't have python code.
        '''
        try:
            if re.match(CELL_MAGIC_RAW, code):
                # this is a none-code magic cell
                pass
            else:
                # Split the cell into magic lines (passed through verbatim)
                # and hy chunks (compiled to python source).
                cell = []
                chunk = []
                for line in code.split("\n"):
                    if line[:2] == "%%":
                        # cell magic
                        cell.append(line)
                    elif line and line[0] in "!%":
                        # line magic: flush any pending hy chunk first.
                        # (Fix: the original indexed line[0] unguarded, so a
                        # blank line raised IndexError and aborted the cell.)
                        if chunk:
                            cell.append(self.compile_chunk(chunk))
                            chunk = []
                        cell.append(line)
                    else:
                        chunk.append(line)
                if chunk:
                    cell.append(self.compile_chunk(chunk))
                code = "\n".join(cell)
        except Exception as err:
            if (not hasattr(err, 'source')) or not err.source:
                err.source = code
            # shell will find the last exception
            self.shell.showsyntaxerror()
            # an empty code cell is basically a no-op
            code = ''
        return super(HyKernel, self).do_execute(code, *args, **kwargs)

    def compile_chunk(self, chunk):
        """Compile a list of hy source lines into python source text."""
        tokens = tokenize("\n".join(chunk))
        _ast = hy_compile(tokens, '__console__', root=ast.Interactive)
        # Re-wrap as a Module so astor can render plain source from it.
        _ast_for_print = ast.Module()
        _ast_for_print.body = _ast.body
        return astor.codegen.to_source(_ast_for_print)

    def do_complete(self, code, cursor_pos):
        """Complete names, including hy macros and special forms."""
        # let IPython do the heavy lifting for variables, etc.
        txt, matches = self.shell.complete('', code, cursor_pos)
        # mangle underscores into dashes
        matches = [match.replace('_', '-') for match in matches]
        for p in list(_hy_macros.values()) + [_compile_table]:
            p = filter(lambda x: isinstance(x, str), p.keys())
            p = [x.replace('_', '-') for x in p]
            matches.extend([
                x for x in p
                if x.startswith(txt) and x not in matches
            ])
        return {
            'matches': matches,
            'cursor_end': cursor_pos,
            'cursor_start': cursor_pos - len(txt),
            'metadata': {},
            'status': 'ok'
        }
if __name__ == '__main__':
    # Allow launching the kernel directly (python -m ... hy_kernel).
    from IPython.kernel.zmq.kernelapp import IPKernelApp
    IPKernelApp.launch_instance(kernel_class=HyKernel)
|
{
"content_hash": "2e68ece91c89c7be0628992393e89992",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 78,
"avg_line_length": 30.643835616438356,
"alnum_prop": 0.5346446133214126,
"repo_name": "bollwyvl/hy_kernel",
"id": "25a83f5dbdd36a4032f6435ab6c0630ea1a56563",
"size": "4476",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hy_kernel/hy_kernel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "3521"
},
{
"name": "JavaScript",
"bytes": "14521"
},
{
"name": "Jupyter Notebook",
"bytes": "48640"
},
{
"name": "Python",
"bytes": "12476"
}
],
"symlink_target": ""
}
|
"""Tests of Spanish g2p rules."""
from pynini.examples import g2p
from absl.testing import absltest
class SpanishG2PTest(absltest.TestCase):
    """Spot-checks the Spanish grapheme-to-phoneme rules."""

    def assertPron(self, grapheme: str, phoneme: str):
        # One word in, exactly one expected pronunciation out.
        self.assertEqual(g2p.g2p(grapheme), phoneme)

    def testG2P(self):
        # (spelling, expected pronunciation) pairs, checked in order.
        cases = [
            ("anarquista", "anarkista"),
            ("cantar", "kantar"),
            ("gañir", "gaɲir"),
            ("hacer", "aser"),
            ("hijo", "ixo"),
            ("llave", "ʝabe"),
            ("pero", "peɾo"),
            ("perro", "pero"),
            ("vivir", "bibir"),
        ]
        for grapheme, phoneme in cases:
            self.assertPron(grapheme, phoneme)
if __name__ == "__main__":
absltest.main()
|
{
"content_hash": "267893da64ddf910f9cbf470f6c8e19a",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 52,
"avg_line_length": 24.555555555555557,
"alnum_prop": 0.6666666666666666,
"repo_name": "kylebgorman/pynini",
"id": "5d3924ab3beb07bf12107768e7569c3102d5c8b5",
"size": "1344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/g2p_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "173515"
},
{
"name": "Cython",
"bytes": "285320"
},
{
"name": "Python",
"bytes": "287089"
},
{
"name": "Shell",
"bytes": "1897"
},
{
"name": "Starlark",
"bytes": "31558"
}
],
"symlink_target": ""
}
|
from runner.koan import *
class AboutInheritance(Koan):
    """Koans demonstrating Python class inheritance."""

    class Dog:
        # Base class: every dog has a (read-only) name and can bark.
        def __init__(self, name):
            self._name = name

        @property
        def name(self):
            return self._name

        def bark(self):
            return "WOOF"

    class Chihuahua(Dog):
        # Subclass: adds new behavior (wag) and overrides bark.
        def wag(self):
            return "happy"

        def bark(self):
            return "yip"

    def test_subclasses_have_the_parent_as_an_ancestor(self):
        self.assertEqual(True, issubclass(self.Chihuahua, self.Dog))

    def test_this_all_classes_in_python_3_ultimately_inherit_from_object_class(self):
        self.assertEqual(True, issubclass(self.Chihuahua, object))

        # Note: This isn't the case in Python 2. In that version you have
        # to inherit from a built in class or object explicitly

    def test_instances_inherit_behavior_from_parent_class(self):
        chico = self.Chihuahua("Chico")
        self.assertEqual("Chico", chico.name)

    def test_subclasses_add_new_behavior(self):
        chico = self.Chihuahua("Chico")
        self.assertEqual("happy", chico.wag())

        # The base class does not gain the subclass's new method.
        fido = self.Dog("Fido")
        with self.assertRaises(AttributeError): fido.wag()

    def test_subclasses_can_modify_existing_behavior(self):
        chico = self.Chihuahua("Chico")
        self.assertEqual("yip", chico.bark())

        fido = self.Dog("Fido")
        self.assertEqual("WOOF", fido.bark())

    # ------------------------------------------------------------------

    class BullDog(Dog):
        def bark(self):
            # Extends (rather than replaces) the parent behavior.
            return super().bark() + ", GRR"
            # Note, super() is much simpler to use in Python 3!

    def test_subclasses_can_invoke_parent_behavior_via_super(self):
        ralph = self.BullDog("Ralph")
        self.assertEqual("WOOF, GRR", ralph.bark())

    # ------------------------------------------------------------------

    class GreatDane(Dog):
        def growl(self):
            # super() works even from a method name the parent lacks.
            return super().bark() + ", GROWL"

    def test_super_works_across_methods(self):
        george = self.GreatDane("George")
        self.assertEqual("WOOF, GROWL", george.growl())

    # ---------------------------------------------------------

    class Pug(Dog):
        def __init__(self, name):
            # Deliberately does NOT call the parent __init__.
            pass

    class Greyhound(Dog):
        def __init__(self, name):
            super().__init__(name)

    def test_base_init_does_not_get_called_automatically(self):
        snoopy = self.Pug("Snoopy")
        # _name was never set, so the name property raises.
        with self.assertRaises(AttributeError): name = snoopy.name

    def test_base_init_has_to_be_called_explicitly(self):
        boxer = self.Greyhound("Boxer")
        self.assertEqual("Boxer", boxer.name)
|
{
"content_hash": "546cc339d076dae676f975a4e9b24b0a",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 85,
"avg_line_length": 31.058139534883722,
"alnum_prop": 0.5563459378509922,
"repo_name": "kipel/koans",
"id": "0ae0e242f639de1186848a064fc4ebb14e14d9f7",
"size": "2718",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python_koans/python3/koans/about_inheritance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "331433"
},
{
"name": "Ruby",
"bytes": "92459"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
}
|
"""
Tests for the ConfigurationAdmin tests
:author: Thomas Calmant
"""
# Standard library
import json
import os
import shutil
import time
try:
import unittest2 as unittest
except ImportError:
import unittest
# Pelix
import pelix.framework
import pelix.services as services
from pelix.utilities import use_service
# ------------------------------------------------------------------------------
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Use a local configuration folder
conf_folder = os.path.join(os.path.dirname(__file__), "conf")
class ConfigurationAdminTest(unittest.TestCase):
"""
Tests for configuration admin methods
"""
def setUp(self):
"""
Sets up the test
"""
# Create the framework
self.framework = pelix.framework.create_framework(
('pelix.ipopo.core', 'pelix.services.configadmin'),
{'configuration.folder': conf_folder})
self.framework.start()
context = self.framework.get_bundle_context()
# Get the service
self.config_ref = context.get_service_reference(
services.SERVICE_CONFIGURATION_ADMIN)
self.config = context.get_service(self.config_ref)
def tearDown(self):
"""
Cleans up for next test
"""
# Release the service
self.framework.get_bundle_context().unget_service(self.config_ref)
pelix.framework.FrameworkFactory.delete_framework()
self.config = None
self.config_ref = None
@classmethod
def tearDownClass(cls):
"""
Cleans up after all tests have been executed
"""
shutil.rmtree(conf_folder)
def testCreateFactoryConfiguration(self):
"""
Tests the create factory configuration method
"""
# Invalid name
for value in (None, "", " "):
self.assertRaises(ValueError,
self.config.create_factory_configuration, value)
# Invalid type
for value in ([], 12, True):
self.assertRaises(ValueError,
self.config.create_factory_configuration, value)
# Create a configuration
factory_pid = "test.ca.factory"
config = self.config.create_factory_configuration(factory_pid)
pid = config.get_pid()
# Check validity
self.assertIsNotNone(config, "No configuration returned")
self.assertEqual(config.get_factory_pid(), factory_pid,
"Wrong factory PID")
self.assertIn(factory_pid, pid, "PID doesn't contain the Factory PID")
self.assertIsNone(config.get_properties(),
"Fresh factory configuration has properties")
# Check Factory/Configuration PIDs
self.assertRaises(KeyError, self.config.get_configuration, factory_pid)
# Delete the configuration
config.delete()
# Create a new one
config2 = self.config.create_factory_configuration(factory_pid)
# They must be different and have different PID
self.assertIsNot(
config, config2,
"ConfigAdmin returned a deleted factory configuration")
self.assertNotEqual(pid, config2.get_pid(),
"Same PID for new configuration")
# Delete the new one
config2.delete()
def testGetConfiguration(self):
"""
Tests the get_configuration method (and the configuration bean)
"""
# Create the configuration
pid = "test.ca.get"
config = self.config.get_configuration(pid)
# It is not valid and has no properties
self.assertFalse(config.is_valid(), "Fresh configuration is valid")
self.assertIsNone(config.get_properties(),
"Fresh configuration has properties")
# Update properties
config.update({"answer": 42})
# Ensure we still have the same object
self.assertIs(config, self.config.get_configuration(pid),
"Configuration object changed")
# Ensure we have the new properties
self.assertTrue(config.is_valid(), "Configuration is still invalid")
properties = config.get_properties()
self.assertEqual(properties[services.CONFIG_PROP_PID], pid,
"Different PID in properties")
self.assertEqual(properties["answer"], 42, "Configuration not updated")
# Delete the configuration
config.delete()
# Ensure we'll get a new one
config2 = self.config.get_configuration(pid)
self.assertIsNot(config, config2,
"ConfigAdmin returned a deleted configuration")
# Clean up
config2.delete()
def testListConfiguration(self):
"""
Tests the list configuration method
"""
# There should be nothing at first
configs = self.config.list_configurations()
self.assertIsNotNone(configs,
"list_configurations() must not return None")
self.assertSetEqual(configs, set(), "Non-empty result set")
# Add a configuration
pid = "test.ca.list"
config = self.config.get_configuration(pid)
# Simple pre-check
self.assertFalse(config.is_valid(), "Fresh configuration is valid")
# It must be visible, but must not match filters
self.assertSetEqual(self.config.list_configurations(), {config},
"Incorrect result set")
ldap_filter = "({0}={1})".format(services.CONFIG_PROP_PID, pid)
self.assertSetEqual(self.config.list_configurations(ldap_filter),
set(), "Invalid configuration matches a filter")
# Update the configuration
config.update({'arthur': 'dent'})
# It must be visible, even with filters
self.assertSetEqual(self.config.list_configurations(), {config},
"Incorrect result set")
filters = [ # PID
"({0}={1})".format(services.CONFIG_PROP_PID, pid),
# Property
"({0}={1})".format('arthur', 'dent'),
# Both
"(&({0}={1})({2}={3}))".format(services.CONFIG_PROP_PID, pid,
'arthur', 'dent'),
]
for ldap_filter in filters:
self.assertSetEqual(
self.config.list_configurations(ldap_filter), {config},
"Configuration doesn't match filter {0}".format(ldap_filter))
# Add a new configuration
config2 = self.config.get_configuration(pid + "-bis")
self.assertSetEqual(
self.config.list_configurations(), {config, config2},
"Incorrect result set")
# Delete it
config2.delete()
self.assertSetEqual(self.config.list_configurations(), {config},
"Incorrect result set")
# Delete the first one
config.delete()
self.assertSetEqual(configs, set(), "Non-empty result set")
    def testPersistence(self):
        """
        Tests configuration reload: a configuration stored before a framework
        restart must still be available (with its properties) afterwards.
        """
        pid = "test.ca.persistence"
        props = {"zaphod": "beeblebrox"}

        # Create a configuration
        config = self.config.get_configuration(pid)
        config.update(props)

        # Forget it locally
        config = None

        # Stop the framework
        self.tearDown()

        # Restart it (setUp re-acquires self.config from the new framework)
        self.setUp()

        # Reload the configuration
        config = self.config.get_configuration(pid)

        # Compare properties (subset: ConfigAdmin adds its own entries,
        # e.g. the PID)
        # NOTE(review): assertDictContainsSubset is deprecated in modern
        # unittest versions; consider a dict-view subset check instead.
        self.assertDictContainsSubset(props, config.get_properties(),
                                      "Properties lost with framework restart")

        # Delete the configuration
        config.delete()
# ------------------------------------------------------------------------------
class ManagedServiceTest(unittest.TestCase):
    """
    Tests the behavior of managed services
    """
    def setUp(self):
        """
        Sets up the test: starts a framework with iPOPO and ConfigAdmin,
        grabs the ConfigurationAdmin service and installs (without starting)
        the bundle providing the test managed service.
        """
        self.framework = pelix.framework.create_framework(
            ('pelix.ipopo.core', 'pelix.services.configadmin'),
            {'configuration.folder': conf_folder})
        self.framework.start()
        context = self.framework.get_bundle_context()

        # Get the ConfigAdmin service
        self.config_ref = context.get_service_reference(
            services.SERVICE_CONFIGURATION_ADMIN)
        self.config = context.get_service(self.config_ref)

        # Install the test bundle (don't start it)
        self.bundle = context.install_bundle(
            'tests.services.configadmin_bundle')
        self.pid = self.bundle.get_module().CONFIG_PID

        # Remove existing configurations
        for config in self.config.list_configurations():
            config.delete()

    def tearDown(self):
        """
        Cleans up for next test
        """
        # Remove existing configurations
        for config in self.config.list_configurations():
            config.delete()

        # Release the service
        self.framework.get_bundle_context().unget_service(self.config_ref)
        pelix.framework.FrameworkFactory.delete_framework()
        self.config = None
        self.config_ref = None

    @classmethod
    def tearDownClass(cls):
        """
        Cleans up after all tests have been executed
        """
        shutil.rmtree(conf_folder)

    def get_ref(self):
        """
        Retrieves the reference to the managed service provided by the test
        bundle
        """
        return self.bundle.get_registered_services()[0]

    def pause(self):
        """
        Small pause to let the task pool notify the services
        """
        time.sleep(.2)

    def check_call_count(self, test_svc, expected_count):
        """
        Checks that the given test service has been notified exactly
        ``expected_count`` times, then resets its counter.
        """
        # Fixed assertion message: the original said "called more than X
        # times" even when the service had been called fewer times.
        self.assertEqual(test_svc.call_count, expected_count,
                         "updated() called {0} times, expected {1}"
                         .format(test_svc.call_count, expected_count))
        test_svc.call_count = 0

    def testNoConfigDelete(self):
        """
        Tests the behaviour of the service with an empty configuration
        """
        # Start the test bundle
        self.bundle.start()

        # Get the service
        with use_service(self.framework.get_bundle_context(),
                         self.get_ref()) as svc:
            # Create the configuration
            config = self.config.get_configuration(self.pid)

            # Give some time for the possible erroneous notification
            self.pause()

            # Nothing should have happened yet
            self.assertIsNone(svc.value, "Value has been set")
            self.assertFalse(
                svc.deleted, "Configuration considered as deleted")

            # Delete the configuration
            config.delete()

            # Give some time for the possible erroneous notification
            self.pause()

            # Nothing should have happened either
            self.assertIsNone(svc.value, "Value has been set")
            self.assertFalse(
                svc.deleted, "Configuration considered as deleted")

    def testEarlyConfig(self):
        """
        Tests the behaviour if a configuration is already set when the managed
        service is registered
        """
        # Create the configuration
        config = self.config.get_configuration(self.pid)
        config.update({'config.value': 42})

        # Start the test bundle
        self.bundle.start()

        # Get the service
        with use_service(self.framework.get_bundle_context(),
                         self.get_ref()) as svc:
            # Give some time for the notification
            self.pause()

            # The service should already have been configured
            self.assertEqual(svc.value, 42, "Value hasn't been set")
            self.assertFalse(
                svc.deleted, "Configuration considered as deleted")

            # Delete the configuration
            config.delete()

            # Give some time for the notification
            self.pause()

            # The flag must have been set (fixed message polarity: it is
            # displayed when the flag was NOT set)
            self.assertTrue(svc.deleted,
                            "Configuration not considered as deleted")

    def testLateConfig(self):
        """
        Tests the behaviour if a configuration is created after the managed
        service has been registered
        """
        # Start the test bundle
        self.bundle.start()

        # Get the service
        with use_service(self.framework.get_bundle_context(),
                         self.get_ref()) as svc:
            # Give some time for the notification
            self.pause()

            # Nothing should have happened yet
            self.assertIsNone(svc.value, "Value has been set")
            self.assertFalse(
                svc.deleted, "Configuration considered as deleted")

            # Create the configuration
            config = self.config.get_configuration(self.pid)
            config.update({'config.value': 42})

            # Update is done in another thread
            self.pause()

            # The service should have been configured
            self.assertEqual(svc.value, 42, "Value hasn't been set")
            self.assertFalse(
                svc.deleted, "Configuration considered as deleted")

            # Delete the configuration
            config.delete()

            # Give some time for the notification
            self.pause()

            # The flag must have been set (fixed message polarity)
            self.assertTrue(svc.deleted,
                            "Configuration not considered as deleted")

    def testUpdateConfig(self):
        """
        Tests the behaviour if a configuration is updated
        """
        # Create the configuration
        config = self.config.get_configuration(self.pid)

        # Start the test bundle
        self.bundle.start()

        # Get the service
        with use_service(self.framework.get_bundle_context(),
                         self.get_ref()) as svc:
            # Give some time for the notification
            self.pause()

            # Nothing should have happened yet
            self.check_call_count(svc, 0)
            self.assertIsNone(svc.value, "Value has been set")
            self.assertFalse(
                svc.deleted, "Configuration considered as deleted")

            # Update the configuration
            config.update({'config.value': 42})

            # Update is done in another thread
            self.pause()

            # The service should have been configured
            self.check_call_count(svc, 1)
            self.assertEqual(svc.value, 42, "Value hasn't been set")
            self.assertFalse(
                svc.deleted, "Configuration considered as deleted")

            # Delete the configuration
            config.delete()

            # Give some time for the notification
            self.pause()

            # The flag must have been set (fixed message polarity)
            self.check_call_count(svc, 1)
            self.assertTrue(svc.deleted,
                            "Configuration not considered as deleted")
# ------------------------------------------------------------------------------
class FileInstallTest(unittest.TestCase):
    """
    Tests the behavior of FileInstall with ConfigurationAdmin
    """
    def setUp(self):
        """
        Sets up the test: starts a framework with ConfigAdmin, installs the
        FileInstall bundle (started on demand by the tests) and the bundle
        providing the test managed service.
        """
        self.framework = pelix.framework.create_framework(
            ('pelix.ipopo.core', 'pelix.services.configadmin'),
            {'configuration.folder': conf_folder})
        self.framework.start()
        context = self.framework.get_bundle_context()

        # Install (but don't start) the FileInstall bundle
        self.bnd_fileinstall = context.install_bundle(
            'pelix.services.fileinstall')

        # Get the ConfigAdmin service
        self.config_ref = context.get_service_reference(
            services.SERVICE_CONFIGURATION_ADMIN)
        self.config = context.get_service(self.config_ref)

        # Install the test bundle (don't start it)
        self.bundle = context.install_bundle(
            'tests.services.configadmin_bundle')
        self.pid = self.bundle.get_module().CONFIG_PID

    def start_fileinstall(self):
        """
        Starts the file install bundle and tweaks its service
        """
        # Start the bundle
        self.bnd_fileinstall.start()

        # Speed up the poll time so the tests don't have to wait long
        context = self.framework.get_bundle_context()
        fileinstall_ref = context.get_service_reference(
            services.SERVICE_FILEINSTALL)
        with use_service(context, fileinstall_ref) as svc:
            svc._poll_time = .1
            time.sleep(.5)

    def tearDown(self):
        """
        Cleans up for next test
        """
        # Release the service
        self.framework.get_bundle_context().unget_service(self.config_ref)
        pelix.framework.FrameworkFactory.delete_framework()
        self.config = None
        self.config_ref = None

    @classmethod
    def tearDownClass(cls):
        """
        Cleans up after all tests have been executed
        """
        shutil.rmtree(conf_folder)

    def get_ref(self):
        """
        Retrieves the reference to the managed service provided by the test
        bundle
        """
        return self.bundle.get_registered_services()[0]

    def check_call_count(self, test_svc, expected_count):
        """
        Checks that the given test service has been notified exactly
        ``expected_count`` times, then resets its counter.
        """
        # Added an explicit assertion message, for consistency with
        # ManagedServiceTest.check_call_count
        self.assertEqual(test_svc.call_count, expected_count,
                         "updated() called {0} times, expected {1}"
                         .format(test_svc.call_count, expected_count))
        test_svc.call_count = 0

    def touch(self, filepath):
        """
        Updates the modification time of the given file
        """
        with open(filepath, "r"):
            os.utime(filepath, None)

    def write(self, filepath, value):
        """
        Writes the property dictionary in JSON
        """
        props = {'config.value': value}
        with open(filepath, "w") as filep:
            filep.write(json.dumps(props))

        try:
            # Change modification time to bypass weak time resolution of
            # the underlying file system
            module_stat = os.stat(filepath)
            os.utime(filepath, (module_stat.st_atime,
                                module_stat.st_mtime + 1))
        except OSError:
            # Can't touch the file, hope that the OS will see the write update
            pass

    def testAddUpdateDelete(self):
        """
        Tests a whole file life cycle: creation, update, simple touch and
        deletion of the JSON configuration file.
        """
        # Start file install
        self.start_fileinstall()
        context = self.framework.get_bundle_context()

        # Start the test bundle
        self.bundle.start()
        ref = self.get_ref()

        # Wait a little
        time.sleep(.4)
        with use_service(context, ref) as svc:
            self.check_call_count(svc, 0)
            self.assertIsNone(svc.value, "Value has been set")

        # Get the watched folder
        persistence_ref = context.get_service_reference(
            services.SERVICE_CONFIGADMIN_PERSISTENCE)
        folder = persistence_ref.get_property(services.PROP_FILEINSTALL_FOLDER)

        # JSON persistence file name
        filepath = os.path.join(folder, self.pid + '.config.js')

        # Create the empty configuration
        value = 'Ni !'
        self.write(filepath, value)

        # Wait a little
        time.sleep(.4)

        # Check if the service has been updated
        with use_service(context, ref) as svc:
            self.assertEqual(svc.value, value, "Incorrect initial value")
            self.check_call_count(svc, 1)

        # Update the properties
        value = 'Ecky-ecky-ecky-ecky-pikang-zoom-boing'
        self.write(filepath, value)

        # Wait a little
        time.sleep(.4)

        # Check if the service has been updated
        with use_service(context, ref) as svc:
            self.assertEqual(svc.value, value, "Value not updated")
            self.check_call_count(svc, 1)

            # Reset the flags
            svc.reset()

        # Touch the file: this must NOT trigger an update notification
        self.touch(filepath)

        # Wait a little
        time.sleep(.4)

        # Check if the service has been updated
        with use_service(context, ref) as svc:
            self.check_call_count(svc, 0)
            self.assertIsNone(svc.value, "File updated after simple touch")
            self.assertFalse(svc.deleted, "Configuration considered deleted")

        # Delete the file
        os.remove(filepath)

        # Wait a little
        time.sleep(.4)
        with use_service(context, ref) as svc:
            self.check_call_count(svc, 1)
            self.assertTrue(svc.deleted, "Configuration not deleted")
# ------------------------------------------------------------------------------
# Run the test suite when this module is executed directly
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "e356506e28d8b7cdeb9b7e35292f4c25",
"timestamp": "",
"source": "github",
"line_count": 653,
"max_line_length": 80,
"avg_line_length": 32.31087289433384,
"alnum_prop": 0.5731077302241813,
"repo_name": "tcalmant/ipopo",
"id": "19c73f0c15f50d0d880f81143a620534b09401cd",
"size": "21153",
"binary": false,
"copies": "1",
"ref": "refs/heads/v1",
"path": "tests/services/test_configadmin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2183067"
}
],
"symlink_target": ""
}
|
from .. import app, db, api
from flask.ext import security
import pytz
# Define models

# Many-to-many association table linking users to their roles
# (required by Flask-Security's SQLAlchemy datastore)
roles_users = db.Table('roles_users',
        db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
        db.Column('role_id', db.Integer(), db.ForeignKey('role.id'))
)
class Role(db.Model, security.RoleMixin):
    """
    A named user role, used by Flask-Security for role-based access control.
    """
    # Primary key
    id = db.Column(db.Integer(), primary_key=True)
    # Unique role name
    name = db.Column(db.String(80), unique=True)
    # Human-readable description of the role
    description = db.Column(db.String(255))

    def __str__(self):
        return self.name
class User(db.Model, security.UserMixin):
    """
    An application user account managed by Flask-Security.

    Stores credentials, confirmation/login tracking fields and the user's
    preferred timezone. Roles are attached through the ``roles_users``
    association table.
    """
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(255), unique=True)
    name = db.Column(db.String(255))
    password = db.Column(db.String(255))
    active = db.Column(db.Boolean())
    # Fix: ``confirmed_at`` was declared twice in the original class body;
    # the second declaration silently rebound the attribute. Keep one.
    confirmed_at = db.Column(db.DateTime())
    roles = db.relationship('Role', secondary=roles_users,
                            backref=db.backref('users', lazy='dynamic'))
    # Login-tracking columns maintained by Flask-Security
    last_login_at = db.Column(db.DateTime())
    current_login_at = db.Column(db.DateTime())
    last_login_ip = db.Column(db.String(32))
    current_login_ip = db.Column(db.String(32))
    login_count = db.Column(db.Integer())
    # Timezone name usable by pytz (falls back to the app default when unset)
    timezone = db.Column(db.String(64))

    def get_tz(self):
        """
        Return the user's pytz timezone, or the application's
        DEFAULT_TIMEZONE (UTC if not configured) when none is set.
        """
        if self.timezone:
            return pytz.timezone(self.timezone)
        else:
            return app.config.get("DEFAULT_TIMEZONE", pytz.utc)

    def dict(self):
        """
        A dictionary representation that can be used for JSON serialization
        """
        return {
            "email": self.email,
            "active": self.active,
            "confirmed_at": api.serialize_date(self.confirmed_at),
            "timezone": self.timezone
        }

    def __str__(self):
        return self.name or self.email
# Setup Flask-Security: expose User/Role through a SQLAlchemy datastore and
# attach the Security extension to the application
user_datastore = security.SQLAlchemyUserDatastore(db, User, Role)
app.security = security.Security(app, user_datastore)
|
{
"content_hash": "fc80a6632d726d4bfa2a8cf18a2d87fe",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 31.918032786885245,
"alnum_prop": 0.6286594761171033,
"repo_name": "andrewsnowden/flask-starter",
"id": "573a86c2aac933d72da24b01dd96f5e4797e5b44",
"size": "1947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "starter/users/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "189014"
},
{
"name": "HTML",
"bytes": "150266"
},
{
"name": "JavaScript",
"bytes": "52293"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "23870"
}
],
"symlink_target": ""
}
|
from . import xmlwriter
class Format(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Format file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self, properties={}, xf_indices=None, dxf_indices=None):
"""
Constructor.
"""
super(Format, self).__init__()
self.xf_format_indices = xf_indices
self.dxf_format_indices = dxf_indices
self.xf_index = None
self.dxf_index = None
self.num_format = 0
self.num_format_index = 0
self.font_index = 0
self.has_font = 0
self.has_dxf_font = 0
self.bold = 0
self.underline = 0
self.italic = 0
self.font_name = 'Calibri'
self.font_size = 11
self.font_color = 0x0
self.font_strikeout = 0
self.font_outline = 0
self.font_shadow = 0
self.font_script = 0
self.font_family = 2
self.font_charset = 0
self.font_scheme = 'minor'
self.font_condense = 0
self.font_extend = 0
self.theme = 0
self.hyperlink = 0
self.hidden = 0
self.locked = 1
self.text_h_align = 0
self.text_wrap = 0
self.text_v_align = 0
self.text_justlast = 0
self.rotation = 0
self.fg_color = 0
self.bg_color = 0
self.pattern = 0
self.has_fill = 0
self.has_dxf_fill = 0
self.fill_index = 0
self.fill_count = 0
self.border_index = 0
self.has_border = 0
self.has_dxf_border = 0
self.border_count = 0
self.bottom = 0
self.bottom_color = 0
self.diag_border = 0
self.diag_color = 0
self.diag_type = 0
self.left = 0
self.left_color = 0
self.right = 0
self.right_color = 0
self.top = 0
self.top_color = 0
self.indent = 0
self.shrink = 0
self.merge_range = 0
self.reading_order = 0
self.just_distrib = 0
self.color_indexed = 0
self.font_only = 0
# Convert properties in the constructor to method calls.
for key, value in properties.items():
getattr(self, 'set_' + key)(value)
###########################################################################
#
# Format properties.
#
###########################################################################
    def set_font_name(self, font_name):
        """Set the font name, e.g. 'Times New Roman'. Excel default: 'Calibri'."""
        self.font_name = font_name

    def set_font_size(self, font_size=11):
        """Set the font size. Excel default: 11."""
        self.font_size = font_size

    def set_font_color(self, font_color):
        """Set the font color (color name or '#RRGGBB' string). Default: black."""
        self.font_color = self._get_color(font_color)

    def set_bold(self, bold=1):
        """Set the bold property (default 1 = on)."""
        self.bold = bold

    def set_italic(self, italic=1):
        """Set the italic property (default 1 = on)."""
        self.italic = italic

    def set_underline(self, underline=1):
        """Set the underline property (default 1 = single underline)."""
        self.underline = underline

    def set_font_strikeout(self, font_strikeout=1):
        """Set the strikeout property (default 1 = on)."""
        self.font_strikeout = font_strikeout

    def set_font_script(self, font_script=1):
        """Set the font script property (default 1 = superscript)."""
        self.font_script = font_script

    def set_font_outline(self, font_outline=1):
        """Set the font outline property (default 1 = on)."""
        self.font_outline = font_outline

    def set_font_shadow(self, font_shadow=1):
        """Set the font shadow property (default 1 = on)."""
        self.font_shadow = font_shadow

    def set_num_format(self, num_format):
        """Set the number format string, e.g. '#,##0'."""
        self.num_format = num_format

    def set_locked(self, locked=1):
        """Set the locked (cell protection) property (default 1 = on)."""
        self.locked = locked

    def set_hidden(self, hidden=1):
        """Set the hidden property (default 1 = on)."""
        self.hidden = hidden
def set_align(self, alignment):
"""
Set the Format cell alignment.
Args:
alignment: String representing alignment. No default.
Returns:
Nothing.
"""
alignment = alignment.lower()
# Set horizontal alignment properties.
if alignment == 'left':
self.set_text_h_align(1)
if alignment == 'centre':
self.set_text_h_align(2)
if alignment == 'center':
self.set_text_h_align(2)
if alignment == 'right':
self.set_text_h_align(3)
if alignment == 'fill':
self.set_text_h_align(4)
if alignment == 'justify':
self.set_text_h_align(5)
if alignment == 'center_across':
self.set_text_h_align(6)
if alignment == 'centre_across':
self.set_text_h_align(6)
if alignment == 'distributed':
self.set_text_h_align(7)
if alignment == 'justify_distributed':
self.set_text_h_align(7)
if alignment == 'justify_distributed':
self.just_distrib = 1
# Set vertical alignment properties.
if alignment == 'top':
self.set_text_v_align(1)
if alignment == 'vcentre':
self.set_text_v_align(2)
if alignment == 'vcenter':
self.set_text_v_align(2)
if alignment == 'bottom':
self.set_text_v_align(3)
if alignment == 'vjustify':
self.set_text_v_align(4)
if alignment == 'vdistributed':
self.set_text_v_align(5)
    def set_center_across(self):
        """Set the 'center across selection' horizontal alignment."""
        self.set_text_h_align(6)

    def set_text_wrap(self, text_wrap=1):
        """Set the text wrap property (default 1 = on)."""
        self.text_wrap = text_wrap
def set_rotation(self, rotation):
"""
Set the Format rotation property.
Args:
rotation: Rotation angle. No default.
Returns:
Nothing.
"""
rotation = int(rotation)
# Map user angle to Excel angle.
if rotation == 270:
rotation = 255
elif -90 <= rotation <= 90:
if rotation < 0:
rotation = -rotation + 90
else:
raise Exception(
"Rotation rotation outside range: -90 <= angle <= 90")
self.rotation = rotation
    def set_indent(self, indent=1):
        """Set the indentation level (default 1)."""
        self.indent = indent

    def set_shrink(self, shrink=1):
        """Set the shrink-to-fit property (default 1 = on)."""
        self.shrink = shrink

    def set_text_justlast(self, text_justlast=1):
        """Set the 'justify last line' property (default 1 = on)."""
        self.text_justlast = text_justlast

    def set_pattern(self, pattern=1):
        """Set the fill pattern (default 1 = solid fill)."""
        self.pattern = pattern

    def set_bg_color(self, bg_color):
        """Set the background color (color name or '#RRGGBB' string)."""
        self.bg_color = self._get_color(bg_color)

    def set_fg_color(self, fg_color):
        """Set the foreground color (color name or '#RRGGBB' string)."""
        self.fg_color = self._get_color(fg_color)
    # set_border(style) Set cells borders to the same style
    def set_border(self, style=1):
        """
        Set all four cell borders (bottom, top, left, right) to the same
        style.

        Args:
            style: Border style. Default is 1, border type 1.

        Returns:
            Nothing.
        """
        self.set_bottom(style)
        self.set_top(style)
        self.set_left(style)
        self.set_right(style)

    # set_border_color(color) Set cells border to the same color
    def set_border_color(self, color):
        """
        Set all four cell borders (bottom, top, left, right) to the same
        color.

        Args:
            color: Color string. No default.

        Returns:
            Nothing.
        """
        self.set_bottom_color(color)
        self.set_top_color(color)
        self.set_left_color(color)
        self.set_right_color(color)
    def set_bottom(self, bottom=1):
        """Set the bottom border style (default 1, border type 1)."""
        self.bottom = bottom

    def set_bottom_color(self, bottom_color):
        """Set the bottom border color (color name or '#RRGGBB' string)."""
        self.bottom_color = self._get_color(bottom_color)

    def set_diag_type(self, diag_type=1):
        """Set the diagonal border type (default 1)."""
        self.diag_type = diag_type

    def set_left(self, left=1):
        """Set the left border style (default 1, border type 1)."""
        self.left = left

    def set_left_color(self, left_color):
        """Set the left border color (color name or '#RRGGBB' string)."""
        self.left_color = self._get_color(left_color)

    def set_right(self, right=1):
        """Set the right border style (default 1, border type 1)."""
        self.right = right

    def set_right_color(self, right_color):
        """Set the right border color (color name or '#RRGGBB' string)."""
        self.right_color = self._get_color(right_color)

    def set_top(self, top=1):
        """Set the top border style (default 1, border type 1)."""
        self.top = top

    def set_top_color(self, top_color):
        """Set the top border color (color name or '#RRGGBB' string)."""
        self.top_color = self._get_color(top_color)

    def set_diag_color(self, diag_color):
        """Set the diagonal border color (color name or '#RRGGBB' string)."""
        self.diag_color = self._get_color(diag_color)

    def set_diag_border(self, diag_border=1):
        """Set the diagonal border style (default 1, border type 1)."""
        self.diag_border = diag_border
###########################################################################
#
# Internal Format properties. These aren't documented since they are
# either only used internally or else are unlikely to be set by the user.
#
###########################################################################
    def set_has_font(self, has_font=1):
        # Set the has_font property.
        self.has_font = has_font

    def set_has_fill(self, has_fill=1):
        # Set the has_fill property.
        self.has_fill = has_fill

    def set_font_index(self, font_index):
        # Set the font_index property.
        self.font_index = font_index

    def set_xf_index(self, xf_index):
        # Set the xf_index property.
        self.xf_index = xf_index

    def set_dxf_index(self, dxf_index):
        # Set the dxf_index property (original comment wrongly said xf_index).
        self.dxf_index = dxf_index

    def set_num_format_index(self, num_format_index):
        # Set the num_format_index property.
        self.num_format_index = num_format_index

    def set_text_h_align(self, text_h_align):
        # Set the horizontal alignment code (see set_align() for the mapping).
        self.text_h_align = text_h_align

    def set_text_v_align(self, text_v_align):
        # Set the vertical alignment code (see set_align() for the mapping).
        self.text_v_align = text_v_align

    def set_reading_order(self, reading_order=1):
        # Set the reading_order property.
        self.reading_order = reading_order

    def set_valign(self, align):
        # Set vertical cell alignment. This is required by the constructor
        # properties dict to differentiate between the vertical and horizontal
        # properties.
        self.set_align(align)

    def set_font_family(self, font_family):
        # Set the Format font_family property.
        self.font_family = font_family

    def set_font_charset(self, font_charset):
        # Set the Format font_charset property.
        self.font_charset = font_charset

    def set_font_scheme(self, font_scheme):
        # Set the Format font_scheme property.
        self.font_scheme = font_scheme

    def set_font_condense(self, font_condense):
        # Set the Format font_condense property.
        self.font_condense = font_condense

    def set_font_extend(self, font_extend):
        # Set the Format font_extend property.
        self.font_extend = font_extend

    def set_theme(self, theme):
        # Set the Format theme property.
        self.theme = theme

    def set_hyperlink(self, hyperlink=1):
        # Set the properties for the hyperlink style. This doesn't
        # currently work. To be fixed when styles are supported.
        self.set_underline(1)
        self.set_theme(10)
        self.set_align('top')
        self.hyperlink = hyperlink

    def set_color_indexed(self, color_index):
        # Used in the cell comment format.
        self.color_indexed = color_index

    def set_font_only(self, font_only=True):
        # Used in the cell comment format.
        self.font_only = font_only

    # Compatibility methods.
    def set_font(self, font_name):
        # For compatibility with Excel::Writer::XLSX.
        self.font_name = font_name

    def set_size(self, font_size):
        # For compatibility with Excel::Writer::XLSX.
        self.font_size = font_size

    def set_color(self, font_color):
        # For compatibility with Excel::Writer::XLSX.
        self.font_color = self._get_color(font_color)
###########################################################################
#
# Private API.
#
###########################################################################
    def _get_align_properties(self):
        # Return properties for a Style xf <alignment> sub-element, as a
        # (changed, attribute_list) pair. Note: this method also normalizes
        # the instance's alignment attributes in place (see below).
        changed = 0
        align = []

        # Check if any alignment options in the format have been changed.
        if (self.text_h_align or self.text_v_align or self.indent
                or self.rotation or self.text_wrap or self.shrink
                or self.reading_order):
            changed = 1
        else:
            return changed, align

        # Indent is only allowed for horizontal left, right and distributed.
        # If it is defined for any other alignment or no alignment has
        # been set then default to left alignment.
        if (self.indent
                and self.text_h_align != 1
                and self.text_h_align != 3
                and self.text_h_align != 7):
            self.text_h_align = 1

        # Check for properties that are mutually exclusive.
        if self.text_wrap:
            self.shrink = 0
        if self.text_h_align == 4:
            self.shrink = 0
        if self.text_h_align == 5:
            self.shrink = 0
        if self.text_h_align == 7:
            self.shrink = 0
        if self.text_h_align != 7:
            self.just_distrib = 0
        if self.indent:
            self.just_distrib = 0

        continuous = 'centerContinuous'

        # Map internal horizontal alignment codes to XLSX attribute values.
        if self.text_h_align == 1:
            align.append(('horizontal', 'left'))
        if self.text_h_align == 2:
            align.append(('horizontal', 'center'))
        if self.text_h_align == 3:
            align.append(('horizontal', 'right'))
        if self.text_h_align == 4:
            align.append(('horizontal', 'fill'))
        if self.text_h_align == 5:
            align.append(('horizontal', 'justify'))
        if self.text_h_align == 6:
            align.append(('horizontal', continuous))
        if self.text_h_align == 7:
            align.append(('horizontal', 'distributed'))

        if self.just_distrib:
            align.append(('justifyLastLine', 1))

        # Property 'vertical' => 'bottom' is a default. It sets applyAlignment
        # without an alignment sub-element.
        if self.text_v_align == 1:
            align.append(('vertical', 'top'))
        if self.text_v_align == 2:
            align.append(('vertical', 'center'))
        if self.text_v_align == 4:
            align.append(('vertical', 'justify'))
        if self.text_v_align == 5:
            align.append(('vertical', 'distributed'))

        if self.indent:
            align.append(('indent', self.indent))
        if self.rotation:
            align.append(('textRotation', self.rotation))
        if self.text_wrap:
            align.append(('wrapText', 1))
        if self.shrink:
            align.append(('shrinkToFit', 1))

        if self.reading_order == 1:
            align.append(('readingOrder', 1))
        if self.reading_order == 2:
            align.append(('readingOrder', 2))

        return changed, align
def _get_protection_properties(self):
# Return properties for an Excel XML <Protection> element.
attribs = []
if not self.locked:
attribs.append(('locked', 0))
if self.hidden:
attribs.append(('hidden', 1))
return attribs
    def _get_format_key(self):
        # Returns a unique hash key for the complete format (font, border,
        # fill, alignment, number format and protection). Used by Workbook
        # to de-duplicate formats. (Original comment wrongly said "a font".)
        key = ':'.join(self._to_string(x) for x in (
            self._get_font_key(),
            self._get_border_key(),
            self._get_fill_key(),
            self._get_alignment_key(),
            self.num_format,
            self.locked,
            self.hidden))
        return key

    def _get_font_key(self):
        # Returns a unique hash key for a font. Used by Workbook.
        key = ':'.join(self._to_string(x) for x in (
            self.bold,
            self.font_color,
            self.font_charset,
            self.font_family,
            self.font_outline,
            self.font_script,
            self.font_shadow,
            self.font_strikeout,
            self.font_name,
            self.italic,
            self.font_size,
            self.underline))
        return key

    def _get_border_key(self):
        # Returns a unique hash key for a border style. Used by Workbook.
        key = ':'.join(self._to_string(x) for x in (
            self.bottom,
            self.bottom_color,
            self.diag_border,
            self.diag_color,
            self.diag_type,
            self.left,
            self.left_color,
            self.right,
            self.right_color,
            self.top,
            self.top_color))
        return key

    def _get_fill_key(self):
        # Returns a unique hash key for a fill style. Used by Workbook.
        key = ':'.join(self._to_string(x) for x in (
            self.pattern,
            self.bg_color,
            self.fg_color))
        return key

    def _get_alignment_key(self):
        # Returns a unique hash key for alignment formats.
        key = ':'.join(self._to_string(x) for x in (
            self.text_h_align,
            self.text_v_align,
            self.indent,
            self.rotation,
            self.text_wrap,
            self.shrink,
            self.reading_order))
        return key
def _get_xf_index(self):
# Returns the XF index number used by Excel to identify a format.
if self.xf_index is not None:
# Format already has an index number so return it.
return self.xf_index
else:
# Format doesn't have an index number so assign one.
key = self._get_format_key()
if key in self.xf_format_indices:
# Format matches existing format with an index.
return self.xf_format_indices[key]
else:
# New format requiring an index. Note. +1 since Excel
# has an implicit "General" format at index 0.
index = 1 + len(self.xf_format_indices)
self.xf_format_indices[key] = index
self.xf_index = index
return index
def _get_dxf_index(self):
# Returns the DXF index number used by Excel to identify a format.
if self.dxf_index is not None:
# Format already has an index number so return it.
return self.dxf_index
else:
# Format doesn't have an index number so assign one.
key = self._get_format_key()
if key in self.dxf_format_indices:
# Format matches existing format with an index.
return self.dxf_format_indices[key]
else:
# New format requiring an index.
index = len(self.dxf_format_indices)
self.dxf_format_indices[key] = index
self.dxf_index = index
return index
def _get_color(self, color):
# Used in conjunction with the set_xxx_color methods to convert a
# color name into an RGB formatted string. These colors are for
# backward compatibility with older versions of Excel.
named_colors = {
'black': '#000000',
'blue': '#0000FF',
'brown': '#800000',
'cyan': '#00FFFF',
'gray': '#808080',
'green': '#008000',
'lime': '#00FF00',
'magenta': '#FF00FF',
'navy': '#000080',
'orange': '#FF6600',
'pink': '#FF00FF',
'purple': '#800080',
'red': '#FF0000',
'silver': '#C0C0C0',
'white': '#FFFFFF',
'yellow': '#FFFF00',
}
if color in named_colors:
color = named_colors[color]
return color
    def _to_string(self, value):
        # Convert a value to a string for use in hash keys. On Python 2,
        # str() raises UnicodeEncodeError for non-ASCII unicode, so fall
        # back to the UTF-8 byte encoding there; on Python 3 str() never
        # raises and the except branch is dead.
        try:
            return str(value)
        except UnicodeEncodeError:
            return value.encode('utf-8')
###########################################################################
#
# XML methods.
#
###########################################################################
|
{
"content_hash": "673ac30bf93626a1d3e70fbdab6f7620",
"timestamp": "",
"source": "github",
"line_count": 983,
"max_line_length": 79,
"avg_line_length": 26.106815869786367,
"alnum_prop": 0.511163932509839,
"repo_name": "jkyeung/XlsxWriter",
"id": "4b55d9a0ac526670d02c98be3b6fb0e6ad69be49",
"size": "25888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xlsxwriter/format.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7819"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2430294"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
}
|
from .ect import ECT # noqa
|
{
"content_hash": "0afe9c8cfb5103e1846c2e133ca97544",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 28,
"avg_line_length": 29,
"alnum_prop": 0.6896551724137931,
"repo_name": "rpedigoni/trackr",
"id": "fdb697260a4b04f73755ea056312f0fa19c7c8dd",
"size": "29",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trackr/carriers/ect/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2234"
},
{
"name": "Python",
"bytes": "11332"
}
],
"symlink_target": ""
}
|
"""PKDict abstraction and utils
`PKDict` is similar to :class:`argparse.Namespace`, but is a dict that allows
you to treat elements as attributes.
:copyright: Copyright (c) 2015-2022 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
# Limit pykern imports so avoid dependency issues for pkconfig
import copy
import collections.abc
import decimal
import json
import types
import pykern.pkcompat
class PKDict(dict):
    """A subclass of dict that allows items to be read/written as attributes.
    The purpose of this is as a convenience in coding. You
    can refer to dictionary keys as attributes as long as they
    don't collide with the object's attributes. You should always
    use the `dict` interface to refer to the items of the dictionary
    programmatically, just like you would do with Javascript.
    You can reference any dict key as an attribute as long as it
    does not conflict with an attribute. For example, this works::
        x = PKDict()
        x.a = 1
        assert 1 == x.a
        x['a'] = 3
        assert 3 == x.a
        assert 'a' == list(x.keys())[0]
    You can't set an attribute that already exists. These calls throw
    exceptions::
        x.values = 1
        delattr(x, 'values')
    `dict` doesn't allow this anyway. However, you can't set or
    delete any existing attribute, even writable attributes. Indeed,
    you can't delete attributes at all. Subclasses should be "containers"
    only, not general objects.
    """
    def __copy__(self):
        # Shallow copy that preserves the (sub)class of self.
        return self.__class__(self)
    def __deepcopy__(self, memo):
        # Register rv in memo before recursing so self-referencing
        # structures terminate.
        # NOTE(review): assumes deep-copied keys compare equal to the
        # originals (true for str etc.); a key whose copy is unequal
        # would leave both entries in rv -- confirm for exotic keys.
        rv = self.copy()
        memo[id(rv)] = rv
        for k, v in rv.items():
            rv[copy.deepcopy(k, memo)] = copy.deepcopy(v, memo)
        return rv
    def __delattr__(self, name):
        # Attribute deletion is forbidden by design (see class docstring).
        # NOTE(review): the "{}" placeholder is never interpolated; the
        # exception carries both args as a tuple.
        raise PKDictNameError("{}: you cannot delete attributes", name)
    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails; fall back to
        # dict item lookup.
        if name in self:
            return self[name]
        # must match what CPython does exactly:
        # https://github.com/python/cpython/blob/d583738a87c3019dcfe06ed4a0002d1d6c9e9762/Objects/object.c#L899
        raise AttributeError(
            f"'{type(self).__name__}' object has no attribute '{name}'"
        )
    def __setattr__(self, name, value):
        # Reject keys that would shadow a real attribute (e.g. "values"),
        # since __getattr__ could then never reach them.
        if name in dir(self):
            raise PKDictNameError(
                "{}: invalid key for PKDict matches existing attribute".format(name)
            )
        super(PKDict, self).__setitem__(name, value)
    def copy(self):
        # Delegate so subclasses keep their type.
        return self.__copy__()
    def pkdel(self, name, default=None):
        """Delete item if exists and return value
        The code will survive against concurrent access, but is not thread safe.
        Never throws KeyError.
        Args:
            name (object): item to delete
        Returns:
            object: value (if exists) or default
        """
        # The finally-block deletes after the value has been captured by
        # the return expression, so read-then-delete is a single call.
        try:
            return self[name]
        except KeyError:
            return default
        finally:
            try:
                del self[name]
            except KeyError:
                pass
    def pkmerge(self, to_merge, make_copy=True):
        """Add `to_merge` to `self`
        Types are assumed to match and are not converted, e.g. dict is
        not converted to PKDict. Again, use `canonicalize` if you want
        to avoid type incompatibilities.
        `to_merge`'s values override `self`'s so if say, `to_merge` is ``{'x': None}``,
        then ``self.x`` will be `None` at the end of this call even if it had a value
        before.
        Lists from to_merge are prepended on this same principle, that
        is, to_merge "overrides" self, and prepending is defined as
        overriding. Lists must contain unique elements and duplicates will
        cause an error.
        This function recurses only on PKDicts.
        Args:
            to_merge (dict): elements will be copied into `self`
            make_copy (bool): deepcopy `to_merge` before merging [True]
        Returns:
            PKDict: self
        """
        def _type_err(key, base, merge):
            return AssertionError(
                f"key={key} type mismatch between (self) base={base} and to_merge={merge}"
            )
        if make_copy:
            to_merge = copy.deepcopy(to_merge)
        # list() so to_merge may be mutated by recursion without breaking
        # this iteration.
        for k in list(to_merge.keys()):
            t = to_merge[k]
            s = self.get(k)
            if isinstance(s, dict) and isinstance(t, dict):
                s.pkmerge(t, make_copy=False)
            elif isinstance(s, list) and isinstance(t, list):
                # prepend the to_merge values (see docstring above)
                # NOTE: creates a new list
                self[k] = t + s
                # strings, numbers, etc. are hashable, but dicts and lists are not.
                # this test ensures we don't have dup entries in lists.
                y = [x for x in self[k] if isinstance(x, collections.abc.Hashable)]
                assert len(set(y)) == len(
                    y
                ), f"duplicates in key={k} list values={self[k]}"
            elif type(s) == type(t) or s is None or t is None:
                # Just replace, because t overrides type in case of None.
                # And if s is None, it doesn't matter.
                self[k] = t
            else:
                raise _type_err(k, s, t)
        return self
    def pknested_get(self, qualifiers):
        """Split key on dots or iterable and return nested get calls
        If `qualifiers` is a str, will split on dots. Otherwise, will be iterated.
        If an element is a list or tuple, tries to index as int.
        Throws KeyError if the dictionary key doesn't exist.
        Args:
            qualifiers (str or iterable):
        Returns:
            object: value of element
        """
        d = self
        for k in qualifiers.split(".") if isinstance(qualifiers, str) else qualifiers:
            try:
                d = d[k]
            except TypeError:
                # A sequence indexed with a str raises TypeError; retry
                # with the qualifier coerced to int before giving up.
                try:
                    d = d[int(k)]
                    continue
                except (ValueError, TypeError):
                    pass
                raise
        return d
    def pksetdefault(self, *args, **kwargs):
        """Get value or set it, possibly after evaluating arg.
        Must pass an even number of args or kwargs, but not both. Each pair
        is interpreted as (key, value).
        If self does not have `key`, then it will be set. If `value` is a callable,
        it will be called to get the value to set.
        Values will be called if they are callable
        Args:
            key (object): value to get or set
            value (object): if callable, will be called, else verbatim
        Returns:
            object: self
        """
        assert not (args and kwargs), "one of args or kwargs must be set, but not both"
        if args:
            assert (
                len(args) % 2 == 0
            ), "args must be an even number (pairs of key, value)"
            # Pair up (key, value) from the flat positional list.
            i = zip(args[0::2], args[1::2])
        else:
            i = kwargs.items()
        for k, v in i:
            if k not in self:
                # Callables are evaluated lazily, only when the key is
                # actually missing.
                self[k] = v() if callable(v) else v
        return self
    def pkunchecked_nested_get(self, qualifiers, default=None):
        """Split key on dots or iterable and return nested get calls
        If `qualifiers` is a str, will split on dots. Otherwise, will be iterated.
        If the element does not exist or is not indexable, fails silently and returns `default`.
        Never raises for missing or unindexable elements.
        Args:
            qualifiers (str or iterable):
        Returns:
            object: value of element
        """
        try:
            return self.pknested_get(qualifiers)
        except (KeyError, IndexError, TypeError, ValueError):
            return default
    def pkupdate(self, *args, **kwargs):
        """Call `dict.update` and return ``self``."""
        super(PKDict, self).update(*args, **kwargs)
        return self
class PKDictNameError(NameError):
    """Raised when a key matches a builtin attribute in `dict`."""
def canonicalize(obj):
    """Convert to lists and PKDicts for simpler serialization
    Traverse `obj` to convert all values to forms that are compatible
    with serialization protocols like YAML or JSON.
    Simple objects are ensured to match their types e.g. bool, float,
    int, and str. Objects that are instances of these, are converted
    to these to ensure they are basic types, that is,
    ``canonicalize(str_subclass('a'))`` will be converted to ``str('a')``.
    bytes and bytearrays will be converted to str.
    decimal.Decimal will be converted to float.
    All objects are traversed. If no objects need to be converted,
    `obj` will be returned unmodified.
    Generators and iterables are converted to lists.
    Circularities are not detected so infinite recursion can occur.
    Args:
        obj (object): what to convert
    Returns:
        object: converted object (may or may not be the same)
    """
    o = obj
    if o is None:
        return o
    # Order matters so we don't convert bools to ints, since bools are ints.
    for x in (
        (bool,),
        (int,),
        (float,),
        (str,),
        (decimal.Decimal, float),
        ((bytes, bytearray), pykern.pkcompat.from_bytes),
        (
            dict,
            lambda y: PKDict({canonicalize(k): canonicalize(v) for k, v in y.items()}),
        ),
        (types.GeneratorType, lambda y: list(canonicalize(i) for i in y)),
        (collections.abc.Iterable, lambda y: list(canonicalize(i) for i in iter(y))),
    ):
        # x is (match_type,) or (match_type, converter); x[-1] is the
        # converter, which for the single-element tuples is the type itself.
        if isinstance(o, x[0]):
            return x[-1](o)
    # ``:.100`` truncates the repr; the original ``:100`` was a min-width
    # pad, which would bloat the message instead of bounding it.
    raise ValueError(f"unable to canonicalize type={type(o)} value={repr(o):.100}")
# Deprecated names, kept for backward compatibility; use PKDict and
# PKDictNameError in new code.
Dict = PKDict
DictNameError = PKDictNameError
def json_load_any(obj, *args, **kwargs):
    """Parse json with `PKDict` for object pairs

    Args:
        obj (object): str or object with "read" or py.path
        args (tuple): passed verbatim
        kwargs (dict): object_pairs_hook may be overriden
    Returns:
        object: parsed JSON
    """

    def _pairs_hook(*hook_args, **hook_kwargs):
        # Fall back to plain dict when a key would collide with a dict
        # attribute (PKDict refuses such keys).
        try:
            return PKDict(*hook_args, **hook_kwargs)
        except PKDictNameError:
            return dict(*hook_args, **hook_kwargs)

    kwargs.setdefault("object_pairs_hook", _pairs_hook)
    text = obj.read() if hasattr(obj, "read") else obj
    return json.loads(text, *args, **kwargs)
def unchecked_del(obj, *keys):
    """Delete each of *keys* from obj, silently skipping absent keys.

    Args:
        obj (object): What to delete from (usually dict)
        keys (object): What to delete
    """
    for key in keys:
        try:
            del obj[key]
        except KeyError:
            # Missing keys are deliberately ignored.
            pass
|
{
"content_hash": "f4e26d9fd7c7ab669ed3be8f89dd85db",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 111,
"avg_line_length": 31.86918604651163,
"alnum_prop": 0.583781811547934,
"repo_name": "radiasoft/pykern",
"id": "e195a6378d2af972ca8da03aaac7e5bba3a1fe3c",
"size": "10987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pykern/pkcollections.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "101531"
},
{
"name": "Python",
"bytes": "368016"
},
{
"name": "Shell",
"bytes": "25"
}
],
"symlink_target": ""
}
|
"""The command group for cloud container clusters."""
from googlecloudsdk.calliope import base
class Clusters(base.Group):
  """Deploy and teardown Google Container Engine clusters."""
  # NOTE(review): calliope typically renders these docstrings as the CLI
  # help text, so they are user-visible -- edit with care.

  @staticmethod
  def Args(parser):
    """Add arguments to the parser.
    Args:
      parser: argparse.ArgumentParser, This is a standard argparser parser with
        which you can register arguments. See the public argparse documentation
        for its capabilities.
    """
    # No group-level flags are registered for this command group.
    pass

  def Filter(self, context, args):
    """Modify the context that will be given to this group's commands when run.
    Args:
      context: {str:object}, A set of key-value pairs that can be used for
        common initialization among commands.
      args: argparse.Namespace: The same namespace given to the corresponding
        .Run() invocation.
    Returns:
      The refined command context.
    """
    # Pass the context through unchanged.
    return context
|
{
"content_hash": "44ab5adf1e8a02ce7f71b1a790ad8f60",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 80,
"avg_line_length": 28.53125,
"alnum_prop": 0.687842278203724,
"repo_name": "wemanuel/smry",
"id": "f797a0e18e7e13595ed1179ddab7ad6992d7a2c6",
"size": "964",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/container/commands/clusters/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3990"
},
{
"name": "Groff",
"bytes": "1221174"
},
{
"name": "HTML",
"bytes": "1873470"
},
{
"name": "JavaScript",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "6032"
},
{
"name": "PHP",
"bytes": "16660"
},
{
"name": "Python",
"bytes": "47139164"
},
{
"name": "Shell",
"bytes": "37102"
},
{
"name": "SourcePawn",
"bytes": "1160"
}
],
"symlink_target": ""
}
|
from .expression import Expression
from .nameexpr import NameExpr
class AnnotationExpr(Expression):
    """AST expression node wrapping an annotation's name."""
    def __init__(self, kwargs=None):
        # BUG FIX: the original signature used a mutable default
        # (``kwargs={}``) shared across every call; use None as the
        # sentinel and create a fresh dict per call instead.
        if kwargs is None:
            kwargs = {}
        super(AnnotationExpr, self).__init__(kwargs)
        # NameExpr name;
        name = kwargs.get(u'name', {})
        self._name = NameExpr(name)
    def __str__(self):
        return str(self._name)
|
{
"content_hash": "0a1da9edcf9a378acf39e8b634ab1cb6",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 26.53846153846154,
"alnum_prop": 0.6144927536231884,
"repo_name": "plum-umd/java-sketch",
"id": "9deaa6a5599a3da456bda19efaab018a1d61fa31",
"size": "368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jskparser/ast/expr/annotationexpr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "44034"
},
{
"name": "Java",
"bytes": "5042035"
},
{
"name": "Makefile",
"bytes": "215"
},
{
"name": "Perl",
"bytes": "495"
},
{
"name": "Python",
"bytes": "600201"
},
{
"name": "Shell",
"bytes": "46731"
}
],
"symlink_target": ""
}
|
from flask_wtf import Form
from wtforms import PasswordField, StringField, HiddenField, TextAreaField, IntegerField
from wtforms.validators import DataRequired, NumberRange, url
from flask_wtf.html5 import URLField
class PasswordForm(Form):
    # Password entry form; "next" carries the post-submit redirect target.
    next = HiddenField("next", validators=[DataRequired()])
    password = PasswordField("password", validators=[DataRequired()])
class BlogPost(Form):
    # Create/edit form for a blog post; "id" identifies an existing post
    # when editing.
    id = HiddenField("id")
    topic = StringField("topic", validators=[DataRequired()])
    content = TextAreaField("content", validators=[DataRequired()])
    tags = StringField("tags", validators=[DataRequired()])
class ProgressForm(Form):
    # Progress update form; value is validated to the 0..100 range.
    id = HiddenField("id")
    # NOTE(review): the label "name" looks like a copy-paste slip
    # (probably meant "progress") -- confirm before changing, the label
    # string is user-visible.
    progress = IntegerField("name", validators=[NumberRange(min=0, max=100)])
class BookInfoForm(Form):
    # Book metadata form.
    id = HiddenField("id")
    author = StringField("author", validators=[DataRequired()])
    name = StringField("name", validators=[DataRequired()])
    img = URLField(validators=[url()])
    # NOTE(review): this field name shadows the imported ``url`` validator
    # within the class namespace; the RHS still resolves to the module-level
    # validator because the name is not yet bound when the RHS is evaluated.
    url = URLField(validators=[url()])
    description = TextAreaField(validators=[DataRequired()])
class CommentForm(Form):
    # Single-field comment submission form.
    comment = TextAreaField(validators=[DataRequired()])
|
{
"content_hash": "f09d014fc2967a3ae496cf077b68c222",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 88,
"avg_line_length": 32.857142857142854,
"alnum_prop": 0.7217391304347827,
"repo_name": "Ignotus/bookclub",
"id": "d358d6e7fae6690e2aeeb8f2e0b727c30a50ea17",
"size": "1150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4870"
},
{
"name": "HTML",
"bytes": "22631"
},
{
"name": "JavaScript",
"bytes": "53090"
},
{
"name": "Python",
"bytes": "21252"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: adds db indexes on Event.end_at and
    # Event.is_blocking_users (and removes them on rollback).  The
    # "_auto" filename suggests it was generated; avoid hand edits.
    def forwards(self, orm):
        # Adding index on 'Event', fields ['end_at']
        db.create_index('maintenancewindow_event', ['end_at'])
        # Adding index on 'Event', fields ['is_blocking_users']
        db.create_index('maintenancewindow_event', ['is_blocking_users'])
    def backwards(self, orm):
        # Removing index on 'Event', fields ['is_blocking_users']
        db.delete_index('maintenancewindow_event', ['is_blocking_users'])
        # Removing index on 'Event', fields ['end_at']
        db.delete_index('maintenancewindow_event', ['end_at'])
    # Frozen ORM snapshot used by South when applying this migration.
    models = {
        'maintenancewindow.event': {
            'Meta': {'ordering': "['begin_at', 'title']", 'object_name': 'Event'},
            'begin_at': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'end_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'event_report': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_blocking_users': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'is_finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        }
    }
    complete_apps = ['maintenancewindow']
|
{
"content_hash": "ec35791ff301f491630338fd9290a74d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 120,
"avg_line_length": 40.85,
"alnum_prop": 0.5844553243574051,
"repo_name": "jamilatta/scielo-manager",
"id": "fc5c7b8c13f9c1d7a985b1a2e3c928286fe9237c",
"size": "1652",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scielomanager/maintenancewindow/migrations/0003_auto.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "82927"
},
{
"name": "JavaScript",
"bytes": "307176"
},
{
"name": "Python",
"bytes": "2505149"
},
{
"name": "Shell",
"bytes": "5834"
}
],
"symlink_target": ""
}
|
"""The mill component."""
from __future__ import annotations
from datetime import timedelta
import logging
from mill import Mill
from mill_local import Mill as MillLocal
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_IP_ADDRESS, CONF_PASSWORD, CONF_USERNAME, Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import CLOUD, CONNECTION_TYPE, DOMAIN, LOCAL
# Module-level logger for this integration.
_LOGGER = logging.getLogger(__name__)
# Entity platforms forwarded for every config entry.
PLATFORMS = [Platform.CLIMATE, Platform.SENSOR]
class MillDataUpdateCoordinator(DataUpdateCoordinator):
    """Class to manage fetching Mill data."""
    def __init__(
        self,
        hass: HomeAssistant,
        update_interval: timedelta | None = None,
        *,
        mill_data_connection: Mill | MillLocal,
    ) -> None:
        """Initialize global Mill data updater."""
        # Keep a handle to the cloud/local connection so platform
        # entities can reach the API directly.
        self.mill_data_connection = mill_data_connection
        super().__init__(
            hass,
            _LOGGER,
            name=DOMAIN,
            # The connection object performs the actual polling.
            update_method=mill_data_connection.fetch_heater_and_sensor_data,
            update_interval=update_interval,
        )
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up the Mill heater."""
    hass.data.setdefault(DOMAIN, {LOCAL: {}, CLOUD: {}})
    is_local = entry.data.get(CONNECTION_TYPE) == LOCAL
    if is_local:
        connection = MillLocal(
            entry.data[CONF_IP_ADDRESS],
            websession=async_get_clientsession(hass),
        )
    else:
        connection = Mill(
            entry.data[CONF_USERNAME],
            entry.data[CONF_PASSWORD],
            websession=async_get_clientsession(hass),
        )
    # Local devices can be polled more aggressively than the cloud API.
    update_interval = timedelta(seconds=15 if is_local else 30)
    key = entry.data[CONF_IP_ADDRESS] if is_local else entry.data[CONF_USERNAME]
    conn_type = LOCAL if is_local else CLOUD
    if not await connection.connect():
        raise ConfigEntryNotReady
    coordinator = MillDataUpdateCoordinator(
        hass,
        mill_data_connection=connection,
        update_interval=update_interval,
    )
    hass.data[DOMAIN][conn_type][key] = coordinator
    await coordinator.async_config_entry_first_refresh()
    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    return unload_ok
|
{
"content_hash": "64c44f9b2eb50743e3448ba99dce2f62",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 87,
"avg_line_length": 32.56470588235294,
"alnum_prop": 0.6791907514450867,
"repo_name": "mezz64/home-assistant",
"id": "8fd1d1a3e22dd5f7f9fb6d7b048ea6551eebd716",
"size": "2768",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/mill/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import string
import types
from process_common import *
from header_common import *
from header_operations import *
from module_strings import *
from module_skills import *
from module_music import *
from module_meshes import *
from module_sounds import *
from module_items import *
from module_troops import *
from module_factions import *
from module_quests import *
from module_party_templates import *
from module_parties import *
from module_scenes import *
from module_scripts import *
from module_mission_templates import *
from module_game_menus import *
from module_particle_systems import *
from module_scene_props import *
from module_scene_props import *
from module_presentations import *
from module_map_icons import *
from module_tableau_materials import *
from module_animations import *
def get_id_value(tag, identifier, tag_uses):
  # Resolve a (tag, identifier) pair -- e.g. ("trp", "player") -- to the
  # object's index within its module list.  Returns (tag_type, id_no);
  # both are -1 when the tag is unknown or the object is not found.
  # Successful lookups are recorded via add_tag_use.
  # The table is built per call so it sees the current module globals,
  # exactly as the original if/elif chain did.
  lookup = {
    "str": (strings, tag_string),
    "itm": (items, 0),
    "trp": (troops, 0),
    "fac": (factions, 0),
    "qst": (quests, 0),
    "pt": (party_templates, 0),
    "p": (parties, 0),
    "scn": (scenes, 0),
    "mt": (mission_templates, 0),
    "mnu": (game_menus, 0),
    "script": (scripts, 0),
    "psys": (particle_systems, 0),
    "spr": (scene_props, 0),
    "prsnt": (presentations, 0),
    "snd": (sounds, 0),
    "icon": (map_icons, 0),
    "skl": (skills, 0),
    "track": (tracks, 0),
    "mesh": (meshes, 0),
    "anim": (animations, 0),
    "tableau": (tableaus, 0),
  }
  tag_type = -1
  id_no = -1
  if tag in lookup:
    object_list, tag_type = lookup[tag]
    id_no = find_object(object_list, identifier)
  if (tag_type > -1 and id_no > -1):
    add_tag_use(tag_uses, tag_type, id_no)
  return (tag_type, id_no)
def get_identifier_value(str, tag_uses):
  # Convert a "tag_identifier" reference (e.g. "trp_player") into its
  # encoded operand value: id_no | (tag_type << op_num_value_bits).
  # Returns -1 and prints an error when the reference cannot be resolved.
  # NOTE(review): the parameter shadows the builtin ``str``.
  underscore_pos = string.find(str, "_")
  result = -1
  if (underscore_pos > 0):
    # Split on the FIRST underscore: prefix is the tag, rest is the id.
    tag_str = str[0:underscore_pos]
    id_str = str[underscore_pos + 1:len(str)]
    (tag_type, id_no) = get_id_value(tag_str,id_str,tag_uses)
    if (tag_type >= 0):
      if (id_no < 0):
        print "Error: Unable to find object:" + str
      else:
        result = id_no | (tag_type << op_num_value_bits)
    else:
      print "Error: Unrecognized tag:" +tag_str + "in object:" + str
  else:
    print "Error: Invalid object:" +str + ".Variables should start with $ sign and references should start with a tag"
  return result
def load_quick_strings(export_dir):
  # Load previously exported quick strings as [id, text] pairs.  The
  # leading count line splits into a single token and is skipped by the
  # len == 2 test.  A missing file yields an empty list.
  quick_strings = []
  try:
    file = open(export_dir + "quick_strings.txt", "r")
    str_list = file.readlines()
    file.close()
    for s in str_list:
      s = string.strip(s)
      if s:
        ssplit = s.split(' ')
        if len(ssplit) == 2:
          quick_strings.append(ssplit)
  except:
    # Bare except kept: any read failure is treated as "first build".
    print "Creating new quick_strings.txt file..."
  return quick_strings
def save_quick_strings(export_dir, quick_strings):
  # Write quick strings as "<count>\n<id> <escaped-text>\n...".
  out = open(export_dir + "quick_strings.txt", "w")
  out.write("%d\n" % len(quick_strings))
  for entry in quick_strings:
    out.write("%s %s\n" % (entry[0], replace_spaces(entry[1])))
  out.close()
def load_variables(export_dir,variable_uses):
  # Load global-variable names from variables.txt (returned) and their
  # use counts from variable_uses.txt (appended to variable_uses in
  # place).  Missing files simply start fresh.
  variables = []
  try:
    file = open(export_dir + "variables.txt","r")
    var_list = file.readlines()
    file.close()
    for v in var_list:
      vv = string.strip(v)
      if vv:
        variables.append(vv)
  except:
    print "variables.txt not found. Creating new variables.txt file"
  try:
    file = open(export_dir + "variable_uses.txt","r")
    var_list = file.readlines()
    file.close()
    for v in var_list:
      vv = string.strip(v)
      if vv:
        variable_uses.append(int(vv))
  except:
    print "variable_uses.txt not found. Creating new variable_uses.txt file"
  return variables
def save_variables(export_dir,variables_list,variable_uses):
  # Persist global-variable names and their use counts to two parallel
  # files, one entry per line.
  out = open(export_dir + "variables.txt","w")
  for name in variables_list:
    out.write("%s\n" % name)
  out.close()
  out = open(export_dir + "variable_uses.txt","w")
  # Indexed by position in variables_list; variable_uses may be longer
  # and any surplus entries are deliberately not written.
  for i in range(len(variables_list)):
    out.write("%d\n" % variable_uses[i])
  out.close()
def ensure_tag_use(tag_uses, tag_no, object_no):
  # Pad tag_uses[tag_no] with zero counts until index object_no is valid.
  uses = tag_uses[tag_no]
  while len(uses) <= object_no:
    uses.append(0)
def add_tag_use(tag_uses, tag_no, object_no):
  # Intentionally a no-op: per-object tag-use counting is disabled.
  #TODO: Uncomment to make build_module_check_tags work
  # ensure_tag_use(tag_uses, tag_no, object_no)
  # tag_uses[tag_no][object_no] = tag_uses[tag_no][object_no] + 1
  pass
def load_tag_uses(export_dir):
  # Load per-tag object use counters written by save_tag_uses; returns a
  # list of lists indexed as tag_uses[tag_no][object_no].
  tag_uses = []
  for i in xrange(tags_end):
    sub_tag_uses = []
    tag_uses.append(sub_tag_uses)
  try:
    file = open(export_dir + "tag_uses.txt","r")
    var_list = file.readlines()
    file.close()
    for v in var_list:
      # Each line holds ';'-separated "tag_no object_no count" triples.
      vv = string.strip(v).split(';')
      if vv:
        for v2 in vv:
          vvv = v2.split(' ')
          if len(vvv) >= 3:
            ensure_tag_use(tag_uses,int(vvv[0]),int(vvv[1]))
            tag_uses[int(vvv[0])][int(vvv[1])] = int(vvv[2])
  except:
    # Bare except kept: missing file means first build.
    print "Creating new tag_uses.txt file..."
  return tag_uses
def save_tag_uses(export_dir,tag_uses):
  # Write one line per tag; each object's count becomes a
  # "tag_no object_no count;" triple on that line.
  out = open(export_dir + "tag_uses.txt","w")
  for tag_no, uses in enumerate(tag_uses):
    for object_no, count in enumerate(uses):
      out.write("%d %d %d;" % (tag_no, object_no, count))
    out.write("\n")
  out.close()
def add_cookie(cookies_list,cookie_string):
  # Return the index of cookie_string in cookies_list, appending it
  # first when it is not already present.
  if cookie_string in cookies_list:
    return cookies_list.index(cookie_string)
  cookies_list.append(cookie_string)
  return len(cookies_list) - 1
def get_cookie(cookies_list,cookie_string):
  # Look up an existing token and return its index.  Unlike add_cookie,
  # a miss is reported as an error -- but the token is still appended so
  # compilation can continue.
  found = 0
  result = -1
  for i_t in xrange(len(cookies_list)):
    if cookie_string == cookies_list[i_t]:
      found = 1
      result = i_t
      break
  if not found:
    print "ERROR: input token not found:" + cookie_string
    cookies_list.append(cookie_string)
    result = len(cookies_list) - 1
  return result
def check_varible_not_defined(variable_string,variables_list):
  # Warn when a variable name is used in both local (':') and global
  # ('$') contexts.  (The typo in the function name is kept: it is part
  # of the interface used by callers.)
  for i_t in xrange(len(variables_list)):
    if variable_string == variables_list[i_t]:
      print "WARNING: Variable name used for both local and global contexts:" + variable_string
      break
#def add_get_variable(variable_string,variables_list):
# found = 0
# result = -1
# for i_t in xrange(len(variables_list)):
# if variable_string == variables_list[i_t]:
# found = 1
# result = i_t
# break
# if not found:
# variables_list.append(variable_string)
# result = len(variables_list) - 1
# return result
def add_variable(variable_string,variables_list,variable_uses):
  # Register an assignment to a variable: a known name has its use count
  # decremented; an unknown name is appended with a starting count of -1.
  try:
    idx = variables_list.index(variable_string)
  except ValueError:
    variables_list.append(variable_string)
    variable_uses.append(-1)
  else:
    variable_uses[idx] = variable_uses[idx] - 1
def get_variable(variable_string,variables_list,variable_uses):
  # Resolve a '$global' or ':local' reference to its index, bumping its
  # use count.  Unknown globals are auto-registered (with a warning);
  # unknown locals are an error and return -1.
  found = 0
  result = -1
  var_string = variable_string[1:]
  for i_t in xrange(len(variables_list)):
    if var_string == variables_list[i_t]:
      found = 1
      result = i_t
      variable_uses[result] = variable_uses[result] + 1
      break
  if not found:
    if (variable_string[0] == '$'):
      # NOTE(review): the comparison above strips the '$' prefix but
      # this append keeps it, so an auto-registered global never matches
      # a later lookup -- looks inconsistent with add_variable; confirm
      # against the rest of the build scripts before changing.
      variables_list.append(variable_string)
      variable_uses.append(0)
      result = len(variables_list) - 1
      print "WARNING: Usage of unassigned global variable: " + variable_string
    else:
      print "ERROR: Usage of unassigned local variable: " + variable_string
  return result
def is_lhs_operation(op_code):
  # Return 1 when op_code writes its result to its first operand, else 0.
  # Integer returns kept: callers compare the result against 1.
  # (Removed the unused ``found`` local from the original.)
  if op_code in lhs_operations:
    return 1
  return 0
def is_lhs_operation_for_global_vars(op_code):
  # Like is_lhs_operation, but also counts operations that assign to
  # global variables.  Returns 1 or 0 (callers compare against 1).
  # (Removed the unused ``found`` local from the original.)
  if op_code in lhs_operations:
    return 1
  if op_code in global_lhs_operations:
    return 1
  return 0
def is_can_fail_operation(op_code):
  # Return 1 when op_code is a condition operation that may fail a
  # statement block, else 0.  Integer returns kept for callers.
  # (Removed the unused ``found`` local from the original.)
  if op_code in can_fail_operations:
    return 1
  return 0
def search_quick_string_keys(key, quick_strings):
  # Linear scan for an entry whose id equals key.  Deliberately keeps
  # scanning so the LAST matching entry wins, exactly mirroring the
  # original (break-less) lookup semantics.  Returns -1 on a miss.
  result = -1
  for idx, entry in enumerate(quick_strings):
    if entry[0] == key:
      result = idx
  return result
def insert_quick_string_with_auto_id(sentence,quick_strings):
  # Register *sentence* under an auto-generated id ("qstr_" plus a
  # prefix of the identifier form of the text) and return its index.
  # An existing entry with the same text is reused; colliding ids are
  # disambiguated by lengthening the prefix, then by a numeric suffix.
  text = convert_to_identifier_with_no_lowercase(sentence)
  sentence = replace_spaces(sentence)
  lt = len(text)
  i = min(20, lt)
  auto_id = "qstr_" + text[0:i]
  done = 0
  index = search_quick_string_keys(auto_id, quick_strings)
  if index >= 0 and (quick_strings[index][1] == sentence):
    done = 1
  # Lengthen the prefix until the id is free or matches this sentence.
  while (i <= lt) and not done:
    auto_id = "qstr_" + text[0:i]
    index = search_quick_string_keys(auto_id, quick_strings)
    if index >= 0:
      if quick_strings[index][1] == sentence:
        done = 1
      else:
        i += 1
    else:
      done = 1
      index = len(quick_strings)
      quick_strings.append([auto_id, sentence])
  if not done:
    # Prefix exhausted: disambiguate with a numeric suffix.
    # BUG FIX: quick_strings is a list of [id, sentence] pairs, so the
    # original ``quick_strings.has_key(...)`` raised AttributeError
    # whenever this path was reached; search the keys instead.
    number = 1
    new_auto_id = auto_id + str(number)
    while search_quick_string_keys(new_auto_id, quick_strings) >= 0:
      number += 1
      new_auto_id = auto_id + str(number)
    auto_id = new_auto_id
    index = len(quick_strings)
    quick_strings.append([auto_id, sentence])
  return index
def process_param(param,global_vars_list,global_var_uses, local_vars_list, local_var_uses, tag_uses, quick_strings):
    # Encode one operand for the serialized output format. String operands
    # are translated according to their leading sigil:
    #   '$name' -> global variable index, OR-ed with opmask_variable
    #   ':name' -> local variable index,  OR-ed with opmask_local_variable
    #   '@text' -> quick-string table index, OR-ed with opmask_quick_string
    #   other   -> identifier/tag value via get_identifier_value()
    # Non-string params (plain numbers) pass through unchanged.
    result = 0
    if (type(param) == types.StringType):
        if (param[0] == '$'):
            # Presumably warns when a LOCAL of the same name exists
            # (cross-scope shadowing) -- confirm in check_varible_not_defined.
            check_varible_not_defined(param[1:], local_vars_list)
            result = get_variable(param, global_vars_list,global_var_uses)
            result |= opmask_variable
        elif (param[0] == ':'):
            # Mirror check: local use against the global name list.
            check_varible_not_defined(param[1:], global_vars_list)
            result = get_variable(param, local_vars_list,local_var_uses)
            result |= opmask_local_variable
        elif (param[0] == '@'):
            result = insert_quick_string_with_auto_id(param[1:], quick_strings)
            result |= opmask_quick_string
        else:
            # Bare identifier: case-insensitive lookup; negative means unknown.
            result = get_identifier_value(param.lower(), tag_uses)
            if (result < 0):
                print "ERROR: Illegal Identifier:" + param
    else:
        result = param
    return result
def save_statement(ofile,opcode,no_variables,statement,variable_list,variable_uses,local_vars_list,local_var_uses,tag_uses,quick_strings):
    # Write one operation to ofile as: "<opcode> <operand-count> <operand>...".
    # no_variables == 1 means the statement is a bare opcode with no operands.
    if no_variables == 0:
        lenstatement = len(statement) - 1  # operands exclude the opcode itself
        if (is_lhs_operation(opcode) == 1):
            # Assignment-style op: its first operand may introduce a new
            # local variable (':name'), so register it before encoding.
            if (lenstatement > 0):
                param = statement[1]
                if (type(param) == types.StringType):
                    if (param[0] == ':'):
                        add_variable(param[1:], local_vars_list, local_var_uses)
    else:
        lenstatement = 0
    ofile.write("%d %d "%(opcode, lenstatement))
    for i in xrange(lenstatement):
        operand = process_param(statement[i + 1],variable_list,variable_uses,local_vars_list,local_var_uses,tag_uses,quick_strings)
        ofile.write("%d "%operand)
def compile_global_vars_in_statement(statement,variable_list, variable_uses):
    # First-pass scan of one operation: if the opcode can assign to a global
    # variable and its first operand is a '$name' token, register that global
    # in variable_list so later passes can resolve it by index.
    opcode = 0
    if ((type(statement) != types.ListType) and (type(statement) != types.TupleType)):
        opcode = statement  # bare opcode with no operand list
    else:
        opcode = statement[0]
    if (is_lhs_operation_for_global_vars(opcode) == 1):
        if (len(statement) > 1):
            param = statement[1]
            if (type(param) == types.StringType):
                if (statement[1][0] == '$'):
                    add_variable(statement[1][1:], variable_list, variable_uses)
def save_statement_block(ofile,statement_name,can_fail_statement,statement_block,variable_list, variable_uses,tag_uses,quick_strings):
local_vars = []
local_var_uses = []
ofile.write(" %d "%(len(statement_block)))
store_script_param_1_uses = 0
store_script_param_2_uses = 0
current_depth = 0
can_fail = 0
for i in xrange(len(statement_block)):
statement = statement_block[i]
if ((type(statement) != types.ListType) and (type(statement) != types.TupleType)):
opcode = statement
no_variables = 1
else:
opcode = statement[0]
no_variables = 0
if (opcode in [try_begin,
try_for_range,
try_for_range_backwards,
try_for_parties,
try_for_agents]):
current_depth = current_depth + 1
elif (opcode == try_end):
current_depth = current_depth - 1
elif (opcode == store_script_param_1 or (opcode == store_script_param and statement[2] == 1)):
store_script_param_1_uses = store_script_param_1_uses + 1
elif (opcode == store_script_param_2 or (opcode == store_script_param and statement[2] == 2)):
store_script_param_2_uses = store_script_param_2_uses + 1
elif (can_fail_statement == 0 and current_depth == 0
and (is_can_fail_operation(opcode)
or ((opcode == call_script) and (statement[1].startswith("cf_", 7))))
and (not statement_name.startswith("cf_"))):
print "WARNING: Script can fail at operation #" + str(i) + ". Use cf_ at the beginning of its name: " + statement_name
save_statement(ofile,opcode,no_variables,statement,variable_list,variable_uses,local_vars, local_var_uses,tag_uses,quick_strings)
if (store_script_param_1_uses > 1):
print "WARNING: store_script_param_1 is used more than once:" + statement_name
if (store_script_param_2_uses > 1):
print "WARNING: store_script_param_2 is used more than once:" + statement_name
i = 0
while (i < len(local_vars)):
if (local_var_uses[i] == 0 and not(local_vars[i].startswith("unused"))):
print "WARNING: Local variable never used: " + local_vars[i] + ", at: " + str(statement_name)
i = i + 1
if (len(local_vars) > 128):
print "WARNING: Script uses more than 128 local wariables: " + str(statement_name) + "variables count:" + str(len(local_vars))
def compile_global_vars(statement_block,variable_list, variable_uses):
    # Pre-register every global variable assigned anywhere in the block so
    # that later serialization passes can resolve each one by index.
    for current_statement in statement_block:
        compile_global_vars_in_statement(current_statement, variable_list, variable_uses)
def save_simple_triggers(ofile,triggers,variable_list, variable_uses,tag_uses,quick_strings):
    # Emit the trigger count, then each trigger as its check interval
    # (a float) followed by its serialized statement block. Simple triggers
    # have no name (0) and are always allowed to fail (1).
    ofile.write("%d\n" % len(triggers))
    for simple_trigger in triggers:
        interval = simple_trigger[0]
        ofile.write("%f " % interval)
        save_statement_block(ofile, 0, 1, simple_trigger[1], variable_list, variable_uses, tag_uses, quick_strings)
        ofile.write("\n")
    ofile.write("\n")
|
{
"content_hash": "29153f56efdc5db13a3c72183536f813",
"timestamp": "",
"source": "github",
"line_count": 473,
"max_line_length": 138,
"avg_line_length": 32.36363636363637,
"alnum_prop": 0.636529918996603,
"repo_name": "infobeisel/mbnwcModSys",
"id": "cd647d64d119f1d5067c4a8af18f08aead3bd7cb",
"size": "15308",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "process_operations.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2343"
},
{
"name": "Python",
"bytes": "4313612"
}
],
"symlink_target": ""
}
|
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras

# cmsDriver-style skeleton: re-run RAW -> RECO reconstruction for Run2 2017
# data; the analyzer plugin is attached further down ("inserted configs").
process = cms.Process('NTUPLE',eras.Run2_2017)

# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration.StandardSequences.RawToDigi_Data_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.Reconstruction_Data_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')

# Event count and input files here are placeholders; both are overridden by
# the command-line options parsed below.
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(10))

# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('dummy.root'),
    secondaryFileNames = cms.untracked.vstring('dummy.root'))

process.options = cms.untracked.PSet()

# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('RECO nevts:10'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $') )

# Output definition
process.RECOoutput = cms.OutputModule("PoolOutputModule",
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('RECO'),
        filterName = cms.untracked.string('')),
    eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
    fileName = cms.untracked.string('RECO_RAW2DIGI_L1Reco_RECO.root'),
    outputCommands = process.RECOEventContent.outputCommands,
    splitLevel = cms.untracked.int32(0))

# Additional output definition

# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data', '')

# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1Reco_step = cms.Path(process.L1Reco)
process.reconstruction_step = cms.Path(process.reconstruction)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECOoutput_step = cms.EndPath(process.RECOoutput)

# Schedule definition (may be rewritten below depending on the input dataTier)
process.schedule = cms.Schedule(process.raw2digi_step,process.L1Reco_step,process.reconstruction_step,process.endjob_step,process.RECOoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# begin inserting configs
#------------------------------------------
# Options - can be given from command line
#------------------------------------------
import FWCore.ParameterSet.VarParsing as opts

opt = opts.VarParsing ('analysis')
opt.register('globalTag', '', opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.string, 'Global Tag, Default="" which uses auto:run2_data')
opt.register('dataTier', 'RECO', opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.string, 'Input file data tier')
opt.register('useTemplates', True, opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.bool, 'Only for On-track clusters! True: use Template reco, False: use Generic reco')
opt.register('saveRECO', False, opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.bool, 'Option to keep GEN-SIM-RECO')
opt.register('RECOFileName', '', opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.string, 'Name of the RECO output file in case saveRECO was used')
opt.register('inputFileName', '', opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.string, 'Name of the input root file')
opt.register('secondaryInputFileName', '', opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.string, 'Name of the RAW (parent) input root file')
opt.register('outputFileName', 'Ntuple.root', opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.string, 'Name of the histograms file')
opt.register('noMagField', False, opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.bool, 'Test LA (SIM) conditions locally (prep/prod database or sqlite file')
opt.register('useLocalQuality', False, opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.bool, 'Test Quality conditions locally (prep/prod database or sqlite file')
opt.register('useLocalLA', False, opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.bool, 'Test LA (RECO) conditions locally (prep/prod database or sqlite file')
opt.register('useLocalGain', False, opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.bool, 'Test Gain conditions locally (prep/prod database or sqlite file')
opt.register('useLocalGenErr', False, opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.bool, 'Test GenError conditions locally (prep/prod database or sqlite file')
opt.register('useLocalTemplates', False, opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.bool, 'Test Template conditions locally (prep/prod database or sqlite file')
opt.register('prescale', 1, opts.VarParsing.multiplicity.singleton, opts.VarParsing.varType.int, 'Save only 1/nth of the events (to conserve disk space for long runs)')

### Events to process: 'maxEvents' is already registered by the framework
opt.setDefault('maxEvents', 100)

# Proceed with settings from command line
opt.parseArguments()

process.maxEvents.input = opt.maxEvents
process.MessageLogger.cerr.FwkReport.reportEvery = 10

# Switch off magnetic field if needed
if opt.noMagField:
    process.load('Configuration.StandardSequences.MagneticField_0T_cff')

# Set some default options based on others
if opt.RECOFileName == '': opt.RECOFileName = 'file:RECO_'+str(opt.maxEvents)+'.root'

# Input file
# NOTE(review): the hard-coded datasets below are developer fallbacks used
# only when no inputFileName is given; they point at fixed EOS paths.
if opt.inputFileName == '':
    if opt.dataTier == 'RAW': process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring('file:/data/store/data/Run2016B/ZeroBias/RAW/v2/000/273/158/00000/C62669DA-7418-E611-A8FB-02163E01377A.root')) #273158 RAW
    elif opt.dataTier == 'AOD': process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring('/store/data/Run2016D/ZeroBias/AOD/PromptReco-v2/000/276/317/00000/12A3F60B-1145-E611-83B1-02163E01431C.root'), secondaryFileNames = cms.untracked.vstring('/store/data/Run2016D/ZeroBias/RAW/v2/000/276/317/00000/46CDE349-0842-E611-A1F4-02163E012067.root'))
    else: process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring('/store/express/Run2017A/ExpressPhysics/FEVT/Express-v1/000/294/928/00000/6ADE5F77-D03F-E711-BFED-02163E01A6C2.root')) #first run
else:
    if opt.secondaryInputFileName == '': process.source = cms.Source("PoolSource",fileNames = cms.untracked.vstring(opt.inputFileName))
    else: process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring(opt.inputFileName), secondaryFileNames = cms.untracked.vstring(opt.secondaryInputFileName) )
#________________________________________________________________________
# Main Analysis Module

# Refitter
process.load("RecoTracker.TrackProducer.TrackRefitters_cff")

# Specify inputs/outputs
# NOTE(review): outputFileName defaults to 'Ntuple.root' at registration, so
# the == '' branches below only fire if the user explicitly passes '' --
# confirm this is the intended way to request the auto-generated name.
if opt.useTemplates:
    process.TrackRefitter.TTRHBuilder = 'WithAngleAndTemplate'
    if opt.outputFileName == '': opt.outputFileName = 'Ntuple_'+str(opt.maxEvents)+'.root'
else:
    process.TrackRefitter.TTRHBuilder = 'WithTrackAngle'
    if opt.outputFileName == '': opt.outputFileName = 'Ntuple_GenericReco_'+str(opt.maxEvents)+'.root'

# Load and configure the plugin you want to use
#---------------------------
# PhaseIPixelNtuplizer
#---------------------------
process.PhaseISplitClusterAnalyzerPlugin = cms.EDAnalyzer("PhaseISplitClusterAnalyzer",
    trajectoryInput = cms.InputTag('TrackRefitter'),
    outputFileName = cms.untracked.string(opt.outputFileName),
    # Do not save everything and downscale clusters
    clusterSaveDownscaleFactor = cms.untracked.int32(100),
    eventSaveDownscaleFactor = cms.untracked.int32(opt.prescale),
    saveDigiTree = cms.untracked.bool(False),
    saveTrackTree = cms.untracked.bool(False),
    saveNonPropagatedExtraTrajTree = cms.untracked.bool(False))

# process.PhaseISplitClusterAnalyzer_step = cms.Path(process.PhaseISplitClusterAnalyzerPlugin)

# myAnalyzer Path: refit tracks, then run the cluster analyzer on them.
process.myAnalyzer_step = cms.Path(process.MeasurementTrackerEvent*process.TrackRefitter*process.PhaseISplitClusterAnalyzerPlugin)

#________________________________________________________________________
# DataBase Stuff

# Print settings
print "Using options: "
if opt.globalTag == '':
    print " globalTag (auto:run2_data) = "+str(process.GlobalTag.globaltag)
else:
    if "auto:" in opt.globalTag:
        process.GlobalTag = GlobalTag(process.GlobalTag, opt.globalTag, '')
        print " globalTag ("+opt.globalTag+") = "+str(process.GlobalTag.globaltag)
    else:
        process.GlobalTag.globaltag = opt.globalTag
        print " globalTag (manually chosen) = "+str(process.GlobalTag.globaltag)
print " dataTier = "+str(opt.dataTier)
print " useTemplates = "+str(opt.useTemplates)
print " saveRECO = "+str(opt.saveRECO)
print " RECOFileName = "+str(opt.RECOFileName)
print " inputFileName = "+str(opt.inputFileName)
print " secondaryInputFileName = "+str(opt.secondaryInputFileName)
print " outputFileName = "+str(opt.outputFileName)
print " noMagField = "+str(opt.noMagField)
print " maxEvents = "+str(opt.maxEvents)
print " useLocalQuality = "+str(opt.useLocalQuality)
print " useLocalLA = "+str(opt.useLocalLA)
print " useLocalGain = "+str(opt.useLocalGain)
print " useLocalGenErr = "+str(opt.useLocalGenErr)
print " useLocalTemplates = "+str(opt.useLocalTemplates)
print " prescale = "+str(opt.prescale)
# Base location of the private sqlite condition payloads.
# NOTE(review): 'dir' shadows the builtin of the same name; left unchanged.
dir = 'sqlite_file:/afs/cern.ch/user/j/jkarancs/public/DB/Phase1/'

# Test Local DB conditions
# (commented-out alternatives are kept as a record of previously-used
# sources/tags; exactly one db + tag pair is active per condition type)

# Quality
#Qua_db = 'frontier://FrontierProd/CMS_CONDITIONS'
Qua_db = 'frontier://FrontierPrep/CMS_CONDITIONS'
#Qua_tag = 'SiPixelQuality_phase1_2017_v1_hltvalidation'
Qua_tag = 'SiPixelQuality_phase1_2017_v2' # 2017 May 18 version from Tamas

# Gains
#Gain_db = 'frontier://FrontierProd/CMS_CONDITIONS'
Gain_db = 'frontier://FrontierPrep/CMS_CONDITIONS'
#Gain_db = dir + '2017_05_17/SiPixelGainCalibration_2017_v1_offline.db'
#Gain_tag = 'SiPixelGainCalibration_2017_v1_hltvalidation'
Gain_tag = 'SiPixelGainCalibration_2017_v4'

# LA (RECO)
#LA_db = 'frontier://FrontierProd/CMS_CONDITIONS'
#LA_db = 'frontier://FrontierPrep/CMS_CONDITIONS'
# MC
#LA_db = dir+'2017_02_13/SiPixelLorentzAngle_phase1_mc_v2.db'
#LA_tag = 'SiPixelLorentzAngle_phase1_mc_v2'
# Data
LA_db = dir+'2017_04_05/SiPixelLorentzAngle_phase1_2017_v1.db'
LA_tag = 'SiPixelLorentzAngle_phase1_2017_v1'

# LA (Width)
#LA_Width_db = 'frontier://FrontierProd/CMS_CONDITIONS'
#LA_Width_db = 'frontier://FrontierPrep/CMS_CONDITIONS'
LA_Width_db = dir+'2017_02_13/SiPixelLorentzAngle_forWidth_phase1_mc_v2.db'
LA_Width_tag = 'SiPixelLorentzAngle_forWidth_phase1_mc_v2'

# GenErrors / Templates: the payload set depends on the magnetic field.
if opt.noMagField:
    # 0T GenErrors
    #GenErr_db = 'frontier://FrontierProd/CMS_CONDITIONS'
    #GenErr_db = 'frontier://FrontierPrep/CMS_CONDITIONS'
    # MC
    GenErr_db = dir+'2017_04_05/SiPixelGenErrorDBObject_phase1_00T_mc_v2.db'
    GenErr_tag = 'SiPixelGenErrorDBObject_phase1_00T_mc_v2'
    # Data
    #GenErr_db = dir+'2017_03_20/SiPixelGenErrorDBObject_phase1_00T_2017_v1.db'
    #GenErr_tag = 'SiPixelGenErrorDBObject_phase1_00T_2017_v1'
    # 0T Templates
    #Templates_db = 'frontier://FrontierProd/CMS_CONDITIONS'
    #Templates_db = 'frontier://FrontierPrep/CMS_CONDITIONS'
    # MC
    Templates_db = dir+'2017_04_05/SiPixelTemplateDBObject_phase1_00T_mc_v2.db'
    Templates_tag = 'SiPixelTemplateDBObject_phase1_00T_mc_v2'
    # Data
    #Templates_db = dir+'2017_03_20/SiPixelTemplateDBObject_phase1_00T_2017_v1.db'
    #Templates_tag = 'SiPixelTemplateDBObject_phase1_00T_2017_v1'
else:
    # 3.8T GenErrors
    #GenErr_db = 'frontier://FrontierProd/CMS_CONDITIONS'
    #GenErr_db = 'frontier://FrontierPrep/CMS_CONDITIONS'
    # MC
    #GenErr_db = dir+'2017_02_13/SiPixelGenErrorDBObject_phase1_38T_mc_v2.db'
    #GenErr_tag = 'SiPixelGenErrorDBObject_phase1_38T_mc_v2'
    # Data
    #GenErr_db = dir+'2017_04_05/SiPixelGenErrorDBObject_phase1_38T_2017_v1.db'
    #GenErr_tag = 'SiPixelGenErrorDBObject_phase1_38T_2017_v1'
    GenErr_db = dir+'2017_07_06/SiPixelGenErrorDBObject_phase1_38T_2017_v4_bugfix.db'
    GenErr_tag = 'SiPixelGenErrorDBObject_phase1_38T_2017_v4_bugfix'
    # 3.8T Templates
    #Templates_db = 'frontier://FrontierProd/CMS_CONDITIONS'
    #Templates_db = 'frontier://FrontierPrep/CMS_CONDITIONS'
    # MC
    #Templates_db = dir+'2017_02_13/SiPixelTemplateDBObject_phase1_38T_mc_v2.db'
    #Templates_tag = 'SiPixelTemplateDBObject_phase1_38T_mc_v2'
    # Data
    #Templates_db = dir+'2017_04_05/SiPixelTemplateDBObject_phase1_38T_2017_v1.db'
    #Templates_tag = 'SiPixelTemplateDBObject_phase1_38T_2017_v1'
    Templates_db = dir+'2017_07_06/SiPixelTemplateDBObject_phase1_38T_2017_v4_bugfix.db'
    Templates_tag = 'SiPixelTemplateDBObject_phase1_38T_2017_v4_bugfix'
# Quality
# Each useLocal* flag swaps in a PoolDBESSource for that condition type and
# marks it preferred (ESPrefer) over the GlobalTag payload.
if opt.useLocalQuality :
    process.QualityReader = cms.ESSource("PoolDBESSource",
        DBParameters = cms.PSet(
            messageLevel = cms.untracked.int32(0),
            authenticationPath = cms.untracked.string('')),
        toGet = cms.VPSet(cms.PSet(
            record = cms.string('SiPixelQualityFromDbRcd'),
            tag = cms.string(Qua_tag))),
        connect = cms.string(Qua_db))
    process.es_prefer_QualityReader = cms.ESPrefer("PoolDBESSource","QualityReader")
    # for reco

# LA
if opt.useLocalLA :
    process.LAReader = cms.ESSource("PoolDBESSource",
        DBParameters = cms.PSet(
            messageLevel = cms.untracked.int32(0),
            authenticationPath = cms.untracked.string('')),
        toGet = cms.VPSet(cms.PSet(
            record = cms.string("SiPixelLorentzAngleRcd"),
            tag = cms.string(LA_tag))),
        connect = cms.string(LA_db))
    process.LAprefer = cms.ESPrefer("PoolDBESSource","LAReader")
    # now the forWidth LA (separate labeled payload in the same record)
    process.LAWidthReader = cms.ESSource("PoolDBESSource",
        DBParameters = cms.PSet(
            messageLevel = cms.untracked.int32(0),
            authenticationPath = cms.untracked.string('')),
        toGet = cms.VPSet(cms.PSet(
            record = cms.string("SiPixelLorentzAngleRcd"),
            label = cms.untracked.string("forWidth"),
            tag = cms.string(LA_Width_tag))),
        connect = cms.string(LA_Width_db))
    process.LAWidthprefer = cms.ESPrefer("PoolDBESSource","LAWidthReader")

# Gain
if opt.useLocalGain :
    process.GainsReader = cms.ESSource("PoolDBESSource",
        DBParameters = cms.PSet(
            messageLevel = cms.untracked.int32(0),
            authenticationPath = cms.untracked.string('')),
        toGet = cms.VPSet(cms.PSet(
            record = cms.string('SiPixelGainCalibrationOfflineRcd'),
            tag = cms.string(Gain_tag))),
        connect = cms.string(Gain_db))
    process.Gainprefer = cms.ESPrefer("PoolDBESSource","GainsReader")

# GenError
if opt.useLocalGenErr :
    process.GenErrReader = cms.ESSource("PoolDBESSource",
        DBParameters = cms.PSet(
            messageLevel = cms.untracked.int32(0),
            authenticationPath = cms.untracked.string('')),
        toGet = cms.VPSet(cms.PSet(
            record = cms.string('SiPixelGenErrorDBObjectRcd'),
            tag = cms.string(GenErr_tag))),
        connect = cms.string(GenErr_db))
    process.generrprefer = cms.ESPrefer("PoolDBESSource","GenErrReader")

# Templates
if opt.useLocalTemplates :
    process.TemplatesReader = cms.ESSource("PoolDBESSource",
        DBParameters = cms.PSet(
            messageLevel = cms.untracked.int32(0),
            authenticationPath = cms.untracked.string('')),
        toGet = cms.VPSet(cms.PSet(
            record = cms.string('SiPixelTemplateDBObjectRcd'),
            tag = cms.string(Templates_tag))),
        connect = cms.string(Templates_db))
    if opt.noMagField:
        # 0T payloads are stored under a dedicated '0T' label.
        process.TemplatesReader.toGet = cms.VPSet(
            cms.PSet(
                label = cms.untracked.string('0T'),
                record = cms.string('SiPixelTemplateDBObjectRcd'),
                tag = cms.string(Templates_tag)
            )
        )
    process.templateprefer = cms.ESPrefer("PoolDBESSource","TemplatesReader")
#---------------------------
# Schedule
#---------------------------

# Modify Schedule
if opt.dataTier == 'RECO' or opt.dataTier == 'FEVT':
    # Input is already reconstructed: run only the analyzer path.
    process.schedule = cms.Schedule(process.myAnalyzer_step)
else:
    if not opt.saveRECO:
        process.schedule.remove(process.RECOoutput_step)
    else:
        process.RECOoutput.fileName = opt.RECOFileName
    # Remove unnecessary steps and add Analyzer in the end of the chain
    process.schedule.remove(process.endjob_step)
    process.schedule.append(process.myAnalyzer_step)

# End of inserted code

#do not add changes to your config after this point (unless you know what you are doing)
from FWCore.ParameterSet.Utilities import convertToUnscheduled
process=convertToUnscheduled(process)

# Customisation from command line

# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)

# End adding early deletion
|
{
"content_hash": "44b0d42a3d6db840711813eade4c2e57",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 363,
"avg_line_length": 50.358543417366946,
"alnum_prop": 0.705139615085104,
"repo_name": "hunyadix/PhaseISplitClusterAnalyzer",
"id": "d630cb8d85c47b4edc52d752e557e160c58ae1b3",
"size": "18449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/run_PhaseISplitClusterAnalyzer_Data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "111715"
},
{
"name": "Python",
"bytes": "18449"
}
],
"symlink_target": ""
}
|
from clubsandwich.geom import Point
from clubsandwich.ui import RectView
from stats.enums import StatsEnum
class HudWindow(RectView):
    """Filled rectangular view that renders the player's vital stats."""

    def __init__(self, game_context, **kwargs):
        super().__init__(fill=True, **kwargs)
        self.game_context = game_context

    def draw(self, ctx):
        self.draw_gui(self.game_context.player, ctx)

    @staticmethod
    def draw_gui(player, ctx):
        # One stat per row, printed top to bottom at column 0.
        stat_lines = (
            "Health: {}\n\n".format(int(player.health.current)),
            "Attack Power: {}\n\n".format(player.get_attack_modifier()),
            "Defense: {}\n\n".format(player.get_armor_class()),
            "Speed: {}\n\n".format(player.get_speed_modifier()),
        )
        for row, text in enumerate(stat_lines):
            ctx.printf(Point(0, row), text)
|
{
"content_hash": "8eafc740cab81a2ae2353c7b8960279c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 92,
"avg_line_length": 36.76190476190476,
"alnum_prop": 0.6398963730569949,
"repo_name": "ChrisLR/Python-Roguelike-Template",
"id": "a9a4a23f3477d0d161f3b7cdb91fbeaf20a633dc",
"size": "772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scenes/game/windows/hud_window.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "267082"
}
],
"symlink_target": ""
}
|
# AtCoder ARC005 A: count the words of the tweet that are one of the three
# accepted spellings of "Takahashikun". The first line (the word count) is
# read but not needed; the sentence's trailing '.' is stripped before split.
_ACCEPTED = ('TAKAHASHIKUN', 'Takahashikun', 'takahashikun')
N = int(input())
words = input()[:-1].split()
print(len([w for w in words if w in _ACCEPTED]))
|
{
"content_hash": "ab0a873eb253b639ae4b1f34eedb0be4",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 61,
"avg_line_length": 31.8,
"alnum_prop": 0.6666666666666666,
"repo_name": "knuu/competitive-programming",
"id": "b81c1f0086755c01a654f0e77d3ae1f65b58ebf9",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atcoder/arc/arc005_a.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "156029"
},
{
"name": "C++",
"bytes": "609501"
},
{
"name": "Haskell",
"bytes": "208"
},
{
"name": "Java",
"bytes": "9111"
},
{
"name": "Nim",
"bytes": "208992"
},
{
"name": "OCaml",
"bytes": "221"
},
{
"name": "Python",
"bytes": "410086"
}
],
"symlink_target": ""
}
|
import os
import sys
# Default locations searched for SMI text MIB modules.
defaultSources = ['file:///usr/share/snmp/mibs', 'file:///usr/share/mibs']

# Compiled (pysnmp-format) MIBs are cached under the user's home directory;
# Windows uses a "PySNMP Configuration" folder, everything else ~/.pysnmp.
if sys.platform[:3] == 'win':
    defaultDest = os.path.join(os.path.expanduser("~"),
                               'PySNMP Configuration', 'mibs')
else:
    defaultDest = os.path.join(os.path.expanduser("~"), '.pysnmp', 'mibs')

# No pre-compiled MIB "borrower" URLs configured by default.
defaultBorrowers = []
try:
    # pysmi is an optional dependency providing on-the-fly MIB compilation.
    from pysmi.reader.url import getReadersFromUrls
    from pysmi.searcher.pypackage import PyPackageSearcher
    from pysmi.searcher.stub import StubSearcher
    from pysmi.borrower.pyfile import PyFileBorrower
    from pysmi.writer.pyfile import PyFileWriter
    from pysmi.parser.smi import parserFactory
    from pysmi.parser.dialect import smiV1Relaxed
    from pysmi.codegen.pysnmp import PySnmpCodeGen, baseMibs
    from pysmi.compiler import MibCompiler

except ImportError:
    from pysnmp.smi import error

    def addMibCompilerDecorator(errorMsg):
        # Factory producing a stub addMibCompiler that raises SmiError
        # (carrying the original import failure text) unless the caller
        # passed ifAvailable=True.
        def addMibCompiler(mibBuilder, **kwargs):
            if not kwargs.get('ifAvailable'):
                raise error.SmiError('MIB compiler not available: %s' % errorMsg)
        return addMibCompiler

    # sys.exc_info()[1] keeps this working on both Python 2 and 3 (in Py3
    # the bound exception name is cleared when the except block ends).
    addMibCompiler = addMibCompilerDecorator(sys.exc_info()[1])

else:
    def addMibCompiler(mibBuilder, **kwargs):
        # Attach a pysmi-based MIB compiler to *mibBuilder*.
        # Recognized kwargs: ifNotAdded, destination, sources, borrowers.
        if kwargs.get('ifNotAdded') and mibBuilder.getMibCompiler():
            return  # a compiler is already attached; leave it alone
        compiler = MibCompiler(parserFactory(**smiV1Relaxed)(),
                               PySnmpCodeGen(),
                               PyFileWriter(kwargs.get('destination') or defaultDest))
        # Where to read SMI text MIBs from.
        compiler.addSources(*getReadersFromUrls(*kwargs.get('sources') or defaultSources))
        # Never recompile the base MIBs bundled with pysnmp, and skip any
        # module already present in the builder's Python MIB search path.
        compiler.addSearchers(StubSearcher(*baseMibs))
        compiler.addSearchers(*[PyPackageSearcher(x.fullPath()) for x in mibBuilder.getMibSources()])
        # Fallback: fetch pre-compiled MIBs from the borrower URLs.
        compiler.addBorrowers(*[PyFileBorrower(x, genTexts=mibBuilder.loadTexts) for x in getReadersFromUrls(*kwargs.get('borrowers') or defaultBorrowers, **dict(lowcaseMatching=False))])
        mibBuilder.setMibCompiler(
            compiler, kwargs.get('destination') or defaultDest
        )
|
{
"content_hash": "eea2771ba9a8bf19819ad7fdbcdf187a",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 187,
"avg_line_length": 38.51851851851852,
"alnum_prop": 0.6846153846153846,
"repo_name": "mith1979/ansible_automation",
"id": "84dde53d463deb0795721724b4dc34a317227cde",
"size": "2227",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "applied_python/applied_python/lib/python2.7/site-packages/pysnmp/smi/compiler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "84868"
},
{
"name": "CSS",
"bytes": "50289"
},
{
"name": "HTML",
"bytes": "70428"
},
{
"name": "JavaScript",
"bytes": "105262"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "19073705"
},
{
"name": "Shell",
"bytes": "3747"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
import objc
from PyObjCTools.TestSupport import *
NSObject = objc.lookUpClass('NSObject')
class TestBasicIMP (TestCase):
    # IMPs look like unbound selectors at the Python level; underneath they
    # wrap the C function implementing a method, so calling one bypasses the
    # usual Objective-C message-sending machinery.

    def testIMPType(self):
        self.assertHasAttr(objc, "IMP")

    def testAlloc(self):
        target = NSObject
        imp = target.pyobjc_classMethods.methodForSelector_("alloc")
        self.assertIsInstance(imp, objc.IMP)
        self.assertTrue(imp.__metadata__()['classmethod'])
        self.assertEqual(imp.__metadata__()['retval']['already_retained'], target.alloc.__metadata__()['retval']['already_retained'])
        self.assertEqual(imp.selector, b'alloc')
        instance = imp(target).init()
        self.assertIsInstance(instance, target)

    def testInit1(self):
        target = NSObject
        imp = target.instanceMethodForSelector_("init")
        self.assertIsInstance(imp, objc.IMP)
        self.assertFalse(imp.__metadata__()['classmethod'])
        self.assertEqual(imp.__metadata__()['retval']['already_retained'], target.init.__metadata__()['retval']['already_retained'])
        self.assertEqual(imp.selector, b'init')
        instance = imp(target.alloc())
        self.assertIsInstance(instance, target)

    def testInit2(self):
        target = NSObject
        existing = target.alloc().init()
        imp = existing.methodForSelector_("init")
        self.assertIsInstance(imp, objc.IMP)
        self.assertFalse(imp.__metadata__()['classmethod'])
        self.assertEqual(imp.__metadata__()['retval']['already_retained'], target.init.__metadata__()['retval']['already_retained'])
        self.assertEqual(imp.selector, b'init')
        instance = imp(target.alloc())
        self.assertIsInstance(instance, target)

    def testDescription(self):
        obj = NSObject.alloc().init()
        self.assertEqual(obj.description(), obj.methodForSelector_(b'description')(obj))
# Allow running this test module directly (main comes from
# PyObjCTools.TestSupport via the star import above).
if __name__ == "__main__":
    main()
|
{
"content_hash": "87543b8ec3ef42f5484eecaf97f76061",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 128,
"avg_line_length": 34.016666666666666,
"alnum_prop": 0.6315531602155806,
"repo_name": "ariabuckles/pyobjc-core",
"id": "bc78a9109dfc09d4f5629375daa419efa0e33ebf",
"size": "2041",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "PyObjCTest/test_imp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "47481"
},
{
"name": "C",
"bytes": "444594"
},
{
"name": "C++",
"bytes": "9999"
},
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "Objective-C",
"bytes": "1794618"
},
{
"name": "Python",
"bytes": "3992770"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
}
|
"""
.. module: security_monkey.tests.auditors.github.test_team_auditor
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Mike Grima <mgrima@netflix.com>
"""
from security_monkey.datastore import Account, AccountType, Technology
from security_monkey.tests import SecurityMonkeyTestCase
from security_monkey import db
from security_monkey.watchers.github.team import GitHubTeamItem
from security_monkey.auditors.github.team import GitHubTeamAuditor
# Sample GitHub API team payload: a "secret" (members-only) team with
# read-only ("pull") permission -- the auditor should NOT flag this one.
CONFIG_ONE = {
    "id": 1,
    "url": "https://api.github.com/teams/1",
    "name": "Justice League",
    "slug": "justice-league",
    "description": "A great team.",
    "privacy": "secret",
    "permission": "pull",
    "members_url": "https://api.github.com/teams/1/members{/member}",
    "repositories_url": "https://api.github.com/teams/1/repos"
}
# Sample payload for a "closed" (org-visible) team with admin permission --
# the test below expects the auditor to raise an issue for this one.
CONFIG_TWO = {
    "id": 2,
    "url": "https://api.github.com/teams/2",
    "name": "Team2",
    "slug": "Team2",
    "description": "A great team.",
    "privacy": "closed",
    "permission": "admin",
    "members_url": "https://api.github.com/teams/2/members{/member}",
    "repositories_url": "https://api.github.com/teams/2/repos"
}
class GitHubTeamAuditorTestCase(SecurityMonkeyTestCase):
    """Unit tests for GitHubTeamAuditor.check_for_public_team()."""

    def pre_test_setup(self):
        # Two team items under the same GitHub org account: CONFIG_ONE is a
        # "secret" team (compliant), CONFIG_TWO a "closed" one (violation).
        self.gh_items = [
            GitHubTeamItem(account="Org-one", name="Org-one", arn="Org-one", config=CONFIG_ONE),
            GitHubTeamItem(account="Org-one", name="Org-one", arn="Org-one", config=CONFIG_TWO),
        ]

        # Commit the AccountType first so its autogenerated id is available
        # for the Account row that references it below.
        self.account_type = AccountType(name="GitHub")
        db.session.add(self.account_type)
        db.session.commit()

        db.session.add(Account(name="Org-one", account_type_id=self.account_type.id,
                               identifier="Org-one", active=True, third_party=False))
        self.technology = Technology(name="team")
        db.session.add(self.technology)
        db.session.commit()

    def test_public_team_check(self):
        # Run the check on both items and verify only the non-secret team
        # accumulates an audit issue.
        team_auditor = GitHubTeamAuditor(accounts=["Org-one"])
        team_auditor.check_for_public_team(self.gh_items[0])
        team_auditor.check_for_public_team(self.gh_items[1])

        # Should raise issue:
        self.assertEqual(len(self.gh_items[1].audit_issues), 1)
        self.assertEqual(self.gh_items[1].audit_issues[0].score, 1)

        # Should not raise issues:
        self.assertEqual(len(self.gh_items[0].audit_issues), 0)
|
{
"content_hash": "0ab2daff1dee3529365ba63964d58e47",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 96,
"avg_line_length": 34.52173913043478,
"alnum_prop": 0.647774979009236,
"repo_name": "Netflix/security_monkey",
"id": "f59a6547ad7fc9f7950eead9db4264d120e28721",
"size": "2999",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "security_monkey/tests/auditors/github/test_team_auditor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22837"
},
{
"name": "Dart",
"bytes": "130852"
},
{
"name": "Dockerfile",
"bytes": "3841"
},
{
"name": "HTML",
"bytes": "120266"
},
{
"name": "JavaScript",
"bytes": "13728"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1578684"
},
{
"name": "Shell",
"bytes": "30939"
}
],
"symlink_target": ""
}
|
import random
import django
from django.test import TestCase
from django.utils import timezone
from core.models import User, Event, Join
def get_random_latitude():
    """Return a uniformly random latitude in degrees, within [-90, 90]."""
    low, high = -90, 90
    return random.uniform(low, high)
def get_random_longitude():
    """Return a uniformly random longitude in degrees, within [-180, 180]."""
    low, high = -180, 180
    return random.uniform(low, high)
def create_event(name, event_owner, start_date_time, end_date_time=None, max_num_participants=5):
    """Create and persist an Event at a random location.

    When ``end_date_time`` is omitted, the event defaults to a 90-minute
    duration starting at ``start_date_time``.
    """
    if end_date_time is None:  # default to 90 minutes
        end_date_time = start_date_time + timezone.timedelta(minutes=90)
    return Event.objects.create(
        name=name,
        description=name + ' -- The best sport event on campus',
        latitude=get_random_latitude(),
        longitude=get_random_longitude(),
        event_owner=event_owner,
        start_date_time=start_date_time,
        end_date_time=end_date_time,
        max_num_participants=max_num_participants)
def create_user(username):
    """Create and persist a User derived from ``username`` at a random location."""
    return User.objects.create(
        username=username,
        password=username + 'Pass',
        email=username + '@ucsc.edu',
        phone_number="+123456789",
        latitude=get_random_latitude(),
        longitude=get_random_longitude())
class EventUserInteractionTests (TestCase):
    """Integration tests for the User/Event/Join lifecycle.

    Fix: the original looked Join rows up with ``Join.objects.get(id=user.id)``,
    i.e. by a *User* primary key. That only works because Join and User ids
    happen to coincide in a fresh test database; look the row up by its
    (user, event) pair instead.
    """

    def test_delete_participant(self):
        l = create_user('Lorenzo')
        f = create_user('Federico')
        pp = create_event('Ping pong', l, timezone.now(), max_num_participants=2)
        Join.objects.create(user=l, event=pp)
        Join.objects.create(user=f, event=pp)
        self.assertIn(f, pp.participants.all())
        Join.objects.get(user=f, event=pp).delete()
        self.assertNotIn(f, pp.participants.all())
        # Lorenzo is still a participant
        self.assertIn(l, pp.participants.all())

    # Should this work? Discuss..
    def test_delete_event_owner_as_participant(self):
        l = create_user('Lorenzo')
        pp = create_event('Ping pong', l, timezone.now(), max_num_participants=2)
        Join.objects.create(user=l, event=pp)
        Join.objects.get(user=l, event=pp).delete()
        self.assertNotIn(l, pp.participants.all())

    def test_delete_event_owner(self):
        l = create_user('Lorenzo')
        pp = create_event('Ping pong', l, timezone.now(), max_num_participants=2)
        l.delete()
        # The event should be deleted
        with self.assertRaises(django.core.exceptions.ObjectDoesNotExist):
            Event.objects.get(id=pp.id)

    def test_delete_event_not_symmetric(self):
        l = create_user('Lorenzo')
        pp = create_event('Ping pong', l, timezone.now(), max_num_participants=2)
        pp.delete()
        self.assertEqual(User.objects.get(id=l.id), l)
|
{
"content_hash": "375337bd8e7a31373354089ccc57da2d",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 97,
"avg_line_length": 38.20987654320987,
"alnum_prop": 0.5932148626817447,
"repo_name": "LorenzSelv/pinned",
"id": "37b0362dc143e8c375f5f27422636aac789e4059",
"size": "3095",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/tests/event_user_interaction_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8718"
},
{
"name": "HTML",
"bytes": "15544"
},
{
"name": "JavaScript",
"bytes": "21339"
},
{
"name": "Makefile",
"bytes": "311"
},
{
"name": "Python",
"bytes": "55338"
}
],
"symlink_target": ""
}
|
class VerificaUpload:
    """Placeholder upload verifier; the actual check is not yet implemented."""

    def __init__(self):
        # Announce construction (kept for parity with existing behavior).
        print("Verifica Upload")

    def verifica(self, diretorio):
        """Verify the uploads under ``diretorio``; currently a no-op."""
        pass
|
{
"content_hash": "3568d842b53caeb6723cf415c116495f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 33,
"avg_line_length": 21.166666666666668,
"alnum_prop": 0.6062992125984252,
"repo_name": "Dturati/projetoUFMT",
"id": "57874eca760c6a17fc7cd35471484d8052de4e1d",
"size": "128",
"binary": false,
"copies": "1",
"ref": "refs/heads/versao-1.3",
"path": "estagio/estagio/base/verifica_upload/VerificaUpload.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "788"
},
{
"name": "Dockerfile",
"bytes": "1018"
},
{
"name": "Elixir",
"bytes": "43"
},
{
"name": "HTML",
"bytes": "20543"
},
{
"name": "JavaScript",
"bytes": "40012"
},
{
"name": "Python",
"bytes": "53548"
},
{
"name": "R",
"bytes": "2850"
},
{
"name": "Shell",
"bytes": "1836"
}
],
"symlink_target": ""
}
|
"""A class to keep track of devices across builds and report state."""
import argparse
import json
import logging
import os
import psutil
import re
import signal
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import devil_chromium
from devil import devil_env
from devil.android import battery_utils
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_list
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.constants import exit_codes
from devil.utils import lsusb
from devil.utils import reset_usb
from devil.utils import run_tests_helper
from pylib.constants import host_paths
# Matches 'Device ID = <digits>' lines from `dumpsys iphonesubinfo` output.
_RE_DEVICE_ID = re.compile(r'Device ID = (\d+)')
def KillAllAdb():
  """Kill every process whose name contains 'adb', escalating signals.

  Sends SIGTERM, then SIGQUIT, then SIGKILL to all matching processes, and
  finally logs any that still survive.
  """
  def _adb_processes():
    # Snapshot of currently matching processes; races are tolerated.
    for proc in psutil.process_iter():
      try:
        if 'adb' in proc.name:
          yield proc
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
  for signum in (signal.SIGTERM, signal.SIGQUIT, signal.SIGKILL):
    for proc in _adb_processes():
      try:
        logging.info('kill %d %d (%s [%s])', signum, proc.pid, proc.name,
                     ' '.join(proc.cmdline))
        proc.send_signal(signum)
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
  # Anything still matching after SIGKILL could not be killed; report it.
  for proc in _adb_processes():
    try:
      logging.error('Unable to kill %d (%s [%s])', proc.pid, proc.name,
                    ' '.join(proc.cmdline))
    except (psutil.NoSuchProcess, psutil.AccessDenied):
      pass
def _IsBlacklisted(serial, blacklist):
return blacklist and serial in blacklist.Read()
def _BatteryStatus(device, blacklist):
  """Returns battery information for |device| as a dict ({} on failure).

  If the battery level is critically low (<15%), charging is enabled and,
  when a blacklist is in use, the device is blacklisted with reason
  'low_battery'.
  """
  battery_info = {}
  try:
    battery = battery_utils.BatteryUtils(device)
    battery_info = battery.GetBatteryInfo(timeout=5)
    battery_level = int(battery_info.get('level', 100))
    if battery_level < 15:
      logging.error('Critically low battery level (%d)', battery_level)
      # Fix: reuse the BatteryUtils instance created above; the original
      # constructed a second, redundant instance here.
      if not battery.GetCharging():
        battery.SetCharging(True)
      if blacklist:
        blacklist.Extend([device.adb.GetDeviceSerial()], reason='low_battery')
  except device_errors.CommandFailedError:
    logging.exception('Failed to get battery information for %s',
                      str(device))
  return battery_info
def _IMEISlice(device):
  """Return the last six digits of the device ID from dumpsys ('' on failure)."""
  imei_tail = ''
  try:
    output = device.RunShellCommand(['dumpsys', 'iphonesubinfo'],
                                    check_return=True, timeout=5)
    for line in output:
      match = _RE_DEVICE_ID.match(line)
      if match:
        imei_tail = match.group(1)[-6:]
  except device_errors.CommandFailedError:
    logging.exception('Failed to get IMEI slice for %s', str(device))
  return imei_tail
def DeviceStatus(devices, blacklist):
  """Generates status information for the given devices.
  Args:
    devices: The devices to generate status for.
    blacklist: The current device blacklist.
  Returns:
    A dict of the following form:
    {
      '<serial>': {
        'serial': '<serial>',
        'adb_status': str,
        'usb_status': bool,
        'blacklisted': bool,
        # only if the device is connected and not blacklisted
        'type': ro.build.product,
        'build': ro.build.id,
        'build_detail': ro.build.fingerprint,
        'battery': {
          ...
        },
        'imei_slice': str,
        'wifi_ip': str,
      },
      ...
    }
  """
  # Map serial -> raw `adb devices -l` entry so per-device state below is a
  # dict lookup instead of another adb invocation.
  adb_devices = {
      a[0].GetDeviceSerial(): a
      for a in adb_wrapper.AdbWrapper.Devices(desired_state=None, long_list=True)
  }
  # Serials visible on the USB bus, independent of adb's view of the device.
  usb_devices = set(lsusb.get_android_devices())
  def blacklisting_device_status(device):
    # Builds the status dict for one device; may Extend() |blacklist| as a
    # side effect when the device is unhealthy.
    serial = device.adb.GetDeviceSerial()
    adb_status = (
        adb_devices[serial][1] if serial in adb_devices
        else 'unknown')
    usb_status = bool(serial in usb_devices)
    device_status = {
        'serial': serial,
        'adb_status': adb_status,
        'usb_status': usb_status,
    }
    if adb_status == 'device':
      if not _IsBlacklisted(serial, blacklist):
        try:
          build_product = device.build_product
          build_id = device.build_id
          build_fingerprint = device.GetProp('ro.build.fingerprint', cache=True)
          wifi_ip = device.GetProp('dhcp.wlan0.ipaddress')
          battery_info = _BatteryStatus(device, blacklist)
          imei_slice = _IMEISlice(device)
          if (device.product_name == 'mantaray' and
              battery_info.get('AC powered', None) != 'true'):
            logging.error('Mantaray device not connected to AC power.')
          device_status.update({
              'ro.build.product': build_product,
              'ro.build.id': build_id,
              'ro.build.fingerprint': build_fingerprint,
              'battery': battery_info,
              'imei_slice': imei_slice,
              'wifi_ip': wifi_ip,
              # TODO(jbudorick): Remove these once no clients depend on them.
              'type': build_product,
              'build': build_id,
              'build_detail': build_fingerprint,
          })
        except device_errors.CommandFailedError:
          logging.exception('Failure while getting device status for %s.',
                            str(device))
          if blacklist:
            blacklist.Extend([serial], reason='status_check_failure')
        except device_errors.CommandTimeoutError:
          logging.exception('Timeout while getting device status for %s.',
                            str(device))
          if blacklist:
            blacklist.Extend([serial], reason='status_check_timeout')
      elif blacklist:
        # Device responded to adb but is not in 'device' state; record why.
        blacklist.Extend([serial], reason=adb_status)
    device_status['blacklisted'] = _IsBlacklisted(serial, blacklist)
    return device_status
  # Query all devices in parallel.
  parallel_devices = device_utils.DeviceUtils.parallel(devices)
  statuses = parallel_devices.pMap(blacklisting_device_status).pGet(None)
  return statuses
def RecoverDevices(devices, blacklist):
  """Attempts to recover any inoperable devices in the provided list.
  Args:
    devices: The list of devices to attempt to recover.
    blacklist: The current device blacklist, which will be used then
      reset.
  Returns:
    Nothing.
  """
  statuses = DeviceStatus(devices, blacklist)
  # The recovery sets are nested supersets: missing USB / offline implies a
  # USB restart; that plus 'unauthorized' implies an adb restart; all of
  # those plus anything blacklisted get a reboot.
  should_restart_usb = set(
      status['serial'] for status in statuses
      if (not status['usb_status']
          or status['adb_status'] in ('offline', 'unknown')))
  should_restart_adb = should_restart_usb.union(set(
      status['serial'] for status in statuses
      if status['adb_status'] == 'unauthorized'))
  should_reboot_device = should_restart_adb.union(set(
      status['serial'] for status in statuses
      if status['blacklisted']))
  logging.debug('Should restart USB for:')
  for d in should_restart_usb:
    logging.debug(' %s', d)
  logging.debug('Should restart ADB for:')
  for d in should_restart_adb:
    logging.debug(' %s', d)
  logging.debug('Should reboot:')
  for d in should_reboot_device:
    logging.debug(' %s', d)
  # Start from a clean blacklist; failures below re-add entries.
  if blacklist:
    blacklist.Reset()
  # The adb server must be down before resetting USB below.
  if should_restart_adb:
    KillAllAdb()
  for serial in should_restart_usb:
    try:
      reset_usb.reset_android_usb(serial)
    except (IOError, device_errors.DeviceUnreachableError):
      logging.exception('Unable to reset USB for %s.', serial)
      if blacklist:
        blacklist.Extend([serial], reason='usb_failure')
  def blacklisting_recovery(device):
    # Per-device recovery: wait for boot; on failure reboot normally, then
    # fall back to a sysrq-triggered hard reboot; blacklist on any failure.
    if _IsBlacklisted(device.adb.GetDeviceSerial(), blacklist):
      logging.debug('%s is blacklisted, skipping recovery.', str(device))
      return
    if str(device) in should_reboot_device:
      try:
        device.WaitUntilFullyBooted(retries=0)
        return
      except (device_errors.CommandTimeoutError,
              device_errors.CommandFailedError):
        logging.exception('Failure while waiting for %s. '
                          'Attempting to recover.', str(device))
      try:
        try:
          device.Reboot(block=False, timeout=5, retries=0)
        except device_errors.CommandTimeoutError:
          logging.warning('Timed out while attempting to reboot %s normally.'
                          'Attempting alternative reboot.', str(device))
          # The device drops offline before we can grab the exit code, so
          # we don't check for status.
          device.adb.Root()
          device.adb.Shell('echo b > /proc/sysrq-trigger', expect_status=None,
                           timeout=5, retries=0)
      except device_errors.CommandFailedError:
        logging.exception('Failed to reboot %s.', str(device))
        if blacklist:
          blacklist.Extend([device.adb.GetDeviceSerial()],
                           reason='reboot_failure')
      except device_errors.CommandTimeoutError:
        logging.exception('Timed out while rebooting %s.', str(device))
        if blacklist:
          blacklist.Extend([device.adb.GetDeviceSerial()],
                           reason='reboot_timeout')
      try:
        device.WaitUntilFullyBooted(retries=0)
      except device_errors.CommandFailedError:
        logging.exception('Failure while waiting for %s.', str(device))
        if blacklist:
          blacklist.Extend([device.adb.GetDeviceSerial()],
                           reason='reboot_failure')
      except device_errors.CommandTimeoutError:
        logging.exception('Timed out while waiting for %s.', str(device))
        if blacklist:
          blacklist.Extend([device.adb.GetDeviceSerial()],
                           reason='reboot_timeout')
  device_utils.DeviceUtils.parallel(devices).pMap(blacklisting_recovery)
def main():
  """Recovers devices, logs their status, and writes status artifacts.

  Returns 0 when at least one live (non-blacklisted, adb 'device' state)
  device remains; otherwise the infra-error exit code.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--out-dir',
                      help='Directory where the device path is stored',
                      default=os.path.join(host_paths.DIR_SOURCE_ROOT, 'out'))
  parser.add_argument('--restart-usb', action='store_true',
                      help='DEPRECATED. '
                           'This script now always tries to reset USB.')
  parser.add_argument('--json-output',
                      help='Output JSON information into a specified file.')
  parser.add_argument('--adb-path',
                      help='Absolute path to the adb binary to use.')
  parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
  parser.add_argument('--known-devices-file', action='append', default=[],
                      dest='known_devices_files',
                      help='Path to known device lists.')
  parser.add_argument('-v', '--verbose', action='count', default=1,
                      help='Log more information.')
  args = parser.parse_args()
  run_tests_helper.SetLogLevel(args.verbose)
  # Point devil at a custom adb binary when one is supplied.
  devil_custom_deps = None
  if args.adb_path:
    devil_custom_deps = {
        'adb': {
            devil_env.GetPlatform(): [args.adb_path],
        },
    }
  devil_chromium.Initialize(custom_deps=devil_custom_deps)
  blacklist = (device_blacklist.Blacklist(args.blacklist_file)
               if args.blacklist_file
               else None)
  last_devices_path = os.path.join(
      args.out_dir, device_list.LAST_DEVICES_FILENAME)
  args.known_devices_files.append(last_devices_path)
  # Union of every serial we have ever seen in the known-device files.
  expected_devices = set()
  try:
    for path in args.known_devices_files:
      if os.path.exists(path):
        expected_devices.update(device_list.GetPersistentDeviceList(path))
  except IOError:
    logging.warning('Problem reading %s, skipping.', path)
  logging.info('Expected devices:')
  for device in expected_devices:
    logging.info(' %s', device)
  usb_devices = set(lsusb.get_android_devices())
  devices = [device_utils.DeviceUtils(s)
             for s in expected_devices.union(usb_devices)]
  RecoverDevices(devices, blacklist)
  statuses = DeviceStatus(devices, blacklist)
  # Log the state of all devices.
  for status in statuses:
    logging.info(status['serial'])
    adb_status = status.get('adb_status')
    blacklisted = status.get('blacklisted')
    logging.info(' USB status: %s',
                 'online' if status.get('usb_status') else 'offline')
    logging.info(' ADB status: %s', adb_status)
    logging.info(' Blacklisted: %s', str(blacklisted))
    if adb_status == 'device' and not blacklisted:
      logging.info(' Device type: %s', status.get('ro.build.product'))
      logging.info(' OS build: %s', status.get('ro.build.id'))
      logging.info(' OS build fingerprint: %s',
                   status.get('ro.build.fingerprint'))
      logging.info(' Battery state:')
      # NOTE(review): .iteritems() is Python 2 only; this script predates the
      # py3 migration.
      for k, v in status.get('battery', {}).iteritems():
        logging.info(' %s: %s', k, v)
      logging.info(' IMEI slice: %s', status.get('imei_slice'))
      logging.info(' WiFi IP: %s', status.get('wifi_ip'))
  # Update the last devices file(s).
  for path in args.known_devices_files:
    device_list.WritePersistentDeviceList(
        path, [status['serial'] for status in statuses])
  # Write device info to file for buildbot info display.
  if os.path.exists('/home/chrome-bot'):
    with open('/home/chrome-bot/.adb_device_info', 'w') as f:
      for status in statuses:
        try:
          if status['adb_status'] == 'device':
            f.write('{serial} {adb_status} {build_product} {build_id} '
                    '{temperature:.1f}C {level}%\n'.format(
                serial=status['serial'],
                adb_status=status['adb_status'],
                build_product=status['type'],
                build_id=status['build'],
                temperature=float(status['battery']['temperature']) / 10,
                level=status['battery']['level']
            ))
          else:
            f.write('{serial} {adb_status}'.format(
                serial=status['serial'],
                adb_status=status['adb_status']
            ))
        except Exception: # pylint: disable=broad-except
          # Best-effort reporting; a malformed status must not abort the run.
          pass
  # Dump the device statuses to JSON.
  if args.json_output:
    with open(args.json_output, 'wb') as f:
      f.write(json.dumps(statuses, indent=4))
  live_devices = [status['serial'] for status in statuses
                  if (status['adb_status'] == 'device'
                      and not _IsBlacklisted(status['serial'], blacklist))]
  # If all devices failed, or if there are no devices, it's an infra error.
  return 0 if live_devices else exit_codes.INFRA
# Script entry point: exit with main()'s status code.
if __name__ == '__main__':
  sys.exit(main())
|
{
"content_hash": "e885e74cb359e0e3448a435d72026079",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 80,
"avg_line_length": 34.51690821256039,
"alnum_prop": 0.620923722883135,
"repo_name": "hujiajie/chromium-crosswalk",
"id": "c8b3386d61d468a5a08939f84a04dc5f0ca8ac8e",
"size": "14477",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "build/android/buildbot/bb_device_status_check.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Uses daily bhavcopy to download historical data for all stocks.
One challenge is, bhavcopy has the symbol name for that day. In case of NSE,
the symbol names change and that's a problem, so we need a way to track
changes in symbol names as well.
All this data is stored in an SQLite database as we download, as some of the
updates (eg. update data corresponding to a symbol with a new name are easier
to handle in SQLite than dealing with files).
"""
import os
import sys
import random
import time
from zipfile import ZipFile
from datetime import datetime as dt
from datetime import timedelta as td
import requests
if sys.version_info.major < 3:
from StringIO import StringIO as bio
else:
from io import BytesIO as bio
# BIG FIXME: There are sql statements littered all over the place, sqlalchemy?
from tickerplot.nse.nse_utils import nse_get_name_change_tuples, ScripOHLCVD
from tickerplot.sql.sqlalchemy_wrapper import \
create_or_get_nse_bhav_deliv_download_info, \
create_or_get_nse_equities_hist_data
from tickerplot.sql.sqlalchemy_wrapper import execute_one, execute_one_insert, \
execute_many_insert
from tickerplot.sql.sqlalchemy_wrapper import and_expr, select_expr
from tickerplot.sql.sqlalchemy_wrapper import get_metadata
from tickerplot.utils.logger import get_logger
module_logger = get_logger(os.path.basename(__file__))
# Browser-like request headers; presumably NSE rejects requests without a
# plausible User-Agent/Referer — TODO confirm against current nseindia.com.
_BHAV_HEADERS = {'Host': 'www.nseindia.com',
                 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0',
                 'Accept': 'application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
                 'Accept-Encoding': 'gzip, deflate, br',
                 'Referer': 'https://www.nseindia.com/product/content/equities/equities/archives_eq.htm',
                 'Connection': 'keep-alive',
                 'DNT': '1'}
# SQLAlchemy MetaData; bound in main() from the --dbpath URL.
_DB_METADATA = None
# Date format used for all CLI-facing dates.
_DATE_FMT = '%d-%m-%Y'
# URL templates for the daily bhavcopy zip and the delivery (MTO) file.
_BHAV_URL_BASE = 'https://www.nseindia.com/content/historical/EQUITIES/' \
                 '%(year)s/%(mon)s/cm%(dd)s%(mon)s%(year)sbhav.csv.zip'
_DELIV_URL_BASE = 'https://www.nseindia.com/archives/equities/mto/' \
                  'MTO_%(dd)s%(mm)s%(year)s.DAT'
# Warn user if number of days data is greater than this
_WARN_DAYS = 100
def get_bhavcopy(date='01-01-2002'):
    """Downloads a bhavcopy for a given date and returns a dictionary of rows
    where each row stands for a traded scrip. The scripname is key. If the
    bhavcopy for a given day is already downloaded, returns None. date is in
    DD-MM-YYYY format"""
    global _DATE_FMT
    # Accept either a DD-MM-YYYY string or a datetime instance.
    if isinstance(date, str):
        d2 = dt.date(dt.strptime(date, _DATE_FMT))
    elif isinstance(date, dt):
        d2 = dt.date(date)
    else:
        return None
    yr = d2.strftime('%Y')
    mon = d2.strftime('%b').upper()
    mm = d2.strftime('%0m')
    dd = d2.strftime('%0d')
    if _bhavcopy_downloaded(d2): # already downloaded
        return None
    global _BHAV_URL_BASE
    global _DELIV_URL_BASE
    bhav_url = _BHAV_URL_BASE % ({'year': yr, 'mon': mon, 'dd': dd})
    deliv_url = _DELIV_URL_BASE % ({'year': yr, 'mm': mm, 'dd': dd})
    try:
        bhavcopy_response = requests.get(bhav_url, headers=_BHAV_HEADERS)
        module_logger.info("GET:Bhavcopy URL: %s", bhav_url)
        delivery_response = requests.get(deliv_url, headers=_BHAV_HEADERS)
        module_logger.info("GET:Delivery URL: %s", deliv_url)
    except requests.RequestException as e:
        module_logger.exception(e)
        # We don't update bhav_deliv_downloaded here
        return None
    stocks_dict = {}
    # We do all of the following to avoid - network calls
    error_code = None
    if bhavcopy_response.status_code == 404 or delivery_response.status_code == 404:
        error_code = 'NOT_FOUND'
    else:
        if not (bhavcopy_response.ok and delivery_response.ok):
            error_code = 'DLOAD_ERR'
    # Record the download outcome so this date is not re-fetched needlessly.
    _update_dload_success(d2, bhavcopy_response.ok,
                          delivery_response.ok, error_code)
    if bhavcopy_response.ok and delivery_response.ok:
        bhavs = ZipFile(bio(bhavcopy_response.content))
        # The zip contains one CSV; the loop leaves its name in csv_name.
        for name in bhavs.namelist():
            csv_name = name
        delivery = bio(delivery_response.content)
        with bhavs.open(csv_name) as bhav:
            i = 0
            for line in bhav:
                # Skip the CSV header row.
                if i == 0:
                    i += 1
                    continue
                line = line.decode()
                l = line.split(',')
                if l[1] not in ['EQ', 'BE', 'BZ']:
                    continue
                # NOTE(review): this tuple assignment rebinds `l` from the
                # split row to the LOW price string; the row list is not used
                # afterwards, but the shadowing is easy to misread. Delivery
                # (d) is seeded with traded quantity (l[8]) and fixed up from
                # the MTO file below.
                sym, o, h, l, c, v, d = l[0], l[2], l[3], l[4], \
                        l[5], l[8], l[8]
                stocks_dict[sym] = [float(o), float(h), float(l), float(c),
                                    int(v), int(d)]
            i = 0
            # Parse the MTO delivery file: data lines start with the year
            # ('20..'); rows come in 4- and 7-column variants.
            for line in delivery:
                line = line.decode()
                if not line.startswith('20'):
                    i += 1
                    continue
                l = line.split(',')
                if (len(l)) == 4:
                    sym, d = l[1].strip(), l[3].strip()
                elif len(l) == 7:
                    if l[3] not in ['EQ', 'BE', 'BZ']:
                        i += 1
                        continue
                    sym, d = l[2].strip(), l[5].strip()
                try:
                    stocks_dict[sym][-1] = int(d)
                except KeyError:
                    module_logger.error(
                        "For Symbol: %s Delivery Data found but no Bhavcopy Data", sym)
                i += 1
            # Convert the raw lists into ScripOHLCVD named tuples.
            for sym in stocks_dict.keys():
                stocks_dict[sym] = ScripOHLCVD(*stocks_dict[sym])
                module_logger.debug("ScripInfo(%s): %s", sym,
                                    str(stocks_dict[sym]))
        return stocks_dict
    else:
        if not bhavcopy_response.ok:
            module_logger.error("GET:Bhavcopy URL %s (%d)",
                                bhav_url, bhavcopy_response.status_code)
        if not delivery_response.ok:
            module_logger.error("GET:Delivery URL %s (%d)",
                                deliv_url, delivery_response.status_code)
        return None
def _update_dload_success(fdate, bhav_ok, deliv_ok, error_code=None):
    """ Update whether bhavcopy download and delivery data download for given
    date is successful"""
    tbl = create_or_get_nse_bhav_deliv_download_info(metadata=_DB_METADATA)
    sel_st = select_expr([tbl]).where(tbl.c.download_date == fdate)
    res = execute_one(sel_st, engine=_DB_METADATA.bind)
    # res.first closes the result
    first_row = res.first()
    # Following is the closest to what I wanted for an 'upsert' support in
    # DB agnostic way. Clearly this is not most ideal, but as of now I do not
    # know of better way of doing this.
    # This issue discusses something similar
    # https://groups.google.com/forum/#!topic/sqlalchemy/63OnY_ZFmic
    if not first_row:
        # No row for this date yet -> INSERT.
        ins_or_upd_st = tbl.insert().values(download_date=fdate,
                                            bhav_success=bhav_ok,
                                            deliv_success=deliv_ok,
                                            error_type=error_code)
    else:
        # Row already exists -> UPDATE it in place.
        module_logger.info("Found row. Updating %s", str(first_row))
        ins_or_upd_st = tbl.update().where(tbl.c.download_date == fdate).\
                values(download_date=fdate,
                       bhav_success=bhav_ok,
                       deliv_success=deliv_ok,
                       error_type=error_code)
    module_logger.debug(ins_or_upd_st.compile().params)
    result = execute_one_insert(ins_or_upd_st, engine=_DB_METADATA.bind)
    result.close()
def _update_bhavcopy(curdate, stocks_dict):
    """Replace all equity history rows for ``curdate``.

    ``stocks_dict`` maps symbol name -> ScripOHLCVD named tuple.
    """
    hist_tbl = create_or_get_nse_equities_hist_data(metadata=_DB_METADATA)
    # Drop any stale rows for this date before re-inserting.
    module_logger.debug("Deleting any old data for date %s.", curdate)
    del_result = execute_one(hist_tbl.delete(hist_tbl.c.date == curdate),
                             engine=_DB_METADATA.bind)
    module_logger.debug("Deleted %d rows.", del_result.rowcount)
    insert_statements = []
    for symbol, ohlcvd in stocks_dict.items():
        stmt = hist_tbl.insert().values(symbol=symbol, date=curdate,
                                        open=ohlcvd.open, high=ohlcvd.high,
                                        low=ohlcvd.low, close=ohlcvd.close,
                                        volume=ohlcvd.volume,
                                        delivery=ohlcvd.deliv)
        insert_statements.append(stmt)
        module_logger.debug(stmt.compile().params)
    for res in execute_many_insert(insert_statements,
                                   engine=_DB_METADATA.bind):
        res.close()
def _bhavcopy_downloaded(fdate):
    """Return whether bhav/delivery data for ``fdate`` is already downloaded.

    A 'NOT_FOUND' recorded more than seven days ago is treated as downloaded,
    so dates for which NSE has no data are not retried forever.
    """
    table = create_or_get_nse_bhav_deliv_download_info(metadata=_DB_METADATA)
    query = table.select().where(table.c.download_date == fdate)
    row = execute_one(query.compile(), engine=_DB_METADATA.bind).fetchone()
    if not row:
        return False
    age = dt.date(dt.today()) - fdate
    stale_not_found = (age.days > 7) and row.error_type == 'NOT_FOUND'
    return (row[1] and row[2]) or stale_not_found
def _apply_name_changes_to_db(syms):
    """Rename historical rows so every security carries its latest symbol.

    ``syms`` yields (old_name, new_name, change_date) entries; change_date is
    in DD-MMM-YYYY format. Rows dated before the change are updated to the
    new name.
    """
    hist_data = create_or_get_nse_equities_hist_data(metadata=_DB_METADATA)
    update_statements = []
    for entry in syms:
        old_name, new_name, chdate = entry[0], entry[1], entry[2]
        change_dt = dt.date(dt.strptime(chdate, '%d-%b-%Y'))
        update_statements.append(
            hist_data.update().values(symbol=new_name).
            where(and_expr(hist_data.c.symbol == old_name,
                           hist_data.c.date < change_dt)))
    for res in execute_many_insert(update_statements,
                                   engine=_DB_METADATA.bind):
        res.close()
def main(args):
    """Parse CLI options, download bhavcopies for the date range into the DB,
    then apply symbol name changes.

    Fixes: the --to option's help text wrongly said "From Date"; the
    confirmation prompt misspelled "Total" as "Tatal".

    Returns 0 on success, -1 for a bad DB URL or bad date arguments.
    """
    # We run the full program
    import argparse
    parser = argparse.ArgumentParser()
    # --full option
    parser.add_argument("--full-to",
                        help="download full data from 1 Jan 2002",
                        action="store_true")
    # --from option
    parser.add_argument("--from",
                        help="From Date in DD-MM-YYYY format. " \
                             "Default is 01-01-2002",
                        dest='fromdate',
                        default="01-01-2002")
    # --to option
    parser.add_argument("--to",
                        help="To Date in DD-MM-YYYY format. " \
                             "Default is Today.",
                        dest='todate',
                        default="today")
    # --yes option
    parser.add_argument("--yes",
                        help="Answer yes to all questions.",
                        dest="sure",
                        action="store_true")
    # --dbpath option
    parser.add_argument("--dbpath",
                        help="Database URL to be used.",
                        dest="dbpath")
    args = parser.parse_args()
    print(args)
    # Make sure we can access the DB path if specified or else exit right here.
    if args.dbpath:
        try:
            global _DB_METADATA
            _DB_METADATA = get_metadata(args.dbpath)
        except Exception as e:
            print("Not a valid DB URL: {} (Exception: {})".format(
                args.dbpath, e))
            return -1
    try:
        from_date = dt.strptime(args.fromdate, _DATE_FMT)
        if args.todate.lower() == 'today':
            args.todate = dt.now().strftime(_DATE_FMT)
        to_date = dt.strptime(args.todate, _DATE_FMT)
    except ValueError:
        print(parser.format_usage())
        return -1
    # We are now ready to download data
    if from_date > to_date:
        print(parser.format_usage())
        return -1
    num_days = to_date - from_date
    # Ask for confirmation on large downloads unless --yes was given.
    if num_days.days > _WARN_DAYS:
        if args.sure:
            sure = True
        else:
            # NOTE(review): under Python 2 `input()` eval()s the reply; this
            # prompt only behaves correctly on Python 3.
            sure = input("Total number of days for download is %1d. "
                         "Are you Sure?[y|N] " % num_days.days)
            if sure.lower() in ("y", "ye", "yes"):
                sure = True
            else:
                sure = False
    else:
        sure = True
    if not sure:
        sys.exit(0)
    module_logger.info("Downloading data for %d days", num_days.days)
    cur_date = from_date
    while cur_date <= to_date:
        module_logger.debug("Getting data for %s", str(cur_date))
        scrips_dict = get_bhavcopy(cur_date)
        if scrips_dict is not None:
            _update_bhavcopy(cur_date, scrips_dict)
        # Random delay between requests so we don't hammer the NSE servers.
        time.sleep(random.randrange(1, 10))
        cur_date += td(1)
    # Apply the name changes to the DB
    sym_change_tuples = nse_get_name_change_tuples()
    if len(sym_change_tuples) == 0:
        module_logger.info("No name change tuples found...")
        sys.exit(-1)
    _apply_name_changes_to_db(sym_change_tuples)
    return 0
# Script entry point: forward CLI args (minus the program name) to main().
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
|
{
"content_hash": "72f9a036f42c8a4a99416eb4b19b6449",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 120,
"avg_line_length": 34.033248081841435,
"alnum_prop": 0.5687983767941684,
"repo_name": "gabhijit/tickdownload",
"id": "d667e88bcb3ba0fcdea21c31e561402f36dd2903",
"size": "13430",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "get_stocks_nse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56932"
},
{
"name": "Shell",
"bytes": "4412"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib.auth.models import Permission
from django.contrib.auth.views import redirect_to_login
from django.db import models
from django.urls import reverse
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ngettext
from wagtail import hooks
from wagtail.coreutils import get_content_languages
from wagtail.log_actions import LogFormatter
from wagtail.models import ModelLogEntry, Page, PageLogEntry, PageViewRestriction
from wagtail.rich_text.pages import PageLinkHandler
def require_wagtail_login(next):
    """Redirect to the Wagtail frontend login page, preserving ``next``."""
    fallback = reverse("wagtailcore_login")
    login_url = getattr(settings, "WAGTAIL_FRONTEND_LOGIN_URL", fallback)
    return redirect_to_login(next, login_url)
@hooks.register("before_serve_page")
def check_view_restrictions(page, request, serve_args, serve_kwargs):
    """
    Check whether there are any view restrictions on this page which are
    not fulfilled by the given request object. If there are, return an
    HttpResponse that will notify the user of that restriction (and possibly
    include a password / login form that will allow them to proceed). If
    there are no such restrictions, return None
    """
    for restriction in page.get_view_restrictions():
        if restriction.accept_request(request):
            continue
        if restriction.restriction_type == PageViewRestriction.PASSWORD:
            from wagtail.forms import PasswordViewRestrictionForm

            form = PasswordViewRestrictionForm(
                instance=restriction,
                initial={"return_url": request.get_full_path()},
            )
            action_url = reverse(
                "wagtailcore_authenticate_with_password",
                args=[restriction.id, page.id],
            )
            return page.serve_password_required_response(request, form, action_url)
        if restriction.restriction_type in (
            PageViewRestriction.LOGIN,
            PageViewRestriction.GROUPS,
        ):
            return require_wagtail_login(next=request.get_full_path())
@hooks.register("register_rich_text_features")
def register_core_features(features):
    """Register the rich text features Wagtail enables by default."""
    features.default_features.append("hr")
    features.default_features.append("link")
    features.register_link_type(PageLinkHandler)
    # Remaining defaults, in the same order the editor expects them.
    features.default_features.extend(
        ["bold", "italic", "h2", "h3", "h4", "ol", "ul"]
    )
# Workflow/task admin permissions are only registered when workflows are
# enabled; WAGTAIL_WORKFLOW_ENABLED defaults to True.
if getattr(settings, "WAGTAIL_WORKFLOW_ENABLED", True):
    @hooks.register("register_permissions")
    def register_workflow_permissions():
        """Permissions governing Workflow administration."""
        return Permission.objects.filter(
            content_type__app_label="wagtailcore",
            codename__in=["add_workflow", "change_workflow", "delete_workflow"],
        )
    @hooks.register("register_permissions")
    def register_task_permissions():
        """Permissions governing workflow Task administration."""
        return Permission.objects.filter(
            content_type__app_label="wagtailcore",
            codename__in=["add_task", "change_task", "delete_task"],
        )
@hooks.register("describe_collection_contents")
def describe_collection_children(collection):
    """Summarise descendant collections for the collection-contents UI.

    Returns None when the collection has no descendants.
    """
    count = collection.get_descendants().count()
    if not count:
        return None
    count_text = ngettext(
        "%(count)s descendant collection",
        "%(count)s descendant collections",
        count,
    ) % {"count": count}
    return {
        "count": count,
        "count_text": count_text,
        "url": reverse("wagtailadmin_collections:index"),
    }
@hooks.register("register_log_actions")
def register_core_log_actions(actions):
    """Register Wagtail's built-in audit-log actions and message formatters."""
    # Generic models log via ModelLogEntry; pages use the richer PageLogEntry.
    actions.register_model(models.Model, ModelLogEntry)
    actions.register_model(Page, PageLogEntry)
    # Simple actions: (identifier, verb label, past-tense message).
    actions.register_action("wagtail.create", _("Create"), _("Created"))
    actions.register_action("wagtail.edit", _("Edit"), _("Edited"))
    actions.register_action("wagtail.delete", _("Delete"), _("Deleted"))
    actions.register_action("wagtail.publish", _("Publish"), _("Published"))
    actions.register_action(
        "wagtail.publish.scheduled",
        _("Publish scheduled draft"),
        _("Published scheduled draft"),
    )
    actions.register_action("wagtail.unpublish", _("Unpublish"), _("Unpublished"))
    actions.register_action(
        "wagtail.unpublish.scheduled",
        _("Unpublish scheduled draft"),
        _("Unpublished scheduled draft"),
    )
    actions.register_action("wagtail.lock", _("Lock"), _("Locked"))
    actions.register_action("wagtail.unlock", _("Unlock"), _("Unlocked"))
    actions.register_action("wagtail.moderation.approve", _("Approve"), _("Approved"))
    actions.register_action("wagtail.moderation.reject", _("Reject"), _("Rejected"))
    # Data-dependent actions register LogFormatter subclasses; every
    # format_message falls back to a generic message when the expected
    # log_entry.data keys are absent (KeyError).
    @actions.register_action("wagtail.rename")
    class RenameActionFormatter(LogFormatter):
        label = _("Rename")
        def format_message(self, log_entry):
            try:
                return _("Renamed from '%(old)s' to '%(new)s'") % {
                    "old": log_entry.data["title"]["old"],
                    "new": log_entry.data["title"]["new"],
                }
            except KeyError:
                return _("Renamed")
    @actions.register_action("wagtail.revert")
    class RevertActionFormatter(LogFormatter):
        label = _("Revert")
        def format_message(self, log_entry):
            try:
                return _(
                    "Reverted to previous revision with id %(revision_id)s from %(created_at)s"
                ) % {
                    "revision_id": log_entry.data["revision"]["id"],
                    "created_at": log_entry.data["revision"]["created"],
                }
            except KeyError:
                return _("Reverted to previous revision")
    @actions.register_action("wagtail.copy")
    class CopyActionFormatter(LogFormatter):
        label = _("Copy")
        def format_message(self, log_entry):
            try:
                return _("Copied from %(title)s") % {
                    "title": log_entry.data["source"]["title"],
                }
            except KeyError:
                return _("Copied")
    @actions.register_action("wagtail.copy_for_translation")
    class CopyForTranslationActionFormatter(LogFormatter):
        label = _("Copy for translation")
        def format_message(self, log_entry):
            try:
                # Display name of the source locale; empty string when the
                # language code is not in the configured content languages.
                return _("Copied for translation from %(title)s (%(locale)s)") % {
                    "title": log_entry.data["source"]["title"],
                    "locale": get_content_languages().get(
                        log_entry.data["source_locale"]["language_code"]
                    )
                    or "",
                }
            except KeyError:
                return _("Copied for translation")
    @actions.register_action("wagtail.create_alias")
    class CreateAliasActionFormatter(LogFormatter):
        label = _("Create alias")
        def format_message(self, log_entry):
            try:
                return _("Created an alias of %(title)s") % {
                    "title": log_entry.data["source"]["title"],
                }
            except KeyError:
                return _("Created an alias")
    @actions.register_action("wagtail.convert_alias")
    class ConvertAliasActionFormatter(LogFormatter):
        label = _("Convert alias into ordinary page")
        def format_message(self, log_entry):
            try:
                return _("Converted the alias '%(title)s' into an ordinary page") % {
                    "title": log_entry.data["page"]["title"],
                }
            except KeyError:
                return _("Converted an alias into an ordinary page")
    @actions.register_action("wagtail.move")
    class MoveActionFormatter(LogFormatter):
        label = _("Move")
        def format_message(self, log_entry):
            try:
                return _("Moved from '%(old_parent)s' to '%(new_parent)s'") % {
                    "old_parent": log_entry.data["source"]["title"],
                    "new_parent": log_entry.data["destination"]["title"],
                }
            except KeyError:
                return _("Moved")
    @actions.register_action("wagtail.reorder")
    class ReorderActionFormatter(LogFormatter):
        label = _("Reorder")
        def format_message(self, log_entry):
            try:
                return _("Reordered under '%(parent)s'") % {
                    "parent": log_entry.data["destination"]["title"],
                }
            except KeyError:
                return _("Reordered")
    @actions.register_action("wagtail.publish.schedule")
    class SchedulePublishActionFormatter(LogFormatter):
        label = _("Schedule publication")
        def format_message(self, log_entry):
            try:
                # Wording differs depending on whether a live version already
                # exists (scheduling a draft revision vs. first publication).
                if log_entry.data["revision"]["has_live_version"]:
                    return _(
                        "Revision %(revision_id)s from %(created_at)s scheduled for publishing at %(go_live_at)s."
                    ) % {
                        "revision_id": log_entry.data["revision"]["id"],
                        "created_at": log_entry.data["revision"]["created"],
                        "go_live_at": log_entry.data["revision"]["go_live_at"],
                    }
                else:
                    return _("Page scheduled for publishing at %(go_live_at)s") % {
                        "go_live_at": log_entry.data["revision"]["go_live_at"],
                    }
            except KeyError:
                return _("Page scheduled for publishing")
    @actions.register_action("wagtail.schedule.cancel")
    class UnschedulePublicationActionFormatter(LogFormatter):
        label = _("Unschedule publication")
        def format_message(self, log_entry):
            try:
                if log_entry.data["revision"]["has_live_version"]:
                    return _(
                        "Revision %(revision_id)s from %(created_at)s unscheduled from publishing at %(go_live_at)s."
                    ) % {
                        "revision_id": log_entry.data["revision"]["id"],
                        "created_at": log_entry.data["revision"]["created"],
                        "go_live_at": log_entry.data["revision"]["go_live_at"],
                    }
                else:
                    return _("Page unscheduled for publishing at %(go_live_at)s") % {
                        "go_live_at": log_entry.data["revision"]["go_live_at"],
                    }
            except KeyError:
                return _("Page unscheduled from publishing")
    @actions.register_action("wagtail.view_restriction.create")
    class AddViewRestrictionActionFormatter(LogFormatter):
        label = _("Add view restrictions")
        def format_message(self, log_entry):
            try:
                return _("Added the '%(restriction)s' view restriction") % {
                    "restriction": log_entry.data["restriction"]["title"],
                }
            except KeyError:
                return _("Added view restriction")
    @actions.register_action("wagtail.view_restriction.edit")
    class EditViewRestrictionActionFormatter(LogFormatter):
        label = _("Update view restrictions")
        def format_message(self, log_entry):
            try:
                return _("Updated the view restriction to '%(restriction)s'") % {
                    "restriction": log_entry.data["restriction"]["title"],
                }
            except KeyError:
                return _("Updated view restriction")
    @actions.register_action("wagtail.view_restriction.delete")
    class DeleteViewRestrictionActionFormatter(LogFormatter):
        label = _("Remove view restrictions")
        def format_message(self, log_entry):
            try:
                return _("Removed the '%(restriction)s' view restriction") % {
                    "restriction": log_entry.data["restriction"]["title"],
                }
            except KeyError:
                return _("Removed view restriction")
    # Shared base for the comment formatters below: resolves the human
    # readable field label from the comment's content path.
    class CommentLogFormatter(LogFormatter):
        @staticmethod
        def _field_label_from_content_path(model, content_path):
            """
            Finds the translated field label for the given model and content path

            Raises LookupError if not found
            """
            # Only the first segment of the dotted content path names the field.
            field_name = content_path.split(".")[0]
            return capfirst(model._meta.get_field(field_name).verbose_name)
    @actions.register_action("wagtail.comments.create")
    class CreateCommentActionFormatter(CommentLogFormatter):
        label = _("Add comment")
        def format_message(self, log_entry):
            try:
                return _('Added a comment on field %(field)s: "%(text)s"') % {
                    "field": self._field_label_from_content_path(
                        log_entry.page.specific_class,
                        log_entry.data["comment"]["contentpath"],
                    ),
                    "text": log_entry.data["comment"]["text"],
                }
            except KeyError:
                return _("Added a comment")
    @actions.register_action("wagtail.comments.edit")
    class EditCommentActionFormatter(CommentLogFormatter):
        label = _("Edit comment")
        def format_message(self, log_entry):
            try:
                return _('Edited a comment on field %(field)s: "%(text)s"') % {
                    "field": self._field_label_from_content_path(
                        log_entry.page.specific_class,
                        log_entry.data["comment"]["contentpath"],
                    ),
                    "text": log_entry.data["comment"]["text"],
                }
            except KeyError:
                return _("Edited a comment")
    @actions.register_action("wagtail.comments.delete")
    class DeleteCommentActionFormatter(CommentLogFormatter):
        label = _("Delete comment")
        def format_message(self, log_entry):
            try:
                return _('Deleted a comment on field %(field)s: "%(text)s"') % {
                    "field": self._field_label_from_content_path(
                        log_entry.page.specific_class,
                        log_entry.data["comment"]["contentpath"],
                    ),
                    "text": log_entry.data["comment"]["text"],
                }
            except KeyError:
                return _("Deleted a comment")
    @actions.register_action("wagtail.comments.resolve")
    class ResolveCommentActionFormatter(CommentLogFormatter):
        label = _("Resolve comment")
        def format_message(self, log_entry):
            try:
                return _('Resolved a comment on field %(field)s: "%(text)s"') % {
                    "field": self._field_label_from_content_path(
                        log_entry.page.specific_class,
                        log_entry.data["comment"]["contentpath"],
                    ),
                    "text": log_entry.data["comment"]["text"],
                }
            except KeyError:
                return _("Resolved a comment")
    @actions.register_action("wagtail.comments.create_reply")
    class CreateReplyActionFormatter(CommentLogFormatter):
        label = _("Reply to comment")
        def format_message(self, log_entry):
            try:
                return _('Replied to comment on field %(field)s: "%(text)s"') % {
                    "field": self._field_label_from_content_path(
                        log_entry.page.specific_class,
                        log_entry.data["comment"]["contentpath"],
                    ),
                    # Reply formatters show the reply's text, not the parent
                    # comment's.
                    "text": log_entry.data["reply"]["text"],
                }
            except KeyError:
                return _("Replied to a comment")
    @actions.register_action("wagtail.comments.edit_reply")
    class EditReplyActionFormatter(CommentLogFormatter):
        label = _("Edit reply to comment")
        def format_message(self, log_entry):
            try:
                return _(
                    'Edited a reply to a comment on field %(field)s: "%(text)s"'
                ) % {
                    "field": self._field_label_from_content_path(
                        log_entry.page.specific_class,
                        log_entry.data["comment"]["contentpath"],
                    ),
                    "text": log_entry.data["reply"]["text"],
                }
            except KeyError:
                return _("Edited a reply")
    @actions.register_action("wagtail.comments.delete_reply")
    class DeleteReplyActionFormatter(CommentLogFormatter):
        label = _("Delete reply to comment")
        def format_message(self, log_entry):
            try:
                return _(
                    'Deleted a reply to a comment on field %(field)s: "%(text)s"'
                ) % {
                    "field": self._field_label_from_content_path(
                        log_entry.page.specific_class,
                        log_entry.data["comment"]["contentpath"],
                    ),
                    "text": log_entry.data["reply"]["text"],
                }
            except KeyError:
                return _("Deleted a reply")
@hooks.register("register_log_actions")
def register_workflow_log_actions(actions):
    """Register audit-log actions for workflow lifecycle events."""
    class WorkflowLogFormatter(LogFormatter):
        # Workflow log entries may carry a free-text reviewer comment.
        def format_comment(self, log_entry):
            return log_entry.data.get("comment", "")
    # Each formatter catches TypeError in addition to KeyError because
    # data["workflow"]["next"] may be None (end of workflow), making
    # the ["title"] subscript fail with TypeError.
    @actions.register_action("wagtail.workflow.start")
    class StartWorkflowActionFormatter(WorkflowLogFormatter):
        label = _("Workflow: start")
        def format_message(self, log_entry):
            try:
                return _("'%(workflow)s' started. Next step '%(task)s'") % {
                    "workflow": log_entry.data["workflow"]["title"],
                    "task": log_entry.data["workflow"]["next"]["title"],
                }
            except (KeyError, TypeError):
                return _("Workflow started")
    @actions.register_action("wagtail.workflow.approve")
    class ApproveWorkflowActionFormatter(WorkflowLogFormatter):
        label = _("Workflow: approve task")
        def format_message(self, log_entry):
            try:
                # Message depends on whether a next task exists or the
                # workflow completed with this approval.
                if log_entry.data["workflow"]["next"]:
                    return _("Approved at '%(task)s'. Next step '%(next_task)s'") % {
                        "task": log_entry.data["workflow"]["task"]["title"],
                        "next_task": log_entry.data["workflow"]["next"]["title"],
                    }
                else:
                    return _("Approved at '%(task)s'. '%(workflow)s' complete") % {
                        "task": log_entry.data["workflow"]["task"]["title"],
                        "workflow": log_entry.data["workflow"]["title"],
                    }
            except (KeyError, TypeError):
                return _("Workflow task approved")
    @actions.register_action("wagtail.workflow.reject")
    class RejectWorkflowActionFormatter(WorkflowLogFormatter):
        label = _("Workflow: reject task")
        def format_message(self, log_entry):
            try:
                return _("Rejected at '%(task)s'. Changes requested") % {
                    "task": log_entry.data["workflow"]["task"]["title"],
                }
            except (KeyError, TypeError):
                return _("Workflow task rejected. Workflow complete")
    @actions.register_action("wagtail.workflow.resume")
    class ResumeWorkflowActionFormatter(WorkflowLogFormatter):
        label = _("Workflow: resume task")
        def format_message(self, log_entry):
            try:
                return _("Resubmitted '%(task)s'. Workflow resumed") % {
                    "task": log_entry.data["workflow"]["task"]["title"],
                }
            except (KeyError, TypeError):
                return _("Workflow task resubmitted. Workflow resumed")
    @actions.register_action("wagtail.workflow.cancel")
    class CancelWorkflowActionFormatter(WorkflowLogFormatter):
        label = _("Workflow: cancel")
        def format_message(self, log_entry):
            try:
                return _("Cancelled '%(workflow)s' at '%(task)s'") % {
                    "workflow": log_entry.data["workflow"]["title"],
                    "task": log_entry.data["workflow"]["task"]["title"],
                }
            except (KeyError, TypeError):
                return _("Workflow cancelled")
|
{
"content_hash": "d7d1da803e92b409c789180eb6a1f34f",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 117,
"avg_line_length": 40.15503875968992,
"alnum_prop": 0.5518339768339768,
"repo_name": "rsalmaso/wagtail",
"id": "791ead85093e52b02f9b8e5afef8546b4aafbef5",
"size": "20720",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "wagtail/wagtail_hooks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593672"
},
{
"name": "JavaScript",
"bytes": "624463"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6598232"
},
{
"name": "SCSS",
"bytes": "221911"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "296087"
}
],
"symlink_target": ""
}
|
import os
import re
import time
import json
from collections import OrderedDict
import pkg_resources
import pandas as pd
from Bio import SeqIO
from Bio import Entrez
# NCBI asks Entrez clients to identify themselves with a contact e-mail.
Entrez.email = "testing@ucsd.edu"
def untag(rule):
    """Strip named-group tags from a verbose regex snippet.

    Rewrites every ``(?P<name>`` opening to a plain ``(`` so that a rule
    written with named captures can be embedded inside a larger pattern
    without colliding on duplicate group names.
    """
    # Raw string: the original non-raw '\?P<.*?>' relies on the invalid
    # escape '\?' surviving, which is a SyntaxWarning in modern Python.
    return re.sub(r'\?P<.*?>', '', rule)
def parse_antiSMASH(content):
    """ Parse antiSMASH output

        Parses a ClusterBlast text report into a dict with keys:
        'target', 'QueryCluster' (column-oriented OrderedDict of the
        query gene table) and 'SignificantHits' (OrderedDict keyed by
        subject cluster id, each holding its metadata, gene table and
        BLAST hit table).
    """
    # Verbose-regex grammar for each section of the report. Note: all
    # captured values remain strings (no numeric conversion here).
    rule_table_genes = r"""
        (?P<subject_gene> \w+ \"?) \t
        \w+ \t
        (?P<location_start> \d+) \t
        (?P<location_end> \d+) \t
        (?P<strands> [+|-]) \t
        (?P<product> .*) \n
        """
    rule_table_blasthit = r"""
        (?P<query_gene> \w+ )\"? \t
        (?P<subject_gene> \w+ )\"? \t
        (?P<identity> \d+) \t
        (?P<blast_score> \d+) \t
        (?P<coverage> \d+(?:\.\d+)?) \t
        (?P<evalue> \d+\.\d+e[+|-]\d+) \t
        \n
        """
    rule_query_cluster = r"""
        (?P<query_gene> \w+) \s+
        (?P<location_start> \d+) \s
        (?P<location_end> \d+) \s
        (?P<strands> [+|-]) \s
        (?P<product> \w+ (?:\s \w+)?) \s* \n+
        """
    # One ">>" detail block per subject cluster; the inner tables reuse the
    # rules above with their named groups stripped (untag) to avoid
    # duplicate group names in the combined pattern.
    rule_detail = r"""
        >>\n
        (?P<id>\d+) \. \s+
        (?P<cluster_subject> (?P<locus>\w+)_(?P<cluster>\w+)) \n
        Source: \s+ (?P<source>.+?) \s* \n
        Type: \s+ (?P<type>.+) \s* \n
        Number\ of\ proteins\ with\ BLAST\ hits\ to\ this\ cluster:\ (?P<n_hits> \d+ ) \n
        Cumulative\ BLAST\ score:\ (?P<cum_BLAST_score> \d+ )
        \n \n
        Table\ of\ genes,\ locations,\ strands\ and\ annotations\ of\ subject\ cluster:\n
        (?P<TableGenes>
          (
            """ + untag(rule_table_genes) + r"""
          )+
        )
        \n
        Table\ of\ Blast\ hits\ \(query\ gene,\ subject\ gene,\ %identity,\ blast\ score,\ %coverage,\ e-value\): \n
        (?P<BlastHit>
          (\w+ \t \w+ \"? \t \d+ \t \d+ \t \d+\.\d+ \t \d+\.\d+e[+|-]\d+ \t \n)+
        )
        \n+
        """
    # Grammar for the whole report, anchored start-to-end.
    rule = r"""
        ^
        ClusterBlast\ scores\ for\ (?P<target>.*)\n+
        Table\ of\ genes,\ locations,\ strands\ and\ annotations\ of\ query\ cluster:\n+
        (?P<QueryCluster>
          (
            """ + untag(rule_query_cluster) + r"""
          )+
        )
        \n \n+
        Significant \ hits:\ \n
        (?P<SignificantHits>
          (\d+ \. \ \w+ \t .* \n+)+
        )
        \n \n
        (?P<Details>
          Details:\n\n
          (
            """ + untag(rule_detail) + r"""
          )+
        )
        \n*
        $
        """
    # First pass captures each section as raw text.
    parsed = re.search(rule, content, re.VERBOSE).groupdict()
    output = {}
    for k in ['target', 'QueryCluster', 'SignificantHits']:
        output[k] = parsed[k]
    # Re-parse the query cluster table into column-oriented lists.
    QueryCluster = OrderedDict()
    for k in re.search(
            rule_query_cluster, parsed['QueryCluster'],
            re.VERBOSE).groupdict().keys():
        QueryCluster[k] = []
    for row in re.finditer(
            rule_query_cluster, parsed['QueryCluster'], re.VERBOSE):
        row = row.groupdict()
        for k in row:
            QueryCluster[k].append(row[k])
    output['QueryCluster'] = QueryCluster
    # Index the significant hits by their subject cluster id.
    output['SignificantHits'] = OrderedDict()
    for row in re.finditer(
            r"""(?P<id>\d+) \. \ (?P<cluster_subject> (?P<locus>\w+)_(?P<locus_cluster>\w+)) \t (?P<description>.*) \n+""", parsed['SignificantHits'], re.VERBOSE):
        hit = row.groupdict()
        cs = hit['cluster_subject']
        if cs not in output['SignificantHits']:
            output['SignificantHits'][cs] = OrderedDict()
        for v in ['id', 'description', 'locus', 'locus_cluster']:
            output['SignificantHits'][cs][v] = hit[v]
    # Attach the per-cluster detail blocks (gene and BLAST tables) to the
    # matching significant hit.
    for block in re.finditer(rule_detail, parsed['Details'], re.VERBOSE):
        block = dict(block.groupdict())
        content = block['TableGenes']
        block['TableGenes'] = OrderedDict()
        for k in re.findall('\(\?P<(.*?)>', rule_table_genes):
            block['TableGenes'][k] = []
        for row in re.finditer(rule_table_genes, content, re.VERBOSE):
            row = row.groupdict()
            for k in row:
                block['TableGenes'][k].append(row[k])
        content = block['BlastHit']
        block['BlastHit'] = OrderedDict()
        for k in re.findall('\(\?P<(.*?)>', rule_table_blasthit):
            block['BlastHit'][k] = []
        for row in re.finditer(rule_table_blasthit, content, re.VERBOSE):
            row = row.groupdict()
            for k in row:
                block['BlastHit'][k].append(row[k])
        for k in block:
            output['SignificantHits'][block['cluster_subject']][k] = block[k]
    return output
def antiSMASH_to_dataFrame(content):
    """ Extract an antiSMASH file as a pandas.DataFrame

        One row per (query gene, subject gene) pair, combining the BLAST
        hit table with the subject cluster gene table, annotated with the
        per-cluster metadata parsed by parse_antiSMASH().
    """
    parsed = parse_antiSMASH(content)
    frames = []
    for cs in parsed['SignificantHits']:
        clusterSubject = parsed['SignificantHits'][cs].copy()
        # Outer join keeps subject genes that have no BLAST hit.
        df = pd.merge(
            pd.DataFrame(clusterSubject['BlastHit']),
            pd.DataFrame(clusterSubject['TableGenes']),
            on='subject_gene', how='outer')
        del clusterSubject['BlastHit']
        del clusterSubject['TableGenes']
        # Remaining scalar fields apply to every row of this cluster.
        for v in clusterSubject:
            df[v] = clusterSubject[v]
        frames.append(df)
    if not frames:
        return pd.DataFrame()
    # pd.concat replaces DataFrame.append (deprecated in pandas 1.4 and
    # removed in 2.0) and avoids quadratic re-copying per cluster.
    return pd.concat(frames, ignore_index=True)
class antiSMASH_file(object):
    """ A class to handle antiSMASH file output.

        A thin read-only mapping over the dictionary produced by
        parse_antiSMASH() for a given report file.
    """
    def __init__(self, filename):
        self.data = {}
        self.load(filename)

    def __getitem__(self, item):
        return self.data[item]

    def keys(self):
        return self.data.keys()

    def load(self, filename):
        # Read the whole report, then replace our mapping with a fresh
        # copy of the parsed result.
        with open(filename, 'r') as f:
            content = f.read()
        self.data = dict(parse_antiSMASH(content))
def efetch_hit(term, seq_start, seq_stop):
""" Fetch the relevant part of a hit
"""
db = "nucleotide"
maxtry = 3
ntry = -1
downloaded = False
while ~downloaded and (ntry <= maxtry):
ntry += 1
try:
handle = Entrez.esearch(db=db, term=term)
record = Entrez.read(handle)
assert len(record['IdList']) == 1, \
"Sorry, I'm not ready to handle more than one record"
handle = Entrez.efetch(db=db, rettype="gb", retmode="text",
id=record['IdList'][0],
seq_start=seq_start, seq_stop=seq_stop)
content = handle.read()
downloaded = True
except:
nap = ntry*3
print "Fail to download (term). I'll take a nap of %s seconds ", \
" and try again."
time.sleep(ntry*3)
return content
def download_hits(filename, output_path):
    """ Download the GenBank block for all hits by antiSMASH

        For each significant hit in `filename`, fetches the GenBank
        region spanning the subject cluster's genes into
        `output_path/<locus>_<start>-<end>.gbk`, skipping files that
        already exist.
    """
    # NOTE(review): location_start/location_end come out of the parser as
    # strings, so min()/max() compare lexicographically here — confirm
    # that is acceptable for the expected coordinate widths.
    c = antiSMASH_file(filename)
    for cs in c['SignificantHits'].keys():
        locus = c['SignificantHits'][cs]['locus']
        table_genes = c['SignificantHits'][cs]['TableGenes']
        # Output file is named after the locus and the overall gene span.
        filename_out = os.path.join(
            output_path,
            "%s_%s-%s.gbk" % (locus,
                              min(table_genes['location_start']),
                              max(table_genes['location_end'])))
        if os.path.isfile(filename_out):
            # Skip hits already fetched on a previous run.
            print "Already downloaded %s" % filename_out
        else:
            print "Requesting cluster_subject: %s, start: %s, end: %s" % (
                locus,
                min(table_genes['location_start']),
                max(table_genes['location_end']))
            content = efetch_hit(
                term=locus,
                seq_start=min(table_genes['location_start']),
                seq_stop=max(table_genes['location_end']))
            print "Saving %s" % filename_out
            with open(filename_out, 'w') as f:
                f.write(content)
import urlparse
import urllib2
import tempfile
import tarfile
import os
def download_mibig(outputdir, version='1.3'):
    """ Download and extract MIBiG files into outputdir

        Fetches the mibig_gbk_<version>.tar.gz archive from the MIBiG
        server, extracts it into `outputdir`, and removes the macOS
        resource-fork files shipped in the tarball.
    """
    assert version in ['1.0', '1.1', '1.2', '1.3'], \
        "Invalid version of MIBiG"
    server = 'http://mibig.secondarymetabolites.org'
    filename = "mibig_gbk_%s.tar.gz" % version
    url = urlparse.urljoin(server, filename)
    # Stream the archive into a temporary file, then extract in place;
    # the temp file is deleted when the with-block exits.
    with tempfile.NamedTemporaryFile(delete=True) as f:
        u = urllib2.urlopen(url)
        f.write(u.read())
        f.file.flush()
        tar = tarfile.open(f.name)
        tar.extractall(path=outputdir)
        tar.close()
    # MIBiG was packed with strange files ._*gbk. Let's remove it
    for f in [f for f in os.listdir(outputdir) if f[:2] == '._']:
        os.remove(os.path.join(outputdir, f))
#def gbk2tablegen(gb_file, strain_id=None):
#def cds_from_gbk(gb_file, strain_id=None):
def cds_from_gbk(gb_file):
    """ Extract all CDS features of a GenBank file as a pandas.DataFrame

        One row per CDS with the record id (BGC), locus tag, start/stop
        coordinates and strand; when an smCOG annotation is present in
        the feature notes, the predicted product, score and e-value are
        added as well.
    """
    # Use a context manager so the handle is closed (the original leaked
    # an open file; mode "rU" is also long-deprecated).
    with open(gb_file) as handle:
        gb_record = SeqIO.read(handle, "genbank")
    sign = lambda x: '+' if x > 0 else '-'
    # Hoisted out of the loop: smCOG note pattern, compiled once.
    smcog_rule = re.compile(
        r"""smCOG: \s (?P<product>.*?) \s+ \(Score: \s* (?P<score>.*); \s* E-value: \s (?P<e_value>.*?)\);""",
        re.VERBOSE)
    rows = []
    for feature in gb_record.features:
        if feature.type != "CDS":
            continue
        tmp = {'BGC': gb_record.id,
               'locus_tag': feature.qualifiers['locus_tag'][0],
               'start': feature.location.start.position,
               'stop': feature.location.end.position,
               'strand': sign(feature.location.strand)}
        # qualifiers may lack 'note'; .get avoids the explicit membership test.
        for note in feature.qualifiers.get('note', []):
            product = smcog_rule.search(note)
            if product is not None:
                product = product.groupdict()
                product['score'] = float(product['score'])
                product['e_value'] = float(product['e_value'])
                tmp.update(product)
        rows.append(tmp)
    # Build the frame once: DataFrame.append(Series) was O(n^2) and has
    # been removed in pandas 2.0.
    return pd.DataFrame(rows)
def find_category_from_product(df):
    """Annotate *df* with a 'category' column derived from 'product'.

    Categories come from the packaged subcluster_dictionary.json (regex
    pattern -> category). Products matching no rule, and rows with a
    null product, fall back to 'hypothetical'.
    """
    rules = json.loads(
        pkg_resources.resource_string(
            __name__, 'subcluster_dictionary.json'))

    def get_category(product):
        # First matching rule wins.
        for pattern in rules:
            if re.search(pattern, product):
                return rules[pattern]
        return 'hypothetical'

    has_product = df['product'].notnull()
    df['category'] = df.loc[has_product, 'product'].apply(get_category)
    # Rows with no product get no category from apply(); default them.
    df['category'].fillna('hypothetical', inplace=True)
    return df
def get_hits(filename, criteria='cum_BLAST_score'):
    """
    Reproduces original Tiago's code: table_1_extender.py

    For each query gene, keeps the single best hit according to
    `criteria` (highest value first, missing values last).
    """
    with open(filename) as f:
        content = f.read()
    df = antiSMASH_to_dataFrame(content)
    # Drop rows without a query gene, rank best-first, then take the top
    # row per query gene.
    df = df.dropna(subset=['query_gene'])
    df = df.sort_values(by=criteria, ascending=False, na_position='last')
    return df.groupby('query_gene', as_index=False).first()
|
{
"content_hash": "425832e6789edb480bd5827ee72c8a40",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 163,
"avg_line_length": 31.610795454545453,
"alnum_prop": 0.5234115215242203,
"repo_name": "tiagolbiotech/BioCompass",
"id": "4dcc3dbae9535ce00a6b3d8e83e8b0a712e75656",
"size": "11128",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "BioCompass/BioCompass.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5231"
},
{
"name": "Python",
"bytes": "36877"
},
{
"name": "Shell",
"bytes": "977"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds Music.singer (FK to Musician)
    # and updates Musician field metadata. The verbose_name values are
    # Chinese, stored as \uXXXX escapes (e.g. birth date, original name,
    # sex, stage name).
    dependencies = [
        ('music', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='music',
            name='singer',
            # default=1 backfills existing rows with Musician pk 1.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='Musician_singer', to='music.Musician'),
        ),
        migrations.AlterField(
            model_name='musician',
            name='birthday',
            field=models.DateTimeField(verbose_name='\u51fa\u751f\u65e5\u671f'),
        ),
        migrations.AlterField(
            model_name='musician',
            name='name',
            field=models.CharField(max_length=40, verbose_name='\u539f\u540d'),
        ),
        migrations.AlterField(
            model_name='musician',
            name='sex',
            field=models.CharField(choices=[('M', '\u7537'), ('F', '\u5973')], max_length=1, verbose_name='\u6027\u522b'),
        ),
        migrations.AlterField(
            model_name='musician',
            name='stagename',
            field=models.CharField(blank=True, max_length=40, null=True, verbose_name='\u827a\u540d'),
        ),
    ]
|
{
"content_hash": "99e62e5f1cb1fbb9973d4fec60e72b05",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 145,
"avg_line_length": 33.15384615384615,
"alnum_prop": 0.576952822892498,
"repo_name": "tea321000/django-project",
"id": "d91af87cb825b0ba8e267f785b2fee46e3ef679e",
"size": "1364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "musicsite/music/migrations/0002_auto_20170305_2121.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "153"
},
{
"name": "HTML",
"bytes": "3864"
},
{
"name": "Python",
"bytes": "30302"
}
],
"symlink_target": ""
}
|
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose
from scrapy.spiders import Spider
from w3lib.html import remove_tags
class MySpider(Spider):
    # Template spider: fill in name, start_urls, and the loader fields.
    # NOTE(review): MyItem is referenced in parse() but never imported in
    # this template — the generated project must supply that import.
    name = ''
    start_urls = [''] #FIRST LEVEL
    # 1. SCRAPING
    def parse(self, response):
        # Strip HTML tags from every input and keep only the first value
        # extracted per field.
        item_loader = ItemLoader(item=MyItem(), response=response)
        item_loader.default_input_processor = MapCompose(remove_tags)
        item_loader.default_output_processor = TakeFirst()
        #
        #item_loader.add_css("my_field", "my_css")
        #item_loader.add_xpath("my_field", "my_xpath")
        #
        return item_loader.load_item()
|
{
"content_hash": "747cbf2da2260b01a52a9347fd7497b1",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 69,
"avg_line_length": 32.55,
"alnum_prop": 0.6666666666666666,
"repo_name": "zseta/scrapy-templates",
"id": "ceac4c1ecc59b0ab9853422c4933045c392742e2",
"size": "878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templates/Ext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17318"
}
],
"symlink_target": ""
}
|
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Figurer
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
# Module strings consumed by SYS.setSubModule to wire this module into the
# ShareYourSystem class hierarchy (base class and decorating classor).
BaseModuleStr="ShareYourSystem.Standards.Viewers.Viewer"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
# Declares the Do-verb conjugations for this module:
# Figurer does "Figure", is "Figuring", has "Figured".
SYS.addDo("Figurer","Figure","Figuring","Figured")
#</DefineAugmentation>
#<ImportSpecificModules>
from ShareYourSystem.Standards.Itemizers import Setter,Manager
from ShareYourSystem.Standards.Controllers import Controller
import copy
#</ImportSpecificModules>
#<DefineLocals>
# Key-string markers referenced by figuring draw/tooltip dictionaries.
FigurePlotKeyStr='#plot'
FigureBarKeyStr='#bar'
FigureScatterKeyStr='#scatter'
FigureAxesKeyStr='#axes'
FigureMpld3KeyStr='#mpld3.plugins.'
#</DefineLocals>
#<DefineClass>
@DecorationClass(**{
#'ClassingSwitchMethodStrsList':['figure']
})
class FigurerClass(BaseClass):
	def default_init(self,
				_FigurePyplotVariable=None,
				_FigureCartoonVariablesList=None,
				_FigureTooltipVariablesList=None,
				_FiguringGridIntsTuple=(20,20),
				_FiguringShapeIntsTuple=(1,1),
				_FiguringDrawVariable=None,
				_FiguringShiftIntsTuple=(1,0),
				_FiguredTeamTagStr="",
				_FiguredDeriveTeamerVariablesList=None,
				_FiguredPanelDeriveTeamerVariable=None,
				_FiguredAxesDeriveTeamerVariable=None,
				_FiguredAxesVariable=None,
				_FiguredAnchorIntsList=[0,0],
				_FiguredShiftTuplesList={
					'DefaultValueType':property,
					'PropertyInitVariable':None,
					'PropertyDocStr':'I am reactive when I am a Panel and want to know the space I take !'
				},
				_FiguredPanelShapeIntsList=None,
				_FiguredCursorIntsList=[0,0],
				**_KwargVariablesDict
			):
		# The SYS Classer introspects this signature: each _Name=default pair
		# becomes a managed attribute of FigurerClass.
		# NOTE(review): the mutable defaults ([0,0] lists and the dict) would
		# be shared across calls in plain Python; presumably the framework
		# copies them per instance — confirm before changing.
		#Call the parent __init__ method
		BaseClass.__init__(self,**_KwargVariablesDict)
def do_figure(self):
#/###################/#
# First we get the children figurers and check what they are
#
#debug
'''
self.debug(
[
'We figure here',
#('self.',self,['ViewFirstDeriveViewerVariable'])
'self.TeamDict.keys() is ',
str(self.TeamDict.keys())
]
)
'''
#filter
FiguredTeamTagStrsList=SYS._filter(
lambda __KeyStr:
__KeyStr in ['Panels','Axes','Plots'],
self.TeamDict.keys()
)
#Check
if len(FiguredTeamTagStrsList)==1:
#filter
self.FiguredTeamTagStr=FiguredTeamTagStrsList[0]
#get
self.FiguredDeriveTeamerVariablesList=self.TeamDict[
self.FiguredTeamTagStr
].ManagementDict.values()
#debug
'''
self.debug(
[
('self.',self,[
'FiguredTeamTagStr',
#'FiguredDeriveTeamerVariablesList'
])
]
)
'''
#/###################/#
# do something before descending a figure call
#
if self.FiguredTeamTagStr=='Panels':
#debug
'''
self.debug(
[
'I am the top figurer...'
]
)
'''
elif self.FiguredTeamTagStr=='Axes' or self.ParentDeriveTeamerVariable!=None and self.ParentDeriveTeamerVariable.TeamTagStr=='Panels':
#/###############/#
# Add an axe for the symbol of the panel
#
#debug
'''
self.debug(
[
'We transform the team dict Axes to add a panel axe',
'self.TeamDict[\'Axes\'] is ',
SYS._str(self.TeamDict['Axes'])
]
)
'''
#team
self.team('Axes')
#debug
'''
self.debug(
[
'before setting',
'self.TeamedValueVariable.ManagementDict is ',
SYS._str(self.TeamedValueVariable.ManagementDict),
'Manager.ManagementDict is ',
str(Manager.ManagementDict)
]
)
'''
#map an add
map(
lambda __DeriveFigurer:
setattr(
__DeriveFigurer,
'ManagementIndexInt',
__DeriveFigurer.ManagementIndexInt+1
),
self.TeamedValueVariable.ManagementDict.values()
)
#update
self.TeamedValueVariable.ManagementDict=Manager.ManagementDict(
[
(
'Panel',SYS.FigurerClass(
**{
'ManagementTagStr':'Panel',
'ManagementIndexInt':0,
'ParentDeriveTeamerVariable':self.TeamedValueVariable,
'ViewFirstDeriveViewerVariable':self.ViewFirstDeriveViewerVariable,
'FigurePyplotVariable':self.FigurePyplotVariable,
'FiguringShapeIntsTuple':(1,1),
'FiguringDrawVariable':{
'#axes':
{
'set_axis_off':{'#liarg':[]},
'text':{
'#liarg':[
-1,0,'$\\mathbf{'+self.ManagementTagStr+'}$'
],
'#kwarg':{
'fontsize':20
}
}
}
},
'FiguredPanelDeriveTeamerVariable':self,
}
)
)
],
**self.TeamedValueVariable.ManagementDict
)
#debug
'''
self.debug(
[
'after setting',
'self.TeamedValueVariable.ManagementDict is ',
SYS._str(self.TeamedValueVariable.ManagementDict)
]
)
'''
#Add maybe a shift in the next figure
if len(self.TeamedValueVariable.ManagementDict)>1:
#debug
'''
self.debug(
[
'We add a shift down and right to the next figure',
'self.TeamedValueVariable.ManagementDict.get(1) is',
SYS._str(self.TeamedValueVariable.ManagementDict.get(1))
]
)
'''
#set
self.TeamedValueVariable.ManagementDict.get(1).FiguringShiftIntsTuple=(1,1)
#/##################/#
# There are some Axes to count
#
#map get
self.FiguredShiftTuplesList=map(
lambda __DeriveFigurer:
(
__DeriveFigurer.FiguringShapeIntsTuple,
__DeriveFigurer.FiguringShiftIntsTuple
),
self.TeamDict['Axes'].ManagementDict.values()
)
#debug
'''
self.debug(
[
'I am a still Panel...',
('self.',self,[
'FiguredShiftTuplesList',
'ManagementIndexInt'
])
]
)
'''
#/###############/#
# Determine what is the anchor considering the one of the last panel
#
#Check
if self.ManagementIndexInt>0:
#debug
'''
self.debug(
[
'We get the previous Panel',
'self.ParentDeriveTeamerVariable.ManagementDict is ',
SYS._str(self.ParentDeriveTeamerVariable.ManagementDict)
]
)
'''
#get the previous
FiguredPreviousPanelFigurer=self.ParentDeriveTeamerVariable.ManagementDict.get(
self.ManagementIndexInt-1
)
#debug
'''
self.debug(
[
'We look for the previous panel...',
#('FiguredPreviousPanelFigurer.',FiguredPreviousPanelFigurer,[
# 'FiguredAnchorIntsList',
# 'FiguredPanelShapeIntsList'
# ]
#)
]
)
'''
#Check
if self.FiguringShiftIntsTuple[0]>0:
#add
self.FiguredAnchorIntsList[0]=FiguredPreviousPanelFigurer.FiguredAnchorIntsList[0
]+self.FiguringShiftIntsTuple[0]+FiguredPreviousPanelFigurer.FiguredPanelShapeIntsList[0]+1
if self.FiguringShiftIntsTuple[1]>0:
#add
self.FiguredAnchorIntsList[1]=FiguredPreviousPanelFigurer.FiguredAnchorIntsList[1
]+self.FiguringShiftIntsTuple[1]+FiguredPreviousPanelFigurer.FiguredPanelShapeIntsList[1]+1
#debug
self.debug(
[
'we have setted the new anchor',
('self.',self,['FiguredAnchorIntsList'])
]
)
#/###############/#
# Init the Cursor for after
#
#init
self.FiguredCursorIntsList=copy.copy(self.FiguredAnchorIntsList)
#/###############/#
# Look maybe at a Panel without Axes and Plots
#
#Check
if len(self.TeamDict['Axes'].ManagementDict.keys())==1:
#debug
'''
self.debug(
[
'I am a Panel without Axes and Plots',
'So we just set an axe here'
]
)
'''
#set
self.setAxes()
#/###################/#
# if there axes setted then apply the draw set variable
#
#Check
if self.FiguredAxesVariable!=None:
#debug
'''
self.debug(
[
'There are axes so command the figuring draw variable',
('self.',self,[
'FiguringDrawVariable'
])
]
)
'''
#commad self
#self.command(self,self.FiguringDrawVariable)
#self.command(self,[])
self['#map@set'](self.FiguringDrawVariable)
#return
return
elif self.FiguredTeamTagStr=='Plots':
#debug
'''
self.debug(
[
'I am an Axes..',
('self.',self,['ParentDeriveTeamerVariable'])
]
)
'''
#get the parent panel
self.FiguredPanelDeriveTeamerVariable=self.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable
#debug
'''
self.debug(
[
'I am still an Axes..',
('self.',self,[
'FiguredPanelDeriveTeamerVariable',
#'ViewFirstDeriveViewerVariable',
'FiguredAnchorIntsList',
'FiguringShapeIntsTuple'
])
]
)
'''
#alias
self.FiguringGridIntsTuple=self.ViewFirstDeriveViewerVariable.FiguringGridIntsTuple
self.setAxes()
#debug
'''
self.debug(
[
'I am still an Axes..',
('self.',self,['FiguredAxesVariable'])
]
)
'''
else:
#debug
'''
self.debug(
[
'I dont have such panels axes plots...',
'So I can be a plot or the top one axe one plot figurer'
]
)
'''
#Check
if self!=self.ViewFirstDeriveViewerVariable:
#get
FiguredParentParentVariable=self.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable
#debug
'''
self.debug(
[
'FiguredParentParentVariable.ParentDeriveTeamerVariable.TeamTagStr is ',
FiguredParentParentVariable.ParentDeriveTeamerVariable.TeamTagStr,
'FiguredParentParentVariable is ',
SYS._str(FiguredParentParentVariable)
]
)
'''
#Check
if FiguredParentParentVariable.ParentDeriveTeamerVariable.TeamTagStr=='Views':
#/###################/#
# build a FiguredAxesVariables
#
#debug
'''
self.debug(
[
'I am a panel without Axes and Plots so just set an axis here...',
]
)
'''
#alias
self.FiguringGridIntsTuple=self.ViewFirstDeriveViewerVariable.FiguringGridIntsTuple
#set
self.setAxes()
#Check
elif FiguredParentParentVariable.ParentDeriveTeamerVariable.TeamTagStr=='Panels':
#alias
self.FiguredPanelDeriveTeamerVariable=FiguredParentParentVariable
#/###################/#
# build a FiguredAxesVariables
#
#debug
'''
self.debug(
[
'I am an axes without Plots so just set an axis here...',
]
)
'''
#alias
self.FiguringGridIntsTuple=self.ViewFirstDeriveViewerVariable.FiguringGridIntsTuple
self.FiguredAnchorIntsList=self.FiguredPanelDeriveTeamerVariable.FiguredCursorIntsList
#set
self.setAxes()
else:
#/###################/#
# point to the FiguredAxesVariable
#
#debug
'''
self.debug(
[
'I am Plot..',
'I set my FiguredAxesVariables corresponding to my parent one',
#'FiguredParentParentVariable is ',
#SYS._str(FiguredParentParentVariable),
'FiguredParentParentVariable.ManagementTagStr is ',
FiguredParentParentVariable.ManagementTagStr,
'FiguredParentParentVariable.FiguredAxesVariable is ',
FiguredParentParentVariable.FiguredAxesVariable,
]
)
'''
#get the parent panel
self.FiguredAxesDeriveTeamerVariable=FiguredParentParentVariable
#get the one of the parent
self.FiguredAxesVariable=self.FiguredAxesDeriveTeamerVariable.FiguredAxesVariable
#debug
'''
self.debug(
[
'I have definitely an axes..',
('self.',self,['FiguredAxesVariable'])
]
)
'''
#/###################/#
# if there are axes setted then apply the draw set variable
#
#Check
if self.FiguredAxesVariable!=None:
#debug
'''
self.debug(
[
'There are axes so command the figuring draw variable',
('self.',self,[
'FiguringDrawVariable'
])
]
)
'''
#commad self
#self.command(self,self.FiguringDrawVariable)
#self.command(self,[])
self['#map@set'](self.FiguringDrawVariable)
#/###################/#
# if it is the last then trigger the axes to set also
#
#Check
if self.ManagementIndexInt==(len(self.ParentDeriveTeamerVariable.ManagementDict)-1):
#debug
'''
self.debug(
[
'I am the last plot of this axes !',
'Lets the axes setting itself now',
('self.FiguredAxesDeriveTeamerVariable.',
self.FiguredAxesDeriveTeamerVariable,
['FiguringDrawVariable'])
]
)
'''
#commad self
if self.FiguredAxesDeriveTeamerVariable!=None:
if self.FiguredAxesDeriveTeamerVariable.FiguringDrawVariable!=None:
self.FiguredAxesDeriveTeamerVariable['#map@set'](
self.FiguredAxesDeriveTeamerVariable.FiguringDrawVariable
)
else:
#/###################/#
# Figure-Panel-Axes-Plot level
#
#debug
self.debug(
[
'I am the top figurer but with just one axes..',
('self.',self,['FiguringGridIntsTuple'])
]
)
#Set the size of the grid to this just one plot
self.FiguringGridIntsTuple=(1,1)
#get the parent panel
self.FiguredPanelDeriveTeamerVariable=self
#init
self.setAxes()
#map set
if self.FiguringDrawVariable!=None:
#mapSet
self.mapSet(
self.FiguringDrawVariable
)
def mimic_view(self):
	"""Render the current matplotlib figure as HTML via mpld3, then defer to the base view."""

	#imported lazily so mpld3 is only required when a view is actually rendered
	import mpld3

	#serialize the figure into self-contained HTML; the "simple" template
	#embeds only the minimal JS bootstrap
	FigureVariable = self.FigurePyplotVariable
	self.ViewedHtmlStr = mpld3.fig_to_html(
		FigureVariable,
		template_type="simple"
	)

	#hand control back to the base implementation
	BaseClass.view(self)
def propertize_setWatchAfterParentWithParenterBool(self, _SettingValueVariable):
	"""Create (or alias) the matplotlib figure, run the figuring, then call the base hook.

	Only the top viewer (the one whose ViewFirstDeriveViewerVariable is itself)
	allocates a fresh pyplot figure; every child viewer shares the top
	viewer's figure object.
	"""

	#Check who owns the figure
	TopViewerVariable = self.ViewFirstDeriveViewerVariable
	if TopViewerVariable != self:

		#child viewer: reuse the figure owned by the top viewer
		self.FigurePyplotVariable = TopViewerVariable.FigurePyplotVariable

	else:

		#top viewer: lazily import pyplot and allocate a brand new figure
		from matplotlib import pyplot
		self.FigurePyplotVariable = pyplot.figure()

	#build panels/axes/plots on that figure
	self.figure()

	#preserve the base class behaviour for this watch hook
	BaseClass.propertize_setWatchAfterParentWithParenterBool(self, _SettingValueVariable)
def mimic_set(self):
	"""Intercept set-commands that target the figure.

	Three families of keys are handled here; anything else falls through
	to the base ``set``:

	* plot/scatter/bar keys  -> draw a cartoon on the current axes,
	* the axes key           -> call the named axes methods with arguments,
	* mpld3-prefixed keys    -> attach mpld3 tooltip plugins to the cartoons.

	Handled keys return ``{'HookingIsBool': False}`` so the setting
	machinery does not process them a second time.
	"""

	#Check for a cartoon-drawing key
	if self.SettingKeyVariable in [
		FigurePlotKeyStr,
		FigureScatterKeyStr,
		FigureBarKeyStr
	]:

		#resolve the (liarg, kwarg) pair against the controller
		FigurePlotArgumentDict = Setter.ArgumentDict(
			self.SettingValueVariable,
			self.ViewDeriveControllerVariable
		)

		#plot() returns a list of artists; keep its first element
		self.FigureCartoonVariablesList.append(
			self.FiguredAxesVariable.plot(
				*FigurePlotArgumentDict['LiargVariablesList'],
				**FigurePlotArgumentDict['KwargVariablesDict']
			)[0]
		)

		#stop the base set machinery from re-handling this key
		return {'HookingIsBool': False}

	elif self.SettingKeyVariable == FigureAxesKeyStr:

		#pair each axes method name with its resolved argument dict
		ArgumentTuplesList = map(
			lambda __ItemTuple: (
				__ItemTuple[0],
				Setter.ArgumentDict(
					__ItemTuple[1],
					self.ViewDeriveControllerVariable
				)
			),
			SYS.SetList(
				self.SettingValueVariable
			)
		)

		def callAxesMethodFunction(__ArgumentTuple):
			#look up the bound axes method by name
			AxesMethodVariable = SYS.get(
				self.FiguredAxesVariable,
				__ArgumentTuple[0]
			)
			LiargVariablesList = __ArgumentTuple[1]['LiargVariablesList']
			KwargVariablesDict = __ArgumentTuple[1]['KwargVariablesDict']
			#call with whichever argument shapes were actually provided
			if len(LiargVariablesList) == 0 and KwargVariablesDict == None:
				return AxesMethodVariable()
			elif len(LiargVariablesList) == 0:
				return AxesMethodVariable(**KwargVariablesDict)
			elif KwargVariablesDict != None:
				return AxesMethodVariable(
					*LiargVariablesList,
					**KwargVariablesDict
				)
			else:
				return AxesMethodVariable(*LiargVariablesList)

		#NOTE: map() is relied on for its eager Python 2 behaviour here
		map(callAxesMethodFunction, ArgumentTuplesList)

		#stop the base set machinery from re-handling this key
		return {'HookingIsBool': False}

	elif type(self.SettingKeyVariable) == str and self.SettingKeyVariable.startswith(
		FigureMpld3KeyStr):

		#strip the mpld3 prefix to obtain the plugin class name
		ToolTipKeyStr = SYS.deprefix(
			self.SettingKeyVariable,
			FigureMpld3KeyStr
		)

		#resolve the plugin's (liarg, kwarg) pair against the controller
		FigurePluginArgumentDict = Setter.ArgumentDict(
			self.SettingValueVariable,
			self.ViewDeriveControllerVariable
		)

		#build one tooltip plugin per drawn cartoon
		from mpld3 import plugins
		self.FigureTooltipVariablesList = map(
			lambda __FigureCartoonVariable:
			getattr(
				plugins,
				ToolTipKeyStr
			)(
				*[
					__FigureCartoonVariable
				] + FigurePluginArgumentDict['LiargVariablesList'],
				**FigurePluginArgumentDict['KwargVariablesDict']
			),
			self.FigureCartoonVariablesList
		)

		#register every tooltip plugin on the figure
		map(
			lambda __FigureTooltipVariable:
			plugins.connect(
				self.FigurePyplotVariable,
				__FigureTooltipVariable
			),
			self.FigureTooltipVariablesList
		)

		#stop the base set machinery from re-handling this key
		return {'HookingIsBool': False}

	#not a figure key: defer to the base implementation
	BaseClass.set(self)
def setAxes(self):
	"""Allocate this figurer's matplotlib axes on the shared subplot grid.

	First shifts the parent panel's cursor past the previous sibling (when
	there is one), then anchors this axes at the cursor and creates a
	``subplot2grid`` axes spanning ``FiguringShapeIntsTuple`` cells.
	"""

	#shift the panel cursor past the previous sibling, if any
	if self.ManagementIndexInt > 0:

		#fetch the previous figurer in the same management dict
		FiguredPreviousAxesFigurer = self.ParentDeriveTeamerVariable.ManagementDict.get(
			self.ManagementIndexInt - 1
		)

		#advance the cursor along each axis whose shift component is positive
		if self.FiguringShiftIntsTuple[0] > 0:
			self.FiguredPanelDeriveTeamerVariable.FiguredCursorIntsList[0] += (
				self.FiguringShiftIntsTuple[0]
				+ FiguredPreviousAxesFigurer.FiguringShapeIntsTuple[0]
			)
		if self.FiguringShiftIntsTuple[1] > 0:
			self.FiguredPanelDeriveTeamerVariable.FiguredCursorIntsList[1] += (
				self.FiguringShiftIntsTuple[1]
				+ FiguredPreviousAxesFigurer.FiguringShapeIntsTuple[1]
			)

	#anchor this axes at the panel's current cursor position
	if self.FiguredPanelDeriveTeamerVariable != None:
		self.FiguredAnchorIntsList = copy.copy(
			self.FiguredPanelDeriveTeamerVariable.FiguredCursorIntsList
		)

	#create the axes spanning FiguringShapeIntsTuple cells on the grid
	from matplotlib import pyplot
	self.FiguredAxesVariable = pyplot.subplot2grid(
		self.FiguringGridIntsTuple,
		self.FiguredAnchorIntsList,
		rowspan=self.FiguringShapeIntsTuple[0],
		colspan=self.FiguringShapeIntsTuple[1]
	)

	#re-link the new axes to the shared figure object
	self.FiguredAxesVariable._figure = self.FigurePyplotVariable
def propertize_setFiguredShiftTuplesList(self, _SettingValueVariable):
	"""Bind the shift-tuples list and derive the total panel shape.

	``_SettingValueVariable`` is a list of ``(shape, shift)`` int-pair
	tuples; the first entry seeds ``FiguredPanelShapeIntsList`` and every
	later entry enlarges it along each axis whose shift component is
	strictly positive.
	"""

	#store the raw value on the backing attribute
	self._FiguredShiftTuplesList = _SettingValueVariable

	#seed the panel shape with the first tuple's shape pair
	self.FiguredPanelShapeIntsList = list(_SettingValueVariable[0][0])

	#grow the shape for every subsequent (shape, shift) tuple
	#(slicing an empty tail makes the len()>1 guard unnecessary)
	for __ShapeShiftTuple in _SettingValueVariable[1:]:
		if __ShapeShiftTuple[1][0] > 0:
			self.FiguredPanelShapeIntsList[0] += (
				__ShapeShiftTuple[1][0] + __ShapeShiftTuple[0][0]
			)
		if __ShapeShiftTuple[1][1] > 0:
			self.FiguredPanelShapeIntsList[1] += (
				__ShapeShiftTuple[1][1] + __ShapeShiftTuple[0][1]
			)
#</DefineClass>
#</DefinePrint>
#hide the verbose figure attributes from the printed representation
FigurerClass.PrintingClassSkipKeyStrsList.extend(
	[
		#'FigurePyplotVariable',
		'FigureCartoonVariablesList',
		'FigureTooltipVariablesList',
		'FiguringGridIntsTuple',
		'FiguringShapeIntsTuple',
		'FiguringDrawVariable',
		'FiguringShiftIntsTuple',
		'FiguredTeamTagStr',
		'FiguredDeriveTeamerVariablesList',
		'FiguredPanelDeriveTeamerVariable',
		'FiguredAxesDeriveTeamerVariable',
		'FiguredAxesVariable',
		'FiguredAnchorIntsList',
		'FiguredShiftTuplesList',
		'FiguredPanelShapeIntsList',
		'FiguredCursorIntsList'
	]
)
#<DefinePrint>
#<DefineLocals>
#Views manage Figurers, and Panels/Axes/Plots teams are all Views teams
Controller.ViewsClass.ManagingValueClass = FigurerClass
FigurerClass.TeamingClassesDict = dict(
	(__KeyStr, Controller.ViewsClass)
	for __KeyStr in ['Panels', 'Axes', 'Plots']
)
#<DefineLocals>
|
{
"content_hash": "7ad4caccb0c8f2db9f7300491a508350",
"timestamp": "",
"source": "github",
"line_count": 1113,
"max_line_length": 136,
"avg_line_length": 20.497753818508535,
"alnum_prop": 0.6155869203120891,
"repo_name": "Ledoux/ShareYourSystem",
"id": "eece8450a691fd80642356de08246bef5728afea",
"size": "22838",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Pythonlogy/ShareYourSystem/Standards/Viewers/Pyploter/draft/__init__ copy 4.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
}
|
# -- Sphinx configuration for the Spikes.jl documentation --------------------
import sys
import os
import shlex

# -- General configuration ---------------------------------------------------

# Extensions: MathJax for rendering maths in HTML output.
extensions = [
    'sphinx.ext.mathjax',
]

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# Source file suffix and master toctree document.
source_suffix = '.rst'
master_doc = 'index'

# Project metadata.
project = u'Spikes.jl'
copyright = u'2016, Paul Thompson'
author = u'Paul Thompson'

# The short X.Y version and the full release string.
version = '0'
release = '0'

# Language for autogenerated content (None = English).
language = None

# Patterns to ignore when looking for source files.
exclude_patterns = ['_build']

# Pygments (syntax highlighting) style.
pygments_style = 'sphinx'

# Do not render todo / todoList directives.
todo_include_todos = False

# -- Options for HTML output -------------------------------------------------

# Use the Read the Docs theme when building locally; on readthedocs.org the
# build service injects the theme itself, so only import it off-RTD.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Custom static files, copied after the builtin static files.
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'Spikesjldoc'

# -- Options for LaTeX output ------------------------------------------------

# All LaTeX knobs (papersize, pointsize, preamble, figure_align) left at
# their defaults.
latex_elements = {
}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, 'Spikesjl.tex', u'Spikes.jl Documentation',
     u'Paul Thompson', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'spikesjl', u'Spikes.jl Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    (master_doc, 'Spikesjl', u'Spikes.jl Documentation',
     author, 'Spikesjl', 'One line description of project.',
     'Miscellaneous'),
]
|
{
"content_hash": "0c0de5629e99833c81c2385dae65f1f8",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 79,
"avg_line_length": 32.38434163701068,
"alnum_prop": 0.704945054945055,
"repo_name": "paulmthompson/Spikes.jl",
"id": "43b8e2112ccec4843b880ad6184c264311aa5494",
"size": "9522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Julia",
"bytes": "44917"
}
],
"symlink_target": ""
}
|
import numpy
import cv2

# Input paths: the image to search and the template to locate within it.
# Hoisted to named constants so the script is easy to repoint.
IMAGE_PATH = "/home/udayan/Downloads/messi.jpg"
TEMPLATE_PATH = "/home/udayan/Downloads/messi_face.jpg"

# Read both files as grayscale (flag 0; use -1 to load unchanged instead).
img = cv2.imread(IMAGE_PATH, 0)
template = cv2.imread(TEMPLATE_PATH, 0)

# cv2.imread returns None (instead of raising) when a file is unreadable;
# fail fast with a clear error rather than an AttributeError below.
if img is None or template is None:
    raise IOError(
        "could not read %r or %r" % (IMAGE_PATH, TEMPLATE_PATH))

h, w = template.shape

# Slide the template over the image.  Other methods: cv2.TM_CCOEFF,
# cv2.TM_CCORR, cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED.
result = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)

# For TM_CCOEFF_NORMED the best match is the global maximum of the response
# map (for the TM_SQDIFF variants it would be the minimum instead).
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

# Mark the recognized area: rectangle of intensity 255, 3 px thick.
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(img, top_left, bottom_right, 255, 3)

# Show the annotated image and the raw response map until a key is pressed.
cv2.imshow('image', img)
cv2.imshow('result', result)
cv2.waitKey(0)  # 0 = wait indefinitely (or until any key press)
cv2.destroyAllWindows()
|
{
"content_hash": "312d9ff2f79d4ae0da71cf5e1df10352",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 292,
"avg_line_length": 59.666666666666664,
"alnum_prop": 0.7465083798882681,
"repo_name": "UdayanSinha/Code_Blocks",
"id": "f23b20a19b93d26fc7d85fca5c783db154289046",
"size": "1453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenCV/cv_template_matching.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "18227"
},
{
"name": "Arduino",
"bytes": "13198"
},
{
"name": "Assembly",
"bytes": "541435"
},
{
"name": "Batchfile",
"bytes": "1373"
},
{
"name": "C",
"bytes": "165345051"
},
{
"name": "C++",
"bytes": "1115090"
},
{
"name": "Coq",
"bytes": "3219"
},
{
"name": "DTrace",
"bytes": "6852"
},
{
"name": "Eagle",
"bytes": "1627871"
},
{
"name": "GDB",
"bytes": "8406"
},
{
"name": "HTML",
"bytes": "496199"
},
{
"name": "Logos",
"bytes": "47742"
},
{
"name": "Makefile",
"bytes": "868753"
},
{
"name": "Matlab",
"bytes": "17436"
},
{
"name": "Objective-C",
"bytes": "73312"
},
{
"name": "Perl",
"bytes": "93946"
},
{
"name": "Prolog",
"bytes": "4221"
},
{
"name": "Python",
"bytes": "48175"
},
{
"name": "Scheme",
"bytes": "21"
},
{
"name": "Shell",
"bytes": "45297"
},
{
"name": "Standard ML",
"bytes": "32"
},
{
"name": "SystemVerilog",
"bytes": "492112"
},
{
"name": "Tcl",
"bytes": "29381"
},
{
"name": "VHDL",
"bytes": "54755"
},
{
"name": "Verilog",
"bytes": "805101"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from vectortile import TypedMatrix
class Tile(object):
    """A packed vector tile: a TypedMatrix string plus a metadata dict."""

    def __init__(self, typedmatrix_str, meta=None):
        """
        :param typedmatrix_str: a packed string as returned by TypedMatrix.pack()
        :param meta: optional dict of meta data fields (defaults to empty)
        """
        self.typedmatrix_str = typedmatrix_str
        if not meta:
            meta = dict()
        self.meta = meta

    @staticmethod
    def fromdata(data, meta=None, columns=None):
        """
        Build a Tile by packing *data* columnwise with TypedMatrix.

        :param data: a dict or list of dicts suitable for TypedMatrix.pack()
        :param meta: a dict of meta data fields (also stored as extra header fields)
        :param columns: optional explicit column specification for pack()
        :return: a Tile object
        """
        if not meta:
            meta = dict()
        return Tile(
            TypedMatrix.pack(data, extra_header_fields=meta, columns=columns,
                             orientation='columnwise'),
            meta=meta)

    @staticmethod
    def nodata(complete_ancestor_bounds=None):
        """Build an empty marker tile, optionally recording the bbox of the
        closest complete ancestor tile."""
        meta = dict(nodata=True)
        if complete_ancestor_bounds:
            meta['complete_ancestor'] = str(complete_ancestor_bounds.get_bbox())
        return Tile.fromdata(data=dict(), meta=meta)

    def is_nodata(self):
        """Return True if this is a no-data marker tile."""
        return 'nodata' in self.meta

    def complete_ancestor(self):
        """Return the recorded complete-ancestor bbox string, or None."""
        return self.meta.get('complete_ancestor')

    @staticmethod
    def timestamp(dt=None):
        """
        Create a coded UTC timestamp from a datetime using the internal
        coding format used in packed tiles.

        :param dt: datetime to encode; None means "now".

        BUGFIX: the default used to be ``dt=datetime.now()``, which Python
        evaluates once at class-definition time, so every defaulted call
        returned the import-time timestamp instead of the current time.
        """
        if dt is None:
            dt = datetime.now()
        return TypedMatrix.get_utc_timestamp(dt)

    def unpack(self):
        """Unpack into a (header, data) pair via TypedMatrix.unpack()."""
        return TypedMatrix.unpack(self.typedmatrix_str)

    def __str__(self):
        return "%s" % self.typedmatrix_str

    def __repr__(self):
        return "%s(%s,meta=%s)" % (self.__class__.__name__,
                                   repr(self.typedmatrix_str),
                                   repr(self.meta))

    @property
    def size(self):
        """Byte length of the packed representation."""
        return len(self.typedmatrix_str)

    def asdict(self):
        """Return the unpacked header with the data merged in under 'data'."""
        header, data = self.unpack()
        header['data'] = data
        return header
|
{
"content_hash": "9b9ec5c989e060abfb640306c99272b3",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 123,
"avg_line_length": 29.734375,
"alnum_prop": 0.6132422490803994,
"repo_name": "SkyTruth/vectortile",
"id": "61394c44b0bafd7417751f8d9b73be318752a2ab",
"size": "1903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vectortile/Tile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28021"
}
],
"symlink_target": ""
}
|
from threading import Thread
from flask import current_app, render_template
from flask.ext.mail import Message
from . import mail
def send_async_email(app, msg):
    """Deliver *msg* from a worker thread, inside *app*'s application context.

    mail.send reads the app configuration, so an explicit context is
    required when running outside the request cycle.
    """
    with app.app_context():
        mail.send(msg)
def send_email(to, subject, template, cc=None, **kwargs):
    """Render *template* (.txt and .html) and send it asynchronously.

    :param to: recipient address
    :param subject: subject line (prefixed with FLASKY_MAIL_SUBJECT_PREFIX)
    :param template: template basename; both '<template>.txt' and
        '<template>.html' are rendered with **kwargs
    :param cc: optional single carbon-copy address (generalizes the
        previously separate send_email_cc; default None keeps the old
        behaviour)
    :return: the started worker Thread (join it to block until sent)
    """
    # _get_current_object unwraps the current_app proxy so the worker
    # thread holds a real app reference.
    app = current_app._get_current_object()
    msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
                  sender=app.config['FLASKY_MAIL_SENDER'],
                  recipients=[to],
                  cc=[cc] if cc else None)
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    thr = Thread(target=send_async_email, args=[app, msg])
    thr.start()
    return thr
def send_email_cc(to, cc, subject, template, **kwargs):
    """Render *template* (.txt and .html) and send it asynchronously,
    carbon-copying *cc* on the message.

    :return: the started worker Thread (join it to block until sent)
    """
    # Unwrap the current_app proxy so the worker thread holds a real app.
    app = current_app._get_current_object()
    subject_str = app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject
    msg = Message(subject_str,
                  sender=app.config['FLASKY_MAIL_SENDER'],
                  recipients=[to],
                  cc=[cc])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    worker = Thread(target=send_async_email, args=[app, msg])
    worker.start()
    return worker
|
{
"content_hash": "159096d3eb43aef8c750689994b828e4",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 84,
"avg_line_length": 38.2,
"alnum_prop": 0.6492146596858639,
"repo_name": "SuperQuest/v1",
"id": "61052f357d9357703a9667f41469241322fc2d88",
"size": "1146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/email.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3082"
},
{
"name": "JavaScript",
"bytes": "645"
},
{
"name": "Python",
"bytes": "120672"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals

import subprocess

from django.core import checks
from django.core.files.storage import default_storage
from django.db import models
from django.db.models import signals

from videokit.apps import VideokitConfig
from videokit.cache import get_videokit_cache_backend
from videokit.fields import VideoFieldFile
from videokit.fields import VideoFileDescriptor
from videokit.fields import VideoSpecFieldFile
from videokit.fields import VideoSpecFileDescriptor
from videokit.forms import VideoField as VideoFormField
class VideoField(models.FileField):
attr_class = VideoFieldFile
descriptor_class = VideoFileDescriptor
description = 'Video'
def __init__(self, verbose_name=None, name=None,
             width_field=None, height_field=None,
             rotation_field=None,
             mimetype_field=None,
             duration_field=None,
             thumbnail_field=None,
             **kwargs):
    """Remember the names of the companion model fields that cache video
    metadata (dimensions, rotation, mimetype, duration, thumbnail), then
    defer to FileField.__init__ for everything else."""
    self.width_field = width_field
    self.height_field = height_field
    self.rotation_field = rotation_field
    self.mimetype_field = mimetype_field
    self.duration_field = duration_field
    self.thumbnail_field = thumbnail_field
    super(VideoField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
    """Run the standard FileField checks, then append the external
    video-tool availability errors."""
    errors = super(VideoField, self).check(**kwargs)
    errors.extend(self._check_video_utils_installed())
    return errors
def _check_video_utils_installed(self):
    """Verify that the external tools this field relies on (ffmpeg for
    processing, mediainfo for probing) are available on PATH.

    :return: a list of ``checks.Error`` objects, one per missing tool
        (empty when both are available).

    Fixes two defects in the original: ``checks`` was referenced without
    being imported (NameError the moment a tool was missing), and an
    early ``return`` after the ffmpeg check masked a missing mediainfo.
    Also passes argv as a list (shell=False) instead of a shell string.
    """
    errors = []
    # `which` exits non-zero when the program is not found.
    if subprocess.call(['which', 'ffmpeg'],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE) != 0:
        errors.append(
            checks.Error(
                'ffmpeg is not installed',
                hint=('Install FFMPEG from https://www.ffmpeg.org'),
                obj=self,
            )
        )
    if subprocess.call(['which', 'mediainfo'],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE) != 0:
        errors.append(
            checks.Error(
                'mediainfo is not installed',
                hint=('Install Mediainfo from https://mediaarea.net'),
                obj=self,
            )
        )
    return errors
def deconstruct(self):
    """Serialize the field for migrations, including only the companion
    metadata field names that were actually configured."""
    name, path, args, kwargs = super(VideoField, self).deconstruct()
    # Same order as the original explicit if-chain, so migration kwargs
    # are emitted identically.
    for attr_name in ('width_field',
                      'height_field',
                      'rotation_field',
                      'mimetype_field',
                      'duration_field',
                      'thumbnail_field'):
        attr_value = getattr(self, attr_name)
        if attr_value:
            kwargs[attr_name] = attr_value
    return name, path, args, kwargs
def contribute_to_class(self, cls, name, **kwargs):
    """Attach the field to *cls* and, for concrete models only, hook every
    metadata updater to the post_init signal so cached fields refresh when
    instances are constructed."""
    super(VideoField, self).contribute_to_class(cls, name, **kwargs)
    if cls._meta.abstract:
        return
    for updater in (self.update_dimension_fields,
                    self.update_rotation_field,
                    self.update_mimetype_field,
                    self.update_duration_field,
                    self.update_thumbnail_field):
        signals.post_init.connect(updater, sender=cls)
def update_dimension_fields(self, instance, force = False, *args, **kwargs):
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields:
return
file = getattr(instance, self.attname)
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field))
or (self.height_field and not getattr(instance, self.height_field)))
if dimension_fields_filled and not force:
return
if file:
width = file.width
height = file.height
else:
width = None
height = None
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def update_rotation_field(self, instance, force = False, *args, **kwargs):
has_rotation_field = self.rotation_field
if not has_rotation_field:
return
file = getattr(instance, self.attname)
if not file and not force:
return
rotation_field_filled = not(self.rotation_field and not getattr(instance, self.rotation_field))
if rotation_field_filled and not force:
return
if file:
rotation = file.rotation
else:
rotation = None
if self.rotation_field:
setattr(instance, self.rotation_field, rotation)
def update_mimetype_field(self, instance, force = False, *args, **kwargs):
has_mimetype_field = self.mimetype_field
if not has_mimetype_field:
return
file = getattr(instance, self.attname)
if not file and not force:
return
mimetype_field_filled = not(self.mimetype_field and not getattr(instance, self.mimetype_field))
if mimetype_field_filled and not force:
return
if file:
mimetype = file.mimetype
else:
mimetype = None
if self.mimetype_field:
setattr(instance, self.mimetype_field, mimetype)
def update_duration_field(self, instance, force = False, *args, **kwargs):
has_duration_field = self.duration_field
if not has_duration_field:
return
file = getattr(instance, self.attname)
if not file and not force:
return
duration_field_filled = not(self.duration_field and not getattr(instance, self.duration_field))
if duration_field_filled and not force:
return
if file:
duration = file.duration
else:
duration = None
if self.duration_field:
setattr(instance, self.duration_field, duration)
def update_thumbnail_field(self, instance, force = False, *args, **kwargs):
has_thumbnail_field = self.thumbnail_field
if not has_thumbnail_field:
return
file = getattr(instance, self.attname)
if not file and not force:
return
thumbnail_field_filled = not(self.thumbnail_field and not getattr(instance, self.thumbnail_field))
if thumbnail_field_filled and not force:
return
if file:
thumbnail = file.thumbnail
else:
thumbnail = None
if self.thumbnail_field:
setattr(instance, self.thumbnail_field, thumbnail)
def formfield(self, **kwargs):
defaults = { 'form_class' : VideoFormField }
defaults.update(kwargs)
return super(VideoField, self).formfield(**defaults)
class VideoSpecField(VideoField):
    """A VideoField whose file is generated from another video field.

    ``source`` names the model field holding the original video, ``format``
    the output format (must be in VIDEOKIT_SUPPORTED_FORMATS); generated
    files go through the configured storage and videokit cache backend.
    """
    attr_class = VideoSpecFieldFile
    descriptor_class = VideoSpecFileDescriptor

    def __init__(self, verbose_name = None, name = None,
                 source = None,
                 format = VideokitConfig.VIDEOKIT_DEFAULT_FORMAT,
                 storage = None,
                 video_cache_backend = None,
                 **kwargs):
        self.source = source
        self.format = format
        self.storage = storage or default_storage
        self.video_cache_backend = video_cache_backend or get_videokit_cache_backend()
        # Spec fields are generated, never user-supplied, so blank/null are
        # always forced on regardless of what the caller passed.
        kwargs.pop('blank', None)
        kwargs.pop('null', None)
        if format not in VideokitConfig.VIDEOKIT_SUPPORTED_FORMATS:
            raise ValueError('Video format \'%s\' is not supported at this time by videokit.' % format)
        super(VideoSpecField, self).__init__(verbose_name, name, blank = True, null = True, **kwargs)

    def formfield(self, **kwargs):
        """Generated fields are not user-editable: expose no form field.

        BUG FIX: this method was previously named ``form_field``, which
        Django never calls, so the parent's formfield() was used instead.
        """
        return None

    # Backward-compatible alias for the old (misspelled) method name.
    form_field = formfield
|
{
"content_hash": "d7baf97267f8ce4189231e20bf6179cd",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 109,
"avg_line_length": 33.10276679841897,
"alnum_prop": 0.5982089552238806,
"repo_name": "pstreck/django-videokit",
"id": "fd4935a5e14d4ff0ba662b36362f6d9afac23512",
"size": "8375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "videokit/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1126"
},
{
"name": "Python",
"bytes": "30920"
}
],
"symlink_target": ""
}
|
import unittest2
from lldbsuite.test.decorators import *
from lldbsuite.test.concurrent_base import ConcurrentEventsBase
from lldbsuite.test.lldbtest import TestBase
@skipIfWindows
class ConcurrentSignalNWatchNBreak(ConcurrentEventsBase):
    """Concurrency stress test mixing signals, watchpoints and breakpoints."""

    mydir = ConcurrentEventsBase.compute_mydir(__file__)

    @skipIfFreeBSD  # timing out on buildbot
    @skipIf(triple='^mips')  # Atomic sequences are not supported yet for MIPS in LLDB.
    @expectedFailureNetBSD
    @add_test_categories(["watchpoint"])
    def test(self):
        """Test one signal thread with 5 watchpoint and breakpoint threads."""
        self.build(dictionary=self.getBuildFlags())
        self.do_thread_actions(
            num_signal_threads=1,
            num_watchpoint_threads=5,
            num_breakpoint_threads=5,
        )
|
{
"content_hash": "6e8dc5211ee1c9872a9c3e33ef8d2223",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 78,
"avg_line_length": 35.78260869565217,
"alnum_prop": 0.6950182260024301,
"repo_name": "endlessm/chromium-browser",
"id": "3b005e559a494562657aaec19b26f2d15a19d982",
"size": "824",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/llvm/lldb/test/API/functionalities/thread/concurrent_events/TestConcurrentSignalNWatchNBreak.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
class Person(object):
""" A simple class representing a person object.
"""
#initialize name, ID number, city
def __init__(self, fname, lname, ID, city):
self.__ID = ID
self.__first_name = fname
self.__last_name = lname
self.__city = city
def _getName(self):
s = ' '
return s.join((self.__first_name, self.__last_name))
def __format__(self, format_spec):
if isinstance(format_spec, unicode):
return unicode(str(self))
else:
return str(self)
#display name
def show_person(self):
print 'Name', format(self._getName(),'<9')
print 'ID:', format(self.__ID,'<9')
print('City:', self.__city)
class Employer(Person):
    """ An employer is a person who runs a company.

    Extends Person with the name of the company being run.
    """
    def __init__(self, fname, lname, ID, city, company_name):
        # Initialise the Person part first, then the employer-specific bit.
        Person.__init__(self, fname, lname, ID, city)
        self.company_name = company_name
# Only run the demo when this module is executed as a script.
if __name__ == '__main__':
    # Create an employer and print their personal details.
    boss_john = Employer(
        fname='John', lname='Paw', city="NYC", ID=223344, company_name="Packrat's Cats"
    )
    boss_john.show_person()
|
{
"content_hash": "be2bbbbe89b297f4c06206e731ba988f",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 87,
"avg_line_length": 26.229166666666668,
"alnum_prop": 0.5655281969817315,
"repo_name": "janusnic/21v-python",
"id": "8b4b5470aa3541846142ac23c652a55db9e54063",
"size": "1312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unit_08/5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "369"
},
{
"name": "Python",
"bytes": "990972"
},
{
"name": "SQLPL",
"bytes": "147"
}
],
"symlink_target": ""
}
|
from Tkinter import *
import sys, os, glob
# Python 2 hack: reload(sys) restores sys.setdefaultencoding (which the
# interpreter removes at startup) so non-ASCII text can default to UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8') # encoding Chinese
class Application(Frame):
    """Main diary UI: a menu-driven Tkinter frame for reading/writing logs.

    The *_Tag booleans remember which view (guide / past logs / new log)
    is currently packed so its widgets can be removed before another view
    is shown.
    """
    def __init__(self, master):
        Frame.__init__(self, master)
        # View-state flags: which widgets are currently displayed.
        self.readme_Tag = False
        self.printLogs_Tag = False
        self.newLog_Tag = False
        self.pack()
        self.createMenus(master)
    def createScrollbar(self):
        # Vertical scrollbar shared by the main text view.
        self.scrollbar = Scrollbar(self, orient=VERTICAL)
        self.scrollbar.pack(side=RIGHT, fill=Y)
    def createText(self):
        # Main editable text area, wired to the scrollbar created above.
        self.text = Text(self, width=1000, height=400,
            yscrollcommand=self.scrollbar.set)
        self.text.focus_force()  # put the keyboard cursor on the Text widget
        self.text.pack()
        self.text.delete(1.0, END)
        self.scrollbar.config(command=self.text.yview)
    def printLogs(self):
        """Show the name and content of every *.txt log in the working dir."""
        self.printLogs_Tag = True
        # Tear down whichever view was active before.
        if self.readme_Tag:
            self.text1.pack_forget()
        if self.newLog_Tag:
            self.text.pack_forget()
            self.scrollbar.pack_forget()
        self.createScrollbar()
        self.createText()
        self.text.insert(END, "Here is your past logs:--->" + "\n")
        current_dir = os.getcwd() # print past logs
        os.chdir(current_dir)
        # Dump each saved log file into the text view.
        for file in glob.glob("*.txt"):
            self.text.insert(END, "Log_name:--->" + file + "\n")
            file_content = open(file, "r+")
            self.text.insert(END, "Log_content:---> " + file_content.read() + "\n \n")
            file_content.close()
    def newLog(self):
        """Switch to an empty text area for composing a new log entry."""
        self.newLog_Tag = True
        if self.readme_Tag:
            self.text1.pack_forget()
        if self.printLogs_Tag:
            self.text.pack_forget()
            self.scrollbar.pack_forget()
        self.createScrollbar()
        self.createText()
    def dialog(self):
        # Popup window asking for the log's file name; <Return> confirms.
        top = self.top = Toplevel()
        Label(top, text="Input your log name->").grid(row=0)
        self.log_name = Entry(top)
        self.log_name.focus_force()
        self.log_name.grid(row=0, column=1)
        self.log_name.bind("<Return>", self.ok)
    def ok(self, event):
        # Capture the chosen name (+ .txt) and the text body, close the popup.
        self.name = self.log_name.get().encode(sys.stdin.encoding) + ".txt"
        self.content = self.text.get("1.0", END)
        self.top.destroy()
    def save(self):
        """Prompt for a file name and append the current text to it."""
        self.dialog()
        # NOTE(review): wait_window(self) waits on the main frame, not the
        # popup -- presumably self.top was intended; confirm before changing.
        self.wait_window(self)
        log_writer = open(self.name, "a+")
        log_writer.write(self.content)
        log_writer.close()
    def createMenus(self, master):
        """Build the "file" and "Help" menus on the toplevel window."""
        self.menu = Menu(self)
        master.config(menu=self.menu)
        self.filemenu = Menu(self)
        self.menu.add_cascade(label="file", menu=self.filemenu)
        self.filemenu.add_command(label="PastLogs", command=self.printLogs)
        self.filemenu.add_command(label="New", command=self.newLog)
        self.filemenu.add_command(label="Save", command=self.save)
        self.filemenu.add_separator()
        self.filemenu.add_command(label="Exit", command=self.cancel)
        self.helpmenu = Menu(self)
        self.menu.add_cascade(label="Help", menu=self.helpmenu)
        self.helpmenu.add_command(label="Guide", command=self.readme)
    def cancel(self):
        # Destroy the root window, ending the application.
        self.master.destroy()
    def readme(self):
        """Display usage instructions in a fresh Text widget."""
        # NOTE(review): only the "new log" view is torn down here; arriving
        # from the PastLogs view leaves its widgets packed -- verify intent.
        if self.newLog_Tag:
            self.text.pack_forget()
            self.scrollbar.pack_forget()
        self.readme_Tag = True
        self.text1= Text(self, height=1000, width=400)
        help ="""
        1-Please click PastLogs Menu in the filemenu to see the logs you have wrote.
        2-Then Click the New Menu in the filemenu to write your new log
        3-Please Click the Save Menu if Want to save your log which you have just worte
        4-Exit menu to exit
        5-Guide menu in the Help Menu to read the Diary Guide
        """
        self.text1.insert(END, help, "color")
        self.text1.pack()
def main():
    """Build the root window, add a greeting banner, and run the diary app."""
    root = Tk()
    root.title("Diary App")
    root.geometry("1000x400")
    banner = Label(root, text="Dear Friend! Welcome!")
    banner.pack(side=TOP, fill=X)
    app = Application(root)
    app.mainloop()


if __name__ == '__main__':
    main()
|
{
"content_hash": "87a78a9cbb65897f59ca79946db59c51",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 87,
"avg_line_length": 27.91275167785235,
"alnum_prop": 0.5874008175042077,
"repo_name": "lyltj2010/OMOOC2py",
"id": "e8eee1d9201aaf9692e9e644280ac2f599112063",
"size": "4191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_src/om2py2w/demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7764"
},
{
"name": "Python",
"bytes": "20662"
},
{
"name": "Smarty",
"bytes": "2031"
}
],
"symlink_target": ""
}
|
import json
import urllib.request
import logging
import time
import datetime
import csv
import sys
def getltcbtc():
    """Return the most recent LTC -> BTC exchange rate.

    Tries crypto-trade.com first; if that request fails or reports a rate
    of 0 (market crash or API outage), falls back to vircurex.

    Returns the raw rate value as decoded from the exchange's JSON.
    """
    value = 0
    try:
        request = urllib.request.Request(
            "https://crypto-trade.com/api/1/ticker/ltc_btc")
        # Context manager closes the connection; timeout avoids hanging on a
        # dead API (the original had neither).
        with urllib.request.urlopen(request, timeout=10) as response:
            xjson = json.loads(response.read().decode('utf-8'))
        value = xjson['data']['last']
    except Exception:
        # BUG FIX: previously a network/parse failure propagated instead of
        # triggering the fallback below; treat it the same as a zero rate.
        value = 0
    if value == 0:
        # Either the LTC Market has crashed or crypto-trade isn't working
        # Using vircurex
        request2 = urllib.request.Request(
            "https://vircurex.com/api/get_last_trade.json?base=LTC&alt=BTC",
            headers = {'user-agent': 'MWGI'})
        with urllib.request.urlopen(request2, timeout=10) as response2:
            xjson2 = json.loads(response2.read().decode('utf-8'))
        value = xjson2['value']
    return value
|
{
"content_hash": "efd486cace9eede08a35b0c2a9f47d1d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 29.74074074074074,
"alnum_prop": 0.7185554171855542,
"repo_name": "chalbersma/mwgi",
"id": "acbb1410d86f69e1fd0cfa594e0d0f796a3e7b7e",
"size": "827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v1/getltcbtc.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "16615"
},
{
"name": "Shell",
"bytes": "720"
}
],
"symlink_target": ""
}
|
"""Staging settings for the us_ignite Heroku deployment.

Extends the shared base settings with S3-backed static/media storage,
Redis caching, SMTP email and staging-only hosts and flags. All secrets
are read from environment variables.
"""
import datetime
import os
import urlparse
from us_ignite.settings.base import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Sensitive values are saved as env variables:
env = os.getenv
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
# settings is one directory up now
here = lambda *x: os.path.join(PROJECT_ROOT, '..', *x)
SITE_URL = 'https://us-ignite-staging.herokuapp.com'
ALLOWED_HOSTS = [
    'us-ignite-staging.herokuapp.com',
]
# HTTPS configuration: force SSL and advertise a short (5 minute) HSTS window.
SESSION_COOKIE_SECURE = True
SECURE_SSL_REDIRECT = True
SECURE_HSTS_SECONDS = 60 * 5
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = env('SECRET_KEY')
# Remote storage settings: static, media and thumbnail files live on S3.
STATICFILES_STORAGE = 'us_ignite.common.storage.StaticS3Storage'
DEFAULT_FILE_STORAGE = 'us_ignite.common.storage.MediaS3Storage'
THUMBNAIL_DEFAULT_STORAGE = DEFAULT_FILE_STORAGE
AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = 'staging-us-ignite-org'
# Far-future Expires (1 year) plus a 30-day Cache-Control for S3 assets.
expire_date = datetime.date.today() + datetime.timedelta(days=365)
expire_seconds = 30 * 24 * 60 * 60
AWS_HEADERS = {
    'Expires': expire_date.strftime('%a, %d %b %Y 00:00:00 GMT'),
    'Cache-Control': 'max-age=%s' % expire_seconds,
}
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
STATIC_URL = 'https://%s/static/' % AWS_S3_CUSTOM_DOMAIN
# Cache backend: Redis To Go instance, parsed from its connection URL.
redis_url = urlparse.urlparse(env('REDISTOGO_URL'))
CACHES = {
    'default': {
        'BACKEND': 'redis_cache.RedisCache',
        'LOCATION': '%s:%s' % (redis_url.hostname, redis_url.port),
        'OPTIONS': {
            'DB': 0,
            'PASSWORD': redis_url.password,
        }
    }
}
# Email: SMTP backend with credentials from the environment.
EMAIL_SUBJECT_PREFIX = '[STAGE US Ignite] '
DEFAULT_FROM_EMAIL = 'info@staging.us-ignite.org'
SERVER_EMAIL = 'info@staging.us-ignite.org'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = env('EMAIL_HOST')
EMAIL_PORT = env('EMAIL_PORT')
EMAIL_HOST_USER = env('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD')
# Twitter API:
TWITTER_API_KEY = env('TWITTER_API_KEY')
TWITTER_API_SECRET = env('TWITTER_API_SECRET')
# WP email
WP_EMAIL = env('WP_EMAIL')
# Enable dummy content generation on this build:
ENABLE_DUMMY = True
if ENABLE_DUMMY:
    INSTALLED_APPS += ('us_ignite.dummy', )
    # List of words (used only when the dummy app is enabled):
    WORDS_PATH = here('..', 'words')
MAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY')
MAILCHIMP_LIST = env('MAILCHIMP_LIST')
# Placeholder analytics property ID for staging.
GOOGLE_ANALYTICS_ID = 'DUMMY'
# Production flag:
IS_PRODUCTION = True
# Asset compressor:
COMPRESS_ENABLED = True
STATIC_FILES_VERSION = 'v1'
# Heroku does not have a filesystem, used to deploy the assets to S3:
COMPRESS_STORAGE = 'us_ignite.common.storage.CachedS3BotoStorage'
USE_DEBUG_TOOLBAR = False
|
{
"content_hash": "e6813e339fadf802e923c545f842224e",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 70,
"avg_line_length": 26.093457943925234,
"alnum_prop": 0.6966332378223495,
"repo_name": "us-ignite/us_ignite",
"id": "7e5784e8c96dc7c43a05fd6e0d1b6c33eb27b232",
"size": "2837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "us_ignite/settings/staging.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "590320"
},
{
"name": "HTML",
"bytes": "920235"
},
{
"name": "JavaScript",
"bytes": "109759"
},
{
"name": "Nginx",
"bytes": "3047"
},
{
"name": "Pascal",
"bytes": "48"
},
{
"name": "Puppet",
"bytes": "53455"
},
{
"name": "Python",
"bytes": "1321882"
},
{
"name": "Ruby",
"bytes": "370509"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
}
|
import clr
import System
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
def IsInPlace(item):
    # Some element types have no IsInPlace property; report None for those.
    return item.IsInPlace if hasattr(item, "IsInPlace") else None
items = UnwrapElement(IN[0])
# Map over a list input, otherwise process the single element directly.
OUT = [IsInPlace(x) for x in items] if isinstance(IN[0], list) else IsInPlace(items)
|
{
"content_hash": "ab80030a72fcea9a12d802017fd7fcfd",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 63,
"avg_line_length": 23.384615384615383,
"alnum_prop": 0.7401315789473685,
"repo_name": "CAAD-RWTH/ClockworkForDynamo",
"id": "508785b9560eb27f3e184e5303ce61485f82497e",
"size": "304",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nodes/1.x/python/Family.IsInPlace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "316146"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable ``message_visibility_limit`` integer column to Realm."""

    dependencies = [
        ('zerver', '0131_realm_create_generic_bot_by_admins_only'),
    ]

    operations = [
        migrations.AddField(
            model_name='realm',
            name='message_visibility_limit',
            # Nullable: presumably NULL means "no limit" -- confirm in
            # zerver.models before relying on that.
            field=models.IntegerField(null=True),
        ),
    ]
|
{
"content_hash": "1933864fe110dc562d7155559765c863",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 67,
"avg_line_length": 23.1875,
"alnum_prop": 0.5956873315363881,
"repo_name": "brainwane/zulip",
"id": "3a8ad88ec95bd8c73e60c21ce61c3106c9ee6627",
"size": "421",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "zerver/migrations/0132_realm_message_visibility_limit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "423578"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "647926"
},
{
"name": "JavaScript",
"bytes": "2886792"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "90558"
},
{
"name": "Python",
"bytes": "6000548"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "110849"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    def forwards(self, orm):
        """Data migration: give every team-less Project an owner and a Team.

        Uses the first superuser as a fallback owner; returns silently when
        no superuser exists (nothing sensible to assign).
        """
        from sentry.constants import RESERVED_TEAM_SLUGS
        from sentry.models import slugify_instance
        try:
            superuser = orm['sentry.User'].objects.filter(is_superuser=True)[0]
        except IndexError:
            # No superuser in the database -- skip the backfill entirely.
            return
        for project in orm['sentry.Project'].objects.filter(team__isnull=True):
            if not project.owner:
                project.owner = superuser
            # One fresh team per orphaned project, named after the project.
            project.team = orm['sentry.Team'](
                name=project.name,
                owner=project.owner,
            )
            slugify_instance(project.team, project.team.name, reserved=RESERVED_TEAM_SLUGS)
            project.team.save()
            project.save()
    def backwards(self, orm):
        # Irreversible data migration: teams created by forwards() are kept.
        pass
models = {
u'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Event']", 'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'})
},
u'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': u"orm['sentry.AlertRelatedGroup']", 'to': u"orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
u'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"})
},
u'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'sentry.grouptag': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']", 'unique': 'True'})
},
u'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': u"orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Team']", 'null': 'True'})
},
u'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': u"orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'}),
'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': u"orm['sentry.User']"})
},
u'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': u"orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': u"orm['sentry.TeamMember']", 'to': u"orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
}
complete_apps = ['sentry']
symmetrical = True
|
{
"content_hash": "ceb472cde30910d47984d9da5839a5de",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 225,
"avg_line_length": 81.60576923076923,
"alnum_prop": 0.5488001256824162,
"repo_name": "rdio/sentry",
"id": "3ed601693a0023dcf5d6d49b08bbf91f7753aab0",
"size": "25485",
"binary": false,
"copies": "1",
"ref": "refs/heads/rdio_sentry_6.4.4",
"path": "src/sentry/migrations/0101_ensure_teams.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "533425"
},
{
"name": "HTML",
"bytes": "258193"
},
{
"name": "JavaScript",
"bytes": "916843"
},
{
"name": "Makefile",
"bytes": "2982"
},
{
"name": "Python",
"bytes": "2881969"
},
{
"name": "Ruby",
"bytes": "8042"
}
],
"symlink_target": ""
}
|
""" Install the `arte_plus7` script """
import ast

from setuptools import setup
# Distribution name; must match the `arte_plus7.py` module next to setup.py
# (used for py_modules and the console-script entry point below).
NAME = 'arte_plus7'
# Files concatenated, in order, to build the long description shown on PyPI.
LONG_DESCRIPTION_FILES = ['README.rst', 'CHANGELOG.rst']
def cat(files, join_str=''):
    """Concatenate the content of `files` with `join_str` between them.

    :param files: iterable of file paths, read in order
    :param join_str: separator inserted between file contents
    :return: a single string of all file contents joined by `join_str`
    """
    contents = []
    for path in files:
        # Context manager closes each handle promptly; the original
        # generator expression left the handles open until GC.
        with open(path) as file_fd:
            contents.append(file_fd.read())
    return join_str.join(contents)
def get_version(module):
    """Extract the package version without importing the file.

    Importing causes issues with coverage
    (modules can be removed from sys.modules to prevent this).
    Inspired from pep8 setup.py.

    :param module: module name; `<module>.py` is scanned line by line
    :return: the version string, or None when no `__version__` line exists
    """
    with open('%s.py' % module) as module_fd:
        for line in module_fd:
            if line.startswith('__version__'):
                # literal_eval safely parses the quoted version string;
                # the previous eval() would execute arbitrary code.
                return ast.literal_eval(line.split('=')[-1].strip())
# Module-level side effect: running this script executes the packaging
# command (install, sdist, ...) given on the command line.
setup(
    name=NAME,
    # Read from arte_plus7.py's __version__ without importing the module.
    version=get_version(NAME),
    description='CLI script to get videos from Arte plus 7 using their URL',
    long_description=cat(LONG_DESCRIPTION_FILES, '\n\n'),
    url='https://github.com/cladmi/arte_plus7',
    author='cladmi',
    author_email='hartergaetan@gmail.com',
    download_url='https://github.com/cladmi/arte_plus7',
    py_modules=[NAME],
    license='MIT',
    entry_points={
        # Installs an `arte_plus7` console command mapped to arte_plus7:main.
        'console_scripts': ['{name} = {name}:main'.format(name=NAME)],
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python',
        'Intended Audience :: End Users/Desktop',
        'Topic :: Utilities',
    ],
    install_requires=['argparse', 'beautifulsoup4'],
    include_package_data=True,
)
|
{
"content_hash": "797825caa6db1a0d7a3e6e5339fcfbfa",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 76,
"avg_line_length": 30.215686274509803,
"alnum_prop": 0.6353017521090201,
"repo_name": "cladmi/arte_plus7",
"id": "1063fb9d42d5eda2f80a8984fc01e88dbbae796b",
"size": "1588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10842"
}
],
"symlink_target": ""
}
|
import hmac
import uuid
from hashlib import sha1
from django.db import models
from rest_framework.authtoken.models import AUTH_USER_MODEL
from cyder.base.mixins import ObjectUrlMixin
class Token(models.Model, ObjectUrlMixin):
    """API auth token that allows multiple keys per user.

    Django REST Framework's Token model defines the user field as a
    OneToOneField, so this custom replacement exists to allow several
    tokens per user (e.g. one per purpose).
    """
    id = models.AutoField(primary_key=True)
    key = models.CharField(max_length=40, unique=True)
    user = models.ForeignKey(AUTH_USER_MODEL)
    purpose = models.CharField(max_length=100)  # purpose of token
    created = models.DateTimeField(auto_now_add=True)
    can_write = models.BooleanField(default=False)  # allow requests sent with
                                                    # this token
                                                    # to alter data

    class Meta:
        app_label = 'cyder'

    def save(self, *args, **kwargs):
        # Lazily generate the key on first save so callers never have to.
        if not self.key:
            self.key = self.generate_key()
        return super(Token, self).save(*args, **kwargs)

    def generate_key(self):
        """Return a random 40-char hex key (HMAC-SHA1 keyed on a fresh UUID)."""
        unique = uuid.uuid4()
        return hmac.new(unique.bytes, digestmod=sha1).hexdigest()

    def __unicode__(self):
        return self.key

    def details(self):
        """For automatic table generation via the tablefy() function."""
        data = super(Token, self).details()
        data['data'] = [
            ('Key', 'token__key', self),
            ('Purpose', 'token__purpose', self.purpose),
            ('Created', 'token__created', self.created),
        ]
        return data
|
{
"content_hash": "1b9771325c974ae60a415a953a67d960",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 79,
"avg_line_length": 34.020833333333336,
"alnum_prop": 0.6080832823025107,
"repo_name": "zeeman/cyder",
"id": "5e8b788e5656dadc01ef7794ce30464d19f9365e",
"size": "1633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyder/api/authtoken/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1401"
},
{
"name": "CSS",
"bytes": "143911"
},
{
"name": "CoffeeScript",
"bytes": "4769"
},
{
"name": "HTML",
"bytes": "109090"
},
{
"name": "JavaScript",
"bytes": "344874"
},
{
"name": "Makefile",
"bytes": "11293"
},
{
"name": "Puppet",
"bytes": "6422"
},
{
"name": "Python",
"bytes": "2336377"
},
{
"name": "Shell",
"bytes": "16783"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from flask import Blueprint, render_template, send_file, redirect, url_for
from models.spiders import get_all_spiders, delete_spider, get_stats
from helpers.scrapy_util import start_crawler, start_logger
from helpers.config_reader import GLOBAL_PATH
from dateutil import parser
# Blueprint grouping all spider-management routes; to be registered on the
# Flask application elsewhere.
spiders_bp = Blueprint('spiders_bp', __name__, template_folder='templates')
@spiders_bp.route('/')
def list_spiders():
    """Render the overview page listing every known spider."""
    known_spiders = get_all_spiders()
    return render_template('list-spiders.html', spiders=known_spiders)
@spiders_bp.route('/run/<spider_name>/')
def run_spider(spider_name):
    """Start logging and crawling for `spider_name`, then show the spider list."""
    known_spiders = get_all_spiders()
    start_logger(spider_name)
    start_crawler(spider_name)
    return render_template('list-spiders.html', spiders=known_spiders)
@spiders_bp.route('/log/<spider_name>/')
def read_log(spider_name):
    """Display today's log file for `spider_name`.

    The log is expected at `<GLOBAL_PATH>logs/YYYY-MM-DD.<spider_name>.log`;
    an OSError propagates if the file does not exist yet.
    """
    # Format only the date through strftime, then splice the name in.
    # Previously spider_name was embedded in the strftime format string,
    # so any '%' character in the name would be misinterpreted.
    filename = '%s.%s.log' % (datetime.now().strftime('%Y-%m-%d'), spider_name)
    with open(GLOBAL_PATH + 'logs/' + filename) as logf:
        data = logf.read()
    return render_template('read-log.html', data=data)
@spiders_bp.route('/export/<file_type>/<spider_name>')
def export_json(file_type, spider_name):
    """Send the exported `<spider_name>.<file_type>` file as a download."""
    export_path = '{0}exports/{1}/{2}.{3}'.format(
        GLOBAL_PATH, file_type, spider_name, file_type)
    return send_file(export_path, as_attachment=True)
@spiders_bp.route('/delete/<spider_name>')
def spider_del(spider_name):
    """Delete the spider record, then return to the spider list."""
    delete_spider(spider_name)
    list_url = url_for('spiders_bp.list_spiders')
    return redirect(list_url)
@spiders_bp.route('/stats/<spider_name>')
def spider_stats(spider_name):
    """Render per-run crawl statistics (pages, items, date labels) for a spider."""
    stats = get_stats(spider_name)
    # Collect the three series in a single pass over the stats entries.
    pages_crawled = []
    items_scraped = []
    labels = []
    for entry in stats:
        pages_crawled.append(entry.pages_crawled)
        items_scraped.append(entry.items_scraped)
        labels.append(entry.start_time.strftime('%m-%d-%Y'))
    return render_template('spider-stats.html', stats=stats,
                           labels=labels,
                           items_scraped=items_scraped,
                           pages_crawled=pages_crawled)
|
{
"content_hash": "b87d205d1759024d3c6f0d0e6ceec69d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 75,
"avg_line_length": 39.68,
"alnum_prop": 0.6567540322580645,
"repo_name": "kirankoduru/arachne-ui",
"id": "52b677b32694996417d1109bfd512f405fd58b25",
"size": "1984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/spiders.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "188"
},
{
"name": "HTML",
"bytes": "8430"
},
{
"name": "JavaScript",
"bytes": "151"
},
{
"name": "Python",
"bytes": "17975"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
unified_strdate,
)
class KhanAcademyIE(InfoExtractor):
    """Extractor for Khan Academy videos and topic (playlist) pages."""

    _VALID_URL = r'^https?://(?:(?:www|api)\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])'
    IE_NAME = 'KhanAcademy'

    _TESTS = [{
        'url': 'http://www.khanacademy.org/video/one-time-pad',
        'md5': '7b391cce85e758fb94f763ddc1bbb979',
        'info_dict': {
            'id': 'one-time-pad',
            'ext': 'webm',
            'title': 'The one-time pad',
            'description': 'The perfect cipher',
            'duration': 176,
            'uploader': 'Brit Cruise',
            'uploader_id': 'khanacademy',
            'upload_date': '20120411',
        },
        'add_ie': ['Youtube'],
    }, {
        'url': 'https://www.khanacademy.org/math/applied-math/cryptography',
        'info_dict': {
            'id': 'cryptography',
            'title': 'Journey into cryptography',
            'description': 'How have humans protected their secret messages through history? What has changed today?',
        },
        'playlist_mincount': 3,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        if mobj.group('key') != 'video':
            # topic: return a playlist of its video/topic children
            data = self._download_json(
                'http://api.khanacademy.org/api/v1/topic/' + video_id,
                video_id, 'Downloading topic info')
            entries = []
            for child in data['children']:
                if child['kind'] not in ('Video', 'Topic'):
                    continue
                entries.append({
                    '_type': 'url',
                    'url': child['url'],
                    'id': child['id'],
                    'title': child['title'],
                })
            return {
                '_type': 'playlist',
                'id': video_id,
                'title': data['title'],
                'description': data['description'],
                'entries': entries,
            }

        # single video: delegate to the underlying (e.g. YouTube) URL
        data = self._download_json(
            'http://api.khanacademy.org/api/v1/videos/' + video_id,
            video_id, 'Downloading video info')
        return {
            '_type': 'url_transparent',
            'url': data['url'],
            'id': video_id,
            'title': data['title'],
            'thumbnail': data['image_url'],
            'duration': data['duration'],
            'description': data['description'],
            'uploader': ', '.join(data['author_names']),
            'upload_date': unified_strdate(data['date_added']),
        }
|
{
"content_hash": "e3082825b98ae849bf24402da6ba2979",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 119,
"avg_line_length": 33.426829268292686,
"alnum_prop": 0.45822692448011676,
"repo_name": "achang97/YouTunes",
"id": "61739efa7a4c3b84892083eab10237c23eb69e3d",
"size": "2741",
"binary": false,
"copies": "86",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/youtube_dl/extractor/khanacademy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9366"
}
],
"symlink_target": ""
}
|
from webclient.models import *
from django.contrib import admin
from django import forms
from django.utils.html import format_html
# Register the simple models with the default admin site; the richer admin
# classes below use the @admin.register decorator instead.
admin.site.register(Image)
# (ImageLabel gets a custom admin via @admin.register further down.)
#admin.site.register(ImageLabel)
admin.site.register(ImageSourceType)
admin.site.register(CategoryType)
admin.site.register(ImageFilter)
class ImageLabelInline(admin.TabularInline):
    """Read-only tabular listing of ImageLabels, embedded in LabelerAdmin."""
    model = ImageLabel
    fields = ( 'categoryType', 'parentImage', 'imageWindow', 'pub_date')
    # Every displayed field is read-only in this inline.
    readonly_fields = ('parentImage', 'categoryType', 'imageWindow', 'pub_date')
    extra = 0  # no blank "add new" rows
    show_change_link = True
    can_delete = False
    ordering = ['pub_date']
@admin.register(Labeler)
class LabelerAdmin(admin.ModelAdmin):
    """Admin page for labelers: user info, a label count, and labels inline."""
    fieldsets = [
        ('User', {'fields': ['user']}),
        ('Label Stats', {'fields': ['number_labeled']})
    ]
    readonly_fields = ('number_labeled', )
    inlines = [ImageLabelInline]

    def number_labeled(self, obj):
        """Number of ImageLabels created by this labeler."""
        # COUNT(*) in the database instead of fetching every row just to
        # call len() on the materialized queryset.
        return ImageLabel.objects.filter(labeler=obj).count()
class ImageLabelAdminForm(forms.ModelForm):
    """ModelForm for ImageLabel intended to customize widget rendering."""
    class Meta:
        fields = "__all__"
        model = ImageLabel
        widgets = {
            # NOTE(review): Meta.widgets expects *widget* instances, but
            # forms.ImageField() is a form field — confirm this entry has
            # any effect. This form also does not appear to be attached to
            # ImageLabelAdmin below; verify it is used at all.
            'overlayed_image': forms.ImageField()
        }
@admin.register(ImageLabel)
class ImageLabelAdmin(admin.ModelAdmin):
    """Change-list and detail admin for image labels, with a rendered preview."""

    list_display = ('parentImage', 'categoryType', 'imageWindow', 'labeler', 'timeTaken', 'pub_date')
    readonly_fields = ('overlayed_image', )

    def overlayed_image(self, obj):
        """Read-only <img> tag pointing at the server-rendered label overlay."""
        overlay_url = '/webclient/get_overlayed_image/%d' % obj.id
        return format_html('<img src="{}" alt="Rendered Image Label"></>', overlay_url)
|
{
"content_hash": "a40d61aea20fbdfc4ff4480a03a64409",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 121,
"avg_line_length": 32.724137931034484,
"alnum_prop": 0.6301369863013698,
"repo_name": "darknight-007/agdss-1",
"id": "c2535a1f334a5e913a29dd9208647a901dce098f",
"size": "1898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webclient/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "224266"
},
{
"name": "HTML",
"bytes": "3777031"
},
{
"name": "JavaScript",
"bytes": "531742"
},
{
"name": "Jupyter Notebook",
"bytes": "196302"
},
{
"name": "Python",
"bytes": "142230"
},
{
"name": "Ruby",
"bytes": "903"
},
{
"name": "Shell",
"bytes": "1219"
}
],
"symlink_target": ""
}
|
from .core import base
|
{
"content_hash": "4bdbcc3ab13da6bc3fcae4b8b501c352",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 22,
"avg_line_length": 23,
"alnum_prop": 0.782608695652174,
"repo_name": "Clinical-Genomics/housekeeper",
"id": "a9e0e7763b34ceffb85b399a5af9d39318b6670a",
"size": "47",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "housekeeper/cli/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "59717"
},
{
"name": "Shell",
"bytes": "1624"
}
],
"symlink_target": ""
}
|
from .util import *
from electrum.i18n import _
class UTXOList(MyTreeWidget):
    """Tree widget listing the wallet's unspent transaction outputs (UTXOs)."""

    filter_columns = [0, 2]  # Address, Label

    def __init__(self, parent=None):
        MyTreeWidget.__init__(self, parent, self.create_menu, [ _('Address'), _('Label'), _('Amount'), _('Height'), _('Output point')], 1)
        self.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.setSortingEnabled(True)

    def get_name(self, x):
        """Return the 'txid:n' outpoint string identifying utxo dict `x`."""
        return x.get('prevout_hash') + ":%d"%x.get('prevout_n')

    def on_update(self):
        """Rebuild the list from the wallet's current UTXO set."""
        self.wallet = self.parent.wallet
        # (Removed a dead `item = self.currentItem()` assignment; the value
        # was never used after the list is cleared.)
        self.clear()
        self.utxos = self.wallet.get_utxos()
        for x in self.utxos:
            address = x.get('address')
            height = x.get('height')
            name = self.get_name(x)
            label = self.wallet.get_label(x.get('prevout_hash'))
            amount = self.parent.format_amount(x['value'], whitespaces=True)
            utxo_item = SortableTreeWidgetItem([address, label, amount, '%d'%height, name[0:10] + '...' + name[-2:]])
            # Monospace for address, amount, and outpoint columns.
            utxo_item.setFont(0, QFont(MONOSPACE_FONT))
            utxo_item.setFont(2, QFont(MONOSPACE_FONT))
            utxo_item.setFont(4, QFont(MONOSPACE_FONT))
            # Stash the full outpoint string for context-menu lookups.
            utxo_item.setData(0, Qt.UserRole, name)
            if self.wallet.is_frozen(address):
                # Highlight UTXOs on frozen addresses.
                utxo_item.setBackground(0, ColorScheme.BLUE.as_color(True))
            self.addChild(utxo_item)

    def create_menu(self, position):
        """Context menu: spend the selected coins; details for a single one."""
        selected = [x.data(0, Qt.UserRole) for x in self.selectedItems()]
        if not selected:
            return
        menu = QMenu()
        coins = filter(lambda x: self.get_name(x) in selected, self.utxos)
        menu.addAction(_("Spend"), lambda: self.parent.spend_coins(coins))
        if len(selected) == 1:
            txid = selected[0].split(':')[0]
            tx = self.wallet.transactions.get(txid)
            menu.addAction(_("Details"), lambda: self.parent.show_transaction(tx))
        menu.exec_(self.viewport().mapToGlobal(position))

    def on_permit_edit(self, item, column):
        # disable editing fields in this tab (labels)
        return False
|
{
"content_hash": "505f706ea7707beb329e502956cd17d5",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 138,
"avg_line_length": 40.886792452830186,
"alnum_prop": 0.5925242270419936,
"repo_name": "kyuupichan/electrum",
"id": "78e865360697a9b57e810ba97ef885d6a8d5ba47",
"size": "3329",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gui/qt/utxo_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "1574"
},
{
"name": "Makefile",
"bytes": "837"
},
{
"name": "NSIS",
"bytes": "7311"
},
{
"name": "Python",
"bytes": "1918037"
},
{
"name": "Shell",
"bytes": "17760"
}
],
"symlink_target": ""
}
|
import os, multiprocessing, subprocess
from runner import BrowserCore, path_from_root
from tools.shared import *
def clean_pids(pids):
  """Terminate the given child process ids: SIGTERM first, then SIGKILL."""
  import signal, errno
  def pid_exists(pid):
    # Signal 0 performs error checking only, without delivering a signal.
    try:
      # NOTE: may just kill the process in Windows
      os.kill(pid, 0)
    except OSError, e:
      # EPERM means the process exists but we lack permission to signal it;
      # any other errno (e.g. ESRCH) means it is already gone.
      return e.errno == errno.EPERM
    else:
      return True
  def kill_pids(pids, sig):
    for pid in pids:
      if not pid_exists(pid):
        break
      print '[killing %d]' % pid
      try:
        os.kill(pid, sig)
        print '[kill succeeded]'
      except:
        print '[kill fail]'
  # ask nicely (to try and catch the children)
  kill_pids(pids, signal.SIGTERM)
  # NOTE(review): `time` is not imported here; presumably it arrives via
  # `from tools.shared import *` — confirm.
  time.sleep(1)
  # extreme prejudice, may leave children
  kill_pids(pids, signal.SIGKILL)
def make_relay_server(port1, port2):
  """Spawn the socket_relay.py helper bridging port1 <-> port2.

  Returns the Popen handle so the caller can terminate it later.
  """
  print >> sys.stderr, 'creating relay server on ports %d,%d' % (port1, port2)
  proc = Popen([PYTHON, path_from_root('tests', 'sockets', 'socket_relay.py'), str(port1), str(port2)])
  return proc
class WebsockifyServerHarness:
  """Context manager running a natively-compiled socket server behind a
  websockify proxy, so WebSocket clients can reach it over TCP."""
  def __init__(self, filename, args, listen_port):
    self.pids = []  # every child pid we must clean up on exit
    self.filename = filename
    self.listen_port = listen_port      # websockify listens here
    self.target_port = listen_port-1    # native server listens here
    self.args = args or []

  def __enter__(self):
    import socket, websockify

    # compile the server
    # NOTE empty filename support is a hack to support
    # the current test_enet
    if self.filename:
      Popen([CLANG_CC, path_from_root('tests', self.filename), '-o', 'server', '-DSOCKK=%d' % self.target_port] + get_clang_native_args() + self.args).communicate()
      process = Popen([os.path.abspath('server')])
      self.pids.append(process.pid)

    # start the websocket proxy
    print >> sys.stderr, 'running websockify on %d, forward to tcp %d' % (self.listen_port, self.target_port)
    wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.listen_port, target_host="127.0.0.1", target_port=self.target_port, run_once=True)
    self.websockify = multiprocessing.Process(target=wsp.start_server)
    self.websockify.start()
    self.pids.append(self.websockify.pid)
    print '[Websockify on process %s]' % str(self.pids[-2:])

  def __exit__(self, *args, **kwargs):
    # try to kill the websockify proxy gracefully
    if self.websockify.is_alive():
      self.websockify.terminate()
    self.websockify.join()

    # clean up any processes we started
    clean_pids(self.pids)
class CompiledServerHarness:
  """Context manager compiling a test server to JavaScript with emcc and
  running it under node.js for the duration of a test."""
  def __init__(self, filename, args, listen_port):
    self.pids = []  # child pids to clean up on exit
    self.filename = filename
    self.listen_port = listen_port
    self.args = args or []

  def __enter__(self):
    # assuming this is only used for WebSocket tests at the moment, validate that
    # the ws module is installed
    child = Popen(NODE_JS + ['-e', 'require("ws");'])
    child.communicate()
    assert child.returncode == 0, 'ws module for Node.js not installed. Please run \'npm install\' from %s' % EMSCRIPTEN_ROOT

    # compile the server
    Popen([PYTHON, EMCC, path_from_root('tests', self.filename), '-o', 'server.js', '-DSOCKK=%d' % self.listen_port] + self.args).communicate()
    process = Popen(NODE_JS + ['server.js'])
    self.pids.append(process.pid)

  def __exit__(self, *args, **kwargs):
    # clean up any processes we started
    clean_pids(self.pids)
# always run these tests last
# make sure to use different ports in each one because it takes a while for the processes to be cleaned up
# NOTE all datagram tests are temporarily disabled, as
# we can't truly test datagram sockets until we have
# proper listen server support.
def filter_harnesses(harnesses):
  # XXX avoid websockify for now due to intermittent errors. see issue #2700
  def harness_class(entry):
    # entries are either bare harnesses or (harness, datagram) tuples
    return entry[0].__class__ if type(entry) is tuple else entry.__class__
  return [h for h in harnesses if harness_class(h) is not WebsockifyServerHarness]
class sockets(BrowserCore):
  def test_inet(self):
    """htonl/htons/ntohl/ntohs byte swapping plus inet_addr parsing."""
    src = r'''
      #include <stdio.h>
      #include <arpa/inet.h>

      int main() {
        printf("*%x,%x,%x,%x,%x,%x*\n", htonl(0xa1b2c3d4), htonl(0xfe3572e0), htonl(0x07abcdf0), htons(0xabcd), ntohl(0x43211234), ntohs(0xbeaf));
        in_addr_t i = inet_addr("190.180.10.78");
        printf("%x\n", i);
        return 0;
      }
    '''
    self.do_run(src, '*d4c3b2a1,e07235fe,f0cdab07,cdab,34122143,afbe*\n4e0ab4be\n')

  def test_inet2(self):
    """inet_ntoa / inet_aton round-trip of a raw 32-bit address."""
    src = r'''
      #include <stdio.h>
      #include <arpa/inet.h>

      int main() {
        struct in_addr x, x2;
        int *y = (int*)&x;
        *y = 0x12345678;
        printf("%s\n", inet_ntoa(x));
        int r = inet_aton(inet_ntoa(x), &x2);
        printf("%s\n", inet_ntoa(x2));
        return 0;
      }
    '''
    self.do_run(src, '120.86.52.18\n120.86.52.18\n')

  def test_inet3(self):
    """Same round-trip as test_inet2, but via inet_ntop(AF_INET, ...)."""
    src = r'''
      #include <stdio.h>
      #include <arpa/inet.h>
      #include <sys/socket.h>
      int main() {
        char dst[64];
        struct in_addr x, x2;
        int *y = (int*)&x;
        *y = 0x12345678;
        printf("%s\n", inet_ntop(AF_INET,&x,dst,sizeof dst));
        int r = inet_aton(inet_ntoa(x), &x2);
        printf("%s\n", inet_ntop(AF_INET,&x2,dst,sizeof dst));
        return 0;
      }
    '''
    self.do_run(src, '120.86.52.18\n120.86.52.18\n')
  def test_inet4(self):
    """IPv6 inet_pton/inet_ntop over many address forms, including malformed
    inputs that must produce no output."""
    if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')

    src = r'''
      #include <stdio.h>
      #include <arpa/inet.h>
      #include <sys/socket.h>

      void test(char *test_addr){
          char str[40];
          struct in6_addr addr;
          unsigned char *p = (unsigned char*)&addr;
          int ret;
          ret = inet_pton(AF_INET6,test_addr,&addr);
          if(ret == -1) return;
          if(ret == 0) return;
          if(inet_ntop(AF_INET6,&addr,str,sizeof(str)) == NULL ) return;
          printf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x - %s\n",
               p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9],p[10],p[11],p[12],p[13],p[14],p[15],str);
      }
      int main(){
          test("::");
          test("::1");
          test("::1.2.3.4");
          test("::17.18.19.20");
          test("::ffff:1.2.3.4");
          test("1::ffff");
          test("::255.255.255.255");
          test("0:ff00:1::");
          test("0:ff::");
          test("abcd::");
          test("ffff::a");
          test("ffff::a:b");
          test("ffff::a:b:c");
          test("ffff::a:b:c:d");
          test("ffff::a:b:c:d:e");
          test("::1:2:0:0:0");
          test("0:0:1:2:3::");
          test("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff");
          test("1::255.255.255.255");

          //below should fail and not produce results..
          test("1.2.3.4");
          test("");
          test("-");
      }
    '''
    self.do_run(src,
        "0000:0000:0000:0000:0000:0000:0000:0000 - ::\n"
        "0000:0000:0000:0000:0000:0000:0000:0001 - ::1\n"
        "0000:0000:0000:0000:0000:0000:0102:0304 - ::1.2.3.4\n"
        "0000:0000:0000:0000:0000:0000:1112:1314 - ::17.18.19.20\n"
        "0000:0000:0000:0000:0000:ffff:0102:0304 - ::ffff:1.2.3.4\n"
        "0001:0000:0000:0000:0000:0000:0000:ffff - 1::ffff\n"
        "0000:0000:0000:0000:0000:0000:ffff:ffff - ::255.255.255.255\n"
        "0000:ff00:0001:0000:0000:0000:0000:0000 - 0:ff00:1::\n"
        "0000:00ff:0000:0000:0000:0000:0000:0000 - 0:ff::\n"
        "abcd:0000:0000:0000:0000:0000:0000:0000 - abcd::\n"
        "ffff:0000:0000:0000:0000:0000:0000:000a - ffff::a\n"
        "ffff:0000:0000:0000:0000:0000:000a:000b - ffff::a:b\n"
        "ffff:0000:0000:0000:0000:000a:000b:000c - ffff::a:b:c\n"
        "ffff:0000:0000:0000:000a:000b:000c:000d - ffff::a:b:c:d\n"
        "ffff:0000:0000:000a:000b:000c:000d:000e - ffff::a:b:c:d:e\n"
        "0000:0000:0000:0001:0002:0000:0000:0000 - ::1:2:0:0:0\n"
        "0000:0000:0001:0002:0003:0000:0000:0000 - 0:0:1:2:3::\n"
        "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff - ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff\n"
        "0001:0000:0000:0000:0000:0000:ffff:ffff - 1::ffff:ffff\n"
    )
  def test_getaddrinfo(self):
    """Compile-and-run check of getaddrinfo()."""
    # Reset emcc args so this test compiles with defaults only.
    self.emcc_args=[]
    self.do_run(open(path_from_root('tests', 'sockets', 'test_getaddrinfo.c')).read(), 'success')

  def test_getnameinfo(self):
    """Compile-and-run check of getnameinfo()."""
    self.do_run(open(path_from_root('tests', 'sockets', 'test_getnameinfo.c')).read(), 'success')

  def test_gethostbyname(self):
    """Compile-and-run check of gethostbyname()."""
    self.do_run(open(path_from_root('tests', 'sockets', 'test_gethostbyname.c')).read(), 'success')

  def test_getprotobyname(self):
    """Compile-and-run check of getprotobyname()."""
    self.do_run(open(path_from_root('tests', 'sockets', 'test_getprotobyname.c')).read(), 'success')
  def test_sockets_echo(self):
    """Echo client against native/node echo servers, TCP and datagram modes."""
    sockets_include = '-I'+path_from_root('tests', 'sockets')

    # Websockify-proxied servers can't run dgram tests
    harnesses = [
      (WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49160), 0),
      (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49161), 0),
      (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49162), 1),
      # The following forces non-NULL addr and addlen parameters for the accept call
      (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1'], 49163), 0)
    ]
    harnesses = filter_harnesses(harnesses)

    for harness, datagram in harnesses:
      with harness:
        self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, sockets_include])
  def test_sockets_async_echo(self):
    """Async (-DTEST_ASYNC=1) variant of the echo test, plus a deliberate
    connection failure to exercise the error callback and getsockopt."""
    # Run with ./runner.py sockets.test_sockets_async_echo
    sockets_include = '-I'+path_from_root('tests', 'sockets')

    # Websockify-proxied servers can't run dgram tests
    harnesses = [
      (WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_ASYNC=1'], 49165), 0),
      (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ASYNC=1'], 49166), 0),
      (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1', '-DTEST_ASYNC=1'], 49167), 1),
      # The following forces non-NULL addr and addlen parameters for the accept call
      (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1', '-DTEST_ASYNC=1'], 49168), 0)
    ]
    #harnesses = filter_harnesses(harnesses)

    for harness, datagram in harnesses:
      with harness:
        self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DTEST_ASYNC=1', sockets_include])

    # Deliberately attempt a connection on a port that will fail to test the error callback and getsockopt
    self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=49169', '-DTEST_ASYNC=1', sockets_include])
def test_sockets_echo_bigdata(self):
    """Echo test with a large (256*256*2 character) message baked in as a string literal."""
    sockets_include = '-I' + path_from_root('tests', 'sockets')
    # Build the message with join() instead of repeated += (which is quadratic),
    # cycling through 'a'..'z'. chr() yields the same ASCII characters the old
    # str(unichr(...)) dance produced.
    message = ''.join(chr(ord('a') + (i % 26)) for i in range(256 * 256 * 2))
    # Re-write the client test with this literal (it's too big to pass via
    # command line). Also avoid shadowing the builtin `input`.
    input_filename = path_from_root('tests', 'sockets', 'test_sockets_echo_client.c')
    with open(input_filename) as f:
        client_src = f.read()
    output = client_src.replace('#define MESSAGE "pingtothepong"',
                                '#define MESSAGE "%s"' % message)
    harnesses = [
        (WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49170), 0),
        (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49171), 0),
        (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49172), 1)
    ]
    harnesses = filter_harnesses(harnesses)
    for harness, datagram in harnesses:
        with harness:
            self.btest(output, expected='0',
                       args=[sockets_include,
                             '-DSOCKK=%d' % harness.listen_port,
                             '-DTEST_DGRAM=%d' % datagram],
                       force_c=True)
def test_sockets_partial(self):
    """Partial-write test; the client reports 165 on success."""
    partial_server = os.path.join('sockets', 'test_sockets_partial_server.c')
    servers = [
        WebsockifyServerHarness(partial_server, [], 49180),
        CompiledServerHarness(partial_server, [], 49181),
    ]
    for server in servers:
        with server:
            self.btest(os.path.join('sockets', 'test_sockets_partial_client.c'),
                       expected='165', args=['-DSOCKK=%d' % server.listen_port])
def test_sockets_select_server_down(self):
    """select() behavior when the server goes down; the client reports 266 on success."""
    down_server = os.path.join('sockets', 'test_sockets_select_server_down_server.c')
    servers = [
        WebsockifyServerHarness(down_server, [], 49190),
        CompiledServerHarness(down_server, [], 49191),
    ]
    for server in servers:
        with server:
            self.btest(os.path.join('sockets', 'test_sockets_select_server_down_client.c'),
                       expected='266', args=['-DSOCKK=%d' % server.listen_port])
def test_sockets_select_server_closes_connection_rw(self):
    """select() read/write behavior after the server closes the connection (client reports 266)."""
    include_dir = '-I' + path_from_root('tests', 'sockets')
    echo_server = os.path.join('sockets', 'test_sockets_echo_server.c')
    servers = [
        WebsockifyServerHarness(echo_server, [include_dir, '-DCLOSE_CLIENT_AFTER_ECHO'], 49200),
        CompiledServerHarness(echo_server, [include_dir, '-DCLOSE_CLIENT_AFTER_ECHO'], 49201),
    ]
    for server in servers:
        with server:
            self.btest(os.path.join('sockets', 'test_sockets_select_server_closes_connection_client_rw.c'),
                       expected='266',
                       args=[include_dir, '-DSOCKK=%d' % server.listen_port])
def test_enet(self):
    """Build the bundled enet library with emconfigure/emmake, then run its client/server test."""
    try_delete(self.in_dir('enet'))
    shutil.copytree(path_from_root('tests', 'enet'), self.in_dir('enet'))
    pwd = os.getcwd()
    os.chdir(self.in_dir('enet'))
    try:
        # Configure and build enet inside its own tree.
        Popen([PYTHON, path_from_root('emconfigure'), './configure']).communicate()
        Popen([PYTHON, path_from_root('emmake'), 'make']).communicate()
        enet = [self.in_dir('enet', '.libs', 'libenet.a'),
                '-I' + path_from_root('tests', 'enet', 'include')]
    finally:
        # Always restore the working directory, even if the build fails,
        # so a failure here doesn't poison later tests.
        os.chdir(pwd)
    for harness in [
        CompiledServerHarness(os.path.join('sockets', 'test_enet_server.c'), enet, 49210)
    ]:
        with harness:
            self.btest(os.path.join('sockets', 'test_enet_client.c'),
                       expected='0', args=enet + ['-DSOCKK=%d' % harness.listen_port])
# This test is no longer in use for WebSockets, as we can't truly emulate
# a server in the browser (in the past, there were some hacks to make it
# somewhat work, but those have been removed). However, with WebRTC it
# should be possible to resurrect this test.
# def test_enet_in_browser(self):
# try_delete(self.in_dir('enet'))
# shutil.copytree(path_from_root('tests', 'enet'), self.in_dir('enet'))
# pwd = os.getcwd()
# os.chdir(self.in_dir('enet'))
# Popen([PYTHON, path_from_root('emconfigure'), './configure']).communicate()
# Popen([PYTHON, path_from_root('emmake'), 'make']).communicate()
# enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I'+path_from_root('tests', 'enet', 'include')]
# os.chdir(pwd)
# Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_enet_server.c'), '-o', 'server.html', '-DSOCKK=2235'] + enet).communicate()
# with WebsockifyServerHarness('', [], 2235, 2234):
# with WebsockifyServerHarness('', [], 2237, 2236):
# pids = []
# try:
# proc = make_relay_server(2234, 2236)
# pids.append(proc.pid)
# self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=['-DSOCKK=2237', '-DUSE_IFRAME=1'] + enet)
# finally:
# clean_pids(pids);
def zzztest_webrtc(self): # XXX see src/settings.js, this is disabled pending investigation
    """Build a WebRTC host page and peer page, then run them against a signalling broker.

    Disabled (zzz prefix) pending investigation; see src/settings.js.
    """
    host_src = 'webrtc_host.c'
    peer_src = 'webrtc_peer.c'
    host_outfile = 'host.html'
    peer_outfile = 'peer.html'
    # Copy the host source into the test dir with REPORT_RESULT support injected.
    host_filepath = path_from_root('tests', 'sockets', host_src)
    temp_host_filepath = os.path.join(self.get_dir(), os.path.basename(host_src))
    with open(host_filepath) as f: host_src = f.read()
    with open(temp_host_filepath, 'w') as f: f.write(self.with_report_result(host_src))
    # Likewise for the peer source.
    peer_filepath = path_from_root('tests', 'sockets', peer_src)
    temp_peer_filepath = os.path.join(self.get_dir(), os.path.basename(peer_src))
    with open(peer_filepath) as f: peer_src = f.read()
    with open(temp_peer_filepath, 'w') as f: f.write(self.with_report_result(peer_src))
    # Host-side pre-js: when the broker reports a peer route, open the peer
    # page with the route in the query string and start listening.
    open(os.path.join(self.get_dir(), 'host_pre.js'), 'w').write('''
var Module = {
webrtc: {
broker: 'http://localhost:8182',
session: undefined,
onpeer: function(peer, route) {
window.open('http://localhost:8888/peer.html?' + route);
// iframe = document.createElement("IFRAME");
// iframe.setAttribute("src", "http://localhost:8888/peer.html?" + route);
// iframe.style.display = "none";
// document.body.appendChild(iframe);
peer.listen();
},
onconnect: function(peer) {
},
ondisconnect: function(peer) {
},
onerror: function(error) {
console.error(error);
}
},
};
''')
    # Peer-side pre-js: connect back using the session id passed in the query
    # string, and close the window once the host disconnects.
    open(os.path.join(self.get_dir(), 'peer_pre.js'), 'w').write('''
var Module = {
webrtc: {
broker: 'http://localhost:8182',
session: window.location.toString().split('?')[1],
onpeer: function(peer, route) {
peer.connect(Module['webrtc']['session']);
},
onconnect: function(peer) {
},
ondisconnect: function(peer) {
// Calling window.close() from this handler hangs my browser, so run it in the next turn
setTimeout(window.close, 0);
},
onerror: function(error) {
console.error(error);
}
}
};
''')
    # Build both pages with the WebRTC socket backend and socket debug logging.
    Popen([PYTHON, EMCC, temp_host_filepath, '-o', host_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'host_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1']).communicate()
    Popen([PYTHON, EMCC, temp_peer_filepath, '-o', peer_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'peer_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1']).communicate()
    # note: you may need to run this manually yourself, if npm is not in the path, or if you need a version that is not in the path
    Popen(['npm', 'install', path_from_root('tests', 'sockets', 'p2p')]).communicate()
    broker = Popen(NODE_JS + [path_from_root('tests', 'sockets', 'p2p', 'broker', 'p2p-broker.js')])
    expected = '1'
    # run_browser expects one '/report_result?<char>' entry per character of
    # `expected` (here just '1').
    self.run_browser(host_outfile, '.', ['/report_result?' + e for e in expected])
    broker.kill();
def test_nodejs_sockets_echo(self):
    """Check that sockets work when the client code is run in Node.js.

    Run with ./runner.py sockets.test_nodejs_sockets_echo
    """
    if not NODE_JS in JS_ENGINES:
        return self.skip('node is not present')
    sockets_include = '-I'+path_from_root('tests', 'sockets')
    harnesses = [
        (WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59160), 0),
        (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 59162), 0),
        (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 59164), 1)
    ]
    harnesses = filter_harnesses(harnesses)
    # Basic test of node client against both a Websockified and compiled echo server.
    for harness, datagram in harnesses:
        with harness:
            # -DREPORT_RESULT=int dummy stubs out the browser-only result-reporting macro.
            Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DREPORT_RESULT=int dummy'], stdout=PIPE, stderr=PIPE).communicate()
            out = run_js('client.js', engine=NODE_JS, full_output=True)
            self.assertContained('do_msg_read: read 14 bytes', out)
    # Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
    # server because as long as the subprotocol list contains binary it will configure itself to accept binary
    # This test also checks that the connect url contains the correct subprotocols.
    print "\nTesting compile time WebSocket configuration.\n"
    for harness in filter_harnesses([
        WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59166)
    ]):
        with harness:
            Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-s', 'SOCKET_DEBUG=1', '-s', 'WEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166', '-DREPORT_RESULT=int dummy'], stdout=PIPE, stderr=PIPE).communicate()
            out = run_js('client.js', engine=NODE_JS, full_output=True)
            self.assertContained('do_msg_read: read 14 bytes', out)
            # Either URL form (with or without a trailing slash) is acceptable.
            self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out)
    # Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
    # In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
    # the connection would fail without us specifying a valid WebSocket URL in the configuration.
    print "\nTesting runtime WebSocket configuration.\n"
    for harness in filter_harnesses([
        WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59168)
    ]):
        with harness:
            # Runtime configuration (url + subprotocol) overrides the
            # deliberately wrong compile-time port above.
            open(os.path.join(self.get_dir(), 'websocket_pre.js'), 'w').write('''
var Module = {
websocket: {
url: 'ws://localhost:59168/testA/testB',
subprotocol: 'text, base64, binary',
}
};
''')
            Popen([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js', 'websocket_pre.js', '-s', 'SOCKET_DEBUG=1', '-DSOCKK=12345', '-DREPORT_RESULT=int dummy'], stdout=PIPE, stderr=PIPE).communicate()
            out = run_js('client.js', engine=NODE_JS, full_output=True)
            self.assertContained('do_msg_read: read 14 bytes', out)
            self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out)
|
{
"content_hash": "f204f44f0b47f8d2f9983e7071322d0e",
"timestamp": "",
"source": "github",
"line_count": 501,
"max_line_length": 272,
"avg_line_length": 45.409181636726544,
"alnum_prop": 0.624,
"repo_name": "slightperturbation/Cobalt",
"id": "ce6b1ab3d1bf1e02abe669a7db165e011652464a",
"size": "22750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ext/emsdk_portable/emscripten/1.27.0/tests/test_sockets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "7942339"
},
{
"name": "Batchfile",
"bytes": "27769"
},
{
"name": "C",
"bytes": "64431592"
},
{
"name": "C++",
"bytes": "192377551"
},
{
"name": "CMake",
"bytes": "2563457"
},
{
"name": "CSS",
"bytes": "32911"
},
{
"name": "DTrace",
"bytes": "12324"
},
{
"name": "Emacs Lisp",
"bytes": "11557"
},
{
"name": "Go",
"bytes": "132306"
},
{
"name": "Groff",
"bytes": "141757"
},
{
"name": "HTML",
"bytes": "10597275"
},
{
"name": "JavaScript",
"bytes": "7134930"
},
{
"name": "LLVM",
"bytes": "37169002"
},
{
"name": "Lua",
"bytes": "30196"
},
{
"name": "Makefile",
"bytes": "4368336"
},
{
"name": "Nix",
"bytes": "17734"
},
{
"name": "OCaml",
"bytes": "401898"
},
{
"name": "Objective-C",
"bytes": "492807"
},
{
"name": "PHP",
"bytes": "324917"
},
{
"name": "Perl",
"bytes": "27878"
},
{
"name": "Prolog",
"bytes": "1200"
},
{
"name": "Python",
"bytes": "3678053"
},
{
"name": "Shell",
"bytes": "3047898"
},
{
"name": "SourcePawn",
"bytes": "2461"
},
{
"name": "Standard ML",
"bytes": "2841"
},
{
"name": "TeX",
"bytes": "120660"
},
{
"name": "VimL",
"bytes": "13743"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import patterns, url

# URL routes for the scenario app. View names resolve against the
# 'insekta.scenario.views' prefix. The ([\w-]+) group captures a single
# positional argument (presumably a scenario slug -- confirm in the views).
urlpatterns = patterns('insekta.scenario.views',
    url(r'^$', 'scenario_home', name='scenario.home'),
    url(r'^groups$', 'scenario_groups', name='scenario.groups'),
    url(r'^all$', 'all_scenarios', name='scenario.all'),
    url(r'^show/([\w-]+)$', 'show_scenario', name='scenario.show'),
    url(r'^manage_vm/([\w-]+)$', 'manage_vm', name='scenario.manage_vm'),
    url(r'^heartbeat/([\w-]+)$', 'heartbeat', name='scenario.heartbeat'),
    url(r'^submit_secret/([\w-]+)$', 'submit_secret',
        name='scenario.submit_secret'),
    url(r'^editor$', 'editor', name='scenario.editor'),
)
|
{
"content_hash": "454b24fad08b59753892e5a3adac714b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 72,
"avg_line_length": 49.07692307692308,
"alnum_prop": 0.6253918495297806,
"repo_name": "teythoon/Insekta",
"id": "362b18a32fe9c4244ede58e1c76e2a3e93c7e80f",
"size": "638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insekta/scenario/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2327"
},
{
"name": "Python",
"bytes": "72458"
},
{
"name": "Shell",
"bytes": "5098"
}
],
"symlink_target": ""
}
|
"""Definition of platform parameters."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import platform_parameter_domain
from core.domain import platform_parameter_registry as registry
import utils
Registry = registry.Registry
FEATURE_STAGES = platform_parameter_domain.FEATURE_STAGES # pylint: disable=invalid-name
DATA_TYPES = platform_parameter_domain.DATA_TYPES # pylint: disable=invalid-name
PARAM_NAMES = utils.create_enum( # pylint: disable=invalid-name
'dummy_feature', 'dummy_parameter')
# Platform parameters should all be defined below.
Registry.create_feature_flag(
PARAM_NAMES.dummy_feature,
'This is a dummy feature flag.',
FEATURE_STAGES.dev,
)
Registry.create_platform_parameter(
PARAM_NAMES.dummy_parameter,
'This is a dummy platform parameter.',
DATA_TYPES.string
)
|
{
"content_hash": "ea6a03f8eb89cd4110d334e347c956b3",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 88,
"avg_line_length": 32.62068965517241,
"alnum_prop": 0.7642706131078224,
"repo_name": "prasanna08/oppia",
"id": "94215e6e166c76fd4adafa54a2f908518dba431c",
"size": "1569",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/platform_parameter_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "97795"
},
{
"name": "HTML",
"bytes": "1128491"
},
{
"name": "JavaScript",
"bytes": "733121"
},
{
"name": "Python",
"bytes": "9362251"
},
{
"name": "Shell",
"bytes": "10639"
},
{
"name": "TypeScript",
"bytes": "6077851"
}
],
"symlink_target": ""
}
|
"""Type conversion and validation classes for configuration options.
Use these classes as values for the `type` argument to
:class:`oslo_config.cfg.Opt` and its subclasses.
"""
import re
import netaddr
import six
class ConfigType(object):
    """Base class for option type/validation classes.

    Subclasses override ``BASE_TYPES`` with the tuple of Python types
    their validated values may have.
    """

    # NOTE(review): (None,) contains no actual type object, so calling
    # is_base_type on this base class raises TypeError. Presumably this is
    # a must-override placeholder for subclasses -- confirm.
    BASE_TYPES = (None,)

    def is_base_type(self, other):
        """Return True if ``other`` is an instance of one of BASE_TYPES."""
        return isinstance(other, self.BASE_TYPES)
class String(ConfigType):
    """String type.

    Values pass through unconverted and are returned as str objects.

    :param choices: Optional sequence of valid values. Mutually
                    exclusive with 'regex'.
    :param quotes: If True, strip a matching pair of single or double
                   quotes enclosing the string; a quote at the start with
                   no matching quote at the end is an error. Off by
                   default. Useful with container types like List.
    :param regex: Optional regular expression (string or compiled regex)
                  the value must match on an unanchored search. Mutually
                  exclusive with 'choices'.
    """

    BASE_TYPES = six.string_types

    def __init__(self, choices=None, quotes=False, regex=None):
        super(String, self).__init__()
        if choices and regex:
            raise ValueError("'choices' and 'regex' cannot both be specified")
        self.choices = choices
        self.quotes = quotes
        self.regex = re.compile(regex) if regex is not None else None

    def __call__(self, value):
        value = str(value)
        if self.quotes and value:
            opening = value[0]
            if opening in "\"'":
                if value[-1] != opening:
                    raise ValueError('Non-closed quote: %s' % value)
                value = value[1:-1]
        if self.regex and not self.regex.search(value):
            raise ValueError("Value %r doesn't match regex %r" %
                             (value, self.regex.pattern))
        # Guard-clause form: reject values outside the choice list.
        if self.choices is not None and value not in self.choices:
            raise ValueError(
                'Valid values are [%s], but found %s' % (
                    ', '.join([str(v) for v in self.choices]),
                    repr(value)))
        return value

    def __repr__(self):
        parts = []
        if self.choices:
            parts.append("choices=%r" % self.choices)
        if self.regex:
            parts.append("regex=%r" % self.regex.pattern)
        if not parts:
            return 'String'
        return "String(%s)" % ",".join(parts)

    def __eq__(self, other):
        return (self.__class__ == other.__class__ and
                self.choices == other.choices and
                self.quotes == other.quotes and
                self.regex == other.regex)
class MultiString(String):
    """String type whose values may also already be lists (multi-valued options)."""

    BASE_TYPES = six.string_types + (list,)
class Boolean(ConfigType):
    """Boolean type.

    Matching is case insensitive; accepted spellings are 1/0, yes/no,
    true/false and on/off.
    """

    TRUE_VALUES = ['true', '1', 'on', 'yes']
    FALSE_VALUES = ['false', '0', 'off', 'no']
    BASE_TYPES = (bool,)

    def __call__(self, value):
        # Already a bool: nothing to convert.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in self.TRUE_VALUES:
            return True
        if lowered in self.FALSE_VALUES:
            return False
        raise ValueError('Unexpected boolean value %r' % value)

    def __repr__(self):
        return 'Boolean'

    def __eq__(self, other):
        return self.__class__ == other.__class__
class Integer(ConfigType):
    """Integer type.

    Converts the value to an int, optionally range-checking it.
    Whitespace or an empty string converts to None.

    :param min: Optional check that value is greater than or equal to min
    :param max: Optional check that value is less than or equal to max
    """

    BASE_TYPES = six.integer_types

    def __init__(self, min=None, max=None):
        super(Integer, self).__init__()
        self.min = min
        self.max = max
        if min is not None and max is not None and max < min:
            raise ValueError('Max value is less than min value')

    def __call__(self, value):
        if isinstance(value, int):
            result = value
        else:
            stripped = str(value).strip()
            # Empty/whitespace input maps to None rather than an error.
            result = int(value) if stripped else None
        if result is not None:
            self._check_range(result)
        return result

    def _check_range(self, value):
        # Bounds are inclusive on both ends.
        if self.min is not None and value < self.min:
            raise ValueError('Should be greater than or equal to %d' %
                             self.min)
        if self.max is not None and value > self.max:
            raise ValueError('Should be less than or equal to %d' % self.max)

    def __repr__(self):
        bounds = []
        if self.min is not None:
            bounds.append('min=%d' % self.min)
        if self.max is not None:
            bounds.append('max=%d' % self.max)
        if bounds:
            return 'Integer(%s)' % ', '.join(bounds)
        return 'Integer'

    def __eq__(self, other):
        return (self.__class__ == other.__class__ and
                self.min == other.min and
                self.max == other.max)
class Float(ConfigType):
    """Float type."""

    # Allow float options to also be set from ints.
    BASE_TYPES = six.integer_types + (float,)

    def __call__(self, value):
        return value if isinstance(value, float) else float(value)

    def __repr__(self):
        return 'Float'

    def __eq__(self, other):
        return self.__class__ == other.__class__
class List(ConfigType):
    """List type.

    Represents values of another (item) type, separated by commas; the
    resulting value is a list containing those values.

    List doesn't know if the item type can also contain commas. To work
    around this it tries the following: if the next part fails item
    validation, it appends a comma and the next part until validation
    succeeds or there are no parts left. In the latter case it raises the
    first validation error seen.

    :param item_type: type of list items
    :param bounds: if True, value should be inside "[" and "]" pair
    """

    BASE_TYPES = (list,)

    def __init__(self, item_type=None, bounds=False):
        super(List, self).__init__()
        if item_type is None:
            item_type = String()
        if not callable(item_type):
            raise TypeError('item_type must be callable')
        self.item_type = item_type
        self.bounds = bounds

    def __call__(self, value):
        # Already a list: pass through untouched.
        if isinstance(value, list):
            return value
        result = []
        s = value.strip()
        if self.bounds:
            if not s.startswith('['):
                raise ValueError('Value should start with "["')
            if not s.endswith(']'):
                raise ValueError('Value should end with "]"')
            s = s[1:-1]
        if s == '':
            return result
        values = s.split(',')
        while values:
            value = values.pop(0)
            # BUG FIX: first_error must persist across retries. It was
            # previously reset to None on every iteration of the inner loop,
            # so the *last* validation error was raised instead of the first.
            first_error = None
            while True:
                try:
                    validated_value = self.item_type(value.strip())
                    break
                except ValueError as e:
                    if not first_error:
                        first_error = e
                    if len(values) == 0:
                        raise first_error
                    # Item may contain a comma: merge with the next part and retry.
                    value += ',' + values.pop(0)
            result.append(validated_value)
        return result

    def __repr__(self):
        return 'List of %s' % repr(self.item_type)

    def __eq__(self, other):
        return (
            (self.__class__ == other.__class__) and
            (self.item_type == other.item_type)
        )
class Dict(ConfigType):
    """Dictionary type.

    Dictionary type values are key:value pairs separated by commas; the
    resulting value is a dictionary of these key/value pairs. Keys are
    always strings, but the value type can be customized.

    If a pair fails value validation, the next comma-separated part is
    appended and validation retried (values may themselves contain
    commas); if no parts remain, the first validation error is raised.

    :param value_type: type of values in dictionary
    :param bounds: if True, value should be inside "{" and "}" pair
    """

    BASE_TYPES = (dict,)

    def __init__(self, value_type=None, bounds=False):
        super(Dict, self).__init__()
        if value_type is None:
            value_type = String()
        if not callable(value_type):
            raise TypeError('value_type must be callable')
        self.value_type = value_type
        self.bounds = bounds

    def __call__(self, value):
        # Already a dict: pass through untouched.
        if isinstance(value, dict):
            return value
        result = {}
        s = value.strip()
        if self.bounds:
            if not s.startswith('{'):
                raise ValueError('Value should start with "{"')
            if not s.endswith('}'):
                raise ValueError('Value should end with "}"')
            s = s[1:-1]
        if s == '':
            return result
        pairs = s.split(',')
        while pairs:
            pair = pairs.pop(0)
            # BUG FIX: first_error must persist across retries. It was
            # previously reset to None on every iteration of the inner loop,
            # so the *last* validation error was raised instead of the first.
            first_error = None
            while True:
                try:
                    key_value = pair.split(':', 1)
                    if len(key_value) < 2:
                        raise ValueError('Value should be NAME:VALUE pairs '
                                         'separated by ","')
                    key, value = key_value
                    key = key.strip()
                    value = value.strip()
                    value = self.value_type(value)
                    break
                except ValueError as e:
                    if not first_error:
                        first_error = e
                    if not pairs:
                        raise first_error
                    # Value may contain a comma: merge with the next part and retry.
                    pair += ',' + pairs.pop(0)
            if key == '':
                raise ValueError('Key name should not be empty')
            if key in result:
                raise ValueError('Duplicate key %s' % key)
            result[key] = value
        return result

    def __repr__(self):
        return 'Dict of %s' % repr(self.value_type)

    def __eq__(self, other):
        return (
            (self.__class__ == other.__class__) and
            (self.value_type == other.value_type)
        )
class IPAddress(ConfigType):
    """IP address type.

    Represents either IPv4 or IPv6; without the version parameter both
    versions are checked.

    :param version: defines which version should be explicitly checked (4 or 6)
    """

    BASE_TYPES = six.string_types

    def __init__(self, version=None):
        super(IPAddress, self).__init__()
        # Dispatch table: None means "accept either version".
        checkers = {
            None: self._check_both_versions,
            4: self._check_ipv4,
            6: self._check_ipv6,
        }
        self.version_checker = checkers.get(version)
        if self.version_checker is None:
            raise TypeError("%s is not a valid IP version." % version)

    def __call__(self, value):
        value = str(value)
        if not value:
            raise ValueError("IP address cannot be an empty string")
        self.version_checker(value)
        return value

    def __repr__(self):
        return "IPAddress"

    def __eq__(self, other):
        return self.__class__ == other.__class__

    def _check_ipv4(self, address):
        if not netaddr.valid_ipv4(address, netaddr.core.INET_PTON):
            raise ValueError("%s is not an IPv4 address" % address)

    def _check_ipv6(self, address):
        if not netaddr.valid_ipv6(address, netaddr.core.INET_PTON):
            raise ValueError("%s is not an IPv6 address" % address)

    def _check_both_versions(self, address):
        valid = (netaddr.valid_ipv4(address, netaddr.core.INET_PTON) or
                 netaddr.valid_ipv6(address, netaddr.core.INET_PTON))
        if not valid:
            raise ValueError("%s is not IPv4 or IPv6 address" % address)
|
{
"content_hash": "f27a392f62e0c437d17703355d3f6703",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 79,
"avg_line_length": 28.60620525059666,
"alnum_prop": 0.539629567829134,
"repo_name": "shad7/oslo.config",
"id": "6245eecb980d27977309f8d77d955691ade8d2a6",
"size": "12593",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "oslo_config/types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "361272"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
# import apis into api package
from .binary_api import BinaryApi
from .case_record_api import CaseRecordApi
from .classification_api import ClassificationApi
from .contact_item_api import ContactItemApi
from .document_state_api import DocumentStateApi
from .generic_question_api import GenericQuestionApi
from .lawyer_api import LawyerApi
from .potential_answer_api import PotentialAnswerApi
from .referral_api import ReferralApi
from .related_link_api import RelatedLinkApi
from .required_document_api import RequiredDocumentApi
from .system_message_api import SystemMessageApi
from .uploaded_document_api import UploadedDocumentApi
from .uploaded_picture_api import UploadedPictureApi
from .user_api import UserApi
from .config_api import ConfigApi
from .diagnostics_api import DiagnosticsApi
from .permissions_api import PermissionsApi
from .providers_api import ProvidersApi
from .users_api import UsersApi
from .webhooks_api import WebhooksApi
|
{
"content_hash": "bc420bc016bbf5bcc2a02da72c7567f6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 54,
"avg_line_length": 41.208333333333336,
"alnum_prop": 0.8452982810920121,
"repo_name": "tombull/hackneylawclassifier",
"id": "a6eae6da7a20abaec8504fb7cb6211496cca39f4",
"size": "989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/hackney_law_data_client/apis/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1115315"
}
],
"symlink_target": ""
}
|
import os
import tempfile
from typing import List, Any
from py4j.java_gateway import JavaObject
from pyflink.common.execution_config import ExecutionConfig
from pyflink.common.job_client import JobClient
from pyflink.common.job_execution_result import JobExecutionResult
from pyflink.common.restart_strategy import RestartStrategies
from pyflink.common.typeinfo import PickledBytesTypeInfo, TypeInformation
from pyflink.datastream.checkpoint_config import CheckpointConfig
from pyflink.datastream.checkpointing_mode import CheckpointingMode
from pyflink.datastream.data_stream import DataStream
from pyflink.datastream.functions import SourceFunction
from pyflink.datastream.state_backend import _from_j_state_backend
from pyflink.datastream.time_characteristic import TimeCharacteristic
from pyflink.java_gateway import get_gateway
from pyflink.serializers import PickleSerializer
from pyflink.util.utils import load_java_class, add_jars_to_context_class_loader
__all__ = ['StreamExecutionEnvironment']
class StreamExecutionEnvironment(object):
"""
The StreamExecutionEnvironment is the context in which a streaming program is executed. A
*LocalStreamEnvironment* will cause execution in the attached JVM, a
*RemoteStreamEnvironment* will cause execution on a remote setup.
The environment provides methods to control the job execution (such as setting the parallelism
or the fault tolerance/checkpointing parameters) and to interact with the outside world (data
access).
"""
def __init__(self, j_stream_execution_environment, serializer=None):
    """Wrap a Java StreamExecutionEnvironment.

    :param j_stream_execution_environment: The py4j Java environment object.
    :param serializer: Serializer for Python objects; defaults to a fresh
                       :class:`PickleSerializer` per instance.
    """
    self._j_stream_execution_environment = j_stream_execution_environment
    # Create the default per instance instead of sharing one PickleSerializer
    # evaluated once at class-definition time (mutable-default pitfall).
    self.serializer = PickleSerializer() if serializer is None else serializer
def get_config(self):
    """Return the environment's :class:`~pyflink.common.ExecutionConfig`."""
    j_config = self._j_stream_execution_environment.getConfig()
    return ExecutionConfig(j_config)
def set_parallelism(self, parallelism):
    """Set the default parallelism for operations executed through this
    environment.

    A parallelism of x causes all operators (such as map, batchReduce) to
    run with x parallel instances, overriding this environment's default.
    The *LocalStreamEnvironment* defaults to the number of hardware
    contexts (CPU cores / threads); when executing via the command line
    client from a JAR file, the default configured for that setup is used.

    :param parallelism: The parallelism.
    :return: This object.
    """
    j_env = self._j_stream_execution_environment.setParallelism(parallelism)
    self._j_stream_execution_environment = j_env
    return self
def set_max_parallelism(self, max_parallelism):
    """Set the maximum degree of parallelism for the program; the upper
    limit (inclusive) is 32767.

    The maximum parallelism bounds dynamic scaling and also fixes the
    number of key groups used for partitioned state.

    :param max_parallelism: Maximum degree of parallelism,
                            with 0 < maxParallelism <= 2^15 - 1.
    :return: This object.
    """
    j_env = self._j_stream_execution_environment.setMaxParallelism(max_parallelism)
    self._j_stream_execution_environment = j_env
    return self
def get_parallelism(self):
    """
    Gets the parallelism with which operation are executed by default.
    Operations can individually override this value to use a specific
    parallelism.

    :return: The parallelism used by operations, unless they override that value.
    """
    # Delegates straight to the wrapped Java environment.
    return self._j_stream_execution_environment.getParallelism()
def get_max_parallelism(self):
    """
    Gets the maximum degree of parallelism defined for the program.

    The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
    defines the number of key groups used for partitioned state.

    :return: Maximum degree of parallelism.
    """
    # Delegates straight to the wrapped Java environment.
    return self._j_stream_execution_environment.getMaxParallelism()
def set_buffer_timeout(self, timeout_millis):
    """Set the maximum flush interval (milliseconds) for the output buffers.

    By default the buffers flush frequently for low latency. The parameter
    selects one of three logical modes:

    - a positive integer triggers flushing periodically at that interval
    - 0 triggers flushing after every record, minimizing latency
    - -1 triggers flushing only when the buffer is full, maximizing throughput

    :param timeout_millis: The maximum time between two output flushes.
    :return: This object.
    """
    j_env = self._j_stream_execution_environment.setBufferTimeout(timeout_millis)
    self._j_stream_execution_environment = j_env
    return self
def get_buffer_timeout(self):
    """
    Gets the maximum time frequency (milliseconds) for the flushing of the
    output buffers. For clarification on the extremal values see
    :func:`set_buffer_timeout`.

    :return: The timeout of the buffer.
    """
    # Delegates straight to the wrapped Java environment.
    return self._j_stream_execution_environment.getBufferTimeout()
def disable_operator_chaining(self):
    """Disable operator chaining for streaming operators.

    Chaining co-locates non-shuffle operations in the same thread, fully
    avoiding serialization and de-serialization.

    :return: This object.
    """
    j_env = self._j_stream_execution_environment.disableOperatorChaining()
    self._j_stream_execution_environment = j_env
    return self
def is_chaining_enabled(self):
    """
    Returns whether operator chaining is enabled.

    :return: True if chaining is enabled, false otherwise.
    """
    # Delegates straight to the wrapped Java environment.
    return self._j_stream_execution_environment.isChainingEnabled()
def get_checkpoint_config(self):
    """
    Gets the checkpoint config, which holds values such as the checkpoint
    interval and the delay between checkpoints.

    :return: The :class:`~pyflink.datastream.CheckpointConfig`.
    """
    j_config = self._j_stream_execution_environment.getCheckpointConfig()
    return CheckpointConfig(j_config)
def enable_checkpointing(self, interval, mode=None):
    """
    Enables checkpointing for the streaming job. The distributed state of the
    streaming dataflow will be periodically snapshotted; on failure the
    dataflow is restarted from the latest completed checkpoint. The state is
    stored in the configured state backend.

    .. note::
        Checkpointing iterative streaming dataflows is not properly supported
        at the moment; iterative jobs will not be started if used with enabled
        checkpointing.

    Example:
    ::

        >>> env.enable_checkpointing(300000, CheckpointingMode.AT_LEAST_ONCE)

    :param interval: Time interval between state checkpoints in milliseconds.
    :param mode: The checkpointing mode, selecting between "exactly once" and
                 "at least once" guaranteed; when None the default mode is used.
    :return: This object.
    """
    if mode is None:
        self._j_stream_execution_environment = \
            self._j_stream_execution_environment.enableCheckpointing(interval)
    else:
        j_checkpointing_mode = CheckpointingMode._to_j_checkpointing_mode(mode)
        # Re-assign the wrapped Java environment here too, consistent with the
        # no-mode branch above; the original dropped the returned environment.
        self._j_stream_execution_environment = \
            self._j_stream_execution_environment.enableCheckpointing(
                interval, j_checkpointing_mode)
    return self
def get_checkpoint_interval(self):
    """
    Returns the checkpointing interval or -1 if checkpointing is disabled.
    Shorthand for get_checkpoint_config().get_checkpoint_interval().

    :return: The checkpointing interval or -1.
    """
    j_env = self._j_stream_execution_environment
    return j_env.getCheckpointInterval()
def get_checkpointing_mode(self):
    """
    Returns the checkpointing mode (exactly-once vs. at-least-once).
    Shorthand for get_checkpoint_config().get_checkpointing_mode().

    :return: The :class:`~pyflink.datastream.CheckpointingMode`.
    """
    j_mode = self._j_stream_execution_environment.getCheckpointingMode()
    return CheckpointingMode._from_j_checkpointing_mode(j_mode)
def get_state_backend(self):
    """
    Gets the state backend that defines how to store and checkpoint state.

    .. seealso:: :func:`set_state_backend`

    :return: The :class:`StateBackend`.
    """
    return _from_j_state_backend(
        self._j_stream_execution_environment.getStateBackend())
def set_state_backend(self, state_backend):
    """
    Sets the state backend that describes how to store and checkpoint operator
    state: both the data structures holding state during execution and where
    checkpointed data is persisted. The built-in backends are
    :class:`~pyflink.datastream.MemoryStateBackend` (heap objects, only small
    states), :class:`~pyflink.datastream.FsStateBackend` (checkpoints heap
    state to files; with a replicated file system such as HDFS/S3 the state
    survives node failures) and
    :class:`~pyflink.datastream.RocksDBStateBackend`.

    .. seealso:: :func:`get_state_backend`

    Example:
    ::

        >>> env.set_state_backend(RocksDBStateBackend("file://var/checkpoints/"))

    :param state_backend: The :class:`StateBackend`.
    :return: This object.
    """
    j_backend = state_backend._j_state_backend
    self._j_stream_execution_environment = \
        self._j_stream_execution_environment.setStateBackend(j_backend)
    return self
def set_restart_strategy(self, restart_strategy_configuration):
    """
    Sets the restart strategy configuration, which determines which restart
    strategy the execution graph uses in case of a restart.

    Example:
    ::

        >>> env.set_restart_strategy(RestartStrategies.no_restart())

    :param restart_strategy_configuration: Restart strategy configuration to be set.
    :return:
    """
    j_conf = restart_strategy_configuration._j_restart_strategy_configuration
    self._j_stream_execution_environment.setRestartStrategy(j_conf)
def get_restart_strategy(self):
    """
    Returns the specified restart strategy configuration.

    :return: The restart strategy configuration to be used.
    """
    j_strategy = self._j_stream_execution_environment.getRestartStrategy()
    return RestartStrategies._from_j_restart_strategy(j_strategy)
def add_default_kryo_serializer(self, type_class_name, serializer_class_name):
    """
    Adds a new Kryo default serializer to the Runtime.

    Example:
    ::

        >>> env.add_default_kryo_serializer("com.aaa.bbb.TypeClass", "com.aaa.bbb.Serializer")

    :param type_class_name: The full-qualified java class name of the types
                            serialized with the given serializer.
    :param serializer_class_name: The full-qualified java class name of the serializer to use.
    """
    j_type = load_java_class(type_class_name)
    j_serializer = load_java_class(serializer_class_name)
    self._j_stream_execution_environment.addDefaultKryoSerializer(j_type, j_serializer)
def register_type_with_kryo_serializer(self, type_class_name, serializer_class_name):
    """
    Registers the given serializer class as the Kryo serializer for the given
    type at the KryoSerializer.

    Example:
    ::

        >>> env.register_type_with_kryo_serializer("com.aaa.bbb.TypeClass",
        ...                                        "com.aaa.bbb.Serializer")

    :param type_class_name: The full-qualified java class name of the types
                            serialized with the given serializer.
    :param serializer_class_name: The full-qualified java class name of the serializer to use.
    """
    j_type = load_java_class(type_class_name)
    j_serializer = load_java_class(serializer_class_name)
    self._j_stream_execution_environment.registerTypeWithKryoSerializer(
        j_type, j_serializer)
def register_type(self, type_class_name):
    """
    Registers the given type with the serialization stack. A type eventually
    serialized as a POJO is registered with the POJO serializer; a type
    serialized with Kryo is registered at Kryo so that only tags are written.

    Example:
    ::

        >>> env.register_type("com.aaa.bbb.TypeClass")

    :param type_class_name: The full-qualified java class name of the type to register.
    """
    j_type = load_java_class(type_class_name)
    self._j_stream_execution_environment.registerType(j_type)
def set_stream_time_characteristic(self, characteristic):
    """
    Sets the time characteristic for all streams created from this
    environment, e.g. processing time, event time, or ingestion time.
    IngestionTime or EventTime also set a default watermark update interval
    of 200 ms; change it via
    :func:`pyflink.common.ExecutionConfig.set_auto_watermark_interval`
    if that does not suit your application.

    Example:
    ::

        >>> env.set_stream_time_characteristic(TimeCharacteristic.EventTime)

    :param characteristic: The time characteristic, one of
                           :data:`TimeCharacteristic.ProcessingTime`,
                           :data:`TimeCharacteristic.IngestionTime`,
                           :data:`TimeCharacteristic.EventTime`.
    """
    j_char = TimeCharacteristic._to_j_time_characteristic(characteristic)
    self._j_stream_execution_environment.setStreamTimeCharacteristic(j_char)
def get_stream_time_characteristic(self):
    """
    Gets the time characteristic.

    .. seealso:: :func:`set_stream_time_characteristic`

    :return: The :class:`TimeCharacteristic`.
    """
    j_char = self._j_stream_execution_environment.getStreamTimeCharacteristic()
    return TimeCharacteristic._from_j_time_characteristic(j_char)
def add_python_file(self, file_path: str):
    """
    Adds a python dependency — a python file, package or local directory —
    to the PYTHONPATH of the python UDF worker. Make sure the dependency can
    be imported.

    :param file_path: The path of the python dependency.
    """
    jvm = get_gateway().jvm
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    existing = env_config.getString(jvm.PythonOptions.PYTHON_FILES.key(), None)
    if existing is None:
        merged = file_path
    else:
        merged = jvm.PythonDependencyUtils.FILE_DELIMITER.join([existing, file_path])
    env_config.setString(jvm.PythonOptions.PYTHON_FILES.key(), merged)
def set_python_requirements(self, requirements_file_path: str,
                            requirements_cache_dir: str = None):
    """
    Specifies a requirements.txt file that defines the third-party python
    dependencies. They are installed to a temporary directory and added to the
    PYTHONPATH of the python UDF worker. For dependencies the cluster cannot
    download, a directory containing their installation packages can be given
    as "requirements_cache_dir"; it is uploaded to the cluster to support
    offline installation.

    Example:
    ::

        # commands executed in shell
        $ echo numpy==1.16.5 > requirements.txt
        $ pip download -d cached_dir -r requirements.txt --no-binary :all:

        # python code
        >>> stream_env.set_python_requirements("requirements.txt", "cached_dir")

    .. note::
        The installation packages must match the platform of the cluster and
        the python version used; they are installed with pip, so make sure
        pip >= 7.1.0 and SetupTools >= 37.0.0.

    :param requirements_file_path: The path of "requirements.txt" file.
    :param requirements_cache_dir: The path of the local directory which
                                   contains the installation packages.
    """
    jvm = get_gateway().jvm
    value = requirements_file_path
    if requirements_cache_dir is not None:
        value = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
            [value, requirements_cache_dir])
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    env_config.setString(jvm.PythonOptions.PYTHON_REQUIREMENTS.key(), value)
def add_python_archive(self, archive_path: str, target_dir: str = None):
    """
    Adds a python archive file, extracted to the working directory of the
    python UDF worker — into a directory named ${target_dir} when given,
    otherwise into a directory named after the archive file. This is the way
    to upload a virtual environment when the UDF needs a python version not
    available on the cluster; the interpreter path inside the uploaded
    environment is then set via
    :func:`pyflink.table.TableConfig.set_python_executable`. Files from the
    archive are also accessible in UDFs via relative paths.

    Example:
    ::

        # command executed in shell
        # assert the relative path of python interpreter is py_env/bin/python
        $ zip -r py_env.zip py_env

        # python code
        >>> stream_env.add_python_archive("py_env.zip")
        >>> stream_env.set_python_executable("py_env.zip/py_env/bin/python")

        # or
        >>> stream_env.add_python_archive("py_env.zip", "myenv")
        >>> stream_env.set_python_executable("myenv/py_env/bin/python")

        # the files contained in the archive file can be accessed in UDF
        >>> def my_udf():
        ...     with open("myenv/py_env/data/data.txt") as f:
        ...         ...

    .. note::
        Make sure the uploaded python environment matches the platform the
        cluster runs on and that the python version is 3.5 or higher.

    .. note::
        Currently only zip-format is supported, i.e. zip, jar, whl, egg, etc;
        tar, tar.gz, 7z, rar and other formats are not supported.

    :param archive_path: The archive file path.
    :param target_dir: Optional, the target dir name that the archive file extracted to.
    """
    jvm = get_gateway().jvm
    entry = archive_path
    if target_dir is not None:
        entry = jvm.PythonDependencyUtils.PARAM_DELIMITER.join([entry, target_dir])
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    existing = env_config.getString(jvm.PythonOptions.PYTHON_ARCHIVES.key(), None)
    if existing is not None:
        entry = jvm.PythonDependencyUtils.FILE_DELIMITER.join([existing, entry])
    env_config.setString(jvm.PythonOptions.PYTHON_ARCHIVES.key(), entry)
def set_python_executable(self, python_exec: str):
    """
    Sets the path of the python interpreter used to execute the python udf
    workers, e.g. "/usr/local/bin/python3". When the interpreter lives inside
    a virtual environment uploaded via
    :func:`pyflink.datastream.StreamExecutionEnvironment.add_python_archive`,
    pass its path relative to the extracted archive.

    Example:
    ::

        # command executed in shell
        # assume that the relative path of python interpreter is py_env/bin/python
        $ zip -r py_env.zip py_env

        # python code
        >>> stream_env.add_python_archive("py_env.zip")
        >>> stream_env.set_python_executable("py_env.zip/py_env/bin/python")

    .. note::
        Make sure the uploaded python environment matches the platform the
        cluster runs on and that the python version is 3.5 or higher.

    .. note::
        The python udf worker depends on Apache Beam (version == 2.23.0);
        ensure the specified environment meets the above requirements.

    :param python_exec: The path of python interpreter.
    """
    jvm = get_gateway().jvm
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    env_config.setString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), python_exec)
def add_jars(self, *jars_path: str):
    """
    Adds a list of jar files that will be uploaded to the cluster and
    referenced by the job.

    :param jars_path: Path of jars.
    """
    add_jars_to_context_class_loader(jars_path)
    jvm = get_gateway().jvm
    jars_key = jvm.org.apache.flink.configuration.PipelineOptions.JARS.key()
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    joined = jvm.PythonDependencyUtils.FILE_DELIMITER.join(jars_path)
    existing = env_config.getString(jars_key, None)
    if existing is not None:
        joined = jvm.PythonDependencyUtils.FILE_DELIMITER.join([existing, joined])
    env_config.setString(jars_key, joined)
def add_classpaths(self, *classpaths: str):
    """
    Adds a list of URLs that are added to the classpath of each user code
    classloader of the program. Paths must specify a protocol (e.g. file://)
    and be accessible on all nodes.

    :param classpaths: Classpaths that will be added.
    """
    add_jars_to_context_class_loader(classpaths)
    jvm = get_gateway().jvm
    cp_key = jvm.org.apache.flink.configuration.PipelineOptions.CLASSPATHS.key()
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    joined = jvm.PythonDependencyUtils.FILE_DELIMITER.join(classpaths)
    existing = env_config.getString(cp_key, None)
    if existing is not None:
        joined = jvm.PythonDependencyUtils.FILE_DELIMITER.join([existing, joined])
    env_config.setString(cp_key, joined)
def get_default_local_parallelism(self):
    """
    Gets the default parallelism that will be used for the local execution
    environment.

    :return: The default local parallelism.
    """
    j_env = self._j_stream_execution_environment
    return j_env.getDefaultLocalParallelism()
def set_default_local_parallelism(self, parallelism):
    """
    Sets the default parallelism that will be used for the local execution
    environment.

    :param parallelism: The parallelism to use as the default local parallelism.
    """
    j_env = self._j_stream_execution_environment
    j_env.setDefaultLocalParallelism(parallelism)
def execute(self, job_name=None):
    """
    Triggers the program execution. All parts of the program that resulted in
    a "sink" operation (e.g. printing results or forwarding them to a message
    queue) are executed. The execution is logged and displayed with the
    provided name.

    :param job_name: Desired name of the job, optional.
    :return: The result of the job execution, containing elapsed time and accumulators.
    """
    stream_graph = self._generate_stream_graph(
        clear_transformations=True, job_name=job_name)
    j_result = self._j_stream_execution_environment.execute(stream_graph)
    return JobExecutionResult(j_result)
def execute_async(self, job_name: str = 'Flink Streaming Job') -> JobClient:
    """
    Triggers the program execution asynchronously. All parts of the program
    that resulted in a "sink" operation (e.g. printing results or forwarding
    them to a message queue) are executed.

    :param job_name: Desired name of the job.
    :return: A JobClient that can be used to communicate with the submitted
             job, completed on submission succeeded.
    """
    stream_graph = self._generate_stream_graph(
        clear_transformations=True, job_name=job_name)
    j_client = self._j_stream_execution_environment.executeAsync(stream_graph)
    return JobClient(j_job_client=j_client)
def get_execution_plan(self):
    """
    Creates the plan with which the system will execute the program and
    returns it as a JSON string describing the execution data flow graph.
    Must be called before the plan is executed. If the compiler could not be
    instantiated, or the master could not be contacted for execution planning
    information, an exception is thrown.

    :return: The execution plan of the program, as a JSON String.
    """
    return self._generate_stream_graph(False).getStreamingPlanAsJSON()
@staticmethod
def get_execution_environment():
    """
    Creates an execution environment that represents the context in which the
    program is currently executed. Invoked standalone, this returns a local
    execution environment.

    :return: The execution environment of the context in which the program is executed.
    """
    # Local name fixed from the original misspelling "exection".
    j_execution_environment = get_gateway().jvm \
        .org.apache.flink.streaming.api.environment \
        .StreamExecutionEnvironment.getExecutionEnvironment()
    return StreamExecutionEnvironment(j_execution_environment)
def add_source(self, source_func: SourceFunction, source_name: str = 'Custom Source',
               type_info: TypeInformation = None) -> 'DataStream':
    """
    Adds a data source to the streaming topology.

    :param source_func: the user defined function.
    :param source_name: name of the data source. Optional.
    :param type_info: type of the returned stream. Optional.
    :return: the data stream constructed.
    """
    if type_info is None:
        j_type_info = None
    else:
        j_type_info = type_info.get_java_type_info()
    j_stream = self._j_stream_execution_environment.addSource(
        source_func.get_java_function(), source_name, j_type_info)
    return DataStream(j_data_stream=j_stream)
def read_text_file(self, file_path: str, charset_name: str = "UTF-8") -> DataStream:
    """
    Reads the given file line-by-line into a DataStream of strings, decoding
    each line with the named charset. This interface is not fault tolerant
    and is intended for test purposes.

    :param file_path: The path of the file, as a URI (e.g., "file:///some/local/file" or
                      "hdfs://host:port/file/path")
    :param charset_name: The name of the character set used to read the file.
    :return: The DataStream that represents the data read from the given file as text lines.
    """
    j_stream = self._j_stream_execution_environment.readTextFile(
        file_path, charset_name)
    return DataStream(j_stream)
def from_collection(self, collection: List[Any],
                    type_info: TypeInformation = None) -> DataStream:
    """
    Creates a data stream from the given non-empty collection; the type of
    the stream is that of the collection elements. The resulting source is
    non-parallel (parallelism one).

    :param collection: The collection of elements to create the data stream from.
    :param type_info: The TypeInformation for the produced data stream
    :return: the data stream representing the given collection.
    """
    # Delegate to the shared pickling-based implementation.
    return self._from_collection(collection, type_info)
def _from_collection(self, elements: List[Any],
                     type_info: TypeInformation = None) -> DataStream:
    """Create a non-parallel DataStream source from ``elements``.

    The elements are pickled to a temporary file which the Java side reads
    back. When ``type_info`` is None the stream carries raw pickled byte
    arrays; otherwise the given type information is used for the elements.
    """
    temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
    serializer = self.serializer
    try:
        with temp_file:
            # dumps elements to a temporary file by pickle serializer.
            serializer.dump_to_stream(elements, temp_file)
        gateway = get_gateway()
        # if user does not defined the element data types, read the pickled data as a byte array
        # list.
        if type_info is None:
            j_objs = gateway.jvm.PythonBridgeUtils.readPickledBytes(temp_file.name)
            out_put_type_info = PickledBytesTypeInfo.PICKLED_BYTE_ARRAY_TYPE_INFO()
        else:
            j_objs = gateway.jvm.PythonBridgeUtils.readPythonObjects(temp_file.name, False)
            out_put_type_info = type_info
        # Since flink python module depends on table module, we can make use of utils of it when
        # implementing python DataStream API.
        PythonTableUtils = gateway.jvm\
            .org.apache.flink.table.planner.utils.python.PythonTableUtils
        execution_config = self._j_stream_execution_environment.getConfig()
        j_input_format = PythonTableUtils.getCollectionInputFormat(
            j_objs,
            out_put_type_info.get_java_type_info(),
            execution_config
        )
        j_data_stream_source = self._j_stream_execution_environment.createInput(
            j_input_format,
            out_put_type_info.get_java_type_info()
        )
        # A collection source must be non-parallel to preserve element order.
        j_data_stream_source.forceNonParallel()
        return DataStream(j_data_stream=j_data_stream_source)
    finally:
        # Always remove the temporary pickle file, even on failure.
        os.unlink(temp_file.name)
def _generate_stream_graph(self, clear_transformations: bool = False, job_name: str = None) \
        -> JavaObject:
    """Build the Java StreamGraph with python dependencies configured,
    optionally clearing cached transformations and setting the job name."""
    gateway = get_gateway()
    j_graph = gateway.jvm \
        .org.apache.flink.python.util.PythonConfigUtil.generateStreamGraphWithDependencies(
            self._j_stream_execution_environment, clear_transformations)
    if job_name is not None:
        j_graph.setJobName(job_name)
    return j_graph
|
{
"content_hash": "24dee29dbf4ccfa749cf6b9a7ebbf070",
"timestamp": "",
"source": "github",
"line_count": 745,
"max_line_length": 100,
"avg_line_length": 45.26979865771812,
"alnum_prop": 0.6609440787522979,
"repo_name": "darionyaphet/flink",
"id": "c25a7db2658f221abebe871a957bab7db23a9188",
"size": "34684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flink-python/pyflink/datastream/stream_execution_environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4792"
},
{
"name": "CSS",
"bytes": "18100"
},
{
"name": "CoffeeScript",
"bytes": "89007"
},
{
"name": "HTML",
"bytes": "87854"
},
{
"name": "Java",
"bytes": "32636498"
},
{
"name": "JavaScript",
"bytes": "8267"
},
{
"name": "Python",
"bytes": "166860"
},
{
"name": "Scala",
"bytes": "6133615"
},
{
"name": "Shell",
"bytes": "95893"
}
],
"symlink_target": ""
}
|
from chatbot.bot import BotPlugin, command
import random
class Plugin(BotPlugin):
    """Magic 8-ball plugin: replies with a random canned answer to questions.

    Registers a "missing command" handler so any unrecognized command whose
    last word ends with "?" is treated as a question for the 8-ball.
    """

    def __init__(self, bot):
        super().__init__(bot)
        self.register_command("missing_8ball", self._question, argc=0,
                              flags=command.CommandFlag.Missing)

    @staticmethod
    def get_default_config():
        # Default answer pool; can be overridden via the plugin configuration.
        return {
            "answers": [
                "Yes!",
                "No!",
                "Maybe...",
                "Definitely!",
                "Absolutely not!",
                "I have a really hard time making decisions..."
            ]
        }

    async def _question(self, msg, argv):
        # Guard against empty argv: the original indexed argv[-1] directly and
        # raised IndexError on an empty message instead of CommandNotFoundError.
        if not argv or not argv[-1].endswith("?"):
            raise command.CommandNotFoundError
        await msg.reply(random.choice(self.cfg["answers"]))
|
{
"content_hash": "2c67a9724438f956803072fd8e27d345",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 105,
"avg_line_length": 29.153846153846153,
"alnum_prop": 0.5277044854881267,
"repo_name": "mphe/pychatbot",
"id": "de99d0bced4bc0df32a5a297ac9a04f9672ea378",
"size": "783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chatbot/bot/plugins/8ball.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "156927"
},
{
"name": "Shell",
"bytes": "864"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from time import time
import click
from twisted.internet import defer, endpoints, reactor # pylint: disable=import-error
from twisted.logger import ILogObserver # pylint: disable=import-error
from twisted.logger import Logger # pylint: disable=import-error
from twisted.logger import LogLevel # pylint: disable=import-error
from twisted.logger import formatEvent # pylint: disable=import-error
from twisted.python import failure # pylint: disable=import-error
from twisted.spread import pb # pylint: disable=import-error
from zope.interface import provider # pylint: disable=import-error
from platformio import __pioremote_endpoint__, __version__, app, exception, maintenance
from platformio.remote.factory.client import RemoteClientFactory
from platformio.remote.factory.ssl import SSLContextFactory
class RemoteClientBase( # pylint: disable=too-many-instance-attributes
    pb.Referenceable
):
    """Base Twisted Perspective Broker client for PIO Remote.

    Handles connecting to the remote-development cloud endpoint,
    authorization, keep-alive pings and graceful disconnect. Subclasses
    implement :meth:`agent_pool_ready`.
    """

    PING_DELAY = 60  # seconds between keep-alive pings
    PING_MAX_FAILURES = 3  # consecutive failed pings before dropping the link
    DEBUG = False

    def __init__(self):
        self.log_level = LogLevel.warn
        self.log = Logger(namespace="remote", observer=self._log_observer)
        self.id = app.get_host_id()
        self.name = app.get_host_name()
        self.join_options = {"corever": __version__}
        self.perspective = None  # PB perspective, set after authorization
        self.agentpool = None  # remote agent pool, set after a successful join
        self._ping_id = 0
        self._ping_caller = None
        self._ping_counter = 0
        self._reactor_stopped = False
        self._exit_code = 0

    @provider(ILogObserver)
    def _log_observer(self, event):
        """Echo log events from our namespace at or above the set level."""
        # Unless DEBUG, drop events from other namespaces or below log_level.
        if not self.DEBUG and (
            event["log_namespace"] != self.log.namespace
            or self.log_level > event["log_level"]
        ):
            return
        msg = formatEvent(event)
        click.echo(
            "%s [%s] %s"
            % (
                datetime.fromtimestamp(event["log_time"]).strftime("%Y-%m-%d %H:%M:%S"),
                event["log_level"].name,
                msg,
            )
        )

    def connect(self):
        """Connect to the PIO Remote endpoint and run the reactor (blocks)."""
        self.log.info("Name: {name}", name=self.name)
        self.log.info("Connecting to PlatformIO Remote Development Cloud")

        # pylint: disable=protected-access
        proto, options = endpoints._parse(__pioremote_endpoint__)
        proto = proto[0]

        factory = RemoteClientFactory()
        factory.remote_client = self
        factory.sslContextFactory = None
        if proto == "ssl":
            factory.sslContextFactory = SSLContextFactory(options["host"])
            reactor.connectSSL(
                options["host"],
                int(options["port"]),
                factory,
                factory.sslContextFactory,
            )
        elif proto == "tcp":
            reactor.connectTCP(options["host"], int(options["port"]), factory)
        else:
            raise exception.PlatformioException("Unknown PIO Remote Cloud protocol")
        reactor.run()
        # Reactor has stopped; surface a non-zero exit code as an exception.
        if self._exit_code != 0:
            raise exception.ReturnErrorCode(self._exit_code)

    def cb_client_authorization_failed(self, err):
        """Report an authorization failure and disconnect with exit code 1."""
        msg = "Bad account credentials"
        if err.check(pb.Error):
            msg = err.getErrorMessage()
        self.log.error(msg)
        self.disconnect(exit_code=1)

    def cb_client_authorization_made(self, perspective):
        """Store the perspective and request to join the agent pool."""
        self.log.info("Successfully authorized")
        self.perspective = perspective
        d = perspective.callRemote("join", self.id, self.name, self.join_options)
        d.addCallback(self._cb_client_join_made)
        d.addErrback(self.cb_global_error)

    def _cb_client_join_made(self, result):
        # result[0] == 1: joined, result[1] carries the agent pool.
        # result[0] == 2: server pushes a service request (e.g. "disconnect").
        code = result[0]
        if code == 1:
            self.agentpool = result[1]
            self.agent_pool_ready()
            self.restart_ping()
        elif code == 2:
            self.remote_service(*result[1:])

    def remote_service(self, command, options):
        """Handle a service request pushed by the cloud."""
        if command == "disconnect":
            self.log.error(
                "PIO Remote Cloud disconnected: {msg}", msg=options.get("message")
            )
            self.disconnect()

    def restart_ping(self, reset_counter=True):
        """(Re)schedule the next keep-alive ping after PING_DELAY seconds."""
        # stop previous ping callers
        self.stop_ping(reset_counter)
        self._ping_caller = reactor.callLater(self.PING_DELAY, self._do_ping)

    def _do_ping(self):
        """Send a ping carrying the current unix timestamp as its id."""
        self._ping_counter += 1
        self._ping_id = int(time())
        d = self.perspective.callRemote("service", "ping", {"id": self._ping_id})
        d.addCallback(self._cb_pong)
        d.addErrback(self._cb_pong)

    def stop_ping(self, reset_counter=True):
        """Cancel any pending ping; optionally reset the failure counter."""
        if reset_counter:
            self._ping_counter = 0
        if not self._ping_caller or not self._ping_caller.active():
            return
        self._ping_caller.cancel()
        self._ping_caller = None

    def _cb_pong(self, result):
        # A pong echoing our ping id means the link is alive; otherwise count
        # a miss and drop the transport after PING_MAX_FAILURES misses.
        if not isinstance(result, failure.Failure) and self._ping_id == result:
            self.restart_ping()
            return
        if self._ping_counter >= self.PING_MAX_FAILURES:
            self.stop_ping()
            self.perspective.broker.transport.loseConnection()
        else:
            self.restart_ping(reset_counter=False)

    def agent_pool_ready(self):
        """Hook for subclasses; called once the agent pool has been joined."""
        raise NotImplementedError

    def disconnect(self, exit_code=None):
        """Stop pinging and the reactor; remember the exit code if given."""
        self.stop_ping()
        if exit_code is not None:
            self._exit_code = exit_code
        if reactor.running and not self._reactor_stopped:
            self._reactor_stopped = True
            reactor.stop()

    def cb_disconnected(self, _):
        """Reset connection state after the broker connection is lost."""
        self.stop_ping()
        self.perspective = None
        self.agentpool = None

    def cb_global_error(self, err):
        """Map remote failures to user-facing messages and disconnect."""
        # Expected teardown failures are silently ignored.
        if err.check(pb.PBConnectionLost, defer.CancelledError):
            return

        msg = err.getErrorMessage()
        if err.check(pb.DeadReferenceError):
            msg = "Remote Client has been terminated"
        elif "PioAgentNotStartedError" in str(err.type):
            msg = (
                "Could not find active agents. Please start it before on "
                "a remote machine using `pio remote agent start` command.\n"
                "See http://docs.platformio.org/page/plus/pio-remote.html"
            )
        else:
            maintenance.on_platformio_exception(Exception(err.type))
        click.secho(msg, fg="red", err=True)
        self.disconnect(exit_code=1)
|
{
"content_hash": "2ff1c94f3187b262f3abc374c162c849",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 88,
"avg_line_length": 35.513966480446925,
"alnum_prop": 0.6043731319804939,
"repo_name": "platformio/platformio-core",
"id": "fe2c4fb4c90efb4a3af8801b602cf634639f0747",
"size": "6968",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "platformio/remote/client/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2005"
},
{
"name": "Makefile",
"bytes": "710"
},
{
"name": "Processing",
"bytes": "101"
},
{
"name": "Python",
"bytes": "1345194"
},
{
"name": "Smarty",
"bytes": "52334"
}
],
"symlink_target": ""
}
|
"""Script to aggregate CDC Data at a country level from state level data."""
import pandas as pd
from absl import app
from absl import flags
# Required path to the input CSV containing state-level CDC data.
flags.DEFINE_string('input_path', None,
                    'Path to input CSV with state level data.')
# Required path where the aggregated country-level CSV is written.
flags.DEFINE_string('output_path', None, 'Output CSV path.')
_FLAGS = flags.FLAGS  # Alias for accessing parsed flag values.
def main(argv):
df = pd.read_csv(_FLAGS.input_path)
# Aggregating all count stat vars
df_count = df.loc[df['StatVar'].str.startswith('Count')]
df_count.drop('Unit', axis=1, inplace=True) # Count statvars have no unit.
df_count.drop_duplicates(subset=['Year', 'Geo', 'StatVar'],
keep='first',
inplace=True)
country_df = df_count.groupby(by=['Year', 'StatVar'],
as_index=False).agg({'Quantity': 'sum'})
country_df.to_csv(_FLAGS.output_path, index=False)
if __name__ == "__main__":
flags.mark_flags_as_required(['input_path', 'output_path'])
app.run(main)
|
{
"content_hash": "090b4873b89b5ba2d5dbdcf84ce53047",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 32.83870967741935,
"alnum_prop": 0.600196463654224,
"repo_name": "datacommonsorg/data",
"id": "e00d7cb46004a80511142ae77dd5dc2c369497ce",
"size": "1593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/us_cdc/natality/aggregate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "78"
},
{
"name": "Go",
"bytes": "51446"
},
{
"name": "HTML",
"bytes": "32842253"
},
{
"name": "JavaScript",
"bytes": "458"
},
{
"name": "Jupyter Notebook",
"bytes": "5088443"
},
{
"name": "Python",
"bytes": "3723204"
},
{
"name": "R",
"bytes": "28607"
},
{
"name": "Shell",
"bytes": "25468"
},
{
"name": "TypeScript",
"bytes": "13472"
}
],
"symlink_target": ""
}
|
import logging
import paho.mqtt.client as mqtt
import config
logger = logging.getLogger(__name__)


def on_connect(client, userdata, flags, rc):
    """
    The callback for when the client receives a CONNACK response from the server.

    Subscribing in on_connect() means that if we lose the connection and
    reconnect then subscriptions will be renewed.

    Note: paho-mqtt's v1 API invokes this callback as
    on_connect(client, userdata, flags, rc); the previous signature was
    missing the ``flags`` dict and raised TypeError when the broker
    acknowledged the connection.
    """
    logger.info("Connected with result code %s", str(rc))
    client.subscribe("$SYS/#")
class MqttConnection:
    """Thin wrapper around a paho-mqtt client for connecting and publishing."""

    def connect(self, user=None, password=None):
        """Connect to the broker configured in ``config`` and start the loop.

        Credentials are only applied when config.MQTT_AUTHENTICATE is set.
        """
        self.mqttc = mqtt.Client()
        self.mqttc.on_connect = on_connect
        if config.MQTT_AUTHENTICATE:
            self.mqttc.username_pw_set(user, password)
        self.mqttc.connect(config.MQTT_HOST, config.MQTT_PORT, 60)
        logger.info('MQTT connect called, waiting for connected')
        # Run the network loop in a background thread so send() does not
        # block the caller.
        self.mqttc.loop_start()
        logger.info('MQTT looping in other thread')

    def send(self, topic, message):
        """Publish *message* on *topic*; return True on success, else False."""
        logger.debug('MQTT sending message %s', message)
        (result, mid) = self.mqttc.publish(topic, message)
        if result == mqtt.MQTT_ERR_SUCCESS:
            logger.info('MQTT message send')
            return True
        elif result == mqtt.MQTT_ERR_NO_CONN:
            logger.critical('ERROR, MQTT message not send, client not connected')
            return False
        # Previously fell through returning None; report any other error
        # code explicitly (still falsy for callers).
        logger.error('MQTT message not sent, error code %s', result)
        return False
|
{
"content_hash": "1a2fe614ae0ed63a017cbc2902929012",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 81,
"avg_line_length": 34.973684210526315,
"alnum_prop": 0.6561324303987961,
"repo_name": "RRMoelker/socketMQTT",
"id": "acfe9bf7f83a0ed50382fcb536ae4c91045f8684",
"size": "1329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mqtt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3302"
}
],
"symlink_target": ""
}
|
"""distutils.util
Miscellaneous utility functions -- anything that doesn't fit into
one of the other *util.py modules.
"""
__revision__ = "$Id$"
import sys, os, string, re
from distutils.errors import DistutilsPlatformError
from distutils.dep_util import newer
from distutils.spawn import spawn
from distutils import log
from distutils.errors import DistutilsByteCompileError
def get_platform ():
    """Return a string that identifies the current platform.  This is used
    mainly to distinguish platform-specific build directories and
    platform-specific built distributions.  Typically includes the OS name
    and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.

    Examples of returned values:
       linux-i586
       linux-alpha (?)
       solaris-2.6-sun4u
       irix-5.3
       irix64-6.2

    Windows will return one of:
       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
       win-ia64 (64bit Windows on Itanium)
       win32 (all others - specifically, sys.platform is returned)

    For other non-POSIX platforms, currently just returns 'sys.platform'.
    """
    # NOTE(review): legacy Python 2 code -- relies on the deprecated
    # 'string' module and sys.maxint; kept verbatim.
    if os.name == 'nt':
        # sniff sys.version for architecture.
        prefix = " bit ("
        i = string.find(sys.version, prefix)
        if i == -1:
            return sys.platform
        j = string.find(sys.version, ")", i)
        look = sys.version[i+len(prefix):j].lower()
        if look=='amd64':
            return 'win-amd64'
        if look=='itanium':
            return 'win-ia64'
        return sys.platform
    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform
    # Try to distinguish various flavours of Unix
    (osname, host, release, version, machine) = os.uname()
    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = string.lower(osname)
    osname = string.replace(osname, '/', '')
    machine = string.replace(machine, ' ', '_')
    machine = string.replace(machine, '/', '-')
    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5": # SunOS 5 == Solaris 2
            osname = "solaris"
            # Solaris marketing version = SunOS major - 3 (e.g. 5.8 -> 2.8).
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix": # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        # Keep only the leading dotted-number part of the release string.
        rel_re = re.compile (r'[\d.]+')
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        #
        # For our purposes, we'll assume that the system version from
        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
        # to. This makes the compatibility story a bit more sane because the
        # machine is going to compile and link as if it were
        # MACOSX_DEPLOYMENT_TARGET.
        from distutils.sysconfig import get_config_vars
        cfgvars = get_config_vars()
        macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
        if 1:
            # Always calculate the release of the running machine,
            # needed to determine if we can build fat binaries or not.
            macrelease = macver
            # Get the system version. Reading this plist is a documented
            # way to get the system version (see the documentation for
            # the Gestalt Manager)
            try:
                f = open('/System/Library/CoreServices/SystemVersion.plist')
            except IOError:
                # We're on a plain darwin box, fall back to the default
                # behaviour.
                pass
            else:
                try:
                    m = re.search(
                            r'<key>ProductUserVisibleVersion</key>\s*' +
                            r'<string>(.*?)</string>', f.read())
                    if m is not None:
                        macrelease = '.'.join(m.group(1).split('.')[:2])
                    # else: fall back to the default behaviour
                finally:
                    f.close()
        if not macver:
            macver = macrelease
        if macver:
            from distutils.sysconfig import get_config_vars
            release = macver
            osname = "macosx"
            if (macrelease + '.') >= '10.4.' and \
                    '-arch' in get_config_vars().get('CFLAGS', '').strip():
                # The universal build will build fat binaries, but not on
                # systems before 10.4
                #
                # Try to detect 4-way universal builds, those have machine-type
                # 'universal' instead of 'fat'.
                machine = 'fat'
                cflags = get_config_vars().get('CFLAGS')
                archs = re.findall('-arch\s+(\S+)', cflags)
                archs = tuple(sorted(set(archs)))
                if len(archs) == 1:
                    machine = archs[0]
                elif archs == ('i386', 'ppc'):
                    machine = 'fat'
                elif archs == ('i386', 'x86_64'):
                    machine = 'intel'
                elif archs == ('i386', 'ppc', 'x86_64'):
                    machine = 'fat3'
                elif archs == ('ppc64', 'x86_64'):
                    machine = 'fat64'
                elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                    machine = 'universal'
                else:
                    raise ValueError(
                        "Don't know machine value for archs=%r"%(archs,))
            elif machine == 'i386':
                # On OSX the machine type returned by uname is always the
                # 32-bit variant, even if the executable architecture is
                # the 64-bit variant
                if sys.maxint >= 2**32:
                    machine = 'x86_64'
            elif machine in ('PowerPC', 'Power_Macintosh'):
                # Pick a sane name for the PPC architecture.
                machine = 'ppc'
                # See 'i386' case
                if sys.maxint >= 2**32:
                    machine = 'ppc64'
    elif osname[:9] == "dragonfly":
        # Presumably strips a "-RELEASE"-style suffix from the version
        # string -- TODO confirm against DragonFly's uname output.
        release = str.split(release, "-")[0]
    return "%s-%s-%s" % (osname, release, machine)
# get_platform ()
def convert_path (pathname):
    """Return 'pathname' as a name that will work on the native filesystem,
    i.e. split it on '/' and put it back together again using the current
    directory separator.  Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem.  Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    if os.sep == '/':
        # Native separator is already '/'; nothing to convert.
        return pathname
    if not pathname:
        return pathname
    # Absolute or trailing-slash paths cannot be converted portably.
    if pathname[0] == '/':
        # Expression-form raise works on both Python 2 and 3; the old
        # 'raise E, v' statement form is a SyntaxError on Python 3.
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname[-1] == '/':
        raise ValueError("path '%s' cannot end with '/'" % pathname)
    # str.split behaves identically on Python 2 and 3 (string.split does not
    # exist on 3).
    paths = pathname.split('/')
    while '.' in paths:
        paths.remove('.')
    if not paths:
        return os.curdir
    return os.path.join(*paths)
def change_root (new_root, pathname):
    """Return 'pathname' with 'new_root' prepended.

    If 'pathname' is relative, this is equivalent to
    os.path.join(new_root, pathname).  Otherwise, it requires making
    'pathname' relative and then joining the two, which is tricky on
    DOS/Windows and Mac OS.  Raises DistutilsPlatformError for platforms
    this function knows nothing about.
    """
    if os.name == 'posix':
        if not os.path.isabs(pathname):
            return os.path.join(new_root, pathname)
        else:
            # Strip the leading '/' so the join does not discard new_root.
            return os.path.join(new_root, pathname[1:])
    elif os.name == 'nt':
        (drive, path) = os.path.splitdrive(pathname)
        if path[0] == '\\':
            path = path[1:]
        return os.path.join(new_root, path)
    elif os.name == 'os2':
        (drive, path) = os.path.splitdrive(pathname)
        if path[0] == os.sep:
            path = path[1:]
        return os.path.join(new_root, path)
    else:
        # Expression-form raise is valid on both Python 2 and 3; the old
        # 'raise E, v' statement form is a SyntaxError on Python 3.
        raise DistutilsPlatformError(
            "nothing known about platform '%s'" % os.name)
# Module-level guard so the environment fix-up in check_environ() only
# runs once per process.
_environ_checked = 0
def check_environ ():
    """Ensure that 'os.environ' has all the environment variables we
    guarantee that users can use in config files, command-line options,
    etc.  Currently this includes:
      HOME - user's home directory (Unix only)
      PLAT - description of the current platform, including hardware
             and OS (see 'get_platform()')
    """
    global _environ_checked
    if _environ_checked:
        return
    # Some Unix login contexts lack HOME (e.g. daemons); derive it from
    # the password database.  The pwd module only exists on POSIX.
    if os.name == 'posix' and 'HOME' not in os.environ:
        import pwd
        # Field 5 of the pwd entry is the home directory.
        os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
    if 'PLAT' not in os.environ:
        os.environ['PLAT'] = get_platform()
    _environ_checked = 1
def subst_vars (s, local_vars):
    """Perform shell/Perl-style variable substitution on 's'.

    Every occurrence of '$' followed by a name is considered a variable,
    and variable is substituted by the value found in the 'local_vars'
    dictionary, or in 'os.environ' if it's not in 'local_vars'.
    'os.environ' is first checked/augmented to guarantee that it contains
    certain values: see 'check_environ()'.  Raise ValueError for any
    variables not found in either 'local_vars' or 'os.environ'.
    """
    check_environ()
    def _subst (match, local_vars=local_vars):
        # Default-argument binding makes local_vars a plain local inside
        # the regex callback.
        var_name = match.group(1)
        if var_name in local_vars:
            return str(local_vars[var_name])
        else:
            return os.environ[var_name]
    try:
        return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
    except KeyError as var:
        # 'except E, v' is Python-2-only syntax; 'as' works on 2.6+ and 3.x.
        raise ValueError("invalid variable '$%s'" % var)
def grok_environment_error (exc, prefix="error: "):
    """Build a readable message, prefixed with 'prefix', from an
    EnvironmentError (IOError or OSError) exception object.  Copes with
    both the Python 1.5.1 and 1.5.2 exception styles, and with exception
    objects that carry no filename (as raised by two-file operations such
    as 'rename()' or 'link()').  Returns the message as a string.
    """
    # Python 1.5.2-style exceptions expose 'filename' and 'strerror'.
    has_attrs = hasattr(exc, 'filename') and hasattr(exc, 'strerror')
    if not has_attrs:
        # Old-style exception object: the last element is the message.
        return prefix + str(exc[-1])
    if not exc.filename:
        # Two-argument functions in the posix module don't record the
        # filename in the exception object.
        return prefix + "%s" % exc.strerror
    return prefix + "%s: %s" % (exc.filename, exc.strerror)
# Needed by 'split_quoted()' -- compiled lazily by _init_regex().
_wordchars_re = _squote_re = _dquote_re = None

def _init_regex():
    """Compile the word/quote regexes used by split_quoted()."""
    global _wordchars_re, _squote_re, _dquote_re
    _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
    _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
    _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')

def split_quoted (s):
    """Split a string up according to Unix shell-like rules for quotes and
    backslashes.  In short: words are delimited by spaces, as long as those
    spaces are not escaped by a backslash, or inside a quoted string.
    Single and double quotes are equivalent, and the quote characters can
    be backslash-escaped.  The backslash is stripped from any two-character
    escape sequence, leaving only the escaped character.  The quote
    characters are stripped from any quoted string.  Returns a list of
    words.
    """
    # This is a nice algorithm for splitting up a single string, since it
    # doesn't require character-by-character examination.  It was a little
    # bit of a brain-bender to get it working right, though...
    if _wordchars_re is None: _init_regex()
    # str methods replace the Python-2-only 'string' module calls; the
    # old 'raise E, v' statements below were SyntaxErrors on Python 3.
    s = s.strip()
    words = []
    pos = 0
    while s:
        m = _wordchars_re.match(s, pos)
        end = m.end()
        if end == len(s):
            words.append(s[:end])
            break
        if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
            words.append(s[:end])       # we definitely have a word delimiter
            s = s[end:].lstrip()
            pos = 0
        elif s[end] == '\\':            # preserve whatever is being escaped;
                                        # will become part of the current word
            s = s[:end] + s[end+1:]
            pos = end+1
        else:
            if s[end] == "'":           # slurp singly-quoted string
                m = _squote_re.match(s, end)
            elif s[end] == '"':         # slurp doubly-quoted string
                m = _dquote_re.match(s, end)
            else:
                raise RuntimeError(
                    "this can't happen (bad char '%c')" % s[end])
            if m is None:
                raise ValueError(
                    "bad string (mismatched %s quotes?)" % s[end])
            (beg, end) = m.span()
            # Drop the surrounding quote characters.
            s = s[:beg] + s[beg+1:end-1] + s[end:]
            pos = m.end() - 2
            if pos >= len(s):
                words.append(s)
                break
    return words
def execute (func, args, msg=None, verbose=0, dry_run=0):
    """Run 'func(*args)' -- an action that affects the outside world, e.g.
    by writing to the filesystem -- unless 'dry_run' is true.  Logs 'msg'
    (or a representation of the call when 'msg' is None) either way.
    """
    if msg is None:
        msg = "%s%r" % (func.__name__, args)
        # A one-element tuple reprs as "(x,)"; drop the stray comma.
        if msg.endswith(',)'):
            msg = msg[:-2] + ')'
    log.info(msg)
    if dry_run:
        return
    func(*args)
def strtobool (val):
    """Convert a string representation of truth to true (1) or false (0).

    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
    are 'n', 'no', 'f', 'false', 'off', and '0'.  Raises ValueError if
    'val' is anything else.
    """
    # str.lower() replaces the Python-2-only string.lower() call.
    val = val.lower()
    if val in ('y', 'yes', 't', 'true', 'on', '1'):
        return 1
    elif val in ('n', 'no', 'f', 'false', 'off', '0'):
        return 0
    else:
        # Expression-form raise is valid on both Python 2 and 3; the old
        # 'raise E, v' statement form is a SyntaxError on Python 3.
        raise ValueError("invalid truth value %r" % (val,))
def byte_compile (py_files,
                  optimize=0, force=0,
                  prefix=None, base_dir=None,
                  verbose=1, dry_run=0,
                  direct=None):
    """Byte-compile a collection of Python source files to either .pyc
    or .pyo files in the same directory.  'py_files' is a list of files
    to compile; any files that don't end in ".py" are silently skipped.
    'optimize' must be one of the following:
      0 - don't optimize (generate .pyc)
      1 - normal optimization (like "python -O")
      2 - extra optimization (like "python -OO")
    If 'force' is true, all files are recompiled regardless of
    timestamps.

    The source filename encoded in each bytecode file defaults to the
    filenames listed in 'py_files'; you can modify these with 'prefix' and
    'basedir'.  'prefix' is a string that will be stripped off of each
    source filename, and 'base_dir' is a directory name that will be
    prepended (after 'prefix' is stripped).  You can supply either or both
    (or neither) of 'prefix' and 'base_dir', as you wish.

    If 'dry_run' is true, doesn't actually do anything that would
    affect the filesystem.

    Byte-compilation is either done directly in this interpreter process
    with the standard py_compile module, or indirectly by writing a
    temporary script and executing it.  Normally, you should let
    'byte_compile()' figure out to use direct compilation or not (see
    the source for details).  The 'direct' flag is used by the script
    generated in indirect mode; unless you know what you're doing, leave
    it set to None.
    """
    # nothing is done if sys.dont_write_bytecode is True
    if sys.dont_write_bytecode:
        raise DistutilsByteCompileError('byte-compiling is disabled.')
    # First, if the caller didn't force us into direct or indirect mode,
    # figure out which mode we should be in.  We take a conservative
    # approach: choose direct mode *only* if the current interpreter is
    # in debug mode and optimize is 0.  If we're not in debug mode (-O
    # or -OO), we don't know which level of optimization this
    # interpreter is running with, so we can't do direct
    # byte-compilation and be certain that it's the right thing.  Thus,
    # always compile indirectly if the current interpreter is in either
    # optimize mode, or if either optimization level was requested by
    # the caller.
    if direct is None:
        direct = (__debug__ and optimize == 0)
    # "Indirect" byte-compilation: write a temporary script and then
    # run it with the appropriate flags.
    if not direct:
        try:
            from tempfile import mkstemp
            (script_fd, script_name) = mkstemp(".py")
        except ImportError:
            # Very old interpreters lack mkstemp; fall back to the less
            # secure mktemp and open the file by name below.
            from tempfile import mktemp
            (script_fd, script_name) = None, mktemp(".py")
        log.info("writing byte-compilation script '%s'", script_name)
        if not dry_run:
            if script_fd is not None:
                script = os.fdopen(script_fd, "w")
            else:
                script = open(script_name, "w")
            script.write("""\
from distutils.util import byte_compile
files = [
""")
            # XXX would be nice to write absolute filenames, just for
            # safety's sake (script should be more robust in the face of
            # chdir'ing before running it).  But this requires abspath'ing
            # 'prefix' as well, and that breaks the hack in build_lib's
            # 'byte_compile()' method that carefully tacks on a trailing
            # slash (os.sep really) to make sure the prefix here is "just
            # right".  This whole prefix business is rather delicate -- the
            # problem is that it's really a directory, but I'm treating it
            # as a dumb string, so trailing slashes and so forth matter.
            #py_files = map(os.path.abspath, py_files)
            #if prefix:
            #    prefix = os.path.abspath(prefix)
            script.write(string.join(map(repr, py_files), ",\n") + "]\n")
            # The generated script re-invokes this function in *direct*
            # mode inside a child interpreter running at the requested
            # optimization level.
            script.write("""
byte_compile(files, optimize=%r, force=%r,
             prefix=%r, base_dir=%r,
             verbose=%r, dry_run=0,
             direct=1)
""" % (optimize, force, prefix, base_dir, verbose))
            script.close()
        cmd = [sys.executable, script_name]
        if optimize == 1:
            cmd.insert(1, "-O")
        elif optimize == 2:
            cmd.insert(1, "-OO")
        spawn(cmd, dry_run=dry_run)
        execute(os.remove, (script_name,), "removing %s" % script_name,
                dry_run=dry_run)
    # "Direct" byte-compilation: use the py_compile module to compile
    # right here, right now.  Note that the script generated in indirect
    # mode simply calls 'byte_compile()' in direct mode, a weird sort of
    # cross-process recursion.  Hey, it works!
    else:
        from py_compile import compile
        for file in py_files:
            if file[-3:] != ".py":
                # This lets us be lazy and not filter filenames in
                # the "install_lib" command.
                continue
            # Terminology from the py_compile module:
            #   cfile - byte-compiled file
            #   dfile - purported source filename (same as 'file' by default)
            # '__debug__' is false under -O/-OO, so the output extension
            # matches the interpreter's optimization level (.pyc vs .pyo).
            cfile = file + (__debug__ and "c" or "o")
            dfile = file
            if prefix:
                if file[:len(prefix)] != prefix:
                    raise ValueError, \
                          ("invalid prefix: filename %r doesn't start with %r"
                           % (file, prefix))
                dfile = dfile[len(prefix):]
            if base_dir:
                dfile = os.path.join(base_dir, dfile)
            cfile_base = os.path.basename(cfile)
            if direct:
                if force or newer(file, cfile):
                    log.info("byte-compiling %s to %s", file, cfile_base)
                    if not dry_run:
                        compile(file, cfile, dfile)
                else:
                    log.debug("skipping byte-compilation of %s to %s",
                              file, cfile_base)
# byte_compile ()
def rfc822_escape (header):
    """Return a version of the string escaped for inclusion in an
    RFC-822 header, by ensuring there are 8 spaces after each newline.
    """
    # str methods replace the Python-2-only 'string' module calls; the
    # behavior is identical.
    lines = header.split('\n')
    return ('\n' + 8 * ' ').join(lines)
|
{
"content_hash": "162bbb563b5ee82bb944dd47575280ee",
"timestamp": "",
"source": "github",
"line_count": 569,
"max_line_length": 79,
"avg_line_length": 38.068541300527244,
"alnum_prop": 0.5751812012372467,
"repo_name": "Symmetry-Innovations-Pty-Ltd/Python-2.7-for-QNX6.5.0-x86",
"id": "7e598c55ae0edbd4bfe6a5fd5682224dc1ec7cdd",
"size": "21661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usr/pkg/lib/python2.7/distutils/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5696782"
},
{
"name": "C++",
"bytes": "532950"
},
{
"name": "Objective-C",
"bytes": "183763"
},
{
"name": "Python",
"bytes": "9562647"
},
{
"name": "Shell",
"bytes": "247"
}
],
"symlink_target": ""
}
|
"""deCONZ service tests."""
from asynctest import Mock, patch
import pytest
import voluptuous as vol
from homeassistant.components import deconz
from homeassistant.components.deconz.const import CONF_BRIDGE_ID
from .test_gateway import BRIDGEID, setup_deconz_integration
# Canned deCONZ REST API payloads used by test_service_refresh_devices to
# simulate a full state refresh from the bridge.

# One light group with a single scene and one member light.
GROUP = {
    "1": {
        "id": "Group 1 id",
        "name": "Group 1 name",
        "type": "LightGroup",
        "state": {},
        "action": {},
        "scenes": [{"id": "1", "name": "Scene 1"}],
        "lights": ["1"],
    }
}

# A single reachable light.
LIGHT = {
    "1": {
        "id": "Light 1 id",
        "name": "Light 1 name",
        "state": {"reachable": True},
        "type": "Light",
        "uniqueid": "00:00:00:00:00:00:00:01-00",
    }
}

# A single light-level (ZHALightLevel) sensor.
SENSOR = {
    "1": {
        "id": "Sensor 1 id",
        "name": "Sensor 1 name",
        "type": "ZHALightLevel",
        "state": {"lightlevel": 30000, "dark": False},
        "config": {"reachable": True},
        "uniqueid": "00:00:00:00:00:00:00:02-00",
    }
}
async def test_service_setup(hass):
    """Setting up services registers both deCONZ services."""
    services = deconz.services
    assert services.DECONZ_SERVICES not in hass.data
    target = "homeassistant.core.ServiceRegistry.async_register"
    with patch(target, return_value=Mock(True)) as register_mock:
        await services.async_setup_services(hass)
    assert hass.data[services.DECONZ_SERVICES] is True
    assert register_mock.call_count == 2
async def test_service_setup_already_registered(hass):
    """A second setup call must not register the services again."""
    hass.data[deconz.services.DECONZ_SERVICES] = True
    target = "homeassistant.core.ServiceRegistry.async_register"
    with patch(target, return_value=Mock(True)) as register_mock:
        await deconz.services.async_setup_services(hass)
    register_mock.assert_not_called()
async def test_service_unload(hass):
    """Unloading services removes both deCONZ services."""
    hass.data[deconz.services.DECONZ_SERVICES] = True
    target = "homeassistant.core.ServiceRegistry.async_remove"
    with patch(target, return_value=Mock(True)) as remove_mock:
        await deconz.services.async_unload_services(hass)
    assert hass.data[deconz.services.DECONZ_SERVICES] is False
    assert remove_mock.call_count == 2
async def test_service_unload_not_registered(hass):
    """Unloading without prior setup must not remove anything."""
    target = "homeassistant.core.ServiceRegistry.async_remove"
    with patch(target, return_value=Mock(True)) as remove_mock:
        await deconz.services.async_unload_services(hass)
    assert deconz.services.DECONZ_SERVICES not in hass.data
    remove_mock.assert_not_called()
async def test_configure_service_with_field(hass):
    """A service call with an explicit field PUTs to that path."""
    await setup_deconz_integration(hass)
    payload = {"on": True, "attr1": 10, "attr2": 20}
    service_data = {
        CONF_BRIDGE_ID: BRIDGEID,
        deconz.services.SERVICE_FIELD: "/light/2",
        deconz.services.SERVICE_DATA: payload,
    }
    with patch("pydeconz.DeconzSession.request", return_value=Mock(True)) as put_state:
        await hass.services.async_call(
            deconz.DOMAIN,
            deconz.services.SERVICE_CONFIGURE_DEVICE,
            service_data=service_data,
        )
        await hass.async_block_till_done()
    put_state.assert_called_with("put", "/light/2", json=payload)
async def test_configure_service_with_entity(hass):
    """A service call with an entity resolves the entity's deCONZ path."""
    gateway = await setup_deconz_integration(hass)
    gateway.deconz_ids["light.test"] = "/light/1"
    payload = {"on": True, "attr1": 10, "attr2": 20}
    service_data = {
        deconz.services.SERVICE_ENTITY: "light.test",
        deconz.services.SERVICE_DATA: payload,
    }
    with patch("pydeconz.DeconzSession.request", return_value=Mock(True)) as put_state:
        await hass.services.async_call(
            deconz.DOMAIN,
            deconz.services.SERVICE_CONFIGURE_DEVICE,
            service_data=service_data,
        )
        await hass.async_block_till_done()
    put_state.assert_called_with("put", "/light/1", json=payload)
async def test_configure_service_with_entity_and_field(hass):
    """Entity plus field PUTs to the concatenated path."""
    gateway = await setup_deconz_integration(hass)
    gateway.deconz_ids["light.test"] = "/light/1"
    payload = {"on": True, "attr1": 10, "attr2": 20}
    service_data = {
        deconz.services.SERVICE_ENTITY: "light.test",
        deconz.services.SERVICE_FIELD: "/state",
        deconz.services.SERVICE_DATA: payload,
    }
    with patch("pydeconz.DeconzSession.request", return_value=Mock(True)) as put_state:
        await hass.services.async_call(
            deconz.DOMAIN,
            deconz.services.SERVICE_CONFIGURE_DEVICE,
            service_data=service_data,
        )
        await hass.async_block_till_done()
    put_state.assert_called_with("put", "/light/1/state", json=payload)
async def test_configure_service_with_faulty_field(hass):
    """A field that does not start with '/' fails schema validation."""
    await setup_deconz_integration(hass)
    service_data = {
        deconz.services.SERVICE_FIELD: "light/2",
        deconz.services.SERVICE_DATA: {},
    }
    with pytest.raises(vol.Invalid):
        await hass.services.async_call(
            deconz.DOMAIN,
            deconz.services.SERVICE_CONFIGURE_DEVICE,
            service_data=service_data,
        )
        await hass.async_block_till_done()
async def test_configure_service_with_faulty_entity(hass):
    """An unknown entity must not trigger any request to the bridge."""
    await setup_deconz_integration(hass)
    service_data = {
        deconz.services.SERVICE_ENTITY: "light.nonexisting",
        deconz.services.SERVICE_DATA: {},
    }
    with patch("pydeconz.DeconzSession.request", return_value=Mock(True)) as put_state:
        await hass.services.async_call(
            deconz.DOMAIN,
            deconz.services.SERVICE_CONFIGURE_DEVICE,
            service_data=service_data,
        )
        await hass.async_block_till_done()
    put_state.assert_not_called()
async def test_service_refresh_devices(hass):
    """The refresh service repopulates the gateway's entity registry."""
    gateway = await setup_deconz_integration(hass)
    bridge_state = {"groups": GROUP, "lights": LIGHT, "sensors": SENSOR}
    with patch("pydeconz.DeconzSession.request", return_value=bridge_state):
        await hass.services.async_call(
            deconz.DOMAIN,
            deconz.services.SERVICE_DEVICE_REFRESH,
            service_data={CONF_BRIDGE_ID: BRIDGEID},
        )
        await hass.async_block_till_done()
    expected_ids = {
        "light.group_1_name": "/groups/1",
        "light.light_1_name": "/lights/1",
        "scene.group_1_name_scene_1": "/groups/1/scenes/1",
        "sensor.sensor_1_name": "/sensors/1",
    }
    assert gateway.deconz_ids == expected_ids
|
{
"content_hash": "86da82aebeb895bddd19402c6f5057c1",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 87,
"avg_line_length": 34.35175879396985,
"alnum_prop": 0.6391164423639555,
"repo_name": "Teagan42/home-assistant",
"id": "07985e4d9f4b388c7334a5380beb8e10a33abf79",
"size": "6836",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/deconz/test_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
import logging
import pytest
import sdk_install
import sdk_metrics
import sdk_plan
import sdk_utils
from tests import config
log = logging.getLogger(__name__)
foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
@pytest.fixture(scope="function", autouse=True)
def configure_package(configure_security):
    """Guarantee a clean service state before and after every test."""
    def _remove_service():
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)

    try:
        _remove_service()
        yield
    finally:
        _remove_service()
# Service options that deploy the "crash-loop" yaml with task-failure
# back-off enabled, so plans oscillate between STARTED and DELAYED.
back_off_crash_loop_options = {
    "service": {"yaml": "crash-loop", "sleep": 60, "task_failure_backoff": {"enabled": True}}
}
@pytest.mark.sanity
def test_default_plan_backoff():
    """The deploy plan should oscillate STARTED <-> DELAYED under back-off."""
    install_kwargs = dict(
        expected_running_tasks=0,
        additional_options=back_off_crash_loop_options,
        wait_for_deployment=False,
        wait_for_all_conditions=False,
    )
    sdk_install.install(config.PACKAGE_NAME, foldered_name, **install_kwargs)
    # State transition should be STARTING -> STARTED -> DELAYED in a loop.
    # As STARTING lasts for a very short duration, we test the switch
    # between the other two states.
    check_delayed_and_suppressed("deploy")
    sdk_plan.wait_for_plan_status(foldered_name, "deploy", "STARTED")
    check_delayed_and_suppressed("deploy")
    # We can't make further progress, this is the end of the test.
@pytest.mark.sanity
def test_recovery_backoff():
    """After deploy completes, the recovery plan enters back-off too."""
    install_kwargs = dict(
        expected_running_tasks=0,
        additional_options=back_off_crash_loop_options,
        wait_for_deployment=False,
        wait_for_all_conditions=False,
    )
    sdk_install.install(config.PACKAGE_NAME, foldered_name, **install_kwargs)
    check_delayed_and_suppressed("deploy")
    sdk_plan.force_complete_step(foldered_name, "deploy", "crash", "hello-0:[server]")
    # Deploy plan is complete. Recovery plan should take over. Recovery plan
    # is in COMPLETE by default, it should go from STARTED -> DELAYED.
    sdk_plan.wait_for_plan_status(foldered_name, "recovery", "STARTED")
    check_delayed_and_suppressed("recovery")
    sdk_plan.wait_for_plan_status(foldered_name, "recovery", "STARTED")
def check_delayed_and_suppressed(plan_name: str):
    """Wait until *plan_name* is DELAYED and the scheduler reports suppression."""
    sdk_plan.wait_for_plan_status(foldered_name, plan_name, "DELAYED")

    def _is_true(result):
        # The gauge should be a bool set to true once offers are suppressed.
        return isinstance(result, bool) and bool(result)

    sdk_metrics.wait_for_scheduler_gauge_value(foldered_name, "is_suppressed", _is_true)
|
{
"content_hash": "dc2465f6eb90f0dd4100f8382e061939",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 95,
"avg_line_length": 33.732394366197184,
"alnum_prop": 0.6910229645093946,
"repo_name": "mesosphere/dcos-commons",
"id": "71d967c128eb52f27664dc2e884cebae76c4c2c7",
"size": "2395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frameworks/helloworld/tests/test_backoff.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2657"
},
{
"name": "Go",
"bytes": "191929"
},
{
"name": "Groovy",
"bytes": "4663"
},
{
"name": "HTML",
"bytes": "70049"
},
{
"name": "Java",
"bytes": "3500463"
},
{
"name": "Python",
"bytes": "734088"
},
{
"name": "Shell",
"bytes": "69332"
}
],
"symlink_target": ""
}
|
"""
This module contains loss classes suitable for fitting.
It is not part of the public API.
Specific losses are used for regression, binary classification or multiclass
classification.
"""
# Goals:
# - Provide a common private module for loss functions/classes.
# - To be used in:
# - LogisticRegression
# - PoissonRegressor, GammaRegressor, TweedieRegressor
# - HistGradientBoostingRegressor, HistGradientBoostingClassifier
# - GradientBoostingRegressor, GradientBoostingClassifier
# - SGDRegressor, SGDClassifier
# - Replace link module of GLMs.
import numpy as np
from scipy.special import xlogy
from ._loss import (
CyHalfSquaredError,
CyAbsoluteError,
CyPinballLoss,
CyHalfPoissonLoss,
CyHalfGammaLoss,
CyHalfTweedieLoss,
CyHalfBinomialLoss,
CyHalfMultinomialLoss,
)
from .link import (
Interval,
IdentityLink,
LogLink,
LogitLink,
MultinomialLogit,
)
from ..utils._readonly_array_wrapper import ReadonlyArrayWrapper
from ..utils.stats import _weighted_percentile
# Note: The shape of raw_prediction for multiclass classifications are
# - GradientBoostingClassifier: (n_samples, n_classes)
# - HistGradientBoostingClassifier: (n_classes, n_samples)
#
# Note: Instead of inheritance like
#
# class BaseLoss(BaseLink, CyLossFunction):
# ...
#
# # Note: Naturally, we would inherit in the following order
# # class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss)
# # But because of https://github.com/cython/cython/issues/4350 we set BaseLoss as
# # the last one. This, of course, changes the MRO.
# class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss):
#
# we use composition. This way we improve maintainability by avoiding the above
# mentioned Cython edge case and have easier to understand code (which method calls
# which code).
class BaseLoss:
    """Base class for a loss function of 1-dimensional targets.

    Conventions:

    - y_true.shape = sample_weight.shape = (n_samples,)
    - y_pred.shape = raw_prediction.shape = (n_samples,)
    - If is_multiclass is true (multiclass classification), then
      y_pred.shape = raw_prediction.shape = (n_samples, n_classes)
      Note that this corresponds to the return value of decision_function.

    y_true, y_pred, sample_weight and raw_prediction must either be all
    float64 or all float32.
    gradient and hessian must be either both float64 or both float32.

    Note that y_pred = link.inverse(raw_prediction).

    Specific loss classes can inherit specific link classes to satisfy
    BaseLink's abstractmethods.

    Parameters
    ----------
    closs : CyLossFunction
        Cython implementation performing the actual point-wise computations.
    link : BaseLink
        Link function mapping raw_prediction to y_pred and back.
    n_classes : {None, int}
        The number of classes for classification, else None.

    Attributes
    ----------
    closs : CyLossFunction
    link : BaseLink
    interval_y_true : Interval
        Valid interval for y_true.
    interval_y_pred : Interval
        Valid interval for y_pred.
    differentiable : bool
        Indicates whether or not the loss function is differentiable in
        raw_prediction everywhere.
    need_update_leaves_values : bool
        Indicates whether decision trees in gradient boosting need to update
        leaf values after having been fit to the (negative) gradients.
    approx_hessian : bool
        Indicates whether the hessian is approximated or exact. If
        approximated, it should be larger or equal to the exact one.
    constant_hessian : bool
        Indicates whether the hessian is one for this loss.
    is_multiclass : bool
        Indicates whether n_classes > 2 is allowed.
    """
    # For decision trees:
    # This variable indicates whether the loss requires the leaves values to
    # be updated once the tree has been trained. The trees are trained to
    # predict a Newton-Raphson step (see grower._finalize_leaf()). But for
    # some losses (e.g. least absolute deviation) we need to adjust the tree
    # values to account for the "line search" of the gradient descent
    # procedure. See the original paper Greedy Function Approximation: A
    # Gradient Boosting Machine by Friedman
    # (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf) for the theory.
    need_update_leaves_values = False
    differentiable = True
    is_multiclass = False
    def __init__(self, closs, link, n_classes=None):
        self.closs = closs
        self.link = link
        # Subclasses flip these flags in their own __init__ when appropriate.
        self.approx_hessian = False
        self.constant_hessian = False
        self.n_classes = n_classes
        # Default: any real-valued target is valid; subclasses narrow this.
        self.interval_y_true = Interval(-np.inf, np.inf, False, False)
        # The valid prediction range is dictated by the link function.
        self.interval_y_pred = self.link.interval_y_pred
    def in_y_true_range(self, y):
        """Return True if y is in the valid range of y_true.

        Parameters
        ----------
        y : ndarray
        """
        return self.interval_y_true.includes(y)
    def in_y_pred_range(self, y):
        """Return True if y is in the valid range of y_pred.

        Parameters
        ----------
        y : ndarray
        """
        return self.interval_y_pred.includes(y)
    def loss(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        loss_out=None,
        n_threads=1,
    ):
        """Compute the pointwise loss value for each input.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        loss_out : None or C-contiguous array of shape (n_samples,)
            A location into which the result is stored. If None, a new array
            might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        loss : array of shape (n_samples,)
            Element-wise loss function.
        """
        if loss_out is None:
            loss_out = np.empty_like(y_true)
        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        # Wrap inputs read-only for the Cython kernels (the wrapper
        # presumably provides const-memoryview support — see
        # utils._readonly_array_wrapper).
        y_true = ReadonlyArrayWrapper(y_true)
        raw_prediction = ReadonlyArrayWrapper(raw_prediction)
        if sample_weight is not None:
            sample_weight = ReadonlyArrayWrapper(sample_weight)
        return self.closs.loss(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            loss_out=loss_out,
            n_threads=n_threads,
        )
    def loss_gradient(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        loss_out=None,
        gradient_out=None,
        n_threads=1,
    ):
        """Compute loss and gradient w.r.t. raw_prediction for each input.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        loss_out : None or C-contiguous array of shape (n_samples,)
            A location into which the loss is stored. If None, a new array
            might be created.
        gradient_out : None or C-contiguous array of shape (n_samples,) or array \
            of shape (n_samples, n_classes)
            A location into which the gradient is stored. If None, a new array
            might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        loss : array of shape (n_samples,)
            Element-wise loss function.
        gradient : array of shape (n_samples,) or (n_samples, n_classes)
            Element-wise gradients.
        """
        # Allocate whichever output buffer is missing, matching the dtype of
        # the one the caller did provide to avoid mixed precision.
        if loss_out is None:
            if gradient_out is None:
                loss_out = np.empty_like(y_true)
                gradient_out = np.empty_like(raw_prediction)
            else:
                loss_out = np.empty_like(y_true, dtype=gradient_out.dtype)
        elif gradient_out is None:
            gradient_out = np.empty_like(raw_prediction, dtype=loss_out.dtype)
        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
            gradient_out = gradient_out.squeeze(1)
        # Read-only wrappers for the Cython kernels.
        y_true = ReadonlyArrayWrapper(y_true)
        raw_prediction = ReadonlyArrayWrapper(raw_prediction)
        if sample_weight is not None:
            sample_weight = ReadonlyArrayWrapper(sample_weight)
        return self.closs.loss_gradient(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            loss_out=loss_out,
            gradient_out=gradient_out,
            n_threads=n_threads,
        )
    def gradient(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        gradient_out=None,
        n_threads=1,
    ):
        """Compute gradient of loss w.r.t raw_prediction for each input.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        gradient_out : None or C-contiguous array of shape (n_samples,) or array \
            of shape (n_samples, n_classes)
            A location into which the result is stored. If None, a new array
            might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        gradient : array of shape (n_samples,) or (n_samples, n_classes)
            Element-wise gradients.
        """
        if gradient_out is None:
            gradient_out = np.empty_like(raw_prediction)
        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
            gradient_out = gradient_out.squeeze(1)
        # Read-only wrappers for the Cython kernels.
        y_true = ReadonlyArrayWrapper(y_true)
        raw_prediction = ReadonlyArrayWrapper(raw_prediction)
        if sample_weight is not None:
            sample_weight = ReadonlyArrayWrapper(sample_weight)
        return self.closs.gradient(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=gradient_out,
            n_threads=n_threads,
        )
    def gradient_hessian(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        gradient_out=None,
        hessian_out=None,
        n_threads=1,
    ):
        """Compute gradient and hessian of loss w.r.t raw_prediction.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        gradient_out : None or C-contiguous array of shape (n_samples,) or array \
            of shape (n_samples, n_classes)
            A location into which the gradient is stored. If None, a new array
            might be created.
        hessian_out : None or C-contiguous array of shape (n_samples,) or array \
            of shape (n_samples, n_classes)
            A location into which the hessian is stored. If None, a new array
            might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        gradient : arrays of shape (n_samples,) or (n_samples, n_classes)
            Element-wise gradients.
        hessian : arrays of shape (n_samples,) or (n_samples, n_classes)
            Element-wise hessians.
        """
        # Allocate whichever output buffers are missing, mirroring the shape
        # and dtype of the one the caller did provide.
        if gradient_out is None:
            if hessian_out is None:
                gradient_out = np.empty_like(raw_prediction)
                hessian_out = np.empty_like(raw_prediction)
            else:
                gradient_out = np.empty_like(hessian_out)
        elif hessian_out is None:
            hessian_out = np.empty_like(gradient_out)
        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
            gradient_out = gradient_out.squeeze(1)
        if hessian_out.ndim == 2 and hessian_out.shape[1] == 1:
            hessian_out = hessian_out.squeeze(1)
        # Read-only wrappers for the Cython kernels.
        y_true = ReadonlyArrayWrapper(y_true)
        raw_prediction = ReadonlyArrayWrapper(raw_prediction)
        if sample_weight is not None:
            sample_weight = ReadonlyArrayWrapper(sample_weight)
        return self.closs.gradient_hessian(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=gradient_out,
            hessian_out=hessian_out,
            n_threads=n_threads,
        )
    def __call__(self, y_true, raw_prediction, sample_weight=None, n_threads=1):
        """Compute the weighted average loss.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        loss : float
            Mean or averaged loss function.
        """
        # Per-sample losses are computed unweighted; sample_weight enters
        # only through the weighted average below.
        return np.average(
            self.loss(
                y_true=y_true,
                raw_prediction=raw_prediction,
                sample_weight=None,
                loss_out=None,
                n_threads=n_threads,
            ),
            weights=sample_weight,
        )
    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        This can be used as initial estimates of predictions, i.e. before the
        first iteration in fit.

        Parameters
        ----------
        y_true : array-like of shape (n_samples,)
            Observed, true target values.
        sample_weight : None or array of shape (n_samples,)
            Sample weights.

        Returns
        -------
        raw_prediction : float or (n_classes,)
            Raw predictions of an intercept-only model.
        """
        # As default, take weighted average of the target over the samples
        # axis=0 and then transform into link-scale (raw_prediction).
        y_pred = np.average(y_true, weights=sample_weight, axis=0)
        eps = 10 * np.finfo(y_pred.dtype).eps
        # Clip the average into the valid y_pred interval; open bounds are
        # nudged inward by eps so the link of the clipped value stays inside.
        if self.interval_y_pred.low == -np.inf:
            a_min = None
        elif self.interval_y_pred.low_inclusive:
            a_min = self.interval_y_pred.low
        else:
            a_min = self.interval_y_pred.low + eps
        if self.interval_y_pred.high == np.inf:
            a_max = None
        elif self.interval_y_pred.high_inclusive:
            a_max = self.interval_y_pred.high
        else:
            a_max = self.interval_y_pred.high - eps
        if a_min is None and a_max is None:
            return self.link.link(y_pred)
        else:
            return self.link.link(np.clip(y_pred, a_min, a_max))
    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        """Calculate the term dropped in the loss.

        With this term added, the loss of perfect predictions is zero.
        """
        return np.zeros_like(y_true)
# Note: Naturally, we would inherit in the following order
# class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss)
# But because of https://github.com/cython/cython/issues/4350 we
# set BaseLoss as the last one. This, of course, changes the MRO.
class HalfSquaredError(BaseLoss):
    """Half squared error with identity link, for regression.

    Domain:
    y_true and y_pred any real number

    Link:
    y_pred = raw_prediction

    For a sample x_i the loss reads::

        loss(x_i) = 0.5 * (y_true_i - raw_prediction_i)**2

    The 0.5 factor simplifies the gradients, makes the hessian exactly one
    (consistent with LightGBM) and equals half the Normal deviance.
    """

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyHalfSquaredError(), link=IdentityLink())
        # The hessian of 0.5 * (y - raw)^2 is identically 1; per-sample
        # weights would rescale it, so it is only constant without them.
        self.constant_hessian = sample_weight is None
class AbsoluteError(BaseLoss):
    """Absolute error with identity link, for regression.

    Domain:
    y_true and y_pred any real number

    Link:
    y_pred = raw_prediction

    For a sample x_i the loss reads::

        loss(x_i) = |y_true_i - raw_prediction_i|
    """

    differentiable = False
    need_update_leaves_values = True

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyAbsoluteError(), link=IdentityLink())
        # |u| has no second derivative at u == 0, so the hessian reported by
        # the Cython class is only a surrogate.
        self.approx_hessian = True
        self.constant_hessian = sample_weight is None

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Return the (weighted) median of y_true over axis 0.

        The median is the minimizer of the absolute error, hence the best
        intercept-only raw prediction for this loss.
        """
        if sample_weight is not None:
            return _weighted_percentile(y_true, sample_weight, 50)
        return np.median(y_true, axis=0)
class PinballLoss(BaseLoss):
    """Quantile loss, also known as pinball loss, for regression.

    Domain:
    y_true and y_pred any real number
    quantile in (0, 1)

    Link:
    y_pred = raw_prediction

    For a sample x_i the loss reads::

        loss(x_i) = rho_{quantile}(y_true_i - raw_prediction_i)

    where ``rho_q(u) = u * q`` for ``u >= 0`` and ``u * (q - 1)`` for
    ``u < 0``.

    Note: 2 * PinballLoss(quantile=0.5) equals AbsoluteError().

    Additional Attributes
    ---------------------
    quantile : float
        The quantile to be estimated. Must be in range (0, 1).
    """

    differentiable = False
    need_update_leaves_values = True

    def __init__(self, sample_weight=None, quantile=0.5):
        # Quantiles of exactly 0 or 1 would make the loss one-sided.
        if quantile <= 0 or quantile >= 1:
            raise ValueError(
                "PinballLoss aka quantile loss only accepts "
                f"0 < quantile < 1; {quantile} was given."
            )
        super().__init__(
            closs=CyPinballLoss(quantile=float(quantile)),
            link=IdentityLink(),
        )
        # The pinball loss has a kink at u == 0, so the reported hessian is
        # only a surrogate.
        self.approx_hessian = True
        self.constant_hessian = sample_weight is None

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Return the (weighted) quantile of y_true over axis 0.

        The estimated quantile minimizes the pinball loss, hence it is the
        best intercept-only raw prediction.
        """
        percent = 100 * self.closs.quantile
        if sample_weight is not None:
            return _weighted_percentile(y_true, sample_weight, percent)
        return np.percentile(y_true, percent, axis=0)
class HalfPoissonLoss(BaseLoss):
    """Half Poisson deviance loss with log-link, for regression.

    Domain:
    y_true non-negative real number
    y_pred positive real number

    Link:
    y_pred = exp(raw_prediction)

    For a sample x_i, half the Poisson deviance reads::

        loss(x_i) = y_true_i * log(y_true_i/exp(raw_prediction_i))
                    - y_true_i + exp(raw_prediction_i)

    Up to terms constant in raw_prediction this equals the negative Poisson
    log-likelihood and simplifies the gradients. The constant term
    ``y_true_i * log(y_true_i) - y_true_i`` is dropped (see
    constant_to_optimal_zero).
    """

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyHalfPoissonLoss(), link=LogLink())
        # Count targets may be exactly zero, hence the closed lower bound.
        self.interval_y_true = Interval(0, np.inf, True, False)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        """Return the term dropped from the loss (zero at a perfect fit)."""
        # xlogy evaluates 0 * log(0) as 0, so y_true == 0 is safe.
        dropped = xlogy(y_true, y_true) - y_true
        if sample_weight is None:
            return dropped
        return dropped * sample_weight
class HalfGammaLoss(BaseLoss):
    """Half Gamma deviance loss with log-link, for regression.

    Domain:
    y_true and y_pred positive real numbers

    Link:
    y_pred = exp(raw_prediction)

    For a sample x_i, half the Gamma deviance reads::

        loss(x_i) = log(exp(raw_prediction_i)/y_true_i)
                    + y_true/exp(raw_prediction_i) - 1

    Up to terms constant in raw_prediction this is proportional to the
    negative Gamma log-likelihood and simplifies the gradients. The constant
    term ``-log(y_true_i) - 1`` is dropped (see constant_to_optimal_zero).
    """

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyHalfGammaLoss(), link=LogLink())
        # Strictly positive targets only: open lower bound at 0.
        self.interval_y_true = Interval(0, np.inf, False, False)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        """Return the term dropped from the loss (zero at a perfect fit)."""
        dropped = -np.log(y_true) - 1
        if sample_weight is None:
            return dropped
        return dropped * sample_weight
class HalfTweedieLoss(BaseLoss):
    """Half Tweedie deviance loss with log-link, for regression.

    Domain:
    y_true any real number            if power <= 0
    y_true non-negative real number   if 0 < power < 2
    y_true positive real number       if power >= 2
    y_pred positive real number
    power any real number

    Link:
    y_pred = exp(raw_prediction)

    For a sample x_i and p = power::

        loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p)
                    - y_true_i * exp(raw_prediction_i)**(1-p) / (1-p)
                    + exp(raw_prediction_i)**(2-p) / (2-p)

    The limits p = 0, 1, 2 recover HalfSquaredError (with a log link),
    HalfPoissonLoss and HalfGammaLoss. The dropped constant terms differ at
    those special powers, so the loss is not continuous in `power`. Although
    no Tweedie distribution exists for 0 < power < 1, the loss is still a
    strictly consistent scoring function for the expectation there.
    """

    def __init__(self, sample_weight=None, power=1.5):
        super().__init__(
            closs=CyHalfTweedieLoss(power=float(power)),
            link=LogLink(),
        )
        p = self.closs.power
        if p <= 0:
            # Any real target is valid.
            self.interval_y_true = Interval(-np.inf, np.inf, False, False)
        elif p < 2:
            # Zero targets are allowed.
            self.interval_y_true = Interval(0, np.inf, True, False)
        else:
            # Strictly positive targets only.
            self.interval_y_true = Interval(0, np.inf, False, False)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        """Return the term dropped from the loss (zero at a perfect fit)."""
        p = self.closs.power
        # At the special powers the Tweedie deviance degenerates to other
        # losses; delegate so the dropped terms agree with those classes.
        delegates = {0: HalfSquaredError, 1: HalfPoissonLoss, 2: HalfGammaLoss}
        if p in delegates:
            return delegates[p]().constant_to_optimal_zero(
                y_true=y_true, sample_weight=sample_weight
            )
        term = np.power(np.maximum(y_true, 0), 2 - p) / (1 - p) / (2 - p)
        if sample_weight is not None:
            term *= sample_weight
        return term
class HalfBinomialLoss(BaseLoss):
    """Half Binomial deviance loss with logit link, for binary classification.

    Also known as binary cross-entropy, log-loss and logistic loss.

    Domain:
    y_true in [0, 1], i.e. regression on the unit interval is allowed
    y_pred in (0, 1), boundaries excluded

    Link:
    y_pred = expit(raw_prediction)

    For a sample x_i, half the Binomial deviance is the negative
    log-likelihood of the Binomial/Bernoulli distribution::

        loss(x_i) = log(1 + exp(raw_pred_i)) - y_true_i * raw_pred_i

    See The Elements of Statistical Learning, by Hastie, Tibshirani,
    Friedman, section 4.4.1 (about logistic regression). The formulation
    works for classification, y = {0, 1}, as well as logistic regression,
    y = [0, 1]. Adding `constant_to_optimal_zero` yields half the
    Bernoulli/binomial deviance.
    """

    def __init__(self, sample_weight=None):
        super().__init__(
            closs=CyHalfBinomialLoss(),
            link=LogitLink(),
            n_classes=2,
        )
        # Both endpoints are valid targets: y_true may be exactly 0 or 1.
        self.interval_y_true = Interval(0, 1, True, True)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        """Return the term dropped from the loss (zero at a perfect fit)."""
        # Non-zero only when y_true lies strictly between 0 and 1; xlogy
        # handles the endpoints since 0 * log(0) evaluates to 0.
        dropped = xlogy(y_true, y_true) + xlogy(1 - y_true, 1 - y_true)
        if sample_weight is None:
            return dropped
        return dropped * sample_weight

    def predict_proba(self, raw_prediction):
        """Predict probabilities.

        Parameters
        ----------
        raw_prediction : array of shape (n_samples,) or (n_samples, 1)
            Raw prediction values (in link space).

        Returns
        -------
        proba : array of shape (n_samples, 2)
            Element-wise class probabilities.
        """
        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        n_samples = raw_prediction.shape[0]
        proba = np.empty((n_samples, 2), dtype=raw_prediction.dtype)
        proba[:, 1] = self.link.inverse(raw_prediction)
        proba[:, 0] = 1 - proba[:, 1]
        return proba
class HalfMultinomialLoss(BaseLoss):
    """Categorical cross-entropy loss, for multiclass classification.

    Domain:
    y_true in {0, 1, 2, 3, .., n_classes - 1}
    y_pred has n_classes elements, each element in (0, 1)

    Link:
    y_pred = softmax(raw_prediction)

    Note: We assume y_true to be already label encoded. The inverse link is
    softmax. But the full link function is the symmetric multinomial logit
    function.

    For a given sample x_i, the categorical cross-entropy loss is defined as
    the negative log-likelihood of the multinomial distribution, it
    generalizes the binary cross-entropy to more than 2 classes::

        loss_i = log(sum(exp(raw_pred_{i, k}), k=0..n_classes-1))
                 - sum(y_true_{i, k} * raw_pred_{i, k}, k=0..n_classes-1)

    See [1].

    Note that for the hessian, we calculate only the diagonal part in the
    classes: If the full hessian for classes k and l and sample i is H_i_k_l,
    we calculate H_i_k_k, i.e. k=l.

    Reference
    ---------
    .. [1] Simon, Noah, J. Friedman and T. Hastie.
        "A Blockwise Descent Algorithm for Group-penalized Multiresponse and
        Multinomial Regression."
        https://arxiv.org/pdf/1311.6529.pdf
    """
    is_multiclass = True
    def __init__(self, sample_weight=None, n_classes=3):
        super().__init__(
            closs=CyHalfMultinomialLoss(),
            link=MultinomialLogit(),
            n_classes=n_classes,
        )
        # Targets are non-negative (label-encoded class indices).
        self.interval_y_true = Interval(0, np.inf, True, False)
        # Probabilities strictly inside (0, 1).
        self.interval_y_pred = Interval(0, 1, False, False)
    def in_y_true_range(self, y):
        """Return True if y is in the valid range of y_true.

        On top of the interval check, y must contain whole numbers only
        (label-encoded classes).

        Parameters
        ----------
        y : ndarray
        """
        return self.interval_y_true.includes(y) and np.all(y.astype(int) == y)
    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        This is the softmax-link of the weighted average of the target, i.e.
        over the samples axis=0.
        """
        out = np.zeros(self.n_classes, dtype=y_true.dtype)
        eps = np.finfo(y_true.dtype).eps
        for k in range(self.n_classes):
            # Weighted class frequency = weighted mean of the indicator.
            out[k] = np.average(y_true == k, weights=sample_weight, axis=0)
            # Keep frequencies strictly inside (0, 1) for the link below.
            out[k] = np.clip(out[k], eps, 1 - eps)
        return self.link.link(out[None, :]).reshape(-1)
    def predict_proba(self, raw_prediction):
        """Predict probabilities.

        Parameters
        ----------
        raw_prediction : array of shape (n_samples, n_classes)
            Raw prediction values (in link space).

        Returns
        -------
        proba : array of shape (n_samples, n_classes)
            Element-wise class probabilities.
        """
        return self.link.inverse(raw_prediction)
    def gradient_proba(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        gradient_out=None,
        proba_out=None,
        n_threads=1,
    ):
        """Compute gradient and class probabilities for raw_prediction.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : array of shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        gradient_out : None or array of shape (n_samples, n_classes)
            A location into which the gradient is stored. If None, a new array
            might be created.
        proba_out : None or array of shape (n_samples, n_classes)
            A location into which the class probabilities are stored. If None,
            a new array might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        gradient : array of shape (n_samples, n_classes)
            Element-wise gradients.
        proba : array of shape (n_samples, n_classes)
            Element-wise class probabilities.
        """
        # Allocate whichever output buffer is missing, mirroring the shape
        # and dtype of the one the caller did provide.
        if gradient_out is None:
            if proba_out is None:
                gradient_out = np.empty_like(raw_prediction)
                proba_out = np.empty_like(raw_prediction)
            else:
                gradient_out = np.empty_like(proba_out)
        elif proba_out is None:
            proba_out = np.empty_like(gradient_out)
        # Read-only wrappers for the Cython kernels.
        y_true = ReadonlyArrayWrapper(y_true)
        raw_prediction = ReadonlyArrayWrapper(raw_prediction)
        if sample_weight is not None:
            sample_weight = ReadonlyArrayWrapper(sample_weight)
        return self.closs.gradient_proba(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=gradient_out,
            proba_out=proba_out,
            n_threads=n_threads,
        )
# Mapping from loss name to the implementing class.
_LOSSES = {
    "squared_error": HalfSquaredError,
    "absolute_error": AbsoluteError,
    "pinball_loss": PinballLoss,
    "poisson_loss": HalfPoissonLoss,
    "gamma_loss": HalfGammaLoss,
    "tweedie_loss": HalfTweedieLoss,
    "binomial_loss": HalfBinomialLoss,
    "multinomial_loss": HalfMultinomialLoss,
}
|
{
"content_hash": "d881ca05866b6e19c4bd4cb31b759f1f",
"timestamp": "",
"source": "github",
"line_count": 924,
"max_line_length": 87,
"avg_line_length": 35.15367965367965,
"alnum_prop": 0.6047965026784065,
"repo_name": "sergeyf/scikit-learn",
"id": "d883c0e1bd1906e1c96915c1638c8789f7691639",
"size": "32482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/_loss/loss.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "718114"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "9906683"
},
{
"name": "Shell",
"bytes": "49565"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import math
from sentry.utils.compat.mock import Mock
from sentry.utils.cursors import build_cursor, Cursor
def build_mock(**attrs):
    """Return a Mock with the given attributes set and a repr of *attrs*."""
    mock = Mock()
    for name, value in attrs.items():
        setattr(mock, name, value)
    mock.__repr__ = lambda self: repr(attrs)
    return mock
def test_build_cursor():
    """Page through three mock events one at a time and verify cursor state."""

    def item_key(key, for_prev=False):
        return int(math.floor(key.id))

    events = [
        build_mock(id=1.1, message="one"),
        build_mock(id=1.1, message="two"),
        build_mock(id=2.1, message="three"),
    ]
    cursor_kwargs = {"key": item_key, "limit": 1}

    def check(cursor, has_next, has_prev, expected):
        assert isinstance(cursor.next, Cursor)
        assert bool(cursor.next) == has_next
        assert isinstance(cursor.prev, Cursor)
        assert bool(cursor.prev) == has_prev
        assert list(cursor) == expected

    # First page: a next page exists, no previous page.
    cursor = build_cursor(events, **cursor_kwargs)
    check(cursor, True, False, [events[0]])
    # Middle page: both neighbours exist.
    cursor = build_cursor(events[1:], cursor=cursor.next, **cursor_kwargs)
    check(cursor, True, True, [events[1]])
    # Last page: only a previous page exists.
    cursor = build_cursor(events[2:], cursor=cursor.next, **cursor_kwargs)
    check(cursor, False, True, [events[2]])
|
{
"content_hash": "6b44feb15e647c70cd13c88519a806f8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 75,
"avg_line_length": 28.142857142857142,
"alnum_prop": 0.6722262509064539,
"repo_name": "beeftornado/sentry",
"id": "81e1f04e72fd4dead4ca46cf0d725aa1f7b8ec38",
"size": "1379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/utils/test_cursors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
}
|
import numpy as np
from ..core import SparseGP
from .. import likelihoods
from .. import kern
from ..inference.latent_function_inference import EPDTC
from copy import deepcopy
class SparseGPClassification(SparseGP):
    """
    Sparse Gaussian Process model for classification

    This is a thin wrapper around the sparse_GP class, with a set of sensible defaults

    :param X: input observations
    :param Y: observed values
    :param likelihood: a GPy likelihood, defaults to Bernoulli
    :param kernel: a GPy kernel, defaults to rbf+white
    :param inference_method: Latent function inference to use, defaults to EPDTC
    :type inference_method: :class:`GPy.inference.latent_function_inference.LatentFunctionInference`
    :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales)
    :type normalize_X: False|True
    :param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales)
    :type normalize_Y: False|True
    :rtype: model object
    """

    def __init__(self, X, Y=None, likelihood=None, kernel=None, Z=None, num_inducing=10, Y_metadata=None,
                 mean_function=None, inference_method=None, normalizer=False):
        if kernel is None:
            kernel = kern.RBF(X.shape[1])
        if likelihood is None:
            likelihood = likelihoods.Bernoulli()
        if Z is None:
            # Initialize the inducing inputs with a random subset of the data.
            i = np.random.permutation(X.shape[0])[:num_inducing]
            Z = X[i].copy()
        else:
            assert Z.shape[1] == X.shape[1]
        if inference_method is None:
            inference_method = EPDTC()
        SparseGP.__init__(self, X, Y, Z, kernel, likelihood, mean_function=mean_function,
                          inference_method=inference_method,
                          normalizer=normalizer, name='SparseGPClassification', Y_metadata=Y_metadata)

    @staticmethod
    def _from_model_copy(sparse_gp):
        # Rebuild a SparseGPClassification from the attributes of an existing
        # sparse GP model. Arguments are passed by keyword: the previous
        # positional call did not match __init__'s parameter order and passed
        # a `name` kwarg that __init__ does not accept (TypeError).
        return SparseGPClassification(
            sparse_gp.X,
            sparse_gp.Y,
            likelihood=sparse_gp.likelihood,
            kernel=sparse_gp.kern,
            Z=sparse_gp.Z,
            mean_function=sparse_gp.mean_function,
            inference_method=sparse_gp.inference_method,
        )

    @staticmethod
    def from_sparse_gp(sparse_gp):
        """Build a SparseGPClassification model from an existing sparse GP.

        The input model is deep-copied first, so the returned model shares no
        state with `sparse_gp`.

        :param sparse_gp: sparse GP model to copy the data and parameters from
        :return: a new :class:`SparseGPClassification` instance
        """
        sparse_gp = deepcopy(sparse_gp)
        # Bug fix: the previous implementation constructed the model but
        # never returned it.
        return SparseGPClassification._from_model_copy(sparse_gp)

    def to_dict(self, save_data=True):
        """
        Store the object into a json serializable dictionary

        :param boolean save_data: if true, it adds the data self.X and self.Y to the dictionary
        :return dict: json serializable dictionary containing the needed information to instantiate the object
        """
        model_dict = super(SparseGPClassification, self).to_dict(save_data)
        model_dict["class"] = "GPy.models.SparseGPClassification"
        return model_dict

    @staticmethod
    def _build_from_input_dict(input_dict, data=None):
        input_dict = SparseGPClassification._format_input_dict(input_dict, data)
        input_dict.pop('name', None)  # Name parameter not required by SparseGPClassification
        return SparseGPClassification(**input_dict)

    @staticmethod
    def from_dict(input_dict, data=None):
        """
        Instantiate an SparseGPClassification object using the information
        in input_dict (built by the to_dict method).

        :param data: It is used to provide X and Y for the case when the model
            was saved using save_data=False in to_dict method.
        :type data: tuple(:class:`np.ndarray`, :class:`np.ndarray`)
        """
        import GPy
        m = GPy.core.model.Model.from_dict(input_dict, data)
        sparse_gp = deepcopy(m)
        return SparseGPClassification._from_model_copy(sparse_gp)

    def save_model(self, output_filename, compress=True, save_data=True):
        """
        Method to serialize the model.

        :param string output_filename: Output file
        :param boolean compress: If true compress the file using zip
        :param boolean save_data: if true, it serializes the training data
            (self.X and self.Y)
        """
        # Bug fix: forward the caller's arguments instead of hard-coding
        # compress=True, save_data=True.
        self._save_model(output_filename, compress=compress, save_data=save_data)
class SparseGPClassificationUncertainInput(SparseGP):
    """
    Sparse Gaussian Process model for classification with uncertain inputs.

    This is a thin wrapper around the sparse_GP class, with a set of sensible defaults

    :param X: input observations
    :type X: np.ndarray (num_data x input_dim)
    :param X_variance: The uncertainty in the measurements of X (Gaussian variance, optional)
    :type X_variance: np.ndarray (num_data x input_dim)
    :param Y: observed values
    :param kernel: a GPy kernel, defaults to rbf+white
    :param Z: inducing inputs (optional, see note)
    :type Z: np.ndarray (num_inducing x input_dim) | None
    :param num_inducing: number of inducing points (ignored if Z is passed, see note)
    :type num_inducing: int
    :rtype: model object

    .. Note:: If no Z array is passed, num_inducing (default 10) points are selected from the data. Other wise num_inducing is ignored
    .. Note:: Multiple independent outputs are allowed using columns of Y
    """
    def __init__(self, X, X_variance, Y, kernel=None, Z=None, num_inducing=10, Y_metadata=None, normalizer=None):
        # Local import: NormalPosterior is only needed to wrap the uncertain
        # inputs at construction time.
        from GPy.core.parameterization.variational import NormalPosterior
        if kernel is None:
            kernel = kern.RBF(X.shape[1])
        # Classification likelihood is fixed to Bernoulli for this model.
        likelihood = likelihoods.Bernoulli()
        if Z is None:
            # Initialize the inducing inputs with a random subset of the data.
            i = np.random.permutation(X.shape[0])[:num_inducing]
            Z = X[i].copy()
        else:
            assert Z.shape[1] == X.shape[1]
        # Wrap inputs together with their variances as a variational posterior.
        X = NormalPosterior(X, X_variance)
        SparseGP.__init__(self, X, Y, Z, kernel, likelihood,
                          inference_method=EPDTC(),
                          name='SparseGPClassification', Y_metadata=Y_metadata, normalizer=normalizer)
    def parameters_changed(self):
        """Recompute psi statistics and run inference after a parameter update."""
        #Compute the psi statistics for N once, but don't sum out N in psi2
        self.psi0 = self.kern.psi0(self.Z, self.X)
        self.psi1 = self.kern.psi1(self.Z, self.X)
        self.psi2 = self.kern.psi2n(self.Z, self.X)
        # Precomputed psi statistics are handed to the inference method; the
        # inference result updates posterior, marginal likelihood and the
        # gradient dictionary in one call.
        self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.Z, self.likelihood, self.Y, self.Y_metadata, psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)
        self._update_gradients()
{
"content_hash": "650ce9bf6460bb8a1b4593044f8111a4",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 220,
"avg_line_length": 44.73103448275862,
"alnum_prop": 0.6708294788775825,
"repo_name": "esiivola/GPYgradients",
"id": "296b70f41e05e16345939511a65a43ec05d6035d",
"size": "6586",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "GPy/models/sparse_gp_classification.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2030"
},
{
"name": "C++",
"bytes": "1605"
},
{
"name": "Python",
"bytes": "2052350"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
}
|
import weakref
from warnings import warn
from fontTools.misc import arrayTools
from defcon.objects.base import BaseObject
from defcon.objects.contour import Contour
from defcon.objects.point import Point
from defcon.objects.component import Component
from defcon.objects.anchor import Anchor
from defcon.objects.lib import Lib
from defcon.objects.guideline import Guideline
from defcon.objects.image import Image
from defcon.objects.color import Color
from defcon.tools.representations import glyphBoundsRepresentationFactory, glyphControlPointBoundsRepresentationFactory
from defcon.pens.decomposeComponentPointPen import DecomposeComponentPointPen
def addRepresentationFactory(name, factory):
    """
    Deprecated module-level registration helper. Registers *factory* under
    *name* on the Glyph class, invalidated on every "Glyph.Changed".
    """
    warn("addRepresentationFactory is deprecated. Use the functions in defcon.__init__.", DeprecationWarning)
    Glyph.representationFactories[name] = dict(factory=factory, destructiveNotifications=["Glyph.Changed"])
def removeRepresentationFactory(name):
    """
    Deprecated module-level deregistration helper. Removes the factory
    registered under *name* from the Glyph class.
    """
    warn("removeRepresentationFactory is deprecated. Use the functions in defcon.__init__.", DeprecationWarning)
    del Glyph.representationFactories[name]
class Glyph(BaseObject):
    """
    This object represents a glyph and it contains contour, component, anchor
    and other assorted bits of data about the glyph.
    **This object posts the following notifications:**
    ============================
    Name
    ============================
    Glyph.Changed
    Glyph.NameWillChange
    Glyph.NameChanged
    Glyph.UnicodesChanged
    Glyph.WidthChanged
    Glyph.HeightChanged
    Glyph.NoteChanged
    Glyph.LibChanged
    Glyph.ImageChanged
    Glyph.ImageWillBeDeleted
    Glyph.ContourWillBeDeleted
    Glyph.ContoursChanged
    Glyph.ComponentWillBeDeleted
    Glyph.ComponentsChanged
    Glyph.AnchorWillBeDeleted
    Glyph.AnchorsChanged
    Glyph.GuidelineWillBeDeleted
    Glyph.GuidelinesChanged
    Glyph.MarkColorChanged
    ============================
    The Glyph object has list like behavior. This behavior allows you to interact
    with contour data directly. For example, to get a particular contour::
        contour = glyph[0]
    To iterate over all contours::
        for contour in glyph:
    To get the number of contours::
        contourCount = len(glyph)
    To interact with components or anchors in a similar way,
    use the ``components`` and ``anchors`` attributes.
    """
    # Notification posted whenever self.dirty is set.
    changeNotificationName = "Glyph.Changed"
    # Cached-representation factories; each entry maps a name to its factory
    # and the notifications that invalidate the cached value.
    representationFactories = {
        "defcon.glyph.bounds" : dict(
            factory=glyphBoundsRepresentationFactory,
            destructiveNotifications=("Glyph.ContoursChanged", "Glyph.ComponentsChanged", "Glyph.ComponentBaseGlyphDataChanged")
        ),
        "defcon.glyph.controlPointBounds" : dict(
            factory=glyphControlPointBoundsRepresentationFactory,
            destructiveNotifications=("Glyph.ContoursChanged", "Glyph.ComponentsChanged", "Glyph.ComponentBaseGlyphDataChanged")
        )
    }
def __init__(self, layer=None,
contourClass=None, pointClass=None, componentClass=None, anchorClass=None,
guidelineClass=None, libClass=None, imageClass=None):
layerSet = font = None
if layer is not None:
font = weakref.ref(layer.layerSet.font)
layerSet = weakref.ref(layer.layerSet)
layer = weakref.ref(layer)
self._font = font
self._layerSet = layerSet
self._layer = layer
super(Glyph, self).__init__()
self.beginSelfNotificationObservation()
self._isLoading = False
self._dirty = False
self._name = None
self._unicodes = []
self._width = 0
self._height = 0
self._note = None
self._image = None
self._identifiers = set()
self._shallowLoadedContours = None
self._contours = []
self._components = []
self._anchors = []
self._guidelines = []
self._lib = None
if contourClass is None:
contourClass = Contour
if pointClass is None:
pointClass = Point
if componentClass is None:
componentClass = Component
if anchorClass is None:
anchorClass = Anchor
if guidelineClass is None:
guidelineClass = Guideline
if libClass is None:
libClass = Lib
if imageClass is None:
imageClass = Image
self._contourClass = contourClass
self._pointClass = pointClass
self._componentClass = componentClass
self._anchorClass = anchorClass
self._guidelineClass = Guideline
self._libClass = libClass
self._imageClass = imageClass
def __del__(self):
super(Glyph, self).__del__()
self._contours = None
self._components = None
self._anchors = None
self._guidelines = None
self._lib = None
self._image = None
# --------------
# Parent Objects
# --------------
def getParent(self):
return self.font
def _get_font(self):
if self._font is None:
return None
return self._font()
font = property(_get_font, doc="The :class:`Font` that this glyph belongs to.")
def _get_layerSet(self):
if self._layerSet is None:
return None
return self._layerSet()
layerSet = property(_get_layerSet, doc="The :class:`LayerSet` that this glyph belongs to.")
def _get_layer(self):
if self._layer is None:
return None
return self._layer()
layer = property(_get_layer, doc="The :class:`Layer` that this glyph belongs to.")
# ----------------
# Basic Attributes
# ----------------
# identifiers
    def _get_identifiers(self):
        # Returns the live set; insert/remove methods mutate it directly.
        return self._identifiers
    identifiers = property(_get_identifiers, doc="Set of identifiers for the glyph. This is primarily for internal use.")
# name
def _set_name(self, value):
oldName = self._name
if oldName != value:
self.postNotification(notification="Glyph.NameWillChange", data=dict(oldValue=oldName, newValue=value))
self._name = value
self.postNotification(notification="Glyph.NameChanged", data=dict(oldValue=oldName, newValue=value))
self.dirty = True
def _get_name(self):
return self._name
name = property(_get_name, _set_name, doc="The name of the glyph. Setting this posts *GLyph.NameChanged* and *Glyph.NameChanged* notifications.")
# unicodes
def _get_unicodes(self):
return list(self._unicodes)
def _set_unicodes(self, value):
oldValue = self.unicodes
if oldValue != value:
self._unicodes = value
self.postNotification(notification="Glyph.UnicodesChanged", data=dict(oldValue=oldValue, newValue=value))
self.dirty = True
unicodes = property(_get_unicodes, _set_unicodes, doc="The list of unicode values assigned to the glyph. Setting this posts *Glyph.UnicodesChanged* and *Glyph.Changed* notifications.")
def _get_unicode(self):
if self._unicodes:
return self._unicodes[0]
return None
def _set_unicode(self, value):
if value is None:
self.unicodes = []
else:
existing = list(self._unicodes)
if value in existing:
existing.pop(existing.index(value))
existing.insert(0, value)
self.unicodes = existing
unicode = property(_get_unicode, _set_unicode, doc="The primary unicode value for the glyph. This is the equivalent of ``glyph.unicodes[0]``. This is a convenience attribute that works with the ``unicodes`` attribute.")
# -------
# Metrics
# -------
# bounds
    def _get_bounds(self):
        # Cached via the representation machinery; invalidated on
        # contour/component changes.
        return self.getRepresentation("defcon.glyph.bounds")
    bounds = property(_get_bounds, doc="The bounds of the glyph's outline expressed as a tuple of form (xMin, yMin, xMax, yMax).")
    def _get_controlPointBounds(self):
        return self.getRepresentation("defcon.glyph.controlPointBounds")
    controlPointBounds = property(_get_controlPointBounds, doc="The control bounds of all points in the glyph. This only measures the point positions, it does not measure curves. So, curves without points at the extrema will not be properly measured.")
    # margins
    def _get_leftMargin(self):
        # None when the glyph has no outline.
        bounds = self.bounds
        if bounds is None:
            return None
        xMin, yMin, xMax, yMax = bounds
        return xMin
    def _set_leftMargin(self, value):
        # Silently a no-op when the glyph has no outline.
        bounds = self.bounds
        if bounds is None:
            return
        xMin, yMin, xMax, yMax = bounds
        oldValue = xMin
        diff = value - xMin
        if value != oldValue:
            # Shift the outline and widen/narrow the advance so the right
            # margin stays unchanged.
            self.move((diff, 0))
            self.width += diff
            self.dirty = True
    leftMargin = property(_get_leftMargin, _set_leftMargin, doc="The left margin of the glyph. Setting this post *Glyph.WidthChanged* and *Glyph.Changed* notifications among others.")
    def _get_rightMargin(self):
        bounds = self.bounds
        if bounds is None:
            return None
        xMin, yMin, xMax, yMax = bounds
        return self._width - xMax
    def _set_rightMargin(self, value):
        bounds = self.bounds
        if bounds is None:
            return
        xMin, yMin, xMax, yMax = bounds
        oldValue = self._width - xMax
        if oldValue != value:
            # Only the advance width changes; the outline stays in place.
            self.width = xMax + value
            self.dirty = True
    rightMargin = property(_get_rightMargin, _set_rightMargin, doc="The right margin of the glyph. Setting this posts *Glyph.WidthChanged* and *Glyph.Changed* notifications among others.")
# width
def _get_width(self):
return self._width
def _set_width(self, value):
oldValue = self._width
if oldValue != value:
self._width = value
self.postNotification(notification="Glyph.WidthChanged", data=dict(oldValue=oldValue, newValue=value))
self.dirty = True
width = property(_get_width, _set_width, doc="The width of the glyph. Setting this posts *Glyph.WidthChanged* and *Glyph.Changed* notifications.")
# height
def _get_height(self):
return self._height
def _set_height(self, value):
oldValue = self._height
if oldValue != value:
self._height = value
self.postNotification(notification="Glyph.HeightChanged", data=dict(oldValue=oldValue, newValue=value))
self.dirty = True
height = property(_get_height, _set_height, doc="The height of the glyph. Setting this posts *Glyph.HeightChanged* and *Glyph.Changed* notifications.")
# ----------------------
# Lib Wrapped Attributes
# ----------------------
# mark color
    def _get_markColor(self):
        # The mark color lives in the lib under the standard UFO key.
        value = self.lib.get("public.markColor")
        if value is not None:
            value = Color(value)
        return value
    def _set_markColor(self, value):
        # convert to a color object
        if value is not None:
            value = Color(value)
        # don't write if there is no change
        oldValue = self.lib.get("public.markColor")
        if oldValue is not None:
            oldValue = Color(oldValue)
        if value == oldValue:
            return
        # remove
        if value is None:
            if "public.markColor" in self.lib:
                del self.lib["public.markColor"]
        # store
        else:
            self.lib["public.markColor"] = value
        # Glyph.Changed is posted indirectly by the lib mutation above.
        self.postNotification(notification="Glyph.MarkColorChanged", data=dict(oldValue=oldValue, newValue=value))
    markColor = property(_get_markColor, _set_markColor, doc="The glyph's mark color. When setting, the value can be a UFO color string, a sequence of (r, g, b, a) or a :class:`Color` object. Setting this posts *Glyph.MarkColorChanged* and *Glyph.Changed* notifications.")
# -------
# Pen API
# -------
    def draw(self, pen):
        """
        Draw the glyph with **pen**.
        """
        # Adapt the (segment) pen to the point-pen protocol and reuse
        # drawPoints, which is the canonical drawing path.
        from robofab.pens.adapterPens import PointToSegmentPen
        pointPen = PointToSegmentPen(pen)
        self.drawPoints(pointPen)
    def drawPoints(self, pointPen):
        """
        Draw the glyph with **pointPen**.
        """
        # When contours are still in shallow (raw) form, replay them directly
        # without instantiating Contour objects.
        if self._shallowLoadedContours:
            self._drawShallowLoadedContours(pointPen, self._shallowLoadedContours)
        else:
            for contour in self._contours:
                contour.drawPoints(pointPen)
        for component in self._components:
            component.drawPoints(pointPen)
    def _drawShallowLoadedContours(self, pointPen, contours):
        # Replay raw contour data (dicts with optional "identifier" and a
        # "points" list of (args, kwargs) pairs) into a point pen.
        for contour in contours:
            try:
                pointPen.beginPath(identifier=contour.get("identifier"))
            except TypeError:
                # Old pens don't accept the identifier kwarg.
                pointPen.beginPath()
                warn("The beginPath method needs an identifier kwarg. The contour's identifier value has been discarded.", DeprecationWarning)
            for args, kwargs in contour["points"]:
                pointPen.addPoint(*args, **kwargs)
            pointPen.endPath()
    def getPen(self):
        """
        Get the pen used to draw into this glyph.
        """
        from robofab.pens.adapterPens import SegmentToPointPen
        return SegmentToPointPen(self.getPointPen())
    def getPointPen(self):
        """
        Get the point pen used to draw into this glyph.
        """
        from defcon.pens.glyphObjectPointPen import GlyphObjectPointPen, GlyphObjectLoadingPointPen
        # During loading, collect raw contour data for lazy instantiation.
        if self._isLoading:
            self._shallowLoadedContours = []
            return GlyphObjectLoadingPointPen(self)
        else:
            return GlyphObjectPointPen(self)
# --------
# Contours
# --------
    def _get_contourClass(self):
        return self._contourClass
    contourClass = property(_get_contourClass, doc="The class used for contours.")
    def _get_pointClass(self):
        return self._pointClass
    pointClass = property(_get_pointClass, doc="The class used for points.")
    def _fullyLoadShallowLoadedContours(self):
        # Promote the raw contour data collected during loading into real
        # Contour objects. Notifications are suppressed and the dirty flag
        # restored so lazy loading is invisible to observers.
        if not self._shallowLoadedContours:
            self._shallowLoadedContours = None
            return
        self.disableNotifications()
        contours = list(self._shallowLoadedContours)
        # Clear before replaying so getPointPen returns a normal pen.
        self._shallowLoadedContours = None
        dirty = self.dirty
        pointPen = self.getPointPen()
        self._drawShallowLoadedContours(pointPen, contours)
        self.dirty = dirty
        self.enableNotifications()
    def instantiateContour(self):
        # Factory for contours bound to this glyph's configured classes.
        contour = self._contourClass(
            glyph=self,
            pointClass=self.pointClass
        )
        return contour
    def beginSelfContourNotificationObservation(self, contour):
        # No dispatcher means notifications are disabled entirely.
        if contour.dispatcher is None:
            return
        contour.addObserver(observer=self, methodName="_contourChanged", notification="Contour.Changed")
    def endSelfContourNotificationObservation(self, contour):
        if contour.dispatcher is None:
            return
        contour.removeObserver(observer=self, notification="Contour.Changed")
        contour.endSelfNotificationObservation()
    def appendContour(self, contour):
        """
        Append **contour** to the glyph. The contour must be a defcon
        :class:`Contour` object or a subclass of that object. An error
        will be raised if the contour's identifier or a point identifier
        conflicts with any of the identifiers within the glyph.
        This will post a *Glyph.Changed* notification.
        """
        # len(self) forces shallow contours to be fully loaded first.
        self.insertContour(len(self), contour)
    def insertContour(self, index, contour):
        """
        Insert **contour** into the glyph at index. The contour
        must be a defcon :class:`Contour` object or a subclass
        of that object. An error will be raised if the contour's
        identifier or a point identifier conflicts with any of
        the identifiers within the glyph.
        This will post a *Glyph.Changed* notification.
        """
        assert contour not in self
        assert contour.glyph in (self, None), "This contour belongs to another glyph."
        if contour.glyph is None:
            # Claim the contour: register its identifiers (and its points'),
            # then take ownership.
            identifiers = self._identifiers
            if contour.identifier is not None:
                assert contour.identifier not in identifiers
                identifiers.add(contour.identifier)
            for point in contour:
                if point.identifier is not None:
                    assert point.identifier not in identifiers
                    identifiers.add(point.identifier)
            contour.glyph = self
            contour.beginSelfNotificationObservation()
        self.beginSelfContourNotificationObservation(contour)
        self._contours.insert(index, contour)
        self.postNotification(notification="Glyph.ContoursChanged")
        self.dirty = True
    def removeContour(self, contour):
        """
        Remove **contour** from the glyph.
        This will post a *Glyph.Changed* notification.

        Raises IndexError if the contour is not in the glyph.
        """
        if contour not in self:
            raise IndexError("contour not in glyph")
        self.postNotification(notification="Glyph.ContourWillBeDeleted", data=dict(object=contour))
        # Release the contour's (and its points') identifiers.
        identifiers = self._identifiers
        if contour.identifier is not None:
            identifiers.remove(contour.identifier)
        for point in contour:
            if point.identifier is not None:
                identifiers.remove(point.identifier)
        self._contours.remove(contour)
        self.endSelfContourNotificationObservation(contour)
        self.postNotification(notification="Glyph.ContoursChanged")
        self.dirty = True
    def contourIndex(self, contour):
        """
        Get the index for **contour**.
        """
        return self._getContourIndex(contour)
    def clearContours(self):
        """
        Clear all contours from the glyph.
        This posts a *Glyph.Changed* notification.
        """
        self.holdNotifications()
        # Remove back-to-front so earlier indices stay valid.
        for contour in reversed(self):
            self.removeContour(contour)
        self.releaseHeldNotifications()
# ----------
# Components
# ----------
    def _get_componentClass(self):
        return self._componentClass
    componentClass = property(_get_componentClass, doc="The class used for components.")
    def _get_components(self):
        # Return a copy so callers cannot mutate internal state.
        return list(self._components)
    components = property(_get_components, doc="An ordered list of :class:`Component` objects stored in the glyph.")
    def instantiateComponent(self):
        # Factory for components bound to this glyph's configured class.
        component = self._componentClass(
            glyph=self
        )
        return component
    def beginSelfComponentNotificationObservation(self, component):
        if component.dispatcher is None:
            return
        component.addObserver(observer=self, methodName="_componentChanged", notification="Component.Changed")
        component.addObserver(observer=self, methodName="_componentBaseGlyphDataChanged", notification="Component.BaseGlyphDataChanged")
def endSelfComponentNotificationObservation(self, component):
if component.dispatcher is None:
return
component.removeObserver(observer=self, notification="Component.Changed")
component.endSelfNotificationObservation()
    def appendComponent(self, component):
        """
        Append **component** to the glyph. The component must be a defcon
        :class:`Component` object or a subclass of that object. An error
        will be raised if the component's identifier conflicts with any of
        the identifiers within the glyph.
        This will post a *Glyph.Changed* notification.
        """
        self.insertComponent(len(self._components), component)
    def insertComponent(self, index, component):
        """
        Insert **component** into the glyph at index. The component
        must be a defcon :class:`Component` object or a subclass
        of that object. An error will be raised if the component's
        identifier conflicts with any of the identifiers within
        the glyph.
        This will post a *Glyph.Changed* notification.
        """
        assert component not in self._components
        assert component.glyph in (self, None), "This component belongs to another glyph."
        if component.glyph is None:
            # Claim the component: register its identifier, take ownership.
            if component.identifier is not None:
                identifiers = self._identifiers
                assert component.identifier not in identifiers
                identifiers.add(component.identifier)
            component.glyph = self
            component.beginSelfNotificationObservation()
        self.beginSelfComponentNotificationObservation(component)
        self._components.insert(index, component)
        self.postNotification(notification="Glyph.ComponentsChanged")
        self.dirty = True
    def removeComponent(self, component):
        """
        Remove **component** from the glyph.
        This will post a *Glyph.Changed* notification.
        """
        self.postNotification(notification="Glyph.ComponentWillBeDeleted", data=dict(object=component))
        # Release the component's identifier.
        if component.identifier is not None:
            self._identifiers.remove(component.identifier)
        self._components.remove(component)
        self.endSelfComponentNotificationObservation(component)
        self.postNotification(notification="Glyph.ComponentsChanged")
        self.dirty = True
    def componentIndex(self, component):
        """
        Get the index for **component**.
        """
        return self._components.index(component)
    def clearComponents(self):
        """
        Clear all components from the glyph.
        This posts a *Glyph.Changed* notification.
        """
        self.holdNotifications()
        # Remove back-to-front so earlier indices stay valid.
        for component in reversed(self._components):
            self.removeComponent(component)
        self.releaseHeldNotifications()
    def decomposeComponent(self, component):
        """
        Decompose **component**. This will preserve the identifiers
        in the incoming contours and points unless there is a conflict.
        In that case, the conflicting incoming identifier will be discarded.
        This posts *Glyph.ComponentsChanged*, *Glyph.ContoursChanged*
        and *Glyph.Changed* notifications.
        """
        self.holdNotifications()
        layer = self.layer
        # The pen resolves base glyphs from the layer and writes the
        # transformed outline back into this glyph.
        pointPen = DecomposeComponentPointPen(self, layer)
        self._decomposeComponent(component, layer, pointPen)
        self.releaseHeldNotifications()
        self.postNotification(notification="Glyph.ContoursChanged")
    def decomposeAllComponents(self):
        """
        Decompose all components in this glyph. This will preserve the
        identifiers in the incoming contours and points unless there is a
        conflict. In that case, the conflicting incoming identifier will
        be discarded.
        This posts *Glyph.ComponentsChanged*, *Glyph.ContoursChanged*
        and *Glyph.Changed* notifications.
        """
        if not self.components:
            return
        self.holdNotifications()
        layer = self.layer
        pointPen = DecomposeComponentPointPen(self, layer)
        # self.components is a copy, so removal inside the loop is safe.
        for component in self.components:
            self._decomposeComponent(component, layer, pointPen)
        self.releaseHeldNotifications()
        self.postNotification(notification="Glyph.ContoursChanged")
    def _decomposeComponent(self, component, layer, pointPen):
        # Draw the component's flattened outline into this glyph, then
        # remove the component itself.
        pointPen.skipConflictingIdentifiers = True
        component.drawPoints(pointPen)
        self.removeComponent(component)
# -------
# Anchors
# -------
    def _get_anchorClass(self):
        return self._anchorClass
    anchorClass = property(_get_anchorClass, doc="The class used for anchors.")
    def _get_anchors(self):
        # Return a copy so callers cannot mutate internal state.
        return list(self._anchors)
    def _set_anchors(self, value):
        # Replace all anchors; clearAnchors + appendAnchor post the
        # appropriate notifications.
        self.clearAnchors()
        self.holdNotifications()
        for anchor in value:
            self.appendAnchor(anchor)
        self.releaseHeldNotifications()
    anchors = property(_get_anchors, _set_anchors, doc="An ordered list of :class:`Anchor` objects stored in the glyph.")
    def instantiateAnchor(self, anchorDict=None):
        # Factory for anchors; anchorDict may seed the anchor's data.
        anchor = self._anchorClass(
            glyph=self,
            anchorDict=anchorDict
        )
        return anchor
    def beginSelfAnchorNotificationObservation(self, anchor):
        if anchor.dispatcher is None:
            return
        anchor.addObserver(observer=self, methodName="_anchorChanged", notification="Anchor.Changed")
    def endSelfAnchorNotificationObservation(self, anchor):
        if anchor.dispatcher is None:
            return
        anchor.removeObserver(observer=self, notification="Anchor.Changed")
        anchor.endSelfNotificationObservation()
    def appendAnchor(self, anchor):
        """
        Append **anchor** to the glyph. The anchor must be a defcon
        :class:`Anchor` object or a subclass of that object. An error
        will be raised if the anchor's identifier conflicts with any of
        the identifiers within the glyph.
        This will post a *Glyph.Changed* notification.
        """
        self.insertAnchor(len(self._anchors), anchor)
    def insertAnchor(self, index, anchor):
        """
        Insert **anchor** into the glyph at index. The anchor
        must be a defcon :class:`Anchor` object or a subclass
        of that object. An error will be raised if the anchor's
        identifier conflicts with any of the identifiers within
        the glyph.
        This will post a *Glyph.Changed* notification.
        """
        try:
            assert anchor.glyph != self
        except AttributeError:
            # A plain dict has no .glyph attribute; it is converted below.
            pass
        if not isinstance(anchor, self._anchorClass):
            anchor = self.instantiateAnchor(anchorDict=anchor)
        assert anchor.glyph in (self, None), "This anchor belongs to another glyph."
        if anchor.glyph is None:
            # Claim the anchor: register its identifier, take ownership.
            if anchor.identifier is not None:
                identifiers = self._identifiers
                assert anchor.identifier not in identifiers
                identifiers.add(anchor.identifier)
            anchor.glyph = self
            anchor.beginSelfNotificationObservation()
        self.beginSelfAnchorNotificationObservation(anchor)
        self._anchors.insert(index, anchor)
        self.postNotification(notification="Glyph.AnchorsChanged")
        self.dirty = True
    def removeAnchor(self, anchor):
        """
        Remove **anchor** from the glyph.
        This will post a *Glyph.Changed* notification.
        """
        self.postNotification(notification="Glyph.AnchorWillBeDeleted", data=dict(object=anchor))
        # Release the anchor's identifier.
        if anchor.identifier is not None:
            self._identifiers.remove(anchor.identifier)
        self._anchors.remove(anchor)
        self.endSelfAnchorNotificationObservation(anchor)
        self.postNotification(notification="Glyph.AnchorsChanged")
        self.dirty = True
    def anchorIndex(self, anchor):
        """
        Get the index for **anchor**.
        """
        return self._anchors.index(anchor)
    def clearAnchors(self):
        """
        Clear all anchors from the glyph.
        This posts a *Glyph.Changed* notification.
        """
        self.holdNotifications()
        # Remove back-to-front so earlier indices stay valid.
        for anchor in reversed(self._anchors):
            self.removeAnchor(anchor)
        self.releaseHeldNotifications()
# ----------
# Guidelines
# ----------
    def _get_guidelineClass(self):
        return self._guidelineClass
    guidelineClass = property(_get_guidelineClass, doc="The class used for guidelines.")
    def _get_guidelines(self):
        # Return a copy so callers cannot mutate internal state.
        return list(self._guidelines)
    def _set_guidelines(self, value):
        # Replace all guidelines; clearGuidelines + appendGuideline post
        # the appropriate notifications.
        self.clearGuidelines()
        self.holdNotifications()
        for guideline in value:
            self.appendGuideline(guideline)
        self.releaseHeldNotifications()
    guidelines = property(_get_guidelines, _set_guidelines, doc="An ordered list of :class:`Guideline` objects stored in the glyph. Setting this will post a *Glyph.Changed* notification along with any notifications posted by the :py:meth:`Glyph.appendGuideline` and :py:meth:`Glyph.clearGuidelines` methods.")
    def instantiateGuideline(self, guidelineDict=None):
        # Factory for guidelines; guidelineDict may seed the guideline's data.
        guideline = self._guidelineClass(
            glyph=self,
            guidelineDict=guidelineDict
        )
        return guideline
    def beginSelfGuidelineNotificationObservation(self, guideline):
        if guideline.dispatcher is None:
            return
        guideline.addObserver(observer=self, methodName="_guidelineChanged", notification="Guideline.Changed")
    def endSelfGuidelineNotificationObservation(self, guideline):
        if guideline.dispatcher is None:
            return
        guideline.removeObserver(observer=self, notification="Guideline.Changed")
        guideline.endSelfNotificationObservation()
    def appendGuideline(self, guideline):
        """
        Append **guideline** to the glyph. The guideline must be a defcon
        :class:`Guideline` object or a subclass of that object. An error
        will be raised if the guideline's identifier conflicts with any of
        the identifiers within the glyph.
        This will post a *Glyph.Changed* notification.
        """
        self.insertGuideline(len(self._guidelines), guideline)
def insertGuideline(self, index, guideline):
"""
Insert **guideline** into the glyph at index. The guideline
must be a defcon :class:`Guideline` object or a subclass
of that object. An error will be raised if the guideline's
identifier conflicts with any of the identifiers within
the glyph.
This will post a *Glyph.Changed* notification.
"""
try:
assert guideline.glyph != self
except AttributeError:
pass
if not isinstance(guideline, self._guidelineClass):
guideline = self.instantiateGuideline(guidelineDict=guideline)
assert guideline.glyph in (self, None), "This guideline belongs to another glyph."
if guideline.glyph is None:
assert guideline.fontInfo is None, "This guideline belongs to a font."
if guideline.glyph is None:
if guideline.identifier is not None:
identifiers = self._identifiers
assert guideline.identifier not in identifiers
if guideline.identifier is not None:
identifiers.add(guideline.identifier)
guideline.glyph = self
guideline.beginSelfNotificationObservation()
self.beginSelfGuidelineNotificationObservation(guideline)
self._guidelines.insert(index, guideline)
self.postNotification(notification="Glyph.GuidelinesChanged")
self.dirty = True
    def removeGuideline(self, guideline):
        """
        Remove **guideline** from the glyph.
        This will post a *Glyph.Changed* notification.
        """
        self.postNotification(notification="Glyph.GuidelineWillBeDeleted", data=dict(object=guideline))
        # Release the guideline's identifier.
        if guideline.identifier is not None:
            self._identifiers.remove(guideline.identifier)
        self._guidelines.remove(guideline)
        self.endSelfGuidelineNotificationObservation(guideline)
        self.postNotification(notification="Glyph.GuidelinesChanged")
        self.dirty = True
    def guidelineIndex(self, guideline):
        """
        Get the index for **guideline**.
        """
        return self._guidelines.index(guideline)
    def clearGuidelines(self):
        """
        Clear all guidelines from the glyph.
        This posts a *Glyph.Changed* notification.
        """
        self.holdNotifications()
        # Remove back-to-front so earlier indices stay valid.
        for guideline in reversed(self._guidelines):
            self.removeGuideline(guideline)
        self.releaseHeldNotifications()
# ----
# Note
# ----
    def _get_note(self):
        return self._note
    def _set_note(self, value):
        # NOTE(review): validation via assert is stripped under -O; callers
        # should not rely on it for input checking.
        if value is not None:
            assert isinstance(value, str)
        oldValue = self._note
        if oldValue != value:
            self._note = value
            self.postNotification(notification="Glyph.NoteChanged", data=dict(oldValue=oldValue, newValue=value))
            self.dirty = True
    note = property(_get_note, _set_note, doc="An arbitrary note for the glyph. Setting this will post a *Glyph.Changed* notification.")
# ---
# Lib
# ---
    def instantiateLib(self):
        # Factory for the lib bound to this glyph's configured class.
        lib = self._libClass(
            glyph=self
        )
        return lib
    def _get_lib(self):
        # Lazily create the lib and start observing it on first access.
        if self._lib is None:
            self._lib = self.instantiateLib()
            self.beginSelfLibNotificationObservation()
        return self._lib
    def _set_lib(self, value):
        # Replace the lib's contents in place; the lib object itself is kept.
        lib = self.lib
        lib.clear()
        lib.update(value)
        self.dirty = True
    lib = property(_get_lib, _set_lib, doc="The glyph's :class:`Lib` object. Setting this will clear any existing lib data and post a *Glyph.Changed* notification if data was replaced.")
    def beginSelfLibNotificationObservation(self):
        if self._lib.dispatcher is None:
            return
        self._lib.addObserver(observer=self, methodName="_libContentChanged", notification="Lib.Changed")
    def endSelfLibNotificationObservation(self):
        # The lib may never have been instantiated.
        if self._lib is None:
            return
        if self._lib.dispatcher is None:
            return
        self._lib.removeObserver(observer=self, notification="Lib.Changed")
        self._lib.endSelfNotificationObservation()
# -----
# Image
# -----
    def instantiateImage(self):
        # Factory for the image bound to this glyph's configured class.
        image = self._imageClass(
            glyph=self
        )
        return image
    def _get_image(self):
        # Lazily create the image and start observing it on first access.
        if self._image is None:
            self._image = self.instantiateImage()
            self.beginSelfImageNotificationObservation()
        return self._image
    def _set_image(self, image):
        # removing image
        if image is None:
            if self._image is not None:
                self.postNotification(notification="Glyph.ImageWillBeDeleted")
                self.endSelfImageNotificationObservation()
                self._image = None
                self.postNotification(notification="Glyph.ImageChanged")
                self.dirty = True
        # adding image
        else:
            if self._image is None:
                # create the image object
                # (accessing the property instantiates the image and begins
                # observation; the local is intentionally unused)
                i = self.image
            if set(self._image.items()) != set(image.items()):
                # Copy the incoming image's data field by field; expects the
                # UFO image dict keys (fileName, 2x3 transformation, color).
                self._image.fileName = image["fileName"]
                self._image.transformation = (image["xScale"], image["xyScale"], image["yxScale"], image["yScale"], image["xOffset"], image["yOffset"])
                self._image.color = image.get("color")
                self.postNotification(notification="Glyph.ImageChanged")
                self.dirty = True
    image = property(_get_image, _set_image, doc="The glyph's :class:`Image` object. Setting this posts *Glyph.ImageChanged* and *Glyph.Changed* notifications.")
    def beginSelfImageNotificationObservation(self):
        if self._image.dispatcher is None:
            return
        self._image.addObserver(observer=self, methodName="_imageChanged", notification="Image.Changed")
        self._image.addObserver(observer=self, methodName="_imageDataChanged", notification="Image.ImageDataChanged")
    def endSelfImageNotificationObservation(self):
        # The image may never have been instantiated.
        if self._image is None:
            return
        if self._image.dispatcher is None:
            return
        self._image.removeObserver(observer=self, notification="Image.Changed")
        self._image.removeObserver(observer=self, notification="Image.ImageDataChanged")
        self._image.endSelfNotificationObservation()
# -------------
# List Behavior
# -------------
def __contains__(self, contour):
if self._shallowLoadedContours is not None:
self._fullyLoadShallowLoadedContours()
return contour in self._contours
def __len__(self):
if self._shallowLoadedContours is not None:
self._fullyLoadShallowLoadedContours()
return len(self._contours)
def __iter__(self):
if self._shallowLoadedContours is not None:
self._fullyLoadShallowLoadedContours()
contourCount = len(self)
index = 0
while index < contourCount:
contour = self[index]
yield contour
index += 1
def __getitem__(self, index):
if self._shallowLoadedContours is not None:
self._fullyLoadShallowLoadedContours()
return self._contours[index]
def _getContourIndex(self, contour):
if self._shallowLoadedContours is not None:
self._fullyLoadShallowLoadedContours()
return self._contours.index(contour)
# ----------------
# Glyph Absorption
# ----------------
    def copyDataFromGlyph(self, glyph):
        """
        Copy data from **glyph**. This copies the following data:
        ==========
        width
        height
        unicodes
        note
        image
        contours
        components
        anchors
        guidelines
        lib
        ==========
        The name attribute is purposefully omitted.
        """
        from copy import deepcopy
        self.width = glyph.width
        self.height = glyph.height
        self.unicodes = list(glyph.unicodes)
        self.note = glyph.note
        # Guidelines and anchors are re-instantiated so they belong to
        # this glyph rather than the source glyph.
        self.guidelines = [self.instantiateGuideline(g) for g in glyph.guidelines]
        self.anchors = [self.instantiateAnchor(a) for a in glyph.anchors]
        self.image = glyph.image
        # Contours and components are copied by replaying the source
        # glyph's outline into this glyph's point pen.
        pointPen = self.getPointPen()
        glyph.drawPoints(pointPen)
        self.lib = deepcopy(glyph.lib)
# -----
# Clear
# -----
def clear(self):
"""
Clear all contours, components, anchors and guidelines from the glyph.
This posts a *Glyph.Changed* notification.
"""
self.holdNotifications()
self.clearContours()
self.clearComponents()
self.clearAnchors()
self.clearGuidelines()
self.releaseHeldNotifications()
# ----
# Move
# ----
def move(self, xxx_todo_changeme):
"""
Move all contours, components and anchors in the glyph
by **(x, y)**.
This posts a *Glyph.Changed* notification.
"""
(x, y) = xxx_todo_changeme
for contour in self:
contour.move((x, y))
for component in self._components:
component.move((x, y))
for anchor in self._anchors:
anchor.move((x, y))
# ------------
# Point Inside
# ------------
def pointInside(self, xxx_todo_changeme1, evenOdd=False):
"""
Returns a boolean indicating if **(x, y)** is in the
"black" area of the glyph.
"""
(x, y) = xxx_todo_changeme1
from fontTools.pens.pointInsidePen import PointInsidePen
pen = PointInsidePen(glyphSet=None, testPoint=(x, y), evenOdd=evenOdd)
self.draw(pen)
return pen.getResult()
# ----------------------
# Notification Callbacks
# ----------------------
    def endSelfNotificationObservation(self):
        # Tear down all observations held by this glyph and its children,
        # then drop back references to the containing objects.
        if self.dispatcher is None:
            # Observation never started (or already ended).
            return
        if self._contours:
            # Iterating the glyph loads shallow contours via __iter__.
            for contour in self:
                self.endSelfContourNotificationObservation(contour)
        for component in self.components:
            self.endSelfComponentNotificationObservation(component)
        for anchor in self.anchors:
            self.endSelfAnchorNotificationObservation(anchor)
        for guideline in self.guidelines:
            self.endSelfGuidelineNotificationObservation(guideline)
        self.endSelfLibNotificationObservation()
        self.endSelfImageNotificationObservation()
        super(Glyph, self).endSelfNotificationObservation()
        # Break container references so the glyph does not keep its
        # font/layer hierarchy alive.
        self._font = None
        self._layerSet = None
        self._layer = None
    # Child-object notification relays. Each reposts the child's change as
    # a glyph-level notification. The two "data changed" variants repost the
    # glyph's change notification instead of setting ``dirty``.
    def _imageDataChanged(self, notification):
        self.postNotification(notification="Glyph.ImageChanged")
        self.postNotification(notification=self.changeNotificationName)
    def _imageChanged(self, notification):
        self.postNotification(notification="Glyph.ImageChanged")
        self.dirty = True
    def _contourChanged(self, notification):
        self.postNotification(notification="Glyph.ContoursChanged")
        self.dirty = True
    def _componentChanged(self, notification):
        self.postNotification(notification="Glyph.ComponentsChanged")
        self.dirty = True
    def _componentBaseGlyphDataChanged(self, notification):
        self.postNotification(notification="Glyph.ComponentsChanged")
        self.postNotification(notification=self.changeNotificationName)
    def _anchorChanged(self, notification):
        self.postNotification(notification="Glyph.AnchorsChanged")
        self.dirty = True
    def _guidelineChanged(self, notification):
        self.postNotification(notification="Glyph.GuidelinesChanged")
        self.dirty = True
    def _libContentChanged(self, notification):
        self.postNotification(notification="Glyph.LibChanged")
        self.dirty = True
# -----------------------------
# Serialization/Deserialization
# -----------------------------
def getDataForSerialization(self, **kwargs):
from functools import partial
simple_get = partial(getattr, self)
serialize = lambda item: item.getDataForSerialization();
serialized_get = lambda key: serialize(simple_get(key))
serialized_list_get = lambda key: [serialize(item) for item in simple_get(key)]
getters = [
('name', simple_get),
('unicodes', simple_get),
('width', simple_get),
('height', simple_get),
('note', simple_get),
('components', serialized_list_get),
('anchors', serialized_list_get),
('guidelines', serialized_list_get),
('image', serialized_get),
('lib', serialized_get)
]
if self._shallowLoadedContours is not None:
getters.append( ('_shallowLoadedContours', simple_get) )
else:
getters.append( ('_contours', serialized_list_get) )
return self._serialize(getters, **kwargs)
def setDataFromSerialization(self, data):
from functools import partial, wraps
set_attr = partial(setattr, self) # key, data
def set_each(setter, drop_key=False):
_setter = lambda k, v: setter(v) if drop_key else setter
def wrapper(key, data):
for d in data:
_setter(key, d)
return wrapper
def single_init(factory, data):
item = factory()
item.setDataFromSerialization(data)
return item
def list_init(factory, data):
return [single_init(factory, childData) for childData in data]
def init_set(init, factory, setter):
def wrapper(key, data):
setter(key, init(factory, data))
return wrapper
# Clear all contours, components, anchors and guidelines from the glyph.
self.clear()
setters = (
('name', set_attr),
('unicodes', set_attr),
('width', set_attr),
('height', set_attr),
('note', set_attr),
('lib', set_attr),
('_shallowLoadedContours', set_attr),
('_contours', init_set(list_init, self.instantiateContour, set_each(self.appendContour, True))),
('components', init_set(list_init, self._componentClass, set_each(self.appendComponent, True))),
('guidelines', init_set(list_init, self._guidelineClass, set_attr)),
('anchors',init_set(list_init, self._anchorClass, set_attr)),
('image', init_set(single_init, self.instantiateImage, set_attr))
)
for key, setter in setters:
if key not in data:
continue
setter(key, data[key])
# -----
# Tests
# -----
def _testName():
"""
# set
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.name = 'RenamedGlyph'
>>> glyph.name
'RenamedGlyph'
>>> keys = font.keys()
>>> keys.sort()
>>> keys
['B', 'C', 'RenamedGlyph']
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.name = 'A'
>>> glyph.dirty
False
# get
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.name
'A'
"""
def _testUnicodes():
"""
# get
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.unicodes
[65]
# set
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.unicodes = [123, 456]
>>> glyph.unicodes
[123, 456]
>>> glyph.dirty
True
"""
def _testBounds():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.bounds
(0, 0, 700, 700)
>>> glyph = font['B']
>>> glyph.bounds
(0, 0, 700, 700)
>>> glyph = font['C']
>>> glyph.bounds
(0.0, 0.0, 700.0, 700.0)
"""
def _testControlPointBounds():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.controlPointBounds
(0, 0, 700, 700)
>>> glyph = font['B']
>>> glyph.controlPointBounds
(0, 0, 700, 700)
>>> glyph = font['C']
>>> glyph.controlPointBounds
(0.0, 0.0, 700.0, 700.0)
"""
def _testLeftMargin():
"""
# get
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.leftMargin
0
>>> glyph = font['B']
>>> glyph.leftMargin
0
# set
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.leftMargin = 100
>>> glyph.leftMargin
100
>>> glyph.width
800
>>> glyph.dirty
True
"""
def _testRightMargin():
"""
# get
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.rightMargin
0
# set
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.rightMargin = 100
>>> glyph.rightMargin
100
>>> glyph.width
800
>>> glyph.dirty
True
"""
def _testWidth():
"""
# get
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.width
700
# set
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.width = 100
>>> glyph.width
100
>>> glyph.dirty
True
"""
def _testHeight():
"""
# get
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.height
500
# set
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.height = 100
>>> glyph.height
100
>>> glyph.dirty
True
"""
def _testComponents():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['C']
>>> len(glyph.components)
2
"""
def _testAnchors():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> len(glyph.anchors)
2
"""
def _testMarkColor():
"""
>>> from defcon.objects.font import Font
>>> font = Font()
>>> font.newGlyph("A")
>>> glyph = font["A"]
>>> glyph.markColor
>>> glyph.markColor = "1,0,1,0"
>>> glyph.markColor
'1,0,1,0'
>>> glyph.markColor = "1,0,1,0"
>>> glyph.markColor
'1,0,1,0'
>>> glyph.markColor = None
>>> glyph.markColor
"""
def _testCopyFromGlyph():
"""
>>> source = Glyph()
>>> source.name = "a"
>>> source.width = 1
>>> source.height = 2
>>> source.unicodes = [3, 4]
>>> source.note = "test image"
>>> source.image = dict(fileName="test image")
>>> source.anchors = [dict(x=100, y=200, name="test anchor")]
>>> source.guidelines = [dict(x=10, y=20, name="test guideline")]
>>> source.lib = {"foo" : "bar"}
>>> pen = source.getPointPen()
>>> pen.beginPath()
>>> pen.addPoint((100, 200), segmentType="line")
>>> pen.addPoint((300, 400), segmentType="line")
>>> pen.endPath()
>>> component = Component()
>>> component.base = "b"
>>> source.appendComponent(component)
>>> dest = Glyph()
>>> dest.copyDataFromGlyph(source)
>>> source.name == dest.name
False
>>> source.width == dest.width
True
>>> source.height == dest.height
True
>>> source.unicodes == dest.unicodes
True
>>> source.note == dest.note
True
>>> source.image.items() == dest.image.items()
True
>>> [g.items() for g in source.guidelines] == [g.items() for g in dest.guidelines]
True
>>> [g.items() for g in source.anchors] == [g.items() for g in dest.anchors]
True
>>> len(source) == len(dest)
True
>>> len(source.components) == len(dest.components)
True
>>> sourceContours = []
>>> for contour in source:
... sourceContours.append([])
... for point in contour:
... sourceContours[-1].append((point.x, point.x, point.segmentType, point.name))
>>> destContours = []
>>> for contour in dest:
... destContours.append([])
... for point in contour:
... destContours[-1].append((point.x, point.x, point.segmentType, point.name))
>>> sourceContours == destContours
True
>>> source.components[0].baseGlyph == dest.components[0].baseGlyph
True
"""
def _testLen():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> len(glyph)
2
"""
def _testIter():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> for contour in glyph:
... print len(contour)
4
4
"""
def _testAppendContour():
"""
>>> from defcon.objects.contour import Contour
>>> glyph = Glyph()
>>> glyph.dirty = False
>>> contour = Contour()
>>> glyph.appendContour(contour)
>>> len(glyph)
1
>>> glyph.dirty
True
>>> contour.getParent() == glyph
True
"""
def _testAppendComponent():
"""
>>> from defcon.objects.component import Component
>>> glyph = Glyph()
>>> glyph.dirty = False
>>> component = Component()
>>> glyph.appendComponent(component)
>>> len(glyph.components)
1
>>> glyph.dirty
True
>>> component.getParent() == glyph
True
"""
def _testAppendAnchor():
"""
>>> from defcon.objects.anchor import Anchor
>>> glyph = Glyph()
>>> glyph.dirty = False
>>> anchor = Anchor()
>>> glyph.appendAnchor(anchor)
>>> len(glyph.anchors)
1
>>> glyph.dirty
True
>>> anchor.getParent() == glyph
True
"""
def _testAppendGuideline():
"""
>>> from defcon.objects.guideline import Guideline
>>> glyph = Glyph()
>>> glyph.dirty = False
>>> guideline = Guideline()
>>> glyph.appendGuideline(guideline)
>>> len(glyph.guidelines)
1
>>> glyph.dirty
True
>>> guideline.getParent() == glyph
True
"""
def _testRemoveContour():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> contour = glyph[0]
>>> glyph.removeContour(contour)
>>> contour in glyph._contours
False
>>> contour.getParent()
"""
def _testRemoveComponent():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['C']
>>> component = glyph.components[0]
>>> glyph.removeComponent(component)
>>> component in glyph.components
False
>>> component.getParent()
"""
def _testRemoveAnchor():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> anchor = glyph.anchors[0]
>>> glyph.removeAnchor(anchor)
>>> anchor in glyph.anchors
False
>>> anchor.getParent()
"""
def _testRemoveGuideline():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font.layers["Layer 1"]["A"]
>>> guideline = glyph.guidelines[0]
>>> glyph.removeGuideline(guideline)
>>> guideline in glyph.guidelines
False
>>> guideline.getParent()
"""
def _testContourIndex():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> contour = glyph[0]
>>> glyph.contourIndex(contour)
0
>>> contour = glyph[1]
>>> glyph.contourIndex(contour)
1
"""
def _testComponentIndex():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['C']
>>> component = glyph.components[0]
>>> glyph.componentIndex(component)
0
>>> component = glyph.components[1]
>>> glyph.componentIndex(component)
1
"""
def _testAnchorIndex():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> anchor = glyph.anchors[0]
>>> glyph.anchorIndex(anchor)
0
>>> anchor = glyph.anchors[1]
>>> glyph.anchorIndex(anchor)
1
"""
def _testClear():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> contour = glyph[0]
>>> anchor = glyph.anchors[0]
>>> glyph.clear()
>>> len(glyph)
0
>>> len(glyph.anchors)
0
>>> glyph = font['C']
>>> component = glyph.components[0]
>>> glyph.clear()
>>> len(glyph.components)
0
>>> glyph = font.layers["Layer 1"]["A"]
>>> guideline = glyph.guidelines[0]
>>> glyph.clear()
>>> len(glyph.guidelines)
0
>>> contour.getParent(), component.getParent(), anchor.getParent(), guideline.getParent()
(None, None, None, None)
>>> contour.dispatcher, component.dispatcher, anchor.dispatcher, guideline.dispatcher
(None, None, None, None)
"""
def _testClearContours():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.clearContours()
>>> len(glyph)
0
"""
def _testClearComponents():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['C']
>>> glyph.clearComponents()
>>> len(glyph.components)
0
"""
def _testClearAnchors():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.clearAnchors()
>>> len(glyph.anchors)
0
"""
def _testClearGuidelines():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.clearGuidelines()
>>> len(glyph.guidelines)
0
"""
def _testDecomposeComponents():
"""
>>> from defcon import Font
>>> font = Font()
>>> font.newGlyph("baseGlyph")
>>> baseGlyph = font["baseGlyph"]
>>> pointPen = baseGlyph.getPointPen()
>>> pointPen.beginPath(identifier="contour1")
>>> pointPen.addPoint((0, 0), "move", identifier="point1")
>>> pointPen.addPoint((0, 100), "line")
>>> pointPen.addPoint((100, 100), "line")
>>> pointPen.addPoint((100, 0), "line")
>>> pointPen.addPoint((0, 0), "line")
>>> pointPen.endPath()
>>> font.newGlyph("referenceGlyph")
>>> referenceGlyph = font["referenceGlyph"]
>>> pointPen = referenceGlyph.getPointPen()
>>> pointPen.addComponent("baseGlyph", (1, 0, 0, 1, 0, 0))
>>> len(referenceGlyph.components)
1
>>> len(referenceGlyph)
0
>>> referenceGlyph.decomposeAllComponents()
>>> len(referenceGlyph.components)
0
>>> len(referenceGlyph)
1
>>> referenceGlyph[0].identifier
'contour1'
>>> referenceGlyph[0][0].identifier
'point1'
>>> pointPen.addComponent("baseGlyph", (1, 0, 0, 1, 100, 100))
>>> len(referenceGlyph.components)
1
>>> len(referenceGlyph)
1
>>> component = referenceGlyph.components[0]
>>> referenceGlyph.decomposeComponent(component)
>>> len(referenceGlyph.components)
0
>>> len(referenceGlyph)
2
>>> referenceGlyph[0].identifier
'contour1'
>>> referenceGlyph[0][0].identifier
'point1'
>>> referenceGlyph[1].identifier
>>> referenceGlyph[1][0].identifier
>>> from defcon import Font
>>> font = Font()
# nested components
>>> font.newGlyph("baseGlyph")
>>> baseGlyph = font["baseGlyph"]
>>> pointPen = baseGlyph.getPointPen()
>>> pointPen.beginPath(identifier="contour1")
>>> pointPen.addPoint((0, 0), "move", identifier="point1")
>>> pointPen.addPoint((0, 100), "line")
>>> pointPen.addPoint((100, 100), "line")
>>> pointPen.addPoint((100, 0), "line")
>>> pointPen.addPoint((0, 0), "line")
>>> pointPen.endPath()
>>> font.newGlyph("referenceGlyph1")
>>> referenceGlyph1 = font["referenceGlyph1"]
>>> pointPen = referenceGlyph1.getPointPen()
>>> pointPen.addComponent("baseGlyph", (1, 0, 0, 1, 3, 6))
>>> font.newGlyph("referenceGlyph2")
>>> referenceGlyph2 = font["referenceGlyph2"]
>>> pointPen = referenceGlyph2.getPointPen()
>>> pointPen.addComponent("referenceGlyph1", (1, 0, 0, 1, 10, 20))
>>> referenceGlyph2.decomposeAllComponents()
>>> len(referenceGlyph2.components)
0
>>> len(referenceGlyph1.components)
1
>>> len(referenceGlyph2)
1
>>> referenceGlyph2.bounds
(13, 26, 113, 126)
"""
def _testMove():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> xMin, yMin, xMax, yMax = glyph.bounds
>>> glyph.move((100, 50))
>>> (xMin+100, yMin+50, xMax+100, yMax+50) == glyph.bounds
True
>>> glyph = font['C']
>>> xMin, yMin, xMax, yMax = glyph.bounds
#>>> glyph.move((100, 50))
#>>> (xMin+100, yMin+50, xMax+100, yMax+50) == glyph.bounds
#True
"""
def _testPointInside():
"""
>>> from defcon.test.testTools import getTestFontPath
>>> from defcon.objects.font import Font
>>> font = Font(getTestFontPath())
>>> glyph = font['A']
>>> glyph.pointInside((100, 100))
True
>>> glyph.pointInside((350, 350))
False
>>> glyph.pointInside((-100, -100))
False
"""
def _testIdentifiers():
"""
>>> glyph = Glyph()
>>> pointPen = glyph.getPointPen()
>>> pointPen.beginPath(identifier="contour 1")
>>> pointPen.addPoint((0, 0), identifier="point 1")
>>> pointPen.addPoint((0, 0), identifier="point 2")
>>> pointPen.endPath()
>>> pointPen.beginPath(identifier="contour 2")
>>> pointPen.endPath()
>>> pointPen.addComponent("A", (1, 1, 1, 1, 1, 1), identifier="component 1")
>>> pointPen.addComponent("A", (1, 1, 1, 1, 1, 1), identifier="component 2")
>>> guideline = Guideline()
>>> guideline.identifier = "guideline 1"
>>> glyph.appendGuideline(guideline)
>>> guideline = Guideline()
>>> guideline.identifier = "guideline 2"
>>> glyph.appendGuideline(guideline)
>>> for contour in glyph:
... contour.identifier
'contour 1'
'contour 2'
>>> for point in glyph[0]:
... point.identifier
'point 1'
'point 2'
>>> for component in glyph.components:
... component.identifier
'component 1'
'component 2'
>>> pointPen.beginPath(identifier="contour 1")
Traceback (most recent call last):
...
AssertionError
>>> pointPen.endPath()
>>> pointPen.beginPath()
>>> pointPen.addPoint((0, 0))
>>> pointPen.addPoint((0, 0), identifier="point 1")
Traceback (most recent call last):
...
AssertionError
>>> pointPen.endPath()
>>> pointPen.addComponent("A", (1, 1, 1, 1, 1, 1), identifier="component 1")
Traceback (most recent call last):
...
AssertionError
>>> g = Guideline()
>>> g.identifier = "guideline 1"
>>> glyph.appendGuideline(g)
Traceback (most recent call last):
...
AssertionError
>>> list(sorted(glyph.identifiers))
['component 1', 'component 2', 'contour 1', 'contour 2', 'guideline 1', 'guideline 2', 'point 1', 'point 2']
>>> glyph.removeContour(glyph[0])
>>> list(sorted(glyph.identifiers))
['component 1', 'component 2', 'contour 2', 'guideline 1', 'guideline 2']
>>> glyph.removeComponent(glyph.components[0])
>>> list(sorted(glyph.identifiers))
['component 2', 'contour 2', 'guideline 1', 'guideline 2']
>>> glyph.removeGuideline(glyph.guidelines[0])
>>> list(sorted(glyph.identifiers))
['component 2', 'contour 2', 'guideline 2']
"""
if __name__ == "__main__":
    # Run the doctests embedded in the _test* functions above.
    import doctest
    doctest.testmod()
|
{
"content_hash": "a9069870d75554811cf99cab6bc932b8",
"timestamp": "",
"source": "github",
"line_count": 1976,
"max_line_length": 309,
"avg_line_length": 32.24898785425101,
"alnum_prop": 0.6134894231372795,
"repo_name": "adrientetar/defcon",
"id": "f1f913fc47435741f236f04473d5b57d8657ea33",
"size": "63724",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3-ufo3",
"path": "Lib/defcon/objects/glyph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "657760"
}
],
"symlink_target": ""
}
|
def dictlist(x):
    """Print every key/value pair of mapping *x*, one pair per line."""
    for entry in x.items():
        print(*entry)
|
{
"content_hash": "0c8e22b1a27ccfa3930b3a6e6fbb92a5",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 31,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.5945945945945946,
"repo_name": "Sherlock-Holo/Python-3.5-learning",
"id": "b3fae72e279128c6059cf43cf73b6aa3be622843",
"size": "98",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oldlearn/函数/items.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12132"
}
],
"symlink_target": ""
}
|
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.interface import urls
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.options
from framework.lib.owtf_process import OWTFProcess
class InterfaceServer(OWTFProcess, BaseComponent):
    """Tornado-based web UI server, run as an OWTF process."""

    def pseudo_run(self):
        # Console logging would interleave with tornado's own log output.
        self.get_component("core").disable_console_logging()
        config = self.get_component("config")
        db_config = self.get_component("db_config")
        db = self.get_component("db")
        app_settings = dict(
            handlers=urls.get_handlers(),
            template_path=config.FrameworkConfigGet(
                'INTERFACE_TEMPLATES_DIR'),
            debug=False,
            gzip=True,
            static_path=config.FrameworkConfigGet('STATICFILES_DIR'),
            compiled_template_cache=False,
        )
        application = tornado.web.Application(**app_settings)
        self.server = tornado.httpserver.HTTPServer(application)
        try:
            ui_port = int(config.FrameworkConfigGet("UI_SERVER_PORT"))
            bind_addr = config.FrameworkConfigGet("SERVER_ADDR")
            self.server.bind(ui_port, address=bind_addr)
            log_args = [
                'dummy_arg',
                '--log_file_prefix=' + db_config.Get('UI_SERVER_LOG'),
                '--logging=info',
            ]
            tornado.options.parse_command_line(args=log_args)
            self.server.start(0)
            db.create_session()
            tornado.ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            pass
class FileServer(BaseComponent):
    """Serves files over HTTP and periodically drives the worker manager."""

    def start(self):
        try:
            self.worker_manager = self.get_component("worker_manager")
            self.get_component("core").disable_console_logging()
            config = self.get_component("config")
            db = self.get_component("db")
            self.application = tornado.web.Application(
                handlers=urls.get_file_server_handlers(),
                template_path=config.FrameworkConfigGet(
                    'INTERFACE_TEMPLATES_DIR'),
                debug=False,
                gzip=True)
            self.application.Core = self.get_component("core")
            self.server = tornado.httpserver.HTTPServer(self.application)
            self.server.bind(
                int(config.FrameworkConfigGet(
                    "FILE_SERVER_PORT")),
                address=config.FrameworkConfigGet(
                    "SERVER_ADDR")
            )
            # NOTE(review): InterfaceServer reads its log setting through the
            # "db_config" component; confirm ``db.Config`` is equivalent here.
            tornado.options.parse_command_line(
                args=[
                    'dummy_arg',
                    '--log_file_prefix=' + db.Config.Get('FILE_SERVER_LOG'),
                    '--logging=info']
            )
            self.server.start(1)
            # ``self.manager_cron`` is a tornado.ioloop.PeriodicCallback; it
            # schedules the worker manager callback every 2000 milliseconds.
            # (The old comment referred to a non-existent ``manage_cron``.)
            self.manager_cron = tornado.ioloop.PeriodicCallback(
                self.worker_manager.manage_workers,
                2000)
            self.manager_cron.start()
            tornado.ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            pass
        finally:
            self.clean_up()

    def clean_up(self):
        """Properly stop any tornado callbacks."""
        # BUGFIX: ``start`` calls this from ``finally``; if startup failed
        # before ``manager_cron`` was assigned, the old code raised
        # AttributeError and masked the original error.
        cron = getattr(self, 'manager_cron', None)
        if cron is not None:
            cron.stop()
|
{
"content_hash": "81777425459bdb595ff8381a91f13346",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 91,
"avg_line_length": 38.4,
"alnum_prop": 0.5604745370370371,
"repo_name": "sharad1126/owtf",
"id": "41b3f772e558850f42d3f0438d6304f49119ed74",
"size": "3456",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "framework/interface/server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "205385"
},
{
"name": "JavaScript",
"bytes": "87362"
},
{
"name": "Python",
"bytes": "890557"
},
{
"name": "Shell",
"bytes": "59149"
}
],
"symlink_target": ""
}
|
from mongoengine.fields import IntField, StringField, ListField, BooleanField, DictField, EmbeddedDocumentField, FloatField
from mongoengine import DoesNotExist, EmbeddedDocument, Document
import hmac
from JumpScale import j
try:
import fcrypt as crypt
except ImportError:
import crypt
DB = 'jumpscale_system'
default_meta = {'allow_inheritance': True, "db_alias": DB}
def extend(a, b):
    """
    Recursively merge *b* into *a* without mutating either argument.

    Lists are concatenated, dicts are merged key by key (recursing on
    shared keys), and any other type is simply replaced by *b*.
    """
    if isinstance(a, list):
        return a + b
    if not isinstance(a, dict):
        return b
    merged = dict(a)
    for key, value in b.items():
        merged[key] = extend(merged[key], value) if key in merged else value
    return merged
class ModelBase:
    """
    Mixin for JumpScale mongoengine documents.

    Adds grid/node identity fields, a ``guid`` alias for the primary key,
    and dual persistence: regular mongoengine storage, or redis when a
    subclass sets ``__redis__ = True``.
    """
    DoesNotExist = DoesNotExist
    # gid/nid default to the identity of the running process, or 0 when the
    # application identity is not initialized.
    gid = IntField(
        default=lambda: j.application.whoAmI.gid if j.application.whoAmI else 0)
    nid = IntField(
        default=lambda: j.application.whoAmI.nid if j.application.whoAmI else 0)
    # Note: the callable itself is passed (not called) so every document
    # gets a fresh creation timestamp.
    epoch = IntField(default=j.data.time.getTimeEpoch)
    meta = default_meta
    @property
    def guid(self):
        # ``guid`` is an alias for the mongoengine primary key.
        return self.pk
    @guid.setter
    def guid(self, value):
        self.pk = value
    def to_dict(self):
        """Return the document as a plain dict without mongo bookkeeping keys."""
        d = j.data.serializer.json.loads(Document.to_json(self))
        d.pop("_cls")
        if "_id" in d:
            d.pop("_id")
        return d
    @classmethod
    def find(cls, query):
        """Run a raw mongo query; not supported for redis-backed models."""
        redis = getattr(cls, '__redis__', False)
        if redis:
            raise j.exceptions.RuntimeError("not implemented")
        else:
            return cls.objects(__raw__=query)
    @classmethod
    def _getKey(cls, guid):
        """
        Build the redis key for *guid*: ``models.<Type>_<guid>`` as bytes.
        @return hsetkey,key
        """
        ttype = cls._class_name.split(".")[-1]
        key = "models.%s" % ttype
        key = '%s_%s' % (key, guid)
        key = key.encode('utf-8')
        return key
    @classmethod
    def get(cls, guid, returnObjWhenNonExist=False):
        """
        Fetch a document by *guid*; returns None when not found.
        default needs to be in redis, need to mention if not
        """
        redis = getattr(cls, '__redis__', False)
        if redis:
            modelraw = j.core.db.get(cls._getKey(guid))
            if modelraw:
                modelraw = modelraw.decode()
                obj = cls.from_json(modelraw)
                return obj
            else:
                res = None
        else:
            try:
                res = cls.objects.get(id=guid)
            except DoesNotExist:
                res = None
        return res
    @classmethod
    def _save_redis(cls, obj):
        # Serialize to JSON and honor an ``expireAfterSeconds`` TTL when the
        # first declared index defines one.
        key = cls._getKey(obj.guid)
        meta = cls._meta['indexes']
        expire = meta[0].get('expireAfterSeconds', None) if meta else None
        raw = j.data.serializer.json.dumps(obj.to_dict())
        j.core.db.set(key, raw)
        if expire:
            j.core.db.expire(key, expire)
        return obj
    def validate(self, clean):
        return Document.validate(self, clean)
    def _datatomodel(self, data):
        # Bulk-assign attributes from a plain dict.
        for key, value in data.items():
            setattr(self, key, value)
    def save(self, data=None):
        """Persist the document (redis or mongo); *data* is applied first."""
        redis = getattr(self, '__redis__', False)
        if data:
            self._datatomodel(data)
        if redis:
            return self._save_redis(self)
        else:
            return Document.save(self)
    def delete(self):
        """Remove the document from its backing store."""
        redis = getattr(self, '__redis__', False)
        if redis:
            key = self._getKey(self.guid)
            j.core.db.delete(key)
        else:
            return Document.delete(self)
    @classmethod
    def exists(cls, guid):
        return bool(cls.get(guid=guid))
    def getset(cls):
        # NOTE(review): named ``cls`` but not decorated @classmethod — the
        # use of ``cls.guid`` / ``cls.save()`` suggests it is meant to run on
        # instances ("get or create"); confirm intended usage.
        redis = getattr(cls, '__redis__', False)
        key = cls._getKey(cls.guid)
        if redis:
            model = cls.get(key)
            if model is None:
                model = cls.save()
            return model
        else:
            if not cls.get(cls.guid):
                cls.save()
            return cls.get(cls.guid)
    def __str__(self):
        return j.data.serializer.json.dumps(self.to_dict(), indent=2)
    __repr__ = __str__
class Errorcondition(ModelBase, Document):
    """Persistent record of an error/alert raised on the grid."""
    nid = IntField(required=True)
    gid = IntField(required=True)
    aid = IntField(default=0)
    pid = IntField(default=0)
    jid = StringField(default='')  # TODO: *2 is this right, string???
    masterjid = IntField(default=0)
    appname = StringField(default="")
    level = IntField(default=1, required=True)
    type = StringField(choices=("BUG", "PERF", "OPS", "UNKNOWN"),
                       default="UNKNOWN", required=True)
    state = StringField(choices=("NEW", "ALERT", "CLOSED"),
                        default="NEW", required=True)
    # StringField() <--- available starting version 0.9
    errormessage = StringField(default="")
    errormessagePub = StringField(default="")  # StringField()
    category = StringField(default="")
    tags = StringField(default="")
    code = StringField()
    funcname = StringField(default="")
    funcfilename = StringField(default="")
    funclinenr = IntField(default=0)
    backtrace = StringField()
    backtraceDetailed = StringField()
    extra = StringField()
    # BUGFIX: pass the callable so the timestamp is evaluated per document;
    # calling it froze the import-time epoch as the default for every record
    # (ModelBase.epoch already uses the callable form).
    lasttime = IntField(default=j.data.time.getTimeEpoch)
    closetime = IntField(default=j.data.time.getTimeEpoch)
    occurrences = IntField(default=0)
class Log(ModelBase, Document):
    """A log record emitted by an application/process on the grid."""
    aid = IntField(default=0)
    pid = IntField(default=0)
    jid = StringField(default='')
    masterjid = IntField(default=0)
    appname = StringField(default="")
    level = IntField(default=1, required=True)
    message = StringField(default='')
    type = StringField(choices=("BUG", "PERF", "OPS", "UNKNOWN"),
                       default="UNKNOWN", required=True)
    state = StringField(choices=("NEW", "ALERT", "CLOSED"),
                        default="NEW", required=True)
    # StringField() <--- available starting version 0.9
    category = StringField(default="")
    tags = StringField(default="")
    # BUGFIX: pass the callable so each record gets its own timestamp; the
    # call form froze the import-time epoch (cf. ModelBase.epoch).
    epoch = IntField(default=j.data.time.getTimeEpoch)
class Grid(ModelBase, Document):
    # A grid record; defaults to the "master" grid.
    name = StringField(default='master')
    # id = IntField(default=1)
class Group(ModelBase, Document):
    """A user group with roles, scoped to a grid."""
    name = StringField(default='')
    domain = StringField(default='')
    gid = IntField(default=1)
    roles = ListField(StringField())
    active = BooleanField(default=True)
    description = StringField(default='master')
    # BUGFIX: pass the callable so the check timestamp is computed per
    # document, not once at import time (matches ModelBase.epoch).
    lastcheck = IntField(default=j.data.time.getTimeEpoch)
class Job(EmbeddedDocument):
    # Embedded inside Command: one job execution on a single node.
    nid = IntField(required=True)
    gid = IntField(required=True)
    data = StringField(default='')
    streams = ListField(StringField())
    level = IntField()
    state = StringField(required=True, choices=(
        'SUCCESS', 'ERROR', 'TIMEOUT', 'KILLED', 'QUEUED', 'RUNNING'))
    starttime = IntField()
    time = IntField()
    tags = StringField()
    critical = StringField()
    # TTL index: records expire 5 days after their ``epoch``.
    meta = extend(default_meta, {
        'indexes': [{'fields': ['epoch'], 'expireAfterSeconds': 3600 * 24 * 5}],
        'allow_inheritance': True,
        "db_alias": DB
    })
class Command(ModelBase, Document):
    # A command dispatched to one or more nodes; the per-node executions
    # are embedded as Job documents in 'jobs'.
    guid = StringField(unique=True, required=True)
    gid = IntField(default=0)
    nid = IntField(default=0)
    cmd = StringField()
    roles = ListField(StringField())
    # fanout: when True the command targets every node matching 'roles'.
    fanout = BooleanField(default=False)
    args = DictField()
    data = StringField()
    tags = StringField()
    starttime = IntField()
    jobs = ListField(EmbeddedDocumentField(Job))
    meta = extend(default_meta, {
        'indexes': [{'fields': ['guid']}]
    })
class Audit(ModelBase, Document):
    """Audit-trail entry for an API call (who called what, with which args)."""
    user = StringField(default='')
    result = StringField(default='')
    call = StringField(default='')
    status_code = IntField(default=0)
    args = StringField(default='')
    kwargs = StringField(default='')
    # Pass the callable so each document gets its own creation timestamp;
    # calling it here would freeze the default at import time.
    timestamp = IntField(default=j.data.time.getTimeEpoch)
    # NOTE(review): the TTL index is declared on 'epoch' but this model's
    # time field is named 'timestamp' — confirm the index key.
    meta = extend(default_meta, {'indexes': [
        {'fields': ['epoch'], 'expireAfterSeconds': 3600 * 24 * 5}
    ], 'allow_inheritance': True, "db_alias": DB})
class Disk(ModelBase, Document):
    """A physical disk/partition attached to a node."""
    partnr = IntField()
    path = StringField(default='')
    size = IntField(default=0)
    free = IntField()
    ssd = IntField()
    fs = StringField(default='')
    mounted = BooleanField()
    mountpoint = StringField(default='')
    active = BooleanField()
    model = StringField(default='')
    description = StringField(default='')
    type = ListField(StringField())  # BOOT, DATA, ...
    # epoch of last time the info was checked from reality; pass the
    # callable so the default is evaluated per document, not at import time.
    lastcheck = IntField(default=j.data.time.getTimeEpoch)
class VDisk(ModelBase, Document):
    """A virtual disk belonging to a virtual machine."""
    machineguid = StringField(required=True)
    diskid = IntField()
    fs = StringField(default='')
    size = IntField(default=0)
    free = IntField()
    sizeondisk = IntField()
    mounted = BooleanField()
    path = StringField(default='')
    description = StringField(default='')
    mountpoint = StringField(default='')
    role = ListField(StringField())
    type = ListField(StringField())
    order = IntField()
    devicename = StringField(default='')  # if known device name in vmachine
    # Callable defaults: evaluated per document instead of once at import
    # time (calling getTimeEpoch() here would freeze the value).
    lastcheck = IntField(default=j.data.time.getTimeEpoch)
    backup = BooleanField()
    backuplocation = StringField()
    backuptime = IntField(default=j.data.time.getTimeEpoch)
    backupexpiration = IntField()
class Alert(ModelBase, Document):
    """An aggregated alert built from one or more error conditions."""
    username = StringField(default='')
    description = StringField(default='')
    descriptionpub = StringField(default='')
    level = IntField(min_value=1, max_value=3, default=1)
    # dot notation e.g. machine.start.failed
    category = StringField(default='')
    tags = StringField(default='')  # e.g. machine:2323
    state = StringField(choices=("NEW", "ALERT", "CLOSED"),
                        default='NEW', required=True)
    history = ListField(DictField())
    # first time there was an error condition linked to this alert; pass
    # the callable so the default is computed per document, not at import.
    inittime = IntField(default=j.data.time.getTimeEpoch)
    # last time there was an error condition linked to this alert
    lasttime = IntField()
    closetime = IntField()  # alert is closed, no longer active
    # nr of times this error condition happened
    nrerrorconditions = IntField()
    errorconditions = ListField(IntField())  # ids of errorconditions
class Heartbeat(ModelBase, Document):
    """Liveness record; only stores when the node last checked in."""
    # Pass the callable so each heartbeat gets the current epoch instead of
    # the import-time value.
    lastcheck = IntField(default=j.data.time.getTimeEpoch)
class Machine(ModelBase, Document):
    """A virtual machine / container hosted on a node."""
    name = StringField(default='')
    roles = ListField(StringField())
    netaddr = StringField(default='')
    ipaddr = ListField(StringField())
    active = BooleanField()
    # STARTED,STOPPED,RUNNING,FROZEN,CONFIGURED,DELETED
    state = StringField(choices=("STARTED", "STOPPED", "RUNNING", "FROZEN",
                                 "CONFIGURED", "DELETED"), default='CONFIGURED', required=True)
    mem = IntField()  # in MB
    cpucore = IntField()
    description = StringField(default='')
    otherid = StringField(default='')
    type = StringField(default='')  # VM,LXC
    # epoch of last time the info was checked from reality; callable so it
    # is evaluated per document, not once at import time.
    lastcheck = IntField(default=j.data.time.getTimeEpoch)
class Nic(ModelBase, Document):
    """A network interface on a node."""
    name = StringField(default='')
    mac = StringField(default='')
    ipaddr = ListField(StringField())
    active = BooleanField(default=True)
    # epoch of last time the info was checked from reality; callable so it
    # is evaluated per document, not once at import time.
    lastcheck = IntField(default=j.data.time.getTimeEpoch)
class Node(ModelBase, Document):
    """A physical or virtual node that is part of the grid."""
    name = StringField(default='')
    roles = ListField(StringField())
    # Use the dict constructor as default so documents don't share one
    # mutable dict instance.
    netaddr = DictField(default=dict)
    machineguid = StringField(default='')
    ipaddr = ListField(StringField())
    active = BooleanField()
    peer_stats = IntField()  # node which has stats for this node
    # node which has transactionlog or other logs for this node
    peer_log = IntField()
    peer_backup = IntField()  # node which has backups for this node
    description = StringField(default='')
    # Callable default: evaluated per document, not once at import time.
    lastcheck = IntField(default=j.data.time.getTimeEpoch)
    # osisrootobj,$namespace,$category,$version
    _meta = ListField(StringField())
class Process(ModelBase, Document):
    """A monitored OS process plus its resource-usage counters."""
    aysdomain = StringField(default='')
    aysname = StringField(default='')
    pname = StringField(default='')  # process name
    sname = StringField(default='')  # name as specified in startup manager
    ports = ListField(IntField())
    instance = StringField(default='')
    systempid = ListField(IntField())  # system process id (PID) at this point
    epochstart = IntField()
    epochstop = IntField()
    active = BooleanField()
    # Callable default: evaluated per document, not once at import time.
    lastcheck = IntField(default=j.data.time.getTimeEpoch)
    cmd = StringField(default='')
    workingdir = StringField(default='')
    parent = StringField(default='')
    type = StringField(default='')
    statkey = StringField(default='')
    nr_file_descriptors = FloatField()
    nr_ctx_switches_voluntary = FloatField()
    nr_ctx_switches_involuntary = FloatField()
    nr_threads = FloatField()
    cpu_time_user = FloatField()
    cpu_time_system = FloatField()
    cpu_percent = FloatField()
    mem_vms = FloatField()
    mem_rss = FloatField()
    io_read_count = FloatField()
    io_write_count = FloatField()
    io_read_bytes = FloatField()
    io_write_bytes = FloatField()
    nr_connections_in = FloatField()
    nr_connections_out = FloatField()
class Test(ModelBase, Document):
    """A test definition/result within a test run."""
    name = StringField(default='')
    testrun = StringField(default='')
    path = StringField(default='')
    state = StringField(choices=("OK", "ERROR", "DISABLED"),
                        default='OK', required=True)
    priority = IntField()  # lower is highest priority
    organization = StringField(default='')
    author = StringField(default='')
    version = IntField()
    categories = ListField(StringField())
    # Callable default: evaluated per document, not once at import time.
    starttime = IntField(default=j.data.time.getTimeEpoch)
    endtime = IntField()
    enable = BooleanField()
    result = DictField()
    # Use the dict constructor as default so documents don't share one
    # mutable dict instance.
    output = DictField(default=dict)
    eco = DictField(default=dict)
    license = StringField(default='')
    source = DictField(default=dict)
class User(ModelBase, Document):
    """A grid user account with hashed password and group/role membership."""
    name = StringField(default='')
    domain = StringField(default='')
    passwd = StringField(default='')  # stored hashed
    roles = ListField(StringField())
    active = BooleanField()
    description = StringField(default='')
    emails = ListField(StringField())
    xmpp = ListField(StringField())
    mobile = ListField(StringField())
    # epoch of last time the info updated; callable so the default is
    # evaluated per document rather than once at import time.
    lastcheck = IntField(default=j.data.time.getTimeEpoch)
    groups = ListField(StringField())
    authkey = StringField(default='')
    data = StringField(default='')
    authkeys = ListField(StringField())

    # NOTE(review): defined without 'self'; presumably invoked as
    # User.authenticate(username, passwd) — confirm callers before adding
    # @staticmethod.
    def authenticate(username, passwd):
        # Constant-time comparison of the stored hash against the hash of
        # the supplied password (salted with the stored hash).
        for user in User.find({'name': username}):
            if hmac.compare_digest(user.passwd, j.sal.unix.crypt(passwd, user.passwd)):
                return True
        return False

    def save(user):
        # Instance method: 'user' plays the role of 'self'.  Hash the
        # password on first save, or re-hash when it was changed.
        if not user.id:
            user.passwd = j.sal.unix.crypt(user.passwd)
        else:
            olduser = User.get(user.id)
            if olduser.passwd != user.passwd:  # change passwd
                user.passwd = j.sal.unix.crypt(user.passwd)
        # NOTE(review): super(ModelBase, user) skips User AND ModelBase in
        # the MRO — confirm this is intended.
        super(ModelBase, user).save()
class SessionCache(ModelBase, Document):
    """Web session state, stored in Redis rather than MongoDB."""
    __redis__ = True
    user = StringField()
    kwargs = DictField()
    # Callable defaults: evaluated per session instead of once at import
    # time (calling getTimeEpoch() here would freeze the value).
    _creation_time = IntField(default=j.data.time.getTimeEpoch)
    _accessed_time = IntField(default=j.data.time.getTimeEpoch)
    _expire_at = IntField(default=None)
    guid = StringField()
    meta = extend(default_meta, {'indexes':
                                 [{'fields': ['epoch'], 'expireAfterSeconds': 432000}],
                                 'allow_inheritance': True,
                                 'db_alias': DB})

    def __init__(self, *args, **kwargs):
        # No extra behavior; kept for backward compatibility.
        super().__init__(*args, **kwargs)

    def _save_redis(self, obj):
        """Serialize obj to JSON and store it in Redis under its guid key.

        The key expires at _expire_at when set, otherwise after the TTL
        declared in the first meta index (if any).
        """
        key = self._getKey(obj.guid)
        indexes = self._meta['indexes']
        expire = next(iter(indexes), {}).get('expireAfterSeconds', None)
        raw = j.data.serializer.json.dumps(obj.to_dict())
        j.core.db.set(key, raw)
        if self._expire_at:
            j.core.db.expireat(self._getKey(self.guid), self._expire_at)
        elif expire:
            j.core.db.expire(key, expire)
        return obj
del EmbeddedDocument
|
{
"content_hash": "c2b866d0e725b4597a4dca148f924bd5",
"timestamp": "",
"source": "github",
"line_count": 498,
"max_line_length": 123,
"avg_line_length": 32.83734939759036,
"alnum_prop": 0.6189689965144011,
"repo_name": "Jumpscale/jumpscale_core8",
"id": "4a22f1fba7edb1b7dc99b2d780c024a5e7442570",
"size": "16354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/data/models/Models.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1113"
},
{
"name": "Cap'n Proto",
"bytes": "9033"
},
{
"name": "Lua",
"bytes": "12538"
},
{
"name": "Python",
"bytes": "4343122"
},
{
"name": "Shell",
"bytes": "7091"
}
],
"symlink_target": ""
}
|
"""
Functions:
num_headers Guess the number of headers in a matrix.
"""
# Data Types.  Bit flags, so a cell's type can be tested with bitwise ops.
CHAR, INT, FLOAT, EMPTY = 1, 2, 4, 8
# Semantic Types.  Also bit flags; a cell may hold several candidates at once.
HEAD, SAMPLE, ANNOT, VALUE, BLANK = 1, 2, 4, 8, 16
def _rule_no_first_row_annots(matrix, num_rows, num_cols, datatype, semtype):
    # The first row holds headers/samples, never annotations: drop the
    # ANNOT candidate bit from every cell of row 0.
    changed = False
    for col in range(num_cols):
        cell = semtype[0][col]
        if cell & ANNOT:
            semtype[0][col] = cell ^ ANNOT
            changed = True
    return changed
def _rule_first_row_sample(matrix, num_rows, num_cols, datatype, semtype):
    # Sample labels live only in row 0: clear the SAMPLE candidate bit from
    # every cell of the remaining rows.
    changed = False
    for row in range(1, num_rows):
        for col in range(num_cols):
            if semtype[row][col] & SAMPLE:
                semtype[row][col] &= ~SAMPLE
                changed = True
    return changed
def _rule_first_row_col_head(matrix, num_rows, num_cols, datatype, semtype):
    # Header labels live only in row 0 or column 0: clear the HEAD
    # candidate bit from every interior cell.
    changed = False
    for row in range(1, num_rows):
        for col in range(1, num_cols):
            if semtype[row][col] & HEAD:
                semtype[row][col] &= ~HEAD
                changed = True
    return changed
## # RULE 3: If the majority of the potential values in the matrix
## # are floating points, then all values must be floating
## # points.
## # This won't work. E.g. if you use an algorithm to zero-fill
## # missing values.
## value_type_is = INT | FLOAT | EMPTY
## int_values = float_values = 0
## for i in range(num_rows):
## for j in range(num_cols):
## if not (semtype[i][j] & VALUE):
## continue
## if datatype[i][j] == INT:
## int_values += 1
## elif datatype[i][j] == FLOAT:
## float_values += 1
## total = int_values + float_values
## if float_values >= total/2.0:
## # Values must be FLOAT or EMPTY.
## value_type_is = FLOAT | EMPTY
## for i in range(num_rows):
## for j in range(num_cols):
## if not (semtype[i][j] & VALUE):
## continue
## if value_type_is | datatype[i][j] != value_type_is:
## semtype[i][j] ^= VALUE
def _rule_no_values_then_is_head(
    matrix, num_rows, num_cols, datatype, semtype):
    # A column with no VALUE candidates is pure header territory: force
    # every first-row cell from column 0 up to (and including) it to HEAD.
    changed = False
    for col in range(num_cols):
        if any(semtype[row][col] & VALUE for row in range(num_rows)):
            continue
        for left in range(col + 1):
            assert semtype[0][left] & HEAD, "Missing header."
            if semtype[0][left] != HEAD:
                semtype[0][left] = HEAD
                changed = True
    return changed
def _rule_no_broken_values(matrix, num_rows, num_cols, datatype, semtype):
    # Scanning backwards from the end of each row (then each column), the
    # VALUE candidates must form one contiguous run; strip any VALUE bit
    # that reappears after a non-VALUE cell is seen.
    changed = False
    for row in range(num_rows):
        run = True
        for col in reversed(range(num_cols)):
            if run:
                run = bool(semtype[row][col] & VALUE)
            elif semtype[row][col] & VALUE:
                semtype[row][col] ^= VALUE
                changed = True
    for col in range(num_cols):
        run = True
        for row in reversed(range(num_rows)):
            if run:
                run = bool(semtype[row][col] & VALUE)
            elif semtype[row][col] & VALUE:
                semtype[row][col] ^= VALUE
                changed = True
    return changed
def _rule_no_broken_head1(matrix, num_rows, num_cols, datatype, semtype):
    # HEAD candidates must form a contiguous prefix of each row and of each
    # column; once a non-HEAD cell is met, later HEAD bits are stripped.
    changed = False
    for row in range(num_rows):
        prefix = True
        for col in range(num_cols):
            if prefix:
                prefix = bool(semtype[row][col] & HEAD)
            elif semtype[row][col] & HEAD:
                semtype[row][col] ^= HEAD
                changed = True
    for col in range(num_cols):
        prefix = True
        for row in range(num_rows):
            if prefix:
                prefix = bool(semtype[row][col] & HEAD)
            elif semtype[row][col] & HEAD:
                semtype[row][col] ^= HEAD
                changed = True
    return changed
def _rule_no_broken_head2(matrix, num_rows, num_cols, datatype, semtype):
    # Walking row 0 right-to-left: once a definite HEAD is seen, every cell
    # to its left must also be a definite HEAD.  Same for column 0,
    # walking bottom-up.
    changed = False
    seen = False
    for col in reversed(range(num_cols)):
        if semtype[0][col] == HEAD:
            seen = True
        elif seen:
            semtype[0][col] = HEAD
            changed = True
    seen = False
    for row in reversed(range(num_rows)):
        if semtype[row][0] == HEAD:
            seen = True
        elif seen:
            semtype[row][0] = HEAD
            changed = True
    return changed
## # RULE 5: Label BLANK cells if there is a potential HEAD in the
## # above it, a potential HEAD to its left, and no FLOATs
## # anywhere to the right of it or below it.
## # This doesn't work:
## # <HEAD> <HEAD> <HEAD> <SAMPLE>
## # <HEAD> <BLANK> <BLANK> <VALUE>
## # <ANNOT> <ANNOT> <ANNOT> <VALUE>
## # If the <ANNOT> are numbers (e.g. GWEIGHT), then won't detect.
## could_be_BLANK = {} # (i, j) -> 1
## for i in range(1, num_rows):
## for j in range(1, num_rows):
## if not (semtype[i][0] & HEAD) or not (semtype[0][j] & HEAD):
## continue
## any_floats = False
## for ii in range(i+1, num_rows):
## if datatype[ii][j] == FLOAT:
## any_floats = True
## break
## if any_floats:
## continue
## for jj in range(j+1, num_cols):
## if datatype[i][jj] == FLOAT:
## any_floats = True
## break
## if any_floats:
## continue
## could_be_BLANK[(i, j)] = 1
## # Start with (1, 1) as BLANK. Then add one row and column at a
## # time, making sure everything I added is blank.
## # X X X X
## # X X
## # X X
## max_row = max_col = 1
## just_added_row = False
## while True:
## if not just_added_row:
## new_row, new_col = max_row+1, max_col
## just_added_row = True
## else:
## new_row, new_col = max_row, max_col+1
## just_added_row = False
## all_blank = True
## for i in range(1, new_row+1):
## for j in range(1, new_col+1):
## if (i, j) not in could_be_BLANK:
## all_blank = False
## # If everything is BLANK, then accept the new rows and columns
## # and try the next one.
## if all_blank:
## max_row, max_col = new_row, new_col
## just_added_row = False
## # If not everything is blank, and we just added a column, then
## # we've already tried everything, and there's no more blanks.
## elif not just_added_row:
## break
## if (max_row, max_col) not in could_be_BLANK:
## max_row = max_col = 0
## for i in range(1, max_row+1):
## for j in range(1, max_col+1):
## semtype[i][j] = BLANK
## for i in range(1, max_row+1):
## semtype[i][0] = HEAD
## for j in range(1, max_col+1):
## semtype[0][j] = HEAD
## for i in range(1, max_row+1):
## for j in range(max_col+1, num_cols):
## semtype[i][j] = ANNOT
## for j in range(1, max_col+1):
## for i in range(max_row+1, num_rows):
## semtype[i][j] = ANNOT
def _rule_no_broken_blank(matrix, num_rows, num_cols, datatype, semtype):
    # A BLANK candidate at (row, col) is only legal if every cell in the
    # rectangle (1, 1)..(row-1, col-1) can also be BLANK, and the cell has
    # HEAD candidates at the start of both its row and its column.
    changed = False
    candidates = [(row, col)
                  for row in range(num_rows)
                  for col in range(num_cols)
                  if semtype[row][col] & BLANK]
    for row, col in candidates:
        rect_ok = all(semtype[r][c] & BLANK
                      for r in range(1, row)
                      for c in range(1, col))
        if not rect_ok:
            semtype[row][col] ^= BLANK
            changed = True
            continue
        if not (semtype[row][0] & HEAD) or not (semtype[0][col] & HEAD):
            semtype[row][col] ^= BLANK
            changed = True
    return changed
def _rule_known_headers(matrix, num_rows, num_cols, datatype, semtype):
    # Cells in row/column 0 whose text matches a well-known header name are
    # promoted to definite HEAD.  (0, 0) is skipped because PCL files allow
    # arbitrary names there.
    known_cols = frozenset([
        "GID", "NA", "ID", "NAME", "LOCUSLINK",
        "GWEIGHT", "GORDER", "GCLUSTER"])
    known_rows = frozenset(["GID", "AID", "EWEIGHT", "EORDER", "ACLUSTER"])
    changed = False
    if not num_rows or not num_cols:
        return changed
    if not semtype[0][0] & HEAD:
        return changed
    first_row = [cell.upper() for cell in matrix[0]]
    for col in range(1, num_cols):
        if not semtype[0][col] & HEAD:
            break
        if first_row[col] in known_cols and semtype[0][col] != HEAD:
            semtype[0][col] = HEAD
            changed = True
    first_col = [row[0].upper() for row in matrix]
    for row in range(1, num_rows):
        if not semtype[row][0] & HEAD:
            break
        if first_col[row] in known_rows and semtype[row][0] != HEAD:
            semtype[row][0] = HEAD
            changed = True
    return changed
def _rule_no_values_by_head(
    matrix, num_rows, num_cols, datatype, semtype):
    # Cells below a definite column HEAD, or to the right of a definite row
    # HEAD, cannot be VALUEs; strip the candidate bit.
    changed = False
    for col in range(num_cols):
        if semtype[0][col] != HEAD:
            break
        for row in range(num_rows):
            if semtype[row][col] & VALUE:
                semtype[row][col] ^= VALUE
                changed = True
    for row in range(num_rows):
        if semtype[row][0] != HEAD:
            break
        for col in range(num_cols):
            if semtype[row][col] & VALUE:
                semtype[row][col] ^= VALUE
                changed = True
    return changed
def _rule_head_around_blank(matrix, num_rows, num_cols, datatype, semtype):
    # A cell that can be BLANK (but is not yet definitely BLANK) becomes a
    # definite BLANK when both its row and its column start with a definite
    # HEAD.
    changed = False
    for row in range(1, num_rows):
        if semtype[row][0] != HEAD:
            continue
        for col in range(1, num_cols):
            cell = semtype[row][col]
            if cell & BLANK and cell != BLANK and semtype[0][col] == HEAD:
                semtype[row][col] = BLANK
                changed = True
    return changed
def _rule_no_head_around_no_blank(
    matrix, num_rows, num_cols, datatype, semtype):
    # A cell that cannot be BLANK may not have definite HEADs both above
    # and to its left; drop the HEAD candidate on whichever side is not
    # already pinned to HEAD.
    changed = False
    for row in range(1, num_rows):
        for col in range(1, num_cols):
            if semtype[row][col] & BLANK:
                continue
            left, top = semtype[row][0], semtype[0][col]
            assert left != HEAD or top != HEAD, "Ambiguous annotation."
            if left == HEAD and top & HEAD:
                semtype[0][col] = top ^ HEAD
                changed = True
            elif top == HEAD and left & HEAD:
                semtype[row][0] = left ^ HEAD
                changed = True
    return changed
def _rule_first_values_are_int(
    matrix, num_rows, num_cols, datatype, semtype):
    # RULE: If the first columns of VALUEs are INT or EMPTY, and the
    # remaining are FLOAT or EMPTY, then the first columns should
    # be relabeled as ANNOT (e.g. Gene IDs).
    #
    # <HEAD1>      <HEAD2>      <HEAD3>      <SAMPLE1>
    # <ANNOT/STR>  <ANNOT/STR>  <ANNOT/INT>  <VALUE/FLOAT>  Last ANNOT is INT.
    # <ANNOT/STR>  <ANNOT/INT>  <ANNOT/INT>  <VALUE/FLOAT>  All ANNOTs are INTs.
    # <ANNOT/INT>  <ANNOT/STR>  <ANNOT/INT>  <VALUE/FLOAT>
    # Find the first column that contains non-empty VALUEs.
    col = None
    for j in range(num_cols):
        for i in range(num_rows):
            if semtype[i][j] & VALUE and not (datatype[i][j] & EMPTY):
                col = j
                break
        if col is not None:
            break
    else:
        return False
    if col + 1 >= num_cols:  # only 1 column of values.
        return False
    # Classify each value column: FLOAT dominates INT; EMPTY if neither.
    types = [None] * num_cols
    for j in range(col, num_cols):
        x = [datatype[i][j] for i in range(num_rows)
             if semtype[i][j] & VALUE]
        dt = EMPTY
        if FLOAT in x:
            dt = FLOAT
        elif INT in x:  # only INT if there are no FLOATs.
            dt = INT
        types[j] = dt
    # Count the leading run of INT columns, then the FLOAT columns after it.
    j = col
    while j < num_cols and types[j] == INT:
        j += 1
    num_INT = j - col
    num_FLOAT = sum(1 for jj in range(j, num_cols) if types[jj] == FLOAT)
    if not num_INT or not num_FLOAT:
        return False
    if col + num_INT + num_FLOAT != num_cols:  # some VALUEs are not FLOAT.
        return False
    # BUG FIX: 'changed' was previously bound only inside the loop below,
    # so 'return changed' could raise NameError when nothing was relabeled;
    # initialize it up front.
    changed = False
    for i in range(num_rows):
        for j in range(col, col + num_INT):
            if semtype[i][j] & VALUE:
                semtype[i][j] ^= VALUE
                changed = True
    return changed
def _rule_scale_factor_no_value(
    matrix, num_rows, num_cols, datatype, semtype):
    # RULE: If the column header is SCALE_FACTOR, the row contains
    # ANNOT and not VALUE.
    #
    # <HEAD>       <HEAD>   <HEAD>   <SAMPLE> <SAMPLE>
    # DESCRIPTION  <BLANK>  <BLANK>  <ANNOT>  <ANNOT>
    # SCALE_FACTOR <BLANK>  <BLANK>  <ANNOT>  <ANNOT>
    #
    # res_format generates the DESCRIPTION and SCALE_FACTOR column
    # annotations.  SCALE_FACTOR contains all numbers, so it could be
    # misread as gene expression values; force it to be an annotation.
    if num_cols < 2:
        return False
    # Look for a SCALE_FACTOR row in the matrix.
    sf_row = None
    for i in range(num_rows):
        x = matrix[i][0].upper().strip()
        if x.startswith("SCALE") and x.endswith("FACTOR"):
            sf_row = i
            break
    if sf_row is None:
        return False
    # Find the first cell of the row that is not definitely BLANK.
    col = None
    for j in range(1, num_cols):
        if semtype[sf_row][j] != BLANK:
            col = j
            break
    # BUG FIX: if every remaining cell in the row is definitely BLANK,
    # 'col' stays None and range(col, ...) below would raise TypeError.
    if col is None:
        return False
    # Make sure all the cells can be ANNOTs.
    for j in range(col, num_cols):
        if not (semtype[sf_row][j] & ANNOT):
            return False
    # Make sure none of the cells can be VALUEs.
    changed = False
    for j in range(col, num_cols):
        if semtype[sf_row][j] & VALUE:
            changed = True
            semtype[sf_row][j] ^= VALUE
    return changed
NUM_HEADERS_CACHE = None  # tuple of (matrix, (nrow, ncol))
def num_headers(matrix):
    """Return (# row headers, # col headers), memoizing the last matrix."""
    global NUM_HEADERS_CACHE
    # Invalidate the one-entry cache when a different matrix comes in.
    if NUM_HEADERS_CACHE and matrix != NUM_HEADERS_CACHE[0]:
        NUM_HEADERS_CACHE = None
    if NUM_HEADERS_CACHE is None:
        NUM_HEADERS_CACHE = (matrix, _num_headers_h(matrix))
    cached_matrix, counts = NUM_HEADERS_CACHE
    assert matrix == cached_matrix
    return counts
def _print_matrix_debug(matrix, start_row, nrows, ncols):
    # Debug helper: dump up to nrows rows of the matrix starting at
    # start_row, truncated to ncols columns, prefixed with the row index.
    end_row = min(start_row+nrows, len(matrix))
    for i in range(start_row, end_row):
        print i, matrix[i][:ncols]
def _num_headers_h(matrix):
    """Infer (num_header_rows, num_header_cols) for a parsed text matrix.

    Assigns each cell a bitmask of candidate semantic types, iteratively
    applies the constraint rules until a fixed point, then grows the VALUE
    region from the bottom-right corner to locate the header boundary.
    """
    # Try to find the number of rows and columns that contain header
    # information.
    # CASE 1: No headers. All <VALUES>
    # CASE 2: 1 row header, 1 column header.
    # <HEAD> <SAMPLE1> <SAMPLE2> [...]
    # <ANNOT> <VALUE> <VALUE>
    # CASE 3: 1 row header, n column headers.
    # <HEAD1> <HEAD2> <HEAD3> <SAMPLE1> <SAMPLE2> <SAMPLE3>
    # <ANNOT> <ANNOT> <ANNOT> <VALUE> <VALUE> <VALUE>
    # CASE 4: n row headers, 1 column headers.
    # <HEAD1> <SAMPLE1> <SAMPLE2> <SAMPLE3>
    # <HEAD4> <ANNOT> <ANNOT> <ANNOT>
    # <HEAD5> <ANNOT> <ANNOT> <ANNOT>
    # <ANNOT> <VALUE> <VALUE> <VALUE>
    # CASE 5: n row headers, n column headers.
    # <HEAD1> <HEAD2> <HEAD3> <SAMPLE1> <SAMPLE2> <SAMPLE3>
    # <HEAD4> <BLANK> <BLANK> <ANNOT> <ANNOT> <ANNOT>
    # <HEAD5> <BLANK> <BLANK> <ANNOT> <ANNOT> <ANNOT>
    # <ANNOT> <ANNOT> <ANNOT> <VALUE> <VALUE> <VALUE>
    #
    # 1 2 4 8
    # 1 HEAD CHAR INT FLOAT
    # 2 SAMPLE CHAR INT FLOAT
    # 4 ANNOT CHAR INT FLOAT EMPTY
    # 8 VALUE INT FLOAT EMPTY
    # 16 BLANK EMPTY
    #
    # Challenges:
    # - It's hard to distinguish between ANNOT, VALUE, and BLANK when
    # they are EMPTY.
    # - It's hard to distinguish between HEADs and SAMPLEs.
    #
    # RULE: No ANNOT in the first row.
    # RULE: SAMPLE can only be in the first row.
    # RULE: HEAD can only be in the first row or column.
    # RULE: If there are no VALUES in a column, then the first row,
    # from this down to the first column, must all be HEAD.
    # RULE: In each row, the header must start from column 0. There
    # can't be a cell with no HEAD followed by one with HEAD.
    # RULE: If a cell is a HEAD, then all cells preceeding can only be
    # HEAD.
    # RULE: BLANKs can only be preceeded by BLANKs from (1, 1).
    # BLANKs must have headers in the first row and column.
    # RULE: In each row or column, the VALUEs can only appear from
    # the end.
    # RULE: If the first row or column (except for (0, 0), because
    # PCL files allow different names) match known headers,
    # then set them to HEAD.
    # RULE: There are no VALUEs under column HEAD or to the right of
    # row HEAD.
    # RULE: If a cell has a HEAD on top and left, that cell must be
    # BLANK.
    # RULE: If a cell is not blank, then it cannot have a HEAD on the
    # top and left.
    # RULE: If the first columns of VALUEs are INT or EMPTY, and the
    # remaining are FLOAT or EMPTY, then the first columns should
    # be relabeled as ANNOT. (e.g. Gene IDs).
    # RULE: If the column header is SCALE_FACTOR, the row contains
    # ANNOT and not VALUE.
    RULES = [
        _rule_no_first_row_annots,
        _rule_first_row_sample,
        _rule_first_row_col_head,
        _rule_no_values_then_is_head,
        _rule_no_broken_head1,
        _rule_no_broken_head2,
        _rule_no_broken_blank,
        _rule_no_broken_values,
        _rule_known_headers,
        _rule_no_values_by_head,
        _rule_head_around_blank,
        _rule_no_head_around_no_blank,
        _rule_first_values_are_int,
        _rule_scale_factor_no_value,
        ]
    if not matrix:
        return 0, 0
    num_rows, num_cols = len(matrix), len(matrix[0])
    # Make sure each row contains the same number of columns.
    for row in matrix:
        assert len(row) == num_cols, "matrix row length mismatch"
    # This is REALLY SLOW for big matrices. Optimize by assuming a
    # maximum number of header rows. Just look at the first rows for
    # the header.
    MAX_HEADER_ROWS = 100
    # 50 rows might not be sufficient for affymetrix arrays. U133Av2
    # has 62 AFFX genes that may or may not have annotations.
    #MAX_HEADER_ROWS = 50
    matrix = matrix[:MAX_HEADER_ROWS]
    num_rows = len(matrix)
    # Figure out the data type for each cell in the matrix.
    #CHAR, INT, FLOAT, EMPTY = 1, 2, 4, 8
    datatype = [[None] * num_cols for i in range(num_rows)]
    for i in range(num_rows):
        for j in range(num_cols):
            x = matrix[i][j]
            if x.strip() == "":
                dt = EMPTY
            elif x.strip().lower() == "null":
                dt = EMPTY
            elif _is_int(x):
                dt = INT
            elif _is_float(x):
                dt = FLOAT
            else:
                dt = CHAR
            datatype[i][j] = dt
    # Make an initial guess at the semantic types of each cell.
    #HEAD, SAMPLE, ANNOT, VALUE, BLANK = 1, 2, 4, 8, 16
    semtype = [[0] * num_cols for i in range(num_rows)]
    for i in range(num_rows):
        for j in range(num_cols):
            x = datatype[i][j]
            if x == CHAR:
                st = HEAD | SAMPLE | ANNOT
                # "NA" / "-" strings may also stand for missing values.
                if matrix[i][j].upper() in ["NA", "-"]:
                    st = st | VALUE
            elif x == INT:
                st = HEAD | SAMPLE | ANNOT | VALUE
            elif x == FLOAT:
                st = HEAD | SAMPLE | ANNOT | VALUE
            elif x == EMPTY:
                st = ANNOT | VALUE | BLANK
                if i == 0:
                    st = st | HEAD
            else:
                raise AssertionError
            semtype[i][j] = st
    # Apply the rules to guess the right types of each cell of the
    # matrix.  Iterate to a fixed point: stop when no rule changes anything.
    iteration = 0
    changed = True
    while changed:
        #_print_matrix_debug(semtype, 0, 5, 8)
        iteration += 1
        changed = False
        for rule_fn in RULES:
            c = rule_fn(matrix, num_rows, num_cols, datatype, semtype)
            changed = changed or c
    # Look for the VALUEs. Start looking at the bottom right of the
    # MATRIX, and add one column and row at a time.
    first_row, first_col = num_rows - 1, num_cols - 1
    just_added_row = False
    while True:
        if not just_added_row:
            new_row, new_col = first_row - 1, first_col
            just_added_row = True
        else:
            new_row, new_col = first_row, first_col - 1
            just_added_row = False
        # Make sure the rows and cols are in bounds.
        if new_row < 0 or new_col < 0:
            if just_added_row:
                continue
            break
        all_values = True
        for i in range(new_row, num_rows):
            for j in range(new_col, num_cols):
                if not semtype[i][j] & VALUE:
                    all_values = False
        # If everything is a VALUE, then accept the new rows and
        # columns and try the next one.
        if all_values:
            first_row, first_col = new_row, new_col
            just_added_row = False
        # If not everything is a value, and we just added a column,
        # then we've already tried everything, and there's no more
        # values.
        elif not just_added_row:
            break
    if not semtype[first_row][first_col] & VALUE:
        # There are no values.
        first_row, first_col = num_rows, num_cols
    hrows, hcols = first_row, first_col
    #print "DEBUG", hrows, hcols
    #_print_matrix_debug(datatype, 0, 10, 8)
    #_print_matrix_debug(semtype, 0, 10, 8)
    #import sys; sys.exit(0)
    # Don't allow this. It makes it too complicated to have to keep
    # track of matrices with and without signal values.
    ## If this is a matrix that only contains annotations, then there
    ## can only be one header row. (Because there are no headers for
    ## annotations).
    #if hcols == num_cols:
    # hrows = 1
    #assert hcols <= num_cols
    assert hcols < num_cols, \
        "It looks like there are annotations at the end of the matrix."
    assert hrows < MAX_HEADER_ROWS, "Too many header rows."
    return hrows, hcols
def _all_numeric(vec):
    # True iff every element of vec parses as a number.
    return all(_is_numeric(n) for n in vec)
def _is_numeric(n):
# empty strings are not numeric.
if n == "":
return False
try:
float(n)
except ValueError, x:
return False
return True
def _is_int(n):
try:
int(n)
except ValueError, x:
return False
return True
def _is_float(n):
try:
float(n)
except ValueError, x:
return False
return True
def _is_float_not_int(n):
    """Return True if n parses as a float but NOT as an int (e.g. "3.5")."""
    if _is_int(n):
        return False
    try:
        float(n)
    except ValueError:  # unused binding removed; works on Python 2 and 3
        return False
    return True
|
{
"content_hash": "e2286469d766453fbc3314c3a1d9800d",
"timestamp": "",
"source": "github",
"line_count": 735,
"max_line_length": 77,
"avg_line_length": 33.87619047619047,
"alnum_prop": 0.5320695610265472,
"repo_name": "jefftc/changlab",
"id": "dc05730cf8405dbca893b7ca97adbd4fe1760503",
"size": "24899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arrayio/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "116953"
},
{
"name": "CSS",
"bytes": "75418"
},
{
"name": "Groff",
"bytes": "10237"
},
{
"name": "HTML",
"bytes": "200459"
},
{
"name": "JavaScript",
"bytes": "159618"
},
{
"name": "Makefile",
"bytes": "11719"
},
{
"name": "Python",
"bytes": "9300228"
},
{
"name": "R",
"bytes": "94670"
},
{
"name": "Shell",
"bytes": "63514"
},
{
"name": "TeX",
"bytes": "64"
}
],
"symlink_target": ""
}
|
from django import forms
from django.contrib.auth import get_user_model
from .models import UserAddress
User = get_user_model()
class UserAddressForm(forms.ModelForm):
    # NOTE(review): 'default' is not listed in Meta.fields, so it is not
    # saved on the model automatically — presumably consumed by the view;
    # confirm.
    default = forms.BooleanField(label='Make Default', required=False)

    class Meta:
        model = UserAddress
        fields = ['address', 'address2', 'city', 'state', 'country', 'zipcode', 'phone']
class LoginForm(forms.Form):
    """Username/password login form with per-field validation."""
    username = forms.CharField()
    password = forms.CharField(widget=forms.PasswordInput())

    def clean_username(self):
        """Reject usernames that do not belong to a registered user."""
        username = self.cleaned_data.get('username')
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            raise forms.ValidationError('Are you sure you are registered? We cannot find this user.')
        return username

    def clean_password(self):
        """Check the password against the stored hash for the given user."""
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            # BUG FIX: was a bare 'except:' which silently swallowed every
            # error (DB failures included); narrowed to the expected case.
            user = None
        if user is not None and not user.check_password(password):
            raise forms.ValidationError('Invalid Password')
        elif user is None:
            # Unknown user is already reported by clean_username; returning
            # None here mirrors the original behavior (password dropped
            # from cleaned_data).
            pass
        else:
            return password
class RegistrationForm(forms.ModelForm):
    # Sign-up form: collects username + email plus a double-entered password.
    email = forms.EmailField(label='Your Email')
    password1 = forms.CharField(label='Password', \
        widget=forms.PasswordInput())
    password2 = forms.CharField(label='Password Confirmation', \
        widget=forms.PasswordInput())

    class Meta:
        model = User
        fields = ['username', 'email']

    def clean_password2(self):
        # Both password entries must match.
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError('Passwords do not match')
        return password2

    def clean_email(self):
        # Enforce unique e-mail addresses across users.
        email = self.cleaned_data.get('email')
        user_count = User.objects.filter(email=email).count()
        if user_count > 0:
            raise forms.ValidationError('This email has already been registered. Please check and try again or reset your password.')
        return email

    def save(self, commit=True):
        # Hash the password via set_password before persisting.
        user = super(RegistrationForm, self).save(commit=False)
        user.set_password(self.cleaned_data['password1'])
        if commit:
            user.save()
        return user
|
{
"content_hash": "bfde5325bbc94cc1516a61e13a116617",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 133,
"avg_line_length": 34.013513513513516,
"alnum_prop": 0.6301152165276123,
"repo_name": "loafbaker/django_ecommerce1",
"id": "ab5787442f9dba6afd18773f61ef0fcdd84c62b3",
"size": "2517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "24075"
},
{
"name": "Python",
"bytes": "166101"
}
],
"symlink_target": ""
}
|
import theano.tensor as T
from theano import function
from theano import Param
from theano import shared
# Shared variable holding the accumulator's state, persisted across calls.
state = shared(0)
inc = T.iscalar('inc') # 'iscalar': a symbolic 32-bit integer scalar (the increment)
# accumulator(i) returns the *current* state, then updates it to state + i.
accumulator = function([inc], state, updates=[(state, state + inc)])
# So, this is the accumulator pattern I'm used to.
|
{
"content_hash": "38e687689860b286da814388e70d0fe5",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 68,
"avg_line_length": 26,
"alnum_prop": 0.7272727272727273,
"repo_name": "h-mayorquin/mnist_deep_neural_network_BPNNs",
"id": "745ee7b5457178db7cd5bb64c30469d8234a12be",
"size": "286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_learning/tutorial_shared.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "77059"
}
],
"symlink_target": ""
}
|
__script__ = "MyFont.py"
__author__ = "Ppic, Frost"
__credits__ = "Team XBMC-Passion, http://passion-xbmc.org/"
__platform__ = "xbmc media center, [LINUX, OS X, WIN32, XBOX]"
__date__ = "08-01-2010"
__version__ = "1.1"
# Python library that adds a font to the current skin. It needs
# resources/fonts/<font_filename>.ttf next to this script; calling addFont()
# automatically registers that font in the active skin's Font.xml files.
import os
import elementtree.ElementTree as ET
import shutil
from traceback import print_exc
import xbmc
# Where the active skin keeps its fonts, and where this script ships its own.
skin_font_path = xbmc.translatePath("special://skin/fonts/")
script_font_path = os.path.join(os.getcwd() , "resources" , "fonts")
skin_dir = xbmc.translatePath("special://skin/")
# Sub-directories of the skin (one Font.xml per resolution folder).
list_dir = os.listdir( skin_dir )
print skin_font_path
print script_font_path
def getFontsXML():
    """Return the path of every Font.xml found one level below the skin dir.

    Scans each sub-directory of the current skin (module-level `list_dir` /
    `skin_dir`) and collects existing Font.xml files. Best-effort: on error
    the traceback is logged and the (possibly partial) list is returned.
    """
    fontxml_paths = []
    try:
        for item in list_dir:
            item = os.path.join( skin_dir, item )
            if os.path.isdir( item ):
                font_xml = os.path.join( item, "Font.xml" )
                if os.path.exists( font_xml ):
                    fontxml_paths.append( font_xml )
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort logging intent.
        print_exc()
    return fontxml_paths
def isFontInstalled( fontxml_path, fontname ):
name = "<name>%s</name>" % fontname
if not name in file( fontxml_path, "r" ).read():
print "font name not installed!", fontname
return False
else:
print "font name already installed!", fontname
return True
def addFont( fontname, filename, size, style="", aspect="" ):
    """Register `fontname` (backed by `filename`) in every Font.xml of the skin.

    For each fontset a new <font> element is appended with <name>, <filename>,
    <size> and optional <style>/<aspect> children; the skin is then reloaded.
    Returns True when at least one Font.xml was updated, False otherwise.
    The .tail assignments reproduce the skin files' tab/newline formatting.
    """
    try:
        reload_skin = False
        fontxml_paths = getFontsXML()
        if fontxml_paths:
            for fontxml_path in fontxml_paths:
                print "analyse du fichier: " + fontxml_path
                # Skip fontsets that already declare this font name.
                if not isFontInstalled( fontxml_path, fontname ):
                    tree = ET.parse(fontxml_path)
                    root = tree.getroot()
                    print "modification du fichier: " + fontxml_path
                    for sets in root.getchildren():
                        # Fix the previous last <font>'s tail so the new entry
                        # keeps the file's indentation style.
                        sets.findall( "font" )[ -1 ].tail = "\n\t\t" #"\n\n\t\t"
                        new = ET.SubElement(sets, "font")
                        new.text, new.tail = "\n\t\t\t", "\n\t"
                        subnew1=ET.SubElement(new ,"name")
                        subnew1.text = fontname
                        subnew1.tail = "\n\t\t\t"
                        subnew2=ET.SubElement(new ,"filename")
                        # The "Arial" fontset always maps to Arial.ttf; other
                        # fontsets get the supplied filename.
                        subnew2.text = ( filename, "Arial.ttf" )[ sets.attrib.get( "id" ) == "Arial" ]
                        subnew2.tail = "\n\t\t\t"
                        subnew3=ET.SubElement(new ,"size")
                        subnew3.text = size
                        subnew3.tail = "\n\t\t\t"
                        last_elem = subnew3
                        if style in [ "normal", "bold", "italics", "bolditalics" ]:
                            subnew4=ET.SubElement(new ,"style")
                            subnew4.text = style
                            subnew4.tail = "\n\t\t\t"
                            last_elem = subnew4
                        if aspect:
                            subnew5=ET.SubElement(new ,"aspect")
                            subnew5.text = aspect
                            subnew5.tail = "\n\t\t\t"
                            last_elem = subnew5
                        reload_skin = True
                        last_elem.tail = "\n\t\t"
                    tree.write(fontxml_path)
                    reload_skin = True
    # NOTE(review): bare except — swallows everything incl. KeyboardInterrupt;
    # kept as-is here, only the traceback is logged.
    except:
        print_exc()
    if reload_skin:
        # Copy the .ttf into the skin's fonts dir if it is missing, then
        # reload the skin so the new font takes effect.
        if not os.path.exists( os.path.join( skin_font_path, filename ) ) and os.path.exists( os.path.join( script_font_path, filename ) ):
            shutil.copyfile( os.path.join( script_font_path, filename ), os.path.join( skin_font_path, filename ) )
        xbmc.executebuiltin( "XBMC.ReloadSkin()" )
        return True
    return False
if __name__ == "__main__":
    # Demo entry point: register the bundled font at size 20.
    font_constant = "sportlive_font"
    print "update font: %s" % addFont( font_constant+"13", "sportlive.ttf", "20" )
    #print "update font: %s" % addfont( "sportlive13" , "sportlive.ttf" , "20" )
    #print "update font: %s" % addfont( "sportlive24" , "sportlive.ttf" , "24" )
    #print "update font: %s" % addfont( "sportlive45" , "sportlive.ttf" , "45" )
|
{
"content_hash": "c60957fe264d5727f918d6ba5d1ff506",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 169,
"avg_line_length": 40.94444444444444,
"alnum_prop": 0.5058796924468566,
"repo_name": "rolapp/plugin.video.zattooboxExt.beta",
"id": "585474c49a9c33420a370747c92e3e9c872e2381",
"size": "4443",
"binary": false,
"copies": "1",
"ref": "refs/heads/placeholder",
"path": "resources/MyFont.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "343026"
}
],
"symlink_target": ""
}
|
"""Descriptors essentially contain exactly the information found in a .proto
file, in types that make this information accessible in Python.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import threading
import warnings
from google.protobuf.internal import api_implementation
_USE_C_DESCRIPTORS = False
if api_implementation.Type() != 'python':
# Used by MakeDescriptor in cpp mode
import binascii
import os
# pylint: disable=protected-access
_message = api_implementation._c_module
# TODO(jieluo): Remove this import after fix api_implementation
if _message is None:
from google.protobuf.pyext import _message
_USE_C_DESCRIPTORS = True
class Error(Exception):
  """Root of the exception hierarchy raised by this module."""
class TypeTransformationError(Error):
  """Raised when a Python proto type cannot be mapped to its C++ counterpart."""
if _USE_C_DESCRIPTORS:
  # This metaclass allows to override the behavior of code like
  # isinstance(my_descriptor, FieldDescriptor)
  # and make it return True when the descriptor is an instance of the extension
  # type written in C++.
  class DescriptorMetaclass(type):
    def __instancecheck__(cls, obj):
      """Also treat instances of cls._C_DESCRIPTOR_CLASS as instances of cls."""
      if super(DescriptorMetaclass, cls).__instancecheck__(obj):
        return True
      if isinstance(obj, cls._C_DESCRIPTOR_CLASS):
        return True
      return False
else:
  # The standard metaclass; nothing changes.
  DescriptorMetaclass = type
class _Lock(object):
"""Wrapper class of threading.Lock(), which is allowed by 'with'."""
def __new__(cls):
self = object.__new__(cls)
self._lock = threading.Lock() # pylint: disable=protected-access
return self
def __enter__(self):
self._lock.acquire()
def __exit__(self, exc_type, exc_value, exc_tb):
self._lock.release()
_lock = threading.Lock()
def _Deprecated(name):
if _Deprecated.count > 0:
_Deprecated.count -= 1
warnings.warn(
'Call to deprecated create function %s(). Note: Create unlinked '
'descriptors is going to go away. Please use get/find descriptors from '
'generated code or query the descriptor_pool.'
% name,
category=DeprecationWarning, stacklevel=3)
# Deprecated warnings will print 100 times at most which should be enough for
# users to notice and do not cause timeout.
_Deprecated.count = 100
_internal_create_key = object()
class DescriptorBase(metaclass=DescriptorMetaclass):
  """Descriptors base class.
  This class is the base of all descriptor classes. It provides common options
  related functionality.
  Attributes:
    has_options: True if the descriptor has non-default options. Usually it
    is not necessary to read this -- just call GetOptions() which will
    happily return the default instance. However, it's sometimes useful
    for efficiency, and also useful inside the protobuf implementation to
    avoid some bootstrapping issues.
  """
  if _USE_C_DESCRIPTORS:
    # The class, or tuple of classes, that are considered as "virtual
    # subclasses" of this descriptor class.
    _C_DESCRIPTOR_CLASS = ()
  def __init__(self, options, serialized_options, options_class_name):
    """Initialize the descriptor given its options message and the name of the
    class of the options message. The name of the class is required in case
    the options message is None and has to be created.
    """
    self._options = options
    self._options_class_name = options_class_name
    # Raw bytes; parsed lazily by GetOptions() on first use.
    self._serialized_options = serialized_options
    # Does this descriptor have non-default options?
    self.has_options = (options is not None) or (serialized_options is not None)
  def _SetOptions(self, options, options_class_name):
    """Sets the descriptor's options
    This function is used in generated proto2 files to update descriptor
    options. It must not be used outside proto2.
    """
    self._options = options
    self._options_class_name = options_class_name
    # Does this descriptor have non-default options?
    self.has_options = options is not None
  def GetOptions(self):
    """Retrieves descriptor options.
    This method returns the options set or creates the default options for the
    descriptor.
    """
    if self._options:
      return self._options
    # Deferred import: descriptor_pb2 is itself built from descriptors, so a
    # top-level import would create a bootstrapping cycle.
    from google.protobuf import descriptor_pb2
    try:
      options_class = getattr(descriptor_pb2,
                              self._options_class_name)
    except AttributeError:
      raise RuntimeError('Unknown options class name %s!' %
                         (self._options_class_name))
    # Lock so concurrent first calls all observe a fully parsed message.
    with _lock:
      if self._serialized_options is None:
        self._options = options_class()
      else:
        self._options = _ParseOptions(options_class(),
                                      self._serialized_options)
      return self._options
class _NestedDescriptorBase(DescriptorBase):
  """Common class for descriptors that can be nested."""
  def __init__(self, options, options_class_name, name, full_name,
               file, containing_type, serialized_start=None,
               serialized_end=None, serialized_options=None):
    """Constructor.
    Args:
      options: Protocol message options or None
        to use default message options.
      options_class_name (str): The class name of the above options.
      name (str): Name of this protocol message type.
      full_name (str): Fully-qualified name of this protocol message type,
        which will include protocol "package" name and the name of any
        enclosing types.
      file (FileDescriptor): Reference to file info.
      containing_type: if provided, this is a nested descriptor, with this
        descriptor as parent, otherwise None.
      serialized_start: The start index (inclusive) in block in the
        file.serialized_pb that describes this descriptor.
      serialized_end: The end index (exclusive) in block in the
        file.serialized_pb that describes this descriptor.
      serialized_options: Protocol message serialized options or None.
    """
    super(_NestedDescriptorBase, self).__init__(
        options, serialized_options, options_class_name)
    self.name = name
    # TODO(falk): Add function to calculate full_name instead of having it in
    # memory?
    self.full_name = full_name
    self.file = file
    self.containing_type = containing_type
    self._serialized_start = serialized_start
    self._serialized_end = serialized_end
  def CopyToProto(self, proto):
    """Copies this to the matching proto in descriptor_pb2.
    Args:
      proto: An empty proto instance from descriptor_pb2.
    Raises:
      Error: If self couldn't be serialized, due to too few constructor
        arguments.
    """
    if (self.file is not None and
        self._serialized_start is not None and
        self._serialized_end is not None):
      # Re-parse this descriptor's slice of the file's serialized bytes.
      proto.ParseFromString(self.file.serialized_pb[
          self._serialized_start:self._serialized_end])
    else:
      raise Error('Descriptor does not contain serialization.')
class Descriptor(_NestedDescriptorBase):
  """Descriptor for a protocol message type.
  Attributes:
    name (str): Name of this protocol message type.
    full_name (str): Fully-qualified name of this protocol message type,
      which will include protocol "package" name and the name of any
      enclosing types.
    containing_type (Descriptor): Reference to the descriptor of the type
      containing us, or None if this is top-level.
    fields (list[FieldDescriptor]): Field descriptors for all fields in
      this type.
    fields_by_number (dict(int, FieldDescriptor)): Same
      :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed
      by "number" attribute in each FieldDescriptor.
    fields_by_name (dict(str, FieldDescriptor)): Same
      :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed by
      "name" attribute in each :class:`FieldDescriptor`.
    nested_types (list[Descriptor]): Descriptor references
      for all protocol message types nested within this one.
    nested_types_by_name (dict(str, Descriptor)): Same Descriptor
      objects as in :attr:`nested_types`, but indexed by "name" attribute
      in each Descriptor.
    enum_types (list[EnumDescriptor]): :class:`EnumDescriptor` references
      for all enums contained within this type.
    enum_types_by_name (dict(str, EnumDescriptor)): Same
      :class:`EnumDescriptor` objects as in :attr:`enum_types`, but
      indexed by "name" attribute in each EnumDescriptor.
    enum_values_by_name (dict(str, EnumValueDescriptor)): Dict mapping
      from enum value name to :class:`EnumValueDescriptor` for that value.
    extensions (list[FieldDescriptor]): All extensions defined directly
      within this message type (NOT within a nested type).
    extensions_by_name (dict(str, FieldDescriptor)): Same FieldDescriptor
      objects as :attr:`extensions`, but indexed by "name" attribute of each
      FieldDescriptor.
    is_extendable (bool): Does this type define any extension ranges?
    oneofs (list[OneofDescriptor]): The list of descriptors for oneof fields
      in this message.
    oneofs_by_name (dict(str, OneofDescriptor)): Same objects as in
      :attr:`oneofs`, but indexed by "name" attribute.
    file (FileDescriptor): Reference to file descriptor.
  """
  if _USE_C_DESCRIPTORS:
    _C_DESCRIPTOR_CLASS = _message.Descriptor
    def __new__(
        cls,
        name=None,
        full_name=None,
        filename=None,
        containing_type=None,
        fields=None,
        nested_types=None,
        enum_types=None,
        extensions=None,
        options=None,
        serialized_options=None,
        is_extendable=True,
        extension_ranges=None,
        oneofs=None,
        file=None,  # pylint: disable=redefined-builtin
        serialized_start=None,
        serialized_end=None,
        syntax=None,
        create_key=None):
      # In cpp mode descriptors may only come from generated code; return the
      # already-registered C++ descriptor from the default pool instead of
      # constructing a Python one.
      _message.Message._CheckCalledFromGeneratedFile()
      return _message.default_pool.FindMessageTypeByName(full_name)
  # NOTE(tmarek): The file argument redefining a builtin is nothing we can
  # fix right now since we don't know how many clients already rely on the
  # name of the argument.
  def __init__(self, name, full_name, filename, containing_type, fields,
               nested_types, enum_types, extensions, options=None,
               serialized_options=None,
               is_extendable=True, extension_ranges=None, oneofs=None,
               file=None, serialized_start=None, serialized_end=None,  # pylint: disable=redefined-builtin
               syntax=None, create_key=None):
    """Arguments to __init__() are as described in the description
    of Descriptor fields above.
    Note that filename is an obsolete argument, that is not used anymore.
    Please use file.name to access this as an attribute.
    """
    if create_key is not _internal_create_key:
      _Deprecated('Descriptor')
    super(Descriptor, self).__init__(
        options, 'MessageOptions', name, full_name, file,
        containing_type, serialized_start=serialized_start,
        serialized_end=serialized_end, serialized_options=serialized_options)
    # We have fields in addition to fields_by_name and fields_by_number,
    # so that:
    # 1. Clients can index fields by "order in which they're listed."
    # 2. Clients can easily iterate over all fields with the terse
    # syntax: for f in descriptor.fields: ...
    self.fields = fields
    for field in self.fields:
      field.containing_type = self
    self.fields_by_number = dict((f.number, f) for f in fields)
    self.fields_by_name = dict((f.name, f) for f in fields)
    # Computed lazily by the fields_by_camelcase_name property.
    self._fields_by_camelcase_name = None
    self.nested_types = nested_types
    for nested_type in nested_types:
      nested_type.containing_type = self
    self.nested_types_by_name = dict((t.name, t) for t in nested_types)
    self.enum_types = enum_types
    for enum_type in self.enum_types:
      enum_type.containing_type = self
    self.enum_types_by_name = dict((t.name, t) for t in enum_types)
    self.enum_values_by_name = dict(
        (v.name, v) for t in enum_types for v in t.values)
    self.extensions = extensions
    for extension in self.extensions:
      extension.extension_scope = self
    self.extensions_by_name = dict((f.name, f) for f in extensions)
    self.is_extendable = is_extendable
    self.extension_ranges = extension_ranges
    self.oneofs = oneofs if oneofs is not None else []
    self.oneofs_by_name = dict((o.name, o) for o in self.oneofs)
    for oneof in self.oneofs:
      oneof.containing_type = self
    self.syntax = syntax or "proto2"
  @property
  def fields_by_camelcase_name(self):
    """Same FieldDescriptor objects as in :attr:`fields`, but indexed by
    :attr:`FieldDescriptor.camelcase_name`.
    """
    if self._fields_by_camelcase_name is None:
      # Built on first access and cached for subsequent lookups.
      self._fields_by_camelcase_name = dict(
          (f.camelcase_name, f) for f in self.fields)
    return self._fields_by_camelcase_name
  def EnumValueName(self, enum, value):
    """Returns the string name of an enum value.
    This is just a small helper method to simplify a common operation.
    Args:
      enum: string name of the Enum.
      value: int, value of the enum.
    Returns:
      string name of the enum value.
    Raises:
      KeyError if either the Enum doesn't exist or the value is not a valid
        value for the enum.
    """
    return self.enum_types_by_name[enum].values_by_number[value].name
  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.DescriptorProto.
    Args:
      proto: An empty descriptor_pb2.DescriptorProto.
    """
    # This function is overridden to give a better doc comment.
    super(Descriptor, self).CopyToProto(proto)
  # TODO(robinson): We should have aggressive checking here,
  # for example:
  # * If you specify a repeated field, you should not be allowed
  # to specify a default value.
  # * [Other examples here as needed].
  #
  # TODO(robinson): for this and other *Descriptor classes, we
  # might also want to lock things down aggressively (e.g.,
  # prevent clients from setting the attributes). Having
  # stronger invariants here in general will reduce the number
  # of runtime checks we must do in reflection.py...
class FieldDescriptor(DescriptorBase):
  """Descriptor for a single field in a .proto file.
  Attributes:
    name (str): Name of this field, exactly as it appears in .proto.
    full_name (str): Name of this field, including containing scope. This is
      particularly relevant for extensions.
    index (int): Dense, 0-indexed index giving the order that this
      field textually appears within its message in the .proto file.
    number (int): Tag number declared for this field in the .proto file.
    type (int): (One of the TYPE_* constants below) Declared type.
    cpp_type (int): (One of the CPPTYPE_* constants below) C++ type used to
      represent this field.
    label (int): (One of the LABEL_* constants below) Tells whether this
      field is optional, required, or repeated.
    has_default_value (bool): True if this field has a default value defined,
      otherwise false.
    default_value (Varies): Default value of this field. Only
      meaningful for non-repeated scalar fields. Repeated fields
      should always set this to [], and non-repeated composite
      fields should always set this to None.
    containing_type (Descriptor): Descriptor of the protocol message
      type that contains this field. Set by the Descriptor constructor
      if we're passed into one.
      Somewhat confusingly, for extension fields, this is the
      descriptor of the EXTENDED message, not the descriptor
      of the message containing this field. (See is_extension and
      extension_scope below).
    message_type (Descriptor): If a composite field, a descriptor
      of the message type contained in this field. Otherwise, this is None.
    enum_type (EnumDescriptor): If this field contains an enum, a
      descriptor of that enum. Otherwise, this is None.
    is_extension: True iff this describes an extension field.
    extension_scope (Descriptor): Only meaningful if is_extension is True.
      Gives the message that immediately contains this extension field.
      Will be None iff we're a top-level (file-level) extension field.
    options (descriptor_pb2.FieldOptions): Protocol message field options or
      None to use default field options.
    containing_oneof (OneofDescriptor): If the field is a member of a oneof
      union, contains its descriptor. Otherwise, None.
    file (FileDescriptor): Reference to file descriptor.
  """
  # Must be consistent with C++ FieldDescriptor::Type enum in
  # descriptor.h.
  #
  # TODO(robinson): Find a way to eliminate this repetition.
  TYPE_DOUBLE = 1
  TYPE_FLOAT = 2
  TYPE_INT64 = 3
  TYPE_UINT64 = 4
  TYPE_INT32 = 5
  TYPE_FIXED64 = 6
  TYPE_FIXED32 = 7
  TYPE_BOOL = 8
  TYPE_STRING = 9
  TYPE_GROUP = 10
  TYPE_MESSAGE = 11
  TYPE_BYTES = 12
  TYPE_UINT32 = 13
  TYPE_ENUM = 14
  TYPE_SFIXED32 = 15
  TYPE_SFIXED64 = 16
  TYPE_SINT32 = 17
  TYPE_SINT64 = 18
  MAX_TYPE = 18
  # Must be consistent with C++ FieldDescriptor::CppType enum in
  # descriptor.h.
  #
  # TODO(robinson): Find a way to eliminate this repetition.
  CPPTYPE_INT32 = 1
  CPPTYPE_INT64 = 2
  CPPTYPE_UINT32 = 3
  CPPTYPE_UINT64 = 4
  CPPTYPE_DOUBLE = 5
  CPPTYPE_FLOAT = 6
  CPPTYPE_BOOL = 7
  CPPTYPE_ENUM = 8
  CPPTYPE_STRING = 9
  CPPTYPE_MESSAGE = 10
  MAX_CPPTYPE = 10
  # Maps each wire-level TYPE_* constant to the CPPTYPE_* used to store it.
  _PYTHON_TO_CPP_PROTO_TYPE_MAP = {
      TYPE_DOUBLE: CPPTYPE_DOUBLE,
      TYPE_FLOAT: CPPTYPE_FLOAT,
      TYPE_ENUM: CPPTYPE_ENUM,
      TYPE_INT64: CPPTYPE_INT64,
      TYPE_SINT64: CPPTYPE_INT64,
      TYPE_SFIXED64: CPPTYPE_INT64,
      TYPE_UINT64: CPPTYPE_UINT64,
      TYPE_FIXED64: CPPTYPE_UINT64,
      TYPE_INT32: CPPTYPE_INT32,
      TYPE_SFIXED32: CPPTYPE_INT32,
      TYPE_SINT32: CPPTYPE_INT32,
      TYPE_UINT32: CPPTYPE_UINT32,
      TYPE_FIXED32: CPPTYPE_UINT32,
      TYPE_BYTES: CPPTYPE_STRING,
      TYPE_STRING: CPPTYPE_STRING,
      TYPE_BOOL: CPPTYPE_BOOL,
      TYPE_MESSAGE: CPPTYPE_MESSAGE,
      TYPE_GROUP: CPPTYPE_MESSAGE
      }
  # Must be consistent with C++ FieldDescriptor::Label enum in
  # descriptor.h.
  #
  # TODO(robinson): Find a way to eliminate this repetition.
  LABEL_OPTIONAL = 1
  LABEL_REQUIRED = 2
  LABEL_REPEATED = 3
  MAX_LABEL = 3
  # Must be consistent with C++ constants kMaxNumber, kFirstReservedNumber,
  # and kLastReservedNumber in descriptor.h
  MAX_FIELD_NUMBER = (1 << 29) - 1
  FIRST_RESERVED_FIELD_NUMBER = 19000
  LAST_RESERVED_FIELD_NUMBER = 19999
  if _USE_C_DESCRIPTORS:
    _C_DESCRIPTOR_CLASS = _message.FieldDescriptor
    def __new__(cls, name, full_name, index, number, type, cpp_type, label,
                default_value, message_type, enum_type, containing_type,
                is_extension, extension_scope, options=None,
                serialized_options=None,
                has_default_value=True, containing_oneof=None, json_name=None,
                file=None, create_key=None):  # pylint: disable=redefined-builtin
      # cpp mode: return the existing C++ descriptor from the default pool.
      _message.Message._CheckCalledFromGeneratedFile()
      if is_extension:
        return _message.default_pool.FindExtensionByName(full_name)
      else:
        return _message.default_pool.FindFieldByName(full_name)
  def __init__(self, name, full_name, index, number, type, cpp_type, label,
               default_value, message_type, enum_type, containing_type,
               is_extension, extension_scope, options=None,
               serialized_options=None,
               has_default_value=True, containing_oneof=None, json_name=None,
               file=None, create_key=None):  # pylint: disable=redefined-builtin
    """The arguments are as described in the description of FieldDescriptor
    attributes above.
    Note that containing_type may be None, and may be set later if necessary
    (to deal with circular references between message types, for example).
    Likewise for extension_scope.
    """
    if create_key is not _internal_create_key:
      _Deprecated('FieldDescriptor')
    super(FieldDescriptor, self).__init__(
        options, serialized_options, 'FieldOptions')
    self.name = name
    self.full_name = full_name
    self.file = file
    # Computed lazily by the camelcase_name property.
    self._camelcase_name = None
    if json_name is None:
      self.json_name = _ToJsonName(name)
    else:
      self.json_name = json_name
    self.index = index
    self.number = number
    self.type = type
    self.cpp_type = cpp_type
    self.label = label
    self.has_default_value = has_default_value
    self.default_value = default_value
    self.containing_type = containing_type
    self.message_type = message_type
    self.enum_type = enum_type
    self.is_extension = is_extension
    self.extension_scope = extension_scope
    self.containing_oneof = containing_oneof
    if api_implementation.Type() == 'python':
      self._cdescriptor = None
    else:
      # cpp mode: keep a handle on the equivalent C++ descriptor.
      if is_extension:
        self._cdescriptor = _message.default_pool.FindExtensionByName(full_name)
      else:
        self._cdescriptor = _message.default_pool.FindFieldByName(full_name)
  @property
  def camelcase_name(self):
    """Camelcase name of this field.
    Returns:
      str: the name in CamelCase.
    """
    if self._camelcase_name is None:
      self._camelcase_name = _ToCamelCase(self.name)
    return self._camelcase_name
  @property
  def has_presence(self):
    """Whether the field distinguishes between unpopulated and default values.
    Raises:
      RuntimeError: singular field that is not linked with message nor file.
    """
    if self.label == FieldDescriptor.LABEL_REPEATED:
      return False
    # Message-typed and oneof fields always track presence explicitly.
    if (self.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE or
        self.containing_oneof):
      return True
    # Otherwise presence depends on the proto syntax (proto2 vs proto3).
    if hasattr(self.file, 'syntax'):
      return self.file.syntax == 'proto2'
    if hasattr(self.message_type, 'syntax'):
      return self.message_type.syntax == 'proto2'
    raise RuntimeError(
        'has_presence is not ready to use because field %s is not'
        ' linked with message type nor file' % self.full_name)
  @staticmethod
  def ProtoTypeToCppProtoType(proto_type):
    """Converts from a Python proto type to a C++ Proto Type.
    The Python ProtocolBuffer classes specify both the 'Python' datatype and the
    'C++' datatype - and they're not the same. This helper method should
    translate from one to another.
    Args:
      proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*)
    Returns:
      int: descriptor.FieldDescriptor.CPPTYPE_*, the C++ type.
    Raises:
      TypeTransformationError: when the Python proto type isn't known.
    """
    try:
      return FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP[proto_type]
    except KeyError:
      raise TypeTransformationError('Unknown proto_type: %s' % proto_type)
class EnumDescriptor(_NestedDescriptorBase):
  """Descriptor for an enum defined in a .proto file.
  Attributes:
    name (str): Name of the enum type.
    full_name (str): Full name of the type, including package name
      and any enclosing type(s).
    values (list[EnumValueDescriptor]): List of the values
      in this enum.
    values_by_name (dict(str, EnumValueDescriptor)): Same as :attr:`values`,
      but indexed by the "name" field of each EnumValueDescriptor.
    values_by_number (dict(int, EnumValueDescriptor)): Same as :attr:`values`,
      but indexed by the "number" field of each EnumValueDescriptor.
    containing_type (Descriptor): Descriptor of the immediate containing
      type of this enum, or None if this is an enum defined at the
      top level in a .proto file. Set by Descriptor's constructor
      if we're passed into one.
    file (FileDescriptor): Reference to file descriptor.
    options (descriptor_pb2.EnumOptions): Enum options message or
      None to use default enum options.
  """
  if _USE_C_DESCRIPTORS:
    _C_DESCRIPTOR_CLASS = _message.EnumDescriptor
    def __new__(cls, name, full_name, filename, values,
                containing_type=None, options=None,
                serialized_options=None, file=None,  # pylint: disable=redefined-builtin
                serialized_start=None, serialized_end=None, create_key=None):
      # cpp mode: return the existing C++ descriptor from the default pool.
      _message.Message._CheckCalledFromGeneratedFile()
      return _message.default_pool.FindEnumTypeByName(full_name)
  def __init__(self, name, full_name, filename, values,
               containing_type=None, options=None,
               serialized_options=None, file=None,  # pylint: disable=redefined-builtin
               serialized_start=None, serialized_end=None, create_key=None):
    """Arguments are as described in the attribute description above.
    Note that filename is an obsolete argument, that is not used anymore.
    Please use file.name to access this as an attribute.
    """
    if create_key is not _internal_create_key:
      _Deprecated('EnumDescriptor')
    super(EnumDescriptor, self).__init__(
        options, 'EnumOptions', name, full_name, file,
        containing_type, serialized_start=serialized_start,
        serialized_end=serialized_end, serialized_options=serialized_options)
    self.values = values
    for value in self.values:
      value.type = self
    self.values_by_name = dict((v.name, v) for v in values)
    # Values are reversed to ensure that the first alias is retained.
    self.values_by_number = dict((v.number, v) for v in reversed(values))
  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.EnumDescriptorProto.
    Args:
      proto (descriptor_pb2.EnumDescriptorProto): An empty descriptor proto.
    """
    # This function is overridden to give a better doc comment.
    super(EnumDescriptor, self).CopyToProto(proto)
class EnumValueDescriptor(DescriptorBase):
  """Descriptor for a single value within an enum.
  Attributes:
    name (str): Name of this value.
    index (int): Dense, 0-indexed index giving the order that this
      value appears textually within its enum in the .proto file.
    number (int): Actual number assigned to this enum value.
    type (EnumDescriptor): :class:`EnumDescriptor` to which this value
      belongs. Set by :class:`EnumDescriptor`'s constructor if we're
      passed into one.
    options (descriptor_pb2.EnumValueOptions): Enum value options message or
      None to use default enum value options options.
  """
  if _USE_C_DESCRIPTORS:
    _C_DESCRIPTOR_CLASS = _message.EnumValueDescriptor
    def __new__(cls, name, index, number,
                type=None,  # pylint: disable=redefined-builtin
                options=None, serialized_options=None, create_key=None):
      _message.Message._CheckCalledFromGeneratedFile()
      # There is no way we can build a complete EnumValueDescriptor with the
      # given parameters (the name of the Enum is not known, for example).
      # Fortunately generated files just pass it to the EnumDescriptor()
      # constructor, which will ignore it, so returning None is good enough.
      return None
  def __init__(self, name, index, number,
               type=None,  # pylint: disable=redefined-builtin
               options=None, serialized_options=None, create_key=None):
    """Arguments are as described in the attribute description above."""
    if create_key is not _internal_create_key:
      _Deprecated('EnumValueDescriptor')
    super(EnumValueDescriptor, self).__init__(
        options, serialized_options, 'EnumValueOptions')
    self.name = name
    self.index = index
    self.number = number
    # Back-reference; filled in by EnumDescriptor when type is None here.
    self.type = type
class OneofDescriptor(DescriptorBase):
  """Descriptor for a oneof field.
  Attributes:
    name (str): Name of the oneof field.
    full_name (str): Full name of the oneof field, including package name.
    index (int): 0-based index giving the order of the oneof field inside
      its containing type.
    containing_type (Descriptor): :class:`Descriptor` of the protocol message
      type that contains this field. Set by the :class:`Descriptor` constructor
      if we're passed into one.
    fields (list[FieldDescriptor]): The list of field descriptors this
      oneof can contain.
  """
  if _USE_C_DESCRIPTORS:
    _C_DESCRIPTOR_CLASS = _message.OneofDescriptor
    def __new__(
        cls, name, full_name, index, containing_type, fields, options=None,
        serialized_options=None, create_key=None):
      # cpp mode: return the existing C++ descriptor from the default pool.
      _message.Message._CheckCalledFromGeneratedFile()
      return _message.default_pool.FindOneofByName(full_name)
  def __init__(
      self, name, full_name, index, containing_type, fields, options=None,
      serialized_options=None, create_key=None):
    """Arguments are as described in the attribute description above."""
    if create_key is not _internal_create_key:
      _Deprecated('OneofDescriptor')
    super(OneofDescriptor, self).__init__(
        options, serialized_options, 'OneofOptions')
    self.name = name
    self.full_name = full_name
    self.index = index
    # May be None at construction time; Descriptor's constructor sets it.
    self.containing_type = containing_type
    self.fields = fields
class ServiceDescriptor(_NestedDescriptorBase):
  """Descriptor for a service.
  Attributes:
    name (str): Name of the service.
    full_name (str): Full name of the service, including package name.
    index (int): 0-indexed index giving the order that this services
      definition appears within the .proto file.
    methods (list[MethodDescriptor]): List of methods provided by this
      service.
    methods_by_name (dict(str, MethodDescriptor)): Same
      :class:`MethodDescriptor` objects as in :attr:`methods_by_name`, but
      indexed by "name" attribute in each :class:`MethodDescriptor`.
    options (descriptor_pb2.ServiceOptions): Service options message or
      None to use default service options.
    file (FileDescriptor): Reference to file info.
  """
  if _USE_C_DESCRIPTORS:
    _C_DESCRIPTOR_CLASS = _message.ServiceDescriptor
    def __new__(
        cls,
        name=None,
        full_name=None,
        index=None,
        methods=None,
        options=None,
        serialized_options=None,
        file=None,  # pylint: disable=redefined-builtin
        serialized_start=None,
        serialized_end=None,
        create_key=None):
      # cpp mode: return the existing C++ descriptor from the default pool.
      _message.Message._CheckCalledFromGeneratedFile()  # pylint: disable=protected-access
      return _message.default_pool.FindServiceByName(full_name)
  def __init__(self, name, full_name, index, methods, options=None,
               serialized_options=None, file=None,  # pylint: disable=redefined-builtin
               serialized_start=None, serialized_end=None, create_key=None):
    """Arguments are as described in the attribute description above."""
    if create_key is not _internal_create_key:
      _Deprecated('ServiceDescriptor')
    super(ServiceDescriptor, self).__init__(
        options, 'ServiceOptions', name, full_name, file,
        None, serialized_start=serialized_start,
        serialized_end=serialized_end, serialized_options=serialized_options)
    self.index = index
    self.methods = methods
    self.methods_by_name = dict((m.name, m) for m in methods)
    # Set the containing service for each method in this service.
    for method in self.methods:
      method.containing_service = self
  def FindMethodByName(self, name):
    """Searches for the specified method, and returns its descriptor.
    Args:
      name (str): Name of the method.
    Returns:
      MethodDescriptor or None: the descriptor for the requested method, if
      found.
    """
    return self.methods_by_name.get(name, None)
  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.ServiceDescriptorProto.
    Args:
      proto (descriptor_pb2.ServiceDescriptorProto): An empty descriptor proto.
    """
    # This function is overridden to give a better doc comment.
    super(ServiceDescriptor, self).CopyToProto(proto)
class MethodDescriptor(DescriptorBase):
  """Descriptor for a method in a service.

  Attributes:
    name (str): Name of the method within the service.
    full_name (str): Full name of method.
    index (int): 0-indexed index of the method inside the service.
    containing_service (ServiceDescriptor): The service that contains this
      method.
    input_type (Descriptor): The descriptor of the message that this method
      accepts.
    output_type (Descriptor): The descriptor of the message that this method
      returns.
    client_streaming (bool): Whether this method uses client streaming.
    server_streaming (bool): Whether this method uses server streaming.
    options (descriptor_pb2.MethodOptions or None): Method options message, or
      None to use default method options.
  """

  if _USE_C_DESCRIPTORS:
    _C_DESCRIPTOR_CLASS = _message.MethodDescriptor

    def __new__(cls,
                name,
                full_name,
                index,
                containing_service,
                input_type,
                output_type,
                client_streaming=False,
                server_streaming=False,
                options=None,
                serialized_options=None,
                create_key=None):
      # The C++ pool already owns the canonical descriptor; hand it back.
      _message.Message._CheckCalledFromGeneratedFile()  # pylint: disable=protected-access
      return _message.default_pool.FindMethodByName(full_name)

  def __init__(self,
               name,
               full_name,
               index,
               containing_service,
               input_type,
               output_type,
               client_streaming=False,
               server_streaming=False,
               options=None,
               serialized_options=None,
               create_key=None):
    """The arguments are as described in the description of MethodDescriptor
    attributes above.

    Note that containing_service may be None, and may be set later if necessary.
    """
    if create_key is not _internal_create_key:
      _Deprecated('MethodDescriptor')
    super(MethodDescriptor, self).__init__(
        options, serialized_options, 'MethodOptions')
    # Identity within the service.
    self.name = name
    self.full_name = full_name
    self.index = index
    self.containing_service = containing_service
    # Request/response message types and streaming flags.
    self.input_type = input_type
    self.output_type = output_type
    self.client_streaming = client_streaming
    self.server_streaming = server_streaming

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.MethodDescriptorProto.

    Args:
      proto (descriptor_pb2.MethodDescriptorProto): An empty descriptor proto.

    Raises:
      Error: If self couldn't be serialized, due to too few constructor
        arguments.
    """
    # Guard clause: without an owning service there is nothing to copy from.
    if self.containing_service is None:
      raise Error('Descriptor does not contain a service.')
    # Serialize via the owning service, then extract this method's entry.
    from google.protobuf import descriptor_pb2
    service_proto = descriptor_pb2.ServiceDescriptorProto()
    self.containing_service.CopyToProto(service_proto)
    proto.CopyFrom(service_proto.method[self.index])
class FileDescriptor(DescriptorBase):
  """Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto.

  Note that :attr:`enum_types_by_name`, :attr:`extensions_by_name`, and
  :attr:`dependencies` fields are only set by the
  :py:mod:`google.protobuf.message_factory` module, and not by the generated
  proto code.

  Attributes:
    name (str): Name of file, relative to root of source tree.
    package (str): Name of the package
    syntax (str): string indicating syntax of the file (can be "proto2" or
      "proto3")
    serialized_pb (bytes): Byte string of serialized
      :class:`descriptor_pb2.FileDescriptorProto`.
    dependencies (list[FileDescriptor]): List of other :class:`FileDescriptor`
      objects this :class:`FileDescriptor` depends on.
    public_dependencies (list[FileDescriptor]): A subset of
      :attr:`dependencies`, which were declared as "public".
    message_types_by_name (dict(str, Descriptor)): Mapping from message names
      to their :class:`Descriptor`.
    enum_types_by_name (dict(str, EnumDescriptor)): Mapping from enum names to
      their :class:`EnumDescriptor`.
    extensions_by_name (dict(str, FieldDescriptor)): Mapping from extension
      names declared at file scope to their :class:`FieldDescriptor`.
    services_by_name (dict(str, ServiceDescriptor)): Mapping from services'
      names to their :class:`ServiceDescriptor`.
    pool (DescriptorPool): The pool this descriptor belongs to. When not
      passed to the constructor, the global default pool is used.
  """

  if _USE_C_DESCRIPTORS:
    _C_DESCRIPTOR_CLASS = _message.FileDescriptor

    def __new__(cls, name, package, options=None,
                serialized_options=None, serialized_pb=None,
                dependencies=None, public_dependencies=None,
                syntax=None, pool=None, create_key=None):
      # FileDescriptor() is called from various places, not only from generated
      # files, to register dynamic proto files and messages.
      # pylint: disable=g-explicit-bool-comparison
      if serialized_pb == b'':
        # An empty (but not None) serialized_pb means the C++ generated module
        # must already be linked in; look the file up instead of re-adding it.
        try:
          return _message.default_pool.FindFileByName(name)
        except KeyError:
          raise RuntimeError('Please link in cpp generated lib for %s' % (name))
      elif serialized_pb:
        return _message.default_pool.AddSerializedFile(serialized_pb)
      else:
        return super(FileDescriptor, cls).__new__(cls)

  def __init__(self, name, package, options=None,
               serialized_options=None, serialized_pb=None,
               dependencies=None, public_dependencies=None,
               syntax=None, pool=None, create_key=None):
    """Constructor."""
    if create_key is not _internal_create_key:
      _Deprecated('FileDescriptor')
    super(FileDescriptor, self).__init__(
        options, serialized_options, 'FileOptions')
    if pool is None:
      # Lazy import avoids a circular dependency with descriptor_pool.
      from google.protobuf import descriptor_pool
      pool = descriptor_pool.Default()
    self.pool = pool
    # File identity.
    self.name = name
    self.package = package
    self.syntax = syntax or "proto2"
    self.serialized_pb = serialized_pb
    # Lookup tables, populated externally (see class docstring).
    self.message_types_by_name = {}
    self.enum_types_by_name = {}
    self.extensions_by_name = {}
    self.services_by_name = {}
    self.dependencies = (dependencies or [])
    self.public_dependencies = (public_dependencies or [])

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.FileDescriptorProto.

    Args:
      proto: An empty descriptor_pb2.FileDescriptorProto.
    """
    proto.ParseFromString(self.serialized_pb)
def _ParseOptions(message, string):
  """Parses serialized options.

  Deserializes `string` into `message` in place and returns that same
  message, so the call can be used as an expression.

  This helper function is used to parse serialized options in generated
  proto2 files. It must not be used outside proto2.
  """
  message.ParseFromString(string)
  return message
def _ToCamelCase(name):
"""Converts name to camel-case and returns it."""
capitalize_next = False
result = []
for c in name:
if c == '_':
if result:
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
# Lower-case the first letter.
if result and result[0].isupper():
result[0] = result[0].lower()
return ''.join(result)
def _OptionsOrNone(descriptor_proto):
"""Returns the value of the field `options`, or None if it is not set."""
if descriptor_proto.HasField('options'):
return descriptor_proto.options
else:
return None
def _ToJsonName(name):
"""Converts name to Json name and returns it."""
capitalize_next = False
result = []
for c in name:
if c == '_':
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
return ''.join(result)
def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True,
                   syntax=None):
  """Make a protobuf Descriptor given a DescriptorProto protobuf.

  Handles nested descriptors. Note that this is limited to the scope of defining
  a message inside of another message. Composite fields can currently only be
  resolved if the message is defined in the same scope as the field.

  Args:
    desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
    package: Optional package name for the new message Descriptor (string).
    build_file_if_cpp: Update the C++ descriptor pool if api matches.
      Set to False on recursion, so no duplicates are created.
    syntax: The syntax/semantics that should be used.  Set to "proto3" to get
      proto3 field presence semantics.
  Returns:
    A Descriptor for protobuf messages.
  """
  if api_implementation.Type() != 'python' and build_file_if_cpp:
    # The C++ implementation requires all descriptors to be backed by the same
    # definition in the C++ descriptor pool. To do this, we build a
    # FileDescriptorProto with the same definition as this descriptor and build
    # it into the pool.
    from google.protobuf import descriptor_pb2
    file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
    file_descriptor_proto.message_type.add().MergeFrom(desc_proto)
    # Generate a random name for this proto file to prevent conflicts with any
    # imported ones. We need to specify a file name so the descriptor pool
    # accepts our FileDescriptorProto, but it is not important what that file
    # name is actually set to.
    proto_name = binascii.hexlify(os.urandom(16)).decode('ascii')
    if package:
      file_descriptor_proto.name = os.path.join(package.replace('.', '/'),
                                                proto_name + '.proto')
      file_descriptor_proto.package = package
    else:
      file_descriptor_proto.name = proto_name + '.proto'
    # Registration must happen before FindFileByName can resolve the file.
    _message.default_pool.Add(file_descriptor_proto)
    result = _message.default_pool.FindFileByName(file_descriptor_proto.name)
    if _USE_C_DESCRIPTORS:
      return result.message_types_by_name[desc_proto.name]
  # full_message_name is kept as a list of components; joined with '.' below.
  full_message_name = [desc_proto.name]
  if package: full_message_name.insert(0, package)
  # Create Descriptors for enum types
  enum_types = {}
  for enum_proto in desc_proto.enum_type:
    full_name = '.'.join(full_message_name + [enum_proto.name])
    enum_desc = EnumDescriptor(
        enum_proto.name, full_name, None, [
            EnumValueDescriptor(enum_val.name, ii, enum_val.number,
                                create_key=_internal_create_key)
            for ii, enum_val in enumerate(enum_proto.value)],
        create_key=_internal_create_key)
    enum_types[full_name] = enum_desc
  # Create Descriptors for nested types
  nested_types = {}
  for nested_proto in desc_proto.nested_type:
    full_name = '.'.join(full_message_name + [nested_proto.name])
    # Nested types are just those defined inside of the message, not all types
    # used by fields in the message, so no loops are possible here.
    nested_desc = MakeDescriptor(nested_proto,
                                 package='.'.join(full_message_name),
                                 build_file_if_cpp=False,
                                 syntax=syntax)
    nested_types[full_name] = nested_desc
  # Build a FieldDescriptor for every field, resolving local composite types.
  fields = []
  for field_proto in desc_proto.field:
    full_name = '.'.join(full_message_name + [field_proto.name])
    enum_desc = None
    nested_desc = None
    if field_proto.json_name:
      json_name = field_proto.json_name
    else:
      json_name = None
    if field_proto.HasField('type_name'):
      type_name = field_proto.type_name
      # Only the last path component of type_name is considered, i.e. the type
      # is looked up relative to this message's own scope.
      full_type_name = '.'.join(full_message_name +
                                [type_name[type_name.rfind('.')+1:]])
      if full_type_name in nested_types:
        nested_desc = nested_types[full_type_name]
      elif full_type_name in enum_types:
        enum_desc = enum_types[full_type_name]
      # Else type_name references a non-local type, which isn't implemented
    # NOTE(review): index is derived as field number - 1, which assumes field
    # numbers are contiguous starting at 1 — confirm for sparse numbering.
    field = FieldDescriptor(
        field_proto.name, full_name, field_proto.number - 1,
        field_proto.number, field_proto.type,
        FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type),
        field_proto.label, None, nested_desc, enum_desc, None, False, None,
        options=_OptionsOrNone(field_proto), has_default_value=False,
        json_name=json_name, create_key=_internal_create_key)
    fields.append(field)
  desc_name = '.'.join(full_message_name)
  return Descriptor(desc_proto.name, desc_name, None, None, fields,
                    list(nested_types.values()), list(enum_types.values()), [],
                    options=_OptionsOrNone(desc_proto),
                    create_key=_internal_create_key)
|
{
"content_hash": "b2f1ab00f67e5180892f88cd7d8034bb",
"timestamp": "",
"source": "github",
"line_count": 1198,
"max_line_length": 106,
"avg_line_length": 37.580968280467445,
"alnum_prop": 0.6712273999378082,
"repo_name": "grpc/grpc-ios",
"id": "f5a0caa6bda5cc9bb7411eea088994f6959f90e2",
"size": "46653",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "native/third_party/protobuf/python/google/protobuf/descriptor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "5444"
},
{
"name": "Batchfile",
"bytes": "38831"
},
{
"name": "C",
"bytes": "1342403"
},
{
"name": "C#",
"bytes": "111357"
},
{
"name": "C++",
"bytes": "11936431"
},
{
"name": "CMake",
"bytes": "34261"
},
{
"name": "CSS",
"bytes": "1579"
},
{
"name": "Cython",
"bytes": "258768"
},
{
"name": "Dockerfile",
"bytes": "185143"
},
{
"name": "Go",
"bytes": "34794"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "22550"
},
{
"name": "JavaScript",
"bytes": "89695"
},
{
"name": "Objective-C",
"bytes": "770017"
},
{
"name": "Objective-C++",
"bytes": "83300"
},
{
"name": "PHP",
"bytes": "517157"
},
{
"name": "PowerShell",
"bytes": "5008"
},
{
"name": "Python",
"bytes": "4064457"
},
{
"name": "Ruby",
"bytes": "715896"
},
{
"name": "Shell",
"bytes": "781923"
},
{
"name": "Starlark",
"bytes": "849400"
},
{
"name": "Swift",
"bytes": "13168"
},
{
"name": "XSLT",
"bytes": "9846"
}
],
"symlink_target": ""
}
|
class test(HTMLParser.HTMLParser):
    # Scrapes text out of <table> cells (plus the <title>) of HTML documents.
    # NOTE(review): these are CLASS-level attributes shared by all instances;
    # the driver code below relies on that by reusing a single instance so
    # `out` accumulates across files.
    intable = False   # inside a <table> element
    titling = False   # inside a <title> element
    capturing = True  # currently collecting character data
    cdata = []        # text fragments gathered since the last flush
    out = u''         # accumulated output across all parsed documents
    def handle_starttag(self, tag, attrs):
        # Debug trace of every start tag seen (Python 2 print statement).
        print tag,attrs
        # Elements with class="m" suspend capture; any other <td> resumes it.
        if dict(attrs).get('class', None) == 'm': self.capturing = False
        if tag.lower() == 'table': self.intable = True
        elif tag.lower() == 'title': self.titling = True
        elif tag.lower() == 'td' and dict(attrs).get('class', None) != 'm':
            self.capturing = True
    def handle_endtag(self, tag):
        if self.titling and tag.lower() == 'title': self.titling=False
        # Flush buffered fragments into `out` at every end tag while capturing.
        if self.capturing:
            out = u'\n'.join(self.cdata)
            self.cdata = []
            self.out += '\n' + out
        #print out
    def handle_data(self, data):
        # NOTE(review): `intable` is never reset to False, so once any table
        # has been seen all later text is captured — confirm this is intended.
        if self.capturing and self.intable and data.strip() != '': self.cdata.append(data.decode('utf-8'))
        if self.titling: self.cdata.append(u'HEADING: %s' % data.decode('utf-8'))
# Feed files 3_01.html .. 3_13.html through one shared parser instance,
# resetting HTMLParser internals and re-arming capture before each file
# (the tuple of expressions is just a way to run three calls on one line).
b=test()
for x in range(1,14):
    with file('3_%02d.html' % x, 'rU') as a: (b.reset(),setattr(b, 'capturing', True), b.feed(a.read()))
# 2_01.html is fed last without reset() — presumably intentional; verify.
# NOTE(review): this handle is never closed, unlike the files above.
a = file('2_01.html')
b.feed(a.read())
|
{
"content_hash": "e0eb2d1cdae464707cfb486ad1aeff40",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 106,
"avg_line_length": 38.13333333333333,
"alnum_prop": 0.5655594405594405,
"repo_name": "fiddlerwoaroof/sandbox",
"id": "3ae90408df880ddb9bd4c003f8f808c308a513af",
"size": "1144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unsorted/pythonsnippets_0005.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "66546"
}
],
"symlink_target": ""
}
|
import datetime
from corehq import Domain
from corehq.apps.accounting import generator
from corehq.apps.accounting.exceptions import NewSubscriptionError
from corehq.apps.accounting.models import (
Subscription, BillingAccount, DefaultProductPlan, SoftwarePlanEdition,
SubscriptionAdjustmentMethod)
from corehq.apps.accounting.tests import BaseAccountingTest
class TestNewDomainSubscription(BaseAccountingTest):
    """Tests for date handling in Subscription.new_domain_subscription."""
    def setUp(self):
        # Two domains, each with its own billing account, plus one admin web
        # user who is a member of the first domain.
        super(TestNewDomainSubscription, self).setUp()
        self.domain = Domain(
            name="test-domain-sub",
            is_active=True,
        )
        self.domain.save()
        self.domain2 = Domain(
            name="test-domain-sub2",
            is_active=True,
        )
        self.domain2.save()
        self.admin_user = generator.arbitrary_web_user()
        self.admin_user.add_domain_membership(self.domain.name, is_admin=True)
        self.admin_user.save()
        self.account = BillingAccount.get_or_create_account_by_domain(
            self.domain.name, created_by=self.admin_user.username)[0]
        self.account2 = BillingAccount.get_or_create_account_by_domain(
            self.domain2.name, created_by=self.admin_user.username)[0]
        # Two plan editions used by the tests below.
        self.standard_plan = DefaultProductPlan.get_default_plan_by_domain(
            self.domain.name, edition=SoftwarePlanEdition.STANDARD)
        self.advanced_plan = DefaultProductPlan.get_default_plan_by_domain(
            self.domain.name, edition=SoftwarePlanEdition.ADVANCED)
    # NOTE(review): "susbscription" is a typo; left as-is since test discovery
    # only requires the "test_" prefix and renaming would churn history.
    def test_new_susbscription_in_future(self):
        """
        Test covers issue that came up with commcare-hq/PR#3725.

        A future-dated subscription must keep its requested start/end dates
        even when an active trial subscription already exists on the domain.
        """
        today = datetime.date.today()
        in_30_days = today + datetime.timedelta(days=30)
        week_after_30 = in_30_days + datetime.timedelta(days=7)
        next_year = week_after_30 + datetime.timedelta(days=400)
        # mimic domain signing up for trial
        trial_subscription = Subscription.new_domain_subscription(
            self.account, self.domain.name, self.advanced_plan,
            date_end=in_30_days,
            adjustment_method=SubscriptionAdjustmentMethod.TRIAL,
            is_trial=True,
        )
        trial_subscription.is_active = True
        trial_subscription.save()
        # NOTE(review): account2 (domain2's account) is paired with domain 1
        # here — presumably deliberate for this scenario; confirm.
        subscription = Subscription.new_domain_subscription(
            self.account2, self.domain.name, self.standard_plan,
            web_user=self.admin_user.username,
            date_start=week_after_30, date_end=next_year,
        )
        # Re-fetch from the DB to assert on what was actually persisted.
        final_sub = Subscription.objects.get(pk=subscription.id)
        self.assertEqual(final_sub.date_start, week_after_30)
        self.assertEqual(final_sub.date_end, next_year)
    def test_conflicting_dates(self):
        """
        Tests creating a subscription with conflicting dates with an existing
        subscription
        """
        today = datetime.date.today()
        one_week = today + datetime.timedelta(days=7)
        one_month = today + datetime.timedelta(days=30)
        # Existing subscription spanning [one_week, one_month].
        Subscription.new_domain_subscription(
            self.account, self.domain.name, self.advanced_plan,
            date_start=one_week,
            date_end=one_month,
        )
        # conflicting subscription with no date end.
        self.assertRaises(NewSubscriptionError, lambda: Subscription.new_domain_subscription(
            self.account, self.domain.name, self.standard_plan,
        ))
        # conflicting subscription with overlapping end date
        self.assertRaises(NewSubscriptionError, lambda: Subscription.new_domain_subscription(
            self.account, self.domain.name, self.standard_plan,
            date_end=one_week + datetime.timedelta(days=1)
        ))
        # conflicting subscription with overlapping start date
        self.assertRaises(NewSubscriptionError, lambda: Subscription.new_domain_subscription(
            self.account, self.domain.name, self.standard_plan,
            date_start=one_month - datetime.timedelta(days=1)
        ))
        # subscription without overlapping dates before
        # bound future subscription (success here == no exception raised)
        sub_before = Subscription.new_domain_subscription(
            self.account, self.domain.name, self.standard_plan,
            date_end=one_week,
        )
        # subscription without overlapping dates after
        # bound future subscription (success here == no exception raised)
        sub_after = Subscription.new_domain_subscription(
            self.account, self.domain.name, self.standard_plan,
            date_start=one_month,
        )
|
{
"content_hash": "7f322a47b5ba43fc6cec578e83d7a919",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 93,
"avg_line_length": 39.849557522123895,
"alnum_prop": 0.6577836997557184,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "8fc565faedfa9758cdcce5a135ed37cd4f9a07f5",
"size": "4503",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "corehq/apps/accounting/tests/test_new_domain_subscription.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
}
|
"""Permissions cleanup migrations
Revision ID: 476e9e882141
Revises: 387cf5a9a3ad
Create Date: 2014-03-21 18:27:16.802081
"""
# revision identifiers, used by Alembic.
revision = '476e9e882141'
down_revision = '387cf5a9a3ad'
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select, insert, and_, or_
import sqlalchemy as sa
# Lightweight table stubs: only the columns this migration touches are
# declared, so the statements below work without reflecting the full schema.
context_implications_table = table('context_implications',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('source_context_id', sa.Integer),
    column('context_scope', sa.String),
    column('source_context_scope', sa.String),
    column('updated_at', sa.DateTime),
    column('modified_by_id', sa.Integer),
    )
# Contexts reference the object they scope via (related_object_id, type).
contexts_table = table('contexts',
    column('id', sa.Integer),
    column('related_object_id', sa.Integer),
    column('related_object_type', sa.String),
    )
roles_table = table('roles',
    column('id', sa.Integer),
    column('name', sa.String),
    column('scope', sa.String),
    column('description', sa.Text),
    )
# Join of a person to a role within a context.
user_roles_table = table('user_roles',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('role_id', sa.Integer),
    column('person_id', sa.Integer),
    )
# The three object types whose contexts are still considered valid.
programs_table = table('programs',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    )
audits_table = table('audits',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    )
people_table = table('people',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    )
def upgrade():
  """Cleans up permission data.

  Steps:
    1. Delete UserRole/ContextImplication rows pointing at orphaned contexts
       (contexts no longer referenced by any program, audit or person).
    2. Delete the obsolete `RoleReader` role and its assignments.
    3. For each (person, context) holding several roles, keep only the
       strongest assignment.
    4. Refresh the ProgramEditor role description.
  """
  connection = op.get_bind()
  # Find contexts without matching related objects
  context_ids = connection.execute(
      select([contexts_table.c.id])
      ).fetchall()
  context_ids = [x for (x,) in context_ids]
  program_context_ids = connection.execute(
      select([programs_table.c.context_id])
      ).fetchall()
  program_context_ids = [x for (x,) in program_context_ids]
  audit_context_ids = connection.execute(
      select([audits_table.c.context_id])
      ).fetchall()
  audit_context_ids = [x for (x,) in audit_context_ids]
  people_context_ids = connection.execute(
      select([people_table.c.context_id])
      ).fetchall()
  people_context_ids = [x for (x,) in people_context_ids]
  # Common and Admin contexts
  legitimate_context_ids = [0, 1]
  # Other still-valid contexts
  legitimate_context_ids = legitimate_context_ids +\
      program_context_ids + audit_context_ids + people_context_ids
  orphan_context_ids = set(context_ids) - set(legitimate_context_ids)
  # Parenthesized for print-statement/function consistency with the rest of
  # this file; output is unchanged.
  print("Orphaned Contexts:")
  print(orphan_context_ids)
  # Find UserRole and ContextImplication objects using orphaned contexts and
  # remove them
  if len(orphan_context_ids) > 0:
    op.execute(
        user_roles_table.delete().where(
            user_roles_table.c.context_id.in_(orphan_context_ids)))
    op.execute(
        context_implications_table.delete().where(
            or_(
                context_implications_table.c.source_context_id.in_(orphan_context_ids),
                context_implications_table.c.context_id.in_(orphan_context_ids),
            )))
  # Remove the contexts themselves
  # (Actually, don't, since it may cause referential integrity errors, and we
  # don't want to implicitly cascade to other objects.)
  #if len(orphan_context_ids) > 0:
  #  op.execute(
  #      contexts_table.delete().where(
  #          context_table.c.id.in_(orphan_context_ids)))
  # Remove `RoleReader` assignments, then the `RoleReader` role itself
  role_reader_role_ids = connection.execute(
      select([roles_table.c.id])\
          .where(roles_table.c.name == "RoleReader")
      ).fetchall()
  role_reader_role_ids = [x for (x,) in role_reader_role_ids]
  if len(role_reader_role_ids) > 0:
    op.execute(
        user_roles_table.delete().where(
            user_roles_table.c.role_id.in_(role_reader_role_ids)))
    op.execute(
        roles_table.delete().where(
            roles_table.c.id.in_(role_reader_role_ids)))
  # Find all UserRole objects for the same `person_id` and `context_id`, and
  # remove all but the "strongest"
  # First, get all Role objects with names
  role_ids_with_names = connection.execute(
      select([roles_table.c.id, roles_table.c.name, roles_table.c.scope])
      ).fetchall()
  roles_by_id = {}
  for (id, name, scope) in role_ids_with_names:
    roles_by_id[id] = (name, scope)
  user_role_tuples = connection.execute(
      select([
          user_roles_table.c.id,
          user_roles_table.c.context_id,
          user_roles_table.c.person_id,
          user_roles_table.c.role_id,
      ])
      ).fetchall()
  # Group assignments by (context, person) so duplicates can be detected.
  assignments_by_person_context = {}
  for (id, context_id, person_id, role_id) in user_role_tuples:
    assignments_by_person_context\
        .setdefault((context_id, person_id), [])\
        .append((id, context_id, role_id))
  # Relative "strength" of each role; only the strongest assignment survives.
  role_strengths = {
      "Auditor": 1,
      "Reader": 1,
      "ObjectEditor": 2,
      "ProgramCreator": 3,
      "gGRC Admin": 4,
      "ProgramReader": 1,
      "ProgramEditor": 2,
      "ProgramOwner": 3
  }
  user_role_ids_to_delete = []
  # BUGFIX: the previous loop variable shadowed the dict it iterated over.
  for assignments in assignments_by_person_context.values():
    if len(assignments) > 1:
      print("{} UserRole assignments:".format(len(assignments)))
      ids_with_role_strengths = []
      for (id, context_id, role_id) in assignments:
        # BUGFIX: `roles_by_id.get(role_id, None)` was unpacked into two
        # names, so an unknown role_id raised TypeError before the
        # "bad role_id" branch could ever run.
        role_name, role_scope = roles_by_id.get(role_id, (None, None))
        # One-off check for bad context/scope pairs
        if context_id in (None, 0, 1)\
            and role_name not in (
                "gGRC Admin", "Reader", "ObjectEditor", "ProgramCreator"):
          print("Invalid system role: context_id={}, role_id={}".format(
              context_id, role_id))
        elif context_id not in (None, 0, 1)\
            and role_name not in (
                "Auditor", "ProgramReader", "ProgramEditor", "ProgramOwner"):
          print("Invalid non-system role: context_id={}, role_id={}".format(
              context_id, role_id))
        else:
          if role_name:
            strength = role_strengths.get(role_name, None)
            if strength:
              ids_with_role_strengths.append((id, strength))
              print("Found user_role.id={}, {}".format(id, role_name))
            else:
              print("Found unknown role name: {}, user_role.id={}".format(
                  role_name, id))
          else:
            print("Found bad role_id: {}".format(role_id))
      # BUGFIX: guard against every duplicate being invalid, in which case
      # `max()` on the empty list would raise ValueError.
      if ids_with_role_strengths:
        # Get the `id` with the highest "strength"
        keep_id = max(ids_with_role_strengths, key=lambda x: x[1])[0]
        ids_to_delete = [
            id for (id,_) in ids_with_role_strengths if id != keep_id]
        print("Keeping {}, deleting {}".format(keep_id, ids_to_delete))
        user_role_ids_to_delete.extend(ids_to_delete)
  if len(user_role_ids_to_delete) > 0:
    op.execute(
        user_roles_table.delete().where(
            user_roles_table.c.id.in_(user_role_ids_to_delete)))
  # Update ProgramEditor Role description
  new_description = """A user with authorization to edit mapping objects related to an access controlled program.<br/><br/>When a person has this role they can map and unmap objects to the Program and edit the Program info, but they are unable to delete the Program or assign other people roles for that program."""
  op.execute(
      roles_table.update()\
          .where(roles_table.c.name == "ProgramEditor")\
          .values({ 'description': new_description }))
def downgrade():
  """Intentional no-op: the deleted data cannot be reconstructed."""
  # None of these are reversible (except ProgramEditor description, but don't
  # want it reversed)
  pass
|
{
"content_hash": "5a651e86ad8ece7332c32d1b82e1e731",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 315,
"avg_line_length": 32.3771186440678,
"alnum_prop": 0.6296296296296297,
"repo_name": "hyperNURb/ggrc-core",
"id": "9c2feabd61f6c6209965cbaabf10d4135179a1fd",
"size": "7882",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "src/ggrc_basic_permissions/migrations/versions/20140321182716_476e9e882141_permissions_cleanup_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "235811"
},
{
"name": "Cucumber",
"bytes": "140478"
},
{
"name": "HTML",
"bytes": "943963"
},
{
"name": "JavaScript",
"bytes": "1205888"
},
{
"name": "Makefile",
"bytes": "5936"
},
{
"name": "Mako",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "1875139"
},
{
"name": "Ruby",
"bytes": "1496"
},
{
"name": "Shell",
"bytes": "11719"
}
],
"symlink_target": ""
}
|
#
# --- MIT Open Source License --------------------------------------------------
# PiB - Python Build System
# Copyright (C) 2011 by Don Williamson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
#
# MSVCPlatform.py: Command-line parameter abstraction and build nodes for
# Microsoft Visual C++ 2005/2008/2010.
#
# C/C++ Building Reference (2005):
# http://msdn.microsoft.com/en-us/library/91621w01(v=VS.80).aspx
#
# Compiler Warnings that are Off by Default
# http://msdn.microsoft.com/en-us/library/23k5d385(v=VS.80).aspx
#
# Potentially useful warnings:
#
# C4191 'operator/operation' : unsafe conversion from 'type of expression' to 'type required'
# C4242 'identifier' : conversion from 'type1' to 'type2', possible loss of data
# C4263 'function' : member function does not override any base class virtual member function
# C4264 'virtual_function' : no override available for virtual member function from base 'class'; function is hidden
# C4266 'function' : no override available for virtual member function from base 'type'; function is hidden
# C4287 'operator' : unsigned/negative constant mismatch
# C4289 nonstandard extension used : 'var' : loop control variable declared in the for-loop is used outside the for-loop scope
# C4296 'operator' : expression is always false
# C4302 'conversion' : truncation from 'type 1' to 'type 2'
# C4365 'action' : conversion from 'type_1' to 'type_2', signed/unsigned mismatch
#
import os
import sys
import Utils
import Process
import BuildSystem
# Detected Visual Studio environment, populated by the detection code below.
VSToolsDir = None    # path to the Common7/Tools directory, if found
VCVarsPath = None    # full path to vcvarsall.bat
VCIncludeDir = None  # VC include directory
VCLibraryDir = None  # VC library directory
VSCRTVer = None      # toolset version string (VS2017+ layouts only)
def GetVSInstallDir(vs_tools_dir):
    """Derives the Visual Studio install root from a Common7/Tools path.

    Walks up the directory tree until a "Common7" component is stripped and
    returns its parent. Returns None when vs_tools_dir is None/empty or when
    the walk reaches the filesystem root without finding "Common7".
    """
    vs_install_dir = vs_tools_dir
    while vs_install_dir != None and vs_install_dir != "":
        split_path = os.path.split(vs_install_dir)

        # Detect infinite loop: os.path.split makes no more progress, so the
        # path never contained a "Common7" component.
        if vs_install_dir == split_path[0]:
            print("ERROR: Visual Studio Tools path is not formatted as expected")
            # BUGFIX: previously assigned to an unrelated name (VSInstallDir),
            # so the bogus path was returned instead of signalling failure
            # with None, which is what all callers check for.
            vs_install_dir = None
            break

        vs_install_dir = split_path[0]
        if split_path[1] == "Common7":
            break

    return vs_install_dir
# Allow the user to override which Visual Studio version to use
def UserAllows(version):
    requested = Utils.GetSysArgvProperty("-msvc_ver")
    # No -msvc_ver on the command line means any version is acceptable.
    return requested == None or requested == version
# These versions use vswhere.exe but where is that? Search known directory layouts for now
vs_2019_path = "C:/Program Files (x86)/Microsoft Visual Studio/2019"
vs_2017_path = "C:/Program Files (x86)/Microsoft Visual Studio/2017"
# NOTE(review): the CRT/toolset versions below are hard-coded to specific VS
# updates (and differ between BuildTools and Community) — confirm they match
# the locally installed toolsets.
if VSToolsDir == None and UserAllows("2019"):
    if os.path.exists(vs_2019_path + "/BuildTools/Common7/Tools"):
        VSToolsDir = vs_2019_path + "/BuildTools/Common7/Tools"
        VSCRTVer = "14.28.29333"
    elif os.path.exists(vs_2019_path + "/Community/Common7/Tools"):
        VSToolsDir = vs_2019_path + "/Community/Common7/Tools"
        VSCRTVer = "14.28.29910"
if VSToolsDir == None and UserAllows("2017"):
    if os.path.exists(vs_2017_path + "/Community/Common7/Tools"):
        VSToolsDir = vs_2017_path + "/Community/Common7/Tools"
        VSCRTVer = "14.15.26726"
# Complete paths for new method
# (VS2017+ layout keeps the toolchain under VC/Tools/MSVC/<version>.)
if VSToolsDir != None:
    VSInstallDir = GetVSInstallDir(VSToolsDir)
    if VSInstallDir != None:
        VCVarsPath = os.path.join(VSInstallDir, "VC/Auxiliary/Build/vcvarsall.bat")
        VCIncludeDir = os.path.join(VSInstallDir, "VC/Tools/MSVC/" + VSCRTVer + "/include")
        VCLibraryDir = os.path.join(VSInstallDir, "VC/Tools/MSVC/" + VSCRTVer + "/lib/x86")
# Use the old method
# (Pre-2017 releases advertise Common7/Tools via VSxxxCOMNTOOLS env vars;
# newest release allowed by -msvc_ver wins.)
if VSToolsDir == None:
    if VSToolsDir == None and UserAllows("2015"):
        VSToolsDir = os.getenv("VS140COMNTOOLS")
    if VSToolsDir == None and UserAllows("2013"):
        VSToolsDir = os.getenv("VS120COMNTOOLS")
    if VSToolsDir == None and UserAllows("2012"):
        VSToolsDir = os.getenv("VS110COMNTOOLS")
    if VSToolsDir == None and UserAllows("2008"):
        VSToolsDir = os.getenv("VS90COMNTOOLS")
    if VSToolsDir == None and UserAllows("2005"):
        VSToolsDir = os.getenv("VS80COMNTOOLS")
    VSInstallDir = GetVSInstallDir(VSToolsDir)
    # VC directories are a subdirectory of VS install
    if VSInstallDir != None:
        VCVarsPath = os.path.join(VSInstallDir, "VC/vcvarsall.bat")
        VCIncludeDir = os.path.join(VSInstallDir, "VC/include")
        VCLibraryDir = os.path.join(VSInstallDir, "VC/lib")
# Show chosen environment
if "-msvc_show_env" in sys.argv:
    print("VSToolsDir = ", VSToolsDir)
    print("VSInstallDir = ", VSInstallDir)
    print("VCVarsPath = ", VCVarsPath)
    print("VCIncludeDir = ", VCIncludeDir)
    print("VCLibraryDir = ", VCLibraryDir)
# Hard failure: the build system cannot proceed without a toolchain.
if VSToolsDir == None:
    print("ERROR: Failed to find installed Visual Studio")
    sys.exit(1)
#
# There is no direct way in Python to apply the environment of one subprocess to another. The typical solution is
# to generate a batch file at runtime that does the following:
#
# call ApplyEnvironment.bat
# RunProcess.exe
#
# Rather than doing this for each call to cl.exe, I'm calling the batch file once at the start and copying the
# resulting environment for use later when calling cl.exe.
#
def GetVisualCEnv():
    """Run vcvarsall.bat once and capture the environment it sets up.

    Returns a copy of the current process environment overlaid with the
    variables set by the Visual C environment batch file, or None if the
    Visual Studio install was not located. The captured environment is later
    handed to cl.exe/link.exe subprocesses.
    """
    if VSInstallDir == None:
        print("ERROR: Visual Studio install directory not detected")
        return None
    # Locate the batch file that sets up the Visual C build environment
    if not os.path.exists(VCVarsPath):
        print("ERROR: Visual C environment setup batch file not found")
        return None
    # Run the batch file, output the environment and prepare it for parsing
    process = Process.OpenPiped(VCVarsPath + " x86 & echo ===ENVBEGIN=== & set")
    output = Process.WaitForPipeOutput(process)
    output = output.split("===ENVBEGIN=== \r\n")[1]
    output = output.splitlines()
    # Start with the current environment, override with any parsed environment values
    env = os.environ.copy()
    for line in output:
        try:
            # Split on the FIRST '=' only: values such as PATH or
            # ProgramFiles(x86)-relative variables can legally contain further
            # '=' characters, which made the old `line.split("=")` raise.
            var, value = line.split("=", 1)
            env[var.upper()] = value
        except ValueError:
            # A line with no '=' at all is not an environment variable
            print("WARNING: environment variables skipped -> " + line)
    # This environment variable is defined in the VS2005 IDE and prevents cl.exe output
    # being correctly captured, so remove it!
    if "VS_UNICODE_OUTPUT" in env:
        del env["VS_UNICODE_OUTPUT"]
    return env
#
# Visual C++ Compiler (cl.exe)
#
# Options:
#
# /c Compiles without linking
# /nologo Suppresses the logo
# /showIncludes Display a list of all include files during compilation
# /W{0|1|2|3|4} Warning level
# /WX Treat warnings as errors
# /errorReport:{none|prompt|queue|send} How to report ICEs to Microsoft
#
# /O1 Minimise size (/Og /Os /Oy /Ob2 /Gs /GF /Gy)
# /O2 Maximise speed (/Og /Oi /Ot /Oy /Ob2 /Gs /GF /Gy)
# /Ob{0|1|2}     Disable inline expansion, expand marked functions, compiler expands what it wants
# /Od Disable optimisations
# /Og Provides local and global optimisations (DEPRECATED)
# /Oi Generate intrinsic functions
# /Os Favour smaller code
# /Ot Favour faster code
# /Ox Full optimisation - favours speed over size (/Og /Oi /Ot /Ob2 /Oy)
# /Oy Omits frame pointers (X86 ONLY)
#
# /arch:{SSE|SSE2} Specifies architecture for code generation (X86 ONLY)
# /EH{s|a}[c][-] Specifies exception handling behaviour
# /fp:{precise|except[-]|fast|strict} Specifies floating-point behaviour
# /Gd __cdecl calling convention, except marked (X86 ONLY)
# /Gr __fastcall calling convention, except marked (X86 ONLY)
# /Gz __stdcall calling convention, except marked (X86 ONLY)
# /GF Enable read-only string pooling
# /GL[-] Enable whole program optimisation
# /Gs Controls stack probes
# /Gy Enable function level linking
# /MD References multi-threaded MSVCRT.lib, code is in a DLL. Defines _MT, _DLL.
# /MDd References multi-threaded MSVCRTD.lib, code is in a DLL. Defines _DEBUG, _MT, _DLL.
# /MT References multi-threaded LIBCMT.lib, code is linked statically. Defines _MT.
# /MTd References multi-threaded LIBCMTD.lib, code is linked statically. Defines _DEBUG, _MT.
#
# /Fopathname Specifies the output .obj file
# /Fppathname    Provides a path name for a precompiled header instead of using the default path name
# /Fdpathname Specifies a name for the PDB file
#
# /GS[-] Detects buffer overruns that overwrite the return address (on by default)
# /RTC{c|s|u} Controls runtime error checking
# /Zi Produce debugging info in PDB files
# /ZI Produce debugging info in PDB files with edit and continue (X86 ONLY)
#
# /D[= | #[{string|number}] ] Defines a preprocessing symbol for your source file
# /I[ ]directory Adds a directory to the list of directories searched for include files
#
# /Y- Ignores all other PCH compiler options in the current build
# /Yc[filename] Create a PCH
# /Yu[filename] Use a PCH
#
# Typical cl.exe command-lines:
#
# Debug Windows
# /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_UNICODE" /D "UNICODE" /Gm /EHsc /RTC1 /MDd /Fo"Debug\\" /Fd"Debug\vc80.pdb" /W3 /nologo /c /Wp64 /ZI /TP /errorReport:prompt
#
# Release Windows
# /O2 /GL /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_UNICODE" /D "UNICODE" /FD /EHsc /MD /Fo"Release\\" /Fd"Release\vc80.pdb" /W3 /nologo /c /Wp64 /Zi /TP /errorReport:prompt
#
# Note that DLL builds add "_WINDLL". This may only be needed so that you can decide whether to use declspec dllimport or dllexport.
# Base build configurations the option classes provide defaults for
VCBaseConfig = Utils.enum(
    'DEBUG',
    'RELEASE'
)

# /arch code-generation switches (None = compiler default)
VCArchitecture = Utils.enum(
    DEFAULT = None,
    IA32 = '/arch:IA32',
    SSE = '/arch:SSE',
    SSE2 = '/arch:SSE2',
    AVX = '/arch:AVX',
    AVX2 = '/arch:AVX2'
)

# /fp floating-point behaviour switches
VCFloatingPoint = Utils.enum(
    PRECISE = '/fp:precise',
    FAST = '/fp:fast',
    STRICT = '/fp:strict'
)

# Default calling convention switches (x86 only)
VCCallingConvention = Utils.enum(
    CDECL = '/Gd',
    FASTCALL = '/Gr',
    STDCALL = '/Gz'
)

# Optimisation level switches
VCOptimisations = Utils.enum(
    DISABLE = '/Od',
    SIZE = '/O1',
    SPEED = '/O2'
)

# Debug information switches (None disables PDB generation)
VCDebuggingInfo = Utils.enum(
    DISABLE = None,
    PDB = '/Zi',
    PDBEDITANDCONTINUE = '/ZI'
)

# C runtime library selection switches
VCCRTType = Utils.enum(
    MT_DLL = '/MD',
    MT_DEBUG_DLL = '/MDd',
    MT = '/MT',
    MT_DEBUG = '/MTd'
)

# Exception handling model switches (None disables exception handling)
VCExceptionHandling = Utils.enum(
    DISABLE = None,
    CPP_ONLY = '/EHsc',
    CPP_SEH = '/EHa',
)

# /std language standard switches
VCStandard = Utils.enum(
    CPP_14 = '/std:c++14',
    CPP_17 = '/std:c++17',
    CPP_20 = '/std:c++20',
    CPP_LATEST = '/std:c++latest',
    C_11 = '/std:c11',
    C_17 = '/std:c17',
)
class VCCompileOptions:
    """Option set for the Visual C++ compiler (cl.exe).

    Individual compiler settings are held as attributes and flattened into a
    cl.exe argument list by UpdateCommandLine(), which stores the result in
    self.CommandLine. Construct with VCBaseConfig.DEBUG or
    VCBaseConfig.RELEASE to get sensible defaults for that configuration.
    """
    def __init__(self, config):
        # Initialise the requested config settings
        if config == VCBaseConfig.DEBUG:
            self.InitDebug()
        elif config == VCBaseConfig.RELEASE:
            self.InitRelease()
    def InitDebug(self):
        """Set the default (debug) value for every compiler option."""
        self.Alignment = 8
        self.Architecture = VCArchitecture.DEFAULT
        self.CallingConvention = VCCallingConvention.CDECL
        self.CRTType = VCCRTType.MT_DEBUG
        self.CompileAsC = False
        self.DebuggingInfo = VCDebuggingInfo.PDBEDITANDCONTINUE
        self.Defines = [ 'WIN32', '_WINDOWS' ]
        self.DetectBufferOverruns = True
        self.DisabledWarnings = [ ]
        self.EnableIntrinsicFunctions = False
        self.ExceptionHandling = VCExceptionHandling.CPP_ONLY
        self.FloatingPoint = VCFloatingPoint.PRECISE
        self.FloatingPointExceptions = False
        self.FullPathnameReports = True
        self.IncludePaths = [ ]
        self.NoLogo = True
        self.Optimisations = VCOptimisations.DISABLE
        self.ReportClassLayout = False
        self.ReportSingleClassLayout = [ ]
        self.RTTI = True
        self.RuntimeChecks = True
        self.WarningLevel = 3
        self.WarningsAsErrors = False
        self.WholeProgramOptimisation = False
        self.Standard = None
        self.UpdateCommandLine()
    def InitRelease(self):
        """Start from debug defaults and override the release-specific settings."""
        self.InitDebug()
        self.DebuggingInfo = VCDebuggingInfo.PDB
        self.RuntimeChecks = False
        self.DetectBufferOverruns = False
        self.Optimisations = VCOptimisations.SPEED
        self.WholeProgramOptimisation = True
        self.EnableIntrinsicFunctions = True
        self.CRTType = VCCRTType.MT
        self.Defines.extend( [ 'NDEBUG' ])
        self.UpdateCommandLine()
    def UpdateCommandLine(self):
        """Flatten the current option attributes into self.CommandLine."""
        # Compile only & we need showIncludes for dependency evaluation
        cmdline = [
            '/c',                   # Compile only
            '/showIncludes',        # Show includes for dependency evaluation
            '/errorReport:none',    # Don't send any ICEs to Microsoft
            '/Zc:threadSafeInit-',  # Disable C++11 thread-safe statics
            #'/Bt+',
            #'/d2cgsummary',
        ]
        # Construct the command line from the set options
        if self.NoLogo:
            cmdline += [ '/nologo' ]
        cmdline += [ "/W" + str(self.WarningLevel) ]
        cmdline += [ self.CRTType ]
        if self.ExceptionHandling != None:
            cmdline += [ self.ExceptionHandling ]
        if self.WarningsAsErrors:
            cmdline += [ "/WX" ]
        for warning in self.DisabledWarnings:
            cmdline += [ "/wd" + str(warning) ]
        if self.Architecture != None:
            cmdline += [ self.Architecture ]
        cmdline += [ self.FloatingPoint ]
        cmdline += [ '/Zp' + str(self.Alignment) ]
        if self.FloatingPointExceptions:
            # BUG FIX: this used to be `cmdline += "/fp:except"`, which extends
            # the list with the individual CHARACTERS of the string rather than
            # appending the switch as a single argument.
            cmdline += [ "/fp:except" ]
        cmdline += [ self.CallingConvention ]
        if self.DebuggingInfo != None:
            cmdline += [ self.DebuggingInfo ]
        if self.RuntimeChecks:
            cmdline += [ "/RTC1" ]
        if not self.RTTI:
            cmdline += [ "/GR-" ]
        if not self.DetectBufferOverruns:
            cmdline += [ "/GS-" ]
        cmdline += [ self.Optimisations ]
        if self.WholeProgramOptimisation:
            cmdline += [ "/GL" ]
        if self.EnableIntrinsicFunctions:
            cmdline += [ "/Oi" ]
        for define in self.Defines:
            cmdline += [ '/D', define ]
        for include in self.IncludePaths:
            cmdline += [ '/I', include ]
        if self.ReportClassLayout:
            cmdline += [ '/d1reportAllClassLayout' ]
        for cls in self.ReportSingleClassLayout:
            cmdline += [ '/d1reportSingleClassLayout' + cls ]
        if self.FullPathnameReports:
            cmdline += [ '/FC' ]
        if self.CompileAsC:
            cmdline += [ '/TC' ]
        if self.Standard != None:
            cmdline += [ self.Standard ]
        self.CommandLine = cmdline
#
# Visual C++ Linker (link.exe)
#
# Command-line:
#
# LINK.exe [options] [files] [@responsefile]
#
# Options:
#
# /DEBUG Creates debugging information
# /DEFAULTLIB:library Adds a library that is searched AFTER input libraries but BEFORE the default libraries named in .obj files
# /DLL Builds a DLL
# /ENTRY:function Specifies an entry point function
# /INCREMENTAL[:NO] By default the linker runs in incremental mode, this allows you to change that
# /LARGEADDRESSAWARE Tells the compiler that the application supports addresses larger than 2GB
# /LIBPATH:dir Adds a library search path that gets used before the environment LIB path
#
# /LTCG[:NOSTATUS|:STATUS|:PGINSTRUMENT|:PGOPTIMIZE|:PGUPDATE] Link-time Code Generation control
#
# /MACHINE:{X64|X86} Specifies the target platform
# /MAP[:filename] Generate a MAP file
# /NOLOGO Suppresses the logo
# /NODEFAULTLIB[:library] Tells the linker to remove one or more default libraries from the list (the compiler can insert some)
# /OPT:{REF|NOREF} Controls the optimisations performed during a build
# /OPT:{ICF[=iterations]|NOICF}
# /OPT:{WIN98|NOWIN98}
# /OUT:filename Specifies the output filename
# /PDB:filename Creates a program database file
# /SUBSYSTEM:{CONSOLE|WINDOWS} Either command-line or window based application (main or WinMain)
# /WX[:NO] Treat linker warnings as errors
#
# Typical link.exe command-lines:
#
# Debug Windows EXE
# /OUT:"D:\dev\projects\TestProject\Debug\TestProject.exe" /INCREMENTAL /NOLOGO /MANIFEST /MANIFESTFILE:"Debug\TestProject.exe.intermediate.manifest" /DEBUG
# /PDB:"d:\dev\projects\testproject\debug\TestProject.pdb" /SUBSYSTEM:WINDOWS /MACHINE:X86 /ERRORREPORT:PROMPT
# kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib
#
# Release Windows EXE
# /OUT:"D:\dev\projects\TestProject\Release\TestProject.exe" /INCREMENTAL:NO /NOLOGO /MANIFEST /MANIFESTFILE:"Release\TestProject.exe.intermediate.manifest" /DEBUG
# /PDB:"d:\dev\projects\testproject\release\TestProject.pdb" /SUBSYSTEM:WINDOWS /OPT:REF /OPT:ICF /LTCG /MACHINE:X86 /ERRORREPORT:PROMPT
# kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib
#
# /MACHINE target platform switches
VCMachine = Utils.enum(
    X86 = '/MACHINE:x86',
    X64 = '/MACHINE:x64'
)

# /OPT:REF controls elimination of unreferenced symbols
VCUnrefSymbols = Utils.enum(
    ELIMINATE = '/OPT:REF',
    KEEP = '/OPT:NOREF'
)

# /OPT:ICF controls folding of identical COMDAT sections
VCDupComdats = Utils.enum(
    FOLD = '/OPT:ICF',
    KEEP = '/OPT:NOICF'
)

# /SUBSYSTEM switches: console (main) vs windowed (WinMain) applications
VCSubsystem = Utils.enum(
    CONSOLE = '/SUBSYSTEM:CONSOLE',
    WINDOWS = '/SUBSYSTEM:WINDOWS'
)
class VCLinkOptions:
    """Option set for the Visual C++ linker (link.exe).

    Attribute values are flattened into a link.exe argument list by
    UpdateCommandLine() and exposed through self.CommandLine.
    """
    def __init__(self, config):
        # Pick the defaults that match the requested base configuration
        if config == VCBaseConfig.DEBUG:
            self.InitDebug()
        elif config == VCBaseConfig.RELEASE:
            self.InitRelease()
    def InitDebug(self):
        """Apply the baseline (debug) value of every linker option."""
        # Attribute names are part of the public interface; external code
        # reads/writes them directly before calling UpdateCommandLine()
        defaults = {
            'SafeSEH': False,
            'Debug': True,
            'NoLogo': True,
            'DLL': False,
            'EntryPoint': None,
            'Incremental': True,
            'LargeAddressAware': False,
            'LTCG': False,
            'Machine': VCMachine.X86,
            'MapFile': False,
            'UnrefSymbols': VCUnrefSymbols.KEEP,
            'DupComdats': VCDupComdats.KEEP,
            'Subsystem': VCSubsystem.WINDOWS,
            'DefaultLibs': [],
            'NoDefaultLibs': False,
            'NoDefaultLib': [],
            'LibPaths': [],
        }
        for name, value in defaults.items():
            setattr(self, name, value)
        self.UpdateCommandLine()
    def InitRelease(self):
        """Debug baseline plus link-time code generation and size optimisations."""
        self.InitDebug()
        self.Incremental = False
        self.LTCG = True
        self.UnrefSymbols = VCUnrefSymbols.ELIMINATE
        self.DupComdats = VCDupComdats.FOLD
        self.UpdateCommandLine()
    def UpdateCommandLine(self):
        """Flatten the current option attributes into self.CommandLine."""
        # /ERRORREPORT:NONE stops ICE reports going to Microsoft;
        # /VERBOSE:LIB makes the linker print every library it searches,
        # which the build system scans for dependency evaluation
        args = [ '/ERRORREPORT:NONE', '/VERBOSE:LIB' ]
        # Simple boolean switches, emitted in a fixed order
        if self.SafeSEH:
            args.append('/SAFESEH')
        if self.Debug:
            args.append('/DEBUG')
        if self.NoLogo:
            args.append('/NOLOGO')
        if self.DLL:
            args.append('/DLL')
        if self.EntryPoint is not None:
            args.append('/ENTRY:' + self.EntryPoint)
        # The linker is incremental by default, so only the opt-out is emitted
        if not self.Incremental:
            args.append('/INCREMENTAL:NO')
        if self.LargeAddressAware:
            args.append('/LARGEADDRESSAWARE')
        if self.LTCG:
            args.append('/LTCG')
        # Enum-style options already carry their switch text
        args.extend([ self.Machine, self.UnrefSymbols, self.DupComdats, self.Subsystem ])
        args.extend('/DEFAULTLIB:' + lib for lib in self.DefaultLibs)
        args.extend('/NODEFAULTLIB:' + lib for lib in self.NoDefaultLib)
        if self.NoDefaultLibs:
            args.append('/NODEFAULTLIB')
        args.extend('/LIBPATH:' + path for path in self.LibPaths)
        self.CommandLine = args
#
# Visual C++ Librarian (lib.exe)
#
# Command-line:
#
# LIB [options] [files]
#
# Options:
#
# /LIBPATH:dir Library path to search for when merging libraries
# /LTCG Enable Link Time Code Generation
# /MACHINE:{X64|X86} Specifies the target platform - not normally needed as it's inferred from the .obj file
# /NODEFAULTLIB[:library] Tells the librarian to remove one or more default libraries from the list (the compiler can insert some)
# /NOLOGO Suppress the logo
# /OUT:filename Output library file
# /SUBSYSTEM:{CONSOLE|WINDOWS} Specifies the platform type
# /WX[:NO] Treat warnings as errors
#
# Typical command-lines:
#
# Debug
# /OUT:"D:\dev\projects\TestProject\Debug\TestProject.lib" /NOLOGO
#
# Release
# /OUT:"D:\dev\projects\TestProject\Release\TestProject.lib" /NOLOGO /LTCG
#
class VCLibOptions:
    """Option set for the Visual C++ librarian (lib.exe).

    Attribute values are flattened into a lib.exe argument list by
    UpdateCommandLine() and exposed through self.CommandLine.
    """
    def __init__(self, config):
        # Pick the defaults that match the requested base configuration
        if config == VCBaseConfig.DEBUG:
            self.InitDebug()
        elif config == VCBaseConfig.RELEASE:
            self.InitRelease()
    def InitDebug(self):
        """Apply the baseline (debug) value of every librarian option."""
        # Attribute names are part of the public interface
        for name, value in (
            ('LTCG', False),
            ('Machine', VCMachine.X86),
            ('NoLogo', True),
            ('Subsystem', None),
            ('WarningsAsErrors', False),
            ('LibPaths', []),
            ('NoDefaultLibs', False),
            ('NoDefaultLib', []),
        ):
            setattr(self, name, value)
        self.UpdateCommandLine()
    def InitRelease(self):
        """Release only differs from debug by enabling link-time code generation."""
        self.InitDebug()
        self.LTCG = True
        self.UpdateCommandLine()
    def UpdateCommandLine(self):
        """Flatten the current option attributes into self.CommandLine."""
        # Never send internal compiler error reports to Microsoft
        args = [ '/ERRORREPORT:NONE' ]
        if self.LTCG:
            args.append('/LTCG')
        args.append(self.Machine)
        if self.NoLogo:
            args.append('/NOLOGO')
        # Subsystem is normally inferred from the object files, so it is optional
        if self.Subsystem is not None:
            args.append(self.Subsystem)
        if self.WarningsAsErrors:
            args.append('/WX')
        args.extend('/LIBPATH:' + lib for lib in self.LibPaths)
        args.extend('/NODEFAULTLIB:' + lib for lib in self.NoDefaultLib)
        if self.NoDefaultLibs:
            args.append('/NODEFAULTLIB')
        self.CommandLine = args
#
# A node for compiling a single C/C++ file to a .obj file
#
class VCCompileNode (BuildSystem.Node):
    """Build node that compiles a single C/C++ source file to a .obj file with cl.exe."""
    def __init__(self, path, override_cpp_opts):
        # path: the source file; override_cpp_opts: per-node VCCompileOptions,
        # or None to use the current configuration's options
        super().__init__()
        self.Path = path
        self.OverrideCPPOptions = override_cpp_opts
    def Build(self, env):
        """Compile the file and record /showIncludes output as implicit dependencies."""
        output_files = self.GetOutputFiles(env)
        # Construct the command-line
        cpp_opts = self.GetCPPOptions(env)
        cmdline = [ "cl.exe" ] + cpp_opts.CommandLine
        # Entry 1 of the output files, when present, is the PDB path (see GetOutputFiles)
        if len(output_files) > 1:
            cmdline += [ "/Fd" + output_files[1] ]
        cmdline += [ "/Fo" + output_files[0], self.GetInputFile(env) ]
        Utils.ShowCmdLine(env, cmdline)
        # Create the include scanner and launch the compiler
        scanner = Utils.LineScanner(env)
        scanner.AddLineParser("Includes", "Note: including file:", None, lambda line, length: line[length:].lstrip())
        process = Process.OpenPiped(cmdline, env.EnvironmentVariables)
        Process.PollPipeOutput(process, scanner)
        # Record the implicit dependencies for this file
        data = env.GetFileMetadata(self.GetInputFile(env))
        data.SetImplicitDeps(env, scanner.Includes)
        return process.returncode == 0
    def SetCPPOptions(self, override_cpp_opts):
        # Replace (or clear, with None) the per-node compile option override
        self.OverrideCPPOptions = override_cpp_opts
    def GetCPPOptions(self, env):
        # A per-node override wins over the current configuration's options
        return env.CurrentConfig.CPPOptions if self.OverrideCPPOptions is None else self.OverrideCPPOptions
    def GetInputFile(self, env):
        # The source path as given; not relocated into the intermediate directory
        return self.Path
    def GetOutputFiles(self, env):
        """Return [.obj] plus the shared pdb/idb paths when debug info is enabled."""
        # Get the relocated path minus extension
        path = os.path.splitext(self.Path)[0]
        path = os.path.join(env.CurrentConfig.IntermediatePath, path)
        files = [ path + ".obj" ]
        cpp_opts = self.GetCPPOptions(env)
        if cpp_opts.DebuggingInfo != None:
            # The best we can do here is ensure that the obj\src directory for
            # a group of files shares the same pdb/idb
            path = os.path.dirname(path)
            files += [ os.path.join(path, "vc100.pdb") ]
            if cpp_opts.DebuggingInfo == VCDebuggingInfo.PDBEDITANDCONTINUE:
                files += [ os.path.join(path, "vc100.idb") ]
        return files
    def GetTempOutputFiles(self, env):
        # Only the .obj is a per-build temp output; the pdb/idb accumulate across builds
        return [ self.GetOutputFiles(env)[0] ]
#
# A node for linking an EXE or DLL given an output path and list of dependencies
#
class VCLinkNode (BuildSystem.Node):
    """Build node that links object files and libraries into an EXE or DLL with link.exe."""
    def __init__(self, path, obj_files, lib_files, weak_lib_files):
        super().__init__()
        self.Path = path
        # Object files are explicit dependencies, lib files are implicit, scanned during output
        self.Dependencies = obj_files
        self.LibFiles = lib_files
        # Weak libs are passed to the linker but never trigger a relink (see Build)
        self.WeakLibFiles = weak_lib_files
    def Build(self, env):
        """Run link.exe and record the searched libraries as implicit dependencies."""
        output_files = self.GetOutputFiles(env)
        Utils.Print(env, "Linking: " + output_files[0] + "\n")
        # Construct the command-line
        cmdline = [ "link.exe" ] + env.CurrentConfig.LinkOptions.CommandLine
        cmdline += [ '/OUT:' + output_files[0] ]
        if env.CurrentConfig.LinkOptions.MapFile:
            cmdline += [ "/MAP:" + output_files[1] ]
        cmdline += [ dep.GetOutputFiles(env)[0] for dep in self.Dependencies ]
        cmdline += [ dep.GetOutputFiles(env)[0] for dep in self.LibFiles ]
        cmdline += [ dep.GetOutputFiles(env)[0] for dep in self.WeakLibFiles ]
        Utils.ShowCmdLine(env, cmdline)
        #
        # When library files get added as dependencies to this link node they get added without a path.
        # This requires the linker to check its list of search paths for the location of any input
        # library files.
        #
        # The build system however, needs full paths to evaluate dependencies on each build. Rather than
        # trying to search the library paths in the build system (and potentially getting them wrong/different
        # to the linker), the linker is asked to output the full path of all libraries it links with. These
        # then get added as implicit dependencies.
        #
        # Create the lib scanner and run the link process
        #
        scanner = Utils.LineScanner(env)
        scanner.AddLineParser("Includes", "Searching ", [ "Searching libraries", "Finished searching libraries" ], lambda line, length: line[length:-1])
        process = Process.OpenPiped(cmdline, env.EnvironmentVariables)
        Process.PollPipeOutput(process, scanner)
        #
        # Weak library files are those that should be provided as input to the link step but not used
        # as dependencies to check if the link step needs to be rebuilt. Search for those in the scanner
        # output and exclude them from the implicit dependency list.
        #
        includes = [ ]
        for include in scanner.Includes:
            ignore_dep = False
            for lib in self.WeakLibFiles:
                lib_name = lib.GetInputFile(env)
                if lib_name in include:
                    ignore_dep = True
                    break
            if not ignore_dep:
                includes.append(include)
        # Record the implicit dependencies for this file
        data = env.GetFileMetadata(self.GetInputFile(env))
        data.SetImplicitDeps(env, includes)
        return process.returncode == 0
    def GetInputFile(self, env):
        # The link target itself lives in the configuration's output directory
        path = os.path.join(env.CurrentConfig.OutputPath, self.Path)
        return path
    def GetPrimaryOutput(self, config):
        """Return (path-without-extension, extension) for the linked binary."""
        # Get the relocated path minus extension
        path = os.path.splitext(self.Path)[0]
        path = os.path.join(config.OutputPath, path)
        ext = ".exe"
        if config.LinkOptions.DLL:
            ext = ".dll"
        return (path, ext)
    def GetOutputFiles(self, env):
        """Return the binary plus any .map/.pdb/.ilk side outputs the options imply."""
        # Add the EXE/DLL
        (path, ext) = self.GetPrimaryOutput(env.CurrentConfig)
        files = [ path + ext ]
        # Make sure the .map file is in the intermediate directory, of the same name as the output
        if env.CurrentConfig.LinkOptions.MapFile:
            map_file = os.path.splitext(self.Path)[0] + ".map"
            map_file = os.path.join(env.CurrentConfig.IntermediatePath, map_file)
            files += [ map_file ]
        if env.CurrentConfig.LinkOptions.Debug:
            files += [ path + ".pdb" ]
        if env.CurrentConfig.LinkOptions.Incremental:
            files += [ path + ".ilk" ]
        return files
    def __repr__(self):
        return "LINK: " + self.Path
#
# A node for compositing a set of dependencies into a library file
#
class VCLibNode (BuildSystem.Node):
    """Build node that composites object files and libraries into a .lib with lib.exe."""
    def __init__(self, path, dependencies, lib_files):
        super().__init__()
        self.Path = path
        # Object files are explicit dependencies, lib files are implicit, scanned during output
        self.Dependencies = dependencies
        self.LibFiles = lib_files
    def Build(self, env):
        """Run lib.exe over the dependency outputs; returns True on success."""
        output_files = self.GetOutputFiles(env)
        # Construct the command-line
        cmdline = [ "lib.exe" ] + env.CurrentConfig.LibOptions.CommandLine
        cmdline += [ '/OUT:' + output_files[0] ]
        cmdline += [ dep.GetOutputFiles(env)[0] for dep in self.Dependencies ]
        cmdline += [ dep.GetOutputFiles(env)[0] for dep in self.LibFiles ]
        Utils.Print(env, "Librarian: " + output_files[0])
        Utils.ShowCmdLine(env, cmdline)
        # Run the librarian process
        process = Process.OpenPiped(cmdline, env.EnvironmentVariables)
        output = Process.WaitForPipeOutput(process)
        if not env.NoToolOutput:
            Utils.Print(env, output)
        return process.returncode == 0
    def GetInputFile(self, env):
        # The library itself lives in the configuration's output directory
        path = os.path.join(env.CurrentConfig.OutputPath, self.Path)
        return path
    def GetPrimaryOutput(self, config):
        """Return (path-without-extension, ".lib") for the output library."""
        # Get the relocated path minus extension
        path = os.path.splitext(self.Path)[0]
        path = os.path.join(config.OutputPath, path)
        return (path, ".lib")
    def GetOutputFiles(self, env):
        # A librarian step produces exactly one file
        (path, ext) = self.GetPrimaryOutput(env.CurrentConfig)
        return [ path + ext ]
def __RunTests():
    """Smoke-test: print the generated command line for each option class/config.

    Bug fix: the option classes never had a BuildCommandLine() method -- the
    argument list is built by UpdateCommandLine() (called from the
    constructor) and exposed as the CommandLine attribute, so the old calls
    raised AttributeError.
    """
    options = VCCompileOptions(VCBaseConfig.DEBUG)
    print(options.CommandLine)
    options = VCCompileOptions(VCBaseConfig.RELEASE)
    print(options.CommandLine)
    options = VCLinkOptions(VCBaseConfig.DEBUG)
    print(options.CommandLine)
    options = VCLinkOptions(VCBaseConfig.RELEASE)
    print(options.CommandLine)
    options = VCLibOptions(VCBaseConfig.DEBUG)
    print(options.CommandLine)
    options = VCLibOptions(VCBaseConfig.RELEASE)
    print(options.CommandLine)

if __name__ == "__main__":
    __RunTests()
|
{
"content_hash": "501305fd9a38681d80361f20fc2ea60a",
"timestamp": "",
"source": "github",
"line_count": 962,
"max_line_length": 176,
"avg_line_length": 35.88565488565489,
"alnum_prop": 0.5971554371125659,
"repo_name": "Celtoys/PiB",
"id": "99c7798f7ff76f9c79e642eae516e9632dfbf071",
"size": "34522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/MSVCPlatform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "283"
},
{
"name": "C",
"bytes": "111"
},
{
"name": "C++",
"bytes": "2371"
},
{
"name": "Objective-C",
"bytes": "16"
},
{
"name": "Python",
"bytes": "127358"
}
],
"symlink_target": ""
}
|
from eepp4.articleparser import ArticleParser
# exceptions
from eepp4.articleparser import ArticleParserError
from eepp4.hexviewparser import HexviewParserError
|
{
"content_hash": "db734f8a3b95b1f47439b5889b514c05",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 50,
"avg_line_length": 32.4,
"alnum_prop": 0.8827160493827161,
"repo_name": "eepp/eepp4",
"id": "55186f81c6e0976ff41ebb860e16c4c40140786e",
"size": "228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eepp4/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40331"
}
],
"symlink_target": ""
}
|
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import tensorflow as tf
def boston_input_fn():
  """Return (features, target) float32 tensors for the Boston housing data."""
  boston = tf.contrib.learn.datasets.load_boston()

  def _to_float_matrix(values, width):
    # Reshape the raw numpy array into a [-1, width] float32 tensor.
    return tf.cast(tf.reshape(tf.constant(values), [-1, width]), tf.float32)

  return _to_float_matrix(boston.data, 13), _to_float_matrix(boston.target, 1)
def linear_model_fn(features, target, unused_mode):
  """Model fn: zero-initialised linear regression; the mode argument is unused."""
  regression = tf.contrib.learn.models.linear_regression_zero_init
  return regression(features, target)
class EstimatorTest(tf.test.TestCase):
  """Basic train/evaluate/predict tests for tf.contrib.learn.Estimator."""

  def testTrain(self):
    # One training step followed by evaluation exercises the fit/eval round trip.
    output_dir = tempfile.mkdtemp()
    est = tf.contrib.learn.Estimator(model_fn=linear_model_fn,
                                     classification=False, model_dir=output_dir)
    est.train(input_fn=boston_input_fn, steps=1)
    _ = est.evaluate(input_fn=boston_input_fn, steps=1)

  def testPredict(self):
    # Predictions should produce one output row per input example.
    est = tf.contrib.learn.Estimator(model_fn=linear_model_fn,
                                     classification=False)
    boston = tf.contrib.learn.datasets.load_boston()
    est.train(input_fn=boston_input_fn, steps=1)
    output = est.predict(boston.data)
    self.assertEqual(output['predictions'].shape[0], boston.target.shape[0])

  def testWrongInput(self):
    # Training with a feature dict whose keys don't match the first run
    # should be rejected with a ValueError.
    def other_input_fn():
      return {'other': tf.constant([0, 0, 0])}, tf.constant([0, 0, 0])
    output_dir = tempfile.mkdtemp()
    est = tf.contrib.learn.Estimator(model_fn=linear_model_fn,
                                     classification=False, model_dir=output_dir)
    est.train(input_fn=boston_input_fn, steps=1)
    with self.assertRaises(ValueError):
      est.train(input_fn=other_input_fn, steps=1)
if __name__ == '__main__':
  # Run the test suite when this file is executed directly.
  tf.test.main()
|
{
"content_hash": "9341983751c3f0b23635b19c7caeea19",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 80,
"avg_line_length": 32.44642857142857,
"alnum_prop": 0.6488717666483214,
"repo_name": "ibab/tensorflow",
"id": "ac8bb5928fb382c0dfe8f22ba759d93fbde43d84",
"size": "2420",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/estimators/estimator_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156010"
},
{
"name": "C++",
"bytes": "9133299"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "773228"
},
{
"name": "Java",
"bytes": "39181"
},
{
"name": "JavaScript",
"bytes": "10779"
},
{
"name": "Jupyter Notebook",
"bytes": "1772913"
},
{
"name": "Protocol Buffer",
"bytes": "111555"
},
{
"name": "Python",
"bytes": "6393079"
},
{
"name": "Shell",
"bytes": "164997"
},
{
"name": "TypeScript",
"bytes": "409165"
}
],
"symlink_target": ""
}
|
"""Itertools utils."""
from __future__ import annotations
import collections
import itertools
from typing import Any, Callable, Iterable, Iterator, TypeVar
# from typing_extensions import Unpack, TypeVarTuple # pytype: disable=not-supported-yet # pylint: disable=g-multiple-import
# TODO(pytype): Once supported, should replace
# Fallbacks until pytype supports PEP 646 variadic generics (see comment above)
Unpack = Any
TypeVarTuple = Any

_KeyT = TypeVar('_KeyT')
_ValuesT = Any  # TypeVarTuple('_ValuesT')

_K = TypeVar('_K')
_Tin = TypeVar('_Tin')
_Tout = TypeVar('_Tout')
def _identity(x: '_Tin') -> '_Tin':
  """Return the argument unchanged (default `value` transform for `groupby`)."""
  return x


def groupby(
    iterable: 'Iterable[_Tin]',
    *,
    key: 'Callable[[_Tin], _K]',
    value: 'Callable[[_Tin], _Tout]' = _identity,
) -> 'dict[_K, list[_Tout]]':
  """Group `iterable` into a dict of lists, like an eager `itertools.groupby`.

  Unlike `itertools.groupby`, the input does not need to be pre-sorted: the
  first-seen order of keys, and the original order of values within each
  group, are preserved. A `value` mapping may additionally be applied to the
  stored elements.

  Example:

  ```python
  epy.groupby(['555', '4', '11', '11', '333'], key=len, value=int)
  # -> {3: [555, 333], 1: [4], 2: [11, 11]}
  ```

  Args:
    iterable: The iterable to group
    key: Mapping applied to group the values (should return a hashable)
    value: Mapping applied to the values

  Returns:
    Dict mapping each key to the list of (transformed) values.
  """
  out: 'dict[_K, list[_Tout]]' = {}
  for item in iterable:
    out.setdefault(key(item), []).append(value(item))
  return out
def zip_dict(  # pytype: disable=invalid-annotation
    *dicts: 'Unpack[dict[_KeyT, _ValuesT]]',
) -> 'Iterator[tuple[_KeyT, tuple[Unpack[_ValuesT]]]]':
  """Iterate over items of dictionaries grouped by their keys.

  Example:

  ```python
  d0 = {'a': 1, 'b': 2}
  d1 = {'a': 10, 'b': 20}
  d2 = {'a': 100, 'b': 200}

  list(epy.zip_dict(d0, d1, d2)) == [
      ('a', (1, 10, 100)),
      ('b', (2, 20, 200)),
  ]
  ```

  Args:
    *dicts: The dicts to iterate over. Should all have the same keys; calling
      with no dicts yields nothing.

  Yields:
    `(key, values_tuple)` pairs, in the key order of the first dict.

  Raises:
    KeyError: If dicts does not contain the same keys.
  """
  # Robustness fix: `zip_dict()` with no arguments used to raise IndexError
  # on `dicts[0]`; an empty zip is the natural result instead.
  if not dicts:
    return
  # Set does not keep order like dict, so only use set to compare keys
  all_keys = set(itertools.chain(*dicts))
  d0 = dicts[0]
  if len(all_keys) != len(d0):
    raise KeyError(f'Missing keys: {all_keys ^ set(d0)}')
  for key in d0:  # d0's key order defines the iteration order
    # Will raise KeyError if the dict don't have the same keys
    yield key, tuple(d[key] for d in dicts)
|
{
"content_hash": "557c34e9b298dcc58efafc3be66aaa64",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 126,
"avg_line_length": 23.28440366972477,
"alnum_prop": 0.6249014972419228,
"repo_name": "google/etils",
"id": "fcab07fe8dc1bc866d4d10283500a66580b94365",
"size": "3121",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "etils/epy/itertools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "304785"
}
],
"symlink_target": ""
}
|
"""
Telegram platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.telegram/
"""
import logging
import voluptuous as vol
from homeassistant.const import ATTR_LOCATION
from homeassistant.components.notify import (
ATTR_DATA, ATTR_MESSAGE, ATTR_TARGET, ATTR_TITLE, PLATFORM_SCHEMA,
BaseNotificationService)
_LOGGER = logging.getLogger(__name__)

# This platform delegates all sending to the telegram_bot component's services
DOMAIN = 'telegram_bot'
DEPENDENCIES = [DOMAIN]

# Optional keys understood in the service call's `data` payload
ATTR_KEYBOARD = 'keyboard'
ATTR_INLINE_KEYBOARD = 'inline_keyboard'
ATTR_PHOTO = 'photo'
ATTR_VIDEO = 'video'
ATTR_DOCUMENT = 'document'

CONF_CHAT_ID = 'chat_id'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_CHAT_ID): vol.Coerce(int),
})
def get_service(hass, config, discovery_info=None):
    """Get the Telegram notification service."""
    # The only platform-level option is the default chat id to notify
    return TelegramNotificationService(hass, config.get(CONF_CHAT_ID))
class TelegramNotificationService(BaseNotificationService):
    """Implement the notification service for Telegram."""

    def __init__(self, hass, chat_id):
        """Initialize the service."""
        # Default chat to notify when the caller supplies no explicit target
        self._chat_id = chat_id
        self.hass = hass

    def send_message(self, message="", **kwargs):
        """Send a message to a user."""
        # Everything is routed through the telegram_bot component's services;
        # the target falls back to the configured default chat id
        service_data = {ATTR_TARGET: kwargs.get(ATTR_TARGET, self._chat_id)}
        if ATTR_TITLE in kwargs:
            service_data.update({ATTR_TITLE: kwargs.get(ATTR_TITLE)})
        if message:
            service_data.update({ATTR_MESSAGE: message})
        data = kwargs.get(ATTR_DATA)

        # Get keyboard info
        if data is not None and ATTR_KEYBOARD in data:
            keys = data.get(ATTR_KEYBOARD)
            # Accept a single row or a list of rows
            keys = keys if isinstance(keys, list) else [keys]
            service_data.update(keyboard=keys)
        elif data is not None and ATTR_INLINE_KEYBOARD in data:
            keys = data.get(ATTR_INLINE_KEYBOARD)
            keys = keys if isinstance(keys, list) else [keys]
            service_data.update(inline_keyboard=keys)

        # Send a photo, video, document, or location
        # NOTE: each media branch short-circuits -- only one kind of
        # attachment is sent per call, and the plain message path is skipped
        if data is not None and ATTR_PHOTO in data:
            photos = data.get(ATTR_PHOTO, None)
            photos = photos if isinstance(photos, list) else [photos]
            for photo_data in photos:
                # Each entry's fields (url/file/caption...) are merged into the call
                service_data.update(photo_data)
                self.hass.services.call(
                    DOMAIN, 'send_photo', service_data=service_data)
            return
        if data is not None and ATTR_VIDEO in data:
            videos = data.get(ATTR_VIDEO, None)
            videos = videos if isinstance(videos, list) else [videos]
            for video_data in videos:
                service_data.update(video_data)
                self.hass.services.call(
                    DOMAIN, 'send_video', service_data=service_data)
            return
        if data is not None and ATTR_LOCATION in data:
            service_data.update(data.get(ATTR_LOCATION))
            return self.hass.services.call(
                DOMAIN, 'send_location', service_data=service_data)
        if data is not None and ATTR_DOCUMENT in data:
            service_data.update(data.get(ATTR_DOCUMENT))
            return self.hass.services.call(
                DOMAIN, 'send_document', service_data=service_data)

        # Send message
        _LOGGER.debug('TELEGRAM NOTIFIER calling %s.send_message with %s',
                      DOMAIN, service_data)
        return self.hass.services.call(
            DOMAIN, 'send_message', service_data=service_data)
|
{
"content_hash": "bb6837b4568288cfcf824aa67e474df8",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 76,
"avg_line_length": 36.55102040816327,
"alnum_prop": 0.6328866554997208,
"repo_name": "jamespcole/home-assistant",
"id": "428c7e093d236536c3c550cc467f95daa0fd14f8",
"size": "3582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/telegram/notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14822074"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
"""
Factory tests.
"""
from flask import Flask
from hamcrest import assert_that, instance_of, is_
from microcosm.api import create_object_graph
def test_configure_flask():
    """
    The object graph should expose a configured `Flask` application.
    """
    object_graph = create_object_graph(name="example", testing=True)
    assert_that(object_graph.app, is_(instance_of(Flask)))
|
{
"content_hash": "ba002d4b18457e32c262f9190f70f858",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 61,
"avg_line_length": 21.8125,
"alnum_prop": 0.6934097421203438,
"repo_name": "globality-corp/microcosm-flask",
"id": "b135b7107ba17d752412ffb1764f44461bf1f931",
"size": "349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "microcosm_flask/tests/test_factories.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3203"
},
{
"name": "Python",
"bytes": "359772"
},
{
"name": "Shell",
"bytes": "1574"
}
],
"symlink_target": ""
}
|
"""
Collects the following from beanstalkd:
- Server statistics via the 'stats' command
- Per tube statistics via the 'stats-tube' command
#### Dependencies
* beanstalkc
"""
import re
import diamond.collector
try:
import beanstalkc
beanstalkc # workaround for pyflakes issue #13
except ImportError:
beanstalkc = None
class BeanstalkdCollector(diamond.collector.Collector):
    """Publish beanstalkd server-wide and per-tube statistics.

    Uses the beanstalkc client to issue the 'stats' and 'stats-tube'
    commands against a single daemon.
    """

    # Stats that grow monotonically are published as COUNTER metrics;
    # everything else is a GAUGE.
    COUNTERS_REGEX = re.compile(
        r'^(cmd-.*|job-timeouts|total-jobs|total-connections)$')

    def get_default_config_help(self):
        """Return help text for the configurable options."""
        config_help = super(BeanstalkdCollector,
                            self).get_default_config_help()
        config_help.update({
            'host': 'Hostname',
            'port': 'Port',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(BeanstalkdCollector, self).get_default_config()
        config.update({
            'path': 'beanstalkd',
            'host': 'localhost',
            'port': 11300,
        })
        return config

    def _get_stats(self):
        """Fetch stats from the daemon.

        Returns a dict with 'instance' (server-wide stats) and 'tubes'
        (list of per-tube stats dicts), or {} if the daemon is
        unreachable.
        """
        stats = {}
        try:
            connection = beanstalkc.Connection(self.config['host'],
                                               int(self.config['port']))
        # 'except X as e' is valid on Python 2.6+ and required on Python 3;
        # the previous 'except X, e' form is a Python 3 syntax error.
        except beanstalkc.BeanstalkcException as e:
            self.log.error("Couldn't connect to beanstalkd: %s", e)
            return {}
        stats['instance'] = connection.stats()
        stats['tubes'] = []
        for tube in connection.tubes():
            tube_stats = connection.stats_tube(tube)
            stats['tubes'].append(tube_stats)
        return stats

    def collect(self):
        """Collect and publish all beanstalkd metrics."""
        if beanstalkc is None:
            self.log.error('Unable to import beanstalkc')
            return {}
        info = self._get_stats()
        if not info:
            # Connection failed; _get_stats already logged the error.
            # Without this guard info['instance'] would raise KeyError.
            return {}
        for stat, value in info['instance'].items():
            if stat != 'version':
                self.publish(stat, value,
                             metric_type=self.get_metric_type(stat))
        for tube_stats in info['tubes']:
            tube = tube_stats['name']
            for stat, value in tube_stats.items():
                if stat != 'name':
                    self.publish('tubes.%s.%s' % (tube, stat), value,
                                 metric_type=self.get_metric_type(stat))

    def get_metric_type(self, stat):
        """Return 'COUNTER' for monotonically increasing stats, else 'GAUGE'."""
        if self.COUNTERS_REGEX.match(stat):
            return 'COUNTER'
        return 'GAUGE'
|
{
"content_hash": "9269312165e36d0f7866201064d11f91",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 72,
"avg_line_length": 28.79310344827586,
"alnum_prop": 0.5425149700598803,
"repo_name": "metamx/Diamond",
"id": "901e078e55a6587f7064e8084edf7530bc3d8221",
"size": "2521",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/collectors/beanstalkd/beanstalkd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "D",
"bytes": "1074"
},
{
"name": "Python",
"bytes": "1074097"
},
{
"name": "Ruby",
"bytes": "230"
},
{
"name": "Shell",
"bytes": "4650"
}
],
"symlink_target": ""
}
|
from thrift.Thrift import *
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class TCell(object):
  """
  TCell - Used to transport a cell value (byte[]) and the timestamp it was
  stored with together as a result for get and getRow methods. This promotes
  the timestamp of a cell to a first-class value, making it easy to take
  note of temporal data. Cell is used all the way from HStore up to HTable.

  Attributes:
   - value
   - timestamp
  """

  # Generated field-descriptor table: (field id, wire type, name,
  # type arguments, default value); index 0 is unused.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'value', None, None, ), # 1
    (2, TType.I64, 'timestamp', None, None, ), # 2
  )

  def __init__(self, value=None, timestamp=None,):
    self.value = value
    self.timestamp = timestamp

  def read(self, iprot):
    # Fast path: decode the whole struct in C when the accelerated
    # binary protocol and the fastbinary extension are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the wire format field by field, skipping
    # unknown or wrongly-typed fields for forward compatibility.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.value = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I64:
          self.timestamp = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: encode via the C fastbinary extension when possible.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each set (non-None) field explicitly.
    oprot.writeStructBegin('TCell')
    if self.value != None:
      oprot.writeFieldBegin('value', TType.STRING, 1)
      oprot.writeString(self.value)
      oprot.writeFieldEnd()
    if self.timestamp != None:
      oprot.writeFieldBegin('timestamp', TType.I64, 2)
      oprot.writeI64(self.timestamp)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def __repr__(self):
    # NOTE: Python 2 generated code (iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Value equality over all attributes.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class ColumnDescriptor(object):
  """
  An HColumnDescriptor contains information about a column family
  such as the number of versions, compression settings, etc. It is
  used as input when creating a table or adding a column.

  Attributes:
   - name
   - maxVersions
   - compression
   - inMemory
   - bloomFilterType
   - bloomFilterVectorSize
   - bloomFilterNbHashes
   - blockCacheEnabled
   - timeToLive
  """

  # Generated field-descriptor table: (field id, wire type, name,
  # type arguments, default value); index 0 is unused. The defaults
  # in slot [4] are reused as the __init__ keyword defaults below.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'name', None, None, ), # 1
    (2, TType.I32, 'maxVersions', None, 3, ), # 2
    (3, TType.STRING, 'compression', None, "NONE", ), # 3
    (4, TType.BOOL, 'inMemory', None, False, ), # 4
    (5, TType.STRING, 'bloomFilterType', None, "NONE", ), # 5
    (6, TType.I32, 'bloomFilterVectorSize', None, 0, ), # 6
    (7, TType.I32, 'bloomFilterNbHashes', None, 0, ), # 7
    (8, TType.BOOL, 'blockCacheEnabled', None, False, ), # 8
    (9, TType.I32, 'timeToLive', None, -1, ), # 9
  )

  def __init__(self, name=None, maxVersions=thrift_spec[2][4], compression=thrift_spec[3][4], inMemory=thrift_spec[4][4], bloomFilterType=thrift_spec[5][4], bloomFilterVectorSize=thrift_spec[6][4], bloomFilterNbHashes=thrift_spec[7][4], blockCacheEnabled=thrift_spec[8][4], timeToLive=thrift_spec[9][4],):
    self.name = name
    self.maxVersions = maxVersions
    self.compression = compression
    self.inMemory = inMemory
    self.bloomFilterType = bloomFilterType
    self.bloomFilterVectorSize = bloomFilterVectorSize
    self.bloomFilterNbHashes = bloomFilterNbHashes
    self.blockCacheEnabled = blockCacheEnabled
    self.timeToLive = timeToLive

  def read(self, iprot):
    # Fast path: decode the whole struct in C when the accelerated
    # binary protocol and the fastbinary extension are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the wire format field by field, skipping
    # unknown or wrongly-typed fields for forward compatibility.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I32:
          self.maxVersions = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.compression = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.BOOL:
          self.inMemory = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.STRING:
          self.bloomFilterType = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.I32:
          self.bloomFilterVectorSize = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.I32:
          self.bloomFilterNbHashes = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 8:
        if ftype == TType.BOOL:
          self.blockCacheEnabled = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 9:
        if ftype == TType.I32:
          self.timeToLive = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: encode via the C fastbinary extension when possible.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each set (non-None) field explicitly.
    oprot.writeStructBegin('ColumnDescriptor')
    if self.name != None:
      oprot.writeFieldBegin('name', TType.STRING, 1)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.maxVersions != None:
      oprot.writeFieldBegin('maxVersions', TType.I32, 2)
      oprot.writeI32(self.maxVersions)
      oprot.writeFieldEnd()
    if self.compression != None:
      oprot.writeFieldBegin('compression', TType.STRING, 3)
      oprot.writeString(self.compression)
      oprot.writeFieldEnd()
    if self.inMemory != None:
      oprot.writeFieldBegin('inMemory', TType.BOOL, 4)
      oprot.writeBool(self.inMemory)
      oprot.writeFieldEnd()
    if self.bloomFilterType != None:
      oprot.writeFieldBegin('bloomFilterType', TType.STRING, 5)
      oprot.writeString(self.bloomFilterType)
      oprot.writeFieldEnd()
    if self.bloomFilterVectorSize != None:
      oprot.writeFieldBegin('bloomFilterVectorSize', TType.I32, 6)
      oprot.writeI32(self.bloomFilterVectorSize)
      oprot.writeFieldEnd()
    if self.bloomFilterNbHashes != None:
      oprot.writeFieldBegin('bloomFilterNbHashes', TType.I32, 7)
      oprot.writeI32(self.bloomFilterNbHashes)
      oprot.writeFieldEnd()
    if self.blockCacheEnabled != None:
      oprot.writeFieldBegin('blockCacheEnabled', TType.BOOL, 8)
      oprot.writeBool(self.blockCacheEnabled)
      oprot.writeFieldEnd()
    if self.timeToLive != None:
      oprot.writeFieldBegin('timeToLive', TType.I32, 9)
      oprot.writeI32(self.timeToLive)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def __repr__(self):
    # NOTE: Python 2 generated code (iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Value equality over all attributes.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TRegionInfo(object):
  """
  A TRegionInfo contains information about an HTable region.

  Attributes:
   - startKey
   - endKey
   - id
   - name
   - version
  """

  # Generated field-descriptor table: (field id, wire type, name,
  # type arguments, default value); index 0 is unused.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'startKey', None, None, ), # 1
    (2, TType.STRING, 'endKey', None, None, ), # 2
    (3, TType.I64, 'id', None, None, ), # 3
    (4, TType.STRING, 'name', None, None, ), # 4
    (5, TType.BYTE, 'version', None, None, ), # 5
  )

  def __init__(self, startKey=None, endKey=None, id=None, name=None, version=None,):
    self.startKey = startKey
    self.endKey = endKey
    self.id = id
    self.name = name
    self.version = version

  def read(self, iprot):
    # Fast path: decode the whole struct in C when the accelerated
    # binary protocol and the fastbinary extension are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the wire format field by field, skipping
    # unknown or wrongly-typed fields for forward compatibility.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.startKey = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.endKey = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.id = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.BYTE:
          self.version = iprot.readByte();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: encode via the C fastbinary extension when possible.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each set (non-None) field explicitly.
    oprot.writeStructBegin('TRegionInfo')
    if self.startKey != None:
      oprot.writeFieldBegin('startKey', TType.STRING, 1)
      oprot.writeString(self.startKey)
      oprot.writeFieldEnd()
    if self.endKey != None:
      oprot.writeFieldBegin('endKey', TType.STRING, 2)
      oprot.writeString(self.endKey)
      oprot.writeFieldEnd()
    if self.id != None:
      oprot.writeFieldBegin('id', TType.I64, 3)
      oprot.writeI64(self.id)
      oprot.writeFieldEnd()
    if self.name != None:
      oprot.writeFieldBegin('name', TType.STRING, 4)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.version != None:
      oprot.writeFieldBegin('version', TType.BYTE, 5)
      oprot.writeByte(self.version)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def __repr__(self):
    # NOTE: Python 2 generated code (iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Value equality over all attributes.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class Mutation(object):
  """
  A Mutation object is used to either update or delete a column-value.

  Attributes:
   - isDelete
   - column
   - value
  """

  # Generated field-descriptor table: (field id, wire type, name,
  # type arguments, default value); index 0 is unused. isDelete
  # defaults to False (slot [4], reused by __init__).
  thrift_spec = (
    None, # 0
    (1, TType.BOOL, 'isDelete', None, False, ), # 1
    (2, TType.STRING, 'column', None, None, ), # 2
    (3, TType.STRING, 'value', None, None, ), # 3
  )

  def __init__(self, isDelete=thrift_spec[1][4], column=None, value=None,):
    self.isDelete = isDelete
    self.column = column
    self.value = value

  def read(self, iprot):
    # Fast path: decode the whole struct in C when the accelerated
    # binary protocol and the fastbinary extension are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the wire format field by field, skipping
    # unknown or wrongly-typed fields for forward compatibility.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.BOOL:
          self.isDelete = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.column = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.value = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: encode via the C fastbinary extension when possible.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each set (non-None) field explicitly.
    oprot.writeStructBegin('Mutation')
    if self.isDelete != None:
      oprot.writeFieldBegin('isDelete', TType.BOOL, 1)
      oprot.writeBool(self.isDelete)
      oprot.writeFieldEnd()
    if self.column != None:
      oprot.writeFieldBegin('column', TType.STRING, 2)
      oprot.writeString(self.column)
      oprot.writeFieldEnd()
    if self.value != None:
      oprot.writeFieldBegin('value', TType.STRING, 3)
      oprot.writeString(self.value)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def __repr__(self):
    # NOTE: Python 2 generated code (iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Value equality over all attributes.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class BatchMutation(object):
  """
  A BatchMutation object is used to apply a number of Mutations to a single row.

  Attributes:
   - row
   - mutations
  """

  # Generated field-descriptor table: (field id, wire type, name,
  # type arguments, default value); index 0 is unused. 'mutations'
  # is a list of Mutation structs.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'row', None, None, ), # 1
    (2, TType.LIST, 'mutations', (TType.STRUCT,(Mutation, Mutation.thrift_spec)), None, ), # 2
  )

  def __init__(self, row=None, mutations=None,):
    self.row = row
    self.mutations = mutations

  def read(self, iprot):
    # Fast path: decode the whole struct in C when the accelerated
    # binary protocol and the fastbinary extension are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the wire format field by field, skipping
    # unknown or wrongly-typed fields for forward compatibility.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.row = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          # Decode the list of Mutation structs element by element.
          self.mutations = []
          (_etype3, _size0) = iprot.readListBegin()
          for _i4 in xrange(_size0):
            _elem5 = Mutation()
            _elem5.read(iprot)
            self.mutations.append(_elem5)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: encode via the C fastbinary extension when possible.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each set (non-None) field explicitly.
    oprot.writeStructBegin('BatchMutation')
    if self.row != None:
      oprot.writeFieldBegin('row', TType.STRING, 1)
      oprot.writeString(self.row)
      oprot.writeFieldEnd()
    if self.mutations != None:
      oprot.writeFieldBegin('mutations', TType.LIST, 2)
      oprot.writeListBegin(TType.STRUCT, len(self.mutations))
      for iter6 in self.mutations:
        iter6.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def __repr__(self):
    # NOTE: Python 2 generated code (iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Value equality over all attributes.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TRowResult(object):
  """
  Holds row name and then a map of columns to cells.

  Attributes:
   - row
   - columns
  """

  # Generated field-descriptor table: (field id, wire type, name,
  # type arguments, default value); index 0 is unused. 'columns'
  # maps column name (string) to a TCell struct.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'row', None, None, ), # 1
    (2, TType.MAP, 'columns', (TType.STRING,None,TType.STRUCT,(TCell, TCell.thrift_spec)), None, ), # 2
  )

  def __init__(self, row=None, columns=None,):
    self.row = row
    self.columns = columns

  def read(self, iprot):
    # Fast path: decode the whole struct in C when the accelerated
    # binary protocol and the fastbinary extension are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the wire format field by field, skipping
    # unknown or wrongly-typed fields for forward compatibility.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.row = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.MAP:
          # Decode the string -> TCell map entry by entry.
          self.columns = {}
          (_ktype8, _vtype9, _size7 ) = iprot.readMapBegin()
          for _i11 in xrange(_size7):
            _key12 = iprot.readString();
            _val13 = TCell()
            _val13.read(iprot)
            self.columns[_key12] = _val13
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: encode via the C fastbinary extension when possible.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each set (non-None) field explicitly.
    oprot.writeStructBegin('TRowResult')
    if self.row != None:
      oprot.writeFieldBegin('row', TType.STRING, 1)
      oprot.writeString(self.row)
      oprot.writeFieldEnd()
    if self.columns != None:
      oprot.writeFieldBegin('columns', TType.MAP, 2)
      oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.columns))
      for kiter14,viter15 in self.columns.items():
        oprot.writeString(kiter14)
        viter15.write(oprot)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def __repr__(self):
    # NOTE: Python 2 generated code (iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Value equality over all attributes.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class IOError(Exception):
  """
  An IOError exception signals that an error occurred communicating
  to the Hbase master or an Hbase region server. Also used to return
  more general Hbase error conditions.

  Attributes:
   - message
  """

  # Generated field-descriptor table: (field id, wire type, name,
  # type arguments, default value); index 0 is unused.
  # NOTE: this generated class shadows the builtin IOError within
  # this module; that is the thrift compiler's naming, not a choice.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'message', None, None, ), # 1
  )

  def __init__(self, message=None,):
    self.message = message

  def read(self, iprot):
    # Fast path: decode the whole struct in C when the accelerated
    # binary protocol and the fastbinary extension are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the wire format field by field, skipping
    # unknown or wrongly-typed fields for forward compatibility.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.message = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: encode via the C fastbinary extension when possible.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit the message field when set.
    oprot.writeStructBegin('IOError')
    if self.message != None:
      oprot.writeFieldBegin('message', TType.STRING, 1)
      oprot.writeString(self.message)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def __str__(self):
    # Exceptions stringify as their repr for debuggability.
    return repr(self)

  def __repr__(self):
    # NOTE: Python 2 generated code (iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Value equality over all attributes.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class IllegalArgument(Exception):
  """
  An IllegalArgument exception indicates an illegal or invalid
  argument was passed into a procedure.

  Attributes:
   - message
  """

  # Generated field-descriptor table: (field id, wire type, name,
  # type arguments, default value); index 0 is unused.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'message', None, None, ), # 1
  )

  def __init__(self, message=None,):
    self.message = message

  def read(self, iprot):
    # Fast path: decode the whole struct in C when the accelerated
    # binary protocol and the fastbinary extension are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the wire format field by field, skipping
    # unknown or wrongly-typed fields for forward compatibility.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.message = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: encode via the C fastbinary extension when possible.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit the message field when set.
    oprot.writeStructBegin('IllegalArgument')
    if self.message != None:
      oprot.writeFieldBegin('message', TType.STRING, 1)
      oprot.writeString(self.message)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def __str__(self):
    # Exceptions stringify as their repr for debuggability.
    return repr(self)

  def __repr__(self):
    # NOTE: Python 2 generated code (iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Value equality over all attributes.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class AlreadyExists(Exception):
  """
  An AlreadyExists exceptions signals that a table with the specified
  name already exists

  Attributes:
   - message
  """

  # Generated field-descriptor table: (field id, wire type, name,
  # type arguments, default value); index 0 is unused.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'message', None, None, ), # 1
  )

  def __init__(self, message=None,):
    self.message = message

  def read(self, iprot):
    # Fast path: decode the whole struct in C when the accelerated
    # binary protocol and the fastbinary extension are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the wire format field by field, skipping
    # unknown or wrongly-typed fields for forward compatibility.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.message = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path: encode via the C fastbinary extension when possible.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit the message field when set.
    oprot.writeStructBegin('AlreadyExists')
    if self.message != None:
      oprot.writeFieldBegin('message', TType.STRING, 1)
      oprot.writeString(self.message)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def __str__(self):
    # Exceptions stringify as their repr for debuggability.
    return repr(self)

  def __repr__(self):
    # NOTE: Python 2 generated code (iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Value equality over all attributes.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
|
{
"content_hash": "b7ace4f8e0b7a2160674c2502b40f0ef",
"timestamp": "",
"source": "github",
"line_count": 774,
"max_line_length": 305,
"avg_line_length": 32.514211886304906,
"alnum_prop": 0.6224270841611699,
"repo_name": "zohmg/zohmg",
"id": "a173b99f129c6f9411628142b495c5ad3c9f92e9",
"size": "25266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zohmg/hbase_thrift/ttypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "51788"
},
{
"name": "JavaScript",
"bytes": "13239"
},
{
"name": "Python",
"bytes": "363980"
},
{
"name": "Shell",
"bytes": "18560"
}
],
"symlink_target": ""
}
|
"""Securely hash and check passwords using PBKDF2.
Use random salts to protect against rainbow tables, many iterations against
brute-force, and constant-time comparison against timing attacks.
Keep parameters to the algorithm together with the hash so that we can
change the parameters and keep older hashes working.
See more details at http://exyr.org/2011/hashing-passwords/
Author: Simon Sapin
License: BSD
"""
import hashlib
from os import urandom
from base64 import b64encode, b64decode
from itertools import izip
# From https://github.com/mitsuhiko/python-pbkdf2
from .pbkdf2 import pbkdf2_bin
# Parameters to PBKDF2. Only affect new passwords.
SALT_LENGTH = 12
KEY_LENGTH = 24
HASH_FUNCTION = 'sha256' # Must be in hashlib.
# Linear to the hashing time. Adjust to be high but take a reasonable
# amount of time on your server. Measure with:
# python -m timeit -s 'import passwords as p' 'p.make_hash("something")'
COST_FACTOR = 10000
def make_hash(password):
    """Derive a new salted PBKDF2 hash string for the given password.

    The result is self-describing: 'PBKDF2$<hash>$<cost>$<salt>$<key>',
    so older hashes keep working if the parameters above change.
    """
    if isinstance(password, unicode):
        password = password.encode('utf-8')
    salt = b64encode(urandom(SALT_LENGTH))
    derived_key = pbkdf2_bin(password, salt, COST_FACTOR, KEY_LENGTH,
                             getattr(hashlib, HASH_FUNCTION))
    return 'PBKDF2${}${}${}${}'.format(
        HASH_FUNCTION, COST_FACTOR, salt, b64encode(derived_key))
def check_hash(password, hash_):
    """Verify a password against a stored 'PBKDF2$...' hash string."""
    if isinstance(password, unicode):
        password = password.encode('utf-8')
    algorithm, hash_function, cost_factor, salt, encoded = hash_.split('$')
    assert algorithm == 'PBKDF2'
    expected = b64decode(encoded)
    candidate = pbkdf2_bin(password, salt, int(cost_factor), len(expected),
                           getattr(hashlib, hash_function))
    assert len(expected) == len(candidate)  # we requested this from pbkdf2_bin()
    # Accumulate XOR differences over every byte so the run time does not
    # depend on where the first mismatch occurs (constant-time compare).
    # See http://carlos.bueno.org/2011/10/timing.html
    difference = 0
    for byte_a, byte_b in izip(expected, candidate):
        difference |= ord(byte_a) ^ ord(byte_b)
    return difference == 0
|
{
"content_hash": "61ef834aa2979cd06cadac977231138b",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 76,
"avg_line_length": 34.91935483870968,
"alnum_prop": 0.6817551963048499,
"repo_name": "taeram/flask-boilerplate",
"id": "c76dfc0814e4cc0e5a372017eb447edf937817c8",
"size": "2180",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "app/library/hash_passwords.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2378"
},
{
"name": "HTML",
"bytes": "11967"
},
{
"name": "JavaScript",
"bytes": "4456"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "22334"
}
],
"symlink_target": ""
}
|
__author__ = "Peter Ogden"
__copyright__ = "Copyright 2018, Xilinx"
__email__ = "pynq_support@xilinx.com"
import asyncio
import numpy as np
from pynq import DefaultIP, allocate, UnsupportedConfiguration
class _FrameCache:
def __init__(self, mode, memory, capacity=5, cacheable=0):
self._cache = []
self._mode = mode
self._capacity = capacity
self._cacheable = cacheable
self._memory = memory
def getframe(self):
"""Retrieve a frame from the cache or create a new frame if the
cache is empty. The freebuffer method of the returned array is
overriden to return the object to the cache rather than freeing
the object.
"""
if self._cache:
frame = allocate(
shape=self._mode.shape, dtype='u1', cacheable=self._cacheable,
pointer=self._cache.pop(), cache=self, target=self._memory)
else:
frame = allocate(
shape=self._mode.shape, dtype=np.uint8,
cacheable=self._cacheable, cache=self, target=self._memory)
return frame
def return_pointer(self, pointer):
if len(self._cache) < self._capacity:
self._cache.append(pointer)
def clear(self):
self._cache.clear()
class AxiVDMA(DefaultIP):
"""Driver class for the Xilinx VideoDMA IP core
The driver is split into input and output channels, which are exposed using the
readchannel and writechannel attributes. Each channel has start and
stop methods to control the data transfer. All channels MUST be stopped
before reprogramming the bitstream or inconsistent behaviour may result.
The DMA uses a single ownership model of frames in that frames are either
owned by the DMA or the user code but not both. S2MMChannel.readframe
and MM2SChannel.newframe both return a frame to the user. It is the
user's responsibility to either free the frame using the freebuffer()
method or to hand ownership back to the DMA using MM2SChannel.writeframe.
Once ownership has been returned the user should not access the contents
of the frame as the underlying memory may be deleted without warning.
Attributes
----------
readchannel : AxiVDMA.S2MMChannel
Video input DMA channel
writechannel : AxiVDMA.MM2SChannel
Video output DMA channel
"""
    class _FrameList:
        """Internal helper class for handling the list of frames associated
        with a DMA channel. Assumes ownership of all frames it contains
        unless explicitly removed with takeownership

        """
        def __init__(self, parent, offset, count):
            # parent is the owning channel: its _mmio and reload
            # attributes must already be set when this runs.
            self._frames = [None] * count
            self._mmio = parent._mmio
            self._offset = offset
            self._slaves = set()
            self.count = count
            self.reload = parent.reload

        def __getitem__(self, index):
            frame = self._frames[index]
            return frame

        def takeownership(self, index):
            # Drop our reference without touching the hardware register;
            # the caller now owns the frame.
            self._frames[index] = None

        def __len__(self):
            return self.count

        def __setitem__(self, index, frame):
            # Store the frame and program its physical address into the
            # register file (one 32-bit word per entry at self._offset).
            # A None frame programs address 0.
            self._frames[index] = frame
            if frame is not None:
                self._mmio.write(self._offset + 4 * index,
                                 frame.physical_address)
            else:
                self._mmio.write(self._offset + 4 * index, 0)
            self.reload()
            # Mirror the assignment into any slaved frame lists; slaves
            # never take ownership of the frame themselves.
            for s in self._slaves:
                s[index] = frame
                s.takeownership(index)

        def addslave(self, slave):
            # Register a slave list and bring it in sync with our
            # current contents.
            self._slaves.add(slave)
            for i in range(len(self._frames)):
                slave[i] = self[i]
                slave.takeownership(i)
            slave.reload()

        def removeslave(self, slave):
            self._slaves.remove(slave)
class S2MMChannel:
"""Read channel of the Video DMA
Brings frames from the video input into memory. Hands ownership of
the read frames to the user code.
Attributes
----------
mode : VideoMode
The video mode of the DMA channel
cacheable_frames : bool
Whether frames should be stored in cacheable or
non-cacheable memory
"""
        def __init__(self, parent, interrupt, memory):
            # _mmio must be assigned before _FrameList is constructed:
            # _FrameList.__init__ reads parent._mmio (and parent.reload).
            self._mmio = parent.mmio
            # 0xAC: offset of the first S2MM frame-store address register
            # -- NOTE(review): presumed from the AXI VDMA register map,
            # confirm against the product guide.
            self._frames = AxiVDMA._FrameList(self, 0xAC, parent.framecount)
            self._interrupt = interrupt
            self._sinkchannel = None
            self._mode = None
            self.memory = memory
            self.cacheable_frames = True
def _readframe_internal(self):
if self._mmio.read(0x34) & 0x8980:
# Some spurious errors can occur at the start of transfers
# let's ignore them for now
self._mmio.write(0x34, 0x8980)
self.irqframecount = 1
nextframe = self._cache.getframe()
previous_frame = (self.activeframe + 2) % len(self._frames)
captured = self._frames[previous_frame]
self._frames.takeownership(previous_frame)
self._frames[previous_frame] = nextframe
post_frame = (self.activeframe + 2) % len(self._frames)
captured.invalidate()
return captured
def readframe(self):
"""Read a frame from the channel and return to the user
This function may block until a complete frame has been read. A
single frame buffer is kept so the first frame read after a long
pause in reading may return a stale frame. To ensure an up-to-date
frame when starting processing video read an additional time
before starting the processing loop.
Returns
-------
numpy.ndarray of the video frame
"""
if not self.running:
raise RuntimeError('DMA channel not started')
while self._mmio.read(0x34) & 0x1000 == 0:
loop = asyncio.get_event_loop()
loop.run_until_complete(
asyncio.ensure_future(self._interrupt.wait()))
pass
self._mmio.write(0x34, 0x1000)
return self._readframe_internal()
async def readframe_async(self):
"""Read a frame from the channel, yielding instead of blocking
if no data is available. See readframe for more details
"""
if not self.running:
raise RuntimeError('DMA channel not started')
while self._mmio.read(0x34) & 0x1000 == 0:
await self._interrupt.wait()
self._mmio.write(0x34, 0x1000)
return self._readframe_internal()
@property
def activeframe(self):
"""The frame index currently being processed by the DMA
This process requires clearing any error bits in the DMA channel
"""
self._mmio.write(0x34, 0x4090)
return (self._mmio.read(0x28) >> 24) & 0x1F
@property
def desiredframe(self):
"""The next frame index to the processed by the DMA
"""
return (self._mmio.read(0x28) >> 8) & 0x1F
@desiredframe.setter
def desiredframe(self, frame_number):
if frame_number < 0 or frame_number >= len(self._frames):
raise ValueError("Invalid frame index")
register_value = self._mmio.read(0x28)
mask = ~(0x1F << 8)
register_value &= mask
register_value |= (frame_number << 8)
self._mmio.write(0x28, register_value)
@property
def mode(self):
"""The video mode of the DMA. Must be set prior to starting.
Changing this while the DMA is running will result in the DMA
being stopped.
"""
return self._mode
@mode.setter
def mode(self, value):
if self.running:
self.stop()
self._mode = value
@property
def running(self):
"""Is the DMA channel running
"""
return (self._mmio.read(0x34) & 0x1) == 0
@property
def parked(self):
"""Is the channel parked or running in circular buffer mode
"""
return self._mmio.read(0x30) & 0x2 == 0
@parked.setter
def parked(self, value):
register = self._mmio.read(0x30)
if value:
register &= ~0x2
else:
register |= 0x2
self._mmio.write(0x30, register)
@property
def irqframecount(self):
register = self._mmio.read(0x30)
return (register >> 16) & 0xFF
@irqframecount.setter
def irqframecount(self, val):
register = self._mmio.read(0x30)
newregister = (register & 0xFF00FFFF) | (val << 16)
if register != newregister:
self._mmio.write(0x30, newregister)
def start(self):
"""Start the DMA. The mode must be set prior to this being called
"""
if not self._mode:
raise RuntimeError("Video mode not set, channel not started")
self.desiredframe = 0
self._cache = _FrameCache(
self._mode, self.memory, cacheable=self.cacheable_frames)
for i in range(len(self._frames)):
self._frames[i] = self._cache.getframe()
self._writemode()
self.reload()
self._mmio.write(0x30, 0x00011083) # Start DMA
self.irqframecount = 4 # Ensure all frames are written to
self._mmio.write(0x34, 0x1000) # Clear any interrupts
while not self.running:
pass
self.reload()
self.desiredframe = 1
def stop(self):
"""Stops the DMA, clears the frame cache and unhooks any tied
outputs
"""
self.tie(None)
self._mmio.write(0x30, 0x00011080)
while self.running:
pass
for i in range(len(self._frames)):
self._frames[i] = None
if hasattr(self, '_cache'):
self._cache.clear()
def _writemode(self):
self._mmio.write(0xA4, self._mode.width *
self._mode.bytes_per_pixel)
self._mmio.write(0xA8, self._mode.stride)
def reload(self):
"""Reload the configuration of the DMA. Should only be called
by the _FrameList class or if you really know what you are doing
"""
if self.running:
self._mmio.write(0xA0, self._mode.height)
def reset(self):
"""Soft reset the DMA. Finishes all transfers before starting
the reset process
"""
self.stop()
self._mmio.write(0x30, 0x00011084)
while self._mmio.read(0x30) & 0x4 == 4:
pass
def tie(self, channel):
"""Ties an output channel to this input channel. This is used
to pass video from input to output without invoking the CPU
for each frame. Main use case is when some slow processing is
being done on a subset of frames while the video is passed
through directly to the output. Only one output may be tied
to an output. The tie is broken either by calling tie(None) or
writing a frame to the tied output channel.
"""
if self._sinkchannel:
self._frames.removeslave(self._sinkchannel._frames)
self._sinkchannel.parked = True
self._sinkchannel.sourcechannel = None
self._sinkchannel = channel
if self._sinkchannel:
self._frames.addslave(self._sinkchannel._frames)
self._sinkchannel.parked = False
self._sinkchannel.framedelay = 1
self._sinkchannel.sourcechannel = self
class MM2SChannel:
"""DMA channel from memory to a video output.
Will continually repeat the most recent frame written.
Attributes
----------
mode : VideoMode
Video mode of the DMA channel
cacheable_frames : bool
Whether frames should be stored in cacheable or
non-cacheable memory
"""
def __init__(self, parent, interrupt, memory):
self._mmio = parent.mmio
self._frames = AxiVDMA._FrameList(self, 0x5C, parent.framecount)
self._interrupt = interrupt
self._mode = None
self.sourcechannel = None
self.cacheable_frames = True
self.memory = memory
def start(self):
"""Start the DMA channel with a blank screen. The mode must
be set prior to calling or a RuntimeError will result.
"""
if not self._mode:
raise RuntimeError("Video mode not set, channel not started")
self._cache = _FrameCache(
self._mode, self.memory, cacheable=self.cacheable_frames)
self._frames[0] = self._cache.getframe()
self._writemode()
self.reload()
self._mmio.write(0x00, 0x00011089)
while not self.running:
pass
self.reload()
self.desiredframe = 0
pass
def stop(self):
"""Stop the DMA channel and empty the frame cache
"""
self._mmio.write(0x00, 0x00011080)
while self.running:
pass
for i in range(len(self._frames)):
self._frames[i] = None
if hasattr(self, '_cache'):
self._cache.clear()
def reset(self):
"""Soft reset the DMA channel
"""
self.stop()
self._mmio.write(0x00, 0x00011084)
while self._mmio.read(0x00) & 0x4 == 4:
pass
def _writeframe_internal(self, frame):
if self.sourcechannel:
self.sourcechannel.tie(None)
frame.flush()
next_frame = (self.desiredframe + 1) % len(self._frames)
self._frames[next_frame] = frame
self.desiredframe = next_frame
def writeframe(self, frame):
"""Schedule the specified frame to be the next one displayed.
Assumes ownership of frame which should no longer be modified
by the user. May block if there is already a frame scheduled.
"""
if not self.running:
raise RuntimeError('DMA channel not started')
while self._mmio.read(0x04) & 0x1000 == 0:
loop = asyncio.get_event_loop()
loop.run_until_complete(
asyncio.ensure_future(self._interrupt.wait()))
self._mmio.write(0x04, 0x1000)
self._writeframe_internal(frame)
async def writeframe_async(self, frame):
"""Same as writeframe() but yields instead of blocking if a
frame is already scheduled
"""
if not self.running:
raise RuntimeError('DMA channel not started')
while self._mmio.read(0x04) & 0x1000 == 0:
await self._interrupt.wait()
self._mmio.write(0x04, 0x1000)
self._writeframe_internal(frame)
def setframe(self, frame):
"""Sets a frame without blocking or taking ownership. In most
circumstances writeframe() is more appropriate
"""
frameindex = self.desiredframe
self._frames[frameindex] = frame
self._frames.takeownership(frameindex)
def _writemode(self):
self._mmio.write(0x54, self._mode.width *
self._mode.bytes_per_pixel)
register = self._mmio.read(0x58)
register &= (0xF << 24)
register |= self._mode.stride
self._mmio.write(0x58, register)
def reload(self):
"""Reload the configuration of the DMA. Should only be called
by the _FrameList class or if you really know what you are doing
"""
if self.running:
self._mmio.write(0x50, self._mode.height)
def newframe(self):
"""Returns a frame of the appropriate size for the video mode.
The contents of the frame are undefined and should not be assumed
to be black
Returns
-------
numpy.ndarray video frame
"""
return self._cache.getframe()
@property
def activeframe(self):
self._mmio.write(0x04, 0x4090)
return (self._mmio.read(0x28) >> 16) & 0x1F
@property
def desiredframe(self):
return self._mmio.read(0x28) & 0x1F
@desiredframe.setter
def desiredframe(self, frame_number):
if frame_number < 0 or frame_number >= len(self._frames):
raise ValueError("Invalid Frame Index")
register_value = self._mmio.read(0x28)
mask = ~0x1F
register_value &= mask
register_value |= frame_number
self._mmio.write(0x28, register_value)
@property
def running(self):
return (self._mmio.read(0x04) & 0x1) == 0
@property
def mode(self):
"""The video mode of the DMA, must be called prior to starting.
If changed while the DMA channel is running the channel will be
stopped
"""
return self._mode
@mode.setter
def mode(self, value):
if self.running:
self.stop()
self._mode = value
@property
def parked(self):
"""Is the channel parked or running in circular buffer mode
"""
return self._mmio.read(0x00) & 0x2 == 0
@parked.setter
def parked(self, value):
register = self._mmio.read(0x00)
if value:
self.desiredframe = self.activeframe
register &= ~0x2
else:
register |= 0x2
self._mmio.write(0x00, register)
@property
def framedelay(self):
register = self._mmio.read(0x58)
return register >> 24
@framedelay.setter
def framedelay(self, value):
register = self._mmio.read(0x58)
register &= 0xFFFF
register |= value << 24
self._mmio.write(0x58, register)
    def __init__(self, description, framecount=None):
        """Create a new instance of the AXI Video DMA driver

        Parameters
        ----------
        description : dict
            Entry describing the VDMA IP core; passed through to
            ``super().__init__``. When it contains a ``parameters``
            mapping the channel configuration is read from it.
        framecount : int, optional
            Number of frame buffers to use when the count cannot be
            read from the IP parameters; defaults to 4.
        """
        super().__init__(description)
        if 'parameters' in description:
            parameters = description['parameters']
            has_s2mm = parameters['C_INCLUDE_S2MM'] == '1'
            has_mm2s = parameters['C_INCLUDE_MM2S'] == '1'
            # The synthesised frame-store count overrides the argument
            framecount = int(parameters['C_NUM_FSTORES'])
            s2mm_addr_width = int(parameters['C_M_AXI_S2MM_ADDR_WIDTH'])
            mm2s_addr_width = int(parameters['C_M_AXI_MM2S_ADDR_WIDTH'])
            if ((has_s2mm and s2mm_addr_width > 32) or
                    (has_mm2s and mm2s_addr_width > 32)):
                raise UnsupportedConfiguration(
                    'VDMA driver only supports 32-bit addresses')
        else:
            # No parameters available: assume both channels are present
            has_s2mm = True
            has_mm2s = True
            framecount = 4 if framecount is None else framecount
        self.framecount = framecount
        memory = description['device'].default_memory
        # s2mm_introut/mm2s_introut are presumably attached by the parent
        # driver from the description -- TODO confirm
        if has_s2mm:
            self.readchannel = AxiVDMA.S2MMChannel(self, self.s2mm_introut,
                                                   memory)
        if has_mm2s:
            self.writechannel = AxiVDMA.MM2SChannel(self, self.mm2s_introut,
                                                    memory)
bindto = ['xilinx.com:ip:axi_vdma:6.2',
'xilinx.com:ip:axi_vdma:6.3']
|
{
"content_hash": "32e76bdfb82f1713ecee1ea8dd08ccaf",
"timestamp": "",
"source": "github",
"line_count": 589,
"max_line_length": 78,
"avg_line_length": 34.76740237691002,
"alnum_prop": 0.5479050688543803,
"repo_name": "cathalmccabe/PYNQ",
"id": "b026e3b8d3f3357ed3e2de6db9687cf443564cf3",
"size": "22083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pynq/lib/video/dma.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "BitBake",
"bytes": "1839"
},
{
"name": "C",
"bytes": "1110727"
},
{
"name": "C++",
"bytes": "74784"
},
{
"name": "CMake",
"bytes": "578"
},
{
"name": "JavaScript",
"bytes": "239958"
},
{
"name": "Jupyter Notebook",
"bytes": "17143645"
},
{
"name": "Makefile",
"bytes": "150630"
},
{
"name": "PHP",
"bytes": "2117"
},
{
"name": "Python",
"bytes": "1583136"
},
{
"name": "Shell",
"bytes": "76262"
},
{
"name": "SystemVerilog",
"bytes": "53374"
},
{
"name": "Tcl",
"bytes": "1389138"
},
{
"name": "VHDL",
"bytes": "738710"
},
{
"name": "Verilog",
"bytes": "284588"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import unicode_literals
from six import text_type as str
from yelp.obj.attribute import Attribute
from yelp.obj.category import Category
from yelp.obj.coordinates import Coordinates
from yelp.obj.hours import Hours
from yelp.obj.location import Location
from yelp.obj.response_object import ResponseObject
class Business(ResponseObject):
    """Yelp business response object.

    ``_schema`` maps each response field name to the type used to
    deserialize it; a single-element list marks a repeated field.
    Note that ``str`` here is ``six.text_type`` via the module-level
    import, so text fields decode consistently on Python 2 and 3.
    """

    _schema = dict(
        id=str,
        alias=str,
        name=str,
        image_url=str,
        is_claimed=bool,
        is_closed=bool,
        url=str,
        phone=str,
        display_phone=str,
        review_count=int,
        categories=[Category],
        rating=float,
        location=Location,
        coordinates=Coordinates,
        photos=[str],
        hours=[Hours],
        transactions=[str],
        attributes=[Attribute],
    )
|
{
"content_hash": "e2b2b46a824a79739b672527e3b45744",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 51,
"avg_line_length": 25.885714285714286,
"alnum_prop": 0.608167770419426,
"repo_name": "Yelp/yelp-python",
"id": "e0e114caa259c426d50940d55c615506f8b1af10",
"size": "930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yelp/obj/business.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "358"
},
{
"name": "Python",
"bytes": "27070"
}
],
"symlink_target": ""
}
|
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
This script will use telnet to login to a Cisco XRv device and set the cryptographic keys
to support SSH connectivity. This version of the script uses the arguments encoded in the
script itself.
To test for whether the crypto key is set, and whether the netconf agent is running, we can
use this command:
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 830 cisco@172.16.1.11 -s netconf
Guidance from: See http://linuxcommando.blogspot.com/2008/10/how-to-disable-ssh-host-key-checking.html
By configuring the null device file as the host key database, SSH is fooled into thinking that the
SSH client has never connected to any SSH server before, and so will never run into a mismatched host key.
The parameter StrictHostKeyChecking specifies if SSH will automatically add new host keys to the
host key database file. By setting it to no, the host key is automatically added, without user
confirmation, for all first-time connection. Because of the null key database file, all
connection is viewed as the first-time for any SSH server host. Therefore, the host key is
automatically added to the host key database with no user confirmation. Writing the key to
the /dev/null file discards the key and reports success.
'''
from __future__ import print_function
from __future__ import absolute_import
import pexpect
import sys
def add_crypto_key(devices=None, username='cisco', password='cisco'):
    """Telnet to each device and generate a DSA crypto key for SSH.

    Parameters
    ----------
    devices : list of str, optional
        Device addresses to configure; defaults to the lab device
        172.16.1.11 when empty or omitted.
    username, password : str
        Login credentials sent at the telnet prompts.

    Devices that cannot be reached are reported and skipped.
    """
    # Avoid the mutable-default-argument pitfall: an empty/omitted list
    # falls back to the single lab device, matching the old behaviour.
    network_devices = devices if devices else ['172.16.1.11']
    for network_device in network_devices:
        try:
            telnet_command = "telnet %s" % network_device
            child = pexpect.spawn(telnet_command)
            child.logfile = sys.stdout
            child.expect('Username:')
            child.sendline(username)
            child.expect('Password:')
            child.sendline(password)
            child.expect('#')
            child.sendline('crypto key generate dsa')
            # The device either asks whether to replace an existing key
            # ('[yes/no]') or goes straight to the key-size prompt
            # (ending in '1024]').
            index = child.expect(['[yes/no]', '1024]'])
            if index == 0:
                child.sendline('yes')
                child.expect('1024]')
                child.sendline('')
                child.sendline('')
            elif index == 1:
                child.sendline('')
                child.sendline('')
        except Exception:
            # Narrowed from a bare ``except:`` so Ctrl-C/SystemExit still
            # propagate; pexpect TIMEOUT/EOF and connection failures land
            # here and we move on to the next device.
            print("Could not connect to %s" % network_device)
            continue

if __name__ == '__main__':
    add_crypto_key()
|
{
"content_hash": "965747ce4af7ff5226337e0b9a9538e5",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 110,
"avg_line_length": 43.7375,
"alnum_prop": 0.6656187482137753,
"repo_name": "tbarrongh/cosc-learning-labs",
"id": "0ca9702bc80827f9b1b8873986046340ed05cc11",
"size": "4183",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/utils/crypto_key.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "354065"
},
{
"name": "Shell",
"bytes": "2128"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``writter`` many-to-many field to Book and give
    ``Auther.age`` an explicit verbose name."""
    dependencies = [
        ('practice', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='book',
            name='writter',
            # verbose_name is the Chinese for 'author' (escaped unicode)
            field=models.ManyToManyField(to='practice.Auther', verbose_name='\u8457\u4f5c\u4eba'),
        ),
        migrations.AlterField(
            model_name='auther',
            name='age',
            # verbose_name is the Chinese for 'age' (escaped unicode)
            field=models.IntegerField(verbose_name='\u5e74\u9f84'),
        ),
    ]
|
{
"content_hash": "a6137a155d77c4b1adf87ee0147ca913",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 98,
"avg_line_length": 25.217391304347824,
"alnum_prop": 0.5775862068965517,
"repo_name": "boldmanQ/blogsys",
"id": "53b660b4b7091374a204ee1c023abc74d6c5f58c",
"size": "653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blogsys/testcase/migrations/0002_auto_20171031_1558.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6557"
},
{
"name": "HTML",
"bytes": "15024"
},
{
"name": "Python",
"bytes": "37017"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
import reversion
from markedit.admin import MarkEditAdmin
from .models import Page
class PageAdmin(reversion.VersionAdmin, MarkEditAdmin):
    """Admin for CMS pages: revision history (reversion) combined with
    the MarkEdit editor for the page body."""
    # Columns shown on the admin change-list page
    list_display = [
        'title',
        'path',
        'status',
        'publish_date',
    ]
    class MarkEdit:
        # MarkEditAdmin configuration: which fields get the editor and
        # where the preview pane is rendered
        fields = ['body']
        options = {
            'preview': 'below'
        }
# Expose Page in the Django admin with the configuration above
admin.site.register(Page, PageAdmin)
|
{
"content_hash": "6026deb7b577fe0af8ba63fbcf293836",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 55,
"avg_line_length": 17.708333333333332,
"alnum_prop": 0.6023529411764705,
"repo_name": "miurahr/symposion",
"id": "ffdcb00ed567bafe51a9613a7f612538681e9d67",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/sotmjp",
"path": "symposion/cms/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "79954"
},
{
"name": "Python",
"bytes": "205076"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``locked`` boolean flag to Task (default unlocked)."""
    dependencies = [
        ('tasks', '0032_task_catalogs'),
    ]
    operations = [
        migrations.AddField(
            model_name='task',
            name='locked',
            field=models.BooleanField(default=False, help_text='Designates whether this task can be changed.', verbose_name='Locked'),
        ),
    ]
|
{
"content_hash": "7a5a9faed8e83136cb882c54534cbffa",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 134,
"avg_line_length": 25.625,
"alnum_prop": 0.6048780487804878,
"repo_name": "DMPwerkzeug/DMPwerkzeug",
"id": "844730acdf8ca76fffa0e64018a5b61056f5879b",
"size": "460",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rdmo/tasks/migrations/0033_task_locked.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9735"
},
{
"name": "HTML",
"bytes": "126570"
},
{
"name": "JavaScript",
"bytes": "46177"
},
{
"name": "Python",
"bytes": "120676"
}
],
"symlink_target": ""
}
|
class Model(object):
    """Data layer of the MVC demo: an in-memory product catalogue."""

    # Class-level store shared by all Model instances; each product maps
    # its name to a record of unit price and stock quantity.
    products = {
        name: {'price': price, 'quantity': quantity}
        for name, price, quantity in (
            ('milk', 1.50, 10),
            ('eggs', 0.20, 100),
            ('cheese', 2.00, 10),
        )
    }
class View(object):
    """Presentation layer of the MVC demo: renders product data to stdout."""

    def product_list(self, product_list):
        # Header line, one product name per line, then a blank separator.
        print('PRODUCT LIST:')
        for name in product_list:
            print(name)
        print('')

    def product_information(self, product, product_info):
        # %-formatting renders the price with two decimals and the
        # quantity as an integer; missing fields default to 0.
        print('PRODUCT INFORMATION:')
        values = (product.title(),
                  product_info.get('price', 0),
                  product_info.get('quantity', 0))
        print('Name: %s, Price: %.2f, Quantity: %d\n' % values)

    def product_not_found(self, product):
        print('That product "%s" does not exist in the records' % product)
class Controller(object):
    """Glue layer of the MVC demo: pulls data from Model, hands it to View."""

    def __init__(self):
        self.model = Model()
        self.view = View()

    def get_product_list(self):
        # Pass the catalogue's names straight through to the view.
        self.view.product_list(self.model.products.keys())

    def get_product_information(self, product):
        info = self.model.products.get(product, None)
        if info is None:
            # Unknown product: delegate to the not-found rendering.
            self.view.product_not_found(product)
        else:
            self.view.product_information(product, info)
if __name__ == '__main__':
    # Demo: list the catalogue, look up three existing products, then a
    # missing one to exercise the not-found path.
    controller = Controller()
    controller.get_product_list()
    controller.get_product_information('cheese')
    controller.get_product_information('eggs')
    controller.get_product_information('milk')
    controller.get_product_information('arepas')
|
{
"content_hash": "b7335af164c405d9bb502fdcc54b4d7d",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 74,
"avg_line_length": 29.30188679245283,
"alnum_prop": 0.601416613007083,
"repo_name": "Mitali-Sodhi/CodeLingo",
"id": "69d5e8ce93aa88747ece9bcc0f875024881732c6",
"size": "1601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dataset/python/mvc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9681846"
},
{
"name": "C#",
"bytes": "1741915"
},
{
"name": "C++",
"bytes": "5686017"
},
{
"name": "HTML",
"bytes": "11812193"
},
{
"name": "Java",
"bytes": "11198971"
},
{
"name": "JavaScript",
"bytes": "21693468"
},
{
"name": "M",
"bytes": "61627"
},
{
"name": "Objective-C",
"bytes": "4085820"
},
{
"name": "Perl",
"bytes": "193472"
},
{
"name": "Perl6",
"bytes": "176248"
},
{
"name": "Python",
"bytes": "10296284"
},
{
"name": "Ruby",
"bytes": "1050136"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import collections
import sys
import types
import normalize.exc as exc
from normalize.record import Record
"""This class contains container classes which can act like collections but
conform to this package's metaclass API"""
class _classproperty(object):
def __init__(self, f):
self.f = f
def __get__(self, obj, owner):
return self.f(owner)
class Collection(Record):
    """This is the base class for Record property values which contain
    iterable sets of values with a particular common type.
    All collections are modeled as a mapping from some index to a value.
    Bags are not currently supported, ie the keys must be unique for the diff
    machinery as currently written to function.
    The type of members in the collection is defined using a sub-class API:
    *classproperty* **itemtype**\ =\ *Record sub-class*
        This property must be defined for instances to be instantiable.
        It declares the type of values.
    *classproperty* **coerceitem**\ =\ *FUNC*
        Defaults to *itemtype* and is the function or constructor which
        will accept items during construction of a collection which are not
        already of the correct type.
    *classproperty* **colltype**\ =\ *collection type*
        This is the type of the underlying container.  ``list``, ``dict``,
        etc.
    """
    @_classproperty
    def itemtype(cls):
        # Abstract: sub-classes must override with the member Record type
        raise exc.CollectionDefinitionError(
            property='itemtype',
            coll='Collection',
        )
    @_classproperty
    def coerceitem(cls):
        # Default coercion constructs the item type directly
        return cls.itemtype
    @_classproperty
    def colltype(cls):
        # Abstract: sub-classes must override with the container type
        raise exc.CollectionDefinitionError(
            property='colltype',
            coll='Collection',
        )
    @classmethod
    def record_cls(cls):
        # Part of the Record metaclass API: the contained record type
        return cls.itemtype
    def __init__(self, values=None, **kwargs):
        """
        Default collection constructor.
        args:
            ``values=``\ *iterable*
                Specify the initial contents of the collection.  It will be
                converted to the correct type using :py:meth:`coll_to_tuples`
                and :py:meth:`tuples_to_coll`
            ``attribute=``\ *VALUE*
                It is possible to add extra properties to ``Collection``
                objects; this is how you specify them on construction.
        """
        # Normalise input to the tuple protocol, then build the container;
        # both hooks are looked up on the concrete sub-class.
        self.values = type(self).tuples_to_coll(
            type(self).coll_to_tuples(values)
        )
        super(Collection, self).__init__(**kwargs)
    def __iter__(self):
        """The default iterator always iterates over the *values* of a
        Collection."""
        for x in self.values:
            yield x
    def __eq__(self, other):
        """To be ``==``, collections must have exactly the same ``itemtype``
        and ``colltype``, and equal ``values``
        """
        # getattr with a None default makes comparison against non-
        # collections return False rather than raising
        return self.itemtype == getattr(other, "itemtype", None) and \
            self.values == getattr(other, "values", None)
    def __ne__(self, other):
        """Implemented, for compatibility"""
        return not self.__eq__(other)
    def __len__(self):
        """Forwarded to the ``values`` property."""
        return len(self.values)
    @classmethod
    def coerce_tuples(cls, generator):
        """This class method converts a generator of ``(K, V)`` tuples (the
        *tuple protocol*), where ``V`` is not yet of the correct type, to a
        generator where it is of the correct type (using the ``coerceitem``
        class property)
        """
        for k, v in generator:
            yield k, v if isinstance(v, cls.itemtype) else cls.coerceitem(v)
    @classmethod
    def tuples_to_coll(cls, generator, coerce=False):
        """*required virtual method* This class method, part of the sub-class
        API, converts a generator of ``(K, V)`` tuples (the *tuple protocol*)
        to one of the underlying collection type.
        """
        # Only raise for sub-classes which failed to override; calling on
        # the abstract base itself is a silent no-op
        if cls != Collection:
            raise exc.CollectionDefinitionError(
                property='tuples_to_coll',
                coll='Collection',
            )
    def itertuples(self):
        """Iterate over the items in the collection; return (k, v) where k is
        the key, index etc into the collection (or potentially the value
        itself, for sets). This form is the *tuple protocol*"""
        raise exc.CollectionDefinitionError(
            property='itertuples',
            coll='Collection',
        )
    @classmethod
    def coll_to_tuples(cls, coll):
        """Generate 'conformant' tuples from an input collection, similar to
        itertuples"""
        raise exc.CollectionDefinitionError(
            property='coll_to_tuples',
            coll='Collection',
        )
class KeyedCollection(Collection):
    """Collection whose members can be retrieved by key or index."""

    def __getitem__(self, key):
        # Delegate the lookup straight to the underlying container.
        return self.values[key]
class DictCollection(KeyedCollection):
    """An implementation of keyed collections which obey the `Record` property
    protocol and the tuple collection protocol.  *Warning*: largely untested,
    patches welcome.
    """
    # Suffix used when naming generated types (see _make_generic)
    suffix = "Map"
    colltype = dict
    @classmethod
    def tuples_to_coll(cls, generator, coerce=True):
        # Build the dict from (key, value) tuples, coercing values to
        # ``itemtype`` unless explicitly disabled
        return cls.colltype(
            cls.coerce_tuples(generator) if coerce else generator
        )
    @classmethod
    def coll_to_tuples(cls, coll):
        # NOTE(review): ``iteritems()`` and the ``.next`` attribute test
        # are Python 2 idioms; on Python 3 mappings will fail here and
        # ``collections.Mapping`` lives in ``collections.abc`` -- confirm
        # the supported interpreter versions before modernising.
        if isinstance(coll, collections.Mapping):
            for k, v in coll.iteritems():
                yield k, v
        elif isinstance(coll, collections.Sequence):
            # Sequences become an integer-keyed mapping in input order
            i = 0
            for v in coll:
                yield (i, v)
                i += 1
        elif hasattr(coll, "next") and callable(coll.next):
            # Generators may yield ready-made (key, value) pairs or bare
            # values, which get sequential integer keys
            i = 0
            for v in coll:
                if isinstance(v, tuple) and len(v) == 2:
                    yield v
                else:
                    yield (i, v)
                i += 1
    def itertuples(self):
        # NOTE(review): Python 2 only -- dict.iteritems does not exist
        # on Python 3
        return self.values.iteritems()
class ListCollection(KeyedCollection):
    """An implementation of sequences which obey the `Record` property protocol
    and the tuple collection protocol.
    """
    # Suffix used when naming generated types (see _make_generic)
    suffix = "List"
    colltype = list
    @classmethod
    def tuples_to_coll(cls, generator, coerce=True):
        # Keys are discarded; a list preserves only the value order
        tuples = cls.coerce_tuples(generator) if coerce else generator
        return cls.colltype(v for k, v in tuples)
    @classmethod
    def coll_to_tuples(cls, coll):
        """``coll_to_tuples`` is capable of unpacking its own collection types
        (`list`), ``collections.Mapping`` objects, as well generators,
        sequences and iterators.  Returns ``(*int*, Value)``.  Does not coerce
        items.
        """
        if isinstance(coll, collections.Mapping):
            # Values ordered by sorted key, re-keyed with sequential
            # integers.  Fixed: ``i`` was previously never incremented,
            # so every tuple carried key 0, violating the unique-key
            # requirement of the tuple protocol documented on Collection.
            i = 0
            for k in sorted(coll.keys()):
                yield (i, coll[k])
                i += 1
        elif isinstance(coll, (collections.Sequence, types.GeneratorType)) or (
            hasattr(coll, "next") and callable(coll.next)
        ) or (
            hasattr(coll, "__iter__") and callable(coll.__iter__)
        ):
            i = 0
            for v in coll:
                yield i, v
                i += 1
        elif not coll:
            # None or other empty/falsy input yields nothing
            return
        else:
            raise exc.CollectionCoerceError(
                giventype=type(coll).__name__,
                fortype=cls.__name__,
            )
    def append(self, item):
        """``Sequence`` API, currently passed through to underlying collection.
        Type-checking is currently TODO.
        """
        self.values.append(item)
    def itertuples(self):
        return type(self).coll_to_tuples(self.values)
    def __str__(self):
        """Informal stringification returns the type of collection, and the
        length.  For example, ``<MyRecordList: 8 item(s)>``
        """
        return "<%s: %d item(s)>" % (
            type(self).__name__, len(self.values)
        )
    def __repr__(self):
        """Implemented: prints a valid constructor.
        """
        # Splice the list literal in as the first constructor argument of
        # the repr produced by the Record base class
        property_info = super(ListCollection, self).__repr__()
        list_info = "[%s]" % ", ".join(repr(x) for x in self.values)
        optional_comma = "" if property_info.endswith("()") else ", "
        return property_info.replace("(", "(" + list_info + optional_comma, 1)
GENERIC_TYPES = dict()
class _GenericPickler(object):
"""'pickle' doesn't like pickling classes which are dynamically created.
This object is used instead, to keep pickle happy.
"""
def __init__(self, typekey):
self.typekey = typekey
def __call__(self, values):
return GENERIC_TYPES[self.typekey](values=values)
class _Generic(Collection):
    """Mix-in marking collection types which were generated dynamically
    (for example, collections of a particular Record type)."""

    def __reduce__(self):
        """Pickle via the registry, since the generated class itself is
        not importable by name."""
        cls = type(self)
        return (_GenericPickler(cls.generic_key), (self.values,))
def _make_generic(of, coll):
    """Used to make a new Collection type, without that type having to be
    defined explicitly.  Generates a new type name using the item type and a
    'suffix' Collection class property.
    args:
        ``of=``\ *Record type*
            The type of values of the collection
        ``coll=``\ *Collection sub-class*
            The container class.
    """
    # NOTE(review): ``assert`` is stripped under ``python -O``; raising
    # TypeError would survive optimised runs
    assert(issubclass(coll, Collection))
    # Cache key: (container class name, fully qualified item type name)
    key = (coll.__name__, "%s.%s" % (of.__module__, of.__name__))
    if key in GENERIC_TYPES:
        # Two distinct item types must not collide on the same key
        if GENERIC_TYPES[key].itemtype != of:
            raise exc.PropertyNotUnique(key=key)
    else:
        # oh, we get to name it? Goodie!
        generic_name = "%s%s" % (of.__name__, coll.suffix)
        GENERIC_TYPES[key] = type(
            generic_name, (coll, _Generic), dict(itemtype=of, generic_key=key)
        )
        # Export the generated type from the item type's module (unless
        # the name is taken) so pickle can locate it by name
        mod = sys.modules[of.__module__]
        if not hasattr(mod, generic_name):
            setattr(mod, generic_name, GENERIC_TYPES[key])
    return GENERIC_TYPES[key]
|
{
"content_hash": "326f2cf25e08dfe3ef65c846d4c4d82e",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 79,
"avg_line_length": 32.501628664495115,
"alnum_prop": 0.5850871918220084,
"repo_name": "tomo-otsuka/normalize",
"id": "81ee5ffeb66c5a4edd126c0d5822a6bd0d2aa992",
"size": "10541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "normalize/coll.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "299892"
}
],
"symlink_target": ""
}
|
import pytest
def pytest_addoption(parser):
    """Register the --setupplan/--setup-plan flag in the debugconfig group."""
    debug_group = parser.getgroup("debugconfig")
    debug_group.addoption(
        "--setupplan",
        "--setup-plan",
        action="store_true",
        help="show what fixtures and tests would be executed but "
        "don't execute anything.",
    )
@pytest.hookimpl(tryfirst=True)
def pytest_fixture_setup(fixturedef, request):
    """Short-circuit fixture setup when --setup-plan is active.

    Caches a dummy (None) result on the fixture definition so that no
    real fixture code runs while the plan is being shown.
    """
    if not request.config.option.setupplan:
        return None
    fixturedef.cached_result = (None, None, None)
    return fixturedef.cached_result
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config):
    """Make --setup-plan imply --setup-only and --setup-show."""
    options = config.option
    if options.setupplan:
        options.setuponly = True
        options.setupshow = True
|
{
"content_hash": "819cc14914f7a3a2d176eca392bcba9a",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 70,
"avg_line_length": 28.037037037037038,
"alnum_prop": 0.6763540290620872,
"repo_name": "tomviner/pytest",
"id": "697746f205a6119682360f101999d73f5c9e162b",
"size": "757",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/_pytest/setupplan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "607"
},
{
"name": "Python",
"bytes": "1945670"
},
{
"name": "Shell",
"bytes": "374"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.