text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import sys
import time
import copy as cp
import asyncio
import hashlib
from unittest import mock
import celery
import pytest
from waterbutler import tasks # noqa
from waterbutler.core import remote_logging
from waterbutler.core import utils as core_utils
from waterbutler.core.path import WaterButlerPath
import tests.utils as test_utils
# Hack to get the module, not the function
# ``waterbutler.tasks.copy`` exposes a task callable under the same name as
# the module, so pull the module object straight out of sys.modules instead.
copy = sys.modules['waterbutler.tasks.copy']
# Fixed POSIX timestamp used by the mock_time fixture for deterministic tests.
FAKE_TIME = 1454684930.0
@pytest.fixture(autouse=True)
def patch_backend(monkeypatch):
    """Disable the celery result backend for every test in this module."""
    monkeypatch.setattr(copy.core.app, 'backend', None)
@pytest.fixture(autouse=True)
def callback(monkeypatch):
    """Stub ``send_signed_request`` and return the mock for call inspection.

    The stubbed request always answers HTTP 200 with body ``b'meowmeowmeow'``.
    """
    response = mock.Mock(
        status=200,
        read=test_utils.MockCoroutine(return_value=b'meowmeowmeow'),
    )
    mocked_send = test_utils.MockCoroutine(return_value=response)
    monkeypatch.setattr(copy.utils, 'send_signed_request', mocked_send)
    return mocked_send
@pytest.fixture
def mock_time(monkeypatch):
    """Freeze ``time.time`` at FAKE_TIME so timestamps are deterministic."""
    frozen = mock.Mock(return_value=FAKE_TIME)
    monkeypatch.setattr(time, 'time', frozen)
@pytest.fixture
def src_path():
    """Path of the file being copied."""
    return WaterButlerPath('/user/bin/python')
@pytest.fixture
def dest_path():
    """Destination path for the copy."""
    return WaterButlerPath('/usr/bin/golang')
@pytest.fixture(scope='function')
def src_provider():
    """Mock source provider whose ``copy`` reports a newly created file."""
    provider = test_utils.MockProvider()
    provider.copy.return_value = (test_utils.MockFileMetadata(), True)
    provider.auth['callback_url'] = 'src_callback'
    return provider
@pytest.fixture(scope='function')
def dest_provider():
    """Mock destination provider whose ``copy`` reports a newly created file."""
    provider = test_utils.MockProvider()
    provider.copy.return_value = (test_utils.MockFileMetadata(), True)
    provider.auth['callback_url'] = 'dest_callback'
    return provider
@pytest.fixture(scope='function')
def providers(monkeypatch, src_provider, dest_provider):
    """Patch ``make_provider`` to hand back the two mock providers by name."""
    lookup = {'src': src_provider, 'dest': dest_provider}

    def make_provider(name=None, **kwargs):
        if name in lookup:
            return lookup[name]
        raise ValueError('Unexpected provider')

    monkeypatch.setattr(copy.utils, 'make_provider', make_provider)
    return src_provider, dest_provider
@pytest.fixture(autouse=True)
def log_to_keen(monkeypatch):
    """Silence remote analytics; return the mock so calls can be asserted."""
    mocked_logger = test_utils.MockCoroutine()
    monkeypatch.setattr(remote_logging, 'log_to_keen', mocked_logger)
    return mocked_logger
@pytest.fixture
def src_bundle(src_path):
    """Task payload describing the copy source."""
    provider_info = {
        'name': 'src',
        'auth': {'callback_url': ''},
        'settings': {},
        'credentials': {},
    }
    return {
        'nid': 'mst3k',
        'path': src_path,
        'provider': provider_info,
    }
@pytest.fixture
def dest_bundle(dest_path):
    """Task payload describing the copy destination."""
    provider_info = {
        'name': 'dest',
        'auth': {'callback_url': ''},
        'settings': {},
        'credentials': {},
    }
    return {
        'nid': 'fbi4u',
        'path': dest_path,
        'provider': provider_info,
    }
@pytest.fixture
def bundles(src_bundle, dest_bundle):
    """Convenience pair of (source, destination) task payloads."""
    return src_bundle, dest_bundle
class TestCopyTask:
    """Exercise the celery ``copy`` task end to end against mock providers."""

    def test_copy_calls_copy(self, event_loop, providers, bundles):
        src, dest = providers
        src_bundle, dest_bundle = bundles

        copy.copy(cp.deepcopy(src_bundle), cp.deepcopy(dest_bundle))

        assert src.copy.called
        src.copy.assert_called_once_with(dest, src_bundle['path'], dest_bundle['path'])

    def test_is_task(self):
        # The task must be a celery Task wrapping a plain (non-coroutine)
        # callable, with an async ``adelay`` entry point.
        assert callable(copy.copy)
        assert isinstance(copy.copy, celery.Task)
        assert not asyncio.iscoroutine(copy.copy)
        assert asyncio.iscoroutinefunction(copy.copy.adelay)

    def test_imputes_exceptions(self, event_loop, providers, bundles, callback):
        src, dest = providers
        src_bundle, dest_bundle = bundles
        src.copy.side_effect = Exception('This is a string')

        with pytest.raises(Exception):
            copy.copy(cp.deepcopy(src_bundle), cp.deepcopy(dest_bundle))

        (method, url, data), _ = callback.call_args_list[0]
        assert src.copy.called
        src.copy.assert_called_once_with(dest, src_bundle['path'], dest_bundle['path'])
        assert url == 'dest_callback'
        assert method == 'PUT'
        # NOTE(review): this repr form (trailing comma) matches Python < 3.7.
        assert data['errors'] == ["Exception('This is a string',)"]

    def test_return_values(self, event_loop, providers, bundles, callback, src_path, dest_path, mock_time):
        src, dest = providers
        src_bundle, dest_bundle = bundles
        metadata = test_utils.MockFileMetadata()
        src.copy.return_value = (metadata, False)

        ret1, ret2 = copy.copy(cp.deepcopy(src_bundle), cp.deepcopy(dest_bundle))
        assert (ret1, ret2) == (metadata, False)

        (method, url, data), _ = callback.call_args_list[0]
        assert method == 'PUT'
        assert url == 'dest_callback'
        assert data['action'] == 'copy'
        assert data['auth'] == {'callback_url': 'dest_callback'}
        assert data['email'] == False
        assert data['errors'] == []
        assert data['time'] == FAKE_TIME + 60
        assert data['source'] == {
            'nid': 'mst3k',
            'resource': 'mst3k',
            'path': '/' + src_path.raw_path,
            'name': src_path.name,
            'materialized': str(src_path),
            'provider': src.NAME,
            'kind': 'file',
            'extra': {},
        }
        assert data['destination'] == {
            'nid': 'fbi4u',
            'resource': 'fbi4u',
            'path': metadata.path,
            'name': metadata.name,
            'materialized': metadata.path,
            'provider': dest.NAME,
            'kind': 'file',
            'contentType': metadata.content_type,
            'etag': hashlib.sha256(
                '{}::{}'.format(metadata.provider, metadata.etag).encode('utf-8')
            ).hexdigest(),
            'extra': metadata.extra,
            'modified': metadata.modified,
            'modified_utc': metadata.modified_utc,
            'created_utc': metadata.created_utc,
            'size': metadata.size,
        }

    def test_starttime_override(self, event_loop, providers, bundles, callback, mock_time):
        src, dest = providers
        src_bundle, dest_bundle = bundles
        stamp = FAKE_TIME

        # A start_time in the past should trigger the email flag; one in the
        # future should not.  The reported time is always start-relative + 60s.
        copy.copy(cp.deepcopy(src_bundle), cp.deepcopy(dest_bundle), start_time=stamp - 100)
        copy.copy(cp.deepcopy(src_bundle), cp.deepcopy(dest_bundle), start_time=stamp + 100)

        (_, _, data), _ = callback.call_args_list[0]
        assert data['email'] is True
        assert data['time'] == 60 + stamp

        (_, _, data), _ = callback.call_args_list[1]
        assert data['email'] is False
        assert data['time'] == 60 + stamp
|
{
"content_hash": "e2bf793a0d5e939d52bbcea614e21530",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 107,
"avg_line_length": 28.665236051502145,
"alnum_prop": 0.5958975894594999,
"repo_name": "Johnetordoff/waterbutler",
"id": "e2d655bd99a1e7152b8713171e401fae1727877e",
"size": "6679",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "tests/tasks/test_copy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "877100"
}
],
"symlink_target": ""
}
|
from distutils.core import setup, Command
from distutils.command.build import build as _build
import os, sys
import shutil
import platform
class build_server(_build):
    """Custom build step: run qmake/make and ship the webkit_server binary."""
    description = 'custom build command'
    sub_commands = []

    def initialize_options(self):
        _build.initialize_options(self)
        self.cwd = None

    def finalize_options(self):
        _build.finalize_options(self)
        self.cwd = os.getcwd()

    def run(self):
        """Build the native helper and copy it into both build directories."""
        if os.environ.get('READTHEDOCS', None) == 'True':
            # won't build on readthedocs.org
            return
        assert os.getcwd() == self.cwd, 'Must be in package root.'
        # append any platform specific qmake args to this list
        args = []
        if platform.system() == 'Darwin':
            # ensure a Makefile is generated rather than an XCode project on OSX
            args += ['-spec', 'macx-g++']
        os.system('qmake ' + ' '.join(args) + ' && make')
        # Remove any stale binary, make sure the build dir exists, then copy
        # the freshly built helper in.  Only filesystem errors (missing file,
        # directory already exists) are expected here, so catch OSError rather
        # than a bare ``except:`` which would also hide KeyboardInterrupt and
        # real bugs.
        for build_dir in (self.build_purelib, self.build_platlib):
            try:
                os.remove(os.path.join(build_dir, 'webkit_server'))
            except OSError:
                pass
            try:
                os.makedirs(build_dir)
            except OSError:
                pass
            shutil.copy('src/webkit_server', build_dir)
# Package metadata; the custom build step above compiles the native helper.
setup(
    name='webkit-server',
    version='1.0',
    description='a Webkit-based, headless web client',
    author='Niklas Baumstark',
    author_email='niklas.baumstark@gmail.com',
    license='MIT',
    url='https://github.com/niklasb/webkit-server',
    py_modules=['webkit_server'],
    cmdclass={'build': build_server},
)
|
{
"content_hash": "0dddaf5009e6cd71e1b0c409cd1d8bd9",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 74,
"avg_line_length": 31.32075471698113,
"alnum_prop": 0.6439759036144578,
"repo_name": "niklasb/webkit-server",
"id": "340e050825bfb21348a7342e79574bace2ccf365",
"size": "1660",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "99872"
},
{
"name": "JavaScript",
"bytes": "11321"
},
{
"name": "Python",
"bytes": "19206"
},
{
"name": "QMake",
"bytes": "3261"
},
{
"name": "Shell",
"bytes": "243"
}
],
"symlink_target": ""
}
|
import optparse
import os
import subprocess
import sys
def get_xwalk():
    """Return the path to the crosswalk ``xwalk.exe`` binary.

    Requires the XWALK_HOME environment variable; prints an error and exits
    with status 2 when it is unset.  The ``print()`` call form replaces the
    Python-2-only print statement so the script runs under both Python 2 and 3.
    """
    if "XWALK_HOME" not in os.environ:
        print("Error: 'XWALK_HOME' has not been set to the crosswalk binary.")
        sys.exit(2)
    return os.path.join(
        os.path.splitext(os.path.abspath(os.environ["XWALK_HOME"]))[0],
        "xwalk.exe")
def run_tests():
    """Launch the crosswalk runtime against the local manifest.

    Uses ``print()`` instead of the Python-2-only print statement.
    NOTE(review): the Popen handle is discarded, so the test process is not
    waited on — matches the original behavior.
    """
    xwalk = get_xwalk()
    cmd = "%s manifest.json" % xwalk
    print("Command: " + cmd)
    subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
def start_server():
    """Spawn the resource server script and return its Popen handle."""
    return subprocess.Popen("rsserver.py", stdout=subprocess.PIPE, shell=True)
def main():
    """Start the server, run the tests, then relay the server's output.

    Print statements converted to ``print()`` for Python 2/3 compatibility.
    """
    print("Starting server ...")
    server = start_server()
    print("Running tests...")
    run_tests()
    out, err = server.communicate()
    if err:
        print(err)
    else:
        # Drop comment lines (those starting with '#') from the report.
        # NOTE(review): under Python 3 ``out`` is bytes, so this split assumes
        # Python 2 text mode — confirm before porting.
        for line in out.split("\n"):
            if not line.startswith("#"):
                print(line)
    return


if __name__ == "__main__":
    main()
|
{
"content_hash": "112079113d62935d46e0e3ec3a1524e9",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 78,
"avg_line_length": 23.023255813953487,
"alnum_prop": 0.5767676767676768,
"repo_name": "crosswalk-project/realsense-extensions-crosswalk",
"id": "5df3e4a5f1fad12fdc85905583bb9961b09c892c",
"size": "1179",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "311593"
},
{
"name": "CSS",
"bytes": "31914"
},
{
"name": "HTML",
"bytes": "273113"
},
{
"name": "JavaScript",
"bytes": "362059"
},
{
"name": "Python",
"bytes": "20470"
}
],
"symlink_target": ""
}
|
import json
import codecs
import logging
from logging import config as _config
import os
import sys
from pprint import pprint
from scli import command, cli_parse, config_file, prompt
from scli.constants import EbLogFile, EbLocalDir, OutputLevel, ParameterName, \
ParameterSource, ServiceDefault
from scli.parameter import DefaultParameterValue, ParameterPool, ParameterValidator
from lib.utility import misc, shell_utils
log = logging.getLogger(u'cli')
def _getLogFile(filename):
return os.getcwdu() + os.path.sep + filename
def _set_log_filename(config_dict, filename):
#Keyerror
config_dict[u'handlers'][u'default'][u'filename'] = filename
def _set_log_handlers(config_dict, formatter):
config_dict[u'root'][u'handlers'] = [formatter]
config_dict[u'loggers'][u'aws'][u'handlers'] = [formatter]
config_dict[u'loggers'][u'eb'][u'handlers'] = [formatter]
config_dict[u'loggers'][u'op'][u'handlers'] = [formatter]
config_dict[u'loggers'][u'cli'][u'handlers'] = [formatter]
def _disable_logging(config_dict=None):
    """Silence all logging; optionally rewire *config_dict* to a null handler.

    When a dictConfig mapping is given, its handlers are pointed at 'null'
    and the 'default' file handler entry is removed.
    """
    logging.disable(logging.CRITICAL)
    if config_dict is None:
        return
    _set_log_handlers(config_dict, u'null')
    del config_dict[u'handlers'][u'default']
def configureLogging(level=None, quiet=False,
                     filename=EbLogFile.Name,
                     spec_dir=os.getcwdu() + os.path.sep + EbLocalDir.Path):
    """Configure logging from logconfig.json, writing to *filename*.

    level: when given, overrides the root level from the config file.
    quiet: suppress error messages printed on configuration failure.
    spec_dir: directory for the log file; falls back to the CWD when falsy.
    NOTE(review): the default is evaluated at import time and relies on the
    Python-2-only ``os.getcwdu`` — this module targets Python 2.
    """
    if not spec_dir:
        output_file = _getLogFile(filename)
    else:
        shell_utils.create_directory(spec_dir)
        output_file = spec_dir + os.path.sep + filename
    ori_path = shell_utils.ori_path()
    log_config_location = os.path.join(ori_path, u'logconfig.json')
    try:
        with codecs.open(log_config_location, 'r', encoding='utf-8') as input_file:
            config_dict = json.loads(input_file.read())
        _set_log_filename(config_dict, output_file)
        if level is None and config_dict[u'root'][u'level'].upper() == u'NONE':
            # completely disable log
            config_dict[u'root'][u'level'] = u'NOTSET'
            _disable_logging(config_dict)
        else:
            if level is not None:
                config_dict[u'root'][u'level'] = level
            _set_log_handlers(config_dict, u'default')
    except (IOError, ValueError, KeyError) as ex:
        # JSON logging config file parsing error
        if not quiet:
            print(u'Encountered error when reading logging configuration file from "{0}": {1}.'.
                  format(log_config_location, ex))
        _disable_logging()
        return
    try:
        _config.dictConfig(config_dict)
    except IOError:
        if not quiet:
            # BUG FIX: the original used the Python-2-only ``print >> sys.stderr``
            # statement, a syntax error under Python 3; write to stderr directly,
            # which behaves identically on both versions.
            sys.stderr.write(u'Could not open {0} for logging. Using stderr instead.\n'.
                             format(output_file))
        _set_log_handlers(config_dict, u'to_stderr')
        _config.dictConfig(config_dict)
    config_file.set_access_permission(output_file, True)
def _exit(code):
    """Log shutdown and terminate the process with exit status *code*."""
    log.info(u'EB CLI exit')
    sys.exit(code)
def _print_op_results(results):
    """Pretty-print each operation result followed by its message.

    BUG FIX: the original used the Python-2-only ``print result.message``
    statement (a syntax error under Python 3); the call form behaves the same
    on both versions for a single argument.
    """
    for index, result in enumerate(results):
        prompt.info(u'------------ Operation {0}: {1}----------------'.format(
            index + 1, result.operation.__class__.__name__))
        pprint(result.result, depth=3)
        print(result.message)
def main(cmdline=None):
    """Entry point: parse arguments, build the operation queue, and run it."""
    # Initialization
    configureLogging(quiet=False)
    log.info(u'EB CLI start')
    parameter_pool = ParameterPool()  # pool of all parameters
    validator = ParameterValidator()
    DefaultParameterValue.fill_default(parameter_pool)
    log.debug(u'Finished initialization')

    try:
        # Parse command line arguments
        cli_parse.parse(parameter_pool, cmdline)
        log.debug(u'Finished parsing command line arguments.')
        # TODO: set quiet level here.
        if parameter_pool.get_value(ParameterName.Verbose) == ServiceDefault.ENABLED:
            prompt.set_level(OutputLevel.Info)
        else:
            prompt.set_level(OutputLevel.ResultOnly)
        validator.validate(parameter_pool, ParameterSource.CliArgument)
        # Compile operation queue
        queue = command.compile_operation_queue(parameter_pool.command)
    except SystemExit:
        _exit(0)
    except BaseException as ex:
        print(misc.to_unicode(ex))
        log.exception(ex)
        _exit(1)

    # Execute queue
    results = []
    try:
        queue.run(parameter_pool, results)
        log.debug(u'Finished executing operation queue')
    except BaseException as ex:
        print(misc.to_unicode(ex))
        log.exception(ex)
        _exit(1)
    _exit(0)
|
{
"content_hash": "d69bd3488bbcd560d5c00411debbf425",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 98,
"avg_line_length": 31.510204081632654,
"alnum_prop": 0.6364421416234888,
"repo_name": "radlws/AWS-ElasticBeanstalk-CLI",
"id": "01cc27ed9c767ce82ddbffddd8054c746ef575ce",
"size": "5365",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "eb/macosx/python2.7/scli/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "33685"
},
{
"name": "Python",
"bytes": "4692335"
},
{
"name": "Ruby",
"bytes": "160360"
},
{
"name": "Shell",
"bytes": "23953"
}
],
"symlink_target": ""
}
|
from novaclient import client
from novaclient.v3 import hosts
class Client(object):
    """
    Top-level object to access the OpenStack Compute API.

    Create an instance with your creds::
        >>> client = Client(USERNAME, PASSWORD, PROJECT_ID, AUTH_URL)

    Then call methods on its managers::
        >>> client.servers.list()
        ...
        >>> client.flavors.list()
        ...
    """

    # FIXME(jesse): project_id isn't required to authenticate
    def __init__(self, username, password, project_id, auth_url=None,
                 insecure=False, timeout=None, proxy_tenant_id=None,
                 proxy_token=None, region_name=None,
                 endpoint_type='publicURL', extensions=None,
                 service_type='compute', service_name=None,
                 volume_service_name=None, timings=False,
                 bypass_url=None, os_cache=False, no_cache=True,
                 http_log_debug=False, auth_system='keystone',
                 auth_plugin=None,
                 cacert=None, tenant_id=None):
        self.projectid = project_id
        self.tenant_id = tenant_id
        # Either flag enables caching (``no_cache`` is the legacy negative).
        self.os_cache = os_cache or not no_cache
        # TODO(bnemec): Add back in v3 extensions
        self.hosts = hosts.HostManager(self)

        # Add in any extensions...
        if extensions:
            for extension in extensions:
                if extension.manager_class:
                    setattr(self, extension.name,
                            extension.manager_class(self))

        self.client = client.HTTPClient(
            username,
            password,
            projectid=project_id,
            tenant_id=tenant_id,
            auth_url=auth_url,
            insecure=insecure,
            timeout=timeout,
            auth_system=auth_system,
            auth_plugin=auth_plugin,
            proxy_token=proxy_token,
            proxy_tenant_id=proxy_tenant_id,
            region_name=region_name,
            endpoint_type=endpoint_type,
            service_type=service_type,
            service_name=service_name,
            volume_service_name=volume_service_name,
            timings=timings,
            bypass_url=bypass_url,
            os_cache=os_cache,
            http_log_debug=http_log_debug,
            cacert=cacert)

    def set_management_url(self, url):
        """Point the HTTP client at a specific management endpoint."""
        self.client.set_management_url(url)

    def get_timings(self):
        """Return the accumulated request timing records."""
        return self.client.get_timings()

    def reset_timings(self):
        """Clear the accumulated request timing records."""
        self.client.reset_timings()

    def authenticate(self):
        """
        Authenticate against the server.

        Normally this is called automatically when you first access the API,
        but you can call this method to force authentication right now.

        Returns on success; raises :exc:`exceptions.Unauthorized` if the
        credentials are wrong.
        """
        self.client.authenticate()
|
{
"content_hash": "92c63f5fdd000414a38df2ea6967b5aa",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 76,
"avg_line_length": 38.758620689655174,
"alnum_prop": 0.49644128113879005,
"repo_name": "citrix-openstack-build/python-novaclient",
"id": "091aa090185261a89c97797fa1ed5bcc0195e321",
"size": "4056",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "novaclient/v3/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "837455"
},
{
"name": "Shell",
"bytes": "4466"
}
],
"symlink_target": ""
}
|
from JumpScale import j
from collections import OrderedDict
# don't work with properties, this only happens when init is asked for so no big deal for performance
class ActorTemplate():
    """An actor template stored in a git repo (or inside an AYS repo).

    Wraps the template's on-disk layout: schema files, actor.hrd / actor.json
    configuration, optional ui.python and flist definitions.
    """
    # don't work with properties, this only happens when init is asked for so
    # no big deal for performance

    def __init__(self, gitrepo, path, aysrepo=None):
        # path is path in gitrepo or absolute path
        self.logger = j.atyourservice.logger
        self._schemaHrd = None
        if j.sal.fs.exists(path=path):
            # we know its absolute
            relpath = j.sal.fs.pathRemoveDirPart(
                path, gitrepo.baseDir, removeTrailingSlash=True)
            # path is now relative path
        else:
            relpath = path
            path = j.sal.fs.joinPaths(gitrepo.baseDir, path)
        if not j.sal.fs.exists(path=path):
            raise j.exceptions.Input(
                "Cannot find path for template:%s" % path)
        self.path = path
        self.pathRelative = relpath
        base = j.sal.fs.getBaseName(relpath)
        self.name = base
        if aysrepo is None:
            # means the template comes from an ays template git repo
            self.domain = j.sal.fs.getBaseName(gitrepo.baseDir)
            if not self.domain.startswith("ays_"):
                raise j.exceptions.Input(
                    "name of ays template git repo should start with ays_, now:%s" % gitrepo.baseDir)
            self.domain = self.domain[4:]
        else:
            self.domain = j.sal.fs.getDirName(aysrepo.path, True)
        self.aysrepo = aysrepo

    @property
    def role(self):
        # Role is the first dotted segment of the template name.
        return self.name.split('.')[0]

    @property
    def remoteUrl(self):
        git = j.clients.git.get(self.path, False)
        return git.remoteUrl

    @property
    def schemaHrd(self):
        """
        returns hrd schema (cached after first access)
        """
        if self._schemaHrd is None:
            hrdpath = j.sal.fs.joinPaths(self.path, "schema.hrd")
            if j.sal.fs.exists(hrdpath):
                self._schemaHrd = j.data.hrd.getSchema(hrdpath)
            else:
                self._schemaHrd = j.data.hrd.getSchema(content="")
        return self._schemaHrd

    @property
    def schemaCapnpText(self):
        """
        returns capnp schema as text
        """
        path = j.sal.fs.joinPaths(self.path, "schema.capnp")
        if j.sal.fs.exists(path):
            return j.sal.fs.fileGetContents(path)
        # BUG FIX: the original compared with ``is not ""`` (object identity),
        # which is implementation-dependent for strings; use equality.
        if self.schemaHrd.capnpSchema != "":
            return self.schemaHrd.capnpSchema
        return ""

    @property
    def _hrd(self):
        hrdpath = j.sal.fs.joinPaths(self.path, "actor.hrd")
        if j.sal.fs.exists(hrdpath):
            return j.data.hrd.get(hrdpath, prefixWithName=False)
        else:
            return j.data.hrd.get(content="")

    @property
    def configDict(self):
        # actor.json contents merged with (and overridden by) the hrd config.
        path = j.sal.fs.joinPaths(self.path, "actor.json")
        if j.sal.fs.exists(path, followlinks=True):
            ddict = j.data.serializer.json.load(path)
        else:
            ddict = {}
        ddict.update(self._hrd.getHRDAsDict())
        return ddict

    @property
    def configJSON(self):
        ddict2 = OrderedDict(self.configDict)
        return j.data.serializer.json.dumps(ddict2, sort_keys=True, indent=True)

    @property
    def dataUI(self):
        path = j.sal.fs.joinPaths(self.path, "ui.python")
        if j.sal.fs.exists(path, followlinks=True):
            return j.sal.fs.fileGetContents(path)
        return ""

    @property
    def recurringDict(self):
        return self._hrd.getDictFromPrefix('recurring')

    @property
    def eventDict(self):
        return self._hrd.getDictFromPrefix('event')

    def actorGet(self, aysrepo):
        """Instantiate an Actor from this template in *aysrepo*.

        BUG FIX: the original constructed and initialized the actor but never
        returned it, so callers always received None.
        """
        Actor = self.aysrepo.getActorClass()
        actor = Actor(aysrepo)
        actor.initFromTemplate(template=self)
        return actor

    @property
    def flists(self):
        """Return the flist definitions, each with its file 'content' loaded."""
        flists = self._hrd.getDictFromPrefix('flists')
        for name in list(flists.keys()):
            path = j.sal.fs.joinPaths(self.path, 'flists', name)
            if j.sal.fs.exists(path):
                flists[name]['content'] = j.sal.fs.fileGetContents(path)
            elif j.sal.fs.exists(path + '.flist'):
                flists[name]['content'] = j.sal.fs.fileGetContents(path + '.flist')
            else:
                raise j.exceptions.NotFound("flist definition in %s references a file that doesn't exists: %s" % (self, path))
        return flists

    def __repr__(self):
        return "actortemplate: %-15s:%s" % (self.domain, self.name)
|
{
"content_hash": "0d39502adeb8073d4a0b1a3772a2555a",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 126,
"avg_line_length": 32.41304347826087,
"alnum_prop": 0.5846188240554437,
"repo_name": "Jumpscale/jumpscale_core8",
"id": "c7f432e873c29172da68fbc513f2fee53b8ae71b",
"size": "4475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/baselib/atyourservice81/ActorTemplate.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1113"
},
{
"name": "Cap'n Proto",
"bytes": "9033"
},
{
"name": "Lua",
"bytes": "12538"
},
{
"name": "Python",
"bytes": "4343122"
},
{
"name": "Shell",
"bytes": "7091"
}
],
"symlink_target": ""
}
|
from google.cloud import documentai_v1beta3
async def sample_get_processor():
    """Fetch a single Document AI processor and print the response."""
    # Create a client
    client = documentai_v1beta3.DocumentProcessorServiceAsyncClient()

    # Initialize request argument(s)
    request = documentai_v1beta3.GetProcessorRequest(name="name_value")

    # Make the request
    response = await client.get_processor(request=request)

    # Handle the response
    print(response)

# [END documentai_v1beta3_generated_DocumentProcessorService_GetProcessor_async]
|
{
"content_hash": "e678ea40985132a7ab500f9de3891295",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 80,
"avg_line_length": 26.789473684210527,
"alnum_prop": 0.7387033398821218,
"repo_name": "googleapis/python-documentai",
"id": "cc90dd2ee4454c254ed4c666a871bcff13827270",
"size": "1915",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/documentai_v1beta3_generated_document_processor_service_get_processor_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1819136"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Register all installed apps' ModelAdmins with the admin site.
admin.autodiscover()

urlpatterns = patterns(
    '',
    # Example:
    # (r'^demo_project/', include('demo_project.foo.urls')),

    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),

    (r'geotagging/', include("geotags.urls")),

    # Django admin:
    (r'^admin/', include(admin.site.urls)),
)
|
{
"content_hash": "85346ef877ad24e9d6b7e8028eed5139",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 31.31578947368421,
"alnum_prop": 0.6907563025210084,
"repo_name": "uclastudentmedia/django-geotagging",
"id": "4484e2ea2004c3bfc033002414f387b14b4d5d6a",
"size": "595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geotagging_demo_project/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "178"
},
{
"name": "Python",
"bytes": "28127"
},
{
"name": "Shell",
"bytes": "224"
}
],
"symlink_target": ""
}
|
from pkg_resources import resource_filename
from time import time
from sanic import Sanic
from sanic.response import html, json
from jinja2 import Environment, PackageLoader, Markup
from asyncpg import create_pool
from monocle import sanitized as conf
from monocle.bounds import center
from monocle.names import DAMAGE, MOVES, POKEMON
from monocle.web_utils import get_scan_coords, get_worker_markers, Workers, get_args
env = Environment(loader=PackageLoader('monocle', 'templates'))
app = Sanic(__name__)
app.static('/static', resource_filename('monocle', 'static'))
def social_links():
    """Build the HTML markup for whichever social buttons are configured."""
    parts = []
    if conf.FB_PAGE_ID:
        parts.append('<a class="map_btn facebook-icon" target="_blank" href="https://www.facebook.com/' + conf.FB_PAGE_ID + '"></a>')
    if conf.TWITTER_SCREEN_NAME:
        parts.append('<a class="map_btn twitter-icon" target="_blank" href="https://www.twitter.com/' + conf.TWITTER_SCREEN_NAME + '"></a>')
    if conf.DISCORD_INVITE_ID:
        parts.append('<a class="map_btn discord-icon" target="_blank" href="https://discord.gg/' + conf.DISCORD_INVITE_ID + '"></a>')
    if conf.TELEGRAM_USERNAME:
        parts.append('<a class="map_btn telegram-icon" target="_blank" href="https://www.telegram.me/' + conf.TELEGRAM_USERNAME + '"></a>')
    return Markup(''.join(parts))
def render_map():
    """Render the main map page once, honoring custom CSS/JS/HTML overrides."""
    extra = ''
    if conf.LOAD_CUSTOM_CSS_FILE:
        extra = '<link rel="stylesheet" href="static/css/custom.css">'
    if conf.LOAD_CUSTOM_JS_FILE:
        extra += '<script type="text/javascript" src="static/js/custom.js"></script>'
    js_vars = Markup(
        "_defaultSettings['FIXED_OPACITY'] = '{:d}'; "
        "_defaultSettings['SHOW_TIMER'] = '{:d}'; "
        "_defaultSettings['TRASH_IDS'] = [{}]; ".format(
            conf.FIXED_OPACITY,
            conf.SHOW_TIMER,
            ', '.join(str(p_id) for p_id in conf.TRASH_IDS)))
    template_name = 'custom.html' if conf.LOAD_CUSTOM_HTML_FILE else 'newmap.html'
    template = env.get_template(template_name)
    return html(template.render(
        area_name=conf.AREA_NAME,
        map_center=center,
        map_provider_url=conf.MAP_PROVIDER_URL,
        map_provider_attribution=conf.MAP_PROVIDER_ATTRIBUTION,
        social_links=social_links(),
        init_js_vars=js_vars,
        extra_css_js=Markup(extra)
    ))
def render_worker_map():
    """Render the worker-status map page from its template."""
    template = env.get_template('workersmap.html')
    context = dict(
        area_name=conf.AREA_NAME,
        map_center=center,
        map_provider_url=conf.MAP_PROVIDER_URL,
        map_provider_attribution=conf.MAP_PROVIDER_ATTRIBUTION,
        social_links=social_links(),
    )
    return html(template.render(**context))
@app.get('/')
async def fullmap(request, html_map=render_map()):
    # The page is rendered once at import time and cached via the default arg.
    return html_map
# Worker-monitoring endpoints are only registered when enabled in config.
if conf.MAP_WORKERS:
    workers = Workers()

    @app.get('/workers_data')
    async def workers_data(request):
        return json(get_worker_markers(workers))

    @app.get('/workers')
    async def workers_map(request, html_map=render_worker_map()):
        return html_map

# The Jinja environment is only needed while rendering the cached pages above.
del env
@app.get('/data')
async def pokemon_data(request, _time=time):
    """Return sightings that have not expired, with id greater than last_id.

    SECURITY FIX: ``last_id`` comes straight from the query string and was
    interpolated into the SQL with ``str.format``, allowing SQL injection.
    It is now validated as an integer and both values are passed to asyncpg
    as bound parameters ($1/$2).
    """
    last_id = int(request.args.get('last_id', 0))
    async with app.pool.acquire() as conn:
        results = await conn.fetch('''
            SELECT id, pokemon_id, expire_timestamp, lat, lon, atk_iv, def_iv, sta_iv, move_1, move_2
            FROM sightings
            WHERE expire_timestamp > $1 AND id > $2
        ''', _time(), last_id)
    return json(list(map(sighting_to_marker, results)))
@app.get('/gym_data')
async def gym_data(request, names=POKEMON, _str=str):
    """Return the most recent sighting of every fort as gym markers."""
    query = '''
        SELECT
            fs.fort_id,
            fs.id,
            fs.team,
            fs.prestige,
            fs.guard_pokemon_id,
            fs.last_modified,
            f.lat,
            f.lon
        FROM fort_sightings fs
        JOIN forts f ON f.id=fs.fort_id
        WHERE (fs.fort_id, fs.last_modified) IN (
            SELECT fort_id, MAX(last_modified)
            FROM fort_sightings
            GROUP BY fort_id
        )
    '''
    async with app.pool.acquire() as conn:
        results = await conn.fetch(query)
    markers = []
    for fort in results:
        markers.append({
            'id': 'fort-' + _str(fort['fort_id']),
            'sighting_id': fort['id'],
            'prestige': fort['prestige'],
            'pokemon_id': fort['guard_pokemon_id'],
            'pokemon_name': names[fort['guard_pokemon_id']],
            'team': fort['team'],
            'lat': fort['lat'],
            'lon': fort['lon'],
        })
    return json(markers)
@app.get('/spawnpoints')
async def spawn_points(request, _dict=dict):
    """Return every known spawn point as a plain dict."""
    async with app.pool.acquire() as conn:
        rows = await conn.fetch('SELECT spawn_id, despawn_time, lat, lon, duration FROM spawnpoints')
    return json([_dict(row) for row in rows])
@app.get('/pokestops')
async def get_pokestops(request, _dict=dict):
    """Return every known pokestop as a plain dict."""
    async with app.pool.acquire() as conn:
        rows = await conn.fetch('SELECT external_id, lat, lon FROM pokestops')
    return json([_dict(row) for row in rows])
@app.get('/scan_coords')
async def scan_coords(request):
    """Return the configured scan-area coordinates."""
    return json(get_scan_coords())
def sighting_to_marker(pokemon, names=POKEMON, moves=MOVES, damage=DAMAGE, trash=conf.TRASH_IDS, _str=str):
    """Convert a sightings DB row into a JSON-serializable map marker.

    IV / move details are attached only when the row carries a first move.
    """
    pokemon_id = pokemon['pokemon_id']
    marker = {
        'id': 'pokemon-' + _str(pokemon['id']),
        'trash': pokemon_id in trash,
        'name': names[pokemon_id],
        'pokemon_id': pokemon_id,
        'lat': pokemon['lat'],
        'lon': pokemon['lon'],
        'expires_at': pokemon['expire_timestamp'],
    }
    first_move = pokemon['move_1']
    if first_move:
        second_move = pokemon['move_2']
        marker['atk'] = pokemon['atk_iv']
        marker['def'] = pokemon['def_iv']
        marker['sta'] = pokemon['sta_iv']
        marker['move1'] = moves[first_move]
        marker['move2'] = moves[second_move]
        marker['damage1'] = damage[first_move]
        marker['damage2'] = damage[second_move]
    return marker
@app.listener('before_server_start')
async def register_db(app, loop):
    """Create the asyncpg connection pool before the server starts serving."""
    app.pool = await create_pool(**conf.DB, loop=loop)
def main():
    """Parse CLI args and run the Sanic app."""
    args = get_args()
    app.run(debug=args.debug, host=args.host, port=args.port)


if __name__ == '__main__':
    main()
|
{
"content_hash": "a406527cb555e2db08687e5435f1e911",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 142,
"avg_line_length": 32.15544041450777,
"alnum_prop": 0.603770544634225,
"repo_name": "sebast1219/Monocle",
"id": "fba1c6eac3eb4c2c02904ab8a5fac6f94b7c2489",
"size": "6230",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "web_sanic.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3189"
},
{
"name": "HTML",
"bytes": "24630"
},
{
"name": "JavaScript",
"bytes": "17996"
},
{
"name": "PLpgSQL",
"bytes": "1059"
},
{
"name": "Python",
"bytes": "377347"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from social_login import VERSION
packages = [
    'social_login',
]

install_requires = [
    'socialoauth',
]

# BUG FIX: the original passed ``open('README.txt').read()`` directly, leaking
# the file handle (and emitting a ResourceWarning); read it via a context
# manager instead.
with open('README.txt') as readme:
    long_description = readme.read()

setup(
    name='django-social-login',
    version=VERSION,
    license='BSD',
    description='A Django APP for Social account login via OAuth2 Service',
    long_description=long_description,
    author='Wang Chao',
    author_email='yueyoum@gmail.com',
    url='https://github.com/yueyoum/django-social-login',
    keywords='social, oauth, oauth2, django, login',
    packages=packages,
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Topic :: Internet',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
    ],
)
|
{
"content_hash": "dfd7951d0735a2b27d32b0e968477ed0",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 77,
"avg_line_length": 24.735294117647058,
"alnum_prop": 0.6278240190249703,
"repo_name": "wangwanzhong/django-social-login",
"id": "ef98ae9e82255f048124ba8ba81f0fd6669bb5de",
"size": "841",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "5535"
},
{
"name": "Python",
"bytes": "26136"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from contextlib import contextmanager
import sys
import time
from reppy.robots import Robots
content = '''
# /robots.txt for http://www.fict.org/
# comments to webmaster@fict.org
User-agent: unhipbot
Disallow: /
User-agent: webcrawler
User-agent: excite
Disallow:
User-agent: *
Disallow: /org/plans.html
Allow: /org/
Allow: /serv
Allow: /~mak
Disallow: /
'''
@contextmanager
def timer(name, count):
    '''Time this block, then report total, average and rate for *count* runs.'''
    started = time.time()
    try:
        yield count
    finally:
        elapsed = time.time() - started
        print(name)
        print('=' * 10)
        print('Total: %s' % elapsed)
        print('  Avg: %s' % (elapsed / count))
        print(' Rate: %s' % (count / elapsed))
        print('')
# BUG FIX: the original iterated with the Python-2-only ``xrange``; ``range``
# produces identical iteration on both Python 2 and 3.
with timer('Parse', 100000) as count:
    for _ in range(count):
        Robots.parse('http://example.com/robots.txt', content)

parsed = Robots.parse('http://example.com/robots.txt', content)
with timer('Evaluate', 100000) as count:
    for _ in range(count):
        parsed.allowed('/org/example.html', 'other-bot')
|
{
"content_hash": "26b6bd20fd5b1db1a93e3b06ba06093e",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 63,
"avg_line_length": 21.45098039215686,
"alnum_prop": 0.6325411334552102,
"repo_name": "seomoz/reppy",
"id": "3a4486be2372e0e9520cc618866c5891551be920",
"size": "1118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bench.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "452"
},
{
"name": "Python",
"bytes": "55905"
},
{
"name": "Shell",
"bytes": "1153"
}
],
"symlink_target": ""
}
|
import ast
import re
def get_decimal_or_variable_value(token, variables):
    """Resolve *token* to an int: a decimal literal is converted directly,
    anything else must be a known name in the *variables* mapping."""
    if not token.isdecimal():
        assert(token in variables)
        return variables[token]
    return int(token)
def parse_freemarker_assign_expr(full_expr, variables):
    """Parse a FreeMarker assign body of the form ``name = expression``.

    *full_expr* is an ascii bytes object. The expression grammar supports
    ``+``, ``-``, ``*``, integer division ``/``, unary minus, parentheses,
    decimal literals and names resolved through *variables*.
    Returns ``[variable_name, computed_value]``.

    NOTE(review): atoms are read up to the next whitespace character, so
    operators and operands appear to require surrounding spaces -- confirm
    against the templates this is fed.
    """
    full_expr = full_expr.decode("ascii")
    pos_eq = full_expr.find("=")
    assert(pos_eq != -1)
    var = full_expr[: pos_eq].strip()
    # Variable name must be a single \w+ identifier.
    assert(re.search(r"^\w+$", var))
    expr = full_expr[pos_eq + 1:].strip()
    pos = 0
    # TODO: Replace my parser by parser from ast
    # Operator tables by precedence level: ops[0] binds loosest (+, -),
    # ops[1] binds tighter (*, integer /).
    ops = [
        {
            "+": lambda x, y: x + y,
            "-": lambda x, y: x - y
        },
        {
            "*": lambda x, y: x * y,
            "/": lambda x, y: x // y
        }
    ]
    def skip_spaces():
        # Advance the shared cursor past any whitespace.
        nonlocal pos
        while pos < len(expr) and expr[pos].isspace():
            pos += 1
    def parse_binary(lvl):
        # Left-associative binary expression at precedence level *lvl*;
        # level 2 falls through to the unary/atom parser.
        nonlocal pos
        if lvl == 2:
            return parse_unary()
        ret = parse_binary(lvl + 1)
        while True:
            if pos == len(expr) or not expr[pos] in ops[lvl]:
                skip_spaces()
                return ret
            f = ops[lvl][expr[pos]]
            pos += 1
            skip_spaces()
            tmp = parse_binary(lvl + 1)
            ret = f(ret, tmp)
    def parse_unary():
        # Atom: parenthesized sub-expression, unary minus, or a token that is
        # either a decimal literal or a variable name.
        nonlocal pos
        assert(pos < len(expr))
        if expr[pos] == "(":
            pos += 1
            skip_spaces()
            ret = parse_binary(0)
            assert(expr[pos] == ")")
            pos += 1
            skip_spaces()
            return ret
        if expr[pos] == "-":
            pos += 1
            skip_spaces()
            return -parse_unary()
        token = ""
        while pos < len(expr) and not expr[pos].isspace():
            token += expr[pos]
            pos += 1
        skip_spaces()
        return get_decimal_or_variable_value(token, variables)
    val = parse_binary(0)
    # The whole expression must have been consumed.
    assert(pos == len(expr))
    return [var, val]
def parse_freemarker_list_as(s, variables):
    """Parse a FreeMarker list header ``<sequence> as <name>``.

    The sequence is either an inclusive range ``a..b`` (each bound a decimal
    literal or a variable name) or a bracketed Python-literal list.
    Returns ``[name, iterable]``.
    """
    text = s.decode("ascii").strip()
    match = re.search(r"(.*)\bas\b(.*)", text)
    assert(match)
    seq_text, loop_var = (g.strip() for g in match.groups())
    assert(re.search(r"^\w+$", loop_var))
    if ".." not in seq_text:
        # Literal list form, e.g. "[1, 2, 3]".
        assert(seq_text[0] == "[" and seq_text[-1] == "]")
        return [loop_var, ast.literal_eval(seq_text)]
    # Range form "a..b": bounds resolved via variables, upper bound inclusive.
    assert(seq_text.count("..") == 1)
    lo_text, hi_text = (p.strip() for p in seq_text.split(".."))
    lo = get_decimal_or_variable_value(lo_text, variables)
    hi = get_decimal_or_variable_value(hi_text, variables)
    assert(lo <= hi)
    return [loop_var, range(lo, hi + 1)]
|
{
"content_hash": "826b975261a70305f19329375e580ba8",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 73,
"avg_line_length": 26.78301886792453,
"alnum_prop": 0.4674181049665375,
"repo_name": "kunyavskiy/polygon-cli",
"id": "9996667de9664a0d88063fe9b0f6554399fd0bd0",
"size": "2839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polygon_cli/freemarker_parsers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85115"
},
{
"name": "Shell",
"bytes": "52"
}
],
"symlink_target": ""
}
|
from bluebottle.time_based.tests.factories import DateActivityFactory, PeriodActivityFactory
from bluebottle.initiatives.tests.factories import InitiativeFactory
from django.urls.base import reverse
from bluebottle.funding.tests.factories import FundingFactory, DonorFactory
from bluebottle.test.factory_models.wallposts import (
MediaWallpostFactory, MediaWallpostPhotoFactory
)
from bluebottle.test.utils import BluebottleAdminTestCase
class TestWallpostAdmin(BluebottleAdminTestCase):
    """Admin smoke tests: the media wallpost list and change pages render for
    every supported parent object (initiative, activities, funding)."""

    def setUp(self):
        super(TestWallpostAdmin, self).setUp()
        self.client.force_login(self.superuser)
        self.media_wallpost_url = '/en/admin/wallposts/mediawallpost/'

    def _get_change_page(self):
        # Fetch the admin change page of the wallpost the test just created.
        change_url = reverse('admin:wallposts_mediawallpost_change', args=(self.wallpost.id, ))
        return self.client.get(change_url)

    def test_mediawallpost_admin(self):
        initiative = InitiativeFactory.create()
        self.wallpost = MediaWallpostFactory.create(content_object=initiative)
        MediaWallpostPhotoFactory.create_batch(10, mediawallpost=self.wallpost)
        self.wallpost.save()
        list_response = self.client.get(self.media_wallpost_url)
        self.assertContains(list_response, '9 more')
        change_response = self._get_change_page()
        self.assertContains(change_response, initiative.title)

    def test_fundraiser_textwallpost_admin(self):
        activity = DateActivityFactory()
        self.wallpost = MediaWallpostFactory.create(content_object=activity)
        change_response = self._get_change_page()
        self.assertEqual(change_response.status_code, 200, change_response.content)
        self.assertContains(change_response, activity.title)

    def test_period_activity_textwallpost_admin(self):
        activity = PeriodActivityFactory.create()
        self.wallpost = MediaWallpostFactory.create(content_object=activity)
        change_response = self._get_change_page()
        self.assertEqual(change_response.status_code, 200, change_response.content)
        self.assertContains(change_response, activity.title)

    def test_project_systemwallpost_admin(self):
        funding = FundingFactory.create()
        donation = DonorFactory(activity=funding)
        self.wallpost = MediaWallpostFactory.create(content_object=funding, donation=donation)
        change_response = self._get_change_page()
        self.assertEqual(change_response.status_code, 200, change_response.content)
        self.assertContains(change_response, funding.title)
|
{
"content_hash": "88b4d40358b75314877605bced37c88e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 94,
"avg_line_length": 47.30909090909091,
"alnum_prop": 0.7352036894696388,
"repo_name": "onepercentclub/bluebottle",
"id": "60c5b37eb0c797340c22d628ffc7f943520cd687",
"size": "2627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/wallposts/tests/test_admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
}
|
"""
PyMF Simplex Volume Maximization [1]
SIVM: class for SiVM
[1] C. Thurau, K. Kersting, and C. Bauckhage. Yes We Can - Simplex Volume
Maximization for Descriptive Web-Scale Matrix Factorization. In Proc. Int.
Conf. on Information and Knowledge Management. ACM. 2010.
"""
import scipy.sparse
import numpy as np
from .dist import *
from .aa import AA
__all__ = ["SIVM"]
class SIVM(AA):
    """
    SIVM(data, num_bases=4, dist_measure='l2')

    Simplex Volume Maximization. Factorize a data matrix into two matrices s.t.
    F = | data - W*H | is minimal. H is restricted to convexity. W is iteratively
    found by maximizing the volume of the resulting simplex (see [1]).

    Parameters
    ----------
    data : array_like, shape (_data_dimension, _num_samples)
        the input data
    num_bases: int, optional
        Number of bases to compute (column rank of W and row rank of H).
        4 (default)
    dist_measure : one of 'l2', 'cosine', 'l1', 'kl', 'abs_cosine',
        'weighted_abs_cosine'.
        Standard is 'l2' which maximizes the volume of the simplex. In contrast,
        'cosine' maximizes the volume of a cone (see [1] for details).
    init : string (default: 'fastmap')
        'fastmap' or 'origin'. Sets the method used for finding the very first
        basis vector. 'Origin' assumes the zero vector, 'Fastmap' picks one of
        the two vectors that have the largest pairwise distance.

    Attributes
    ----------
    W : "data_dimension x num_bases" matrix of basis vectors
    H : "num bases x num_samples" matrix of coefficients
    ferr : frobenius norm (after calling .factorize())

    Example
    -------
    Applying SIVM to some rather stupid data set:

    >>> import numpy as np
    >>> data = np.array([[1.0, 0.0, 2.0], [0.0, 1.0, 1.0]])
    >>> sivm_mdl = SIVM(data, num_bases=2)
    >>> sivm_mdl.factorize()

    The basis vectors are now stored in sivm_mdl.W, the coefficients in sivm_mdl.H.
    To compute coefficients for an existing set of basis vectors simply copy W
    to sivm_mdl.W, and set compute_w to False:

    >>> data = np.array([[1.5, 1.3], [1.2, 0.3]])
    >>> W = np.array([[1.0, 0.0], [0.0, 1.0]])
    >>> sivm_mdl = SIVM(data, num_bases=2)
    >>> sivm_mdl.W = W
    >>> sivm_mdl.factorize(compute_w=False)

    The result is a set of coefficients sivm_mdl.H, s.t. data = W * sivm_mdl.H.
    """

    # always overwrite the default number of iterations
    # -> any value other does not make sense.
    _NITER = 1

    def __init__(self, data, num_bases=4, dist_measure='l2', init='fastmap'):
        AA.__init__(self, data, num_bases=num_bases)

        self._dist_measure = dist_measure
        self._init = init

        # Map the measure name to its distance function (all come from .dist).
        dist_funcs = {
            'l1': l1_distance,
            'l2': l2_distance,
            'cosine': cosine_distance,
            'abs_cosine': abs_cosine_distance,
            'weighted_abs_cosine': weighted_abs_cosine_distance,
            'kl': kl_divergence,
        }
        try:
            self._distfunc = dist_funcs[self._dist_measure]
        except KeyError:
            # BUGFIX: previously an unknown measure silently left _distfunc
            # unset and only surfaced as an AttributeError inside factorize().
            raise ValueError("unknown dist_measure '%s'" % str(dist_measure))

    def _distance(self, idx):
        """ compute distances of a specific data point to all other samples"""
        if scipy.sparse.issparse(self.data):
            step = self.data.shape[1]
        else:
            # dense data is processed in chunks to bound peak memory
            step = 50000
        d = np.zeros((self.data.shape[1]))
        if idx == -1:
            # set vec to origin if idx=-1
            vec = np.zeros((self.data.shape[0], 1))
            if scipy.sparse.issparse(self.data):
                vec = scipy.sparse.csc_matrix(vec)
        else:
            vec = self.data[:, idx:idx+1]
        self._logger.info('compute distance to node ' + str(idx))
        # slice data into smaller chunks
        for idx_start in range(0, self.data.shape[1], step):
            if idx_start + step > self.data.shape[1]:
                idx_end = self.data.shape[1]
            else:
                idx_end = idx_start + step
            d[idx_start:idx_end] = self._distfunc(
                self.data[:,idx_start:idx_end], vec)
            self._logger.info('completed:' +
                str(idx_end/(self.data.shape[1]/100.0)) + "%")
        return d

    def init_h(self):
        """Initialize H with zeros (num_bases x num_samples)."""
        self.H = np.zeros((self._num_bases, self._num_samples))

    def init_w(self):
        """Initialize W with zeros (data_dimension x num_bases)."""
        self.W = np.zeros((self._data_dimension, self._num_bases))

    def init_sivm(self):
        """Pick the very first basis index according to self._init."""
        self.select = []
        if self._init == 'fastmap':
            # Fastmap like initialization
            # set the starting index for fastmap initialization
            cur_p = 0
            # after 3 iterations the first "real" index is found
            for i in range(3):
                d = self._distance(cur_p)
                cur_p = np.argmax(d)
            # store maximal found distance -> later used for "a" (->update_w)
            self._maxd = np.max(d)
            self.select.append(cur_p)
        elif self._init == 'origin':
            # set first vertex to origin
            cur_p = -1
            d = self._distance(cur_p)
            self._maxd = np.max(d)
            self.select.append(cur_p)

    def update_w(self):
        """ compute new W by greedily selecting the simplex-volume-maximizing
        data points (recursive distance updates, see [1])."""
        EPS = 10**-8
        self.init_sivm()
        # initialize some of the recursively updated distance measures ....
        d_square = np.zeros((self.data.shape[1]))
        d_sum = np.zeros((self.data.shape[1]))
        d_i_times_d_j = np.zeros((self.data.shape[1]))
        distiter = np.zeros((self.data.shape[1]))
        a = np.log(self._maxd)
        a_inc = a.copy()
        for l in range(1, self._num_bases):
            d = self._distance(self.select[l-1])
            # take the log of d (usually more stable than d)
            d = np.log(d + EPS)
            d_i_times_d_j += d * d_sum
            d_sum += d
            d_square += d**2
            distiter = d_i_times_d_j + a*d_sum - (l/2.0) * d_square
            # detect the next best data point
            self.select.append(np.argmax(distiter))
            self._logger.info('cur_nodes: ' + str(self.select))
        # sort indices, otherwise h5py won't work
        self.W = self.data[:, np.sort(self.select)]
        # "unsort" it again to keep the correct order
        self.W = self.W[:, np.argsort(np.argsort(self.select))]

    def factorize(self, show_progress=False, compute_w=True, compute_h=True,
                  compute_err=True, niter=1):
        """ Factorize s.t. WH = data

        Parameters
        ----------
        show_progress : bool
            print some extra information to stdout.
        compute_h : bool
            iteratively update values for H.
        compute_w : bool
            iteratively update values for W.
        compute_err : bool
            compute Frobenius norm |data-WH| after each update and store
            it to .ferr[k].

        Updated Values
        --------------
        .W : updated values for W.
        .H : updated values for H.
        .ferr : Frobenius norm |data-WH|.
        """
        # niter is deliberately pinned to 1 (see _NITER): one greedy pass
        # fully determines W for SiVM.
        AA.factorize(self, niter=1, show_progress=show_progress,
                     compute_w=compute_w, compute_h=compute_h,
                     compute_err=compute_err)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
{
"content_hash": "5ae3017121dd88c886c05b0abf6f2a97",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 83,
"avg_line_length": 33.11013215859031,
"alnum_prop": 0.555880787653007,
"repo_name": "guiquanz/msaf",
"id": "2557a1b7981368637e19d081f327f2286b961503",
"size": "7669",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "msaf/pymf/sivm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5577"
},
{
"name": "HTML",
"bytes": "5534"
},
{
"name": "PHP",
"bytes": "18248"
},
{
"name": "Python",
"bytes": "507065"
}
],
"symlink_target": ""
}
|
the truth
|
{
"content_hash": "bc0d43a1c41a3680bd997996a7d1c180",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 9,
"avg_line_length": 9,
"alnum_prop": 0.8888888888888888,
"repo_name": "cmdrflexo/humberto",
"id": "95b5b562aeea290e45fc24ce03acc073037a44f7",
"size": "9",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "policestate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51390"
}
],
"symlink_target": ""
}
|
from .parser import Parser
from .renamer import *
from .formatting import Formatting
from tabulate import tabulate
from fuzzywuzzy import fuzz
|
{
"content_hash": "9c8f51d574c1b8040687493a16bd0b48",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 34,
"avg_line_length": 28.6,
"alnum_prop": 0.8321678321678322,
"repo_name": "JonathanPetit/MovieSerieTorrent",
"id": "8e69db8269acc0c88ad2c1abf295b28c22327f3c",
"size": "143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MovieSerieTorrent/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13523"
}
],
"symlink_target": ""
}
|
import os
import sys
import hashlib
import argparse
import itertools
import logging
from distutils.util import strtobool
# Spinner glyphs cycled on stdout to show progress while scanning.
spinner = itertools.cycle(['-', '/', '|', '\\'])
# Root logger; used to report files that could not be read/hashed.
logger = logging.getLogger()
class Dedupifier(object):
    """Walk a directory tree, group files by filename or content hash, and
    optionally delete every file in a group except the first one found."""

    # chunk size (bytes) used when hashing file contents
    block_size = 65536

    def __init__(self, path, verbosity=0, use_hash=False, log_file=None):
        self.path = path              # root directory to scan
        self.verbosity = verbosity    # 0 = spinner only, >1 = per-file output
        self.use_hash = use_hash      # True: key on md5 digest; False: key on filename
        self.log_file = log_file      # open writable file object, or None

    def spin(self):
        """Advance the console spinner by one glyph."""
        # next() is the idiomatic spelling of spinner.__next__()
        sys.stdout.write('{}\b'.format(next(spinner)))
        sys.stdout.flush()

    def write_to_log(self):
        """Write every key and its list of files to the log file.

        Note: the 'with' block closes self.log_file, so this is a one-shot
        operation.
        """
        with self.log_file as f:
            for item in self.items:
                f.write('{}\n'.format(item))
                for file in self.items[item]:
                    f.write(' {}\n'.format(file))

    def hash_file(self, path):
        """Return the md5 hex digest of the file at *path*, read in
        block_size chunks so large files do not load into memory at once."""
        _hash = hashlib.md5()
        with open(path, 'rb') as _file:
            while True:
                data = _file.read(self.block_size)
                if not data:
                    break
                _hash.update(data)
        return _hash.hexdigest()

    def get_items(self):
        """Scan self.path and build self.items: {key: [full paths...]}.

        The key is either the md5 digest (use_hash) or the bare filename.
        Returns the mapping; also writes it to the log file if one was given.
        """
        self.items = {}
        sys.stdout.write('Processing items\n')
        num_files = 0
        num_keys = 0
        for root, subdirs, files in os.walk(self.path):
            if self.verbosity > 1:
                sys.stdout.write('Entering {}\n'.format(root))
            for filename in files:
                num_files += 1
                key = None
                if self.verbosity < 2:
                    self.spin()
                full_path = os.path.join(root, filename)
                if self.use_hash:
                    try:
                        key = self.hash_file(full_path)
                    except (FileNotFoundError, OSError) as exc:
                        # unreadable file: log it and keep scanning
                        logger.error("{}: '{}'".format(exc.strerror, exc.filename))
                else:
                    key = filename
                if key:
                    # 'key not in' is the idiomatic form of 'not key in'
                    if key not in self.items:
                        num_keys += 1
                        if self.verbosity > 1:
                            sys.stdout.write('Adding new item {} from {}\n'.format(key, full_path))
                        self.items[key] = []
                    else:
                        if self.verbosity > 1:
                            sys.stdout.write('Adding file {} to {}\n'.format(full_path, key))
                    self.items[key].append(full_path)
        sys.stdout.write('Found {} items in {} files\n'.format(num_keys, num_files))
        if self.log_file:
            sys.stdout.write('Writing results to {}\n'.format(self.log_file.name))
            self.write_to_log()
        return self.items

    def remove_duplicates(self):
        """Delete every file after the first in each group of self.items."""
        for item in self.items:
            for file in self.items[item][1:]:
                if self.verbosity > 1:
                    sys.stdout.write('Removing file {}\n'.format(file))
                os.remove(file)
def main():
    """CLI entry point: scan the given path, then optionally (with -r and an
    interactive yes) delete the duplicate files that were found.

    Returns True when duplicates were removed, False when the user declined,
    None when -r was not given.
    """
    parser = argparse.ArgumentParser(description='Dedupifier')
    parser.add_argument('path', help='directory to dedupe')
    # warning: twisted logic
    parser.add_argument('-x', dest='use_hash', action='store_false', default=True,
        help='use filename as a key instead of hash')
    parser.add_argument('-l', dest='log_file', default='dedupe.log',
        type=argparse.FileType('w'), help='name of logfile to save output to [dedupe.log]')
    parser.add_argument('-v', dest='verbosity', default=0, action='count',
        help='verbosity')
    parser.add_argument('-r', '--remove', dest='remove', action='store_true', default=False,
        help='Remove duplicate files')
    args = parser.parse_args()
    dedupifier = Dedupifier(args.path, verbosity=args.verbosity, use_hash=args.use_hash, log_file=args.log_file)
    dedupifier.get_items()
    if args.remove:
        sys.stdout.write('Are you sure you want to remove duplicate files? ')
        while True:
            try:
                if strtobool(input().lower()):
                    dedupifier.remove_duplicates()
                    return True
                # BUGFIX: a valid "no" answer previously fell through and the
                # loop re-read input forever without any prompt; exit instead.
                return False
            except ValueError:
                sys.stdout.write('Please respond with y or n.\n')
|
{
"content_hash": "9d32079de48e8166329b8b9b80cad3f2",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 112,
"avg_line_length": 32.646153846153844,
"alnum_prop": 0.5254476908576814,
"repo_name": "bne/dedupifier",
"id": "6527fdee55470ff21b0b4c384aa1e9479d318f23",
"size": "4244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dedupifier/dedupe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11762"
}
],
"symlink_target": ""
}
|
import sys
import time
# Python 2 only: reload(sys) restores sys.setdefaultencoding (removed by
# site.py) so the process default encoding can be forced to UTF-8 for the
# raw stdout writes below.
reload(sys)
sys.setdefaultencoding('utf8')
########## FILESENDER ##########
def fs_readChunk():
    """Read a chunk from the external storage backend and emit it on stdout.

    Invoked as: external.py fs_readChunk <filename> <offset> <length>
    """
    filename=sys.argv[2]
    offset=int(sys.argv[3])
    length=int(sys.argv[4])
    #data = readchunk from external
    # NOTE(review): this file is a template -- 'data' stays undefined until the
    # line above is implemented, so running it as-is raises NameError.
    sys.stdout.write(data)
    sys.stdout.flush()
def fs_writeChunk():
    """Write a chunk to the external storage backend and print the number of
    bytes written on stdout.

    Invoked as: external.py fs_writeChunk <filename> <offset> <length> <size>
    """
    filename=sys.argv[2]
    offset=int(sys.argv[3])
    length=int(sys.argv[4])
    size=int(sys.argv[5])
    #write file at offset and print number of bytes written
    sys.stdout.write(str(length))
    sys.stdout.flush()
def fs_deleteFile():
    """Delete <filename> from the external storage backend (template stub).

    Invoked as: external.py fs_deleteFile <filename>
    """
    filename=sys.argv[2]
    #delete file
def fs_completeFile():
    """Finalize <filename> of <size> bytes on the external backend, e.g. close
    the handle if the backend needs it (template stub).

    Invoked as: external.py fs_completeFile <filename> <size>
    """
    filename=sys.argv[2]
    size=int(sys.argv[3])
    #close file (if needed)
########## MAIN ##########
# Dispatch: the first CLI argument names one of the fs_* functions above.
method_name = sys.argv[1]
# Look the function up by name in the module namespace.
possibles = globals().copy()
possibles.update(locals())
method = possibles.get(method_name)
if not method:
    raise NotImplementedError("Method %s not implemented" % method_name)
method()
|
{
"content_hash": "bc1079e011086e29bc8e08b4f85c2024",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 76,
"avg_line_length": 19.471698113207548,
"alnum_prop": 0.5959302325581395,
"repo_name": "filesender/filesender",
"id": "1f9c3771b56bc4967807db5b8823c0129e0cb7f3",
"size": "1070",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "scripts/StorageFilesystemExternal/external.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42040"
},
{
"name": "Dockerfile",
"bytes": "3447"
},
{
"name": "HTML",
"bytes": "6584"
},
{
"name": "Hack",
"bytes": "978"
},
{
"name": "JavaScript",
"bytes": "3187885"
},
{
"name": "PHP",
"bytes": "5033685"
},
{
"name": "Python",
"bytes": "11556"
},
{
"name": "Roff",
"bytes": "2835"
},
{
"name": "Shell",
"bytes": "25069"
}
],
"symlink_target": ""
}
|
from nova import log as logging
from nova.scheduler.filters import abstract_filter
from nova import utils
LOG = logging.getLogger('nova.scheduler.filter.compute_filter')
class ComputeFilter(abstract_filter.AbstractHostFilter):
    """HostFilter hard-coded to work with InstanceType records."""

    def _satisfies_extra_specs(self, capabilities, instance_type):
        """Return True when every extra-spec key/value of the instance type is
        matched exactly by the compute service's capabilities."""
        if 'extra_specs' not in instance_type:
            return True
        # NOTE(lorinh): For now, we are just checking exact matching on the
        # values. Later on, we want to handle numerical
        # values so we can represent things like number of GPU cards
        return all(capabilities.get(key, None) == value
                   for key, value in instance_type['extra_specs'].iteritems())

    def host_passes(self, host_state, filter_properties):
        """Return True if this host can create the requested instance_type."""
        instance_type = filter_properties.get('instance_type')
        # Non-compute hosts and requests without an instance type pass through.
        if host_state.topic != 'compute' or not instance_type:
            return True
        service = host_state.service
        if not utils.service_is_up(service) or service['disabled']:
            return False
        capabilities = host_state.capabilities
        if not capabilities.get("enabled", True):
            return False
        return self._satisfies_extra_specs(capabilities, instance_type)
|
{
"content_hash": "b815d9750be58c4e11ea258b174ae837",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 75,
"avg_line_length": 39.925,
"alnum_prop": 0.6643706950532248,
"repo_name": "rcbops/nova-buildpackage",
"id": "f576eace1a7556d024fca70f4deedcf4904e8301",
"size": "2232",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/scheduler/filters/compute_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "5412903"
},
{
"name": "Shell",
"bytes": "24506"
}
],
"symlink_target": ""
}
|
'''
@author: xiaowing
@license: Apache Lincese 2.0
'''
from . import SsoToken, LoginUser
|
{
"content_hash": "80e16e952d86f98023ff963fadaf048a",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 33,
"avg_line_length": 17,
"alnum_prop": 0.6176470588235294,
"repo_name": "xiaowing/tinysso",
"id": "5f23bbf533856fa687796810f13a0b85916beee7",
"size": "102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/SsoEntity/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "529"
},
{
"name": "C#",
"bytes": "12233"
},
{
"name": "HTML",
"bytes": "4237"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "28601"
}
],
"symlink_target": ""
}
|
import glob
# Server's part.
# Each file under datasets/ becomes one (path, contents) input for the map step.
datasets = glob.glob("datasets/*")
def get_file_content(filename):
    """Return the entire contents of *filename* as a single string."""
    # 'with' closes the handle even if read() raises, replacing the
    # manual open/try/finally pairing.
    with open(filename) as f:
        return f.read()
# Mapping of dataset path -> file contents, handed to the workers as input.
source = dict((path, get_file_content(path)) for path in datasets)
# Results file; final() appends each reduced (word, count) pair to it.
output = open("output/map_reduce_results", "w")
def final(key, value):
    """Report one reduced (word, count) pair: echo it to stdout (Python 2
    print statement) and append its repr to the module-level output file."""
    print key, value
    output.write(str((key, value)))
# Client's part.
def mapfn(key, value):
    """Map step: emit (word, 1) for every whitespace-delimited word in
    *value*, lowercased; *key* (the file path) is ignored."""
    for row in value.splitlines():
        for token in row.split():
            yield token.lower(), 1
def reducefn(key, value):
    """Reduce step: a word's count is simply how many 1s the mappers emitted."""
    occurrences = len(value)
    return key, occurrences
|
{
"content_hash": "cbaf205ed357579a64deecdcc82bae1a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 66,
"avg_line_length": 17.322580645161292,
"alnum_prop": 0.6759776536312849,
"repo_name": "afronski/playground-notes",
"id": "53b414f834854db9c15c6755bcb3dd28292799b0",
"size": "537",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "big-data/map-reduce-playground/map-reduce-task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "410"
},
{
"name": "Haskell",
"bytes": "7576"
},
{
"name": "Java",
"bytes": "129549"
},
{
"name": "JavaScript",
"bytes": "2066"
},
{
"name": "Jupyter Notebook",
"bytes": "4079096"
},
{
"name": "Matlab",
"bytes": "343072"
},
{
"name": "Python",
"bytes": "425928"
},
{
"name": "R",
"bytes": "1233"
},
{
"name": "Ruby",
"bytes": "1522"
},
{
"name": "Scala",
"bytes": "416337"
},
{
"name": "Shell",
"bytes": "817"
},
{
"name": "TLA",
"bytes": "639"
}
],
"symlink_target": ""
}
|
class GoogleAnalyticsClientError(Exception):
    """
    General Google Analytics error (error accessing GA)
    """
    def __init__(self, reason):
        # BUGFIX: initialize the base Exception too, so e.args is populated
        # and the exception pickles/reprs normally in generic handlers.
        Exception.__init__(self, reason)
        self.reason = reason

    def __repr__(self):
        return 'GAError: %s' % self.reason

    def __str__(self):
        return 'GAError: %s' % self.reason
|
{
"content_hash": "aba1d85b1c9a25d649dbb126c2ee135d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 55,
"avg_line_length": 24.153846153846153,
"alnum_prop": 0.589171974522293,
"repo_name": "joshuajonah/python-googleanalytics",
"id": "bc46d4b180721343e20921f93653da1a4ca4493b",
"size": "314",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/googleanalytics/exception.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "28686"
}
],
"symlink_target": ""
}
|
# Plex route prefix for this channel.
PREFIX = "/video/ardmediathek2016"
# Raw-content base URL of the plugin's GitHub repo (master branch).
REPO_BASE = 'https://raw.githubusercontent.com/rols1/Plex-Plugin-ARDMediathek2016/master'
# Repo path of the manifest listing the single files eligible for updates.
Plugin_FILE = '/Contents/Resources/update_single_files'
################################################################################
# Aufruf: wie Plugin-Update - bei Pluginstart oder manuell (Einstellungen)
# Ablauf: mode: check:
# 1. Repo_FILE holen, HISTORY holen
# 2. je Zeile: Zeitstempel Repo_FILE gegen LOCAL_FILE vergleichen
# 2.1 Repo_FILE fehlt/leer: Abbruch - kein Austausch - Cancelsignal für laufende Single-Updates
# 2.2 Zeitstempel fehlt oder jünger: Rückgabe 1 = Austauschsignal
#
# mode replace:
# 1., 2. wie check_repo
# 3. update_single_files im Plugin: altes Format/fehlt/leer: Austausch aller gelisteten Dateien
# 4. Zeile: lokaler Zeitstempel ist jünger: Austausch der dazu gehörigen Datei
# 5. Abschluss: Austausch update_single_files + HISTORY im Plugin gegen die Repo-Version
# 6. Hinweis: Plugin neu starten | betroffene Dateien
#
# 03.12.2018 Formatwechsel: jede Zeile mit Dateistempel + Datei
#
# Austausch erfolgt auch, wenn update_single_files: fehlt/leer/im alten Format
# Für jede Datei existiert eine Zeile (ermöglicht inkrementelles Update)
# Beim Versions-Update ist update_single_files manuell zu löschen, um Inkonsistenzen zu verhindern.
# HISTORY enthält für jede hier geänderte Datei einen Eintrag zu aktuellen Version
#
# Format File: Datum | UTC-Sekunden | File1
# inkrementell: neue Update-Dateien (innerhalb einer Version) werden jeweils
# angehängt - das alte Format verhinderte ein Update, wenn Nutzer Updates übersprangen.
# Format Zeitstempel: Datum | UTC-Sekunden (Konsole: date, date +"%s")
# Bsp.: So 17. Dez 22:32:12 CET 2017 | 1513546328 | ./Contents/Resources/ZDFarabic.png
# Die UTC-Sekunden ersparen hier Zeitfunktionen (einfacher int-Vergleich)
################################################################################
@route(PREFIX + '/check_repo')
# mode = check or replace
# Returns (mode 'check'):
#   failure: 0, info string (exception text)
#   success: 1, info string (files that need replacing)
def check_repo(mode=''):
    """Compare the repo's update_single_files manifest with the local copy.

    mode 'check'   -> only report whether newer single files exist upstream.
    mode 'replace' -> additionally download every file whose repo timestamp is
                      newer and overwrite it inside the plugin bundle, then
                      store the new manifest and HISTORY.
    """
    Log('update_single: check_repo')
    Log('mode: ' + mode)
    repo_cont = ''
    hist_cont = ''
    try:  # fetch the repo manifest and HISTORY
        repo_cont = HTTP.Request(REPO_BASE + Plugin_FILE, cacheTime=1).content
        # repo_cont = Core.storage.load('/tmp/update_single_files')  # local test
        repo_cont = repo_cont.strip()
        hist_cont = HTTP.Request(REPO_BASE + '/HISTORY', cacheTime=1).content
    except Exception as exception:
        Log(str(exception))
        return 0, str(exception) + ' (Github: update_single_files)'
    Log('repo_cont: ' + repo_cont)
    Log('hist_cont: ' + hist_cont[:40])
    # Note: Core.storage.join_path takes the path elements as separate
    # arguments - unlike os.path.join.
    LOCAL_FILE = Core.storage.join_path(Core.bundle_path, 'Contents', 'Resources', 'update_single_files')
    Log(LOCAL_FILE)
    plugin_cont = ''
    Log(os.path.exists(LOCAL_FILE))
    if os.path.exists(LOCAL_FILE):
        try:  # load the plugin's local manifest
            plugin_cont = Core.storage.load(LOCAL_FILE)
        except Exception as exception:  # BUGFIX: was the undefined name 'Exceptionn'
            Log(str(exception))
    repo_lines = repo_cont.splitlines()
    plugin_lines = plugin_cont.splitlines()
    if repo_cont == '':  # empty manifest: nothing to exchange, regardless of mode
        Log('Repo update_single_files: leer')
        return 0, 'alles aktuell'
    force_replace = False
    if plugin_cont.count('|') <= 1:  # old format: at most one '|' in the whole file
        force_replace = True
        Log('Plugin update_single_files: altes Format/fehlt/leer')
    to_replace = []
    for line in repo_lines:  # compare the repo manifest against the local one
        # BUGFIX: was "'|' in line == False", which Python chains into
        # ("|" in line) and (line == False) and is therefore never true,
        # letting malformed lines crash the split() below.
        if line.strip() == '' or '|' not in line:  # empty or wrong format?
            continue
        line_date, line_stamp, line_file = line.split('|')  # repo manifest line
        Log('repo: date|stamp|file'); Log(line_date); Log(line_stamp); Log(line_file);
        line_stamp = int(line_stamp.strip())
        line_file = line_file.strip()
        line_file = line_file.replace('./', '/')  # clean up the ls-style path
        if force_replace == True:  # force replacement (local manifest empty or old format)
            to_replace.append(line_file)
        else:
            if plugin_lines:
                for p_line in plugin_lines:
                    if p_line.strip() == '' or '|' not in p_line:  # empty or wrong format?
                        continue
                    # Log(p_line)
                    p_line_date, p_line_stamp, p_line_file = p_line.split('|')  # local manifest line
                    p_line_stamp = int(p_line_stamp.strip())
                    p_line_file = p_line_file.strip()
                    p_line_file = p_line_file.replace('./', '/')  # clean up the ls-style path
                    if p_line_file == line_file:  # same file name
                        Log('line_stamp: ' + str(line_stamp)); Log('p_line_stamp: ' + str(p_line_stamp));
                        if line_stamp > p_line_stamp:  # repo file is newer
                            to_replace.append(line_file)
                            Log('to_replace: ' + line_file)
    if mode == 'check':
        if len(to_replace) == 0:
            Log('Abgleich: alles aktuell')
            return 0, 'alles aktuell'
        else:
            Log("Einzeldatei(en): " + ', '.join(to_replace))
            return 1, "Einzeldatei(en): " + ', '.join(to_replace)
    # ----------------------------------------------------------------------
    # from here on files are replaced (mode=replace):
    cnt = 0
    for line in to_replace:  # e.g. /Contents/Resources/ZDFarabic.png
        try:
            repo_url = REPO_BASE + line
            cont = HTTP.Request(repo_url).content
            plugin_path = os.path.join(Core.bundle_path + line)
            Log(plugin_path)
            # Log(cont[2000:3000])
            Core.storage.save(plugin_path, cont)  # uses a temp file: ..s/._file
        except Exception as exception:
            msg = str(exception)
            Log(msg)
            return ObjectContainer(header=L('Fehler'), message=msg)
        cnt = cnt + 1
    # finally store the new update_single_files manifest + HISTORY (hist_cont):
    plugin_file = Core.storage.join_path(Core.bundle_path, 'Contents', 'Resources', 'update_single_files')
    hist_file = Core.storage.join_path(Core.bundle_path, 'HISTORY')
    try:
        Core.storage.save(plugin_file, repo_cont)
        Core.storage.save(hist_file, hist_cont)
    except Exception as exception:
        msg = str(exception) + ' (Plugin: update_single_files)'
        Log(msg)
        return ObjectContainer(header=L('Fehler'), message=msg)
    msg = 'Update erfolgreich - Plugin bitte neu starten |\r\n'
    # BUGFIX: report the files that were actually replaced (to_replace), not
    # every line of the repo manifest (repo_lines) - cnt counts to_replace.
    msg = msg + '%s Datei(en) erneuert | %s' % (cnt, ', '.join(to_replace))
    return ObjectContainer(header=L('Info'), message=msg)
# ----------------------------------------------------------------------
|
{
"content_hash": "61c48a98fcb12a1422a57a0f0b6c31b2",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 103,
"avg_line_length": 44.2312925170068,
"alnum_prop": 0.6499538603506614,
"repo_name": "rols1/Plex-Plugin-ARDMediathek2016",
"id": "9565228e8ece13692e3771c7b4a4142744944400",
"size": "6814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Contents/Code/update_single.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "309517"
}
],
"symlink_target": ""
}
|
"""
OpenFlow 1.3 definitions.
"""
from ryu.lib import type_desc
from ryu.ofproto import nx_match
from ryu.ofproto import ofproto_utils
from ryu.ofproto import oxm_fields
from struct import calcsize
# struct ofp_header
# 8-byte message header, network byte order ('!').
OFP_HEADER_PACK_STR = '!BBHI'
OFP_HEADER_SIZE = 8
assert calcsize(OFP_HEADER_PACK_STR) == OFP_HEADER_SIZE
# enum ofp_type -- message type codes carried in the header.
OFPT_HELLO = 0    # Symmetric message
OFPT_ERROR = 1    # Symmetric message
OFPT_ECHO_REQUEST = 2    # Symmetric message
OFPT_ECHO_REPLY = 3    # Symmetric message
OFPT_EXPERIMENTER = 4    # Symmetric message
OFPT_FEATURES_REQUEST = 5    # Controller/switch message
OFPT_FEATURES_REPLY = 6    # Controller/switch message
OFPT_GET_CONFIG_REQUEST = 7    # Controller/switch message
OFPT_GET_CONFIG_REPLY = 8    # Controller/switch message
OFPT_SET_CONFIG = 9    # Controller/switch message
OFPT_PACKET_IN = 10    # Async message
OFPT_FLOW_REMOVED = 11    # Async message
OFPT_PORT_STATUS = 12    # Async message
OFPT_PACKET_OUT = 13    # Controller/switch message
OFPT_FLOW_MOD = 14    # Controller/switch message
OFPT_GROUP_MOD = 15    # Controller/switch message
OFPT_PORT_MOD = 16    # Controller/switch message
OFPT_TABLE_MOD = 17    # Controller/switch message
OFPT_MULTIPART_REQUEST = 18    # Controller/switch message
OFPT_MULTIPART_REPLY = 19    # Controller/switch message
OFPT_BARRIER_REQUEST = 20    # Controller/switch message
OFPT_BARRIER_REPLY = 21    # Controller/switch message
OFPT_QUEUE_GET_CONFIG_REQUEST = 22    # Controller/switch message
OFPT_QUEUE_GET_CONFIG_REPLY = 23    # Controller/switch message
OFPT_ROLE_REQUEST = 24    # Controller/switch message
OFPT_ROLE_REPLY = 25    # Controller/switch message
OFPT_GET_ASYNC_REQUEST = 26    # Controller/switch message
OFPT_GET_ASYNC_REPLY = 27    # Controller/switch message
OFPT_SET_ASYNC = 28    # Controller/switch message
OFPT_METER_MOD = 29    # Controller/switch message
# struct ofp_port
OFP_MAX_PORT_NAME_LEN = 16
OFP_ETH_ALEN = 6
OFP_ETH_ALEN_STR = str(OFP_ETH_ALEN)
_OFP_PORT_PACK_STR = 'I4x' + OFP_ETH_ALEN_STR + 's' + '2x' + \
str(OFP_MAX_PORT_NAME_LEN) + 's' + 'IIIIIIII'
OFP_PORT_PACK_STR = '!' + _OFP_PORT_PACK_STR
OFP_PORT_SIZE = 64
assert calcsize(OFP_PORT_PACK_STR) == OFP_PORT_SIZE
# enum ofp_port_config -- flags to indicate behavior of the physical port.
OFPPC_PORT_DOWN = 1 << 0    # Port is administratively down.
OFPPC_NO_RECV = 1 << 2    # Drop all packets received by port.
OFPPC_NO_FWD = 1 << 5    # Drop packets forwarded to port.
OFPPC_NO_PACKET_IN = 1 << 6    # Do not send packet-in msgs for port.
# enum ofp_port_state
OFPPS_LINK_DOWN = 1 << 0    # No physical link present.
OFPPS_BLOCKED = 1 << 1    # Port is blocked.
OFPPS_LIVE = 1 << 2    # Live for Fast Failover Group.
# enum ofp_port_no -- reserved/virtual port numbers.
OFPP_MAX = 0xffffff00
OFPP_IN_PORT = 0xfffffff8    # Send the packet out the input port. This
                             # virtual port must be explicitly used
                             # in order to send back out of the input
                             # port.
OFPP_TABLE = 0xfffffff9    # Perform actions in flow table.
                           # NB: This can only be the destination
                           # port for packet-out messages.
OFPP_NORMAL = 0xfffffffa    # Process with normal L2/L3 switching.
OFPP_FLOOD = 0xfffffffb    # All physical ports except input port and
                           # those disabled by STP.
OFPP_ALL = 0xfffffffc    # All physical ports except input port.
OFPP_CONTROLLER = 0xfffffffd    # Send to controller.
OFPP_LOCAL = 0xfffffffe    # Local openflow "port".
OFPP_ANY = 0xffffffff    # Not associated with a physical port.
# All ones is used to indicate all queues in a port (for stats retrieval).
OFPQ_ALL = 0xffffffff
# enum ofp_port_features -- link speed/medium capability bits.
OFPPF_10MB_HD = 1 << 0    # 10 Mb half-duplex rate support.
OFPPF_10MB_FD = 1 << 1    # 10 Mb full-duplex rate support.
OFPPF_100MB_HD = 1 << 2    # 100 Mb half-duplex rate support.
OFPPF_100MB_FD = 1 << 3    # 100 Mb full-duplex rate support.
OFPPF_1GB_HD = 1 << 4    # 1 Gb half-duplex rate support.
OFPPF_1GB_FD = 1 << 5    # 1 Gb full-duplex rate support.
OFPPF_10GB_FD = 1 << 6    # 10 Gb full-duplex rate support.
OFPPF_40GB_FD = 1 << 7    # 40 Gb full-duplex rate support.
OFPPF_100GB_FD = 1 << 8    # 100 Gb full-duplex rate support.
OFPPF_1TB_FD = 1 << 9    # 1 Tb full-duplex rate support.
OFPPF_OTHER = 1 << 10    # Other rate, not in the list.
OFPPF_COPPER = 1 << 11    # Copper medium.
OFPPF_FIBER = 1 << 12    # Fiber medium.
OFPPF_AUTONEG = 1 << 13    # Auto-negotiation.
OFPPF_PAUSE = 1 << 14    # Pause.
OFPPF_PAUSE_ASYM = 1 << 15    # Asymmetric pause.
# struct ofp_packet_queue
OFP_PACKET_QUEUE_PACK_STR = '!IIH6x'
OFP_PACKET_QUEUE_SIZE = 16
assert calcsize(OFP_PACKET_QUEUE_PACK_STR) == OFP_PACKET_QUEUE_SIZE
# enum ofp_queue_properties
OFPQT_MIN_RATE = 1    # Minimum datarate guaranteed.
OFPQT_MAX_RATE = 2    # Maximum datarate.
OFPQT_EXPERIMENTER = 0xffff    # Experimenter defined property.
# struct ofp_queue_prop_header
OFP_QUEUE_PROP_HEADER_PACK_STR = '!HH4x'
OFP_QUEUE_PROP_HEADER_SIZE = 8
assert calcsize(OFP_QUEUE_PROP_HEADER_PACK_STR) == OFP_QUEUE_PROP_HEADER_SIZE
# struct ofp_queue_prop_min_rate
# NOTE: the *_SIZE values below include the 8-byte property header,
# hence the header size is added on the left of each assert.
OFP_QUEUE_PROP_MIN_RATE_PACK_STR = '!H6x'
OFP_QUEUE_PROP_MIN_RATE_SIZE = 16
assert (calcsize(OFP_QUEUE_PROP_MIN_RATE_PACK_STR) +
        OFP_QUEUE_PROP_HEADER_SIZE) == OFP_QUEUE_PROP_MIN_RATE_SIZE
# struct ofp_queue_prop_max_rate
OFP_QUEUE_PROP_MAX_RATE_PACK_STR = '!H6x'
OFP_QUEUE_PROP_MAX_RATE_SIZE = 16
assert (calcsize(OFP_QUEUE_PROP_MAX_RATE_PACK_STR) +
        OFP_QUEUE_PROP_HEADER_SIZE) == OFP_QUEUE_PROP_MAX_RATE_SIZE
# struct ofp_queue_prop_experimenter
OFP_QUEUE_PROP_EXPERIMENTER_PACK_STR = '!I4x'
OFP_QUEUE_PROP_EXPERIMENTER_SIZE = 16
assert (calcsize(OFP_QUEUE_PROP_EXPERIMENTER_PACK_STR) +
        OFP_QUEUE_PROP_HEADER_SIZE) == OFP_QUEUE_PROP_EXPERIMENTER_SIZE
# struct ofp_match
_OFP_MATCH_PACK_STR = 'HHBBBB'
OFP_MATCH_PACK_STR = '!' + _OFP_MATCH_PACK_STR
OFP_MATCH_SIZE = 8
assert calcsize(OFP_MATCH_PACK_STR) == OFP_MATCH_SIZE
# enum ofp_match_type
OFPMT_STANDARD = 0    # Deprecated
OFPMT_OXM = 1    # OpenFlow Extensible Match
# enum ofp_oxm_class
OFPXMC_NXM_0 = 0x0000    # Backward compatibility with NXM
OFPXMC_NXM_1 = 0x0001    # Backward compatibility with NXM
OFPXMC_OPENFLOW_BASIC = 0x8000    # Basic class for OpenFlow
OFPXMC_EXPERIMENTER = 0xFFFF    # Experimenter class
# enum ofp_vlan_id
OFPVID_PRESENT = 0x1000    # Bit that indicates that a VLAN id is set.
OFPVID_NONE = 0x0000    # No VLAN id was set.
# enum ofp_ipv6exthdr_flags
OFPIEH_NONEXT = 1 << 0    # "No next header" encountered.
OFPIEH_ESP = 1 << 1    # Encrypted Sec Payload header present.
OFPIEH_AUTH = 1 << 2    # Authentication header present.
OFPIEH_DEST = 1 << 3    # 1 or 2 dest headers present.
OFPIEH_FRAG = 1 << 4    # Fragment header present.
OFPIEH_ROUTER = 1 << 5    # Router header present.
OFPIEH_HOP = 1 << 6    # Hop-by-hop header present.
OFPIEH_UNREP = 1 << 7    # Unexpected repeats encountered.
OFPIEH_UNSEQ = 1 << 8    # Unexpected sequencing encountered.
# ofp_oxm_experimenter_header
OFP_OXM_EXPERIMENTER_HEADER_PACK_STR = '!II'
OFP_OXM_EXPERIMENTER_HEADER_SIZE = 8
assert (calcsize(OFP_OXM_EXPERIMENTER_HEADER_PACK_STR) ==
        OFP_OXM_EXPERIMENTER_HEADER_SIZE)
# enum ofp_instruction_type
OFPIT_GOTO_TABLE = 1    # Setup the next table in the lookup pipeline.
OFPIT_WRITE_METADATA = 2    # Setup the metadata field for use later in
                            # pipeline.
OFPIT_WRITE_ACTIONS = 3    # Write the action(s) onto the datapath
                           # action set
OFPIT_APPLY_ACTIONS = 4    # Applies the action(s) immediately
OFPIT_CLEAR_ACTIONS = 5    # Clears all actions from the datapath action
                           # set
OFPIT_METER = 6    # Apply meter (rate limiter)
OFPIT_EXPERIMENTER = 0xFFFF    # Experimenter instruction
# struct ofp_instruction_goto_table
OFP_INSTRUCTION_GOTO_TABLE_PACK_STR = '!HHB3x'
OFP_INSTRUCTION_GOTO_TABLE_SIZE = 8
assert (calcsize(OFP_INSTRUCTION_GOTO_TABLE_PACK_STR) ==
        OFP_INSTRUCTION_GOTO_TABLE_SIZE)
# struct ofp_instruction_write_metadata
OFP_INSTRUCTION_WRITE_METADATA_PACK_STR = '!HH4xQQ'
OFP_INSTRUCTION_WRITE_METADATA_SIZE = 24
assert (calcsize(OFP_INSTRUCTION_WRITE_METADATA_PACK_STR) ==
        OFP_INSTRUCTION_WRITE_METADATA_SIZE)
# struct ofp_instruction_actions
OFP_INSTRUCTION_ACTIONS_PACK_STR = '!HH4x'
OFP_INSTRUCTION_ACTIONS_SIZE = 8
assert (calcsize(OFP_INSTRUCTION_ACTIONS_PACK_STR) ==
        OFP_INSTRUCTION_ACTIONS_SIZE)
# struct ofp_instruction_meter
OFP_INSTRUCTION_METER_PACK_STR = '!HHI'
OFP_INSTRUCTION_METER_SIZE = 8
assert calcsize(OFP_INSTRUCTION_METER_PACK_STR) == OFP_INSTRUCTION_METER_SIZE
# enum ofp_action_type
OFPAT_OUTPUT = 0    # Output to switch port.
OFPAT_COPY_TTL_OUT = 11    # Copy TTL "outwards" -- from
                           # next-to-outermost to outermost
OFPAT_COPY_TTL_IN = 12    # Copy TTL "inwards" -- from outermost to
                          # next-to-outermost
OFPAT_SET_MPLS_TTL = 15    # MPLS TTL.
OFPAT_DEC_MPLS_TTL = 16    # Decrement MPLS TTL
OFPAT_PUSH_VLAN = 17    # Push a new VLAN tag
OFPAT_POP_VLAN = 18    # Pop the outer VLAN tag
OFPAT_PUSH_MPLS = 19    # Push a new MPLS tag
OFPAT_POP_MPLS = 20    # Pop the outer MPLS tag
OFPAT_SET_QUEUE = 21    # Set queue id when outputting to a port
OFPAT_GROUP = 22    # Apply group
OFPAT_SET_NW_TTL = 23    # IP TTL.
OFPAT_DEC_NW_TTL = 24    # Decrement IP TTL.
OFPAT_SET_FIELD = 25    # Set a header field using OXM TLV format.
OFPAT_PUSH_PBB = 26    # Push a new PBB service tag (I-TAG)
OFPAT_POP_PBB = 27    # Pop the outer PBB service tag (I-TAG)
OFPAT_EXPERIMENTER = 0xffff
# struct ofp_action_header
OFP_ACTION_HEADER_PACK_STR = '!HH4x'
OFP_ACTION_HEADER_SIZE = 8
assert calcsize(OFP_ACTION_HEADER_PACK_STR) == OFP_ACTION_HEADER_SIZE
# struct ofp_action_output
OFP_ACTION_OUTPUT_PACK_STR = '!HHIH6x'
OFP_ACTION_OUTPUT_SIZE = 16
assert calcsize(OFP_ACTION_OUTPUT_PACK_STR) == OFP_ACTION_OUTPUT_SIZE
# enum ofp_controller_max_len
OFPCML_MAX = 0xffe5    # maximum max_len value which can be used to
                       # request a specific byte length.
OFPCML_NO_BUFFER = 0xffff    # indicates that no buffering should be
                             # applied and the whole packet is to be
                             # sent to the controller.
# struct ofp_action_group
OFP_ACTION_GROUP_PACK_STR = '!HHI'
OFP_ACTION_GROUP_SIZE = 8
assert calcsize(OFP_ACTION_GROUP_PACK_STR) == OFP_ACTION_GROUP_SIZE
# struct ofp_action_set_queue
OFP_ACTION_SET_QUEUE_PACK_STR = '!HHI'
OFP_ACTION_SET_QUEUE_SIZE = 8
assert calcsize(OFP_ACTION_SET_QUEUE_PACK_STR) == OFP_ACTION_SET_QUEUE_SIZE
# struct ofp_action_mpls_ttl
OFP_ACTION_MPLS_TTL_PACK_STR = '!HHB3x'
OFP_ACTION_MPLS_TTL_SIZE = 8
assert calcsize(OFP_ACTION_MPLS_TTL_PACK_STR) == OFP_ACTION_MPLS_TTL_SIZE
# struct ofp_action_nw_ttl
OFP_ACTION_NW_TTL_PACK_STR = '!HHB3x'
OFP_ACTION_NW_TTL_SIZE = 8
assert calcsize(OFP_ACTION_NW_TTL_PACK_STR) == OFP_ACTION_NW_TTL_SIZE
# struct ofp_action_push
OFP_ACTION_PUSH_PACK_STR = '!HHH2x'
OFP_ACTION_PUSH_SIZE = 8
assert calcsize(OFP_ACTION_PUSH_PACK_STR) == OFP_ACTION_PUSH_SIZE
# struct ofp_action_pop_mpls
OFP_ACTION_POP_MPLS_PACK_STR = '!HHH2x'
OFP_ACTION_POP_MPLS_SIZE = 8
assert calcsize(OFP_ACTION_POP_MPLS_PACK_STR) == OFP_ACTION_POP_MPLS_SIZE
# struct ofp_action_set_field
OFP_ACTION_SET_FIELD_PACK_STR = '!HH4x'
OFP_ACTION_SET_FIELD_SIZE = 8
assert calcsize(OFP_ACTION_SET_FIELD_PACK_STR) == OFP_ACTION_SET_FIELD_SIZE
# struct ofp_action_experimenter_header
OFP_ACTION_EXPERIMENTER_HEADER_PACK_STR = '!HHI'
OFP_ACTION_EXPERIMENTER_HEADER_SIZE = 8
assert (calcsize(OFP_ACTION_EXPERIMENTER_HEADER_PACK_STR) ==
        OFP_ACTION_EXPERIMENTER_HEADER_SIZE)
# ofp_switch_features
# NOTE: for full-message structs the *_SIZE includes the 8-byte
# ofp_header, so OFP_HEADER_SIZE is added in the asserts below.
OFP_SWITCH_FEATURES_PACK_STR = '!QIBB2xII'
OFP_SWITCH_FEATURES_SIZE = 32
assert (calcsize(OFP_SWITCH_FEATURES_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_SWITCH_FEATURES_SIZE)
# enum ofp_capabilities
OFPC_FLOW_STATS = 1 << 0    # Flow statistics.
OFPC_TABLE_STATS = 1 << 1    # Table statistics.
OFPC_PORT_STATS = 1 << 2    # Port statistics.
OFPC_GROUP_STATS = 1 << 3    # Group statistics.
OFPC_IP_REASM = 1 << 5    # Can reassemble IP fragments.
OFPC_QUEUE_STATS = 1 << 6    # Queue statistics.
OFPC_PORT_BLOCKED = 1 << 8    # Switch will block looping ports.
# struct ofp_switch_config
OFP_SWITCH_CONFIG_PACK_STR = '!HH'
OFP_SWITCH_CONFIG_SIZE = 12
assert (calcsize(OFP_SWITCH_CONFIG_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_SWITCH_CONFIG_SIZE)
# enum ofp_config_flags
OFPC_FRAG_NORMAL = 0    # No special handling for fragments.
OFPC_FRAG_DROP = 1    # Drop fragments.
OFPC_FRAG_REASM = 2    # Reassemble (only if OFPC_IP_REASM set).
OFPC_FRAG_MASK = 3
# enum ofp_table
OFPTT_MAX = 0xfe
OFPTT_ALL = 0xff
# struct ofp_table_mod
OFP_TABLE_MOD_PACK_STR = '!B3xI'
OFP_TABLE_MOD_SIZE = 16
assert (calcsize(OFP_TABLE_MOD_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_TABLE_MOD_SIZE)
# struct ofp_flow_mod
_OFP_FLOW_MOD_PACK_STR0 = 'QQBBHHHIIIH2x'
OFP_FLOW_MOD_PACK_STR = '!' + _OFP_FLOW_MOD_PACK_STR0 + _OFP_MATCH_PACK_STR
OFP_FLOW_MOD_PACK_STR0 = '!' + _OFP_FLOW_MOD_PACK_STR0
OFP_FLOW_MOD_SIZE = 56
assert (calcsize(OFP_FLOW_MOD_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_FLOW_MOD_SIZE)
# enum ofp_flow_mod_command
OFPFC_ADD = 0    # New flow.
OFPFC_MODIFY = 1    # Modify all matching flows.
OFPFC_MODIFY_STRICT = 2    # Modify entry strictly matching wildcards
OFPFC_DELETE = 3    # Delete all matching flows.
OFPFC_DELETE_STRICT = 4    # Strictly match wildcards and priority.
# By default, choose a priority in the middle.
OFP_DEFAULT_PRIORITY = 0x8000
# enum ofp_flow_mod_flags
OFPFF_SEND_FLOW_REM = 1 << 0    # Send flow removed message when flow
                                # expires or is deleted.
OFPFF_CHECK_OVERLAP = 1 << 1    # Check for overlapping entries first.
OFPFF_RESET_COUNTS = 1 << 2    # Reset flow packet and byte counts.
OFPFF_NO_PKT_COUNTS = 1 << 3    # Don't keep track of packet count.
OFPFF_NO_BYT_COUNTS = 1 << 4    # Don't keep track of byte count.
# struct ofp_group_mod
OFP_GROUP_MOD_PACK_STR = '!HBxI'
OFP_GROUP_MOD_SIZE = 16
assert (calcsize(OFP_GROUP_MOD_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_GROUP_MOD_SIZE)
# enum ofp_group_mod_command
OFPGC_ADD = 0    # New group.
OFPGC_MODIFY = 1    # Modify all matching groups.
OFPGC_DELETE = 2    # Delete all matching groups.
# enum ofp_group
OFPG_MAX = 0xffffff00    # Last usable group number.
# Fake groups
OFPG_ALL = 0xfffffffc    # Represents all groups for group delete commands.
OFPG_ANY = 0xffffffff    # Wildcard group used only for flow stats requests.
                         # Selects all flows regardless of group
                         # (including flows with no group).
# enum ofp_group_type
OFPGT_ALL = 0    # All (multicast/broadcast) group.
OFPGT_SELECT = 1    # Select group.
OFPGT_INDIRECT = 2    # Indirect group.
OFPGT_FF = 3    # Fast failover group.
# struct ofp_bucket
OFP_BUCKET_PACK_STR = '!HHII4x'
OFP_BUCKET_SIZE = 16
assert calcsize(OFP_BUCKET_PACK_STR) == OFP_BUCKET_SIZE
# struct ofp_port_mod
OFP_PORT_MOD_PACK_STR = '!I4x' + OFP_ETH_ALEN_STR + 's2xIII4x'
OFP_PORT_MOD_SIZE = 40
assert (calcsize(OFP_PORT_MOD_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_PORT_MOD_SIZE)
# struct ofp_meter_mod
OFP_METER_MOD_PACK_STR = '!HHI'
OFP_METER_MOD_SIZE = 16
assert (calcsize(OFP_METER_MOD_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_METER_MOD_SIZE)
# enum ofp_meter -- reserved meter ids.
OFPM_MAX = 0xffff0000
OFPM_SLOWPATH = 0xfffffffd    # Meter for slow datapath, if any.
OFPM_CONTROLLER = 0xfffffffe    # Meter for controller connection.
OFPM_ALL = 0xffffffff    # Represents all meters for stat requests
                         # commands.
# enum ofp_meter_mod_command
OFPMC_ADD = 0    # New meter.
OFPMC_MODIFY = 1    # Modify specified meter.
OFPMC_DELETE = 2    # Delete specified meter.
# enum ofp_meter_flags
OFPMF_KBPS = 1 << 0    # Rate value in kb/s (kilo-bit per second).
OFPMF_PKTPS = 1 << 1    # Rate value in packet/sec.
OFPMF_BURST = 1 << 2    # Do burst size.
OFPMF_STATS = 1 << 3    # Collect statistics.
# struct ofp_meter_band_header
OFP_METER_BAND_HEADER_PACK_STR = '!HHII'
OFP_METER_BAND_HEADER_SIZE = 12
assert (calcsize(OFP_METER_BAND_HEADER_PACK_STR) ==
        OFP_METER_BAND_HEADER_SIZE)
# enum ofp_meter_band_type
OFPMBT_DROP = 1    # Drop packet.
OFPMBT_DSCP_REMARK = 2    # Remark DSCP in the IP header.
OFPMBT_EXPERIMENTER = 0xFFFF    # Experimenter meter band.
# struct ofp_meter_band_drop
OFP_METER_BAND_DROP_PACK_STR = '!HHII4x'
OFP_METER_BAND_DROP_SIZE = 16
assert (calcsize(OFP_METER_BAND_DROP_PACK_STR) ==
        OFP_METER_BAND_DROP_SIZE)
# struct ofp_meter_band_dscp_remark
OFP_METER_BAND_DSCP_REMARK_PACK_STR = '!HHIIB3x'
OFP_METER_BAND_DSCP_REMARK_SIZE = 16
assert (calcsize(OFP_METER_BAND_DSCP_REMARK_PACK_STR) ==
        OFP_METER_BAND_DSCP_REMARK_SIZE)
# struct ofp_meter_band_experimenter
OFP_METER_BAND_EXPERIMENTER_PACK_STR = '!HHIII'
OFP_METER_BAND_EXPERIMENTER_SIZE = 16
assert (calcsize(OFP_METER_BAND_EXPERIMENTER_PACK_STR) ==
        OFP_METER_BAND_EXPERIMENTER_SIZE)
# struct ofp_multipart_request
OFP_MULTIPART_REQUEST_PACK_STR = '!HH4x'
OFP_MULTIPART_REQUEST_SIZE = 16
assert (calcsize(OFP_MULTIPART_REQUEST_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_MULTIPART_REQUEST_SIZE)
# enum ofp_multipart_request_flags
OFPMPF_REQ_MORE = 1 << 0    # More requests to follow.
# struct ofp_multipart_reply
OFP_MULTIPART_REPLY_PACK_STR = '!HH4x'
OFP_MULTIPART_REPLY_SIZE = 16
assert (calcsize(OFP_MULTIPART_REPLY_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_MULTIPART_REPLY_SIZE)
# enum ofp_multipart_reply_flags
OFPMPF_REPLY_MORE = 1 << 0    # More replies to follow.
# enum ofp_multipart_types
OFPMP_DESC = 0
OFPMP_FLOW = 1
OFPMP_AGGREGATE = 2
OFPMP_TABLE = 3
OFPMP_PORT_STATS = 4
OFPMP_QUEUE = 5
OFPMP_GROUP = 6
OFPMP_GROUP_DESC = 7
OFPMP_GROUP_FEATURES = 8
OFPMP_METER = 9
OFPMP_METER_CONFIG = 10
OFPMP_METER_FEATURES = 11
OFPMP_TABLE_FEATURES = 12
OFPMP_PORT_DESC = 13
OFPMP_EXPERIMENTER = 0xffff
# struct ofp_desc
# Four 256-byte description strings plus one 32-byte serial number
# (256 * 4 + 32 = 1056 bytes).
DESC_STR_LEN = 256
DESC_STR_LEN_STR = str(DESC_STR_LEN)
SERIAL_NUM_LEN = 32
SERIAL_NUM_LEN_STR = str(SERIAL_NUM_LEN)
_DESC_FIELD = DESC_STR_LEN_STR + 's'
_SERIAL_FIELD = SERIAL_NUM_LEN_STR + 's'
OFP_DESC_PACK_STR = '!' + ''.join(
    [_DESC_FIELD, _DESC_FIELD, _DESC_FIELD, _SERIAL_FIELD, _DESC_FIELD])
OFP_DESC_SIZE = 1056
assert calcsize(OFP_DESC_PACK_STR) == OFP_DESC_SIZE
# struct ofp_flow_stats_request
_OFP_FLOW_STATS_REQUEST_0_PACK_STR = 'B3xII4xQQ'
OFP_FLOW_STATS_REQUEST_0_PACK_STR = '!' + _OFP_FLOW_STATS_REQUEST_0_PACK_STR
OFP_FLOW_STATS_REQUEST_0_SIZE = 32
assert (calcsize(OFP_FLOW_STATS_REQUEST_0_PACK_STR) ==
        OFP_FLOW_STATS_REQUEST_0_SIZE)
OFP_FLOW_STATS_REQUEST_PACK_STR = (OFP_FLOW_STATS_REQUEST_0_PACK_STR +
                                   _OFP_MATCH_PACK_STR)
OFP_FLOW_STATS_REQUEST_SIZE = 40
assert (calcsize(OFP_FLOW_STATS_REQUEST_PACK_STR) ==
        OFP_FLOW_STATS_REQUEST_SIZE)
# struct ofp_flow_stats
_OFP_FLOW_STATS_0_PACK_STR = 'HBxIIHHHH4xQQQ'
OFP_FLOW_STATS_0_PACK_STR = '!' + _OFP_FLOW_STATS_0_PACK_STR
OFP_FLOW_STATS_0_SIZE = 48
assert calcsize(OFP_FLOW_STATS_0_PACK_STR) == OFP_FLOW_STATS_0_SIZE
OFP_FLOW_STATS_PACK_STR = (OFP_FLOW_STATS_0_PACK_STR +
                           _OFP_MATCH_PACK_STR)
OFP_FLOW_STATS_SIZE = 56
assert calcsize(OFP_FLOW_STATS_PACK_STR) == OFP_FLOW_STATS_SIZE
# struct ofp_aggregate_stats_request
_OFP_AGGREGATE_STATS_REQUEST_0_PACK_STR = 'B3xII4xQQ'
OFP_AGGREGATE_STATS_REQUEST_0_PACK_STR = '!' + \
    _OFP_AGGREGATE_STATS_REQUEST_0_PACK_STR
OFP_AGGREGATE_STATS_REQUEST_0_SIZE = 32
assert (calcsize(OFP_AGGREGATE_STATS_REQUEST_0_PACK_STR) ==
        OFP_AGGREGATE_STATS_REQUEST_0_SIZE)
OFP_AGGREGATE_STATS_REQUEST_PACK_STR = \
    OFP_AGGREGATE_STATS_REQUEST_0_PACK_STR + _OFP_MATCH_PACK_STR
OFP_AGGREGATE_STATS_REQUEST_SIZE = 40
assert (calcsize(OFP_AGGREGATE_STATS_REQUEST_PACK_STR) ==
        OFP_AGGREGATE_STATS_REQUEST_SIZE)
# struct ofp_aggregate_stats_request
# NOTE(review): this redefines OFP_AGGREGATE_STATS_REQUEST_PACK_STR and
# OFP_AGGREGATE_STATS_REQUEST_SIZE with identical values to the block
# just above; the duplication appears redundant but is harmless.
OFP_AGGREGATE_STATS_REQUEST_PACK_STR = '!B3xII4xQQ' + _OFP_MATCH_PACK_STR
OFP_AGGREGATE_STATS_REQUEST_SIZE = 40
assert (calcsize(OFP_AGGREGATE_STATS_REQUEST_PACK_STR) ==
        OFP_AGGREGATE_STATS_REQUEST_SIZE)
# struct ofp_aggregate_stats_reply
OFP_AGGREGATE_STATS_REPLY_PACK_STR = '!QQI4x'
OFP_AGGREGATE_STATS_REPLY_SIZE = 24
assert (calcsize(OFP_AGGREGATE_STATS_REPLY_PACK_STR) ==
        OFP_AGGREGATE_STATS_REPLY_SIZE)
# struct ofp_table_stats
OFP_TABLE_STATS_PACK_STR = '!B3xIQQ'
OFP_TABLE_STATS_SIZE = 24
assert calcsize(OFP_TABLE_STATS_PACK_STR) == OFP_TABLE_STATS_SIZE
# struct ofp_table_features
OFP_MAX_TABLE_NAME_LEN = 32
OFP_MAX_TABLE_NAME_LEN_STR = str(OFP_MAX_TABLE_NAME_LEN)
OFP_TABLE_FEATURES_PACK_STR = '!HB5x' + OFP_MAX_TABLE_NAME_LEN_STR + \
                              's' + 'QQII'
OFP_TABLE_FEATURES_SIZE = 64
assert (calcsize(OFP_TABLE_FEATURES_PACK_STR) ==
        OFP_TABLE_FEATURES_SIZE)
# enum ofp_table_feature_prop_type
# NOTE: 9 and 11 are intentionally unassigned; even values describe the
# regular entry, the following odd value describes the table-miss entry.
OFPTFPT_INSTRUCTIONS = 0
OFPTFPT_INSTRUCTIONS_MISS = 1
OFPTFPT_NEXT_TABLES = 2
OFPTFPT_NEXT_TABLES_MISS = 3
OFPTFPT_WRITE_ACTIONS = 4
OFPTFPT_WRITE_ACTIONS_MISS = 5
OFPTFPT_APPLY_ACTIONS = 6
OFPTFPT_APPLY_ACTIONS_MISS = 7
OFPTFPT_MATCH = 8
OFPTFPT_WILDCARDS = 10
OFPTFPT_WRITE_SETFIELD = 12
OFPTFPT_WRITE_SETFIELD_MISS = 13
OFPTFPT_APPLY_SETFIELD = 14
OFPTFPT_APPLY_SETFIELD_MISS = 15
OFPTFPT_EXPERIMENTER = 0xFFFE
OFPTFPT_EXPERIMENTER_MISS = 0xFFFF
# struct ofp_table_feature_prop_instructions
OFP_TABLE_FEATURE_PROP_INSTRUCTIONS_PACK_STR = '!HH'
OFP_TABLE_FEATURE_PROP_INSTRUCTIONS_SIZE = 4
assert (calcsize(OFP_TABLE_FEATURE_PROP_INSTRUCTIONS_PACK_STR) ==
        OFP_TABLE_FEATURE_PROP_INSTRUCTIONS_SIZE)
# struct ofp_table_feature_prop_next_tables
OFP_TABLE_FEATURE_PROP_NEXT_TABLES_PACK_STR = '!HH'
OFP_TABLE_FEATURE_PROP_NEXT_TABLES_SIZE = 4
assert (calcsize(OFP_TABLE_FEATURE_PROP_NEXT_TABLES_PACK_STR) ==
        OFP_TABLE_FEATURE_PROP_NEXT_TABLES_SIZE)
# struct ofp_table_feature_prop_actions
OFP_TABLE_FEATURE_PROP_ACTIONS_PACK_STR = '!HH'
OFP_TABLE_FEATURE_PROP_ACTIONS_SIZE = 4
assert (calcsize(OFP_TABLE_FEATURE_PROP_ACTIONS_PACK_STR) ==
        OFP_TABLE_FEATURE_PROP_ACTIONS_SIZE)
# struct ofp_table_feature_prop_oxm
OFP_TABLE_FEATURE_PROP_OXM_PACK_STR = '!HH'
OFP_TABLE_FEATURE_PROP_OXM_SIZE = 4
assert (calcsize(OFP_TABLE_FEATURE_PROP_OXM_PACK_STR) ==
        OFP_TABLE_FEATURE_PROP_OXM_SIZE)
# struct ofp_port_stats_request
OFP_PORT_STATS_REQUEST_PACK_STR = '!I4x'
OFP_PORT_STATS_REQUEST_SIZE = 8
assert (calcsize(OFP_PORT_STATS_REQUEST_PACK_STR) ==
        OFP_PORT_STATS_REQUEST_SIZE)
# struct ofp_port_stats
OFP_PORT_STATS_PACK_STR = '!I4xQQQQQQQQQQQQII'
OFP_PORT_STATS_SIZE = 112
assert calcsize(OFP_PORT_STATS_PACK_STR) == OFP_PORT_STATS_SIZE
# struct ofp_queue_stats_request
OFP_QUEUE_STATS_REQUEST_PACK_STR = '!II'
OFP_QUEUE_STATS_REQUEST_SIZE = 8
assert (calcsize(OFP_QUEUE_STATS_REQUEST_PACK_STR) ==
        OFP_QUEUE_STATS_REQUEST_SIZE)
# struct ofp_queue_stats
OFP_QUEUE_STATS_PACK_STR = '!IIQQQII'
OFP_QUEUE_STATS_SIZE = 40
assert calcsize(OFP_QUEUE_STATS_PACK_STR) == OFP_QUEUE_STATS_SIZE
# struct ofp_group_stats_request
OFP_GROUP_STATS_REQUEST_PACK_STR = '!I4x'
OFP_GROUP_STATS_REQUEST_SIZE = 8
assert (calcsize(OFP_GROUP_STATS_REQUEST_PACK_STR) ==
        OFP_GROUP_STATS_REQUEST_SIZE)
# struct ofp_group_stats
OFP_GROUP_STATS_PACK_STR = '!H2xII4xQQII'
OFP_GROUP_STATS_SIZE = 40
assert calcsize(OFP_GROUP_STATS_PACK_STR) == OFP_GROUP_STATS_SIZE
# struct ofp_bucket_counter
OFP_BUCKET_COUNTER_PACK_STR = '!QQ'
OFP_BUCKET_COUNTER_SIZE = 16
assert calcsize(OFP_BUCKET_COUNTER_PACK_STR) == OFP_BUCKET_COUNTER_SIZE
# struct ofp_group_desc
OFP_GROUP_DESC_PACK_STR = '!HBxI'
OFP_GROUP_DESC_SIZE = 8
assert calcsize(OFP_GROUP_DESC_PACK_STR) == OFP_GROUP_DESC_SIZE
# struct ofp_group_desc_stats
# Alias of ofp_group_desc kept under the older name.
OFP_GROUP_DESC_STATS_PACK_STR = OFP_GROUP_DESC_PACK_STR
OFP_GROUP_DESC_STATS_SIZE = OFP_GROUP_DESC_SIZE
assert calcsize(OFP_GROUP_DESC_STATS_PACK_STR) == OFP_GROUP_DESC_STATS_SIZE
# struct ofp_group_features
OFP_GROUP_FEATURES_PACK_STR = '!II4I4I'
OFP_GROUP_FEATURES_SIZE = 40
assert calcsize(OFP_GROUP_FEATURES_PACK_STR) == OFP_GROUP_FEATURES_SIZE
# enum ofp_group_capabilities
OFPGFC_SELECT_WEIGHT = 1 << 0    # Support weight for select groups.
OFPGFC_SELECT_LIVENESS = 1 << 1    # Support liveness for select groups.
OFPGFC_CHAINING = 1 << 2    # Support chaining groups.
OFPGFC_CHAINING_CHECKS = 1 << 3    # Check chaining for loops and delete.
# struct ofp_meter_multipart_request
OFP_METER_MULTIPART_REQUEST_PACK_STR = '!I4x'
OFP_METER_MULTIPART_REQUEST_SIZE = 8
assert (calcsize(OFP_METER_MULTIPART_REQUEST_PACK_STR) ==
        OFP_METER_MULTIPART_REQUEST_SIZE)
# struct ofp_meter_stats
OFP_METER_STATS_PACK_STR = '!IH6xIQQII'
OFP_METER_STATS_SIZE = 40
assert calcsize(OFP_METER_STATS_PACK_STR) == OFP_METER_STATS_SIZE
# struct ofp_meter_band_stats
OFP_METER_BAND_STATS_PACK_STR = '!QQ'
OFP_METER_BAND_STATS_SIZE = 16
assert (calcsize(OFP_METER_BAND_STATS_PACK_STR) ==
        OFP_METER_BAND_STATS_SIZE)
# struct ofp_meter_config
OFP_METER_CONFIG_PACK_STR = '!HHI'
OFP_METER_CONFIG_SIZE = 8
assert calcsize(OFP_METER_CONFIG_PACK_STR) == OFP_METER_CONFIG_SIZE
# struct ofp_meter_features
OFP_METER_FEATURES_PACK_STR = '!IIIBB2x'
OFP_METER_FEATURES_SIZE = 16
assert (calcsize(OFP_METER_FEATURES_PACK_STR) ==
        OFP_METER_FEATURES_SIZE)
# struct ofp_experimenter_multipart_header
OFP_EXPERIMENTER_MULTIPART_HEADER_PACK_STR = '!II'
OFP_EXPERIMENTER_MULTIPART_HEADER_SIZE = 8
assert (calcsize(OFP_EXPERIMENTER_MULTIPART_HEADER_PACK_STR) ==
        OFP_EXPERIMENTER_MULTIPART_HEADER_SIZE)
# struct ofp_queue_get_config_request
OFP_QUEUE_GET_CONFIG_REQUEST_PACK_STR = '!I4x'
OFP_QUEUE_GET_CONFIG_REQUEST_SIZE = 16
assert (calcsize(OFP_QUEUE_GET_CONFIG_REQUEST_PACK_STR) +
        OFP_HEADER_SIZE) == OFP_QUEUE_GET_CONFIG_REQUEST_SIZE
# struct ofp_queue_get_config_reply
OFP_QUEUE_GET_CONFIG_REPLY_PACK_STR = '!I4x'
OFP_QUEUE_GET_CONFIG_REPLY_SIZE = 16
assert (calcsize(OFP_QUEUE_GET_CONFIG_REPLY_PACK_STR) +
        OFP_HEADER_SIZE) == OFP_QUEUE_GET_CONFIG_REPLY_SIZE
# struct ofp_packet_out
OFP_PACKET_OUT_PACK_STR = '!IIH6x'
OFP_PACKET_OUT_SIZE = 24
assert (calcsize(OFP_PACKET_OUT_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_PACKET_OUT_SIZE)
# struct ofp_role_request
OFP_ROLE_REQUEST_PACK_STR = '!I4xQ'
OFP_ROLE_REQUEST_SIZE = 24
assert (calcsize(OFP_ROLE_REQUEST_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_ROLE_REQUEST_SIZE)
# enum ofp_controller_role
OFPCR_ROLE_NOCHANGE = 0    # Don't change current role.
OFPCR_ROLE_EQUAL = 1    # Default role, full access.
OFPCR_ROLE_MASTER = 2    # Full access, at most one master.
OFPCR_ROLE_SLAVE = 3    # Read-only access.
# struct ofp_async_config
OFP_ASYNC_CONFIG_PACK_STR = '!2I2I2I'
OFP_ASYNC_CONFIG_SIZE = 32
assert (calcsize(OFP_ASYNC_CONFIG_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_ASYNC_CONFIG_SIZE)
# struct ofp_packet_in
# NOTE: the size assert also includes OFP_MATCH_SIZE because the message
# carries an ofp_match after the fixed fields.
OFP_PACKET_IN_PACK_STR = '!IHBBQ'
OFP_PACKET_IN_SIZE = 32
assert (calcsize(OFP_PACKET_IN_PACK_STR) + OFP_MATCH_SIZE + OFP_HEADER_SIZE ==
        OFP_PACKET_IN_SIZE)
# enum ofp_packet_in_reason
OFPR_NO_MATCH = 0    # No matching flow.
OFPR_ACTION = 1    # Action explicitly output to controller.
OFPR_INVALID_TTL = 2    # Packet has invalid TTL.
# struct ofp_flow_removed
_OFP_FLOW_REMOVED_PACK_STR0 = 'QHBBIIHHQQ'
OFP_FLOW_REMOVED_PACK_STR = '!' + _OFP_FLOW_REMOVED_PACK_STR0 + \
    _OFP_MATCH_PACK_STR
OFP_FLOW_REMOVED_PACK_STR0 = '!' + _OFP_FLOW_REMOVED_PACK_STR0
OFP_FLOW_REMOVED_SIZE = 56
assert (calcsize(OFP_FLOW_REMOVED_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_FLOW_REMOVED_SIZE)
# enum ofp_flow_removed_reason
OFPRR_IDLE_TIMEOUT = 0    # Flow idle time exceeded idle_timeout.
OFPRR_HARD_TIMEOUT = 1    # Time exceeded hard_timeout.
OFPRR_DELETE = 2    # Evicted by a DELETE flow mod.
OFPRR_GROUP_DELETE = 3    # Group was removed.
# struct ofp_port_status
OFP_PORT_STATUS_PACK_STR = '!B7x' + _OFP_PORT_PACK_STR
OFP_PORT_STATUS_DESC_OFFSET = OFP_HEADER_SIZE + 8
OFP_PORT_STATUS_SIZE = 80
assert (calcsize(OFP_PORT_STATUS_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_PORT_STATUS_SIZE)
# enum ofp_port_reason
OFPPR_ADD = 0    # The port was added.
OFPPR_DELETE = 1    # The port was removed.
OFPPR_MODIFY = 2    # Some attribute of the port has changed.
# OFPMP_EXPERIMENTER
# struct onf_experimenter_multipart_msg
# (experimenter == ONF_EXPERIMENTER_ID)
ONFMP_FLOW_MONITOR = 1870
# EXT-187 seems to have a lot of flaws.
# XXX the spec mentions ONFST_FLOW_MONITOR in some places.
# we assume it's same as ONFMP_FLOW_MONITOR.
# XXX the spec uses OFPP_NONE.  we assume it means OFPP_ANY.
# XXX onf_flow_update_full.length is commented to be 24.
# but it needs to tell the actual length of instructions.
# we assume it's variable.
# XXX the spec seems confused between instructions and actions
# for onf_flow_update_full/ONFFMF_ACTIONS.  we assume they all
# are instructions.
# XXX the spec does not define payload structures for any of
# ONFT_FLOW_MONITOR_CANCEL, ONFT_FLOW_MONITOR_PAUSED, or
# ONFT_FLOW_MONITOR_RESUMED.  we assume they are same as NX.
# according to NX spec (OVS nicira-ext.h and ofp-msg.h):
# NXT_FLOW_MONITOR_CANCEL: a single u32 'id'.
# NXT_FLOW_MONITOR_PAUSED/RESUMED: empty payload
# (OF1.4 uses something different; OFPFMC_DELETE for CANCEL and
# OFPFME_ for PAUSED/RESUMED.)
# XXX onf_flow_monitor_request and onf_flow_update_full use
# match_len + oxm_fields instead of ofp_match.  this pointless
# diverge from OF1.4 looks like a botch when updating from OF1.0.
# XXX the spec mentions "the current implementation of Open vSwitch"
# but, as of writing this, it doesn't have this extension implemented
# at all.  we assume that it is about OF1.0 NX.
# XXX the spec mentions nx13_flow_monitor_request but i couldn't find
# it in OVS nicira-ext.h.
# onf_flow_monitor_request
# ONFMP_FLOW_MONITOR request's body is zero or more instances of this.
# id, flags, match_len, out_put, table_id, zeros[3]
ONF_FLOW_MONITOR_REQUEST_PACK_STR = '!IHHIB3x'
ONF_FLOW_MONITOR_REQUEST_SIZE = 16
assert (calcsize(ONF_FLOW_MONITOR_REQUEST_PACK_STR) ==
        ONF_FLOW_MONITOR_REQUEST_SIZE)
# onf_flow_monitor_request.flags
ONFFMF_INITIAL = 1 << 0
ONFFMF_ADD = 1 << 1
ONFFMF_DELETE = 1 << 2
ONFFMF_MODIFY = 1 << 3
ONFFMF_ACTIONS = 1 << 4
ONFFMF_OWN = 1 << 5
# onf_flow_update_header
# ONFMP_FLOW_MONITOR request's body is an array of this
# length, event
ONF_FLOW_UPDATE_HEADER_PACK_STR = '!HH'
ONF_FLOW_UPDATE_HEADER_SIZE = 4
assert (calcsize(ONF_FLOW_UPDATE_HEADER_PACK_STR) ==
        ONF_FLOW_UPDATE_HEADER_SIZE)
# onf_flow_update_full, excluding onf_flow_update_header
# reason, priority, idle_timeout, hard_timeout, match_len, table_id,
# pad, cookie
ONF_FLOW_UPDATE_FULL_PACK_STR = '!HHHHHBxQ'
ONF_FLOW_UPDATE_FULL_SIZE = 24 - ONF_FLOW_UPDATE_HEADER_SIZE
assert (calcsize(ONF_FLOW_UPDATE_FULL_PACK_STR) ==
        ONF_FLOW_UPDATE_FULL_SIZE)
# onf_flow_update_abbrev, excluding onf_flow_update_header
# xid
ONF_FLOW_UPDATE_ABBREV_PACK_STR = '!I'
ONF_FLOW_UPDATE_ABBREV_SIZE = 8 - ONF_FLOW_UPDATE_HEADER_SIZE
assert (calcsize(ONF_FLOW_UPDATE_ABBREV_PACK_STR) ==
        ONF_FLOW_UPDATE_ABBREV_SIZE)
# enum onf_flow_update_event
ONFFME_ADDED = 0    # some variations in the spec; ONFMFE_ADD, ONFFME_ADD
ONFFME_DELETED = 1
ONFFME_MODIFIED = 2
ONFFME_ABBREV = 3
# enum onf_flow_monitor_msg_type
ONFT_FLOW_MONITOR_CANCEL = 1870    # controller -> switch
ONFT_FLOW_MONITOR_PAUSED = 1871    # switch -> controller
ONFT_FLOW_MONITOR_RESUMED = 1872    # switch -> controller
# struct ofp_error_msg
OFP_ERROR_MSG_PACK_STR = '!HH'
OFP_ERROR_MSG_SIZE = 12
assert (calcsize(OFP_ERROR_MSG_PACK_STR) + OFP_HEADER_SIZE ==
OFP_ERROR_MSG_SIZE)
# enum ofp_error_type
OFPET_HELLO_FAILED = 0 # Hello protocol failed.
OFPET_BAD_REQUEST = 1 # Request was not understood.
OFPET_BAD_ACTION = 2 # Error in action description.
OFPET_BAD_INSTRUCTION = 3 # Error in instruction list.
OFPET_BAD_MATCH = 4 # Error in match.
OFPET_FLOW_MOD_FAILED = 5 # Problem modifying flow entry.
OFPET_GROUP_MOD_FAILED = 6 # Problem modifying group entry.
OFPET_PORT_MOD_FAILED = 7 # OFPT_PORT_MOD failed.
OFPET_TABLE_MOD_FAILED = 8 # Table mod request failed.
OFPET_QUEUE_OP_FAILED = 9 # Queue operation failed.
OFPET_SWITCH_CONFIG_FAILED = 10 # Switch config request failed.
OFPET_ROLE_REQUEST_FAILED = 11 # Controller Role request failed.
OFPET_METER_MOD_FAILED = 12 # Error in meter.
OFPET_TABLE_FEATURES_FAILED = 13 # Setting table features failed.
OFPET_EXPERIMENTER = 0xffff # Experimenter error messages.
# enum ofp_hello_failed_code
OFPHFC_INCOMPATIBLE = 0 # No compatible version.
OFPHFC_EPERM = 1 # Permissions error.
# enum ofp_bad_request_code
OFPBRC_BAD_VERSION = 0 # ofp_header.version not supported.
OFPBRC_BAD_TYPE = 1 # ofp_header.type not supported.
OFPBRC_BAD_MULTIPART = 2 # ofp_multipart_request.type not
# supported.
OFPBRC_BAD_EXPERIMENTER = 3 # Experimenter id not supported
# (in ofp_experimenter_header
# or ofp_multipart_request or
# ofp_multipart_reply).
OFPBRC_BAD_EXP_TYPE = 4 # Experimenter type not supported.
OFPBRC_EPERM = 5 # Permissions error.
OFPBRC_BAD_LEN = 6 # Wrong request length for type.
OFPBRC_BUFFER_EMPTY = 7 # Specified buffer has already been
# used.
OFPBRC_BUFFER_UNKNOWN = 8 # Specified buffer does not exist.
OFPBRC_BAD_TABLE_ID = 9 # Specified table-id invalid or does
# not exist.
OFPBRC_IS_SLAVE = 10 # Denied because controller is slave.
OFPBRC_BAD_PORT = 11 # Invalid port.
OFPBRC_BAD_PACKET = 12 # Invalid packet in packet-out
OFPBRC_MULTIPART_BUFFER_OVERFLOW = 13 # ofp_multipart_request
# overflowed the assigned buffer.
# enum ofp_bad_action_code
OFPBAC_BAD_TYPE = 0 # Unknown action type.
OFPBAC_BAD_LEN = 1 # Length problem in actions.
OFPBAC_BAD_EXPERIMENTER = 2 # Unknown experimenter id specified.
OFPBAC_BAD_EXP_TYPE = 3 # Unknown action type for experimenter id.
OFPBAC_BAD_OUT_PORT = 4 # Problem validating output action.
OFPBAC_BAD_ARGUMENT = 5 # Bad action argument.
OFPBAC_EPERM = 6 # Permissions error.
OFPBAC_TOO_MANY = 7 # Can't handle this many actions.
OFPBAC_BAD_QUEUE = 8 # Problem validating output queue.
OFPBAC_BAD_OUT_GROUP = 9 # Invalid group id in forward action.
OFPBAC_MATCH_INCONSISTENT = 10 # Action can't apply for this match,
# or Set-Field missing prerequisite.
OFPBAC_UNSUPPORTED_ORDER = 11 # Action order is unsupported for
# the action list in an Apply-Actions
# instruction
OFPBAC_BAD_TAG = 12 # Actions uses an unsupported tag/encap.
OFPBAC_BAD_SET_TYPE = 13 # Unsupported type in SET_FIELD action.
OFPBAC_BAD_SET_LEN = 14 # Length problem in SET_FIELD action.
OFPBAC_BAD_SET_ARGUMENT = 15 # Bad argument in SET_FIELD action.
# enum ofp_bad_instruction_code
OFPBIC_UNKNOWN_INST = 0 # Unknown instruction.
OFPBIC_UNSUP_INST = 1 # Switch or table does not support
# the instruction.
OFPBIC_BAD_TABLE_ID = 2 # Invalid Table-Id specified
OFPBIC_UNSUP_METADATA = 3 # Metadata value unsupported by datapath.
OFPBIC_UNSUP_METADATA_MASK = 4 # Metadata mask value unsupported by
# datapath.
OFPBIC_BAD_EXPERIMENTER = 5 # Unknown experimenter id specified.
OFPBIC_BAD_EXP_TYPE = 6 # Unknown instruction for experimenter id.
OFPBIC_BAD_LEN = 7 # Length problem in instructions.
OFPBIC_EPERM = 8 # Permissions error.
# enum ofp_bad_match_code
OFPBMC_BAD_TYPE = 0 # Unsupported match type specified by
# the match.
OFPBMC_BAD_LEN = 1 # Length problem in match.
OFPBMC_BAD_TAG = 2 # Match uses an unsupported tag/encap.
OFPBMC_BAD_DL_ADDR_MASK = 3 # Unsupported datalink addr mask -
# switch does not support arbitrary
# datalink address mask.
OFPBMC_BAD_NW_ADDR_MASK = 4 # Unsupported network addr mask -
# switch does not support arbitrary
# network address mask.
OFPBMC_BAD_WILDCARDS = 5 # Unsupported combination of fields
# masked or omitted in the match.
OFPBMC_BAD_FIELD = 6 # Unsupported field type in the match.
OFPBMC_BAD_VALUE = 7 # Unsupported value in a match field.
OFPBMC_BAD_MASK = 8 # Unsupported mask specified in the
# match.
OFPBMC_BAD_PREREQ = 9 # A prerequisite was not met.
OFPBMC_DUP_FIELD = 10 # A field type was duplicated.
OFPBMC_EPERM = 11 # Permissions error.
# enum ofp_flow_mod_failed_code
OFPFMFC_UNKNOWN = 0 # Unspecified error.
OFPFMFC_TABLE_FULL = 1 # Flow not added because table was full.
OFPFMFC_BAD_TABLE_ID = 2 # Table does not exist
OFPFMFC_OVERLAP = 3 # Attempted to add overlapping flow
# with CHECK_OVERLAP flag set.
OFPFMFC_EPERM = 4 # Permissions error.
OFPFMFC_BAD_TIMEOUT = 5 # Flow not added because of
# unsupported idle/hard timeout.
OFPFMFC_BAD_COMMAND = 6 # Unsupported or unknown command.
OFPFMFC_BAD_FLAGS = 7 # Unsupported or unknown flags.
# enum ofp_group_mod_failed_code
OFPGMFC_GROUP_EXISTS = 0
OFPGMFC_INVALID_GROUP = 1
OFPGMFC_WEIGHT_UNSUPPORTED = 2 # Switch does not support unequal load
# sharing with select groups.
OFPGMFC_OUT_OF_GROUPS = 3 # The group table is full.
OFPGMFC_OUT_OF_BUCKETS = 4 # The maximum number of action buckets
# for a group has been exceeded.
OFPGMFC_CHAINING_UNSUPPORTED = 5 # Switch does not support groups that
# forward to groups.
OFPGMFC_WATCH_UNSUPPORTED = 6 # This group cannot watch the
# watch_port or watch_group specified.
OFPGMFC_LOOP = 7 # Group entry would cause a loop.
OFPGMFC_UNKNOWN_GROUP = 8 # Group not modified because a group
# MODIFY attempted to modify a
# non-existent group.
OFPGMFC_CHAINED_GROUP = 9 # Group not deleted because another
# group is forwarding to it.
OFPGMFC_BAD_TYPE = 10 # Unsupported or unknown group type.
OFPGMFC_BAD_COMMAND = 11 # Unsupported or unknown command.
OFPGMFC_BAD_BUCKET = 12 # Error in bucket.
OFPGMFC_BAD_WATCH = 13 # Error in watch port/group.
OFPGMFC_EPERM = 14 # Permissions error.
# enum ofp_port_mod_failed_code
OFPPMFC_BAD_PORT = 0 # Specified port does not exist.
OFPPMFC_BAD_HW_ADDR = 1 # Specified hardware address does not
# match the port number.
OFPPMFC_BAD_CONFIG = 2 # Specified config is invalid.
OFPPMFC_BAD_ADVERTISE = 3 # Specified advertise is invalid.
OFPPMFC_EPERM = 4 # Permissions error.
# enum ofp_table_mod_failed_code
OFPTMFC_BAD_TABLE = 0 # Specified table does not exist.
OFPTMFC_BAD_CONFIG = 1 # Specified config is invalid.
OFPTMFC_EPERM = 2 # Permissions error
# enum ofp_queue_op_failed_code
OFPQOFC_BAD_PORT = 0 # Invalid port (or port does not exist).
OFPQOFC_BAD_QUEUE = 1 # Queue does not exist.
OFPQOFC_EPERM = 2 # Permissions error.
# enum ofp_switch_config_failed_code
OFPSCFC_BAD_FLAGS = 0 # Specified flags is invalid.
OFPSCFC_BAD_LEN = 1 # Specified len is invalid.
OFPQCFC_EPERM = 2 # Permissions error (deprecated).
# New or updated Ryu applications shall use
# OFPSCFC_EPERM. The misspelled variable name is a
# typo found in specifications before v1.3.1 (EXT-208).
OFPSCFC_EPERM = 2 # Permissions error.
# enum ofp_role_request_failed_code
OFPRRFC_STALE = 0 # Stale Message: old generation_id.
OFPRRFC_UNSUP = 1 # Controller role change unsupported.
OFPRRFC_BAD_ROLE = 2 # Invalid role.
# enum ofp_meter_mod_failed_code
OFPMMFC_UNKNOWN = 0 # Unspecified error.
OFPMMFC_METER_EXISTS = 1 # Meter not added because a Meter ADD
# attempted to replace an existing Meter.
OFPMMFC_INVALID_METER = 2 # Meter not added because Meter specified
# is invalid.
OFPMMFC_UNKNOWN_METER = 3 # Meter not modified because a Meter
# MODIFY attempted to modify a non-existent
# Meter.
OFPMMFC_BAD_COMMAND = 4 # Unsupported or unknown command.
OFPMMFC_BAD_FLAGS = 5 # Flag configuration unsupported.
OFPMMFC_BAD_RATE = 6 # Rate unsupported.
OFPMMFC_BAD_BURST = 7 # Burst size unsupported.
OFPMMFC_BAD_BAND = 8 # Band unsupported.
OFPMMFC_BAD_BAND_VALUE = 9 # Band value unsupported.
OFPMMFC_OUT_OF_METERS = 10 # No more meters available.
OFPMMFC_OUT_OF_BANDS = 11 # The maximum number of properties
# for a meter has been exceeded.
# enum ofp_table_features_failed_code
OFPTFFC_BAD_TABLE = 0 # Specified table does not exist.
OFPTFFC_BAD_METADATA = 1 # Invalid metadata mask.
OFPTFFC_BAD_TYPE = 2 # Unknown property type.
OFPTFFC_BAD_LEN = 3 # Length problem in properties.
OFPTFFC_BAD_ARGUMENT = 4 # Unsupported property value.
OFPTFFC_EPERM = 5 # Permissions error.
# struct ofp_error_experimenter_msg
OFP_ERROR_EXPERIMENTER_MSG_PACK_STR = '!HHI'
OFP_ERROR_EXPERIMENTER_MSG_SIZE = 16
assert (calcsize(OFP_ERROR_EXPERIMENTER_MSG_PACK_STR) +
OFP_HEADER_SIZE) == OFP_ERROR_EXPERIMENTER_MSG_SIZE
# struct ofp_experimenter_header
OFP_EXPERIMENTER_HEADER_PACK_STR = '!II'
OFP_EXPERIMENTER_HEADER_SIZE = 16
assert (calcsize(OFP_EXPERIMENTER_HEADER_PACK_STR) + OFP_HEADER_SIZE
== OFP_EXPERIMENTER_HEADER_SIZE)
# exp_type values for OFPET_EXPERIMENTER (experimenter=ONF_EXPERIMENTER_ID)
ONFERR_ET_UNKNOWN = 2300
ONFERR_ET_EPERM = 2301
ONFERR_ET_BAD_ID = 2302
ONFERR_ET_BUNDLE_EXIST = 2303
ONFERR_ET_BUNDLE_CLOSED = 2304
ONFERR_ET_OUT_OF_BUNDLES = 2305
ONFERR_ET_BAD_TYPE = 2306
ONFERR_ET_BAD_FLAGS = 2307
ONFERR_ET_MSG_BAD_LEN = 2308
ONFERR_ET_MSG_BAD_XID = 2309
ONFERR_ET_MSG_UNSUP = 2310
ONFERR_ET_MSG_CONFLICT = 2311
ONFERR_ET_MSG_TOO_MANY = 2312
ONFERR_ET_FAILED = 2313
ONFERR_ET_TIMEOUT = 2314
ONFERR_ET_BUNDLE_IN_PROGRESS = 2315
ONFERR_ET_CANT_SYNC = 2320
ONFERR_ET_BAD_PRIORITY = 2360
ONFERR_ET_ASYNC_INVALUD = 2370
ONFERR_ET_ASYNC_UNSUPPORTED = 2371
ONFERR_ET_ASYNC_EPERM = 2372
ONFERR_DUP_INSTRUCTION = 2600 # the lack of _ET_ is per spec
ONFERR_ET_MPART_REQUEST_TIMEOUT = 2640
ONFERR_ET_MPART_REPLY_TIMEOUT = 2641
# struct ofp_hello
OFP_HELLO_HEADER_SIZE = 8
# struct ofp_hello_elem_header
OFP_HELLO_ELEM_HEADER_PACK_STR = '!HH'
OFP_HELLO_ELEM_HEADER_SIZE = 4
assert (calcsize(OFP_HELLO_ELEM_HEADER_PACK_STR) == OFP_HELLO_ELEM_HEADER_SIZE)
# enum ofp_hello_elem_type
OFPHET_VERSIONBITMAP = 1
# struct ofp_hello_elem_versionbitmap
OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_PACK_STR = '!HH'
OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_SIZE = 4
assert (calcsize(OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_PACK_STR) ==
OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_SIZE)
# OXM
def _oxm_tlv_header(class_, field, hasmask, length):
return (class_ << 16) | (field << 9) | (hasmask << 8) | length
def oxm_tlv_header(field, length):
    """Build the OXM header for an unmasked openflow_basic match field
    whose payload is *length* bytes."""
    return _oxm_tlv_header(OFPXMC_OPENFLOW_BASIC, field, 0, length)
def oxm_tlv_header_w(field, length):
    """Build the OXM header for a masked openflow_basic match field;
    the payload length doubles because it carries value plus mask."""
    return _oxm_tlv_header(OFPXMC_OPENFLOW_BASIC, field, 1, length * 2)
def oxm_tlv_header_extract_hasmask(header):
    """Return the hasmask flag (bit 8) of an OXM TLV header as 0 or 1."""
    return 1 if header & 0x100 else 0
def oxm_tlv_header_extract_length(header):
    """Return the value length (in bytes) encoded in an OXM TLV header.

    When the hasmask bit (bit 8) is set, the TLV payload holds value and
    mask back to back, so the value length is half the payload length.
    """
    payload = header & 0xff
    if (header >> 8) & 1:
        return payload // 2
    return payload
# OXM match-field registry: one descriptor (name, field number, payload
# codec) per OpenFlow-basic field, plus ONF/Nicira experimenter extensions.
# oxm_fields.generate() below turns these into module-level OXM helpers.
oxm_types = [
    oxm_fields.OpenFlowBasic('in_port', 0, type_desc.Int4),
    oxm_fields.OpenFlowBasic('in_phy_port', 1, type_desc.Int4),
    oxm_fields.OpenFlowBasic('metadata', 2, type_desc.Int8),
    oxm_fields.OpenFlowBasic('eth_dst', 3, type_desc.MacAddr),
    oxm_fields.OpenFlowBasic('eth_src', 4, type_desc.MacAddr),
    oxm_fields.OpenFlowBasic('eth_type', 5, type_desc.Int2),
    oxm_fields.OpenFlowBasic('vlan_vid', 6, type_desc.Int2),
    oxm_fields.OpenFlowBasic('vlan_pcp', 7, type_desc.Int1),
    oxm_fields.OpenFlowBasic('ip_dscp', 8, type_desc.Int1),
    oxm_fields.OpenFlowBasic('ip_ecn', 9, type_desc.Int1),
    oxm_fields.OpenFlowBasic('ip_proto', 10, type_desc.Int1),
    oxm_fields.OpenFlowBasic('ipv4_src', 11, type_desc.IPv4Addr),
    oxm_fields.OpenFlowBasic('ipv4_dst', 12, type_desc.IPv4Addr),
    oxm_fields.OpenFlowBasic('tcp_src', 13, type_desc.Int2),
    oxm_fields.OpenFlowBasic('tcp_dst', 14, type_desc.Int2),
    oxm_fields.OpenFlowBasic('udp_src', 15, type_desc.Int2),
    oxm_fields.OpenFlowBasic('udp_dst', 16, type_desc.Int2),
    oxm_fields.OpenFlowBasic('sctp_src', 17, type_desc.Int2),
    oxm_fields.OpenFlowBasic('sctp_dst', 18, type_desc.Int2),
    oxm_fields.OpenFlowBasic('icmpv4_type', 19, type_desc.Int1),
    oxm_fields.OpenFlowBasic('icmpv4_code', 20, type_desc.Int1),
    oxm_fields.OpenFlowBasic('arp_op', 21, type_desc.Int2),
    oxm_fields.OpenFlowBasic('arp_spa', 22, type_desc.IPv4Addr),
    oxm_fields.OpenFlowBasic('arp_tpa', 23, type_desc.IPv4Addr),
    oxm_fields.OpenFlowBasic('arp_sha', 24, type_desc.MacAddr),
    oxm_fields.OpenFlowBasic('arp_tha', 25, type_desc.MacAddr),
    oxm_fields.OpenFlowBasic('ipv6_src', 26, type_desc.IPv6Addr),
    oxm_fields.OpenFlowBasic('ipv6_dst', 27, type_desc.IPv6Addr),
    oxm_fields.OpenFlowBasic('ipv6_flabel', 28, type_desc.Int4),
    oxm_fields.OpenFlowBasic('icmpv6_type', 29, type_desc.Int1),
    oxm_fields.OpenFlowBasic('icmpv6_code', 30, type_desc.Int1),
    oxm_fields.OpenFlowBasic('ipv6_nd_target', 31, type_desc.IPv6Addr),
    oxm_fields.OpenFlowBasic('ipv6_nd_sll', 32, type_desc.MacAddr),
    oxm_fields.OpenFlowBasic('ipv6_nd_tll', 33, type_desc.MacAddr),
    oxm_fields.OpenFlowBasic('mpls_label', 34, type_desc.Int4),
    oxm_fields.OpenFlowBasic('mpls_tc', 35, type_desc.Int1),
    oxm_fields.OpenFlowBasic('mpls_bos', 36, type_desc.Int1),
    oxm_fields.OpenFlowBasic('pbb_isid', 37, type_desc.Int3),
    oxm_fields.OpenFlowBasic('tunnel_id', 38, type_desc.Int8),
    oxm_fields.OpenFlowBasic('ipv6_exthdr', 39, type_desc.Int2),
    # EXT-256 Old version of ONF Extension
    oxm_fields.OldONFExperimenter('pbb_uca', 2560, type_desc.Int1),
    # EXT-109 TCP flags match field Extension
    oxm_fields.ONFExperimenter('tcp_flags', 42, type_desc.Int2),
    # EXT-233 Output match Extension
    # NOTE(yamamoto): The spec says uint64_t but I assume it's an error.
    oxm_fields.ONFExperimenter('actset_output', 43, type_desc.Int4),
] + nx_match.oxm_types
# Materialize OXM accessors for the registry above into this module.
oxm_fields.generate(__name__)
# Note: struct ofp_prop_experimenter is specific to this implementation.
# It does not have a corresponding structure in the specification.
# This structure defines common structure for ofp_*_prop_experimenter.
# struct ofp_prop_experimenter
OFP_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_PROP_EXPERIMENTER_PACK_STR) ==
        OFP_PROP_EXPERIMENTER_SIZE)
# generate utility methods
ofproto_utils.generate(__name__)
# define constants
OFP_VERSION = 0x04   # wire protocol version for OpenFlow 1.3
OFP_TCP_PORT = 6633  # historical default controller port
MAX_XID = 0xffffffff
OFP_NO_BUFFER = 0xffffffff
|
{
"content_hash": "a2cc5aa67614f601f1b6716c5ac54e2d",
"timestamp": "",
"source": "github",
"line_count": 1204,
"max_line_length": 79,
"avg_line_length": 40.958471760797345,
"alnum_prop": 0.6656730340268484,
"repo_name": "alanquillin/ryu",
"id": "6b31ec2a4a0c054197ea8c7f65e1e813c70150a5",
"size": "49991",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "ryu/ofproto/ofproto_v1_3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28540"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "873335"
},
{
"name": "Gnuplot",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "5302388"
},
{
"name": "Shell",
"bytes": "14253"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
import six
from chainer import computational_graph as c
from chainer import function
from chainer import testing
from chainer import variable
class MockFunction(function.Function):
    """Stub chainer Function with a fixed arity, used to build graphs.

    forward_cpu validates the input arity and emits ``n_out`` float32 zero
    arrays of shape (1, 2); backward_cpu validates both arities and emits
    one zero gradient per input.
    """

    def __init__(self, n_in, n_out):
        self.n_in = n_in    # expected number of input arrays
        self.n_out = n_out  # number of output arrays to produce

    def forward_cpu(self, xs):
        assert len(xs) == self.n_in
        return tuple(np.zeros((1, 2)).astype(np.float32)
                     for _ in six.moves.range(self.n_out))

    def backward_cpu(self, xs, gys):
        assert len(xs) == self.n_in
        assert len(gys) == self.n_out
        # BUG FIX: np.zeros_like(xs) built a single array out of the whole
        # input *tuple* (shape (n_in, 1, 2)), so every returned gradient
        # had the wrong shape.  Return a zero gradient per input instead.
        return tuple(np.zeros_like(x).astype(np.float32) for x in xs)
def mock_function(xs, n_out):
    """Apply a fresh MockFunction to the variables *xs*, yielding n_out outputs."""
    return MockFunction(len(xs), n_out)(*xs)
def _check(self, outputs, node_num, edge_num):
    """Build a computational graph from *outputs* and assert its size.

    *self* is the calling TestCase, so failures report through unittest.
    """
    graph = c.build_computational_graph(outputs)
    self.assertEqual(len(graph.nodes), node_num)
    self.assertEqual(len(graph.edges), edge_num)
class TestGraphBuilder(unittest.TestCase):
    """Node/edge counts over a linear chain x-f-y-g-z."""
    # x-f-y-g-z
    def setUp(self):
        self.x = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.y = mock_function((self.x,), 1)
        self.z = mock_function((self.y,), 1)
    # x
    def test_head_variable(self):
        _check(self, (self.x, ), 1, 0)
    def test_intermediate_variable(self):
        # x-f-y
        _check(self, (self.y, ), 3, 2)
    def test_tail_variable(self):
        # x-f-y-g-z
        _check(self, (self.z, ), 5, 4)
    def test_multiple_outputs(self):
        _check(self, (self.x, self.y), 3, 2)
    def test_multiple_outputs2(self):
        _check(self, (self.x, self.z), 5, 4)
    def test_multiple_outputs3(self):
        _check(self, (self.y, self.z), 5, 4)
    def test_multiple_outputs4(self):
        _check(self, (self.x, self.y, self.z), 5, 4)
class TestGraphBuilder2(unittest.TestCase):
    """Fan-out: one variable feeding two independent functions."""
    # x-f-y1
    #  \
    #   g-y2
    def setUp(self):
        self.x = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.y1 = mock_function((self.x,), 1)
        self.y2 = mock_function((self.x,), 1)
    def test_head_node(self):
        _check(self, (self.x, ), 1, 0)
    def test_tail_node(self):
        _check(self, (self.y1, ), 3, 2)
    def test_tail_node2(self):
        _check(self, (self.y2, ), 3, 2)
    def test_multiple_tails(self):
        _check(self, (self.y1, self.y2), 5, 4)
class TestGraphBuilder3(unittest.TestCase):
    """One function producing two outputs from a single input."""
    # x-f-y1
    #    \
    #     y2
    def setUp(self):
        self.x = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.y1, self.y2 = mock_function((self.x,), 2)
    def test_head_node(self):
        _check(self, (self.x, ), 1, 0)
    def test_tail_node(self):
        _check(self, (self.y1, ), 3, 2)
    def test_tail_node2(self):
        _check(self, (self.y2, ), 3, 2)
    def test_multiple_tails(self):
        # y1 and y2 share x and f, so only 4 nodes / 3 edges in total.
        _check(self, (self.y1, self.y2), 4, 3)
class TestGraphBuilder4(unittest.TestCase):
    """Fan-in: one function consuming two input variables."""
    # x1-f-y
    #   /
    # x2
    def setUp(self):
        self.x1 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.x2 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.y = mock_function((self.x1, self.x2), 1)
    def test_head_node1(self):
        _check(self, (self.x1, ), 1, 0)
    def test_head_node2(self):
        _check(self, (self.x2, ), 1, 0)
    def test_multiple_heads(self):
        _check(self, (self.x1, self.x2), 2, 0)
    def test_tail_node(self):
        _check(self, (self.y, ), 4, 3)
class TestGraphBuilder5(unittest.TestCase):
    """Exact node/edge sets for a unary arithmetic op (y = 2 * x)."""
    def setUp(self):
        self.x = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.y = 2 * self.x
        self.f = self.y.creator_node
        self.g = c.build_computational_graph((self.y,))
    def test_edges(self):
        self.assertEqual(len(self.g.edges), 2)
        self.assertSetEqual(set(self.g.edges),
                            {(self.x.node, self.f), (self.f, self.y.node)})
    def test_nodes(self):
        self.assertEqual(len(self.g.nodes), 3)
        self.assertSetEqual(set(self.g.nodes),
                            {self.x.node, self.f, self.y.node})
class TestGraphBuilder6(unittest.TestCase):
    """Exact node/edge sets for a binary arithmetic op (y = x1 + x2)."""
    def setUp(self):
        self.x1 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.x2 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.y = self.x1 + self.x2
        self.f = self.y.creator_node
        self.g = c.build_computational_graph((self.y,))
    def test_edges(self):
        self.assertEqual(len(self.g.edges), 3)
        self.assertSetEqual(set(self.g.edges),
                            {(self.x1.node, self.f),
                             (self.x2.node, self.f),
                             (self.f, self.y.node)})
    def test_nodes(self):
        self.assertEqual(len(self.g.nodes), 4)
        self.assertSetEqual(set(self.g.nodes),
                            {self.x1.node, self.x2.node, self.f, self.y.node})
class TestGraphBuilder7(unittest.TestCase):
    """Counts for a nested expression; each op adds a function node and output."""
    def setUp(self):
        self.x1 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.x2 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.x3 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.y = 0.3 * (self.x1 + self.x2) + self.x3
    def test_tail_node(self):
        # 3 ops -> 3 function nodes + 3 intermediates + 3 leaves = 9 nodes.
        _check(self, (self.y, ), 9, 8)
class TestGraphBuilderStylization(unittest.TestCase):
    """variable_style/function_style dicts are emitted as dot attributes."""
    def setUp(self):
        self.x1 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.x2 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.y = self.x1 + self.x2
        self.f = self.y.creator_node
        self.variable_style = {'label': 'variable_0', 'shape': 'octagon',
                               'style': 'filled', 'fillcolor': '#E0E0E0'}
        self.function_style = {'label': 'function_0', 'shape': 'record',
                               'style': 'filled', 'fillcolor': '#6495ED'}
        self.g = c.build_computational_graph(
            (self.y,), variable_style=self.variable_style,
            function_style=self.function_style)
    def test_dotfile_content(self):
        dotfile_content = self.g.dump()
        for style in [self.variable_style, self.function_style]:
            for key, value in style.items():
                self.assertIn('{0}="{1}"'.format(key, value), dotfile_content)
    def test_unsupported_format(self):
        # Only the dot format is implemented; anything else must raise.
        with self.assertRaises(NotImplementedError):
            self.g.dump('graphml')
class TestGraphBuilderShowName(unittest.TestCase):
    """show_name toggles whether variable names appear in node labels."""
    def setUp(self):
        self.x1 = variable.Variable(
            np.zeros((1, 2)).astype(np.float32), name='x1')
        self.x2 = variable.Variable(
            np.zeros((1, 2)).astype(np.float32), name='x2')
        self.y = self.x1 + self.x2
        self.y.name = 'y'
    def test_show_name(self):
        g = c.build_computational_graph((self.x1, self.x2, self.y))
        dotfile_content = g.dump()
        for var in [self.x1, self.x2, self.y]:
            self.assertIn('label="%s:' % var.name, dotfile_content)
    def test_dont_show_name(self):
        g = c.build_computational_graph(
            (self.x1, self.x2, self.y), show_name=False)
        dotfile_content = g.dump()
        for var in [self.x1, self.x2, self.y]:
            self.assertNotIn('label="%s:' % var.name, dotfile_content)
class TestGraphBuilderRankdir(unittest.TestCase):
    """rankdir= option is forwarded into the dot output and validated."""

    def setUp(self):
        self.x1 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.x2 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
        self.y = self.x1 + self.x2

    # FIX: renamed from the misspelled test_randir / test_randir_invalid.
    # unittest discovers any method with the 'test' prefix, so nothing breaks.
    def test_rankdir(self):
        for rankdir in ['TB', 'BT', 'LR', 'RL']:
            g = c.build_computational_graph((self.y,), rankdir=rankdir)
            self.assertIn('rankdir=%s' % rankdir, g.dump())

    def test_rankdir_invalid(self):
        self.assertRaises(ValueError,
                          c.build_computational_graph, (self.y,), rankdir='TL')
class TestGraphBuilderRemoveVariable(unittest.TestCase):
    """remove_variable=True keeps function nodes but drops variable nodes."""
    def setUp(self):
        self.x1 = variable.Variable(np.zeros((1, 2)).astype('f'))
        self.x2 = variable.Variable(np.zeros((1, 2)).astype('f'))
        self.y = self.x1 + self.x2
        self.f = self.y.creator_node
        self.g = c.build_computational_graph((self.y,), remove_variable=True)
    def test_remove_variable(self):
        # Variable nodes are identified by id() in the dump; none may remain.
        self.assertIn(self.f.label, self.g.dump())
        self.assertNotIn(str(id(self.x1)), self.g.dump())
        self.assertNotIn(str(id(self.x2)), self.g.dump())
testing.run_module(__name__, __file__)
|
{
"content_hash": "e54237b6c14efae6c591d90c815aa2c2",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 79,
"avg_line_length": 31.297101449275363,
"alnum_prop": 0.576638110673767,
"repo_name": "ronekko/chainer",
"id": "71cfca4f2c64e41d3dbb92af1f871c25021b211b",
"size": "8638",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/test_computational_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3722585"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
"""Constants and static functions to support protocol buffer wire format."""
__author__ = 'robinson@google.com (Will Robinson)'
import struct
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf.internal.utils import string_to_bytes
TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag.
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7
# These numbers identify the wire type of a protocol buffer value.
# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
# tag-and-type to store one of these WIRETYPE_* constants.
# These values must match WireType enum in google/protobuf/wire_format.h.
WIRETYPE_VARINT = 0
WIRETYPE_FIXED64 = 1
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_START_GROUP = 3
WIRETYPE_END_GROUP = 4
WIRETYPE_FIXED32 = 5
_WIRETYPE_MAX = 5
# Bounds for various integer types.
INT32_MAX = int((1 << 31) - 1)
INT32_MIN = int(-(1 << 31))
UINT32_MAX = (1 << 32) - 1
INT64_MAX = (1 << 63) - 1
INT64_MIN = -(1 << 63)
UINT64_MAX = (1 << 64) - 1
# "struct" format strings that will encode/decode the specified formats.
FORMAT_UINT32_LITTLE_ENDIAN = b'<I'
FORMAT_UINT64_LITTLE_ENDIAN = b'<Q'
FORMAT_FLOAT_LITTLE_ENDIAN = b'<f'
FORMAT_DOUBLE_LITTLE_ENDIAN = b'<d'
# We'll have to provide alternate implementations of AppendLittleEndian*() on
# any architectures where these checks fail.
if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
raise AssertionError('Format "I" is not a 32-bit number.')
if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
raise AssertionError('Format "Q" is not a 64-bit number.')
def PackTag(field_number, wire_type):
  """Encode field number and wire type into one unsigned 32-bit proto tag.

  Args:
    field_number: Expected to be an integer in the range [1, 1 << 29)
    wire_type: One of the WIRETYPE_* constants.

  Raises:
    message.EncodeError: if wire_type is outside the valid range.
  """
  if wire_type < 0 or wire_type > _WIRETYPE_MAX:
    raise message.EncodeError('Unknown wire type: %d' % wire_type)
  return (field_number << TAG_TYPE_BITS) | wire_type
def UnpackTag(tag):
  """The inverse of PackTag(): split an unsigned 32-bit tag into a
  (field_number, wire_type) tuple."""
  field_number = tag >> TAG_TYPE_BITS
  wire_type = tag & TAG_TYPE_MASK
  return field_number, wire_type
def ZigZagEncode(value):
  """ZigZag Transform: map signed integers onto unsigned ones so that
  values of small magnitude stay small under varint encoding
  (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...)."""
  if value < 0:
    # Equivalent to (value << 1) ^ ~0: flip all bits of the doubled value.
    return ~(value << 1)
  return value << 1
def ZigZagDecode(value):
  """Inverse of ZigZagEncode(): odd inputs map back to negatives."""
  if value & 0x1:
    # Equivalent to (value >> 1) ^ ~0.
    return ~(value >> 1)
  return value >> 1
# The *ByteSize() functions below return the number of bytes required to
# serialize "field number + type" information and then serialize the value.
def Int32ByteSize(field_number, int32):
  """Bytes to serialize an int32 field: tag + varint value (negatives
  are sign-extended to 64 bits, hence 10 bytes)."""
  return Int64ByteSize(field_number, int32)
def Int32ByteSizeNoTag(int32):
  """Bytes for the varint value of an int32, without any tag; the mask
  reinterprets negatives as 64-bit unsigned."""
  return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
def Int64ByteSize(field_number, int64):
  """Bytes to serialize an int64 field: tag + varint value."""
  # Have to convert to uint before calling UInt64ByteSize().
  return UInt64ByteSize(field_number, 0xffffffffffffffff & int64)
def UInt32ByteSize(field_number, uint32):
  """Bytes to serialize a uint32 field: tag + varint value."""
  return UInt64ByteSize(field_number, uint32)
def UInt64ByteSize(field_number, uint64):
  """Bytes to serialize a uint64 field: tag + varint value."""
  return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64)
def SInt32ByteSize(field_number, int32):
  """Bytes to serialize an sint32 field: tag + zigzag-encoded varint."""
  return UInt32ByteSize(field_number, ZigZagEncode(int32))
def SInt64ByteSize(field_number, int64):
  """Bytes to serialize an sint64 field: tag + zigzag-encoded varint."""
  return UInt64ByteSize(field_number, ZigZagEncode(int64))
def Fixed32ByteSize(field_number, fixed32):
  """Bytes to serialize a fixed32 field: tag + 4-byte payload."""
  return TagByteSize(field_number) + 4
def Fixed64ByteSize(field_number, fixed64):
  """Bytes to serialize a fixed64 field: tag + 8-byte payload."""
  return TagByteSize(field_number) + 8
def SFixed32ByteSize(field_number, sfixed32):
  """Bytes to serialize an sfixed32 field: tag + 4-byte payload."""
  return TagByteSize(field_number) + 4
def SFixed64ByteSize(field_number, sfixed64):
  """Bytes to serialize an sfixed64 field: tag + 8-byte payload."""
  return TagByteSize(field_number) + 8
def FloatByteSize(field_number, flt):
  """Bytes to serialize a float field: tag + 4-byte payload."""
  return TagByteSize(field_number) + 4
def DoubleByteSize(field_number, double):
  """Bytes to serialize a double field: tag + 8-byte payload."""
  return TagByteSize(field_number) + 8
def BoolByteSize(field_number, b):
  """Bytes to serialize a bool field: tag + single varint byte."""
  return TagByteSize(field_number) + 1
def EnumByteSize(field_number, enum):
  """Bytes to serialize an enum field; wire-identical to uint32."""
  return UInt32ByteSize(field_number, enum)
def StringByteSize(field_number, string):
  """Bytes to serialize a string field; measured on its byte encoding."""
  return BytesByteSize(field_number, string_to_bytes(string))
def BytesByteSize(field_number, b):
  """Bytes to serialize a bytes field: tag + varint length prefix + data."""
  return (TagByteSize(field_number)
          + _VarUInt64ByteSizeNoTag(len(b))
          + len(b))
def GroupByteSize(field_number, message):
  """Bytes to serialize a group field: START + END tags around the body."""
  return (2 * TagByteSize(field_number)  # START and END group.
          + message.ByteSize())
def MessageByteSize(field_number, message):
  """Bytes to serialize an embedded message: tag + length prefix + body."""
  return (TagByteSize(field_number)
          + _VarUInt64ByteSizeNoTag(message.ByteSize())
          + message.ByteSize())
def MessageSetItemByteSize(field_number, msg):
  """Bytes to serialize *msg* as a MessageSet item keyed by field_number."""
  # First compute the sizes of the tags.
  # There are 2 tags for the beginning and ending of the repeated group, that
  # is field number 1, one with field number 2 (type_id) and one with field
  # number 3 (message).
  total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3))
  # Add the number of bytes for type_id.
  total_size += _VarUInt64ByteSizeNoTag(field_number)
  message_size = msg.ByteSize()
  # The number of bytes for encoding the length of the message.
  total_size += _VarUInt64ByteSizeNoTag(message_size)
  # The size of the message.
  total_size += message_size
  return total_size
def TagByteSize(field_number):
  """Returns the bytes required to serialize a tag with this field number."""
  # Just pass in type 0, since the type won't affect the tag+type size.
  return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0))
# Private helper function for the *ByteSize() functions above.
def _VarUInt64ByteSizeNoTag(uint64):
"""Returns the number of bytes required to serialize a single varint
using boundary value comparisons. (unrolled loop optimization -WPierce)
uint64 must be unsigned.
"""
if uint64 <= 0x7f: return 1
if uint64 <= 0x3fff: return 2
if uint64 <= 0x1fffff: return 3
if uint64 <= 0xfffffff: return 4
if uint64 <= 0x7ffffffff: return 5
if uint64 <= 0x3ffffffffff: return 6
if uint64 <= 0x1ffffffffffff: return 7
if uint64 <= 0xffffffffffffff: return 8
if uint64 <= 0x7fffffffffffffff: return 9
if uint64 > UINT64_MAX:
raise message.EncodeError('Value out of range: %d' % uint64)
return 10
# Field types for which [packed = true] is invalid; see IsTypePackable().
NON_PACKABLE_TYPES = (
  descriptor.FieldDescriptor.TYPE_STRING,
  descriptor.FieldDescriptor.TYPE_GROUP,
  descriptor.FieldDescriptor.TYPE_MESSAGE,
  descriptor.FieldDescriptor.TYPE_BYTES
)
def IsTypePackable(field_type):
  """Return true iff packable = true is valid for fields of this type.

  Args:
    field_type: a FieldDescriptor::Type value.

  Returns:
    True iff fields of this type are packable.
  """
  packable = field_type not in NON_PACKABLE_TYPES
  return packable
|
{
"content_hash": "11e42ffd0666b141a1a0b0cfdcdbe068",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 77,
"avg_line_length": 28.70539419087137,
"alnum_prop": 0.7263660017346054,
"repo_name": "openx/python3-protobuf",
"id": "8863c551462a37cfc9a291e244af7580b4df1597",
"size": "8536",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/google/protobuf/internal/wire_format.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "20769"
},
{
"name": "C++",
"bytes": "3095159"
},
{
"name": "Emacs Lisp",
"bytes": "7798"
},
{
"name": "Java",
"bytes": "1068353"
},
{
"name": "Python",
"bytes": "499068"
},
{
"name": "Shell",
"bytes": "16081"
},
{
"name": "VimL",
"bytes": "3731"
}
],
"symlink_target": ""
}
|
from PyQt4.QtGui import QStandardItem, QStandardItemModel, QIcon
class BaseTreeItem(QStandardItem):
    """Common base for all items in the DBMS tree.

    Provides upward-navigation helpers (owning model / connection) and a
    lazy open()/refresh() protocol that subclasses override to populate
    their children on demand.
    """

    def __init__(self, name):
        QStandardItem.__init__(self, name)
        self.setEditable(False)
        self.setColumnCount(1)
        self.setData(self)

    def getModel(self):
        """Return the model this item belongs to (None while detached).

        BUG FIX: the original walked ``item.parent`` without calling it,
        binding the method object and crashing on the next iteration;
        QStandardItem already tracks its model, so ask it directly.
        """
        return self.model()

    def getConnectionItem(self):
        """Walk up the item tree to the enclosing ConnectionTreeItem."""
        item = self
        while item is not None and type(item) is not ConnectionTreeItem:
            item = item.parent()
        return item

    def getConnection(self):
        """Return the DB connection of the enclosing connection item, or None."""
        item = self.getConnectionItem()
        return item.connection if type(item) is ConnectionTreeItem else None

    def open(self):
        # Default open just (re)loads children; subclasses may connect first.
        self.refresh()

    def refresh(self):
        # Drop all children; subclasses re-populate after calling this.
        self.setRowCount(0)

    def __repr__(self):
        # BUG FIX: the original called the nonexistent self.getName();
        # QStandardItem exposes its display text via text().
        return "<" + self.__class__.__name__ + " " + str(self.text()) + ">"
class ConnectionTreeItem(BaseTreeItem):
    """Tree root for one server connection; children are the Databases and
    Privileges folders, shown only while the connection is open."""
    def __init__(self, name, connection):
        BaseTreeItem.__init__(self, name)
        self.connection = connection
        self.refresh()
    def refresh(self):
        BaseTreeItem.refresh(self)
        if self.connection.isOpen():
            databases = EntityDatabasesTreeItem()
            privileges = EntityPrivilegesTreeItem()
            self.insertRow(0, databases)
            self.insertRow(1, privileges)
        self.refreshIcon()
    def open(self):
        # Connect lazily on first expansion, then populate children.
        if not self.connection.isOpen():
            self.connection.open()
        BaseTreeItem.open(self)
    def refreshIcon(self):
        # Icon reflects connection state: server (open) vs. connect (closed).
        if self.connection.isOpen():
            self.setIcon(QIcon(":/16/database-server"))
        else:
            self.setIcon(QIcon(":/16/database-connect"))
class EntityDatabasesTreeItem(BaseTreeItem):
    """'Databases' folder: one child per schema, with sizes filled in
    asynchronously into the second column."""
    def __init__(self):
        BaseTreeItem.__init__(self, "Databases")
        self.setIcon(QIcon(":/16/database"))
        # Maps database name -> child row index, used by the async callback.
        self.rowsByDb = {}
    def getDbList(self):
        """Return schema names; also kick off an async per-schema size query."""
        def showDbSize(t):
            # NOTE(review): assumes t.result rows are (schema, total_bytes)
            # and that the callback runs after refresh() filled rowsByDb --
            # confirm the asyncQuery threading/ordering guarantees.
            for row in t.result:
                i = self.rowsByDb[row[0]]
                self.setChild(i, 1, BaseTreeItem("%d MB" % (row[1] / 1024 / 1024)))
        self.getConnection().asyncQuery("SELECT TABLE_SCHEMA, SUM(DATA_LENGTH) + SUM(INDEX_LENGTH) FROM `information_schema`.`TABLES` GROUP BY TABLE_SCHEMA", callback=showDbSize)
        dblist = []
        db = self.getConnection().cursor()
        db.execute("SHOW DATABASES")
        for row in db.fetchall():
            dblist.append(row[0])
        return dblist
    def refresh(self):
        BaseTreeItem.refresh(self)
        self.rowsByDb = {}
        for i, db in enumerate(self.getDbList()):
            self.rowsByDb[db] = i
            self.insertRow(i, DatabaseTreeItem(db))
class EntityPrivilegesTreeItem(BaseTreeItem):
    """'Privileges' folder: one child per distinct grantee account."""
    def __init__(self):
        BaseTreeItem.__init__(self, "Privileges")
        self.setIcon(QIcon(":/16/group"))
    def getPrivList(self):
        """Return the distinct GRANTEE values from information_schema."""
        privlist = []
        db = self.getConnection().cursor()
        db.execute("SELECT GRANTEE FROM `information_schema`.`USER_PRIVILEGES` GROUP BY GRANTEE")
        for row in db.fetchall():
            privlist.append(row[0])
        return privlist
    def refresh(self):
        BaseTreeItem.refresh(self)
        for i, priv in enumerate(self.getPrivList()):
            self.insertRow(i, PrivilegeTreeItem(priv))
class DatabaseTreeItem(BaseTreeItem):
    """One schema: children are procedures, functions, triggers, then
    tables (table sizes are filled in asynchronously)."""
    def __init__(self, db):
        BaseTreeItem.__init__(self, db)
        self.setIcon(QIcon(":/16/database"))
        # Maps table name -> child row index, used by the async size callback.
        self.rowsByTable = {}
    def getTableList(self):
        """Return table names; also kick off an async per-table size query."""
        def showTableSize(t):
            # NOTE(review): assumes t.result rows are (table, bytes) and that
            # rowsByTable has been populated by refresh() before this fires.
            for row in t.result:
                size = row[1]
                if size is None:
                    size = 0
                i = self.rowsByTable[row[0]]
                self.setChild(i, 1, BaseTreeItem("%d MB" % (size / 1024 / 1024)))
        self.getConnection().asyncQuery("SELECT TABLE_NAME, DATA_LENGTH + INDEX_LENGTH FROM `information_schema`.`TABLES` WHERE TABLE_SCHEMA = %s", (self.text(),), callback=showTableSize)
        tablelist = []
        conn = self.getConnection()
        db = conn.cursor()
        # Identifier is escaped via quoteIdentifier (cannot be parameterized).
        db.execute("SHOW TABLES IN %s" % conn.quoteIdentifier(self.text()))
        for row in db.fetchall():
            tablelist.append(row[0])
        return tablelist
    def getTriggerList(self):
        """Return trigger names defined in this schema."""
        triglist = []
        conn = self.getConnection()
        db = conn.cursor()
        db.execute("SHOW TRIGGERS FROM %s" % conn.quoteIdentifier(self.text()))
        for row in db.fetchall():
            triglist.append(row[0])
        return triglist
    def getProcedureList(self):
        """Return stored-procedure names (column 1 of SHOW PROCEDURE STATUS)."""
        proclist = []
        conn = self.getConnection()
        db = conn.cursor()
        db.execute("SHOW PROCEDURE STATUS WHERE Db=%s", (self.text(),))
        for row in db.fetchall():
            proclist.append(row[1])
        return proclist
    def getFunctionList(self):
        """Return stored-function names (column 1 of SHOW FUNCTION STATUS)."""
        funclist = []
        conn = self.getConnection()
        db = conn.cursor()
        db.execute("SHOW FUNCTION STATUS WHERE Db=%s", (self.text(),))
        for row in db.fetchall():
            funclist.append(row[1])
        return funclist
    def refresh(self):
        # Children are appended in order: procedures, functions, triggers,
        # tables; i tracks the next row index across all four groups.
        BaseTreeItem.refresh(self)
        i = None
        for i, proc in enumerate(self.getProcedureList()):
            self.insertRow(i, ProcedureTreeItem(proc))
        if i is None:
            i = -1
        for func in self.getFunctionList():
            i += 1
            self.insertRow(i, FunctionTreeItem(func))
        for trig in self.getTriggerList():
            i += 1
            self.insertRow(i, TriggerTreeItem(trig))
        self.rowsByTable = {}
        for table in self.getTableList():
            i += 1
            self.rowsByTable[table] = i
            self.insertRow(i, TableTreeItem(table))
class TableTreeItem(BaseTreeItem):
    """Leaf node representing a single database table."""

    def __init__(self, table):
        super(TableTreeItem, self).__init__(table)
        self.setIcon(QIcon(":/16/database-table"))
class PrivilegeTreeItem(BaseTreeItem):
    """Leaf node representing a single grantee."""

    def __init__(self, priv):
        super(PrivilegeTreeItem, self).__init__(priv)
        self.setIcon(QIcon(":/16/user"))
class ProcedureTreeItem(BaseTreeItem):
    """Leaf node for a stored procedure; displays the name with '()'."""

    def __init__(self, proc):
        super(ProcedureTreeItem, self).__init__(proc + "()")
        # Keep the bare routine name; the display text has '()' appended.
        self.name = proc
        self.setIcon(QIcon(":/16/code"))
class FunctionTreeItem(BaseTreeItem):
    """Leaf node for a stored function; displays the name with '()'."""

    def __init__(self, func):
        super(FunctionTreeItem, self).__init__(func + "()")
        # Keep the bare routine name; the display text has '()' appended.
        self.name = func
        self.setIcon(QIcon(":/16/code"))
class TriggerTreeItem(BaseTreeItem):
    """Leaf node representing a single trigger."""

    def __init__(self, trig):
        super(TriggerTreeItem, self).__init__(trig)
        self.setIcon(QIcon(":/16/database-lightning"))
class DBMSTreeModel(QStandardItemModel):
    """Two-column tree model of configured connections ("Connections" /
    "Dimension"); each top-level row is one connection, sorted by name.

    Fixes vs. original:
    - refresh() no longer raises AttributeError when *connections* is None
      (the constructor's default), by iterating an empty mapping instead.
    - bare ``raise`` preserves the original traceback (``raise e`` does
      not, under Python 2).
    - iterating the dict directly replaces the Python-2-only
      ``.iterkeys()`` (identical behavior, also valid on Python 3).
    """

    def __init__(self, parent=None, connections=None):
        QStandardItemModel.__init__(self, parent)
        self.setColumnCount(2)
        self.setHorizontalHeaderLabels(["Connections", "Dimension"])
        self.setConnections(connections)

    def setConnections(self, connections):
        """Replace the name -> connection mapping and rebuild the tree."""
        self.connections = connections
        self.refresh()

    def refresh(self):
        """Rebuild the top-level rows from the connection mapping."""
        self.clear()
        try:
            # `or {}` guards the None default passed by __init__.
            for i, connectionName in enumerate(sorted(self.connections or {})):
                self.insertRow(i, [ConnectionTreeItem(connectionName, self.connections[connectionName]), BaseTreeItem("")])
        except Exception:
            self.clear()
            raise
        finally:
            # Tell attached views the model changed wholesale.
            self.reset()
|
{
"content_hash": "b1f525d37456d6963b36acc8d35801c6",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 187,
"avg_line_length": 28.78846153846154,
"alnum_prop": 0.586372745490982,
"repo_name": "mtorromeo/sqlantaresia",
"id": "0720b8554007170f5c7ce199a004b1c11de68be4",
"size": "7510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqlantaresia/dbmodels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88609"
},
{
"name": "Shell",
"bytes": "478"
}
],
"symlink_target": ""
}
|
"""open_discussions constants"""
from rest_framework import status
PERMISSION_DENIED_ERROR_TYPE = "PermissionDenied"
NOT_AUTHENTICATED_ERROR_TYPE = "NotAuthenticated"
DJANGO_PERMISSION_ERROR_TYPES = (
status.HTTP_401_UNAUTHORIZED,
status.HTTP_403_FORBIDDEN,
)
ISOFORMAT = "%Y-%m-%dT%H:%M:%SZ"
|
{
"content_hash": "2742453f8933231269b76d409b6b11af",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 49,
"avg_line_length": 27.545454545454547,
"alnum_prop": 0.7425742574257426,
"repo_name": "mitodl/open-discussions",
"id": "8f5fd3284ce0117aae3d1186713608859a01c17e",
"size": "303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open_discussions/constants.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1040"
},
{
"name": "HTML",
"bytes": "78316"
},
{
"name": "JavaScript",
"bytes": "1704037"
},
{
"name": "Procfile",
"bytes": "675"
},
{
"name": "Python",
"bytes": "2264549"
},
{
"name": "SCSS",
"bytes": "133442"
},
{
"name": "Shell",
"bytes": "11787"
},
{
"name": "TypeScript",
"bytes": "307134"
}
],
"symlink_target": ""
}
|
"""This module provides a base class for transports that provide
asynchronous (non-blocking) transmit and receive functionality.
"""
from thespian.system.transport import (TransmitOnly, SendStatus,
Thespian__UpdateWork)
from thespian.system.utilis import thesplog, partition
from thespian.system.timing import ExpirationTimer
import logging
from thespian.system.addressManager import CannotPickleAddress
from collections import deque
import threading
from contextlib import contextmanager
import time
# Select a version-appropriate is_main_thread() -> bool predicate: True iff
# the caller is running on the process's main thread.  Proper `def`s replace
# the original lambda-assigned-to-name (PEP 8 E731); behavior is unchanged.
if hasattr(threading, 'main_thread'):
    # python 3.4 or later
    def is_main_thread():
        return threading.main_thread() == threading.current_thread()
elif hasattr(threading, 'name'):
    def is_main_thread():
        return 'MainThread' in threading.current_thread().name
else:
    # very old Pythons expose only the getName() accessor
    def is_main_thread():
        return 'MainThread' in threading.current_thread().getName()
# Transmits are passed along until there are MAX_PENDING_TRANSMITS, at
# which point they are queued internally.  If the number of internally
# queue transmits exceeds MAX_QUEUED_TRANSMITS then the transport is
# put into transmit-only mode (attempting to drain all current work
# before new work is accepted) until the transmit queue depth drops
# back below QUEUE_TRANSMIT_UNBLOCK_THRESHOLD.  If the number of
# queued transmits exceeds the DROP_TRANSMITS_LEVEL then additional
# transmits are immediately failed instead of being queued.
MAX_PENDING_TRANSMITS = 20                        # max in-flight sends
MAX_QUEUED_TRANSMITS = 950                        # enter tx-only mode above this
QUEUE_TRANSMIT_UNBLOCK_THRESHOLD = 780            # leave tx-only mode below this
DROP_TRANSMITS_LEVEL = MAX_QUEUED_TRANSMITS + 100 # hard-fail new sends above this
@contextmanager
def exclusive_processing(transport):
    """Busy-wait until *transport* grants exclusive processing rights, and
    always release them on exit.

    Fix: the original called _not_processing() after a bare ``yield``, so
    an exception in the ``with`` body skipped the release and left the
    transport locked forever.  try/finally guarantees the release.
    """
    while not transport._exclusively_processing():
        time.sleep(0.000001)
    try:
        yield
    finally:
        transport._not_processing()
class asyncTransportBase(object):
    """This class should be used as a base-class for Transports where the
    transmit operation occurs asynchronously.  The send operation
    will reject TransmitIntent objects until they are fully
    serializeable, and will then submit the TransmitIntent to the
    actual Transport for sending.

    This module provides queue management for transmits to ensure
    that only a limited number of transmits are active from this
    Actor at any one time.  Note that the system level
    functionality is responsible for ensuring that only one
    TransmitIntent *PER TARGET* is submitted to this module at any
    one time, but this module ensures that the number of
    TransmitIntents *FOR ALL TARGETS* does not exceed a maximum
    threshold.
    """
    # Expects from subclass:
    #   self.serializer - serializer callable that returns serialized form
    #       of intent that should be sent (stored in .serMsg)
    #   self._scheduleTransmitActual -- called to do the actual transmit
    #       (with .serMsg set)

    def __init__(self, *args, **kw):
        super(asyncTransportBase, self).__init__(*args, **kw)
        self._aTB_numPendingTransmits = 0  # counts recursion and in-progress
        self._aTB_lock = threading.Lock()  # protects the following:
        self._aTB_processing = False  # limits to a single operation
        self._aTB_sending = False  # transmit is being performed
        self._aTB_queuedPendingTransmits = deque()
        self._aTB_rx_pause_enabled = True
        self._aTB_interrupted = False

    def setAddressManager(self, addrManager):
        """Record the address manager used to validate/resolve targets."""
        self._addressMgr = addrManager

    def enableRXPauseFlowControl(self, enable=True):
        """Allow or forbid entering tx-only mode when the queue overflows."""
        self._aTB_rx_pause_enabled = enable

    def _updateStatusResponse(self, resp):
        """Called to update a Thespian_SystemStatus or Thespian_ActorStatus
        with common information
        """
        with self._aTB_lock:
            for each in self._aTB_queuedPendingTransmits:
                resp.addPendingMessage(self.myAddress,
                                       each.targetAddr,
                                       each.message)

    def _canSendNow(self):
        # True while the number of in-flight sends is under the cap.
        return (MAX_PENDING_TRANSMITS > self._aTB_numPendingTransmits)

    def _async_txdone(self, _TXresult, _TXIntent):
        """Completion callback for a submitted transmit.  Registered for
        both success and failure (it only maintains the in-flight count
        and drains the queue)."""
        self._aTB_numPendingTransmits -= 1
        # If in the context of an initiated transmit, do not process
        # timeouts or do more scheduling because that could recurse
        # indefinitely.  In addition, ensure that this is not part of
        # a callback chain that has looped back around here, which
        # also represents recursion.  All those entry points will
        # re-check for additional work and initiated the work at that
        # point.
        while self._canSendNow():
            if not self._runQueued():
                break

    def _runQueued(self, has_exclusive_flag=False):
        """Perform queued transmits; returns False if there are no transmits
        or if another process is already in this critical section
        (and will therefore be perform the transmits).
        """
        # Repeatedly purge expired intents until a pass finds none.
        v, e = self._complete_expired_intents()
        while e:
            v, e = self._complete_expired_intents()
        # If something is queued, submit it to the lower level for transmission
        # 1. Sync with the lower level, since this will be modifying lower-level objects
        while True:
            nextTransmit = None
            with self._aTB_lock:
                if has_exclusive_flag or not self._aTB_processing:
                    # 2. If another process is in the sending critical
                    # section, defer to it
                    if self._aTB_sending:
                        return False
                    # Nothing to send by this point, return
                    if not self._aTB_queuedPendingTransmits:
                        return False
                    self._aTB_processing = True
                    self._aTB_sending = True
                    nextTransmit = self._aTB_queuedPendingTransmits.popleft()
                    try:
                        if nextTransmit:
                            self._submitTransmit(nextTransmit)
                            return True
                        return False
                    finally:
                        # Always clear both flags, even if the submit raised.
                        self._aTB_sending = False
                        self._aTB_processing = False
            # Another (non-exclusive) thread holds _aTB_processing: back off
            # briefly and retry.
            time.sleep(0.00001)

    def scheduleTransmit(self, addressManager, transmitIntent, has_exclusive_flag=False):
        """Requests that a transmit be performed.  The message and target
        address must be fully valid at this point; any local
        addresses should throw a CannotPickleAddress exception and
        the caller is responsible for retrying later when those
        addresses are available.

        If addressManager is None then the intent address is
        assumed to be valid but it cannot be updated if it is a
        local address or a dead address.  A value of None is
        normally only used at Admin or Actor startup time when
        confirming the established connection back to the parent,
        at which time the target address should always be valid.

        Any transmit attempts from a thread other than the main
        thread are queued; calls to the underlying transmit layer
        are done only from the context of the main thread.
        """
        if addressManager:
            # Verify the target address is useable
            targetAddr, txmsg = addressManager.prepMessageSend(
                transmitIntent.targetAddr,
                transmitIntent.message)
            try:
                isDead = txmsg == SendStatus.DeadTarget
            except Exception:
                # txmsg may have an __eq__ that caused an exception
                isDead = False
            if isDead:
                # Address Manager has indicated that these messages
                # should never be attempted because the target is
                # dead.  This is *only* for special messages like
                # DeadEnvelope and ChildActorExited which would
                # endlessly recurse or bounce back and forth.  This
                # code indicates here that the transmit was
                # "successful" to allow normal cleanup but to avoid
                # recursive error generation.
                thesplog('Faking dead target transmit result Sent for %s',
                         transmitIntent, level=logging.WARNING)
                transmitIntent.tx_done(SendStatus.Sent)
                return
            if not targetAddr:
                raise CannotPickleAddress(transmitIntent.targetAddr)
            # In case the prep made some changes...
            transmitIntent.changeTargetAddr(targetAddr)
            transmitIntent.changeMessage(txmsg)
        # Verify that the message can be serialized.  This may throw
        # an exception for local-only ActorAddresses or for attempting
        # to send other invalid elements in the message.  The invalid
        # address will cause the caller to store this intent and retry
        # it at some future point (the code up to and including this
        # serialization should be idempotent).
        transmitIntent.serMsg = self.serializer(transmitIntent)
        self._schedulePreparedIntent(transmitIntent, has_exclusive_flag=has_exclusive_flag)

    def _qtx(self, transmitIntent):
        """Append the intent to the tx queue unless the hard drop level has
        been reached; returns True on enqueue."""
        with self._aTB_lock:
            if len(self._aTB_queuedPendingTransmits) < DROP_TRANSMITS_LEVEL:
                self._aTB_queuedPendingTransmits.append(transmitIntent)
                return True
        return False

    def _queue_tx(self, transmitIntent):
        """Enqueue the intent, or fail it immediately (SendStatus.Failed)
        when the transport is overloaded past DROP_TRANSMITS_LEVEL."""
        if self._qtx(transmitIntent):
            return True
        thesplog('Dropping TX: overloaded', level=logging.WARNING)
        transmitIntent.tx_done(SendStatus.Failed)
        return False

    def _complete_expired_intents(self):
        """Drop expired intents from the queue, failing each of them, and
        return (remaining queue length, whether any had expired)."""
        with self._aTB_lock:
            expiredTX, validTX = partition(lambda i: i.expired(),
                                           self._aTB_queuedPendingTransmits,
                                           deque)
            self._aTB_queuedPendingTransmits = validTX
            rlen = len(validTX)
        # Completion callbacks run outside the lock to avoid re-entrancy.
        for each in expiredTX:
            thesplog('TX intent %s timed out', each, level=logging.WARNING)
            each.tx_done(SendStatus.Failed)
        return rlen, bool(expiredTX)

    def _drain_tx_queue_if_needed(self, max_delay=None):
        """If the queue depth exceeds MAX_QUEUED_TRANSMITS (and RX-pause
        flow control is enabled), run in transmit-only mode until the
        depth drops below QUEUE_TRANSMIT_UNBLOCK_THRESHOLD or *max_delay*
        elapses."""
        v, _ = self._complete_expired_intents()
        if v >= MAX_QUEUED_TRANSMITS and self._aTB_rx_pause_enabled:
            # Try to drain our local work before accepting more
            # because it looks like we're getting really behind.  This
            # is dangerous though, because if other Actors are having
            # the same issue this can create a deadlock.
            finish_time = ExpirationTimer(max_delay if max_delay else None)
            thesplog('Entering tx-only mode to drain excessive queue'
                     ' (%s > %s, drain-to %s in %s)',
                     v, MAX_QUEUED_TRANSMITS,
                     QUEUE_TRANSMIT_UNBLOCK_THRESHOLD, finish_time,
                     level=logging.WARNING)
            while v > QUEUE_TRANSMIT_UNBLOCK_THRESHOLD:
                with finish_time as rem_time:
                    if rem_time.expired():
                        break
                    # run() returning 0 means the subtransport has no work.
                    if 0 == self.run(TransmitOnly, rem_time.remaining()):
                        thesplog('Exiting tx-only mode because no transport work available.')
                        # This may happend because the lower-level
                        # subtransport layer has nothing left to send,
                        # so it has to return to allow this layer to
                        # queue more transmits.
                        break
                v, _ = self._complete_expired_intents()
            thesplog('Exited tx-only mode after draining excessive queue (%s)',
                     len(self._aTB_queuedPendingTransmits),
                     level=logging.WARNING)

    def _exclusively_processing(self):
        "Protects critical sections by only allowing a single thread entry"
        with self._aTB_lock:
            if self._aTB_processing:
                return False  # Another thread is processing, not exclusive
            self._aTB_processing = True
            return True  # This thread exclusively holds the processing mutex

    def _not_processing(self):
        "Exit from critical section"
        # Plain boolean store without taking the lock — presumably relies
        # on the GIL making the assignment atomic; confirm.
        self._aTB_processing = False

    def _schedulePreparedIntent(self, transmitIntent, has_exclusive_flag=False):
        """Queue an already-serialized intent (serMsg set) and drive as many
        queued transmits as the in-flight cap allows."""
        # If there's nothing to send, that's implicit success
        if not transmitIntent.serMsg:
            transmitIntent.tx_done(SendStatus.Sent)
            return
        if isinstance(transmitIntent.message, Thespian__UpdateWork):
            # The UpdateWork should not actually be transmitted, but
            # it *should* cause the main thread to be interrupted if
            # it is in a blocking wait for work to do.
            transmitIntent.tx_done(SendStatus.Sent)
            # NOTE(review): clears rather than sets the interrupted flag
            # here — confirm this is the intended direction.
            self._aTB_interrupted = False
        else:
            if not self._queue_tx(transmitIntent):
                # TX overflow, intent discarded, no further work needed here
                return
            if not self._canSendNow():
                drainer = False
                if has_exclusive_flag or self._exclusively_processing():
                    if not self._aTB_sending:
                        self._aTB_sending = True
                        drainer = True
                    self._not_processing()
                if drainer:
                    try:
                        self._drain_tx_queue_if_needed(transmitIntent.delay())
                    finally:
                        self._aTB_sending = False
                else:
                    time.sleep(0.1)  # slow down threads not performing draining
        while self._canSendNow():
            if not self._runQueued(has_exclusive_flag=has_exclusive_flag):
                # Before exiting, ensure that if the main thread
                # is waiting for input on select() that it is
                # awakened in case it needs to monitor new
                # transmit sockets.
                if not is_main_thread() and not self._aTB_interrupted:
                    self._aTB_interrupted = True
                    self.interrupt_wait()
                break

    def _submitTransmit(self, transmitIntent, has_exclusive_flag=False):
        """Hand one intent to the subclass transport for actual transmit."""
        self._aTB_numPendingTransmits += 1
        # Same handler for success and failure: it only decrements the
        # in-flight count and continues draining.
        transmitIntent.addCallback(self._async_txdone, self._async_txdone)
        thesplog('actualTransmit of %s', transmitIntent.identify(),
                 level=logging.DEBUG)
        self._scheduleTransmitActual(transmitIntent, has_exclusive_flag=has_exclusive_flag)

    def deadAddress(self, addressManager, childAddr):
        """Redirect any queued transmits addressed to *childAddr* (now dead)
        via the address manager's dead-letter handling."""
        # Go through pending transmits and update any to this child to
        # a dead letter delivery
        with self._aTB_lock:
            for each in self._aTB_queuedPendingTransmits:
                if each.targetAddr == childAddr:
                    newtgt, newmsg = addressManager.prepMessageSend(
                        each.targetAddr, each.message)
                    each.changeTargetAddr(newtgt)
                    # n.b. prepMessageSend might return
                    # SendStatus.DeadTarget for newmsg; when this is later
                    # attempted, that will be handled normally and the
                    # transmit will be completed as "Sent"
                    each.changeMessage(newmsg)
|
{
"content_hash": "873102b962d205621aca015ced4ca163",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 93,
"avg_line_length": 44.56609195402299,
"alnum_prop": 0.614223998968341,
"repo_name": "kquick/Thespian",
"id": "46e321eeec18c754faefba0890c339d9a3cf68ca",
"size": "15509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thespian/system/transport/asyncTransportBase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Promela",
"bytes": "11021"
},
{
"name": "Python",
"bytes": "1241745"
},
{
"name": "Shell",
"bytes": "48"
}
],
"symlink_target": ""
}
|
import json
import requests
import urlparse
import testresources
import testtools
from keystoneclient.v2_0 import Client as keystoneclient
class CloneEnvTests(testtools.TestCase, testtools.testcase.WithAttributes,
                    testresources.ResourcedTestCase):
    """Integration tests for the cluster ``upgrade/clone`` REST endpoint.

    Assumes a live master node at 10.20.0.2 with an existing cluster id 1.

    Fix vs. original: tearDown() now drains the shared class-level
    ``clusters`` list as it deletes.  Previously it iterated without
    removing entries, so every later test's tearDown re-issued DELETEs
    for clusters that had already been removed, and the list grew for
    the lifetime of the class.
    """

    @classmethod
    def setUpClass(cls):
        super(CloneEnvTests, cls).setUpClass()
        cls.url = "http://10.20.0.2:8000"
        keystone_url = "http://10.20.0.2:5000/v2.0"
        cls.endpoint = urlparse.urljoin(cls.url,
                                        "api/clusters/1/upgrade/clone")
        ksclient = keystoneclient(auth_url=keystone_url, username="admin",
                                  password="admin", tenant_name="admin")
        cls.headers = {"X-Auth-Token": ksclient.auth_token,
                       "Content-Type": "application/json"}
        # Ids of clusters created by tests; cleaned up in tearDown.
        cls.clusters = []

    def tearDown(self):
        super(CloneEnvTests, self).tearDown()
        # Pop while deleting so each cluster is deleted exactly once.
        while self.clusters:
            self._delete_cluster(self.clusters.pop())

    def _delete_cluster(self, cluster_id):
        """DELETE the given cluster; returns the raw response."""
        endpoint = urlparse.urljoin(self.url,
                                    "api/clusters/{0}".format(cluster_id))
        return requests.delete(endpoint, headers=self.headers)

    def _get_cluster(self, cluster_id):
        """Return the cluster resource as a dict."""
        endpoint = urlparse.urljoin(self.url,
                                    "api/clusters/{0}".format(cluster_id))
        return requests.get(endpoint, headers=self.headers).json()

    def _get_cluster_attributes(self, cluster_id):
        """Return the cluster's editable attributes."""
        endpoint = urlparse.urljoin(self.url,
                                    "api/clusters/{0}/attributes".format(
                                        cluster_id))
        return requests.get(endpoint, headers=self.headers).json()

    def _get_releases(self):
        """Return the list of all known releases."""
        endpoint = urlparse.urljoin(self.url, "api/releases")
        return requests.get(endpoint, headers=self.headers).json()

    def _get_release_details(self, release_id):
        """Return full details for one release."""
        endpoint = urlparse.urljoin(self.url, "api/releases/{0}".format(
            release_id))
        return requests.get(endpoint, headers=self.headers).json()

    def _get_list_networks(self, cluster_id):
        """Return the cluster's network configuration for its provider."""
        net_provider = self._get_cluster(cluster_id)["net_provider"]
        endpoint = urlparse.urljoin(self.url,
                                    "/api/clusters/{0}"
                                    "/network_configuration/{1}".format(
                                        cluster_id, net_provider))
        return requests.get(endpoint, headers=self.headers).json()

    def _get_deployable_release_id(self, cluster_id):
        """Return the cluster's own release id if deployable, else the next
        deployable release with the same operating system."""
        cluster = self._get_cluster(cluster_id)
        releases = self._get_releases()
        release_details = self._get_release_details(cluster["release_id"])
        if release_details["is_deployable"]:
            return release_details["id"]
        else:
            return next(release["id"]
                        for release in releases
                        if release["id"] > cluster["release_id"] and
                        release["operating_system"] == release_details[
                            "operating_system"] and release["is_deployable"])

    def test_env_clone_to_deployable_release_id(self):
        """Clone cluster 1 and verify attributes and networks carry over."""
        cluster = self._get_cluster(1)
        release_id = self._get_deployable_release_id(1)
        post_body = {
            "name": "new_test_cluster",
            "release_id": release_id
        }
        resp = requests.post(self.endpoint, data=json.dumps(post_body),
                             headers=self.headers)
        self.assertEqual(200, resp.status_code)
        self.assertEqual(release_id, resp.json()["release_id"])
        self.assertEqual(cluster["net_provider"], resp.json()["net_provider"])
        self.assertEqual(cluster["mode"], resp.json()["mode"])
        self.clusters.append(resp.json()["id"])
        cluster = self._get_cluster_attributes(1)
        cloned_cluster = self._get_cluster_attributes(resp.json()["id"])
        # Compare every editable attribute value present on both sides
        # (repo_setup legitimately differs between releases).
        for key in cloned_cluster["editable"]:
            if key == "repo_setup":
                continue
            for key1, value1 in cloned_cluster["editable"][key].items():
                if "value" in value1:
                    if "value" in cluster["editable"].get(key, {}).get(
                            key1, {}):
                        self.assertEqual(
                            cluster["editable"][key][key1]["value"],
                            value1["value"])
                elif "values" in value1:
                    if "values" in cluster["editable"].get(key, {}).get(
                            key1, {}):
                        self.assertEqual(
                            cluster["editable"][key][key1]["values"],
                            value1["values"])
        old_cluster_net_cfg = self._get_list_networks(1)
        new_cluster_net_cfg = self._get_list_networks(resp.json()["id"])
        self.assertEqual(old_cluster_net_cfg["management_vip"],
                         new_cluster_net_cfg["management_vip"])
        self.assertEqual(old_cluster_net_cfg["public_vip"],
                         new_cluster_net_cfg["public_vip"])
        for parameter in new_cluster_net_cfg["networking_parameters"]:
            if parameter in old_cluster_net_cfg["networking_parameters"]:
                self.assertEqual(
                    old_cluster_net_cfg["networking_parameters"][parameter],
                    new_cluster_net_cfg["networking_parameters"][parameter])
        for network in new_cluster_net_cfg["networks"]:
            if network["name"] not in ["public", "management", "storage"]:
                continue
            for old_network in old_cluster_net_cfg["networks"]:
                if network["name"] == old_network["name"] and network["name"]:
                    self.assertEqual(old_network["cidr"], network["cidr"])
                    self.assertEqual(old_network["ip_ranges"],
                                     network["ip_ranges"])
                    self.assertEqual(old_network["vlan_start"],
                                     network["vlan_start"])

    def test_clone_nonexistent_cluster(self):
        """Cloning a cluster id that is not an integer/does not exist: 404."""
        endpoint = urlparse.urljoin(self.url,
                                    "api/clusters/xa/upgrade/clone")
        post_body = {
            "name": "new_test_cluster",
            "release_id": 123456
        }
        resp = requests.post(endpoint, data=json.dumps(post_body),
                             headers=self.headers)
        self.assertEqual(404, resp.status_code)

    def test_clone_wo_name_in_body(self):
        """Missing "name" in the payload: 400."""
        self.skip("https://mirantis.jira.com/browse/OCTANE-124")
        release_id = self._get_deployable_release_id(1)
        post_body = {
            "release_id": release_id
        }
        resp = requests.post(self.endpoint, data=json.dumps(post_body),
                             headers=self.headers)
        self.assertEqual(400, resp.status_code)

    def test_clone_wo_release_id_in_body(self):
        """Missing "release_id" in the payload: 400."""
        self.skip("https://mirantis.jira.com/browse/OCTANE-124")
        post_body = {
            "name": "new_test_cluster"
        }
        resp = requests.post(self.endpoint, data=json.dumps(post_body),
                             headers=self.headers)
        self.assertEqual(400, resp.status_code)

    def test_clone_with_empty_body(self):
        """Empty request body: 400."""
        resp = requests.post(self.endpoint, data=None,
                             headers=self.headers)
        self.assertEqual(400, resp.status_code)

    def test_clone_with_too_long_name(self):
        """Over-long cluster name: 400."""
        self.skip("https://mirantis.jira.com/browse/OCTANE-124")
        release_id = self._get_deployable_release_id(1)
        post_body = {
            "name":
            "MANYMANYSYMBOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOLSSS",
            "release_id": release_id
        }
        resp = requests.post(self.endpoint, data=json.dumps(post_body),
                             headers=self.headers)
        self.assertEqual(400, resp.status_code)

    def test_clone_with_nonexistent_release_id(self):
        """Unknown release id: 404."""
        post_body = {
            "name": "new_test_cluster",
            "release_id": 123456
        }
        resp = requests.post(self.endpoint, data=json.dumps(post_body),
                             headers=self.headers)
        self.assertEqual(404, resp.status_code)

    def test_clone_with_incorrect_release_id(self):
        """Non-integer release id: 400."""
        post_body = {
            "name": "new_test_cluster",
            "release_id": "djigurda"
        }
        resp = requests.post(self.endpoint, data=json.dumps(post_body),
                             headers=self.headers)
        self.assertEqual(400, resp.status_code)
|
{
"content_hash": "34b7c501087808d5717017ca1a74fb1d",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 78,
"avg_line_length": 39.062780269058294,
"alnum_prop": 0.5486166915394329,
"repo_name": "Mirantis/octane",
"id": "349854088f18081708727bc0795ca64afde04f72",
"size": "9257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octane/tests/clone_env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "204473"
},
{
"name": "Shell",
"bytes": "33878"
}
],
"symlink_target": ""
}
|
"""
Gmail
Access Gmail mailboxes including sending user email.
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import gmail_client
from gmail_client.rest import ApiException
from gmail_client.models.history_message_deleted import HistoryMessageDeleted
class TestHistoryMessageDeleted(unittest.TestCase):
    """Generated unit-test stubs for the HistoryMessageDeleted model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testHistoryMessageDeleted(self):
        """Smoke test: the model class can be instantiated."""
        instance = gmail_client.models.history_message_deleted.HistoryMessageDeleted()
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "23c881ef3634461ea8c5aa9b617830f4",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 83,
"avg_line_length": 20.3,
"alnum_prop": 0.6945812807881774,
"repo_name": "CanopyIQ/gmail_client",
"id": "73f1ccf766534701dedd3b43b7e1228eb9b76abb",
"size": "829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_history_message_deleted.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "757232"
},
{
"name": "Shell",
"bytes": "1656"
}
],
"symlink_target": ""
}
|
import unittest
from cmdlinetool import util
class TestUtil(unittest.TestCase):
    """Tests for cmdlinetool.util.adder."""

    def test_adder(self):
        total = util.adder(3, 5)
        self.assertEqual(total, 8)

    def test_adder2(self):
        total = util.adder(-3, -5)
        self.assertEqual(total, -8)
|
{
"content_hash": "8b5d4e10ea1d6db5e20753d53cf3ab9e",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 48,
"avg_line_length": 23.1,
"alnum_prop": 0.670995670995671,
"repo_name": "tobiajo/cmdlinetool",
"id": "bbae527dbf03eaa350ec9bf2c3ba735accff1829",
"size": "231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmdlinetool/test/test_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "995"
}
],
"symlink_target": ""
}
|
import collections
import itertools
import unittest
import dontasq
class MethodsTest(unittest.TestCase):
    """Exercise LINQ-style methods that dontasq injects directly onto
    built-in and stdlib container types."""

    def test_builtins(self):
        doubled_evens = [1, 2, 4, 10, 20, 65] \
            .where(lambda n: n % 2 == 0) \
            .select(lambda n: n * 2) \
            .to_list()
        self.assertEqual([4, 8, 20, 40], doubled_evens)
        flattened = (-6, 2, 5) \
            .select_many(lambda n: [n, n ** 2]) \
            .to_tuple()
        self.assertEqual((-6, 36, 2, 4, 5, 25), flattened)
        by_prefix = 'Australia Canada Russia' \
            .split() \
            .to_dictionary(key_selector=lambda word: word[:2].upper())
        self.assertEqual({'AU': 'Australia',
                          'CA': 'Canada',
                          'RU': 'Russia'}, by_prefix)
        self.assertEqual(True, 'Formula1'.all(str.isalnum))

    def test_dict_methods(self):
        mapping = {12: 22, 20: 2, 30: 3, 88: 2}
        self.assertEqual(150, mapping.keys().sum())
        ordered_values = mapping.values() \
            .distinct() \
            .order_by() \
            .to_list()
        self.assertEqual([2, 3, 22], ordered_values)

    def test_collections(self):
        queue = collections.deque()
        queue.append(5)
        queue.appendleft(3)
        queue.appendleft(6)
        self.assertEqual([8, 5, 7],
                         queue.select(lambda n: n + 2).to_list())

    def test_itertools(self):
        first_six = itertools.count(1) \
            .select(lambda n: n * 3 - 2) \
            .take(6) \
            .to_list()
        self.assertEqual([1, 4, 7, 10, 13, 16], first_six)
class QueryTest(unittest.TestCase):
    """Exercise the explicit .query() entry point added by dontasq."""

    def test_builtins(self):
        doubled_evens = [1, 2, 4, 10, 20, 65].query() \
            .where(lambda n: n % 2 == 0) \
            .select(lambda n: n * 2) \
            .to_list()
        self.assertEqual([4, 8, 20, 40], doubled_evens)
        flattened = (-6, 2, 5).query() \
            .select_many(lambda n: [n, n ** 2]) \
            .to_tuple()
        self.assertEqual((-6, 36, 2, 4, 5, 25), flattened)
        by_prefix = 'Australia Canada Russia' \
            .split() \
            .query() \
            .to_dictionary(key_selector=lambda word: word[:2].upper())
        self.assertEqual({'AU': 'Australia',
                          'CA': 'Canada',
                          'RU': 'Russia'}, by_prefix)
        self.assertEqual(True, 'Formula1'.query().all(str.isalnum))
        self.assertEqual(['a', 'b', 'c', 'd', 'e', 'f'],
                         'abcdef'.query().to_list())
        sample = 'kgsfidj_ddf'
        self.assertEqual(len(sample), sample.query().count())

    def test_dict_methods(self):
        mapping = {12: 22, 20: 2, 30: 3, 88: 2}
        self.assertEqual(150, mapping.keys().query().sum())
        ordered_values = mapping.values().query() \
            .distinct() \
            .order_by() \
            .to_list()
        self.assertEqual([2, 3, 22], ordered_values)

    def test_collections(self):
        queue = collections.deque()
        queue.append(5)
        queue.appendleft(3)
        queue.appendleft(6)
        self.assertEqual([8, 5, 7],
                         queue.query().select(lambda n: n + 2).to_list())

    def test_itertools(self):
        first_six = itertools.count(1).query() \
            .select(lambda n: n * 3 - 2) \
            .take(6) \
            .to_list()
        self.assertEqual([1, 4, 7, 10, 13, 16], first_six)
class CollisionTest(unittest.TestCase):
    """Check name collisions: str.join keeps its built-in meaning while
    list gains the LINQ-style join."""

    def test_collision(self):
        self.assertEqual('London, Paris', ', '.join(['London', 'Paris']))
        paired = ['Masha', 'Alice'] \
            .join(['Ann', 'Misha'],
                  inner_key_selector=lambda name: name[0],
                  outer_key_selector=lambda name: name[0])
        self.assertEqual([('Masha', 'Misha'), ('Alice', 'Ann')], paired)
class ProblemsTest(unittest.TestCase):
    """Solve a sample LINQ-course problem using dontasq operators."""

    @staticmethod
    def get_most_frequent_words(text, count):
        """Problem from a LINQ course:
        https://ulearn.azurewebsites.net/Course/Linq

        Return *count* the most frequent words contained in *text* with their
        frequency. Prefer lexicographically lesser words among words with
        the same frequency. Compare words case-insensetively and output them
        in the lower case.
        """
        grouped = text.split().group_by(str.lower)
        frequencies = grouped.select(lambda g: (g.key, g.count()))
        ranked = frequencies \
            .order_by_descending(lambda pair: pair[1]) \
            .then_by(lambda pair: pair[0])
        return ranked.take(count).to_list()

    def test_most_frequent_words(self):
        text = ('A box of biscuits, a box of mixed biscuits, '
                'and a biscuit mixer.')
        top_words = ProblemsTest.get_most_frequent_words(text, 4)
        self.assertEqual([('a', 3), ('biscuits,', 2), ('box', 2), ('of', 2)],
                         top_words)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "e747c53affab44c7df42f8b9c91fdf6e",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 77,
"avg_line_length": 31.579234972677597,
"alnum_prop": 0.504585568437446,
"repo_name": "borzunov/dontasq",
"id": "ff2f4f4c4b3e0be83041ff43dec541ee070fe81b",
"size": "5826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_dontasq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11688"
}
],
"symlink_target": ""
}
|
"""
Test Cases para datos de demanda energética
"""
from unittest import TestCase
class TestsDemanda(TestCase):
    """Tests for the local store of energy-demand data.

    (Docstrings translated from Spanish.  These tests contact the live
    REE/esios web service, so they need network access.)
    """
    def test_create_db(self):
        """`force_update` re-download of the energy-demand data."""
        from esiosdata import DatosREE
        dem = DatosREE(force_update=True, verbose=True)
        print(dem)
        print(dem.last_entry())
        self.assertIsNotNone(dem.data['data'])
        self.assertEqual(dem.data['data'].empty, False)
    def test_attrs_dem(self):
        """Extra attributes of the energy-demand data object."""
        from esiosdata import DatosREE
        import pandas as pd
        dem = DatosREE(update=True, verbose=True)
        # url_data_dia accepts ISO strings, Timestamps and compact dates.
        url = dem.url_data_dia('2017-01-01')
        print(url)
        assert(len(url) > 0)
        url = dem.url_data_dia(pd.Timestamp('2017-01-01'))
        print(url)
        assert(len(url) > 0)
        url = dem.url_data_dia('20170101')
        print(url)
        assert(len(url) > 0)
        # Processing an empty payload for a day yields None.
        self.assertIsNone(dem.procesa_data_dia('2017-01-10', {})[0])
        dem.integridad_data()
    def test_data_dia(self):
        """Download and processing of DatosREE data for specific days."""
        from esiosdata.importdemdata import dem_data_dia
        # Dates before the service's coverage return None.
        data = dem_data_dia('2006-03-01')
        print(data)
        self.assertIsNone(data)
        data = dem_data_dia('2009-03-01')
        print(data)
        self.assertIsNone(data)
        data_1 = dem_data_dia('2015-03-01')
        print(data_1)
        self.assertIsNotNone(data_1)
        data_2 = dem_data_dia(str_dia='2017-01-22')
        print(data_2)
        self.assertIsNotNone(data_2)
        self.assertEqual(data_2['data_dias'].empty, False)
        # Date ranges: short recent range works, longer/older ranges fail.
        data = dem_data_dia('2015-10-22', '2015-10-27')
        print(data)
        self.assertIsNotNone(data)
        self.assertEqual(data['data_dias'].empty, False)
        data = dem_data_dia('2015-03-01', '2015-04-25')
        print(data)
        self.assertIsNone(data)
        # self.assertIsNotNone(data)
        # self.assertEqual(data['data_dias'].empty, False)
        data = dem_data_dia('2007-03-01', '2007-04-25')
        print(data)
        self.assertIsNone(data)
|
{
"content_hash": "13cc2010bfbe38d2559ca1377042c5c8",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 86,
"avg_line_length": 29.192307692307693,
"alnum_prop": 0.5972771190162495,
"repo_name": "azogue/esiosdata",
"id": "6cc73156cd80bcedb2740594ad8f7148a65a489b",
"size": "2307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_demanda.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8688"
},
{
"name": "Jupyter Notebook",
"bytes": "4067807"
},
{
"name": "Python",
"bytes": "126329"
}
],
"symlink_target": ""
}
|
import os
from flask import Flask
# WSGI application for the QA-badge webhook; `route` aliases the decorator.
application = Flask(__name__)
route = application.route
# NOTE(review): debug is set again inside main() — one of the two is redundant.
application.debug = True
@route('/qps/caiyun-backend-wrapper-new')
def index():
    """Trigger the remote perf-test binary over SSH and report "OK".

    Builds the path to the perf-test binary relative to the parent of the
    current working directory, then runs it on the bench host via ssh.
    """
    parent_dir = os.path.dirname(os.getcwd())
    wrapper_dir = os.path.join(parent_dir, 'caiyun-backend-wrapper-new')
    cmd = "%s/bin/perf-test" % wrapper_dir
    os.system("ssh caiyun@inner.bench.caiyunapp.com '%s'" % cmd)
    return "OK"
def main():
    """Run the webhook with Flask's built-in development server."""
    application.debug = True
    application.run()


if __name__ == "__main__":
    main()
|
{
"content_hash": "561de9ea0e38af1effe7c640da8bd2f2",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 84,
"avg_line_length": 23.19047619047619,
"alnum_prop": 0.6550308008213552,
"repo_name": "caiyunapp/qa-badge",
"id": "3bf549ada8cf7a793435e5398c2a90b2a177ec7d",
"size": "512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/webhook/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3086"
},
{
"name": "Shell",
"bytes": "1538"
}
],
"symlink_target": ""
}
|
# Exercise the local zoo module by calling its hours() function.
import zoo
zoo.hours()
|
{
"content_hash": "6610439bdd3fc57066b027c63d5c6ed8",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 11,
"avg_line_length": 6.25,
"alnum_prop": 0.68,
"repo_name": "sungkim-1029/coursework",
"id": "0083473160df1089b4ca885548c33a74ca754670",
"size": "49",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsc433/hw/zoouser1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "53345"
},
{
"name": "Python",
"bytes": "1124"
}
],
"symlink_target": ""
}
|
import os
import logging
import time
from .env import env
class Formatter(logging.Formatter):
    """Log formatter exposing %(delta)s: milliseconds since the last record.

    The first record's delta is measured from formatter construction.
    """

    def __init__(self, fmt):
        logging.Formatter.__init__(self, fmt)
        # Timestamp of the most recent format() call.
        self.last = time.time()

    def format(self, record):
        now = time.time()
        elapsed_ms = int((now - self.last) * 1000 // 1)
        record.delta = '%sms' % elapsed_ms
        self.last = now
        return logging.Formatter.format(self, record)
def setup_logging():
    """Enable DEBUG logging for the logger names listed in env.DEBUG.

    env.DEBUG is an os.pathsep-separated list of logger names; '*' selects
    the root logger. When env.DEBUG is falsy, a NullHandler is attached to
    the root logger and nothing else is configured.
    """
    if not env.DEBUG:
        logging.getLogger().addHandler(logging.NullHandler())
        return
    handler = logging.StreamHandler()
    handler.setFormatter(Formatter('[+%(delta)s|%(name)s] %(message)s'))
    for name in env.DEBUG.split(os.pathsep):
        # getLogger(None) returns the root logger.
        target = logging.getLogger(None if name == '*' else name)
        target.setLevel(logging.DEBUG)
        target.addHandler(handler)
|
{
"content_hash": "84496ab7d37a438df12d3ae8bce35462",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 24.07894736842105,
"alnum_prop": 0.614207650273224,
"repo_name": "srossross/stable.world",
"id": "8322ab95af5a16dc51c540ca784ff5be5f91b459",
"size": "915",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "stable_world/sw_logging.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "421"
},
{
"name": "Python",
"bytes": "196705"
},
{
"name": "Shell",
"bytes": "1146"
}
],
"symlink_target": ""
}
|
from conditions import *
"""Nodes for scons to gn
Nodes class provides a means to store, explore and write out a tree
describing the table generated by iterating over the scons file. The
tree is created in the form of:
TopNode
Condition
Object
Condition
Property
"""
# Map scons condition tags to the values compared against 'os' / 'cpu_arch'
# in the generated GN conditions (see Express below).
REMAP = {
  'arm' : 'arm',
  'AND' : 'android',
  'CHR' : 'chrome',
  'LIN' : 'linux',
  'MAC' : 'mac',
  'IOS' : 'ios',
  'WIN' : 'win',
  'x86' : 'x86',
  'x64' : 'x64',
  'bionic' : 'bionic',
  'glibc' : 'glibc',
  'newlib' : 'newlib',
  'pnacl' : 'pnacl'
}
def Express(use, avail, tag):
  """Build a GN boolean expression restricting *tag* to the *use* subset.

  Returns None when every available value is in use (no condition needed).
  Chooses whichever of the ==-list / !=-list forms needs fewer comparisons.
  """
  use = sorted([REMAP[x] for x in use])
  avail = sorted([REMAP[x] for x in avail])
  if use == avail:
    return None
  if len(use) == 1:
    return '(%s == "%s")' % (tag, use[0])
  # Negated form is shorter when more than half the values are in use.
  if len(use) > len(avail) - len(use):
    terms = ['%s != "%s"' % (tag, i) for i in avail if i not in use]
  else:
    terms = ['%s == "%s"' % (tag, i) for i in use]
  return '(' + ' || '.join(terms) + ')'
def Condition(os_use, os_avail, cpu_use, cpu_avail):
  """Build an 'if (...) {' opener covering the OS and CPU restrictions.

  CPU lists may arrive as space-separated strings (lists are unhashable,
  so callers sometimes pass joined strings); normalize them first.
  Returns None when no condition is required at all.
  """
  if isinstance(cpu_use, str):
    cpu_use = cpu_use.split(' ')
  if isinstance(cpu_avail, str):
    cpu_avail = cpu_avail.split(' ')
  os_cond = Express(os_use, os_avail, 'os')
  cpu_cond = Express(cpu_use, cpu_avail, 'cpu_arch')
  if os_cond and cpu_cond:
    return 'if (%s && %s) {' % (os_cond, cpu_cond)
  if os_cond:
    return 'if %s {' % os_cond
  if cpu_cond:
    return 'if %s {' % cpu_cond
  return None
#
# TopNode
# Condition
# Object
# Condition
# Property
#
class Node(object):
  """Base tree node: a name, an ordered child list and a parent pointer."""

  def __init__(self, name=''):
    self.children = []
    self.name = name
    self.parent = None

  def DumpInfo(self, depth=0):
    # Debugging aid: indented dump of node types and names.
    print('%s%s(%s)' % (' ' * depth, str(type(self)), self.name))
    for child in self.children:
      child.DumpInfo(depth + 1)

  def Write(self, fileobj, depth, text):
    # Emit *text* line by line at the requested indentation.
    for line in text.split('\n'):
      fileobj.write(' ' * depth + line + '\n')

  def Dump(self, fileobj, depth):
    # Template method: DumpStart may adjust the child indentation.
    adjust = self.DumpStart(fileobj, depth)
    last = len(self.children) - 1
    for idx, child in enumerate(self.children):
      self.DumpChild(fileobj, child, adjust)
      if idx != last:
        fileobj.write('\n')
    self.DumpEnd(fileobj, depth)

  def DumpStart(self, fileobj, depth):
    self.Write(fileobj, depth, self.name)
    return depth

  def DumpEnd(self, fileobj, depth):
    pass

  def DumpChild(self, fileobj, child, depth):
    child.Dump(fileobj, depth)

  def AddChild(self, child):
    self.children.append(child)
    child.parent = self

  def Examine(self, obj):
    # Visitor traversal: Enter, recurse into children, Exit.
    obj.Enter(self)
    for child in self.children:
      child.Examine(obj)
    obj.Exit(self)
class TopNode(Node):
  """Root of the tree; emits an autogenerated-file banner instead of a name."""

  def __init__(self, name):
    Node.__init__(self, name)

  def DumpStart(self, fileobj, depth):
    self.Write(fileobj, depth, "# Autogenerated from %s.\n\n" % self.name)
    return depth
class ConditionNode(Node):
  """Optional 'if (...) {' wrapper; transparent when no condition applies."""

  def __init__(self, os_use, os_avail, cpu_use, cpu_avail):
    # Condition() returns None when the whole os/cpu space is covered.
    Node.__init__(self, Condition(os_use, os_avail, cpu_use, cpu_avail))

  def Dump(self, fileobj, depth):
    wrapped = bool(self.name)
    if wrapped:
      self.Write(fileobj, depth, self.name)
      depth += 1
    for child in self.children:
      child.Dump(fileobj, depth)
    if wrapped:
      self.Write(fileobj, depth - 1, '}')
class ObjectNode(Node):
  """A GN target, e.g. source_set("name") { ... }."""

  def __init__(self, name, obj_type):
    Node.__init__(self, name)
    self.obj_type = obj_type
    self.conditional = set()    # property names seen only under conditions
    self.unconditional = set()  # property names seen at the top level

  def DumpStart(self, fileobj, depth):
    self.Write(fileobj, depth, '%s("%s") {' % (self.obj_type, self.name))
    depth += 1
    # Conditional-only properties must exist before '+=' appends to them.
    for prop in self.conditional:
      if prop not in self.unconditional:
        self.Write(fileobj, depth, '%s = []' % prop)
    return depth

  def DumpEnd(self, fileobj, depth):
    self.Write(fileobj, depth, '}')
class PropertyNode(Node):
  """A list-valued property: '+=' inside a condition, '=' at top level."""

  def __init__(self, name):
    Node.__init__(self, name)

  def Dump(self, fileobj, depth):
    # A named parent means we are inside an 'if (...) {' block.
    op = '+=' if self.parent.name else '='
    self.Write(fileobj, depth, '%s %s [' % (self.name, op))
    for child in self.children:
      child.Dump(fileobj, depth + 1)
    self.Write(fileobj, depth, ']')
class ValueNode(Node):
  """A quoted literal element of a property list."""

  def __init__(self, name):
    Node.__init__(self, name)

  def Dump(self, fileobj, depth):
    self.Write(fileobj, depth, '"%s",' % self.name)
class OrganizeProperties(object):
  """Visitor recording conditional vs. top-level properties, sorting values."""

  def __init__(self):
    self.cond = None  # name of the condition currently in effect (or None)
    self.obj = None   # ObjectNode currently being traversed

  def Enter(self, node):
    if isinstance(node, ObjectNode):
      self.obj = node
    if isinstance(node, ConditionNode):
      self.cond = node.name
    if isinstance(node, PropertyNode):
      if self.cond is None:
        self.obj.unconditional.add(node.name)
      else:
        self.obj.conditional.add(node.name)
      # Keep property values in deterministic (sorted) order.
      node.children.sort(key=lambda child: child.name)

  def Exit(self, node):
    pass
|
{
"content_hash": "7774f24fa22d98cb847dbe0b3674e0ef",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 75,
"avg_line_length": 23.832558139534882,
"alnum_prop": 0.5977751756440282,
"repo_name": "yantrabuddhi/nativeclient",
"id": "f103f09bc75b79ec8bb1515d643cf638b8d0aeb0",
"size": "5296",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/scons_to_gn/nodes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "204107"
},
{
"name": "Batchfile",
"bytes": "10099"
},
{
"name": "C",
"bytes": "8116723"
},
{
"name": "C++",
"bytes": "7487748"
},
{
"name": "HTML",
"bytes": "127706"
},
{
"name": "JavaScript",
"bytes": "5925"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Makefile",
"bytes": "19801"
},
{
"name": "Objective-C++",
"bytes": "2658"
},
{
"name": "Python",
"bytes": "2809873"
},
{
"name": "Ragel in Ruby Host",
"bytes": "105121"
},
{
"name": "Shell",
"bytes": "174704"
}
],
"symlink_target": ""
}
|
from multiverse.server.util import *
from multiverse.simpleclient import *
from java.lang import *
# PlayerClient instance
Log.debug("playerclient.py starting PlayerThread");
# Alternative: spawn at a fixed position inside a bounding polygon.
#playerClient = PlayerClient("--zero_y --position (63505,71222,300303) --polygon 54848,315218,53685,284092,-69679,284014,-69527,314322")
# Start the simple client at a random location.
playerClient = PlayerClient("--random_start")
Log.debug("completed playerclient.py")
|
{
"content_hash": "a76747d9ba05e56ad7de9fb3cfdddd02",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 136,
"avg_line_length": 33.416666666666664,
"alnum_prop": 0.7780548628428927,
"repo_name": "longde123/MultiversePlatform",
"id": "1f144e882cbebfa434c911807ffd6d28a35da1c1",
"size": "1619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/config/common/playerclient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "1148"
},
{
"name": "Batchfile",
"bytes": "56002"
},
{
"name": "C",
"bytes": "2958956"
},
{
"name": "C#",
"bytes": "11292123"
},
{
"name": "C++",
"bytes": "428039"
},
{
"name": "CSS",
"bytes": "107446"
},
{
"name": "Groff",
"bytes": "3653"
},
{
"name": "HTML",
"bytes": "767415"
},
{
"name": "Inno Setup",
"bytes": "2093"
},
{
"name": "Java",
"bytes": "4444010"
},
{
"name": "JavaScript",
"bytes": "115349"
},
{
"name": "Makefile",
"bytes": "35639"
},
{
"name": "Matlab",
"bytes": "2076"
},
{
"name": "Objective-C",
"bytes": "44581"
},
{
"name": "Perl",
"bytes": "6299"
},
{
"name": "Python",
"bytes": "4648545"
},
{
"name": "Scheme",
"bytes": "48864"
},
{
"name": "Shell",
"bytes": "880494"
},
{
"name": "XSLT",
"bytes": "1834"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import json
import logging
import warnings
from json import JSONDecodeError
from urllib.parse import parse_qsl, quote, unquote, urlencode, urlsplit
from sqlalchemy import Boolean, Column, Integer, String, Text
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import reconstructor, synonym
from airflow.configuration import ensure_secrets_loaded
from airflow.exceptions import AirflowException, AirflowNotFoundException, RemovedInAirflow3Warning
from airflow.models.base import ID_LEN, Base
from airflow.models.crypto import get_fernet
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.log.secrets_masker import mask_secret
from airflow.utils.module_loading import import_string
log = logging.getLogger(__name__)
def parse_netloc_to_hostname(*args, **kwargs):
    """This method is deprecated."""
    # Deprecated public alias kept for backward compatibility; delegate to
    # the private implementation after emitting the removal warning.
    warnings.warn("This method is deprecated.", RemovedInAirflow3Warning)
    return _parse_netloc_to_hostname(*args, **kwargs)
# Python automatically converts all letters to lowercase in hostname
# See: https://issues.apache.org/jira/browse/AIRFLOW-3615
def _parse_netloc_to_hostname(uri_parts):
"""Parse a URI string to get correct Hostname."""
hostname = unquote(uri_parts.hostname or "")
if "/" in hostname:
hostname = uri_parts.netloc
if "@" in hostname:
hostname = hostname.rsplit("@", 1)[1]
if ":" in hostname:
hostname = hostname.split(":", 1)[0]
hostname = unquote(hostname)
return hostname
class Connection(Base, LoggingMixin):
    """
    Placeholder to store information about different database instances
    connection information. The idea here is that scripts use references to
    database instances (conn_id) instead of hard coding hostname, logins and
    passwords when using operators or hooks.

    .. seealso::
        For more information on how to use this class, see: :doc:`/howto/connection`

    :param conn_id: The connection ID.
    :param conn_type: The connection type.
    :param description: The connection description.
    :param host: The host.
    :param login: The login.
    :param password: The password.
    :param schema: The schema.
    :param port: The port number.
    :param extra: Extra metadata. Non-standard data such as private/SSH keys can be saved here. JSON
        encoded object.
    :param uri: URI address describing connection parameters.
    """

    # Query-string key used to round-trip non-URL-encodable `extra` payloads.
    EXTRA_KEY = "__extra__"

    __tablename__ = "connection"

    id = Column(Integer(), primary_key=True)
    conn_id = Column(String(ID_LEN), unique=True, nullable=False)
    conn_type = Column(String(500), nullable=False)
    description = Column(Text().with_variant(Text(5000), "mysql").with_variant(String(5000), "sqlite"))
    host = Column(String(500))
    schema = Column(String(500))
    login = Column(String(500))
    _password = Column("password", String(5000))
    port = Column(Integer())
    is_encrypted = Column(Boolean, unique=False, default=False)
    is_extra_encrypted = Column(Boolean, unique=False, default=False)
    _extra = Column("extra", Text())

    def __init__(
        self,
        conn_id: str | None = None,
        conn_type: str | None = None,
        description: str | None = None,
        host: str | None = None,
        login: str | None = None,
        password: str | None = None,
        schema: str | None = None,
        port: int | None = None,
        extra: str | dict | None = None,
        uri: str | None = None,
    ):
        super().__init__()
        self.conn_id = conn_id
        self.description = description
        if extra and not isinstance(extra, str):
            extra = json.dumps(extra)
        # A URI and individual fields are mutually exclusive ways to build one.
        if uri and (conn_type or host or login or password or schema or port or extra):
            raise AirflowException(
                "You must create an object using the URI or individual values "
                "(conn_type, host, login, password, schema, port or extra)."
                "You can't mix these two ways to create this object."
            )
        if uri:
            self._parse_from_uri(uri)
        else:
            self.conn_type = conn_type
            self.host = host
            self.login = login
            self.password = password
            self.schema = schema
            self.port = port
            self.extra = extra
        if self.extra:
            self._validate_extra(self.extra, self.conn_id)

        if self.password:
            mask_secret(self.password)

    @staticmethod
    def _validate_extra(extra, conn_id) -> None:
        """
        Here we verify that ``extra`` is a JSON-encoded Python dict. From Airflow 3.0, we should no
        longer suppress these errors but raise instead.
        """
        if extra is None:
            return None
        try:
            extra_parsed = json.loads(extra)
            if not isinstance(extra_parsed, dict):
                warnings.warn(
                    "Encountered JSON value in `extra` which does not parse as a dictionary in "
                    f"connection {conn_id!r}. From Airflow 3.0, the `extra` field must contain a JSON "
                    "representation of a Python dict.",
                    RemovedInAirflow3Warning,
                    stacklevel=3,
                )
        except json.JSONDecodeError:
            warnings.warn(
                f"Encountered non-JSON in `extra` field for connection {conn_id!r}. Support for "
                "non-JSON `extra` will be removed in Airflow 3.0",
                RemovedInAirflow3Warning,
                stacklevel=2,
            )
        return None

    @reconstructor
    def on_db_load(self):
        # Mask the password when the row is loaded from the DB (reconstructor
        # runs instead of __init__ for ORM-loaded instances).
        if self.password:
            mask_secret(self.password)

    def parse_from_uri(self, **uri):
        """This method is deprecated. Please use uri parameter in constructor."""
        warnings.warn(
            "This method is deprecated. Please use uri parameter in constructor.",
            RemovedInAirflow3Warning,
        )
        self._parse_from_uri(**uri)

    @staticmethod
    def _normalize_conn_type(conn_type):
        # Normalize scheme spellings: 'postgresql' -> 'postgres',
        # dashes -> underscores (URI schemes cannot contain '_').
        if conn_type == "postgresql":
            conn_type = "postgres"
        elif "-" in conn_type:
            conn_type = conn_type.replace("-", "_")
        return conn_type

    def _parse_from_uri(self, uri: str):
        """Populate connection fields from a URI string."""
        uri_parts = urlsplit(uri)
        conn_type = uri_parts.scheme
        self.conn_type = self._normalize_conn_type(conn_type)
        self.host = _parse_netloc_to_hostname(uri_parts)
        quoted_schema = uri_parts.path[1:]
        self.schema = unquote(quoted_schema) if quoted_schema else quoted_schema
        self.login = unquote(uri_parts.username) if uri_parts.username else uri_parts.username
        self.password = unquote(uri_parts.password) if uri_parts.password else uri_parts.password
        self.port = uri_parts.port
        if uri_parts.query:
            query = dict(parse_qsl(uri_parts.query, keep_blank_values=True))
            if self.EXTRA_KEY in query:
                self.extra = query[self.EXTRA_KEY]
            else:
                self.extra = json.dumps(query)

    def get_uri(self) -> str:
        """Return connection in URI format"""
        if "_" in self.conn_type:
            self.log.warning(
                "Connection schemes (type: %s) shall not contain '_' according to RFC3986.",
                self.conn_type,
            )

        uri = f"{str(self.conn_type).lower().replace('_', '-')}://"

        authority_block = ""
        if self.login is not None:
            authority_block += quote(self.login, safe="")

        if self.password is not None:
            authority_block += ":" + quote(self.password, safe="")

        # FIX: was `if authority_block > ""` (lexicographic comparison);
        # plain truthiness is the intended non-empty check.
        if authority_block:
            authority_block += "@"
            uri += authority_block

        host_block = ""
        if self.host:
            host_block += quote(self.host, safe="")

        if self.port:
            if host_block == "" and authority_block == "":
                host_block += f"@:{self.port}"
            else:
                host_block += f":{self.port}"

        if self.schema:
            host_block += f"/{quote(self.schema, safe='')}"

        uri += host_block

        if self.extra:
            try:
                query: str | None = urlencode(self.extra_dejson)
            except TypeError:
                query = None
            # Only inline the query form when it round-trips losslessly;
            # otherwise stash the raw extra under EXTRA_KEY.
            if query and self.extra_dejson == dict(parse_qsl(query, keep_blank_values=True)):
                uri += ("?" if self.schema else "/?") + query
            else:
                uri += ("?" if self.schema else "/?") + urlencode({self.EXTRA_KEY: self.extra})

        return uri

    def get_password(self) -> str | None:
        """Return encrypted password."""
        if self._password and self.is_encrypted:
            fernet = get_fernet()
            if not fernet.is_encrypted:
                raise AirflowException(
                    f"Can't decrypt encrypted password for login={self.login} "
                    f"FERNET_KEY configuration is missing"
                )
            return fernet.decrypt(bytes(self._password, "utf-8")).decode()
        else:
            return self._password

    def set_password(self, value: str | None):
        """Encrypt password and set in object attribute."""
        if value:
            fernet = get_fernet()
            self._password = fernet.encrypt(bytes(value, "utf-8")).decode()
            self.is_encrypted = fernet.is_encrypted

    @declared_attr
    def password(cls):
        """Password. The value is decrypted/encrypted when reading/setting the value."""
        return synonym("_password", descriptor=property(cls.get_password, cls.set_password))

    def get_extra(self) -> dict:
        """Return encrypted extra-data."""
        if self._extra and self.is_extra_encrypted:
            fernet = get_fernet()
            if not fernet.is_encrypted:
                raise AirflowException(
                    f"Can't decrypt `extra` params for login={self.login}, "
                    f"FERNET_KEY configuration is missing"
                )
            extra_val = fernet.decrypt(bytes(self._extra, "utf-8")).decode()
        else:
            extra_val = self._extra
        if extra_val:
            self._validate_extra(extra_val, self.conn_id)
        return extra_val

    def set_extra(self, value: str):
        """Encrypt extra-data and save in object attribute to object."""
        if value:
            self._validate_extra(value, self.conn_id)
            fernet = get_fernet()
            self._extra = fernet.encrypt(bytes(value, "utf-8")).decode()
            self.is_extra_encrypted = fernet.is_encrypted
        else:
            self._extra = value
            self.is_extra_encrypted = False

    @declared_attr
    def extra(cls):
        """Extra data. The value is decrypted/encrypted when reading/setting the value."""
        return synonym("_extra", descriptor=property(cls.get_extra, cls.set_extra))

    def rotate_fernet_key(self):
        """Encrypts data with a new key. See: :ref:`security/fernet`"""
        fernet = get_fernet()
        if self._password and self.is_encrypted:
            self._password = fernet.rotate(self._password.encode("utf-8")).decode()
        if self._extra and self.is_extra_encrypted:
            self._extra = fernet.rotate(self._extra.encode("utf-8")).decode()

    def get_hook(self, *, hook_params=None):
        """Return hook based on conn_type"""
        from airflow.providers_manager import ProvidersManager

        hook = ProvidersManager().hooks.get(self.conn_type, None)

        if hook is None:
            raise AirflowException(f'Unknown hook type "{self.conn_type}"')
        try:
            hook_class = import_string(hook.hook_class_name)
        except ImportError:
            # BUG FIX: warnings.warn() does not accept %-style lazy args the
            # way logging does — the second positional argument is the warning
            # *category*, so the old call raised TypeError instead of warning.
            warnings.warn(
                f"Could not import {hook.hook_class_name} when discovering "
                f"{hook.hook_name} {hook.package_name}"
            )
            raise
        if hook_params is None:
            hook_params = {}
        return hook_class(**{hook.connection_id_attribute_name: self.conn_id}, **hook_params)

    def __repr__(self):
        return self.conn_id or ""

    def log_info(self):
        """
        This method is deprecated. You can read each field individually or use the
        default representation (`__repr__`).
        """
        warnings.warn(
            "This method is deprecated. You can read each field individually or "
            "use the default representation (__repr__).",
            RemovedInAirflow3Warning,
            stacklevel=2,
        )
        return (
            f"id: {self.conn_id}. Host: {self.host}, Port: {self.port}, Schema: {self.schema}, "
            f"Login: {self.login}, Password: {'XXXXXXXX' if self.password else None}, "
            f"extra: {'XXXXXXXX' if self.extra_dejson else None}"
        )

    def debug_info(self):
        """
        This method is deprecated. You can read each field individually or use the
        default representation (`__repr__`).
        """
        warnings.warn(
            "This method is deprecated. You can read each field individually or "
            "use the default representation (__repr__).",
            RemovedInAirflow3Warning,
            stacklevel=2,
        )
        return (
            f"id: {self.conn_id}. Host: {self.host}, Port: {self.port}, Schema: {self.schema}, "
            f"Login: {self.login}, Password: {'XXXXXXXX' if self.password else None}, "
            f"extra: {self.extra_dejson}"
        )

    def test_connection(self):
        """Calls out get_hook method and executes test_connection method on that."""
        status, message = False, ""
        try:
            hook = self.get_hook()
            if getattr(hook, "test_connection", False):
                status, message = hook.test_connection()
            else:
                message = (
                    f"Hook {hook.__class__.__name__} doesn't implement or inherit test_connection method"
                )
        except Exception as e:
            message = str(e)

        return status, message

    @property
    def extra_dejson(self) -> dict:
        """Returns the extra property by deserializing json."""
        obj = {}
        if self.extra:
            try:
                obj = json.loads(self.extra)

            except JSONDecodeError:
                self.log.exception("Failed parsing the json for conn_id %s", self.conn_id)

            # Mask sensitive keys from this list
            mask_secret(obj)

        return obj

    @classmethod
    def get_connection_from_secrets(cls, conn_id: str) -> Connection:
        """
        Get connection by conn_id.

        :param conn_id: connection id
        :return: connection
        """
        # Try each configured secrets backend in order; a failing backend is
        # logged and skipped rather than aborting the whole lookup.
        for secrets_backend in ensure_secrets_loaded():
            try:
                conn = secrets_backend.get_connection(conn_id=conn_id)
                if conn:
                    return conn
            except Exception:
                log.exception(
                    "Unable to retrieve connection from secrets backend (%s). "
                    "Checking subsequent secrets backend.",
                    type(secrets_backend).__name__,
                )
        raise AirflowNotFoundException(f"The conn_id `{conn_id}` isn't defined")

    @classmethod
    def from_json(cls, value, conn_id=None) -> Connection:
        """Build a Connection from a JSON-serialized dict of its fields."""
        kwargs = json.loads(value)
        extra = kwargs.pop("extra", None)
        if extra:
            kwargs["extra"] = extra if isinstance(extra, str) else json.dumps(extra)
        conn_type = kwargs.pop("conn_type", None)
        if conn_type:
            kwargs["conn_type"] = cls._normalize_conn_type(conn_type)
        port = kwargs.pop("port", None)
        if port:
            try:
                kwargs["port"] = int(port)
            except ValueError:
                raise ValueError(f"Expected integer value for `port`, but got {port!r} instead.")
        return Connection(conn_id=conn_id, **kwargs)
|
{
"content_hash": "73851a14a6f099a530c0288059fb93ae",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 105,
"avg_line_length": 37.38425925925926,
"alnum_prop": 0.5793808049535604,
"repo_name": "apache/airflow",
"id": "73a284be9e5ef05d62875110a7f478d0a883dfcc",
"size": "16937",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/models/connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
}
|
# Run at IPython startup: append the repository root (parent of this
# examples/ directory) to sys.path so the in-tree package is importable.
# NOTE(review): assumes `__file__` is defined when IPython exec's this line
# in the interactive namespace — confirm for the target IPython version.
c.InteractiveShellApp.exec_lines = [ # noqa
    'from pathlib import Path; import sys; sys.path.append(str((Path(__file__).parent / "..").resolve()))'
]
|
{
"content_hash": "d113732b9f4c13d7939b5480106581bc",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 106,
"avg_line_length": 52.333333333333336,
"alnum_prop": 0.6369426751592356,
"repo_name": "nok/sklearn-porter",
"id": "651007d6572dde7fc51a4f4c289f5492c9243d50",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/.ipython_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "194988"
}
],
"symlink_target": ""
}
|
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import (
AutoMlImageClassification,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import (
AutoMlImageClassificationInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import (
AutoMlImageClassificationMetadata,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import (
AutoMlImageObjectDetection,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import (
AutoMlImageObjectDetectionInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import (
AutoMlImageObjectDetectionMetadata,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import (
AutoMlImageSegmentation,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import (
AutoMlImageSegmentationInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import (
AutoMlImageSegmentationMetadata,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import (
AutoMlTables,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import (
AutoMlTablesInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import (
AutoMlTablesMetadata,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import (
AutoMlTextClassification,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import (
AutoMlTextClassificationInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import (
AutoMlTextExtraction,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import (
AutoMlTextExtractionInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import (
AutoMlTextSentiment,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import (
AutoMlTextSentimentInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import (
AutoMlVideoActionRecognition,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import (
AutoMlVideoActionRecognitionInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import (
AutoMlVideoClassification,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import (
AutoMlVideoClassificationInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import (
AutoMlVideoObjectTracking,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import (
AutoMlVideoObjectTrackingInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.export_evaluated_data_items_config import (
ExportEvaluatedDataItemsConfig,
)
# Public API of this package: re-exported AutoML training-job schema types.
__all__ = (
    "AutoMlImageClassification",
    "AutoMlImageClassificationInputs",
    "AutoMlImageClassificationMetadata",
    "AutoMlImageObjectDetection",
    "AutoMlImageObjectDetectionInputs",
    "AutoMlImageObjectDetectionMetadata",
    "AutoMlImageSegmentation",
    "AutoMlImageSegmentationInputs",
    "AutoMlImageSegmentationMetadata",
    "AutoMlTables",
    "AutoMlTablesInputs",
    "AutoMlTablesMetadata",
    "AutoMlTextClassification",
    "AutoMlTextClassificationInputs",
    "AutoMlTextExtraction",
    "AutoMlTextExtractionInputs",
    "AutoMlTextSentiment",
    "AutoMlTextSentimentInputs",
    "AutoMlVideoActionRecognition",
    "AutoMlVideoActionRecognitionInputs",
    "AutoMlVideoClassification",
    "AutoMlVideoClassificationInputs",
    "AutoMlVideoObjectTracking",
    "AutoMlVideoObjectTrackingInputs",
    "ExportEvaluatedDataItemsConfig",
)
|
{
"content_hash": "d1882b260e414102cf84ce4d3e9e4a59",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 114,
"avg_line_length": 42.62135922330097,
"alnum_prop": 0.8136674259681094,
"repo_name": "sasha-gitg/python-aiplatform",
"id": "bd4624d83b2d2a584a7bf5612576ad741c5b01f5",
"size": "4992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "11216304"
},
{
"name": "Shell",
"bytes": "30838"
}
],
"symlink_target": ""
}
|
import os
import shutil
import sys
import time
from unittest.mock import Mock
import pytest
import requests
from tornado import ioloop
from sudospawner import SudoSpawner
@pytest.fixture(scope="module")
def io_loop(request):
    """Same as pytest-tornado.io_loop, but re-scoped to module-level"""
    loop = ioloop.IOLoop()
    loop.make_current()

    def _teardown():
        loop.clear_current()
        loop.close(all_fds=True)

    request.addfinalizer(_teardown)
    return loop
@pytest.fixture
def user():
    """pytest fixture creating a mock user"""
    mock_user = Mock()
    # Set the attribute after construction: Mock(name=...) would configure
    # the mock's repr name instead of a `.name` attribute.
    mock_user.name = "lutier"
    return mock_user
# Shell stub written over `sudospawner-singleuser`: re-execs the current
# Python interpreter with the mock server module instead of a real server.
_mock_server_sh = """
#!/bin/sh
exec "{}" -m sudospawner.tests.mockserver "$@"
""".format(
    sys.executable
).lstrip()
@pytest.fixture(autouse=True)
def mockserver(request):
    """Replace the installed `sudospawner-singleuser` script with the mock
    server stub for the duration of a test, restoring any original afterwards.
    """
    script_dir = os.path.dirname(shutil.which("sudospawner"))
    sudospawner_singleuser = os.path.join(script_dir, "sudospawner-singleuser")
    # Preserve a pre-existing script so it can be restored on teardown.
    if os.path.exists(sudospawner_singleuser):
        restore_existing = True
        shutil.move(sudospawner_singleuser, sudospawner_singleuser + ".save")
    else:
        restore_existing = False
    # Write the executable stub (mode 0755) in its place.
    with open(sudospawner_singleuser, "w") as f:
        os.fchmod(f.fileno(), 0o755)
        f.write(_mock_server_sh)
    def restore_singleuser():
        # Undo the swap: put back the saved script or remove the stub.
        if restore_existing:
            shutil.move(sudospawner_singleuser + ".save", sudospawner_singleuser)
        else:
            os.remove(sudospawner_singleuser)
    request.addfinalizer(restore_singleuser)
class MockSudoSpawner(SudoSpawner):
    """SudoSpawner variant for tests: no sudo, full parent environment."""

    def get_env(self):
        # Tests need the unfiltered parent environment plus any overrides.
        merged = {**os.environ, **self.environment}
        return merged

    def do(self, *args, **kwargs):
        # Run the mediator directly instead of through sudo.
        kwargs['_skip_sudo'] = True
        return super().do(*args, **kwargs)
@pytest.mark.gen_test
def test_spawn(user):
    """Start the mock server, verify it responds, and stop it cleanly."""
    spawner = MockSudoSpawner(user=user)
    ip, port = yield spawner.start()
    pid = spawner.pid
    # A None poll() result means the process is still running.
    status = yield spawner.poll()
    assert status is None
    url = "http://{}:{}".format(ip, port)
    r = requests.get(url)
    r.raise_for_status()
    yield spawner.stop()
    # check that the process is gone
    with pytest.raises(ProcessLookupError):
        os.kill(pid, 0)
@pytest.mark.gen_test
def test_poll(user):
    """Kill the spawned process externally and check poll() reports its exit."""
    spawner = MockSudoSpawner(user=user)
    ip, port = yield spawner.start()
    pid = spawner.pid
    status = yield spawner.poll()
    assert status is None
    os.kill(pid, 9)
    # Wait (up to ~10s) for the process to disappear: os.kill(pid, 0) probes
    # existence; try/else sleeps while the process is still alive.
    for i in range(10):
        try:
            os.kill(pid, 0)
        except ProcessLookupError:
            break
        else:
            time.sleep(1)
    status = yield spawner.poll()
    assert isinstance(status, int)
@pytest.mark.gen_test(timeout=10)
def test_env(user):
    """Check that spawner.environment entries reach the spawned process."""
    spawner = MockSudoSpawner(user=user)
    spawner.environment["TEST_KEY"] = "TEST_VALUE"
    ip, port = yield spawner.start()
    status = yield spawner.poll()
    time.sleep(1)
    assert status is None
    # The mock server exposes its environment as JSON at /env.
    url = "http://{}:{}/env".format(ip, port)
    r = requests.get(url)
    yield spawner.stop()
    r.raise_for_status()
    env = r.json()
    assert "TEST_KEY" in env
    assert env["TEST_KEY"] == "TEST_VALUE"
|
{
"content_hash": "40ee46986afeb0c59bb6883c86dc935c",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 81,
"avg_line_length": 24.5625,
"alnum_prop": 0.6393129770992366,
"repo_name": "jupyter/sudospawner",
"id": "0a98c8884a6c3b1e2bd9c389cab6e4dc16d6dd72",
"size": "3144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sudospawner/tests/test_sudospawner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9894"
}
],
"symlink_target": ""
}
|
from django.db.models import Q
from .choices import FHRPGroupProtocolChoices, IPAddressRoleChoices
# BGP ASN bounds
BGP_ASN_MIN = 1
BGP_ASN_MAX = 2**32 - 1
#
# VRFs
#
# Per RFC 4364 section 4.2, a route distinguisher may be encoded as one of the following:
#   * Type 0 (16-bit AS number : 32-bit integer)
#   * Type 1 (32-bit IPv4 address : 16-bit integer)
#   * Type 2 (32-bit AS number : 16-bit integer)
# 21 characters are sufficient to convey the longest possible string value (255.255.255.255:65535)
# Also used for RouteTargets
VRF_RD_MAX_LENGTH = 21
#
# Prefixes
#
PREFIX_LENGTH_MIN = 1
PREFIX_LENGTH_MAX = 127  # IPv6
#
# IPAddresses
#
# ContentType filter: object types an IPAddress may be assigned to.
IPADDRESS_ASSIGNMENT_MODELS = Q(
    Q(app_label='dcim', model='interface') |
    Q(app_label='ipam', model='fhrpgroup') |
    Q(app_label='virtualization', model='vminterface')
)
IPADDRESS_MASK_LENGTH_MIN = 1
IPADDRESS_MASK_LENGTH_MAX = 128  # IPv6
IPADDRESS_ROLES_NONUNIQUE = (
    # IPAddress roles which are exempt from unique address enforcement
    IPAddressRoleChoices.ROLE_ANYCAST,
    IPAddressRoleChoices.ROLE_VIP,
    IPAddressRoleChoices.ROLE_VRRP,
    IPAddressRoleChoices.ROLE_HSRP,
    IPAddressRoleChoices.ROLE_GLBP,
    IPAddressRoleChoices.ROLE_CARP,
)
#
# FHRP groups
#
FHRPGROUPASSIGNMENT_PRIORITY_MIN = 0
FHRPGROUPASSIGNMENT_PRIORITY_MAX = 255
# IP address role implied by each FHRP protocol choice.
FHRP_PROTOCOL_ROLE_MAPPINGS = {
    FHRPGroupProtocolChoices.PROTOCOL_VRRP2: IPAddressRoleChoices.ROLE_VRRP,
    FHRPGroupProtocolChoices.PROTOCOL_VRRP3: IPAddressRoleChoices.ROLE_VRRP,
    FHRPGroupProtocolChoices.PROTOCOL_HSRP: IPAddressRoleChoices.ROLE_HSRP,
    FHRPGroupProtocolChoices.PROTOCOL_GLBP: IPAddressRoleChoices.ROLE_GLBP,
    FHRPGroupProtocolChoices.PROTOCOL_CARP: IPAddressRoleChoices.ROLE_CARP,
    FHRPGroupProtocolChoices.PROTOCOL_OTHER: IPAddressRoleChoices.ROLE_VIP,
}
#
# VLANs
#
# 12-bit VLAN ID (values 0 and 4095 are reserved)
VLAN_VID_MIN = 1
VLAN_VID_MAX = 4094
# models values for ContentTypes which may be VLANGroup scope types
VLANGROUP_SCOPE_TYPES = (
    'region', 'sitegroup', 'site', 'location', 'rack', 'clustergroup', 'cluster',
)
#
# Services
#
# 16-bit port number
SERVICE_PORT_MIN = 1
SERVICE_PORT_MAX = 65535
|
{
"content_hash": "ac93a29ce88a4a2cabe4fd3b53173601",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 98,
"avg_line_length": 23.75,
"alnum_prop": 0.7386727688787186,
"repo_name": "digitalocean/netbox",
"id": "ab88dfc1a963636f548c15e667ea8491a6833362",
"size": "2185",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/ipam/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189339"
},
{
"name": "HTML",
"bytes": "570800"
},
{
"name": "JavaScript",
"bytes": "326125"
},
{
"name": "Python",
"bytes": "1815170"
},
{
"name": "Shell",
"bytes": "2786"
}
],
"symlink_target": ""
}
|
import hashlib
import hmac
import sys
# Python 2/3 compatibility: canonical text type for isinstance checks.
if sys.version_info[0] == 2:
    text_type = unicode  # noqa: F821 -- only defined on Python 2
else:
    text_type = str
class Crypto:
    """Keyed-HMAC digest helpers and a non-short-circuiting string compare."""
    @staticmethod
    def _to_bytes(value):
        """Encode text to ASCII bytes; return non-text values unchanged."""
        if isinstance(value, text_type):
            return value.encode('ascii')
        return value
    @staticmethod
    def sha1_hmac_hash(secret_key, content):
        """Return hex HMAC-SHA1 of *content*, keyed with SHA1(secret_key)."""
        secret_key = Crypto._to_bytes(secret_key)
        content = Crypto._to_bytes(content)
        return hmac.new(hashlib.sha1(secret_key).digest(), content, hashlib.sha1).hexdigest()
    @staticmethod
    def sha256_hmac_hash(secret_key, content):
        """Return hex HMAC-SHA256 of *content*, keyed with SHA256(secret_key)."""
        secret_key = Crypto._to_bytes(secret_key)
        content = Crypto._to_bytes(content)
        return hmac.new(hashlib.sha256(secret_key).digest(), content, hashlib.sha256).hexdigest()
    @staticmethod
    def secure_compare(left, right):
        """Compare two strings without short-circuiting on the first mismatch.

        Returns False if either argument is None or the lengths differ (the
        early length check reveals only the lengths, not the contents).
        """
        # Fixed: original used `== None`; identity comparison is the idiom.
        if left is None or right is None:
            return False
        left_bytes = [ord(char) for char in left]
        right_bytes = [ord(char) for char in right]
        if len(left_bytes) != len(right_bytes):
            return False
        # OR all XOR differences together so every position is examined.
        result = 0
        for left_byte, right_byte in zip(left_bytes, right_bytes):
            result |= left_byte ^ right_byte
        return result == 0
|
{
"content_hash": "1cb81c20c31cf3430468ab60718a82a9",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 97,
"avg_line_length": 31.390243902439025,
"alnum_prop": 0.6130536130536131,
"repo_name": "mapleoin/braintree_python",
"id": "39672e399d86aa3e23be90f20a1ba51a45405775",
"size": "1287",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "braintree/util/crypto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "763249"
},
{
"name": "Ruby",
"bytes": "588"
},
{
"name": "Shell",
"bytes": "232"
}
],
"symlink_target": ""
}
|
import logging
# Module-level logger for scheduler-plugin diagnostics.
logger = logging.getLogger(__name__)
class SchedulerPlugin(object):
    """ Interface to extend the Scheduler
    The scheduler operates by triggering and responding to events like
    ``task_finished``, ``update_graph``, ``task_erred``, etc..
    A plugin enables custom code to run at each of those same events. The
    scheduler will run the analogous methods on this class when each event is
    triggered. This runs user code within the scheduler thread that can
    perform arbitrary operations in synchrony with the scheduler itself.
    Plugins are often used for diagnostics and measurement, but have full
    access to the scheduler and could in principle affect core scheduling.
    To implement a plugin implement some of the methods of this class and add
    the plugin to the scheduler with ``Scheduler.add_plugin(myplugin)``.
    All hooks below are no-ops by default; subclasses override only the
    events they care about.
    Examples
    --------
    >>> class Counter(SchedulerPlugin):
    ...     def __init__(self):
    ...         self.counter = 0
    ...
    ...     def transition(self, key, start, finish, *args, **kwargs):
    ...         if start == 'processing' and finish == 'memory':
    ...             self.counter += 1
    ...
    ...     def restart(self, scheduler):
    ...         self.counter = 0
    >>> c = Counter()
    >>> scheduler.add_plugin(c)  # doctest: +SKIP
    """
    def update_graph(self, scheduler, dsk=None, keys=None, restrictions=None, **kwargs):
        """ Run when a new graph / tasks enter the scheduler """
    def restart(self, scheduler, **kwargs):
        """ Run when the scheduler restarts itself """
    def transition(self, key, start, finish, *args, **kwargs):
        """ Run whenever a task changes state
        Parameters
        ----------
        key: string
        start: string
            Start state of the transition.
            One of released, waiting, processing, memory, error.
        finish: string
            Final state of the transition.
        *args, **kwargs: More options passed when transitioning
            This may include worker ID, compute time, etc.
        """
    def add_worker(self, scheduler=None, worker=None, **kwargs):
        """ Run when a new worker enters the cluster """
    def remove_worker(self, scheduler=None, worker=None, **kwargs):
        """ Run when a worker leaves the cluster """
|
{
"content_hash": "1063d36eddc08bc38cb18f17ea269520",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 88,
"avg_line_length": 36.03076923076923,
"alnum_prop": 0.625533731853117,
"repo_name": "mrocklin/distributed",
"id": "cfe5fa42b4960d5a53d9e1c503e1078515dbedd5",
"size": "2342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distributed/diagnostics/plugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5161"
},
{
"name": "CSS",
"bytes": "2993"
},
{
"name": "CoffeeScript",
"bytes": "1093"
},
{
"name": "HTML",
"bytes": "12419"
},
{
"name": "Python",
"bytes": "1957174"
},
{
"name": "Shell",
"bytes": "3011"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.models import Group, User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.utils.timezone import now
from django.utils.encoding import iri_to_uri
from django.utils.safestring import mark_safe
from django.views.decorators.cache import cache_control, never_cache
import waffle
from django_statsd.clients import statsd
from mozilla_django_oidc.auth import default_username_algo
from product_details import product_details
import forms
from remo.base.decorators import permission_check
from remo.base.templatetags.helpers import urlparams
from remo.events.utils import get_events_for_user
from remo.profiles.models import UserProfile, UserStatus
from remo.profiles.models import FunctionalArea
from remo.voting.tasks import rotm_nomination_end_date
# Username generator: site override via OIDC_USERNAME_ALGO if set, otherwise
# the mozilla-django-oidc default algorithm.
USERNAME_ALGO = getattr(settings, 'OIDC_USERNAME_ALGO', default_username_algo)
@never_cache
@user_passes_test(lambda u: u.groups.filter(Q(name='Rep') | Q(name='Admin')),
                  login_url=settings.LOGIN_REDIRECT_URL)
@permission_check(permissions=['profiles.can_edit_profiles'],
                  filter_field='display_name', owner_field='user',
                  model=UserProfile)
def edit(request, display_name):
    """Edit user profile.
    Permission to edit user profile is granted to the user who owns
    the profile and all the users with permissions to edit profiles.
    Argument display_name should be lowered before queries because we
    allow case-insensitive profile urls. E.g. both /u/Giorgos and
    /u/giorgos are the same person.
    """
    def profile_date_form_validation(form):
        """Convenience function to only validate datejoinedform when
        user has permissions.
        """
        # Only privileged editors submit the dates form; for everyone else
        # it is treated as valid so it never blocks saving.
        if request.user.has_perm('profiles.can_edit_profiles'):
            if form.is_valid():
                return True
            return False
        return True
    user = get_object_or_404(User,
                             userprofile__display_name__iexact=display_name)
    # All three forms are bound to POST data when present, otherwise unbound.
    userform = forms.ChangeUserForm(request.POST or None, instance=user)
    profileform = forms.ChangeProfileForm(request.POST or None,
                                          instance=user.userprofile,
                                          request=request)
    profile_date_form = forms.ChangeDatesForm(request.POST or None,
                                              instance=user.userprofile)
    if (userform.is_valid() and profileform.is_valid() and
            profile_date_form_validation(profile_date_form)):
        userform.save()
        profileform.save()
        if request.user.has_perm('profiles.can_edit_profiles'):
            # Update groups.
            # Sync group membership from the posted checkboxes: a present
            # checkbox adds the user to the group, an absent one removes.
            groups = {'Mentor': 'mentor_group',
                      'Admin': 'admin_group',
                      'Council': 'council_group',
                      'Rep': 'rep_group',
                      'Alumni': 'alumni_group',
                      'Review': 'review_group',
                      'Peers': 'peers_group'}
            for group_db, group_html in groups.items():
                if Group.objects.filter(name=group_db).exists():
                    if request.POST.get(group_html, None):
                        user.groups.add(Group.objects.get(name=group_db))
                    else:
                        user.groups.remove(Group.objects.get(name=group_db))
            # Update date fields
            profile_date_form.save()
        messages.success(request, 'Profile successfully edited.')
        statsd.incr('profiles.edit_profile')
        if request.user == user:
            return redirect('profiles_view_my_profile')
        else:
            redirect_url = reverse('profiles_view_profile',
                                   kwargs={'display_name':
                                           user.userprofile.display_name})
            return redirect(redirect_url)
    else:
        # If forms are not valid and the fields are dirty, get a fresh copy
        # of the object.
        # This is needed when an invalid display_name is used.
        # Django tries to resolve the url based on this display_name, which
        # results in a NoReverseMatch error. See also bug:
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1147541
        user = User.objects.get(pk=user.id)
    # NOTE(review): map() here must yield a list for the template -- this
    # module appears to target Python 2 (see `import forms` above); confirm
    # before porting to Python 3, where map() returns an iterator.
    group_bits = map(lambda x: user.groups.filter(name=x).exists(),
                     ['Admin', 'Council', 'Mentor', 'Rep', 'Alumni', 'Review', 'Peers'])
    functional_areas = map(int, profileform['functional_areas'].value())
    user_is_alumni = user.groups.filter(name='Alumni').exists()
    return render(request, 'profiles_edit.jinja',
                  {'userform': userform,
                   'profileform': profileform,
                   'profile_date_form': profile_date_form,
                   'pageuser': user,
                   'group_bits': group_bits,
                   'range_years': range(1950, now().date().year - 11),
                   'functional_areas': functional_areas,
                   'user_is_alumni': user_is_alumni})
def redirect_list_profiles(request):
    """Permanently redirect legacy profile-list paths to the hash-based URL."""
    base_url = reverse('profiles_list_profiles')
    # Everything after the list URL becomes the client-side hash fragment.
    trailing = iri_to_uri('/' + request.path_info[len(base_url):])
    return redirect(urlparams(base_url, hash=trailing), permanent=True)
@cache_control(private=True)
def list_profiles(request):
    """List users in Rep Group."""
    countries = sorted(product_details.get_regions('en').values())
    reps = (User.objects
            .filter(userprofile__registration_complete=True,
                    groups__name='Rep')
            .order_by('userprofile__country', 'last_name', 'first_name'))
    context = {'countries': countries,
               'reps': reps,
               'areas': FunctionalArea.objects.all()}
    return render(request, 'profiles_people.jinja', context)
@cache_control(private=True, max_age=60 * 5)
def view_profile(request, display_name):
    """View user profile.
    Only Reps and Alumni have public profiles; incomplete registrations
    are visible only to users who can edit profiles. Also handles POSTed
    Rep-of-the-month nominations via ``nominee_form``.
    """
    user = get_object_or_404(User,
                             userprofile__display_name__iexact=display_name)
    user_is_alumni = user.groups.filter(name='Alumni').exists()
    if not user.groups.filter(Q(name='Rep') | Q(name='Alumni')).exists():
        raise Http404
    if (not user.userprofile.registration_complete and
            not request.user.has_perm('profiles.can_edit_profiles')):
        raise Http404
    nominee_form = forms.RotmNomineeForm(request.POST or None,
                                         instance=user.userprofile)
    usergroups = user.groups.filter(Q(name='Mentor') | Q(name='Council'))
    is_nomination_period = now().date() < rotm_nomination_end_date()
    data = {'pageuser': user,
            'user_profile': user.userprofile,
            'added_by': user.userprofile.added_by,
            'mentor': user.userprofile.mentor,
            'usergroups': usergroups,
            'user_nominated': user.userprofile.is_rotm_nominee,
            'is_nomination_period': is_nomination_period,
            'user_is_alumni': user_is_alumni}
    # Surface the latest unavailability status; remind the owner themselves.
    if UserStatus.objects.filter(user=user, is_unavailable=True).exists():
        status = UserStatus.objects.filter(user=user).latest('created_on')
        data['user_status'] = status
        if user == request.user:
            today = now().date()
            date = (status.expected_date.strftime('%d %B %Y')
                    if status.expected_date > today else None)
            msg = render_to_string(
                'includes/view_profile_unavailable_msg.jinja',
                {'date': date,
                 'display_name': user.userprofile.display_name})
            messages.info(request, mark_safe(msg))
    # A valid nomination is saved only for mentors, during the nomination
    # period (or with the override switch), and never for self-nomination.
    if nominee_form.is_valid():
        if ((is_nomination_period or waffle.switch_is_active('enable_rotm_tasks')) and
                request.user.groups.filter(name='Mentor').exists() and request.user != user):
            nominee_form.save(nominated_by=request.user)
            return redirect('profiles_view_profile', display_name=display_name)
        messages.warning(request, ('Only mentors can nominate a mentee.'))
    if user_is_alumni:
        msg = render_to_string('includes/alumni_msg.jinja')
        messages.info(request, mark_safe(msg))
    today = now().date()
    # NGReports
    data['ng_reports'] = (user.ng_reports
                          .filter(report_date__lte=today)
                          .order_by('-report_date'))
    past_user_events = get_events_for_user(user, to_date=today)
    data['future_events'] = get_events_for_user(user, from_date=today)
    data['past_events'] = past_user_events.reverse()[:10]
    data['featured_rep'] = user.featuredrep_users.all()
    data['request_user'] = request.user
    data['nominee_form'] = nominee_form
    return render(request, 'profiles_view.jinja', data)
@permission_check()
def view_my_profile(request):
    """Render the profile page of the currently logged-in user."""
    my_display_name = request.user.userprofile.display_name
    return view_profile(request, display_name=my_display_name)
@cache_control(private=True, no_cache=True)
@permission_check(permissions=['profiles.create_user'])
def invite(request):
    """Invite a user."""
    form = forms.InviteUserForm(request.POST or None)
    if not form.is_valid():
        # GET, or invalid submission: just (re)render the invite form.
        return render(request, 'profiles_invite.jinja', {'form': form})
    email = form.cleaned_data['email']
    new_user = User.objects.create_user(username=USERNAME_ALGO(email),
                                        email=email)
    # Add new users to Rep group
    new_user.groups.add(Group.objects.get(name='Rep'))
    # Mentors automatically become the mentor of users they invite.
    if request.user.groups.filter(name='Mentor').exists():
        new_user.userprofile.mentor = request.user
    new_user.userprofile.added_by = request.user
    new_user.userprofile.save()
    messages.success(request, ('User was successfully invited, '
                               'now shoot some mails!'))
    return redirect('profiles_invite')
@permission_check(permissions=['profiles.can_delete_profiles'])
def delete_user(request, display_name):
    """Delete a user."""
    target = get_object_or_404(User, userprofile__display_name=display_name)
    if request.method == 'POST':
        target.delete()
        messages.success(request, 'User was deleted.')
    # NOTE(review): this counter increments on every request, even non-POST
    # ones that delete nothing -- confirm whether that is intended.
    statsd.incr('profiles.delete_profile')
    return redirect('main')
@cache_control(private=True)
def list_alumni(request):
    """List users in Alumni Group."""
    alumni = User.objects.filter(groups__name='Alumni')
    paginator = Paginator(alumni, settings.ITEMS_PER_PAGE)
    page_number = request.GET.get('page', 1)
    try:
        page = paginator.page(page_number)
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page.
        page = paginator.page(1)
    except EmptyPage:
        # Out-of-range page parameter: fall back to the last page.
        page = paginator.page(paginator.num_pages)
    return render(request, 'profiles_list_alumni.jinja', {'objects': page})
|
{
"content_hash": "881abde38c210e339c678bf58f1f4141",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 93,
"avg_line_length": 39.46853146853147,
"alnum_prop": 0.6211020552799433,
"repo_name": "flamingspaz/remo",
"id": "8ecabb5dc0a62487c2cfa3fe35f51f88bf754261",
"size": "11288",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "remo/profiles/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "993"
},
{
"name": "CSS",
"bytes": "316399"
},
{
"name": "HTML",
"bytes": "334699"
},
{
"name": "JavaScript",
"bytes": "593637"
},
{
"name": "Python",
"bytes": "748350"
},
{
"name": "Shell",
"bytes": "638"
},
{
"name": "Smarty",
"bytes": "215"
}
],
"symlink_target": ""
}
|
"""A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
# changes to tokenize more like Posix shells by Vinay Sajip, July 2016.
import os
import re
import sys
from collections import deque
from io import StringIO
__all__ = ["shlex", "split", "quote", "join"]
class shlex:
    "A lexical analyzer class for simple shell-like syntaxes."
    def __init__(self, instream=None, infile=None, posix=False,
                 punctuation_chars=False):
        # Accept a plain string by wrapping it in a StringIO.
        if isinstance(instream, str):
            instream = StringIO(instream)
        if instream is not None:
            self.instream = instream
            self.infile = infile
        else:
            # No stream given: tokenize standard input.
            self.instream = sys.stdin
            self.infile = None
        self.posix = posix
        # POSIX mode uses None for EOF so an empty-string token ('') stays
        # distinguishable from end-of-input.
        if posix:
            self.eof = None
        else:
            self.eof = ''
        self.commenters = '#'
        # Characters allowed inside a word (membership-only; order of the
        # letters in this string is irrelevant).
        self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
                          'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
        if self.posix:
            # POSIX mode also admits these Latin-1 letters in words.
            self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
                               'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
        self.whitespace = ' \t\r\n'
        self.whitespace_split = False
        self.quotes = '\'"'
        self.escape = '\\'
        # Quote characters inside which escape processing is honoured.
        self.escapedquotes = '"'
        # Lexer state: ' ' = between tokens, 'a' = in a word, 'c' = in a
        # punctuation run, a quote char = inside that quote, None = at EOF.
        self.state = ' '
        self.pushback = deque()
        self.lineno = 1
        self.debug = 0
        self.token = ''
        self.filestack = deque()
        self.source = None
        if not punctuation_chars:
            punctuation_chars = ''
        elif punctuation_chars is True:
            # Default shell punctuation set.
            punctuation_chars = '();<>|&'
        self._punctuation_chars = punctuation_chars
        if punctuation_chars:
            # _pushback_chars is a push back queue used by lookahead logic
            self._pushback_chars = deque()
            # these chars added because allowed in file names, args, wildcards
            self.wordchars += '~-./*?='
            # remove any punctuation chars from wordchars
            t = self.wordchars.maketrans(dict.fromkeys(punctuation_chars))
            self.wordchars = self.wordchars.translate(t)
    @property
    def punctuation_chars(self):
        # Read-only: configurable only through the constructor.
        return self._punctuation_chars
    def push_token(self, tok):
        "Push a token onto the stack popped by the get_token method"
        if self.debug >= 1:
            print("shlex: pushing token " + repr(tok))
        self.pushback.appendleft(tok)
    def push_source(self, newstream, newfile=None):
        "Push an input source onto the lexer's input source stack."
        if isinstance(newstream, str):
            newstream = StringIO(newstream)
        # Save the current source so pop_source() can restore it.
        self.filestack.appendleft((self.infile, self.instream, self.lineno))
        self.infile = newfile
        self.instream = newstream
        self.lineno = 1
        if self.debug:
            if newfile is not None:
                print('shlex: pushing to file %s' % (self.infile,))
            else:
                print('shlex: pushing to stream %s' % (self.instream,))
    def pop_source(self):
        "Pop the input source stack."
        self.instream.close()
        (self.infile, self.instream, self.lineno) = self.filestack.popleft()
        if self.debug:
            print('shlex: popping to %s, line %d' \
                  % (self.instream, self.lineno))
        self.state = ' '
    def get_token(self):
        "Get a token from the input stream (or from stack if it's nonempty)"
        if self.pushback:
            tok = self.pushback.popleft()
            if self.debug >= 1:
                print("shlex: popping token " + repr(tok))
            return tok
        # No pushback. Get a token.
        raw = self.read_token()
        # Handle inclusions
        if self.source is not None:
            while raw == self.source:
                spec = self.sourcehook(self.read_token())
                if spec:
                    (newfile, newstream) = spec
                    self.push_source(newstream, newfile)
                raw = self.get_token()
        # Maybe we got EOF instead?
        while raw == self.eof:
            if not self.filestack:
                return self.eof
            else:
                self.pop_source()
                raw = self.get_token()
        # Neither inclusion nor EOF
        if self.debug >= 1:
            if raw != self.eof:
                print("shlex: token=" + repr(raw))
            else:
                print("shlex: token=EOF")
        return raw
    def read_token(self):
        "Read one raw token from the stream (the state-machine core)."
        quoted = False
        escapedstate = ' '
        while True:
            # Prefer characters pushed back by punctuation lookahead.
            if self.punctuation_chars and self._pushback_chars:
                nextchar = self._pushback_chars.pop()
            else:
                nextchar = self.instream.read(1)
            if nextchar == '\n':
                self.lineno += 1
            if self.debug >= 3:
                print("shlex: in state %r I see character: %r" % (self.state,
                                                                  nextchar))
            if self.state is None:
                self.token = ''  # past end of file
                break
            elif self.state == ' ':
                # Between tokens: decide what the next character starts.
                if not nextchar:
                    self.state = None  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print("shlex: I see whitespace in whitespace state")
                    if self.token or (self.posix and quoted):
                        break  # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno += 1
                elif self.posix and nextchar in self.escape:
                    escapedstate = 'a'
                    self.state = nextchar
                elif nextchar in self.wordchars:
                    self.token = nextchar
                    self.state = 'a'
                elif nextchar in self.punctuation_chars:
                    self.token = nextchar
                    self.state = 'c'
                elif nextchar in self.quotes:
                    if not self.posix:
                        self.token = nextchar
                    self.state = nextchar
                elif self.whitespace_split:
                    self.token = nextchar
                    self.state = 'a'
                else:
                    self.token = nextchar
                    if self.token or (self.posix and quoted):
                        break  # emit current token
                    else:
                        continue
            elif self.state in self.quotes:
                # Inside a quoted string; state holds the quote character.
                quoted = True
                if not nextchar:  # end of file
                    if self.debug >= 2:
                        print("shlex: I see EOF in quotes state")
                    # XXX what error should be raised here?
                    raise ValueError("No closing quotation")
                if nextchar == self.state:
                    if not self.posix:
                        self.token += nextchar
                        self.state = ' '
                        break
                    else:
                        self.state = 'a'
                elif (self.posix and nextchar in self.escape and self.state
                      in self.escapedquotes):
                    escapedstate = self.state
                    self.state = nextchar
                else:
                    self.token += nextchar
            elif self.state in self.escape:
                # Just after an escape character; state holds the escape char.
                if not nextchar:  # end of file
                    if self.debug >= 2:
                        print("shlex: I see EOF in escape state")
                    # XXX what error should be raised here?
                    raise ValueError("No escaped character")
                # In posix shells, only the quote itself or the escape
                # character may be escaped within quotes.
                if (escapedstate in self.quotes and
                        nextchar != self.state and nextchar != escapedstate):
                    self.token += self.state
                self.token += nextchar
                self.state = escapedstate
            elif self.state in ('a', 'c'):
                # In a word ('a') or a punctuation run ('c').
                if not nextchar:
                    self.state = None  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print("shlex: I see whitespace in word state")
                    self.state = ' '
                    if self.token or (self.posix and quoted):
                        break  # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno += 1
                    if self.posix:
                        self.state = ' '
                        if self.token or (self.posix and quoted):
                            break  # emit current token
                        else:
                            continue
                elif self.state == 'c':
                    if nextchar in self.punctuation_chars:
                        self.token += nextchar
                    else:
                        # Non-punctuation ends the run; push the char back.
                        if nextchar not in self.whitespace:
                            self._pushback_chars.append(nextchar)
                        self.state = ' '
                        break
                elif self.posix and nextchar in self.quotes:
                    self.state = nextchar
                elif self.posix and nextchar in self.escape:
                    escapedstate = 'a'
                    self.state = nextchar
                elif (nextchar in self.wordchars or nextchar in self.quotes
                      or (self.whitespace_split and
                          nextchar not in self.punctuation_chars)):
                    self.token += nextchar
                else:
                    if self.punctuation_chars:
                        self._pushback_chars.append(nextchar)
                    else:
                        self.pushback.appendleft(nextchar)
                    if self.debug >= 2:
                        print("shlex: I see punctuation in word state")
                    self.state = ' '
                    if self.token or (self.posix and quoted):
                        break  # emit current token
                    else:
                        continue
        result = self.token
        self.token = ''
        # In POSIX mode an empty unquoted result means EOF, not a token.
        if self.posix and not quoted and result == '':
            result = None
        if self.debug > 1:
            if result:
                print("shlex: raw token=" + repr(result))
            else:
                print("shlex: raw token=EOF")
        return result
    def sourcehook(self, newfile):
        "Hook called on a filename to be sourced."
        if newfile[0] == '"':
            newfile = newfile[1:-1]
        # This implements cpp-like semantics for relative-path inclusion.
        if isinstance(self.infile, str) and not os.path.isabs(newfile):
            newfile = os.path.join(os.path.dirname(self.infile), newfile)
        return (newfile, open(newfile, "r"))
    def error_leader(self, infile=None, lineno=None):
        "Emit a C-compiler-like, Emacs-friendly error-message leader."
        if infile is None:
            infile = self.infile
        if lineno is None:
            lineno = self.lineno
        return "\"%s\", line %d: " % (infile, lineno)
    def __iter__(self):
        return self
    def __next__(self):
        token = self.get_token()
        if token == self.eof:
            raise StopIteration
        return token
def split(s, comments=False, posix=True):
    """Split the string *s* using shell-like syntax into a list of tokens."""
    lexer = shlex(s, posix=posix)
    lexer.whitespace_split = True
    if not comments:
        # Treat '#' as an ordinary character instead of a comment starter.
        lexer.commenters = ''
    return list(lexer)
def join(split_command):
    """Return a shell-escaped string from *split_command*."""
    return ' '.join(map(quote, split_command))
# Matches any character that is not shell-safe (ASCII-only word chars plus
# a small punctuation whitelist).
_find_unsafe = re.compile(r'[^\w@%+=:,./-]', re.ASCII).search
def quote(s):
    """Return a shell-escaped version of the string *s*."""
    if not s:
        return "''"
    if _find_unsafe(s) is None:
        # Entirely safe characters: no quoting needed.
        return s
    # use single quotes, and put single quotes into double quotes
    # the string $'b is then quoted as '$'"'"'b'
    escaped = s.replace("'", "'\"'\"'")
    return "'{0}'".format(escaped)
def _print_tokens(lexer):
while 1:
tt = lexer.get_token()
if not tt:
break
print("Token: " + repr(tt))
if __name__ == '__main__':
    # CLI: tokenize stdin, or the file named by the first argument.
    if len(sys.argv) == 1:
        _print_tokens(shlex())
    else:
        fn = sys.argv[1]
        with open(fn) as f:
            _print_tokens(shlex(f, fn))
|
{
"content_hash": "479e4f299c437484ab4122d53ffc96b5",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 78,
"avg_line_length": 38.28115942028985,
"alnum_prop": 0.4945862042856061,
"repo_name": "batermj/algorithm-challenger",
"id": "ae0f5ddec18716facde32ff586a141a5a7767a2f",
"size": "13269",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/shlex.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "655185"
},
{
"name": "Batchfile",
"bytes": "127416"
},
{
"name": "C",
"bytes": "33127630"
},
{
"name": "C++",
"bytes": "1364796"
},
{
"name": "CSS",
"bytes": "3163"
},
{
"name": "Common Lisp",
"bytes": "48962"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "DTrace",
"bytes": "2196"
},
{
"name": "Go",
"bytes": "26248"
},
{
"name": "HTML",
"bytes": "385719"
},
{
"name": "Haskell",
"bytes": "33612"
},
{
"name": "Java",
"bytes": "1084"
},
{
"name": "JavaScript",
"bytes": "20754"
},
{
"name": "M4",
"bytes": "403992"
},
{
"name": "Makefile",
"bytes": "238185"
},
{
"name": "Objective-C",
"bytes": "4934684"
},
{
"name": "PHP",
"bytes": "3513"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Perl",
"bytes": "649"
},
{
"name": "PostScript",
"bytes": "27606"
},
{
"name": "PowerShell",
"bytes": "21737"
},
{
"name": "Python",
"bytes": "55270625"
},
{
"name": "R",
"bytes": "29951"
},
{
"name": "Rich Text Format",
"bytes": "14551"
},
{
"name": "Roff",
"bytes": "292490"
},
{
"name": "Ruby",
"bytes": "519"
},
{
"name": "Scala",
"bytes": "846446"
},
{
"name": "Shell",
"bytes": "491113"
},
{
"name": "Swift",
"bytes": "881"
},
{
"name": "TeX",
"bytes": "337654"
},
{
"name": "VBScript",
"bytes": "140"
},
{
"name": "XSLT",
"bytes": "153"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the products app.

    Adds ``Order.comment`` and ``Order.invoice_address`` and makes
    ``Order.delivery_address`` nullable.
    """

    def forwards(self, orm):
        """Apply the schema changes."""
        # Adding field 'Order.comment'
        db.add_column('products_order', 'comment',
                      self.gf('django.db.models.fields.CharField')(blank=True, max_length=255, default=''),
                      keep_default=False)

        # Adding field 'Order.invoice_address'
        db.add_column('products_order', 'invoice_address',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='invoices', null=True, to=orm['contacts.Address']),
                      keep_default=False)

        # Changing field 'Order.delivery_address'
        db.alter_column('products_order', 'delivery_address_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['contacts.Address']))

    def backwards(self, orm):
        """Revert the schema changes."""
        # Deleting field 'Order.comment'
        db.delete_column('products_order', 'comment')

        # Deleting field 'Order.invoice_address'
        db.delete_column('products_order', 'invoice_address_id')

        # Changing field 'Order.delivery_address'
        db.alter_column('products_order', 'delivery_address_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['contacts.Address']))

    # Frozen ORM state (auto-generated by South) used to reconstruct the
    # models as they existed at the time of this migration. Do not edit.
    models = {
        'categories.category': {
            'Meta': {'object_name': 'Category'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True', 'default': "''"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        'contacts.address': {
            'Meta': {'object_name': 'Address'},
            'city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts.Contact']"}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'county': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'postcode': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'contacts.contact': {
            'Meta': {'object_name': 'Contact'},
            'addressed_as': ('django.db.models.fields.CharField', [], {'max_length': '100', 'default': "'calculated'"}),
            'addressed_as_custom': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '255', 'default': "''"}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'symmetrical': 'False', 'null': 'True', 'to': "orm['categories.Category']"}),
            'company': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100', 'default': "''"}),
            'company_or_individual': ('django.db.models.fields.CharField', [], {'max_length': '10', 'default': "'individual'"}),
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'department': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100', 'default': "''"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_artist': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'job_title': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100', 'default': "''"}),
            'main_address': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'main_address'", 'null': 'True', 'to': "orm['contacts.Address']"}),
            'main_phonenumber': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'main_phonenumber'", 'null': 'True', 'to': "orm['contacts.PhoneNumber']"}),
            'migration_id': ('django.db.models.fields.IntegerField', [], {'blank': 'True', 'null': 'True'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'name_first': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100', 'default': "''"}),
            'name_last': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100', 'default': "''"}),
            'name_middle': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100', 'default': "''"}),
            'reference': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '255', 'default': "''"}),
            'suffix': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100', 'default': "''"}),
            'title': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100', 'default': "''"}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts.ContactType']"})
        },
        'contacts.contacttype': {
            'Meta': {'object_name': 'ContactType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        'contacts.phonenumber': {
            'Meta': {'object_name': 'PhoneNumber'},
            'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts.Contact']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'products.order': {
            'Meta': {'object_name': 'Order'},
            'comment': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '255', 'default': "''"}),
            'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts.Contact']"}),
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'delivery_address': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'default': 'None', 'related_name': "'deliveries'", 'null': 'True', 'to': "orm['contacts.Address']"}),
            'delivery_note': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invoice_address': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invoices'", 'null': 'True', 'to': "orm['contacts.Address']"}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'reference': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
        },
        'products.orderitem': {
            'Meta': {'object_name': 'OrderItem'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initial': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'item_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['products.Order']"}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['products.Product']"}),
            'qty': ('django.db.models.fields.IntegerField', [], {})
        },
        'products.product': {
            'Meta': {'object_name': 'Product'},
            'colour': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'has_colour': ('django.db.models.fields.BooleanField', [], {}),
            'has_size': ('django.db.models.fields.BooleanField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initials_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
            'item_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'size': ('django.db.models.fields.DecimalField', [], {'blank': 'True', 'max_digits': '5', 'decimal_places': '2'}),
            'stock_quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['products']
|
{
"content_hash": "59a1a980165ce384207646dc23ffd786",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 202,
"avg_line_length": 72.368,
"alnum_prop": 0.5594738005748398,
"repo_name": "drummonds/galleria",
"id": "41dc5f8aee68ddfefcff90d52b725be8371b6a71",
"size": "9070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "galleria/products/migrations/0004_auto__add_field_order_comment__add_field_order_invoice_address__chg_fi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "34"
},
{
"name": "Python",
"bytes": "212645"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models
class Plant(models.Model):
    """A plant species with its care requirements (humidity, sun, feeding)."""

    name = models.CharField(max_length=100)
    # Humidity. LOW, MODERATE, HIGH
    ideal_humidity = models.CharField(max_length=100, choices=[('LOW', 'LOW'), ('MODERATE', 'MODERATE'), ('HIGH', 'HIGH')])
    # How much sun the plant prefers. Not required.
    sun_preference = models.CharField(max_length=100, blank=True, help_text="How much sun the plant prefers",
                                      choices=[('MINIMAL', 'MINIMAL'), ('FULL', 'FULL')])
    # How much shade the plant can tolerate. Not required.
    shade_tolerance = models.CharField(max_length=100, blank=True, help_text="How much shade the plant can tolerate",
                                       choices=[('NONE', 'NONE'), ('LIGHT', 'LIGHT'), ('PERMANENT', 'PERMANENT')])
    # How often the plant needs fertilizer. In days. Not required
    fertilizing_interval = models.IntegerField(default=0, blank=True,
                                               help_text="How often the plant needs fertilizer (in days)")
    # Not used for now
    ideal_ph_min = models.DecimalField(max_digits=3, decimal_places=2, default=-1, blank=True,
                                       help_text="Minimum ideal pH value")
    ideal_ph_max = models.DecimalField(max_digits=3, decimal_places=2, default=-1, blank=True,
                                       help_text="Maximum ideal pH value")

    def __str__(self):
        """Display the species name in admin/listings."""
        return self.name
class PlantInstance(models.Model):
    """A physical plant of a given species, wired to a humidity sensor pin."""

    date_created = models.DateTimeField(auto_now_add=True)
    # on_delete=CASCADE made explicit for consistency with MoistureLog and
    # WateringLog below; it matches the pre-Django-2.0 implicit default and
    # is mandatory from Django 2.0 onwards.
    plant_type = models.ForeignKey(Plant, on_delete=models.CASCADE)
    # Calibration offsets for the raw humidity-sensor reading.
    sensor_offset_max = models.IntegerField(help_text="Humidity sensor offset for calibration (max)", default=0)
    sensor_offset_min = models.IntegerField(help_text="Humidity sensor offset for calibration (min)", default=0)
    pin_number = models.IntegerField(help_text="Pin number of the humidity sensor", unique=True)

    def __str__(self):
        """Display the species name of this instance."""
        return str(self.plant_type.name)
class MoistureLog(models.Model):
    """A timestamped soil-moisture reading for one plant instance."""

    date = models.DateTimeField(auto_now_add=True)
    moisture_level = models.IntegerField(default=0)
    plant_instance = models.ForeignKey(PlantInstance, on_delete=models.CASCADE)

    def __str__(self):
        """Display the reading timestamp."""
        return str(self.date)
class WateringLog(models.Model):
    """A timestamped record of a watering event (valve-open duration)."""

    date = models.DateTimeField(auto_now_add=True)
    # Duration the water valve was kept open, in seconds.
    num_seconds_open = models.IntegerField()
    plant_instance = models.ForeignKey(PlantInstance, on_delete=models.CASCADE)

    def __str__(self):
        """Display the watering timestamp."""
        return str(self.date)
|
{
"content_hash": "e2a7f0deceefe5fb8ed4a27ae06ad647",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 123,
"avg_line_length": 43.54237288135593,
"alnum_prop": 0.6488906189178669,
"repo_name": "mgnusl/herb-pi",
"id": "28a8e12bd71f6fd567b966b536c2bb13a06828f6",
"size": "2569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plants/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8566"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "34959"
}
],
"symlink_target": ""
}
|
"""Module to re-distribute conversion-values of no-consent customers."""
import logging
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from scipy import sparse
from scipy import special
from sklearn import neighbors
class NearestCustomerMatcher:
  """Class to find nearest neighbors and distribute conversion value.

  When we have a dataset of customers that gave consent to cookie-tracking, and
  customers that did not give consent, we want to ensure that the total
  conversion values (e.g. value of a purchase) across all customers are
  accessible to SmartBidding.

  The NearestCustomerMatcher finds the most similar customers among the
  consenting customers to each of the no-consent customers, and distributes
  the conversion values of any no-consent customer across the matches in the
  set of consenting customers, in proportion to their distance.

  Similarity is defined as the distance between customers in their feature-
  space, for instance based on adgroup-levels. Which distance-metric to
  choose is up to the user.

  The more similar a consenting customer is to a given no-consent
  customer, the larger the share of the no-consent customer's conversion-
  value that will be added to the consenting customer's conversion value.
  """

  def __init__(self,
               data_consent: pd.DataFrame,
               conversion_column: str,
               id_columns: List[Union[str, int]],
               metric: str = "manhattan",
               neighbor: Callable[..., Any] = neighbors.NearestNeighbors):
    """Initialises class.

    Args:
      data_consent: Dataframe of consented customers (preprocessed).
      conversion_column: Name of column in dataframe of conversion-value.
      id_columns: Names of columns that identify customers. Usually GCLID and
        timestamp.
      metric: Distance metric to use when finding nearest neighbors.
      neighbor: sklearn NearestNeighbor object.

    Raises:
      ValueError: if the conversion values contain NaNs or Nones, or if
        conversion values < 0.
    """
    # TODO() Test behaviour under different distance metrics.
    self._neighbor = neighbor(metric=metric, algorithm="auto")
    self._columns_consent = data_consent.drop(id_columns, axis=1).columns
    self._data_consent = data_consent[id_columns + [conversion_column]]
    # float16 keeps the (typically sparse/one-hot) feature matrix small
    # before converting to a CSR sparse representation.
    features_consent = data_consent.drop(id_columns + [conversion_column],
                                         axis=1).astype(np.float16).values
    self._features_consent = sparse.csr_matrix(features_consent)
    self._conversion_column = conversion_column
    self._consent_id = data_consent[id_columns]
    self._id_columns = id_columns
    if any(self._data_consent[self._conversion_column].isna()):
      raise ValueError("The conversion column must not contain NaNs/Nones.")
    if any(self._data_consent[self._conversion_column] <= 0):
      raise ValueError("The conversion values must be larger than zero.")
    self._neighbor = self._neighbor.fit(self._features_consent)
    # These attributes will be populated with data later, by
    # calculate_adjusted_conversions().
    self._data_noconsent = None
    self._data_noconsent_match = None
    self._data_noconsent_nomatch = None
    self._data_adjusted = None

  @property
  def total_non_matched_conversion_value(self) -> float:
    """Total conversion value of no-consent customers without a match."""
    return self._data_noconsent_nomatch[self._conversion_column].sum()

  @property
  def total_matched_conversion_value(self) -> float:
    """Total conversion value of no-consent customers with a match."""
    return self._data_noconsent_match[self._conversion_column].sum()

  @property
  def percentage_matched_conversion_value(self) -> float:
    """Share (in percent) of no-consent conversion value that was matched."""
    return (self.total_matched_conversion_value /
            (self.total_non_matched_conversion_value +
             self.total_matched_conversion_value)) * 100

  @property
  def number_non_matched_conversions(self) -> int:
    """Count of no-consent conversions without any matched neighbor."""
    return len(self._data_noconsent_nomatch)

  @property
  def number_matched_conversions(self) -> int:
    """Count of no-consent conversions with at least one matched neighbor."""
    return len(self._data_noconsent_match)

  @property
  def percentage_matched_conversions(self) -> float:
    """Share (in percent) of no-consent conversions that were matched."""
    return self.number_matched_conversions / len(self._data_noconsent) * 100

  @property
  def distance_statistics(self):
    """Descriptive statistics of average match distances in adjusted data."""
    return self._data_adjusted["average_distance"].describe()

  @property
  def nearest_distances_statistics_nonconsenting(self):
    """Distance percentiles for matched no-consent customers."""
    return self._data_noconsent_match["distance_to_nearest_neighbor"].describe(
        percentiles=[.25, .5, .75, .9, .95, .99])

  @property
  def summary_statistics_matched_conversions(self):
    """One-row dataframe summarising match counts and values."""
    return pd.DataFrame(
        {
            "percentage_matched_conversion_value":
                self.percentage_matched_conversion_value,
            "percentage_matched_conversions":
                self.percentage_matched_conversions,
            "number_matched_conversions":
                self.number_matched_conversions,
            "total_matched_conversion_value":
                self.total_matched_conversion_value
        },
        index=["summary_statistics_matched_conversions"])

  def min_radius_by_percentile(self, percentile: float = .95) -> float:
    """Returns the distance below which `percentile` of matches fall."""
    radius = self._data_noconsent_match[
        "distance_to_nearest_neighbor"].quantile(percentile)
    return radius

  def _get_proportional_number_nearest_neighbors(
      self, number_nearest_neighbors: float) -> int:
    """Converts a proportion (<1) into an absolute neighbor count."""
    return int(number_nearest_neighbors * len(self._data_consent))

  def _fit_neighbor(self):
    """(Re-)fits the nearest-neighbor index on the consenting features."""
    self._neighbor.fit(self._features_consent)
    self._fitted = True

  def _get_neighbors_within_radius(
      self, data_noconsent: pd.DataFrame, radius: float
  ) -> Tuple[Sequence[np.ndarray], Sequence[np.ndarray], Sequence[bool]]:
    """Gets neighbors within specified radius.

    Args:
      data_noconsent: Data of no-consent customers.
      radius: Radius within which nearest neighbors are found.

    Returns:
      neighbors_index: Array of indices-arrays of neighboring points.
      neighbors_distances: Array of distance-arrays to neighboring points.
      has_neighbors_array: Array of booleans indicating whether a given non-
        consenting customer had at least one neighbor or not. Takes advantage
        of numpy's functionality, e.g.:
        (np.array([0,1,2]) > 0)
        >>> array([False, True, True])
    """
    neighbors_distance, neighbors_index = self._neighbor.radius_neighbors(
        data_noconsent.drop([self._conversion_column], axis=1),
        radius=radius,
        return_distance=True,
    )
    has_neighbors_array = np.array(
        [len(neighbors) for neighbors in neighbors_index]) > 0
    if not any(has_neighbors_array):
      # %s instead of %d: radius is typically a float and %d would
      # silently truncate it in the log message.
      logging.warning("No matching customers within radius %s.", radius)
    neighbors_index = neighbors_index[has_neighbors_array]
    neighbors_distance = neighbors_distance[has_neighbors_array]
    return neighbors_index, neighbors_distance, has_neighbors_array

  def _get_n_nearest_neighbors(
      self, data_noconsent: pd.DataFrame, number_nearest_neighbors: float
  ) -> Tuple[Sequence[np.ndarray], Sequence[np.ndarray], Sequence[bool]]:
    """Gets n nearest neighbors.

    Args:
      data_noconsent: Data of no-consent customers.
      number_nearest_neighbors: Number of neighbors to return. If <1,
        number_nearest_neighbors is calculated as the proportion in the set of
        consenting customers.

    Returns:
      neighbors_index: Array of indices-arrays of neighboring points.
      neighbors_distances: Array of distance-arrays to neighboring points.
      has_neighbors_array: Array of booleans indicating whether a given non-
        consenting customer had at least one neighbor or not.

    Raises:
      ValueError: if the actual number of nearest neighbors is not
        `number_nearest_neighbors`.
    """
    if number_nearest_neighbors < 1:
      number_nearest_neighbors = (
          self._get_proportional_number_nearest_neighbors(
              number_nearest_neighbors))
    neighbors_distance, neighbors_index = self._neighbor.kneighbors(
        data_noconsent.drop([self._conversion_column], axis=1),
        n_neighbors=number_nearest_neighbors,
        return_distance=True)
    has_neighbors_array = np.array(
        [len(neighbors) for neighbors in neighbors_index]) > 0
    if np.shape(neighbors_distance)[1] != number_nearest_neighbors:
      raise ValueError(
          f"Returned number of neighbors is not {number_nearest_neighbors}.")
    return neighbors_index, neighbors_distance, has_neighbors_array

  def _get_nearest_neighbors(
      self,
      data_noconsent: pd.DataFrame,
      radius: Optional[float] = None,
      number_nearest_neighbors: Optional[float] = None
  ) -> Tuple[Sequence[np.ndarray], Sequence[np.ndarray], Sequence[bool]]:
    """Get indices and distances to nearest neighbors.

    Finds nearest neighbors based on radius or number_nearest_neighbors for
    each entry in data_noconsent. If nearest neighbors are defined via radius,
    entries in data_noconsent without sufficiently close neighbor are removed.

    Args:
      data_noconsent: Data of no-consent customers.
      radius: Radius within which neighbors have to lie.
      number_nearest_neighbors: Defines the number (or proportion) of nearest
        neighbors. If smaller 1, number_nearest_neighbors is calculated as the
        proportion of the number of consenting customers.

    Returns:
      A 3-tuple with:
        Array of indices-arrays of nearest neighbors in data_consent.
        Array of distances-arrays of nearest neigbors in data_consent.
        Array of booleans indicating whether a given no-consent customer
          had at least one neighbor or not.

    Raises:
      ValueError: if not exactly one of radius or number_nearest_neighbors are
        provided.
    """
    has_radius = radius is not None
    has_number_nearest_neighbors = number_nearest_neighbors is not None
    if has_radius == has_number_nearest_neighbors:
      # Single string (implicit concatenation) — a trailing comma here would
      # turn the message into a tuple.
      raise ValueError("Exactly one of radius or number_nearest_neighbors has "
                       "to be provided.")
    if has_radius:
      return self._get_neighbors_within_radius(data_noconsent, radius)
    return self._get_n_nearest_neighbors(data_noconsent,
                                         number_nearest_neighbors)

  def _assert_all_columns_match_and_conversions_are_valid(self, data_noconsent):
    """Checks that all consenting and no-consent data match and are valid.

    Args:
      data_noconsent: Data of no-consent customers.

    Raises:
      ValueError: if columns of consenting and no-consent data don't match,
        the conversion values contain NaNs/Nones or if conversion values <0.
    """
    if not all(self._columns_consent == data_noconsent.columns) or (len(
        self._columns_consent) != len(data_noconsent.columns)):
      raise ValueError(
          "Consented and non-consented data must have same columns.")
    for data in (data_noconsent, self._data_consent):
      if any(data[self._conversion_column].isna()):
        raise ValueError("The conversion column should not contain NaNs.")
      if any(data[self._conversion_column] <= 0):
        # Bug fix: the ValueError was previously constructed but never raised,
        # silently accepting non-positive conversion values.
        raise ValueError("The conversion values should be larger than zero.")

  def get_indices_and_values_to_nearest_neighbors(
      self,
      data_noconsent: pd.DataFrame,
      radius: Optional[float] = None,
      number_nearest_neighbors: Optional[float] = None
  ) -> Tuple[Sequence[np.ndarray], Sequence[np.ndarray], Sequence[np.ndarray],
             Sequence[np.ndarray], Sequence[bool]]:
    """Gets indices of nearest neighbours as well as the needed conversions.

    Args:
      data_noconsent: Data of no-consent customers.
      radius: Radius within which neighbors have to lie.
      number_nearest_neighbors: Defines the number (or proportion) of nearest
        neighbors.

    Returns:
      neighbors_data_index: Arrays of indices to the nearest neighbors in the
        consenting-customer data.
      neighbors_distance: Arrays of distances to the nearest neighbors.
      weighted_conversion_values: Conversion values of no-consent customers
        weighted by their distance to each nearest neighbor.
      weighted_distance: Weighted distances between no-consent and
        consenting customers.
      has_neighbor: Whether or not a given no-consent customer had a
        nearest neighbor.
    """
    data_noconsent = data_noconsent.drop(self._id_columns, axis=1)
    self._assert_all_columns_match_and_conversions_are_valid(data_noconsent)
    neighbors_index, neighbors_distance, has_neighbor = (
        self._get_nearest_neighbors(data_noconsent, radius,
                                    number_nearest_neighbors))
    # Translate positional neighbor indices into the consenting dataframe's
    # index labels so they can be used with .loc downstream.
    neighbors_data_index = [
        self._data_consent.index[index] for index in neighbors_index
    ]
    non_consent_conversion_values = data_noconsent[has_neighbor][
        self._conversion_column].values
    weighted_conversion_values, weighted_distance = (
        _calculate_weighted_conversion_values(
            non_consent_conversion_values,
            neighbors_distance,
        ))
    return (neighbors_data_index, neighbors_distance,
            weighted_conversion_values, weighted_distance, has_neighbor)

  def calculate_adjusted_conversions(
      self,
      data_noconsent: pd.DataFrame,
      radius: Optional[float] = None,
      number_nearest_neighbors: Optional[float] = None) -> pd.DataFrame:
    """Calculates adjusted conversions for identified nearest neighbors.

    Finds nearest neighbors based on radius or number_nearest_neighbors for
    each entry in data_noconsent. If nearest neighbors are defined via radius,
    entries in data_noconsent without sufficiently close neighbor are ignored.
    Conversion values of consenting customers that are identified as nearest
    neighbor to a no-consent customer are adjusted by adding the weighted
    proportional conversion value of the respective no-consent customer.
    The weighted conversion value is calculated as the product of the
    conversion value with the softmax over all neighbor-similarities.

    Args:
      data_noconsent: Data for no-consent customer(s). Needs to be pre-
        processed and have the same columns as data_consent.
      radius: Radius within which neighbors have to lie.
      number_nearest_neighbors: Defines the number (or proportion) of nearest
        neighbors.

    Returns:
      data_adjusted: Copy of data_consent including the modelled conversion
        values.
    """
    (neighbors_data_index, neighbors_distance, weighted_conversion_values,
     weighted_distance,
     has_neighbor) = self.get_indices_and_values_to_nearest_neighbors(
         data_noconsent, radius, number_nearest_neighbors)
    self._data_noconsent = data_noconsent.drop(self._id_columns, axis=1)
    self._data_noconsent_nomatch = data_noconsent[np.invert(
        has_neighbor)].copy()
    self._data_noconsent_match = data_noconsent[has_neighbor].copy()
    self._data_noconsent_match["distance_to_nearest_neighbor"] = [
        min(distances) for distances in neighbors_distance
    ]
    self._data_adjusted = _distribute_conversion_values(
        self._data_consent, self._conversion_column,
        self._data_noconsent_match[self._conversion_column].values,
        weighted_conversion_values, neighbors_data_index, neighbors_distance,
        weighted_distance)
    return self._data_adjusted
def _calculate_weighted_conversion_values(
    conversion_values: Sequence[np.ndarray],
    neighbors_distance: Sequence[np.ndarray],
) -> Tuple[Sequence[float], Sequence[float]]:
  """Calculate weighted conversion values as function of distance.

  The weighted conversion value is calculated as the product of the conversion
  value with the softmax over all neighbor-similarities.

  Note: the return annotation uses builtin ``float`` — ``np.float`` was
  removed in NumPy 1.24 and would raise at import time.

  Args:
    conversion_values: Array of conversion_values for non-consented customers.
    neighbors_distance: Array of arrays of neighbor-distances.

  Returns:
    weighted_conversion_values: Array of weighted conversion_values per non-
      consented customer.
    softmax_similarity: Array of softmax similarities per non-consented
      customer.

  Raises:
    ValueError: if inputs have different lengths, or if any distance is
      negative.
  """
  if len(conversion_values) != len(neighbors_distance):
    raise ValueError("All of conversion_values and neighbors_distance "
                     "must have the same length.")
  if any((dist < 0).any() for dist in neighbors_distance):
    raise ValueError("Distances should not contain negative values. "
                     "Please review which distance metric you used.")
  # softmax(-distance): the closer a neighbor, the larger its share.
  softmax_similarity = [
      special.softmax(-distance) for distance in neighbors_distance
  ]
  weighted_conversion_values = [
      conversion_value * weight
      for conversion_value, weight in zip(conversion_values, softmax_similarity)
  ]
  return weighted_conversion_values, softmax_similarity
def _distribute_conversion_values(
    data_consent: pd.DataFrame,
    conversion_column: str,
    non_consent_conversion_values: Sequence[float],
    weighted_conversion_values: Sequence[np.ndarray],
    neighbors_index: Sequence[np.ndarray],
    neighbors_distance: Sequence[np.ndarray],
    weighted_distance: Sequence[np.ndarray],
) -> pd.DataFrame:
  """Distribute conversion-values of no-consent over consenting customers.

  Each consenting customer that was matched as a nearest neighbor of a
  no-consent customer receives that customer's distance-weighted share of
  conversion value. Book-keeping columns (average distance to matched
  no-consent customers, number of received conversions, sum of distribution
  weights) are added alongside, as well as a naive baseline that spreads the
  total no-consent conversion value evenly over all consenting customers.

  Args:
    data_consent: DataFrame of consented customers.
    conversion_column: String indicating the conversion KPI in data_consent.
    non_consent_conversion_values: Array of original conversion values.
    weighted_conversion_values: Array of arrays of weighted conversion_values,
      based on distance between consenting and no-consent customers.
    neighbors_index: Array of arrays of neighbor-indices.
    neighbors_distance: Array of arrays of neighbor-distances.
    weighted_distance: Array of arrays of weighted neighbor-distances.

  Returns:
    Copy of data_consent including the modelled conversion values.
  """
  adjusted = data_consent.copy()
  # Initialise all book-keeping columns to zero before accumulation.
  for bookkeeping_column in ("adjusted_conversion", "average_distance",
                             "n_added_conversions", "sum_distribution_weights"):
    adjusted[bookkeeping_column] = 0
  matches = zip(neighbors_index, weighted_conversion_values,
                neighbors_distance, weighted_distance)
  for match_index, match_values, match_distance, match_weight in matches:
    adjusted.loc[match_index, "adjusted_conversion"] += match_values
    adjusted.loc[match_index, "average_distance"] += match_distance
    adjusted.loc[match_index, "sum_distribution_weights"] += match_weight
    adjusted.loc[match_index, "n_added_conversions"] += 1
  # Turn the accumulated distances into a per-customer average.
  adjusted["average_distance"] /= adjusted["n_added_conversions"]
  # Naive baseline: spread the no-consent total evenly over all consenting
  # customers.
  per_customer_share = np.sum(non_consent_conversion_values) / len(data_consent)
  adjusted["naive_adjusted_conversion"] = (
      adjusted[conversion_column] + per_customer_share)
  return adjusted
def get_adjustments_and_summary_calculations(
    matcher: NearestCustomerMatcher,
    data_noconsent: pd.DataFrame,
    number_nearest_neighbors: Optional[float] = None,
    radius: Optional[float] = None,
    percentile: Optional[float] = None,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
  """Calculates adjusted conversions for consenting customers.

  Args:
    matcher: Matcher object which has been fit to all of data_consent. It
      provides the functionality to get the nearest neighbors for a given
      no-consent customer.
    data_noconsent: Dataframe of no-consent customers. Needs to have the
      same columns as data_consent to calculate similarity between data points.
    number_nearest_neighbors: Number of consenting customers to chose to match
      to. If float, is taken as proportion of all customers.
    radius: Radius to find matching consenting customers.
    percentile: Percentile of matched no-consent customers based on which
      radius is set.

  Returns:
    A two-tuple with:
      - adjusted conversion values for new and old customers.
      - summary statistics on matched conversions (% of counts, % of
        conversion value).

  Raises:
    ValueError: if not exactly one of number_nearest_neighbors, radius,
      or percentile is provided.
    ValueError: if the provided percentile is not within the range of 0-1.
  """
  has_number_nearest_neighbors = number_nearest_neighbors is not None
  has_radius = radius is not None
  has_percentile = percentile is not None
  if (has_number_nearest_neighbors + has_radius + has_percentile) != 1:
    raise ValueError("Exactly one of number_nearest_neighbors, radius, "
                     "or percentile has to be specified.")
  if has_percentile and not 0 < percentile <= 1:
    raise ValueError("The percentile has to be a value between 0 and 1.")
  # Branch on the explicit presence flags rather than on truthiness: a
  # falsy-but-provided value (e.g. 0) previously skipped every branch and
  # left data_adjusted unbound.
  if has_number_nearest_neighbors or has_radius:
    data_adjusted = matcher.calculate_adjusted_conversions(
        data_noconsent=data_noconsent,
        number_nearest_neighbors=number_nearest_neighbors,
        radius=radius)
  else:
    # percentile mode: derive a radius from the 1-NN distance distribution,
    # then re-run the matching constrained to that radius.
    matcher.calculate_adjusted_conversions(
        data_noconsent=data_noconsent, number_nearest_neighbors=1)
    radius = matcher.min_radius_by_percentile(percentile=percentile)
    data_adjusted = matcher.calculate_adjusted_conversions(
        data_noconsent=data_noconsent, radius=radius)
  return data_adjusted, matcher.summary_statistics_matched_conversions
|
{
"content_hash": "d7b02f4b44462fe8fe074408369d3827",
"timestamp": "",
"source": "github",
"line_count": 507,
"max_line_length": 80,
"avg_line_length": 43.226824457593686,
"alnum_prop": 0.7027742288738821,
"repo_name": "google/consent-based-conversion-adjustments",
"id": "53f470099d08fecbd048fb5f6d4534ad4143cd20",
"size": "22492",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cocoa/nearest_consented_customers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "13686"
},
{
"name": "Python",
"bytes": "70143"
},
{
"name": "Shell",
"bytes": "1546"
}
],
"symlink_target": ""
}
|
import re
from collections import defaultdict
import pkg_resources
import sqlalchemy as sa
from sqlalchemy import Column, exc, inspect, schema
from sqlalchemy.dialects.postgresql.base import PGCompiler, PGDDLCompiler
from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import (
BinaryExpression, BooleanClauseList, Delete
)
from sqlalchemy.types import VARCHAR, NullType
from .commands import CopyCommand, UnloadFromSelect
from .compat import string_types
# Alembic is an optional dependency: when it is installed, register the
# redshift dialect with Alembic's autogenerate/DDL machinery by reusing
# its PostgreSQL implementation.
try:
    from alembic.ddl import postgresql
except ImportError:
    # Alembic not installed; migrations support is simply unavailable.
    pass
else:
    from alembic.ddl.base import RenameTable
    # Render RENAME TABLE for 'redshift' the same way as for PostgreSQL.
    compiles(RenameTable, 'redshift')(postgresql.visit_rename_table)

    class RedshiftImpl(postgresql.PostgresqlImpl):
        """Alembic migration implementation for the redshift dialect."""
        __dialect__ = 'redshift'
__all__ = ['CopyCommand', 'UnloadFromSelect', 'RedshiftDialect']
# Regex for parsing and identity constraint out of adsrc, e.g.:
# "identity"(445178, 0, '1,1'::text)
IDENTITY_RE = re.compile(r"""
"identity" \(
(?P<current>-?\d+)
,\s
(?P<base>-?\d+)
,\s
'(?P<seed>-?\d+),(?P<step>-?\d+)'
.*
\)
""", re.VERBOSE)
# Regex for SQL identifiers (valid table and column names)
SQL_IDENTIFIER_RE = re.compile(r"""
[_a-zA-Z][\w$]* # SQL standard identifier
| # or
(?:"[^"]+")+ # SQL delimited (quoted) identifier
""", re.VERBOSE)
# Regex for foreign key constraints, e.g.:
# FOREIGN KEY(col1) REFERENCES othertable (col2)
# See https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
# for a definition of valid SQL identifiers.
FOREIGN_KEY_RE = re.compile(r"""
^FOREIGN\ KEY \s* \( # FOREIGN KEY, arbitrary whitespace, literal '('
(?P<columns> # Start a group to capture the referring columns
(?: # Start a non-capturing group
\s* # Arbitrary whitespace
([_a-zA-Z][\w$]* | ("[^"]+")+) # SQL identifier
\s* # Arbitrary whitespace
,? # There will be a colon if this isn't the last one
)+ # Close the non-capturing group; require at least one
) # Close the 'columns' group
\s* \) # Arbitrary whitespace and literal ')'
\s* REFERENCES \s*
((?P<referred_schema>([_a-zA-Z][\w$]* | ("[^"]*")+))\.)? # SQL identifier
(?P<referred_table>[_a-zA-Z][\w$]* | ("[^"]*")+) # SQL identifier
\s* \( # FOREIGN KEY, arbitrary whitespace, literal '('
(?P<referred_columns> # Start a group to capture the referring columns
(?: # Start a non-capturing group
\s* # Arbitrary whitespace
([_a-zA-Z][\w$]* | ("[^"]+")+) # SQL identifier
\s* # Arbitrary whitespace
,? # There will be a colon if this isn't the last one
)+ # Close the non-capturing group; require at least one
) # Close the 'columns' group
\s* \) # Arbitrary whitespace and literal ')'
""", re.VERBOSE)
# Regex for primary key constraints, e.g.:
# PRIMARY KEY (col1, col2)
PRIMARY_KEY_RE = re.compile(r"""
^PRIMARY \s* KEY \s* \( # FOREIGN KEY, arbitrary whitespace, literal '('
(?P<columns> # Start a group to capture column names
(?:
\s* # Arbitrary whitespace
# SQL identifier or delimited identifier
( [_a-zA-Z][\w$]* | ("[^"]*")+ )
\s* # Arbitrary whitespace
,? # There will be a colon if this isn't the last one
)+ # Close the non-capturing group; require at least one
)
\s* \) \s* # Arbitrary whitespace and literal ')'
""", re.VERBOSE)
def _get_relation_key(name, schema):
if schema is None:
return name
else:
return schema + "." + name
def _get_schema_and_relation(key):
    """Split a relation key into a ``(schema, relation)`` pair.

    A key without a dot has no schema component.  A dotted key is split
    into its two identifiers; anything else (more than two identifiers)
    is rejected.

    :raises ValueError: if *key* does not parse as one or two SQL
        identifiers.
    """
    if '.' not in key:
        return (None, key)
    identifiers = SQL_IDENTIFIER_RE.findall(key)
    if len(identifiers) == 1:
        return (None, key)
    elif len(identifiers) == 2:
        return identifiers
    # Bug fix: the original raised the bare format string without ever
    # interpolating the offending key into the message.
    raise ValueError(
        "%s does not look like a valid relation identifier" % key
    )
def unquoted(key):
    """Strip one level of surrounding double quotes from *key*, if present.

    Redshift stores some identifiers without quotes in internal tables,
    even though the name must be quoted elsewhere; in particular this
    happens for tables named as a keyword.
    """
    is_quoted = key.startswith('"') and key.endswith('"')
    return key[1:-1] if is_quoted else key
class RedshiftCompiler(PGCompiler):
    # Statement compiler with Redshift-specific SQL renderings.

    def visit_now_func(self, fn, **kw):
        # Redshift spells the current-timestamp function SYSDATE rather
        # than PostgreSQL's now().
        return "SYSDATE"
class RedshiftDDLCompiler(PGDDLCompiler):
    """
    Handles Redshift-specific ``CREATE TABLE`` syntax.

    Users can specify the `diststyle`, `distkey`, `sortkey` and `encode`
    properties per table and per column.

    Table level properties can be set using the dialect specific syntax. For
    example, to specify a distribution key and style you apply the following:

    >>> import sqlalchemy as sa
    >>> from sqlalchemy.schema import CreateTable
    >>> engine = sa.create_engine('redshift+psycopg2://example')
    >>> metadata = sa.MetaData()
    >>> user = sa.Table(
    ...     'user',
    ...     metadata,
    ...     sa.Column('id', sa.Integer, primary_key=True),
    ...     sa.Column('name', sa.String),
    ...     redshift_diststyle='KEY',
    ...     redshift_distkey='id',
    ...     redshift_interleaved_sortkey=['id', 'name'],
    ... )
    >>> print(CreateTable(user).compile(engine))
    <BLANKLINE>
    CREATE TABLE "user" (
        id INTEGER NOT NULL,
        name VARCHAR,
        PRIMARY KEY (id)
    ) DISTSTYLE KEY DISTKEY (id) INTERLEAVED SORTKEY (id, name)
    <BLANKLINE>
    <BLANKLINE>

    A single sort key can be applied without a wrapping list:

    >>> customer = sa.Table(
    ...     'customer',
    ...     metadata,
    ...     sa.Column('id', sa.Integer, primary_key=True),
    ...     sa.Column('name', sa.String),
    ...     redshift_sortkey='id',
    ... )
    >>> print(CreateTable(customer).compile(engine))
    <BLANKLINE>
    CREATE TABLE customer (
        id INTEGER NOT NULL,
        name VARCHAR,
        PRIMARY KEY (id)
    ) SORTKEY (id)
    <BLANKLINE>
    <BLANKLINE>

    Column-level special syntax can also be applied using the column info
    dictionary. For example, we can specify the ENCODE for a column:

    >>> product = sa.Table(
    ...     'product',
    ...     metadata,
    ...     sa.Column('id', sa.Integer, primary_key=True),
    ...     sa.Column('name', sa.String, info={'encode': 'lzo'})
    ... )
    >>> print(CreateTable(product).compile(engine))
    <BLANKLINE>
    CREATE TABLE product (
        id INTEGER NOT NULL,
        name VARCHAR ENCODE lzo,
        PRIMARY KEY (id)
    )
    <BLANKLINE>
    <BLANKLINE>

    We can also specify the distkey and sortkey options:

    >>> sku = sa.Table(
    ...     'sku',
    ...     metadata,
    ...     sa.Column('id', sa.Integer, primary_key=True),
    ...     sa.Column(
    ...         'name', sa.String, info={'distkey': True, 'sortkey': True}
    ...     )
    ... )
    >>> print(CreateTable(sku).compile(engine))
    <BLANKLINE>
    CREATE TABLE sku (
        id INTEGER NOT NULL,
        name VARCHAR DISTKEY SORTKEY,
        PRIMARY KEY (id)
    )
    <BLANKLINE>
    <BLANKLINE>
    """

    def post_create_table(self, table):
        # Build the clause emitted after the closing ')' of CREATE TABLE:
        # DISTSTYLE, DISTKEY and (INTERLEAVED) SORTKEY, taken from the
        # table's redshift_* dialect kwargs.
        text = ""
        info = table.dialect_options['redshift']

        diststyle = info.get('diststyle')
        if diststyle:
            diststyle = diststyle.upper()
            if diststyle not in ('EVEN', 'KEY', 'ALL'):
                raise exc.CompileError(
                    u"diststyle {0} is invalid".format(diststyle)
                )
            text += " DISTSTYLE " + diststyle

        distkey = info.get('distkey')
        if distkey:
            text += " DISTKEY ({0})".format(self.preparer.quote(distkey))

        sortkey = info.get('sortkey')
        interleaved_sortkey = info.get('interleaved_sortkey')
        if sortkey and interleaved_sortkey:
            raise exc.ArgumentError(
                "Parameters sortkey and interleaved_sortkey are "
                "mutually exclusive; you may not specify both."
            )
        if sortkey or interleaved_sortkey:
            # A bare string means a single sort column; otherwise a
            # sequence of column names and/or Column objects is expected.
            if isinstance(sortkey, string_types):
                keys = [sortkey]
            else:
                keys = sortkey or interleaved_sortkey
            keys = [key.name if isinstance(key, Column) else key
                    for key in keys]
            if interleaved_sortkey:
                text += " INTERLEAVED"
            sortkey_string = ", ".join(self.preparer.quote(key)
                                       for key in keys)
            text += " SORTKEY ({0})".format(sortkey_string)
        return text

    def get_column_specification(self, column, **kwargs):
        # Render one column definition: name, type, IDENTITY/DEFAULT,
        # Redshift column attributes and NOT NULL.
        colspec = self.preparer.format_column(column)
        colspec += " " + self.dialect.type_compiler.process(column.type)

        default = self.get_column_default_string(column)
        if default is not None:
            # Identity constraints show up as *default* when reflected.
            m = IDENTITY_RE.match(default)
            if m:
                colspec += " IDENTITY({seed},{step})".format(**m.groupdict())
            else:
                colspec += " DEFAULT " + default

        colspec += self._fetch_redshift_column_attributes(column)

        if not column.nullable:
            colspec += " NOT NULL"
        return colspec

    def _fetch_redshift_column_attributes(self, column):
        # Render column-level Redshift options read from ``Column.info``:
        # identity, encode, distkey and sortkey.
        text = ""
        if not hasattr(column, 'info'):
            return text
        info = column.info
        identity = info.get('identity')
        if identity:
            text += " IDENTITY({0},{1})".format(identity[0], identity[1])

        encode = info.get('encode')
        if encode:
            text += " ENCODE " + encode

        distkey = info.get('distkey')
        if distkey:
            text += " DISTKEY"

        sortkey = info.get('sortkey')
        if sortkey:
            text += " SORTKEY"
        return text
class RedshiftDialect(PGDialect_psycopg2):
    """
    Define Redshift-specific behavior.

    Most public methods are overrides of the underlying interfaces defined in
    :class:`~sqlalchemy.engine.interfaces.Dialect` and
    :class:`~sqlalchemy.engine.Inspector`.
    """
    name = 'redshift'

    statement_compiler = RedshiftCompiler
    ddl_compiler = RedshiftDDLCompiler

    # Redshift allows longer identifiers than stock PostgreSQL.
    max_identifier_length = 127

    # Dialect-specific keyword arguments accepted on Index and Table.
    construct_arguments = [
        (schema.Index, {
            "using": False,
            "where": None,
            "ops": {}
        }),
        (schema.Table, {
            "ignore_search_path": False,
            "diststyle": None,
            "distkey": None,
            "sortkey": None,
            "interleaved_sortkey": None,
        }),
    ]

    def __init__(self, *args, **kw):
        super(RedshiftDialect, self).__init__(*args, **kw)
        # Cache domains, as these will be static;
        # Redshift does not support user-created domains.
        self._domains = None

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """
        Return information about columns in `table_name`.

        Overrides interface
        :meth:`~sqlalchemy.engine.interfaces.Dialect.get_columns`.
        """
        cols = self._get_redshift_columns(connection, table_name, schema, **kw)
        if not self._domains:
            # _load_domains is inherited from the PostgreSQL dialect.
            self._domains = self._load_domains(connection)
        domains = self._domains
        columns = []
        for col in cols:
            column_info = self._get_column_info(
                name=col.name, format_type=col.format_type,
                default=col.default, notnull=col.notnull, domains=domains,
                enums=[], schema=col.schema, encode=col.encode)
            columns.append(column_info)
        return columns

    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """
        Return information about the primary key constraint on `table_name`.

        Overrides interface
        :meth:`~sqlalchemy.engine.interfaces.Dialect.get_pk_constraint`.
        """
        constraints = self._get_redshift_constraints(connection, table_name,
                                                     schema)
        pk_constraints = [c for c in constraints if c.contype == 'p']
        if not pk_constraints:
            return {'constrained_columns': [], 'name': ''}
        pk_constraint = pk_constraints[0]
        # Parse the column list out of the constraint definition text.
        m = PRIMARY_KEY_RE.match(pk_constraint.condef)
        colstring = m.group('columns')
        constrained_columns = SQL_IDENTIFIER_RE.findall(colstring)
        return {
            'constrained_columns': constrained_columns,
            'name': None,
        }

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """
        Return information about foreign keys in `table_name`.

        Overrides interface
        :meth:`~sqlalchemy.engine.interfaces.Dialect.get_foreign_keys`.
        """
        constraints = self._get_redshift_constraints(connection, table_name,
                                                     schema)
        fk_constraints = [c for c in constraints if c.contype == 'f']
        # Group the per-column constraint rows by constraint name.
        uniques = defaultdict(lambda: defaultdict(dict))
        for con in fk_constraints:
            uniques[con.conname]["key"] = con.conkey
            uniques[con.conname]["condef"] = con.condef
        fkeys = []
        for conname, attrs in uniques.items():
            # Parse referring/referred columns and target table out of the
            # constraint definition text.
            m = FOREIGN_KEY_RE.match(attrs['condef'])
            colstring = m.group('referred_columns')
            referred_columns = SQL_IDENTIFIER_RE.findall(colstring)
            referred_table = m.group('referred_table')
            referred_schema = m.group('referred_schema')
            colstring = m.group('columns')
            constrained_columns = SQL_IDENTIFIER_RE.findall(colstring)
            fkey_d = {
                'name': None,
                'constrained_columns': constrained_columns,
                'referred_schema': referred_schema,
                'referred_table': referred_table,
                'referred_columns': referred_columns,
            }
            fkeys.append(fkey_d)
        return fkeys

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        """
        Return a list of table names for `schema`.

        Overrides interface
        :meth:`~sqlalchemy.engine.interfaces.Dialect.get_table_names`.
        """
        # relkind 'r' = ordinary table.
        return self._get_table_or_view_names('r', connection, schema, **kw)

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        """
        Return a list of view names for `schema`.

        Overrides interface
        :meth:`~sqlalchemy.engine.interfaces.Dialect.get_view_names`.
        """
        # relkind 'v' = view.
        return self._get_table_or_view_names('v', connection, schema, **kw)

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return view definition.

        Given a :class:`.Connection`, a string `view_name`,
        and an optional string `schema`, return the view definition.

        Overrides interface
        :meth:`~sqlalchemy.engine.interfaces.Dialect.get_view_definition`.
        """
        view = self._get_redshift_relation(connection, view_name, schema, **kw)
        return sa.text(view.view_definition)

    def get_indexes(self, connection, table_name, schema, **kw):
        """
        Return information about indexes in `table_name`.

        Because Redshift does not support traditional indexes,
        this always returns an empty list.

        Overrides interface
        :meth:`~sqlalchemy.engine.interfaces.Dialect.get_indexes`.
        """
        return []

    @reflection.cache
    def get_unique_constraints(self, connection, table_name,
                               schema=None, **kw):
        """
        Return information about unique constraints in `table_name`.

        Overrides interface
        :meth:`~sqlalchemy.engine.interfaces.Dialect.get_unique_constraints`.
        """
        constraints = self._get_redshift_constraints(connection,
                                                     table_name, schema)
        constraints = [c for c in constraints if c.contype == 'u']
        # Group the per-column rows by constraint name, mapping each
        # attribute number to its column name.
        uniques = defaultdict(lambda: defaultdict(dict))
        for con in constraints:
            uniques[con.conname]["key"] = con.conkey
            uniques[con.conname]["cols"][con.attnum] = con.attname

        return [
            {'name': None,
             'column_names': [uc["cols"][i] for i in uc["key"]]}
            for name, uc in uniques.items()
        ]

    @reflection.cache
    def get_table_options(self, connection, table_name, schema, **kw):
        """
        Return a dictionary of options specified when the table of the
        given name was created.

        Overrides interface
        :meth:`~sqlalchemy.engine.Inspector.get_table_options`.
        """
        def keyfunc(column):
            num = int(column.sortkey)
            # If sortkey is interleaved, column numbers alternate
            # negative values, so take abs.
            return abs(num)
        table = self._get_redshift_relation(connection, table_name,
                                            schema, **kw)
        columns = self._get_redshift_columns(connection, table_name,
                                             schema, **kw)
        sortkey_cols = sorted([col for col in columns if col.sortkey],
                              key=keyfunc)
        # Any negative sortkey position marks the whole key as interleaved.
        interleaved = any([int(col.sortkey) < 0 for col in sortkey_cols])
        sortkey = [col.name for col in sortkey_cols]
        interleaved_sortkey = None
        if interleaved:
            interleaved_sortkey = sortkey
            sortkey = None
        distkeys = [col.name for col in columns if col.distkey]
        distkey = distkeys[0] if distkeys else None
        return {
            'redshift_diststyle': table.diststyle,
            'redshift_distkey': distkey,
            'redshift_sortkey': sortkey,
            'redshift_interleaved_sortkey': interleaved_sortkey,
        }

    def create_connect_args(self, *args, **kwargs):
        """
        Build DB-API compatible connection arguments.

        Overrides interface
        :meth:`~sqlalchemy.engine.interfaces.Dialect.create_connect_args`.
        """
        # Default to verified SSL using the bundled Redshift CA cert;
        # caller-supplied parameters take precedence.
        default_args = {
            'sslmode': 'verify-full',
            'sslrootcert': pkg_resources.resource_filename(
                __name__,
                'redshift-ssl-ca-cert.pem'
            ),
        }
        cargs, cparams = super(RedshiftDialect, self).create_connect_args(
            *args, **kwargs
        )
        default_args.update(cparams)
        return cargs, default_args

    def _get_table_or_view_names(self, relkind, connection, schema=None, **kw):
        # List relations of the given kind ('r' or 'v') in the given
        # schema, defaulting to the connection's default schema.
        default_schema = inspect(connection).default_schema_name
        if not schema:
            schema = default_schema
        info_cache = kw.get('info_cache')
        all_relations = self._get_all_relation_info(connection,
                                                    info_cache=info_cache)
        relation_names = []
        for key, relation in all_relations.items():
            this_schema, this_relation = _get_schema_and_relation(key)
            if this_schema is None:
                this_schema = default_schema
            if this_schema == schema and relation.relkind == relkind:
                relation_names.append(this_relation)
        return relation_names

    def _get_column_info(self, *args, **kwargs):
        # Extend the PostgreSQL column info with Redshift's ENCODE setting
        # and degrade unbounded VARCHARs to NullType.
        kw = kwargs.copy()
        encode = kw.pop('encode', None)
        column_info = super(RedshiftDialect, self)._get_column_info(
            *args,
            **kw
        )
        if isinstance(column_info['type'], VARCHAR):
            if column_info['type'].length is None:
                column_info['type'] = NullType()
        if 'info' not in column_info:
            column_info['info'] = {}
        if encode and encode != 'none':
            column_info['info']['encode'] = encode
        return column_info

    def _get_redshift_relation(self, connection, table_name,
                               schema=None, **kw):
        info_cache = kw.get('info_cache')
        all_relations = self._get_all_relation_info(connection,
                                                    info_cache=info_cache)
        key = _get_relation_key(table_name, schema)
        # Redshift stores some identifiers unquoted internally; retry
        # with the quotes stripped before giving up.
        if key not in all_relations.keys():
            key = unquoted(key)
        try:
            return all_relations[key]
        except KeyError:
            raise sa.exc.NoSuchTableError(key)

    def _get_redshift_columns(self, connection, table_name, schema=None, **kw):
        info_cache = kw.get('info_cache')
        all_columns = self._get_all_column_info(connection,
                                                info_cache=info_cache)
        key = _get_relation_key(table_name, schema)
        # See _get_redshift_relation for the unquoted() fallback.
        if key not in all_columns.keys():
            key = unquoted(key)
        return all_columns[key]

    def _get_redshift_constraints(self, connection, table_name,
                                  schema=None, **kw):
        info_cache = kw.get('info_cache')
        all_constraints = self._get_all_constraint_info(connection,
                                                        info_cache=info_cache)
        key = _get_relation_key(table_name, schema)
        # See _get_redshift_relation for the unquoted() fallback.
        if key not in all_constraints.keys():
            key = unquoted(key)
        return all_constraints[key]

    @reflection.cache
    def _get_all_relation_info(self, connection, **kw):
        # One catalog query for every user relation, keyed by
        # 'schema.relname' (bare relname for the default schema).
        result = connection.execute("""
        SELECT
          c.relkind,
          n.oid as "schema_oid",
          n.nspname as "schema",
          c.oid as "rel_oid",
          c.relname,
          CASE c.reldiststyle
            WHEN 0 THEN 'EVEN' WHEN 1 THEN 'KEY' WHEN 8 THEN 'ALL' END
            AS "diststyle",
          c.relowner AS "owner_id",
          u.usename AS "owner_name",
          TRIM(TRAILING ';' FROM pg_catalog.pg_get_viewdef(c.oid, true))
            AS "view_definition",
          pg_catalog.array_to_string(c.relacl, '\n') AS "privileges"
        FROM pg_catalog.pg_class c
             LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
             JOIN pg_catalog.pg_user u ON u.usesysid = c.relowner
        WHERE c.relkind IN ('r', 'v', 'm', 'S', 'f')
          AND n.nspname !~ '^pg_'
        ORDER BY c.relkind, n.oid, n.nspname;
        """)
        relations = {}
        for rel in result:
            schema = rel.schema
            if schema == inspect(connection).default_schema_name:
                schema = None
            key = _get_relation_key(rel.relname, schema)
            relations[key] = rel
        return relations

    @reflection.cache
    def _get_all_column_info(self, connection, **kw):
        all_columns = defaultdict(list)
        with connection.contextual_connect() as cc:
            # We fetch the current search_path, which may or may not quote
            # '$user' depending on whether other schemas need quoting.
            search_path = cc.execute("SHOW search_path").scalar()
            if '$user' in search_path and '"$user"' not in search_path:
                search_path = search_path.replace('$user', '"$user"')

            # Because pg_table_def only shows results for schemas on the
            # search_path, we explicitly include all non-system schemas, then
            # replace the original value for search_path.
            schema_names = ['"%s"' % r.name for r in cc.execute("""
            SELECT nspname AS "name"
            FROM pg_catalog.pg_namespace
            WHERE nspname !~ '^pg_' AND nspname <> 'information_schema'
            ORDER BY 1
            """)]
            modified_search_path = ','.join(schema_names)
            cc.execute("SET LOCAL search_path TO %s" % modified_search_path)

            result = cc.execute("""
            SELECT
              n.nspname as "schema",
              c.relname as "table_name",
              d.column as "name",
              encoding as "encode",
              type, distkey, sortkey, "notnull", adsrc, attnum,
              pg_catalog.format_type(att.atttypid, att.atttypmod),
              pg_catalog.pg_get_expr(ad.adbin, ad.adrelid) AS DEFAULT,
              n.oid as "schema_oid",
              c.oid as "table_oid"
            FROM pg_catalog.pg_class c
            LEFT JOIN pg_catalog.pg_namespace n
              ON n.oid = c.relnamespace
            JOIN pg_catalog.pg_table_def d
              ON (d.schemaname, d.tablename) = (n.nspname, c.relname)
            JOIN pg_catalog.pg_attribute att
              ON (att.attrelid, att.attname) = (c.oid, d.column)
            LEFT JOIN pg_catalog.pg_attrdef ad
              ON (att.attrelid, att.attnum) = (ad.adrelid, ad.adnum)
            WHERE n.nspname !~ '^pg_'
            ORDER BY n.nspname, c.relname, att.attnum
            """)
            for col in result:
                schema = col.schema
                if schema == inspect(connection).default_schema_name:
                    schema = None
                key = _get_relation_key(col.table_name, schema)
                all_columns[key].append(col)

            # Restore the original search_path for this connection.
            cc.execute("SET LOCAL search_path TO %s" % search_path)
        return dict(all_columns)

    @reflection.cache
    def _get_all_constraint_info(self, connection, **kw):
        # One catalog query for all constraints; one row per constrained
        # column, grouped under the owning relation's key.
        result = connection.execute("""
        SELECT
          n.nspname as "schema",
          c.relname as "table_name",
          t.contype,
          t.conname,
          t.conkey,
          a.attnum,
          a.attname,
          pg_catalog.pg_get_constraintdef(t.oid, true) as condef,
          n.oid as "schema_oid",
          c.oid as "rel_oid"
        FROM pg_catalog.pg_class c
        LEFT JOIN pg_catalog.pg_namespace n
          ON n.oid = c.relnamespace
        JOIN pg_catalog.pg_constraint t
          ON t.conrelid = c.oid
        JOIN pg_catalog.pg_attribute a
          ON t.conrelid = a.attrelid AND a.attnum = ANY(t.conkey)
        WHERE n.nspname !~ '^pg_'
        ORDER BY n.nspname, c.relname
        """)
        all_constraints = defaultdict(list)
        for con in result:
            schema = con.schema
            if schema == inspect(connection).default_schema_name:
                schema = None
            key = _get_relation_key(con.table_name, schema)
            all_constraints[key].append(con)
        return all_constraints
def gen_columns_from_children(root):
    """
    Recursively yield every :class:`sa.Column` reachable from *root*.

    Used by the DELETE compiler to discover which tables the WHERE
    clause touches, so they can be placed in a USING clause.

    :param root: the delete query (or a sub-expression of it)
    :return: a generator of columns
    """
    if isinstance(root, sa.Column):
        yield root
    elif isinstance(root, (Delete, BinaryExpression, BooleanClauseList)):
        for child in root.get_children():
            for column in gen_columns_from_children(child):
                yield column
@compiles(Delete, 'redshift')
def visit_delete_stmt(element, compiler, **kwargs):
    """
    Adds redshift-dialect specific compilation rule for the
    delete statement.

    Redshift DELETE syntax can be found here:
    https://docs.aws.amazon.com/redshift/latest/dg/r_DELETE.html

    .. :code-block: sql

        DELETE [ FROM ] table_name
        [ { USING } table_name, ...]
        [ WHERE condition ]

    By default, SqlAlchemy compiles DELETE statements with the
    syntax:

    .. :code-block: sql

        DELETE [ FROM ] table_name
        [ WHERE condition ]

    problem illustration:

    >>> from sqlalchemy import Table, Column, Integer, MetaData, delete
    >>> from sqlalchemy_redshift.dialect import RedshiftDialect
    >>> meta = MetaData()
    >>> table1 = Table(
    ... 'table_1',
    ... meta,
    ... Column('pk', Integer, primary_key=True)
    ... )
    ...
    >>> table2 = Table(
    ... 'table_2',
    ... meta,
    ... Column('pk', Integer, primary_key=True)
    ... )
    ...
    >>> del_stmt = delete(table1).where(table1.c.pk==table2.c.pk)
    >>> str(del_stmt.compile(dialect=RedshiftDialect()))
    'DELETE FROM table_1 USING table_2 WHERE table_1.pk = table_2.pk'
    >>> str(del_stmt)
    'DELETE FROM table_1 WHERE table_1.pk = table_2.pk'
    >>> del_stmt2 = delete(table1)
    >>> str(del_stmt2)
    'DELETE FROM table_1'
    >>> del_stmt3 = delete(table1).where(table1.c.pk > 1000)
    >>> str(del_stmt3)
    'DELETE FROM table_1 WHERE table_1.pk > :pk_1'
    >>> str(del_stmt3.compile(dialect=RedshiftDialect()))
    'DELETE FROM table_1 WHERE table_1.pk > %(pk_1)s'
    """
    # Render the delete target first; it is also used to exclude the
    # target table from the USING list.
    target_table = compiler.process(element.table, asfrom=True, **kwargs)

    # Empty strings are the defaults for the optional clauses.
    where_text = ''
    using_text = ''

    # If there is a WHERE clause, render it, then walk its children to
    # find every other table it references.  Tables enter the USING
    # clause in the order they first appear in the WHERE clause.
    children = element.get_children()
    if children:
        referenced_tables = []
        where_text = ' WHERE {clause}'.format(
            clause=compiler.process(*children, **kwargs)
        )

        for column in gen_columns_from_children(element):
            candidate = compiler.process(column.table, asfrom=True, **kwargs)
            if candidate != target_table and candidate not in referenced_tables:
                referenced_tables.append(candidate)

        if referenced_tables:
            using_text = ' USING {clause}'.format(
                clause=', '.join(referenced_tables)
            )

    return 'DELETE FROM {table}{using}{where}'.format(
        table=target_table,
        using=using_text,
        where=where_text)
|
{
"content_hash": "0ce7b100b97d61eb81c37003212aa3b7",
"timestamp": "",
"source": "github",
"line_count": 835,
"max_line_length": 79,
"avg_line_length": 36.461077844311376,
"alnum_prop": 0.5706355723435703,
"repo_name": "solackerman/sqlalchemy-redshift",
"id": "e2188ad73b90dac1a3a5d0ac91157604674bf99d",
"size": "30445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqlalchemy_redshift/dialect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104824"
}
],
"symlink_target": ""
}
|
import itertools
from .. import objtypes
from ..mixin import ValueType
# Supertypes that every Java array type implicitly has, per the JVM spec.
array_supers = 'java/lang/Object','java/lang/Cloneable','java/io/Serializable'
# Singleton frozenset of the Object type; used as the "bottom" supers set.
obj_fset = frozenset([objtypes.ObjectTT])
def isAnySubtype(env, x, seq):
    # True iff x is a subtype of at least one type in seq.
    for candidate in seq:
        if objtypes.isSubtype(env, x, candidate):
            return True
    return False
class TypeConstraint(ValueType):
    # Constrains the possible types of an object value.  ``supers`` are
    # types whose subtypes are all admitted; ``exact`` are types admitted
    # only as-is.  The bottom element (least information) admits every
    # object type.
    __slots__ = "env supers exact isBot".split()

    def __init__(self, env, supers, exact):
        self.env, self.supers, self.exact = env, frozenset(supers), frozenset(exact)
        # Bottom of the lattice: Object in supers means "any object type".
        self.isBot = objtypes.ObjectTT in supers

        temp = self.supers | self.exact
        # Null is tracked separately (ObjectConstraint.null), never here.
        assert objtypes.NullTT not in temp
        assert all(objtypes.isBaseTClass(tt) for tt in supers)
        assert all(objtypes.dim(tt) < 999 for tt in exact)

    def _key(self): return self.supers, self.exact

    # Python 2 truth protocol: truthy iff the constraint admits any type.
    def __nonzero__(self): return bool(self.supers or self.exact)

    def getSingleTType(self):
        # comSuper doesn't care about order so we can freely pass in nondeterministic order
        return objtypes.commonSupertype(self.env, list(self.supers) + list(self.exact))

    def isBoolOrByteArray(self):
        # True iff the constraint is exactly {bool[]..., byte[]...} with
        # matching array dimensions and no supers.
        if self.supers or len(self.exact) != 2:
            return False
        tt1, tt2 = self.exact
        bases = objtypes.baset(tt1), objtypes.baset(tt2)
        return objtypes.dim(tt1) == objtypes.dim(tt2) and sorted(bases) == [objtypes.baset(objtypes.BoolTT), objtypes.baset(objtypes.ByteTT)]

    @staticmethod
    def reduce(env, supers, exact):
        # Normalize: drop supers already covered by another super, then
        # drop exact types covered by a remaining super.
        newsupers = []
        for x in supers:
            if not isAnySubtype(env, x, newsupers):
                # x adds information; remove supers that x now covers.
                newsupers = [y for y in newsupers if not objtypes.isSubtype(env, y, x)]
                newsupers.append(x)

        newexact = [x for x in exact if not isAnySubtype(env, x, newsupers)]
        return TypeConstraint(env, newsupers, newexact)

    def join(*cons):
        # Intersection of the admitted type sets.
        assert len(set(map(type, cons))) == 1
        env = cons[0].env

        # optimize for the common case of joining with itself or with bot
        cons = set(c for c in cons if not c.isBot)
        if not cons:
            return TypeConstraint(env, obj_fset, [])
        elif len(cons) == 1:
            return cons.pop()
        assert(len(cons) == 2) # joining more than 2 not currently supported

        supers_l, exact_l = zip(*(c._key() for c in cons))

        # Pairwise-compare supers: keep whichever of each pair is the
        # subtype (the more specific constraint).
        newsupers = set()
        for t1, t2 in itertools.product(*supers_l):
            if objtypes.isSubtype(env, t1, t2):
                newsupers.add(t1)
            elif objtypes.isSubtype(env, t2, t1):
                newsupers.add(t2)
            else: # TODO: need to add special handling for interfaces here
                pass

        # Keep exact types admitted by every operand.
        newexact = frozenset.union(*exact_l)
        for c in cons:
            newexact = [x for x in newexact if x in c.exact or isAnySubtype(env, x, c.supers)]
        return TypeConstraint.reduce(env, newsupers, newexact)

    def meet(*cons):
        # Union of the admitted type sets, then normalize.
        supers = frozenset.union(*(c.supers for c in cons))
        exact = frozenset.union(*(c.exact for c in cons))
        return TypeConstraint.reduce(cons[0].env, supers, exact)
class ObjectConstraint(ValueType):
    # Constraint on an object value: whether it may be null, plus a
    # TypeConstraint on its possible types.
    __slots__ = "null types isBot".split()

    def __init__(self, null, types):
        self.null = null
        self.types = types
        # Bottom (least informative): nullable and any object type.
        self.isBot = null and types.isBot

    @staticmethod
    def constNull(env):
        # The constant-null value: nullable, with an empty type set.
        return ObjectConstraint(True, TypeConstraint(env, [], []))

    @staticmethod
    def fromTops(env, supers, exact, nonnull=False):
        types = TypeConstraint(env, supers, exact)
        if nonnull and not types:
            # Non-null but no admissible type: unsatisfiable.
            return None
        return ObjectConstraint(not nonnull, types)

    def _key(self):
        return self.null, self.types

    def isConstNull(self):
        return self.null and not self.types

    def getSingleTType(self):
        if self.types:
            return self.types.getSingleTType()
        return objtypes.NullTT

    def join(*cons):
        null = all(c.null for c in cons)
        types = TypeConstraint.join(*(c.types for c in cons))
        if not (null or types):
            return None
        res = ObjectConstraint(null, types)
        # Return the first operand itself when the join is a no-op, so
        # callers can rely on identity to detect "nothing changed".
        return cons[0] if cons[0] == res else res

    def meet(*cons):
        null = any(c.null for c in cons)
        types = TypeConstraint.meet(*(c.types for c in cons))
        return ObjectConstraint(null, types)

    def __str__(self):  # pragma: no cover
        if not self.types:
            return 'Obj(null)'
        return 'Obj({}, {}, {})'.format(self.null, sorted(self.types.supers), sorted(self.types.exact))
    __repr__ = __str__
|
{
"content_hash": "f2d8bc16546bce9b54668b1b4286840c",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 141,
"avg_line_length": 36.89430894308943,
"alnum_prop": 0.6207580431908329,
"repo_name": "orneryhippo/saturdays",
"id": "8befcd19bf32c839097b0f20fd847520e743c4e3",
"size": "4538",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Krakatau-master/Krakatau/Krakatau/ssa/constraints/obj_c.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "2404"
},
{
"name": "Jasmin",
"bytes": "40997"
},
{
"name": "Java",
"bytes": "40405"
},
{
"name": "Objective-J",
"bytes": "10978"
},
{
"name": "Python",
"bytes": "572639"
}
],
"symlink_target": ""
}
|
from typing import Iterable, List, Optional, Union
from airflow.cli.cli_parser import ActionCommand, GroupCommand, airflow_commands
from airflow.cli.simple_table import AirflowConsole, SimpleTable
from airflow.utils.cli import suppress_logs_and_warning
@suppress_logs_and_warning
def cheat_sheet(args):
    """Display cheat-sheet."""
    # ``args`` is the parsed CLI namespace required by the command
    # interface; it carries no options used here.
    display_commands_index()
def display_commands_index():
    """Display list of all commands."""

    def walk(
        path: List[str],
        subcommands: Iterable[Union[GroupCommand, ActionCommand]],
        section_title: Optional[str] = None,
    ):
        # Partition the commands into plain actions and nested groups.
        action_cmds: List[ActionCommand] = []
        group_cmds: List[GroupCommand] = []
        for cmd in subcommands:
            if isinstance(cmd, GroupCommand):
                group_cmds.append(cmd)
            else:
                action_cmds.append(cmd)

        console = AirflowConsole()
        if action_cmds:
            table = SimpleTable(title=section_title or "Miscellaneous commands")
            table.add_column(width=40)
            table.add_column()
            for action_cmd in sorted(action_cmds, key=lambda d: d.name):
                table.add_row(" ".join([*path, action_cmd.name]), action_cmd.help)
            console.print(table)

        # Recurse into each group, extending the command path prefix.
        if group_cmds:
            for group_cmd in sorted(group_cmds, key=lambda d: d.name):
                walk([*path, group_cmd.name], group_cmd.subcommands, group_cmd.help)

    walk(["airflow"], airflow_commands)
|
{
"content_hash": "e766a2b13e5c8d7d1f47085aaf593f26",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 94,
"avg_line_length": 35.63636363636363,
"alnum_prop": 0.6371173469387755,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "001a8721330cb15d88b37d7058281b9ebdb1873b",
"size": "2353",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "airflow/cli/commands/cheat_sheet_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
""" Functions dealing with execute commands. """
import argparse
def execute_module_command(module, command='', command_line=None, *args, **kwargs):
    """
    Executes the given command line in the given module.

    module        the module in which to execute the command
    command       name of the command to execute
    command_line  list of strings to be parsed as the command's arguments

    Extra positional and keyword arguments are forwarded to the module's
    ``execute`` function.  The keyword ``interactive`` (default False)
    additionally controls whether dashed commands (e.g. ``-h``) are ignored.

    Returns whatever the module's ``execute`` function returns, or ``None``
    when the module does not handle *command*.
    """
    # Bug fix: the original used a mutable default argument
    # (``command_line=[]``); use None and create a fresh list per call.
    if command_line is None:
        command_line = []

    module_commands = getattr(module, 'commands', [])
    module_description = getattr(module, 'description', '')
    module_arguments = getattr(module, 'arguments', [])
    module_execute = getattr(module, 'execute', None)
    interactive = kwargs.get('interactive', False)

    # If running in interactive mode, ignore the dashed commands (e.g. -h)
    if interactive:
        module_commands = [c for c in module_commands if not c.startswith('-')]

    # If the command can be handled by the module, parse the command line
    # and execute the command in the context of the parsed arguments.
    if module_execute and command in module_commands:
        parser = argparse.ArgumentParser(prog=module_commands[0],
                                         description=module_description)
        for arg in module_arguments:
            parser.add_argument(*arg[0], **arg[1])
        parsed_args = parser.parse_args(command_line)
        return module_execute(parsed_args=parsed_args, *args, **kwargs)
    return None
|
{
"content_hash": "f7087c17ae417dcfa56b15817097a531",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 97,
"avg_line_length": 42.90909090909091,
"alnum_prop": 0.6758474576271186,
"repo_name": "robobrobro/coffer",
"id": "c713f25cd0fc96b406e028a7e033ef55c2db8c1c",
"size": "1416",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "coffer/command/execute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24943"
},
{
"name": "Shell",
"bytes": "139"
}
],
"symlink_target": ""
}
|
"""Tests for packages info."""
import json
import os
import sys
import unittest
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import pynacl.working_directory
import archive_info
import error
import package_info
import packages_info
import revision_info
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
# Checked-in fixture describing the package/target layout used by these tests.
TEST_PACKAGES_JSON = os.path.join(CURRENT_DIR, 'test_packages.json')
# Identifiers below presumably name entries inside test_packages.json —
# confirm against the fixture if it changes.
TEST_PLATFORM = 'platform'
TEST_ARCH_ALL = 'arch_all'
TEST_ARCH_SHARED = 'arch_shared'
TEST_ARCH_NON_SHARED = 'arch_non_shared'
TEST_EMPTY_PACKAGE_TARGET = 'empty_package_target'
TEST_SINGLE_PACKAGE_PACKAGE_TARGET = 'package_1'
TEST_MULTI_PACKAGE_PACKAGE_TARGET = 'package_2'
class TestRevisionInfo(unittest.TestCase):
  """Tests for revision_info.RevisionInfo target handling and file I/O."""

  def setUp(self):
    # Package/target layout shared by all tests, loaded from the fixture.
    self._packages = packages_info.PackagesInfo(TEST_PACKAGES_JSON)

  def test_RevTargetSets(self):
    # Tests that we can properly set a target revision.
    package = package_info.PackageInfo()
    package.AppendArchive(archive_info.ArchiveInfo(name='test_name',
                                                   hash='hash_value'))
    revision_desc = revision_info.RevisionInfo(self._packages)
    revision_desc.SetTargetRevision('test_package', 'package_target', package)
    self.assertEqual(package, revision_desc.GetPackageInfo('package_target'))

  def test_RevisionTargetSamePackage(self):
    # Tests that all the targets must all be the same.
    package = package_info.PackageInfo()
    package.AppendArchive(archive_info.ArchiveInfo(name='test_name',
                                                   hash='hash_value'))
    revision_desc = revision_info.RevisionInfo(self._packages)
    revision_desc.SetTargetRevision('test1', 'package_target', package)
    # A second package name for the same target must be rejected.
    self.assertRaises(
        error.Error,
        revision_desc.SetTargetRevision,
        'test2',
        'package_target',
        package
    )

  def test_RevisionFileSaveLoad(self):
    # Tests that we can properly save and load a revision file.
    package = package_info.PackageInfo()
    package.AppendArchive(archive_info.ArchiveInfo(name='test_name',
                                                   hash='hash_value'))
    revision = revision_info.RevisionInfo(self._packages)
    revision.SetRevisionNumber(100)
    package_targets = self._packages.GetPackageTargetsForPackage(
        TEST_SINGLE_PACKAGE_PACKAGE_TARGET
    )
    self.assertEqual(
        1,
        len(package_targets),
        "Invalid test data, single package package target requires 1 target"
    )
    revision.SetTargetRevision(
        TEST_SINGLE_PACKAGE_PACKAGE_TARGET,
        package_targets[0],
        package
    )
    with pynacl.working_directory.TemporaryWorkingDirectory() as work_dir:
      revision_file = os.path.join(work_dir, 'test_revision.json')
      revision.SaveRevisionFile(revision_file)
      # Round trip: loading the saved file must reproduce an equal object.
      new_revision = revision_info.RevisionInfo(self._packages, revision_file)
    self.assertEqual(revision, new_revision)

  def test_RevisionFileRequiresRevisionNumber(self):
    # Tests that saving fails when no revision number has been set.
    package = package_info.PackageInfo()
    package.AppendArchive(archive_info.ArchiveInfo(name='test_name',
                                                   hash='hash_value'))
    revision = revision_info.RevisionInfo(self._packages)
    package_targets = self._packages.GetPackageTargetsForPackage(
        TEST_SINGLE_PACKAGE_PACKAGE_TARGET
    )
    for package_target in package_targets:
      revision.SetTargetRevision(
          TEST_SINGLE_PACKAGE_PACKAGE_TARGET,
          package_target,
          package
      )
    with pynacl.working_directory.TemporaryWorkingDirectory() as work_dir:
      revision_file = os.path.join(work_dir, 'test_revision.json')
      # SetRevisionNumber was never called, so saving must raise.
      self.assertRaises(
          error.Error,
          revision.SaveRevisionFile,
          revision_file
      )

  def test_AlteredRevisionFileFails(self):
    # Tests that an altered revision file will fail to load.
    package = package_info.PackageInfo()
    package.AppendArchive(archive_info.ArchiveInfo(name='test_name',
                                                   hash='hash_value'))
    revision = revision_info.RevisionInfo(self._packages)
    revision.SetRevisionNumber(100)
    package_targets = self._packages.GetPackageTargetsForPackage(
        TEST_SINGLE_PACKAGE_PACKAGE_TARGET
    )
    for package_target in package_targets:
      revision.SetTargetRevision(
          TEST_SINGLE_PACKAGE_PACKAGE_TARGET,
          package_target,
          package
      )
    with pynacl.working_directory.TemporaryWorkingDirectory() as work_dir:
      revision_file = os.path.join(work_dir, 'altered_revision.json')
      revision.SaveRevisionFile(revision_file)
      # Alter the file directly and save it back out
      with open(revision_file, 'rt') as f:
        revision_json = json.load(f)
      revision_json[revision_info.FIELD_REVISION] = 'noise'
      with open(revision_file, 'wt') as f:
        json.dump(revision_json, f)
      new_revision = revision_info.RevisionInfo(self._packages)
      self.assertRaises(
          error.Error,
          new_revision.LoadRevisionFile,
          revision_file
      )

  def test_RevisionFileMustSetAllTargets(self):
    # Tests that a revision file fails if not all package targets are set.
    package = package_info.PackageInfo()
    package.AppendArchive(archive_info.ArchiveInfo(name='test_name',
                                                   hash='hash_value'))
    package_targets = self._packages.GetPackageTargetsForPackage(
        TEST_MULTI_PACKAGE_PACKAGE_TARGET
    )
    self.assertTrue(
        len(package_targets) > 0,
        'Invalid test data, multiple package targets expected'
    )
    revision = revision_info.RevisionInfo(self._packages)
    revision.SetRevisionNumber(100)
    # Only the first target is set; the remaining ones stay unset.
    revision.SetTargetRevision(
        TEST_MULTI_PACKAGE_PACKAGE_TARGET,
        package_targets[0],
        package
    )
    with pynacl.working_directory.TemporaryWorkingDirectory() as work_dir:
      revision_file = os.path.join(work_dir, 'incomplete_revision.json')
      self.assertRaises(
          error.Error,
          revision.SaveRevisionFile,
          revision_file
      )

  def test_RevisionFileSavesForMultiTargets(self):
    # Tests that a revision successfully saves a multi-package target package.
    package = package_info.PackageInfo()
    package.AppendArchive(archive_info.ArchiveInfo(name='test_name',
                                                   hash='hash_value'))
    package_targets = self._packages.GetPackageTargetsForPackage(
        TEST_MULTI_PACKAGE_PACKAGE_TARGET
    )
    self.assertTrue(
        len(package_targets) > 0,
        'Invalid test data, multiple package targets expected'
    )
    revision = revision_info.RevisionInfo(self._packages)
    revision.SetRevisionNumber(100)
    for package_target in package_targets:
      revision.SetTargetRevision(
          TEST_MULTI_PACKAGE_PACKAGE_TARGET,
          package_target,
          package
      )
    with pynacl.working_directory.TemporaryWorkingDirectory() as work_dir:
      revision_file = os.path.join(work_dir, 'complete_revision.json')
      revision.SaveRevisionFile(revision_file)
      new_revision = revision_info.RevisionInfo(self._packages, revision_file)
    self.assertEqual(revision, new_revision)
# Allow running this test file directly.
if __name__ == '__main__':
  unittest.main()
|
{
"content_hash": "1d709992835ea1b36f550e9627df176d",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 78,
"avg_line_length": 34.12442396313364,
"alnum_prop": 0.6668467251856853,
"repo_name": "mxOBS/deb-pkg_trusty_chromium-browser",
"id": "8cbfd76491d28b141449f589e10d46669c20eaff",
"size": "7595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "native_client/build/package_version/revision_info_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "230130"
},
{
"name": "Batchfile",
"bytes": "34966"
},
{
"name": "C",
"bytes": "12435900"
},
{
"name": "C++",
"bytes": "264378706"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "795726"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Emacs Lisp",
"bytes": "2360"
},
{
"name": "Go",
"bytes": "31783"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "19491230"
},
{
"name": "Java",
"bytes": "7637875"
},
{
"name": "JavaScript",
"bytes": "12723911"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "14392"
},
{
"name": "Makefile",
"bytes": "208315"
},
{
"name": "Objective-C",
"bytes": "1460032"
},
{
"name": "Objective-C++",
"bytes": "7760068"
},
{
"name": "PLpgSQL",
"bytes": "175360"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "427212"
},
{
"name": "Python",
"bytes": "11447382"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104846"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1208350"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
}
|
import psutil
from uiBasicWidget import *
from ctaAlgo.uiCtaWidget import CtaEngineManager
from dataRecorder.uiDrWidget import DrEngineManager
from riskManager.uiRmWidget import RmEngineManager
########################################################################
class MainWindow(QtGui.QMainWindow):
    """Main trading window."""
    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine):
        """Constructor"""
        super(MainWindow, self).__init__()
        self.mainEngine = mainEngine
        self.eventEngine = eventEngine
        self.widgetDict = {}    # dictionary holding child windows
        self.initUi()
        self.loadWindowSettings()
    #----------------------------------------------------------------------
    def initUi(self):
        """Initialize the user interface."""
        self.setWindowTitle('VnTrader')
        self.initCentral()
        self.initMenu()
        self.initStatusBar()
    #----------------------------------------------------------------------
    def initCentral(self):
        """Initialize the central docked monitor widgets."""
        widgetMarketM, dockMarketM = self.createDock(MarketMonitor, u'行情', QtCore.Qt.RightDockWidgetArea)
        widgetLogM, dockLogM = self.createDock(LogMonitor, u'日志', QtCore.Qt.BottomDockWidgetArea)
        widgetErrorM, dockErrorM = self.createDock(ErrorMonitor, u'错误', QtCore.Qt.BottomDockWidgetArea)
        widgetTradeM, dockTradeM = self.createDock(TradeMonitor, u'成交', QtCore.Qt.BottomDockWidgetArea)
        widgetOrderM, dockOrderM = self.createDock(OrderMonitor, u'委托', QtCore.Qt.RightDockWidgetArea)
        widgetPositionM, dockPositionM = self.createDock(PositionMonitor, u'持仓', QtCore.Qt.BottomDockWidgetArea)
        widgetAccountM, dockAccountM = self.createDock(AccountMonitor, u'资金', QtCore.Qt.BottomDockWidgetArea)
        widgetTradingW, dockTradingW = self.createDock(TradingWidget, u'交易', QtCore.Qt.LeftDockWidgetArea)
        self.tabifyDockWidget(dockTradeM, dockErrorM)
        self.tabifyDockWidget(dockTradeM, dockLogM)
        self.tabifyDockWidget(dockPositionM, dockAccountM)
        dockTradeM.raise_()
        dockPositionM.raise_()
        # Connect signals between components: double-clicking a position row
        # pre-fills the trading widget to close that position.
        widgetPositionM.itemDoubleClicked.connect(widgetTradingW.closePosition)
    #----------------------------------------------------------------------
    def initMenu(self):
        """Initialize the menu bar."""
        # Create actions
        connectCtpAction = QtGui.QAction(u'连接CTP', self)
        connectCtpAction.triggered.connect(self.connectCtp)
        connectLtsAction = QtGui.QAction(u'连接LTS', self)
        connectLtsAction.triggered.connect(self.connectLts)
        connectKsotpAction = QtGui.QAction(u'连接金仕达期权', self)
        connectKsotpAction.triggered.connect(self.connectKsotp)
        connectFemasAction = QtGui.QAction(u'连接飞马', self)
        connectFemasAction.triggered.connect(self.connectFemas)
        connectXspeedAction = QtGui.QAction(u'连接飞创', self)
        connectXspeedAction.triggered.connect(self.connectXspeed)
        connectKsgoldAction = QtGui.QAction(u'连接金仕达黄金', self)
        connectKsgoldAction.triggered.connect(self.connectKsgold)
        connectSgitAction = QtGui.QAction(u'连接飞鼠', self)
        connectSgitAction.triggered.connect(self.connectSgit)
        connectWindAction = QtGui.QAction(u'连接Wind', self)
        connectWindAction.triggered.connect(self.connectWind)
        connectIbAction = QtGui.QAction(u'连接IB', self)
        connectIbAction.triggered.connect(self.connectIb)
        connectOandaAction = QtGui.QAction(u'连接OANDA', self)
        connectOandaAction.triggered.connect(self.connectOanda)
        connectDbAction = QtGui.QAction(u'连接数据库', self)
        connectDbAction.triggered.connect(self.mainEngine.dbConnect)
        testAction = QtGui.QAction(u'测试', self)
        testAction.triggered.connect(self.test)
        exitAction = QtGui.QAction(u'退出', self)
        exitAction.triggered.connect(self.close)
        aboutAction = QtGui.QAction(u'关于', self)
        aboutAction.triggered.connect(self.openAbout)
        contractAction = QtGui.QAction(u'查询合约', self)
        contractAction.triggered.connect(self.openContract)
        drAction = QtGui.QAction(u'行情数据记录', self)
        drAction.triggered.connect(self.openDr)
        ctaAction = QtGui.QAction(u'CTA策略', self)
        ctaAction.triggered.connect(self.openCta)
        rmAction = QtGui.QAction(u'风险管理', self)
        rmAction.triggered.connect(self.openRm)
        # Create menus
        menubar = self.menuBar()
        # Only show menu entries for gateways that are actually loaded
        sysMenu = menubar.addMenu(u'系统')
        if 'CTP' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectCtpAction)
        if 'LTS' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectLtsAction)
        if 'FEMAS' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectFemasAction)
        if 'XSPEED' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectXspeedAction)
        if 'KSOTP' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectKsotpAction)
        if 'KSGOLD' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectKsgoldAction)
        if 'SGIT' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectSgitAction)
        sysMenu.addSeparator()
        if 'IB' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectIbAction)
        if 'OANDA' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectOandaAction)
        sysMenu.addSeparator()
        if 'Wind' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectWindAction)
        sysMenu.addSeparator()
        sysMenu.addAction(connectDbAction)
        sysMenu.addSeparator()
        sysMenu.addAction(exitAction)
        functionMenu = menubar.addMenu(u'功能')
        functionMenu.addAction(contractAction)
        functionMenu.addAction(drAction)
        functionMenu.addAction(rmAction)
        # Algorithm-related
        algoMenu = menubar.addMenu(u'算法')
        algoMenu.addAction(ctaAction)
        # Help
        helpMenu = menubar.addMenu(u'帮助')
        helpMenu.addAction(aboutAction)
        helpMenu.addAction(testAction)
    #----------------------------------------------------------------------
    def initStatusBar(self):
        """Initialize the status bar."""
        self.statusLabel = QtGui.QLabel()
        self.statusLabel.setAlignment(QtCore.Qt.AlignLeft)
        self.statusBar().addPermanentWidget(self.statusLabel)
        self.statusLabel.setText(self.getCpuMemory())
        self.sbCount = 0
        self.sbTrigger = 10     # refresh once every 10 seconds
        self.eventEngine.register(EVENT_TIMER, self.updateStatusBar)
    #----------------------------------------------------------------------
    def updateStatusBar(self, event):
        """Update CPU and memory usage shown in the status bar."""
        self.sbCount += 1
        # Timer events arrive every second; only refresh every sbTrigger ticks.
        if self.sbCount == self.sbTrigger:
            self.sbCount = 0
            self.statusLabel.setText(self.getCpuMemory())
    #----------------------------------------------------------------------
    def getCpuMemory(self):
        """Return a text summary of CPU and memory usage."""
        cpuPercent = psutil.cpu_percent()
        memoryPercent = psutil.virtual_memory().percent
        return u'CPU使用率:%d%%   内存使用率:%d%%' % (cpuPercent, memoryPercent)
    #----------------------------------------------------------------------
    def connectCtp(self):
        """Connect to the CTP interface."""
        self.mainEngine.connect('CTP')
    #----------------------------------------------------------------------
    def connectLts(self):
        """Connect to the LTS interface."""
        self.mainEngine.connect('LTS')
    #----------------------------------------------------------------------
    def connectKsotp(self):
        """Connect to the Kingstar options interface."""
        self.mainEngine.connect('KSOTP')
    #----------------------------------------------------------------------
    def connectFemas(self):
        """Connect to the Femas interface."""
        self.mainEngine.connect('FEMAS')
    #----------------------------------------------------------------------
    def connectXspeed(self):
        """Connect to the XSpeed interface."""
        self.mainEngine.connect('XSPEED')
    #----------------------------------------------------------------------
    def connectKsgold(self):
        """Connect to the Kingstar gold interface."""
        self.mainEngine.connect('KSGOLD')
    #----------------------------------------------------------------------
    def connectSgit(self):
        """Connect to the SGIT interface."""
        self.mainEngine.connect('SGIT')
    #----------------------------------------------------------------------
    def connectWind(self):
        """Connect to the Wind interface."""
        self.mainEngine.connect('Wind')
    #----------------------------------------------------------------------
    def connectIb(self):
        """Connect to IB."""
        self.mainEngine.connect('IB')
    #----------------------------------------------------------------------
    def connectOanda(self):
        """Connect to OANDA."""
        self.mainEngine.connect('OANDA')
    #----------------------------------------------------------------------
    def test(self):
        """Function driven by the test menu entry."""
        # Manually triggered test code can be added here
        pass
    #----------------------------------------------------------------------
    def openAbout(self):
        """Open the About dialog (created lazily on first use)."""
        try:
            self.widgetDict['aboutW'].show()
        except KeyError:
            self.widgetDict['aboutW'] = AboutWidget(self)
            self.widgetDict['aboutW'].show()
    #----------------------------------------------------------------------
    def openContract(self):
        """Open the contract query monitor (created lazily on first use)."""
        try:
            self.widgetDict['contractM'].show()
        except KeyError:
            self.widgetDict['contractM'] = ContractMonitor(self.mainEngine)
            self.widgetDict['contractM'].show()
    #----------------------------------------------------------------------
    def openCta(self):
        """Open the CTA strategy component (created lazily on first use)."""
        try:
            self.widgetDict['ctaM'].showMaximized()
        except KeyError:
            self.widgetDict['ctaM'] = CtaEngineManager(self.mainEngine.ctaEngine, self.eventEngine)
            self.widgetDict['ctaM'].showMaximized()
    #----------------------------------------------------------------------
    def openDr(self):
        """Open the market data recording component (created lazily)."""
        try:
            self.widgetDict['drM'].showMaximized()
        except KeyError:
            self.widgetDict['drM'] = DrEngineManager(self.mainEngine.drEngine, self.eventEngine)
            self.widgetDict['drM'].showMaximized()
    #----------------------------------------------------------------------
    def openRm(self):
        """Open the risk management component (created lazily)."""
        try:
            self.widgetDict['rmM'].show()
        except KeyError:
            self.widgetDict['rmM'] = RmEngineManager(self.mainEngine.rmEngine, self.eventEngine)
            self.widgetDict['rmM'].show()
    #----------------------------------------------------------------------
    def closeEvent(self, event):
        """Confirm exit, close child windows, save layout, stop the engine."""
        reply = QtGui.QMessageBox.question(self, u'退出',
                                           u'确认退出?', QtGui.QMessageBox.Yes |
                                           QtGui.QMessageBox.No, QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            for widget in self.widgetDict.values():
                widget.close()
            self.saveWindowSettings()
            self.mainEngine.exit()
            event.accept()
        else:
            event.ignore()
    #----------------------------------------------------------------------
    def createDock(self, widgetClass, widgetName, widgetArea):
        """Create a monitor widget and dock it in the given area."""
        widget = widgetClass(self.mainEngine, self.eventEngine)
        dock = QtGui.QDockWidget(widgetName)
        dock.setWidget(widget)
        dock.setObjectName(widgetName)
        dock.setFeatures(dock.DockWidgetFloatable|dock.DockWidgetMovable)
        self.addDockWidget(widgetArea, dock)
        return widget, dock
    #----------------------------------------------------------------------
    def saveWindowSettings(self):
        """Persist window state and geometry via QSettings."""
        settings = QtCore.QSettings('vn.py', 'vn.trader')
        settings.setValue('state', self.saveState())
        settings.setValue('geometry', self.saveGeometry())
    #----------------------------------------------------------------------
    def loadWindowSettings(self):
        """Restore window state and geometry from QSettings."""
        # NOTE(review): .toByteArray() implies the PyQt4 QVariant API —
        # verify behavior when no settings have been saved yet.
        settings = QtCore.QSettings('vn.py', 'vn.trader')
        self.restoreState(settings.value('state').toByteArray())
        self.restoreGeometry(settings.value('geometry').toByteArray())
########################################################################
class AboutWidget(QtGui.QDialog):
    """Dialog showing the About information."""
    #----------------------------------------------------------------------
    def __init__(self, parent=None):
        """Constructor"""
        super(AboutWidget, self).__init__(parent)
        self.initUi()
    #----------------------------------------------------------------------
    def initUi(self):
        """Initialize the user interface."""
        self.setWindowTitle(u'关于VnTrader')
        text = u"""
            Developed by traders, for traders.
            License:MIT
            Website:www.vnpy.org
            Github:www.github.com/vnpy/vnpy
            """
        label = QtGui.QLabel()
        label.setText(text)
        label.setMinimumWidth(500)
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(label)
        self.setLayout(vbox)
|
{
"content_hash": "63d79b59858df75bde53140862ef4e35",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 112,
"avg_line_length": 37.98365122615804,
"alnum_prop": 0.5001434720229555,
"repo_name": "lukesummer/vnpy",
"id": "32de7b3367aef8e36bedfe751a4852d0eaed6440",
"size": "14637",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vn.trader/uiMainWindow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "49442"
},
{
"name": "C++",
"bytes": "2201633"
},
{
"name": "Python",
"bytes": "1972314"
},
{
"name": "R",
"bytes": "1354"
},
{
"name": "Shell",
"bytes": "4223"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import botbot.core.fields
class Migration(migrations.Migration):
    """Initial schema for the plugins app: Plugin and ActivePlugin."""

    dependencies = [
        ('bots', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='ActivePlugin',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # JSON blob of user-specified plugin settings.
                ('configuration', botbot.core.fields.JSONField(default={}, help_text=b'User-specified attributes for this plugin {"username": "joe", "api-key": "foo"}', blank=True)),
                ('channel', models.ForeignKey(to='bots.Channel')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Plugin',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100)),
                ('slug', models.SlugField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Added after both CreateModel operations because Plugin is
        # created later than ActivePlugin in this migration.
        migrations.AddField(
            model_name='activeplugin',
            name='plugin',
            field=models.ForeignKey(to='plugins.Plugin'),
            preserve_default=True,
        ),
    ]
|
{
"content_hash": "4b7dc74e1b4b56b42ff2649fd2a23be9",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 182,
"avg_line_length": 32.95238095238095,
"alnum_prop": 0.5332369942196532,
"repo_name": "Freso/botbot-web",
"id": "f2ada209530e0e8228160a87207ca11fb81c3b1b",
"size": "1408",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "botbot/apps/plugins/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "446600"
},
{
"name": "HTML",
"bytes": "118073"
},
{
"name": "JavaScript",
"bytes": "22155"
},
{
"name": "Makefile",
"bytes": "2629"
},
{
"name": "Python",
"bytes": "230474"
}
],
"symlink_target": ""
}
|
from decimal import Decimal
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import crypto, timezone
from django_hosts.resolvers import reverse
from sorl.thumbnail import ImageField, get_thumbnail
# Donation total threshold tied to logo display (presumably the amount at
# which a donor's logo is shown — confirm against templates/views).
DISPLAY_LOGO_AMOUNT = Decimal("200.00")
# Default amount pre-selected in the donation form (units defined by the
# payment integration — confirm with the Stripe configuration).
DEFAULT_DONATION_AMOUNT = 50
# Choices for Donation.interval: recurring cadences plus one-time.
INTERVAL_CHOICES = (
    ('monthly', 'Monthly donation'),
    ('quarterly', 'Quarterly donation'),
    ('yearly', 'Yearly donation'),
    ('onetime', 'One-time donation'),
)
class DjangoHeroManager(models.Manager):
    """Manager exposing donor queries scoped to a fundraising campaign."""

    def for_campaign(self, campaign, hero_type=None):
        """Return visible, approved donors for *campaign*, with their total
        donated amount annotated, ordered by amount (desc) then name."""
        qs = (
            self.get_queryset()
            .filter(
                donation__campaign=campaign,
                is_visible=True,
                approved=True,
            )
            .annotate(donated_amount=models.Sum('donation__payment__amount'))
        )
        if hero_type:
            qs = qs.filter(hero_type=hero_type)
        return qs.order_by('-donated_amount', 'name')
class FundraisingModel(models.Model):
    """Abstract base with a random 12-char primary key and timestamps."""
    id = models.CharField(max_length=12, primary_key=True)
    created = models.DateTimeField(default=timezone.now)
    modified = models.DateTimeField(default=timezone.now)

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        """Refresh ``modified`` and lazily assign the random primary key."""
        self.modified = timezone.now()
        if not self.id:
            # Generate the key on first save only.
            self.id = crypto.get_random_string(length=12)
        return super(FundraisingModel, self).save(*args, **kwargs)
class DjangoHero(FundraisingModel):
    """A donor (individual or organization) on the fundraising page."""
    email = models.EmailField(blank=True)
    stripe_customer_id = models.CharField(max_length=100, blank=True)
    logo = ImageField(upload_to="fundraising/logos/", blank=True)
    url = models.URLField(blank=True, verbose_name='URL')
    name = models.CharField(max_length=100, blank=True)
    HERO_TYPE_CHOICES = (
        ('individual', 'Individual'),
        ('organization', 'Organization'),
    )
    hero_type = models.CharField(max_length=30, choices=HERO_TYPE_CHOICES, blank=True)
    # Consent / moderation flags controlling public display.
    is_visible = models.BooleanField(
        default=False,
        verbose_name="Agreed to displaying on the fundraising page?",
    )
    is_subscribed = models.BooleanField(
        default=False,
        verbose_name="Agreed to being contacted by DSF?",
    )
    # None means not yet reviewed; True/False is the moderation decision.
    approved = models.NullBooleanField(
        verbose_name="Name, URL, and Logo approved?",
    )
    objects = DjangoHeroManager()

    def __str__(self):
        return self.name if self.name else 'Anonymous #{}'.format(self.pk)

    class Meta:
        verbose_name = "Django hero"
        verbose_name_plural = "Django heroes"

    @property
    def thumbnail(self):
        """170x170 thumbnail of the donor's logo."""
        return get_thumbnail(self.logo, '170x170', quality=100)

    @property
    def name_with_fallback(self):
        """The donor's name, or a generic label when blank."""
        return self.name if self.name else 'Anonymous Hero'
@receiver(post_save, sender=DjangoHero)
def create_thumbnail_on_save(sender, **kwargs):
    """Access the saved hero's thumbnail so it is generated eagerly."""
    instance = kwargs['instance']
    return instance.thumbnail
class Campaign(models.Model):
    """A fundraising campaign with a goal and optional stretch goal."""
    name = models.CharField(max_length=100)
    slug = models.SlugField(max_length=100)
    goal = models.DecimalField(max_digits=9, decimal_places=2)
    # Template used to render this campaign's page.
    template = models.CharField(max_length=50, default="fundraising/campaign_default.html")
    stretch_goal = models.DecimalField(max_digits=9, decimal_places=2, blank=True, null=True)
    stretch_goal_url = models.URLField(blank=True, null=True)
    start_date = models.DateTimeField(blank=True, null=True)
    end_date = models.DateTimeField(blank=True, null=True)
    is_active = models.BooleanField(default=False, help_text="Should donation form be enabled or not?")
    is_public = models.BooleanField(default=False, help_text="Should campaign be visible at all?")

    def __str__(self):
        return self.name
class Donation(FundraisingModel):
    """A donation (one-time or recurring); charges live in Payment."""
    interval = models.CharField(max_length=20, choices=INTERVAL_CHOICES, blank=True)
    subscription_amount = models.DecimalField(max_digits=9, decimal_places=2, null=True, blank=True)
    donor = models.ForeignKey(DjangoHero, null=True)
    campaign = models.ForeignKey(Campaign, null=True, blank=True)
    stripe_subscription_id = models.CharField(max_length=100, blank=True)
    stripe_customer_id = models.CharField(max_length=100, blank=True)
    receipt_email = models.EmailField(blank=True)

    def __str__(self):
        return '{} from {}'.format(self.get_interval_display(), self.donor)

    def get_absolute_url(self):
        """URL of the thank-you page for this donation."""
        return reverse('fundraising:thank-you', kwargs={'donation': self.id})

    def total_payments(self):
        """Sum of payment amounts for this donation (None if no payments)."""
        return self.payment_set.aggregate(models.Sum('amount'))['amount__sum']
class Payment(models.Model):
    """A single charge recorded against a Donation."""
    donation = models.ForeignKey(Donation)
    amount = models.DecimalField(max_digits=9, decimal_places=2, null=True)
    stripe_charge_id = models.CharField(max_length=100, blank=True)
    date = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return '${}'.format(self.amount)
class Testimonial(models.Model):
    """An authored quote attached to a campaign."""
    campaign = models.ForeignKey(Campaign, null=True)
    author = models.CharField(max_length=255)
    body = models.TextField()
    is_active = models.BooleanField(default=True)

    def __str__(self):
        return self.author
|
{
"content_hash": "d577aa1b88d2aa630274a9404825067e",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 103,
"avg_line_length": 35.285714285714285,
"alnum_prop": 0.6874879506458453,
"repo_name": "khkaminska/djangoproject.com",
"id": "b5b5c11275c71300c2abff88ffd1d138b554a1de",
"size": "5187",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "fundraising/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "133625"
},
{
"name": "CoffeeScript",
"bytes": "24188"
},
{
"name": "HTML",
"bytes": "220435"
},
{
"name": "JavaScript",
"bytes": "806901"
},
{
"name": "Makefile",
"bytes": "1628"
},
{
"name": "Python",
"bytes": "507628"
},
{
"name": "Ruby",
"bytes": "19821"
},
{
"name": "Smalltalk",
"bytes": "1917"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class LinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the ``layout.yaxis.linecolor`` property."""

    def __init__(self, plotly_name="linecolor", parent_name="layout.yaxis", **kwargs):
        """Delegate to ColorValidator, defaulting edit_type and role."""
        kwargs.setdefault("edit_type", "layoutstyle")
        kwargs.setdefault("role", "style")
        super(LinecolorValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
{
"content_hash": "39a561c195116e0047b2eb55a800bee3",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 86,
"avg_line_length": 38.25,
"alnum_prop": 0.6209150326797386,
"repo_name": "plotly/python-api",
"id": "7f93d055a914abdc16e6e54194f9f6b4c44da672",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/yaxis/_linecolor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
"""Module implementing core cutting functionality."""
import json
import ssl
import urllib2
from core import search_tab_template
class GlobeCutter(object):
  """Core cutting helpers: fetch search tabs and merge them into ServerDefs."""

  # Service URLs identifying the POI and Places search tabs in search_json.
  POI_SEARCH_SERVICE_URL = "POISearch"
  PLACES_SEARCH_SERVICE_URL = "/gesearch/PlacesSearch"
  # %s receives a comma-joined list of formatted search tab entries.
  SEARCH_TAB_LIST = "searchTabs : \n[%s\n]"

  @staticmethod
  def GetSearchTabs(source):
    """Get search tabs from the server.

    Args:
      source: database URL (server).
    Returns:
      search tabs in json formatted string or empty string.
    """
    search_tabs = ""
    url = "%s/search_json" % source
    try:
      # NOTE: ssl._create_unverified_context() is a private API and disables
      # certificate verification for this internal fetch.
      fp = urllib2.urlopen(url, context=ssl._create_unverified_context())
      if fp.getcode() == 200:
        search_tabs = fp.read()
      fp.close()
    except Exception:
      # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit still
      # propagate; a database without search tabs is a normal, non-fatal case.
      print ("No search tabs found.")
    return search_tabs

  @staticmethod
  def GetSearchTabByUrl(search_tabs, url):
    """Gets search tab by URL from search tab list.

    Args:
      search_tabs: search tab list.
      url: service URL to look for search tab.
    Returns:
      None or found search tab.
    """
    # Scans the whole list, so the *last* matching tab wins. Kept as-is to
    # preserve existing behavior when duplicate service_urls are present.
    result_search_tab = None
    for search_tab in search_tabs:
      if search_tab["service_url"] == url:
        result_search_tab = search_tab
    return result_search_tab

  @staticmethod
  def AddSearchTabsToServerDefs(server_defs_in, search_tabs_in):
    """Inserts search tabs into ServerDefs.

    Note: in search json it is expecting POISearch definition as:
    { ...
      "service_url": "POISearch",
      "additional_query_param": "DbId=2&...",
      ...
    }
    and Places search definition as:
    { ...
      "service_url": "/gesearch/PlacesSearch",
      "additional_query_param": "...",
      ...
    }

    Args:
      server_defs_in: ServerDefs in jsonp formatted string.
      search_tabs_in: Search tabs in json formatted string.
    Returns:
      updated ServerDefs merged with search tabs in jsonp formatted string,
      or None if there is no search tabs to add.
    """
    # Detect whether POI and Places search are present in source database.
    search_tabs = json.loads(search_tabs_in)
    poi_search_tab = GlobeCutter.GetSearchTabByUrl(
        search_tabs, GlobeCutter.POI_SEARCH_SERVICE_URL)
    search_tabs_out = []
    # Build POI search tab if source database has POI Search tab.
    if poi_search_tab:
      poi_search_tab_out = search_tab_template.FormatPoiSearchTab(
          poi_search_tab)
      if poi_search_tab_out:
        search_tabs_out.append(poi_search_tab_out)
    # Build GEPlaces search tab.
    places_search_tab_out = search_tab_template.FormatGePlacesSearchTab()
    if places_search_tab_out:
      search_tabs_out.append(places_search_tab_out)
    if not search_tabs_out:
      return None
    search_tabs_out_str = GlobeCutter.SEARCH_TAB_LIST % ", ".join(
        search_tabs_out)
    # Insert search tabs just before the closing brace of the ServerDefs
    # jsonp object.
    server_defs_obj_end = server_defs_in.rfind("}")
    server_defs_out = "%s,\n%s%s" % (
        server_defs_in[:server_defs_obj_end],
        search_tabs_out_str,
        server_defs_in[server_defs_obj_end:])
    return server_defs_out
def main():
  """No standalone behavior; this module is meant to be imported."""
  pass
if __name__ == "__main__":
  main()
|
{
"content_hash": "bcd3542797b80d8f66ebec276c7ef9be",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 75,
"avg_line_length": 27,
"alnum_prop": 0.6455026455026455,
"repo_name": "google/earthenterprise",
"id": "1f895cfa0ddabc9b7031ab1b42097442737852ba",
"size": "3857",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/core/globe_cutter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "137545"
},
{
"name": "C++",
"bytes": "12184054"
},
{
"name": "CSS",
"bytes": "337423"
},
{
"name": "Groovy",
"bytes": "29553"
},
{
"name": "HTML",
"bytes": "3293039"
},
{
"name": "Java",
"bytes": "9028"
},
{
"name": "JavaScript",
"bytes": "1532016"
},
{
"name": "Jinja",
"bytes": "10350"
},
{
"name": "Makefile",
"bytes": "3425"
},
{
"name": "PLpgSQL",
"bytes": "14509"
},
{
"name": "Perl",
"bytes": "377259"
},
{
"name": "Prolog",
"bytes": "1423"
},
{
"name": "Python",
"bytes": "2764293"
},
{
"name": "QMake",
"bytes": "3360"
},
{
"name": "Raku",
"bytes": "6715"
},
{
"name": "SWIG",
"bytes": "1959"
},
{
"name": "Shell",
"bytes": "254566"
}
],
"symlink_target": ""
}
|
from enum import Enum
from azure.core import CaseInsensitiveEnumMeta
class CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The type of identity that created the resource.

    Inherits ``str`` so members compare equal to their raw string values.
    NOTE(review): ``CaseInsensitiveEnumMeta`` comes from ``azure.core`` and
    presumably makes member lookup case-insensitive — confirm against its docs.
    """

    USER = "User"
    APPLICATION = "Application"
    MANAGED_IDENTITY = "ManagedIdentity"
    KEY = "Key"
class TemplateSpecExpandKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """TemplateSpecExpandKind.

    String-valued enum of expansion options; matched case-insensitively via
    ``CaseInsensitiveEnumMeta``.
    """

    #: Includes version information with the Template Spec.
    VERSIONS = "versions"
|
{
"content_hash": "1d1b4bb342d6d3a7d10a805d5fc4ce11",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 27.944444444444443,
"alnum_prop": 0.7375745526838966,
"repo_name": "Azure/azure-sdk-for-python",
"id": "ec0f6b2bc39a47d1ca523ee0ca825e16eaed0f85",
"size": "971",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "sdk/resources/azure-mgmt-resource/azure/mgmt/resource/templatespecs/v2022_02_01/models/_template_specs_client_enums.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""Unit test for smugmug.py"""
import unittest
import freezegun
from smugcli import smugmug
class MockNode():
  """A mock version of `smugmug.Node`."""

  def __init__(self):
    self._num_resets = 0

  def reset_cache(self):
    """Override of `smugmug.Node.reset_cache`; counts each invocation."""
    self._num_resets += 1

  @property
  def reset_count(self):
    """Number of times `reset_cache` has been called on this node."""
    return self._num_resets
class TestChildCacheGarbageCollector(unittest.TestCase):
  """Test for `smugmug.ChildCacheGarbageCollector`."""

  def _check_reset_counts(self, nodes, expected_counts):
    """Asserts each node's `reset_count` matches the expected value."""
    for node, expected in zip(nodes, expected_counts):
      self.assertEqual(node.reset_count, expected)

  def test_clears_child_cache(self):
    """Tests that nodes get reset, oldest visited first."""
    collector = smugmug.ChildCacheGarbageCollector(3)
    nodes = [MockNode() for _ in range(5)]
    for node in nodes:
      collector.visited(node)
    self._check_reset_counts(nodes, [1, 1, 0, 0, 0])

  def test_repeated_visit_are_ignored(self):
    """Tests that repeating visits do not count."""
    collector = smugmug.ChildCacheGarbageCollector(2)
    nodes = [MockNode() for _ in range(3)]
    collector.visited(nodes[0])
    collector.visited(nodes[1])
    for _ in range(3):
      collector.visited(nodes[2])
    self._check_reset_counts(nodes, [1, 0, 0])

  def test_optimally_resets_alternating_nodes(self):
    """Tests that alternating visits do not count."""
    collector = smugmug.ChildCacheGarbageCollector(2)
    nodes = [MockNode(), MockNode()]
    for _ in range(2):
      collector.visited(nodes[1])
      collector.visited(nodes[0])
    self._check_reset_counts(nodes, [0, 0])

  def test_heap_does_not_grow_out_of_control(self):
    """Tests garbage collector's memory usage."""
    collector = smugmug.ChildCacheGarbageCollector(1)
    node = MockNode()
    for _ in range(4):
      collector.visited(node)
    self.assertEqual(len(collector.nodes), 1)
    self.assertEqual(len(collector.oldest), 1)

  def test_time_keyed_heap_works_with_nodes_created_on_same_timestamp(self):
    """Tests that nodes created on the same timestamps gets GCed the same."""
    with freezegun.freeze_time('2019-01-01') as frozen_time:
      collector = smugmug.ChildCacheGarbageCollector(1)
      nodes = [MockNode() for _ in range(3)]
      collector.visited(nodes[0])
      collector.visited(nodes[1])
      frozen_time.tick()
      collector.visited(nodes[2])
    self._check_reset_counts(nodes, [1, 1, 0])
|
{
"content_hash": "8794dc28eb55532aca55720eeb7fb1a6",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 77,
"avg_line_length": 31.393939393939394,
"alnum_prop": 0.693050193050193,
"repo_name": "graveljp/smugcli",
"id": "0817dc5941463cc5767a18bf61a12de931666320",
"size": "3108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/smugmug_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "223689"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial schema migration: creates the Player, Loot and Election tables.

    NOTE(review): this looks like an auto-generated South migration; the
    frozen `models` dict below must mirror the app's models at generation
    time, so edit with care.
    """

    def forwards(self, orm):
        """Applies the migration: creates the three tables."""
        # Adding model 'Player'
        db.create_table(u'elections_player', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=45)),
            ('character', self.gf('django.db.models.fields.CharField')(max_length=45)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
        ))
        db.send_create_signal(u'elections', ['Player'])
        # Adding model 'Loot'
        db.create_table(u'elections_loot', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=45)),
            ('loot_type', self.gf('django.db.models.fields.CharField')(max_length=5)),
            ('bonus', self.gf('django.db.models.fields.IntegerField')()),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['elections.Player'], null=True, blank=True)),
        ))
        db.send_create_signal(u'elections', ['Loot'])
        # Adding model 'Election'
        db.create_table(u'elections_election', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('player', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['elections.Player'], null=True, blank=True)),
            ('weight', self.gf('django.db.models.fields.IntegerField')()),
            ('loot', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['elections.Loot'])),
            ('awarded', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('comment', self.gf('django.db.models.fields.CharField')(max_length=140)),
        ))
        db.send_create_signal(u'elections', ['Election'])

    def backwards(self, orm):
        """Reverses the migration: drops the three tables."""
        # Deleting model 'Player'
        db.delete_table(u'elections_player')
        # Deleting model 'Loot'
        db.delete_table(u'elections_loot')
        # Deleting model 'Election'
        db.delete_table(u'elections_election')

    # Frozen ORM state that South replays when running this migration.
    models = {
        u'elections.election': {
            'Meta': {'object_name': 'Election'},
            'awarded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'loot': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['elections.Loot']"}),
            'player': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['elections.Player']", 'null': 'True', 'blank': 'True'}),
            'weight': ('django.db.models.fields.IntegerField', [], {})
        },
        u'elections.loot': {
            'Meta': {'object_name': 'Loot'},
            'bonus': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'loot_type': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['elections.Player']", 'null': 'True', 'blank': 'True'})
        },
        u'elections.player': {
            'Meta': {'object_name': 'Player'},
            'character': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '45'})
        }
    }

    complete_apps = ['elections']
|
{
"content_hash": "046c95c8b48727ebfa68a15e294d12eb",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 142,
"avg_line_length": 50.40506329113924,
"alnum_prop": 0.5743345052737318,
"repo_name": "jcderr/loot",
"id": "3ac50db932fdfd5fc5e2c9bf63fc37d8786e6316",
"size": "4006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elections/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "85665"
},
{
"name": "JavaScript",
"bytes": "58006"
},
{
"name": "Python",
"bytes": "38898"
}
],
"symlink_target": ""
}
|
"""Utilities to create TensorProtos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
# Fallback in case fast_tensor_util is not properly compiled.
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.framework import fast_tensor_util
_FAST_TENSOR_UTIL_AVAILABLE = True
except ImportError:
_FAST_TENSOR_UTIL_AVAILABLE = False
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=g-import-not-at-top
def ExtractBitsFromFloat16(x):
  """Returns the uint16 bit pattern of `x` interpreted as an IEEE float16.

  Args:
    x: A value convertible to a numpy float16 scalar.

  Returns:
    A Python int holding the 16 raw bits of the float16 representation.
  """
  # .item() replaces np.asscalar (deprecated in NumPy 1.16, removed in 1.23);
  # np.asscalar(a) was implemented as a.item(), so the result is identical.
  return np.asarray(x, dtype=np.float16).view(np.uint16).item()
def SlowAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
  """Appends float16 values to `tensor_proto.half_val` as raw uint16 bits."""
  bits = [ExtractBitsFromFloat16(v) for v in proto_values]
  tensor_proto.half_val.extend(bits)
def ExtractBitsFromBFloat16(x):
  """Returns the uint16 bit pattern of `x` interpreted as a bfloat16."""
  # .item() replaces np.asscalar (removed in NumPy 1.23); identical result.
  return np.asarray(
      x, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16).item()
def SlowAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):
  """Appends bfloat16 values to `tensor_proto.half_val` as raw uint16 bits."""
  bits = [ExtractBitsFromBFloat16(v) for v in proto_values]
  tensor_proto.half_val.extend(bits)
if _FAST_TENSOR_UTIL_AVAILABLE:
  # Map numpy dtype -> C-accelerated append function where one exists;
  # float16/bfloat16 still go through the slow Python paths defined above.
  _NP_TO_APPEND_FN = {
      dtypes.bfloat16.as_numpy_dtype:
          SlowAppendBFloat16ArrayToTensorProto,
      # TODO(sesse): We should have a
      # fast_tensor_util.AppendFloat16ArrayToTensorProto,
      # but it seems np.float16_t doesn't exist?
      np.float16:
          SlowAppendFloat16ArrayToTensorProto,
      np.float32:
          fast_tensor_util.AppendFloat32ArrayToTensorProto,
      np.float64:
          fast_tensor_util.AppendFloat64ArrayToTensorProto,
      np.int32:
          fast_tensor_util.AppendInt32ArrayToTensorProto,
      np.int64:
          fast_tensor_util.AppendInt64ArrayToTensorProto,
      np.uint8:
          fast_tensor_util.AppendUInt8ArrayToTensorProto,
      np.uint16:
          fast_tensor_util.AppendUInt16ArrayToTensorProto,
      np.uint32:
          fast_tensor_util.AppendUInt32ArrayToTensorProto,
      np.uint64:
          fast_tensor_util.AppendUInt64ArrayToTensorProto,
      np.int8:
          fast_tensor_util.AppendInt8ArrayToTensorProto,
      np.int16:
          fast_tensor_util.AppendInt16ArrayToTensorProto,
      np.complex64:
          fast_tensor_util.AppendComplex64ArrayToTensorProto,
      np.complex128:
          fast_tensor_util.AppendComplex128ArrayToTensorProto,
      # `object` and `bool` are the exact objects the removed np.object /
      # np.bool aliases referred to (deprecated NumPy 1.20, removed 1.24),
      # so the dict keys are unchanged.
      object:
          fast_tensor_util.AppendObjectArrayToTensorProto,
      bool:
          fast_tensor_util.AppendBoolArrayToTensorProto,
      dtypes.qint8.as_numpy_dtype:
          fast_tensor_util.AppendInt8ArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype:
          fast_tensor_util.AppendUInt8ArrayToTensorProto,
      dtypes.qint16.as_numpy_dtype:
          fast_tensor_util.AppendInt8ArrayToTensorProto,
      dtypes.quint16.as_numpy_dtype:
          fast_tensor_util.AppendUInt8ArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype:
          fast_tensor_util.AppendInt32ArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
else:
  # Pure-Python fallbacks used when fast_tensor_util failed to import.
  # x.item() matches the removed np.asscalar(x) exactly (asscalar called
  # a.item() internally).

  def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.float_val.extend([x.item() for x in proto_values])

  def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.double_val.extend([x.item() for x in proto_values])

  def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int_val.extend([x.item() for x in proto_values])

  def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int64_val.extend([x.item() for x in proto_values])

  def SlowAppendQIntArrayToTensorProto(tensor_proto, proto_values):
    # Quantized values are passed as 1-element tuples; serialize the payload.
    tensor_proto.int_val.extend([x[0].item() for x in proto_values])

  def SlowAppendUInt32ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.uint32_val.extend([x.item() for x in proto_values])

  def SlowAppendUInt64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.uint64_val.extend([x.item() for x in proto_values])

  def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
    # Complex values are stored as interleaved (real, imag) pairs.
    tensor_proto.scomplex_val.extend(
        [v.item() for x in proto_values for v in [x.real, x.imag]])

  def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.dcomplex_val.extend(
        [v.item() for x in proto_values for v in [x.real, x.imag]])

  def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])

  def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.bool_val.extend([x.item() for x in proto_values])

  _NP_TO_APPEND_FN = {
      dtypes.bfloat16.as_numpy_dtype: SlowAppendBFloat16ArrayToTensorProto,
      np.float16: SlowAppendFloat16ArrayToTensorProto,
      np.float32: SlowAppendFloat32ArrayToTensorProto,
      np.float64: SlowAppendFloat64ArrayToTensorProto,
      np.int32: SlowAppendIntArrayToTensorProto,
      np.int64: SlowAppendInt64ArrayToTensorProto,
      np.uint8: SlowAppendIntArrayToTensorProto,
      np.uint16: SlowAppendIntArrayToTensorProto,
      np.uint32: SlowAppendUInt32ArrayToTensorProto,
      np.uint64: SlowAppendUInt64ArrayToTensorProto,
      np.int8: SlowAppendIntArrayToTensorProto,
      np.int16: SlowAppendIntArrayToTensorProto,
      np.complex64: SlowAppendComplex64ArrayToTensorProto,
      np.complex128: SlowAppendComplex128ArrayToTensorProto,
      # See note above: `object`/`bool` are identical keys to the removed
      # np.object/np.bool aliases.
      object: SlowAppendObjectArrayToTensorProto,
      bool: SlowAppendBoolArrayToTensorProto,
      dtypes.qint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.qint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.quint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
def GetFromNumpyDTypeDict(dtype_dict, dtype):
  """Looks up `dtype` in `dtype_dict` by equality rather than hashing.

  NOTE: dtype_dict.get(dtype) always returns None (the keys do not hash the
  same as the query dtype), so we scan the dict and compare keys explicitly.

  Args:
    dtype_dict: Dict keyed by numpy dtypes / dtype-like objects.
    dtype: The dtype to look up.

  Returns:
    The value for the first key comparing equal to `dtype`, or None.
  """
  # dict.items() replaces the py2-era six.iteritems shim; same iteration on
  # Python 3 and one less third-party dependency for this helper.
  for candidate, value in dtype_dict.items():
    if candidate == dtype:
      return value
  return None
def GetNumpyAppendFn(dtype):
  """Returns the append function used to serialize `dtype` values, or None.

  numpy dtypes for strings are variable length. We can not compare
  dtype with a single constant (np.string does not exist) to decide
  dtype is a "string" type. We need to compare the dtype.type to be
  sure it's a string type.
  """
  # np.bytes_/np.str_ are the canonical names for the old np.string_ /
  # np.unicode_ aliases, which were removed in NumPy 2.0.
  if dtype.type == np.bytes_ or dtype.type == np.str_:
    if _FAST_TENSOR_UTIL_AVAILABLE:
      return fast_tensor_util.AppendObjectArrayToTensorProto
    else:
      return SlowAppendObjectArrayToTensorProto
  return GetFromNumpyDTypeDict(_NP_TO_APPEND_FN, dtype)
def TensorShapeProtoToList(shape):
  """Convert a TensorShape to a list.

  Args:
    shape: A TensorShapeProto.

  Returns:
    List of integers representing the dimensions of the tensor.
  """
  dims = []
  for dim in shape.dim:
    dims.append(dim.size)
  return dims
def _GetDenseDimensions(list_of_lists):
  """Returns the inferred dense dimensions of a list of lists.

  Walks the first element at each nesting level, so a scalar yields [],
  an empty list yields [0], and [[1, 2], [3, 4]] yields [2, 2].
  """
  dims = []
  current = list_of_lists
  while isinstance(current, (list, tuple)):
    if not current:
      dims.append(0)
      break
    dims.append(len(current))
    current = current[0]
  return dims
def _FlattenToStrings(nested_strings):
  """Yields each non-list/tuple leaf of `nested_strings`, depth-first."""
  stack = [nested_strings]
  while stack:
    item = stack.pop()
    if isinstance(item, (list, tuple)):
      # Push children reversed so they are yielded left-to-right.
      stack.extend(reversed(item))
    else:
      yield item
# Dtypes whose dense values may be serialized into the packed
# `tensor_content` bytes field (see make_tensor_proto) instead of the
# per-dtype repeated value fields.
_TENSOR_CONTENT_TYPES = frozenset([
    dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8, dtypes.int16,
    dtypes.int8, dtypes.int64, dtypes.qint8, dtypes.quint8, dtypes.qint16,
    dtypes.quint16, dtypes.qint32, dtypes.uint32, dtypes.uint64
])
class _Message(object):
  """Wrapper whose repr() is exactly the wrapped text; used in error text."""

  def __init__(self, message):
    self._text = message

  def __repr__(self):
    return self._text
def _FirstNotNone(l):
  """Returns the first non-None element of `l`, or None if all are None.

  A Tensor element is replaced by a `_Message` placeholder so error messages
  do not embed a full Tensor repr.
  """
  for item in l:
    if item is None:
      continue
    if isinstance(item, ops.Tensor):
      return _Message("list containing Tensors")
    return item
  return None
def _NotNone(v):
  """Maps None to a printable `_Message`; passes any other value through."""
  return _Message("None") if v is None else v
def _FilterTuple(v):
  """Returns an offending element of `v` for tuple-expecting dtypes, or None.

  Non-sequences are returned as-is (they are the mismatch). Flat tuples are
  accepted; flat lists report their first non-None element; nested
  structures are filtered recursively.
  """
  if not isinstance(v, (list, tuple)):
    return v
  has_nested = any(isinstance(x, (list, tuple)) for x in v)
  if not has_nested:
    if isinstance(v, tuple):
      return None
    return _FirstNotNone(
        [None if isinstance(x, (list, tuple)) else x for x in v])
  return _FirstNotNone([_FilterTuple(x) for x in v])
def _FilterInt(v):
  """Returns a non-integral element of (possibly nested) `v`, or None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterInt(x) for x in v])
  if isinstance(v, (compat.integral_types, tensor_shape.Dimension)):
    return None
  return _NotNone(v)
def _FilterFloat(v):
  """Returns a non-real-number element of (possibly nested) `v`, or None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterFloat(x) for x in v])
  if isinstance(v, compat.real_types):
    return None
  return _NotNone(v)
def _FilterComplex(v):
  """Returns a non-complex element of (possibly nested) `v`, or None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterComplex(x) for x in v])
  if isinstance(v, compat.complex_types):
    return None
  return _NotNone(v)
def _FilterStr(v):
  """Returns a non-string element of (possibly nested) `v`, or None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterStr(x) for x in v])
  if isinstance(v, compat.bytes_or_text_types):
    return None
  return _NotNone(v)
def _FilterBool(v):
  """Returns a non-bool element of (possibly nested) `v`, or None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterBool(x) for x in v])
  return None if isinstance(v, bool) else _NotNone(v)
def _FilterNotTensor(v):
  """Returns str() of a Tensor found in (possibly nested) `v`, else None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterNotTensor(x) for x in v])
  if isinstance(v, ops.Tensor):
    return str(v)
  return None
# Maps each feedable dtype to the list of filter functions used by
# _AssertCompatible. Each filter returns an offending element (or None when
# `values` is compatible); all filters in the list must return None.
_TF_TO_IS_OK = {
    dtypes.bool: [_FilterBool],
    dtypes.complex128: [_FilterComplex],
    dtypes.complex64: [_FilterComplex],
    dtypes.float16: [_FilterFloat],
    dtypes.float32: [_FilterFloat],
    dtypes.float64: [_FilterFloat],
    dtypes.int16: [_FilterInt],
    dtypes.int32: [_FilterInt],
    dtypes.int64: [_FilterInt],
    dtypes.int8: [_FilterInt],
    # Quantized dtypes additionally require tuple-wrapped values.
    dtypes.qint16: [_FilterInt, _FilterTuple],
    dtypes.qint32: [_FilterInt, _FilterTuple],
    dtypes.qint8: [_FilterInt, _FilterTuple],
    dtypes.quint16: [_FilterInt, _FilterTuple],
    dtypes.quint8: [_FilterInt, _FilterTuple],
    dtypes.string: [_FilterStr],
    dtypes.uint16: [_FilterInt],
    dtypes.uint8: [_FilterInt],
}
def _AssertCompatible(values, dtype):
  """Raises TypeError if `values` holds an element incompatible with `dtype`.

  `dtype` of None means a single Tensor was expected, so any mismatch is
  reported as "List of Tensors when single Tensor expected".
  """
  checks = _TF_TO_IS_OK.get(dtype, [_FilterNotTensor])
  mismatch = _FirstNotNone([check(values) for check in checks])
  if mismatch is None:
    return
  if dtype is None:
    raise TypeError("List of Tensors when single Tensor expected")
  raise TypeError("Expected %s, got %s of type '%s' instead." %
                  (dtype.name, repr(mismatch), type(mismatch).__name__))
@tf_export("make_tensor_proto")
def make_tensor_proto(values, dtype=None, shape=None, verify_shape=False):
  """Create a TensorProto.

  Args:
    values: Values to put in the TensorProto.
    dtype: Optional tensor_pb2 DataType value.
    shape: List of integers representing the dimensions of tensor.
    verify_shape: Boolean that enables verification of a shape of values.

  Returns:
    A `TensorProto`. Depending on the type, it may contain data in the
    "tensor_content" attribute, which is not directly useful to Python programs.
    To access the values you should convert the proto back to a numpy ndarray
    with `tensor_util.MakeNdarray(proto)`.

    If `values` is a `TensorProto`, it is immediately returned; `dtype` and
    `shape` are ignored.

  Raises:
    TypeError: if unsupported types are provided.
    ValueError: if arguments have inappropriate values or if verify_shape is
      True and shape of values is not equals to a shape from the argument.

  make_tensor_proto accepts "values" of a python scalar, a python list, a
  numpy ndarray, or a numpy scalar.

  If "values" is a python scalar or a python list, make_tensor_proto
  first convert it to numpy ndarray. If dtype is None, the
  conversion tries its best to infer the right numpy data
  type. Otherwise, the resulting numpy array has a compatible data
  type with the given dtype.

  In either case above, the numpy ndarray (either the caller provided
  or the auto converted) must have the compatible type with dtype.

  make_tensor_proto then converts the numpy array to a tensor proto.

  If "shape" is None, the resulting tensor proto represents the numpy
  array precisely.

  Otherwise, "shape" specifies the tensor's shape and the numpy array
  can not have more elements than what "shape" specifies.
  """
  if isinstance(values, tensor_pb2.TensorProto):
    return values

  if dtype:
    dtype = dtypes.as_dtype(dtype)

  is_quantized = (
      dtype in [
          dtypes.qint8, dtypes.quint8, dtypes.qint16, dtypes.quint16,
          dtypes.qint32
      ])

  # We first convert value to a numpy array or scalar.
  if isinstance(values, (np.ndarray, np.generic)):
    if dtype:
      nparray = values.astype(dtype.as_numpy_dtype)
    else:
      nparray = values
  elif callable(getattr(values, "__array__", None)) or isinstance(
      getattr(values, "__array_interface__", None), dict):
    # If a class has the __array__ method, or __array_interface__ dict, then it
    # is possible to convert to numpy array.
    nparray = np.asarray(values, dtype=dtype)

    # This is the preferred way to create an array from the object, so replace
    # the `values` with the array so that _FlattenToStrings is not run.
    values = nparray
  else:
    if values is None:
      raise ValueError("None values not supported.")
    # if dtype is provided, forces numpy array to be the type
    # provided if possible.
    if dtype and dtype.is_numpy_compatible:
      np_dt = dtype.as_numpy_dtype
    else:
      np_dt = None
    # If shape is None, numpy.prod returns None when dtype is not set, but
    # raises exception when dtype is set to np.int64
    if shape is not None and np.prod(shape, dtype=np.int64) == 0:
      nparray = np.empty(shape, dtype=np_dt)
    else:
      _AssertCompatible(values, dtype)
      nparray = np.array(values, dtype=np_dt)
      # We need to pass in quantized values as tuples, so don't apply the shape
      # check to them.
      if (list(nparray.shape) != _GetDenseDimensions(values) and
          not is_quantized):
        raise ValueError("""Argument must be a dense tensor: %s"""
                         """ - got shape %s, but wanted %s.""" %
                         (values, list(nparray.shape),
                          _GetDenseDimensions(values)))

    # python/numpy default float type is float64. We prefer float32 instead.
    if (nparray.dtype == np.float64) and dtype is None:
      nparray = nparray.astype(np.float32)
    # python/numpy default int type is int64. We prefer int32 instead.
    elif (nparray.dtype == np.int64) and dtype is None:
      downcasted_array = nparray.astype(np.int32)
      # Do not down cast if it leads to precision loss.
      if np.array_equal(downcasted_array, nparray):
        nparray = downcasted_array

  # if dtype is provided, it must be compatible with what numpy
  # conversion says.
  numpy_dtype = dtypes.as_dtype(nparray.dtype)
  if numpy_dtype is None:
    raise TypeError("Unrecognized data type: %s" % nparray.dtype)

  # If dtype was specified and is a quantized type, we convert
  # numpy_dtype back into the quantized version.
  if is_quantized:
    numpy_dtype = dtype

  if dtype is not None and (not hasattr(dtype, "base_dtype") or
                            dtype.base_dtype != numpy_dtype.base_dtype):
    raise TypeError("Incompatible types: %s vs. %s. Value is %s" %
                    (dtype, nparray.dtype, values))

  # If shape is not given, get the shape from the numpy array.
  if shape is None:
    shape = nparray.shape
    is_same_size = True
    shape_size = nparray.size
  else:
    shape = [int(dim) for dim in shape]
    shape_size = np.prod(shape, dtype=np.int64)
    is_same_size = shape_size == nparray.size

    if verify_shape:
      if not nparray.shape == tuple(shape):
        raise TypeError("Expected Tensor's shape: %s, got %s." %
                        (tuple(shape), nparray.shape))

    if nparray.size > shape_size:
      raise ValueError(
          "Too many elements provided. Needed at most %d, but received %d" %
          (shape_size, nparray.size))

  tensor_proto = tensor_pb2.TensorProto(
      dtype=numpy_dtype.as_datatype_enum,
      tensor_shape=tensor_shape.as_shape(shape).as_proto())

  if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
    if nparray.size * nparray.itemsize >= (1 << 31):
      raise ValueError(
          "Cannot create a tensor proto whose content is larger than 2GB.")
    # tobytes() is the supported spelling of the removed ndarray.tostring()
    # alias; the produced bytes are identical.
    tensor_proto.tensor_content = nparray.tobytes()
    return tensor_proto

  # If we were not given values as a numpy array, compute the proto_values
  # from the given values directly, to avoid numpy trimming nulls from the
  # strings. Since values could be a list of strings, or a multi-dimensional
  # list of lists that might or might not correspond to the given shape,
  # we flatten it conservatively.
  if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
    proto_values = _FlattenToStrings(values)

    # At this point, values may be a list of objects that we could not
    # identify a common type for (hence it was inferred as
    # np.object/dtypes.string). If we are unable to convert it to a
    # string, we raise a more helpful error message.
    #
    # Ideally, we'd be able to convert the elements of the list to a
    # common type, but this type inference requires some thinking and
    # so we defer it for now.
    try:
      str_values = [compat.as_bytes(x) for x in proto_values]
    except TypeError:
      raise TypeError("Failed to convert object of type %s to Tensor. "
                      "Contents: %s. Consider casting elements to a "
                      "supported type." % (type(values), values))
    tensor_proto.string_val.extend(str_values)
    return tensor_proto

  # TensorFlow expects C order (a.k.a., eigen row major).
  proto_values = nparray.ravel()

  append_fn = GetNumpyAppendFn(proto_values.dtype)
  if append_fn is None:
    raise TypeError(
        "Element type not supported in TensorProto: %s" % numpy_dtype.name)
  append_fn(tensor_proto, proto_values)

  return tensor_proto
@tf_export("make_ndarray")
def MakeNdarray(tensor):
  """Create a numpy ndarray from a tensor.

  Create a numpy ndarray with the same shape and data as the tensor.

  Args:
    tensor: A TensorProto.

  Returns:
    A numpy array with the tensor contents.

  Raises:
    TypeError: if tensor has unsupported type.
  """
  shape = [d.size for d in tensor.tensor_shape.dim]
  num_elements = np.prod(shape, dtype=np.int64)
  tensor_dtype = dtypes.as_dtype(tensor.dtype)
  dtype = tensor_dtype.as_numpy_dtype
  # Densely packed case: all values live in the raw tensor_content bytes.
  # .copy() makes the result writable (frombuffer returns a read-only view).
  if tensor.tensor_content:
    return (np.frombuffer(tensor.tensor_content, dtype=dtype).copy()
            .reshape(shape))
  elif tensor_dtype == dtypes.float16:
    # the half_val field of the TensorProto stores the binary representation
    # of the fp16: we need to reinterpret this as a proper float16
    if len(tensor.half_val) == 1:
      tmp = np.array(tensor.half_val[0], dtype=np.uint16)
      tmp.dtype = np.float16
      return np.repeat(tmp, num_elements).reshape(shape)
    else:
      tmp = np.fromiter(tensor.half_val, dtype=np.uint16)
      tmp.dtype = np.float16
      return tmp.reshape(shape)
  elif tensor_dtype == dtypes.float32:
    # In each dtype branch below, a single stored value represents a splat of
    # that value over the whole shape.
    if len(tensor.float_val) == 1:
      return np.repeat(
          np.array(tensor.float_val[0], dtype=dtype),
          num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.float_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.float64:
    if len(tensor.double_val) == 1:
      return np.repeat(
          np.array(tensor.double_val[0], dtype=dtype),
          num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.double_val, dtype=dtype).reshape(shape)
  elif tensor_dtype in [
      dtypes.int32, dtypes.uint8, dtypes.uint16, dtypes.int16, dtypes.int8,
      dtypes.qint32, dtypes.quint8, dtypes.qint8, dtypes.qint16, dtypes.quint16,
      dtypes.bfloat16
  ]:
    if len(tensor.int_val) == 1:
      return np.repeat(np.array(tensor.int_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.int_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.int64:
    if len(tensor.int64_val) == 1:
      return np.repeat(
          np.array(tensor.int64_val[0], dtype=dtype),
          num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.int64_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.string:
    if len(tensor.string_val) == 1:
      return np.repeat(
          np.array(tensor.string_val[0], dtype=dtype),
          num_elements).reshape(shape)
    else:
      return np.array(
          [x for x in tensor.string_val], dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.complex64:
    # Complex values are stored as interleaved (real, imag) pairs; zip(it, it)
    # re-pairs consecutive elements.
    it = iter(tensor.scomplex_val)
    if len(tensor.scomplex_val) == 2:
      return np.repeat(
          np.array(
              complex(tensor.scomplex_val[0], tensor.scomplex_val[1]),
              dtype=dtype), num_elements).reshape(shape)
    else:
      return np.array(
          [complex(x[0], x[1]) for x in zip(it, it)],
          dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.complex128:
    it = iter(tensor.dcomplex_val)
    if len(tensor.dcomplex_val) == 2:
      return np.repeat(
          np.array(
              complex(tensor.dcomplex_val[0], tensor.dcomplex_val[1]),
              dtype=dtype), num_elements).reshape(shape)
    else:
      return np.array(
          [complex(x[0], x[1]) for x in zip(it, it)],
          dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.bool:
    if len(tensor.bool_val) == 1:
      return np.repeat(np.array(tensor.bool_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.bool_val, dtype=dtype).reshape(shape)
  else:
    raise TypeError("Unsupported tensor type: %s" % tensor.dtype)
def ShapeEquals(tensor_proto, shape):
  """Returns True if "tensor_proto" has the given "shape".

  Args:
    tensor_proto: A TensorProto.
    shape: A tensor shape, expressed as a TensorShape, list, or tuple.

  Returns:
    True if "tensor_proto" has the given "shape", otherwise False.

  Raises:
    TypeError: If "tensor_proto" is not a TensorProto, or shape is not a
      TensorShape, list, or tuple.
  """
  if not isinstance(tensor_proto, tensor_pb2.TensorProto):
    raise TypeError("tensor_proto is not a tensor_pb2.TensorProto object")
  if isinstance(shape, tensor_shape_pb2.TensorShapeProto):
    shape = [d.size for d in shape.dim]
  elif not isinstance(shape, (list, tuple)):
    raise TypeError("shape is not a list or tuple")
  tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]
  # zip() alone silently ignores trailing dimensions when the ranks differ
  # (e.g. [2, 3] vs. [2] would compare equal), so check lengths explicitly.
  return (len(tensor_shape_list) == len(shape) and
          all(x == y for x, y in zip(tensor_shape_list, shape)))
def _ConstantValue(tensor, partial):
  """Statically evaluates `tensor` by dispatching on its op type.

  Implementation helper for `constant_value`: returns the value as a numpy
  ndarray when it can be derived from the graph alone, or None otherwise.
  """
  # TODO(touts): Support Variables?
  if not isinstance(tensor, ops.Tensor):
    raise TypeError("tensor is not a Tensor")

  if tensor.op.type == "Const":
    return MakeNdarray(tensor.op.get_attr("value"))
  elif tensor.op.type == "Shape":
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.array(
          [dim.value for dim in input_shape.dims],
          dtype=tensor.dtype.as_numpy_dtype)
    else:
      return None
  elif tensor.op.type == "Size":
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.prod([dim.value for dim in input_shape.dims], dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Rank":
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.ndims is not None:
      return np.ndarray(
          shape=(),
          buffer=np.array([input_shape.ndims], dtype=np.int32),
          dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Range":
    start = constant_value(tensor.op.inputs[0])
    if start is None:
      return None
    limit = constant_value(tensor.op.inputs[1])
    if limit is None:
      return None
    delta = constant_value(tensor.op.inputs[2])
    if delta is None:
      return None
    return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)
  elif tensor.op.type == "Cast":
    pre_cast = constant_value(tensor.op.inputs[0])
    if pre_cast is None:
      return None
    cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
    return pre_cast.astype(cast_dtype.as_numpy_dtype)
  elif tensor.op.type == "Concat":
    # Concat takes the axis as its *first* input.
    dim = constant_value(tensor.op.inputs[0])
    if dim is None:
      return None
    values = []
    for x in tensor.op.inputs[1:]:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.concatenate(values, axis=dim)
  elif tensor.op.type == "ConcatV2":
    # ConcatV2 takes the axis as its *last* input, unlike Concat above.
    dim = constant_value(tensor.op.inputs[-1])
    if dim is None:
      return None
    values = []
    for x in tensor.op.inputs[:-1]:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.concatenate(values, axis=dim)
  elif tensor.op.type == "Pack":
    values = []
    # Some imported GraphDefs have Pack ops with zero inputs. Those are invalid
    # and shouldn't be produced, but to deal sensibly with them here we check
    # and return None.
    if not tensor.op.inputs:
      return None
    # We can't handle axis != 0 Packs at the moment.
    if tensor.op.get_attr("axis") != 0:
      return None
    for x in tensor.op.inputs:
      value = constant_value(x, partial)
      if value is None and not partial:
        return None
      values.append(value)
    return np.array(values)
  elif tensor.op.type == "Fill":
    fill_shape = tensor.shape
    fill_value = constant_value(tensor.op.inputs[1])
    if fill_shape.is_fully_defined() and fill_value is not None:
      return np.full(fill_shape.as_list(), fill_value, dtype=fill_value.dtype)
    else:
      return None
  elif tensor.op.type == "Equal":
    value1 = constant_value(tensor.op.inputs[0])
    if value1 is None:
      return None
    value2 = constant_value(tensor.op.inputs[1])
    if value2 is None:
      return None
    return np.equal(value1, value2)
  elif tensor.op.type == "NotEqual":
    value1 = constant_value(tensor.op.inputs[0])
    if value1 is None:
      return None
    value2 = constant_value(tensor.op.inputs[1])
    if value2 is None:
      return None
    return np.not_equal(value1, value2)
  else:
    return None
def constant_value(tensor, partial=False):  # pylint: disable=invalid-name
  """Returns the constant value of the given tensor, if efficiently calculable.

  Attempts to partially evaluate `tensor`; on success its value is returned
  as a numpy ndarray, otherwise None.

  NOTE: A non-`None` result means `tensor` can no longer be fed a different
  value: callers may use the returned constant to shape the graph they build,
  which permits static shape optimizations.

  TODO(mrry): Consider whether this function should use a registration
  mechanism like gradients and ShapeFunctions, so that it is easily
  extensible.

  Args:
    tensor: The Tensor to be evaluated.
    partial: If True, the returned numpy array is allowed to have partially
      evaluated values. Values that can't be evaluated will be None.

  Returns:
    A numpy ndarray containing the constant value of the given `tensor`,
    or None if it cannot be calculated.

  Raises:
    TypeError: if tensor is not an ops.Tensor.
  """
  if isinstance(tensor, ops.EagerTensor):
    # Eager tensors already hold their value; no graph evaluation required.
    return tensor.numpy()
  value = _ConstantValue(tensor, partial)
  if value is None:
    return None
  # The caller may now depend on the constant value, so conservatively
  # prevent a different value from being fed for this tensor.
  tensor.graph.prevent_feeding(tensor)
  return value
def constant_value_as_shape(tensor):  # pylint: disable=invalid-name
  """A version of `constant_value()` that returns a `TensorShape`.

  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.

  Args:
    tensor: The rank-1 Tensor to be evaluated.

  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.
  """
  if context.in_eager_mode():
    # Eager: read the value directly; by convention -1 marks an
    # unknown dimension.
    return tensor_shape.as_shape(
        [dim if dim != -1 else None for dim in tensor.numpy()])
  # Graph mode: the shape tensor itself must be a vector (rank 1).
  shape = tensor.get_shape().with_rank(1)
  if tensor.get_shape() == [0]:
    # A zero-length shape vector denotes a scalar.
    return tensor_shape.scalar()
  elif tensor.op.type == "Shape":
    # Shape(x) is statically known from x's own (possibly partial) shape.
    return tensor.op.inputs[0].get_shape()
  elif tensor.op.type == "Pack":
    ret = tensor_shape.scalar()  # Empty list.
    # Since we expect rank 1 inputs, Pack's axis must be zero, otherwise it
    # would not be rank 1.
    assert tensor.op.get_attr("axis") == 0
    for pack_input in tensor.op.inputs:
      # `pack_input` must be a scalar. Attempt to evaluate it, and append it
      # to `ret`. Negative or unevaluable entries become unknown dimensions.
      pack_input_val = constant_value(pack_input)
      if pack_input_val is None or pack_input_val < 0:
        new_dim = tensor_shape.Dimension(None)
      else:
        new_dim = tensor_shape.Dimension(pack_input_val)
      ret = ret.concatenate([new_dim])
    return ret
  elif tensor.op.type == "Concat":
    # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[1:]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  elif tensor.op.type == "ConcatV2":
    # We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    # (ConcatV2 takes the axis as its *last* input, unlike Concat.)
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[:-1]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  elif tensor.op.type == "StridedSlice":
    try:
      # Inputs are (input, begin, end, strides); all three index vectors
      # must be constant for the slice of the shape to be computable.
      begin = constant_value(tensor.op.inputs[1])
      end = constant_value(tensor.op.inputs[2])
      strides = constant_value(tensor.op.inputs[3])
      if begin is not None and end is not None and strides is not None:
        begin = begin[0]
        end = end[0]
        strides = strides[0]
        # A begin/end mask of 1 means "from the start" / "to the end".
        begin_mask = tensor.op.get_attr("begin_mask")
        if begin_mask == 1:
          begin = None
        end_mask = tensor.op.get_attr("end_mask")
        if end_mask == 1:
          end = None
        ellipsis_mask = tensor.op.get_attr("ellipsis_mask")
        new_axis_mask = tensor.op.get_attr("new_axis_mask")
        shrink_axis_mask = tensor.op.get_attr("shrink_axis_mask")
        # Only plain 1-D slices are supported: no ellipsis/new-axis/shrink,
        # and begin/end masks restricted to the single dimension.
        valid_attributes = (not ellipsis_mask and not new_axis_mask and
                            not shrink_axis_mask and (not begin_mask or
                                                      (begin_mask == 1)) and
                            (not end_mask or (end_mask == 1)))
        if valid_attributes:  # additional inputs not supported
          prev = constant_value_as_shape(tensor.op.inputs[0])
          prev = prev[begin:end:strides]
          ret = tensor_shape.TensorShape(prev)
          return ret
    except ValueError:  # Could come from get_attr or slicing prev.
      pass
    except TypeError:  # Could come from slicing prev.
      pass
  # Fallback: length of the result is the (possibly unknown) vector length;
  # fill in any dimensions that `constant_value` can still determine
  # (negative entries mean unknown).
  ret = tensor_shape.unknown_shape(shape[0].value)
  value = constant_value(tensor)
  if value is not None:
    ret = ret.merge_with(
        tensor_shape.TensorShape([d if d >= 0 else None for d in value]))
  return ret
def is_tensor(x):  # pylint: disable=invalid-name
  """Check whether `x` is of tensor type.

  Equivalent to `isinstance(x, [tf.Tensor, tf.SparseTensor, tf.Variable])`.

  Args:
    x: A python object to check.

  Returns:
    `True` if `x` is a tensor, `False` if not.
  """
  if isinstance(x, ops._TensorLike):  # pylint: disable=protected-access
    return True
  # Fall back to the duck-typed check for dense-tensor-like objects.
  return ops.is_dense_tensor_like(x)
|
{
"content_hash": "a266d91b08362f1a223935a20cbe9fbc",
"timestamp": "",
"source": "github",
"line_count": 916,
"max_line_length": 106,
"avg_line_length": 36.4028384279476,
"alnum_prop": 0.6783025940920677,
"repo_name": "zasdfgbnm/tensorflow",
"id": "27afaa074a6becd5c8b7db94be59e8da1611c13a",
"size": "34034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/tensor_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9096"
},
{
"name": "C",
"bytes": "341181"
},
{
"name": "C++",
"bytes": "37811513"
},
{
"name": "CMake",
"bytes": "193934"
},
{
"name": "Go",
"bytes": "1061098"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "551109"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48122"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "1556"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "32936295"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425164"
}
],
"symlink_target": ""
}
|
from django.templatetags.static import static
from django.urls import reverse
from django.db import models
from django.utils.translation import gettext_lazy as _
from easy_thumbnails.exceptions import InvalidImageFormatError
from easy_thumbnails.files import get_thumbnailer
DEFAULT_COACH_PHOTO = static('img/global/coach-empty.jpg')
class Coach(models.Model):
    """A workshop coach with an optional photo, Twitter handle and URL."""
    name = models.CharField(max_length=200)
    twitter_handle = models.CharField(
        max_length=200,
        null=True,
        blank=True,
        help_text=_("No @, No http://, just username"))
    photo = models.ImageField(
        upload_to="event/coaches/",
        null=True,
        blank=True,
        help_text=_("For best display keep it square")
    )
    url = models.URLField(null=True, blank=True)
    class Meta:
        ordering = ("name",)
        verbose_name_plural = _("Coaches")
    def __str__(self):
        return self.name
    def photo_display_for_admin(self):
        """Return an HTML snippet showing the photo, linking to the admin
        change page for this coach."""
        coach_change_url = reverse("admin:coach_coach_change", args=[self.id])
        return f"""
        <a href=\"{coach_change_url}\" target=\"_blank\">
            <img src=\"{self.photo_url}\" width=\"100\" />
        </a>"""
    # NOTE(review): ``allow_tags`` was removed from Django admin in 2.0;
    # modern code would use ``django.utils.html.format_html`` -- confirm the
    # Django version in use before relying on this attribute.
    photo_display_for_admin.allow_tags = True
    @property
    def photo_url(self):
        """URL of the 'coach' thumbnail, or the default placeholder when the
        photo is missing or cannot be thumbnailed."""
        if self.photo:
            try:
                return get_thumbnailer(self.photo)['coach'].url
            except InvalidImageFormatError:
                # Corrupt/unsupported image: fall back to the placeholder.
                return DEFAULT_COACH_PHOTO
        return DEFAULT_COACH_PHOTO
|
{
"content_hash": "cf529f66e8407b957bdeb3a0ffdbf66c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 78,
"avg_line_length": 29.784313725490197,
"alnum_prop": 0.6175115207373272,
"repo_name": "DjangoGirls/djangogirls",
"id": "da721b818cef6264ccd6a188eecace6a8500f808",
"size": "1519",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "coach/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "428291"
},
{
"name": "JavaScript",
"bytes": "13711"
},
{
"name": "Python",
"bytes": "422267"
},
{
"name": "Stylus",
"bytes": "32803"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, division
import unittest
import numpy
import numpy.linalg
from numpy.testing import assert_array_almost_equal
from numpy.testing import dec, assert_array_equal, assert_allclose
from numpy import inf
import theano
from theano import tensor, function
from theano.tensor.basic import _allclose
from theano.tests.test_rop import break_op
from theano.tests import unittest_tools as utt
from theano import config
from theano.tensor.slinalg import ( Cholesky,
cholesky,
CholeskyGrad,
Solve,
solve,
Eigvalsh,
EigvalshGrad,
eigvalsh,
expm,
kron)
from theano.tests.unittest_tools import attr
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises
# SciPy is an optional dependency for these tests; ``imported_scipy`` gates
# every test that exercises a SciPy-backed op.
try:
    import scipy.linalg
    imported_scipy = True
except ImportError:
    # some ops (e.g. Cholesky, Solve, A_Xinv_b) won't work
    imported_scipy = False
def check_lower_triangular(pd, ch_f):
    """Assert that ``ch_f`` yields a *lower*-triangular Cholesky factor.

    ``pd`` is a symmetric positive-definite matrix; ``ch_f`` is a callable
    computing its Cholesky decomposition.
    """
    factor = ch_f(pd)
    rows, cols = pd.shape[0], pd.shape[1]
    # Lower-triangular: zero top-right corner, (generically) non-zero
    # bottom-left corner.
    assert factor[0, cols - 1] == 0
    assert factor[rows - 1, 0] != 0
    # L . L^T reconstructs pd; the reversed product does not.
    assert numpy.allclose(numpy.dot(factor, factor.T), pd)
    assert not numpy.allclose(numpy.dot(factor.T, factor), pd)
def check_upper_triangular(pd, ch_f):
    """Assert that ``ch_f`` yields an *upper*-triangular Cholesky factor.

    ``pd`` is a symmetric positive-definite matrix; ``ch_f`` is a callable
    computing its Cholesky decomposition.

    Fix: the corner indices were hard-coded as ``4``/``0`` (assuming a 5x5
    input); they are now derived from ``pd.shape``, consistent with
    ``check_lower_triangular``, so matrices of any size work.
    """
    ch = ch_f(pd)
    # Upper-triangular: zero bottom-left corner, (generically) non-zero
    # top-right corner.
    assert ch[pd.shape[0] - 1, 0] == 0
    assert ch[0, pd.shape[1] - 1] != 0
    # U^T . U reconstructs pd; the reversed product does not.
    assert numpy.allclose(numpy.dot(ch.T, ch), pd)
    assert not numpy.allclose(numpy.dot(ch, ch.T), pd)
def test_cholesky():
    # Nose generator test: yields (check, pd, compiled_fn) triples for the
    # default, explicit-lower and explicit-upper Cholesky variants.
    if not imported_scipy:
        raise SkipTest("Scipy needed for the Cholesky op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    r = rng.randn(5, 5).astype(config.floatX)
    # r . r^T is symmetric positive (semi-)definite, suitable for Cholesky.
    pd = numpy.dot(r, r.T)
    x = tensor.matrix()
    chol = cholesky(x)
    # Check the default.
    ch_f = function([x], chol)
    yield check_lower_triangular, pd, ch_f
    # Explicit lower-triangular.
    chol = Cholesky(lower=True)(x)
    ch_f = function([x], chol)
    yield check_lower_triangular, pd, ch_f
    # Explicit upper-triangular.
    chol = Cholesky(lower=False)(x)
    ch_f = function([x], chol)
    yield check_upper_triangular, pd, ch_f
def test_cholesky_grad():
    # Numeric gradient checks for the Cholesky op in all three variants.
    if not imported_scipy:
        raise SkipTest("Scipy needed for the Cholesky op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    r = rng.randn(5, 5).astype(config.floatX)
    # Symmetric positive (semi-)definite test matrix.
    pd = numpy.dot(r, r.T)
    # Tighter finite-difference step in double precision.
    eps = None
    if config.floatX == "float64":
        eps = 2e-8
    # Check the default.
    yield (lambda: utt.verify_grad(cholesky, [pd], 3, rng, eps=eps))
    # Explicit lower-triangular.
    yield (lambda: utt.verify_grad(Cholesky(lower=True), [pd], 3,
                                   rng, eps=eps))
    # Explicit upper-triangular.
    yield (lambda: utt.verify_grad(Cholesky(lower=False), [pd], 3,
                                   rng, eps=eps))
@attr('slow')
def test_cholesky_and_cholesky_grad_shape():
    # Verifies that shape graphs of Cholesky and its gradient are computed
    # without actually executing the ops (they must be optimized away),
    # and that the inferred shapes are correct for several sizes.
    if not imported_scipy:
        raise SkipTest("Scipy needed for the Cholesky op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    x = tensor.matrix()
    for l in (cholesky(x), Cholesky(lower=True)(x), Cholesky(lower=False)(x)):
        f_chol = theano.function([x], l.shape)
        g = tensor.grad(l.sum(), x)
        f_cholgrad = theano.function([x], g.shape)
        topo_chol = f_chol.maker.fgraph.toposort()
        topo_cholgrad = f_cholgrad.maker.fgraph.toposort()
        if config.mode != 'FAST_COMPILE':
            # Shape-only graphs must not contain the expensive ops.
            assert sum([node.op.__class__ == Cholesky
                        for node in topo_chol]) == 0
            assert sum([node.op.__class__ == CholeskyGrad
                        for node in topo_cholgrad]) == 0
        for shp in [2, 3, 5]:
            # numpy.cov of an (shp, shp+10) sample is an (shp, shp) matrix.
            m = numpy.cov(rng.randn(shp, shp + 10)).astype(config.floatX)
            yield numpy.testing.assert_equal, f_chol(m), (shp, shp)
            yield numpy.testing.assert_equal, f_cholgrad(m), (shp, shp)
def test_eigvalsh():
    # Compares the generalized eigenvalue op against scipy.linalg.eigvalsh,
    # both with an explicit B matrix and with B = None.
    if not imported_scipy:
        raise SkipTest("Scipy needed for the geigvalsh op.")
    import scipy.linalg
    A = theano.tensor.dmatrix('a')
    B = theano.tensor.dmatrix('b')
    f = function([A, B], eigvalsh(A, B))
    rng = numpy.random.RandomState(utt.fetch_seed())
    a = rng.randn(5, 5)
    a = a + a.T  # symmetrize
    for b in [10 * numpy.eye(5, 5) + rng.randn(5, 5)]:
        w = f(a, b)
        refw = scipy.linalg.eigvalsh(a, b)
        numpy.testing.assert_array_almost_equal(w, refw)
    # We need to test None separately, as otherwise DebugMode will
    # complain, as this isn't a valid ndarray.
    b = None
    B = theano.tensor.NoneConst
    f = function([A], eigvalsh(A, B))
    w = f(a)
    refw = scipy.linalg.eigvalsh(a, b)
    numpy.testing.assert_array_almost_equal(w, refw)
def test_eigvalsh_grad():
    """Numeric gradient check for the generalized eigenvalue op."""
    if not imported_scipy:
        raise SkipTest("Scipy needed for the geigvalsh op.")
    import scipy.linalg
    rng = numpy.random.RandomState(utt.fetch_seed())
    a = rng.randn(5, 5)
    a = a + a.T  # symmetrize
    # Strong diagonal keeps b well conditioned.
    b = 10 * numpy.eye(5, 5) + rng.randn(5, 5)
    # Fix: the seeded RandomState created above was never used -- the
    # unseeded ``numpy.random`` module was passed instead, making the
    # gradient check nondeterministic.  Pass the seeded rng, consistent
    # with the other gradient tests in this file.
    tensor.verify_grad(lambda a, b: eigvalsh(a, b).dot([1, 2, 3, 4, 5]),
                       [a, b], rng=rng)
class test_Solve(utt.InferShapeTester):
    """Shape-inference, correctness and gradient tests for the Solve op."""
    def setUp(self):
        super(test_Solve, self).setUp()
        self.op_class = Solve
        self.op = Solve()
    def test_infer_shape(self):
        # Shape inference must succeed for both matrix and vector
        # right-hand sides.
        if not imported_scipy:
            raise SkipTest("Scipy needed for the Solve op.")
        rng = numpy.random.RandomState(utt.fetch_seed())
        A = theano.tensor.matrix()
        b = theano.tensor.matrix()
        self._compile_and_check([A, b],  # theano.function inputs
                                [self.op(A, b)],  # theano.function outputs
                                # A must be square
                                [numpy.asarray(rng.rand(5, 5),
                                               dtype=config.floatX),
                                 numpy.asarray(rng.rand(5, 1),
                                               dtype=config.floatX)],
                                self.op_class,
                                warn=False)
        rng = numpy.random.RandomState(utt.fetch_seed())
        A = theano.tensor.matrix()
        b = theano.tensor.vector()
        self._compile_and_check([A, b],  # theano.function inputs
                                [self.op(A, b)],  # theano.function outputs
                                # A must be square
                                [numpy.asarray(rng.rand(5, 5),
                                               dtype=config.floatX),
                                 numpy.asarray(rng.rand(5),
                                               dtype=config.floatX)],
                                self.op_class,
                                warn=False)
    def test_solve_correctness(self):
        # Compares Solve against scipy for a general system and for
        # lower/upper triangular systems obtained via Cholesky factors.
        if not imported_scipy:
            raise SkipTest("Scipy needed for the Cholesky and Solve ops.")
        rng = numpy.random.RandomState(utt.fetch_seed())
        A = theano.tensor.matrix()
        b = theano.tensor.matrix()
        y = self.op(A, b)
        gen_solve_func = theano.function([A, b], y)
        cholesky_lower = Cholesky(lower=True)
        L = cholesky_lower(A)
        y_lower = self.op(L, b)
        lower_solve_func = theano.function([L, b], y_lower)
        cholesky_upper = Cholesky(lower=False)
        U = cholesky_upper(A)
        y_upper = self.op(U, b)
        upper_solve_func = theano.function([U, b], y_upper)
        b_val = numpy.asarray(rng.rand(5, 1), dtype=config.floatX)
        # 1-test general case
        A_val = numpy.asarray(rng.rand(5, 5), dtype=config.floatX)
        # positive definite matrix:
        A_val = numpy.dot(A_val.transpose(), A_val)
        assert numpy.allclose(scipy.linalg.solve(A_val, b_val),
                              gen_solve_func(A_val, b_val))
        # 2-test lower triangular case
        L_val = scipy.linalg.cholesky(A_val, lower=True)
        assert numpy.allclose(scipy.linalg.solve_triangular(L_val, b_val, lower=True),
                              lower_solve_func(L_val, b_val))
        # 3-test upper triangular case
        U_val = scipy.linalg.cholesky(A_val, lower=False)
        assert numpy.allclose(scipy.linalg.solve_triangular(U_val, b_val, lower=False),
                              upper_solve_func(U_val, b_val))
    def verify_solve_grad(self, m, n, A_structure, lower, rng):
        # Helper: gradient-check Solve for an (m, m) system with an
        # (m, n) -- or, when n is None, length-m vector -- right-hand side.
        # ensure diagonal elements of A relatively large to avoid numerical
        # precision issues
        A_val = (rng.normal(size=(m, m)) * 0.5 +
                 numpy.eye(m)).astype(config.floatX)
        if A_structure == 'lower_triangular':
            A_val = numpy.tril(A_val)
        elif A_structure == 'upper_triangular':
            A_val = numpy.triu(A_val)
        if n is None:
            b_val = rng.normal(size=m).astype(config.floatX)
        else:
            b_val = rng.normal(size=(m, n)).astype(config.floatX)
        # Tighter finite-difference step in double precision.
        eps = None
        if config.floatX == "float64":
            eps = 2e-8
        solve_op = Solve(A_structure=A_structure, lower=lower)
        utt.verify_grad(solve_op, [A_val, b_val], 3, rng, eps=eps)
    def test_solve_grad(self):
        if not imported_scipy:
            raise SkipTest("Scipy needed for the Solve op.")
        rng = numpy.random.RandomState(utt.fetch_seed())
        structures = ['general', 'lower_triangular', 'upper_triangular']
        for A_structure in structures:
            lower = (A_structure == 'lower_triangular')
            self.verify_solve_grad(5, None, A_structure, lower, rng)
            self.verify_solve_grad(6, 1, A_structure, lower, rng)
            self.verify_solve_grad(4, 3, A_structure, lower, rng)
        # lower should have no effect for A_structure == 'general' so also
        # check lower=True case
        self.verify_solve_grad(4, 3, 'general', lower=True, rng=rng)
def test_expm():
    # Compares the expm op's output against scipy.linalg.expm.
    if not imported_scipy:
        raise SkipTest("Scipy needed for the expm op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    A = rng.randn(5, 5).astype(config.floatX)
    ref = scipy.linalg.expm(A)
    x = tensor.matrix()
    m = expm(x)
    expm_f = function([x], m)
    val = expm_f(A)
    numpy.testing.assert_array_almost_equal(val, ref)
def test_expm_grad_1():
    # with symmetric matrix (real eigenvectors)
    if not imported_scipy:
        raise SkipTest("Scipy needed for the expm op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    # Always test in float64 for better numerical stability.
    A = rng.randn(5, 5)
    A = A + A.T  # symmetrize
    tensor.verify_grad(expm, [A], rng=rng)
def test_expm_grad_2():
    # with non-symmetric matrix with real eigenspectra
    if not imported_scipy:
        raise SkipTest("Scipy needed for the expm op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    # Always test in float64 for better numerical stability.
    A = rng.randn(5, 5)
    w = rng.randn(5)**2
    # Similarity transform of a symmetric matrix: real eigenvalues but
    # a non-symmetric matrix.
    A = (numpy.diag(w**0.5)).dot(A + A.T).dot(numpy.diag(w**(-0.5)))
    assert not numpy.allclose(A, A.T)
    tensor.verify_grad(expm, [A], rng=rng)
def test_expm_grad_3():
    # with non-symmetric matrix (complex eigenvectors)
    if not imported_scipy:
        raise SkipTest("Scipy needed for the expm op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    # Always test in float64 for better numerical stability.
    A = rng.randn(5, 5)
    tensor.verify_grad(expm, [A], rng=rng)
class TestKron(utt.InferShapeTester):
    """Tests the Kronecker-product op against scipy.linalg.kron / numpy.kron."""
    rng = numpy.random.RandomState(43)
    def setUp(self):
        super(TestKron, self).setUp()
        self.op = kron
    def test_perform(self):
        # Exercises kron for operand ranks 1 through 4 (skipping the
        # vector-vector case) and compares against scipy.
        if not imported_scipy:
            raise SkipTest('kron tests need the scipy package to be installed')
        for shp0 in [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
            x = tensor.tensor(dtype='floatX',
                              broadcastable=(False,) * len(shp0))
            a = numpy.asarray(self.rng.rand(*shp0)).astype(config.floatX)
            for shp1 in [(6,), (6, 7), (6, 7, 8), (6, 7, 8, 9)]:
                if len(shp0) + len(shp1) == 2:
                    continue
                y = tensor.tensor(dtype='floatX',
                                  broadcastable=(False,) * len(shp1))
                f = function([x, y], kron(x, y))
                b = self.rng.rand(*shp1).astype(config.floatX)
                out = f(a, b)
                # Newer versions of scipy want 4 dimensions at least,
                # so we have to add a dimension to a and flatten the result.
                if len(shp0) + len(shp1) == 3:
                    scipy_val = scipy.linalg.kron(
                        a[numpy.newaxis, :], b).flatten()
                else:
                    scipy_val = scipy.linalg.kron(a, b)
                utt.assert_allclose(out, scipy_val)
    def test_numpy_2d(self):
        # The 2-D x 2-D case can also be checked against numpy.kron,
        # which needs no scipy.
        for shp0 in [(2, 3)]:
            x = tensor.tensor(dtype='floatX',
                              broadcastable=(False,) * len(shp0))
            a = numpy.asarray(self.rng.rand(*shp0)).astype(config.floatX)
            for shp1 in [(6, 7)]:
                if len(shp0) + len(shp1) == 2:
                    continue
                y = tensor.tensor(dtype='floatX',
                                  broadcastable=(False,) * len(shp1))
                f = function([x, y], kron(x, y))
                b = self.rng.rand(*shp1).astype(config.floatX)
                out = f(a, b)
                assert numpy.allclose(out, numpy.kron(a, b))
|
{
"content_hash": "e530ff14b936205854cc7bc15e9e19e2",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 87,
"avg_line_length": 37.43561643835616,
"alnum_prop": 0.5530591334894613,
"repo_name": "JazzeYoung/VeryDeepAutoEncoder",
"id": "3886271ff4d0e2b2cc959895c9662c282279261a",
"size": "13664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "theano/tensor/tests/test_slinalg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "260790"
},
{
"name": "C++",
"bytes": "323987"
},
{
"name": "CSS",
"bytes": "1750"
},
{
"name": "Cuda",
"bytes": "2767955"
},
{
"name": "HTML",
"bytes": "4611"
},
{
"name": "Jupyter Notebook",
"bytes": "4603376"
},
{
"name": "Makefile",
"bytes": "116"
},
{
"name": "Python",
"bytes": "16514506"
},
{
"name": "Shell",
"bytes": "16447"
}
],
"symlink_target": ""
}
|
class dataObject:
    """A single data point from the database.

    The actual data lives in the ``attributes`` vector.
    """
    def __init__(self, numberOfAttributes):
        # Pre-size the attribute vector with zeros.
        self.attributes = [0.0] * numberOfAttributes
    def setAttributes(self, attributesIn):
        """Copy values from ``attributesIn`` into the attribute vector.

        Only the first ``len(attributesIn)`` slots are overwritten; any
        remaining slots keep their previous values.
        """
        for position, value in enumerate(attributesIn):
            self.attributes[position] = value
class Database:
    """A container class for dataObjects.

    ``pureData`` holds all attribute values flattened row-major: element 0's
    attributes first, then element 1's, and so on.  The attribute count of
    the *first* element defines the row width for every element.
    """
    def __init__(self, database_in):
        self.datapoints = database_in
        self.numElements = len(database_in)
        self.numAttributes = len(database_in[0].attributes)
        # Flatten the per-element attribute vectors into one list, reading
        # exactly numAttributes values from each element.
        self.pureData = [
            self.datapoints[element].attributes[attribute]
            for element in range(self.numElements)
            for attribute in range(self.numAttributes)
        ]
    def getVectorWithAttributeNr(self, x):
        """Return a copy of the attribute row of element ``x``.

        NOTE(review): despite the name, this slices out element ``x``'s
        attributes (a row), not the values of attribute ``x`` across
        elements -- matches the original slicing.
        """
        start = x * self.numAttributes
        return list(self.pureData[start:start + self.numAttributes])
|
{
"content_hash": "5c5a3b7d115cad454021d69dbd54c3c1",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 85,
"avg_line_length": 34.121212121212125,
"alnum_prop": 0.6376554174067496,
"repo_name": "ViktorWase/Frank-the-Science-Bot",
"id": "563fe1a802af88a2e8e692b0b893912a394d90c7",
"size": "1126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dataObjectClass.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "98257"
}
],
"symlink_target": ""
}
|
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from ApplePie import *
from Utils import *
from ApplePieTk.subFrame.BaseFrame import *
class Frame1(BaseFrame):
    """Sub-frame providing a directory chooser: a browse button plus a
    text entry for typing a path directly."""
    # Widget handles; created in __init__ (class-level None marks
    # not-yet-initialized).
    btnPath = None
    entPath = None
    def __init__(self, parentFrame):
        super().__init__(parentFrame)
        # Initialize the components.
        self.btnPath = tk.Button(self.frame, width=10, height=1, text='browse', command=self.setPathDialog)
        self.entPath = tk.Entry(self.frame, width=80)
        self.entPath.bind('<KeyPress>', self.setPathDirect)
    def pack(self, applePieTk, row=None, col=None, colspan=None, align=None, side=None):
        """Lay out this frame via the base class, then pack the widgets."""
        super().pack(applePieTk, row, col, colspan, align, side)
        self.btnPath.pack(side=tk.LEFT)
        self.entPath.pack(side=tk.LEFT)
    def setDirName(self, dirName):
        """Show ``dirName`` in the entry, then refresh via ``self.pie``
        and ``refreshMain`` (both presumably provided by BaseFrame --
        confirm)."""
        self.entPath.delete(0, tk.END)
        self.entPath.insert(tk.END, dirName)
        self.pie.refresh(dirName)
        self.refreshMain()
    # Path input via a file-chooser dialog.
    def setPathDialog(self):
        dirName = filedialog.askdirectory()
        if self.checkDirectory(dirName) != True :
            return
        self.setDirName(dirName)
    # Path input typed directly into the text box.
    def setPathDirect(self, event):
        # NOTE(review): keycode 13 is the Return key on Windows; other
        # platforms use different keycodes -- confirm if cross-platform
        # use is intended.
        if event.keycode != 13:
            return
        dirName = self.entPath.get()
        if self.checkDirectory(dirName) != True :
            return
        self.setDirName(dirName)
    # Check whether the given path is a directory.
    def checkDirectory(self, dirName):
        if len(dirName) == 0:
            return False
        elif isDir(dirName) != True:
            # Error message (Korean): "Not a directory." -- runtime string,
            # left untranslated.
            return self.error('디렉토리가 아닙니다.')
        return True
|
{
"content_hash": "51e73eb5f24ed9197a9191780b0489d1",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 107,
"avg_line_length": 30.016129032258064,
"alnum_prop": 0.5502418054809243,
"repo_name": "rurilee/applepie",
"id": "3076283558c2667fc2b648a12abb61294433c577",
"size": "1971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applepie/app/ruri/ApplePieTk/subFrame/Frame1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24470"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
import fnmatch
from os import listdir, path
import re
import logging
import numpy as np
import numpy.ma as ma
from astropy.table import Table, MaskedColumn
import astropy.io.fits as fits
from astropy.utils import minversion
import warnings
from astropy.utils.exceptions import AstropyUserWarning
from .ccddata import fits_ccddata_reader, _recognized_fits_file_extensions
logger = logging.getLogger(__name__)
__all__ = ['ImageFileCollection']
__doctest_skip__ = ['*']
_ASTROPY_LT_1_3 = not minversion("astropy", "1.3")
class ImageFileCollection:
"""
Representation of a collection of image files.
The class offers a table summarizing values of
keywords in the FITS headers of the files in the collection and offers
convenient methods for iterating over the files in the collection. The
generator methods use simple filtering syntax and can automate storage
of any FITS files modified in the loop using the generator.
Parameters
----------
location : str or None, optional
Path to directory containing FITS files.
Default is ``None``.
keywords : list of str, '*' or None, optional
Keywords that should be used as column headings in the summary table.
If the value is or includes '*' then all keywords that appear in any
of the FITS headers of the files in the collection become table
columns. Default value is '*' unless ``info_file`` is specified.
Default is ``None``.
find_fits_by_reading: bool, optional
If ``True``, read each file in location to check whether the file is a
FITS file and include it in the collection based on that, rather than
by file name. Compressed files, e.g. image.fits.gz, will **NOT** be
properly detected. *Will be ignored if `filenames` is not ``None``.*
filenames: str, list of str, or None, optional
List of the names of FITS files which will be added to the collection.
The filenames may either be in ``location`` or the name can be a
relative or absolute path to the file.
Default is ``None``.
glob_include: str or None, optional
Unix-style filename pattern to select filenames to include in the file
collection. Can be used in conjunction with ``glob_exclude`` to
easily select subsets of files in the target directory.
Default is ``None``.
glob_exclude: str or None, optional
Unix-style filename pattern to select filenames to exclude from the
file collection. Can be used in conjunction with ``glob_include`` to
easily select subsets of files in the target directory.
Default is ``None``.
ext: str or int, optional
The extension from which the header and data will be read in all
files.Default is ``0``.
Raises
------
ValueError
Raised if keywords are set to a combination of '*' and any other
value.
"""
def __init__(self, location=None, keywords=None,
find_fits_by_reading=False,
filenames=None, glob_include=None, glob_exclude=None, ext=0):
# Include or exclude files from the collection based on glob pattern
# matching - has to go above call to _get_files()
if glob_exclude is not None:
glob_exclude = str(glob_exclude) # some minimal validation
self._glob_exclude = glob_exclude
if glob_include is not None:
glob_include = str(glob_include)
self._glob_include = glob_include
if location is not None:
self._location = location
else:
self._location = ''
self._find_fits_by_reading = find_fits_by_reading
self._filenames = filenames
self._files = []
self._files = self._get_files()
if self._files == []:
warnings.warn("no FITS files in the collection.",
AstropyUserWarning)
self._summary = {}
if keywords is None:
# Use all keywords.
keywords = '*'
# Used internally to keep track of whether the user asked for all
# keywords or a specific list. The keywords setter takes care of
# actually setting the correct value, this just ensure that there
# is always *some* value.
self._all_keywords = False
self._ext = ext
if keywords:
self.keywords = keywords
def __repr__(self):
if self.location is None:
location = ""
else:
location = "location={!r}".format(self.location)
if self._all_keywords:
kw = ""
else:
kw = "keywords={!r}".format(self.keywords[1:])
if self.glob_exclude is None:
glob_exclude = ''
else:
glob_exclude = "glob_exclude={!r}".format(self.glob_exclude)
if self.glob_include is None:
glob_include = ''
else:
glob_include = "glob_include={!r}".format(self.glob_include)
if self.ext == 0:
ext = ""
else:
ext = "ext={}".format(self.ext)
if self._filenames is None:
filenames = ""
else:
filenames = "filenames={}".format(self._filenames)
params = [location, kw, filenames, glob_include, glob_exclude, ext]
params = ', '.join([p for p in params if p])
str_repr = "{self.__class__.__name__}({params})".format(
self=self, params=params)
return str_repr
@property
def summary(self):
"""
`~astropy.table.Table` of values of FITS keywords for files in the
collection.
Each keyword is a column heading. In addition, there is a column
called ``file`` that contains the name of the FITS file. The directory
is not included as part of that name.
The first column is always named ``file``.
The order of the remaining columns depends on how the summary was
constructed.
If a wildcard, ``*`` was used then the order is the order in which
the keywords appear in the FITS files from which the summary is
constructed.
If an explicit list of keywords was supplied in setting up the
collection then the order of the columns is the order of the
keywords.
"""
return self._summary
@property
def location(self):
"""
str, Path name to directory containing FITS files.
"""
return self._location
@property
def keywords(self):
"""
list of str, Keywords currently in the summary table.
Setting the keywords causes the summary table to be regenerated unless
the new keywords are a subset of the old.
.. versionchanged:: 1.3
Added ``deleter`` for ``keywords`` property.
"""
if self.summary:
return self.summary.keys()
else:
return []
@keywords.setter
def keywords(self, keywords):
# since keywords are drawn from self.summary, setting
# summary sets the keywords.
if keywords is None:
self._summary = []
return
if keywords == '*':
self._all_keywords = True
else:
self._all_keywords = False
logging.debug('keywords in setter before pruning: %s.', keywords)
# remove duplicates and force a copy so we can sort the items later
# by their given position.
new_keys_set = set(keywords)
new_keys_lst = list(new_keys_set)
new_keys_set.add('file')
logging.debug('keywords after pruning %s.', new_keys_lst)
current_set = set(self.keywords)
if new_keys_set.issubset(current_set):
logging.debug('table columns before trimming: %s.',
' '.join(current_set))
cut_keys = current_set.difference(new_keys_set)
logging.debug('will try removing columns: %s.',
' '.join(cut_keys))
for key in cut_keys:
self._summary.remove_column(key)
logging.debug('after removal column names are: %s.',
' '.join(self.keywords))
else:
logging.debug('should be building new table...')
# Reorder the keywords to match the initial ordering.
new_keys_lst.sort(key=keywords.index)
self._summary = self._fits_summary(new_keys_lst)
@keywords.deleter
def keywords(self):
# since keywords are drawn from self._summary, setting
# _summary = [] deletes the keywords.
self._summary = []
@property
def files(self):
"""
list of str, Unfiltered list of FITS files in location.
"""
return self._files
@property
def glob_include(self):
"""
str or None, Unix-style filename pattern to select filenames to include
in the file collection.
"""
return self._glob_include
@property
def glob_exclude(self):
"""
str or None, Unix-style filename pattern to select filenames to exclude
in the file collection.
"""
return self._glob_exclude
@property
def ext(self):
"""
str or int, The extension from which the header and data will
be read in all files.
"""
return self._ext
def values(self, keyword, unique=False):
"""
List of values for a keyword.
Parameters
----------
keyword : str
Keyword (i.e. table column) for which values are desired.
unique : bool, optional
If True, return only the unique values for the keyword.
Default is ``False``.
Returns
-------
list
Values as a list.
"""
if keyword not in self.keywords:
raise ValueError(
'keyword %s is not in the current summary' % keyword)
if unique:
return list(set(self.summary[keyword].tolist()))
else:
return self.summary[keyword].tolist()
def files_filtered(self, **kwd):
"""Determine files whose keywords have listed values.
Parameters
----------
include_path : bool, keyword-only
If the keyword ``include_path=True`` is set, the returned list
contains not just the filename, but the full path to each file.
Default is ``False``.
regex_match : bool, keyword-only
If ``True``, then string values in the ``**kwd`` dictionary are
treated as regular expression patterns and matching is done by
regular expression search. The search is always
**case insensitive**.
**kwd :
``**kwd`` is dict of keywords and values the files must have.
The value '*' represents any value.
A missing keyword is indicated by value ''.
Returns
-------
filenames : list
The files that satisfy the keyword-value restrictions specified by
the ``**kwd``.
Examples
--------
Some examples for filtering::
>>> keys = ['imagetyp','filter']
>>> collection = ImageFileCollection('test/data', keywords=keys)
>>> collection.files_filtered(imagetyp='LIGHT', filter='R')
>>> collection.files_filtered(imagetyp='*', filter='')
In case you want to filter with keyword names that cannot be used
as keyword argument name, you have to unpack them using a dictionary.
For example if a keyword name contains a space or a ``-``::
>>> add_filters = {'exp-time': 20, 'ESO TPL ID': 1050}
>>> collection.files_filtered(imagetyp='LIGHT', **add_filters)
Notes
-----
Value comparison is case *insensitive* for strings, whether matching
exactly or matching with regular expressions.
"""
# force a copy by explicitly converting to a list
current_file_mask = self.summary['file'].mask.tolist()
include_path = kwd.pop('include_path', False)
self._find_keywords_by_values(**kwd)
filtered_files = self.summary['file'].compressed()
self.summary['file'].mask = current_file_mask
if include_path:
filtered_files = [path.join(self._location, f)
for f in filtered_files.tolist()]
return filtered_files
def refresh(self):
"""
Refresh the collection by re-reading headers.
"""
keywords = '*' if self._all_keywords else self.keywords
# Re-load list of files
self._files = self._get_files()
self._summary = self._fits_summary(header_keywords=keywords)
    def sort(self, keys):
        """Sort the list of files to determine the order of iteration.
        Sort the table of files according to one or more keys. This does not
        create a new object, instead is sorts in place.
        Parameters
        ----------
        keys : str, list of str
            The key(s) to order the table by.
        """
        # An empty summary has no columns, so there is nothing to sort.
        if len(self._summary) > 0:
            self._summary.sort(keys)
            # Keep the raw file list in step with the new table order.
            self._files = self.summary['file'].tolist()
def filter(self, **kwd):
"""
Create a new collection by filtering the current collection.
Parameters
----------
regex_match : bool, keyword-only
If ``True``, then string values in the ``**kwd`` dictionary are
treated as regular expression patterns and matching is done by
regular expression search. The search is always
**case insensitive**.
**kwd :
``**kwd`` is dict of keywords and values the files must have.
The value '*' represents any value.
A missing keyword is indicated by value ''.
Returns
-------
`ImageFileCollection`
A new collection with the files matched by the arguments
to filter.
"""
files = self.files_filtered(include_path=True, **kwd)
return ImageFileCollection(filenames=files,
keywords=self.keywords)
def _get_files(self):
""" Helper method which checks whether ``files`` should be set
to a subset of file names or to all file names in a directory.
Returns
-------
files : list or str
List of file names which will be added to the collection.
"""
files = []
if self._filenames:
if isinstance(self._filenames, str):
files.append(self._filenames)
else:
files = self._filenames
else:
# Check if self.location is set, otherwise proceed with empty list
if self.location != '':
files = self._fits_files_in_directory()
if self.glob_include is not None:
files = fnmatch.filter(files, self.glob_include)
if self.glob_exclude is not None:
files = [file for file in files
if not fnmatch.fnmatch(file, self.glob_exclude)]
return files
    def _dict_from_fits_header(self, file_name, input_summary=None,
                               missing_marker=None):
        """
        Construct an ordered dictionary whose keys are the header keywords
        and values are a list of the values from this file and the input
        dictionary. If the input dictionary is ordered then that order is
        preserved.
        Parameters
        ----------
        file_name : str
            Name of FITS file.
        input_summary : dict or None, optional
            Existing dictionary to which new values should be appended.
            Default is ``None``.
        missing_marker : any type, optional
            Fill value for missing header-keywords.
            Default is ``None``.
        Returns
        -------
        summary : dict
            Mapping of lower-cased header keyword to the list of values
            collected so far, one entry per file processed (the ``'file'``
            key holds the file names).
        """
        def _add_val_to_dict(key, value, tbl_dict, n_previous, missing_marker):
            # Append ``value`` under ``key``; a keyword seen for the first
            # time is back-filled with ``missing_marker`` for the
            # ``n_previous`` files already processed, so every list stays
            # the same length.
            try:
                tbl_dict[key].append(value)
            except KeyError:
                tbl_dict[key] = [missing_marker] * n_previous
                tbl_dict[key].append(value)
        if input_summary is None:
            summary = OrderedDict()
            n_previous = 0
        else:
            summary = input_summary
            n_previous = len(summary['file'])
        h = fits.getheader(file_name, self.ext)
        assert 'file' not in h
        if self.location:
            # We have a location and can reconstruct the path using it
            name_for_file_column = path.basename(file_name)
        else:
            # No location, so use whatever path the user passed in
            name_for_file_column = file_name
        # Try opening header before this so that file name is only added if
        # file is valid FITS
        try:
            summary['file'].append(name_for_file_column)
        except KeyError:
            summary['file'] = [name_for_file_column]
        # Keywords collected from earlier files but absent from this header;
        # they get the missing marker at the end.
        missing_in_this_file = [k for k in summary if (k not in h and
                                                       k != 'file')]
        multi_entry_keys = {'comment': [],
                            'history': []}
        alreadyencountered = set()
        for k, v in h.items():
            if k == '':
                continue
            # Summary keys are normalized to lower case.
            k = k.lower()
            if k in ['comment', 'history']:
                multi_entry_keys[k].append(str(v))
                # Accumulate these in a separate dictionary until the
                # end to avoid adding multiple entries to summary.
                continue
            elif k in alreadyencountered:
                # The "normal" multi-entries HISTORY, COMMENT and BLANK are
                # already processed so any further duplication is probably
                # a mistake. It would lead to problems in ImageFileCollection
                # to add it as well, so simply ignore those.
                warnings.warn(
                    'Header from file "{f}" contains multiple entries for '
                    '"{k}", the pair "{k}={v}" will be ignored.'
                    ''.format(k=k, v=v, f=file_name),
                    UserWarning)
                continue
            else:
                # Add the key to the already encountered keys so we don't add
                # it more than once.
                alreadyencountered.add(k)
            _add_val_to_dict(k, v, summary, n_previous, missing_marker)
        # Join accumulated COMMENT/HISTORY cards into one comma-separated
        # value per file.
        for k, v in multi_entry_keys.items():
            if v:
                joined = ','.join(v)
                _add_val_to_dict(k, joined, summary, n_previous,
                                 missing_marker)
        for missing in missing_in_this_file:
            summary[missing].append(missing_marker)
        return summary
def _set_column_name_case_to_match_keywords(self, header_keys,
summary_table):
for k in header_keys:
k_lower = k.lower()
if k_lower != k:
try:
summary_table.rename_column(k_lower, k)
except KeyError:
pass
    def _fits_summary(self, header_keywords):
        """
        Generate a summary table of keywords from FITS headers.
        Parameters
        ----------
        header_keywords : list of str or '*'
            Keywords whose value should be extracted from FITS headers or '*'
            to extract all.
        Returns
        -------
        `~astropy.table.Table` or None
            Masked table with one row per readable file, or ``None`` when
            the collection has no files.
        """
        if not self.files:
            return None
        # Make sure we have a list...for example, in python 3, dict.keys()
        # is not a list.
        original_keywords = list(header_keywords)
        # Get rid of any duplicate keywords, also forces a copy.
        header_keys = set(original_keywords)
        header_keys.add('file')
        file_name_column = MaskedColumn(name='file', data=self.files)
        if not header_keys or (header_keys == {'file'}):
            # Only file names were requested; no headers need to be read.
            summary_table = Table(masked=True)
            summary_table.add_column(file_name_column)
            return summary_table
        summary_dict = None
        missing_marker = None
        for file_name in file_name_column.tolist():
            file_path = path.join(self.location, file_name)
            try:
                # Note: summary_dict is an OrderedDict, so should preserve
                # the order of the keywords in the FITS header.
                summary_dict = self._dict_from_fits_header(
                    file_path, input_summary=summary_dict,
                    missing_marker=missing_marker)
            except IOError as e:
                # Skip unreadable files rather than aborting the scan.
                logger.warning('unable to get FITS header for file %s: %s.',
                               file_path, e)
                continue
        summary_table = Table(summary_dict, masked=True)
        # Mask every cell that still holds the "missing" marker.
        for column in summary_table.colnames:
            summary_table[column].mask = [
                v is missing_marker for v in summary_table[column].tolist()]
        self._set_column_name_case_to_match_keywords(header_keys,
                                                     summary_table)
        # Requested keywords found in no header at all become fully masked
        # columns so callers can still reference them.
        missing_columns = header_keys - set(summary_table.colnames)
        missing_columns -= {'*'}
        length = len(summary_table)
        for column in missing_columns:
            all_masked = MaskedColumn(name=column, data=np.zeros(length),
                                      mask=np.ones(length))
            summary_table.add_column(all_masked)
        if '*' not in header_keys:
            # Rearrange table columns to match order of keywords.
            # File always comes first.
            header_keys -= {'file'}
            original_order = ['file'] + sorted(header_keys,
                                               key=original_keywords.index)
            summary_table = summary_table[original_order]
        if not summary_table.masked:
            summary_table = Table(summary_table, masked=True)
        return summary_table
    def _find_keywords_by_values(self, **kwd):
        """
        Find files whose keywords have given values.
        Parameters
        ----------
        match_regex : bool, optional
            If ``True`` match string values by using a regular expression
            search instead of equality. Default value is ``False``.
        The remaining arguments are keyword/value pairs specifying the
        values to match.
        `**kwd` is list of keywords and values the files must have.
        The value '*' represents any value.
        A missing keyword is indicated by value ''
        Example::
            >>> keys = ['imagetyp','filter']
            >>> collection = ImageFileCollection('test/data', keywords=keys)
            >>> collection.files_filtered(imagetyp='LIGHT', filter='R')
            >>> collection.files_filtered(imagetyp='*', filter='')
            >>> collection.files_filtered(imagetyp='bias|filter', regex_match=True)
        NOTE: Value comparison is case *insensitive* for strings.
        Side effect: updates the mask of ``self.summary['file']`` so that
        non-matching rows are masked out; it returns nothing.
        """
        regex_match = kwd.pop('regex_match', False)
        keywords = kwd.keys()
        values = kwd.values()
        if set(keywords).issubset(self.keywords):
            # we already have the information in memory
            use_info = self.summary
        else:
            # we need to load information about these keywords.
            use_info = self._fits_summary(header_keywords=keywords)
        # Start with everything matching; each keyword ANDs in its result.
        matches = np.ones(len(use_info), dtype=bool)
        for key, value in zip(keywords, values):
            logger.debug('key %s, value %s', key, value)
            logger.debug('value in table %s', use_info[key])
            value_missing = use_info[key].mask
            logger.debug('value missing: %s', value_missing)
            value_not_missing = np.logical_not(value_missing)
            if value == '*':
                # '*' matches any file that has this keyword at all.
                have_this_value = value_not_missing
            elif value is not None:
                if isinstance(value, str):
                    # need to loop explicitly over array rather than using
                    # where to correctly do string comparison.
                    have_this_value = np.zeros(len(use_info), dtype=bool)
                    # We are going to do a regex match no matter what.
                    if regex_match:
                        pattern = re.compile(value,
                                             flags=re.IGNORECASE)
                    else:
                        # This pattern matches the prior behavior.
                        pattern = re.compile('^' + value + '$',
                                             flags=re.IGNORECASE)
                    for idx, file_key_value in enumerate(use_info[key].tolist()):
                        if value_not_missing[idx]:
                            try:
                                value_matches = (
                                    pattern.search(file_key_value) is not None)
                            except TypeError:
                                # In case we're dealing with an object column
                                # there could be values other than strings in it
                                # so it could fail with an TypeError.
                                value_matches = False
                        else:
                            value_matches = False
                        have_this_value[idx] = (value_not_missing[idx] &
                                                value_matches)
                else:
                    # Non-string values are compared with array equality,
                    # restricted to rows where the keyword is present.
                    have_this_value = value_not_missing
                    tmp = (use_info[key][value_not_missing] == value)
                    have_this_value[value_not_missing] = tmp
                    have_this_value &= value_not_missing
            else:
                # this case--when value==None--is asking for the files which
                # are missing a value for this keyword
                have_this_value = value_missing
            matches &= have_this_value
        # the numpy convention is that the mask is True for values to
        # be omitted, hence use ~matches.
        logger.debug('Matches: %s', matches)
        self.summary['file'].mask = ma.nomask
        self.summary['file'].mask[~matches] = True
def _fits_files_in_directory(self, extensions=None,
compressed=True):
"""
Get names of FITS files in directory, based on filename extension.
Parameters
----------
extensions : list of str or None, optional
List of filename extensions that are FITS files. Default is
``['fit', 'fits', 'fts']``.
Default is ``None``.
compressed : bool, optional
If ``True``, compressed files should be included in the list
(e.g. `.fits.gz`).
Default is ``True``.
Returns
-------
list
*Names* of the files (with extension), not the full pathname.
"""
full_extensions = extensions or list(_recognized_fits_file_extensions)
# The common compressed fits image .fz is supported using ext=1 when calling ImageFileCollection
if compressed:
for comp in ['.gz', '.bz2', '.Z', '.zip', '.fz']:
with_comp = [extension + comp for extension in full_extensions]
full_extensions.extend(with_comp)
all_files = listdir(self.location)
files = []
if not self._find_fits_by_reading:
for extension in full_extensions:
files.extend(fnmatch.filter(all_files, '*' + extension))
else:
for infile in all_files:
inpath = path.join(self.location, infile)
with open(inpath, 'rb') as fp:
# Hmm, first argument to is_fits is not actually used in
# that function. *shrug*
if fits.connect.is_fits('just some junk', infile, fp):
files.append(infile)
files.sort()
return files
    def _generator(self, return_type,
                   save_with_name="", save_location='',
                   clobber=False,
                   overwrite=False,
                   do_not_scale_image_data=True,
                   return_fname=False,
                   ccd_kwargs=None,
                   **kwd):
        """
        Generator that yields each {name} in the collection.
        If any of the parameters ``save_with_name``, ``save_location`` or
        ``overwrite`` evaluates to ``True`` the generator will write a copy of
        each FITS file it is iterating over. In other words, if
        ``save_with_name`` and/or ``save_location`` is a string with non-zero
        length, and/or ``overwrite`` is ``True``, a copy of each FITS file will
        be made.
        Parameters
        ----------
        save_with_name : str, optional
            string added to end of file name (before extension) if
            FITS file should be saved after iteration. Unless
            ``save_location`` is set, files will be saved to location of
            the source files ``self.location``.
            Default is ``''``.
        save_location : str, optional
            Directory in which to save FITS files; implies that FITS
            files will be saved. Note this provides an easy way to
            copy a directory of files--loop over the {name} with
            ``save_location`` set.
            Default is ``''``.
        overwrite : bool, optional
            If ``True``, overwrite input FITS files.
            Default is ``False``.
        clobber : bool, optional
            Alias for ``overwrite``.
            Default is ``False``.
        do_not_scale_image_data : bool, optional
            If ``True``, prevents fits from scaling images. Default is
            ``{default_scaling}``.
            Default is ``True``.
        return_fname : bool, optional
            If True, return the tuple (header, file_name) instead of just
            header. The file name returned is the name of the file only,
            not the full path to the file.
            Default is ``False``.
        ccd_kwargs : dict, optional
            Dict with parameters for `~astropy.nddata.fits_ccddata_reader`.
            For instance, the key ``'unit'`` can be used to specify the unit
            of the data. If ``'unit'`` is not given then ``'adu'`` is used as
            the default unit.
            See `~astropy.nddata.fits_ccddata_reader` for a complete list of
            parameters that can be passed through ``ccd_kwargs``.
        regex_match : bool, keyword-only
            If ``True``, then string values in the ``**kwd`` dictionary are
            treated as regular expression patterns and matching is done by
            regular expression search. The search is always
            **case insensitive**.
        **kwd :
            Any additional keywords are used to filter the items returned; see
            `files_filtered` examples for details.
        Returns
        -------
        `{return_type}`
            If ``return_fname`` is ``False``, yield the next {name} in the
            collection.
        (`{return_type}`, str)
            If ``return_fname`` is ``True``, yield a tuple of
            ({name}, ``file name``) for the next item in the collection.
        """
        # store mask so we can reset at end--must COPY, otherwise
        # current_mask just points to the mask of summary
        if not self.summary:
            return
        current_mask = {}
        for col in self.summary.columns:
            current_mask[col] = self.summary[col].mask
        if kwd:
            # Restrict iteration to the files matching the keyword filters.
            self._find_keywords_by_values(**kwd)
        ccd_kwargs = ccd_kwargs or {}
        for full_path in self._paths():
            add_kwargs = {'do_not_scale_image_data': do_not_scale_image_data}
            # We need to open the file here, get the appropriate values and then
            # close it again before it "yields" otherwise it's not garantueed
            # that the generator actually advances and closes the file again.
            # For example if one uses "next" on the generator manually the
            # file handle could "leak".
            if return_type == 'header':
                return_thing = fits.getheader(full_path, self.ext)
            elif return_type == 'data':
                return_thing = fits.getdata(full_path, self.ext, **add_kwargs)
            elif return_type == 'ccd':
                return_thing = fits_ccddata_reader(
                    full_path, hdu=self.ext, **ccd_kwargs)
            elif return_type == 'hdu':
                with fits.open(full_path, **add_kwargs) as hdulist:
                    ext_index = hdulist.index_of(self.ext)
                    # Need to copy the HDU to prevent lazy loading problems
                    # and "IO operations on closed file" errors
                    return_thing = hdulist[ext_index].copy()
            else:
                raise ValueError('no generator for {}'.format(return_type))
            file_name = path.basename(full_path)
            if return_fname:
                yield return_thing, file_name
            else:
                yield return_thing
            # Everything below runs after control comes back from the
            # caller, i.e. after the caller may have modified the yielded
            # object; the (possibly modified) object is what gets saved.
            if save_location:
                destination_dir = save_location
            else:
                destination_dir = path.dirname(full_path)
            basename = path.basename(full_path)
            if save_with_name:
                base, ext = path.splitext(basename)
                basename = base + save_with_name + ext
            new_path = path.join(destination_dir, basename)
            # I really should have called the option overwrite from
            # the beginning. The hack below ensures old code works,
            # at least...
            if clobber or overwrite:
                if _ASTROPY_LT_1_3:
                    nuke_existing = {'clobber': True}
                else:
                    nuke_existing = {'overwrite': True}
            else:
                nuke_existing = {}
            if return_type == 'ccd':
                # Saving is not implemented for CCDData (see ccds()).
                pass
            elif (new_path != full_path) or nuke_existing:
                with fits.open(full_path, **add_kwargs) as hdulist:
                    ext_index = hdulist.index_of(self.ext)
                    if return_type == 'hdu':
                        hdulist[ext_index] = return_thing
                    elif return_type == 'data':
                        hdulist[ext_index].data = return_thing
                    elif return_type == 'header':
                        hdulist[ext_index].header = return_thing
                    try:
                        hdulist.writeto(new_path, **nuke_existing)
                    except IOError:
                        logger.error('error writing file %s', new_path)
                        raise
        # reset mask
        for col in self.summary.columns:
            self.summary[col].mask = current_mask[col]
def _paths(self):
"""
Full path to each file.
"""
unmasked_files = self.summary['file'].compressed().tolist()
return [path.join(self.location, file_) for file_ in unmasked_files]
    def headers(self, do_not_scale_image_data=True, **kwd):
        # Thin wrapper around _generator yielding FITS headers; the full
        # description lives in the formatted __doc__ assigned just below.
        return self._generator('header',
                               do_not_scale_image_data=do_not_scale_image_data,
                               **kwd)
    headers.__doc__ = _generator.__doc__.format(
        name='header', default_scaling='True',
        return_type='astropy.io.fits.Header')
    def hdus(self, do_not_scale_image_data=False, **kwd):
        # Thin wrapper around _generator yielding HDUs; the full description
        # lives in the formatted __doc__ assigned just below.
        return self._generator('hdu',
                               do_not_scale_image_data=do_not_scale_image_data,
                               **kwd)
    hdus.__doc__ = _generator.__doc__.format(
        name='HDUList', default_scaling='False',
        return_type='astropy.io.fits.HDUList')
    def data(self, do_not_scale_image_data=False, **kwd):
        # Thin wrapper around _generator yielding image data arrays; the
        # full description lives in the formatted __doc__ assigned below.
        return self._generator('data',
                               do_not_scale_image_data=do_not_scale_image_data,
                               **kwd)
    data.__doc__ = _generator.__doc__.format(
        name='image', default_scaling='False', return_type='numpy.ndarray')
    def ccds(self, ccd_kwargs=None, **kwd):
        # Saving is not implemented for the CCD return type in _generator,
        # so refuse the overwrite/clobber options up front.
        if kwd.get('clobber') or kwd.get('overwrite'):
            raise NotImplementedError(
                "overwrite=True (or clobber=True) is not supported for CCDs.")
        return self._generator('ccd', ccd_kwargs=ccd_kwargs, **kwd)
    ccds.__doc__ = _generator.__doc__.format(
        name='CCDData', default_scaling='True', return_type='astropy.nddata.CCDData')
|
{
"content_hash": "52c999a29585b0beb9665cdb579928eb",
"timestamp": "",
"source": "github",
"line_count": 1002,
"max_line_length": 104,
"avg_line_length": 37.03792415169661,
"alnum_prop": 0.5495257598620392,
"repo_name": "mwcraig/ccdproc",
"id": "ff58c46895f833522642bdbd37aadb7546ecf6b5",
"size": "37177",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ccdproc/image_collection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7763"
},
{
"name": "Python",
"bytes": "349209"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_launch
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: Launch an Ansible Job.
description:
- Launch an Ansible Tower jobs. See
U(https://www.ansible.com/tower) for an overview.
options:
job_template:
description:
- Name of the job_template to use.
required: True
job_explanation:
description:
- Job explanation field.
default: null
job_type:
description:
- Job_type to use for the job, only used if prompt for job_type is set.
choices: ["run", "check", "scan"]
default: null
inventory:
description:
- Inventory to use for the job, only used if prompt for inventory is set.
default: null
credential:
description:
- Credential to use for job, only used if prompt for credential is set.
default: null
extra_vars:
description:
- Extra_vars to use for the job_template. Prepend '@' if a file.
default: null
limit:
description:
- Limit to use for the job_template.
default: null
tags:
description:
- Specific tags to use for from playbook.
default: null
use_job_endpoint:
description:
- Disable launching jobs from job template.
default: False
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Launch a job
tower_job_launch:
job_template: "My Job Template"
register: job
- name: Wait for job max 120s
tower_job_wait:
job_id: job.id
timeout: 120
'''
RETURN = '''
id:
description: job id of the newly launched job
returned: success
type: int
sample: 86
status:
description: status of newly launched job
returned: success
type: string
sample: pending
'''
from ansible.module_utils.basic import AnsibleModule
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import (
tower_auth_config,
tower_check_mode,
tower_argument_spec,
)
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def main():
    """Launch a Tower job from a job template and report its id/status."""
    argument_spec = tower_argument_spec()
    argument_spec.update(dict(
        job_template=dict(required=True),
        job_type=dict(choices=['run', 'check', 'scan']),
        inventory=dict(),
        credential=dict(),
        limit=dict(),
        tags=dict(type='list'),
        extra_vars=dict(type='list'),
    ))
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True
    )
    if not HAS_TOWER_CLI:
        module.fail_json(msg='ansible-tower-cli required for this module')
    json_output = {}
    tags = module.params.get('tags')
    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        try:
            params = module.params.copy()
            if isinstance(tags, list):
                # Collapse the tag list into a single comma-separated string.
                params['tags'] = ','.join(tags)
            job = tower_cli.get_resource('job')
            # Resolve each named resource to its Tower id before launching.
            lookup_fields = ('job_template', 'inventory', 'credential')
            for field in lookup_fields:
                try:
                    name = params.pop(field)
                    result = tower_cli.get_resource(field).get(name=name)
                    params[field] = result['id']
                except exc.NotFound as excinfo:
                    module.fail_json(msg='Unable to launch job, {0}/{1} was not found: {2}'.format(field, name, excinfo), changed=False)
            result = job.launch(no_input=True, **params)
            json_output['id'] = result['id']
            json_output['status'] = result['status']
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
            module.fail_json(msg='Unable to launch job: {0}'.format(excinfo), changed=False)
    # NOTE(review): ``result`` here is the value bound inside the try block;
    # this assumes fail_json() never returns and that the launch result
    # always carries a 'changed' key -- confirm against tower-cli.
    json_output['changed'] = result['changed']
    module.exit_json(**json_output)
if __name__ == '__main__':
    main()
|
{
"content_hash": "cf87bbaf6a7d1f65fb6c30e8fe38ef6d",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 136,
"avg_line_length": 27.470967741935485,
"alnum_prop": 0.6014560826679192,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "b332edf36e3737b2fdbbabe3811050f127353e45",
"size": "4446",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/web_infrastructure/ansible_tower/tower_job_launch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
}
|
from GenericRequest import GenericRequest
from kol.database import SkillDatabase
from kol.manager import PatternManager
class UseSkillRequest(GenericRequest):
    """Request that casts a skill, optionally targeting another player.

    Issues a GET to ``runskillz.php`` and, after the request runs, pulls
    the results block out of the response HTML into ``responseData``.
    """
    def __init__(self, session, skillId, numTimes=1, targetPlayer=None):
        """
        Parameters
        ----------
        session : the active KoL session (supplies serverURL, pwd, userId)
        skillId : identifier of the skill to use
        numTimes : how many times to cast the skill (default 1)
        targetPlayer : player to target; defaults to the session's own user
        """
        super(UseSkillRequest, self).__init__(session)
        self.get = True
        self.url = session.serverURL + "runskillz.php"
        self.requestData["pwd"] = session.pwd
        self.requestData["action"] = "Skillz"
        self.requestData["whichskill"] = skillId
        self.requestData["ajax"] = 1
        self.requestData["quantity"] = numTimes
        # Use identity comparison with None (PEP 8); '!= None' invokes
        # __eq__ and is the non-idiomatic form.
        if targetPlayer is not None:
            self.requestData["targetplayer"] = targetPlayer
        else:
            # No explicit target: cast the skill on ourselves.
            self.requestData["targetplayer"] = session.userId

    def parseResponse(self):
        """Store the matched results text, if any, in responseData."""
        resultsPattern = PatternManager.getOrCompilePattern('results')
        match = resultsPattern.search(self.responseText)
        if match:
            results = match.group(1)
            self.responseData["results"] = results
|
{
"content_hash": "baf444227dab6fe759eacfdcbcb37612",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 72,
"avg_line_length": 41.04,
"alnum_prop": 0.6627680311890838,
"repo_name": "ijzer/cwbot-ndy",
"id": "4e8b2a42945bd3bed62610f4ed8bdba0528cd430",
"size": "1026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kol/request/UseSkillRequest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2986880"
}
],
"symlink_target": ""
}
|
"""
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
from __future__ import unicode_literals
from django.db.backends import util
from django.utils import six
from django.utils import tree
class InvalidQuery(Exception):
    """
    The query passed to raw isn't a safe query to use with raw.
    Also raised by ``select_related_descend`` when a field is both
    deferred and traversed via ``select_related``.
    """
    pass
class QueryWrapper(object):
    """
    Wraps a raw SQL fragment together with its parameters.

    Marks the contents as opaque SQL plus params so they can be passed
    straight into a where-clause. The ``data`` attribute holds the
    ``(sql, params)`` pair.
    """
    def __init__(self, sql, params):
        # Normalize params to a list so the pair is always (str, list).
        self.data = (sql, list(params))

    def as_sql(self, qn=None, connection=None):
        # The quote-name function and connection are accepted for API
        # compatibility but are not needed for a pre-rendered fragment.
        return self.data
class Q(tree.Node):
    """
    Encapsulates filters as objects that can then be combined logically
    (using & and |), and negated with ~.
    """
    # Connection types
    AND = 'AND'
    OR = 'OR'
    default = AND

    def __init__(self, *args, **kwargs):
        # Positional Q objects and keyword lookups all become children.
        children = list(args) + list(six.iteritems(kwargs))
        super(Q, self).__init__(children=children)

    def _combine(self, other, conn):
        # Both operands must be Q instances for a logical combination.
        if not isinstance(other, Q):
            raise TypeError(other)
        combined = type(self)()
        combined.connector = conn
        combined.add(self, conn)
        combined.add(other, conn)
        return combined

    def __or__(self, other):
        return self._combine(other, self.OR)

    def __and__(self, other):
        return self._combine(other, self.AND)

    def __invert__(self):
        negated = type(self)()
        negated.add(self, self.AND)
        negated.negate()
        return negated

    def clone(self):
        # Deep-copy children that know how to clone themselves; plain
        # (key, value) tuples are immutable enough to share.
        copied = self.__class__._new_instance(
            children=[], connector=self.connector, negated=self.negated)
        copied.children = [
            child.clone() if hasattr(child, 'clone') else child
            for child in self.children]
        return copied
class DeferredAttribute(object):
    """
    A wrapper for a deferred-loading field. When the value is read from this
    object the first time, the query is executed.
    """
    def __init__(self, field_name, model):
        # ``model`` is accepted but not stored; only the attribute name is
        # needed when the value is looked up.
        self.field_name = field_name
    def __get__(self, instance, owner):
        """
        Retrieves and caches the value from the datastore on the first lookup.
        Returns the cached value.
        """
        from django.db.models.fields import FieldDoesNotExist
        non_deferred_model = instance._meta.proxy_for_model
        opts = non_deferred_model._meta
        assert instance is not None
        data = instance.__dict__
        # If the instance dict does not yet hold a real value for this
        # field (the descriptor itself is the sentinel default), fetch it.
        if data.get(self.field_name, self) is self:
            # self.field_name is the attname of the field, but only() takes the
            # actual name, so we need to translate it here.
            try:
                f = opts.get_field_by_name(self.field_name)[0]
            except FieldDoesNotExist:
                f = [f for f in opts.fields if f.attname == self.field_name][0]
            name = f.name
            # Let's see if the field is part of the parent chain. If so we
            # might be able to reuse the already loaded value. Refs #18343.
            val = self._check_parent_chain(instance, name)
            if val is None:
                # We use only() instead of values() here because we want the
                # various data coersion methods (to_python(), etc.) to be
                # called here.
                val = getattr(
                    non_deferred_model._base_manager.only(name).using(
                        instance._state.db).get(pk=instance.pk),
                    self.field_name
                )
            data[self.field_name] = val
        return data[self.field_name]
    def __set__(self, instance, value):
        """
        Deferred loading attributes can be set normally (which means there will
        never be a database lookup involved.
        """
        instance.__dict__[self.field_name] = value
    def _check_parent_chain(self, instance, name):
        """
        Check if the field value can be fetched from a parent field already
        loaded in the instance. This can be done if the to-be fetched
        field is a primary key field.
        """
        opts = instance._meta
        f = opts.get_field_by_name(name)[0]
        link_field = opts.get_ancestor_link(f.model)
        # Primary-key fields (other than the parent link itself) share the
        # parent link's value, which is already loaded on the instance.
        if f.primary_key and f != link_field:
            return getattr(instance, link_field.attname)
        return None
def select_related_descend(field, restricted, requested, load_fields, reverse=False):
    """
    Returns True if this field should be used to descend deeper for
    select_related() purposes. Used by both the query construction code
    (sql.query.fill_related_selections()) and the model instance creation code
    (query.get_klass_info()).

    Arguments:
     * field - the field to be checked
     * restricted - a boolean field, indicating if the field list has been
       manually restricted using a requested clause)
     * requested - The select_related() dictionary.
     * load_fields - the set of fields to be loaded on this model
     * reverse - boolean, True if we are checking a reverse select related
    """
    # Only relation fields can be descended into at all.
    if not field.rel:
        return False
    # Never re-traverse a parent link in the forward direction.
    if field.rel.parent_link and not reverse:
        return False
    if restricted:
        # With an explicit select_related() dict, the field must appear
        # in it (under its reverse name when descending backwards).
        lookup_name = field.related_query_name() if reverse else field.name
        if lookup_name not in requested:
            return False
    elif field.null:
        # Unrestricted traversal skips nullable relations.
        return False
    if load_fields and field.name not in load_fields:
        # The field is deferred; explicitly requesting it too is an error.
        if restricted and field.name in requested:
            raise InvalidQuery("Field %s.%s cannot be both deferred"
                               " and traversed using select_related"
                               " at the same time." %
                               (field.model._meta.object_name, field.name))
        return False
    return True
# This function is needed because data descriptors must be defined on a class
# object, not an instance, to have any effect.
def deferred_class_factory(model, attrs):
    """
    Returns a class object that is a copy of "model" with the specified "attrs"
    being replaced with DeferredAttribute objects. The "pk_value" ties the
    deferred attributes to a particular instance of the model.
    """
    class Meta:
        # The generated class is a proxy so it shares the concrete model's
        # table and fields.
        proxy = True
        app_label = model._meta.app_label
    # The app_cache wants a unique name for each model, otherwise the new class
    # won't be created (we get an old one back). Therefore, we generate the
    # name using the passed in attrs. It's OK to reuse an existing class
    # object if the attrs are identical.
    name = "%s_Deferred_%s" % (model.__name__, '_'.join(sorted(list(attrs))))
    # Keep the generated name within database/identifier length limits.
    name = util.truncate_name(name, 80, 32)
    overrides = dict((attr, DeferredAttribute(attr, model)) for attr in attrs)
    overrides["Meta"] = Meta
    overrides["__module__"] = model.__module__
    overrides["_deferred"] = True
    return type(str(name), (model,), overrides)
# The above function is also used to unpickle model instances with deferred
# fields.
deferred_class_factory.__safe_for_unpickling__ = True
|
{
"content_hash": "9997f567590b70c1b6813f85a403befb",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 85,
"avg_line_length": 35.734299516908216,
"alnum_prop": 0.6145734757334055,
"repo_name": "adambrenecki/django",
"id": "2a92978beb0232220470e590e7bb48afbda7b91c",
"size": "7397",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/db/models/query_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50381"
},
{
"name": "JavaScript",
"bytes": "100819"
},
{
"name": "Python",
"bytes": "8829204"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
#TODO: Why is this loading templates from the /cards app index?
def index(request):
    """Render the site landing page."""
    return render(request, 'index.html')
def home(request):
    """Render the home page for AJAX requests, the landing page otherwise."""
    template = 'home.html' if request.is_ajax() else 'index.html'
    return render(request, template)
def auth_view(request):
    """Render the auth template named by the last segment of the request path."""
    # TODO: should this take a parameter or look the page up from the request?
    page_name = request.path.split('/')[-1]
    return render(request, 'auth/' + page_name)
|
{
"content_hash": "07ecc2c6b8a7fcc0328d7215d3b972e2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 96,
"avg_line_length": 29.41176470588235,
"alnum_prop": 0.74,
"repo_name": "techtyler/HoldEmBonusSimulator",
"id": "1f53fb2557a93098a24564245663386d303aed4c",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holdembonus/holdembonus/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "16421"
},
{
"name": "JavaScript",
"bytes": "20455"
},
{
"name": "Python",
"bytes": "15243"
}
],
"symlink_target": ""
}
|
try:
from PIL import Image
except ImportError:
import Image
def makeThumb(image, maxSize, method=3):
    """Resize a PIL image to fit into maxSize.

    Images larger than maxSize in either dimension are first center-cropped
    to a square (side = the smaller original dimension) and then resized to
    exactly maxSize.

    Arguments:
        image: PIL image to thumbnail
        maxSize: (width, height) bound for the result
        method: resample filter passed to Image.resize; the default 3 is
            PIL's Image.ANTIALIAS, matching the previous hard-coded filter
            (the parameter was formerly ignored)

    Returns:
        (thumbnail, size) tuple; the original image and its size are returned
        unchanged when it already fits within maxSize.
    """
    orig_size = image.size
    if orig_size[0] <= maxSize[0] and orig_size[1] <= maxSize[1]:
        # Already small enough: return unchanged.
        return (image, orig_size)
    min_extent = min(orig_size[0], orig_size[1])
    # Floor division: crop box coordinates must be ints (plain / produces a
    # float on Python 3); identical result for ints on Python 2.
    left = (orig_size[0] - min_extent) // 2
    top = (orig_size[1] - min_extent) // 2
    result = image.crop((left, top, left + min_extent, top + min_extent))
    result = result.resize(maxSize, method)
    return (result, maxSize)
|
{
"content_hash": "b86811443e6c1fcd47d679237eeeffaf",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 32.578947368421055,
"alnum_prop": 0.5993537964458805,
"repo_name": "svetlyak40wt/django-faces",
"id": "2b0b84ac36ea0c5a75ed10aff36d49b18d11ebd9",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_faces/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24329"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, unicode_literals
__doc__ = """
Opens a new tab with all possible combinations of currently selected glyphs.
"""

# NOTE(review): `Glyphs` is the GlyphsApp scripting-API global; this script
# assumes it runs inside the Glyphs macro environment — confirm before reuse.
Font = Glyphs.font
selectedLayers = Font.selectedLayers
# "/name" is the edit-tab escape for a glyph by name; skip layers whose
# parent glyph has no name attribute.
namesOfSelectedGlyphs = ["/%s" % l.parent.name for l in selectedLayers if hasattr(l.parent, 'name')]

# Build one line per left glyph: every left/right pairing, then the left
# glyph alone to end the line.
editString = ""
for leftGlyphName in namesOfSelectedGlyphs:
    for rightGlyphName in namesOfSelectedGlyphs:
        editString += (leftGlyphName + rightGlyphName)
    editString += (leftGlyphName + "\n")

# in case last line fails, the text is in the macro window:
Glyphs.clearLog() # clears macro window log
print(editString)

# opens new Edit tab:
Font.newTab(editString)
|
{
"content_hash": "ff66752a86159e037a241a5a4300d469",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 100,
"avg_line_length": 31.09090909090909,
"alnum_prop": 0.7543859649122807,
"repo_name": "mekkablue/Glyphs-Scripts",
"id": "d1f5dd9dbc56626afb9c2d5049435e69a6cec23c",
"size": "761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Kerning/New Tab with Selected Glyph Combos.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2142474"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import copy
import errno
import logging
import os
import pkgutil
import threading
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict, namedtuple
from contextlib import contextmanager
import six
from twitter.common.collections import OrderedSet
from pants.backend.jvm.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.backend.jvm.subsystems.jar_dependency_management import (JarDependencyManagement,
PinnedJarArtifactSet)
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.base.generator import Generator, TemplateData
from pants.base.revision import Revision
from pants.build_graph.target import Target
from pants.util.dirutil import safe_mkdir, safe_open
# A single resolved module from an ivy report: its ref, the path of its
# resolved artifact, and the refs of the modules that depend on (call) it.
IvyModule = namedtuple('IvyModule', ['ref', 'artifact', 'callers'])


logger = logging.getLogger(__name__)


class IvyResolveMappingError(Exception):
  """Raised when there is a failure mapping the ivy resolve results to pants objects."""
class IvyModuleRef(object):
  """Identifies an ivy module by org, name, rev, classifier and extension."""

  # latest.integration is ivy magic meaning "just get the latest version"
  _ANY_REV = 'latest.integration'

  def __init__(self, org, name, rev, classifier=None, ext=None):
    self.org = org
    self.name = name
    self.rev = rev
    self.classifier = classifier
    self.ext = ext or 'jar'
    # Identity tuple backing equality, hashing and string rendering.
    self._id = (self.org, self.name, self.rev, self.classifier, self.ext)

  def __eq__(self, other):
    if not isinstance(other, IvyModuleRef):
      return False
    return self._id == other._id

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    return hash(self._id)

  def __str__(self):
    rendered = ':'.join(component or '' for component in self._id)
    return 'IvyModuleRef({})'.format(rendered)

  def __repr__(self):
    template = 'IvyModuleRef(org={!r}, name={!r}, rev={!r}, classifier={!r}, ext={!r})'
    return template.format(*self._id)

  def __cmp__(self, other):
    # We can't just re-use __repr__ or __str_ because we want to order rev last
    return cmp((self.org, self.name, self.classifier, self.ext, self.rev),
               (other.org, other.name, other.classifier, other.ext, other.rev))

  @property
  def caller_key(self):
    """This returns an identifier for an IvyModuleRef that only retains the caller org and name.

    Ivy represents dependees as `<caller/>`'s with just org and name and rev information.
    This method returns a `<caller/>` representation of the current ref.
    """
    return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV)

  @property
  def unversioned(self):
    """This returns an identifier for an IvyModuleRef without version information.

    It's useful because ivy might return information about a different version of a dependency than
    the one we request, and we want to ensure that all requesters of any version of that dependency
    are able to learn about it.
    """
    return IvyModuleRef(name=self.name, org=self.org, rev=self._ANY_REV, classifier=self.classifier,
                        ext=self.ext)
class IvyInfo(object):
  """In-memory index of an ivy resolve report for a single ivy conf.

  Tracks modules by ref, the mapping from unversioned refs to the winning
  versioned refs, the caller -> dependency edges, and the artifacts resolved
  for each unversioned ref.
  """

  def __init__(self, conf):
    self._conf = conf
    self.modules_by_ref = {}  # Map from ref to referenced module.
    self.refs_by_unversioned_refs = {}  # Map from unversioned ref to the resolved versioned ref
    # Map from ref of caller to refs of modules required by that caller.
    self._deps_by_caller = defaultdict(OrderedSet)
    # Map from _unversioned_ ref to OrderedSet of IvyArtifact instances.
    self._artifacts_by_ref = defaultdict(OrderedSet)

  def add_module(self, module):
    """Index an IvyModule: record its ref, its callers' edges and its artifact.

    :raises IvyResolveMappingError: if the module's versioned or unversioned
      ref has already been recorded.
    """
    if not module.artifact:
      # Module was evicted, so do not record information about it
      return

    ref_unversioned = module.ref.unversioned
    if ref_unversioned in self.refs_by_unversioned_refs:
      raise IvyResolveMappingError('Already defined module {}, as rev {}!'
                                   .format(ref_unversioned, module.ref.rev))
    if module.ref in self.modules_by_ref:
      raise IvyResolveMappingError('Already defined module {}, would be overwritten!'
                                   .format(module.ref))
    self.refs_by_unversioned_refs[ref_unversioned] = module.ref
    self.modules_by_ref[module.ref] = module

    for caller in module.callers:
      self._deps_by_caller[caller.caller_key].add(module.ref)
    self._artifacts_by_ref[ref_unversioned].add(module.artifact)

  def _do_traverse_dependency_graph(self, ref, collector, memo, visited):
    # Memoized depth-first walk over the caller -> dependency edges.
    memoized_value = memo.get(ref)
    if memoized_value:
      return memoized_value

    if ref in visited:
      # Ivy allows for circular dependencies
      # If we're here, that means we're resolving something that
      # transitively depends on itself
      return set()

    visited.add(ref)
    acc = collector(ref)
    # NB(zundel): ivy does not return deps in a consistent order for the same module for
    # different resolves. Sort them to get consistency and prevent cache invalidation.
    # See https://github.com/pantsbuild/pants/issues/2607
    deps = sorted(self._deps_by_caller.get(ref.caller_key, ()))
    for dep in deps:
      acc.update(self._do_traverse_dependency_graph(dep, collector, memo, visited))

    memo[ref] = acc
    return acc

  def traverse_dependency_graph(self, ref, collector, memo=None):
    """Traverses module graph, starting with ref, collecting values for each ref into the sets
    created by the collector function.

    :param ref an IvyModuleRef to start traversing the ivy dependency graph
    :param collector a function that takes a ref and returns a new set of values to collect for
    that ref, which will also be updated with all the dependencies accumulated values
    :param memo is a dict of ref -> set that memoizes the results of each node in the graph.
    If provided, allows for retaining cache across calls.
    :returns the accumulated set for ref
    """
    # Normalize to the winning (resolved) versioned ref when one is recorded.
    resolved_ref = self.refs_by_unversioned_refs.get(ref.unversioned)
    if resolved_ref:
      ref = resolved_ref
    if memo is None:
      memo = dict()
    visited = set()
    return self._do_traverse_dependency_graph(ref, collector, memo, visited)

  def get_resolved_jars_for_jar_library(self, jar_library, memo=None):
    """Collects jars for the passed jar_library.

    Because artifacts are only fetched for the "winning" version of a module, the artifacts
    will not always represent the version originally declared by the library.

    This method is transitive within the library's jar_dependencies, but will NOT
    walk into its non-jar dependencies.

    :param jar_library A JarLibrary to collect the transitive artifacts for.
    :param memo see `traverse_dependency_graph`
    :returns: all the artifacts for all of the jars in this library, including transitive deps
    :rtype: list of :class:`pants.backend.jvm.jar_dependency_utils.ResolvedJar`
    """
    def to_resolved_jar(jar_ref, jar_path):
      # Adapt an (IvyModuleRef, cache path) pair into a pants ResolvedJar.
      return ResolvedJar(coordinate=M2Coordinate(org=jar_ref.org,
                                                 name=jar_ref.name,
                                                 rev=jar_ref.rev,
                                                 classifier=jar_ref.classifier,
                                                 ext=jar_ref.ext),
                         cache_path=jar_path)
    resolved_jars = OrderedSet()
    def create_collection(dep):
      return OrderedSet([dep])
    for jar in jar_library.jar_dependencies:
      # For non-default confs, ivy encodes the conf as the classifier.
      classifier = jar.classifier if self._conf == 'default' else self._conf
      jar_module_ref = IvyModuleRef(jar.org, jar.name, jar.rev, classifier)
      for module_ref in self.traverse_dependency_graph(jar_module_ref, create_collection, memo):
        for artifact_path in self._artifacts_by_ref[module_ref.unversioned]:
          resolved_jars.add(to_resolved_jar(module_ref, artifact_path))
    return resolved_jars
class IvyUtils(object):
  """Useful methods related to interaction with ivy."""

  # Serializes access to the ivy subprocess, which is not safe to invoke
  # concurrently — TODO confirm the intended scope of this lock.
  ivy_lock = threading.RLock()

  # Org used for synthetic resolve modules generated by pants itself.
  INTERNAL_ORG_NAME = 'internal'

  class IvyError(Exception):
    """Indicates an error preparing an ivy operation."""

  class IvyResolveReportError(IvyError):
    """Indicates that an ivy report cannot be found."""

  class IvyResolveConflictingDepsError(IvyError):
    """Indicates two or more locally declared dependencies conflict."""

  class BadRevisionError(IvyError):
    """Indicates an unparseable version number."""

  @staticmethod
  def _generate_exclude_template(exclude):
    """TemplateData view of an Exclude for rendering into ivy.xml."""
    return TemplateData(org=exclude.org, name=exclude.name)

  @staticmethod
  def _generate_override_template(jar):
    """TemplateData view of a coordinate rendered as an ivy <override/>."""
    return TemplateData(org=jar.org, module=jar.name, version=jar.rev)

  @staticmethod
  @contextmanager
  def cachepath(path):
    """Yields an iterator over the non-empty classpath entries listed in the file at `path`.

    Yields an empty tuple when the file does not exist. The file stays open
    for the duration of the caller's block, so the lazy generator can read it.
    """
    if not os.path.exists(path):
      yield ()
    else:
      with safe_open(path, 'r') as cp:
        yield (path.strip() for path in cp.read().split(os.pathsep) if path.strip())

  @classmethod
  def symlink_cachepath(cls, ivy_cache_dir, inpath, symlink_dir, outpath):
    """Symlinks all paths listed in inpath that are under ivy_cache_dir into symlink_dir.

    If there is an existing symlink for a file under inpath, it is used rather than creating
    a new symlink. Preserves all other paths. Writes the resulting paths to outpath.
    Returns a map of path -> symlink to that path.
    """
    safe_mkdir(symlink_dir)
    # The ivy_cache_dir might itself be a symlink. In this case, ivy may return paths that
    # reference the realpath of the .jar file after it is resolved in the cache dir. To handle
    # this case, add both the symlink'ed path and the realpath to the jar to the symlink map.
    real_ivy_cache_dir = os.path.realpath(ivy_cache_dir)
    symlink_map = OrderedDict()
    with safe_open(inpath, 'r') as infile:
      inpaths = filter(None, infile.read().strip().split(os.pathsep))
      paths = OrderedSet([os.path.realpath(path) for path in inpaths])
    for path in paths:
      if path.startswith(real_ivy_cache_dir):
        symlink_map[path] = os.path.join(symlink_dir, os.path.relpath(path, real_ivy_cache_dir))
      else:
        # This path is outside the cache. We won't symlink it.
        symlink_map[path] = path
    # Create symlinks for paths in the ivy cache dir.
    for path, symlink in six.iteritems(symlink_map):
      if path == symlink:
        # Skip paths that aren't going to be symlinked.
        continue
      safe_mkdir(os.path.dirname(symlink))
      try:
        os.symlink(path, symlink)
      except OSError as e:
        # We don't delete and recreate the symlink, as this may break concurrently executing code.
        if e.errno != errno.EEXIST:
          raise
    # (re)create the classpath with all of the paths
    with safe_open(outpath, 'w') as outfile:
      outfile.write(':'.join(OrderedSet(symlink_map.values())))
    return dict(symlink_map)

  @staticmethod
  def identify(targets):
    """Returns an (org, name) pair identifying `targets`.

    A single exported jvm target is identified by its published provides;
    anything else gets a synthetic internal identity.
    """
    targets = list(targets)
    if len(targets) == 1 and targets[0].is_jvm and getattr(targets[0], 'provides', None):
      return targets[0].provides.org, targets[0].provides.name
    else:
      return IvyUtils.INTERNAL_ORG_NAME, Target.maybe_readable_identify(targets)

  @classmethod
  def xml_report_path(cls, cache_dir, resolve_hash_name, conf):
    """The path to the xml report ivy creates after a retrieve.

    :param string cache_dir: The path of the ivy cache dir used for resolves.
    :param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet used for
                                     resolution.
    :param string conf: The ivy conf name (e.g. "default").
    :returns: The report path.
    :rtype: string
    """
    return os.path.join(cache_dir, '{}-{}-{}.xml'.format(IvyUtils.INTERNAL_ORG_NAME,
                                                         resolve_hash_name, conf))

  @classmethod
  def parse_xml_report(cls, cache_dir, resolve_hash_name, conf):
    """Parse the ivy xml report corresponding to the name passed to ivy.

    :param string cache_dir: The path of the ivy cache dir used for resolves.
    :param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet used for
                                     resolution; if `None` returns `None` instead of attempting to
                                     parse any report.
    :param string conf: the ivy conf name (e.g. "default")
    :returns: The info in the xml report or None if target is empty.
    :rtype: :class:`IvyInfo`
    :raises: :class:`IvyResolveMappingError` if no report exists.
    """
    # TODO(John Sirois): Cleanup acceptance of None, this is IvyResolve's concern, not ours.
    if not resolve_hash_name:
      return None
    path = cls.xml_report_path(cache_dir, resolve_hash_name, conf)
    if not os.path.exists(path):
      raise cls.IvyResolveReportError('Missing expected ivy output file {}'.format(path))
    return cls._parse_xml_report(conf, path)

  @classmethod
  def _parse_xml_report(cls, conf, path):
    """Parse the ivy xml report at `path` into an IvyInfo for `conf`."""
    logger.debug("Parsing ivy report {}".format(path))
    ret = IvyInfo(conf)
    etree = ET.parse(path)
    doc = etree.getroot()
    for module in doc.findall('dependencies/module'):
      org = module.get('organisation')
      name = module.get('name')
      for revision in module.findall('revision'):
        rev = revision.get('name')
        # Dependees of this module, as bare (org, name, rev) caller refs.
        callers = []
        for caller in revision.findall('caller'):
          callers.append(IvyModuleRef(caller.get('organisation'),
                                      caller.get('name'),
                                      caller.get('callerrev')))
        for artifact in revision.findall('artifacts/artifact'):
          classifier = artifact.get('extra-classifier')
          ext = artifact.get('ext')
          ivy_module_ref = IvyModuleRef(org=org, name=name, rev=rev,
                                        classifier=classifier, ext=ext)
          artifact_cache_path = artifact.get('location')
          ivy_module = IvyModule(ivy_module_ref, artifact_cache_path, callers)
          ret.add_module(ivy_module)
    return ret

  @classmethod
  def generate_ivy(cls, targets, jars, excludes, ivyxml, confs, resolve_hash_name=None):
    """Render an ivy.xml file describing `jars` and `excludes` to the path `ivyxml`."""
    if resolve_hash_name:
      org = IvyUtils.INTERNAL_ORG_NAME
      name = resolve_hash_name
    else:
      org, name = cls.identify(targets)

    extra_configurations = [conf for conf in confs if conf and conf != 'default']

    # NOTE(review): the name `jars` is rebound below — first to each
    # per-coordinate bucket while grouping, then again as the loop variable
    # over the buckets. Iteration over the original argument is unaffected
    # (the iterator is captured when the loop starts), but the argument is
    # shadowed from here on.
    jars_by_key = OrderedDict()
    for jar in jars:
      jars = jars_by_key.setdefault((jar.org, jar.name), [])
      jars.append(jar)

    manager = JarDependencyManagement.global_instance()
    artifact_set = PinnedJarArtifactSet(manager.for_targets(targets))
    for jars in jars_by_key.values():
      for i, dep in enumerate(jars):
        direct_coord = M2Coordinate.create(dep)
        managed_coord = artifact_set[direct_coord]
        if direct_coord.rev != managed_coord.rev:
          # It may be necessary to actually change the version number of the jar we want to resolve
          # here, because overrides do not apply directly (they are exclusively transitive). This is
          # actually a good thing, because it gives us more control over what happens.
          coord = manager.resolve_version_conflict(managed_coord, direct_coord, force=dep.force)
          dep = copy.copy(dep)
          dep.rev = coord.rev
          jars[i] = dep
        elif dep.force:
          # If this dependency is marked as 'force' and there is no version conflict, use the normal
          # pants behavior for 'force'.
          artifact_set.put(direct_coord)

    dependencies = [cls._generate_jar_template(jars) for jars in jars_by_key.values()]

    # As it turns out force is not transitive - it only works for dependencies pants knows about
    # directly (declared in BUILD files - present in generated ivy.xml). The user-level ivy docs
    # don't make this clear [1], but the source code docs do (see isForce docs) [2]. I was able to
    # edit the generated ivy.xml and use the override feature [3] though and that does work
    # transitively as you'd hope.
    #
    # [1] http://ant.apache.org/ivy/history/2.3.0/settings/conflict-managers.html
    # [2] https://svn.apache.org/repos/asf/ant/ivy/core/branches/2.3.0/
    #     src/java/org/apache/ivy/core/module/descriptor/DependencyDescriptor.java
    # [3] http://ant.apache.org/ivy/history/2.3.0/ivyfile/override.html
    overrides = [cls._generate_override_template(coord) for coord in artifact_set]

    excludes = [cls._generate_exclude_template(exclude) for exclude in excludes]

    template_data = TemplateData(
      org=org,
      module=name,
      extra_configurations=extra_configurations,
      dependencies=dependencies,
      excludes=excludes,
      overrides=overrides)

    template_relpath = os.path.join('templates', 'ivy_utils', 'ivy.mustache')
    template_text = pkgutil.get_data(__name__, template_relpath)
    generator = Generator(template_text, lib=template_data)
    with safe_open(ivyxml, 'w') as output:
      generator.write(output)

  @classmethod
  def calculate_classpath(cls, targets, gather_excludes=True):
    """Walks `targets` and collects the (jars, global_excludes) for an ivy resolve.

    Jar conflicts between targets are reconciled via `_resolve_conflict`;
    exported (provided) targets are always excluded so the source version wins
    over its own published binary.
    """
    jars = OrderedDict()
    global_excludes = set()
    provide_excludes = set()
    targets_processed = set()

    # Support the ivy force concept when we sanely can for internal dep conflicts.
    # TODO(John Sirois): Consider supporting / implementing the configured ivy revision picking
    # strategy generally.
    def add_jar(jar):
      # TODO(John Sirois): Maven allows for depending on an artifact at one rev and one of its
      # attachments (classified artifacts) at another.  Ivy does not, allow this, the dependency
      # can carry only 1 rev and that hosts multiple artifacts for that rev.  This conflict
      # resolution happens at the classifier level, allowing skew in a
      # multi-artifact/multi-classifier dependency.  We only find out about the skew later in
      # `_generate_jar_template` below which will blow up with a conflict.  Move this logic closer
      # together to get a more clear validate, then emit ivy.xml then resolve flow instead of the
      # spread-out validations happening here.
      # See: https://github.com/pantsbuild/pants/issues/2239
      coordinate = (jar.org, jar.name, jar.classifier)
      existing = jars.get(coordinate)
      jars[coordinate] = jar if not existing else cls._resolve_conflict(existing=existing,
                                                                        proposed=jar)

    def collect_jars(target):
      # Only JarLibrary targets contribute jars directly.
      if isinstance(target, JarLibrary):
        for jar in target.jar_dependencies:
          add_jar(jar)

    def collect_excludes(target):
      target_excludes = target.payload.get_field_value('excludes')
      if target_excludes:
        global_excludes.update(target_excludes)

    def collect_provide_excludes(target):
      if not target.is_exported:
        return
      logger.debug('Automatically excluding jar {}.{}, which is provided by {}'.format(
        target.provides.org, target.provides.name, target))
      provide_excludes.add(Exclude(org=target.provides.org, name=target.provides.name))

    def collect_elements(target):
      targets_processed.add(target)
      collect_jars(target)
      if gather_excludes:
        collect_excludes(target)
      collect_provide_excludes(target)

    for target in targets:
      target.walk(collect_elements, predicate=lambda target: target not in targets_processed)

    # If a source dep is exported (ie, has a provides clause), it should always override
    # remote/binary versions of itself, ie "round trip" dependencies.
    # TODO: Move back to applying provides excludes as target-level excludes when they are no
    # longer global.
    if provide_excludes:
      additional_excludes = tuple(provide_excludes)
      for coordinate, jar in jars.items():
        jar.excludes += additional_excludes

    return jars.values(), global_excludes

  @classmethod
  def _resolve_conflict(cls, existing, proposed):
    """Pick the winner between two locally declared versions of the same jar.

    Honors the `force` flag on either side; otherwise the higher rev wins.

    :raises IvyResolveConflictingDepsError: when both sides force different revs.
    """
    if existing.rev is None:
      return proposed
    if proposed.rev is None:
      return existing
    if proposed == existing:
      if proposed.force:
        return proposed
      return existing
    elif existing.force and proposed.force:
      raise cls.IvyResolveConflictingDepsError('Cannot force {}#{};{} to both rev {} and {}'.format(
        proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
      ))
    elif existing.force:
      logger.debug('Ignoring rev {} for {}#{};{} already forced to {}'.format(
        proposed.rev, proposed.org, proposed.name, proposed.classifier or '', existing.rev
      ))
      return existing
    elif proposed.force:
      logger.debug('Forcing {}#{};{} from {} to {}'.format(
        proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
      ))
      return proposed
    else:
      if Revision.lenient(proposed.rev) > Revision.lenient(existing.rev):
        logger.debug('Upgrading {}#{};{} from rev {} to {}'.format(
          proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev,
        ))
        return proposed
      else:
        return existing

  @classmethod
  def _generate_jar_template(cls, jars):
    """Build the TemplateData for a single ivy <dependency/> from the jars
    sharing one (org, name) coordinate.

    :raises IvyResolveConflictingDepsError: if the jars disagree on rev or flags.
    """
    Dependency = namedtuple('DependencyAttributes', ['org', 'name', 'rev', 'mutable', 'force',
                                                     'transitive'])
    global_dep_attributes = set(Dependency(org=jar.org,
                                           name=jar.name,
                                           rev=jar.rev,
                                           mutable=jar.mutable,
                                           force=jar.force,
                                           transitive=jar.transitive)
                                for jar in jars)
    if len(global_dep_attributes) != 1:
      # TODO(John Sirois): Need to provide information about where these came from - could be
      # far-flung JarLibrary targets.  The jars here were collected from targets via
      # `calculate_classpath` above and so merging of the 2 could provide the context needed.
      # See: https://github.com/pantsbuild/pants/issues/2239
      conflicting_dependencies = sorted(str(g) for g in global_dep_attributes)
      raise cls.IvyResolveConflictingDepsError('Found conflicting dependencies:\n\t{}'
                                               .format('\n\t'.join(conflicting_dependencies)))
    jar_attributes = global_dep_attributes.pop()

    excludes = set()
    for jar in jars:
      excludes.update(jar.excludes)

    any_have_url = False

    Artifact = namedtuple('Artifact', ['name', 'type_', 'ext', 'url', 'classifier'])
    artifacts = OrderedDict()
    for jar in jars:
      ext = jar.ext
      url = jar.url
      if url:
        any_have_url = True
      classifier = jar.classifier
      artifact = Artifact(name=jar.name,
                          type_=ext or 'jar',
                          ext=ext,
                          url=url,
                          classifier=classifier)
      artifacts[(ext, url, classifier)] = artifact

    if len(artifacts) == 1:
      # If the only artifact has no attributes that we need a nested <artifact/> for, just emit
      # a <dependency/>.
      artifacts.pop((None, None, None), None)

    template = TemplateData(
      org=jar_attributes.org,
      module=jar_attributes.name,
      version=jar_attributes.rev,
      mutable=jar_attributes.mutable,
      force=jar_attributes.force,
      transitive=jar_attributes.transitive,
      artifacts=artifacts.values(),
      any_have_url=any_have_url,
      excludes=[cls._generate_exclude_template(exclude) for exclude in excludes])
    return template
|
{
"content_hash": "fc8cdf23834eaada379f0f2675829807",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 100,
"avg_line_length": 41.98947368421052,
"alnum_prop": 0.656430183003259,
"repo_name": "dturner-tw/pants",
"id": "bf83e902873156f1cfe155c3438705d0b444b70b",
"size": "24081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/jvm/ivy_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11538"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1849"
},
{
"name": "HTML",
"bytes": "70358"
},
{
"name": "Java",
"bytes": "293253"
},
{
"name": "JavaScript",
"bytes": "31042"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4404984"
},
{
"name": "Scala",
"bytes": "85217"
},
{
"name": "Shell",
"bytes": "50774"
},
{
"name": "Thrift",
"bytes": "2919"
}
],
"symlink_target": ""
}
|
"""
Manages addition of mol2 output information to segments.
"""
################################### MODULES ###################################
from __future__ import absolute_import,division,print_function,unicode_literals
from .TrajOutput import TrajOutput
################################### CLASSES ###################################
class Mol2TrajOutput(TrajOutput):
    """
    Manages addition of mol2 output information to segments.
    """

    def __init__(self, manual_bonds=False, **kwargs):
        """
        Initializes.

        Arguments:
          manual_bonds (bool): Write bonds to mol2 manually
          kwargs (dict): Additional keyword arguments
        """
        self.manual_bonds = manual_bonds
        # Name the base class explicitly: ``super(self.__class__, self)``
        # recurses infinitely if this class is ever subclassed, because
        # self.__class__ is then the subclass, not Mol2TrajOutput.
        super(Mol2TrajOutput, self).__init__(**kwargs)

    def receive_segment(self, **kwargs):
        """
        Receives a trajectory segment and sends to each target.

        Arguments:
          kwargs (dict): Additional keyword arguments
        """
        import os
        while True:
            segment = yield
            segment_mol2 = "{0}/{1:04d}/{1:04d}{2}.mol2".format(self.outpath,
              int(segment.number), self.suffix)
            # Only request output when it is missing or a rewrite is forced.
            if not os.path.isfile(segment_mol2) or self.force:
                segment.outputs.append(
                  dict(
                    format    = "mol2",
                    filename  = segment_mol2,
                    selection = self.selection,
                    first     = 0,
                    last      = 0))
                if self.manual_bonds:
                    segment.outputs[-1]["format"] = "mol2_manual_bonds"
            for target in self.targets:
                target.send(segment)

    @staticmethod
    def add_subparser(level1_subparser, level2_subparsers, level3_classes):
        """
        Adds subparser for this input format to nascent parser.

        Arguments:
          level1_subparser (Subparser): Level 1 subparser to which level
            2 subparser will be added
          level2_subparsers (Subparsers): Nascent collection of level 2
            subparsers to which level 2 subparser will be added
          level3_classes (list): Classes for which level 3 subparsers
            will be added

        Returns:
          (*Subparser*, *Subparsers*): New level 2 subparser and
          associated collection of level 3 subparsers
        """
        level2_subparser = level2_subparsers.add_parser(
          name  = "mol2",
          usage = "convert.py {0} mol2".format(level1_subparser.name),
          help  = "mol2 output")
        setattr(level2_subparser, "name", "mol2")
        level3_subparsers = level2_subparser.add_subparsers(
          title = "Converter")
        for level3_class in level3_classes:
            level3_subparser = level3_class.add_subparser(level1_subparser,
              level2_subparser, level3_subparsers)
            arg_groups = {ag.title: ag
                          for ag in level3_subparser._action_groups}
            # The manual-bonds switch only applies to the vmd converter.
            if level3_subparser.name == "vmd":
                arg_groups["action"].add_argument("--manual-bonds",
                  action = "store_true",
                  dest   = "manual_bonds",
                  help   = "Write bonds to mol2 manually; useful for "
                           "topologies in which atoms are not well-ordered")
            Mol2TrajOutput.add_shared_args(level3_subparser)
            level3_subparser.set_defaults(output_coroutine=Mol2TrajOutput)
        return level2_subparser, level3_subparsers
|
{
"content_hash": "8bb34621912c7b14d2d0e5c56133b27d",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 79,
"avg_line_length": 37.463157894736845,
"alnum_prop": 0.5487496487777466,
"repo_name": "KarlTDebiec/md_format_converter",
"id": "05581a36fa0802a15b8baec866deb96af0ef7c21",
"size": "3820",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Mol2TrajOutput.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "45195"
},
{
"name": "Shell",
"bytes": "660"
},
{
"name": "Tcl",
"bytes": "8081"
}
],
"symlink_target": ""
}
|
import sys
import yahooscraper as ys
from datetime import datetime, date
from urllib.parse import urljoin
# Environment variables
USERNAME_ENV = 'YAHOO_USERNAME'
PASSWORD_ENV = 'YAHOO_PASSWORD'

# Command-line args
REQUIRED_ARGS = [
    '<league_id>',
    '<team_id>'
]
OPTIONAL_ARGS = []

# Error messages
LOGIN_ERROR_MSG = 'Failed to log in'


def usage():
    """
    Print a usage message and exit.
    """
    usage_line = ' '.join((
        'Usage: python',
        sys.argv[0],
        ' '.join(REQUIRED_ARGS),
        ' '.join(OPTIONAL_ARGS)))
    env_line = 'Environment variables %s and %s must also be set' % (
        USERNAME_ENV,
        PASSWORD_ENV)
    sys.exit('\n\n'.join((usage_line, env_line)))
def required_num_args():
    """Return the range of valid argv lengths (program name included)."""
    lo = len(REQUIRED_ARGS) + 1
    hi = lo + len(OPTIONAL_ARGS)
    return range(lo, hi + 1)
def parsed_and_bounded_arg(i, max, min, parse):
    """
    Returns parsed and bounded arg from argv.

    The `parse` parameter is a single-argument function which is called with
    the arg. The output of this function is only returned if it is between
    min and max.

    If parse fails, arg is missing, or arg is not within bounds, None is
    returned.
    """
    if len(sys.argv) <= i:
        return None
    try:
        parsed_arg = parse(sys.argv[i])
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # SystemExit and KeyboardInterrupt.
        return None
    return parsed_arg if min <= parsed_arg <= max else None
def date_from_argv(i, max, min=None):
    """
    Return the date at sys.argv[i] (YYYY-MM-DD format), bounded to [min, max].

    `min` defaults to today's date, evaluated at call time. (A default of
    `date.today()` in the signature would be evaluated once at import time
    and go stale for long-running processes.)
    """
    if min is None:
        min = date.today()
    return parsed_and_bounded_arg(
        i, max, min,
        lambda arg: datetime.strptime(arg, '%Y-%m-%d').date())
def int_from_argv(i, max, min=1):
    """Return the int at sys.argv[i] if within [min, max], else None."""
    return parsed_and_bounded_arg(i, max, min, int)
def output_team_info(session, league_id, team_id):
    """
    Print the league and team name for the given fantasy team.
    """
    team_url = ys.fantasy.team.url('nba', league_id, team_id)
    response = session.get(team_url)
    html = response.text
    league_name = ys.fantasy.team.league(html)
    team_name = ys.fantasy.team.team(html)
    print('%s - %s:\n' % (league_name, team_name))
|
{
"content_hash": "8577f6eb32dc8eb226022cd730d304b5",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 76,
"avg_line_length": 24.76829268292683,
"alnum_prop": 0.605120630231413,
"repo_name": "jbrudvik/yahoo-fantasy-basketball",
"id": "edba2b17bbd63147e2694efe8a56a21fbc7e8bb6",
"size": "2031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5397"
}
],
"symlink_target": ""
}
|
"""Support for RESTful API sensors."""
import logging
import json
import voluptuous as vol
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from homeassistant.components.sensor import PLATFORM_SCHEMA, DEVICE_CLASSES_SCHEMA
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_FORCE_UPDATE,
CONF_HEADERS,
CONF_NAME,
CONF_METHOD,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_RESOURCE,
CONF_RESOURCE_TEMPLATE,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_TIMEOUT,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
CONF_DEVICE_CLASS,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Defaults applied when the corresponding option is absent from config.
DEFAULT_METHOD = "GET"
DEFAULT_NAME = "REST Sensor"
DEFAULT_VERIFY_SSL = True
DEFAULT_FORCE_UPDATE = False
DEFAULT_TIMEOUT = 10
CONF_JSON_ATTRS = "json_attributes"
# Only these HTTP methods are accepted by the schema below.
METHODS = ["POST", "GET"]
# Platform configuration schema: resource and resource_template are mutually
# exclusive (vol.Exclusive), and at least one must be present (vol.All below).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Exclusive(CONF_RESOURCE, CONF_RESOURCE): cv.url,
        vol.Exclusive(CONF_RESOURCE_TEMPLATE, CONF_RESOURCE): cv.template,
        vol.Optional(CONF_AUTHENTICATION): vol.In(
            [HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
        ),
        vol.Optional(CONF_HEADERS): vol.Schema({cv.string: cv.string}),
        vol.Optional(CONF_JSON_ATTRS, default=[]): cv.ensure_list_csv,
        vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.In(METHODS),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_PAYLOAD): cv.string,
        vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
        vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
        vol.Optional(CONF_USERNAME): cv.string,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
        vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
    }
)
PLATFORM_SCHEMA = vol.All(
    cv.has_at_least_one_key(CONF_RESOURCE, CONF_RESOURCE_TEMPLATE), PLATFORM_SCHEMA
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the RESTful sensor.

    Reads the validated platform config, builds the HTTP auth and RestData
    fetcher, performs one synchronous fetch, and registers a RestSensor.
    Raises PlatformNotReady when the first fetch yields no data.
    """
    name = config.get(CONF_NAME)
    resource = config.get(CONF_RESOURCE)
    resource_template = config.get(CONF_RESOURCE_TEMPLATE)
    method = config.get(CONF_METHOD)
    payload = config.get(CONF_PAYLOAD)
    verify_ssl = config.get(CONF_VERIFY_SSL)
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    headers = config.get(CONF_HEADERS)
    unit = config.get(CONF_UNIT_OF_MEASUREMENT)
    device_class = config.get(CONF_DEVICE_CLASS)
    value_template = config.get(CONF_VALUE_TEMPLATE)
    json_attrs = config.get(CONF_JSON_ATTRS)
    force_update = config.get(CONF_FORCE_UPDATE)
    timeout = config.get(CONF_TIMEOUT)
    if value_template is not None:
        value_template.hass = hass
    if resource_template is not None:
        # Templated URL: render once now for the initial availability check;
        # RestSensor.update() re-renders it on every poll.
        resource_template.hass = hass
        resource = resource_template.render()
    if username and password:
        if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
            auth = HTTPDigestAuth(username, password)
        else:
            # Basic auth is the default when credentials are given but no
            # authentication type was configured.
            auth = HTTPBasicAuth(username, password)
    else:
        auth = None
    rest = RestData(method, resource, auth, headers, payload, verify_ssl, timeout)
    rest.update()
    if rest.data is None:
        raise PlatformNotReady
    # Must update the sensor now (including fetching the rest resource) to
    # ensure it's updating its state.
    add_entities(
        [
            RestSensor(
                hass,
                rest,
                name,
                unit,
                device_class,
                value_template,
                json_attrs,
                force_update,
                resource_template,
            )
        ],
        True,
    )
class RestSensor(Entity):
    """Implementation of a REST sensor."""
    def __init__(
        self,
        hass,
        rest,
        name,
        unit_of_measurement,
        device_class,
        value_template,
        json_attrs,
        force_update,
        resource_template,
    ):
        """Initialize the REST sensor."""
        self._hass = hass
        self.rest = rest          # RestData fetcher shared with update()
        self._name = name
        self._state = None        # last value exposed as the entity state
        self._unit_of_measurement = unit_of_measurement
        self._device_class = device_class
        self._value_template = value_template
        self._json_attrs = json_attrs  # keys to copy from a JSON response
        self._attributes = None
        self._force_update = force_update
        self._resource_template = resource_template
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement
    @property
    def device_class(self):
        """Return the class of this sensor."""
        return self._device_class
    @property
    def available(self):
        """Return if the sensor data are available."""
        return self.rest.data is not None
    @property
    def state(self):
        """Return the state of the device."""
        return self._state
    @property
    def force_update(self):
        """Force update."""
        return self._force_update
    def update(self):
        """Get the latest data from REST API and update the state."""
        if self._resource_template is not None:
            # Re-render the URL template on every poll so it can vary.
            self.rest.set_url(self._resource_template.render())
        self.rest.update()
        value = self.rest.data
        if self._json_attrs:
            self._attributes = {}
            if value:
                try:
                    json_dict = json.loads(value)
                    if isinstance(json_dict, dict):
                        # Only configured keys that are present are exposed.
                        attrs = {
                            k: json_dict[k] for k in self._json_attrs if k in json_dict
                        }
                        self._attributes = attrs
                    else:
                        _LOGGER.warning("JSON result was not a dictionary")
                except ValueError:
                    _LOGGER.warning("REST result could not be parsed as JSON")
                    _LOGGER.debug("Erroneous JSON: %s", value)
            else:
                _LOGGER.warning("Empty reply found when expecting JSON data")
        if value is not None and self._value_template is not None:
            # Template failure falls back to None rather than raising.
            value = self._value_template.render_with_possible_json_value(value, None)
        self._state = value
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attributes
class RestData:
    """Fetches and caches the text body of a configured REST endpoint."""
    def __init__(
        self, method, resource, auth, headers, data, verify_ssl, timeout=DEFAULT_TIMEOUT
    ):
        """Prepare the request once; update() re-sends it each poll."""
        self.data = None
        self._verify_ssl = verify_ssl
        self._timeout = timeout
        request = requests.Request(
            method, resource, headers=headers, auth=auth, data=data
        )
        self._request = request.prepare()
    def set_url(self, url):
        """Point the prepared request at a new URL."""
        self._request.prepare_url(url, None)
    def update(self):
        """Send the prepared request; cache the body, or None on failure."""
        _LOGGER.debug("Updating from %s", self._request.url)
        try:
            with requests.Session() as session:
                resp = session.send(
                    self._request, timeout=self._timeout, verify=self._verify_ssl
                )
            self.data = resp.text
        except requests.exceptions.RequestException as err:
            _LOGGER.error(
                "Error fetching data: %s from %s failed with %s",
                self._request,
                self._request.url,
                err,
            )
            self.data = None
|
{
"content_hash": "508fb2f38571f9ffaabd6ba41c286d62",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 88,
"avg_line_length": 31.856589147286822,
"alnum_prop": 0.6038447499695827,
"repo_name": "qedi-r/home-assistant",
"id": "41adb8559036cf28351f318b9c20a80c3ee5cce1",
"size": "8219",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/rest/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18564720"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
from .generator import GraphGenerator
|
{
"content_hash": "994a71d68c7b2ca0b51b9efd4ce2fd06",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 37,
"avg_line_length": 38,
"alnum_prop": 0.868421052631579,
"repo_name": "lucasdavid/edge",
"id": "e23c1629319c57c9d9a2afab46a61a52f6fa0d61",
"size": "38",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edge/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13216"
}
],
"symlink_target": ""
}
|
import subprocess
from mock import patch, Mock, MagicMock
from datetime import datetime, timedelta
from .. import worker
class TestWorker(object):
    """Unit tests for teuthology.worker (restart detection, log symlinking,
    job launching, and watchdog behaviour), all exercised through mocks."""
    # --- need_restart(): compares a restart-marker file's mtime to now ---
    @patch("os.path.exists")
    def test_restart_file_path_doesnt_exist(self, m_exists):
        m_exists.return_value = False
        result = worker.need_restart()
        assert not result
    @patch("os.path.getmtime")
    @patch("os.path.exists")
    @patch("teuthology.worker.datetime")
    def test_needs_restart(self, m_datetime, m_exists, m_getmtime):
        m_exists.return_value = True
        # Marker mtime in the future (now + 1 day) -> restart needed.
        m_datetime.utcfromtimestamp.return_value = datetime.utcnow() + timedelta(days=1)
        result = worker.need_restart()
        assert result
    @patch("os.path.getmtime")
    @patch("os.path.exists")
    @patch("teuthology.worker.datetime")
    def test_does_not_need_restart(self, m_datetime, m_exists, getmtime):
        m_exists.return_value = True
        # Marker mtime in the past (now - 1 day) -> no restart.
        m_datetime.utcfromtimestamp.return_value = datetime.utcnow() - timedelta(days=1)
        result = worker.need_restart()
        assert not result
    # --- symlink_worker_log(): best-effort symlink into the archive dir ---
    @patch("os.symlink")
    def test_symlink_success(self, m_symlink):
        worker.symlink_worker_log("path/to/worker.log", "path/to/archive")
        m_symlink.assert_called_with("path/to/worker.log", "path/to/archive/worker.log")
    @patch("teuthology.worker.log")
    @patch("os.symlink")
    def test_symlink_failure(self, m_symlink, m_log):
        m_symlink.side_effect = IOError
        worker.symlink_worker_log("path/to/worker.log", "path/to/archive")
        # actually logs the exception
        assert m_log.exception.called
    # --- run_job(): builds the teuthology command line and launches it ---
    @patch("teuthology.worker.run_with_watchdog")
    @patch("teuthology.worker.teuth_config")
    @patch("subprocess.Popen")
    @patch("os.environ")
    @patch("yaml.safe_dump")
    @patch("tempfile.NamedTemporaryFile")
    def test_run_job_with_watchdog(self, m_tempfile, m_safe_dump, m_environ,
                                   m_popen, m_t_config, m_run_watchdog):
        config = {
            "suite_path": "suite/path",
            "config": {"foo": "bar"},
            "verbose": True,
            "owner": "the_owner",
            "archive_path": "archive/path",
            "name": "the_name",
            "description": "the_description"
        }
        m_tmp = MagicMock()
        temp_file = Mock()
        temp_file.name = "the_name"
        m_tmp.__enter__.return_value = temp_file
        m_tempfile.return_value = m_tmp
        env = dict(PYTHONPATH="python/path")
        m_environ.copy.return_value = env
        m_p = Mock()
        m_p.returncode = 0
        m_popen.return_value = m_p
        # results_server enabled -> the watchdog path is taken.
        m_t_config.results_server = True
        worker.run_job(config, "teuth/bin/path")
        m_run_watchdog.assert_called_with(m_p, config)
        expected_args = [
            'teuth/bin/path/teuthology',
            '-v',
            '--lock',
            '--block',
            '--owner', 'the_owner',
            '--archive', 'archive/path',
            '--name', 'the_name',
            '--description',
            'the_description',
            '--',
            "the_name"
        ]
        m_popen.assert_called_with(args=expected_args, env=env)
    @patch("time.sleep")
    @patch("teuthology.worker.symlink_worker_log")
    @patch("teuthology.worker.teuth_config")
    @patch("subprocess.Popen")
    @patch("os.environ")
    @patch("yaml.safe_dump")
    @patch("tempfile.NamedTemporaryFile")
    def test_run_job_no_watchdog(self, m_tempfile, m_safe_dump, m_environ,
                                 m_popen, m_t_config, m_symlink_log, m_sleep):
        config = {
            "suite_path": "suite/path",
            "config": {"foo": "bar"},
            "verbose": True,
            "owner": "the_owner",
            "archive_path": "archive/path",
            "name": "the_name",
            "description": "the_description",
            "worker_log": "worker/log.log"
        }
        m_tmp = MagicMock()
        temp_file = Mock()
        temp_file.name = "the_name"
        m_tmp.__enter__.return_value = temp_file
        m_tempfile.return_value = m_tmp
        env = dict(PYTHONPATH="python/path")
        m_environ.copy.return_value = env
        m_p = Mock()
        m_p.returncode = 1
        m_popen.return_value = m_p
        # results_server disabled -> worker log gets symlinked instead.
        m_t_config.results_server = False
        worker.run_job(config, "teuth/bin/path")
        m_symlink_log.assert_called_with(config["worker_log"], config["archive_path"])
    # --- run_with_watchdog(): reports a dead job and (optionally) pushes
    # results via the teuthology-report CLI ---
    @patch("teuthology.worker.report.try_push_job_info")
    @patch("teuthology.worker.symlink_worker_log")
    @patch("time.sleep")
    def test_run_with_watchdog_no_reporting(self, m_sleep, m_symlink_log, m_try_push):
        config = {
            "name": "the_name",
            "job_id": "1",
            "worker_log": "worker_log",
            "archive_path": "archive/path",
            "teuthology_branch": "master"
        }
        process = Mock()
        process.poll.return_value = "not None"
        worker.run_with_watchdog(process, config)
        m_symlink_log.assert_called_with(config["worker_log"], config["archive_path"])
        m_try_push.assert_called_with(
            dict(name=config["name"], job_id=config["job_id"]),
            dict(status='dead')
        )
    @patch("subprocess.Popen")
    @patch("teuthology.worker.symlink_worker_log")
    @patch("time.sleep")
    def test_run_with_watchdog_with_reporting(self, m_sleep, m_symlink_log, m_popen):
        config = {
            "name": "the_name",
            "job_id": "1",
            "worker_log": "worker_log",
            "archive_path": "archive/path",
            "teuthology_branch": "argonaut"
        }
        process = Mock()
        process.poll.return_value = "not None"
        m_proc = Mock()
        m_proc.poll.return_value = "not None"
        m_popen.return_value = m_proc
        worker.run_with_watchdog(process, config)
        m_symlink_log.assert_called_with(config["worker_log"], config["archive_path"])
        expected_cmd = "teuthology-report -v -D -r the_name -j 1"
        m_popen.assert_called_with(
            expected_cmd,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT
        )
|
{
"content_hash": "0301a1e37152f10e010cead0e53b5c6a",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 88,
"avg_line_length": 36.5,
"alnum_prop": 0.569218372280419,
"repo_name": "t-miyamae/teuthology",
"id": "8fa8e7879690960120183047a711b3f44403eae9",
"size": "6205",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "teuthology/test/test_worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "728821"
},
{
"name": "Shell",
"bytes": "9833"
}
],
"symlink_target": ""
}
|
from datetime import date
from django import template
from django.conf import settings
from django.utils import translation
from demo.models import PersonPage, BlogPage, EventPage, CoursePage, \
Advert, Page, StandardPage, StandardIndexPage, FormPage
register = template.Library()
# settings value
@register.assignment_tag
def get_google_maps_key():
    """Return GOOGLE_MAPS_KEY from Django settings ('' when unset)."""
    return getattr(settings, 'GOOGLE_MAPS_KEY', "")
@register.assignment_tag(takes_context=True)
def get_site_root(context):
    """Return the root page of the site serving the current request."""
    # NB this returns a core.Page, not the implementation-specific model used
    # so object-comparison to self will return false as objects would differ
    return context['request'].site.root_page
@register.simple_tag
def next_pg_translated(url, language):
    """Return ``url`` with its language marker swapped ('en' <-> 'br')."""
    # Any language other than 'en' is treated as 'br'.
    if language == 'en':
        old, new = 'en', 'br'
    else:
        old, new = 'br', 'en'
    return url.replace(old, new)
def has_menu_children(page):
    """Return True when ``page`` has any live children shown in menus."""
    children = page.get_children()
    return children.live().in_menu().exists()
# Retrieves the top menu items - the immediate children of the parent page
# The has_menu_children method is necessary because the bootstrap menu requires
# a dropdown class to be applied to a parent
@register.inclusion_tag('demo/tags/top_menu.html', takes_context=True)
def top_menu(context, parent, calling_page=None):
    """Render the top navigation menu for the request's language homepage."""
    language = translation.get_language_from_request(context['request'])
    # Any language other than 'en' collapses to 'br'.
    language = language if language == 'en' else 'br'
    # NOTE(review): assumes a live child of ``parent`` slugged 'en'/'br'
    # always exists — [0] raises IndexError otherwise; confirm site tree.
    homepage = [page for page in parent.get_children().live()
                if page.slug == language][0]
    menuitems = homepage.get_children().live().in_menu()
    for menuitem in menuitems:
        menuitem.show_dropdown = has_menu_children(menuitem)
        # We don't directly check if calling_page is None since the template
        # engine can pass an empty string to calling_page
        # if the variable passed as calling_page does not exist.
        menuitem.active = (calling_page.url.startswith(menuitem.url)
                           if calling_page else False)
    return {
        'calling_page': calling_page,
        'menuitems': menuitems,
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }
# Children of a top-level menu item, used to populate its dropdown.
@register.inclusion_tag('demo/tags/top_menu_children.html', takes_context=True)
def top_menu_children(context, parent):
    """Render the live, in-menu children of ``parent`` as dropdown entries."""
    children = parent.get_children().live().in_menu()
    return {
        'parent': parent,
        'menuitems_children': children,
        # 'request' is required by the pageurl tag used in the template.
        'request': context['request'],
    }
# All live pages which are children of the calling page, for the standard
# index listing.
@register.inclusion_tag(
    'demo/tags/standard_index_listing.html',
    takes_context=True
)
def standard_index_listing(context, calling_page):
    """Render child standard pages, index pages and (optional) form pages."""
    # A 'single_form' page suppresses the separate form-page listing.
    form_pages = (None if calling_page.single_form
                  else FormPage.objects.live().child_of(calling_page))
    return {
        'pages': StandardPage.objects.live().child_of(calling_page),
        'pages_index': StandardIndexPage.objects.live().child_of(calling_page),
        'form_pages': form_pages,
        # 'request' is required by the pageurl tag used in the template.
        'request': context['request'],
    }
# Person feed for home page
@register.inclusion_tag(
    'demo/tags/person_listing.html',
    takes_context=True
)
def person_listing(context, calling_page):
    """Render all live PersonPages below ``calling_page``."""
    people = PersonPage.objects.live().descendant_of(calling_page)
    return {
        'people': people,
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }
# Blog feed for home page
@register.inclusion_tag(
    'demo/tags/blog_listing_homepage.html',
    takes_context=True
)
def blog_listing_homepage(context, calling_page, count=9):
    """Render the ``count`` most recent live BlogPages below ``calling_page``."""
    blogs = BlogPage.objects.live().descendant_of(calling_page).order_by('-date')
    return {
        'blogs': blogs[:count].select_related('feed_image'),
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }
# Events feed for home page
@register.inclusion_tag(
    'demo/tags/event_listing_homepage.html',
    takes_context=True
)
def event_listing_homepage(context, calling_page, count=5):
    """Render up to ``count`` upcoming live EventPages, soonest first."""
    events = EventPage.objects.live().descendant_of(calling_page)
    # Only events starting today or later, soonest first.
    events = events.filter(date_from__gte=date.today()).order_by('date_from')
    return {
        'events': events[:count].select_related('feed_image'),
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }
# Courses feed for home page
@register.inclusion_tag(
    'demo/tags/course_listing_homepage.html',
    takes_context=True
)
def course_listing_homepage(context, calling_page, count=5):
    """Render up to ``count`` upcoming live CoursePages, soonest first."""
    courses = CoursePage.objects.live().descendant_of(calling_page)
    # Only courses starting today or later, soonest first.
    courses = courses.filter(date_from__gte=date.today()).order_by('date_from')
    return {
        'courses': courses[:count].select_related('feed_image'),
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
    }
# Advert snippets
@register.inclusion_tag('demo/tags/adverts.html', takes_context=True)
def adverts(context):
    """Render all Advert snippets (page relation prefetched)."""
    return {
        'adverts': Advert.objects.select_related('page'),
        'request': context['request'],
    }
@register.inclusion_tag('demo/tags/breadcrumbs.html', takes_context=True)
def breadcrumbs(context):
    """Render the breadcrumb trail for the current page."""
    page = context.get('self')
    # Pages at depth <= 3 sit at/near the home page, where displaying
    # breadcrumbs is irrelevant.
    if page is not None and page.depth > 3:
        ancestors = Page.objects.ancestor_of(
            page, inclusive=True).filter(depth__gt=2)
    else:
        ancestors = ()
    return {
        'ancestors': ancestors,
        'request': context['request'],
    }
@register.inclusion_tag('demo/tags/form.html', takes_context=True)
def form_page(context, calling_page):
    """
    Render the first live FormPage child of ``calling_page``.

    Provides the form page (or the empty queryset when none exists), a
    bound form instance (or None), and the request for the pageurl tag.
    """
    formpage = FormPage.objects.live().child_of(calling_page)
    form = None
    if formpage:
        formpage = formpage[0]
        fb = formpage.form_builder(formpage.form_fields.all())
        form_class = fb.get_form_class()
        form_params = formpage.get_form_parameters()
        form = form_class(**form_params)
    # BUG FIX: previously ``form_class(**form_params)`` was evaluated in the
    # return even when no live FormPage existed, raising UnboundLocalError.
    return {
        'formpage': formpage,
        'form': form,
        'request': context['request'],
    }
|
{
"content_hash": "679ea853e4c003a36758223606faa777",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 81,
"avg_line_length": 33.08955223880597,
"alnum_prop": 0.6749360998346113,
"repo_name": "dudunato/ywam-cerrado",
"id": "5031eae70fb565965599897ad35ba461b2b3aab2",
"size": "6651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/templatetags/demo_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "27935"
},
{
"name": "HTML",
"bytes": "51566"
},
{
"name": "JavaScript",
"bytes": "824"
},
{
"name": "Python",
"bytes": "96424"
},
{
"name": "Shell",
"bytes": "6308"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..extern import six
from ..extern.six.moves import zip
import warnings
import weakref
import re
from copy import deepcopy
import numpy as np
from numpy import ma
# Remove this when Numpy no longer emits this warning and that Numpy version
# becomes the minimum required version for Astropy.
# https://github.com/astropy/astropy/issues/6285
try:
from numpy.ma.core import MaskedArrayFutureWarning
except ImportError:
# For Numpy versions that do not raise this warning.
MaskedArrayFutureWarning = None
from ..units import Unit, Quantity
from ..utils.console import color_print
from ..utils.metadata import MetaData
from ..utils.data_info import BaseColumnInfo, dtype_info_name
from ..extern.six.moves import range
from . import groups
from . import pprint
from .np_utils import fix_column_name
# These "shims" provide __getitem__ implementations for Column and MaskedColumn
from ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim
# Create a generic TableFormatter object for use by bare columns with no
# parent table.
FORMATTER = pprint.TableFormatter()
class StringTruncateWarning(UserWarning):
    """
    Warning class for when a string column is assigned a value
    that gets truncated because the base (numpy) string length
    is too short.
    This does not inherit from AstropyWarning because we want to use
    stacklevel=2 to show the user where the issue occurred in their code.
    """
    pass
# Always emit this warning, not just the first instance
warnings.simplefilter('always', StringTruncateWarning)
def _auto_names(n_cols):
    """Return ``n_cols`` default column names built from conf.auto_colname."""
    from . import conf
    template = str(conf.auto_colname)
    return [template.format(i) for i in range(n_cols)]
# list of one and two-dimensional comparison functions, which sometimes return
# a Column class and sometimes a plain array. Used in __array_wrap__ to ensure
# they only return plain (masked) arrays (see #1446 and #1685)
_comparison_functions = set(
[np.greater, np.greater_equal, np.less, np.less_equal,
np.not_equal, np.equal,
np.isfinite, np.isinf, np.isnan, np.sign, np.signbit])
def col_copy(col, copy_indices=True):
    """
    Mixin-safe version of Column.copy() (with copy_data=True).
    Parameters
    ----------
    col : Column or mixin column
        Input column
    copy_indices : bool
        Copy the column ``indices`` attribute
    Returns
    -------
    col : Copy of input column
    """
    if isinstance(col, BaseColumn):
        return col.copy()
    # The new column should have None for the parent_table ref.  If the
    # original parent_table weakref there at the point of copying then it
    # generates an infinite recursion.  Instead temporarily remove the weakref
    # on the original column and restore after the copy in an exception-safe
    # manner.
    parent_table = col.info.parent_table
    indices = col.info.indices
    col.info.parent_table = None
    col.info.indices = []
    try:
        newcol = col.copy() if hasattr(col, 'copy') else deepcopy(col)
        newcol.info = col.info
        newcol.info.indices = deepcopy(indices or []) if copy_indices else []
        for index in newcol.info.indices:
            # Re-point each copied index from the original column to the copy.
            index.replace_col(col, newcol)
    finally:
        # Restore the original column's state even if the copy raised.
        col.info.parent_table = parent_table
        col.info.indices = indices
    return newcol
class FalseArray(np.ndarray):
    """
    Boolean mask array that is always False.
    This is used to create a stub ``mask`` property which is a boolean array of
    ``False`` used by default for mixin columns and corresponding to the mixin
    column data shape. The ``mask`` looks like a normal numpy array but an
    exception will be raised if ``True`` is assigned to any element. The
    consequences of the limitation are most obvious in the high-level table
    operations.
    Parameters
    ----------
    shape : tuple
        Data shape
    """
    def __new__(cls, shape):
        # ``bool`` instead of the deprecated/removed ``np.bool`` alias —
        # the resulting dtype is identical on every numpy version.
        obj = np.zeros(shape, dtype=bool).view(cls)
        return obj
    def __setitem__(self, item, val):
        val = np.asarray(val)
        if np.any(val):
            raise ValueError('Cannot set any element of {0} class to True'
                             .format(self.__class__.__name__))
    if six.PY2:  # avoid falling back to ndarray.__setslice__
        def __setslice__(self, start, stop, val):
            self.__setitem__(slice(start, stop), val)
class ColumnInfo(BaseColumnInfo):
    """
    Container for meta information like name, description, format.
    This is required when the object is used as a mixin column within a table,
    but can be used as a general way to store meta information.
    """
    # All info attributes live on the parent table when one exists.
    attrs_from_parent = BaseColumnInfo.attr_names
    _supports_indexing = True
    def new_like(self, cols, length, metadata_conflicts='warn', name=None):
        """
        Return a new Column instance which is consistent with the
        input ``cols`` and has ``length`` rows.
        This is intended for creating an empty column object whose elements can
        be set in-place for table operations like join or vstack.
        Parameters
        ----------
        cols : list
            List of input columns
        length : int
            Length of the output column object
        metadata_conflicts : str ('warn'|'error'|'silent')
            How to handle metadata conflicts
        name : str
            Output column name
        Returns
        -------
        col : Column (or subclass)
            New instance of this class consistent with ``cols``
        """
        # Merge the listed attributes across inputs per the conflict policy.
        attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
                                           ('meta', 'unit', 'format', 'description'))
        return self._parent_cls(length=length, **attrs)
class BaseColumn(_ColumnGetitemShim, np.ndarray):
meta = MetaData()
    def __new__(cls, data=None, name=None,
                dtype=None, shape=(), length=0,
                description=None, unit=None, format=None, meta=None,
                copy=False, copy_indices=True):
        # Four construction paths: no data (zeros), another BaseColumn,
        # a Quantity, or any array-like.  Missing attributes are filled
        # from ``data`` where it carries them.
        if data is None:
            dtype = (np.dtype(dtype).str, shape)
            self_data = np.zeros(length, dtype=dtype)
        elif isinstance(data, BaseColumn) and hasattr(data, '_name'):
            # When unpickling a MaskedColumn, ``data`` will be a bare
            # BaseColumn with none of the expected attributes.  In this case
            # do NOT execute this block which initializes from ``data``
            # attributes.
            self_data = np.array(data.data, dtype=dtype, copy=copy)
            if description is None:
                description = data.description
            if unit is None:
                unit = unit or data.unit
            if format is None:
                format = data.format
            if meta is None:
                meta = deepcopy(data.meta)
            if name is None:
                name = data.name
        elif isinstance(data, Quantity):
            if unit is None:
                # Inherit the Quantity's own unit.
                self_data = np.array(data, dtype=dtype, copy=copy)
                unit = data.unit
            else:
                # Convert the Quantity to the explicitly requested unit.
                self_data = np.array(data.to(unit), dtype=dtype, copy=copy)
            if description is None:
                description = data.info.description
            if format is None:
                format = data.info.format
            if meta is None:
                meta = deepcopy(data.info.meta)
        else:
            if not six.PY2 and np.dtype(dtype).char == 'S':
                # Python 3 + bytes dtype: encode str input to bytes first.
                data = cls._encode_str(data)
            self_data = np.array(data, dtype=dtype, copy=copy)
        self = self_data.view(cls)
        self._name = fix_column_name(name)
        self.unit = unit
        self.format = format
        self.description = description
        self.meta = meta
        self._parent_table = None
        self.indices = deepcopy(getattr(data, 'indices', [])) if \
            copy_indices else []
        for index in self.indices:
            # Re-point inherited indices at the new column object.
            index.replace_col(data, self)
        return self
    @property
    def data(self):
        """The column data as a plain ``numpy.ndarray`` view (no copy)."""
        return self.view(np.ndarray)
    @property
    def parent_table(self):
        """The Table this column belongs to (dereferenced weakref), or None."""
        if self._parent_table is None:
            return None
        else:
            return self._parent_table()
    @parent_table.setter
    def parent_table(self, table):
        if table is None:
            self._parent_table = None
        else:
            # Hold the table weakly to avoid a table<->column reference cycle.
            self._parent_table = weakref.ref(table)
    # Info descriptor used when this column acts as a (mixin) table column.
    info = ColumnInfo()
    def copy(self, order='C', data=None, copy_data=True):
        """
        Return a copy of the current instance.
        If ``data`` is supplied then a view (reference) of ``data`` is used,
        and ``copy_data`` is ignored.
        Parameters
        ----------
        order : {'C', 'F', 'A', 'K'}, optional
            Controls the memory layout of the copy. 'C' means C-order,
            'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
            'C' otherwise. 'K' means match the layout of ``a`` as closely
            as possible. (Note that this function and :func:numpy.copy are very
            similar, but have different default values for their order=
            arguments.) Default is 'C'.
        data : array, optional
            If supplied then use a view of ``data`` instead of the instance
            data.  This allows copying the instance attributes and meta.
        copy_data : bool, optional
            Make a copy of the internal numpy array instead of using a
            reference.  Default is True.
        Returns
        -------
        col : Column or MaskedColumn
            Copy of the current column (same type as original)
        """
        if data is None:
            data = self.data
            if copy_data:
                data = data.copy(order)
        out = data.view(self.__class__)
        out.__array_finalize__(self)
        # for MaskedColumn, MaskedArray.__array_finalize__ also copies mask
        # from self, which is not the idea here, so undo
        if isinstance(self, MaskedColumn):
            out._mask = data._mask
        self._copy_groups(out)
        return out
    def __setstate__(self, state):
        """
        Restore the internal state of the Column/MaskedColumn for pickling
        purposes.  This requires that the last element of ``state`` is a
        5-tuple that has Column-specific state values.
        """
        # Get the Column attributes (appended by __reduce__ below).
        names = ('_name', 'unit', 'format', 'description', 'meta', 'indices')
        attrs = {name: val for name, val in zip(names, state[-1])}
        state = state[:-1]
        # Using super(type(self), self).__setstate__() gives an infinite
        # recursion.  Manually call the right super class to actually set up
        # the array object.
        super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
        super_class.__setstate__(self, state)
        # Set the Column attributes
        for name, val in attrs.items():
            setattr(self, name, val)
        # The parent table reference is never pickled; re-attach lazily.
        self._parent_table = None
    def __reduce__(self):
        """
        Return a 3-tuple for pickling a Column.  Use the super-class
        functionality but then add in a 5-tuple of Column-specific values
        that get used in __setstate__.
        """
        # Pick the matching ndarray/MaskedArray reduce machinery.
        super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
        reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self)
        # Define Column-specific attrs and meta that gets added to state.
        column_state = (self.name, self.unit, self.format, self.description,
                        self.meta, self.indices)
        state = state + (column_state,)
        return reconstruct_func, reconstruct_func_args, state
    def __array_finalize__(self, obj):
        """Propagate Column attributes when numpy creates views/copies."""
        # Obj will be none for direct call to Column() creator
        if obj is None:
            return
        if six.callable(super(BaseColumn, self).__array_finalize__):
            super(BaseColumn, self).__array_finalize__(obj)
        # Self was created from template (e.g. obj[slice] or (obj * 2))
        # or viewcast e.g. obj.view(Column).  In either case we want to
        # init Column attributes for self from obj if possible.
        self.parent_table = None
        if not hasattr(self, 'indices'):  # may have been copied in __new__
            self.indices = []
        self._copy_attrs(obj)
    def __array_wrap__(self, out_arr, context=None):
        """
        __array_wrap__ is called at the end of every ufunc.
        Normally, we want a Column object back and do not have to do anything
        special.  But there are two exceptions:
        1) If the output shape is different (e.g. for reduction ufuncs
        like sum() or mean()), a Column still linking to a parent_table
        makes little sense, so we return the output viewed as the
        column content (ndarray or MaskedArray).
        For this case, we use "[()]" to select everything, and to ensure we
        convert a zero rank array to a scalar.  (For some reason np.sum()
        returns a zero rank scalar array while np.mean() returns a scalar;
        So the [()] is needed for this case.
        2) When the output is created by any function that returns a boolean
        we also want to consistently return an array rather than a column
        (see #1446 and #1685)
        """
        out_arr = super(BaseColumn, self).__array_wrap__(out_arr, context)
        # ``context[0]`` is the ufunc that produced ``out_arr``.
        if (self.shape != out_arr.shape or
            (isinstance(out_arr, BaseColumn) and
             (context is not None and context[0] in _comparison_functions))):
            return out_arr.data[()]
        else:
            return out_arr
    @property
    def name(self):
        """
        The name of this column.
        """
        return self._name
    @name.setter
    def name(self, val):
        val = fix_column_name(val)
        if self.parent_table is not None:
            table = self.parent_table
            # Keep the parent table's column mapping in sync with the rename.
            table.columns._rename_column(self.name, val)
        self._name = val
    @property
    def descr(self):
        """Array-interface compliant full description of the column.
        This returns a 3-tuple (name, type, shape) that can always be
        used in a structured array dtype definition.
        """
        # shape[1:] drops the row axis, leaving the per-element shape.
        return (self.name, self.dtype.str, self.shape[1:])
    def iter_str_vals(self):
        """
        Return an iterator that yields the string-formatted values of this
        column.
        Returns
        -------
        str_vals : iterator
            Column values formatted as strings
        """
        # Iterate over formatted values with no max number of lines, no column
        # name, no unit, and ignoring the returned header info in outs.
        _pformat_col_iter = self._formatter._pformat_col_iter
        for str_val in _pformat_col_iter(self, -1, show_name=False, show_unit=False,
                                         show_dtype=False, outs={}):
            yield str_val
def attrs_equal(self, col):
    """Compare the column attributes of ``col`` to this object.

    The comparison attributes are: ``name``, ``unit``, ``dtype``,
    ``format``, ``description``, and ``meta``.

    Parameters
    ----------
    col : Column
        Comparison column

    Returns
    -------
    equal : boolean
        True if all attributes are equal
    """
    if not isinstance(col, BaseColumn):
        raise ValueError('Comparison `col` must be a Column or '
                         'MaskedColumn object')
    # Short-circuit on the first differing attribute.
    for attr in ('name', 'unit', 'dtype', 'format', 'description', 'meta'):
        if getattr(self, attr) != getattr(col, attr):
            return False
    return True
@property
def _formatter(self):
    # A column attached to a table uses the table's formatter; a free
    # standing column falls back to the module-level default.
    if self.parent_table is None:
        return FORMATTER
    return self.parent_table.formatter
def pformat(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False,
            html=False):
    """Return a list of formatted string representation of column values.

    If no value of ``max_lines`` is supplied then the height of the
    screen terminal is used to set ``max_lines``.  If the terminal
    height cannot be determined then the default will be
    determined using the ``astropy.conf.max_lines`` configuration
    item.  If a negative value of ``max_lines`` is supplied then
    there is no line limit applied.

    Parameters
    ----------
    max_lines : int
        Maximum lines of output (header + data rows)
    show_name : bool
        Include column name. Default is True.
    show_unit : bool
        Include a header row for unit. Default is False.
    show_dtype : bool
        Include column dtype. Default is False.
    html : bool
        Format the output as an HTML table. Default is False.

    Returns
    -------
    lines : list
        List of lines with header and formatted column values
    """
    # The second return value (header info) is not needed here.
    lines, _ = self._formatter._pformat_col(
        self, max_lines, show_name=show_name, show_unit=show_unit,
        show_dtype=show_dtype, html=html)
    return lines
def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):
    """Print a formatted string representation of column values.

    If no value of ``max_lines`` is supplied then the height of the
    screen terminal is used to set ``max_lines``. If the terminal
    height cannot be determined then the default will be
    determined using the ``astropy.conf.max_lines`` configuration
    item. If a negative value of ``max_lines`` is supplied then
    there is no line limit applied.

    Parameters
    ----------
    max_lines : int
        Maximum number of values in output
    show_name : bool
        Include column name. Default is True.
    show_unit : bool
        Include a header row for unit. Default is False.
    show_dtype : bool
        Include column dtype. Default is False.
    """
    _pformat_col = self._formatter._pformat_col
    lines, outs = _pformat_col(self, max_lines, show_name=show_name, show_unit=show_unit,
                               show_dtype=show_dtype)
    # ``outs['n_header']`` tells how many leading lines are header rows;
    # print those in red to visually separate them from the data rows.
    n_header = outs['n_header']
    for i, line in enumerate(lines):
        if i < n_header:
            color_print(line, 'red')
        else:
            print(line)
def more(self, max_lines=None, show_name=True, show_unit=False):
    """Interactively browse column with a paging interface.

    Supported keys::

      f, <space> : forward one page
      b : back one page
      r : refresh same page
      n : next row
      p : previous row
      < : go to beginning
      > : go to end
      q : quit browsing
      h : print this help

    Parameters
    ----------
    max_lines : int
        Maximum number of lines in table output.
    show_name : bool
        Include a header row for column names. Default is True.
    show_unit : bool
        Include a header row for unit. Default is False.
    """
    # Delegate interactive paging to the formatter.
    self._formatter._more_tabcol(self, max_lines=max_lines,
                                 show_name=show_name, show_unit=show_unit)
@property
def unit(self):
    """
    The unit associated with this column. May be a string or a
    `astropy.units.UnitBase` instance.

    Setting the ``unit`` property does not change the values of the
    data. To perform a unit conversion, use ``convert_unit_to``.
    """
    return self._unit

@unit.setter
def unit(self, unit):
    # Tolerate unrecognized unit strings instead of raising ('silent').
    self._unit = None if unit is None else Unit(unit, parse_strict='silent')

@unit.deleter
def unit(self):
    self._unit = None
def convert_unit_to(self, new_unit, equivalencies=[]):
    """
    Converts the values of the column in-place from the current
    unit to the given unit.

    To change the unit associated with this column without
    actually changing the data values, simply set the ``unit``
    property.

    Parameters
    ----------
    new_unit : str or `astropy.units.UnitBase` instance
        The unit to convert to.
    equivalencies : list of equivalence pairs, optional
        A list of equivalence pairs to try if the unit are not
        directly convertible.  See :ref:`unit_equivalencies`.

    Raises
    ------
    ValueError
        If no unit is currently set on this column.
    astropy.units.UnitsError
        If units are inconsistent
    """
    if self.unit is None:
        raise ValueError("No unit set on column")
    # Convert the underlying ndarray data in place, then record the new
    # unit.  (The mutable default ``equivalencies=[]`` is only read here,
    # never mutated, so it is safe.)
    self.data[:] = self.unit.to(
        new_unit, self.data, equivalencies=equivalencies)
    self.unit = new_unit
@property
def groups(self):
    """Group information for this column, created lazily on first access."""
    try:
        return self._groups
    except AttributeError:
        self._groups = groups.ColumnGroups(self)
    return self._groups
def group_by(self, keys):
    """
    Group this column by the specified ``keys``

    This effectively splits the column into groups which correspond to
    unique values of the ``keys`` grouping object. The output is a new
    `Column` or `MaskedColumn` which contains a copy of this column but
    sorted by row according to ``keys``.

    The ``keys`` input to ``group_by`` must be a numpy array with the
    same length as this column.

    Parameters
    ----------
    keys : numpy array
        Key grouping object

    Returns
    -------
    out : Column
        New column with groups attribute set accordingly
    """
    # All of the sorting/splitting logic lives in the groups module.
    return groups.column_group_by(self, keys)
def _copy_groups(self, out):
    """
    Copy current groups into a copy of self ``out``
    """
    parent = self.parent_table
    if parent:
        # Column lives in a table: propagate the table's grouping, if any.
        if hasattr(parent, '_groups'):
            out._groups = groups.ColumnGroups(out, indices=parent._groups._indices)
    elif hasattr(self, '_groups'):
        # Free-standing column: propagate its own grouping.
        out._groups = groups.ColumnGroups(out, indices=self._groups._indices)
# Strip off the BaseColumn-ness for repr and str so that
# MaskedColumn.data __repr__ does not include masked_BaseColumn(data =
# [1 2], ...).
def __repr__(self):
    return repr(np.asarray(self))
@property
def quantity(self):
    """
    A view of this table column as a `~astropy.units.Quantity` object with
    units given by the Column's `unit` parameter.
    """
    # the Quantity initializer is used here because it correctly fails
    # if the column's values are non-numeric (like strings), while .view
    # will happily return a quantity with gibberish for numerical values
    return Quantity(self, copy=False, dtype=self.dtype, order='A')
def to(self, unit, equivalencies=[], **kwargs):
    """
    Converts this table column to a `~astropy.units.Quantity` object with
    the requested units.

    Parameters
    ----------
    unit : `~astropy.units.Unit` or str
        The unit to convert to (i.e., a valid argument to the
        :meth:`astropy.units.Quantity.to` method).
    equivalencies : list of equivalence pairs, optional
        Equivalencies to use for this conversion.  See
        :meth:`astropy.units.Quantity.to` for more details.

    Returns
    -------
    quantity : `~astropy.units.Quantity`
        A quantity object with the contents of this column in the units
        ``unit``.
    """
    # NOTE(review): extra **kwargs are accepted but not forwarded to
    # Quantity.to -- presumably kept for signature compatibility; confirm.
    return self.quantity.to(unit, equivalencies)
def _copy_attrs(self, obj):
    """
    Copy key column attributes from ``obj`` to self
    """
    # Attributes missing on ``obj`` are copied as None.
    for attr in ('name', 'unit', 'format', 'description'):
        setattr(self, attr, getattr(obj, attr, None))
    # meta is deep-copied so the two columns never share mutable state.
    self.meta = deepcopy(getattr(obj, 'meta', {}))
@staticmethod
def _encode_str(value):
    """
    Encode anything that is unicode-ish as utf-8.  This method is only
    called for Py3+.
    """
    # Bytes and the masked singleton pass through untouched.
    if isinstance(value, bytes) or value is np.ma.masked:
        return value
    if isinstance(value, str):
        return value.encode('utf-8')
    # Array-like input: encode element-wise only when it is a unicode array.
    arr = np.asarray(value)
    if arr.dtype.char == 'U':
        arr = np.char.encode(arr, encoding='utf-8')
    return arr
class Column(BaseColumn):
    """Define a data column for use in a Table object.

    Parameters
    ----------
    data : list, ndarray or None
        Column data values
    name : str
        Column name and key for reference within Table
    dtype : numpy.dtype compatible value
        Data type for column
    shape : tuple or ()
        Dimensions of a single row element in the column data
    length : int or 0
        Number of row elements in column data
    description : str or None
        Full description of column
    unit : str or None
        Physical unit
    format : str or None or function or callable
        Format string for outputting column values.  This can be an
        "old-style" (``format % value``) or "new-style" (`str.format`)
        format specification string or a function or any callable object that
        accepts a single value and returns a string.
    meta : dict-like or None
        Meta-data associated with the column

    Examples
    --------
    A Column can be created in two different ways:

    - Provide a ``data`` value but not ``shape`` or ``length`` (which are
      inferred from the data).

      Examples::

        col = Column(data=[1, 2], name='name')  # shape=(2,)
        col = Column(data=[[1, 2], [3, 4]], name='name')  # shape=(2, 2)
        col = Column(data=[1, 2], name='name', dtype=float)
        col = Column(data=np.array([1, 2]), name='name')
        col = Column(data=['hello', 'world'], name='name')

      The ``dtype`` argument can be any value which is an acceptable
      fixed-size data-type initializer for the numpy.dtype() method.  See
      `<http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
      Examples include:

      - Python non-string type (float, int, bool)
      - Numpy non-string type (e.g. np.float32, np.int64, np.bool)
      - Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')

      If no ``dtype`` value is provided then the type is inferred using
      ``np.array(data)``.

    - Provide ``length`` and optionally ``shape``, but not ``data``

      Examples::

        col = Column(name='name', length=5)
        col = Column(name='name', dtype=int, length=10, shape=(3,4))

      The default ``dtype`` is ``np.float64``.  The ``shape`` argument is the
      array shape of a single cell in the column.
    """

    def __new__(cls, data=None, name=None,
                dtype=None, shape=(), length=0,
                description=None, unit=None, format=None, meta=None,
                copy=False, copy_indices=True):
        # A plain Column has no way to represent missing values, so refuse
        # conversion from a MaskedColumn that actually has masked entries.
        if isinstance(data, MaskedColumn) and np.any(data.mask):
            raise TypeError("Cannot convert a MaskedColumn with masked value to a Column")
        self = super(Column, cls).__new__(cls, data=data, name=name, dtype=dtype,
                                          shape=shape, length=length, description=description,
                                          unit=unit, format=format, meta=meta,
                                          copy=copy, copy_indices=copy_indices)
        return self

    def __setattr__(self, item, value):
        # Guard against setting ``mask`` on a non-masked column.
        if not isinstance(self, MaskedColumn) and item == "mask":
            raise AttributeError("cannot set mask value to a column in non-masked Table")
        super(Column, self).__setattr__(item, value)
        # Changing the unit of a numeric column may require the parent
        # table to convert this column to a different column class.
        if item == 'unit' and issubclass(self.dtype.type, np.number):
            try:
                converted = self.parent_table._convert_col_for_table(self)
            except AttributeError:  # Either no parent table or parent table is None
                pass
            else:
                if converted is not self:
                    self.parent_table.replace_column(self.name, converted)

    def _base_repr_(self, html=False):
        """Common implementation for __repr__ and _repr_html_."""
        # If scalar then just convert to correct numpy type and use numpy repr
        if self.ndim == 0:
            return repr(self.item())
        # Build the '<Column name=... dtype=... ...>' header line, skipping
        # any attribute whose value is not set.
        descr_vals = [self.__class__.__name__]
        unit = None if self.unit is None else str(self.unit)
        shape = None if self.ndim <= 1 else self.shape[1:]
        for attr, val in (('name', self.name),
                          ('dtype', dtype_info_name(self.dtype)),
                          ('shape', shape),
                          ('unit', unit),
                          ('format', self.format),
                          ('description', self.description),
                          ('length', len(self))):
            if val is not None:
                descr_vals.append('{0}={1!r}'.format(attr, val))
        descr = '<' + ' '.join(descr_vals) + '>\n'
        if html:
            from ..utils.xml.writer import xml_escape
            descr = xml_escape(descr)
        data_lines, outs = self._formatter._pformat_col(
            self, show_name=False, show_unit=False, show_length=False, html=html)
        out = descr + '\n'.join(data_lines)
        # On Py2, repr() must return bytes rather than unicode.
        if six.PY2 and isinstance(out, six.text_type):
            out = out.encode('utf-8')
        return out

    def _repr_html_(self):
        return self._base_repr_(html=True)

    def __repr__(self):
        return self._base_repr_(html=False)

    def __unicode__(self):
        # If scalar then just convert to correct numpy type and use numpy repr
        if self.ndim == 0:
            return str(self.item())
        lines, outs = self._formatter._pformat_col(self)
        return '\n'.join(lines)
    # On Py3 str() is the text formatter above; on Py2 str() must be bytes.
    if not six.PY2:
        __str__ = __unicode__

    def __bytes__(self):
        return six.text_type(self).encode('utf-8')
    if six.PY2:
        __str__ = __bytes__

    def _check_string_truncate(self, value):
        """
        Emit a warning if any elements of ``value`` will be truncated when
        ``value`` is assigned to self.
        """
        # Convert input ``value`` to the string dtype of this column and
        # find the length of the longest string in the array.
        value = np.asanyarray(value, dtype=self.dtype.type)
        if value.size == 0:
            return
        value_str_len = np.char.str_len(value).max()
        # Parse the array-protocol typestring (e.g. '|U15') of self.dtype which
        # has the character repeat count on the right side.
        self_str_len = int(re.search(r'(\d+)$', self.dtype.str).group(1))
        if value_str_len > self_str_len:
            warnings.warn('truncated right side string(s) longer than {} '
                          'character(s) during assignment'
                          .format(self_str_len),
                          StringTruncateWarning,
                          stacklevel=3)

    def __setitem__(self, index, value):
        # Py3 bytestring columns need the assigned value encoded to utf-8.
        if not six.PY2 and self.dtype.char == 'S':
            value = self._encode_str(value)
        # Issue warning for string assignment that truncates ``value``
        if issubclass(self.dtype.type, np.character):
            self._check_string_truncate(value)
        # update indices
        self.info.adjust_indices(index, value, len(self))
        # Set items using a view of the underlying data, as it gives an
        # order-of-magnitude speed-up. [#2994]
        self.data[index] = value
    if six.PY2:
        # avoid falling through to ndarray.__setslice__, instead using
        # self.__setitem__, which is much faster (see above). [#3020]
        def __setslice__(self, start, stop, value):
            self.__setitem__(slice(start, stop), value)

    def _make_compare(oper):
        """
        Make comparison methods which encode the ``other`` object to utf-8
        in the case of a bytestring dtype for Py3+.
        """
        def _compare(self, other):
            if not six.PY2 and self.dtype.char == 'S':
                other = self._encode_str(other)
            return getattr(self.data, oper)(other)
        return _compare
    __eq__ = _make_compare('__eq__')
    __ne__ = _make_compare('__ne__')
    __gt__ = _make_compare('__gt__')
    __lt__ = _make_compare('__lt__')
    __ge__ = _make_compare('__ge__')
    __le__ = _make_compare('__le__')

    def insert(self, obj, values, axis=0):
        """
        Insert values before the given indices in the column and return
        a new `~astropy.table.Column` object.

        Parameters
        ----------
        obj : int, slice or sequence of ints
            Object that defines the index or indices before which ``values`` is
            inserted.
        values : array_like
            Value(s) to insert.  If the type of ``values`` is different
            from that of quantity, ``values`` is converted to the matching type.
            ``values`` should be shaped so that it can be broadcast appropriately
        axis : int, optional
            Axis along which to insert ``values``.  If ``axis`` is None then
            the column array is flattened before insertion.  Default is 0,
            which will insert a row.

        Returns
        -------
        out : `~astropy.table.Column`
            A copy of column with ``values`` and ``mask`` inserted.  Note that the
            insertion does not occur in-place: a new column is returned.
        """
        if self.dtype.kind == 'O':
            # Even if values is array-like (e.g. [1,2,3]), insert as a single
            # object. Numpy.insert instead inserts each element in an array-like
            # input individually.
            data = np.insert(self, obj, None, axis=axis)
            data[obj] = values
        else:
            # Explicitly convert to dtype of this column.  Needed because numpy
            # 1.7 enforces safe casting by default; this isn't the case for
            # 1.6 or 1.8+.
            values = np.asarray(values, dtype=self.dtype)
            data = np.insert(self, obj, values, axis=axis)
        out = data.view(self.__class__)
        out.__array_finalize__(self)
        return out

    # We do this to make the methods show up in the API docs
    name = BaseColumn.name
    unit = BaseColumn.unit
    copy = BaseColumn.copy
    more = BaseColumn.more
    pprint = BaseColumn.pprint
    pformat = BaseColumn.pformat
    convert_unit_to = BaseColumn.convert_unit_to
    quantity = BaseColumn.quantity
    to = BaseColumn.to
class MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray):
    """Define a masked data column for use in a Table object.

    Parameters
    ----------
    data : list, ndarray or None
        Column data values
    name : str
        Column name and key for reference within Table
    mask : list, ndarray or None
        Boolean mask for which True indicates missing or invalid data
    fill_value : float, int, str or None
        Value used when filling masked column elements
    dtype : numpy.dtype compatible value
        Data type for column
    shape : tuple or ()
        Dimensions of a single row element in the column data
    length : int or 0
        Number of row elements in column data
    description : str or None
        Full description of column
    unit : str or None
        Physical unit
    format : str or None or function or callable
        Format string for outputting column values.  This can be an
        "old-style" (``format % value``) or "new-style" (`str.format`)
        format specification string or a function or any callable object that
        accepts a single value and returns a string.
    meta : dict-like or None
        Meta-data associated with the column

    Examples
    --------
    A MaskedColumn is similar to a Column except that it includes ``mask`` and
    ``fill_value`` attributes.  It can be created in two different ways:

    - Provide a ``data`` value but not ``shape`` or ``length`` (which are
      inferred from the data).

      Examples::

        col = MaskedColumn(data=[1, 2], name='name')
        col = MaskedColumn(data=[1, 2], name='name', mask=[True, False])
        col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99)

      The ``mask`` argument will be cast as a boolean array and specifies
      which elements are considered to be missing or invalid.

      The ``dtype`` argument can be any value which is an acceptable
      fixed-size data-type initializer for the numpy.dtype() method.  See
      `<http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
      Examples include:

      - Python non-string type (float, int, bool)
      - Numpy non-string type (e.g. np.float32, np.int64, np.bool)
      - Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')

      If no ``dtype`` value is provided then the type is inferred using
      ``np.array(data)``.  When ``data`` is provided then the ``shape``
      and ``length`` arguments are ignored.

    - Provide ``length`` and optionally ``shape``, but not ``data``

      Examples::

        col = MaskedColumn(name='name', length=5)
        col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4))

      The default ``dtype`` is ``np.float64``.  The ``shape`` argument is the
      array shape of a single cell in the column.
    """

    def __new__(cls, data=None, name=None, mask=None, fill_value=None,
                dtype=None, shape=(), length=0,
                description=None, unit=None, format=None, meta=None,
                copy=False, copy_indices=True):
        # Inherit the mask from ``data`` when none is given explicitly;
        # otherwise deep-copy the caller's mask so it is never shared.
        if mask is None and hasattr(data, 'mask'):
            mask = data.mask
        else:
            mask = deepcopy(mask)
        # Create self using MaskedArray as a wrapper class, following the example of
        # class MSubArray in
        # https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py
        # This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and
        # https://github.com/astropy/astropy/commit/ff6039e8)
        # First just pass through all args and kwargs to BaseColumn, then wrap that object
        # with MaskedArray.
        self_data = BaseColumn(data, dtype=dtype, shape=shape, length=length, name=name,
                               unit=unit, format=format, description=description,
                               meta=meta, copy=copy, copy_indices=copy_indices)
        self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask)
        # Note: do not set fill_value in the MaskedArray constructor because this does not
        # go through the fill_value workarounds.
        if fill_value is None and getattr(data, 'fill_value', None) is not None:
            # Coerce the fill_value to the correct type since `data` may be a
            # different dtype than self.
            fill_value = self.dtype.type(data.fill_value)
        self.fill_value = fill_value
        self.parent_table = None
        # needs to be done here since self doesn't come from BaseColumn.__new__
        for index in self.indices:
            index.replace_col(self_data, self)
        return self

    @property
    def fill_value(self):
        return self.get_fill_value()  # defer to native ma.MaskedArray method

    @fill_value.setter
    def fill_value(self, val):
        """Set fill value both in the masked column view and in the parent table
        if it exists. Setting one or the other alone doesn't work."""
        # another ma bug workaround: If the value of fill_value for a string array is
        # requested but not yet set then it gets created as 'N/A'. From this point onward
        # any new fill_values are truncated to 3 characters. Note that this does not
        # occur if the masked array is a structured array (as in the previous block that
        # deals with the parent table).
        #
        # >>> x = ma.array(['xxxx'])
        # >>> x.fill_value  # fill_value now gets represented as an 'S3' array
        # 'N/A'
        # >>> x.fill_value='yyyy'
        # >>> x.fill_value
        # 'yyy'
        #
        # To handle this we are forced to reset a private variable first:
        self._fill_value = None
        self.set_fill_value(val)  # defer to native ma.MaskedArray method

    @property
    def data(self):
        """This column's values as a plain `~numpy.ma.MaskedArray` view."""
        out = self.view(ma.MaskedArray)
        # The following is necessary because of a bug in Numpy, which was
        # fixed in numpy/numpy#2703. The fix should be included in Numpy 1.8.0.
        out.fill_value = self.fill_value
        return out

    def filled(self, fill_value=None):
        """Return a copy of self, with masked values filled with a given value.

        Parameters
        ----------
        fill_value : scalar; optional
            The value to use for invalid entries (`None` by default).  If
            `None`, the ``fill_value`` attribute of the array is used
            instead.

        Returns
        -------
        filled_column : Column
            A copy of ``self`` with masked entries replaced by `fill_value`
            (be it the function argument or the attribute of ``self``).
        """
        if fill_value is None:
            fill_value = self.fill_value
        data = super(MaskedColumn, self).filled(fill_value)
        # Use parent table definition of Column if available
        column_cls = self.parent_table.Column if (self.parent_table is not None) else Column
        out = column_cls(name=self.name, data=data, unit=self.unit,
                         format=self.format, description=self.description,
                         meta=deepcopy(self.meta))
        return out

    def insert(self, obj, values, mask=None, axis=0):
        """
        Insert values along the given axis before the given indices and return
        a new `~astropy.table.MaskedColumn` object.

        Parameters
        ----------
        obj : int, slice or sequence of ints
            Object that defines the index or indices before which ``values`` is
            inserted.
        values : array_like
            Value(s) to insert.  If the type of ``values`` is different
            from that of quantity, ``values`` is converted to the matching type.
            ``values`` should be shaped so that it can be broadcast appropriately
        mask : boolean array_like
            Mask value(s) to insert.  If not supplied then False is used.
        axis : int, optional
            Axis along which to insert ``values``.  If ``axis`` is None then
            the column array is flattened before insertion.  Default is 0,
            which will insert a row.

        Returns
        -------
        out : `~astropy.table.MaskedColumn`
            A copy of column with ``values`` and ``mask`` inserted.  Note that the
            insertion does not occur in-place: a new masked column is returned.
        """
        self_ma = self.data  # self viewed as MaskedArray
        if self.dtype.kind == 'O':
            # Even if values is array-like (e.g. [1,2,3]), insert as a single
            # object. Numpy.insert instead inserts each element in an array-like
            # input individually.
            new_data = np.insert(self_ma.data, obj, None, axis=axis)
            new_data[obj] = values
        else:
            # Explicitly convert to dtype of this column.  Needed because numpy
            # 1.7 enforces safe casting by default; this isn't the case for
            # 1.6 or 1.8+.
            values = np.asarray(values, dtype=self.dtype)
            new_data = np.insert(self_ma.data, obj, values, axis=axis)
        if mask is None:
            if self.dtype.kind == 'O':
                mask = False
            else:
                # NOTE(review): np.bool is a deprecated alias of builtin bool
                # in modern numpy; dtype=bool is the forward-compatible
                # spelling.
                mask = np.zeros(values.shape, dtype=np.bool)
        new_mask = np.insert(self_ma.mask, obj, mask, axis=axis)
        new_ma = np.ma.array(new_data, mask=new_mask, copy=False)
        out = new_ma.view(self.__class__)
        out.parent_table = None
        out.indices = []
        out._copy_attrs(self)
        return out

    def _copy_attrs_slice(self, out):
        # Fixes issue #3023: when calling getitem with a MaskedArray subclass
        # the original object attributes are not copied.
        if out.__class__ is self.__class__:
            out.parent_table = None
            # we need this because __getitem__ does a shallow copy of indices
            if out.indices is self.indices:
                out.indices = []
            out._copy_attrs(self)
        return out

    def __setitem__(self, index, value):
        # Issue warning for string assignment that truncates ``value``
        if not six.PY2 and self.dtype.char == 'S':
            value = self._encode_str(value)
        if issubclass(self.dtype.type, np.character):
            # Account for a bug in np.ma.MaskedArray setitem.
            # https://github.com/numpy/numpy/issues/8624
            value = np.ma.asanyarray(value, dtype=self.dtype.type)
            # Check for string truncation after filling masked items with
            # empty (zero-length) string.  Note that filled() does not make
            # a copy if there are no masked items.
            self._check_string_truncate(value.filled(''))
        # update indices
        self.info.adjust_indices(index, value, len(self))
        # Remove this when Numpy no longer emits this warning and that
        # Numpy version becomes the minimum required version for Astropy.
        # https://github.com/astropy/astropy/issues/6285
        if MaskedArrayFutureWarning is None:
            ma.MaskedArray.__setitem__(self, index, value)
        else:
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', MaskedArrayFutureWarning)
                ma.MaskedArray.__setitem__(self, index, value)

    # We do this to make the methods show up in the API docs
    name = BaseColumn.name
    copy = BaseColumn.copy
    more = BaseColumn.more
    pprint = BaseColumn.pprint
    pformat = BaseColumn.pformat
    convert_unit_to = BaseColumn.convert_unit_to
|
{
"content_hash": "2027024da0195fc6bb9248ed2394fb0e",
"timestamp": "",
"source": "github",
"line_count": 1263,
"max_line_length": 98,
"avg_line_length": 36.933491686460805,
"alnum_prop": 0.5901987266062126,
"repo_name": "AustereCuriosity/astropy",
"id": "0432c4ed0ce00eb5a128e8834bb694a9adbbeda9",
"size": "46711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "astropy/table/column.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "366877"
},
{
"name": "C++",
"bytes": "1825"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Jupyter Notebook",
"bytes": "62553"
},
{
"name": "Python",
"bytes": "8239657"
},
{
"name": "Shell",
"bytes": "593"
},
{
"name": "TeX",
"bytes": "778"
}
],
"symlink_target": ""
}
|
import shlex
import os, sys, pickle, re
import subprocess, shutil
from collections import OrderedDict
from . import backends
from .. import modules
from .. import environment, mesonlib
from .. import build
from .. import mlog
from .. import dependencies
from .. import compilers
from ..compilers import CompilerArgs
from ..mesonlib import File, MesonException, OrderedSet
from ..mesonlib import get_meson_script, get_compiler_for_source
from .backends import CleanTrees, InstallData
from ..build import InvalidArguments
if mesonlib.is_windows():
    # cmd.exe does not understand POSIX single-quote quoting, so wrap the
    # argument in plain double quotes instead of using shlex.quote.
    # (def instead of an assigned lambda per PEP 8 E731; behavior is
    # unchanged and the function gets a useful __name__.)
    def quote_func(s):
        return '"{}"'.format(s)
    execute_wrapper = 'cmd /c'
    rmfile_prefix = 'del /f /s /q {} &&'
else:
    quote_func = shlex.quote
    execute_wrapper = ''
    rmfile_prefix = 'rm -f {} &&'
def ninja_quote(text):
    """Escape the characters ninja treats specially ('$', space, ':')."""
    # A single translate() pass maps each special character to its '$'-escaped
    # form; this is equivalent to replacing the three characters in sequence.
    text = text.translate(str.maketrans({'$': '$$', ' ': '$ ', ':': '$:'}))
    if '\n' in text:
        raise MesonException('Ninja does not support newlines in rules. '
                             'Please report this error with a test case to the Meson bug tracker.')
    return text
class NinjaBuildElement:
    """A single ninja ``build`` statement: outputs, rule, inputs, explicit
    and order-only dependencies, plus per-edge variable assignments."""

    def __init__(self, all_outputs, outfilenames, rule, infilenames):
        # Normalize single file names into one-element lists.
        self.outfilenames = [outfilenames] if isinstance(outfilenames, str) else outfilenames
        assert(isinstance(rule, str))
        self.rule = rule
        self.infilenames = [infilenames] if isinstance(infilenames, str) else infilenames
        self.deps = set()
        self.orderdeps = set()
        self.elems = []
        # Shared registry used to detect multiple producers of one output.
        self.all_outputs = all_outputs

    def add_dep(self, dep):
        # Accept either one dependency or a list of them.
        if isinstance(dep, list):
            self.deps.update(dep)
        else:
            self.deps.add(dep)

    def add_orderdep(self, dep):
        # Accept either one order-only dependency or a list of them.
        if isinstance(dep, list):
            self.orderdeps.update(dep)
        else:
            self.orderdeps.add(dep)

    def add_item(self, name, elems):
        # Variable values are always stored as a list of strings.
        if isinstance(elems, str):
            elems = [elems]
        self.elems.append((name, elems))

    def write(self, outfile):
        self.check_outputs()
        build_line = 'build %s: %s %s' % (
            ' '.join([ninja_quote(o) for o in self.outfilenames]),
            self.rule,
            ' '.join([ninja_quote(i) for i in self.infilenames]))
        if len(self.deps) > 0:
            build_line += ' | ' + ' '.join([ninja_quote(d) for d in self.deps])
        if len(self.orderdeps) > 0:
            build_line += ' || ' + ' '.join([ninja_quote(d) for d in self.orderdeps])
        build_line += '\n'
        # This is the only way I could find to make this work on all
        # platforms including Windows command shell. Slash is a dir separator
        # on Windows, too, so all characters are unambiguous and, more importantly,
        # do not require quoting.
        outfile.write(build_line.replace('\\', '/'))
        # All the entries that should remain unquoted
        raw_names = {'DEPFILE', 'DESC', 'pool', 'description'}
        for name, elems in self.elems:
            should_quote = name not in raw_names
            parts = []
            for elem in elems:
                if should_quote and elem != '&&':  # Hackety hack hack
                    quoter = lambda x: ninja_quote(quote_func(x))
                else:
                    quoter = ninja_quote
                elem = elem.replace('\\', '\\\\')
                if quote_func('') == '""':
                    elem = elem.replace('"', '\\"')
                parts.append(quoter(elem))
            outfile.write(' %s = %s\n' % (name, ' '.join(parts)))
        outfile.write('\n')

    def check_outputs(self):
        # Register every output, refusing a second producer for any of them.
        for out in self.outfilenames:
            if out in self.all_outputs:
                raise MesonException('Multiple producers for Ninja target "%s". Please rename your targets.' % out)
            self.all_outputs[out] = True
class NinjaBackend(backends.Backend):
def __init__(self, build):
    """Set up ninja-specific state on top of the generic Backend."""
    super().__init__(build)
    self.name = 'ninja'
    # File name ninja looks for by default in the build directory.
    self.ninja_filename = 'build.ninja'
    # Caches reused across target generation (populated elsewhere).
    self.target_arg_cache = {}
    self.fortran_deps = {}
    # NOTE(review): appears to be the shared registry handed to
    # NinjaBuildElement so check_outputs can reject duplicate producers
    # across the whole build -- confirm against generate_target.
    self.all_outputs = {}
def detect_vs_dep_prefix(self, tempfilename):
    '''VS writes its dependency in a locale dependent format.
    Detect the search prefix to use.'''
    # Of course there is another program called 'cl' on
    # some platforms. Let's just require that on Windows
    # cl points to msvc.
    if not mesonlib.is_windows() or shutil.which('cl') is None:
        # Non-MSVC setups need no prefix detection; just reopen for append.
        return open(tempfilename, 'a')
    # Compile a tiny probe file that includes stdio.h so cl prints a
    # dependency line we can inspect.
    filename = os.path.join(self.environment.get_scratch_dir(),
                            'incdetect.c')
    with open(filename, 'w') as f:
        f.write('''#include<stdio.h>
int dummy;
''')
    # The output of cl dependency information is language
    # and locale dependent. Any attempt at converting it to
    # Python strings leads to failure. We _must_ do this detection
    # in raw byte mode and write the result in raw bytes.
    pc = subprocess.Popen(['cl', '/showIncludes', '/c', 'incdetect.c'],
                          cwd=self.environment.get_scratch_dir(),
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdo, _) = pc.communicate()
    # We want to match 'Note: including file: ' in the line
    # 'Note: including file: d:\MyDir\include\stdio.h', however
    # different locales have different messages with a different
    # number of colons. Match up to the drive name 'd:\'.
    matchre = re.compile(rb"^(.*\s)[a-zA-Z]:\\.*stdio.h$")
    for line in stdo.split(b'\r\n'):
        match = matchre.match(line)
        if match:
            # Append the detected prefix in raw bytes, then reopen the
            # file in text mode for the rest of the generation.
            with open(tempfilename, 'ab') as binfile:
                binfile.write(b'msvc_deps_prefix = ' + match.group(1) + b'\n')
            return open(tempfilename, 'a')
    raise MesonException('Could not determine vs dep dependency prefix string.')
def generate(self, interp):
    """Write the complete build.ninja for the current build definition."""
    self.interpreter = interp
    outfilename = os.path.join(self.environment.get_build_dir(), self.ninja_filename)
    tempfilename = outfilename + '~'
    # Generate into a temp file first so an interrupted run never leaves a
    # truncated build.ninja behind (see os.replace below).
    with open(tempfilename, 'w') as outfile:
        outfile.write('# This is the build file for project "%s"\n' %
                      self.build.get_project())
        outfile.write('# It is autogenerated by the Meson build system.\n')
        outfile.write('# Do not edit by hand.\n\n')
        outfile.write('ninja_required_version = 1.5.1\n\n')
    # Reopen via detect_vs_dep_prefix, which may first append the MSVC
    # dependency-prefix line in raw bytes.
    with self.detect_vs_dep_prefix(tempfilename) as outfile:
        self.generate_rules(outfile)
        self.generate_phony(outfile)
        outfile.write('# Build rules for targets\n\n')
        for t in self.build.get_targets().values():
            self.generate_target(t, outfile)
        outfile.write('# Test rules\n\n')
        self.generate_tests(outfile)
        outfile.write('# Install rules\n\n')
        self.generate_install(outfile)
        self.generate_dist(outfile)
        if 'b_coverage' in self.environment.coredata.base_options and \
                self.environment.coredata.base_options['b_coverage'].value:
            outfile.write('# Coverage rules\n\n')
            self.generate_coverage_rules(outfile)
        outfile.write('# Suffix\n\n')
        self.generate_utils(outfile)
        self.generate_ending(outfile)
    # Only overwrite the old build file after the new one has been
    # fully created.
    os.replace(tempfilename, outfilename)
    self.generate_compdb()
# http://clang.llvm.org/docs/JSONCompilationDatabase.html
def generate_compdb(self):
    """Dump ninja's compilation database to compile_commands.json."""
    ninja_exe = environment.detect_ninja()
    # Ask ninja for the compile commands of every native and cross rule.
    rules = ['%s_COMPILER' % lang for lang in self.build.compilers]
    rules += ['%s_CROSS_COMPILER' % lang for lang in self.build.cross_compilers]
    builddir = self.environment.get_build_dir()
    try:
        jsondb = subprocess.check_output([ninja_exe, '-t', 'compdb'] + rules,
                                         cwd=builddir)
        with open(os.path.join(builddir, 'compile_commands.json'), 'wb') as f:
            f.write(jsondb)
    except Exception:
        # Best effort only: a missing database is not fatal to the build.
        mlog.warning('Could not create compilation database.')
# Get all generated headers. Any source file might need them so
# we need to add an order dependency to them.
def get_generated_headers(self, target):
header_deps = []
# XXX: Why don't we add deps to CustomTarget headers here?
for genlist in target.get_generated_sources():
if isinstance(genlist, build.CustomTarget):
continue
for src in genlist.get_outputs():
if self.environment.is_header(src):
header_deps.append(self.get_target_generated_dir(target, genlist, src))
if 'vala' in target.compilers and not isinstance(target, build.Executable):
vala_header = File.from_built_file(self.get_target_dir(target), target.vala_header)
header_deps.append(vala_header)
# Recurse and find generated headers
for dep in target.link_targets:
if isinstance(dep, (build.StaticLibrary, build.SharedLibrary)):
header_deps += self.get_generated_headers(dep)
return header_deps
def get_target_generated_sources(self, target):
"""
Returns a dictionary with the keys being the path to the file
(relative to the build directory) of that type and the value
being the GeneratorList or CustomTarget that generated it.
"""
srcs = OrderedDict()
for gensrc in target.get_generated_sources():
for s in gensrc.get_outputs():
f = self.get_target_generated_dir(target, gensrc, s)
srcs[f] = s
return srcs
def get_target_sources(self, target):
srcs = OrderedDict()
for s in target.get_sources():
# BuildTarget sources are always mesonlib.File files which are
# either in the source root, or generated with configure_file and
# in the build root
if not isinstance(s, File):
raise InvalidArguments('All sources in target {!r} must be of type mesonlib.File'.format(s))
f = s.rel_to_builddir(self.build_to_src)
srcs[f] = s
return srcs
# Languages that can mix with C or C++ but don't support unity builds yet
# because the syntax we use for unity builds is specific to C/++/ObjC/++.
# Assembly files cannot be unitified and neither can LLVM IR files
langs_cant_unity = ('d', 'fortran')
def get_target_source_can_unity(self, target, source):
if isinstance(source, File):
source = source.fname
if self.environment.is_llvm_ir(source) or \
self.environment.is_assembly(source):
return False
suffix = os.path.splitext(source)[1][1:]
for lang in self.langs_cant_unity:
if lang not in target.compilers:
continue
if suffix in target.compilers[lang].file_suffixes:
return False
return True
    def generate_target(self, target, outfile):
        """Write all ninja rules needed to build one target.

        Dispatches to the specialized generators for custom/run/Jar/Rust/
        C#/Swift targets; for the C-linkable languages it collects sources
        (pre-existing, generated, and Vala-generated C), emits compile rules
        for each, and finally emits the link rule. Idempotent per target id
        via self.processed_targets.
        """
        if isinstance(target, build.CustomTarget):
            self.generate_custom_target(target, outfile)
        if isinstance(target, build.RunTarget):
            self.generate_run_target(target, outfile)
        # The specialized generators above record the target as processed,
        # so this guard also terminates those cases.
        name = target.get_id()
        if name in self.processed_targets:
            return
        self.processed_targets[name] = True
        # Generate rules for all dependency targets
        self.process_target_dependencies(target, outfile)
        # If target uses a language that cannot link to C objects,
        # just generate for that language and return.
        if isinstance(target, build.Jar):
            self.generate_jar_target(target, outfile)
            return
        if 'rust' in target.compilers:
            self.generate_rust_target(target, outfile)
            return
        if 'cs' in target.compilers:
            self.generate_cs_target(target, outfile)
            return
        if 'swift' in target.compilers:
            self.generate_swift_target(target, outfile)
            return
        # Now we handle the following languages:
        # ObjC++, ObjC, C++, C, D, Fortran, Vala
        # target_sources:
        # Pre-existing target C/C++ sources to be built; dict of full path to
        # source relative to build root and the original File object.
        # generated_sources:
        # GeneratedList and CustomTarget sources to be built; dict of the full
        # path to source relative to build root and the generating target/list
        # vala_generated_sources:
        # Array of sources generated by valac that have to be compiled
        if 'vala' in target.compilers:
            # Sources consumed by valac are filtered out. These only contain
            # C/C++ sources, objects, generated libs, and unknown sources now.
            target_sources, generated_sources, \
                vala_generated_sources = self.generate_vala_compile(target, outfile)
        else:
            target_sources = self.get_target_sources(target)
            generated_sources = self.get_target_generated_sources(target)
            vala_generated_sources = []
        self.scan_fortran_module_outputs(target)
        # Generate rules for GeneratedLists
        self.generate_generator_list_rules(target, outfile)
        # Generate rules for building the remaining source files in this target
        outname = self.get_target_filename(target)
        obj_list = []
        use_pch = self.environment.coredata.base_options.get('b_pch', False)
        is_unity = self.is_unity(target)
        if use_pch and target.has_pch():
            pch_objects = self.generate_pch(target, outfile)
        else:
            pch_objects = []
        header_deps = []
        unity_src = []
        unity_deps = [] # Generated sources that must be built before compiling a Unity target.
        header_deps += self.get_generated_headers(target)
        if is_unity:
            # Warn about incompatible sources if a unity build is enabled
            langs = set(target.compilers.keys())
            langs_cant = langs.intersection(self.langs_cant_unity)
            if langs_cant:
                # Both names get the upper-cased language list; langs_are then
                # grows an ' is'/' are' suffix for the message below.
                langs_are = langs = ', '.join(langs_cant).upper()
                langs_are += ' are' if len(langs_cant) > 1 else ' is'
                msg = '{} not supported in Unity builds yet, so {} ' \
                      'sources in the {!r} target will be compiled normally' \
                      ''.format(langs_are, langs, target.name)
                mlog.log(mlog.red('FIXME'), msg)
        # Get a list of all generated headers that will be needed while building
        # this target's sources (generated sources and pre-existing sources).
        # This will be set as dependencies of all the target's sources. At the
        # same time, also deal with generated sources that need to be compiled.
        generated_source_files = []
        for rel_src, gensrc in generated_sources.items():
            dirpart, fnamepart = os.path.split(rel_src)
            raw_src = File(True, dirpart, fnamepart)
            if self.environment.is_source(rel_src) and not self.environment.is_header(rel_src):
                if is_unity and self.get_target_source_can_unity(target, rel_src):
                    unity_deps.append(raw_src)
                    abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
                    unity_src.append(abs_src)
                else:
                    generated_source_files.append(raw_src)
            elif self.environment.is_object(rel_src):
                obj_list.append(rel_src)
            elif self.environment.is_library(rel_src):
                pass
            else:
                # Assume anything not specifically a source file is a header. This is because
                # people generate files with weird suffixes (.inc, .fh) that they then include
                # in their source files.
                header_deps.append(raw_src)
        # These are the generated source files that need to be built for use by
        # this target. We create the Ninja build file elements for this here
        # because we need `header_deps` to be fully generated in the above loop.
        for src in generated_source_files:
            if self.environment.is_llvm_ir(src):
                o = self.generate_llvm_ir_compile(target, outfile, src)
            else:
                o = self.generate_single_compile(target, outfile, src, True,
                                                 header_deps=header_deps)
            obj_list.append(o)
        # Generate compilation targets for C sources generated from Vala
        # sources. This can be extended to other $LANG->C compilers later if
        # necessary. This needs to be separate for at least Vala
        vala_generated_source_files = []
        for src in vala_generated_sources:
            dirpart, fnamepart = os.path.split(src)
            raw_src = File(True, dirpart, fnamepart)
            if is_unity:
                unity_src.append(os.path.join(self.environment.get_build_dir(), src))
                header_deps.append(raw_src)
            else:
                # Generated targets are ordered deps because the must exist
                # before the sources compiling them are used. After the first
                # compile we get precise dependency info from dep files.
                # This should work in all cases. If it does not, then just
                # move them from orderdeps to proper deps.
                if self.environment.is_header(src):
                    header_deps.append(raw_src)
                else:
                    # We gather all these and generate compile rules below
                    # after `header_deps` (above) is fully generated
                    vala_generated_source_files.append(raw_src)
        for src in vala_generated_source_files:
            # Passing 'vala' here signifies that we want the compile
            # arguments to be specialized for C code generated by
            # valac. For instance, no warnings should be emitted.
            obj_list.append(self.generate_single_compile(target, outfile, src, 'vala', [], header_deps))
        # Generate compile targets for all the pre-existing sources for this target
        for f, src in target_sources.items():
            if not self.environment.is_header(src):
                if self.environment.is_llvm_ir(src):
                    obj_list.append(self.generate_llvm_ir_compile(target, outfile, src))
                elif is_unity and self.get_target_source_can_unity(target, src):
                    abs_src = os.path.join(self.environment.get_build_dir(),
                                           src.rel_to_builddir(self.build_to_src))
                    unity_src.append(abs_src)
                else:
                    obj_list.append(self.generate_single_compile(target, outfile, src, False, [], header_deps))
        obj_list += self.flatten_object_list(target)
        if is_unity:
            for src in self.generate_unity_files(target, unity_src):
                obj_list.append(self.generate_single_compile(target, outfile, src, True, unity_deps + header_deps))
        linker = self.determine_linker(target)
        elem = self.generate_link(target, outfile, outname, obj_list, linker, pch_objects)
        self.generate_shlib_aliases(target, self.get_target_dir(target))
        elem.write(outfile)
def process_target_dependencies(self, target, outfile):
for t in target.get_dependencies():
tname = t.get_basename() + t.type_suffix()
if tname not in self.processed_targets:
self.generate_target(t, outfile)
def custom_target_generator_inputs(self, target, outfile):
for s in target.sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, build.GeneratedList):
self.generate_genlist_for_target(s, target, outfile)
def unwrap_dep_list(self, target):
deps = []
for i in target.get_dependencies():
# FIXME, should not grab element at zero but rather expand all.
if isinstance(i, list):
i = i[0]
# Add a dependency on all the outputs of this target
for output in i.get_outputs():
deps.append(os.path.join(self.get_target_dir(i), output))
return deps
    def generate_custom_target(self, target, outfile):
        """Write the ninja build element for a custom_target().

        Chooses between a plain CUSTOM_COMMAND rule and the depfile-aware
        variant, and wraps the command in the serialized meson_exe runner
        when stdout capture, embedded newlines, or Windows DLL paths
        require it. Marks the target as processed when done.
        """
        self.custom_target_generator_inputs(target, outfile)
        (srcs, ofilenames, cmd) = self.eval_custom_target_command(target)
        deps = self.unwrap_dep_list(target)
        deps += self.get_custom_target_depend_files(target)
        desc = 'Generating {0} with a {1} command.'
        if target.build_always:
            deps.append('PHONY')
        if target.depfile is None:
            rulename = 'CUSTOM_COMMAND'
        else:
            rulename = 'CUSTOM_COMMAND_DEP'
        elem = NinjaBuildElement(self.all_outputs, ofilenames, rulename, srcs)
        elem.add_dep(deps)
        for d in target.extra_depends:
            # Add a dependency on all the outputs of this target
            for output in d.get_outputs():
                elem.add_dep(os.path.join(self.get_target_dir(d), output))
        # If the target requires capturing stdout, then use the serialized
        # executable wrapper to capture that output and save it to a file.
        #
        # If the command line requires a newline, also use the wrapper, as
        # ninja does not support them in its build rule syntax.
        #
        # Windows doesn't have -rpath, so for EXEs that need DLLs built within
        # the project, we need to set PATH so the DLLs are found. We use
        # a serialized executable wrapper for that and check if the
        # CustomTarget command needs extra paths first.
        if (target.capture or any('\n' in c for c in cmd) or
                ((mesonlib.is_windows() or mesonlib.is_cygwin()) and
                 self.determine_windows_extra_paths(target.command[0]))):
            exe_data = self.serialize_executable(target.command[0], cmd[1:],
                                                 # All targets are built from the build dir
                                                 self.environment.get_build_dir(),
                                                 capture=ofilenames[0] if target.capture else None)
            cmd = [sys.executable, self.environment.get_build_command(),
                   '--internal', 'exe', exe_data]
            cmd_type = 'meson_exe.py custom'
        else:
            cmd_type = 'custom'
        if target.depfile is not None:
            # Ensure the private dir exists so the depfile can be written.
            rel_dfile = os.path.join(self.get_target_dir(target), target.depfile)
            abs_pdir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(target))
            os.makedirs(abs_pdir, exist_ok=True)
            elem.add_item('DEPFILE', rel_dfile)
        cmd = self.replace_paths(target, cmd)
        elem.add_item('COMMAND', cmd)
        elem.add_item('description', desc.format(target.name, cmd_type))
        elem.write(outfile)
        self.processed_targets[target.name + target.type_suffix()] = True
    def generate_run_target(self, target, outfile):
        """Write the ninja build element for a run_target().

        The command is routed through the 'commandrunner' internal script;
        the executable may be a built Executable (possibly via a cross exe
        wrapper), an ExternalProgram, a CustomTarget output, or a plain
        string. Marks the target as processed when done.
        """
        cmd = [sys.executable, self.environment.get_build_command(), '--internal', 'commandrunner']
        deps = self.unwrap_dep_list(target)
        arg_strings = []
        for i in target.args:
            if isinstance(i, str):
                arg_strings.append(i)
            elif isinstance(i, (build.BuildTarget, build.CustomTarget)):
                relfname = self.get_target_filename(i)
                arg_strings.append(os.path.join(self.environment.get_build_dir(), relfname))
                deps.append(relfname)
            elif isinstance(i, mesonlib.File):
                relfname = i.rel_to_builddir(self.build_to_src)
                arg_strings.append(os.path.join(self.environment.get_build_dir(), relfname))
            else:
                raise AssertionError('Unreachable code in generate_run_target: ' + str(i))
        elem = NinjaBuildElement(self.all_outputs, target.name, 'CUSTOM_COMMAND', [])
        cmd += [self.environment.get_source_dir(),
                self.environment.get_build_dir(),
                target.subdir,
                get_meson_script(self.environment, 'mesonintrospect')]
        texe = target.command
        # Unwrap interpreter holder objects, if any.
        try:
            texe = texe.held_object
        except AttributeError:
            pass
        if isinstance(texe, build.Executable):
            abs_exe = os.path.join(self.environment.get_build_dir(), self.get_target_filename(texe))
            deps.append(self.get_target_filename(texe))
            if self.environment.is_cross_build() and \
               self.environment.cross_info.need_exe_wrapper():
                exe_wrap = self.environment.cross_info.config['binaries'].get('exe_wrapper', None)
                if exe_wrap is not None:
                    cmd += [exe_wrap]
            cmd.append(abs_exe)
        elif isinstance(texe, dependencies.ExternalProgram):
            cmd += texe.get_command()
        elif isinstance(texe, build.CustomTarget):
            deps.append(self.get_target_filename(texe))
            cmd += [os.path.join(self.environment.get_build_dir(), self.get_target_filename(texe))]
        else:
            cmd.append(target.command)
        cmd += arg_strings
        elem.add_dep(deps)
        cmd = self.replace_paths(target, cmd)
        elem.add_item('COMMAND', cmd)
        elem.add_item('description', 'Running external command %s.' % target.name)
        elem.add_item('pool', 'console')
        elem.write(outfile)
        self.processed_targets[target.name + target.type_suffix()] = True
def generate_coverage_rules(self, outfile):
e = NinjaBuildElement(self.all_outputs, 'coverage', 'CUSTOM_COMMAND', 'PHONY')
e.add_item('COMMAND', [sys.executable,
self.environment.get_build_command(),
'--internal', 'coverage',
self.environment.get_source_dir(),
self.environment.get_build_dir(),
self.environment.get_log_dir()])
e.add_item('description', 'Generates coverage reports.')
e.write(outfile)
self.generate_coverage_legacy_rules(outfile)
def generate_coverage_legacy_rules(self, outfile):
(gcovr_exe, lcov_exe, genhtml_exe) = environment.find_coverage_tools()
added_rule = False
if gcovr_exe:
added_rule = True
elem = NinjaBuildElement(self.all_outputs, 'coverage-xml', 'CUSTOM_COMMAND', '')
elem.add_item('COMMAND', [gcovr_exe, '-x', '-r', self.environment.get_source_dir(),
'-o', os.path.join(self.environment.get_log_dir(), 'coverage.xml')])
elem.add_item('DESC', 'Generating XML coverage report.')
elem.write(outfile)
elem = NinjaBuildElement(self.all_outputs, 'coverage-text', 'CUSTOM_COMMAND', '')
elem.add_item('COMMAND', [gcovr_exe, '-r', self.environment.get_source_dir(),
'-o', os.path.join(self.environment.get_log_dir(), 'coverage.txt')])
elem.add_item('DESC', 'Generating text coverage report.')
elem.write(outfile)
if lcov_exe and genhtml_exe:
added_rule = True
htmloutdir = os.path.join(self.environment.get_log_dir(), 'coveragereport')
covinfo = os.path.join(self.environment.get_log_dir(), 'coverage.info')
phony_elem = NinjaBuildElement(self.all_outputs, 'coverage-html', 'phony', os.path.join(htmloutdir, 'index.html'))
phony_elem.write(outfile)
elem = NinjaBuildElement(self.all_outputs, os.path.join(htmloutdir, 'index.html'), 'CUSTOM_COMMAND', '')
command = [lcov_exe, '--directory', self.environment.get_build_dir(),
'--capture', '--output-file', covinfo, '--no-checksum',
'&&', genhtml_exe, '--prefix', self.environment.get_build_dir(),
'--output-directory', htmloutdir, '--title', 'Code coverage',
'--legend', '--show-details', covinfo]
elem.add_item('COMMAND', command)
elem.add_item('DESC', 'Generating HTML coverage report.')
elem.write(outfile)
if not added_rule:
mlog.warning('coverage requested but neither gcovr nor lcov/genhtml found.')
    def generate_install(self, outfile):
        """Write the 'install' ninja target and pickle the InstallData.

        Collects everything installable (targets, headers, man pages, data,
        custom scripts, subdirs) into an InstallData object, serializes it
        to install.dat in the scratch dir, and emits a rule that runs the
        internal install script against that file.
        """
        install_data_file = os.path.join(self.environment.get_scratch_dir(), 'install.dat')
        if self.environment.is_cross_build():
            # The strip tool must come from the cross file when cross compiling.
            bins = self.environment.cross_info.config['binaries']
            if 'strip' not in bins:
                mlog.warning('Cross file does not specify strip binary, result will not be stripped.')
                strip_bin = None
            else:
                strip_bin = mesonlib.stringlistify(bins['strip'])
        else:
            strip_bin = self.environment.native_strip_bin
        d = InstallData(self.environment.get_source_dir(),
                        self.environment.get_build_dir(),
                        self.environment.get_prefix(),
                        strip_bin,
                        get_meson_script(self.environment, 'mesonintrospect'))
        elem = NinjaBuildElement(self.all_outputs, 'install', 'CUSTOM_COMMAND', 'PHONY')
        elem.add_dep('all')
        elem.add_item('DESC', 'Installing files.')
        elem.add_item('COMMAND', [sys.executable, self.environment.get_build_command(), '--internal', 'install', install_data_file])
        elem.add_item('pool', 'console')
        self.generate_depmf_install(d)
        self.generate_target_install(d)
        self.generate_header_install(d)
        self.generate_man_install(d)
        self.generate_data_install(d)
        self.generate_custom_install_script(d)
        self.generate_subdir_install(d)
        elem.write(outfile)
        with open(install_data_file, 'wb') as ofile:
            pickle.dump(d, ofile)
    def generate_target_install(self, d):
        """Record install entries on `d` for every installable build target.

        Resolves the install dir for each output (custom install_dir, or a
        type-based default), validates the install_dir count against the
        output count, and appends [file, dir, aliases, strip, rpath]-style
        entries to d.targets. Handles import libraries and secondary
        outputs as well.
        """
        for t in self.build.get_targets().values():
            if not t.should_install():
                continue
            # Find the installation directory.
            outdirs = t.get_custom_install_dir()
            custom_install_dir = False
            if outdirs[0] is not None and outdirs[0] is not True:
                # Either the value is set, or is set to False which means
                # we want this specific output out of many outputs to not
                # be installed.
                custom_install_dir = True
            elif isinstance(t, build.SharedModule):
                outdirs[0] = self.environment.get_shared_module_dir()
            elif isinstance(t, build.SharedLibrary):
                outdirs[0] = self.environment.get_shared_lib_dir()
            elif isinstance(t, build.StaticLibrary):
                outdirs[0] = self.environment.get_static_lib_dir()
            elif isinstance(t, build.Executable):
                outdirs[0] = self.environment.get_bindir()
            else:
                assert(isinstance(t, build.BuildTarget))
                # XXX: Add BuildTarget-specific install dir cases here
                outdirs[0] = self.environment.get_libdir()
            # Sanity-check the outputs and install_dirs
            num_outdirs, num_out = len(outdirs), len(t.get_outputs())
            if num_outdirs != 1 and num_outdirs != num_out:
                m = 'Target {!r} has {} outputs: {!r}, but only {} "install_dir"s were found.\n' \
                    "Pass 'false' for outputs that should not be installed and 'true' for\n" \
                    'using the default installation directory for an output.'
                raise MesonException(m.format(t.name, num_out, t.get_outputs(), num_outdirs))
            # Install the target output(s)
            if isinstance(t, build.BuildTarget):
                should_strip = self.get_option_for_target('strip', t)
                # Install primary build output (library/executable/jar, etc)
                # Done separately because of strip/aliases/rpath
                if outdirs[0] is not False:
                    i = [self.get_target_filename(t), outdirs[0],
                         t.get_aliases(), should_strip, t.install_rpath]
                    d.targets.append(i)
                # On toolchains/platforms that use an import library for
                # linking (separate from the shared library with all the
                # code), we need to install that too (dll.a/.lib).
                if isinstance(t, build.SharedLibrary) and t.get_import_filename():
                    if custom_install_dir:
                        # If the DLL is installed into a custom directory,
                        # install the import library into the same place so
                        # it doesn't go into a surprising place
                        implib_install_dir = outdirs[0]
                    else:
                        implib_install_dir = self.environment.get_import_lib_dir()
                    # Install the import library.
                    i = [self.get_target_filename_for_linking(t),
                         implib_install_dir,
                         # It has no aliases, should not be stripped, and
                         # doesn't have an install_rpath
                         {}, False, '']
                    d.targets.append(i)
                # Install secondary outputs. Only used for Vala right now.
                if num_outdirs > 1:
                    for output, outdir in zip(t.get_outputs()[1:], outdirs[1:]):
                        # User requested that we not install this output
                        if outdir is False:
                            continue
                        f = os.path.join(self.get_target_dir(t), output)
                        d.targets.append([f, outdir, {}, False, None])
            elif isinstance(t, build.CustomTarget):
                # If only one install_dir is specified, assume that all
                # outputs will be installed into it. This is for
                # backwards-compatibility and because it makes sense to
                # avoid repetition since this is a common use-case.
                #
                # To selectively install only some outputs, pass `false` as
                # the install_dir for the corresponding output by index
                if num_outdirs == 1 and num_out > 1:
                    for output in t.get_outputs():
                        f = os.path.join(self.get_target_dir(t), output)
                        d.targets.append([f, outdirs[0], {}, False, None])
                else:
                    for output, outdir in zip(t.get_outputs(), outdirs):
                        # User requested that we not install this output
                        if outdir is False:
                            continue
                        f = os.path.join(self.get_target_dir(t), output)
                        d.targets.append([f, outdir, {}, False, None])
def generate_custom_install_script(self, d):
result = []
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
for i in self.build.install_scripts:
exe = i['exe']
args = i['args']
fixed_args = []
for a in args:
a = a.replace('@SOURCE_ROOT@', srcdir)
a = a.replace('@BUILD_ROOT@', builddir)
fixed_args.append(a)
result.append(build.RunScript(exe, fixed_args))
d.install_scripts = result
def generate_header_install(self, d):
incroot = self.environment.get_includedir()
headers = self.build.get_headers()
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
for h in headers:
outdir = h.get_custom_install_dir()
if outdir is None:
outdir = os.path.join(incroot, h.get_install_subdir())
for f in h.get_sources():
if not isinstance(f, File):
msg = 'Invalid header type {!r} can\'t be installed'
raise MesonException(msg.format(f))
abspath = f.absolute_path(srcdir, builddir)
i = [abspath, outdir]
d.headers.append(i)
def generate_man_install(self, d):
manroot = self.environment.get_mandir()
man = self.build.get_man()
for m in man:
for f in m.get_sources():
num = f.split('.')[-1]
subdir = m.get_custom_install_dir()
if subdir is None:
subdir = os.path.join(manroot, 'man' + num)
srcabs = os.path.join(self.environment.get_source_dir(), m.get_source_subdir(), f)
dstabs = os.path.join(subdir, os.path.split(f)[1] + '.gz')
i = [srcabs, dstabs]
d.man.append(i)
def generate_data_install(self, d):
data = self.build.get_data()
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
for de in data:
assert(isinstance(de, build.Data))
subdir = de.install_dir
for f in de.sources:
assert(isinstance(f, mesonlib.File))
plain_f = os.path.split(f.fname)[1]
dstabs = os.path.join(subdir, plain_f)
i = [f.absolute_path(srcdir, builddir), dstabs, de.install_mode]
d.data.append(i)
def generate_subdir_install(self, d):
for sd in self.build.get_install_subdirs():
inst_subdir = sd.installable_subdir.rstrip('/')
idir_parts = inst_subdir.split('/')
if len(idir_parts) > 1:
subdir = os.path.join(sd.source_subdir, '/'.join(idir_parts[:-1]))
inst_dir = idir_parts[-1]
else:
subdir = sd.source_subdir
inst_dir = sd.installable_subdir
src_dir = os.path.join(self.environment.get_source_dir(), subdir)
dst_dir = os.path.join(self.environment.get_prefix(), sd.install_dir)
d.install_subdirs.append([src_dir, inst_dir, dst_dir, sd.install_mode])
def generate_tests(self, outfile):
self.serialize_tests()
test_exe = get_meson_script(self.environment, 'mesontest')
cmd = [sys.executable, '-u', test_exe, '--no-rebuild']
if not self.environment.coredata.get_builtin_option('stdsplit'):
cmd += ['--no-stdsplit']
if self.environment.coredata.get_builtin_option('errorlogs'):
cmd += ['--print-errorlogs']
elem = NinjaBuildElement(self.all_outputs, 'test', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running all tests.')
elem.add_item('pool', 'console')
elem.write(outfile)
# And then benchmarks.
cmd = [sys.executable, '-u', test_exe, '--benchmark', '--logbase',
'benchmarklog', '--num-processes=1', '--no-rebuild']
elem = NinjaBuildElement(self.all_outputs, 'benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running benchmark suite.')
elem.add_item('pool', 'console')
elem.write(outfile)
    def generate_rules(self, outfile):
        """Write all ninja 'rule' declarations: compile rules, link rules,
        the CUSTOM_COMMAND variants, and the build-file regeneration rule."""
        outfile.write('# Rules for compiling.\n\n')
        self.generate_compile_rules(outfile)
        outfile.write('# Rules for linking.\n\n')
        if self.environment.is_cross_build():
            self.generate_static_link_rules(True, outfile)
        self.generate_static_link_rules(False, outfile)
        self.generate_dynamic_link_rules(outfile)
        outfile.write('# Other rules\n\n')
        outfile.write('rule CUSTOM_COMMAND\n')
        outfile.write(' command = $COMMAND\n')
        outfile.write(' description = $DESC\n')
        outfile.write(' restat = 1\n\n')
        # Ninja errors out if you have deps = gcc but no depfile, so we must
        # have two rules for custom commands.
        outfile.write('rule CUSTOM_COMMAND_DEP\n')
        outfile.write(' command = $COMMAND\n')
        outfile.write(' description = $DESC\n')
        outfile.write(' deps = gcc\n')
        outfile.write(' depfile = $DEPFILE\n')
        outfile.write(' restat = 1\n\n')
        # Re-running meson regenerates build.ninja when meson files change.
        outfile.write('rule REGENERATE_BUILD\n')
        c = (ninja_quote(quote_func(sys.executable)),
             ninja_quote(quote_func(self.environment.get_build_command())),
             '--internal',
             'regenerate',
             ninja_quote(quote_func(self.environment.get_source_dir())),
             ninja_quote(quote_func(self.environment.get_build_dir())))
        outfile.write(" command = %s %s %s %s %s %s --backend ninja\n" % c)
        outfile.write(' description = Regenerating build files.\n')
        outfile.write(' generator = 1\n\n')
        outfile.write('\n')
def generate_phony(self, outfile):
outfile.write('# Phony build target, always out of date\n')
outfile.write('build PHONY: phony\n')
outfile.write('\n')
    def generate_jar_target(self, target, outfile):
        """Compile every Java source to .class files and emit the jar rule
        that packs them into the target JAR."""
        fname = target.get_filename()
        outname_rel = os.path.join(self.get_target_dir(target), fname)
        src_list = target.get_sources()
        class_list = []
        compiler = target.compilers['java']
        # Flag letters assembled into the jar command's option string below;
        # 'e' is only added when a main class (entry point) is configured.
        c = 'c'
        m = ''
        e = ''
        f = 'f'
        main_class = target.get_main_class()
        if main_class != '':
            e = 'e'
        # One compile rule per source; each produced .class is a dep of the jar.
        for src in src_list:
            plain_class_path = self.generate_single_java_compile(src, target, compiler, outfile)
            class_list.append(plain_class_path)
        class_dep_list = [os.path.join(self.get_target_private_dir(target), i) for i in class_list]
        jar_rule = 'java_LINKER'
        commands = [c + m + e + f]
        if e != '':
            commands.append(main_class)
        commands.append(self.get_target_filename(target))
        # Java compilation can produce an arbitrary number of output
        # class files for a single source file. Thus tell jar to just
        # grab everything in the final package.
        commands += ['-C', self.get_target_private_dir(target), '.']
        elem = NinjaBuildElement(self.all_outputs, outname_rel, jar_rule, [])
        elem.add_dep(class_dep_list)
        elem.add_item('ARGS', commands)
        elem.write(outfile)
def generate_cs_resource_tasks(self, target, outfile):
args = []
deps = []
for r in target.resources:
rel_sourcefile = os.path.join(self.build_to_src, target.subdir, r)
if r.endswith('.resources'):
a = '-resource:' + rel_sourcefile
elif r.endswith('.txt') or r.endswith('.resx'):
ofilebase = os.path.splitext(os.path.basename(r))[0] + '.resources'
ofilename = os.path.join(self.get_target_private_dir(target), ofilebase)
elem = NinjaBuildElement(self.all_outputs, ofilename, "CUSTOM_COMMAND", rel_sourcefile)
elem.add_item('COMMAND', ['resgen', rel_sourcefile, ofilename])
elem.add_item('DESC', 'Compiling resource %s.' % rel_sourcefile)
elem.write(outfile)
deps.append(ofilename)
a = '-resource:' + ofilename
else:
raise InvalidArguments('Unknown resource file %s.' % r)
args.append(a)
return args, deps
    def generate_cs_target(self, target, outfile):
        """Write the single compile+link rule for a C# executable or shared
        library, including resources and linked targets."""
        buildtype = self.get_option_for_target('buildtype', target)
        fname = target.get_filename()
        outname_rel = os.path.join(self.get_target_dir(target), fname)
        src_list = target.get_sources()
        compiler = target.compilers['cs']
        rel_srcs = [s.rel_to_builddir(self.build_to_src) for s in src_list]
        deps = []
        commands = target.extra_args.get('cs', [])
        commands += compiler.get_buildtype_args(buildtype)
        if isinstance(target, build.Executable):
            commands.append('-target:exe')
        elif isinstance(target, build.SharedLibrary):
            commands.append('-target:library')
        else:
            raise MesonException('Unknown C# target type.')
        (resource_args, resource_deps) = self.generate_cs_resource_tasks(target, outfile)
        commands += resource_args
        deps += resource_deps
        commands += compiler.get_output_args(outname_rel)
        # Reference every linked target and depend on its assembly.
        for l in target.link_targets:
            lname = os.path.join(self.get_target_dir(l), l.get_filename())
            commands += compiler.get_link_args(lname)
            deps.append(lname)
        # With debug info enabled the compiler also emits an .mdb file.
        if '-g' in commands:
            outputs = [outname_rel, outname_rel + '.mdb']
        else:
            outputs = [outname_rel]
        elem = NinjaBuildElement(self.all_outputs, outputs, 'cs_COMPILER', rel_srcs)
        elem.add_dep(deps)
        elem.add_item('ARGS', commands)
        elem.write(outfile)
def generate_single_java_compile(self, src, target, compiler, outfile):
args = []
args += compiler.get_buildtype_args(self.get_option_for_target('buildtype', target))
args += self.build.get_global_args(compiler)
args += self.build.get_project_args(compiler, target.subproject)
args += target.get_java_args()
args += compiler.get_output_args(self.get_target_private_dir(target))
for i in target.include_dirs:
for idir in i.get_incdirs():
args += ['-sourcepath', os.path.join(self.build_to_src, i.curdir, idir)]
rel_src = src.rel_to_builddir(self.build_to_src)
plain_class_path = src.fname[:-4] + 'class'
rel_obj = os.path.join(self.get_target_private_dir(target), plain_class_path)
element = NinjaBuildElement(self.all_outputs, rel_obj, compiler.get_language() + '_COMPILER', rel_src)
element.add_item('ARGS', args)
element.write(outfile)
return plain_class_path
def generate_java_link(self, outfile):
rule = 'rule java_LINKER\n'
command = ' command = jar $ARGS\n'
description = ' description = Creating JAR $out.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def determine_dep_vapis(self, target):
"""
Peek into the sources of BuildTargets we're linking with, and if any of
them was built with Vala, assume that it also generated a .vapi file of
the same name as the BuildTarget and return the path to it relative to
the build directory.
"""
result = OrderedSet()
for dep in target.link_targets:
for i in dep.sources:
if hasattr(i, 'fname'):
i = i.fname
if i.endswith('vala'):
vapiname = dep.name + '.vapi'
fullname = os.path.join(self.get_target_dir(dep), vapiname)
result.add(fullname)
break
return list(result)
    def split_vala_sources(self, t):
        """
        Splits the target's sources into .vala, .vapi, and other sources.
        Handles both pre-existing and generated sources.
        Returns a tuple (vala, vapi, others) each of which is a dictionary with
        the keys being the path to the file (relative to the build directory)
        and the value being the object that generated or represents the file.
        The 'others' element is itself a pair: (pre-existing others,
        generated others).
        """
        vala = OrderedDict()
        vapi = OrderedDict()
        others = OrderedDict()
        othersgen = OrderedDict()
        # Split pre-existing sources
        for s in t.get_sources():
            # BuildTarget sources are always mesonlib.File files which are
            # either in the source root, or generated with configure_file and
            # in the build root
            if not isinstance(s, File):
                msg = 'All sources in target {!r} must be of type ' \
                      'mesonlib.File, not {!r}'.format(t, s)
                raise InvalidArguments(msg)
            f = s.rel_to_builddir(self.build_to_src)
            if s.endswith('.vala'):
                srctype = vala
            elif s.endswith('.vapi'):
                srctype = vapi
            else:
                srctype = others
            srctype[f] = s
        # Split generated sources
        for gensrc in t.get_generated_sources():
            for s in gensrc.get_outputs():
                f = self.get_target_generated_dir(t, gensrc, s)
                if s.endswith('.vala'):
                    srctype = vala
                elif s.endswith('.vapi'):
                    srctype = vapi
                # Generated non-Vala (C/C++) sources. Won't be used for
                # generating the Vala compile rule below.
                else:
                    srctype = othersgen
                # Duplicate outputs are disastrous
                if f in srctype and srctype[f] is not gensrc:
                    msg = 'Duplicate output {0!r} from {1!r} {2!r}; ' \
                          'conflicts with {0!r} from {4!r} {3!r}' \
                          ''.format(f, type(gensrc).__name__, gensrc.name,
                                    srctype[f].name, type(srctype[f]).__name__)
                    raise InvalidArguments(msg)
                # Store 'somefile.vala': GeneratedList (or CustomTarget)
                srctype[f] = gensrc
        return vala, vapi, (others, othersgen)
def generate_vala_compile(self, target, outfile):
    """Vala is compiled into C. Set up all necessary build steps here.

    Writes one ninja build element invoking valac on all Vala/VAPI sources
    of *target*. Returns a tuple (others, othersgen, vala_c_src): the
    pre-existing non-Vala sources, the generated non-Vala sources, and the
    C files valac will emit (to be compiled by the C rules elsewhere).
    Raises InvalidArguments if the target has no Vala sources.
    """
    (vala_src, vapi_src, other_src) = self.split_vala_sources(target)
    extra_dep_files = []
    if not vala_src:
        msg = 'Vala library {!r} has no Vala source files.'
        raise InvalidArguments(msg.format(target.name))
    valac = target.compilers['vala']
    c_out_dir = self.get_target_private_dir(target)
    # C files generated by valac
    vala_c_src = []
    # Files generated by valac
    valac_outputs = []
    # All sources that are passed to valac on the commandline
    all_files = list(vapi_src.keys())
    for (vala_file, gensrc) in vala_src.items():
        all_files.append(vala_file)
        # Figure out where the Vala compiler will write the compiled C file
        # If the Vala file is in a subdir of the build dir (in our case
        # because it was generated/built by something else), the subdir path
        # components will be preserved in the output path. But if the Vala
        # file is outside the build directory, the path components will be
        # stripped and just the basename will be used.
        if isinstance(gensrc, (build.CustomTarget, build.GeneratedList)) or gensrc.is_built:
            vala_c_file = os.path.splitext(vala_file)[0] + '.c'
        else:
            vala_c_file = os.path.splitext(os.path.basename(vala_file))[0] + '.c'
        # All this will be placed inside the c_out_dir
        vala_c_file = os.path.join(c_out_dir, vala_c_file)
        vala_c_src.append(vala_c_file)
        valac_outputs.append(vala_c_file)
    args = self.generate_basic_compiler_args(target, valac)
    # Tell Valac to output everything in our private directory. Sadly this
    # means it will also preserve the directory components of Vala sources
    # found inside the build tree (generated sources).
    args += ['-d', c_out_dir]
    if not isinstance(target, build.Executable):
        # Library name
        args += ['--library=' + target.name]
        # Outputted header
        hname = os.path.join(self.get_target_dir(target), target.vala_header)
        args += ['-H', hname]
        if self.is_unity(target):
            # Without this the declarations will get duplicated in the .c
            # files and cause a build failure when all of them are
            # #include-d in one .c file.
            # https://github.com/mesonbuild/meson/issues/1969
            args += ['--use-header']
        valac_outputs.append(hname)
        # Outputted vapi file
        vapiname = os.path.join(self.get_target_dir(target), target.vala_vapi)
        # Force valac to write the vapi and gir files in the target build dir.
        # Without this, it will write it inside c_out_dir
        args += ['--vapi', os.path.join('..', target.vala_vapi)]
        valac_outputs.append(vapiname)
        target.outputs += [target.vala_header, target.vala_vapi]
        # Install header and vapi to default locations if user requests this
        if len(target.install_dir) > 1 and target.install_dir[1] is True:
            target.install_dir[1] = self.environment.get_includedir()
        if len(target.install_dir) > 2 and target.install_dir[2] is True:
            target.install_dir[2] = os.path.join(self.environment.get_datadir(), 'vala', 'vapi')
        # Generate GIR if requested
        if isinstance(target.vala_gir, str):
            girname = os.path.join(self.get_target_dir(target), target.vala_gir)
            args += ['--gir', os.path.join('..', target.vala_gir)]
            valac_outputs.append(girname)
            target.outputs.append(target.vala_gir)
            # Install GIR to default location if requested by user
            if len(target.install_dir) > 3 and target.install_dir[3] is True:
                target.install_dir[3] = os.path.join(self.environment.get_datadir(), 'gir-1.0')
    # Detect gresources and add --gresources arguments for each
    for (gres, gensrc) in other_src[1].items():
        if isinstance(gensrc, modules.GResourceTarget):
            gres_xml, = self.get_custom_target_sources(gensrc)
            args += ['--gresources=' + gres_xml]
    extra_args = []
    for a in target.extra_args.get('vala', []):
        if isinstance(a, File):
            # File-typed extra args are also compile-order dependencies.
            relname = a.rel_to_builddir(self.build_to_src)
            extra_dep_files.append(relname)
            extra_args.append(relname)
        else:
            extra_args.append(a)
    dependency_vapis = self.determine_dep_vapis(target)
    extra_dep_files += dependency_vapis
    args += extra_args
    element = NinjaBuildElement(self.all_outputs, valac_outputs,
                                valac.get_language() + '_COMPILER',
                                all_files + dependency_vapis)
    element.add_item('ARGS', args)
    element.add_dep(extra_dep_files)
    element.write(outfile)
    return other_src[0], other_src[1], vala_c_src
def generate_rust_target(self, target, outfile):
    """Write the ninja build element that compiles and links a Rust target.

    rustc compiles and links in one invocation, so a single element covers
    both, emitting dep-info (a depfile) alongside the linked output.
    Raises InvalidArguments for non-Rust sources or unknown target kinds.
    """
    rustc = target.compilers['rust']
    relsrc = []
    for i in target.get_sources():
        if not rustc.can_compile(i):
            raise InvalidArguments('Rust target %s contains a non-rust source file.' % target.get_basename())
        relsrc.append(i.rel_to_builddir(self.build_to_src))
    target_name = os.path.join(target.subdir, target.get_filename())
    args = ['--crate-type']
    # Explicit rust_crate_type wins over the kind inferred from the target
    # class (except for executables, which are always 'bin').
    if isinstance(target, build.Executable):
        cratetype = 'bin'
    elif hasattr(target, 'rust_crate_type'):
        cratetype = target.rust_crate_type
    elif isinstance(target, build.SharedLibrary):
        cratetype = 'dylib'
    elif isinstance(target, build.StaticLibrary):
        cratetype = 'rlib'
    else:
        raise InvalidArguments('Unknown target type for rustc.')
    args.append(cratetype)
    args += rustc.get_buildtype_args(self.get_option_for_target('buildtype', target))
    depfile = os.path.join(target.subdir, target.name + '.d')
    args += ['--emit', 'dep-info={}'.format(depfile), '--emit', 'link']
    args += target.get_extra_args('rust')
    args += ['-o', os.path.join(target.subdir, target.get_filename())]
    orderdeps = [os.path.join(t.subdir, t.get_filename()) for t in target.link_targets]
    # OrderedDict used as an insertion-ordered set of -L search dirs.
    linkdirs = OrderedDict()
    for d in target.link_targets:
        linkdirs[d.subdir] = True
    for d in linkdirs.keys():
        if d == '':
            d = '.'
        args += ['-L', d]
    has_shared_deps = False
    for dep in target.get_dependencies():
        if isinstance(dep, build.SharedLibrary):
            has_shared_deps = True
    if isinstance(target, build.SharedLibrary) or has_shared_deps:
        # add prefer-dynamic if any of the Rust libraries we link
        # against are dynamic, otherwise we'll end up with
        # multiple implementations of crates
        args += ['-C', 'prefer-dynamic']
        # build the usual rpath arguments as well...
        # Set runtime-paths so we can run executables without needing to set
        # LD_LIBRARY_PATH, etc in the environment. Doesn't work on Windows.
        if '/' in target.name or '\\' in target.name:
            # Target names really should not have slashes in them, but
            # unfortunately we did not check for that and some downstream projects
            # now have them. Once slashes are forbidden, remove this bit.
            target_slashname_workaround_dir = os.path.join(os.path.split(target.name)[0],
                                                           self.get_target_dir(target))
        else:
            target_slashname_workaround_dir = self.get_target_dir(target)
        rpath_args = rustc.build_rpath_args(self.environment.get_build_dir(),
                                            target_slashname_workaround_dir,
                                            self.determine_rpath_dirs(target),
                                            target.install_rpath)
        # ... but then add rustc's sysroot to account for rustup
        # installations
        for rpath_arg in rpath_args:
            args += ['-C', 'link-arg=' + rpath_arg + ':' + os.path.join(rustc.get_sysroot(), 'lib')]
    element = NinjaBuildElement(self.all_outputs, target_name, 'rust_COMPILER', relsrc)
    if len(orderdeps) > 0:
        element.add_orderdep(orderdeps)
    element.add_item('ARGS', args)
    element.add_item('targetdep', depfile)
    element.add_item('cratetype', cratetype)
    element.write(outfile)
    if isinstance(target, build.SharedLibrary):
        self.generate_shsym(outfile, target)
def swift_module_file_name(self, target):
    """Return the path of *target*'s .swiftmodule file inside its private dir."""
    modfile = self.target_swift_modulename(target) + '.swiftmodule'
    return os.path.join(self.get_target_private_dir(target), modfile)
def target_swift_modulename(self, target):
    """The Swift module name of a target is simply the target's name."""
    return target.name
def is_swift_target(self, target):
    """Return True if any of the target's sources is a Swift source file."""
    # NOTE: matches the bare suffix 'swift' (no dot), as the original did.
    return any(src.endswith('swift') for src in target.sources)
def determine_swift_dep_modules(self, target):
    """Paths of the .swiftmodule files of all Swift targets linked against."""
    return [self.swift_module_file_name(dep)
            for dep in target.link_targets
            if self.is_swift_target(dep)]
def determine_swift_dep_dirs(self, target):
    """Absolute private dirs of all link targets (where their modules live)."""
    return [self.get_target_private_dir_abs(dep) for dep in target.link_targets]
def get_swift_link_deps(self, target):
    """Filenames (relative to the build dir) of all targets linked against."""
    return [self.get_target_filename(dep) for dep in target.link_targets]
def split_swift_generated_sources(self, target):
    """Partition the target's generated sources into (.swift files, others)."""
    swift_srcs = []
    other_srcs = []
    for generated in self.get_target_generated_sources(target):
        bucket = swift_srcs if generated.endswith('.swift') else other_srcs
        bucket.append(generated)
    return swift_srcs, other_srcs
def generate_swift_target(self, target, outfile):
    """Write all build elements for a Swift target.

    Emits (1) an element compiling sources to objects, (2) an element
    producing the .swiftmodule (swiftc cannot emit both in one go), and
    (3) a link step. Only executables and static libraries are supported;
    anything else raises MesonException.
    """
    module_name = self.target_swift_modulename(target)
    swiftc = target.compilers['swift']
    abssrc = []
    abs_headers = []
    header_imports = []
    for i in target.get_sources():
        if swiftc.can_compile(i):
            relsrc = i.rel_to_builddir(self.build_to_src)
            abss = os.path.normpath(os.path.join(self.environment.get_build_dir(), relsrc))
            abssrc.append(abss)
        elif self.environment.is_header(i):
            relh = i.rel_to_builddir(self.build_to_src)
            absh = os.path.normpath(os.path.join(self.environment.get_build_dir(), relh))
            abs_headers.append(absh)
            header_imports += swiftc.get_header_import_args(absh)
        else:
            raise InvalidArguments('Swift target %s contains a non-swift source file.' % target.get_basename())
    os.makedirs(self.get_target_private_dir_abs(target), exist_ok=True)
    compile_args = swiftc.get_compile_only_args()
    compile_args += swiftc.get_module_args(module_name)
    link_args = swiftc.get_output_args(os.path.join(self.environment.get_build_dir(), self.get_target_filename(target)))
    rundir = self.get_target_private_dir(target)
    out_module_name = self.swift_module_file_name(target)
    in_module_files = self.determine_swift_dep_modules(target)
    abs_module_dirs = self.determine_swift_dep_dirs(target)
    module_includes = []
    for x in abs_module_dirs:
        module_includes += swiftc.get_include_args(x)
    link_deps = self.get_swift_link_deps(target)
    abs_link_deps = [os.path.join(self.environment.get_build_dir(), x) for x in link_deps]
    (rel_generated, _) = self.split_swift_generated_sources(target)
    abs_generated = [os.path.join(self.environment.get_build_dir(), x) for x in rel_generated]
    # We need absolute paths because swiftc needs to be invoked in a subdir
    # and this is the easiest way about it.
    objects = [] # Relative to swift invocation dir
    rel_objects = [] # Relative to build.ninja
    for i in abssrc + abs_generated:
        base = os.path.split(i)[1]
        oname = os.path.splitext(base)[0] + '.o'
        objects.append(oname)
        rel_objects.append(os.path.join(self.get_target_private_dir(target), oname))
    # Swiftc does not seem to be able to emit objects and module files in one go.
    elem = NinjaBuildElement(self.all_outputs, rel_objects,
                             'swift_COMPILER',
                             abssrc)
    elem.add_dep(in_module_files + rel_generated)
    elem.add_dep(abs_headers)
    elem.add_item('ARGS', compile_args + header_imports + abs_generated + module_includes)
    elem.add_item('RUNDIR', rundir)
    elem.write(outfile)
    # Second invocation: same inputs, but emit the module file instead.
    elem = NinjaBuildElement(self.all_outputs, out_module_name,
                             'swift_COMPILER',
                             abssrc)
    elem.add_dep(in_module_files + rel_generated)
    elem.add_item('ARGS', compile_args + abs_generated + module_includes + swiftc.get_mod_gen_args())
    elem.add_item('RUNDIR', rundir)
    elem.write(outfile)
    if isinstance(target, build.StaticLibrary):
        elem = self.generate_link(target, outfile, self.get_target_filename(target),
                                  rel_objects, self.build.static_linker)
        elem.write(outfile)
    elif isinstance(target, build.Executable):
        elem = NinjaBuildElement(self.all_outputs, self.get_target_filename(target), 'swift_COMPILER', [])
        elem.add_dep(rel_objects)
        elem.add_dep(link_deps)
        elem.add_item('ARGS', link_args + swiftc.get_std_exe_link_args() + objects + abs_link_deps)
        elem.add_item('RUNDIR', rundir)
        elem.write(outfile)
    else:
        raise MesonException('Swift supports only executable and static library targets.')
def generate_static_link_rules(self, is_cross, outfile):
    """Write the STATIC[_CROSS]_LINKER ninja rule.

    The Java link rule is also emitted here (native case only), and the
    method returns early without writing anything when no static linker is
    configured.
    """
    if 'java' in self.build.compilers:
        if not is_cross:
            self.generate_java_link(outfile)
    if is_cross:
        if self.environment.cross_info.need_cross_compiler():
            static_linker = self.build.static_cross_linker
        else:
            # Native toolchain masquerading as the cross one: reuse the
            # native static linker.
            static_linker = self.build.static_linker
        crstr = '_CROSS'
    else:
        static_linker = self.build.static_linker
        crstr = ''
    if static_linker is None:
        return
    rule = 'rule STATIC%s_LINKER\n' % crstr
    # We don't use @file.rsp on Windows with ArLinker because llvm-ar and
    # gcc-ar blindly pass the --plugin argument to `ar` and you cannot pass
    # options as arguments while using the @file.rsp syntax.
    # See: https://github.com/mesonbuild/meson/issues/1646
    if mesonlib.is_windows() and not isinstance(static_linker, compilers.ArLinker):
        command_template = ''' command = {executable} @$out.rsp
 rspfile = $out.rsp
 rspfile_content = $LINK_ARGS {output_args} $in
'''
    else:
        command_template = ' command = {executable} $LINK_ARGS {output_args} $in\n'
    cmdlist = []
    # FIXME: Must normalize file names with pathlib.Path before writing
    # them out to fix this properly on Windows. See:
    # https://github.com/mesonbuild/meson/issues/1517
    # https://github.com/mesonbuild/meson/issues/1526
    if isinstance(static_linker, compilers.ArLinker) and not mesonlib.is_windows():
        # `ar` has no options to overwrite archives. It always appends,
        # which is never what we want. Delete an existing library first if
        # it exists. https://github.com/mesonbuild/meson/issues/1355
        cmdlist = [execute_wrapper, rmfile_prefix.format('$out')]
    cmdlist += static_linker.get_exelist()
    command = command_template.format(
        executable=' '.join(cmdlist),
        output_args=' '.join(static_linker.get_output_args('$out')))
    description = ' description = Linking static target $out.\n\n'
    outfile.write(rule)
    outfile.write(command)
    outfile.write(description)
def generate_dynamic_link_rules(self, outfile):
    """Write per-language dynamic link rules plus the SHSYM rule.

    Java, Vala, Rust and C# are skipped: they link through their own
    compile step or have dedicated rules elsewhere.
    """
    ctypes = [(self.build.compilers, False)]
    if self.environment.is_cross_build():
        if self.environment.cross_info.need_cross_compiler():
            ctypes.append((self.build.cross_compilers, True))
        else:
            # Native compiler masquerades as the cross compiler.
            ctypes.append((self.build.compilers, True))
    else:
        # NOTE(review): in a purely native build cross_compilers is
        # presumably empty, making this append a no-op — confirm.
        ctypes.append((self.build.cross_compilers, True))
    for (complist, is_cross) in ctypes:
        for langname, compiler in complist.items():
            if langname == 'java' \
                    or langname == 'vala' \
                    or langname == 'rust' \
                    or langname == 'cs':
                continue
            crstr = ''
            cross_args = []
            if is_cross:
                crstr = '_CROSS'
                try:
                    cross_args = self.environment.cross_info.config['properties'][langname + '_link_args']
                except KeyError:
                    pass
            rule = 'rule %s%s_LINKER\n' % (langname, crstr)
            if mesonlib.is_windows():
                # Use a response file on Windows to dodge command-line
                # length limits.
                command_template = ''' command = {executable} @$out.rsp
 rspfile = $out.rsp
 rspfile_content = $ARGS {output_args} $in $LINK_ARGS {cross_args} $aliasing
'''
            else:
                command_template = ' command = {executable} $ARGS {output_args} $in $LINK_ARGS {cross_args} $aliasing\n'
            command = command_template.format(
                executable=' '.join(compiler.get_linker_exelist()),
                cross_args=' '.join(cross_args),
                output_args=' '.join(compiler.get_linker_output_args('$out'))
            )
            description = ' description = Linking target $out.'
            outfile.write(rule)
            outfile.write(command)
            outfile.write(description)
            outfile.write('\n')
            outfile.write('\n')
    # Rule for extracting exported symbols of shared libraries; restat = 1
    # lets ninja skip relinking dependents when the symbol list is unchanged.
    symrule = 'rule SHSYM\n'
    symcmd = ' command = "%s" "%s" %s %s %s %s $CROSS\n' % (ninja_quote(sys.executable),
                                                            self.environment.get_build_command(),
                                                            '--internal',
                                                            'symbolextractor',
                                                            '$in',
                                                            '$out')
    synstat = ' restat = 1\n'
    syndesc = ' description = Generating symbol file $out.\n'
    outfile.write(symrule)
    outfile.write(symcmd)
    outfile.write(synstat)
    outfile.write(syndesc)
    outfile.write('\n')
def generate_java_compile_rule(self, compiler, outfile):
    """Write the ninja rule for compiling Java sources."""
    invoc = ' '.join(ninja_quote(part) for part in compiler.get_exelist())
    outfile.write('rule %s_COMPILER\n' % compiler.get_language())
    outfile.write(' command = %s $ARGS $in\n' % invoc)
    outfile.write(' description = Compiling Java object $in.\n')
    outfile.write('\n')
def generate_cs_compile_rule(self, compiler, outfile):
    """Write the ninja rule for compiling C# targets."""
    invoc = ' '.join(ninja_quote(part) for part in compiler.get_exelist())
    outfile.write('rule %s_COMPILER\n' % compiler.get_language())
    outfile.write(' command = %s $ARGS $in\n' % invoc)
    outfile.write(' description = Compiling C Sharp target $out.\n')
    outfile.write('\n')
def generate_vala_compile_rules(self, compiler, outfile):
    """Write the ninja rule for compiling Vala sources to C."""
    invoc = ' '.join(ninja_quote(part) for part in compiler.get_exelist())
    outfile.write('rule %s_COMPILER\n' % compiler.get_language())
    outfile.write(' command = %s $ARGS $in\n' % invoc)
    outfile.write(' description = Compiling Vala source $in.\n')
    # ValaC does this always to take advantage of it.
    outfile.write(' restat = 1\n')
    outfile.write('\n')
def generate_rust_compile_rules(self, compiler, outfile):
    """Write the ninja rule for compiling Rust crates.

    rustc emits gcc-style dep-info, so the rule declares a gcc-format
    depfile taken from the per-target $targetdep variable.
    """
    invoc = ' '.join(ninja_quote(part) for part in compiler.get_exelist())
    outfile.write('rule %s_COMPILER\n' % compiler.get_language())
    outfile.write(' command = %s $ARGS $in\n' % invoc)
    outfile.write(' description = Compiling Rust source $in.\n')
    outfile.write(' depfile = $targetdep\n')
    outfile.write(' deps = gcc\n')
    outfile.write('\n')
def generate_swift_compile_rules(self, compiler, outfile):
    """Write the ninja rule for compiling Swift sources.

    swiftc must run from the target's private dir, so the compiler is
    wrapped in Meson's internal 'dirchanger' helper with $RUNDIR.
    """
    wrapper = [ninja_quote(sys.executable),
               ninja_quote(self.environment.get_build_command()),
               '--internal',
               'dirchanger',
               '$RUNDIR']
    invoc = ' '.join(wrapper) + ' ' + \
            ' '.join(ninja_quote(part) for part in compiler.get_exelist())
    outfile.write('rule %s_COMPILER\n' % compiler.get_language())
    outfile.write(' command = %s $ARGS $in\n' % invoc)
    outfile.write(' description = Compiling Swift source $in.\n')
    outfile.write('\n')
def generate_fortran_dep_hack(self, outfile):
    """Write the no-op FORTRAN_DEP_HACK ninja rule.

    Fortran module dependencies need an extra no-op edge with restat = 1
    to stop ninja from rebuilding dependents when module files are
    unchanged; see the ticket links in the rule header below.
    """
    # 'cmd /C ""' is the Windows no-op equivalent of Unix 'true'.
    if mesonlib.is_windows():
        cmd = 'cmd /C ""'
    else:
        cmd = 'true'
    template = '''# Workaround for these issues:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485
rule FORTRAN_DEP_HACK
 command = %s
 description = Dep hack
 restat = 1

'''
    outfile.write(template % cmd)
def generate_llvm_ir_compile_rule(self, compiler, is_cross, outfile):
    """Write the llvm_ir[_CROSS]_COMPILER ninja rule (clang only)."""
    # The rule is shared by all clang instances in the build; emit it once.
    if getattr(self, 'created_llvm_ir_rule', False):
        return
    rule = 'rule llvm_ir{}_COMPILER\n'.format('_CROSS' if is_cross else '')
    if mesonlib.is_windows():
        # Response file avoids Windows command-line length limits.
        command_template = ' command = {executable} @$out.rsp\n' \
                           ' rspfile = $out.rsp\n' \
                           ' rspfile_content = {cross_args} $ARGS {output_args} {compile_only_args} $in\n'
    else:
        command_template = ' command = {executable} {cross_args} $ARGS {output_args} {compile_only_args} $in\n'
    command = command_template.format(
        executable=' '.join([ninja_quote(i) for i in compiler.get_exelist()]),
        cross_args=' '.join(self.get_cross_info_lang_args(compiler.language, is_cross)),
        output_args=' '.join(compiler.get_output_args('$out')),
        compile_only_args=' '.join(compiler.get_compile_only_args())
    )
    description = ' description = Compiling LLVM IR object $in.\n'
    outfile.write(rule)
    outfile.write(command)
    outfile.write(description)
    outfile.write('\n')
    self.created_llvm_ir_rule = True
def get_cross_info_lang_args(self, lang, is_cross):
    """Return the '<lang>_args' list from the cross file, or [] if absent
    or when not cross compiling."""
    if not is_cross:
        return []
    properties = self.environment.cross_info.config.get('properties', {})
    return properties.get(lang + '_args', [])
def generate_compile_rule_for(self, langname, compiler, is_cross, outfile):
    """Write the ninja compile rule for one (language, compiler) pair.

    Java/C#/Vala/Rust/Swift are delegated to their dedicated rule writers
    (native case only). Fortran additionally gets the dep-hack rule.
    """
    if langname == 'java':
        if not is_cross:
            self.generate_java_compile_rule(compiler, outfile)
        return
    if langname == 'cs':
        if not is_cross:
            self.generate_cs_compile_rule(compiler, outfile)
        return
    if langname == 'vala':
        if not is_cross:
            self.generate_vala_compile_rules(compiler, outfile)
        return
    if langname == 'rust':
        if not is_cross:
            self.generate_rust_compile_rules(compiler, outfile)
        return
    if langname == 'swift':
        if not is_cross:
            self.generate_swift_compile_rules(compiler, outfile)
        return
    if langname == 'fortran':
        self.generate_fortran_dep_hack(outfile)
    if is_cross:
        crstr = '_CROSS'
    else:
        crstr = ''
    rule = 'rule %s%s_COMPILER\n' % (langname, crstr)
    depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
    # Quote everything except the ninja variables themselves.
    quoted_depargs = []
    for d in depargs:
        if d != '$out' and d != '$in':
            d = quote_func(d)
        quoted_depargs.append(d)
    cross_args = self.get_cross_info_lang_args(langname, is_cross)
    if mesonlib.is_windows():
        # Response file avoids Windows command-line length limits.
        command_template = ''' command = {executable} @$out.rsp
 rspfile = $out.rsp
 rspfile_content = {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in
'''
    else:
        command_template = ' command = {executable} {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in\n'
    command = command_template.format(
        executable=' '.join([ninja_quote(i) for i in compiler.get_exelist()]),
        cross_args=' '.join(cross_args),
        dep_args=' '.join(quoted_depargs),
        output_args=' '.join(compiler.get_output_args('$out')),
        compile_only_args=' '.join(compiler.get_compile_only_args())
    )
    description = ' description = Compiling %s object $out.\n' % langname.title()
    if compiler.get_id() == 'msvc':
        # MSVC dependency info is parsed from /showIncludes output.
        deps = ' deps = msvc\n'
    else:
        deps = ' deps = gcc\n'
        deps += ' depfile = $DEPFILE\n'
    outfile.write(rule)
    outfile.write(command)
    outfile.write(deps)
    outfile.write(description)
    outfile.write('\n')
def generate_pch_rule_for(self, langname, compiler, is_cross, outfile):
    """Write the ninja rule for precompiling headers.

    Only C and C++ support precompiled headers; all other languages are
    silently skipped.
    """
    if langname != 'c' and langname != 'cpp':
        return
    if is_cross:
        crstr = '_CROSS'
    else:
        crstr = ''
    rule = 'rule %s%s_PCH\n' % (langname, crstr)
    depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
    # Consistency fix: use the shared helper (as generate_compile_rule_for
    # does) instead of duplicating the cross-file try/except lookup here.
    cross_args = self.get_cross_info_lang_args(langname, is_cross)
    # Quote everything except the ninja variables themselves.
    quoted_depargs = []
    for d in depargs:
        if d != '$out' and d != '$in':
            d = quote_func(d)
        quoted_depargs.append(d)
    if compiler.get_id() == 'msvc':
        # MSVC determines the PCH output location itself.
        output = ''
    else:
        output = ' '.join(compiler.get_output_args('$out'))
    command = " command = {executable} {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in\n".format(
        executable=' '.join(compiler.get_exelist()),
        cross_args=' '.join(cross_args),
        dep_args=' '.join(quoted_depargs),
        output_args=output,
        compile_only_args=' '.join(compiler.get_compile_only_args())
    )
    description = ' description = Precompiling header %s.\n' % '$in'
    if compiler.get_id() == 'msvc':
        # MSVC dependency info is parsed from /showIncludes output.
        deps = ' deps = msvc\n'
    else:
        deps = ' deps = gcc\n'
        deps += ' depfile = $DEPFILE\n'
    outfile.write(rule)
    outfile.write(command)
    outfile.write(deps)
    outfile.write(description)
    outfile.write('\n')
def generate_compile_rules(self, outfile):
    """Emit compile, PCH and (for clang) LLVM IR rules for every compiler."""
    def write_rules_for(compiler_dict, is_cross):
        for langname, compiler in compiler_dict.items():
            if compiler.get_id() == 'clang':
                self.generate_llvm_ir_compile_rule(compiler, is_cross, outfile)
            self.generate_compile_rule_for(langname, compiler, is_cross, outfile)
            self.generate_pch_rule_for(langname, compiler, is_cross, outfile)

    write_rules_for(self.build.compilers, False)
    if self.environment.is_cross_build():
        # In case we are going a target-only build, make the native compilers
        # masquerade as cross compilers.
        if self.environment.cross_info.need_cross_compiler():
            cclist = self.build.cross_compilers
        else:
            cclist = self.build.compilers
        write_rules_for(cclist, True)
    outfile.write('\n')
def generate_generator_list_rules(self, target, outfile):
    """Write build elements for GeneratedLists only; CustomTargets have
    already written their own rules."""
    for genlist in target.get_generated_sources():
        if not isinstance(genlist, build.CustomTarget):
            self.generate_genlist_for_target(genlist, target, outfile)
def replace_paths(self, target, args):
    """Substitute Meson's @...@ directory placeholders in a list of args."""
    substitutions = [
        ('@SOURCE_DIR@', self.build_to_src),
        ('@BUILD_DIR@', self.get_target_private_dir(target)),
        ('@CURRENT_SOURCE_DIR@', self.get_target_source_dir(target)),
        ('@SOURCE_ROOT@', self.build_to_src),
        ('@BUILD_ROOT@', '.'),
    ]
    replaced = []
    for arg in args:
        for placeholder, value in substitutions:
            arg = arg.replace(placeholder, value)
        replaced.append(arg)
    return replaced
def generate_genlist_for_target(self, genlist, target, outfile):
    """Write one ninja build element per input file of a generator().

    Substitutes @INPUT@/@OUTPUT@/@DEPFILE@ and the directory placeholders
    in the generator's argument list for each input.

    Fixes: replaced the index-based `for i in range(len(...))` loop with
    enumerate, and removed two unused locals (`source_target_dir`,
    `relout`) the original computed and never read.
    """
    generator = genlist.get_generator()
    exe = generator.get_exe()
    exe_arr = self.exe_object_to_cmd_array(exe)
    infilelist = genlist.get_inputs()
    outfilelist = genlist.get_outputs()
    base_args = generator.get_arglist()
    extra_dependencies = [os.path.join(self.build_to_src, i) for i in genlist.extra_depends]
    for i, curfile in enumerate(infilelist):
        # @OUTPUT@ can only be substituted when there is exactly one output.
        if len(generator.outputs) == 1:
            sole_output = os.path.join(self.get_target_private_dir(target), outfilelist[i])
        else:
            sole_output = ''
        infilename = curfile.rel_to_builddir(self.build_to_src)
        outfiles = genlist.get_outputs_for(curfile)
        outfiles = [os.path.join(self.get_target_private_dir(target), of) for of in outfiles]
        if generator.depfile is None:
            rulename = 'CUSTOM_COMMAND'
            args = base_args
        else:
            rulename = 'CUSTOM_COMMAND_DEP'
            depfilename = generator.get_dep_outname(infilename)
            depfile = os.path.join(self.get_target_private_dir(target), depfilename)
            args = [x.replace('@DEPFILE@', depfile) for x in base_args]
        args = [x.replace("@INPUT@", infilename).replace('@OUTPUT@', sole_output)
                for x in args]
        args = self.replace_outputs(args, self.get_target_private_dir(target), outfilelist)
        # We have consumed output files, so drop them from the list of remaining outputs.
        if sole_output == '':
            outfilelist = outfilelist[len(generator.outputs):]
        args = self.replace_paths(target, args)
        cmdlist = exe_arr + self.replace_extra_args(args, genlist)
        elem = NinjaBuildElement(self.all_outputs, outfiles, rulename, infilename)
        if generator.depfile is not None:
            elem.add_item('DEPFILE', depfile)
        if len(extra_dependencies) > 0:
            elem.add_dep(extra_dependencies)
        elem.add_item('DESC', 'Generating $out')
        if isinstance(exe, build.BuildTarget):
            # The generator executable itself is built by us: depend on it.
            elem.add_dep(self.get_target_filename(exe))
        elem.add_item('COMMAND', cmdlist)
        elem.write(outfile)
def scan_fortran_module_outputs(self, target):
    """Scan the target's Fortran sources for 'module X' declarations and
    record which source provides which module in self.fortran_deps,
    keyed by the target's basename."""
    compiler = self.build.compilers.get('fortran')
    if compiler is None:
        self.fortran_deps[target.get_basename()] = {}
        return
    modre = re.compile(r"\s*module\s+(\w+)", re.IGNORECASE)
    module_files = {}
    for s in target.get_sources():
        # FIXME, does not work for Fortran sources generated by
        # custom_target() and generator() as those are run after
        # the configuration (configure_file() is OK)
        if not compiler.can_compile(s):
            continue
        filename = s.absolute_path(self.environment.get_source_dir(),
                                   self.environment.get_build_dir())
        with open(filename) as f:
            for line in f:
                modmatch = modre.match(line)
                if modmatch is None:
                    continue
                modname = modmatch.group(1).lower()
                # 'MODULE PROCEDURE' is a construct, not a module definition.
                if modname == 'procedure':
                    continue
                if modname in module_files:
                    raise InvalidArguments(
                        'Namespace collision: module %s defined in '
                        'two files %s and %s.' %
                        (modname, module_files[modname], s))
                module_files[modname] = s
    self.fortran_deps[target.get_basename()] = module_files
def get_fortran_deps(self, compiler, src, target):
    """Scan Fortran source file *src* for 'use X' statements and return the
    module files (paths inside the target private dir) it depends on.

    Relies on self.fortran_deps having been filled by
    scan_fortran_module_outputs for this target.
    """
    mod_files = []
    usere = re.compile(r"\s*use\s+(\w+)", re.IGNORECASE)
    dirname = self.get_target_private_dir(target)
    tdeps = self.fortran_deps[target.get_basename()]
    with open(src) as f:
        for line in f:
            usematch = usere.match(line)
            if usematch is not None:
                usename = usematch.group(1).lower()
                if usename not in tdeps:
                    # The module is not provided by any source file. This
                    # is due to:
                    # a) missing file/typo/etc
                    # b) using a module provided by the compiler, such as
                    # OpenMP
                    # There's no easy way to tell which is which (that I
                    # know of) so just ignore this and go on. Ideally we
                    # would print a warning message to the user but this is
                    # a common occurrence, which would lead to lots of
                    # distracting noise.
                    continue
                mod_source_file = tdeps[usename]
                # Check if a source uses a module it exports itself.
                # Potential bug if multiple targets have a file with
                # the same name.
                if mod_source_file.fname == os.path.split(src)[1]:
                    continue
                mod_name = compiler.module_name_to_filename(
                    usematch.group(1))
                mod_files.append(os.path.join(dirname, mod_name))
    return mod_files
def get_cross_stdlib_args(self, target, compiler):
    """Return -nostdinc-style args when cross compiling with a custom
    stdlib for the compiler's language, else []."""
    if not target.is_cross:
        return []
    if not self.environment.cross_info.has_stdlib(compiler.language):
        return []
    return compiler.get_no_stdinc_args()
def get_compile_debugfile_args(self, compiler, target, objfile):
    """Return the compiler args selecting the debug (PDB) file; MSVC only.

    MSVC's PDB handling is documented exactly nowhere; the following was
    reverse engineered. Each object file records the path of the PDB it
    belongs to, which can be either the final target PDB or a per-object
    PDB. Pointing every object at the target PDB serializes compilation
    (each step locks the PDB for writing) and ties objects to one target,
    so per-object PDBs are the sensible choice (and what Unix does) — but
    precompiled headers require source and PCH debug info to land in the
    *same* PDB, which breaks per-object PDBs. Since the two schemes are
    mutually exclusive, pick per target: targets with a PCH write to the
    target PDB, everything else to per-object PDBs. Consequently PCH and
    object extraction cannot be combined in a single target.

    PDB names also collide: foo.exe, foo.dll and a static foo.lib would
    all want foo.pdb (we name static libraries libfoo.a to avoid the
    clash with the DLL's export library), and VC generates further files
    from the target basename that stomp on each other. CMake sidesteps
    this by not generating PDBs for static libraries at all and by
    allowing only one target named "foo" regardless of kind — at the cost
    of forbidding an executable foo using libfoo.so, a common Unix idiom.
    If all of the above is wrong and this is actually doable, patches are
    welcome.
    """
    if compiler.id != 'msvc':
        return []
    if target.has_pch():
        tfilename = self.get_target_filename_abs(target)
        return compiler.get_compile_debugfile_args(tfilename, pch=True)
    return compiler.get_compile_debugfile_args(objfile, pch=False)
def get_link_debugfile_args(self, linker, target, outname):
    """Delegate to the linker for its debug-file (e.g. PDB) link arguments."""
    debug_args = linker.get_link_debugfile_args(outname)
    return debug_args
def generate_llvm_ir_compile(self, target, outfile, src):
    """Write a build element compiling *src* to an LLVM IR object.

    Returns the object path relative to the build dir. Raises
    InvalidArguments for source types other than mesonlib.File.
    """
    compiler = get_compiler_for_source(target.compilers.values(), src)
    commands = CompilerArgs(compiler)
    # Compiler args for compiling this target
    commands += compilers.get_base_compile_args(self.environment.coredata.base_options,
                                                compiler)
    if isinstance(src, File):
        if src.is_built:
            src_filename = os.path.join(src.subdir, src.fname)
        else:
            src_filename = src.fname
    elif os.path.isabs(src):
        src_filename = os.path.basename(src)
    else:
        src_filename = src
    # Flatten the path components into a single object name inside the
    # target's private dir.
    obj_basename = src_filename.replace('/', '_').replace('\\', '_')
    rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
    rel_obj += '.' + self.environment.get_object_suffix()
    commands += self.get_compile_debugfile_args(compiler, target, rel_obj)
    if isinstance(src, File) and src.is_built:
        rel_src = src.fname
    elif isinstance(src, File):
        rel_src = src.rel_to_builddir(self.build_to_src)
    else:
        raise InvalidArguments('Invalid source type: {!r}'.format(src))
    # Write the Ninja build command
    compiler_name = 'llvm_ir{}_COMPILER'.format('_CROSS' if target.is_cross else '')
    element = NinjaBuildElement(self.all_outputs, rel_obj, compiler_name, rel_src)
    # Convert from GCC-style link argument naming to the naming used by the
    # current compiler.
    commands = commands.to_native()
    element.add_item('ARGS', commands)
    element.write(outfile)
    return rel_obj
def get_source_dir_include_args(self, target, compiler):
curdir = target.get_subdir()
tmppath = os.path.normpath(os.path.join(self.build_to_src, curdir))
return compiler.get_include_args(tmppath, False)
def get_build_dir_include_args(self, target, compiler):
curdir = target.get_subdir()
if curdir == '':
curdir = '.'
return compiler.get_include_args(curdir, False)
def get_custom_target_dir_include_args(self, target, compiler):
custom_target_include_dirs = []
for i in target.get_generated_sources():
# Generator output goes into the target private dir which is
# already in the include paths list. Only custom targets have their
# own target build dir.
if not isinstance(i, build.CustomTarget):
continue
idir = self.get_target_dir(i)
if idir not in custom_target_include_dirs:
custom_target_include_dirs.append(idir)
incs = []
for i in custom_target_include_dirs:
incs += compiler.get_include_args(i, False)
return incs
    def _generate_single_compile(self, target, compiler, is_generated=False):
        """Assemble the per-(target, compiler) compile argument list shared by
        every source file of *target*; per-source args are added by the caller.

        The order of the += blocks below is the override order — later
        additions are meant to win over earlier ones — so do not reorder.
        """
        base_proxy = backends.OptionOverrideProxy(target.option_overrides,
                                                  self.environment.coredata.base_options)
        # Create an empty commands list, and start adding arguments from
        # various sources in the order in which they must override each other
        commands = CompilerArgs(compiler)
        # Add compiler args for compiling this target derived from 'base' build
        # options passed on the command-line, in default_options, etc.
        # These have the lowest priority.
        commands += compilers.get_base_compile_args(base_proxy,
                                                    compiler)
        # The code generated by valac is usually crap and has tons of unused
        # variables and such, so disable warnings for Vala C sources.
        no_warn_args = (is_generated == 'vala')
        # Add compiler args and include paths from several sources; defaults,
        # build options, external dependencies, etc.
        commands += self.generate_basic_compiler_args(target, compiler, no_warn_args)
        # Add include dirs from the `include_directories:` kwarg on the target
        # and from `include_directories:` of internal deps of the target.
        #
        # Target include dirs should override internal deps include dirs.
        # This is handled in BuildTarget.process_kwargs()
        #
        # Include dirs from internal deps should override include dirs from
        # external deps and must maintain the order in which they are specified.
        # Hence, we must reverse the list so that the order is preserved.
        for i in reversed(target.get_include_dirs()):
            basedir = i.get_curdir()
            for d in i.get_incdirs():
                # Avoid superfluous '/.' at the end of paths when d is '.'
                if d not in ('', '.'):
                    expdir = os.path.join(basedir, d)
                else:
                    expdir = basedir
                srctreedir = os.path.join(self.build_to_src, expdir)
                # Add source subdir first so that the build subdir overrides it
                sargs = compiler.get_include_args(srctreedir, i.is_system)
                commands += sargs
                # There may be include dirs where a build directory has not been
                # created for some source dir. For example if someone does this:
                #
                # inc = include_directories('foo/bar/baz')
                #
                # But never subdir()s into the actual dir.
                if os.path.isdir(os.path.join(self.environment.get_build_dir(), expdir)):
                    bargs = compiler.get_include_args(expdir, i.is_system)
                else:
                    bargs = []
                commands += bargs
            for d in i.get_extra_build_dirs():
                commands += compiler.get_include_args(d, i.is_system)
        # Add per-target compile args, f.ex, `c_args : ['-DFOO']`. We set these
        # near the end since these are supposed to override everything else.
        commands += self.escape_extra_args(compiler,
                                           target.get_extra_args(compiler.get_language()))
        # Add source dir and build dir. Project-specific and target-specific
        # include paths must override per-target compile args, include paths
        # from external dependencies, internal dependencies, and from
        # per-target `include_directories:`
        #
        # We prefer headers in the build dir and the custom target dir over the
        # source dir since, for instance, the user might have an
        # srcdir == builddir Autotools build in their source tree. Many
        # projects that are moving to Meson have both Meson and Autotools in
        # parallel as part of the transition.
        commands += self.get_source_dir_include_args(target, compiler)
        commands += self.get_custom_target_dir_include_args(target, compiler)
        commands += self.get_build_dir_include_args(target, compiler)
        # Finally add the private dir for the target to the include path. This
        # must override everything else and must be the final path added.
        commands += compiler.get_include_args(self.get_target_private_dir(target), False)
        return commands
    def generate_single_compile(self, target, outfile, src, is_generated=False, header_deps=[], order_deps=[]):
        """
        Compiles C/C++, ObjC/ObjC++, Fortran, and D sources.

        Writes the Ninja build element for compiling *src* as part of *target*
        to *outfile* and returns the build-dir-relative object path.
        """
        # NOTE(review): header_deps/order_deps default to shared mutable
        # lists; they are only iterated here, never mutated, but None
        # defaults would be the safer idiom.
        if isinstance(src, str) and src.endswith('.h'):
            raise AssertionError('BUG: sources should not contain headers {!r}'.format(src))
        compiler = get_compiler_for_source(target.compilers.values(), src)
        # The target-wide argument list is identical for every source with the
        # same (target, compiler, is_generated), so cache it and take a copy
        # for the per-source additions below.
        key = (target, compiler, is_generated)
        if key in self.target_arg_cache:
            commands = self.target_arg_cache[key]
        else:
            commands = self._generate_single_compile(target, compiler, is_generated)
            self.target_arg_cache[key] = commands
        commands = CompilerArgs(commands.compiler, commands)
        # Work out both the build-dir-relative and the absolute source path.
        if isinstance(src, mesonlib.File) and src.is_built:
            rel_src = os.path.join(src.subdir, src.fname)
            if os.path.isabs(rel_src):
                assert(rel_src.startswith(self.environment.get_build_dir()))
                rel_src = rel_src[len(self.environment.get_build_dir()) + 1:]
            abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
        elif isinstance(src, mesonlib.File):
            rel_src = src.rel_to_builddir(self.build_to_src)
            abs_src = src.absolute_path(self.environment.get_source_dir(),
                                        self.environment.get_build_dir())
        elif is_generated:
            raise AssertionError('BUG: broken generated source file handling for {!r}'.format(src))
        else:
            if isinstance(src, File):
                rel_src = src.rel_to_builddir(self.build_to_src)
            else:
                raise InvalidArguments('Invalid source type: {!r}'.format(src))
            abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
        if isinstance(src, File):
            if src.is_built:
                src_filename = os.path.join(src.subdir, src.fname)
                if os.path.isabs(src_filename):
                    assert(src_filename.startswith(self.environment.get_build_dir()))
                    src_filename = src_filename[len(self.environment.get_build_dir()) + 1:]
            else:
                src_filename = src.fname
        elif os.path.isabs(src):
            src_filename = os.path.basename(src)
        else:
            src_filename = src
        # Flatten path separators so every object gets a unique name inside
        # the target's private dir.
        obj_basename = src_filename.replace('/', '_').replace('\\', '_')
        rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
        rel_obj += '.' + self.environment.get_object_suffix()
        dep_file = compiler.depfile_for_object(rel_obj)
        # Add MSVC debug file generation compile flags: /Fd /FS
        commands += self.get_compile_debugfile_args(compiler, target, rel_obj)
        # PCH handling
        if self.environment.coredata.base_options.get('b_pch', False):
            commands += self.get_pch_include_args(compiler, target)
            pchlist = target.get_pch(compiler.language)
        else:
            pchlist = []
        if not pchlist:
            pch_dep = []
        elif compiler.id == 'intel':
            # Intel PCH is handled at target generation (see generate_pch),
            # so there is no PCH file to depend on here.
            pch_dep = []
        else:
            arr = []
            i = os.path.join(self.get_target_private_dir(target), compiler.get_pch_name(pchlist[0]))
            arr.append(i)
            pch_dep = arr
        crstr = ''
        if target.is_cross:
            crstr = '_CROSS'
        compiler_name = '%s%s_COMPILER' % (compiler.get_language(), crstr)
        extra_deps = []
        if compiler.get_language() == 'fortran':
            # Can't read source file to scan for deps if it's generated later
            # at build-time. Skip scanning for deps, and just set the module
            # outdir argument instead.
            # https://github.com/mesonbuild/meson/issues/1348
            if not is_generated:
                extra_deps += self.get_fortran_deps(compiler, abs_src, target)
            # Dependency hack. Remove once multiple outputs in Ninja is fixed:
            # https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
            for modname, srcfile in self.fortran_deps[target.get_basename()].items():
                modfile = os.path.join(self.get_target_private_dir(target),
                                       compiler.module_name_to_filename(modname))
                if srcfile == src:
                    depelem = NinjaBuildElement(self.all_outputs, modfile, 'FORTRAN_DEP_HACK', rel_obj)
                    depelem.write(outfile)
            commands += compiler.get_module_outdir_args(self.get_target_private_dir(target))
        element = NinjaBuildElement(self.all_outputs, rel_obj, compiler_name, rel_src)
        # Hard deps: headers and Fortran module files.
        for d in header_deps:
            if isinstance(d, File):
                d = d.rel_to_builddir(self.build_to_src)
            elif not self.has_dir_part(d):
                d = os.path.join(self.get_target_private_dir(target), d)
            element.add_dep(d)
        for d in extra_deps:
            element.add_dep(d)
        # Order-only deps: must exist before we compile, but changing them
        # does not force a recompile by itself.
        for d in order_deps:
            if isinstance(d, File):
                d = d.rel_to_builddir(self.build_to_src)
            elif not self.has_dir_part(d):
                d = os.path.join(self.get_target_private_dir(target), d)
            element.add_orderdep(d)
        element.add_orderdep(pch_dep)
        # Convert from GCC-style link argument naming to the naming used by the
        # current compiler.
        commands = commands.to_native()
        for i in self.get_fortran_orderdeps(target, compiler):
            element.add_orderdep(i)
        element.add_item('DEPFILE', dep_file)
        element.add_item('ARGS', commands)
        element.write(outfile)
        return rel_obj
def has_dir_part(self, fname):
# FIXME FIXME: The usage of this is a terrible and unreliable hack
if isinstance(fname, File):
return fname.subdir != ''
return '/' in fname or '\\' in fname
# Fortran is a bit weird (again). When you link against a library, just compiling a source file
# requires the mod files that are output when single files are built. To do this right we would need to
# scan all inputs and write out explicit deps for each file. That is stoo slow and too much effort so
# instead just have an ordered dependendy on the library. This ensures all required mod files are created.
# The real deps are then detected via dep file generation from the compiler. This breaks on compilers that
# produce incorrect dep files but such is life.
def get_fortran_orderdeps(self, target, compiler):
if compiler.language != 'fortran':
return []
return [os.path.join(self.get_target_dir(lt), lt.get_filename()) for lt in target.link_targets]
def generate_msvc_pch_command(self, target, compiler, pch):
if len(pch) != 2:
raise RuntimeError('MSVC requires one header and one source to produce precompiled headers.')
header = pch[0]
source = pch[1]
pchname = compiler.get_pch_name(header)
dst = os.path.join(self.get_target_private_dir(target), pchname)
commands = []
commands += self.generate_basic_compiler_args(target, compiler)
just_name = os.path.split(header)[1]
(objname, pch_args) = compiler.gen_pch_args(just_name, source, dst)
commands += pch_args
commands += self.get_compile_debugfile_args(compiler, target, objname)
dep = dst + '.' + compiler.get_depfile_suffix()
return commands, dep, dst, [objname]
def generate_gcc_pch_command(self, target, compiler, pch):
commands = []
commands += self.generate_basic_compiler_args(target, compiler)
dst = os.path.join(self.get_target_private_dir(target),
os.path.split(pch)[-1] + '.' + compiler.get_pch_suffix())
dep = dst + '.' + compiler.get_depfile_suffix()
return commands, dep, dst, [] # Gcc does not create an object file during pch generation.
    def generate_pch(self, target, outfile):
        """Write PCH build rules for the target's 'c' and 'cpp' precompiled
        headers and return any object files those rules produce (MSVC only).
        """
        cstr = ''
        pch_objects = []
        if target.is_cross:
            cstr = '_CROSS'
        for lang in ['c', 'cpp']:
            pch = target.get_pch(lang)
            if not pch:
                continue
            # Require the PCH to live in a subdirectory; see the message below.
            if '/' not in pch[0] or '/' not in pch[-1]:
                msg = 'Precompiled header of {!r} must not be in the same ' \
                      'directory as source, please put it in a subdirectory.' \
                      ''.format(target.get_basename())
                raise InvalidArguments(msg)
            compiler = target.compilers[lang]
            if compiler.id == 'msvc':
                # MSVC compiles the PCH from the implementation source
                # (pch[-1]); the header itself is only an extra dependency.
                src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[-1])
                (commands, dep, dst, objs) = self.generate_msvc_pch_command(target, compiler, pch)
                extradep = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
            elif compiler.id == 'intel':
                # Intel generates on target generation
                continue
            else:
                # GCC-style compilers compile the header (pch[0]) directly.
                src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
                (commands, dep, dst, objs) = self.generate_gcc_pch_command(target, compiler, pch[0])
                extradep = None
            pch_objects += objs
            rulename = compiler.get_language() + cstr + '_PCH'
            elem = NinjaBuildElement(self.all_outputs, dst, rulename, src)
            if extradep is not None:
                elem.add_dep(extradep)
            elem.add_item('ARGS', commands)
            elem.add_item('DEPFILE', dep)
            elem.write(outfile)
        return pch_objects
def generate_shsym(self, outfile, target):
target_name = self.get_target_filename(target)
targetdir = self.get_target_private_dir(target)
symname = os.path.join(targetdir, target_name + '.symbols')
elem = NinjaBuildElement(self.all_outputs, symname, 'SHSYM', target_name)
if self.environment.is_cross_build() and self.environment.cross_info.need_cross_compiler():
elem.add_item('CROSS', '--cross-host=' + self.environment.cross_info.config['host_machine']['system'])
elem.write(outfile)
def get_cross_stdlib_link_args(self, target, linker):
if isinstance(target, build.StaticLibrary) or not target.is_cross:
return []
if not self.environment.cross_info.has_stdlib(linker.language):
return []
return linker.get_no_stdlib_link_args()
    def get_target_type_link_args(self, target, linker):
        """Return link args specific to the kind of *target*: executable
        (GUI app flags), shared library/module (PIC, soname, module defs,
        import library), or static library."""
        abspath = os.path.join(self.environment.get_build_dir(), target.subdir)
        commands = []
        if isinstance(target, build.Executable):
            # Currently only used with the Swift compiler to add '-emit-executable'
            commands += linker.get_std_exe_link_args()
            # If gui_app, and that's significant on this platform
            if target.gui_app and hasattr(linker, 'get_gui_app_args'):
                commands += linker.get_gui_app_args()
        elif isinstance(target, build.SharedLibrary):
            if isinstance(target, build.SharedModule):
                commands += linker.get_std_shared_module_link_args()
            else:
                commands += linker.get_std_shared_lib_link_args()
            # All shared libraries are PIC
            commands += linker.get_pic_args()
            # Add -Wl,-soname arguments on Linux, -install_name on OS X
            commands += linker.get_soname_args(target.prefix, target.name, target.suffix,
                                               abspath, target.soversion,
                                               isinstance(target, build.SharedModule))
            # This is only visited when building for Windows using either GCC or Visual Studio
            if target.vs_module_defs and hasattr(linker, 'gen_vs_module_defs_args'):
                commands += linker.gen_vs_module_defs_args(target.vs_module_defs.rel_to_builddir(self.build_to_src))
            # This is only visited when building for Windows using either GCC or Visual Studio
            if target.import_filename:
                commands += linker.gen_import_library_args(os.path.join(target.subdir, target.import_filename))
        elif isinstance(target, build.StaticLibrary):
            commands += linker.get_std_link_args()
        else:
            raise RuntimeError('Unknown build target type.')
        return commands
def get_link_whole_args(self, linker, target):
target_args = self.build_target_link_arguments(linker, target.link_whole_targets)
return linker.get_link_whole_for(target_args) if len(target_args) else []
    def generate_link(self, target, outfile, outname, obj_list, linker, extra_args=[]):
        """Assemble the full link (or static-archive) command line for
        *target* and return the NinjaBuildElement (the caller writes it).

        The order of the += blocks is the override order and must not be
        changed.
        """
        # NOTE(review): extra_args defaults to a shared mutable list; it is
        # only read here, but a None default would be the safer idiom.
        if isinstance(target, build.StaticLibrary):
            linker_base = 'STATIC'
        else:
            linker_base = linker.get_language() # Fixme.
        if isinstance(target, build.SharedLibrary):
            self.generate_shsym(outfile, target)
        crstr = ''
        if target.is_cross:
            crstr = '_CROSS'
        linker_rule = linker_base + crstr + '_LINKER'
        # Create an empty commands list, and start adding link arguments from
        # various sources in the order in which they must override each other
        # starting from hard-coded defaults followed by build options and so on.
        #
        # Once all the linker options have been passed, we will start passing
        # libraries and library paths from internal and external sources.
        commands = CompilerArgs(linker)
        # First, the trivial ones that are impossible to override.
        #
        # Add linker args for linking this target derived from 'base' build
        # options passed on the command-line, in default_options, etc.
        # These have the lowest priority.
        if not isinstance(target, build.StaticLibrary):
            commands += compilers.get_base_link_args(self.environment.coredata.base_options,
                                                     linker,
                                                     isinstance(target, build.SharedModule))
        # Add -nostdlib if needed; can't be overriden
        commands += self.get_cross_stdlib_link_args(target, linker)
        # Add things like /NOLOGO; usually can't be overriden
        commands += linker.get_linker_always_args()
        # Add buildtype linker args: optimization level, etc.
        commands += linker.get_buildtype_linker_args(self.get_option_for_target('buildtype', target))
        # Add /DEBUG and the pdb filename when using MSVC
        commands += self.get_link_debugfile_args(linker, target, outname)
        # Add link args specific to this BuildTarget type, such as soname args,
        # PIC, import library generation, etc.
        commands += self.get_target_type_link_args(target, linker)
        # Archives that are copied wholesale in the result. Must be before any
        # other link targets so missing symbols from whole archives are found in those.
        if not isinstance(target, build.StaticLibrary):
            commands += self.get_link_whole_args(linker, target)
        if not isinstance(target, build.StaticLibrary):
            # Add link args added using add_project_link_arguments()
            commands += self.build.get_project_link_args(linker, target.subproject)
            # Add link args added using add_global_link_arguments()
            # These override per-project link arguments
            commands += self.build.get_global_link_args(linker)
            if not target.is_cross:
                # Link args added from the env: LDFLAGS. We want these to
                # override all the defaults but not the per-target link args.
                commands += self.environment.coredata.external_link_args[linker.get_language()]
        # Now we will add libraries and library paths from various sources
        # Add link args to link to all internal libraries (link_with:) and
        # internal dependencies needed by this target.
        if linker_base == 'STATIC':
            # Link arguments of static libraries are not put in the command
            # line of the library. They are instead appended to the command
            # line where the static library is used.
            dependencies = []
        else:
            dependencies = target.get_dependencies()
        commands += self.build_target_link_arguments(linker, dependencies)
        # For 'automagic' deps: Boost and GTest. Also dependency('threads').
        # pkg-config puts the thread flags itself via `Cflags:`
        for d in target.external_deps:
            if d.need_threads():
                commands += linker.thread_link_flags()
        # Only non-static built targets need link args and link dependencies
        if not isinstance(target, build.StaticLibrary):
            commands += target.link_args
            # External deps must be last because target link libraries may depend on them.
            for dep in target.get_external_deps():
                # Extend without reordering or de-dup to preserve `-L -l` sets
                # https://github.com/mesonbuild/meson/issues/1718
                commands.extend_direct(dep.get_link_args())
            for d in target.get_dependencies():
                if isinstance(d, build.StaticLibrary):
                    for dep in d.get_external_deps():
                        commands.extend_direct(dep.get_link_args())
        # Add link args for c_* or cpp_* build options. Currently this only
        # adds c_winlibs and cpp_winlibs when building for Windows. This needs
        # to be after all internal and external libraries so that unresolved
        # symbols from those can be found here. This is needed when the
        # *_winlibs that we want to link to are static mingw64 libraries.
        commands += linker.get_option_link_args(self.environment.coredata.compiler_options)
        # Set runtime-paths so we can run executables without needing to set
        # LD_LIBRARY_PATH, etc in the environment. Doesn't work on Windows.
        if '/' in target.name or '\\' in target.name:
            # Target names really should not have slashes in them, but
            # unfortunately we did not check for that and some downstream projects
            # now have them. Once slashes are forbidden, remove this bit.
            target_slashname_workaround_dir = os.path.join(os.path.split(target.name)[0],
                                                           self.get_target_dir(target))
        else:
            target_slashname_workaround_dir = self.get_target_dir(target)
        commands += linker.build_rpath_args(self.environment.get_build_dir(),
                                            target_slashname_workaround_dir,
                                            self.determine_rpath_dirs(target),
                                            target.install_rpath)
        # Add libraries generated by custom targets
        custom_target_libraries = self.get_custom_target_provided_libraries(target)
        commands += extra_args
        commands += custom_target_libraries
        # Convert from GCC-style link argument naming to the naming used by the
        # current compiler.
        commands = commands.to_native()
        dep_targets = [self.get_dependency_filename(t) for t in dependencies]
        dep_targets.extend([self.get_dependency_filename(t)
                            for t in target.link_depends])
        elem = NinjaBuildElement(self.all_outputs, outname, linker_rule, obj_list)
        elem.add_dep(dep_targets + custom_target_libraries)
        elem.add_item('LINK_ARGS', commands)
        return elem
def get_dependency_filename(self, t):
if isinstance(t, build.SharedLibrary):
return os.path.join(self.get_target_private_dir(t), self.get_target_filename(t) + '.symbols')
elif isinstance(t, mesonlib.File):
if t.is_built:
return t.relative_name()
else:
return t.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir())
return self.get_target_filename(t)
def generate_shlib_aliases(self, target, outdir):
aliases = target.get_aliases()
for alias, to in aliases.items():
aliasfile = os.path.join(self.environment.get_build_dir(), outdir, alias)
try:
os.remove(aliasfile)
except Exception:
pass
try:
os.symlink(to, aliasfile)
except NotImplementedError:
mlog.debug("Library versioning disabled because symlinks are not supported.")
except OSError:
mlog.debug("Library versioning disabled because we do not have symlink creation privileges.")
def generate_custom_target_clean(self, outfile, trees):
e = NinjaBuildElement(self.all_outputs, 'clean-ctlist', 'CUSTOM_COMMAND', 'PHONY')
d = CleanTrees(self.environment.get_build_dir(), trees)
d_file = os.path.join(self.environment.get_scratch_dir(), 'cleantrees.dat')
e.add_item('COMMAND', [sys.executable,
self.environment.get_build_command(),
'--internal', 'cleantrees', d_file])
e.add_item('description', 'Cleaning custom target directories.')
e.write(outfile)
# Write out the data file passed to the script
with open(d_file, 'wb') as ofile:
pickle.dump(d, ofile)
return 'clean-ctlist'
def generate_gcov_clean(self, outfile):
gcno_elem = NinjaBuildElement(self.all_outputs, 'clean-gcno', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcno_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcno'])
gcno_elem.add_item('description', 'Deleting gcno files.')
gcno_elem.write(outfile)
gcda_elem = NinjaBuildElement(self.all_outputs, 'clean-gcda', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcda_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcda'])
gcda_elem.add_item('description', 'Deleting gcda files.')
gcda_elem.write(outfile)
def get_user_option_args(self):
cmds = []
for (k, v) in self.environment.coredata.user_options.items():
cmds.append('-D' + k + '=' + (v.value if isinstance(v.value, str) else str(v.value).lower()))
# The order of these arguments must be the same between runs of Meson
# to ensure reproducible output. The order we pass them shouldn't
# affect behavior in any other way.
return sorted(cmds)
def generate_dist(self, outfile):
elem = NinjaBuildElement(self.all_outputs, 'dist', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('DESC', 'Creating source packages')
elem.add_item('COMMAND', [sys.executable,
self.environment.get_build_command(),
'--internal', 'dist',
self.environment.source_dir,
self.environment.build_dir,
sys.executable,
self.environment.get_build_command()])
elem.add_item('pool', 'console')
elem.write(outfile)
# For things like scan-build and other helper tools we might have.
def generate_utils(self, outfile):
cmd = [sys.executable, self.environment.get_build_command(),
'--internal', 'scanbuild', self.environment.source_dir, self.environment.build_dir,
sys.executable, self.environment.get_build_command()] + self.get_user_option_args()
elem = NinjaBuildElement(self.all_outputs, 'scan-build', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', cmd)
elem.add_item('pool', 'console')
elem.write(outfile)
cmd = [sys.executable, self.environment.get_build_command(),
'--internal', 'uninstall']
elem = NinjaBuildElement(self.all_outputs, 'uninstall', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', cmd)
elem.add_item('pool', 'console')
elem.write(outfile)
    def generate_ending(self, outfile):
        """Write the tail of build.ninja: the `all` target, the `clean`
        target (plus gcov/custom-target cleanup hooks), and the
        regenerate/reconfigure rules."""
        targetlist = []
        for t in self.get_build_by_default_targets().values():
            # Add the first output of each target to the 'all' target so that
            # they are all built
            targetlist.append(os.path.join(self.get_target_dir(t), t.get_outputs()[0]))
        elem = NinjaBuildElement(self.all_outputs, 'all', 'phony', targetlist)
        elem.write(outfile)
        default = 'default all\n\n'
        outfile.write(default)
        ninja_command = environment.detect_ninja()
        if ninja_command is None:
            raise MesonException('Could not detect Ninja v1.6 or newer')
        elem = NinjaBuildElement(self.all_outputs, 'clean', 'CUSTOM_COMMAND', 'PHONY')
        elem.add_item('COMMAND', [ninja_command, '-t', 'clean'])
        elem.add_item('description', 'Cleaning.')
        # If we have custom targets in this project, add all their outputs to
        # the list that is passed to the `cleantrees.py` script. The script
        # will manually delete all custom_target outputs that are directories
        # instead of files. This is needed because on platforms other than
        # Windows, Ninja only deletes directories while cleaning if they are
        # empty. https://github.com/mesonbuild/meson/issues/1220
        ctlist = []
        for t in self.build.get_targets().values():
            if isinstance(t, build.CustomTarget):
                # Create a list of all custom target outputs
                for o in t.get_outputs():
                    ctlist.append(os.path.join(self.get_target_dir(t), o))
        if ctlist:
            elem.add_dep(self.generate_custom_target_clean(outfile, ctlist))
        if 'b_coverage' in self.environment.coredata.base_options and \
           self.environment.coredata.base_options['b_coverage'].value:
            # Coverage builds also remove stale gcov data on clean.
            self.generate_gcov_clean(outfile)
            elem.add_dep('clean-gcda')
            elem.add_dep('clean-gcno')
        elem.write(outfile)
        # Re-run Meson when any file it generated build.ninja from changes.
        deps = self.get_regen_filelist()
        elem = NinjaBuildElement(self.all_outputs, 'build.ninja', 'REGENERATE_BUILD', deps)
        elem.add_item('pool', 'console')
        elem.write(outfile)
        elem = NinjaBuildElement(self.all_outputs, 'reconfigure', 'REGENERATE_BUILD', 'PHONY')
        elem.add_item('pool', 'console')
        elem.write(outfile)
        elem = NinjaBuildElement(self.all_outputs, deps, 'phony', '')
        elem.write(outfile)
|
{
"content_hash": "875c51a92b3a804ca1269ced0b63a2ef",
"timestamp": "",
"source": "github",
"line_count": 2518,
"max_line_length": 132,
"avg_line_length": 49.53455123113582,
"alnum_prop": 0.5783865691745238,
"repo_name": "wberrier/meson",
"id": "761d508f9c0e3306b80bc4f099699b4e58cfbdf9",
"size": "125321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesonbuild/backend/ninjabackend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2394"
},
{
"name": "Batchfile",
"bytes": "795"
},
{
"name": "C",
"bytes": "91370"
},
{
"name": "C#",
"bytes": "631"
},
{
"name": "C++",
"bytes": "17456"
},
{
"name": "CMake",
"bytes": "1670"
},
{
"name": "D",
"bytes": "2026"
},
{
"name": "Emacs Lisp",
"bytes": "1226"
},
{
"name": "Fortran",
"bytes": "1946"
},
{
"name": "Inno Setup",
"bytes": "372"
},
{
"name": "Java",
"bytes": "994"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "110"
},
{
"name": "Meson",
"bytes": "193206"
},
{
"name": "Objective-C",
"bytes": "699"
},
{
"name": "Objective-C++",
"bytes": "332"
},
{
"name": "Protocol Buffer",
"bytes": "92"
},
{
"name": "Python",
"bytes": "1231164"
},
{
"name": "Roff",
"bytes": "232"
},
{
"name": "Rust",
"bytes": "618"
},
{
"name": "Shell",
"bytes": "1787"
},
{
"name": "Swift",
"bytes": "972"
},
{
"name": "Vala",
"bytes": "6167"
},
{
"name": "Vim script",
"bytes": "9434"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
from pymatgen.core.interface import Interface
from pymatgen.core.surface import SlabGenerator
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.testing import PymatgenTest
class InterfaceTest(PymatgenTest):
    """Tests for pymatgen.core.interface.Interface using the bundled
    Si/SiO2 interface test structure."""
    def setUp(self):
        # Shared fixture: the packaged Si/SiO2 interface structure.
        self.interface: Interface = self.get_structure("Si_SiO2_Interface")
    def test_basic_props(self):
        """Check basic properties and dict round-tripping of the fixture."""
        interface = self.interface
        assert isinstance(interface, Interface)
        assert len(interface.substrate_indices) == 14
        assert len(interface.film_indices) == 36
        assert len(interface.film_sites) == len(interface.film_indices)
        assert len(interface.substrate_sites) == len(interface.substrate_indices)
        assert interface.gap == 2.0
        assert np.allclose(interface.in_plane_offset, [0, 0])
        assert interface.vacuum_over_film == 20.0
        assert interface.film_termination == "O2_P6/mmm_4"
        assert interface.substrate_termination == "Si_P6/mmm_7"
        assert interface.film_layers == 6
        assert interface.substrate_layers == 2
        iface_dict = interface.as_dict()
        for k in [
            "lattice",
            "sites",
            "in_plane_offset",
            "gap",
            "vacuum_over_film",
            "interface_properties",
        ]:
            assert k in iface_dict
        assert isinstance(interface.from_dict(iface_dict), Interface)
    def test_gap_setter(self):
        """Setting gap should move the film and keep gap consistent with
        the actual substrate/film fractional coordinates."""
        interface = self.interface
        assert np.allclose(interface.gap, 2.0)
        # Recompute the gap from the topmost substrate site and the lowest
        # film site and compare with the property.
        max_sub_c = np.max(np.array([s.frac_coords for s in interface.substrate])[:, 2])
        min_film_c = np.min(np.array([f.frac_coords for f in interface.film])[:, 2])
        gap = (min_film_c - max_sub_c) * interface.lattice.c
        assert np.allclose(interface.gap, gap)
        interface.gap += 1
        assert np.allclose(interface.gap, 3.0)
        max_sub_c = np.max(np.array([s.frac_coords for s in interface.substrate])[:, 2])
        min_film_c = np.min(np.array([f.frac_coords for f in interface.film])[:, 2])
        gap = (min_film_c - max_sub_c) * interface.lattice.c
        assert np.allclose(interface.gap, gap)
    def test_in_plane_offset_setter(self):
        """Setting in_plane_offset should shift only the film sites."""
        interface = self.interface
        init_coords = np.array(self.interface.frac_coords)
        interface.in_plane_offset = np.array([0.2, 0.2])
        assert np.allclose(interface.in_plane_offset, np.array([0.2, 0.2]))
        test_coords = np.array(init_coords)
        for i in interface.film_indices:
            test_coords[i] += [0.2, 0.2, 0]
        # Compare modulo 1 since fractional coordinates wrap around the cell.
        assert np.allclose(np.mod(test_coords, 1.0), np.mod(interface.frac_coords, 1.0))
    def test_vacuum_over_film_setter(self):
        """Growing the vacuum must not move any Cartesian coordinates."""
        interface = self.interface
        init_coords = self.interface.cart_coords
        assert interface.vacuum_over_film == 20
        interface.vacuum_over_film += 10
        assert interface.vacuum_over_film == 30
        assert np.allclose(init_coords, interface.cart_coords)
    def test_get_shifts_based_on_adsorbate_sites(self):
        # Only testing two tolerances as there appears to be significant numerical noise in this method
        assert len(self.interface.get_shifts_based_on_adsorbate_sites()) == 42
        assert len(self.interface.get_shifts_based_on_adsorbate_sites(tolerance=20.0)) == 1
    def test_from_slabs(self):
        """Interface.from_slabs should build an Interface from two slabs."""
        si_struct = self.get_structure("Si")
        sio2_struct = self.get_structure("SiO2")
        si_conventional = SpacegroupAnalyzer(si_struct).get_conventional_standard_structure()
        sio2_conventional = SpacegroupAnalyzer(sio2_struct).get_conventional_standard_structure()
        si_slab = SlabGenerator(si_conventional, (1, 1, 1), 5, 10, reorient_lattice=True).get_slab()
        sio2_slab = SlabGenerator(sio2_conventional, (1, 0, 0), 5, 10, reorient_lattice=True).get_slab()
        interface = Interface.from_slabs(film_slab=si_slab, substrate_slab=sio2_slab)
        assert isinstance(interface, Interface)
if __name__ == "__main__":
    # Allow running this test module directly: python test_interface.py
    unittest.main()
|
{
"content_hash": "3cb9bc1dd34e769cc7d419583a1243e9",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 104,
"avg_line_length": 38.714285714285715,
"alnum_prop": 0.6487084870848708,
"repo_name": "fraricci/pymatgen",
"id": "d78406f58eaee25395e4b49a2d2ef8c0c6d69ba4",
"size": "4159",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pymatgen/core/tests/test_interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "87"
},
{
"name": "CSS",
"bytes": "7572"
},
{
"name": "Cython",
"bytes": "38793"
},
{
"name": "HTML",
"bytes": "12642493"
},
{
"name": "OpenEdge ABL",
"bytes": "312"
},
{
"name": "Python",
"bytes": "9195124"
},
{
"name": "Roff",
"bytes": "1407429"
},
{
"name": "Shell",
"bytes": "12027"
}
],
"symlink_target": ""
}
|
from yelp.client import Client
from yelp.oauth1_authenticator import Oauth1Authenticator
import re
def Yelp(command, yelp_authkeys):
    """Search Yelp for the term and location embedded in *command*.

    The command is expected to look like ``"yelp <term> in <location>"``.

    :param command: raw user command string to parse.
    :param yelp_authkeys: dict with ``CONSUMER_KEY``, ``CONSUMER_SECRET``,
        ``TOKEN`` and ``TOKEN_SECRET`` entries for OAuth1.
    :return: formatted summary string of up to three matching businesses.
    """
    lower_com = command.lower()
    # Pull the search term out of "yelp <term> in <location>".
    search_term = "".join(re.findall(r'yelp (.*?) in', lower_com, re.DOTALL))
    params = {
        'term': search_term
    }
    # Everything after the first "in " is treated as the location.
    before_key, keyword, location = lower_com.partition('in ')
    auth = Oauth1Authenticator(
        consumer_key=yelp_authkeys["CONSUMER_KEY"],
        consumer_secret=yelp_authkeys["CONSUMER_SECRET"],
        token=yelp_authkeys["TOKEN"],
        token_secret=yelp_authkeys["TOKEN_SECRET"]
    )
    client = Client(auth)
    response = client.search(location, **params)
    lines = []
    # Slice to at most three results; the original indexed 0..2 blindly and
    # raised IndexError when the search returned fewer businesses.
    for idx, biz in enumerate(response.businesses[:3], start=1):
        address = str(biz.location.display_address).strip('[]').replace("'", "").replace(",", "")
        lines.append(
            str(idx) + ". " + str(biz.name)
            + "\n Address: " + address
            + "\n Ratings: " + str(biz.rating)
            + " with " + str(biz.review_count) + " Reviews \n Phone: "
            + str(biz.display_phone) + "\n"
        )
    # join() avoids the quadratic repeated-concatenation of the original.
    return "".join(lines)
|
{
"content_hash": "a6a0091daf3683d319206d13c0899a77",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 364,
"avg_line_length": 34.361111111111114,
"alnum_prop": 0.6265157639450283,
"repo_name": "jcallin/eSeMeS",
"id": "6bfc37258f7965a4cc634b31850d40cb7ecd85c6",
"size": "1237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get_yelp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10465"
}
],
"symlink_target": ""
}
|
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
def import_attribute(import_path=None, options=None):
    """Resolve a dotted ``import_path`` and return the named attribute.

    Splits the path into module and attribute at the last dot, imports the
    module, and returns the attribute.  Raises ``ImproperlyConfigured`` for
    a missing path, a path with no dot, an unimportable module, or a module
    that lacks the attribute.  ``options`` is accepted for interface
    compatibility and is unused.
    """
    if import_path is None:
        raise ImproperlyConfigured("No import path was given.")
    # rpartition splits on the LAST dot; an empty separator means no dot at all.
    module, sep, classname = import_path.rpartition('.')
    if not sep:
        raise ImproperlyConfigured("%s isn't a module." % import_path)
    try:
        mod = import_module(module)
    except ImportError as e:
        raise ImproperlyConfigured('Error importing module %s: "%s"' %
                                   (module, e))
    try:
        return getattr(mod, classname)
    except AttributeError:
        raise ImproperlyConfigured(
            'Module "%s" does not define a "%s" class.' % (module, classname))
|
{
"content_hash": "bae58c0ae3de1283363edee903e3a0c2",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 33.28,
"alnum_prop": 0.6334134615384616,
"repo_name": "jazzband/django-queued-storage",
"id": "1499dbce4661f4c292a15bbcd993a74d1525c38c",
"size": "832",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "queued_storage/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "103"
},
{
"name": "Python",
"bytes": "40056"
}
],
"symlink_target": ""
}
|
from AccessControl import ClassSecurityInfo
from smtplib import SMTPServerDisconnected, SMTPRecipientsRefused
from Products.CMFCore.WorkflowCore import WorkflowException
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.browser import BrowserView
from bika.lims.config import VERIFIED_STATES
from bika.lims.interfaces import IInvoiceView
from bika.lims.permissions import *
from bika.lims.utils import to_utf8, isAttributeHidden, encode_header
from bika.lims.workflow import doActionFor
from DateTime import DateTime
from Products.Archetypes import PloneMessageFactory as PMF
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.interface import implements
from decimal import Decimal
import plone, App
class InvoiceView(BrowserView):
    """Render the invoice (or proforma) for an Analysis Request.

    Collects invoice, sample, workflow and pricing information from the
    context AR and exposes it as view attributes for the page templates.
    """
    implements(IInvoiceView)

    template = ViewPageTemplateFile("templates/analysisrequest_invoice.pt")
    print_template = ViewPageTemplateFile("templates/analysisrequest_invoice_print.pt")
    content = ViewPageTemplateFile("templates/analysisrequest_invoice_content.pt")
    title = _('Invoice')
    description = ''

    def __call__(self):
        context = self.context
        workflow = getToolByName(context, 'portal_workflow')
        # Collect related data and objects
        invoice = context.getInvoice()
        sample = context.getSample()
        samplePoint = sample.getSamplePoint()
        reviewState = workflow.getInfoFor(context, 'review_state')
        # Collection invoice information
        if invoice:
            self.invoiceId = invoice.getId()
        else:
            self.invoiceId = _('Proforma (Not yet invoiced)')
        # Collect verified invoice information
        verified = reviewState in VERIFIED_STATES
        if verified:
            self.verifiedBy = context.getVerifier()
        self.verified = verified
        self.request['verified'] = verified
        # Collect published date
        datePublished = context.getDatePublished()
        if datePublished is not None:
            datePublished = self.ulocalized_time(
                datePublished, long_format=1
            )
        self.datePublished = datePublished
        # Collect received date
        dateReceived = context.getDateReceived()
        if dateReceived is not None:
            dateReceived = self.ulocalized_time(dateReceived, long_format=1)
        self.dateReceived = dateReceived
        # Collect general information
        self.reviewState = reviewState
        contact = context.getContact()
        self.contact = contact.Title() if contact else ""
        self.clientOrderNumber = context.getClientOrderNumber()
        self.clientReference = context.getClientReference()
        self.clientSampleId = sample.getClientSampleID()
        self.sampleType = sample.getSampleType().Title()
        self.samplePoint = samplePoint and samplePoint.Title()
        self.requestId = context.getRequestID()
        self.headers = [
            {'title': 'Invoice ID', 'value': self.invoiceId},
            {'title': 'Client Reference', 'value': self.clientReference},
            {'title': 'Sample Type', 'value': self.sampleType},
            {'title': 'Request ID', 'value': self.requestId},
            {'title': 'Date Received', 'value': self.dateReceived},
        ]
        # NOTE(review): the title says 'Client Sample Id' but the value is the
        # client ORDER number, gated on the ClientOrderNumber attribute --
        # confirm whether the title or the value is the intended one.
        if not isAttributeHidden('AnalysisRequest', 'ClientOrderNumber'):
            self.headers.append({'title': 'Client Sample Id',
                                 'value': self.clientOrderNumber})
        if not isAttributeHidden('AnalysisRequest', 'SamplePoint'):
            self.headers.append(
                {'title': 'Sample Point', 'value': self.samplePoint})
        if self.verified:
            self.headers.append(
                {'title': 'Verified By', 'value': self.verifiedBy})
        if self.datePublished:
            self.headers.append(
                {'title': 'datePublished', 'value': self.datePublished})
        analyses = []
        profiles = []
        # Retrieve required data from analyses collection
        all_analyses, all_profiles, analyses_from_profiles = context.getServicesAndProfiles()
        # Relating category with solo analysis
        for analysis in all_analyses:
            service = analysis.getService()
            categoryName = service.getCategory().Title()
            # Find the category bucket; create it on first use.  next() with a
            # default replaces the py2-only .next() wrapped in a bare except,
            # which silently swallowed every error, not just StopIteration.
            category = next(
                (o for o in analyses if o['name'] == categoryName), None)
            if category is None:
                category = {'name': categoryName, 'analyses': []}
                analyses.append(category)
            # Append the analysis to the category
            category['analyses'].append({
                'title': analysis.Title(),
                'price': analysis.getPrice(),
                'priceVat': "%.2f" % analysis.getVATAmount(),
                'priceTotal': "%.2f" % analysis.getTotalPrice(),
            })
        # Relating analysis services with their profiles
        # We'll take the analysis contained on each profile
        for profile in all_profiles:
            # If profile's checkbox "Use Analysis Profile Price" is enabled, only the profile price will be displayed.
            # Otherwise each analysis will display its own price.
            pservices = []
            if profile.getUseAnalysisProfilePrice():
                # We have to use the profiles price only
                for pservice in profile.getService():
                    pservices.append({
                        'title': pservice.Title(),
                        'price': None,
                        'priceVat': None,
                        'priceTotal': None,
                    })
                profiles.append({'name': profile.title,
                                 'price': profile.getAnalysisProfilePrice(),
                                 'priceVat': profile.getVATAmount(),
                                 'priceTotal': profile.getTotalPrice(),
                                 'analyses': pservices})
            else:
                # We need the analyses prices instead of profile price
                for pservice in profile.getService():
                    # We want the analysis instead of the service, because we want the price for the client
                    # (for instance the bulk price)
                    panalysis = self._getAnalysisForProfileService(pservice.getKeyword(), analyses_from_profiles)
                    pservices.append({
                        'title': pservice.Title(),
                        'price': panalysis.getPrice() if panalysis else pservice.getPrice(),
                        'priceVat': "%.2f" % panalysis.getVATAmount() if panalysis
                        else pservice.getVATAmount(),
                        'priceTotal': "%.2f" % panalysis.getTotalPrice() if panalysis
                        else pservice.getTotalPrice(),
                    })
                profiles.append({'name': profile.title,
                                 'price': None,
                                 'priceVat': None,
                                 'priceTotal': None,
                                 'analyses': pservices})
        self.analyses = analyses
        self.profiles = profiles
        # Get subtotals
        self.subtotal = context.getSubtotal()
        self.subtotalVATAmount = "%.2f" % context.getSubtotalVATAmount()
        self.subtotalTotalPrice = "%.2f" % context.getSubtotalTotalPrice()
        # Get totals
        self.memberDiscount = Decimal(context.getDefaultMemberDiscount())
        self.discountAmount = context.getDiscountAmount()
        self.VATAmount = "%.2f" % context.getVATAmount()
        self.totalPrice = "%.2f" % context.getTotalPrice()
        # Render the template
        return self.template()

    def _getAnalysisForProfileService(self, service_keyword, analyses):
        """Return the analysis matching *service_keyword*, or None.

        :service_keyword: a service keyword
        :analyses: a list of analyses

        Callers only truth-test the result, so None (instead of the old 0
        sentinel) keeps behavior while making the intent explicit.
        """
        for analysis in analyses:
            if service_keyword == analysis.getService().getKeyword():
                return analysis
        return None

    def getPriorityIcon(self):
        """Return the physical path of the AR priority's big icon, if any."""
        priority = self.context.getPriority()
        if priority:
            icon = priority.getBigIcon()
            if icon:
                return '/'.join(icon.getPhysicalPath())

    def getPreferredCurrencyAbreviation(self):
        """Return the currency configured in bika_setup."""
        return self.context.bika_setup.getCurrency()
class InvoicePrintView(InvoiceView):
    """Print-friendly variant of the invoice view; only the template differs."""

    template = ViewPageTemplateFile("templates/analysisrequest_invoice_print.pt")

    def __call__(self):
        # Delegate all data collection/rendering to the base implementation.
        return InvoiceView.__call__(self)
class InvoiceCreate(InvoiceView):
    """
    It generates an invoice object with the proforma information in the AR/invoice,
    emails it to the responsibles and the client, and redirects back to the
    invoice view.
    """
    security = ClassSecurityInfo()

    def __call__(self):
        # Create the invoice object and link it to the current AR.
        self.context.issueInvoice(RESPONSE=self.request.response)
        # Run the InvoiceView __call__ is necessary to fill out the template required fields.
        InvoiceView.__call__(self)
        # Get the invoice template in HTML format
        templateHTML = self.print_template()
        # Send emails with the invoice
        self.emailInvoice(templateHTML)
        # Reload the page to see the the new fields
        self.request.response.redirect(
            '%s/invoice' % self.aq_parent.absolute_url())

    security.declarePublic('printInvoice')

    def emailInvoice(self, templateHTML, to=None):
        """
        Send the invoice via email.
        :param templateHTML: The invoice template in HTML, ready to be send.
        :param to: A list with the addresses to send the invoice.
        """
        # A mutable default ([]) would be shared across calls and accumulate
        # recipients; create a fresh list per call instead.
        if to is None:
            to = []
        ar = self.aq_parent
        # SMTP errors are silently ignored if server is in debug mode
        debug_mode = App.config.getConfiguration().debug_mode
        # Useful variables
        lab = ar.bika_setup.laboratory
        # Compose and send email.
        subject = t(_('Invoice')) + ' ' + ar.getInvoice().getId()
        mime_msg = MIMEMultipart('related')
        mime_msg['Subject'] = subject
        mime_msg['From'] = formataddr(
            (encode_header(lab.getName()), lab.getEmailAddress()))
        mime_msg.preamble = 'This is a multi-part MIME message.'
        msg_txt_t = MIMEText(templateHTML.encode('utf-8'), _subtype='html')
        mime_msg.attach(msg_txt_t)
        # Build the responsible's addresses
        mngrs = ar.getResponsible()
        for mngrid in mngrs['ids']:
            name = mngrs['dict'][mngrid].get('name', '')
            email = mngrs['dict'][mngrid].get('email', '')
            if (email != ''):
                to.append(formataddr((encode_header(name), email)))
        # Build the client's address
        caddress = ar.aq_parent.getEmailAddress()
        cname = ar.aq_parent.getName()
        if (caddress != ''):
            to.append(formataddr((encode_header(cname), caddress)))
        if len(to) > 0:
            # Send the emails
            mime_msg['To'] = ','.join(to)
            try:
                host = getToolByName(ar, 'MailHost')
                host.send(mime_msg.as_string(), immediate=True)
            except SMTPServerDisconnected as msg:
                # Only swallow the disconnect while Zope runs in debug mode.
                if not debug_mode:
                    raise SMTPServerDisconnected(msg)
            except SMTPRecipientsRefused as msg:
                raise WorkflowException(str(msg))
|
{
"content_hash": "fd1ca3cfa5f92d28e3116fe2fcd53e68",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 118,
"avg_line_length": 45.154981549815496,
"alnum_prop": 0.5875623110239437,
"repo_name": "hocinebendou/bika.gsoc",
"id": "937217d7f59170a87f9c998c3c1cf6bfe35f144c",
"size": "12237",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bika/lims/browser/analysisrequest/invoice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "403"
},
{
"name": "COBOL",
"bytes": "5987"
},
{
"name": "CSS",
"bytes": "29758"
},
{
"name": "JavaScript",
"bytes": "411425"
},
{
"name": "Python",
"bytes": "4330980"
},
{
"name": "RobotFramework",
"bytes": "239735"
},
{
"name": "Shell",
"bytes": "11201"
}
],
"symlink_target": ""
}
|
from ..singletons import agent, tracer, async_tracer, tornado_tracer
from ..log import logger
def extract_custom_headers(tracing_scope, headers):
    """Copy configured custom HTTP headers from *headers* onto the scope's span.

    Any failure is logged at debug level and otherwise ignored.
    """
    try:
        for custom_header in agent.options.extra_http_headers:
            # Header names arrive as e.g. b'x-header-1'; match case-insensitively.
            wanted = custom_header.lower()
            for header_key, value in headers.items():
                if header_key.lower() == wanted:
                    tracing_scope.span.set_tag("http.header.%s" % custom_header, value)
    except Exception:
        logger.debug("extract_custom_headers: ", exc_info=True)
def get_active_tracer():
    """Return whichever tracer currently has an active span, or None."""
    try:
        for candidate in (tracer, async_tracer, tornado_tracer):
            if candidate.active_span:
                return candidate
        return None
    except Exception:
        # Do not try to log this with instana, as there is no active tracer
        # and there will be an infinite loop at least for PY2
        return None
|
{
"content_hash": "9a2899474a6292530e36826098fa4466",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 119,
"avg_line_length": 35.6551724137931,
"alnum_prop": 0.6228239845261122,
"repo_name": "instana/python-sensor",
"id": "ba57239567fe066ef217bb7ab56625269591c324",
"size": "1100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instana/util/traceutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "419"
},
{
"name": "HTML",
"bytes": "154"
},
{
"name": "Python",
"bytes": "1056302"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
import os
import unittest
import warnings
from django import http
from django.contrib.formtools import preview, utils
from django.test import TestCase, override_settings
from django.utils._os import upath
from django.contrib.formtools.tests.forms import (
HashTestBlankForm, HashTestForm, TestForm,
)
# Marker text returned by the test preview's done() hook; the encoded form is
# what HttpResponse.content (bytes) is compared against.
success_string = "Done was called!"
success_string_encoded = success_string.encode()
class TestFormPreview(preview.FormPreview):
    """FormPreview subclass overriding the hooks exercised by the tests."""

    def get_context(self, request, form):
        context = super(TestFormPreview, self).get_context(request, form)
        context['custom_context'] = True
        return context

    def get_initial(self, request):
        return {'field1': 'Works!'}

    def done(self, request, cleaned_data):
        return http.HttpResponse(success_string)
@override_settings(
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(upath(__file__)), 'templates'),
    ),
    ROOT_URLCONF='django.contrib.formtools.tests.urls',
)
class PreviewTests(TestCase):
    """End-to-end tests of the FormPreview stage machine via the test client."""

    def setUp(self):
        super(PreviewTests, self).setUp()
        # Create a FormPreview instance to share between tests
        self.preview = preview.FormPreview(TestForm)
        input_template = '<input type="hidden" name="%s" value="%s" />'
        self.input = input_template % (self.preview.unused_name('stage'), "%d")
        self.test_data = {'field1': 'foo', 'field1_': 'asdf'}

    def test_unused_name(self):
        """
        Verifies name mangling to get unique field name.
        """
        self.assertEqual(self.preview.unused_name('field1'), 'field1__')

    def test_form_get(self):
        """
        Test contrib.formtools.preview form retrieval.

        Use the client library to see if we can successfully retrieve
        the form (mostly testing the setup ROOT_URLCONF
        process). Verify that an additional hidden input field
        is created to manage the stage.
        """
        response = self.client.get('/preview/')
        stage = self.input % 1
        self.assertContains(response, stage, 1)
        self.assertEqual(response.context['custom_context'], True)
        self.assertEqual(response.context['form'].initial, {'field1': 'Works!'})

    def test_form_preview(self):
        """
        Test contrib.formtools.preview form preview rendering.

        Use the client library to POST to the form to see if a preview
        is returned. If we do get a form back check that the hidden
        value is correctly managing the state of the form.
        """
        # Pass strings for form submittal and add stage variable to
        # show we previously saw first stage of the form.
        self.test_data.update({'stage': 1, 'date1': datetime.date(2006, 10, 25)})
        response = self.client.post('/preview/', self.test_data)
        # Check to confirm stage is set to 2 in output form.
        stage = self.input % 2
        self.assertContains(response, stage, 1)

    def test_form_submit(self):
        """
        Test contrib.formtools.preview form submittal.

        Use the client library to POST to the form with stage set to 3
        to see if our forms done() method is called. Check first
        without the security hash, verify failure, retry with security
        hash and verify success.
        """
        # Pass strings for form submittal and add stage variable to
        # show we previously saw first stage of the form.
        self.test_data.update({'stage': 2, 'date1': datetime.date(2006, 10, 25)})
        response = self.client.post('/preview/', self.test_data)
        self.assertNotEqual(response.content, success_string_encoded)
        hash = self.preview.security_hash(None, TestForm(self.test_data))
        self.test_data.update({'hash': hash})
        response = self.client.post('/preview/', self.test_data)
        self.assertEqual(response.content, success_string_encoded)

    def test_bool_submit(self):
        """
        Test contrib.formtools.preview form submittal when form contains:
        BooleanField(required=False)

        Ticket: #6209 - When an unchecked BooleanField is previewed, the preview
        form's hash would be computed with no value for ``bool1``. However, when
        the preview form is rendered, the unchecked hidden BooleanField would be
        rendered with the string value 'False'. So when the preview form is
        resubmitted, the hash would be computed with the value 'False' for
        ``bool1``. We need to make sure the hashes are the same in both cases.
        """
        self.test_data.update({'stage': 2})
        hash = self.preview.security_hash(None, TestForm(self.test_data))
        self.test_data.update({'hash': hash, 'bool1': 'False'})
        with warnings.catch_warnings(record=True):
            response = self.client.post('/preview/', self.test_data)
            self.assertEqual(response.content, success_string_encoded)

    def test_form_submit_good_hash(self):
        """
        Test contrib.formtools.preview form submittal, using a correct
        hash
        """
        # Pass strings for form submittal and add stage variable to
        # show we previously saw first stage of the form.
        self.test_data.update({'stage': 2})
        response = self.client.post('/preview/', self.test_data)
        self.assertNotEqual(response.content, success_string_encoded)
        hash = utils.form_hmac(TestForm(self.test_data))
        self.test_data.update({'hash': hash})
        response = self.client.post('/preview/', self.test_data)
        self.assertEqual(response.content, success_string_encoded)

    def test_form_submit_bad_hash(self):
        """
        Test contrib.formtools.preview form submittal does not proceed
        if the hash is incorrect.
        """
        # Pass strings for form submittal and add stage variable to
        # show we previously saw first stage of the form.
        self.test_data.update({'stage': 2})
        response = self.client.post('/preview/', self.test_data)
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(response.content, success_string_encoded)
        hash = utils.form_hmac(TestForm(self.test_data)) + "bad"
        self.test_data.update({'hash': hash})
        # BUG FIX: was '/previewpreview/', a nonexistent URL whose 404 made the
        # assertion below pass without ever exercising the bad-hash path.
        response = self.client.post('/preview/', self.test_data)
        self.assertNotEqual(response.content, success_string_encoded)
class FormHmacTests(unittest.TestCase):
    """Unit tests of the security-hash helper itself (no HTTP round trip)."""

    def test_textfield_hash(self):
        """
        Regression test for #10034: the hash generation function should ignore
        leading/trailing whitespace so as to be friendly to broken browsers that
        submit it (usually in textareas).
        """
        form_a = HashTestForm({'name': 'joe', 'bio': 'Speaking español.'})
        form_b = HashTestForm({'name': ' joe', 'bio': 'Speaking español. '})
        self.assertEqual(utils.form_hmac(form_a), utils.form_hmac(form_b))

    def test_empty_permitted(self):
        """
        Regression test for #10643: the security hash should allow forms with
        empty_permitted = True, or forms where data has not changed.
        """
        blank_form = HashTestBlankForm({})
        empty_ok_form = HashTestForm({}, empty_permitted=True)
        self.assertEqual(utils.form_hmac(blank_form), utils.form_hmac(empty_ok_form))
|
{
"content_hash": "b56bf07b5bd0775b8007655e4a40dcae",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 81,
"avg_line_length": 40.16304347826087,
"alnum_prop": 0.6500676589986468,
"repo_name": "mbox/django",
"id": "5d3b2747a7270207d75fc05d54d6d4dcf3006926",
"size": "7416",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/contrib/formtools/tests/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52957"
},
{
"name": "JavaScript",
"bytes": "102668"
},
{
"name": "Python",
"bytes": "9362347"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
}
|
"""Support for IHC sensors."""
from homeassistant.components.sensor import SensorDeviceClass, SensorEntity
from homeassistant.const import CONF_UNIT_OF_MEASUREMENT
from homeassistant.util.unit_system import TEMPERATURE_UNITS
from . import IHC_CONTROLLER, IHC_INFO
from .ihcdevice import IHCDevice
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the IHC sensor platform."""
    if discovery_info is None:
        return
    sensors = []
    for name, device in discovery_info.items():
        product_cfg = device["product_cfg"]
        # Find controller that corresponds with device id
        ihc_key = f"ihc{device['ctrl_id']}"
        controller_data = hass.data[ihc_key]
        sensors.append(
            IHCSensor(
                controller_data[IHC_CONTROLLER],
                name,
                device["ihc_id"],
                controller_data[IHC_INFO],
                product_cfg[CONF_UNIT_OF_MEASUREMENT],
                device["product"],
            )
        )
    add_entities(sensors)
class IHCSensor(IHCDevice, SensorEntity):
    """Implementation of the IHC sensor."""

    def __init__(
        self, ihc_controller, name, ihc_id: int, info: bool, unit, product=None
    ) -> None:
        """Initialize the IHC sensor."""
        super().__init__(ihc_controller, name, ihc_id, info, product)
        # Last value pushed by the controller; None until the first update.
        self._state = None
        self._unit_of_measurement = unit

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        # A temperature unit marks this entity as a temperature sensor.
        if self._unit_of_measurement in TEMPERATURE_UNITS:
            return SensorDeviceClass.TEMPERATURE
        return None

    @property
    def native_value(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def native_unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    def on_ihc_change(self, ihc_id, value):
        """Handle IHC resource change."""
        self._state = value
        self.schedule_update_ha_state()
|
{
"content_hash": "a423a9c5e17398569af7c784b7dde41b",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 79,
"avg_line_length": 34.111111111111114,
"alnum_prop": 0.6389018147975802,
"repo_name": "home-assistant/home-assistant",
"id": "d032043b9321578379ed5f9d75998628a2aab2dd",
"size": "2149",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ihc/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
}
|
"""Read CSV as unicode from both python 2 and 3 transparently."""
from __future__ import absolute_import
import csv
import warnings
import six
# noinspection PyUnusedLocal
class UnicodeReader(six.Iterator):
    """CSV reader that can handle unicode.

    Must be used as a context manager:

        with UnicodeReader('myfile.csv') as reader:
            pass  # do things with reader

    :param filename: path to file to open
    :param dialect: a csv.Dialect instance or dialect name
    :param encoding: text encoding of file
    :param fallback_encoding: encoding to fall back to if default
        encoding fails; gives warning if it's used.

    All other parameters will be passed through to csv.reader()
    """

    def __init__(self, filename, dialect=csv.excel,
                 encoding="utf-8", fallback_encoding="latin-1", **kwargs):
        self.filename = filename
        self.dialect = dialect
        self.encoding = encoding
        self.kwargs = kwargs
        self.fileobj = None
        self.reader = None
        self.fallback_encoding = fallback_encoding

    def __enter__(self):
        if six.PY3:
            self.fileobj = open(self.filename, 'rt',
                                encoding=self.encoding, newline='')
            try:
                # Probe the whole file so a bad encoding is detected up front.
                self.fileobj.read()
            except UnicodeDecodeError:
                warnings.warn("Decoding with '%s' codec failed; falling "
                              "back to '%s'" % (self.encoding,
                                                self.fallback_encoding))
                # BUG FIX: close the first handle before reopening so the
                # original file object is not leaked on fallback.
                self.fileobj.close()
                self.fileobj = open(self.filename, 'rt',
                                    encoding=self.fallback_encoding,
                                    newline='')
                self.encoding = self.fallback_encoding
            finally:
                # Rewind (the probe consumed the stream) before handing the
                # file object to csv.reader.
                self.fileobj.seek(0)
        else:
            self.fileobj = open(self.filename, 'rb')
            try:
                self.fileobj.read().decode(self.encoding)
            except UnicodeDecodeError:
                warnings.warn("Decoding with '%s' codec failed; falling "
                              "back to '%s'" % (self.encoding,
                                                self.fallback_encoding))
                self.encoding = self.fallback_encoding
            finally:
                self.fileobj.seek(0)
        self.reader = csv.reader(self.fileobj, dialect=self.dialect,
                                 **self.kwargs)
        return self

    def __exit__(self, type_, value, traceback):
        self.fileobj.close()

    def __next__(self):
        row = next(self.reader)
        if six.PY3:
            return row
        # Python 2: csv yields bytes; decode each cell to unicode.
        return [s.decode(self.encoding) for s in row]

    def __iter__(self):
        return self
# noinspection PyUnusedLocal
class UnicodeWriter(object):
    """CSV writer that can handle unicode.

    Must be used as a context manager:

        with UnicodeWriter('myfile.csv') as writer:
            pass  # do things with writer

    :param filename: path to file to open
    :param dialect: a csv.Dialect instance or dialect name
    :param encoding: text encoding of file

    All other parameters will be passed through to csv.writer()
    """

    def __init__(self, filename, dialect=csv.excel,
                 encoding="utf-8", lineterminator='\n', **kwargs):
        self.filename = filename
        self.dialect = dialect
        self.encoding = encoding
        self.lineterminator = lineterminator
        self.kwargs = kwargs
        self.writer = None
        self.fileobj = None

    def __enter__(self):
        # Python 3 opens in text mode with an explicit encoding; Python 2's
        # csv module wants a binary file and pre-encoded cells (see writerow).
        if six.PY3:
            self.fileobj = open(self.filename, 'wt',
                                encoding=self.encoding, newline='')
        else:
            self.fileobj = open(self.filename, 'wb')
        self.writer = csv.writer(self.fileobj, dialect=self.dialect,
                                 lineterminator=self.lineterminator,
                                 **self.kwargs)
        return self

    def __exit__(self, type_, value, traceback):
        self.fileobj.close()

    def writerow(self, row):
        """Write a row to the output.

        :param row: list of cells to write to the file
        """
        if six.PY3:
            self.writer.writerow(row)
        else:
            # Encode each cell, treating None as the empty string.
            self.writer.writerow(
                [(cell or '').encode(self.encoding) for cell in row])

    def writerows(self, rows):
        """Write many rows to the output.

        :param rows: list of lists of cells to write
        """
        for single_row in rows:
            self.writerow(single_row)
|
{
"content_hash": "eac959809ba739fb6c88c253e9ea86d8",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 74,
"avg_line_length": 33.01459854014598,
"alnum_prop": 0.5525093964183064,
"repo_name": "chill17/pycounter",
"id": "cbe7ed16054de7c954f32ac84c31bbbcc8e99e46",
"size": "4523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycounter/csvhelper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103443"
}
],
"symlink_target": ""
}
|
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ManagedPrivateEndpointsOperations(object):
"""ManagedPrivateEndpointsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.synapse.managedprivateendpoints.v2020_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def get(
        self,
        managed_private_endpoint_name,  # type: str
        managed_virtual_network_name="default",  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ManagedPrivateEndpoint"
        """Get Managed Private Endpoints.

        :param managed_private_endpoint_name: Managed private endpoint name.
        :type managed_private_endpoint_name: str
        :param managed_virtual_network_name: Managed virtual network name.
        :type managed_virtual_network_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedPrivateEndpoint, or the result of cls(response)
        :rtype: ~azure.synapse.managedprivateendpoints.v2020_12_01.models.ManagedPrivateEndpoint
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedPrivateEndpoint"]
        # Map well-known auth/lookup/conflict statuses to specific exception
        # types; callers may extend or override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-12-01"
        accept = "application/json"

        # Construct URL from the template attached to this method's metadata.
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            # skip_quote: the endpoint is a full host and must not be URL-encoded.
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'managedVirtualNetworkName': self._serialize.url("managed_virtual_network_name", managed_virtual_network_name, 'str'),
            'managedPrivateEndpointName': self._serialize.url("managed_private_endpoint_name", managed_private_endpoint_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Send the request synchronously through the client pipeline.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything but 200 is an error; map_error raises the mapped type if any.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = self._deserialize('ManagedPrivateEndpoint', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/managedVirtualNetworks/{managedVirtualNetworkName}/managedPrivateEndpoints/{managedPrivateEndpointName}'}  # type: ignore
def create(
    self,
    managed_private_endpoint_name,  # type: str
    managed_virtual_network_name="default",  # type: str
    properties=None,  # type: Optional["_models.ManagedPrivateEndpointProperties"]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ManagedPrivateEndpoint"
    """Create Managed Private Endpoints.

    :param managed_private_endpoint_name: Managed private endpoint name.
    :type managed_private_endpoint_name: str
    :param managed_virtual_network_name: Managed virtual network name.
    :type managed_virtual_network_name: str
    :param properties: Managed private endpoint properties.
    :type properties: ~azure.synapse.managedprivateendpoints.v2020_12_01.models.ManagedPrivateEndpointProperties
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ManagedPrivateEndpoint, or the result of cls(response)
    :rtype: ~azure.synapse.managedprivateendpoints.v2020_12_01.models.ManagedPrivateEndpoint
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedPrivateEndpoint"]
    # Map well-known failure statuses onto azure-core exception types;
    # callers may extend the mapping via the ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Wrap the bare properties in the envelope model the service expects.
    _managed_private_endpoint = _models.ManagedPrivateEndpoint(properties=properties)
    api_version = "2020-12-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the operation's URL template with serialized path components.
    path_args = {
        'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        'managedVirtualNetworkName': self._serialize.url("managed_virtual_network_name", managed_virtual_network_name, 'str'),
        'managedPrivateEndpointName': self._serialize.url("managed_private_endpoint_name", managed_private_endpoint_name, 'str'),
    }
    request_url = self._client.format_url(self.create.metadata['url'], **path_args)  # type: ignore

    query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    body = self._serialize.body(_managed_private_endpoint, 'ManagedPrivateEndpoint')
    request = self._client.put(request_url, query_params, headers, content=body)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = self._deserialize('ManagedPrivateEndpoint', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
create.metadata = {'url': '/managedVirtualNetworks/{managedVirtualNetworkName}/managedPrivateEndpoints/{managedPrivateEndpointName}'}  # type: ignore
def delete(
    self,
    managed_private_endpoint_name,  # type: str
    managed_virtual_network_name="default",  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Delete Managed Private Endpoints.

    :param managed_private_endpoint_name: Managed private endpoint name.
    :type managed_private_endpoint_name: str
    :param managed_virtual_network_name: Managed virtual network name.
    :type managed_virtual_network_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map well-known failure statuses onto azure-core exception types;
    # callers may extend the mapping via the ``error_map`` kwarg.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-12-01"

    # Expand the operation's URL template with serialized path components.
    path_args = {
        'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        'managedVirtualNetworkName': self._serialize.url("managed_virtual_network_name", managed_virtual_network_name, 'str'),
        'managedPrivateEndpointName': self._serialize.url("managed_private_endpoint_name", managed_private_endpoint_name, 'str'),
    }
    request_url = self._client.format_url(self.delete.metadata['url'], **path_args)  # type: ignore

    query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]

    request = self._client.delete(request_url, query_params, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # The service answers 202 (accepted) or 204 (already gone); anything
    # else is an error.
    if response.status_code not in [202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})
delete.metadata = {'url': '/managedVirtualNetworks/{managedVirtualNetworkName}/managedPrivateEndpoints/{managedPrivateEndpointName}'}  # type: ignore
def list(
    self,
    managed_virtual_network_name="default",  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.ManagedPrivateEndpointListResponse"]
    """List Managed Private Endpoints.

    Returns a lazy pager; no request is made until iteration starts.

    :param managed_virtual_network_name: Managed virtual network name.
    :type managed_virtual_network_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ManagedPrivateEndpointListResponse or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.synapse.managedprivateendpoints.v2020_12_01.models.ManagedPrivateEndpointListResponse]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedPrivateEndpointListResponse"]
    # Map auth/not-found/conflict statuses to azure-core exceptions;
    # extensible via the ``error_map`` kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-12-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for either the first page (templated URL)
        # or a continuation page (server-provided next_link).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                'managedVirtualNetworkName': self._serialize.url("managed_virtual_network_name", managed_virtual_network_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters (api-version only applies to the first page)
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                'managedVirtualNetworkName': self._serialize.url("managed_virtual_network_name", managed_virtual_network_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Split one page into (continuation link, element iterator) for ItemPaged.
        deserialized = self._deserialize('ManagedPrivateEndpointListResponse', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page, raising a mapped error on any non-200 status.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/managedVirtualNetworks/{managedVirtualNetworkName}/managedPrivateEndpoints'}  # type: ignore
|
{
"content_hash": "81d7861376bb2fb6230797d8dfffcb72",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 153,
"avg_line_length": 48.766891891891895,
"alnum_prop": 0.6560443366816765,
"repo_name": "Azure/azure-sdk-for-python",
"id": "5cd53fa45f3a6b9115fadd1ae1636f0c61f88dc1",
"size": "14902",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/synapse/azure-synapse-managedprivateendpoints/azure/synapse/managedprivateendpoints/v2020_12_01/operations/_managed_private_endpoints_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import argparse
from utils import Storage
class CLIArgumentsTreeParser(object):
    """Build a nested argparse CLI from a declarative config tree.

    ``config`` is a list of command descriptors, each a dict with a
    ``name``, a ``desc`` (kwargs for ``add_parser``) and either
    ``subcommands`` (a nested list of the same shape) or ``arguments``
    (a mapping of argument name to ``add_argument`` kwargs).
    ``parse()`` parses ``sys.argv`` and returns the flat argparse result
    re-structured into nested ``Storage`` objects.
    """

    def __init__(self, config, root_name):
        self.parser = argparse.ArgumentParser()
        self.config = config
        self.root_name = root_name

    def build(self, parser, node, node_name):
        """Recursively register the subparsers/arguments described by *node*."""
        subparsers = parser.add_subparsers(dest=node_name)
        for item in node:
            subparser = subparsers.add_parser(item['name'], **item['desc'])
            if 'subcommands' in item:
                self.build(subparser, item['subcommands'], item['name'])
            elif 'arguments' in item:
                for name in item['arguments']:
                    subparser.add_argument(name, **item['arguments'][name])

    def parse(self):
        """Parse sys.argv and return the nested Storage structure."""
        self.build(self.parser, self.config, self.root_name)
        self.raw_data = vars(self.parser.parse_args())
        self.data = Storage()
        self.structuring(self.root_name, self.data)
        return self.data

    def structuring(self, name, result, nodes=None):
        """Fold the flat argparse result dict into nested Storage objects.

        BUG FIX: ``nodes`` previously defaulted to a shared mutable list
        (``nodes=[]``), so visited command names leaked across successive
        ``parse()`` calls and across instances, corrupting later results.
        Default to None and create a fresh list per top-level call.
        """
        if nodes is None:
            nodes = []
        if name in self.raw_data:
            nodes.append(name)
            result.name = name
            if len(self.raw_data) > len(nodes):
                # More keys remain: descend into the selected subcommand.
                result.sub = Storage()
                self.structuring(self.raw_data[name], result.sub, nodes)
            else:
                result.sub = Storage(name=self.raw_data[name])
        else:
            # Leaf level: everything not visited so far is an argument value.
            result.name = name
            result.args = Storage()
            for dname in self.raw_data:
                if dname not in nodes:
                    result.args[dname] = self.raw_data[dname]
|
{
"content_hash": "b21ab35035bf598324446e4e2ec277f7",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 75,
"avg_line_length": 31.7,
"alnum_prop": 0.5621451104100946,
"repo_name": "voidpp/deprecated-python-tools",
"id": "9638c825cec6dcd59e3fe73cf4244c79191c9f46",
"size": "1586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cliargumentstreeparser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26774"
}
],
"symlink_target": ""
}
|
'''
add numbers which are in the position of prime numbers
'''
N =int(raw_input())
marks = map(int, raw_input().split())
total = 0
for num in xrange(1,N+1):
if all(num%i!=0 for i in xrange(2,num)):
total += marks[num-1]
print total
|
{
"content_hash": "7e444bbd5afed900ac2d45d00502f31d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 54,
"avg_line_length": 17.333333333333332,
"alnum_prop": 0.5923076923076923,
"repo_name": "ganesh-95/python-programs",
"id": "029965387204148642b7d658fce881996f11785f",
"size": "261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thoughtworks/hr4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29683"
}
],
"symlink_target": ""
}
|
def add_native_methods(clazz):
    """Install stub implementations of D3DGraphicsDevice's native methods on *clazz*.

    Every stub simply raises NotImplementedError. The attribute names appear
    to encode the original JNI method name plus its parameter types separated
    by double underscores (TODO confirm against the generator); each stub's
    arity matches that signature, so both names and arities are the contract
    and must not be changed.
    """
    def initD3D____(a0):
        raise NotImplementedError()

    def getDeviceCapsNative__int__(a0, a1):
        raise NotImplementedError()

    def getDeviceIdNative__int__(a0, a1):
        raise NotImplementedError()

    def enterFullScreenExclusiveNative__int__long__(a0, a1, a2):
        raise NotImplementedError()

    def exitFullScreenExclusiveNative__int__(a0, a1):
        raise NotImplementedError()

    def getCurrentDisplayModeNative__int__(a0, a1):
        raise NotImplementedError()

    def configDisplayModeNative__int__long__int__int__int__int__(a0, a1, a2, a3, a4, a5, a6):
        raise NotImplementedError()

    def enumDisplayModesNative__int__java_util_ArrayList__(a0, a1, a2):
        raise NotImplementedError()

    def getAvailableAcceleratedMemoryNative__int__(a0, a1):
        raise NotImplementedError()

    def isD3DAvailableOnDeviceNative__int__(a0, a1):
        raise NotImplementedError()

    # Register every stub on the class under its mangled name, as a
    # staticmethod so no implicit self/cls argument is injected.
    clazz.initD3D____ = staticmethod(initD3D____)
    clazz.getDeviceCapsNative__int__ = staticmethod(getDeviceCapsNative__int__)
    clazz.getDeviceIdNative__int__ = staticmethod(getDeviceIdNative__int__)
    clazz.enterFullScreenExclusiveNative__int__long__ = staticmethod(enterFullScreenExclusiveNative__int__long__)
    clazz.exitFullScreenExclusiveNative__int__ = staticmethod(exitFullScreenExclusiveNative__int__)
    clazz.getCurrentDisplayModeNative__int__ = staticmethod(getCurrentDisplayModeNative__int__)
    clazz.configDisplayModeNative__int__long__int__int__int__int__ = staticmethod(configDisplayModeNative__int__long__int__int__int__int__)
    clazz.enumDisplayModesNative__int__java_util_ArrayList__ = staticmethod(enumDisplayModesNative__int__java_util_ArrayList__)
    clazz.getAvailableAcceleratedMemoryNative__int__ = staticmethod(getAvailableAcceleratedMemoryNative__int__)
    clazz.isD3DAvailableOnDeviceNative__int__ = staticmethod(isD3DAvailableOnDeviceNative__int__)
|
{
"content_hash": "d5b9abd0bff2a67956f589a1d0bb9350",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 139,
"avg_line_length": 46.595238095238095,
"alnum_prop": 0.7250894225855902,
"repo_name": "laffra/pava",
"id": "e9efddd36d2a07d0053f4e01a613fd9e60da55fb",
"size": "1957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pava/implementation/natives/sun/java2d/d3d/D3DGraphicsDevice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "144"
},
{
"name": "Python",
"bytes": "369288"
}
],
"symlink_target": ""
}
|
import logging
import urlparse
from django.conf import settings # noqa
from django.contrib.auth import logout # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from keystoneclient import exceptions as keystone_exceptions
from openstack_auth import backend
from horizon import exceptions
from horizon import messages
from openstack_dashboard.api import base
LOG = logging.getLogger(__name__)

# Module-level cache for get_default_role(); stays None until first lookup.
DEFAULT_ROLE = None

# Set up our data structure for managing Identity API versions, and
# add a couple utility methods to it.
class IdentityAPIVersionManager(base.APIVersionManager):
    """Version manager for the Identity API with v2/v3 convenience helpers."""

    def upgrade_v2_user(self, user):
        """Give a v2 user object a v3-style ``project_id`` attribute.

        v2 calls the default project ``tenantId``; copy it over when
        ``project_id`` is missing so callers can rely on one name.
        """
        if getattr(user, "project_id", None) is None:
            user.project_id = getattr(user, "tenantId", None)
        return user

    def get_project_manager(self, *args, **kwargs):
        """Return the project manager: ``tenants`` on v2, ``projects`` on v3."""
        client = keystoneclient(*args, **kwargs)
        return client.tenants if VERSIONS.active < 3 else client.projects
VERSIONS = IdentityAPIVersionManager("identity", preferred_version=3)

# Import from oldest to newest so that "preferred" takes correct precedence.
# Each client version is optional: a missing keystoneclient sub-package is
# simply not registered rather than breaking the import of this module.
try:
    from keystoneclient.v2_0 import client as keystone_client_v2
    VERSIONS.load_supported_version(2.0, {"client": keystone_client_v2})
except ImportError:
    pass

try:
    from keystoneclient.v3 import client as keystone_client_v3
    VERSIONS.load_supported_version(3, {"client": keystone_client_v3})
except ImportError:
    pass
class Service(base.APIDictWrapper):
    """Dict wrapper for one service-catalog entry from Keystone.

    Resolves the public/internal endpoint URLs for the given region at
    construction time.
    """
    _attrs = ['id', 'type', 'name']

    def __init__(self, service, region, *args, **kwargs):
        super(Service, self).__init__(service, *args, **kwargs)
        self.public_url = base.get_url_for_service(service, region,
                                                   'publicURL')
        self.url = base.get_url_for_service(service, region, 'internalURL')
        # Host is derived from the internal URL when one exists.
        self.host = urlparse.urlparse(self.url).hostname if self.url else None
        self.disabled = None
        self.region = region

    def __unicode__(self):
        if self.type == "identity":
            return _("%(type)s (%(backend)s backend)") \
                % {"type": self.type, "backend": keystone_backend_name()}
        return self.type

    def __repr__(self):
        return "<Service: %s>" % unicode(self)
def _get_endpoint_url(request, endpoint_type, catalog=None):
    """Resolve the identity endpoint URL for *request*.

    Prefers the user's service catalog, falling back to the session's
    region endpoint or the configured OPENSTACK_KEYSTONE_URL. The path is
    always rewritten to the active Identity API version.
    """
    if getattr(request.user, "service_catalog", None):
        url = base.url_for(request,
                           service_type='identity',
                           endpoint_type=endpoint_type)
    else:
        url = request.session.get('region_endpoint',
                                  getattr(settings, 'OPENSTACK_KEYSTONE_URL'))

    # TODO(gabriel): When the Service Catalog no longer contains API versions
    # in the endpoints this can be removed.
    parts = urlparse.urlparse(url)
    return "%s://%s/v%s" % (parts.scheme, parts.netloc, VERSIONS.active)
def keystoneclient(request, admin=False):
    """Returns a client connected to the Keystone backend.

    Several forms of authentication are supported:

        * Username + password -> Unscoped authentication
        * Username + password + tenant id -> Scoped authentication
        * Unscoped token -> Unscoped authentication
        * Unscoped token + tenant id -> Scoped authentication
        * Scoped token -> Scoped authentication

    Available services and data from the backend will vary depending on
    whether the authentication was scoped or unscoped.

    Lazy authentication if an ``endpoint`` parameter is provided.

    Calls requiring the admin endpoint should have ``admin=True`` passed in
    as a keyword argument.

    The client is cached so that subsequent API calls during the same
    request/response cycle don't have to be re-authenticated.
    """
    user = request.user
    if admin:
        # Admin access is restricted to superusers before any call is made.
        if not user.is_superuser:
            raise exceptions.NotAuthorized
        endpoint_type = 'adminURL'
    else:
        endpoint_type = getattr(settings,
                                'OPENSTACK_ENDPOINT_TYPE',
                                'internalURL')

    api_version = VERSIONS.get_active_version()

    # Take care of client connection caching/fetching a new client.
    # Admin vs. non-admin clients are cached separately for token matching.
    cache_attr = "_keystoneclient_admin" if admin \
        else backend.KEYSTONE_CLIENT_ATTR
    # Reuse the cached client only while its auth token still matches the
    # user's current token (or the user has no token at all).
    if hasattr(request, cache_attr) and (not user.token.id
            or getattr(request, cache_attr).auth_token == user.token.id):
        LOG.debug("Using cached client for token: %s" % user.token.id)
        conn = getattr(request, cache_attr)
    else:
        endpoint = _get_endpoint_url(request, endpoint_type)
        insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
        cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
        LOG.debug("Creating a new keystoneclient connection to %s." % endpoint)
        remote_addr = request.environ.get('REMOTE_ADDR', '')
        conn = api_version['client'].Client(token=user.token.id,
                                            endpoint=endpoint,
                                            original_ip=remote_addr,
                                            insecure=insecure,
                                            cacert=cacert,
                                            auth_url=endpoint,
                                            debug=settings.DEBUG)
        # Cache on the request so later calls in this cycle reuse it.
        setattr(request, cache_attr, conn)
    return conn
def domain_create(request, name, description=None, enabled=None):
    """Create a Keystone domain via the admin endpoint."""
    return keystoneclient(request, admin=True).domains.create(
        name, description=description, enabled=enabled)
def domain_get(request, domain_id):
    """Fetch a single domain by ID via the admin endpoint."""
    return keystoneclient(request, admin=True).domains.get(domain_id)
def domain_delete(request, domain_id):
    """Delete a domain by ID via the admin endpoint."""
    return keystoneclient(request, admin=True).domains.delete(domain_id)
def domain_list(request):
    """List all domains via the admin endpoint."""
    return keystoneclient(request, admin=True).domains.list()
def domain_update(request, domain_id, name=None, description=None,
                  enabled=None):
    """Update a domain's name, description and/or enabled flag."""
    return keystoneclient(request, admin=True).domains.update(
        domain_id, name, description, enabled)
def tenant_create(request, name, description=None, enabled=None, domain=None):
    """Create a project (v3) / tenant (v2); v2 has no domain concept."""
    manager = VERSIONS.get_project_manager(request, admin=True)
    if VERSIONS.active >= 3:
        return manager.create(name, domain,
                              description=description,
                              enabled=enabled)
    return manager.create(name, description, enabled)
def get_default_domain(request):
    """Return the domain to use when creating Identity objects.

    Uses the session's domain context when one is set; otherwise (on v3)
    falls back to the logged-in user's own domain. The result is wrapped in
    an APIDictWrapper with ``id`` and ``name``.
    """
    domain_id = request.session.get("domain_context", None)
    domain_name = request.session.get("domain_context_name", None)

    # if running in Keystone V3 or later
    if VERSIONS.active >= 3 and not domain_id:
        # No domain context set: default to the user's domain.
        domain_id = request.user.user_domain_id
        try:
            domain_name = domain_get(request, domain_id).name
        except Exception:
            LOG.warning("Unable to retrieve Domain: %s" % domain_id)

    return base.APIDictWrapper({"id": domain_id,
                                "name": domain_name})
# TODO(gabriel): Is there ever a valid case for admin to be false here?
# A quick search through the codebase reveals that it's always called with
# admin=true so I suspect we could eliminate it entirely as with the other
# tenant commands.
def tenant_get(request, project, admin=True):
    """Fetch a single project/tenant by ID."""
    return VERSIONS.get_project_manager(request, admin=admin).get(project)
def tenant_delete(request, project):
    """Delete a project/tenant by ID."""
    return VERSIONS.get_project_manager(request, admin=True).delete(project)
def tenant_list(request, paginate=False, marker=None, domain=None, user=None):
    """List projects/tenants; returns ``(tenants, has_more_data)``.

    v2 supports marker-based pagination; v3 instead filters by domain
    and/or user (no pagination).
    """
    manager = VERSIONS.get_project_manager(request, admin=True)
    page_size = request.session.get('horizon_pagesize',
                                    getattr(settings, 'API_RESULT_PAGE_SIZE',
                                            20))
    # Ask for one extra row as a look-ahead for "more data" detection.
    limit = page_size + 1 if paginate else None

    has_more_data = False
    if VERSIONS.active < 3:
        tenants = manager.list(limit, marker)
        if paginate and len(tenants) > page_size:
            # The look-ahead row exists: drop it and flag more data.
            tenants.pop(-1)
            has_more_data = True
    else:
        tenants = manager.list(domain=domain, user=user)
    return (tenants, has_more_data)
def tenant_update(request, project, name=None, description=None,
                  enabled=None, domain=None):
    """Update a project/tenant; ``domain`` only applies on v3."""
    manager = VERSIONS.get_project_manager(request, admin=True)
    if VERSIONS.active >= 3:
        return manager.update(project, name=name, description=description,
                              enabled=enabled, domain=domain)
    return manager.update(project, name, description, enabled)
def user_list(request, project=None, domain=None, group=None):
    """List users with optional filters; results get v3-style project_id."""
    if VERSIONS.active < 3:
        filters = {"tenant_id": project}
    else:
        filters = {
            "project": project,
            "domain": domain,
            "group": group
        }
    found = keystoneclient(request, admin=True).users.list(**filters)
    return [VERSIONS.upgrade_v2_user(u) for u in found]
def user_create(request, name=None, email=None, password=None, project=None,
                enabled=None, domain=None):
    """Create a user; v2 takes positional args, v3 keywords plus domain."""
    manager = keystoneclient(request, admin=True).users
    if VERSIONS.active >= 3:
        return manager.create(name, password=password, email=email,
                              project=project, enabled=enabled, domain=domain)
    return VERSIONS.upgrade_v2_user(
        manager.create(name, password, email, project, enabled))
def user_delete(request, user_id):
    """Delete a user by ID via the admin endpoint."""
    users = keystoneclient(request, admin=True).users
    return users.delete(user_id)
def user_get(request, user_id, admin=True):
    """Fetch a single user, normalized to have a v3-style project_id."""
    return VERSIONS.upgrade_v2_user(
        keystoneclient(request, admin=admin).users.get(user_id))
def user_update(request, user, **data):
    """Update a user's details, default project and/or password.

    :param user: user id (or object) to update.
    :param data: fields to change; on v2 ``password`` and ``project`` are
        handled by dedicated API calls, the rest by a single update.
    :raises keystone_exceptions.ClientException: when the backend is
        configured read-only for user data.
    :raises: the last error captured during the multi-step v2 update.
    """
    manager = keystoneclient(request, admin=True).users
    error = None

    if not keystone_can_edit_user():
        raise keystone_exceptions.ClientException(
            405, _("Identity service does not allow editing user data."))

    # The v2 API updates user model, password and default project separately
    if VERSIONS.active < 3:
        password = data.pop('password')
        project = data.pop('project')

        # Update user details; errors are collected (not raised immediately)
        # so the remaining steps still run.
        try:
            user = manager.update(user, **data)
        except Exception:
            error = exceptions.handle(request, ignore=True)

        # Update default tenant
        try:
            user_update_tenant(request, user, project)
            user.tenantId = project
        except Exception:
            error = exceptions.handle(request, ignore=True)

        # Check for existing roles
        # Show a warning if no role exists for the project
        user_roles = roles_for_user(request, user, project)
        if not user_roles:
            messages.warning(request,
                             _('User %s has no role defined for '
                               'that project.')
                             % data.get('name', None))

        # If present, update password
        # FIXME(gabriel): password change should be its own form + view
        if password:
            try:
                user_update_password(request, user, password)
                # BUG FIX: ``user`` is the updated User object at this point,
                # so the old comparison ``user == request.user.id`` (object vs
                # id string) could never match and users changing their own
                # password were not logged out. Compare IDs instead.
                if user.id == request.user.id:
                    logout(request)
            except Exception:
                error = exceptions.handle(request, ignore=True)
        if error is not None:
            raise error

    # v3 API is so much simpler...
    else:
        if not data['password']:
            data.pop('password')
        user = manager.update(user, **data)
        return VERSIONS.upgrade_v2_user(user)
def user_update_enabled(request, user, enabled):
    """Enable or disable a user account."""
    manager = keystoneclient(request, admin=True).users
    if VERSIONS.active >= 3:
        return manager.update(user, enabled=enabled)
    return manager.update_enabled(user, enabled)
def user_update_password(request, user, password, admin=True):
    """Set a user's password."""
    manager = keystoneclient(request, admin=admin).users
    if VERSIONS.active >= 3:
        return manager.update(user, password=password)
    return manager.update_password(user, password)
def user_update_own_password(request, origpassword, password):
    """Change the logged-in user's own password (non-admin endpoint)."""
    client = keystoneclient(request, admin=False)
    if VERSIONS.active >= 3:
        return client.users.update(request.user.id, password=password)
    # v2 needs the acting user's id set on the client itself.
    client.user_id = request.user.id
    return client.users.update_own_password(origpassword, password)
def user_update_tenant(request, user, project, admin=True):
    """Set a user's default project/tenant."""
    manager = keystoneclient(request, admin=admin).users
    if VERSIONS.active >= 3:
        return manager.update(user, project=project)
    return manager.update_tenant(user, project)
def group_create(request, domain_id, name, description=None):
    """Create a group inside *domain_id*."""
    return keystoneclient(request, admin=True).groups.create(
        domain=domain_id, name=name, description=description)
def group_get(request, group_id, admin=True):
    """Fetch a single group by ID."""
    return keystoneclient(request, admin=admin).groups.get(group_id)
def group_delete(request, group_id):
    """Delete a group by ID."""
    return keystoneclient(request, admin=True).groups.delete(group_id)
def group_list(request, domain=None, project=None, user=None):
    """List groups, optionally narrowed by user membership, domain and
    project role assignments."""
    manager = keystoneclient(request, admin=True).groups
    groups = manager.list(user=user)

    # TODO(dklyle): once keystoneclient supports filtering by
    # domain change this to use that cleaner implementation
    if domain:
        groups = [g for g in groups if g.domain_id == domain]

    if project:
        # Keep only groups that actually hold at least one role on project.
        groups = [g for g in groups
                  if roles_for_group(request, group=g.id, project=project)]

    return groups
def group_update(request, group_id, name=None, description=None):
    """Rename and/or re-describe a group."""
    return keystoneclient(request, admin=True).groups.update(
        group=group_id, name=name, description=description)
def add_group_user(request, group_id, user_id):
    """Add a user to a group."""
    return keystoneclient(request, admin=True).users.add_to_group(
        group=group_id, user=user_id)
def remove_group_user(request, group_id, user_id):
    """Remove a user from a group."""
    return keystoneclient(request, admin=True).users.remove_from_group(
        group=group_id, user=user_id)
def role_create(request, name):
    """Create a new role."""
    return keystoneclient(request, admin=True).roles.create(name)
def role_get(request, role_id):
    """Fetch a single role by ID."""
    return keystoneclient(request, admin=True).roles.get(role_id)
def role_update(request, role_id, name=None):
    """Rename a role."""
    return keystoneclient(request, admin=True).roles.update(role_id, name)
def role_delete(request, role_id):
    """Delete a role by ID."""
    return keystoneclient(request, admin=True).roles.delete(role_id)
def role_list(request):
    """Returns a global list of available roles."""
    roles = keystoneclient(request, admin=True).roles
    return roles.list()
def roles_for_user(request, user, project):
    """List the roles *user* holds on *project*."""
    manager = keystoneclient(request, admin=True).roles
    if VERSIONS.active >= 3:
        return manager.list(user=user, project=project)
    return manager.roles_for_user(user, project)
def add_tenant_user_role(request, project=None, user=None, role=None,
                         group=None, domain=None):
    """Adds a role for a user on a tenant (v3 also accepts group/domain)."""
    manager = keystoneclient(request, admin=True).roles
    if VERSIONS.active >= 3:
        return manager.grant(role, user=user, project=project,
                             group=group, domain=domain)
    return manager.add_user_role(user, role, project)
def remove_tenant_user_role(request, project=None, user=None, role=None,
                            group=None, domain=None):
    """Removes a given single role for a user from a tenant."""
    manager = keystoneclient(request, admin=True).roles
    if VERSIONS.active >= 3:
        return manager.revoke(role, user=user, project=project,
                              group=group, domain=domain)
    return manager.remove_user_role(user, role, project)
def remove_tenant_user(request, project=None, user=None, domain=None):
    """Removes all roles from a user on a tenant, removing them from it.

    BUG FIX / consistency: this previously called
    ``client.roles.roles_for_user(user, project)`` directly, which is the
    v2-only manager API; every other function in this module goes through
    the version-aware ``roles_for_user`` helper (which uses ``list`` on v3).
    Use the helper so the v3 path works too.
    """
    for role in roles_for_user(request, user, project):
        remove_tenant_user_role(request, user=user, role=role.id,
                                project=project, domain=domain)
def roles_for_group(request, group, domain=None, project=None):
    """List the roles *group* holds on a domain or project."""
    return keystoneclient(request, admin=True).roles.list(
        group=group, domain=domain, project=project)
def add_group_role(request, role, group, domain=None, project=None):
    """Adds a role for a group on a domain or project."""
    return keystoneclient(request, admin=True).roles.grant(
        role=role, group=group, domain=domain, project=project)
def remove_group_role(request, role, group, domain=None, project=None):
    """Removes a given single role for a group from a domain or project."""
    return keystoneclient(request, admin=True).roles.revoke(
        role=role, group=group, project=project, domain=domain)
def remove_group_roles(request, group, domain=None, project=None):
    """Strip every role *group* holds on the given domain or project,
    effectively removing the group from it."""
    admin_client = keystoneclient(request, admin=True)
    for role in admin_client.roles.list(group=group, domain=domain,
                                        project=project):
        remove_group_role(request, role=role.id, group=group,
                          domain=domain, project=project)
def get_default_role(request):
    """Look up and cache the role named by OPENSTACK_KEYSTONE_DEFAULT_ROLE.

    The result is memoized in a module global because the setting does not
    change between requests. The configured value may match either a role's
    id or its name.
    """
    global DEFAULT_ROLE
    default = getattr(settings, "OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
    if default and DEFAULT_ROLE is None:
        try:
            roles = keystoneclient(request, admin=True).roles.list()
        except Exception:
            roles = []
            exceptions.handle(request)
        for role in roles:
            if default in (role.id, role.name):
                DEFAULT_ROLE = role
                break
    return DEFAULT_ROLE
def list_ec2_credentials(request, user_id):
    """List a user's EC2 credential pairs."""
    ec2 = keystoneclient(request).ec2
    return ec2.list(user_id)
def create_ec2_credentials(request, user_id, tenant_id):
    """Create an EC2 credential pair for a user on a tenant."""
    ec2 = keystoneclient(request).ec2
    return ec2.create(user_id, tenant_id)
def get_user_ec2_credentials(request, user_id, access_token):
    """Fetch one EC2 credential pair by access token."""
    ec2 = keystoneclient(request).ec2
    return ec2.get(user_id, access_token)
def keystone_can_edit_domain():
    """Whether the backend permits domain editing (requires multi-domain
    support to be enabled as well)."""
    backend_settings = getattr(settings, "OPENSTACK_KEYSTONE_BACKEND", {})
    multidomain = getattr(settings,
                          'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT',
                          False)
    return backend_settings.get('can_edit_domain', True) and multidomain
def keystone_can_edit_user():
    """Whether the configured Keystone backend permits user editing."""
    return getattr(settings, "OPENSTACK_KEYSTONE_BACKEND",
                   {}).get('can_edit_user', True)
def keystone_can_edit_project():
    """Whether the configured Keystone backend permits project editing."""
    return getattr(settings, "OPENSTACK_KEYSTONE_BACKEND",
                   {}).get('can_edit_project', True)
def keystone_can_edit_group():
    """Whether the configured Keystone backend permits group editing."""
    return getattr(settings, "OPENSTACK_KEYSTONE_BACKEND",
                   {}).get('can_edit_group', True)
def keystone_can_edit_role():
    """Whether the configured Keystone backend permits role editing."""
    return getattr(settings, "OPENSTACK_KEYSTONE_BACKEND",
                   {}).get('can_edit_role', True)
def keystone_backend_name():
    """Return the configured Keystone backend's name, or 'unknown'."""
    if not hasattr(settings, "OPENSTACK_KEYSTONE_BACKEND"):
        return 'unknown'
    return settings.OPENSTACK_KEYSTONE_BACKEND['name']
|
{
"content_hash": "b1a603a10af8e28c1a80530d866b80f0",
"timestamp": "",
"source": "github",
"line_count": 603,
"max_line_length": 79,
"avg_line_length": 35.34825870646766,
"alnum_prop": 0.6411916490734225,
"repo_name": "openstack-ja/horizon",
"id": "e0b9f048d211f262b59bdf7084533e49b671171b",
"size": "22156",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/api/keystone.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160649"
},
{
"name": "JavaScript",
"bytes": "421267"
},
{
"name": "Python",
"bytes": "2777515"
},
{
"name": "Shell",
"bytes": "13001"
}
],
"symlink_target": ""
}
|
import pickle
import socket
import codecs
from ConfigParser import SafeConfigParser
from os.path import expanduser
from OpenSSL import SSL
from txjsonrpc.netstring import jsonrpc
from binascii import unhexlify
from twisted.application import service, internet
from twisted.python.log import ILogObserver
from twisted.internet import ssl, task, reactor
from twisted.web import resource, server
from twisted.web.resource import NoResource
from subspace.network import Server
from subspace import log
from subspace.message import *
# Daemon version reported over the RPC interface.
version = 20000
# NOTE(review): `sys` and `os` are used here but not imported above --
# presumably they arrive via `from subspace.message import *`; verify.
sys.path.append(os.path.dirname(__file__))
datafolder = expanduser("~") + "/.subspace/"
# Load the daemon configuration (UTF-8) from ~/.subspace/subspace.conf.
cfg = SafeConfigParser()
with codecs.open(datafolder + 'subspace.conf', 'r', encoding='utf-8') as f:
    cfg.readfp(f)
# Resolve the [bootstrap] section into (ip, port) tuples, doing a DNS
# lookup for entries that are hostnames rather than dotted quads.
bootstrap = cfg.items("bootstrap")
bootstrap_list = []
for node in bootstrap:
    try:
        socket.inet_aton(node[0])
        tup = (str(node[0]), int(node[1]))
    except socket.error:
        # inet_aton rejected the entry, so treat it as a hostname.
        ip = str(socket.gethostbyname(node[0]))
        tup = (ip, int(node[1]))
    bootstrap_list.append(tup)
# The [seeds] section is kept as "host:port" strings.
ssl_seeds = cfg.items("seeds")
seed_list = []
for seed in ssl_seeds:
    s = str(seed[0]) + ":" + str(seed[1])
    seed_list.append(s)
# Load the node's private key, generating and persisting one on first run.
# SECURITY: pickle.load executes arbitrary code if keys.pickle is ever
# tampered with; it is trusted only because it lives under the user's home.
if os.path.isfile(datafolder + 'keys.pickle'):
    privkey = pickle.load(open(datafolder + "keys.pickle", "rb"))
else:
    privkey = random_key()
    pickle.dump(privkey, open(datafolder + "keys.pickle", "wb"))
# Derived compressed public key in hex; bytes 2:66 are used as the node id.
pubkey = encode_pubkey(privkey_to_pubkey(privkey), "hex_compressed")
# Twisted application scaffolding; every server below hangs off this service.
application = service.Application("subspace")
application.setComponent(ILogObserver, log.FileLogObserver(sys.stdout, log.INFO).emit)
# Restore the DHT node from a cached routing table when one exists,
# otherwise start a fresh node and bootstrap off the configured peers.
if os.path.isfile('cache.pickle'):
    kserver = Server.loadState('cache.pickle', bootstrap_list, seed_list)
else:
    kserver = Server(id=unhexlify(pubkey[2:66]))
    kserver.bootstrap(bootstrap_list, seed_list)
kserver.saveStateRegularly('cache.pickle', 10)
# BUG FIX: ConfigParser.get() returns a string, but twisted's UDPServer
# requires an integer port number -- use getint() for the configured value.
udp_port = (cfg.getint("SUBSPACED", "port")
            if cfg.has_option("SUBSPACED", "port") else 8335)
udpserver = internet.UDPServer(udp_port, kserver.protocol)
udpserver.setServiceParent(application)
class MessageListener():
    """Collects encrypted values pushed in from the DHT and decrypts them
    in periodic batches.

    Decrypted messages accumulate in ``new_messages`` until a consumer
    (the RPC ``getnew`` call) drains the list.
    """
    def __init__(self):
        self.encrypted = {}
        self.new_messages = []
        # Attempt decryption of the accumulated batch every 30 seconds;
        # the initial run fires immediately.
        task.LoopingCall(self.attempt_decrypt).start(30, True)
    def notify(self, key, value):
        # Stash the raw value now; decryption happens in attempt_decrypt.
        self.encrypted[key] = ["", value]
    def attempt_decrypt(self):
        decoded = MessageDecoder(privkey, self.encrypted).get_messages()
        self.new_messages.extend(decoded)
        self.encrypted.clear()
# Single process-wide listener; the DHT protocol pushes stored values to it.
listener = MessageListener()
kserver.protocol.addMessageListener(listener)
class ChainedOpenSSLContextFactory(ssl.DefaultOpenSSLContextFactory):
    """SSL context factory that serves a full certificate *chain*.

    The stock DefaultOpenSSLContextFactory loads a single certificate;
    this variant calls use_certificate_chain_file() so any intermediate
    CA certificates in the file are presented to clients as well.
    """
    def __init__(self, privateKeyFileName, certificateChainFileName,
                 sslmethod=SSL.SSLv23_METHOD):
        """
        @param privateKeyFileName: Name of a file containing a private key
        @param certificateChainFileName: Name of a file containing a certificate chain
        @param sslmethod: The SSL method to use
        """
        self.privateKeyFileName = privateKeyFileName
        self.certificateChainFileName = certificateChainFileName
        self.sslmethod = sslmethod
        self.cacheContext()
    def cacheContext(self):
        # Build the context once and keep it; the base class hands out
        # self._context on every connection.
        context = SSL.Context(self.sslmethod)
        context.use_certificate_chain_file(self.certificateChainFileName)
        context.use_privatekey_file(self.privateKeyFileName)
        self._context = context
# Http-Server
class WebResource(resource.Resource):
    """HTTP facade over the DHT: GET /<key> fetches, POST /<key> stores."""
    def __init__(self, kserver):
        resource.Resource.__init__(self)
        self.kserver = kserver
        # throttle in seconds to check app for new data
        self.throttle = .25
        # define a list to store client requests
        self.delayed_requests = []
        # define a list to store incoming keys from new POSTs
        self.incoming_posts = []
        # setup a loop to process delayed requests
        loopingCall = task.LoopingCall(self.processDelayedRequests)
        loopingCall.start(self.throttle, False)
    def getChild(self, child, request):
        # Route every path segment back to this resource; the key is
        # parsed out of request.path in the render methods.
        return self
    def render_GET(self, request):
        """Look up the key named by the last path segment in the DHT."""
        def respond(value):
            # Fall back to a rendered 404 body when the key has no value.
            value = value or NoResource().render(request)
            request.write(value)
            request.finish()
        log.msg("Getting key: %s" % request.path.split('/')[-1])
        d = self.kserver.get(request.path.split('/')[-1])
        # NOTE(review): if kserver.get() returns a twisted Deferred this
        # branch is always taken and respond() writes the Deferred object
        # itself -- confirm get() is synchronous here.
        if d is not None:
            respond(d)
            return server.NOT_DONE_YET
        else:
            # No value yet: park the request for processDelayedRequests
            # to answer later (that method is currently a stub).
            self.delayed_requests.append(request)
            return server.NOT_DONE_YET
    def render_POST(self, request):
        """Store the request body under the last path segment's key."""
        key = request.path.split('/')[-1]
        value = request.content.getvalue()
        log.msg("Setting %s = %s" % (key, value))
        self.kserver.set(key, value)
        self.incoming_posts.append(key)
        return value
    def processDelayedRequests(self):
        """
        Processes the delayed requests that did not have
        any data to return last time around.
        """
        # NOTE(review): stub -- parked requests in self.delayed_requests
        # are never answered or timed out.
        #TODO
# Optional HTTP(S) front-end, enabled by the [SUBSPACED] "server" option.
if cfg.has_option("SUBSPACED", "server"):
    server_protocol = server.Site(WebResource(kserver))
    # BUG FIX: ConfigParser.get() returns a string; twisted's SSLServer /
    # TCPServer require an integer port, so read it with getint().
    http_port = (cfg.getint("SUBSPACED", "serverport")
                 if cfg.has_option("SUBSPACED", "serverport") else 8080)
    if cfg.has_option("SUBSPACED", "useSSL"):
        # Serve over TLS using the configured key and certificate chain.
        httpserver = internet.SSLServer(
            http_port,
            server_protocol,
            ChainedOpenSSLContextFactory(cfg.get("SUBSPACED", "sslkey"),
                                         cfg.get("SUBSPACED", "sslcert")))
    else:
        httpserver = internet.TCPServer(http_port, server_protocol)
    httpserver.setServiceParent(application)
# RPC-Server
class RPCCalls(jsonrpc.JSONRPC):
    """JSON-RPC (netstring) control interface for the local daemon."""
    def jsonrpc_getinfo(self):
        """Return daemon version, peer count and datastore statistics."""
        info = {}
        info["version"] = version
        num_peers = 0
        for bucket in kserver.protocol.router.buckets:
            num_peers += len(bucket)
        info["known peers"] = num_peers
        info["stored messages"] = len(kserver.storage.data)
        # Rough in-memory footprint: the dict itself plus the shallow
        # sizes of every key and value.
        size = sys.getsizeof(kserver.storage.data)
        size += sum(map(sys.getsizeof, kserver.storage.data.itervalues())) + sum(map(sys.getsizeof, kserver.storage.data.iterkeys()))
        info["db size"] = size
        return info
    def jsonrpc_getpubkey(self):
        """Return this node's compressed public key (hex)."""
        return pubkey
    def jsonrpc_getprivkey(self):
        """Return this node's private key.

        SECURITY: this hands the raw private key to anyone who can reach
        the RPC port -- keep the rpcallowip binding loopback-only.
        """
        return privkey
    def jsonrpc_getmessages(self):
        """Decrypt and return every message currently in the datastore."""
        return MessageDecoder(privkey, kserver.storage.get_all()).get_messages()
    def jsonrpc_getnew(self):
        """Return (and clear) messages received since the last call."""
        listener.attempt_decrypt()
        messages = listener.new_messages
        listener.new_messages = []
        return messages
    def jsonrpc_send(self, pubkey, message, store=True):
        """Encrypt *message* for *pubkey* and publish it into the DHT.

        ``message`` may be a list of words, which is joined with spaces.
        When ``store`` is False the peers relay but do not persist it.
        """
        if isinstance(message, list):
            message = " ".join(message)
        r = kserver.getRange()
        if r is False:
            # BUG FIX: corrected typo in the user-facing error message
            # ("Counldn't" -> "Couldn't").
            return "Couldn't find any peers. Maybe check your internet connection?"
        else:
            messages = MessageEncoder(pubkey, privkey, message, r).create_messages()
            for key, value in messages.items():
                log.msg("Setting %s = %s" % (key, value))
                if store:
                    kserver.send(unhexlify(key), value)
                else:
                    kserver.send(unhexlify(key), value, False)
            return "Message sent successfully"
# Expose the RPCCalls methods over JSON-RPC (netstrings) with introspection.
factory = jsonrpc.RPCFactory(RPCCalls)
factory.addIntrospection()
# Bind the control port (8334); default to loopback only, since the RPC
# interface can return the node's private key.
jsonrpcServer = internet.TCPServer(8334, factory, interface=cfg.get("SUBSPACED", "rpcallowip") if cfg.has_option("SUBSPACED", "rpcallowip") else "127.0.0.1")
jsonrpcServer.setServiceParent(application)
|
{
"content_hash": "b456c1a1d0c205e4811d5b3f6262d1a1",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 157,
"avg_line_length": 36.04651162790697,
"alnum_prop": 0.6557419354838709,
"repo_name": "cpacia/Subspace",
"id": "2757527851d399c7fd0a72af867743c73b48831a",
"size": "7750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "subspaced.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Protocol Buffer",
"bytes": "1586"
},
{
"name": "Python",
"bytes": "121900"
},
{
"name": "Shell",
"bytes": "1243"
}
],
"symlink_target": ""
}
|
"""Find the functions in a module missing type annotations.
To use it run
./functions_missing_types.py <module>
and it will print out a list of functions in the module that don't
have types.
"""
import argparse
import ast
import importlib
import os
# Absolute path of the repository root (this script lives in tools/).
# BUG FIX: the old expression joined ".." onto the *file* path and then
# took dirname, which simply yielded the path of this script itself, so
# the stubs lookup below could never find a real __init__.pyi.
NUMPY_ROOT = os.path.abspath(os.path.join(
    os.path.dirname(__file__), "..",
))
# Technically "public" functions (they don't start with an underscore)
# that we don't want to include.
# Maps module name -> set of attribute names ignored when diffing the
# runtime namespace against the stubs.
EXCLUDE_LIST = {
    "numpy": {
        # Stdlib modules in the namespace by accident
        "absolute_import",
        "division",
        "print_function",
        "warnings",
        "sys",
        "os",
        "math",
        # Accidentally public, deprecated, or shouldn't be used
        "Tester",
        "alen",
        "add_docstring",
        "add_newdoc",
        "add_newdoc_ufunc",
        "core",
        "compat",
        "fastCopyAndTranspose",
        "get_array_wrap",
        "int_asbuffer",
        "oldnumeric",
        "safe_eval",
        "set_numeric_ops",
        "test",
        # Builtins
        "bool",
        "complex",
        "float",
        "int",
        "long",
        "object",
        "str",
        "unicode",
        # More standard names should be preferred
        "alltrue",  # all
        "sometrue",  # any
    }
}
class FindAttributes(ast.NodeVisitor):
    """Collect names of top-level attributes/functions/classes in a stubs file.

    Only the top level of the stubs ast is walked: the function/class
    visitors deliberately do not recurse, so methods and nested
    definitions are never recorded.  See e.g.
    https://greentreesnakes.readthedocs.io/en/latest/index.html
    for more information on working with Python's ast.
    """

    def __init__(self):
        self.attributes = set()

    def visit_FunctionDef(self, node):
        # "__getattr__" is module machinery, not a real module member.
        if node.name != "__getattr__":
            self.attributes.add(node.name)
        # Intentionally no generic_visit: nested defs are not members.

    def visit_ClassDef(self, node):
        if not node.name.startswith("_"):
            self.attributes.add(node.name)

    def visit_AnnAssign(self, node):
        # Annotated module-level assignment, e.g. ``pi: float``.
        self.attributes.add(node.target.id)
def find_missing(module_name):
    """Print the public attributes of *module_name* missing from its stubs.

    Compares the imported module's runtime namespace against the names
    declared in its ``__init__.pyi`` (if any), minus the per-module
    exclude list, and prints the difference one name per line.
    """
    stubs_path = os.path.join(
        NUMPY_ROOT,
        module_name.replace(".", os.sep),
        "__init__.pyi",
    )
    module = importlib.import_module(module_name)
    runtime_attributes = {
        name for name in dir(module) if not name.startswith("_")
    }
    stubs_attributes = set()
    if os.path.isfile(stubs_path):
        with open(stubs_path) as f:
            tree = ast.parse(f.read())
        visitor = FindAttributes()
        visitor.visit(tree)
        stubs_attributes = visitor.attributes
    # else: no stubs for this module yet -- treat as an empty set.
    excluded = EXCLUDE_LIST.get(module_name, set())
    missing = runtime_attributes - stubs_attributes - excluded
    print("\n".join(sorted(missing)))
def main():
    """Parse the command line and report the module's missing annotations."""
    parser = argparse.ArgumentParser()
    parser.add_argument("module")
    arguments = parser.parse_args()
    find_missing(arguments.module)


if __name__ == "__main__":
    main()
|
{
"content_hash": "994cb7bf045cdefbee0d7366a6d00f09",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 79,
"avg_line_length": 23.856060606060606,
"alnum_prop": 0.582089552238806,
"repo_name": "madphysicist/numpy",
"id": "c2fe156f07163533c6d5583858ec50e4989c4088",
"size": "3171",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/functions_missing_types.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4953507"
},
{
"name": "C++",
"bytes": "407987"
},
{
"name": "Fortran",
"bytes": "11108"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Objective-C",
"bytes": "427"
},
{
"name": "Python",
"bytes": "9132022"
},
{
"name": "Shell",
"bytes": "9438"
},
{
"name": "Smarty",
"bytes": "4068"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.