| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
from oslo_policy import policy

from managesf.policies import base


BASE_POLICY_NAME = 'managesf.node'
POLICY_ROOT = BASE_POLICY_NAME + ':%s'


rules = [
    policy.RuleDefault(
        name=POLICY_ROOT % 'get',
        check_str=base.RULE_ANY),
    policy.RuleDefault(
        name=POLICY_ROOT % 'hold',
        check_str=base.RULE_ADMIN_OR_SERVICE),
    policy.RuleDefault(
        name=POLICY_ROOT % 'delete',
        check_str=base.RULE_ADMIN_OR_SERVICE),
    policy.RuleDefault(
        name=POLICY_ROOT % 'add_authorized_key',
        check_str=base.RULE_ADMIN_OR_SERVICE),
    policy.RuleDefault(
        name=POLICY_ROOT % 'image-get',
        check_str=base.RULE_ANY),
    policy.RuleDefault(
        name=POLICY_ROOT % 'image-start-update',
        check_str=base.RULE_ADMIN_OR_SERVICE),
    policy.RuleDefault(
        name=POLICY_ROOT % 'image-update-status',
        check_str=base.RULE_ADMIN_OR_SERVICE),
]


def list_rules():
    return rules
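

# A minimal usage sketch (an assumption, not part of the original module):
# oslo.policy consumes RuleDefault lists by registering them with an Enforcer,
# which can then authorize individual actions. The empty target/creds dicts
# below are illustrative placeholders for request-context data.
if __name__ == '__main__':
    from oslo_config import cfg
    from oslo_policy import policy as oslo_policy

    enforcer = oslo_policy.Enforcer(cfg.CONF)
    enforcer.register_defaults(list_rules())
    # A RULE_ANY-style check should pass even with empty credentials.
    print(enforcer.authorize(POLICY_ROOT % 'get', {}, {}, do_raise=False))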
|
{
"content_hash": "854af5b95b41cd71e9c3781a6f2ae4bb",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 49,
"avg_line_length": 26.63888888888889,
"alnum_prop": 0.6360792492179353,
"repo_name": "enovance/managesf",
"id": "e9c4c5681891da8ec739b5499f688d19e9bb6cdd",
"size": "1563",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "managesf/policies/node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "283851"
},
{
"name": "Shell",
"bytes": "831"
}
],
"symlink_target": ""
}
|
import logging
import pytest
from tests.common.test_vector import *
from tests.common.test_dimensions import *
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.impala_cluster import ImpalaCluster
from CatalogService import CatalogService
from CatalogService.CatalogService import TGetFunctionsRequest, TGetFunctionsResponse
from ErrorCodes.ttypes import TErrorCode
from Status.ttypes import TStatus
from thrift.transport.TSocket import TSocket
from thrift.protocol import TBinaryProtocol
from thrift.transport.TTransport import TBufferedTransport, TTransportException
from tests.util.filesystem_utils import WAREHOUSE
from tests.util.thrift_util import create_transport

LOG = logging.getLogger('test_catalog_service_client')


# TODO: Add a test that asserts correct/compatible responses
# to create/drop function requests. For example, BDR relies
# on a stable catalog Thrift API.
class TestCatalogServiceClient(ImpalaTestSuite):
  TEST_DB = 'catalog_service_client_test_db'

  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestCatalogServiceClient, cls).add_test_dimensions()
    cls.TestMatrix.add_dimension(create_single_exec_option_dimension())

    # There is no reason to run these tests using all dimensions.
    cls.TestMatrix.add_constraint(lambda v:
        v.get_value('table_format').file_format == 'parquet' and
        v.get_value('table_format').compression_codec == 'none')

  def setup_method(self, method):
    self.cleanup_db(self.TEST_DB)
    self.client.execute("create database %s location '%s/%s.db'" %
        (self.TEST_DB, WAREHOUSE, self.TEST_DB))

  def teardown_method(self, method):
    self.cleanup_db(self.TEST_DB)

  def test_get_functions(self, vector):
    impala_cluster = ImpalaCluster()
    catalogd = impala_cluster.catalogd.service
    trans_type = 'buffered'
    if pytest.config.option.use_kerberos:
      trans_type = 'kerberos'
    transport = create_transport(host=catalogd.hostname, port=catalogd.service_port,
                                 service='impala', transport_type=trans_type)
    transport.open()
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    catalog_client = CatalogService.Client(protocol)

    request = TGetFunctionsRequest()
    request.db_name = self.TEST_DB
    response = catalog_client.GetFunctions(request)
    assert response.status.status_code == TErrorCode.OK
    assert len(response.functions) == 0

    # Add a function and make sure it shows up.
    self.client.execute("create function %s.fn() RETURNS int "
        "LOCATION '%s/libTestUdfs.so' SYMBOL='Fn'" % (self.TEST_DB, WAREHOUSE))
    response = catalog_client.GetFunctions(request)
    LOG.debug(response)
    assert len(response.functions) == 1
    assert len(response.functions[0].arg_types) == 0
    assert response.functions[0].name.db_name == self.TEST_DB
    assert response.functions[0].name.function_name == 'fn'
    assert response.functions[0].aggregate_fn is None
    assert response.functions[0].scalar_fn is not None
    assert '/test-warehouse/libTestUdfs.so' in response.functions[0].hdfs_location

    # Add another scalar function with overloaded parameters and ensure it shows up.
    self.client.execute("create function %s.fn(int) RETURNS double "
        "LOCATION '%s/libTestUdfs.so' SYMBOL='Fn'" % (self.TEST_DB, WAREHOUSE))
    response = catalog_client.GetFunctions(request)
    LOG.debug(response)
    assert response.status.status_code == TErrorCode.OK
    assert len(response.functions) == 2
    functions = [fn for fn in response.functions]

    # Sort by number of args in the function (ascending).
    functions.sort(key=lambda fn: len(fn.arg_types))
    assert len(functions[0].arg_types) == 0
    assert len(functions[1].arg_types) == 1
    assert functions[0].signature == 'fn()'
    assert functions[1].signature == 'fn(INT)'

    # Verify aggregate functions can also be retrieved.
    self.client.execute("create aggregate function %s.agg_fn(int, string) RETURNS int "
        "LOCATION '%s/libTestUdas.so' UPDATE_FN='TwoArgUpdate'" %
        (self.TEST_DB, WAREHOUSE))
    response = catalog_client.GetFunctions(request)
    LOG.debug(response)
    assert response.status.status_code == TErrorCode.OK
    assert len(response.functions) == 3
    functions = [fn for fn in response.functions if fn.aggregate_fn is not None]

    # There should be only one aggregate function.
    assert len(functions) == 1

    # Negative test cases for database name.
    request.db_name = self.TEST_DB + "_does_not_exist"
    response = catalog_client.GetFunctions(request)
    LOG.debug(response)
    assert response.status.status_code == TErrorCode.GENERAL
    assert 'Database does not exist: ' in str(response.status)

    request = TGetFunctionsRequest()
    response = catalog_client.GetFunctions(request)
    LOG.debug(response)
    assert response.status.status_code == TErrorCode.GENERAL
    assert 'Database name must be set' in str(response.status)
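
# A hedged sketch (not part of the original test) of the plain 'buffered'
# transport that create_transport() builds above, spelled out with the raw
# thrift primitives already imported at the top of this module. The host and
# port are illustrative.
#
#   sock = TSocket('localhost', 26000)
#   transport = TBufferedTransport(sock)
#   transport.open()
#   protocol = TBinaryProtocol.TBinaryProtocol(transport)
#   catalog_client = CatalogService.Client(protocol)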
|
{
"content_hash": "0a4968653624b9b4363c63eb1ffc28f6",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 88,
"avg_line_length": 42.05833333333333,
"alnum_prop": 0.7259758272240935,
"repo_name": "kapilrastogi/Impala",
"id": "033b17670c13f22537736c6cfeb53cd80b7c42ce",
"size": "5702",
"binary": false,
"copies": "3",
"ref": "refs/heads/cdh5-trunk",
"path": "tests/catalog_service/test_catalog_service_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "421674"
},
{
"name": "C++",
"bytes": "8270225"
},
{
"name": "CMake",
"bytes": "114760"
},
{
"name": "CSS",
"bytes": "89516"
},
{
"name": "Groff",
"bytes": "1633"
},
{
"name": "HTML",
"bytes": "56"
},
{
"name": "Java",
"bytes": "3979799"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Lex",
"bytes": "22598"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Protocol Buffer",
"bytes": "630"
},
{
"name": "Python",
"bytes": "2099455"
},
{
"name": "Shell",
"bytes": "178431"
},
{
"name": "Thrift",
"bytes": "260303"
}
],
"symlink_target": ""
}
|
"""Simple HTTP Server.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
"""
__version__ = "0.5"
import os
import string
import posixpath
import BaseHTTPServer
import urllib
import cgi
import shutil
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple HTTP request handler with GET and HEAD commands.
This serves files from the current directory and any of its
subdirectories. It assumes that all files are plain text files
unless they have the extension ".html" in which case it assumes
they are HTML files.
The GET and HEAD requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "SimpleHTTP/" + __version__
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
if ctype.startswith('text/'):
mode = 'r'
else:
mode = 'rb'
try:
f = open(path, mode)
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory");
return None
list.sort(lambda a, b: cmp(a.lower(), b.lower()))
f = StringIO()
f.write("<title>Directory listing for %s</title>\n" % self.path)
f.write("<h2>Directory listing for %s</h2>\n" % self.path)
f.write("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name = cgi.escape(name)
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write('<li><a href="%s">%s</a>\n' % (linkname, displayname))
f.write("</ul>\n<hr>\n")
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
path = posixpath.normpath(urllib.unquote(path))
words = string.splitfields(path, '/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
return path
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
-- note however that this the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using text/plain
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if self.extensions_map.has_key(ext):
return self.extensions_map[ext]
ext = string.lower(ext)
if self.extensions_map.has_key(ext):
return self.extensions_map[ext]
else:
return self.extensions_map['']
extensions_map = {
'': 'text/plain', # Default, *must* be present
'.html': 'text/html',
'.htm': 'text/html',
'.gif': 'image/gif',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
}
def test(HandlerClass = SimpleHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
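
# A small customization sketch (not part of the original module): the handler
# resolves MIME types through the class-level extensions_map, so a subclass
# can serve additional types without touching send_head().
#
#   class ImageFriendlyHandler(SimpleHTTPRequestHandler):
#       extensions_map = SimpleHTTPRequestHandler.extensions_map.copy()
#       extensions_map['.png'] = 'image/png'
#
#   test(HandlerClass=ImageFriendlyHandler)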
|
{
"content_hash": "3ac12165debd63842c6d9434125045cf",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 78,
"avg_line_length": 32.03030303030303,
"alnum_prop": 0.5849889624724062,
"repo_name": "MalloyPower/parsing-python",
"id": "4cfedbc9fe9ba8b1962fee713d163f4f6039ec2a",
"size": "6342",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.0/Lib/SimpleHTTPServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
import sys

import numpy as np

from astropy.io.fits.column import FITS2NUMPY, ColDefs, Column
from astropy.io.fits.fitsrec import FITS_rec, FITS_record
from astropy.io.fits.util import _is_int, _is_pseudo_integer, _pseudo_zero
from astropy.utils import lazyproperty

from .base import DELAYED, DTYPE2BITPIX
from .image import PrimaryHDU
from .table import _TableLikeHDU


class Group(FITS_record):
    """
    One group of the random group data.
    """

    def __init__(self, input, row=0, start=None, end=None, step=None, base=None):
        super().__init__(input, row, start, end, step, base)

    @property
    def parnames(self):
        return self.array.parnames

    @property
    def data(self):
        # The last column in the coldefs is the data portion of the group
        return self.field(self.array._coldefs.names[-1])

    @lazyproperty
    def _unique(self):
        return _par_indices(self.parnames)

    def par(self, parname):
        """
        Get the group parameter value.
        """
        if _is_int(parname):
            result = self.array[self.row][parname]
        else:
            indx = self._unique[parname.upper()]
            if len(indx) == 1:
                result = self.array[self.row][indx[0]]
            # if more than one group parameter has the same name
            else:
                result = self.array[self.row][indx[0]].astype("f8")
                for i in indx[1:]:
                    result += self.array[self.row][i]

        return result

    def setpar(self, parname, value):
        """
        Set the group parameter value.
        """
        # TODO: It would be nice if, instead of requiring a multi-part value to
        # be an array, there were an *option* to automatically split the value
        # into multiple columns if it doesn't already fit in the array data
        # type.
        if _is_int(parname):
            self.array[self.row][parname] = value
        else:
            indx = self._unique[parname.upper()]
            if len(indx) == 1:
                self.array[self.row][indx[0]] = value
            # if more than one group parameter has the same name, the
            # value must be a list (or tuple) containing arrays
            else:
                if isinstance(value, (list, tuple)) and len(indx) == len(value):
                    for i in range(len(indx)):
                        self.array[self.row][indx[i]] = value[i]
                else:
                    raise ValueError(
                        "Parameter value must be a sequence with "
                        "{} arrays/numbers.".format(len(indx))
                    )


class GroupData(FITS_rec):
    """
    Random groups data object.

    Allows structured access to FITS Group data in a manner analogous
    to tables.
    """

    _record_type = Group

    def __new__(
        cls,
        input=None,
        bitpix=None,
        pardata=None,
        parnames=[],
        bscale=None,
        bzero=None,
        parbscales=None,
        parbzeros=None,
    ):
        """
        Parameters
        ----------
        input : array or FITS_rec instance
            input data, either the group data itself (a
            `numpy.ndarray`) or a record array (`FITS_rec`) which will
            contain both group parameter info and the data.  The rest
            of the arguments are used only for the first case.

        bitpix : int
            data type as expressed in FITS ``BITPIX`` value (8, 16, 32,
            64, -32, or -64)

        pardata : sequence of array
            parameter data, as a list of (numeric) arrays.

        parnames : sequence of str
            list of parameter names.

        bscale : int
            ``BSCALE`` of the data

        bzero : int
            ``BZERO`` of the data

        parbscales : sequence of int
            list of bscales for the parameters

        parbzeros : sequence of int
            list of bzeros for the parameters
        """
        if not isinstance(input, FITS_rec):
            if pardata is None:
                npars = 0
            else:
                npars = len(pardata)

            if parbscales is None:
                parbscales = [None] * npars
            if parbzeros is None:
                parbzeros = [None] * npars

            if parnames is None:
                parnames = [f"PAR{idx + 1}" for idx in range(npars)]

            if len(parnames) != npars:
                raise ValueError(
                    "The number of parameter data arrays does "
                    "not match the number of parameters."
                )

            unique_parnames = _unique_parnames(parnames + ["DATA"])

            if bitpix is None:
                bitpix = DTYPE2BITPIX[input.dtype.name]

            fits_fmt = GroupsHDU._bitpix2tform[bitpix]  # -32 -> 'E'
            format = FITS2NUMPY[fits_fmt]  # 'E' -> 'f4'
            data_fmt = f"{str(input.shape[1:])}{format}"
            formats = ",".join(([format] * npars) + [data_fmt])
            gcount = input.shape[0]

            cols = [
                Column(
                    name=unique_parnames[idx],
                    format=fits_fmt,
                    bscale=parbscales[idx],
                    bzero=parbzeros[idx],
                )
                for idx in range(npars)
            ]
            cols.append(
                Column(
                    name=unique_parnames[-1],
                    format=fits_fmt,
                    bscale=bscale,
                    bzero=bzero,
                )
            )

            coldefs = ColDefs(cols)

            self = FITS_rec.__new__(
                cls,
                np.rec.array(None, formats=formats, names=coldefs.names, shape=gcount),
            )

            # By default the data field will just be 'DATA', but it may be
            # uniquified if 'DATA' is already used by one of the group names
            self._data_field = unique_parnames[-1]

            self._coldefs = coldefs
            self.parnames = parnames

            for idx, name in enumerate(unique_parnames[:-1]):
                column = coldefs[idx]
                # Note: _get_scale_factors is used here and in other cases
                # below to determine whether the column has non-default
                # scale/zero factors.
                # TODO: Find a better way to do this than using this interface
                scale, zero = self._get_scale_factors(column)[3:5]
                if scale or zero:
                    self._cache_field(name, pardata[idx])
                else:
                    np.rec.recarray.field(self, idx)[:] = pardata[idx]

            column = coldefs[self._data_field]
            scale, zero = self._get_scale_factors(column)[3:5]
            if scale or zero:
                self._cache_field(self._data_field, input)
            else:
                np.rec.recarray.field(self, npars)[:] = input
        else:
            self = FITS_rec.__new__(cls, input)
            self.parnames = None
        return self

    def __array_finalize__(self, obj):
        super().__array_finalize__(obj)
        if isinstance(obj, GroupData):
            self.parnames = obj.parnames
        elif isinstance(obj, FITS_rec):
            self.parnames = obj._coldefs.names

    def __getitem__(self, key):
        out = super().__getitem__(key)
        if isinstance(out, GroupData):
            out.parnames = self.parnames
        return out

    @property
    def data(self):
        """
        The raw group data represented as a multi-dimensional `numpy.ndarray`
        array.
        """
        # The last column in the coldefs is the data portion of the group
        return self.field(self._coldefs.names[-1])

    @lazyproperty
    def _unique(self):
        return _par_indices(self.parnames)

    def par(self, parname):
        """
        Get the group parameter values.
        """
        if _is_int(parname):
            result = self.field(parname)
        else:
            indx = self._unique[parname.upper()]
            if len(indx) == 1:
                result = self.field(indx[0])
            # if more than one group parameter has the same name
            else:
                result = self.field(indx[0]).astype("f8")
                for i in indx[1:]:
                    result += self.field(i)

        return result


class GroupsHDU(PrimaryHDU, _TableLikeHDU):
    """
    FITS Random Groups HDU class.

    See the :ref:`astropy:random-groups` section in the Astropy documentation
    for more details on working with this type of HDU.
    """

    _bitpix2tform = {8: "B", 16: "I", 32: "J", 64: "K", -32: "E", -64: "D"}
    _data_type = GroupData
    _data_field = "DATA"
    """
    The name of the table record array field that will contain the group data
    for each group; 'DATA' by default, but may be preceded by any number of
    underscores if 'DATA' is already a parameter name
    """

    def __init__(self, data=None, header=None):
        super().__init__(data=data, header=header)
        if data is not DELAYED:
            self.update_header()

        # Update the axes; GROUPS HDUs should always have at least one axis
        if len(self._axes) <= 0:
            self._axes = [0]
            self._header["NAXIS"] = 1
            self._header.set("NAXIS1", 0, after="NAXIS")

    @classmethod
    def match_header(cls, header):
        keyword = header.cards[0].keyword
        return keyword == "SIMPLE" and "GROUPS" in header and header["GROUPS"] is True

    @lazyproperty
    def data(self):
        """
        The data of a random group FITS file will be like a binary table's
        data.
        """
        if self._axes == [0]:
            return

        data = self._get_tbdata()
        data._coldefs = self.columns
        data.parnames = self.parnames
        del self.columns
        return data

    @lazyproperty
    def parnames(self):
        """The names of the group parameters as described by the header."""
        pcount = self._header["PCOUNT"]
        # The FITS standard doesn't really say what to do if a parname is
        # missing, so for now just assume that won't happen
        return [self._header["PTYPE" + str(idx + 1)] for idx in range(pcount)]

    @lazyproperty
    def columns(self):
        if self._has_data and hasattr(self.data, "_coldefs"):
            return self.data._coldefs

        format = self._bitpix2tform[self._header["BITPIX"]]
        pcount = self._header["PCOUNT"]
        parnames = []
        bscales = []
        bzeros = []

        for idx in range(pcount):
            bscales.append(self._header.get("PSCAL" + str(idx + 1), None))
            bzeros.append(self._header.get("PZERO" + str(idx + 1), None))
            parnames.append(self._header["PTYPE" + str(idx + 1)])

        formats = [format] * len(parnames)
        dim = [None] * len(parnames)

        # Now create columns from collected parameters, but first add the DATA
        # column too, to contain the group data.
        parnames.append("DATA")
        bscales.append(self._header.get("BSCALE"))
        bzeros.append(self._header.get("BZERO"))
        data_shape = self.shape[:-1]
        formats.append(str(int(np.prod(data_shape))) + format)
        dim.append(data_shape)
        parnames = _unique_parnames(parnames)
        self._data_field = parnames[-1]

        cols = [
            Column(name=name, format=fmt, bscale=bscale, bzero=bzero, dim=dim)
            for name, fmt, bscale, bzero, dim in zip(
                parnames, formats, bscales, bzeros, dim
            )
        ]

        coldefs = ColDefs(cols)
        return coldefs

    @property
    def _nrows(self):
        if not self._data_loaded:
            # The number of 'groups' equates to the number of rows in the table
            # representation of the data
            return self._header.get("GCOUNT", 0)
        else:
            return len(self.data)

    @lazyproperty
    def _theap(self):
        # Only really a lazyproperty for symmetry with _TableBaseHDU
        return 0

    @property
    def is_image(self):
        return False

    @property
    def size(self):
        """
        Returns the size (in bytes) of the HDU's data part.
        """
        size = 0
        naxis = self._header.get("NAXIS", 0)

        # for random group image, NAXIS1 should be 0, so we skip NAXIS1.
        if naxis > 1:
            size = 1
            for idx in range(1, naxis):
                size = size * self._header["NAXIS" + str(idx + 1)]
            bitpix = self._header["BITPIX"]
            gcount = self._header.get("GCOUNT", 1)
            pcount = self._header.get("PCOUNT", 0)
            size = abs(bitpix) * gcount * (pcount + size) // 8
        return size

    def update_header(self):
        old_naxis = self._header.get("NAXIS", 0)

        if self._data_loaded:
            if isinstance(self.data, GroupData):
                self._axes = list(self.data.data.shape)[1:]
                self._axes.reverse()
                self._axes = [0] + self._axes
                field0 = self.data.dtype.names[0]
                field0_code = self.data.dtype.fields[field0][0].name
            elif self.data is None:
                self._axes = [0]
                field0_code = "uint8"  # For lack of a better default
            else:
                raise ValueError("incorrect array type")

            self._header["BITPIX"] = DTYPE2BITPIX[field0_code]

        self._header["NAXIS"] = len(self._axes)

        # add NAXISi if it does not exist
        for idx, axis in enumerate(self._axes):
            if idx == 0:
                after = "NAXIS"
            else:
                after = "NAXIS" + str(idx)
            self._header.set("NAXIS" + str(idx + 1), axis, after=after)

        # delete extra NAXISi's
        for idx in range(len(self._axes) + 1, old_naxis + 1):
            try:
                del self._header["NAXIS" + str(idx)]
            except KeyError:
                pass

        if self._has_data and isinstance(self.data, GroupData):
            self._header.set("GROUPS", True, after="NAXIS" + str(len(self._axes)))
            self._header.set("PCOUNT", len(self.data.parnames), after="GROUPS")
            self._header.set("GCOUNT", len(self.data), after="PCOUNT")

            column = self.data._coldefs[self._data_field]
            scale, zero = self.data._get_scale_factors(column)[3:5]
            if scale:
                self._header.set("BSCALE", column.bscale)
            if zero:
                self._header.set("BZERO", column.bzero)

            for idx, name in enumerate(self.data.parnames):
                self._header.set("PTYPE" + str(idx + 1), name)
                column = self.data._coldefs[idx]
                scale, zero = self.data._get_scale_factors(column)[3:5]
                if scale:
                    self._header.set("PSCAL" + str(idx + 1), column.bscale)
                if zero:
                    self._header.set("PZERO" + str(idx + 1), column.bzero)

        # Update the position of the EXTEND keyword if it already exists
        if "EXTEND" in self._header:
            if len(self._axes):
                after = "NAXIS" + str(len(self._axes))
            else:
                after = "NAXIS"
            self._header.set("EXTEND", after=after)

    def _writedata_internal(self, fileobj):
        """
        Basically copy/pasted from `_ImageBaseHDU._writedata_internal()`, but
        we have to get the data's byte order a different way...

        TODO: Might be nice to store some indication of the data's byte order
        as an attribute or function so that we don't have to do this.
        """
        size = 0

        if self.data is not None:
            self.data._scale_back()

            # Based on the system type, determine the byteorders that
            # would need to be swapped to get to big-endian output
            if sys.byteorder == "little":
                swap_types = ("<", "=")
            else:
                swap_types = ("<",)
            # deal with unsigned integer 16, 32 and 64 data
            if _is_pseudo_integer(self.data.dtype):
                # Convert the unsigned array to signed
                output = np.array(
                    self.data - _pseudo_zero(self.data.dtype),
                    dtype=f">i{self.data.dtype.itemsize}",
                )
                should_swap = False
            else:
                output = self.data
                fname = self.data.dtype.names[0]
                byteorder = self.data.dtype.fields[fname][0].str[0]
                should_swap = byteorder in swap_types

            if should_swap:
                if output.flags.writeable:
                    output.byteswap(True)
                    try:
                        fileobj.writearray(output)
                    finally:
                        output.byteswap(True)
                else:
                    # For read-only arrays, there is no way around making
                    # a byteswapped copy of the data.
                    fileobj.writearray(output.byteswap(False))
            else:
                fileobj.writearray(output)

            size += output.size * output.itemsize

        return size

    def _verify(self, option="warn"):
        errs = super()._verify(option=option)

        # Verify locations and values of mandatory keywords.
        self.req_cards(
            "NAXIS", 2, lambda v: (_is_int(v) and 1 <= v <= 999), 1, option, errs
        )
        self.req_cards("NAXIS1", 3, lambda v: (_is_int(v) and v == 0), 0, option, errs)

        after = self._header["NAXIS"] + 3
        pos = lambda x: x >= after

        self.req_cards("GCOUNT", pos, _is_int, 1, option, errs)
        self.req_cards("PCOUNT", pos, _is_int, 0, option, errs)
        self.req_cards("GROUPS", pos, lambda v: (v is True), True, option, errs)
        return errs

    def _calculate_datasum(self):
        """
        Calculate the value for the ``DATASUM`` card in the HDU.
        """
        if self._has_data:
            # We have the data to be used.
            # Check the byte order of the data.  If it is little endian we
            # must swap it before calculating the datasum.
            # TODO: Maybe check this on a per-field basis instead of assuming
            # that all fields have the same byte order?
            byteorder = self.data.dtype.fields[self.data.dtype.names[0]][0].str[0]

            if byteorder != ">":
                if self.data.flags.writeable:
                    byteswapped = True
                    d = self.data.byteswap(True)
                    d.dtype = d.dtype.newbyteorder(">")
                else:
                    # If the data is not writeable, we just make a byteswapped
                    # copy and don't bother changing it back after
                    d = self.data.byteswap(False)
                    d.dtype = d.dtype.newbyteorder(">")
                    byteswapped = False
            else:
                byteswapped = False
                d = self.data

            byte_data = d.view(type=np.ndarray, dtype=np.ubyte)
            cs = self._compute_checksum(byte_data)

            # If the data was byteswapped in this method then return it to
            # its original little-endian order.
            if byteswapped:
                d.byteswap(True)
                d.dtype = d.dtype.newbyteorder("<")

            return cs
        else:
            # This is the case where the data has not been read from the file
            # yet.  We can handle that in a generic manner so we do it in the
            # base class.  The other possibility is that there is no data at
            # all.  This can also be handled in a generic manner.
            return super()._calculate_datasum()

    def _summary(self):
        summary = super()._summary()
        name, ver, classname, length, shape, format, gcount = summary

        # Drop the first axis from the shape
        if shape:
            shape = shape[1:]

            if shape and all(shape):
                # Update the format
                format = self.columns[0].dtype.name

        # Update the GCOUNT report
        gcount = f"{self._gcount} Groups  {self._pcount} Parameters"
        return (name, ver, classname, length, shape, format, gcount)


def _par_indices(names):
    """
    Given a list of objects, returns a mapping of objects in that list to the
    index or indices at which that object was found in the list.
    """
    unique = {}
    for idx, name in enumerate(names):
        # Case insensitive
        name = name.upper()
        if name in unique:
            unique[name].append(idx)
        else:
            unique[name] = [idx]
    return unique


def _unique_parnames(names):
    """
    Given a list of parnames, including possible duplicates, returns a new list
    of parnames with duplicates prepended by one or more underscores to make
    them unique.  This is also case insensitive.
    """
    upper_names = set()
    unique_names = []

    for name in names:
        name_upper = name.upper()
        while name_upper in upper_names:
            name = "_" + name
            name_upper = "_" + name_upper

        unique_names.append(name)
        upper_names.add(name_upper)

    return unique_names
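

# A hedged usage sketch (not part of this module): random-groups HDUs are
# normally built through the public astropy.io.fits namespace, which
# re-exports GroupData and GroupsHDU. The shapes and parameter names below
# are illustrative.
#
#   import numpy as np
#   from astropy.io import fits
#
#   data = np.zeros((3, 2, 2), dtype=np.float32)   # 3 groups of 2x2 data
#   pars = [np.arange(3, dtype=np.float32),        # one value per group
#           np.ones(3, dtype=np.float32)]
#   gdata = fits.GroupData(input=data, bitpix=-32,
#                          parnames=['UU', 'VV'], pardata=pars)
#   hdu = fits.GroupsHDU(gdata)
#   hdu.writeto('groups.fits', overwrite=True)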
|
{
"content_hash": "2ff79c9071aa8e0f70891483550ef0bb",
"timestamp": "",
"source": "github",
"line_count": 643,
"max_line_length": 87,
"avg_line_length": 33.28615863141524,
"alnum_prop": 0.5317478858104004,
"repo_name": "pllim/astropy",
"id": "16c465238b60623bdcb8108e969b932f3fd9ddc0",
"size": "21467",
"binary": false,
"copies": "3",
"ref": "refs/heads/placeholder",
"path": "astropy/io/fits/hdu/groups.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78776"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12404182"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
"""Test GatewayClient"""
import os
import json
import uuid
from datetime import datetime
from io import StringIO
from unittest.mock import patch
import nose.tools as nt
from tornado import gen
from tornado.web import HTTPError
from tornado.httpclient import HTTPRequest, HTTPResponse
from notebook.gateway.managers import GatewayClient
from notebook.utils import maybe_future
from .launchnotebook import NotebookTestBase
def generate_kernelspec(name):
argv_stanza = ['python', '-m', 'ipykernel_launcher', '-f', '{connection_file}']
spec_stanza = {'spec': {'argv': argv_stanza, 'env': {}, 'display_name': name, 'language': 'python', 'interrupt_mode': 'signal', 'metadata': {}}}
kernelspec_stanza = {'name': name, 'spec': spec_stanza, 'resources': {}}
return kernelspec_stanza
# We'll mock up two kernelspecs - kspec_foo and kspec_bar
kernelspecs = {'default': 'kspec_foo', 'kernelspecs': {'kspec_foo': generate_kernelspec('kspec_foo'), 'kspec_bar': generate_kernelspec('kspec_bar')}}
# maintain a dictionary of expected running kernels. Key = kernel_id, Value = model.
running_kernels = dict()
def generate_model(name):
"""Generate a mocked kernel model. Caller is responsible for adding model to running_kernels dictionary."""
dt = datetime.utcnow().isoformat() + 'Z'
kernel_id = str(uuid.uuid4())
model = {'id': kernel_id, 'name': name, 'last_activity': str(dt), 'execution_state': 'idle', 'connections': 1}
return model
@gen.coroutine
def mock_gateway_request(url, **kwargs):
method = 'GET'
if kwargs['method']:
method = kwargs['method']
request = HTTPRequest(url=url, **kwargs)
endpoint = str(url)
# Fetch all kernelspecs
if endpoint.endswith('/api/kernelspecs') and method == 'GET':
response_buf = StringIO(json.dumps(kernelspecs))
response = yield maybe_future(HTTPResponse(request, 200, buffer=response_buf))
raise gen.Return(response)
# Fetch named kernelspec
if endpoint.rfind('/api/kernelspecs/') >= 0 and method == 'GET':
requested_kernelspec = endpoint.rpartition('/')[2]
kspecs = kernelspecs.get('kernelspecs')
if requested_kernelspec in kspecs:
response_buf = StringIO(json.dumps(kspecs.get(requested_kernelspec)))
response = yield maybe_future(HTTPResponse(request, 200, buffer=response_buf))
raise gen.Return(response)
else:
raise HTTPError(404, message='Kernelspec does not exist: %s' % requested_kernelspec)
# Create kernel
if endpoint.endswith('/api/kernels') and method == 'POST':
json_body = json.loads(kwargs['body'])
name = json_body.get('name')
env = json_body.get('env')
kspec_name = env.get('KERNEL_KSPEC_NAME')
nt.assert_equal(name, kspec_name) # Ensure that KERNEL_ env values get propagated
model = generate_model(name)
running_kernels[model.get('id')] = model # Register model as a running kernel
response_buf = StringIO(json.dumps(model))
response = yield maybe_future(HTTPResponse(request, 201, buffer=response_buf))
raise gen.Return(response)
# Fetch list of running kernels
if endpoint.endswith('/api/kernels') and method == 'GET':
kernels = []
for kernel_id in running_kernels.keys():
model = running_kernels.get(kernel_id)
kernels.append(model)
response_buf = StringIO(json.dumps(kernels))
response = yield maybe_future(HTTPResponse(request, 200, buffer=response_buf))
raise gen.Return(response)
# Interrupt or restart existing kernel
if endpoint.rfind('/api/kernels/') >= 0 and method == 'POST':
requested_kernel_id, sep, action = endpoint.rpartition('/api/kernels/')[2].rpartition('/')
if action == 'interrupt':
if requested_kernel_id in running_kernels:
response = yield maybe_future(HTTPResponse(request, 204))
raise gen.Return(response)
else:
raise HTTPError(404, message='Kernel does not exist: %s' % requested_kernel_id)
elif action == 'restart':
if requested_kernel_id in running_kernels:
response_buf = StringIO(json.dumps(running_kernels.get(requested_kernel_id)))
response = yield maybe_future(HTTPResponse(request, 204, buffer=response_buf))
raise gen.Return(response)
else:
raise HTTPError(404, message='Kernel does not exist: %s' % requested_kernel_id)
else:
raise HTTPError(404, message='Bad action detected: %s' % action)
# Shutdown existing kernel
if endpoint.rfind('/api/kernels/') >= 0 and method == 'DELETE':
requested_kernel_id = endpoint.rpartition('/')[2]
running_kernels.pop(requested_kernel_id) # Simulate shutdown by removing kernel from running set
response = yield maybe_future(HTTPResponse(request, 204))
raise gen.Return(response)
# Fetch existing kernel
if endpoint.rfind('/api/kernels/') >= 0 and method == 'GET':
requested_kernel_id = endpoint.rpartition('/')[2]
if requested_kernel_id in running_kernels:
response_buf = StringIO(json.dumps(running_kernels.get(requested_kernel_id)))
response = yield maybe_future(HTTPResponse(request, 200, buffer=response_buf))
raise gen.Return(response)
else:
raise HTTPError(404, message='Kernel does not exist: %s' % requested_kernel_id)
mocked_gateway = patch('notebook.gateway.managers.gateway_request', mock_gateway_request)
class TestGateway(NotebookTestBase):
mock_gateway_url = 'http://mock-gateway-server:8889'
mock_http_user = 'alice'
@classmethod
def setup_class(cls):
GatewayClient.clear_instance()
super(TestGateway, cls).setup_class()
@classmethod
def teardown_class(cls):
GatewayClient.clear_instance()
super(TestGateway, cls).teardown_class()
@classmethod
def get_patch_env(cls):
test_env = super(TestGateway, cls).get_patch_env()
test_env.update({'JUPYTER_GATEWAY_URL': TestGateway.mock_gateway_url,
'JUPYTER_GATEWAY_CONNECT_TIMEOUT': '44.4'})
return test_env
@classmethod
def get_argv(cls):
argv = super(TestGateway, cls).get_argv()
argv.extend(['--GatewayClient.request_timeout=96.0', '--GatewayClient.http_user=' + TestGateway.mock_http_user])
return argv
def setUp(self):
kwargs = dict()
GatewayClient.instance().load_connection_args(**kwargs)
super(TestGateway, self).setUp()
def test_gateway_options(self):
nt.assert_equal(self.notebook.gateway_config.gateway_enabled, True)
nt.assert_equal(self.notebook.gateway_config.url, TestGateway.mock_gateway_url)
nt.assert_equal(self.notebook.gateway_config.http_user, TestGateway.mock_http_user)
nt.assert_equal(self.notebook.gateway_config.connect_timeout, self.notebook.gateway_config.connect_timeout)
nt.assert_equal(self.notebook.gateway_config.connect_timeout, 44.4)
nt.assert_equal(self.notebook.gateway_config.request_timeout, 96.0)
nt.assert_equal(os.environ['KERNEL_LAUNCH_TIMEOUT'], str(96)) # Ensure KLT gets set from request-timeout
def test_gateway_class_mappings(self):
# Ensure appropriate class mappings are in place.
nt.assert_equal(self.notebook.kernel_manager_class.__name__, 'GatewayKernelManager')
nt.assert_equal(self.notebook.session_manager_class.__name__, 'GatewaySessionManager')
nt.assert_equal(self.notebook.kernel_spec_manager_class.__name__, 'GatewayKernelSpecManager')
def test_gateway_get_kernelspecs(self):
# Validate that kernelspecs come from gateway.
with mocked_gateway:
response = self.request('GET', '/api/kernelspecs')
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode('utf-8'))
kspecs = content.get('kernelspecs')
self.assertEqual(len(kspecs), 2)
self.assertEqual(kspecs.get('kspec_bar').get('name'), 'kspec_bar')
def test_gateway_get_named_kernelspec(self):
# Validate that a specific kernelspec can be retrieved from gateway.
with mocked_gateway:
response = self.request('GET', '/api/kernelspecs/kspec_foo')
self.assertEqual(response.status_code, 200)
kspec_foo = json.loads(response.content.decode('utf-8'))
self.assertEqual(kspec_foo.get('name'), 'kspec_foo')
response = self.request('GET', '/api/kernelspecs/no_such_spec')
self.assertEqual(response.status_code, 404)
def test_gateway_session_lifecycle(self):
# Validate session lifecycle functions; create and delete.
# create
session_id, kernel_id = self.create_session('kspec_foo')
# ensure kernel still considered running
self.assertTrue(self.is_kernel_running(kernel_id))
# interrupt
self.interrupt_kernel(kernel_id)
# ensure kernel still considered running
self.assertTrue(self.is_kernel_running(kernel_id))
# restart
self.restart_kernel(kernel_id)
# ensure kernel still considered running
self.assertTrue(self.is_kernel_running(kernel_id))
# delete
self.delete_session(session_id)
self.assertFalse(self.is_kernel_running(kernel_id))
def test_gateway_kernel_lifecycle(self):
# Validate kernel lifecycle functions; create, interrupt, restart and delete.
# create
kernel_id = self.create_kernel('kspec_bar')
# ensure kernel still considered running
self.assertTrue(self.is_kernel_running(kernel_id))
# interrupt
self.interrupt_kernel(kernel_id)
# ensure kernel still considered running
self.assertTrue(self.is_kernel_running(kernel_id))
# restart
self.restart_kernel(kernel_id)
# ensure kernel still considered running
self.assertTrue(self.is_kernel_running(kernel_id))
# delete
self.delete_kernel(kernel_id)
self.assertFalse(self.is_kernel_running(kernel_id))
def create_session(self, kernel_name):
"""Creates a session for a kernel. The session is created against the notebook server
which then uses the gateway for kernel management.
"""
with mocked_gateway:
nb_path = os.path.join(self.notebook_dir, 'testgw.ipynb')
kwargs = dict()
kwargs['json'] = {'path': nb_path, 'type': 'notebook', 'kernel': {'name': kernel_name}}
# add a KERNEL_ value to the current env and we'll ensure that that value exists in the mocked method
os.environ['KERNEL_KSPEC_NAME'] = kernel_name
# Create the kernel... (also tests get_kernel)
response = self.request('POST', '/api/sessions', **kwargs)
self.assertEqual(response.status_code, 201)
model = json.loads(response.content.decode('utf-8'))
self.assertEqual(model.get('path'), nb_path)
kernel_id = model.get('kernel').get('id')
# ensure its in the running_kernels and name matches.
running_kernel = running_kernels.get(kernel_id)
self.assertEqual(kernel_id, running_kernel.get('id'))
self.assertEqual(model.get('kernel').get('name'), running_kernel.get('name'))
session_id = model.get('id')
# restore env
os.environ.pop('KERNEL_KSPEC_NAME')
return session_id, kernel_id
def delete_session(self, session_id):
"""Deletes a session corresponding to the given session id.
"""
with mocked_gateway:
# Delete the session (and kernel)
response = self.request('DELETE', '/api/sessions/' + session_id)
self.assertEqual(response.status_code, 204)
self.assertEqual(response.reason, 'No Content')
def is_kernel_running(self, kernel_id):
"""Issues request to get the set of running kernels
"""
with mocked_gateway:
# Get list of running kernels
response = self.request('GET', '/api/kernels')
self.assertEqual(response.status_code, 200)
kernels = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(kernels), len(running_kernels))
for model in kernels:
if model.get('id') == kernel_id:
return True
return False
def create_kernel(self, kernel_name):
"""Issues request to restart the given kernel
"""
with mocked_gateway:
kwargs = dict()
kwargs['json'] = {'name': kernel_name}
# add a KERNEL_ value to the current env and we'll ensure that that value exists in the mocked method
os.environ['KERNEL_KSPEC_NAME'] = kernel_name
response = self.request('POST', '/api/kernels', **kwargs)
self.assertEqual(response.status_code, 201)
model = json.loads(response.content.decode('utf-8'))
kernel_id = model.get('id')
# ensure its in the running_kernels and name matches.
running_kernel = running_kernels.get(kernel_id)
self.assertEqual(kernel_id, running_kernel.get('id'))
self.assertEqual(model.get('name'), kernel_name)
# restore env
os.environ.pop('KERNEL_KSPEC_NAME')
return kernel_id
def interrupt_kernel(self, kernel_id):
"""Issues request to interrupt the given kernel
"""
with mocked_gateway:
response = self.request('POST', '/api/kernels/' + kernel_id + '/interrupt')
self.assertEqual(response.status_code, 204)
self.assertEqual(response.reason, 'No Content')
def restart_kernel(self, kernel_id):
"""Issues request to restart the given kernel
"""
with mocked_gateway:
response = self.request('POST', '/api/kernels/' + kernel_id + '/restart')
self.assertEqual(response.status_code, 200)
model = json.loads(response.content.decode('utf-8'))
restarted_kernel_id = model.get('id')
# ensure its in the running_kernels and name matches.
running_kernel = running_kernels.get(restarted_kernel_id)
self.assertEqual(restarted_kernel_id, running_kernel.get('id'))
self.assertEqual(model.get('name'), running_kernel.get('name'))
def delete_kernel(self, kernel_id):
"""Deletes kernel corresponding to the given kernel id.
"""
with mocked_gateway:
# Delete the session (and kernel)
response = self.request('DELETE', '/api/kernels/' + kernel_id)
self.assertEqual(response.status_code, 204)
self.assertEqual(response.reason, 'No Content')
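
# The mocking pattern used throughout this module in isolation: `mocked_gateway`
# swaps the module-level coroutine `gateway_request` for `mock_gateway_request`
# only while a `with` block is active. A minimal standalone illustration of the
# same stdlib mechanism (the names below are illustrative, not notebook API):
#
#   from unittest.mock import patch
#
#   def real_backend_call():
#       return 'live'
#
#   def fetch():
#       return real_backend_call()
#
#   with patch(__name__ + '.real_backend_call', return_value='stubbed'):
#       assert fetch() == 'stubbed'
#   assert fetch() == 'live'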
|
{
"content_hash": "29868316620e346e575065297bbefd31",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 149,
"avg_line_length": 42.621468926553675,
"alnum_prop": 0.6371288441145281,
"repo_name": "sserrot/champion_relationships",
"id": "bc42014e449c4c029f2b45f6d8e56ce80b3049ac",
"size": "15088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/notebook/tests/test_gateway.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
}
|
"""
Author: Mitchell Blowey
Constants for use in the fields of different headers.
This list is not complete, being only composed of the first ten protocol
options for IP headers, since it did not seem necessary to add in all possible 255 values,
most of which will never be used by this program.
I may one day reach that point where they are needed.
"""
from enum import Enum
class IPProtocol(Enum):
HOPOPT = 0
ICMP = 1
IGMP = 2
GGP = 3
IP_IN_IP = 4
ST = 5
TCP = 6
CBT = 7
EGP = 8
IGP = 9
class ICMPType(Enum):
ECHO_REPLY = 0
DEST_UNRCHBL = 3
REDIRECT = 5
ECHO_REQUEST = 8
ROUTER_ADVERTISEMENT = 9
ROUTER_SOLICITATION = 10
TIME_EXCEEDED = 11
BAD_IP_HEADER = 12
TIMESTAMP = 13
TIMESTAMP_REPLY = 14
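

if __name__ == '__main__':
    # Usage sketch (not part of the original file): Enum members map both
    # ways, which is the point of keeping these protocol numbers symbolic.
    assert IPProtocol(6) is IPProtocol.TCP
    assert IPProtocol.TCP.value == 6
    assert ICMPType(8).name == 'ECHO_REQUEST'
    print(IPProtocol.TCP, ICMPType.ECHO_REPLY)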
|
{
"content_hash": "6ef7b12f29d980a121eaf1e6490b4fc8",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 90,
"avg_line_length": 19.463414634146343,
"alnum_prop": 0.6441102756892231,
"repo_name": "mblowey/PacketOps",
"id": "b06988d77af99f14e073bc2afb4060a643c5fefb",
"size": "798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20695"
}
],
"symlink_target": ""
}
|
import os
import time
import boto
import boto.ec2
from tabulate import tabulate
from jinja2 import Environment, FileSystemLoader
import sys, argparse
import yaml


def launch_instance(ec2, ami, instance_type, region, groups, key_name):
    print 'Launch instance [ami=%s, type=%s, groups=%s, key=%s]' % (ami, instance_type, groups, key_name)
    reservation = ec2.run_instances(ami, key_name=key_name, security_groups=groups, instance_type=instance_type, placement=region)
    instance = reservation.instances[0]
    # Wait for instance state to change to 'running'.
    print 'waiting for instance'
    while instance.state != 'running':
        print '.'
        time.sleep(5)
        instance.update()
    print 'done'
    return instance


def tag_resource(ec2, resource, tags):
    print 'Tagging %s with %s.' % (resource, tags)
    ec2.create_tags(resource, tags)


def create_security_rule(ec2, vpc, group, protocol, start, end, cidr):
    # Check to see if specified security group already exists.
    # If we get an InvalidGroup.NotFound error back from EC2,
    # it means that it doesn't exist and we need to create it.
    print 'Creating security rule %s:%s[%s,%s]->%s' % (group, protocol, start, end, cidr)
    try:
        ec2_group = ec2.get_all_security_groups(groupnames=[group])[0]
    except ec2.ResponseError, e:
        if e.code == 'InvalidGroup.NotFound':
            print 'Creating Security Group: %s' % group
            # Create a security group to control access to instance via SSH.
            ec2_group = ec2.create_security_group(group, group, vpc_id=vpc)
        else:
            raise
    try:
        ec2_group.authorize(protocol, start, end, cidr)
    except ec2.ResponseError, e:
        if e.code == 'InvalidPermission.Duplicate':
            print 'Security Group: %s already authorized' % group
        else:
            raise


def create_key_pair(ec2, key_name, key_dir, key_extension='.pem'):
    # Create an SSH key to use when logging into instances.
    # Check to see if specified keypair already exists.
    # If we get an InvalidKeyPair.NotFound error back from EC2,
    # it means that it doesn't exist and we need to create it.
    print 'Create key_pair %s at %s' % (key_name, key_dir)
    try:
        key = ec2.get_all_key_pairs(keynames=[key_name])[0]
        print 'Keypair found. Not creating.'
    except ec2.ResponseError, e:
        if e.code == 'InvalidKeyPair.NotFound':
            print 'Creating keypair: %s' % key_name
            key = ec2.create_key_pair(key_name)
            # AWS will store the public key but the private key is
            # generated and returned and needs to be stored locally.
            # The save method will also chmod the file to protect
            # your private key.
            key.save(key_dir)
        else:
            raise


def create_volume(ec2, region, volume_size):
    print 'Create %s EBS volume in %s' % (volume_size, region)
    # Determine the Availability Zone of the instance
    volume = ec2.create_volume(volume_size, region)
    # Wait for the volume to be created.
    while volume.status != 'available':
        time.sleep(5)
        volume.update()
    return volume


def attach_volume(instance, volume, device_name):
    volume.attach(instance.id, device_name)
    return volume


def connect(aws):
    # Create a connection to EC2 service.
    # You can pass credentials in to the connect_ec2 method explicitly
    # or you can use the default credentials in your ~/.boto config file
    # as we are doing here.
    conn = boto.ec2.connect_to_region(aws['region'], aws_access_key_id=aws['access_key'], aws_secret_access_key=aws['secret_key'])
    print conn
    return conn


def get_all_instances(ec2, tags):
    instances = []
    print tags
    tags = {'tag:' + k: v for k, v in tags.iteritems()}
    print tags
    reservations = ec2.get_all_instances(filters=tags)
    for r in reservations:
        print r
        for instance in r.instances:
            if instance.state == 'running':
                instances.append(instance)
    return instances


def get_all_volumes(ec2, tags):
    tags = {'tag:' + k: v for k, v in tags.iteritems()}
    return ec2.get_all_volumes(filters=tags)


def list(aws, inventory, cluster=None, facet=None, index=None):
    ec2 = connect(aws)
    tags = {'env': inventory.get_env()}
    if cluster:
        tags['cluster'] = cluster
    if facet:
        tags['facet'] = facet
    if index:
        tags['index'] = index
    print(tags)
    volumes = get_all_volumes(ec2, tags)
    print volumes
    instances = get_all_instances(ec2, tags)
    i_data = get_data_from_instances(instances)
    keys = i_data[0].keys()
    t = [[v for k, v in i.iteritems()] for i in i_data]
    print tabulate(t, headers=keys, tablefmt="grid")


def launch(aws, inventory, credentials_dir, cluster=None, facet=None, index=None):
    # 2. Create keypair for env if it does not exist.
    ec2 = connect(aws)
    create_key_pair(ec2, inventory.get_env(), credentials_dir)
    # 3. Create security groups if they do not exist.
    # 4. Apply rules.
    rules = inventory.get_security_rules()
    for rule in rules:
        create_security_rule(ec2, rule['vpc'], rule['group'], rule['protocol'], rule['start'], rule['end'], rule['cidr'])
    # 1. Get inventory of machines
    instances = inventory.get_instances(cluster, facet, index)
    print instances
    for i in instances:
        # 5. Launch instance if it does not exist.
        existing_instances = get_all_instances(ec2, {'Name': i['name']})
        if len(existing_instances) > 0:
            print 'Instance %s exists' % i['name']
        else:
            instance = launch_instance(ec2, i['image'], i['type'], i['region'], i['security'], inventory.get_env())
            # 6. Tag the instance. (Name, env, cluster, facet, index)
            tags = {'Name': i['name'], 'env': i['env'], 'cluster': i['cluster'], 'facet': i['facet'], 'index': i['index']}
            tag_resource(ec2, instance.id, tags)
            # TODO: Tag root volume.
            # 7. Create EBS volume and attach it to the instance.
            # 8. Assign the same name and tags to it.
            if i['volume'] is not None:
                vols = get_all_volumes(ec2, tags)
                if len(vols) > 0:
                    print 'Volume already exists for tags : %s.' % tags
                    vol = vols[0]
                else:
                    vol = create_volume(ec2, i['region'], i['volume'])
                    tag_resource(ec2, vol.id, tags)
                attach_volume(instance, vol, '/dev/sdh')
            # 9. TODO: Update the route53 record.
        # 10. Update the inventory file.
        all_instances = get_all_instances(ec2, {'env': i['env']})
        inventory.update_inventory_file(get_data_from_instances(all_instances))


def get_data_from_instances(instances):
    instance_data = []
    for instance in instances:
        data = {}
        data['Id'] = instance.id
        data['Name'] = instance.tags['Name']
        data['Env'] = instance.tags['env']
        data['Cluster'] = instance.tags['cluster']
        data['Facet'] = instance.tags['facet']
        data['Index'] = instance.tags['index']
        data['Public DNS'] = instance.public_dns_name
        data['AMI'] = instance.image_id
        data['Launch Time'] = instance.launch_time
        data['VPC'] = instance.vpc_id
        data['Public IP'] = instance.ip_address
        data['Private IP'] = instance.private_ip_address
        data['State'] = instance.state
        instance_data.append(data)
    return instance_data


def update(e2, cluster=None, facet=None, index=None):
    # Update SG and rules
    # Update assignment of instance to SGs.
    # Update tags
    pass


class Inventory(object):
    def __init__(self, config_file, inventory_file='hosts'):
        self.raw = yaml.load(open(config_file))
        self._denormalize_()
        self.inventory_file = inventory_file

    def get_env(self):
        return self.raw['env']

    def get_instances(self, cluster='.', facet='.', index='.'):
        # TODO: Validate if the facet/cluster/index exists
        instances = self.instances
        if cluster:
            instances = [k for k in instances if k['cluster'] == cluster]
        if facet:
            instances = [k for k in instances if k['facet'] == facet]
        if index:
            instances = [k for k in instances if k['index'] == index]
        return instances

    def get_security_rules(self, name=None):
        rules = self.rules
        if name:
            rules = [i for i in self.rules if i['group'] == name]
        return rules

    def update_inventory_file(self, instances):
        clusters = [i for i in self.raw['clusters']]
        facets = {i: [f for f in self.raw['clusters'][i]['facets']]
                  for i in self.raw['clusters']}
        instances = {c: {f: [i for i in instances if i['Facet'] == f]
                         for f in self.raw['clusters'][c]['facets']}
                     for c in self.raw['clusters']}
        env = Environment(loader=FileSystemLoader(os.path.dirname(__file__)))
        template = env.get_template('hosts.j2')
        s = template.render({'env': self.raw['env'], 'clusters': clusters, 'facets': facets, 'instances': instances})
        print s
        with open(self.inventory_file, "wb") as fh:
            fh.write(s)

    def _denormalize_(self):
        self.instances = []
        for cluster in self.raw['clusters']:
            facets = self.raw['clusters'][cluster]['facets']
            for facet in facets:
                count = self.raw['clusters'][cluster]['facets'][facet]['count']
                for index in xrange(count):
                    name = "%s-%s-%s-%s" % (self.raw['env'], cluster, facet, index)
                    details = {}
                    details['name'] = name
                    # Add env/cluster level data
                    details['env'] = self.raw['env']
                    details['cluster'] = cluster
                    details['facet'] = facet
                    details['index'] = index
                    details['image'] = self.raw['image']
                    details['vpc'] = self.raw['vpc']
                    details['region'] = self.raw['region']
                    # Add facet data
                    details.update(self.raw['clusters'][cluster]['facets'][facet])
                    # Add security data
                    if not details.has_key('security'):
                        details['security'] = []
                    details['security'].append('default')
                    details['security'] = ["%s-%s" % (self.raw['env'], i) for i in details['security']]
                    self.instances.append(details)
        self.rules = []
        for group in self.raw['security']:
            for protocol in self.raw['security'][group]:
                for rule in self.raw['security'][group][protocol]:
                    details = {}
                    details['vpc'] = self.raw['vpc']
                    details['group'] = "%s-%s" % (self.raw['env'], group)
                    details['protocol'] = protocol
                    details['cidr'] = rule
                    details['start'] = self.raw['security'][group][protocol][rule][0]
                    details['end'] = self.raw['security'][group][protocol][rule][1]
                    self.rules.append(details)


def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", help="Path to aws credentials. (Default=credentials/aws.yaml)", action="store_true")
    parser.add_argument("action", help="launch|show|kill|update (Only launch is implemented.)")
    parser.add_argument("env", help="Environment")
    parser.add_argument("cluster", help="Cluster")
    parser.add_argument("facet", help="Facet")
    parser.add_argument("index", type=int, help="Index")
    args = parser.parse_args()
    print args.action
    print args.cluster
    print args.facet
    print args.index
    inventory = Inventory('%s.yaml' % (args.env))
    aws = yaml.load(open('credentials/aws.yaml'))
    if args.action == 'launch':
        launch(aws, inventory, 'credentials/ec2_keys', args.cluster, args.facet, args.index)
    elif args.action == 'list':
        list(aws, inventory)
    elif args.action == 'ex':
        ec2 = connect(aws)
        all_instances = get_all_instances(ec2, {'env': 'sandbox'})
        inventory.update_inventory_file(get_data_from_instances(all_instances))


if __name__ == "__main__":
    main(sys.argv[1:])
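
# A hedged sketch of the inventory YAML this script expects, inferred from
# _denormalize_() above; every value below is illustrative:
#
#   env: sandbox
#   image: ami-12345678
#   vpc: vpc-abcdef01
#   region: us-east-1a
#   clusters:
#     web:
#       facets:
#         app:
#           count: 2
#           type: t2.micro
#           volume: 8
#           security: [web]
#   security:
#     web:
#       tcp:
#         0.0.0.0/0: [80, 80]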
|
{
"content_hash": "65c28fb23798c3508b5a930bfccab3fd",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 131,
"avg_line_length": 36.76093294460642,
"alnum_prop": 0.5866444603061305,
"repo_name": "arrawatia/ansible-homebase",
"id": "7e5f2cdebe4eb7d287e1607be58daf4c903b5039",
"size": "12609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ec2-cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12609"
},
{
"name": "Shell",
"bytes": "131"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators


class LenmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    def __init__(
        self, plotly_name="lenmode", parent_name="funnel.marker.colorbar", **kwargs
    ):
        super(LenmodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            values=kwargs.pop("values", ["fraction", "pixels"]),
            **kwargs,
        )
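

if __name__ == "__main__":
    # Usage sketch (an assumption, not part of the generated module): the base
    # EnumeratedValidator exposes validate_coerce, which accepts values from
    # the enumerated list and raises ValueError for anything else.
    v = LenmodeValidator()
    print(v.validate_coerce("fraction"))  # -> 'fraction'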
|
{
"content_hash": "06fdfd739b922c57f9ba063e0c410f2a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 83,
"avg_line_length": 35.714285714285715,
"alnum_prop": 0.608,
"repo_name": "plotly/plotly.py",
"id": "184ff1f12865c1221132d7ee0193f867f5289e68",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnel/marker/colorbar/_lenmode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
__version__ = '''$Id: test_encrypt.py 3959 2012-09-27 14:39:39Z robin $'''
__doc__ = """Testing to encrypt a very minimal pdf using a Canvas and a DocTemplate.
TODO: Automatically test that this pdf is really encrypted.
"""
import unittest
from reportlab.lib.testutils import setOutDir, makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib import pdfencrypt
from reportlab.lib.styles import ParagraphStyle
from reportlab.platypus import SimpleDocTemplate, Paragraph
from test_pdfencryption import parsedoc


class EncryptTestCase(unittest.TestCase):

    def test_canvas(self):
        "Test generating an encrypted pdf by setting a user password on the Canvas."
        fname = outputfile('test_encrypt_canvas.pdf')
        c = Canvas(fname, encrypt='User')
        c.setAuthor('Anonymous')
        c.setFont('Helvetica-Bold', 36)
        c.drawString(100, 700, 'Top secret')
        c.save()
        parsedoc(fname)

    def test_standardencryption(self):
        "Test generating an encrypted pdf by passing a StandardEncryption object to the Canvas."
        encrypt = pdfencrypt.StandardEncryption(userPassword='User', ownerPassword='Owner')
        encrypt.setAllPermissions(0)
        encrypt.canPrint = 1
        fname = outputfile('test_encrypt_canvas2.pdf')
        c = Canvas(fname, encrypt=encrypt)
        c.setAuthor('Anonymous')
        c.setFont('Helvetica-Bold', 36)
        c.drawString(100, 700, 'Top secret')
        c.save()
        parsedoc(fname)

    def test_doctemplate(self):
        "Test generating an encrypted pdf by setting a user password on the DocTemplate."
        header = ParagraphStyle(name='Heading', fontSize=36)
        story = [Paragraph("Top secret", header)]
        fname = outputfile('test_encrypt_doctemplate.pdf')
        doc = SimpleDocTemplate(fname, encrypt='User')
        doc.build(story)
        parsedoc(fname)


def makeSuite():
    return makeSuiteForClasses(EncryptTestCase)


#noruntests
if __name__ == "__main__":
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
|
{
"content_hash": "28f4596ba9f60070a2e6c974ae1c3a69",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 96,
"avg_line_length": 37.69642857142857,
"alnum_prop": 0.6925627664613927,
"repo_name": "nakagami/reportlab",
"id": "e37caeefe5f628c563847aeb181988f2be86b508",
"size": "2209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_encrypt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "754561"
},
{
"name": "C++",
"bytes": "1351"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "3439804"
},
{
"name": "Shell",
"bytes": "1673"
}
],
"symlink_target": ""
}
|
import errno
import os
from swift import gettext_ as _
from swift import __version__ as swiftver
from swift.common.swob import Request, Response
from swift.common.utils import get_logger, config_true_value, json
from swift.common.constraints import check_mount
from resource import getpagesize
from hashlib import md5
class ReconMiddleware(object):
"""
Recon middleware used for monitoring.
/recon/load|mem|async... will return various system metrics.
Needs to be added to the pipeline and requires a filter
declaration in the object-server.conf:
[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift
"""
def __init__(self, app, conf, *args, **kwargs):
self.app = app
self.devices = conf.get('devices', '/srv/node')
swift_dir = conf.get('swift_dir', '/etc/swift')
self.logger = get_logger(conf, log_route='recon')
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.object_recon_cache = os.path.join(self.recon_cache_path,
'object.recon')
self.container_recon_cache = os.path.join(self.recon_cache_path,
'container.recon')
self.account_recon_cache = os.path.join(self.recon_cache_path,
'account.recon')
self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
self.object_ring_path = os.path.join(swift_dir, 'object.ring.gz')
self.rings = [self.account_ring_path, self.container_ring_path,
self.object_ring_path]
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
def _from_recon_cache(self, cache_keys, cache_file, openr=open):
"""retrieve values from a recon cache file
        :param cache_keys: list of cache items to retrieve
        :param cache_file: cache file to retrieve items from
        :param openr: open function to use [for unittests]
        :return: dict of cache items and their values or None if not found
"""
try:
with openr(cache_file, 'r') as f:
recondata = json.load(f)
return dict((key, recondata.get(key)) for key in cache_keys)
except IOError:
self.logger.exception(_('Error reading recon cache file'))
except ValueError:
self.logger.exception(_('Error parsing recon cache file'))
except Exception:
self.logger.exception(_('Error retrieving recon data'))
return dict((key, None) for key in cache_keys)
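    # For illustration (assumed cache layout): object.recon is a small JSON
    # document such as {"async_pending": 3}, so calling
    # _from_recon_cache(['async_pending'], self.object_recon_cache) would
    # return {'async_pending': 3}.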
def get_version(self):
"""get swift version"""
verinfo = {'version': swiftver}
return verinfo
def get_mounted(self, openr=open):
"""get ALL mounted fs from /proc/mounts"""
mounts = []
with openr('/proc/mounts', 'r') as procmounts:
for line in procmounts:
mount = {}
mount['device'], mount['path'], opt1, opt2, opt3, \
opt4 = line.rstrip().split()
mounts.append(mount)
return mounts
def get_load(self, openr=open):
"""get info from /proc/loadavg"""
loadavg = {}
with openr('/proc/loadavg', 'r') as f:
onemin, fivemin, ftmin, tasks, procs = f.read().rstrip().split()
loadavg['1m'] = float(onemin)
loadavg['5m'] = float(fivemin)
loadavg['15m'] = float(ftmin)
loadavg['tasks'] = tasks
loadavg['processes'] = int(procs)
return loadavg
def get_mem(self, openr=open):
"""get info from /proc/meminfo"""
meminfo = {}
with openr('/proc/meminfo', 'r') as memlines:
for i in memlines:
entry = i.rstrip().split(":")
meminfo[entry[0]] = entry[1].strip()
return meminfo
def get_async_info(self):
"""get # of async pendings"""
return self._from_recon_cache(['async_pending'],
self.object_recon_cache)
def get_replication_info(self, recon_type):
"""get replication info"""
if recon_type == 'account':
return self._from_recon_cache(['replication_time',
'replication_stats',
'replication_last'],
self.account_recon_cache)
elif recon_type == 'container':
return self._from_recon_cache(['replication_time',
'replication_stats',
'replication_last'],
self.container_recon_cache)
elif recon_type == 'object':
return self._from_recon_cache(['object_replication_time',
'object_replication_last'],
self.object_recon_cache)
else:
return None
def get_device_info(self):
"""get devices"""
try:
return {self.devices: os.listdir(self.devices)}
except Exception:
self.logger.exception(_('Error listing devices'))
return {self.devices: None}
def get_updater_info(self, recon_type):
"""get updater info"""
if recon_type == 'container':
return self._from_recon_cache(['container_updater_sweep'],
self.container_recon_cache)
elif recon_type == 'object':
return self._from_recon_cache(['object_updater_sweep'],
self.object_recon_cache)
else:
return None
def get_expirer_info(self, recon_type):
"""get expirer info"""
if recon_type == 'object':
return self._from_recon_cache(['object_expiration_pass',
'expired_last_pass'],
self.object_recon_cache)
def get_auditor_info(self, recon_type):
"""get auditor info"""
if recon_type == 'account':
return self._from_recon_cache(['account_audits_passed',
'account_auditor_pass_completed',
'account_audits_since',
'account_audits_failed'],
self.account_recon_cache)
elif recon_type == 'container':
return self._from_recon_cache(['container_audits_passed',
'container_auditor_pass_completed',
'container_audits_since',
'container_audits_failed'],
self.container_recon_cache)
elif recon_type == 'object':
return self._from_recon_cache(['object_auditor_stats_ALL',
'object_auditor_stats_ZBF'],
self.object_recon_cache)
else:
return None
def get_unmounted(self):
"""list unmounted (failed?) devices"""
mountlist = []
for entry in os.listdir(self.devices):
try:
mounted = check_mount(self.devices, entry)
except OSError as err:
mounted = str(err)
mpoint = {'device': entry, 'mounted': mounted}
if mpoint['mounted'] is not True:
mountlist.append(mpoint)
return mountlist
def get_diskusage(self):
"""get disk utilization statistics"""
devices = []
for entry in os.listdir(self.devices):
try:
mounted = check_mount(self.devices, entry)
except OSError as err:
devices.append({'device': entry, 'mounted': str(err),
'size': '', 'used': '', 'avail': ''})
continue
if mounted:
path = os.path.join(self.devices, entry)
disk = os.statvfs(path)
capacity = disk.f_bsize * disk.f_blocks
available = disk.f_bsize * disk.f_bavail
used = disk.f_bsize * (disk.f_blocks - disk.f_bavail)
devices.append({'device': entry, 'mounted': True,
'size': capacity, 'used': used,
'avail': available})
else:
devices.append({'device': entry, 'mounted': False,
'size': '', 'used': '', 'avail': ''})
return devices
def get_ring_md5(self, openr=open):
"""get all ring md5sum's"""
sums = {}
for ringfile in self.rings:
md5sum = md5()
if os.path.exists(ringfile):
try:
with openr(ringfile, 'rb') as f:
block = f.read(4096)
while block:
md5sum.update(block)
block = f.read(4096)
sums[ringfile] = md5sum.hexdigest()
except IOError as err:
sums[ringfile] = None
if err.errno != errno.ENOENT:
self.logger.exception(_('Error reading ringfile'))
return sums
def get_quarantine_count(self):
"""get obj/container/account quarantine counts"""
qcounts = {"objects": 0, "containers": 0, "accounts": 0}
qdir = "quarantined"
for device in os.listdir(self.devices):
for qtype in qcounts:
qtgt = os.path.join(self.devices, device, qdir, qtype)
if os.path.exists(qtgt):
linkcount = os.lstat(qtgt).st_nlink
if linkcount > 2:
qcounts[qtype] += linkcount - 2
return qcounts
def get_socket_info(self, openr=open):
"""
get info from /proc/net/sockstat and sockstat6
        Note: The mem value is actually kernel pages, but we return bytes
        allocated based on the system's page size.
"""
sockstat = {}
try:
with openr('/proc/net/sockstat', 'r') as proc_sockstat:
for entry in proc_sockstat:
if entry.startswith("TCP: inuse"):
tcpstats = entry.split()
sockstat['tcp_in_use'] = int(tcpstats[2])
sockstat['orphan'] = int(tcpstats[4])
sockstat['time_wait'] = int(tcpstats[6])
sockstat['tcp_mem_allocated_bytes'] = \
int(tcpstats[10]) * getpagesize()
except IOError as e:
if e.errno != errno.ENOENT:
raise
try:
with openr('/proc/net/sockstat6', 'r') as proc_sockstat6:
for entry in proc_sockstat6:
if entry.startswith("TCP6: inuse"):
sockstat['tcp6_in_use'] = int(entry.split()[2])
except IOError as e:
if e.errno != errno.ENOENT:
raise
return sockstat
def GET(self, req):
root, rcheck, rtype = req.split_path(1, 3, True)
all_rtypes = ['account', 'container', 'object']
if rcheck == "mem":
content = self.get_mem()
elif rcheck == "load":
content = self.get_load()
elif rcheck == "async":
content = self.get_async_info()
elif rcheck == 'replication' and rtype in all_rtypes:
content = self.get_replication_info(rtype)
elif rcheck == 'replication' and rtype is None:
            # handle old-style object replication requests
content = self.get_replication_info('object')
elif rcheck == "devices":
content = self.get_device_info()
elif rcheck == "updater" and rtype in ['container', 'object']:
content = self.get_updater_info(rtype)
elif rcheck == "auditor" and rtype in all_rtypes:
content = self.get_auditor_info(rtype)
elif rcheck == "expirer" and rtype == 'object':
content = self.get_expirer_info(rtype)
elif rcheck == "mounted":
content = self.get_mounted()
elif rcheck == "unmounted":
content = self.get_unmounted()
elif rcheck == "diskusage":
content = self.get_diskusage()
elif rcheck == "ringmd5":
content = self.get_ring_md5()
elif rcheck == "quarantined":
content = self.get_quarantine_count()
elif rcheck == "sockstat":
content = self.get_socket_info()
elif rcheck == "version":
content = self.get_version()
else:
content = "Invalid path: %s" % req.path
return Response(request=req, status="404 Not Found",
body=content, content_type="text/plain")
if content is not None:
return Response(request=req, body=json.dumps(content),
content_type="application/json")
else:
return Response(request=req, status="500 Server Error",
body="Internal server error.",
content_type="text/plain")
def __call__(self, env, start_response):
req = Request(env)
if req.path.startswith('/recon/'):
return self.GET(req)(env, start_response)
else:
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def recon_filter(app):
return ReconMiddleware(app, conf)
return recon_filter
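# Illustrative paste-deploy wiring (an assumption mirroring the class
# docstring above, not taken from a real deployment):
#     [pipeline:main]
#     pipeline = recon object-server
#     [filter:recon]
#     use = egg:swift#recon
#     recon_cache_path = /var/cache/swift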
|
{
"content_hash": "c9f3496d16f3e95815044882e21b1194",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 79,
"avg_line_length": 41.69526627218935,
"alnum_prop": 0.5097566167600937,
"repo_name": "xiaoguoai/ec-dev-swift",
"id": "357a431fd4980443f04e669ff58c416bf7bcb83c",
"size": "14688",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "swift/common/middleware/recon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15048"
},
{
"name": "Python",
"bytes": "3816353"
},
{
"name": "Shell",
"bytes": "2933"
}
],
"symlink_target": ""
}
|
"""Standard HTTP request handlers for an HTTP server."""
__author__ = 'mqian@google.com (Mike Qian)'
import gc
import linecache
import logging
import os
import re
import sys
import threading
import traceback
import db
import vars_common
import vars_export
import vars_helper
_HTML_HEADER = """<HTML>
<HEAD><TITLE>%s</TITLE></HEAD>
<BODY>
"""
_HTML_FOOTER = """
</BODY>
</HTML>
"""
_HTMLDOC_STYLE = """<style>
span {
position: relative;
cursor: hand;
}
span b {
text-decoration: none;
color: #CC3300;
}
span div {
display: none;
z-index: 999;
position: absolute;
top: 2em;
left: 80px;
border: 1px solid #3333CC;
padding: 1px;
background-color: #CCCCFF;
font-size: smaller;
width: 400;
}
span div:first-line {
text-decoration: underline;
}
span:hover div {
display: block;
}
body {
padding-bottom: 100px;
}
</style>
"""
def HandleAbortAbortAbort(request, unused_response):
"""Handles /abortabortabort requests."""
logging.error('/abortabortabort from %s' % request.remote_addr)
os.abort()
def HandleDBs(unused_request, response):
"""Renders /dbs page with database connection usage."""
response.headers.add_header('Content-Type', 'text/plain')
response.start_response(200)
for conn in db.CONNECTIONS:
response.out.write(str(conn()))
response.out.write('\n')
response.end_response()
def HandlePyHeaps(unused_request, response):
"""Renders /pyheaps page with Python object memory usage."""
response.headers.add_header('Content-Type', 'text/plain')
response.start_response(200)
count = {}
mem_used = {}
for obj in gc.get_objects():
count[type(obj)] = count.get(type(obj), 0) + 1
mem_used[type(obj)] = mem_used.get(type(obj), 0) + sys.getsizeof(obj)
response.out.write('Python objects (total bytes, count, type):\n\n')
for total_size, type_ in sorted(((v, k) for k, v in mem_used.iteritems()),
reverse=True):
response.out.write('%12s %12s %s\n' % (total_size, count[type_], type_))
response.end_response()
def HandlePyThreads(unused_request, response):
"""Renders /pythreads page with Python thread stacks."""
response.headers.add_header('Content-Type', 'text/plain')
response.start_response(200)
frames = sys._current_frames()
threads = [(t.ident, t) for t in threading.enumerate()]
thread_names = dict((tid, '(name: %s) ' % t.getName()) for tid, t in threads)
response.out.write('Python threads:\n\n')
for thread_id, frame in frames.items():
response.out.write('--- Thread %d %sstack: ---\n' %
(thread_id, thread_names.get(thread_id, '')))
frame_tuples = []
while frame:
filename = frame.f_code.co_filename
lineno = frame.f_lineno
line = linecache.getline(filename, lineno)
frame_tuples.append((filename, lineno, frame.f_code.co_name, line))
frame = frame.f_back
frame_tuples.reverse()
response.out.write(''.join(traceback.format_list(frame_tuples)))
response.end_response()
def HandleQuitQuitQuit(request, response, server):
"""Handles /quitquitquit requests."""
response.headers.add_header('Content-Type', 'text/plain')
response.start_response(200)
response.out.write('Shutting down...')
response.end_response()
logging.warn('/quitquitquit from %s', request.remote_addr)
server.Shutdown()
def HandleVars(request, response):
"""Renders /vars page with exported variables."""
requested_vars = request.get('var', '')
output = request.get('output', 'html')
regexes = [re.compile(x) for x in request.get('varregexp', [])]
if regexes and requested_vars:
title = 'Request Error'
error_msg = (
'ERROR: Can only specify one of "var=" or "varregexp=" parameters')
response.headers.add_header('Content-Type', 'text/html')
response.start_response(200)
response.out.write(_HTML_HEADER % title + error_msg + _HTML_FOOTER)
response.end_response()
return
values = {}
title = 'All Variables'
# Filter by specific vars.
if requested_vars:
vars_list = requested_vars.split(':')
if len(vars_list) == 1:
title = 'Variable: %s' % requested_vars.strip()
else:
title = 'Variables: %s' % requested_vars.strip()
for var in vars_list:
name = vars_common.CanonicalName(var)
value = vars_helper.FetchVar(var)
if value is None:
values[name] = '(Variable not found)'
else:
values[name] = value
# Filter vars by regexes.
elif regexes:
title = 'Matching Variables'
for k, v in vars_helper.FetchAllVars().iteritems():
for regex in regexes:
if regex.match(k):
values[k] = v
break
# Just show all the vars.
else:
values = vars_helper.FetchAllVars()
callback = _OUTPUT_FORMATS.get(output, _OUTPUT_FORMATS['html'])
callback(response, values, title)
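# Example requests this handler accepts (illustrative paths and names):
#   /vars                              -> all variables, HTML output
#   /vars?var=foo:bar                  -> only variables foo and bar
#   /vars?varregexp=^rpc_&output=text  -> regexp-filtered, plain-text output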
def HandleVarsDoc(request, response):
"""Renders /varsdoc page with documentation for exported variables."""
title = 'All Variables'
if request.get('output', '') == 'text':
response.headers.add_header('Content-Type', 'text/plain; charset=UTF-8')
response.start_response(200)
response.out.write(_HTML_HEADER % title)
response.out.write(vars_export.GetAllDocPlain())
response.out.write(_HTML_FOOTER)
response.end_response()
else:
response.headers.add_header('Content-Type', 'text/html; charset=UTF-8')
response.start_response(200)
response.out.write(_HTML_HEADER % title)
response.out.write(vars_export.GetAllDocHTML())
response.out.write(_HTML_FOOTER)
response.end_response()
def _WritePlainResponse(response, values, unused_title):
"""Write the HTTP response for a /vars?output=text request.
Args:
response: The HTTP response object.
values: A dictionary containing (name, value) mappings to be written.
"""
content = ''.join(vars_export.GetPairPlain(key, values[key])
for key in values)
if len(values) == 1:
value = content.split(' ', 1)[1]
if not value.startswith('map:'):
content = value
response.headers.add_header('Content-Type', 'text/plain; charset=UTF-8')
response.start_response(200)
response.out.write(content)
response.end_response()
def _WriteHTMLResponse(response, values, title='All Variables'):
"""Write the HTTP response for a /vars?output=html request.
  Args:
    response: The HTTP response object.
    values: A dictionary containing (name, value) mappings to be written.
    title: The page title.
"""
content = ''.join(vars_export.GetPairHTML(key, values[key]) for key in values)
if len(values) == 1:
value = content.split(' ', 1)[1]
if not value.startswith('map:'):
content = value
response.headers.add_header('Content-Type', 'text/html; charset=UTF-8')
response.start_response(200)
response.out.write(_HTML_HEADER % title)
response.out.write('<tt>\n' + content + '</tt>')
response.out.write(_HTML_FOOTER)
response.end_response()
def _WriteHTMLDocResponse(response, values, title='All Variables'):
"""Write the HTTP response for a /vars?output=htmldoc request.
  Args:
    response: The HTTP response object.
    values: A dictionary containing (name, value) mappings to be written.
    title: The page title.
"""
content = ''.join(vars_export.GetVarDocHTML(var) for var in values)
response.headers.add_header('Content-Type', 'text/html; charset=UTF-8')
response.start_response(200)
response.out.write(_HTML_HEADER % title)
response.out.write(_HTMLDOC_STYLE)
response.out.write('<tt>\n' + content + '</tt>')
response.out.write(_HTML_FOOTER)
response.end_response()
STANDARD_HANDLERS = {
'/abortabortabort': HandleAbortAbortAbort,
'/dbs': HandleDBs,
'/pyheaps': HandlePyHeaps,
'/pythreads': HandlePyThreads,
'/quitquitquit': HandleQuitQuitQuit,
'/vars': HandleVars,
'/varsdoc': HandleVarsDoc,
}
_OUTPUT_FORMATS = {
'text': _WritePlainResponse,
'html': _WriteHTMLResponse,
'htmldoc': _WriteHTMLDocResponse,
}
def StandardHandlers(callback, request, response, *args, **kwargs):
"""Handles standard page endpoints, or calls callback."""
uri = request.path.rstrip('/')
STANDARD_HANDLERS.get(uri, callback)(request, response, *args, **kwargs)
|
{
"content_hash": "ca58d557c6b8046bae007cfb08103ccb",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 80,
"avg_line_length": 28.919298245614034,
"alnum_prop": 0.6705896627032274,
"repo_name": "dbarobin/google-mysql-tools",
"id": "9d88e58057499d45a394b609b7ca78c03ca1508b",
"size": "8839",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pylib/http_handlers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5563"
},
{
"name": "Python",
"bytes": "422962"
}
],
"symlink_target": ""
}
|
'''
makes a dictionary of geographic locations found in flubase name strings.
these name strings are submitted to the google geocoder to obtain full
geographic information including longitude and latitude. based on this,
regions like south america are defined
'''
from Bio import SeqIO
import pickle
import time,gzip
fname = '../data/H3N2_HA1_2012_2014_flubase_filtered.fasta.gz'
#fname = '../data/H1N1_HA1_all_years_filtered.fasta.gz'
#fname = '../data/gisaid_H3N2_all_years_human_full_date_aligned_trimmed.fasta.gz'
places = set()
names = []
with gzip.open(fname, 'r') as infile:
for seq in SeqIO.parse(infile, 'fasta'):
names.append(seq.name)
#country = seq.name.split('|')[3]
country = ''
places.add((seq.name.split('/')[1],country))
# append to an existing pickle -- this is useful since geocoders have a quota
# one global file for all flu strains should be enough
try:
with open('../data/flubase_places.pickle', 'r') as infile:
place_to_coordinates = pickle.load(infile)
except:
place_to_coordinates = {}
from geopy import geocoders
g = geocoders.GoogleV3()
g.timeout=10
for place, country in places:
if place not in place_to_coordinates:
loc = g.geocode(place.replace('_', ' ')+', '+country.replace('_', ' '))
time.sleep(0.2)
try:
if loc:
print place, country,loc
country = loc[0].split(',')[-1].strip()
country = country.encode('ascii', 'replace')
location = loc[1]
else:
print place, country
location = ('nan', 'nan')
country = 'unknown'
place_to_coordinates[place] = {'country':country.lower(),
'lat':location[0], 'lng':location[1]}
except:
print "ERROR",place
# save pickle back to the same file with new places added
try:
with open('../data/flubase_places.pickle', 'w') as outfile:
pickle.dump(place_to_coordinates,outfile)
except:
print "can't save places_pickle"
|
{
"content_hash": "55f8e36b77567bc1a0b1928bdf0751f4",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 81,
"avg_line_length": 33.67741935483871,
"alnum_prop": 0.6154214559386973,
"repo_name": "rneher/FitnessInference",
"id": "0452d02dca650fd7049f8a18485cd507e4e32cd5",
"size": "2088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flu/sequence_and_annotations/make_list_of_places.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5909"
},
{
"name": "Python",
"bytes": "255715"
},
{
"name": "Shell",
"bytes": "34"
}
],
"symlink_target": ""
}
|
from txServiceRegistry.client import Client
from twisted.internet import reactor
RACKSPACE_USERNAME = '' # your username here
RACKSPACE_KEY = '' # your API key here
SERVICE_REGISTRY_URL = 'https://dfw.registry.api.rackspace.com/v1.0/'
client = Client(username=RACKSPACE_USERNAME,
apiKey=RACKSPACE_KEY,
baseUrl=SERVICE_REGISTRY_URL,
region='us')
|
{
"content_hash": "ad4c26b3879d17d6af5780184de4c400",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 69,
"avg_line_length": 35.90909090909091,
"alnum_prop": 0.6835443037974683,
"repo_name": "racker/service-registry",
"id": "34d1ee765721feca87198d711fee84c649f3d75b",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manual_fixtures/integration_guide/instantiate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "418622"
},
{
"name": "Python",
"bytes": "1399"
},
{
"name": "Shell",
"bytes": "13554"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse
import os
import random
import sys
sys.path.append(os.getcwd())
import pdb
import time
import numpy as np
import json
import progressbar
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from misc.utils import repackage_hidden, clip_gradient, adjust_learning_rate, \
decode_txt, sample_batch_neg, l2_norm
import misc.dataLoader as dl
import misc.model as model
from misc.encoder_QIH import _netE
from misc.netG import _netG
import datetime
parser = argparse.ArgumentParser()
parser.add_argument('--input_img_h5', default='data/vdl_img_vgg.h5', help='path to dataset, now hdf5 file')
parser.add_argument('--input_ques_h5', default='data/visdial_data.h5', help='path to dataset, now hdf5 file')
parser.add_argument('--input_json', default='data/visdial_params.json', help='path to dataset, now hdf5 file')
parser.add_argument('--outf', default='./save', help='folder to output images and model checkpoints')
parser.add_argument('--encoder', default='scrach', help='what encoder to use.')
parser.add_argument('--num_val', default=1000, help='number of image split out as validation set.')
parser.add_argument('--update_D', action='store_true', help='whether to update the Discriminator with the GAN loss.')
parser.add_argument('--update_LM', action='store_true', help='whether to also update the Generator with the MLE loss.')
parser.add_argument('--model_path_D', default='save/HCIAE-D-MLE.pth', help='path to a pretrained Discriminator checkpoint')
parser.add_argument('--model_path_G', default='save/HCIAE-G-MLE.pth', help='path to a pretrained Generator checkpoint')
parser.add_argument('--negative_sample', type=int, default=20, help='number of negative answer samples per question')
parser.add_argument('--neg_batch_sample', type=int, default=30, help='number of negative samples drawn from within the batch')
parser.add_argument('--niter', type=int, default=100, help='number of epochs to train for')
parser.add_argument('--start_epoch', type=int, default=0, help='start of epochs to train for')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=5)
parser.add_argument('--batchSize', type=int, default=100, help='input batch size')
parser.add_argument('--eval_iter', type=int, default=1, help='evaluate every this many epochs')
parser.add_argument('--save_iter', type=int, default=1, help='save a checkpoint every this many epochs')
parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')
parser.add_argument('--D_lr', type=float, default=5e-5, help='learning rate for, default=0.00005')
parser.add_argument('--G_lr', type=float, default=5e-5, help='learning rate for, default=0.00005')
parser.add_argument('--LM_lr', type=float, default=5e-5, help='learning rate for, default=0.00005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda' , action='store_true', help='enables cuda')
parser.add_argument('--ngpu' , type=int, default=1, help='number of GPUs to use')
parser.add_argument('--verbose' , action='store_true', help='show the sampled caption')
parser.add_argument('--hidden_size', type=int, default=512, help='size of the hidden state')
parser.add_argument('--model', type=str, default='LSTM', help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--ninp', type=int, default=300, help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=512, help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=1, help='number of layers')
parser.add_argument('--dropout', type=float, default=0.5, help='dropout rate')
parser.add_argument('--clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--margin', type=float, default=2, help='margin for the n-pair ranking loss')
parser.add_argument('--gumble_weight', type=float, default=0.5, help='temperature for the Gumbel softmax sampler')
opt = parser.parse_args()
print(opt)
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# create new folder.
t = datetime.datetime.now()
cur_time = '%s-%s-%s' %(t.day, t.month, t.hour)
save_path = os.path.join(opt.outf, opt.encoder + '.' + cur_time)
try:
os.makedirs(save_path)
except OSError:
pass
if opt.model_path_D != '' :
print("=> loading checkpoint '{}'".format(opt.model_path_D))
checkpoint_D = torch.load(opt.model_path_D)
if opt.model_path_G != '':
print("=> loading checkpoint '{}'".format(opt.model_path_G))
checkpoint_G = torch.load(opt.model_path_G)
####################################################################################
# Data Loader
####################################################################################
dataset = dl.train(input_img_h5=opt.input_img_h5, input_ques_h5=opt.input_ques_h5,
input_json=opt.input_json, negative_sample = opt.negative_sample,
num_val = opt.num_val, data_split = 'train')
dataset_val = dl.validate(input_img_h5=opt.input_img_h5, input_ques_h5=opt.input_ques_h5,
input_json=opt.input_json, negative_sample = opt.negative_sample,
num_val = opt.num_val, data_split = 'val')
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
dataloader_val = torch.utils.data.DataLoader(dataset_val, batch_size=10,
shuffle=False, num_workers=int(opt.workers))
####################################################################################
# Build the Model
####################################################################################
vocab_size = dataset.vocab_size
ques_length = dataset.ques_length
ans_length = dataset.ans_length + 1
his_length = dataset.ans_length + dataset.ques_length
itow = dataset.itow
img_feat_size = 512
print('init Discriminator model...')
netE_d = _netE(opt.model, opt.ninp, opt.nhid, opt.nlayers, opt.dropout, img_feat_size)
netW_d = model._netW(vocab_size, opt.ninp, opt.dropout)
netD = model._netD(opt.model, opt.ninp, opt.nhid, opt.nlayers, vocab_size, opt.dropout)
critD =model.nPairLoss(opt.ninp, opt.margin)
if opt.model_path_D != '' :
print('Loading Discriminator model...')
netW_d.load_state_dict(checkpoint_D['netW'])
netE_d.load_state_dict(checkpoint_D['netE'])
netD.load_state_dict(checkpoint_D['netD'])
print('init Generative model...')
netE_g = _netE(opt.model, opt.ninp, opt.nhid, opt.nlayers, opt.dropout, img_feat_size)
netW_g = model._netW(vocab_size, opt.ninp, opt.dropout)
netG = _netG(opt.model, vocab_size, opt.ninp, opt.nhid, opt.nlayers, opt.dropout)
sampler = model.gumbel_sampler()
critG = model.G_loss(opt.ninp)
critLM = model.LMCriterion()
if opt.model_path_G != '':
print('Loading Generative model...')
netW_g.load_state_dict(checkpoint_G['netW'])
netE_g.load_state_dict(checkpoint_G['netE'])
netG.load_state_dict(checkpoint_G['netG'])
if opt.cuda:  # ship everything to CUDA when a GPU is available
netW_d.cuda(), netW_g.cuda()
netE_d.cuda(), netE_g.cuda()
netD.cuda(), netG.cuda()
critD.cuda(), critG.cuda()
sampler.cuda(), critLM.cuda()
####################################################################################
# training model
####################################################################################
def train(epoch):
netW_d.train(), netE_d.train(), netE_g.train()
netD.train(), netG.train(), netW_g.train()
fake_len = torch.LongTensor(opt.batchSize)
fake_len = fake_len.cuda()
n_neg = opt.negative_sample
ques_hidden1 = netE_d.init_hidden(opt.batchSize)
ques_hidden2 = netE_g.init_hidden(opt.batchSize)
hist_hidden1 = netE_d.init_hidden(opt.batchSize)
hist_hidden2 = netE_g.init_hidden(opt.batchSize)
real_hidden = netD.init_hidden(opt.batchSize)
wrong_hidden = netD.init_hidden(opt.batchSize)
fake_hidden = netD.init_hidden(opt.batchSize)
data_iter = iter(dataloader)
err_d = 0
err_g = 0
err_lm = 0
average_loss = 0
count = 0
i = 0
loss_store = []
while i < len(dataloader):
t1 = time.time()
data = data_iter.next()
image, history, question, answer, answerT, answerLen, answerIdx, questionL, \
opt_answerT, opt_answerLen, opt_answerIdx = data
batch_size = question.size(0)
image = image.view(-1, 512)
img_input.data.resize_(image.size()).copy_(image)
err_d_tmp = 0
err_g_tmp = 0
err_lm_tmp = 0
err_d_fake_tmp = 0
err_g_fake_tmp = 0
for rnd in range(10):
# get the corresponding round QA and history.
ques = question[:,rnd,:].t()
his = history[:,:rnd+1,:].clone().view(-1, his_length).t()
ans = answer[:,rnd,:].t()
tans = answerT[:,rnd,:].t()
wrong_ans = opt_answerT[:,rnd,:].clone().view(-1, ans_length).t()
real_len = answerLen[:,rnd].long()
wrong_len = opt_answerLen[:,rnd,:].clone().view(-1)
ques_input.data.resize_(ques.size()).copy_(ques)
his_input.data.resize_(his.size()).copy_(his)
ans_input.data.resize_(ans.size()).copy_(ans)
ans_target.data.resize_(tans.size()).copy_(tans)
wrong_ans_input.data.resize_(wrong_ans.size()).copy_(wrong_ans)
batch_sample_idx.data.resize_(batch_size, opt.neg_batch_sample).zero_()
sample_batch_neg(answerIdx[:,rnd], opt_answerIdx[:,rnd,:], batch_sample_idx, opt.neg_batch_sample)
# -----------------------------------------
# update the Generator using MLE loss.
# -----------------------------------------
if opt.update_LM:
ques_emb_g = netW_g(ques_input, format = 'index')
his_emb_g = netW_g(his_input, format = 'index')
ques_hidden1 = repackage_hidden(ques_hidden1, batch_size)
hist_hidden1 = repackage_hidden(hist_hidden1, his_emb_g.size(1))
featG, ques_hidden1 = netE_g(ques_emb_g, his_emb_g, img_input, \
ques_hidden1, hist_hidden1, rnd+1)
_, ques_hidden1 = netG(featG.view(1, -1, opt.ninp), ques_hidden1)
# MLE loss for generator
ans_emb = netW_g(ans_input)
logprob, _ = netG(ans_emb, ques_hidden1)
lm_loss = critLM(logprob, ans_target.view(-1, 1))
lm_loss = lm_loss / torch.sum(ans_target.data.gt(0))
netW_g.zero_grad()
netG.zero_grad()
netE_g.zero_grad()
lm_loss.backward()
optimizerLM.step()
err_lm += lm_loss.data[0]
err_lm_tmp += lm_loss.data[0]
            # sample the answer using the Gumbel softmax sampler.
ques_emb_g = netW_g(ques_input, format = 'index')
his_emb_g = netW_g(his_input, format = 'index')
ques_hidden1 = repackage_hidden(ques_hidden1, batch_size)
hist_hidden1 = repackage_hidden(hist_hidden1, his_emb_g.size(1))
featG, ques_hidden1 = netE_g(ques_emb_g, his_emb_g, img_input, \
ques_hidden1, hist_hidden1, rnd+1)
_, ques_hidden1 = netG(featG.view(1, -1, opt.ninp), ques_hidden1)
            # Gumbel softmax to sample the output.
fake_onehot = []
fake_idx = []
noise_input.data.resize_(ans_length, batch_size, vocab_size+1)
noise_input.data.uniform_(0,1)
ans_sample = ans_input[0]
for di in range(ans_length):
ans_emb = netW_g(ans_sample, format = 'index')
logprob, ques_hidden1 = netG(ans_emb.view(1,-1,opt.ninp), ques_hidden1)
one_hot, idx = sampler(logprob, noise_input[di], opt.gumble_weight)
fake_onehot.append(one_hot.view(1, -1, vocab_size+1))
fake_idx.append(idx)
if di+1 < ans_length:
ans_sample = idx
# convert the list into the tensor variable.
fake_onehot = torch.cat(fake_onehot, 0)
fake_idx = torch.cat(fake_idx,0)
fake_len.resize_(batch_size).fill_(ans_length-1)
for di in range(ans_length-1, 0, -1):
fake_len.masked_fill_(fake_idx.data[di].eq(vocab_size), di)
# generate fake mask.
fake_mask.data.resize_(fake_idx.size()).fill_(1)
# get the real, wrong and fake index.
for b in range(batch_size):
fake_mask.data[:fake_len[b]+1, b] = 0
# apply the mask on the fake_idx.
fake_idx.masked_fill_(fake_mask, 0)
# get the fake diff mask.
#fake_diff_mask = torch.sum(fake_idx == ans_target, 0) != 0
fake_onehot = fake_onehot.view(-1, vocab_size+1)
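            # The sampler used above follows the Gumbel softmax trick (sketch
            # of the idea; the exact details live in model.gumbel_sampler):
            # draw u ~ Uniform(0, 1), form Gumbel noise g = -log(-log(u)), and
            # take softmax((log p + g) / tau), where tau is opt.gumble_weight;
            # this gives a differentiable, nearly one-hot sample from p.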
######################################
# Discriminative trained generative model.
######################################
# forward the discriminator again.
ques_emb_d = netW_d(ques_input, format = 'index')
his_emb_d = netW_d(his_input, format = 'index')
ques_hidden2 = repackage_hidden(ques_hidden2, batch_size)
hist_hidden2 = repackage_hidden(hist_hidden2, his_emb_d.size(1))
featD, _ = netE_d(ques_emb_d, his_emb_d, img_input, \
ques_hidden2, hist_hidden2, rnd+1)
ans_real_emb = netW_d(ans_target, format='index')
#ans_wrong_emb = netW_d(wrong_ans_input, format='index')
ans_fake_emb = netW_d(fake_onehot, format='onehot')
ans_fake_emb = ans_fake_emb.view(ans_length, -1, opt.ninp)
real_hidden = repackage_hidden(real_hidden, batch_size)
#wrong_hidden = repackage_hidden(wrong_hidden, ans_wrong_emb.size(1))
fake_hidden = repackage_hidden(fake_hidden, batch_size)
fake_feat = netD(ans_fake_emb, fake_idx, fake_hidden, vocab_size)
real_feat = netD(ans_real_emb, ans_target, real_hidden, vocab_size)
d_g_loss, g_fake = critG(featD, real_feat, fake_feat)#, fake_diff_mask.detach())
netW_g.zero_grad()
netG.zero_grad()
netE_g.zero_grad()
d_g_loss.backward()
optimizerG.step()
err_g += d_g_loss.data[0]
err_g_tmp += d_g_loss.data[0]
err_g_fake_tmp += g_fake
count += 1
i += 1
loss_store.append({'iter':i, 'err_lm':err_lm_tmp/10, 'err_d':err_d_tmp/10, 'err_g':err_g_tmp/10, \
'd_fake': err_d_fake_tmp/10, 'g_fake':err_g_fake_tmp/10})
if i % 20 == 0:
print ('Epoch:%d %d/%d, err_lm %4f, err_d %4f, err_g %4f, d_fake %4f, g_fake %4f' \
% (epoch, i, len(dataloader), err_lm_tmp/10, err_d_tmp/10, err_g_tmp/10, err_d_fake_tmp/10, \
err_g_fake_tmp/10))
#average_loss = average_loss / count
err_g = err_g / count
err_d = err_d / count
err_lm = err_lm / count
return err_lm, err_d, err_g, loss_store
def val():
netE_g.eval()
netE_d.eval()
netW_g.eval()
netW_d.eval()
netG.eval()
netD.eval()
n_neg = 100
ques_hidden1 = netE_g.init_hidden(opt.batchSize)
ques_hidden2 = netE_d.init_hidden(opt.batchSize)
hist_hidden1 = netE_d.init_hidden(opt.batchSize)
hist_hidden2 = netE_g.init_hidden(opt.batchSize)
opt_hidden = netD.init_hidden(opt.batchSize)
data_iter_val = iter(dataloader_val)
count = 0
i = 0
rank_G = []
rank_D = []
while i < len(dataloader_val):
data = data_iter_val.next()
image, history, question, answer, answerT, questionL, opt_answer, \
opt_answerT, answer_ids, answerLen, opt_answerLen, img_id = data
batch_size = question.size(0)
image = image.view(-1, 512)
img_input.data.resize_(image.size()).copy_(image)
for rnd in range(10):
# get the corresponding round QA and history.
ques = question[:,rnd,:].t()
his = history[:,:rnd+1,:].clone().view(-1, his_length).t()
opt_ans = opt_answer[:,rnd,:,:].clone().view(-1, ans_length).t()
opt_tans = opt_answerT[:,rnd,:].clone().view(-1, ans_length).t()
gt_id = answer_ids[:,rnd]
opt_len = opt_answerLen[:,rnd,:].clone().view(-1)
ques_input.data.resize_(ques.size()).copy_(ques)
his_input.data.resize_(his.size()).copy_(his)
opt_ans_input.data.resize_(opt_ans.size()).copy_(opt_ans)
opt_ans_target.data.resize_(opt_tans.size()).copy_(opt_tans)
gt_index.data.resize_(gt_id.size()).copy_(gt_id)
ques_emb_g = netW_g(ques_input, format = 'index')
his_emb_g = netW_g(his_input, format = 'index')
ques_emb_d = netW_d(ques_input, format = 'index')
his_emb_d = netW_d(his_input, format = 'index')
ques_hidden1 = repackage_hidden(ques_hidden1, batch_size)
ques_hidden2 = repackage_hidden(ques_hidden2, batch_size)
hist_hidden1 = repackage_hidden(hist_hidden1, his_emb_g.size(1))
hist_hidden2 = repackage_hidden(hist_hidden2, his_emb_d.size(1))
featG, ques_hidden1 = netE_g(ques_emb_g, his_emb_g, img_input, \
ques_hidden1, hist_hidden1, rnd+1)
featD, _ = netE_d(ques_emb_d, his_emb_d, img_input, \
ques_hidden2, hist_hidden2, rnd+1)
#featD = l2_norm(featD)
# Evaluate the Generator:
_, ques_hidden1 = netG(featG.view(1,-1,opt.ninp), ques_hidden1)
#_, ques_hidden = netG(encoder_feat.view(1,-1,opt.ninp), ques_hidden)
# extend the hidden
hidden_replicated = []
for hid in ques_hidden1:
hidden_replicated.append(hid.view(opt.nlayers, batch_size, 1, \
opt.nhid).expand(opt.nlayers, batch_size, 100, opt.nhid).clone().view(opt.nlayers, -1, opt.nhid))
hidden_replicated = tuple(hidden_replicated)
ans_emb = netW_g(opt_ans_input, format = 'index')
output, _ = netG(ans_emb, hidden_replicated)
logprob = - output
logprob_select = torch.gather(logprob, 1, opt_ans_target.view(-1,1))
mask = opt_ans_target.data.eq(0) # generate the mask
if isinstance(logprob, Variable):
mask = Variable(mask, volatile=logprob.volatile)
logprob_select.masked_fill_(mask.view_as(logprob_select), 0)
prob = logprob_select.view(ans_length, -1, 100).sum(0).view(-1,100)
for b in range(batch_size):
gt_index.data[b] = gt_index.data[b] + b*100
gt_score = prob.view(-1).index_select(0, gt_index)
sort_score, sort_idx = torch.sort(prob, 1)
count = sort_score.lt(gt_score.view(-1,1).expand_as(sort_score))
rank = count.sum(1) + 1
rank_G += list(rank.view(-1).data.cpu().numpy())
opt_ans_emb = netW_d(opt_ans_target, format = 'index')
opt_hidden = repackage_hidden(opt_hidden, opt_ans_target.size(1))
opt_feat = netD(opt_ans_emb, opt_ans_target, opt_hidden, vocab_size)
opt_feat = opt_feat.view(batch_size, -1, opt.ninp)
#ans_emb = ans_emb.view(ans_length, -1, 100, opt.nhid)
featD = featD.view(-1, opt.ninp, 1)
score = torch.bmm(opt_feat, featD)
score = score.view(-1, 100)
gt_score = score.view(-1).index_select(0, gt_index)
sort_score, sort_idx = torch.sort(score, 1, descending=True)
count = sort_score.gt(gt_score.view(-1,1).expand_as(sort_score))
rank = count.sum(1) + 1
rank_D += list(rank.view(-1).data.cpu().numpy())
i += 1
return rank_G, rank_D
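# val() returns, for every dialog round, the 1-based rank of the ground-truth
# answer among its 100 candidates; the recall@k and MRR numbers printed in the
# main loop below are derived from these rank lists.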
####################################################################################
# Main
####################################################################################
img_input = torch.FloatTensor(opt.batchSize)
ques_input = torch.LongTensor(ques_length, opt.batchSize)
his_input = torch.LongTensor(his_length, opt.batchSize)
# answer input
ans_input = torch.LongTensor(ans_length, opt.batchSize)
ans_target = torch.LongTensor(ans_length, opt.batchSize)
wrong_ans_input = torch.LongTensor(ans_length, opt.batchSize)
sample_ans_input = torch.LongTensor(1, opt.batchSize)
fake_len = torch.LongTensor(opt.batchSize)
fake_diff_mask = torch.ByteTensor(opt.batchSize)
fake_mask = torch.ByteTensor(opt.batchSize)
# answer len
batch_sample_idx = torch.LongTensor(opt.batchSize)
# noise
noise_input = torch.FloatTensor(opt.batchSize)
# for evaluation:
opt_ans_input = torch.LongTensor(opt.batchSize)
gt_index = torch.LongTensor(opt.batchSize)
opt_ans_target = torch.LongTensor(opt.batchSize)
if opt.cuda:
ques_input, his_input, img_input = ques_input.cuda(), his_input.cuda(), img_input.cuda()
ans_input, ans_target = ans_input.cuda(), ans_target.cuda()
wrong_ans_input = wrong_ans_input.cuda()
sample_ans_input = sample_ans_input.cuda()
fake_len = fake_len.cuda()
noise_input = noise_input.cuda()
batch_sample_idx = batch_sample_idx.cuda()
fake_diff_mask = fake_diff_mask.cuda()
fake_mask = fake_mask.cuda()
opt_ans_input = opt_ans_input.cuda()
gt_index = gt_index.cuda()
opt_ans_target = opt_ans_target.cuda()
ques_input = Variable(ques_input)
img_input = Variable(img_input)
his_input = Variable(his_input)
ans_input = Variable(ans_input)
ans_target = Variable(ans_target)
wrong_ans_input = Variable(wrong_ans_input)
sample_ans_input = Variable(sample_ans_input)
noise_input = Variable(noise_input)
batch_sample_idx = Variable(batch_sample_idx)
fake_diff_mask = Variable(fake_diff_mask)
fake_mask = Variable(fake_mask)
opt_ans_input = Variable(opt_ans_input)
opt_ans_target = Variable(opt_ans_target)
gt_index = Variable(gt_index)
optimizerD = optim.Adam([{'params': netW_d.parameters()},
{'params': netE_d.parameters()},
{'params': netD.parameters()}], lr=opt.D_lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam([{'params': netW_g.parameters()},
{'params': netE_g.parameters()},
{'params': netG.parameters()}], lr=opt.G_lr, betas=(opt.beta1, 0.999))
optimizerLM = optim.Adam([{'params': netW_g.parameters()},
{'params': netE_g.parameters()},
{'params': netG.parameters()}], lr=opt.LM_lr, betas=(opt.beta1, 0.999))
history = []
train_his = {}
for epoch in range(opt.start_epoch+1, opt.niter):
t = time.time()
train_loss_lm, train_loss_d, train_loss_g, loss_store = train(epoch)
print ('Epoch: %d LM loss %4f Discriminator loss %4f Generator loss %4f Time: %3f' \
% (epoch, train_loss_lm, train_loss_d, train_loss_g, time.time()-t))
train_his = {'lossLM': train_loss_lm, 'loss_G':train_loss_g, 'loss_D':train_loss_d, 'loss_store':loss_store}
if epoch % opt.eval_iter == 0:
print('Evaluating ... ')
rank_G, rank_D = val()
R1 = np.sum(np.array(rank_G)==1) / float(len(rank_G))
R5 = np.sum(np.array(rank_G)<=5) / float(len(rank_G))
R10 = np.sum(np.array(rank_G)<=10) / float(len(rank_G))
ave = np.sum(np.array(rank_G)) / float(len(rank_G))
mrr = np.sum(1/(np.array(rank_G, dtype='float'))) / float(len(rank_G))
print ('Generator: %d/%d: mrr: %f R1: %f R5 %f R10 %f Mean %f' %(epoch, len(dataloader_val), mrr, R1, R5, R10, ave))
val_his_G = {'R1': R1, 'R5':R5, 'R10': R10, 'Mean':ave, 'mrr':mrr}
R1 = np.sum(np.array(rank_D)==1) / float(len(rank_D))
R5 = np.sum(np.array(rank_D)<=5) / float(len(rank_D))
R10 = np.sum(np.array(rank_D)<=10) / float(len(rank_D))
ave = np.sum(np.array(rank_D)) / float(len(rank_D))
mrr = np.sum(1/(np.array(rank_D, dtype='float'))) / float(len(rank_D))
print ('Discriminator: %d/%d: mrr: %f R1: %f R5 %f R10 %f Mean %f' %(epoch, len(dataloader_val), mrr, R1, R5, R10, ave))
val_his_D = {'R1': R1, 'R5':R5, 'R10': R10, 'Mean':ave, 'mrr':mrr}
history.append({'epoch':epoch, 'train': train_his, 'val_G': val_his_G, 'val_D':val_his_D})
# saving the model.
if epoch % opt.save_iter == 0:
torch.save({'epoch': epoch,
'opt': opt,
'netW_d': netW_d.state_dict(),
'netW_g': netW_g.state_dict(),
'netG': netG.state_dict(),
'netD': netD.state_dict(),
'netE_d': netE_d.state_dict(),
'netE_g': netE_g.state_dict()},
'%s/epoch_%d.pth' % (save_path, epoch))
json.dump(history, open('%s/log.json' %(save_path), 'w'))
|
{
"content_hash": "87091a6003c1259f9c312b0d05c04d7f",
"timestamp": "",
"source": "github",
"line_count": 618,
"max_line_length": 128,
"avg_line_length": 41.859223300970875,
"alnum_prop": 0.5816614480652519,
"repo_name": "jiasenlu/visDial.pytorch",
"id": "65ea077ef591df269b740d1487ccb068564c595b",
"size": "25869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train/train_all.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "170342"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
# Third-party imports
import tensorflow as tf
from sklearn.metrics import confusion_matrix
import numpy as np
# Our own module
import load
train_samples, train_labels = load._train_samples, load._train_labels
test_samples, test_labels = load._test_samples, load._test_labels
print('Training set', train_samples.shape, train_labels.shape)
print(' Test set', test_samples.shape, test_labels.shape)
image_size = load.image_size
num_labels = load.num_labels
num_channels = load.num_channels
def get_chunk(samples, labels, chunkSize):
    '''
    Iterator/Generator: get a batch of data
    This function is an iterator/generator that yields only chunkSize
    samples (and matching labels) per step, for use in a for loop,
    just like the range() function
    '''
    if len(samples) != len(labels):
        raise Exception('Length of samples and labels must equal')
    stepStart = 0  # initial step
    i = 0
    while stepStart < len(samples):
        stepEnd = stepStart + chunkSize
        # yield only full chunks: the placeholders below use a fixed batch
        # dimension, so a trailing partial chunk is skipped
        if stepEnd <= len(samples):
            yield i, samples[stepStart:stepEnd], labels[stepStart:stepEnd]
            i += 1
        stepStart = stepEnd
class Network():
def __init__(self, num_hidden, batch_size):
        '''
        @num_hidden: number of nodes in the hidden layer
        @batch_size: we process the data in batches to save memory;
                     this is the number of samples per batch.
        '''
self.batch_size = batch_size
self.test_batch_size = 500
# Hyper Parameters
self.num_hidden = num_hidden
# Graph Related
self.graph = tf.Graph()
self.tf_train_samples = None
self.tf_train_labels = None
self.tf_test_samples = None
self.tf_test_labels = None
self.tf_test_prediction = None
        # TensorBoard summaries
self.merged = None
        # Initialization
self.define_graph()
self.session = tf.Session(graph=self.graph)
self.writer = tf.train.SummaryWriter('./board', self.graph)
def define_graph(self):
'''
        Define our computation graph
'''
with self.graph.as_default():
            # here we only define the variables of the graph
with tf.name_scope('inputs'):
self.tf_train_samples = tf.placeholder(
tf.float32, shape=(self.batch_size, image_size, image_size, num_channels), name='tf_train_samples'
)
self.tf_train_labels = tf.placeholder(
tf.float32, shape=(self.batch_size, num_labels), name='tf_train_labels'
)
self.tf_test_samples = tf.placeholder(
tf.float32, shape=(self.test_batch_size, image_size, image_size, num_channels), name='tf_test_samples'
)
# fully connected layer 1, fully connected
with tf.name_scope('fc1'):
fc1_weights = tf.Variable(
tf.truncated_normal([image_size * image_size, self.num_hidden], stddev=0.1), name='fc1_weights'
)
fc1_biases = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]), name='fc1_biases')
tf.histogram_summary('fc1_weights', fc1_weights)
tf.histogram_summary('fc1_biases', fc1_biases)
# fully connected layer 2 --> output layer
with tf.name_scope('fc2'):
fc2_weights = tf.Variable(
tf.truncated_normal([self.num_hidden, num_labels], stddev=0.1), name='fc2_weights'
)
fc2_biases = tf.Variable(tf.constant(0.1, shape=[num_labels]), name='fc2_biases')
tf.histogram_summary('fc2_weights', fc2_weights)
tf.histogram_summary('fc2_biases', fc2_biases)
            # now define the operations of the graph
def model(data):
# fully connected layer 1
shape = data.get_shape().as_list()
reshape = tf.reshape(data, [shape[0], shape[1] * shape[2] * shape[3]])
with tf.name_scope('fc1_model'):
fc1_model = tf.matmul(reshape, fc1_weights) + fc1_biases
hidden = tf.nn.relu(fc1_model)
# fully connected layer 2
with tf.name_scope('fc2_model'):
return tf.matmul(hidden, fc2_weights) + fc2_biases
# Training computation.
logits = model(self.tf_train_samples)
with tf.name_scope('loss'):
self.loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels)
)
tf.scalar_summary('Loss', self.loss)
# Optimizer.
with tf.name_scope('optimizer'):
self.optimizer = tf.train.GradientDescentOptimizer(0.0001).minimize(self.loss)
# Predictions for the training, validation, and test data.
with tf.name_scope('predictions'):
self.train_prediction = tf.nn.softmax(logits, name='train_prediction')
self.test_prediction = tf.nn.softmax(model(self.tf_test_samples), name='test_prediction')
self.merged = tf.merge_all_summaries()
def run(self):
'''
        Run the graph in a Session
'''
# private function
def print_confusion_matrix(confusionMatrix):
print('Confusion Matrix:')
for i, line in enumerate(confusionMatrix):
print(line, line[i]/np.sum(line))
a = 0
for i, column in enumerate(np.transpose(confusionMatrix, (1, 0))):
a += (column[i]/np.sum(column))*(np.sum(column)/26000)
print(column[i]/np.sum(column),)
print('\n',np.sum(confusionMatrix), a)
with self.session as session:
tf.initialize_all_variables().run()
            ### Training
print('Start Training')
            # iterate over the training set in mini-batches
for i, samples, labels in get_chunk(train_samples, train_labels, chunkSize=self.batch_size):
_, l, predictions, summary = session.run(
[self.optimizer, self.loss, self.train_prediction, self.merged],
feed_dict={self.tf_train_samples: samples, self.tf_train_labels: labels}
)
self.writer.add_summary(summary, i)
# labels is True Labels
accuracy, _ = self.accuracy(predictions, labels)
if i % 50 == 0:
print('Minibatch loss at step %d: %f' % (i, l))
print('Minibatch accuracy: %.1f%%' % accuracy)
###
            ### Testing
accuracies = []
confusionMatrices = []
for i, samples, labels in get_chunk(test_samples, test_labels, chunkSize=self.test_batch_size):
result = self.test_prediction.eval(feed_dict={self.tf_test_samples: samples})
accuracy, cm = self.accuracy(result, labels, need_confusion_matrix=True)
accuracies.append(accuracy)
confusionMatrices.append(cm)
print('Test Accuracy: %.1f%%' % accuracy)
print(' Average Accuracy:', np.average(accuracies))
print('Standard Deviation:', np.std(accuracies))
print_confusion_matrix(np.add.reduce(confusionMatrices))
###
def accuracy(self, predictions, labels, need_confusion_matrix=False):
'''
        Compute the prediction accuracy and, optionally, the confusion matrix
@return: accuracy and confusionMatrix as a tuple
'''
_predictions = np.argmax(predictions, 1)
_labels = np.argmax(labels, 1)
cm = confusion_matrix(_labels, _predictions) if need_confusion_matrix else None
# == is overloaded for numpy array
accuracy = (100.0 * np.sum(_predictions == _labels) / predictions.shape[0])
return accuracy, cm
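    # Tiny worked example (illustrative numbers): with predictions
    # [[0.9, 0.1], [0.2, 0.8]] and one-hot labels [[1, 0], [0, 1]], the row
    # argmaxes agree in 2 of 2 cases, so accuracy() returns 100.0.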
if __name__ == '__main__':
net = Network(num_hidden=128, batch_size=100)
net.run()
|
{
"content_hash": "a5116782606f19b287f4afd958b530af",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 107,
"avg_line_length": 31.915422885572138,
"alnum_prop": 0.6807482462977397,
"repo_name": "karst87/ml",
"id": "03040f2741cf92f54d6eb1580bb152a002df5778",
"size": "6702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01_openlibs/tensorflow/02_tfgirls/TensorFlow-and-DeepLearning-Tutorial-master/Season1/10-11/dp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "718"
},
{
"name": "Jupyter Notebook",
"bytes": "2245712"
},
{
"name": "Python",
"bytes": "180525"
}
],
"symlink_target": ""
}
|
"""Parser for syslog formatted log files.
Also see:
* https://www.rsyslog.com/doc/v8-stable/configuration/templates.html
"""
import re
from dfdatetime import time_elements as dfdatetime_time_elements
import pyparsing
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import errors
from plaso.parsers import logger
from plaso.parsers import manager
from plaso.parsers import text_parser
class SyslogLineEventData(events.EventData):
"""Syslog line event data.
Attributes:
body (str): message body.
hostname (str): hostname of the reporter.
pid (str): process identifier of the reporter.
reporter (str): reporter.
severity (str): severity.
"""
DATA_TYPE = 'syslog:line'
def __init__(self, data_type=DATA_TYPE):
"""Initializes an event data attribute container.
Args:
data_type (Optional[str]): event data type indicator.
"""
super(SyslogLineEventData, self).__init__(data_type=data_type)
self.body = None
self.hostname = None
self.pid = None
self.reporter = None
self.severity = None
class SyslogCommentEventData(events.EventData):
"""Syslog comment event data.
Attributes:
body (str): message body.
"""
DATA_TYPE = 'syslog:comment'
def __init__(self):
"""Initializes event data."""
super(SyslogCommentEventData, self).__init__(data_type=self.DATA_TYPE)
self.body = None
class SyslogParser(text_parser.PyparsingMultiLineTextParser):
"""Parses syslog formatted log files"""
NAME = 'syslog'
DATA_FORMAT = 'System log (syslog) file'
_ENCODING = 'utf-8'
_plugin_classes = {}
# The reporter and facility fields can contain any printable character, but
# to allow for processing of syslog formats that delimit the reporter and
# facility with printable characters, we remove certain common delimiters
# from the set of printable characters.
_REPORTER_CHARACTERS = ''.join(
[c for c in pyparsing.printables if c not in [':', '[', '<']])
_FACILITY_CHARACTERS = ''.join(
[c for c in pyparsing.printables if c not in [':', '>']])
_SYSLOG_SEVERITY = [
'EMERG',
'ALERT',
'CRIT',
'ERR',
'WARNING',
'NOTICE',
'INFO',
'DEBUG']
# TODO: change pattern to allow only spaces as a field separator.
_BODY_PATTERN = (
r'.*?(?=($|\n\w{3}\s+\d{1,2}\s\d{2}:\d{2}:\d{2})|' \
r'($|\n\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}' \
r'[\+|-]\d{2}:\d{2}\s))')
# The rsyslog file format (RSYSLOG_FileFormat) consists of:
# %TIMESTAMP% %HOSTNAME% %syslogtag%%msg%
#
# Where %TIMESTAMP% is in RFC-3339 date time format e.g.
# 2020-05-31T00:00:45.698463+00:00
_RSYSLOG_VERIFICATION_PATTERN = (
r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.'
r'\d{6}[\+|-]\d{2}:\d{2} ' + _BODY_PATTERN)
# The rsyslog traditional file format (RSYSLOG_TraditionalFileFormat)
# consists of:
# %TIMESTAMP% %HOSTNAME% %syslogtag%%msg%
#
# Where %TIMESTAMP% is in yearless ctime date time format e.g.
# Jan 22 07:54:32
# TODO: change pattern to allow only spaces as a field separator.
_RSYSLOG_TRADITIONAL_VERIFICATION_PATTERN = (
r'^\w{3}\s+\d{1,2}\s\d{2}:\d{2}:\d{2}\s' + _BODY_PATTERN)
# The Chrome OS syslog messages are of a format beginning with an
# ISO 8601 combined date and time expression with timezone designator:
# 2016-10-25T12:37:23.297265-07:00
#
# This will then be followed by the SYSLOG Severity which will be one of:
# EMERG,ALERT,CRIT,ERR,WARNING,NOTICE,INFO,DEBUG
#
# 2016-10-25T12:37:23.297265-07:00 INFO
_CHROMEOS_VERIFICATION_PATTERN = (
r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.'
r'\d{6}[\+|-]\d{2}:\d{2}\s'
r'(EMERG|ALERT|CRIT|ERR|WARNING|NOTICE|INFO|DEBUG)' + _BODY_PATTERN)
# Bundle all verification patterns into a single regular expression.
_VERIFICATION_REGEX = re.compile('({0:s})'.format('|'.join([
_CHROMEOS_VERIFICATION_PATTERN, _RSYSLOG_VERIFICATION_PATTERN,
_RSYSLOG_TRADITIONAL_VERIFICATION_PATTERN])))
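  # Example lines accepted by the verification expression (illustrative,
  # using the timestamps quoted in the comments above):
  #   2016-10-25T12:37:23.297265-07:00 INFO kernel[123]: some message
  #   2020-05-31T00:00:45.698463+00:00 hostname reporter[123]: some message
  #   Jan 22 07:54:32 hostname reporter[123]: some message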
_PYPARSING_COMPONENTS = {
'year': text_parser.PyparsingConstants.FOUR_DIGITS.setResultsName(
'year'),
'two_digit_month': (
text_parser.PyparsingConstants.TWO_DIGITS.setResultsName(
'two_digit_month')),
'month': text_parser.PyparsingConstants.MONTH.setResultsName('month'),
'day': text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName(
'day'),
'hour': text_parser.PyparsingConstants.TWO_DIGITS.setResultsName(
'hour'),
'minute': text_parser.PyparsingConstants.TWO_DIGITS.setResultsName(
'minute'),
'second': text_parser.PyparsingConstants.TWO_DIGITS.setResultsName(
'second'),
'fractional_seconds': pyparsing.Word(pyparsing.nums).setResultsName(
'fractional_seconds'),
'hostname': pyparsing.Word(pyparsing.printables).setResultsName(
'hostname'),
'reporter': pyparsing.Word(_REPORTER_CHARACTERS).setResultsName(
'reporter'),
'pid': text_parser.PyparsingConstants.PID.setResultsName('pid'),
'facility': pyparsing.Word(_FACILITY_CHARACTERS).setResultsName(
'facility'),
'severity': pyparsing.oneOf(_SYSLOG_SEVERITY).setResultsName('severity'),
'body': pyparsing.Regex(_BODY_PATTERN, re.DOTALL).setResultsName('body'),
'comment_body': pyparsing.SkipTo(' ---').setResultsName('body')
}
_PYPARSING_COMPONENTS['date'] = (
_PYPARSING_COMPONENTS['month'] +
_PYPARSING_COMPONENTS['day'] +
_PYPARSING_COMPONENTS['hour'] + pyparsing.Suppress(':') +
_PYPARSING_COMPONENTS['minute'] + pyparsing.Suppress(':') +
_PYPARSING_COMPONENTS['second'] + pyparsing.Optional(
pyparsing.Suppress('.') +
_PYPARSING_COMPONENTS['fractional_seconds']))
_PYPARSING_COMPONENTS['rfc3339_datetime'] = pyparsing.Combine(
pyparsing.Word(pyparsing.nums, exact=4) + pyparsing.Literal('-') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal('-') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal('T') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal(':') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal(':') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal('.') +
pyparsing.Word(pyparsing.nums, exact=6) + pyparsing.oneOf(['-', '+']) +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Optional(
pyparsing.Literal(':') + pyparsing.Word(pyparsing.nums, exact=2)),
joinString='', adjacent=True)
_CHROMEOS_SYSLOG_LINE = (
_PYPARSING_COMPONENTS['rfc3339_datetime'].setResultsName('datetime') +
_PYPARSING_COMPONENTS['severity'] +
_PYPARSING_COMPONENTS['reporter'] +
pyparsing.Optional(pyparsing.Suppress(':')) +
pyparsing.Optional(
pyparsing.Suppress('[') + _PYPARSING_COMPONENTS['pid'] +
pyparsing.Suppress(']')) +
pyparsing.Optional(pyparsing.Suppress(':')) +
_PYPARSING_COMPONENTS['body'] + pyparsing.lineEnd())
_RSYSLOG_LINE = (
_PYPARSING_COMPONENTS['rfc3339_datetime'].setResultsName('datetime') +
_PYPARSING_COMPONENTS['hostname'] +
_PYPARSING_COMPONENTS['reporter'] +
pyparsing.Optional(
pyparsing.Suppress('[') + _PYPARSING_COMPONENTS['pid'] +
pyparsing.Suppress(']')) +
pyparsing.Optional(
pyparsing.Suppress('<') + _PYPARSING_COMPONENTS['facility'] +
pyparsing.Suppress('>')) +
pyparsing.Optional(pyparsing.Suppress(':')) +
_PYPARSING_COMPONENTS['body'] + pyparsing.lineEnd())
_RSYSLOG_TRADITIONAL_LINE = (
_PYPARSING_COMPONENTS['date'] +
_PYPARSING_COMPONENTS['hostname'] +
_PYPARSING_COMPONENTS['reporter'] +
pyparsing.Optional(
pyparsing.Suppress('[') + _PYPARSING_COMPONENTS['pid'] +
pyparsing.Suppress(']')) +
pyparsing.Optional(
pyparsing.Suppress('<') + _PYPARSING_COMPONENTS['facility'] +
pyparsing.Suppress('>')) +
pyparsing.Optional(pyparsing.Suppress(':')) +
_PYPARSING_COMPONENTS['body'] + pyparsing.lineEnd())
_SYSLOG_COMMENT = (
_PYPARSING_COMPONENTS['date'] + pyparsing.Suppress(':') +
pyparsing.Suppress('---') + _PYPARSING_COMPONENTS['comment_body'] +
pyparsing.Suppress('---') + pyparsing.LineEnd())
_KERNEL_SYSLOG_LINE = (
_PYPARSING_COMPONENTS['date'] +
pyparsing.Literal('kernel').setResultsName('reporter') +
pyparsing.Suppress(':') + _PYPARSING_COMPONENTS['body'] +
pyparsing.lineEnd())
LINE_STRUCTURES = [
('chromeos_syslog_line', _CHROMEOS_SYSLOG_LINE),
('kernel_syslog_line', _KERNEL_SYSLOG_LINE),
('rsyslog_line', _RSYSLOG_LINE),
('rsyslog_traditional_line', _RSYSLOG_TRADITIONAL_LINE),
('syslog_comment', _SYSLOG_COMMENT)]
_SUPPORTED_KEYS = frozenset([key for key, _ in LINE_STRUCTURES])
def __init__(self):
"""Initializes a parser."""
super(SyslogParser, self).__init__()
self._last_month = 0
self._maximum_year = 0
self._plugin_by_reporter = {}
self._year_use = 0
def _UpdateYear(self, mediator, month):
"""Updates the year to use for events, based on last observed month.
Args:
mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
month (int): month observed by the parser, where January is 1.
"""
if not self._year_use:
self._year_use = mediator.GetEstimatedYear()
if not self._maximum_year:
self._maximum_year = mediator.GetLatestYear()
if not self._last_month:
self._last_month = month
return
    # Some syslog daemons emit out-of-order sequences, so allow some leeway
    # so that an Apr->May->Apr sequence does not cause the year to increment.
# See http://bugzilla.adiscon.com/show_bug.cgi?id=527
if self._last_month > (month + 1):
if self._year_use != self._maximum_year:
self._year_use += 1
self._last_month = month
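  # Worked example (hypothetical values): with an estimated year of 2016 and
  # observed months 11 (Nov), 12 (Dec) and then 1 (Jan), the check above fires
  # on the Dec -> Jan transition because 12 > (1 + 1), so events after the
  # rollover are attributed to the following year.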
def EnablePlugins(self, plugin_includes):
"""Enables parser plugins.
Args:
plugin_includes (list[str]): names of the plugins to enable, where None
or an empty list represents all plugins. Note that the default plugin
is handled separately.
"""
super(SyslogParser, self).EnablePlugins(plugin_includes)
self._plugin_by_reporter = {}
for plugin in self._plugins:
self._plugin_by_reporter[plugin.REPORTER] = plugin
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a matching entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): elements parsed from the file.
Raises:
ParseError: when the structure type is unknown.
"""
if key not in self._SUPPORTED_KEYS:
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
if key in ('chromeos_syslog_line', 'rsyslog_line'):
date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()
iso8601_string = self._GetValueFromStructure(structure, 'datetime')
try:
date_time.CopyFromStringISO8601(iso8601_string)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0:s}'.format(iso8601_string))
return
else:
# TODO: add support for fractional seconds.
month = self._GetValueFromStructure(structure, 'month')
try:
month = self._MONTH_DICT.get(month.lower(), 0)
except AttributeError:
parser_mediator.ProduceExtractionWarning(
'invalid month value: {0!s}'.format(month))
return
if month != 0:
self._UpdateYear(parser_mediator, month)
day = self._GetValueFromStructure(structure, 'day')
hours = self._GetValueFromStructure(structure, 'hour')
minutes = self._GetValueFromStructure(structure, 'minute')
seconds = self._GetValueFromStructure(structure, 'second')
time_elements_tuple = (
self._year_use, month, day, hours, minutes, seconds)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(time_elements_tuple))
return
plugin = None
if key == 'syslog_comment':
event_data = SyslogCommentEventData()
event_data.body = self._GetValueFromStructure(structure, 'body')
else:
event_data = SyslogLineEventData()
event_data.body = self._GetValueFromStructure(structure, 'body')
event_data.hostname = self._GetValueFromStructure(structure, 'hostname')
event_data.pid = self._GetValueFromStructure(structure, 'pid')
event_data.reporter = self._GetValueFromStructure(structure, 'reporter')
event_data.severity = self._GetValueFromStructure(structure, 'severity')
plugin = self._plugin_by_reporter.get(event_data.reporter, None)
if plugin:
attributes = {
'body': event_data.body,
'hostname': event_data.hostname,
'pid': event_data.pid,
'reporter': event_data.reporter,
'severity': event_data.severity}
file_entry = parser_mediator.GetFileEntry()
display_name = parser_mediator.GetDisplayName(file_entry)
logger.debug('Parsing file: {0:s} with plugin: {1:s}'.format(
display_name, plugin.NAME))
try:
# TODO: pass event_data instead of attributes.
plugin.Process(parser_mediator, date_time, attributes)
except errors.WrongPlugin:
plugin = None
if not plugin:
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN,
time_zone=parser_mediator.timezone)
parser_mediator.ProduceEventWithEventData(event, event_data)
def VerifyStructure(self, parser_mediator, lines):
"""Verifies that this is a syslog-formatted file.
Args:
parser_mediator (ParserMediator): mediates interactions between
parsers and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
return bool(self._VERIFICATION_REGEX.match(lines))
manager.ParsersManager.RegisterParser(SyslogParser)
|
{
"content_hash": "cc88b3a64f3c62baa7e0d3ba9f440109",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 79,
"avg_line_length": 36.62282878411911,
"alnum_prop": 0.651737922623484,
"repo_name": "kiddinn/plaso",
"id": "c211781f88b65136b483cc395466b8da9d685406",
"size": "14783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/parsers/syslog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1047"
},
{
"name": "Makefile",
"bytes": "68"
},
{
"name": "PowerShell",
"bytes": "9560"
},
{
"name": "Python",
"bytes": "4878625"
},
{
"name": "Ruby",
"bytes": "926"
},
{
"name": "Shell",
"bytes": "26453"
}
],
"symlink_target": ""
}
|
"""Denmark specific Form helpers."""
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.forms import fields, widgets
from django.utils.translation import ugettext_lazy as _
from localflavor.deprecation import DeprecatedPhoneNumberFormFieldMixin
from .dk_municipalities import DK_MUNICIPALITIES
from .dk_postalcodes import DK_POSTALCODES
def postal_code_validator(value):
if value not in [entry[0] for entry in DK_POSTALCODES]:
raise ValidationError(_('Enter a postal code in the format XXXX.'))
class DKPostalCodeField(fields.CharField):
"""An Input widget that uses a list of Danish postal codes as valid input."""
default_validators = [postal_code_validator]
class DKMunicipalitySelect(widgets.Select):
"""A Select widget that uses a list of Danish municipalities (kommuner) as its choices."""
def __init__(self, attrs=None, *args, **kwargs):
super(DKMunicipalitySelect, self).__init__(
attrs,
choices=DK_MUNICIPALITIES,
*args,
**kwargs
)
class DKPhoneNumberField(fields.RegexField, DeprecatedPhoneNumberFormFieldMixin):
"""
Field with phone number validation.
Requires a phone number with 8 digits and optional country code.
.. deprecated:: 1.4
Use the django-phonenumber-field_ library instead.
.. _django-phonenumber-field: https://github.com/stefanfoulis/django-phonenumber-field
"""
default_error_messages = {
'invalid': _(
            'A phone number must be 8 digits and may have a country code'
),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(DKPhoneNumberField, self).__init__(
r'^(?:\+45)? ?(\d{2}\s?\d{2}\s?\d{2}\s?\d{2})$',
max_length,
min_length,
*args,
**kwargs
)
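# Illustrative usage sketch (form name is hypothetical):
#   from django import forms
#   class DanishContactForm(forms.Form):
#       postal_code = DKPostalCodeField()
#       municipality = forms.CharField(widget=DKMunicipalitySelect())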
|
{
"content_hash": "33e283620006199f5cbcd443e36bb831",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 94,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.6577708006279435,
"repo_name": "thor/django-localflavor",
"id": "83305ad4b438b4eed0c44748cb0ad5c82e9f68e1",
"size": "1911",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "localflavor/dk/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "924448"
}
],
"symlink_target": ""
}
|
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
import yamlstratus
here = os.path.abspath(os.path.dirname(__file__))
long_description = """
YAML Stratus provides extensions to YAML that make it amenable to use with AWS
CloudFormation, as well as a python binding,
and a tool for converting to the standard JSON used by CloudFormation.
Standard YAML capabilities not part of JSON:
* comments
* back references
* block literals
Note also that JSON is YAML, so familiar JSON data can be embedded within YAML.
Extension to YAML provided by YAML Stratus:
* !include - YAML files can include other YAML files
* !param - YAML files can include parameters
* !merge - Allows the merging of data from two source data hierarchies
"""
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(
name='yamlstratus',
scripts=['scripts/ystratus.py'],
version=yamlstratus.__version__,
url='https://github.com/kikinteractive/yaml-stratus/',
download_url='https://github.com/kikinteractive/yaml-stratus/tarball/0.1',
license='Apache Software License',
author='Kik Interactive',
tests_require=['pytest'],
cmdclass={'test': PyTest},
    description='Python library for the yamlstratus builder',
long_description=long_description,
packages=['yamlstratus'],
include_package_data=True,
platforms='any',
test_suite='yamlstratus.test.test_yamlstratus',
classifiers=[
'Programming Language :: Python',
'Natural Language :: English',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
"Topic :: Text Processing :: Markup",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7"
],
install_requires=[
'PyYAML',
],
extras_require={
'testing': ['pytest'],
}
)
|
{
"content_hash": "9fc6807d8ec9878cd8499d59058b5632",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 29.063291139240505,
"alnum_prop": 0.6750871080139372,
"repo_name": "kikinteractive/yaml-stratus",
"id": "37ddea3f4f41d6e4c9765fbb38de7579e3cdc532",
"size": "2296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "33689"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement, print_function
import pytest
try:
from unittest import mock
except ImportError:
import mock
from k2catalogue import proposal_urls
@pytest.fixture
def campaign():
return 1
@pytest.fixture
def mapper(campaign):
return proposal_urls.BuildCampaignMapping(campaign)
def create_mock_row(proposal_id, pi, title, url):
return mock.Mock(find_all=lambda *args: [
mock.Mock(string=proposal_id),
mock.Mock(string=pi),
mock.Mock(string=title),
mock.Mock(a={'href': url})])
@pytest.fixture
def mock_row():
return create_mock_row('GO1001', 'Giampapa',
'Characterizing the Variability of the Nearby Late-Type Dwarf Stars',
'docs/Campaigns/C1/GO1001_Giampapa.pdf')
def test_build_mapping(mapper, mock_row):
with mock.patch('k2catalogue.proposal_urls.BuildCampaignMapping.table_rows',
new_callable=mock.PropertyMock) as mock_table_rows:
mock_table_rows.return_value = [mock_row, ]
mapping = mapper.create()
assert mapping['GO1001'] == {
'pi': 'Giampapa',
'title': ('Characterizing the Variability of the Nearby '
'Late-Type Dwarf Stars'),
'url': 'http://keplerscience.arc.nasa.gov/K2/docs/Campaigns/C1/GO1001_Giampapa.pdf'}
def test_build_url(mapper):
assert 'C01' in mapper.url
def test_response(mapper):
assert mapper.response.status_code == 200
def test_soup(mapper):
assert hasattr(mapper.soup, 'find_all')
def test_find_table(mapper):
assert mapper.table
def test_extract_contents(mapper, mock_row):
result = mapper.extract_contents(mock_row)
assert result == ('GO1001', 'Giampapa',
'Characterizing the Variability of the Nearby Late-Type Dwarf Stars',
'http://keplerscience.arc.nasa.gov/K2/docs/Campaigns/C1/GO1001_Giampapa.pdf')
def test_invalid_html(mapper):
entries = (
mock.Mock(string='proposal_id'),
None,
None,
None,
)
row = mock.Mock(find_all=lambda *args: entries)
result = mapper.extract_contents(row)
assert result is None
|
{
"content_hash": "301466bc0a45a4e50e1819b23ceb46bc",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 99,
"avg_line_length": 27.17283950617284,
"alnum_prop": 0.6456156292594275,
"repo_name": "mindriot101/k2catalogue",
"id": "8ecce0f5cd0d8a77b03aff479ea4446107156798",
"size": "2201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/test_proposal_mapping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21575"
}
],
"symlink_target": ""
}
|
from jupyter_core.paths import jupyter_data_dir
import subprocess
import os
import errno
import stat
PEM_FILE = os.path.join(jupyter_data_dir(), 'notebook.pem')
c = get_config()
c.NotebookApp.ip = os.getenv('INTERFACE', '') or '*'
c.NotebookApp.port = int(os.getenv('PORT', '') or 8888)
c.NotebookApp.open_browser = False
# Set a certificate if USE_HTTPS is set to any value
if 'USE_HTTPS' in os.environ:
if not os.path.isfile(PEM_FILE):
# Ensure PEM_FILE directory exists
dir_name = os.path.dirname(PEM_FILE)
try:
os.makedirs(dir_name)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(dir_name):
pass
else: raise
# Generate a certificate if one doesn't exist on disk
subprocess.check_call(['openssl', 'req', '-new',
'-newkey', 'rsa:2048', '-days', '365', '-nodes', '-x509',
'-subj', '/C=XX/ST=XX/L=XX/O=generated/CN=generated',
'-keyout', PEM_FILE, '-out', PEM_FILE])
# Restrict access to PEM_FILE
os.chmod(PEM_FILE, stat.S_IRUSR | stat.S_IWUSR)
c.NotebookApp.certfile = PEM_FILE
# Set a password if PASSWORD is set
if 'PASSWORD' in os.environ:
from IPython.lib import passwd
c.NotebookApp.password = passwd(os.environ['PASSWORD'])
del os.environ['PASSWORD']
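# Illustrative invocation: enable HTTPS and set a login password when starting
# the container (image tag is hypothetical):
#   docker run -e USE_HTTPS=yes -e PASSWORD=secret -p 8888:8888 jupyter/minimal-notebook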
|
{
"content_hash": "b511b87e94a63047cf5286a67468789c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 69,
"avg_line_length": 36.078947368421055,
"alnum_prop": 0.6258205689277899,
"repo_name": "torz/docker-stacks",
"id": "e8343b35e5ad5687242143fbbd98cd732ce31379",
"size": "1413",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "minimal-notebook/jupyter_notebook_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "528"
},
{
"name": "Python",
"bytes": "1413"
},
{
"name": "Shell",
"bytes": "406"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.contrib.auth.models import User, Group
from categories.models import CategoryBase
from django.db import models
from datetime import datetime
class Skill(CategoryBase):
class Meta:
verbose_name = 'Skill'
verbose_name_plural = 'Skills'
def __str__(self):
return self.name
def __unicode__(self):
return str(self.name)
class Interest(CategoryBase):
class Meta:
verbose_name = 'Interest'
verbose_name_plural = 'Interests'
def __str__(self):
return self.name
def __unicode__(self):
return str(self.name)
class LMS(models.Model):
name = models.CharField(max_length=128)
url = models.CharField(max_length=128)
class Meta:
verbose_name = 'LMS'
verbose_name_plural = 'LMS'
def __str__(self):
return self.name
def __unicode__(self):
return self.name
class LMS_Web_Service(models.Model):
web_service_name = models.CharField(max_length=128)
# depending on the options we might be able to do a choicefield here
web_service_method = models.CharField(max_length=128)
web_service_url = models.CharField(max_length=128)
class Meta:
verbose_name = 'LMS Web Service'
verbose_name_plural = 'LMS Web Services'
def __str__(self):
return self.web_service_name + " - " + self.web_service_method
def __unicode__(self):
return self.web_service_name
class School(models.Model):
lms = models.ForeignKey(LMS, on_delete=models.CASCADE)
name = models.CharField(max_length=128)
url = models.CharField(max_length=128)
def __str__(self):
return self.name
def __unicode__(self):
return self.name
class User_Add_Ons(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
school = models.ForeignKey(School, on_delete=models.CASCADE)
    # The user's ID within their LMS
lms = models.ForeignKey(LMS, on_delete=models.CASCADE)
class Meta:
verbose_name = 'User Add-ons'
verbose_name_plural = 'User Add-ons'
def __str__(self):
return self.user.username + " - " + self.school.name
def __unicode__(self):
return str(self.user)
class Volunteer_User_Add_Ons(models.Model):
"""
    The name of this model is incorrect, but it is left unchanged for now
    because renaming it would imply updating many interfaces.
"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
phone = models.CharField(max_length=13, )
canGetText = models.BooleanField(default=True)
workTitle = models.CharField(max_length=25)
isBusinessOwner = models.BooleanField(default=True)
workIndustry = models.CharField(max_length=25)
yearsInIndustry = models.IntegerField()
linkedinProfile = models.CharField(max_length=128, null=True, blank=True, )
hsGradChoices = (
(1, '1-4'),
(2, '5-10'),
(3, '11 or more'),
(4, 'Have not graduated'),)
yearsSinceHSGraduation = models.IntegerField(choices=hsGradChoices)
collegeLevelChoice = (
(1, "Associate"),
(2, "Bachelor's"),
(3, "Master's"),
(4, "Doctoral"),
(5, "None"),)
collegeLevel = models.IntegerField(choices=collegeLevelChoice)
collegeMajor = models.CharField(max_length=128, null=True, blank=True)
    # Note: null and max_length have no effect on ManyToManyField, so only
    # blank is declared here.
    skills = models.ManyToManyField(Skill, blank=True)
    interests = models.ManyToManyField(Interest, blank=True)
# User_Skill_Map
# User_Interest_Map
class Meta:
verbose_name = 'Volunteer add-ons'
verbose_name_plural = 'Volunteer add-ons'
def __str__(self):
return self.user.username + " - " + self.workTitle
def __unicode__(self):
return "Volunteer: " + str(self.user)
# return "Volunteer: "
class User_Group_Role_Map(models.Model):
    group = models.ForeignKey(Group, on_delete=models.CASCADE)
user = models.ForeignKey(User_Add_Ons, on_delete=models.CASCADE)
role = models.CharField(max_length=128)
class Meta:
verbose_name = 'Role'
verbose_name_plural = 'Roles'
def __str__(self):
        return self.group.name + ": " + self.user.user.username + "-" + self.role
def __unicode__(self):
return str(self.group) + ': ' + str(self.user) + '-' + str(self.role)
class Class(models.Model):
school = models.ForeignKey(School, on_delete=models.CASCADE)
lms = models.ForeignKey(LMS, on_delete=models.CASCADE)
teacher = models.ForeignKey(User_Add_Ons, on_delete=models.CASCADE, )
name = models.CharField(max_length=128)
academic_year = models.IntegerField(default=None, null=True)
semester = models.CharField(max_length=128, default=None, null=True)
class Meta:
verbose_name = 'FAL Class'
verbose_name_plural = 'FAL Classes'
def __str__(self):
return self.name + " - " + self.teacher.user.first_name
def __unicode__(self):
return str(self.name) + ':' + str(self.teacher)
class Class_Group(models.Model):
group = models.OneToOneField(Group, on_delete=models.CASCADE)
falClass = models.ForeignKey(Class, on_delete=models.CASCADE)
class Meta:
verbose_name = 'Class Groups'
verbose_name_plural = 'Class Groups'
def __str__(self):
        return self.group.name + " - " + self.falClass.name
def __unicode__(self):
return str(self.group.name) + ':' + str(self.falClass.name)
class Student_Class(models.Model):
student = models.ForeignKey(User, on_delete=models.CASCADE, related_name='User')
falClass = models.ForeignKey(Class, on_delete=models.CASCADE)
def __str__(self):
return self.student.username + ": " + self.falClass.name
def __unicode__(self):
return str(self.student) + ':' + str(self.falClass)
class Assignment(models.Model):
title = models.CharField(max_length=128)
falClass = models.ForeignKey(Class, on_delete=models.CASCADE)
document = models.CharField(max_length=128, blank=True, null=True)
due_date = models.DateTimeField(blank=True, null=True)
creation_date = models.DateTimeField(auto_now_add=True)
description = models.CharField(max_length=256, blank=True, null=True)
def __str__(self):
return self.title + " - " + self.falClass.name
def __unicode__(self):
return str(self.title) + ' (' + str(self.falClass) + ')'
class Interview(models.Model):
interviewer = models.CharField(max_length=256)
interviewee = models.ForeignKey(User, on_delete=models.CASCADE, related_name='interviewee', )
    group = models.ForeignKey(Group, on_delete=models.CASCADE)
date = models.DateTimeField(default=datetime.now, blank=True)
assignment = models.ForeignKey(Assignment, on_delete=models.CASCADE, related_name='assignment')
def __str__(self):
return self.assignment.title + " - " + self.group.name
def __unicode__(self):
return 'Interview of ' + str(self.interviewee) + ' by ' + str(self.assignment)
class Question(models.Model):
name = models.CharField(max_length=128)
created_by = models.ForeignKey(User_Add_Ons, on_delete=models.CASCADE, )
creation_date = models.DateTimeField()
def __str__(self):
return self.name + " - " + self.created_by.user.username
def __unicode__(self):
return str(self.created_by) + ':' + str(self.name)
class Interview_Question_Map(models.Model):
interview = models.ForeignKey(Interview, on_delete=models.CASCADE, )
question = models.ForeignKey(Question, on_delete=models.CASCADE, )
class Meta:
verbose_name = 'Interview Question'
verbose_name_plural = 'Interview Questions'
def __str__(self):
return self.question.name + " - " + self.interview.interviewee.username
def __unicode__(self):
return str(self.question) + ' (' + str(self.interview) + ')'
class Answer(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE, )
result = models.CharField(max_length=128)
created_by = models.ForeignKey(User_Add_Ons, on_delete=models.CASCADE, )
creation_date = models.DateTimeField()
def __unicode__(self):
return str(self.question)
def __str__(self):
return self.question.name + "(" + self.result + ")"
class Video(models.Model):
# interview = models.ForeignKey(Interview, on_delete=models.CASCADE, null=True, blank=True, )
name = models.CharField(max_length=128)
url = models.CharField(max_length=128)
tags = models.CharField(max_length=128, null=True, blank=True, )
created_by = models.ForeignKey(User_Add_Ons, on_delete=models.CASCADE, )
creation_date = models.DateTimeField(default=datetime.now, blank=True)
status = models.CharField(max_length=128)
def __str__(self):
return str(self.name) + ' (' + str(self.creation_date) + ')'
def __unicode__(self):
return str(self.name) + ' (' + str(self.creation_date) + ')'
class Question_Video_Map(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE, )
video = models.ForeignKey(Video, on_delete=models.CASCADE, )
class Meta:
verbose_name = 'Video Question'
verbose_name_plural = 'Video Questions'
def __str__(self):
return str(self.question.name) + " - " + str(self.video.name)
def __unicode__(self):
return str(self.question) + ':' + str(self.video)
class Interview_Question_Video_Map(models.Model):
interview_question = models.ForeignKey(Interview_Question_Map, on_delete=models.CASCADE, )
video = models.ForeignKey(Video, on_delete=models.CASCADE, )
class Meta:
verbose_name = 'Interview Question Video'
verbose_name_plural = 'Interview Video Questions'
def __str__(self):
return str(self.interview_question.id) + " - " + str(self.video.name)
def __unicode__(self):
return str(self.interview_question) + '-' + str(self.video)
class Video_Comment(models.Model):
video = models.ForeignKey(Video, on_delete=models.CASCADE, )
comment = models.CharField(max_length=128)
created_by = models.ForeignKey(User_Add_Ons, on_delete=models.CASCADE, )
creation_date = models.DateTimeField()
class Meta:
verbose_name = 'Video Comment'
verbose_name_plural = 'Video Comments'
def __str__(self):
return self.video.name + ' (' + str(self.created_by) + ', ' + str(self.creation_date) + ')'
def __unicode__(self):
return str(self.video) + ' (' + str(self.created_by) + ', ' + str(self.creation_date) + ')'
class Assignment_Submission(models.Model):
name = models.CharField(max_length=128)
    group = models.ForeignKey(Group, on_delete=models.CASCADE)
class Meta:
verbose_name = 'Submission'
verbose_name_plural = 'Submissions'
def __str__(self):
return str(self.group.name) + ':' + str(self.name)
def __unicode__(self):
return str(self.group) + ':' + str(self.name)
class Type(models.Model):
name = models.CharField(max_length=128)
def __str__(self):
return self.name
def __unicode__(self):
return str(self.name)
class User_Type(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
    type = models.ForeignKey(Type, on_delete=models.CASCADE)
def __str__(self):
return str(self.user.username) + ':' + str(self.type.name)
def __unicode__(self):
return str(self.user.username) + ':' + str(self.type.name)
class Submission_Interview_Map(models.Model):
submission = models.ForeignKey(Assignment_Submission, on_delete=models.CASCADE, )
interview = models.ForeignKey(Interview, on_delete=models.CASCADE, )
class Meta:
verbose_name = 'Interview Submission'
verbose_name_plural = 'Interview Submissions'
def __str__(self):
return str(self.submission.name) + ':' + str(self.interview.interviewee)
def __unicode__(self):
return str(self.submission) + ':' + str(self.interview)
|
{
"content_hash": "0475cab061d06ece180bb223d1e761ef",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 121,
"avg_line_length": 31.817460317460316,
"alnum_prop": 0.6482913444749314,
"repo_name": "foraliving/pilot",
"id": "04c15734eb1d02dcc1b467a30ecd4a0840bc381c",
"size": "12052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foraliving/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1049"
},
{
"name": "CSS",
"bytes": "215607"
},
{
"name": "HTML",
"bytes": "293009"
},
{
"name": "JavaScript",
"bytes": "636123"
},
{
"name": "Python",
"bytes": "176766"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from . import views
app_name = 'evening_program'
urlpatterns = [
url(r'^$', views.evening_program, name='index'),
url(r'^neu/$', views.ProgramCreate.as_view(success_url='/heimabende'), name='add'),
url(r'^(?P<slug>[\w-]+)/$', views.evening_program_details, name='details'),
]
|
{
"content_hash": "41ac61517079f2910013bfb74a96ff63",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 87,
"avg_line_length": 29.454545454545453,
"alnum_prop": 0.6512345679012346,
"repo_name": "n2o/dpb",
"id": "464948fd7acd3368096c99301521845c3b3810f7",
"size": "324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evening_program/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "2711"
},
{
"name": "CSS",
"bytes": "66421"
},
{
"name": "Dockerfile",
"bytes": "383"
},
{
"name": "HTML",
"bytes": "126117"
},
{
"name": "Java",
"bytes": "1403"
},
{
"name": "JavaScript",
"bytes": "414684"
},
{
"name": "Lasso",
"bytes": "2327"
},
{
"name": "PHP",
"bytes": "195366"
},
{
"name": "Perl",
"bytes": "2800"
},
{
"name": "Python",
"bytes": "156836"
},
{
"name": "Ruby",
"bytes": "1276"
},
{
"name": "Shell",
"bytes": "236"
}
],
"symlink_target": ""
}
|
"""An abstract for a collection of key_range.KeyRange objects."""
from google.appengine.ext import key_range
from mapreduce import namespace_range
__all__ = [
"KeyRangesFactory",
"KeyRanges"]
# pylint: disable=g-bad-name
class KeyRangesFactory(object):
"""Factory for KeyRanges."""
@classmethod
def create_from_list(cls, list_of_key_ranges):
"""Create a KeyRanges object.
Args:
list_of_key_ranges: a list of key_range.KeyRange object.
Returns:
A _KeyRanges object.
"""
return _KeyRangesFromList(list_of_key_ranges)
@classmethod
def create_from_ns_range(cls, ns_range):
"""Create a KeyRanges object.
Args:
      ns_range: a namespace_range.NamespaceRange object.
Returns:
A _KeyRanges object.
"""
return _KeyRangesFromNSRange(ns_range)
@classmethod
def from_json(cls, json):
"""Deserialize from json.
Args:
json: a dict of json compatible fields.
Returns:
a KeyRanges object.
Raises:
ValueError: if the json is invalid.
"""
if json["name"] in _KEYRANGES_CLASSES:
return _KEYRANGES_CLASSES[json["name"]].from_json(json)
raise ValueError("Invalid json %s", json)
class KeyRanges(object):
"""An abstraction for a collection of key_range.KeyRange objects."""
def __iter__(self):
return self
def next(self):
"""Iterator iteraface."""
raise NotImplementedError()
def to_json(self):
return {"name": self.__class__.__name__}
@classmethod
  def from_json(cls, json):
raise NotImplementedError()
  def __eq__(self, other):
raise NotImplementedError()
def __str__(self):
raise NotImplementedError()
class _KeyRangesFromList(KeyRanges):
"""Create KeyRanges from a list."""
def __init__(self, list_of_key_ranges):
self._key_ranges = list_of_key_ranges
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._key_ranges == other._key_ranges
def next(self):
if self._key_ranges:
return self._key_ranges.pop()
raise StopIteration()
def __str__(self):
if len(self._key_ranges) == 1:
return "Single KeyRange %s" % (self._key_ranges[0])
if self._key_ranges:
return "From %s to %s" % (self._key_ranges[0], self._key_ranges[-1])
return "Empty KeyRange."
def to_json(self):
json = super(_KeyRangesFromList, self).to_json()
json.update(
{"list_of_key_ranges": [kr.to_json() for kr in self._key_ranges]})
return json
@classmethod
def from_json(cls, json):
return cls(
[key_range.KeyRange.from_json(kr) for kr in json["list_of_key_ranges"]])
class _KeyRangesFromNSRange(KeyRanges):
"""Create KeyRanges from a namespace range."""
def __init__(self, ns_range):
"""Init."""
self._ns_range = ns_range
if self._ns_range is not None:
self._iter = iter(self._ns_range)
self._last_ns = None
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._ns_range == other._ns_range
def __str__(self):
return str(self._ns_range)
def next(self):
if self._ns_range is None:
raise StopIteration()
self._last_ns = self._iter.next()
current_ns_range = self._ns_range
if self._last_ns == self._ns_range.namespace_end:
self._ns_range = None
return key_range.KeyRange(namespace=self._last_ns,
_app=current_ns_range.app)
def to_json(self):
json = super(_KeyRangesFromNSRange, self).to_json()
ns_range = self._ns_range
if self._ns_range is not None and self._last_ns is not None:
ns_range = ns_range.with_start_after(self._last_ns)
if ns_range is not None:
json.update({"ns_range": ns_range.to_json_object()})
return json
@classmethod
def from_json(cls, json):
if "ns_range" in json:
return cls(
namespace_range.NamespaceRange.from_json_object(json["ns_range"]))
else:
return cls(None)
_KEYRANGES_CLASSES = {
_KeyRangesFromList.__name__: _KeyRangesFromList,
_KeyRangesFromNSRange.__name__: _KeyRangesFromNSRange
}
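# Illustrative usage sketch (assumes a configured App Engine datastore
# environment):
#   ranges = KeyRangesFactory.create_from_list([key_range.KeyRange()])
#   for kr in ranges:
#     pass  # each kr is a key_range.KeyRange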
|
{
"content_hash": "b971d9528c93e552ff605eebdebf8d6f",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 80,
"avg_line_length": 24.323529411764707,
"alnum_prop": 0.6336154776299879,
"repo_name": "VirusTotal/appengine-mapreduce",
"id": "819c2bb9f222dadc8a2a08765a6de3c58ecd5095",
"size": "4753",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "python/src/mapreduce/key_ranges.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3112"
},
{
"name": "HTML",
"bytes": "11973"
},
{
"name": "Java",
"bytes": "1044331"
},
{
"name": "JavaScript",
"bytes": "58331"
},
{
"name": "Python",
"bytes": "1117602"
},
{
"name": "Shell",
"bytes": "2416"
}
],
"symlink_target": ""
}
|
import sys
import json
import urllib2
import argparse
from operator import itemgetter
from prettytable import PrettyTable
AWE_URL = 'https://awe.mg-rast.org'
MGP = {
'mgrast-prod-4.0.3': [
'qc_stats',
'adapter trim',
'preprocess',
'dereplication',
'screen',
'rna detection',
'rna clustering',
'rna sims blat',
'genecalling',
'aa filtering',
'aa clustering',
'aa sims blat',
'aa sims annotation',
'rna sims annotation',
'index sim seq',
'md5 abundance',
'lca abundance',
'source abundance',
'dark matter extraction',
'abundance cassandra load',
'done stage',
'notify job completion'
],
'inbox_action': [
'step 1',
'step 2',
'step 3'
],
'submission': [
'step 1'
],
'mgrast-submit-ebi': [
'step 1',
'step 2'
]
}
CGS = [
'mgrast_dbload',
'mgrast_single',
'mgrast_multi'
]
def max_pipeline():
lens = map(lambda x: len(x), MGP.values())
return max(lens)
def get_awe(url, token):
header = {'Accept': 'application/json', 'Authorization': 'mgrast '+token}
req = urllib2.Request(url, headers=header)
res = urllib2.urlopen(req)
obj = json.loads(res.read())
return obj['data']
def client_status(c):
if c['busy']:
return 'busy'
if c['online']:
return 'online'
if c['suspended']:
return 'suspended'
return 'unknown'
def job_error(e):
if e['apperror']:
parts = e['apperror'].split('\n')
trim = filter(lambda x: x.find('ERR') != -1, parts)
msg = "\n".join(trim)
if "".join(msg.split()) != "":
return msg
if e['worknotes']:
return e['worknotes']
if e['servernotes']:
return e['servernotes']
return 'unknown'
def main(args):
global AWE_URL
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='subcommands', help='sub-command help', dest='commands')
info_parser = subparsers.add_parser("info")
client_parser = subparsers.add_parser("client")
client_parser.add_argument("-a", "--awe_url", dest="awe_url", default=AWE_URL, help="AWE API url")
client_parser.add_argument("-t", "--token", dest="token", default=None, help="User token")
client_parser.add_argument("-c", "--clientgroup", dest="clientgroup", default=None, help="clientgroup to view")
client_parser.add_argument("-p", "--pipeline", dest="pipeline", default='mgrast-prod-4.0.3', help="pipeline to view")
pipeline_parser = subparsers.add_parser("pipeline")
pipeline_parser.add_argument("-a", "--awe_url", dest="awe_url", default=AWE_URL, help="AWE API url")
pipeline_parser.add_argument("-t", "--token", dest="token", default=None, help="User token")
pipeline_parser.add_argument("-p", "--pipeline", dest="pipeline", default='mgrast-prod-4.0.3', help="pipeline to view")
suspend_parser = subparsers.add_parser("suspend")
suspend_parser.add_argument("-a", "--awe_url", dest="awe_url", default=AWE_URL, help="AWE API url")
suspend_parser.add_argument("-t", "--token", dest="token", default=None, help="User token")
suspend_parser.add_argument("-p", "--pipeline", dest="pipeline", default='mgrast-prod-4.0.3', help="pipeline to view")
suspend_parser.add_argument("-s", "--stage", dest="stage", type=int, default=None, help="index of stage to view")
try:
args = parser.parse_args()
except Exception as e:
print "Error: %s"%(str(e))
parser.print_help()
return 1
if not args.commands:
print "No command provided"
parser.print_help()
return 1
if args.commands == "info":
ptp = PrettyTable()
ptp.add_column('task #', range(max_pipeline()))
for k, v in MGP.iteritems():
ptp.add_column("pipeline: "+k, v)
ptp.align = "l"
print ptp
ptc = PrettyTable()
ptc.add_column("clientgroups", CGS)
ptc.align = "l"
print ptc
return 0
if not args.token:
print "Missing required --token"
parser.print_help()
return 1
AWE_URL = args.awe_url
stages = MGP[args.pipeline]
if args.commands == "client":
if args.clientgroup not in CGS:
print "Invalid clientgroup"
parser.print_help()
return 1
clients = get_awe(AWE_URL+'/client?group='+args.clientgroup, args.token)
pt = PrettyTable(["name", "host", "status", "job", "stage"])
seen = set()
for i, s in enumerate(stages):
for c in clients:
if 'data' in c['current_work']:
for d in c['current_work']['data']:
parts = d.split('_')
if int(parts[1]) == i:
pt.add_row([c['name'], c['host_ip'], client_status(c), parts[0], s])
seen.add(c['name'])
for c in clients:
if c['name'] not in seen:
pt.add_row([c['name'], c['host_ip'], client_status(c), "", ""])
pt.align = "l"
print pt
if args.commands == "pipeline":
clients = get_awe(AWE_URL+'/client', args.token)
pt = PrettyTable(["task #", "stage name"]+CGS)
for i, s in enumerate(stages):
row = [i, s]+[0 for _ in range(len(CGS))]
for c in clients:
if (c['group'] in CGS) and ('data' in c['current_work']):
for d in c['current_work']['data']:
parts = d.split('_')
if int(parts[1]) == i:
row[CGS.index(c['group'])+2] += 1
pt.add_row(row)
pt.align = "l"
print pt
if args.commands == "suspend":
jobs = get_awe("%s/job?query&state=suspend&info.pipeline=%s&limit=0"%(AWE_URL, args.pipeline), args.token)
        if args.stage is None:
pt = PrettyTable(["task #", "stage name", "suspended"])
for i, s in enumerate(stages):
row = [i, s, 0]
for j in jobs:
if j['error'] and j['error']['taskfailed']:
parts = j['error']['taskfailed'].split('_')
if int(parts[1]) == i:
row[2] += 1
pt.add_row(row)
pt.align = "l"
print pt
else:
pt = PrettyTable(["id", "job name", "mg ID", "error"])
for j in jobs:
if j['error'] and j['error']['taskfailed']:
parts = j['error']['taskfailed'].split('_')
if int(parts[1]) == args.stage:
pt.add_row([j['id'], j['info']['name'], j['info']['userattr']['id'], job_error(j['error'])])
pt.align = "l"
print pt
return 0
if __name__ == "__main__":
sys.exit( main(sys.argv) )
|
{
"content_hash": "a1e79faa22c480a3f32561c172c0ad7d",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 123,
"avg_line_length": 33.98571428571429,
"alnum_prop": 0.5182849936948297,
"repo_name": "MG-RAST/MG-RAST",
"id": "7bdf87912230e315fb0485edeb08ae673485dd40",
"size": "7160",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/MGRAST/bin/awe-debuger.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "63625"
},
{
"name": "Dockerfile",
"bytes": "2069"
},
{
"name": "JavaScript",
"bytes": "1012878"
},
{
"name": "Makefile",
"bytes": "13045"
},
{
"name": "PLpgSQL",
"bytes": "13438"
},
{
"name": "Perl",
"bytes": "3895418"
},
{
"name": "Python",
"bytes": "157242"
},
{
"name": "R",
"bytes": "72550"
},
{
"name": "Raku",
"bytes": "24593"
},
{
"name": "Shell",
"bytes": "7184"
},
{
"name": "TSQL",
"bytes": "3415"
}
],
"symlink_target": ""
}
|
from io import BytesIO
import json, base64
from .command import download
import logging
LOG = logging.getLogger(__name__)
# json.dumps and json.loads expect ascii unless the json was explicitly encoded as utf8,
# which we don't do anywhere in builder.
# <bytes>.decode and <string>.encode() both default to UTF-8
# so we must be careful to preserve the ascii encoding through its transformations
def decode_bvars(string):
"""decodes a base64 encoded json-serialised string.
input string can be utf-8 or ascii but output is always ascii"""
val = base64.b64decode(string).decode('ascii') # bytes => ascii
return json.loads(val)
def encode_bvars(data):
"encodes python data to a json-serialised, base64 encoded ascii string"
val = base64.b64encode(json.dumps(data).encode('ascii'))
return val.decode('ascii') # bytes => ascii
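# Round-trip sketch (hypothetical payload): encode_bvars({'env': 'prod'})
# returns an ascii str that decode_bvars turns back into {'env': 'prod'}.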
def read_from_current_host():
"returns the buildvars from the CURRENTLY CONNECTED host"
strbuffer = BytesIO()
download('/etc/build-vars.json.b64', strbuffer)
return decode_bvars(strbuffer.getvalue())
|
{
"content_hash": "bd32143ee8d047951a28493b7f32ed59",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 88,
"avg_line_length": 38.32142857142857,
"alnum_prop": 0.7315936626281454,
"repo_name": "elifesciences/builder",
"id": "937983a5c4440cb2fb489005937c6d9e08cc701d",
"size": "1073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/buildercore/bvars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HCL",
"bytes": "1182"
},
{
"name": "Python",
"bytes": "735556"
},
{
"name": "Shell",
"bytes": "33921"
},
{
"name": "Smarty",
"bytes": "142"
},
{
"name": "VCL",
"bytes": "4406"
}
],
"symlink_target": ""
}
|
"""Metric losses base class."""
from typing import Any, Callable, Dict, Optional
import tensorflow as tf
from tensorflow_similarity.types import FloatTensor
from tensorflow_similarity.utils import is_tensor_or_variable
class MetricLoss(tf.keras.losses.Loss):
"""Wraps a loss function in the `Loss` class."""
def __init__(
self, fn: Callable, reduction: Callable = tf.keras.losses.Reduction.AUTO, name: Optional[str] = None, **kwargs
):
"""Initializes `LossFunctionWrapper` class.
Args:
            fn: The loss function to wrap, with signature `fn(query_labels,
              query_embeddings, key_labels, key_embeddings, **kwargs)`.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`.
name: (Optional) name for the loss.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
super().__init__(reduction=reduction, name=name)
self.fn = fn
self._fn_kwargs = kwargs
def call(self, y_true: FloatTensor, y_pred: FloatTensor) -> FloatTensor:
"""Invokes the `LossFunctionWrapper` instance.
Args:
y_true: Ground truth values.
y_pred: The predicted values.
Returns:
Loss values per sample.
"""
        # The labels and embeddings are passed twice: the wrapped metric loss
        # functions take (query_labels, query_embeddings, key_labels,
        # key_embeddings), and here the queries double as the keys.
        loss: FloatTensor = self.fn(y_true, y_pred, y_true, y_pred, **self._fn_kwargs)
return loss
def get_config(self) -> Dict[str, Any]:
"""Contains the loss configuration.
Returns:
A Python dict containing the configuration of the loss.
"""
config = {}
for k, v in iter(self._fn_kwargs.items()):
if is_tensor_or_variable(v):
config[k] = tf.keras.backend.eval(v)
else:
config[k] = v
base_config = super().get_config()
return {**base_config, **config}
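# Usage sketch: wrap a compatible loss function and hand the instance to
# Keras, e.g. (the loss function name is hypothetical):
#   loss = MetricLoss(my_metric_loss_fn, name='my_metric_loss')
#   model.compile(optimizer='adam', loss=loss)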
|
{
"content_hash": "11b0b5807676472688a60c72d20219b2",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 118,
"avg_line_length": 34.03636363636364,
"alnum_prop": 0.5913461538461539,
"repo_name": "tensorflow/similarity",
"id": "369cf71db9201c1bd7723420fe01ec98f7e4f93a",
"size": "2561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_similarity/losses/metric_loss.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "607742"
}
],
"symlink_target": ""
}
|
from . import RequestOptions
class Pager(object):
"""
Generator that takes an endpoint with `.get` and lazily loads items from Server.
Supports all `RequestOptions` including starting on any page.
"""
def __init__(self, endpoint, request_opts=None):
self._endpoint = endpoint.get
self._options = request_opts
# If we have options we could be starting on any page, backfill the count
if self._options:
self._count = ((self._options.pagenumber - 1) * self._options.pagesize)
else:
self._count = 0
def __iter__(self):
# Fetch the first page
current_item_list, last_pagination_item = self._endpoint(self._options)
# Get the rest on demand as a generator
while self._count < last_pagination_item.total_available:
if len(current_item_list) == 0:
current_item_list, last_pagination_item = self._load_next_page(last_pagination_item)
try:
yield current_item_list.pop(0)
self._count += 1
            except IndexError:
                # The total count on Server changed while fetching; end the
                # generator gracefully (under PEP 479, raising StopIteration
                # here becomes a RuntimeError on Python 3.7+).
                return
def _load_next_page(self, last_pagination_item):
next_page = last_pagination_item.page_number + 1
opts = RequestOptions(pagenumber=next_page, pagesize=last_pagination_item.page_size)
if self._options is not None:
opts.sort, opts.filter = self._options.sort, self._options.filter
current_item_list, last_pagination_item = self._endpoint(opts)
return current_item_list, last_pagination_item
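# Usage sketch (assumes a signed-in `server` object from this library):
#   for workbook in Pager(server.workbooks):
#       print(workbook.name)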
|
{
"content_hash": "18736a21cc5252f2050c2768723b42ee",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 100,
"avg_line_length": 38.93023255813954,
"alnum_prop": 0.6248506571087217,
"repo_name": "Talvalin/server-client-python",
"id": "eaad398afb68a32f0938b409af3b308798e720d7",
"size": "1674",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "tableauserverclient/server/pager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "210691"
},
{
"name": "Shell",
"bytes": "140"
}
],
"symlink_target": ""
}
|
import pygraphviz as pgv
import wx
class GraphImgPanel(wx.Panel):
def __init__(self, parent, size):
wx.Panel.__init__(self, parent = parent, size = size)
self.size = size
rootSizer = wx.BoxSizer(wx.HORIZONTAL)
verticalSizer = wx.BoxSizer(wx.VERTICAL)
rootSizer.AddStretchSpacer()
rootSizer.Add(verticalSizer, flag = wx.CENTER)
rootSizer.AddStretchSpacer()
verticalSizer.Add(self.newScaledImgBitmap("resources\\sysFiles\\graphFiles\\simple.png"), flag = wx.CENTER)
self.SetSizer(rootSizer)
rootSizer.SetDimension(0, 0, size[0], size[1])
def newScaledImg(self, non_relative_path):
image = wx.Image(name = non_relative_path) #"..\\..\\resources\\Textures\\DefaultBuilding.jpg"
return image.Scale(self.size[0], self.size[1]) if self.size != wx.DefaultSize else image
def newScaledImgBitmap(self, non_relative_path):
return wx.StaticBitmap(self, wx.ID_ANY, wx.BitmapFromImage(self.newScaledImg(non_relative_path)), size = self.size)
class MainFrame(wx.Frame):
def __init__(self, size = wx.DefaultSize):
print "size = ", size
wx.Frame.__init__(self, None, title='Test', size = size)
GraphImgPanel(self, size)
def initNode(G, id, img, label):
n = G.get_node(id)
n.attr["shape"]="box"
n.attr["image"] = img
# n.attr["imagepos"]= "tc"
n.attr["xlabel"] = label
# n.attr["labelloc"] = "b"
def addSubGraph(A, n):
    A.add_edge(1 + n, 2 + n)
    A.add_edge(1 + n, 4 + n)
    A.add_edge(3 + n, 5 + n)
    A.add_edge(1 + n, 3 + n)
A=pgv.AGraph()
addSubGraph(A, 0)
addSubGraph(A, 5)
addSubGraph(A, 10)
for i in range(1, 14):
    initNode(A, id=str(i), img="resources\\Textures\\dweller.JPG", label="dweller")
print(A.string()) # print to screen
# B=pgv.AGraph('resources\\sysFiles\\graphFiles\\simple.dot') # create a new graph from file
A.layout() # layout with default (neato)
A.draw('resources\\sysFiles\\graphFiles\\simple.png') # draw png
print("Wrote simple.png")
app = wx.App(False)
screenDims = wx.GetDisplaySize()
frm = MainFrame(size=screenDims)
# frm.ShowFullScreen(True)
frm.Show()
app.MainLoop()
|
{
"content_hash": "67b48002ab5331efa3e9a67df0812336",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 123,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.6549165120593692,
"repo_name": "DarthThanatos/citySimNG",
"id": "b9b194fff643127ba556848895d230f2775994d0",
"size": "2203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "citySimNGView/extra/pygraphviz_simple.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "908"
},
{
"name": "Java",
"bytes": "352930"
},
{
"name": "Python",
"bytes": "454503"
}
],
"symlink_target": ""
}
|
from twisted.internet import protocol, threads, defer
from protocol import ProtobufProtocol, gateway_pb2, common_pb2, dump
from logger import logger
from user import User
from hero import Hero
import random
class Gateway(ProtobufProtocol):
_checksum = 0
_version_checked = False
_user = None
def __init__(self):
ProtobufProtocol.__init__(self)
self.messages = {
gateway_pb2.VERSION: self.handle_version,
gateway_pb2.LOGIN: self.handle_login,
gateway_pb2.LOGOUT: self.handle_logout,
gateway_pb2.INFO: self.handle_info,
gateway_pb2.PROPERTIES: self.handle_properties,
gateway_pb2.BADGES: self.handle_badges,
gateway_pb2.NICKNAME: self.handle_nickname,
gateway_pb2.MAKE_HERO: self.handle_make_hero,
gateway_pb2.SELECT_HERO: self.handle_select_hero,
gateway_pb2.HEROES: self.handle_heroes,
gateway_pb2.START_GAME: self.handle_start_game,
gateway_pb2.FINISH_GAME: self.handle_finish_game,
gateway_pb2.SECOND_TREASURE: self.handle_second_treasure,
gateway_pb2.FINISH_MULTI_GAME: self.handle_finish_multi_game,
gateway_pb2.BATTLE_SKIP: self.handle_battle_skip,
gateway_pb2.START_SURVIVAL_GAME: self.handle_start_survival_game,
gateway_pb2.FINISH_SURVIVAL_GAME: self.handle_finish_survival_game,
gateway_pb2.WAVE: self.handle_wave,
gateway_pb2.LEVEL_UP: self.handle_level_up,
gateway_pb2.RESURRECTION: self.handle_resurrection,
gateway_pb2.MAKE_PUBLIC_ROOM: self.handle_make_public_room,
gateway_pb2.MAKE_PRIVATE_ROOM: self.handle_make_private_room,
gateway_pb2.CHANGE_PUBLIC_ROOM: self.handle_change_public_room,
gateway_pb2.DROP_OUT: self.handle_drop_out,
gateway_pb2.CONFIRM_TO_DROP_OUT: self.handle_confirm_to_drop_out,
gateway_pb2.DUNGEONS: self.handle_dungeons,
gateway_pb2.EPIC_DUNGEONS: self.handle_epic_dungeons,
gateway_pb2.STAGES: self.handle_stages,
gateway_pb2.UNLOCK_STAGE: self.handle_unlock_stage,
gateway_pb2.RESET_STAGE: self.handle_reset_stage,
gateway_pb2.QUERY_STAGE: self.handle_query_stage,
gateway_pb2.GIFTS: self.handle_gifts,
gateway_pb2.TAKE_GIFT: self.handle_take_gift,
gateway_pb2.TUTORIAL: self.handle_tutorial,
gateway_pb2.ESHOP: self.handle_eshop,
gateway_pb2.BUY_IN_ESHOP: self.handle_buy_in_eshop,
gateway_pb2.CASH_SHOP: self.handle_cash_shop,
gateway_pb2.BUY_IN_CASH_SHOP: self.handle_buy_in_cash_shop,
gateway_pb2.ONI_SHOP: self.handle_oni_shop,
gateway_pb2.BUY_IN_ONI_SHOP: self.handle_buy_in_oni_shop,
gateway_pb2.COSTUMES: self.handle_costumes,
gateway_pb2.SELECT_COSTUME: self.handle_select_costume,
gateway_pb2.COSTUMES_TO_MAKE: self.handle_costumes_to_make,
gateway_pb2.BUY_COSTUME: self.handle_buy_costume,
gateway_pb2.MAKE_COSTUME: self.handle_make_costume,
gateway_pb2.COSTUMES_TO_REINFORCE: self.handle_costumes_to_reinforce,
gateway_pb2.REINFORCE_COSTUME: self.handle_reinforce_costume,
gateway_pb2.INVENTORY: self.handle_inventory,
gateway_pb2.DROP_ITEM: self.handle_drop_item,
gateway_pb2.PUT_ON: self.handle_put_on,
gateway_pb2.TAKE_OFF: self.handle_take_off,
gateway_pb2.FIX_ITEM: self.handle_fix_item,
gateway_pb2.REINFORCE_ITEM: self.handle_reinforce_item,
gateway_pb2.EXPAND_INVENTORY: self.handle_expand_inventory,
gateway_pb2.MAKE_ITEM: self.handle_make_item,
gateway_pb2.LOTTERYS: self.handle_lotterys,
gateway_pb2.TAKE_LOTTERY: self.handle_take_lottery,
gateway_pb2.SKILLS: self.handle_skills,
gateway_pb2.EXPAND_SKILL_BUTTON: self.handle_expand_skill_button,
gateway_pb2.SKILL_BUTTON: self.handle_skill_button,
gateway_pb2.REINFORCE_SKILL: self.handle_reinforce_skill,
gateway_pb2.SKILL_AUTO_ASSIGN: self.handle_skill_auto_assign,
gateway_pb2.RESET_SKILL: self.handle_reset_skill,
gateway_pb2.HEART: self.handle_heart,
gateway_pb2.BUDDIES: self.handle_buddies,
gateway_pb2.EXFRIEND: self.handle_exfriend,
gateway_pb2.SEND_HEART: self.handle_send_heart,
gateway_pb2.RECEIVE_HEART: self.handle_receive_heart,
gateway_pb2.RECEIVE_HEART_ALL: self.handle_receive_heart_all,
gateway_pb2.ASK_FRIENDSHIPS: self.handle_ask_friendships,
gateway_pb2.PROPOSE_BUDDY: self.handle_propose_buddy,
gateway_pb2.ACCEPT_FRIENDSHIP: self.handle_accept_friendship,
gateway_pb2.REJECT_FRIENDSHIP: self.handle_reject_friendship,
gateway_pb2.FIND_BUDDY: self.handle_find_buddy,
gateway_pb2.SEARCHABLE: self.handle_searchable,
gateway_pb2.RECOMMEND_FRIENDSHIPS: self.handle_recommend_friendships,
gateway_pb2.FRIEND_PROFILE: self.handle_friend_profile,
gateway_pb2.KAKAO_INVITATION: self.handle_kakao_invitation,
gateway_pb2.INVITED_KAKAO_FRIENDS: self.handle_invited_kakao_friends,
gateway_pb2.KAKAO_FRIENDS: self.handle_kakao_friends,
gateway_pb2.LINK_KAKAO_FRIENDS: self.handle_link_kakao_friends,
gateway_pb2.UNLINK_KAKAO_FRIENDS: self.handle_unlink_kakao_friends,
gateway_pb2.BUDDIES_TO_INVITE_GAME: self.handle_buddies_to_invite_game,
gateway_pb2.INVITE_BUDDY_TO_PLAY_GAME: self.handle_invite_buddy_to_play_game,
gateway_pb2.BE_INVITED_TO_PLAY_GAME: self.handle_be_invited_to_play_game,
gateway_pb2.ACCEPT_GAME_INVITATION: self.handle_accept_game_invitation,
gateway_pb2.DECLINE_GAME_INVITATION: self.handle_decline_game_invitation,
gateway_pb2.CANCEL_GAME_INVITATION: self.handle_cancel_game_invitation,
gateway_pb2.RANKING: self.handle_ranking,
gateway_pb2.RANKER: self.handle_ranker,
gateway_pb2.DAILYSTAMP: self.handle_dailystamp,
gateway_pb2.ASK_EXCHANGE_HEART: self.handle_ask_exchange_heart,
gateway_pb2.EXCHANGE_HEART: self.handle_exchange_heart,
gateway_pb2.ACHIVEMENT: self.handle_achivement,
gateway_pb2.ACHIVEMENT_REWARD: self.handle_achivement_reward,
gateway_pb2.MATERIAL_COOLTIME: self.handle_material_cooltime,
gateway_pb2.COLLECT_MATERIAL: self.handle_collect_material,
gateway_pb2.RESET_MATERIAL_COOLTIME: self.handle_reset_material_cooltime,
gateway_pb2.QUERY_PROMOTION: self.handle_query_promotion,
gateway_pb2.PROMOTER: self.handle_promoter,
gateway_pb2.PROMOTION_COUNT: self.handle_promotion_count,
gateway_pb2.COUPON: self.handle_coupon,
gateway_pb2.KEYWORD_COUPON: self.handle_keyword_coupon,
gateway_pb2.COUPON_HISTORY: self.handle_coupon_history,
gateway_pb2.KAKAO_OPTIONS: self.handle_kakao_options,
gateway_pb2.QUERY_KAKAO_OPTIONS: self.handle_query_kakao_options,
gateway_pb2.REVIEW: self.handle_review,
gateway_pb2.MENU_ACCESS: self.handle_menu_access,
gateway_pb2.NOTIFY_MESSAGE: self.handle_notify_message,
gateway_pb2.BADGES: self.handle_badges,
gateway_pb2.REFILL_SURVIVAL_CHALLENGE: self.handle_refill_survival_challenge,
gateway_pb2.SURVIVAL_BUFF: self.handle_survival_buff,
gateway_pb2.REVIVAL: self.handle_revival,
gateway_pb2.TEST_PARAM: self.handle_test_param,
}
param_type = gateway_pb2.Request().test_param
self.test_param_messages = {
param_type.SET_CASH: self.handle_set_cash,
param_type.SET_HONBUL: self.handle_set_honbul,
param_type.SET_SKILL_POINT: self.handle_set_skill_point,
param_type.SET_LEVEL: self.handle_set_level,
param_type.SET_TALISMAN: self.handle_set_talisman,
param_type.SET_STONE: self.handle_set_stone,
param_type.SET_COIN: self.handle_set_coin,
param_type.SET_HEART: self.handle_set_heart,
param_type.ADD_ITEM: self.handle_add_item,
param_type.GET_USER_ID: self.handle_get_user_id,
param_type.SET_EXP: self.handle_set_exp,
param_type.SET_PLAYING_TIME: self.handle_set_playing_time,
param_type.SET_UNLOCK_STAGE_COUNT: self.handle_set_unlock_stage_count,
}
def connectionMade(self):
logger.debug("connectionMade")
def connectionLost(self, reason):
logger.debug("connectionLost")
if self._user:
del self._user
self._user = None
def _send(self, msg):
self._checksum = random.randint(1, 999999)
msg.checksum = self._checksum
# dump
dump.dump_response(msg)
        # send the data
ProtobufProtocol.send(self, msg)
def send(self, msg):
def _success(result):
logger.debug("Send async success")
def _failure(result):
logger.error("Send async failed")
d = self.make_defer(_success, _failure)
d.callback(self._send(msg))
def process(self, message):
request = gateway_pb2.Request()
request.ParseFromString(message)
self._checksum = request.checksum
# dump
dump.dump_request(request)
# handle request
handler = self.messages.get(request.type, None)
if not handler:
logger.error("unknown message")
else:
if request.type > gateway_pb2.VERSION and self._version_checked is False:
return self.error(self.make_response(request), gateway_pb2.EC_VERSION, 'Need version check')
handler(request)
def validate_request(self, request):
pass
def error(self, response, code, reason):
response.error.code = code
response.error.reason = reason
self.send(response)
def make_response(self, request):
response = gateway_pb2.Response()
response.type = request.type
response.sequence = request.sequence
return response
def make_defer(self, success_callback, failure_callback):
d = defer.Deferred()
d.addCallback(success_callback)
d.addErrback(failure_callback)
return d
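    # Every handler below follows this same deferred pattern; a minimal
    # sketch (handle_example and fill_example are illustrative names, not
    # part of the real protocol):
    #
    #   def handle_example(self, request):
    #       response = self.make_response(request)
    #       def _success(ignore):
    #           self.send(response)
    #       def _failure(ex):
    #           code, reason = ex.value
    #           return self.error(response, code, reason)
    #       d = self.make_defer(_success, _failure)
    #       d.callback(self._user.fill_example(response))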
#----------------------------------------------------------------#
def handle_version(self, request):
response = self.make_response(request)
if not request.version:
return self.error(response, gateway_pb2.EC_VERSION, 'Invalid version request')
version = gateway_pb2.Request.Version()
if request.version.protocol != version.protocol:
            return self.error(response, gateway_pb2.EC_VERSION, 'Protocol number does not match')
if request.version.service != '0.1-test':
            return self.error(response, gateway_pb2.EC_VERSION, 'Service version does not match')
self._version_checked = True
self.send(response)
def handle_login(self, request):
response = self.make_response(request)
if not request.login:
return self.error(response, gateway_pb2.EC_UNABLE_TO_OPERATE, 'Invalid login request')
game_id = request.login.game_id
hashed_kakao_id = ''
kakao_id = -1
if request.login.kakao_id:
kakao_id = request.login.kakao_id
if request.login.hashed_kakao_id:
hashed_kakao_id = request.login.hashed_kakao_id
def _success(user):
            logger.info('user %d logged in' % user.user_id())
self._user = user
response.login.plug_ip = ''
response.login.plug_port = 5001
response.login.passwd = ''
self.send(response)
def _failure(ex):
code, reason = ex.value
return self.error(response, code, reason)
d = self.make_defer(_success, _failure)
d.callback(User.get_user_id(game_id, hashed_kakao_id, kakao_id))
def handle_logout(self, request):
if self.transport:
self.transport.loseConnection()
def handle_info(self, request):
assert(self._user)
response = self.make_response(request)
def _success(ignore):
self.send(response)
def _failure(ex):
code, reason = ex.value
return self.error(response, code, reason)
d = self.make_defer(_success, _failure)
d.callback(self._user.fill_info(response.info))
def handle_properties(self, request):
assert(self._user)
response = self.make_response(request)
def _success(ignore):
self.send(response)
def _failure(ex):
code, reason = ex.value
return self.error(response, code, reason)
d = self.make_defer(_success, _failure)
d.callback(self._user.fill_properties(response.properties))
def handle_badges(self, request):
assert(self._user)
response = self.make_response(request)
def _success(ignore):
self.send(response)
def _failure(ex):
code, reason = ex.value
return self.error(response, code, reason)
d = self.make_defer(_success, _failure)
d.callback(self._user.fill_badges(response.badges))
def handle_nickname(self, request):
assert(self._user)
response = self.make_response(request)
if not request.nickname:
return self.error(response, gateway_pb2.EC_UNABLE_TO_OPERATE, 'Invalid request')
if self._user.has_nickname():
return self.error(response, gateway_pb2.EC_UNABLE_TO_OPERATE, 'Already has nickname')
def _success(ignore):
self.send(response)
def _failure(ex):
code, reason = ex.value
return self.error(response, code, reason)
d = self.make_defer(_success, _failure)
d.callback(self._user.change_nickname(request.nickname))
def handle_make_hero(self, request):
assert(self._user)
response = self.make_response(request)
if not request.make_hero:
return self.error(response, gateway_pb2.EC_UNABLE_TO_OPERATE, 'Invalid request')
job = request.make_hero.job
if self._user.has_hero(job):
            return self.error(response, gateway_pb2.EC_UNABLE_TO_OPERATE, 'Hero already created')
def _success(hero):
hero.fill_hero(response.make_hero)
self.send(response)
def _failure(ex):
code, value = ex.value
return self.error(response, code, value)
d = self.make_defer(_success, _failure)
d.callback(self._user.make_hero(job))
def handle_select_hero(self, request):
assert(self._user)
response = self.make_response(request)
if not request.select_hero:
return self.error(response, gateway_pb2.EC_UNABLE_TO_OPERATE, 'Invalid request')
job = request.select_hero.job
if not self._user.has_hero(job):
            return self.error(response, gateway_pb2.EC_UNABLE_TO_OPERATE, 'No hero for the given job')
def _success(hero):
hero.fill_hero(response.select_hero)
self.send(response)
def _failure(ex):
code, value = ex.value
return self.error(response, code, value)
d = self.make_defer(_success, _failure)
d.callback(self._user.select_hero(job))
def handle_heroes(self, request):
assert(self._user)
response = self.make_response(request)
def _success(ignore):
self.send(response)
def _failure(ex):
code, value = ex.value
return self.error(response, code, value)
d = self.make_defer(_success, _failure)
d.callback(self._user.fill_heroes(response))
def handle_start_game(self, request):
assert(self._user)
def handle_finish_game(self, request):
assert(self._user)
def handle_second_treasure(self, request):
assert(self._user)
def handle_finish_multi_game(self, request):
assert(self._user)
def handle_battle_skip(self, request):
assert(self._user)
def handle_start_survival_game(self, request):
assert(self._user)
def handle_finish_survival_game(self, request):
assert(self._user)
def handle_wave(self, request):
assert(self._user)
def handle_level_up(self, request):
assert(self._user)
def handle_resurrection(self, request):
assert(self._user)
def handle_make_public_room(self, request):
assert(self._user)
def handle_make_private_room(self, request):
assert(self._user)
def handle_change_public_room(self, request):
assert(self._user)
def handle_drop_out(self, request):
assert(self._user)
def handle_confirm_to_drop_out(self, request):
assert(self._user)
def handle_dungeons(self, request):
assert(self._user)
response = self.make_response(request)
def _success(ignore):
self.send(response)
def _failure(ex):
code, reason = ex.value
return self.error(response, code, reason)
d = self.make_defer(_success, _failure)
d.callback(self._user.fill_dungeons(response.dungeons))
def handle_epic_dungeons(self, request):
assert(self._user)
response = self.make_response(request)
def _success(ignore):
self.send(response)
def _failure(ex):
code, reason = ex.value
return self.error(response, code, reason)
d = self.make_defer(_success, _failure)
d.callback(self._user.fill_epic_dungeons(response.epic_dungeons))
def handle_stages(self, request):
assert(self._user)
def handle_unlock_stage(self, request):
assert(self._user)
def handle_reset_stage(self, request):
assert(self._user)
def handle_query_stage(self, request):
assert(self._user)
def handle_gifts(self, request):
assert(self._user)
def handle_take_gift(self, request):
assert(self._user)
def handle_tutorial(self, request):
assert(self._user)
def handle_eshop(self, request):
assert(self._user)
def handle_buy_in_eshop(self, request):
assert(self._user)
def handle_cash_shop(self, request):
assert(self._user)
def handle_buy_in_cash_shop(self, request):
assert(self._user)
def handle_oni_shop(self, request):
assert(self._user)
response = self.make_response(request)
selected_hero = self._user.selected_hero()
if not selected_hero:
return self.error(response, gateway_pb2.EC_NO_HERO, 'No hero')
def _success(result):
self.send(response)
def _failure(result):
            return self.error(response, gateway_pb2.EC_DATABASE, 'Database execution failed')
method = request.oni_shop.method
category = request.oni_shop.category
d = self.make_defer(_success, _failure)
if method == common_pb2.ONI_SHOP_METHOD_RESET_SKILL:
            # purchase info for skill reset
d.callback(self._user.fill_reset_skill(response.oni_shop.reset_skill))
else:
            # purchase info for honbul/cash items
d.callback(self._user.fill_oni_shop(method, category, response.oni_shop))
def handle_buy_in_oni_shop(self, request):
assert(self._user)
def handle_costumes(self, request):
assert(self._user)
def handle_select_costume(self, request):
assert(self._user)
def handle_costumes_to_make(self, request):
assert(self._user)
def handle_buy_costume(self, request):
assert(self._user)
def handle_make_costume(self, request):
assert(self._user)
def handle_costumes_to_reinforce(self, request):
assert(self._user)
def handle_reinforce_costume(self, request):
assert(self._user)
def handle_inventory(self, request):
assert(self._user)
def handle_drop_item(self, request):
assert(self._user)
def handle_put_on(self, request):
assert(self._user)
def handle_take_off(self, request):
assert(self._user)
def handle_fix_item(self, request):
assert(self._user)
def handle_reinforce_item(self, request):
assert(self._user)
def handle_expand_inventory(self, request):
assert(self._user)
def handle_make_item(self, request):
assert(self._user)
def handle_lotterys(self, request):
assert(self._user)
def handle_take_lottery(self, request):
assert(self._user)
def handle_skills(self, request):
assert(self._user)
def handle_expand_skill_button(self, request):
assert(self._user)
def handle_skill_button(self, request):
assert(self._user)
def handle_reinforce_skill(self, request):
assert(self._user)
def handle_skill_auto_assign(self, request):
assert(self._user)
def handle_reset_skill(self, request):
assert(self._user)
def handle_heart(self, request):
assert(self._user)
def handle_send_heart(self, request):
assert(self._user)
def handle_receive_heart(self, request):
assert(self._user)
def handle_receive_heart_all(self, request):
assert(self._user)
def handle_buddies(self, request):
assert(self._user)
def handle_exfriend(self, request):
assert(self._user)
def handle_ask_friendships(self, request):
assert(self._user)
def handle_propose_buddy(self, request):
assert(self._user)
def handle_accept_friendship(self, request):
assert(self._user)
def handle_reject_friendship(self, request):
assert(self._user)
def handle_find_buddy(self, request):
assert(self._user)
def handle_searchable(self, request):
assert(self._user)
def handle_recommend_friendships(self, request):
assert(self._user)
def handle_friend_profile(self, request):
assert(self._user)
def handle_kakao_invitation(self, request):
assert(self._user)
def handle_invited_kakao_friends(self, request):
assert(self._user)
def handle_kakao_friends(self, request):
assert(self._user)
def handle_link_kakao_friends(self, request):
assert(self._user)
def handle_unlink_kakao_friends(self, request):
assert(self._user)
def handle_buddies_to_invite_game(self, request):
assert(self._user)
def handle_invite_buddy_to_play_game(self, request):
assert(self._user)
def handle_be_invited_to_play_game(self, request):
assert(self._user)
def handle_accept_game_invitation(self, request):
assert(self._user)
def handle_decline_game_invitation(self, request):
assert(self._user)
def handle_cancel_game_invitation(self, request):
assert(self._user)
def handle_ranking(self, request):
assert(self._user)
def handle_ranker(self, request):
assert(self._user)
def handle_dailystamp(self, request):
assert(self._user)
def handle_ask_exchange_heart(self, request):
assert(self._user)
def handle_exchange_heart(self, request):
assert(self._user)
def handle_achivement(self, request):
assert(self._user)
def handle_achivement_reward(self, request):
assert(self._user)
def handle_collect_material(self, request):
assert(self._user)
response = self.make_response(request)
def _success(ignore):
self.send(response)
def _failure(ex):
code, reason = ex.value
return self.error(response, code, reason)
material_id = request.collect_material.material_id
d = self.make_defer(_success, _failure)
d.callback(self._user.collect_material(material_id, response.collect_material))
def handle_material_cooltime(self, request):
assert(self._user)
response = self.make_response(request)
def _success(ignore):
self.send(response)
def _failure(ex):
code, reason = ex.value
return self.error(response, code, reason)
d = self.make_defer(_success, _failure)
d.callback(self._user.material_cooltime(response.material_cooltime))
def handle_reset_material_cooltime(self, request):
assert(self._user)
def handle_query_promotion(self, request):
assert(self._user)
def handle_promoter(self, request):
assert(self._user)
def handle_promotion_count(self, request):
assert(self._user)
def handle_coupon(self, request):
assert(self._user)
def handle_keyword_coupon(self, request):
assert(self._user)
def handle_coupon_history(self, request):
assert(self._user)
def handle_kakao_options(self, request):
assert(self._user)
def handle_query_kakao_options(self, request):
assert(self._user)
def handle_review(self, request):
assert(self._user)
def handle_menu_access(self, request):
assert(self._user)
def handle_notify_message(self, request):
assert(self._user)
def handle_refill_survival_challenge(self, request):
assert(self._user)
def handle_survival_buff(self, request):
assert(self._user)
response = self.make_response(request)
def _success(result):
self.send(response)
def _failure(result):
            return self.error(response, gateway_pb2.EC_DATABASE, 'Database execution failed')
d = self.make_defer(_success, _failure)
d.callback(self._user.fill_survival_buff(response.survival_buff))
def handle_revival(self, request):
assert(self._user)
response = self.make_response(request)
def _success(result):
self.send(response)
def _failure(result):
            return self.error(response, gateway_pb2.EC_DATABASE, 'Database execution failed')
        d = self.make_defer(_success, _failure)
        # Assumed completion: the original never fired this deferred, so no
        # response was ever sent back. By analogy with handle_survival_buff,
        # the user object presumably fills the revival payload here;
        # fill_revival is an assumption, not a confirmed User method.
        d.callback(self._user.fill_revival(response.revival))
#----------------------------------------------------------------#
def handle_test_param(self, request):
assert(self._user)
response = self.make_response(request)
for param in request.test_param.params:
handler = self.test_param_messages.get(param.type, None)
if not handler:
logger.error("unknown message")
else:
handler(param, response)
self.send(response)
def handle_set_cash(self, param, response):
assert(self._user)
self._user.set_cash(param.int_data)
def handle_set_honbul(self, param, response):
assert(self._user)
self._user.set_honbul(param.int_data)
def handle_set_skill_point(self, param, response):
assert(self._user)
selected_hero = self._user.selected_hero()
selected_hero.set_skill_point(param.int_data)
def handle_set_level(self, param, response):
assert(self._user)
selected_hero = self._user.selected_hero()
selected_hero.set_level(param.int_data)
def handle_set_talisman(self, param, response):
assert(self._user)
self._user.set_talisman(param.int_data)
def handle_set_stone(self, param, response):
assert(self._user)
self._user.set_stone(param.int_data)
def handle_set_coin(self, param, response):
assert(self._user)
self._user.set_coin(param.int_data)
def handle_set_heart(self, param, response):
assert(self._user)
self._user.set_heart(param.int_data)
def handle_add_item(self, param, response):
assert(self._user)
item_id = self._user.add_item(param.int_data)
response.sequence = item_id
def handle_get_user_id(self, param, response):
assert(self._user)
response.sequence = self._user.user_id()
def handle_set_exp(self, param, response):
assert(self._user)
selected_hero = self._user.selected_hero()
selected_hero.set_exp(param.int_data)
def handle_set_playing_time(self, param, response):
assert(self._user)
selected_hero = self._user.selected_hero()
selected_hero.set_playing_time(param.int_data)
def handle_set_unlock_stage_count(self, param, response):
assert(self._user)
selected_hero = self._user.selected_hero()
        selected_hero.set_unlock_stage_count(param.int_data)
#----------------------------------------------------------------#
class GatewayFactory(protocol.Factory):
def buildProtocol(self, addr):
logger.debug("New client is connected : %s" % addr)
return Gateway()
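# A minimal sketch of wiring this factory into a Twisted reactor; the
# port number is an assumption for illustration, not part of this module:
#
#   from twisted.internet import reactor
#   reactor.listenTCP(5000, GatewayFactory())
#   reactor.run()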
#----------------------------------------------------------------#
|
{
"content_hash": "82b53e742e4154082bfa905e4adaf2fe",
"timestamp": "",
"source": "github",
"line_count": 1011,
"max_line_length": 100,
"avg_line_length": 26.512363996043522,
"alnum_prop": 0.6797492911505746,
"repo_name": "zzragida/PythonExamples",
"id": "107706a9e05b3e55b268754723c65a9c93f242ed",
"size": "26872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ServerPython/gateway.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "830"
},
{
"name": "C#",
"bytes": "117476"
},
{
"name": "CSS",
"bytes": "104800"
},
{
"name": "Groff",
"bytes": "3853"
},
{
"name": "HTML",
"bytes": "265244"
},
{
"name": "JavaScript",
"bytes": "745160"
},
{
"name": "Makefile",
"bytes": "1884"
},
{
"name": "Protocol Buffer",
"bytes": "16964"
},
{
"name": "Python",
"bytes": "808982"
},
{
"name": "Shell",
"bytes": "53006"
}
],
"symlink_target": ""
}
|
"""
Fiona is OGR's neat, nimble, no-nonsense API.
Fiona provides a minimal, uncomplicated Python interface to the open
source GIS community's most trusted geodata access library and
integrates readily with other Python GIS packages such as pyproj, Rtree
and Shapely.
How minimal? Fiona can read features as mappings from shapefiles or
other GIS vector formats and write mappings as features to files using
the same formats. That's all. There aren't any feature or geometry
classes. Features and their geometries are just data.
A Fiona feature is a Python mapping inspired by the GeoJSON format. It
has `id`, `geometry`, and `properties` keys. The value of `id` is
a string identifier unique within the feature's parent collection. The
`geometry` is another mapping with `type` and `coordinates` keys. The
`properties` of a feature is another mapping corresponding to its
attribute table. For example:
{'id': '1',
'geometry': {'type': 'Point', 'coordinates': (0.0, 0.0)},
'properties': {'label': u'Null Island'} }
is a Fiona feature with a point geometry and one property.
Features are read and written using objects returned by the
``collection`` function. These ``Collection`` objects are a lot like
Python ``file`` objects. A ``Collection`` opened in reading mode serves
as an iterator over features. One opened in a writing mode provides
a ``write`` method.
Usage
-----
Here's an example of reading a select few polygon features from
a shapefile and for each, picking off the first vertex of the exterior
ring of the polygon and using that as the point geometry for a new
feature writing to a "points.shp" file.
>>> import fiona
>>> with fiona.open('docs/data/test_uk.shp', 'r') as inp:
... output_schema = inp.schema.copy()
... output_schema['geometry'] = 'Point'
... with fiona.open(
... "points.shp", "w",
... crs=inp.crs,
... driver="ESRI Shapefile",
... schema=output_schema
... ) as out:
... for f in inp.filter(
... bbox=(-5.0, 55.0, 0.0, 60.0)
... ):
... value = f['geometry']['coordinates'][0][0]
... f['geometry'] = {
... 'type': 'Point', 'coordinates': value}
... out.write(f)
Because Fiona collections are context managers, they are closed and (in
writing modes) flush contents to disk when their ``with`` blocks end.
"""
__all__ = ['bounds', 'listlayers', 'open', 'prop_type', 'prop_width']
__version__ = "1.4.8"
import logging
import os
from six import string_types
from fiona.collection import Collection, supported_drivers, vsi_path
from fiona._drivers import driver_count, GDALEnv
from fiona.odict import OrderedDict
from fiona.ogrext import _bounds, _listlayers, FIELD_TYPES_MAP
log = logging.getLogger('Fiona')
class NullHandler(logging.Handler):
def emit(self, record):
pass
log.addHandler(NullHandler())
def open(
path,
mode='r',
driver=None,
schema=None,
crs=None,
encoding=None,
layer=None,
vfs=None ):
"""Open file at ``path`` in ``mode`` "r" (read), "a" (append), or
"w" (write) and return a ``Collection`` object.
In write mode, a driver name such as "ESRI Shapefile" or "GPX" (see
OGR docs or ``ogr2ogr --help`` on the command line) and a schema
mapping such as:
{'geometry': 'Point',
'properties': [('class', 'int'), ('label', 'str'),
('value', 'float')]}
must be provided. If a particular ordering of properties ("fields"
in GIS parlance) in the written file is desired, a list of (key,
value) pairs as above or an ordered dict is required. If no ordering
is needed, a standard dict will suffice.
A coordinate reference system for collections in write mode can be
defined by the ``crs`` parameter. It takes Proj4 style mappings like
{'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84',
'no_defs': True}
The drivers used by Fiona will try to detect the encoding of data
files. If they fail, you may provide the proper ``encoding``, such
as 'Windows-1252' for the Natural Earth datasets.
When the provided path is to a file containing multiple named layers
of data, a layer can be singled out by ``layer``.
A virtual filesystem can be specified. The ``vfs`` parameter may be
an Apache Commons VFS style string beginning with "zip://" or
"tar://"". In this case, the ``path`` must be an absolute path
within that container.
"""
# Parse the vfs into a vsi and an archive path.
path, vsi, archive = parse_paths(path, vfs)
if mode in ('a', 'r'):
if archive:
if not os.path.exists(archive):
raise IOError("no such archive file: %r" % archive)
elif path != '-' and not os.path.exists(path):
raise IOError("no such file or directory: %r" % path)
c = Collection(path, mode,
encoding=encoding, layer=layer, vsi=vsi, archive=archive)
elif mode == 'w':
if schema:
# Make an ordered dict of schema properties.
this_schema = schema.copy()
this_schema['properties'] = OrderedDict(schema['properties'])
else:
this_schema = None
c = Collection(path, mode,
crs=crs, driver=driver, schema=this_schema,
encoding=encoding, layer=layer, vsi=vsi, archive=archive)
else:
raise ValueError(
"mode string must be one of 'r', 'w', or 'a', not %s" % mode)
return c
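# A short write-mode sketch based on the docstring above; the file name,
# schema, and feature below are illustrative:
#
#   schema = {'geometry': 'Point',
#             'properties': [('class', 'int'), ('label', 'str')]}
#   with open('points.shp', 'w', driver='ESRI Shapefile',
#             crs={'proj': 'longlat', 'ellps': 'WGS84',
#                  'datum': 'WGS84', 'no_defs': True},
#             schema=schema) as dst:
#       dst.write({'id': '0',
#                  'geometry': {'type': 'Point', 'coordinates': (0.0, 0.0)},
#                  'properties': {'class': 1, 'label': u'Null Island'}})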
collection = open
def listlayers(path, vfs=None):
"""Returns a list of layer names in their index order.
The required ``path`` argument may be an absolute or relative file or
directory path.
A virtual filesystem can be specified. The ``vfs`` parameter may be
an Apache Commons VFS style string beginning with "zip://" or
"tar://"". In this case, the ``path`` must be an absolute path within
that container.
"""
if not isinstance(path, string_types):
raise TypeError("invalid path: %r" % path)
if vfs and not isinstance(vfs, string_types):
raise TypeError("invalid vfs: %r" % vfs)
path, vsi, archive = parse_paths(path, vfs)
if archive:
if not os.path.exists(archive):
raise IOError("no such archive file: %r" % archive)
elif not os.path.exists(path):
raise IOError("no such file or directory: %r" % path)
with drivers():
return _listlayers(vsi_path(path, vsi, archive))
def parse_paths(path, vfs=None):
archive = vsi = None
if vfs:
parts = vfs.split("://")
vsi = parts.pop(0) if parts else None
archive = parts.pop(0) if parts else None
else:
parts = path.split("://")
path = parts.pop() if parts else None
vsi = parts.pop() if parts else None
return path, vsi, archive
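# Illustrative inputs and outputs (a sketch, not doctests shipped with
# the module):
#
#   parse_paths('test.shp')                    -> ('test.shp', None, None)
#   parse_paths('zip://data.zip')              -> ('data.zip', 'zip', None)
#   parse_paths('test.shp', 'zip://data.zip')  -> ('test.shp', 'zip', 'data.zip')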
def prop_width(val):
"""Returns the width of a str type property.
Undefined for non-str properties. Example:
>>> prop_width('str:25')
25
>>> prop_width('str')
80
"""
if val.startswith('str'):
return int((val.split(":")[1:] or ["80"])[0])
return None
def prop_type(text):
"""Returns a schema property's proper Python type.
Example:
>>> prop_type('int')
<class 'int'>
>>> prop_type('str:25')
<class 'str'>
"""
key = text.split(':')[0]
return FIELD_TYPES_MAP[key]
def drivers(*args, **kwargs):
"""Returns a context manager with registered drivers."""
if driver_count == 0:
log.debug("Creating a chief GDALEnv in drivers()")
return GDALEnv(True, **kwargs)
else:
log.debug("Creating a not-responsible GDALEnv in drivers()")
return GDALEnv(False, **kwargs)
def bounds(ob):
"""Returns a (minx, miny, maxx, maxy) bounding box.
The ``ob`` may be a feature record or geometry."""
geom = ob.get('geometry') or ob
return _bounds(geom)
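# A sketch of typical use; a point's bounding box degenerates to its own
# coordinates (the literal return shape below is an assumption about the
# C-level _bounds helper):
#
#   bounds({'geometry': {'type': 'Point', 'coordinates': (0.0, 0.0)}})
#   # -> (0.0, 0.0, 0.0, 0.0)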
|
{
"content_hash": "d8536feb834173402bcee0ad03f48012",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 73,
"avg_line_length": 34.567796610169495,
"alnum_prop": 0.6211081147340034,
"repo_name": "johanvdw/Fiona",
"id": "2bd255273f7c78743b197276ef2f4f9e0aae3c1b",
"size": "8183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fiona/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "258319"
}
],
"symlink_target": ""
}
|
from datetime import date, timedelta, datetime
from dateutil import rrule
def week_start_date(year, week):
"""
Taken from http://stackoverflow.com/a/1287862/1064619
"""
d = date(year, 1, 1)
delta_days = d.isoweekday() - 1
delta_weeks = week
if year == d.isocalendar()[0]:
delta_weeks -= 1
delta = timedelta(days=-delta_days, weeks=delta_weeks)
return d + delta
def fill_date_index_blanks(index):
    """Fill in any missing weeks in a weekly date index.

    Takes an index of week-start date strings ('%Y-%m-%d') and returns a
    list covering every ISO week from the first entry to the last.
    """
    index_list = index.tolist()
    start_date = datetime.strptime(index_list[0][:10], '%Y-%m-%d')
    end_date = datetime.strptime(index_list[-1][:10], '%Y-%m-%d')
    # Count the weekly intervals the filled index needs to span.
    num_weeks = rrule.rrule(rrule.WEEKLY,
                            dtstart=start_date,
                            until=end_date).count()
    # Rebuild the index week by week from the ISO (year, week) pairs.
    new_index = [week_start_date((start_date + timedelta(weeks=i)).isocalendar()[0],
                                 (start_date + timedelta(weeks=i)).isocalendar()[1]).strftime('%Y-%m-%d')
                 for i in range(0, num_weeks)]
    return new_index
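# A minimal usage sketch (the dates are illustrative; any weekly
# '%Y-%m-%d' strings in an index object with .tolist() would do):
#
#   >>> week_start_date(2014, 1)
#   datetime.date(2013, 12, 30)
#   >>> fill_date_index_blanks(pandas.Index(['2014-01-06', '2014-01-20']))
#   ['2014-01-06', '2014-01-13', '2014-01-20']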
|
{
"content_hash": "e49e35f5e9662f11cef4aa36e1c7ab80",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 135,
"avg_line_length": 31.1875,
"alnum_prop": 0.5861723446893787,
"repo_name": "worldofchris/jlf",
"id": "0a6139fe83b7881a329b490e5fc8902dd88c437b",
"size": "998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jlf_stats/index.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "173096"
}
],
"symlink_target": ""
}
|
from kao_decorators import lazy_property
from importlib import import_module
class KaoModule:
""" Represents a Python module """
def __init__(self, name):
""" Initialize the module with the name of the module it represents """
self.name = name
@property
def filename(self):
""" Return the filename of this module of the filesystem """
return self.module.__file__
@lazy_property
def module(self):
""" Represents the actual module underlying this module """
return self._import(self.name)
def importModule(self, *names):
""" Import the names given from this module and return
If none, then import and return this module """
modules = [self._import(self.join(name)) for name in names]
if len(modules) == 0:
return self.module
elif len(modules) == 1:
return modules[0]
else:
return modules
def _import(self, name):
""" Import the given name """
return import_module(name)
def join(self, childName):
""" REturn the module name of a child with the given name """
return "{0}.{1}".format(self.name, childName)
|
{
"content_hash": "8d6093f3a7a116423af750cee09a1787",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 32.65,
"alnum_prop": 0.5620214395099541,
"repo_name": "cloew/KaoModules",
"id": "5a9cb8b6bd0a21d364c0fd63e3f5349189728187",
"size": "1306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kao_modules/kao_module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2238"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import os
import textwrap
from . import D2to1TestCase
from .util import open_config
class TestHooks(D2to1TestCase):
def setup(self):
super(TestHooks, self).setup()
with open_config(os.path.join(self.package_dir, 'setup.cfg')) as cfg:
cfg.set('global', 'setup-hooks',
'd2to1_testpackage._setup_hooks.test_hook_1\n'
'd2to1_testpackage._setup_hooks.test_hook_2')
cfg.set('build_ext', 'pre-hook.test_pre_hook',
'd2to1_testpackage._setup_hooks.test_pre_hook')
cfg.set('build_ext', 'post-hook.test_post_hook',
'd2to1_testpackage._setup_hooks.test_post_hook')
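    # The setup() above effectively writes a setup.cfg like the following
    # (a sketch of the resulting config, shown for reference):
    #
    #   [global]
    #   setup-hooks =
    #       d2to1_testpackage._setup_hooks.test_hook_1
    #       d2to1_testpackage._setup_hooks.test_hook_2
    #
    #   [build_ext]
    #   pre-hook.test_pre_hook = d2to1_testpackage._setup_hooks.test_pre_hook
    #   post-hook.test_post_hook = d2to1_testpackage._setup_hooks.test_post_hook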
def test_global_setup_hooks(self):
"""
Test that setup_hooks listed in the [global] section of setup.cfg are
executed in order.
"""
stdout, _, return_code = self.run_setup('egg_info')
assert 'test_hook_1\ntest_hook_2' in stdout
assert return_code == 0
def test_command_hooks(self):
"""
Simple test that the appropriate command hooks run at the
beginning/end of the appropriate command.
"""
stdout, _, return_code = self.run_setup('egg_info')
assert 'build_ext pre-hook' not in stdout
assert 'build_ext post-hook' not in stdout
assert return_code == 0
stdout, _, return_code = self.run_setup('build_ext')
assert textwrap.dedent("""
running build_ext
running pre_hook d2to1_testpackage._setup_hooks.test_pre_hook for command build_ext
build_ext pre-hook
""") in stdout
assert stdout.endswith('build_ext post-hook')
assert return_code == 0
|
{
"content_hash": "f6b45ceaf7f6b3360f0af6658e4af130",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 95,
"avg_line_length": 34.21153846153846,
"alnum_prop": 0.599213041034289,
"repo_name": "ioram7/keystone-federado-pgid2013",
"id": "047e0b90942dba6126ae9239f85a5d8545024e64",
"size": "1779",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/d2to1/d2to1/tests/test_hooks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1841"
},
{
"name": "C",
"bytes": "10584735"
},
{
"name": "C++",
"bytes": "19231"
},
{
"name": "CSS",
"bytes": "172341"
},
{
"name": "JavaScript",
"bytes": "530938"
},
{
"name": "Python",
"bytes": "26306359"
},
{
"name": "Shell",
"bytes": "38138"
},
{
"name": "XSLT",
"bytes": "306125"
}
],
"symlink_target": ""
}
|
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import impala._thrift_gen.beeswax.BeeswaxService
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface(impala._thrift_gen.beeswax.BeeswaxService.Iface):
def Cancel(self, query_id):
"""
Parameters:
- query_id
"""
pass
def ResetCatalog(self):
pass
def ResetTable(self, request):
"""
Parameters:
- request
"""
pass
def GetRuntimeProfile(self, query_id):
"""
Parameters:
- query_id
"""
pass
def CloseInsert(self, handle):
"""
Parameters:
- handle
"""
pass
def PingImpalaService(self):
pass
def GetExecSummary(self, handle):
"""
Parameters:
- handle
"""
pass
class Client(impala._thrift_gen.beeswax.BeeswaxService.Client, Iface):
def __init__(self, iprot, oprot=None):
impala._thrift_gen.beeswax.BeeswaxService.Client.__init__(self, iprot, oprot)
def Cancel(self, query_id):
"""
Parameters:
- query_id
"""
self.send_Cancel(query_id)
return self.recv_Cancel()
def send_Cancel(self, query_id):
self._oprot.writeMessageBegin('Cancel', TMessageType.CALL, self._seqid)
args = Cancel_args()
args.query_id = query_id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_Cancel(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = Cancel_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.error is not None:
raise result.error
raise TApplicationException(TApplicationException.MISSING_RESULT, "Cancel failed: unknown result");
def ResetCatalog(self):
self.send_ResetCatalog()
return self.recv_ResetCatalog()
def send_ResetCatalog(self):
self._oprot.writeMessageBegin('ResetCatalog', TMessageType.CALL, self._seqid)
args = ResetCatalog_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_ResetCatalog(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = ResetCatalog_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "ResetCatalog failed: unknown result");
def ResetTable(self, request):
"""
Parameters:
- request
"""
self.send_ResetTable(request)
return self.recv_ResetTable()
def send_ResetTable(self, request):
self._oprot.writeMessageBegin('ResetTable', TMessageType.CALL, self._seqid)
args = ResetTable_args()
args.request = request
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_ResetTable(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = ResetTable_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "ResetTable failed: unknown result");
def GetRuntimeProfile(self, query_id):
"""
Parameters:
- query_id
"""
self.send_GetRuntimeProfile(query_id)
return self.recv_GetRuntimeProfile()
def send_GetRuntimeProfile(self, query_id):
self._oprot.writeMessageBegin('GetRuntimeProfile', TMessageType.CALL, self._seqid)
args = GetRuntimeProfile_args()
args.query_id = query_id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_GetRuntimeProfile(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = GetRuntimeProfile_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.error is not None:
raise result.error
raise TApplicationException(TApplicationException.MISSING_RESULT, "GetRuntimeProfile failed: unknown result");
def CloseInsert(self, handle):
"""
Parameters:
- handle
"""
self.send_CloseInsert(handle)
return self.recv_CloseInsert()
def send_CloseInsert(self, handle):
self._oprot.writeMessageBegin('CloseInsert', TMessageType.CALL, self._seqid)
args = CloseInsert_args()
args.handle = handle
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_CloseInsert(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = CloseInsert_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.error is not None:
raise result.error
if result.error2 is not None:
raise result.error2
raise TApplicationException(TApplicationException.MISSING_RESULT, "CloseInsert failed: unknown result");
def PingImpalaService(self):
self.send_PingImpalaService()
return self.recv_PingImpalaService()
def send_PingImpalaService(self):
self._oprot.writeMessageBegin('PingImpalaService', TMessageType.CALL, self._seqid)
args = PingImpalaService_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_PingImpalaService(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = PingImpalaService_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "PingImpalaService failed: unknown result");
def GetExecSummary(self, handle):
"""
Parameters:
- handle
"""
self.send_GetExecSummary(handle)
return self.recv_GetExecSummary()
def send_GetExecSummary(self, handle):
self._oprot.writeMessageBegin('GetExecSummary', TMessageType.CALL, self._seqid)
args = GetExecSummary_args()
args.handle = handle
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_GetExecSummary(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = GetExecSummary_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.error is not None:
raise result.error
if result.error2 is not None:
raise result.error2
raise TApplicationException(TApplicationException.MISSING_RESULT, "GetExecSummary failed: unknown result");
class Processor(impala._thrift_gen.beeswax.BeeswaxService.Processor, Iface, TProcessor):
def __init__(self, handler):
impala._thrift_gen.beeswax.BeeswaxService.Processor.__init__(self, handler)
self._processMap["Cancel"] = Processor.process_Cancel
self._processMap["ResetCatalog"] = Processor.process_ResetCatalog
self._processMap["ResetTable"] = Processor.process_ResetTable
self._processMap["GetRuntimeProfile"] = Processor.process_GetRuntimeProfile
self._processMap["CloseInsert"] = Processor.process_CloseInsert
self._processMap["PingImpalaService"] = Processor.process_PingImpalaService
self._processMap["GetExecSummary"] = Processor.process_GetExecSummary
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_Cancel(self, seqid, iprot, oprot):
args = Cancel_args()
args.read(iprot)
iprot.readMessageEnd()
result = Cancel_result()
try:
result.success = self._handler.Cancel(args.query_id)
except impala._thrift_gen.beeswax.ttypes.BeeswaxException, error:
result.error = error
oprot.writeMessageBegin("Cancel", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_ResetCatalog(self, seqid, iprot, oprot):
args = ResetCatalog_args()
args.read(iprot)
iprot.readMessageEnd()
result = ResetCatalog_result()
result.success = self._handler.ResetCatalog()
oprot.writeMessageBegin("ResetCatalog", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_ResetTable(self, seqid, iprot, oprot):
args = ResetTable_args()
args.read(iprot)
iprot.readMessageEnd()
result = ResetTable_result()
result.success = self._handler.ResetTable(args.request)
oprot.writeMessageBegin("ResetTable", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_GetRuntimeProfile(self, seqid, iprot, oprot):
args = GetRuntimeProfile_args()
args.read(iprot)
iprot.readMessageEnd()
result = GetRuntimeProfile_result()
try:
result.success = self._handler.GetRuntimeProfile(args.query_id)
except impala._thrift_gen.beeswax.ttypes.BeeswaxException, error:
result.error = error
oprot.writeMessageBegin("GetRuntimeProfile", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_CloseInsert(self, seqid, iprot, oprot):
args = CloseInsert_args()
args.read(iprot)
iprot.readMessageEnd()
result = CloseInsert_result()
try:
result.success = self._handler.CloseInsert(args.handle)
except impala._thrift_gen.beeswax.ttypes.QueryNotFoundException, error:
result.error = error
except impala._thrift_gen.beeswax.ttypes.BeeswaxException, error2:
result.error2 = error2
oprot.writeMessageBegin("CloseInsert", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_PingImpalaService(self, seqid, iprot, oprot):
args = PingImpalaService_args()
args.read(iprot)
iprot.readMessageEnd()
result = PingImpalaService_result()
result.success = self._handler.PingImpalaService()
oprot.writeMessageBegin("PingImpalaService", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_GetExecSummary(self, seqid, iprot, oprot):
args = GetExecSummary_args()
args.read(iprot)
iprot.readMessageEnd()
result = GetExecSummary_result()
try:
result.success = self._handler.GetExecSummary(args.handle)
except impala._thrift_gen.beeswax.ttypes.QueryNotFoundException, error:
result.error = error
except impala._thrift_gen.beeswax.ttypes.BeeswaxException, error2:
result.error2 = error2
oprot.writeMessageBegin("GetExecSummary", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class Cancel_args(object):
"""
Attributes:
- query_id
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'query_id', (impala._thrift_gen.beeswax.ttypes.QueryHandle, impala._thrift_gen.beeswax.ttypes.QueryHandle.thrift_spec), None, ), # 1
)
def __init__(self, query_id=None,):
self.query_id = query_id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.query_id = impala._thrift_gen.beeswax.ttypes.QueryHandle()
self.query_id.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Cancel_args')
if self.query_id is not None:
oprot.writeFieldBegin('query_id', TType.STRUCT, 1)
self.query_id.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Cancel_result(object):
"""
Attributes:
- success
- error
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (impala._thrift_gen.Status.ttypes.TStatus, impala._thrift_gen.Status.ttypes.TStatus.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'error', (impala._thrift_gen.beeswax.ttypes.BeeswaxException, impala._thrift_gen.beeswax.ttypes.BeeswaxException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, error=None,):
self.success = success
self.error = error
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = impala._thrift_gen.Status.ttypes.TStatus()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.error = impala._thrift_gen.beeswax.ttypes.BeeswaxException()
self.error.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Cancel_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.error is not None:
oprot.writeFieldBegin('error', TType.STRUCT, 1)
self.error.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ResetCatalog_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ResetCatalog_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ResetCatalog_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (impala._thrift_gen.Status.ttypes.TStatus, impala._thrift_gen.Status.ttypes.TStatus.thrift_spec), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = impala._thrift_gen.Status.ttypes.TStatus()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ResetCatalog_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ResetTable_args(object):
"""
Attributes:
- request
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'request', (TResetTableReq, TResetTableReq.thrift_spec), None, ), # 1
)
def __init__(self, request=None,):
self.request = request
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.request = TResetTableReq()
self.request.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ResetTable_args')
if self.request is not None:
oprot.writeFieldBegin('request', TType.STRUCT, 1)
self.request.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ResetTable_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (impala._thrift_gen.Status.ttypes.TStatus, impala._thrift_gen.Status.ttypes.TStatus.thrift_spec), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = impala._thrift_gen.Status.ttypes.TStatus()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ResetTable_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class GetRuntimeProfile_args(object):
"""
Attributes:
- query_id
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'query_id', (impala._thrift_gen.beeswax.ttypes.QueryHandle, impala._thrift_gen.beeswax.ttypes.QueryHandle.thrift_spec), None, ), # 1
)
def __init__(self, query_id=None,):
self.query_id = query_id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.query_id = impala._thrift_gen.beeswax.ttypes.QueryHandle()
self.query_id.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GetRuntimeProfile_args')
if self.query_id is not None:
oprot.writeFieldBegin('query_id', TType.STRUCT, 1)
self.query_id.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class GetRuntimeProfile_result(object):
"""
Attributes:
- success
- error
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'error', (impala._thrift_gen.beeswax.ttypes.BeeswaxException, impala._thrift_gen.beeswax.ttypes.BeeswaxException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, error=None,):
self.success = success
self.error = error
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.error = impala._thrift_gen.beeswax.ttypes.BeeswaxException()
self.error.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GetRuntimeProfile_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
if self.error is not None:
oprot.writeFieldBegin('error', TType.STRUCT, 1)
self.error.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class CloseInsert_args(object):
"""
Attributes:
- handle
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'handle', (impala._thrift_gen.beeswax.ttypes.QueryHandle, impala._thrift_gen.beeswax.ttypes.QueryHandle.thrift_spec), None, ), # 1
)
def __init__(self, handle=None,):
self.handle = handle
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.handle = impala._thrift_gen.beeswax.ttypes.QueryHandle()
self.handle.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('CloseInsert_args')
if self.handle is not None:
oprot.writeFieldBegin('handle', TType.STRUCT, 1)
self.handle.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class CloseInsert_result(object):
"""
Attributes:
- success
- error
- error2
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TInsertResult, TInsertResult.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'error', (impala._thrift_gen.beeswax.ttypes.QueryNotFoundException, impala._thrift_gen.beeswax.ttypes.QueryNotFoundException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'error2', (impala._thrift_gen.beeswax.ttypes.BeeswaxException, impala._thrift_gen.beeswax.ttypes.BeeswaxException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, error=None, error2=None,):
self.success = success
self.error = error
self.error2 = error2
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TInsertResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.error = impala._thrift_gen.beeswax.ttypes.QueryNotFoundException()
self.error.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.error2 = impala._thrift_gen.beeswax.ttypes.BeeswaxException()
self.error2.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('CloseInsert_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.error is not None:
oprot.writeFieldBegin('error', TType.STRUCT, 1)
self.error.write(oprot)
oprot.writeFieldEnd()
if self.error2 is not None:
oprot.writeFieldBegin('error2', TType.STRUCT, 2)
self.error2.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class PingImpalaService_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PingImpalaService_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class PingImpalaService_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TPingImpalaServiceResp, TPingImpalaServiceResp.thrift_spec), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TPingImpalaServiceResp()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('PingImpalaService_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class GetExecSummary_args(object):
"""
Attributes:
- handle
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'handle', (impala._thrift_gen.beeswax.ttypes.QueryHandle, impala._thrift_gen.beeswax.ttypes.QueryHandle.thrift_spec), None, ), # 1
)
def __init__(self, handle=None,):
self.handle = handle
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.handle = impala._thrift_gen.beeswax.ttypes.QueryHandle()
self.handle.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GetExecSummary_args')
if self.handle is not None:
oprot.writeFieldBegin('handle', TType.STRUCT, 1)
self.handle.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class GetExecSummary_result(object):
"""
Attributes:
- success
- error
- error2
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (impala._thrift_gen.ExecStats.ttypes.TExecSummary, impala._thrift_gen.ExecStats.ttypes.TExecSummary.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'error', (impala._thrift_gen.beeswax.ttypes.QueryNotFoundException, impala._thrift_gen.beeswax.ttypes.QueryNotFoundException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'error2', (impala._thrift_gen.beeswax.ttypes.BeeswaxException, impala._thrift_gen.beeswax.ttypes.BeeswaxException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, error=None, error2=None,):
self.success = success
self.error = error
self.error2 = error2
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = impala._thrift_gen.ExecStats.ttypes.TExecSummary()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.error = impala._thrift_gen.beeswax.ttypes.QueryNotFoundException()
self.error.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.error2 = impala._thrift_gen.beeswax.ttypes.BeeswaxException()
self.error2.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('GetExecSummary_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.error is not None:
oprot.writeFieldBegin('error', TType.STRUCT, 1)
self.error.write(oprot)
oprot.writeFieldEnd()
if self.error2 is not None:
oprot.writeFieldBegin('error2', TType.STRUCT, 2)
self.error2.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
{
"content_hash": "83f3edc10ce27c3eb1d368eb0b956b8b",
"timestamp": "",
"source": "github",
"line_count": 1279,
"max_line_length": 188,
"avg_line_length": 32.471462079749806,
"alnum_prop": 0.6554140280754136,
"repo_name": "laserson/impyla",
"id": "0bd319a92484675345d5f3d2ff4936aef0fcef42",
"size": "41682",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "impala/_thrift_gen/ImpalaService/ImpalaService.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "226216"
},
{
"name": "Shell",
"bytes": "6925"
},
{
"name": "Thrift",
"bytes": "87376"
}
],
"symlink_target": ""
}
|
"""
Tests for PDC client configuration.
"""
import os
try:
# Python 2.6 compatibility
import unittest2 as unittest
except ImportError:
import unittest
from pdc_client.config import (
ServerConfigManager,
ServerConfigMissingUrlError,
ServerConfigNotFoundError,
ServerConfigConflictError,
)
def fixture_path(file_name):
fixtures_path = os.path.join(os.path.dirname(__file__), 'fixtures')
path = os.path.join(fixtures_path, file_name)
assert(os.path.exists(path))
return path
class ServerConfigTestCase(unittest.TestCase):
def test_read_config_file(self):
configs = ServerConfigManager(fixture_path('config.json'))
expected = {
"host": "https://www.example.com/1/",
"ssl-verify": False,
"develop": True,
"token": "TEST_TOKEN",
"plugins": ["bindings.py"]
}
config = configs.get('test-pdc-server-1')
self.assertEqual(config.config, expected)
expected = {
"host": "https://www.example.com/2/"
}
config = configs.get('test-pdc-server-2')
self.assertEqual(config.config, expected)
def test_read_config_dir(self):
configs = ServerConfigManager(fixture_path('config.json'), fixture_path('configs'))
expected = {
"host": "https://www.example.com/1/",
"ssl-verify": False,
"develop": True,
"token": "TEST_TOKEN",
"plugins": ["bindings.py"]
}
config = configs.get('test-pdc-server-1')
self.assertEqual(config.config, expected)
expected = {
"host": "https://www.example.com/4/"
}
config = configs.get('test-pdc-server-4')
self.assertEqual(config.config, expected)
def test_url(self):
configs = ServerConfigManager(fixture_path('config.json'))
config = configs.get('test-pdc-server-1')
self.assertEqual(config.url, 'https://www.example.com/1/')
config = configs.get('test-pdc-server-2')
self.assertEqual(config.url, 'https://www.example.com/2/')
def test_default_url(self):
configs = ServerConfigManager()
server = 'http://test-pdc-server-1'
config = configs.get(server)
self.assertEqual(config.url, server)
def test_missing_url(self):
configs = ServerConfigManager(fixture_path('missing-url.json'))
with self.assertRaises(ServerConfigMissingUrlError):
configs.get('test-pdc-server-1')
def test_ssl_verify(self):
configs = ServerConfigManager(fixture_path('config.json'))
config = configs.get('test-pdc-server-1')
self.assertEqual(config.ssl_verify, False)
def test_default_ssl_verify(self):
configs = ServerConfigManager(fixture_path('config.json'))
config = configs.get('test-pdc-server-2')
self.assertEqual(config.ssl_verify, True)
config = configs.get('http://test-pdc-server-3')
self.assertEqual(config.ssl_verify, True)
config = configs.get('http://test-pdc-server-4')
self.assertEqual(config.ssl_verify, True)
def test_develop(self):
configs = ServerConfigManager(fixture_path('config.json'))
config = configs.get('test-pdc-server-1')
self.assertEqual(config.is_development, True)
def test_default_develop(self):
configs = ServerConfigManager(fixture_path('config.json'))
config = configs.get('test-pdc-server-2')
self.assertEqual(config.is_development, False)
config = configs.get('http://test-pdc-server-3')
self.assertEqual(config.is_development, False)
config = configs.get('http://test-pdc-server-4')
self.assertEqual(config.is_development, False)
def test_token(self):
configs = ServerConfigManager(fixture_path('config.json'))
config = configs.get('test-pdc-server-1')
self.assertEqual(config.token, 'TEST_TOKEN')
def test_default_token(self):
configs = ServerConfigManager(fixture_path('config.json'))
config = configs.get('test-pdc-server-2')
self.assertEqual(config.token, None)
config = configs.get('http://test-pdc-server-3')
self.assertEqual(config.token, None)
config = configs.get('http://test-pdc-server-4')
self.assertEqual(config.token, None)
def test_same_server_in_multiple_configs(self):
configs = ServerConfigManager(fixture_path('configs-same-server'))
with self.assertRaises(ServerConfigConflictError):
configs.get('test')
def test_precedence(self):
config1 = fixture_path('precedence/config1.json')
config2 = fixture_path('precedence/config2.json')
server = 'test-pdc-server'
configs = ServerConfigManager(config1, config2)
config = configs.get(server)
self.assertEqual(config.url, 'https://www.example.com/1/')
configs = ServerConfigManager(config2, config1)
config = configs.get(server)
self.assertEqual(config.url, 'https://www.example.com/2/')
def test_get_config_value(self):
configs = ServerConfigManager(fixture_path('config.json'))
config = configs.get('test-pdc-server-1')
self.assertEqual(config.get('host'), 'https://www.example.com/1/')
config = configs.get('test-pdc-server-2')
self.assertEqual(config.get('host'), 'https://www.example.com/2/')
server = 'http://test-pdc-server-3'
config = configs.get(server)
self.assertEqual(config.get('host'), server)
def test_get_default_value(self):
configs = ServerConfigManager(fixture_path('config.json'))
config = configs.get('test-pdc-server-2')
self.assertEqual(config.get('develop', True), True)
config = configs.get('http://test-pdc-server-3')
self.assertEqual(config.get('develop', True), True)
configs = ServerConfigManager()
config = configs.get('http://test-pdc-server')
self.assertEqual(config.get('develop', True), True)
def test_get_bad_server(self):
configs = ServerConfigManager(fixture_path('config.json'), fixture_path('configs'))
with self.assertRaises(ServerConfigNotFoundError):
configs.get('test-pdc-server')
|
{
"content_hash": "f8790433c979f306114a73b37a89d71c",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 91,
"avg_line_length": 34.26486486486486,
"alnum_prop": 0.6329073986433191,
"repo_name": "product-definition-center/pdc-client",
"id": "5ba22212afd26e6309030692a452fcbfcad3fc77",
"size": "6448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/config/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "291383"
},
{
"name": "Shell",
"bytes": "2021"
}
],
"symlink_target": ""
}
|
"""Mesh utils."""
# pylint: disable=missing-function-docstring
# pylint: disable=unused-variable
# pylint: disable=unused-argument
# pylint: disable=missing-class-docstring
from matplotlib import cm
from psbody.mesh import Mesh
import trimesh
# colormap = cm.get_cmap("plasma")
def convert_points_to_mesh(points,
radius=0.02,
colormap=cm.get_cmap("jet"),
return_ps_mesh=False):
sphere = trimesh.primitives.Sphere(radius=radius)
new_mesh = trimesh.Trimesh()
# new_mesh = []
for _, point in enumerate(points):
new_sphere = trimesh.Trimesh(sphere.vertices + point, sphere.faces * 1)
new_mesh += new_sphere
# new_mesh.append(new_sphere)
# color = colormap(px*1.0/ 20)
# vc = trimesh.visual.color.VertexColor(color)
if return_ps_mesh:
new_mesh = Mesh(new_mesh.vertices, new_mesh.faces)
return new_mesh
def convert_verts_to_mesh(vertices, faces):
new_mesh = Mesh(vertices, faces)
return new_mesh
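if __name__ == "__main__":
# Minimal sketch (illustrative values): merge small spheres at two points
# and report the size of the combined mesh. Assumes trimesh and
# psbody.mesh are importable, as in the module imports above.
demo = convert_points_to_mesh([[0., 0., 0.], [1., 0., 0.]], radius=0.05)
print(demo.vertices.shape, demo.faces.shape)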
|
{
"content_hash": "3d8c20b8026c693abea4b68f4ac099fb",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 75,
"avg_line_length": 27.56756756756757,
"alnum_prop": 0.653921568627451,
"repo_name": "google-research/google-research",
"id": "a958defb6827e1b0f90a8a785881f623ea3c5c0a",
"size": "1628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "human_object_interaction/oci/oci/utils/mesh_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
import unittest
import turkey_vulture
import data
import facebook
import re
# TODO: Add test for big update
class MockGraphAPI(facebook.GraphAPI):
def __init__(self):
facebook.GraphAPI.__init__(self, "access_token", 60)
self._thread_order = {}
self.use_default_order()
self._api_version_regex = re.compile('/v\d\.\d/')
def use_default_order(self):
self._thread_order = {
'999': {
None: data.START_999_JSON,
'12': data.UNTIL_12_999_JSON,
'1': data.LAST_PAGE_JSON
}
}
def use_full_update_order(self):
self._thread_order = {
'999': {
None: data.UPDATE_999_JSON,
'37': data.START_999_JSON,
'12': data.UNTIL_12_999_JSON,
'1': data.LAST_PAGE_JSON
}
}
def use_partial_update_order(self):
self._thread_order = {
'999': {
None: data.PARTIAL_UPDATE_999_JSON
}
}
def get_object(self, id, **kwargs):
id_array = re.sub(self._api_version_regex, '', id).split('/')
thread_id = id_array[0]
data_path = id_array[1:]
if 'until' in kwargs:
until = kwargs['until']
else:
until = None
thread_page = self._thread_order[thread_id][until]
for path in data_path:
thread_page = thread_page.get(path)
return thread_page
class FacebookThreadTestCase(unittest.TestCase):
def setUp(self):
self.test_thread = turkey_vulture.FacebookThread(MockGraphAPI(), '999')
class UpdateThreadConstructorTestCase(unittest.TestCase):
def setUp(self):
self.test_thread = turkey_vulture.FacebookThread(MockGraphAPI(), '999', '36')
class UpdateThreadTestCase(FacebookThreadTestCase):
def setUp(self):
super(UpdateThreadTestCase, self).setUp()
next_page_exists = self.test_thread.get_next_page()
while next_page_exists is True:
next_page_exists = self.test_thread.get_next_page()
class TestConstructThread(FacebookThreadTestCase):
def test_assign_participants(self):
self.assertEqual(8, len(self.test_thread.participants))
def test_assign_thread_id(self):
self.assertEqual('999', self.test_thread.thread_id)
def test_assign_posts(self):
self.assertListEqual(self.test_thread._data, self.test_thread.posts)
self.assertEqual(25, len(self.test_thread.posts))
def test_assign_latest_post_id(self):
self.assertEqual('36', self.test_thread._latest_post_id)
class TestGetNextPage(FacebookThreadTestCase):
def test_get_next_page(self):
self.assertTrue(self.test_thread.get_next_page())
self.assertEqual(36, len(self.test_thread.posts))
self.assertTrue(self.test_thread.get_next_page())
self.assertEqual(36, len(self.test_thread.posts))
self.assertFalse(self.test_thread.get_next_page())
class TestUpdateThread(UpdateThreadTestCase):
def test_no_update_thread(self):
self.assertFalse(self.test_thread.update_thread())
def test_update_thread(self):
self.test_thread._graph.use_full_update_order()
self.assertTrue(self.test_thread.update_thread())
self.assertTrue(self.test_thread.update_thread())
self.assertFalse(self.test_thread.update_thread())
self.assertEqual(61, len(self.test_thread.posts))
def test_partial_update_thread(self):
self.test_thread._graph.use_partial_update_order()
self.assertTrue(self.test_thread.update_thread())
self.assertFalse(self.test_thread.update_thread())
self.assertEqual(42, len(self.test_thread.posts))
class TestUpdateParticipants(UpdateThreadTestCase):
def test_update_no_new_participants(self):
self.test_thread.update_participants()
self.assertEqual(8, len(self.test_thread.participants))
def test_update_new_participants(self):
self.test_thread._graph.use_full_update_order()
self.test_thread.update_participants()
self.assertEqual(9, len(self.test_thread.participants))
def test_update_remove_participants(self):
self.test_thread._graph.use_partial_update_order()
self.test_thread.update_participants()
self.assertEqual(7, len(self.test_thread.participants))
class TestGetPostId(FacebookThreadTestCase):
def test_get_post_id(self):
self.assertEqual('12', turkey_vulture.FacebookThread._get_post_id(self.test_thread._data[0]))
class TestNextPageUrl(FacebookThreadTestCase):
def test_next_page_exists(self):
expected_url = 'https://graph.facebook.com/v2.3/999/comments?access_token=placeholder&limit=25&until=12&__paging_token=enc_AxccviosOthErPLaCEHoLDer'
self.assertEqual(expected_url, self.test_thread._next_page_url)
def test_next_page_does_not_exist(self):
self.test_thread.get_next_page()
self.test_thread.get_next_page()
self.assertEqual(None, self.test_thread._next_page_url)
class TestPopPosts(FacebookThreadTestCase):
def test_pop_posts(self):
old_posts = self.test_thread.posts
self.assertListEqual(old_posts, self.test_thread.pop_posts())
self.assertListEqual([], self.test_thread.posts)
class TestChangeAccessToken(FacebookThreadTestCase):
def test_change_access_token(self):
self.test_thread.change_access_token('Barry')
self.assertEquals('Barry', self.test_thread._graph.access_token)
class TestUpdateThreadIdConstructor(UpdateThreadConstructorTestCase):
def test_full_update(self):
self.test_thread._graph.use_full_update_order()
self.assertTrue(self.test_thread.update_thread())
self.assertTrue(self.test_thread.update_thread())
self.assertFalse(self.test_thread.update_thread())
self.assertEqual(25, len(self.test_thread.posts))
def test_partial_update(self):
self.test_thread._graph.use_partial_update_order()
self.assertTrue(self.test_thread.update_thread())
self.assertFalse(self.test_thread.update_thread())
self.assertEqual(6, len(self.test_thread.posts))
|
{
"content_hash": "a5be15e2f4a8df945d49a0ecee93f722",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 156,
"avg_line_length": 35.537142857142854,
"alnum_prop": 0.6579835986493006,
"repo_name": "GTmmiller/TurkeyVulture",
"id": "7013c453b1bc0590d33d39d7e48fd167daadc5ff",
"size": "6219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "51231"
}
],
"symlink_target": ""
}
|
''' Utility functions for algebra etc '''
from __future__ import division, print_function, absolute_import
import math
import numpy as np
import numpy.linalg as npl
# epsilon for testing whether a number is close to zero
_EPS = np.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
def sphere2cart(r, theta, phi):
''' Spherical to Cartesian coordinates
This is the standard physics convention where `theta` is the
inclination (polar) angle, and `phi` is the azimuth angle.
Imagine a sphere with center (0,0,0). Orient it with the z axis
running south-north, the y axis running west-east and the x axis
from posterior to anterior. `theta` (the inclination angle) is the
angle to rotate from the z-axis (the zenith) around the y-axis,
towards the x axis. Thus the rotation is counter-clockwise from the
point of view of positive y. `phi` (azimuth) gives the angle of
rotation around the z-axis towards the y axis. The rotation is
counter-clockwise from the point of view of positive z.
Equivalently, given a point P on the sphere, with coordinates x, y,
z, `theta` is the angle between P and the z-axis, and `phi` is
the angle between the projection of P onto the XY plane, and the X
axis.
Geographical nomenclature designates theta as 'co-latitude', and phi
as 'longitude'
Parameters
------------
r : array_like
radius
theta : array_like
inclination or polar angle
phi : array_like
azimuth angle
Returns
---------
x : array
x coordinate(s) in Cartesian space
y : array
y coordinate(s) in Cartesian space
z : array
z coordinate
Notes
--------
See these pages:
* http://en.wikipedia.org/wiki/Spherical_coordinate_system
* http://mathworld.wolfram.com/SphericalCoordinates.html
for excellent discussion of the many different conventions
possible. Here we use the physics conventions, used in the
wikipedia page.
Derivations of the formulae are simple. Consider a vector x, y, z of
length r (norm of x, y, z). The inclination angle (theta) can be
found from: cos(theta) == z / r -> z == r * cos(theta). This gives
the hypotenuse of the projection onto the XY plane, which we will
call Q. Q == r*sin(theta). Now x / Q == cos(phi) -> x == r *
sin(theta) * cos(phi) and so on.
We have deliberately named this function ``sphere2cart`` rather than
``sph2cart`` to distinguish it from the Matlab function of that
name, because the Matlab function uses an unusual convention for the
angles that we did not want to replicate. The Matlab function is
trivial to implement with the formulae given in the Matlab help.
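Examples
--------
A point on the positive x-axis: inclination pi/2 from the zenith,
zero azimuth:
>>> import numpy as np
>>> x, y, z = sphere2cart(1, np.pi / 2, 0)
>>> np.allclose([x, y, z], [1, 0, 0])
True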
'''
sin_theta = np.sin(theta)
x = r * np.cos(phi) * sin_theta
y = r * np.sin(phi) * sin_theta
z = r * np.cos(theta)
x, y, z = np.broadcast_arrays(x, y, z)
return x, y, z
def cart2sphere(x, y, z):
r''' Return angles for Cartesian 3D coordinates `x`, `y`, and `z`
See doc for ``sphere2cart`` for angle conventions and derivation
of the formulae.
$0\le\theta\mathrm{(theta)}\le\pi$ and $-\pi\le\phi\mathrm{(phi)}\le\pi$
Parameters
------------
x : array_like
x coordinate in Cartesian space
y : array_like
y coordinate in Cartesian space
z : array_like
z coordinate
Returns
---------
r : array
radius
theta : array
inclination (polar) angle
phi : array
azimuth angle
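Examples
--------
The north pole of the unit sphere has zero inclination and azimuth:
>>> import numpy as np
>>> r, theta, phi = cart2sphere(0, 0, 1)
>>> np.allclose([r, theta, phi], [1, 0, 0])
True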
'''
r = np.sqrt(x * x + y * y + z * z)
theta = np.arccos(z / r)
theta = np.where(r > 0, theta, 0.)
phi = np.arctan2(y, x)
r, theta, phi = np.broadcast_arrays(r, theta, phi)
return r, theta, phi
def sph2latlon(theta, phi):
"""Convert spherical coordinates to latitude and longitude.
Returns
-------
lat, lon : ndarray
Latitude and longitude.
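Examples
--------
The point theta = pi/2, phi = pi maps to latitude 0, longitude 0:
>>> import numpy as np
>>> lat, lon = sph2latlon(np.pi / 2, np.pi)
>>> float(lat), float(lon)
(0.0, 0.0)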
"""
return np.rad2deg(theta - np.pi / 2), np.rad2deg(phi - np.pi)
def normalized_vector(vec, axis=-1):
''' Return vector divided by its Euclidean (L2) norm
See :term:`unit vector` and :term:`Euclidean norm`
Parameters
------------
vec : array_like shape (3,)
Returns
----------
nvec : array shape (3,)
vector divided by L2 norm
Examples
-----------
>>> vec = [1, 2, 3]
>>> l2n = np.sqrt(np.dot(vec, vec))
>>> nvec = normalized_vector(vec)
>>> np.allclose(np.array(vec) / l2n, nvec)
True
>>> vec = np.array([[1, 2, 3]])
>>> vec.shape
(1, 3)
>>> normalized_vector(vec).shape
(1, 3)
'''
return vec / vector_norm(vec, axis, keepdims=True)
def vector_norm(vec, axis=-1, keepdims=False):
''' Return vector Euclidean (L2) norm
See :term:`unit vector` and :term:`Euclidean norm`
Parameters
-------------
vec : array_like
Vectors to norm.
axis : int
Axis over which to norm. By default norm over last axis. If `axis` is
None, `vec` is flattened then normed.
keepdims : bool
If True, the output will have the same number of dimensions as `vec`,
with shape 1 on `axis`.
Returns
---------
norm : array
Euclidean norms of vectors.
Examples
--------
>>> import numpy as np
>>> vec = [[8, 15, 0], [0, 36, 77]]
>>> vector_norm(vec)
array([ 17., 85.])
>>> vector_norm(vec, keepdims=True)
array([[ 17.],
[ 85.]])
>>> vector_norm(vec, axis=0)
array([ 8., 39., 77.])
'''
vec = np.asarray(vec)
vec_norm = np.sqrt((vec * vec).sum(axis))
if keepdims:
if axis is None:
shape = [1] * vec.ndim
else:
shape = list(vec.shape)
shape[axis] = 1
vec_norm = vec_norm.reshape(shape)
return vec_norm
def rodrigues_axis_rotation(r, theta):
""" Rodrigues formula
Rotation matrix for rotation around axis r for angle theta.
The rotation matrix is given by the Rodrigues formula:
R = Id + sin(theta)*Sn + (1-cos(theta))*Sn^2
with::
0 -nz ny
Sn = nz 0 -nx
-ny nx 0
where n = r / ||r||
In case the angle ||r|| is very small, the above formula may lead
to numerical instabilities. We instead use a Taylor expansion
around theta=0:
R = I + sin(theta)/theta * Sr + (1-cos(theta))/theta^2 * Sr^2
leading to:
R = I + (1 - theta^2/6)*Sr + (1/2 - theta^2/24)*Sr^2
Parameters
-----------
r : array_like shape (3,), axis
theta : float, angle in degrees
Returns
----------
R : array, shape (3,3), rotation matrix
Examples
---------
>>> import numpy as np
>>> from dipy.core.geometry import rodrigues_axis_rotation
>>> v=np.array([0,0,1])
>>> u=np.array([1,0,0])
>>> R=rodrigues_axis_rotation(v,40)
>>> ur=np.dot(R,u)
>>> np.round(np.rad2deg(np.arccos(np.dot(ur,u))))
40.0
"""
theta = np.deg2rad(theta)
if theta > 1e-30:
n = r / np.linalg.norm(r)
Sn = np.array([[0, -n[2], n[1]], [n[2], 0, -n[0]], [-n[1], n[0], 0]])
R = np.eye(3) + np.sin(theta) * Sn + \
(1 - np.cos(theta)) * np.dot(Sn, Sn)
else:
Sr = np.array([[0, -r[2], r[1]], [r[2], 0, -r[0]], [-r[1], r[0], 0]])
theta2 = theta * theta
R = np.eye(3) + (1 - theta2 / 6.) * \
Sr + (.5 - theta2 / 24.) * np.dot(Sr, Sr)
return R
def nearest_pos_semi_def(B):
''' Least squares positive semi-definite tensor estimation
Parameters
------------
B : (3,3) array_like
B matrix - symmetric. We do not check the symmetry.
Returns
---------
npds : (3,3) array
Estimated nearest positive semi-definite array to matrix `B`.
Examples
----------
>>> B = np.diag([1, 1, -1])
>>> nearest_pos_semi_def(B)
array([[ 0.75, 0. , 0. ],
[ 0. , 0.75, 0. ],
[ 0. , 0. , 0. ]])
References
----------
.. [1] Niethammer M, San Jose Estepar R, Bouix S, Shenton M, Westin CF.
On diffusion tensor estimation. Conf Proc IEEE Eng Med Biol Soc.
2006;1:2622-5. PubMed PMID: 17946125; PubMed Central PMCID:
PMC2791793.
'''
B = np.asarray(B)
vals, vecs = npl.eigh(B)
# indices of eigenvalues in descending order
inds = np.argsort(vals)[::-1]
vals = vals[inds]
cardneg = np.sum(vals < 0)
if cardneg == 0:
return B
if cardneg == 3:
return np.zeros((3, 3))
lam1a, lam2a, lam3a = vals
scalers = np.zeros((3,))
if cardneg == 2:
b112 = np.max([0, lam1a + (lam2a + lam3a) / 3.])
scalers[0] = b112
elif cardneg == 1:
lam1b = lam1a + 0.25 * lam3a
lam2b = lam2a + 0.25 * lam3a
if lam1b >= 0 and lam2b >= 0:
scalers[:2] = lam1b, lam2b
else: # one of the lam1b, lam2b is < 0
if lam2b < 0:
b111 = np.max([0, lam1a + (lam2a + lam3a) / 3.])
scalers[0] = b111
if lam1b < 0:
b221 = np.max([0, lam2a + (lam1a + lam3a) / 3.])
scalers[1] = b221
# resort the scalers to match the original vecs
scalers = scalers[np.argsort(inds)]
return np.dot(vecs, np.dot(np.diag(scalers), vecs.T))
def sphere_distance(pts1, pts2, radius=None, check_radius=True):
""" Distance across sphere surface between `pts1` and `pts2`
Parameters
------------
pts1 : (N,R) or (R,) array_like
where N is the number of points and R is the number of
coordinates defining a point (``R==3`` for 3D)
pts2 : (N,R) or (R,) array_like
where N is the number of points and R is the number of
coordinates defining a point (``R==3`` for 3D). It should be
possible to broadcast `pts1` against `pts2`
radius : None or float, optional
Radius of sphere. Default is to work out radius from mean of the
length of each point vector
check_radius : bool, optional
If True, check if the points are on the sphere surface - i.e
check if the vector lengths in `pts1` and `pts2` are close to
`radius`. Default is True.
Returns
---------
d : (N,) or (0,) array
Distances between corresponding points in `pts1` and `pts2`
across the spherical surface, i.e. the great circle distance
See also
----------
cart_distance : cartesian distance between points
vector_cosine : cosine of angle between vectors
Examples
----------
>>> print('%.4f' % sphere_distance([0,1],[1,0]))
1.5708
>>> print('%.4f' % sphere_distance([0,3],[3,0]))
4.7124
"""
pts1 = np.asarray(pts1)
pts2 = np.asarray(pts2)
lens1 = np.sqrt(np.sum(pts1 ** 2, axis=-1))
lens2 = np.sqrt(np.sum(pts2 ** 2, axis=-1))
if radius is None:
radius = (np.mean(lens1) + np.mean(lens2)) / 2.0
if check_radius:
if not (np.allclose(radius, lens1) and
np.allclose(radius, lens2)):
raise ValueError('Radii do not match sphere surface')
# Get angle with vector cosine
dots = np.inner(pts1, pts2)
lens = lens1 * lens2
angles = np.arccos(dots / lens)
return angles * radius
def cart_distance(pts1, pts2):
''' Cartesian distance between `pts1` and `pts2`
If either of `pts1` or `pts2` is 2D, then we take the first
dimension to index points, and the second indexes coordinate. More
generally, we take the last dimension to be the coordinate
dimension.
Parameters
----------
pts1 : (N,R) or (R,) array_like
where N is the number of points and R is the number of
coordinates defining a point (``R==3`` for 3D)
pts2 : (N,R) or (R,) array_like
where N is the number of points and R is the number of
coordinates defining a point (``R==3`` for 3D). It should be
possible to broadcast `pts1` against `pts2`
Returns
-------
d : (N,) or (0,) array
Cartesian distances between corresponding points in `pts1` and
`pts2`
See also
--------
sphere_distance : distance between points on sphere surface
Examples
----------
>>> cart_distance([0,0,0], [0,0,3])
3.0
'''
sqs = np.subtract(pts1, pts2) ** 2
return np.sqrt(np.sum(sqs, axis=-1))
def vector_cosine(vecs1, vecs2):
""" Cosine of angle between two (sets of) vectors
The cosine of the angle between two vectors ``v1`` and ``v2`` is
given by the inner product of ``v1`` and ``v2`` divided by the
product of the vector lengths::
v_cos = np.inner(v1, v2) / (np.sqrt(np.sum(v1**2)) *
np.sqrt(np.sum(v2**2)))
Parameters
-------------
vecs1 : (N, R) or (R,) array_like
N vectors (as rows) or single vector. Vectors have R elements.
vecs2 : (N, R) or (R,) array_like
N vectors (as rows) or single vector. Vectors have R elements.
It should be possible to broadcast `vecs1` against `vecs2`
Returns
----------
vcos : (N,) or (0,) array
Vector cosines. To get the angles you will need ``np.arccos``
Notes
--------
The vector cosine will be the same as the correlation only if all
the input vectors have zero mean.
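Examples
--------
Orthogonal vectors have cosine 0, parallel vectors cosine 1:
>>> float(vector_cosine([1, 0, 0], [0, 1, 0]))
0.0
>>> float(vector_cosine([1, 0, 0], [2, 0, 0]))
1.0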
"""
vecs1 = np.asarray(vecs1)
vecs2 = np.asarray(vecs2)
lens1 = np.sqrt(np.sum(vecs1 ** 2, axis=-1))
lens2 = np.sqrt(np.sum(vecs2 ** 2, axis=-1))
dots = np.inner(vecs1, vecs2)
lens = lens1 * lens2
return dots / lens
def lambert_equal_area_projection_polar(theta, phi):
r""" Lambert Equal Area Projection from polar sphere to plane
Return positions in (y1,y2) plane corresponding to the points
with polar coordinates (theta, phi) on the unit sphere, under the
Lambert Equal Area Projection mapping (see Mardia and Jupp (2000),
Directional Statistics, p. 161).
See doc for ``sphere2cart`` for angle conventions
- $0 \le \theta \le \pi$ and $0 \le \phi \le 2 \pi$
- $|(y_1,y_2)| \le 2$
The Lambert EAP maps the upper hemisphere to the planar disc of radius 1
and the lower hemisphere to the planar annulus between radii 1 and 2,
and *vice versa*.
Parameters
----------
theta : array_like
theta spherical coordinates
phi : array_like
phi spherical coordinates
Returns
---------
y : (N,2) array
planar coordinates of points following mapping by Lambert's EAP.
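Examples
--------
The north pole (theta=0) maps to the origin and the south pole
(theta=pi) to the circle of radius 2:
>>> import numpy as np
>>> y = lambert_equal_area_projection_polar(np.array([0., np.pi]), np.array([0., 0.]))
>>> np.allclose(y, [[0, 0], [2, 0]])
True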
"""
return 2 * np.repeat(np.sin(theta / 2), 2).reshape((theta.shape[0], 2)) * np.column_stack((np.cos(phi), np.sin(phi)))
def lambert_equal_area_projection_cart(x, y, z):
r''' Lambert Equal Area Projection from cartesian vector to plane
Return positions in $(y_1,y_2)$ plane corresponding to the
directions of the vectors with cartesian coordinates xyz under the
Lambert Equal Area Projection mapping (see Mardia and Jupp (2000),
Directional Statistics, p. 161).
The Lambert EAP maps the upper hemisphere to the planar disc of radius 1
and the lower hemisphere to the planar annulus between radii 1 and 2,
and *vice versa*.
See doc for ``sphere2cart`` for angle conventions
Parameters
------------
x : array_like
x coordinate in Cartesian space
y : array_like
y coordinate in Cartesian space
z : array_like
z coordinate
Returns
----------
y : (N,2) array
planar coordinates of points following mapping by Lambert's EAP.
'''
(r, theta, phi) = cart2sphere(x, y, z)
return lambert_equal_area_projection_polar(theta, phi)
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
Code modified from the work of Christoph Gohlke, available at
http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
Parameters
------------
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
Returns
---------
matrix : ndarray (4, 4)
Examples
--------
>>> import numpy
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i + parity]
k = _NEXT_AXIS[i - parity + 1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
M = np.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj * si
M[i, k] = sj * ci
M[j, i] = sj * sk
M[j, j] = -cj * ss + cc
M[j, k] = -cj * cs - sc
M[k, i] = -sj * ck
M[k, j] = cj * sc + cs
M[k, k] = cj * cc - ss
else:
M[i, i] = cj * ck
M[i, j] = sj * sc - cs
M[i, k] = sj * cc + ss
M[j, i] = cj * sk
M[j, j] = sj * ss + cc
M[j, k] = sj * cs - sc
M[k, i] = -sj
M[k, j] = cj * si
M[k, k] = cj * ci
return M
def compose_matrix(scale=None, shear=None, angles=None, translate=None, perspective=None):
"""Return 4x4 transformation matrix from sequence of
transformations.
Code modified from the work of Christoph Gohlke, available at
http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
This is the inverse of the ``decompose_matrix`` function.
Parameters
-------------
scale : (3,) array_like
Scaling factors.
shear : array_like
Shear factors for x-y, x-z, y-z axes.
angles : array_like
Euler angles about static x, y, z axes.
translate : array_like
Translation vector along x, y, z axes.
perspective : array_like
Perspective partition of matrix.
Returns
---------
matrix : 4x4 array
Examples
----------
>>> import math
>>> import numpy as np
>>> import dipy.core.geometry as gm
>>> scale = np.random.random(3) - 0.5
>>> shear = np.random.random(3) - 0.5
>>> angles = (np.random.random(3) - 0.5) * (2*math.pi)
>>> trans = np.random.random(3) - 0.5
>>> persp = np.random.random(4) - 0.5
>>> M0 = gm.compose_matrix(scale, shear, angles, trans, persp)
"""
M = np.identity(4)
if perspective is not None:
P = np.identity(4)
P[3, :] = perspective[:4]
M = np.dot(M, P)
if translate is not None:
T = np.identity(4)
T[:3, 3] = translate[:3]
M = np.dot(M, T)
if angles is not None:
R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
M = np.dot(M, R)
if shear is not None:
Z = np.identity(4)
Z[1, 2] = shear[2]
Z[0, 2] = shear[1]
Z[0, 1] = shear[0]
M = np.dot(M, Z)
if scale is not None:
S = np.identity(4)
S[0, 0] = scale[0]
S[1, 1] = scale[1]
S[2, 2] = scale[2]
M = np.dot(M, S)
M /= M[3, 3]
return M
def decompose_matrix(matrix):
"""Return sequence of transformations from transformation matrix.
Code modified from the excellent work of Christoph Gohlke, available at
http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
Parameters
------------
matrix : array_like
Non-degenerative homogeneous transformation matrix
Returns
---------
scale : (3,) ndarray
Three scaling factors.
shear : (3,) ndarray
Shear factors for x-y, x-z, y-z axes.
angles : (3,) ndarray
Euler angles about static x, y, z axes.
translate : (3,) ndarray
Translation vector along x, y, z axes.
perspective : ndarray
Perspective partition of matrix.
Raises
------
ValueError
If matrix is of wrong type or degenerative.
Examples
-----------
>>> import numpy as np
>>> T0=np.diag([2,1,1,1])
>>> scale, shear, angles, trans, persp = decompose_matrix(T0)
"""
M = np.array(matrix, dtype=np.float64, copy=True).T
if abs(M[3, 3]) < _EPS:
raise ValueError("M[3, 3] is zero")
M /= M[3, 3]
P = M.copy()
P[:, 3] = 0, 0, 0, 1
if not np.linalg.det(P):
raise ValueError("matrix is singular")
scale = np.zeros((3, ), dtype=np.float64)
shear = [0, 0, 0]
angles = [0, 0, 0]
if any(abs(M[:3, 3]) > _EPS):
perspective = np.dot(M[:, 3], np.linalg.inv(P.T))
M[:, 3] = 0, 0, 0, 1
else:
perspective = np.array((0, 0, 0, 1), dtype=np.float64)
translate = M[3, :3].copy()
M[3, :3] = 0
row = M[:3, :3].copy()
scale[0] = vector_norm(row[0])
row[0] /= scale[0]
shear[0] = np.dot(row[0], row[1])
row[1] -= row[0] * shear[0]
scale[1] = vector_norm(row[1])
row[1] /= scale[1]
shear[0] /= scale[1]
shear[1] = np.dot(row[0], row[2])
row[2] -= row[0] * shear[1]
shear[2] = np.dot(row[1], row[2])
row[2] -= row[1] * shear[2]
scale[2] = vector_norm(row[2])
row[2] /= scale[2]
shear[1:] /= scale[2]
if np.dot(row[0], np.cross(row[1], row[2])) < 0:
scale *= -1
row *= -1
angles[1] = math.asin(-row[0, 2])
if math.cos(angles[1]):
angles[0] = math.atan2(row[1, 2], row[2, 2])
angles[2] = math.atan2(row[0, 1], row[0, 0])
else:
#angles[0] = math.atan2(row[1, 0], row[1, 1])
angles[0] = math.atan2(-row[2, 1], row[1, 1])
angles[2] = 0.0
return scale, shear, angles, translate, perspective
def circumradius(a, b, c):
''' a, b and c are 3-dimensional vectors which are the vertices of a
triangle. The function returns the circumradius of the triangle, i.e
the radius of the smallest circle that can contain the triangle. In
the degenerate case when the 3 points are collinear it returns
half the distance between the furthest apart points.
Parameters
----------
a, b, c : (3,) array_like
the three vertices of the triangle
Returns
-------
circumradius : float
the desired circumradius
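Examples
--------
Three points on the unit circle in the XY plane:
>>> import numpy as np
>>> a, b = np.array([1., 0, 0]), np.array([0, 1., 0])
>>> c = np.array([-1., 0, 0])
>>> float(circumradius(a, b, c))
1.0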
'''
x = a - c
xx = np.linalg.norm(x) ** 2
y = b - c
yy = np.linalg.norm(y) ** 2
z = np.cross(x, y)
# test for collinearity
if np.linalg.norm(z) == 0:
# degenerate case: half the distance between the furthest-apart points
return np.sqrt(max(np.dot(x, x), np.dot(y, y), np.dot(a - b, a - b))) / 2.
else:
# the circumcenter w (relative to c) satisfies w.x = |x|^2 / 2,
# w.y = |y|^2 / 2 and w.z = 0 (it lies in the plane of the triangle)
m = np.vstack((x, y, z))
w = np.dot(np.linalg.inv(m), np.array([xx / 2., yy / 2., 0]))
return np.linalg.norm(w)
def vec2vec_rotmat(u, v):
r""" rotation matrix from 2 unit vectors
u,v being unit 3d vectors return a 3x3 rotation matrix R than aligns u to v.
In general there are many rotations that will map u to v. If S is any
rotation using v as an axis then R.S will also map u to v since (S.R)u =
S(Ru) = Sv = v. The rotation R returned by vec2vec_rotmat leaves fixed the
perpendicular to the plane spanned by u and v.
The transpose of R will align v to u.
Parameters
-----------
u : array, shape(3,)
v : array, shape(3,)
Returns
---------
R : array, shape(3,3)
Examples
---------
>>> import numpy as np
>>> from dipy.core.geometry import vec2vec_rotmat
>>> u=np.array([1,0,0])
>>> v=np.array([0,1,0])
>>> R=vec2vec_rotmat(u,v)
>>> np.dot(R,u)
array([ 0., 1., 0.])
>>> np.dot(R.T,v)
array([ 1., 0., 0.])
"""
norm_u_v = np.linalg.norm(u - v)
# return identity when u is the same as v
if norm_u_v < np.finfo(float).eps:
return np.eye(3)
# This is the case of two antipodal vectors:
if norm_u_v == 2.0:
return -np.eye(3)
w = np.cross(u, v)
w = w / np.linalg.norm(w)
# vp is in plane of u,v, perpendicular to u
vp = (v - (np.dot(u, v) * u))
vp = vp / np.linalg.norm(vp)
# (u vp w) is an orthonormal basis
P = np.array([u, vp, w])
Pt = P.T
cosa = np.dot(u, v)
sina = np.sqrt(1 - cosa ** 2)
R = np.array([[cosa, -sina, 0], [sina, cosa, 0], [0, 0, 1]])
Rp = np.dot(Pt, np.dot(R, P))
# make sure that you don't return any Nans
if np.sum(np.isnan(Rp)) > 0:
return np.eye(3)
return Rp
def compose_transformations(*mats):
""" Compose multiple 4x4 affine transformations in one 4x4 matrix
Parameters
-----------
mat1 : array, (4, 4)
mat2 : array, (4, 4)
...
matN : array, (4, 4)
Returns
-------
matN x ... x mat2 x mat1 : array, (4, 4)
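Examples
--------
Composing a translation with itself doubles the offset:
>>> import numpy as np
>>> T = np.eye(4)
>>> T[:3, 3] = 1.
>>> np.allclose(compose_transformations(T, T)[:3, 3], 2)
True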
"""
prev = mats[0]
if len(mats) < 2:
raise ValueError('At least two or more matrices are needed')
for mat in mats[1:]:
prev = np.dot(mat, prev)
return prev
|
{
"content_hash": "7b71b8419448c04856ee2f9e45632d8f",
"timestamp": "",
"source": "github",
"line_count": 898,
"max_line_length": 121,
"avg_line_length": 29.41314031180401,
"alnum_prop": 0.5653276795517359,
"repo_name": "beni55/dipy",
"id": "b7563cc477d590568fa54cda802ef2b597acfb53",
"size": "26413",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dipy/core/geometry.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2694"
},
{
"name": "Makefile",
"bytes": "3639"
},
{
"name": "Python",
"bytes": "2138637"
}
],
"symlink_target": ""
}
|
'''
This library defines methods to work with the midi file
'''
from logbook import Logger
import struct
log = Logger('snail')
DEFAULT_MIDI_HEADER_LEN = 14
def parse_file_header(infile):
'''
Parses and returns parts of the midi header.
:param infile: a file-like object open for binary reading
returns:
header_len: in bytes, in addition to the 4 for the declaration and the 4 for this number
midi_format: the midi format (0, 1 or 2) http://en.wikipedia.org/wiki/MIDI#Standard_MIDI_files
track_count: the number of tracks specified by the header
resolution: the PPQN http://en.wikipedia.org/wiki/Pulses_per_quarter_note
'''
try:
# First four bytes are MIDI header declaration
midi_header_declaration = infile.read(4)
if midi_header_declaration != b'MThd':
err_text = "This is not a midi header - wrong declaration"
log.critical(err_text)
raise TypeError(err_text)
# next four bytes are header size in bytes
header_len = struct.unpack(">L", infile.read(4))[0] # always a tuple
log.info('Header size: 8 + {}'.format(header_len))
# next two bytes specify the format version
midi_format = struct.unpack(">H", infile.read(2))[0] # always a tuple
log.info('Midi file format: {}'.format(midi_format))
if midi_format not in (0, 1, 2):
err_text = "This is not a midi header - incorrect midi format"
log.critical(err_text)
raise TypeError(err_text)
# next two bytes specify the number of tracks
track_ct = 0
track_ct = struct.unpack(">H", infile.read(2))[0] # always a tuple
log.info('Number of tracks: {}'.format(track_ct))
# next two bytes specify the resolution/PPQ/Parts Per Quarter
# (in other words, how many ticks per quarter note)
resolution = 0
resolution = struct.unpack(">H", infile.read(2))[0] # always a tuple
log.info('Resolution: {}'.format(resolution))
# NOTE: the assumption is that any remaining bytes
# in the header are padding
# NOTE: remember the 4 from the declaration and 4 for the size
if (header_len + 8) > DEFAULT_MIDI_HEADER_LEN:
infile.read(header_len + 8 - DEFAULT_MIDI_HEADER_LEN)
except Exception as ex:
log.critical(ex)
raise ex
return header_len, midi_format, track_ct, resolution
def build_file_header(midi_format, track_ct, resolution):
'''
Receives info regarding a midi file
:param midi_format: the midi format (0, 1 or 2) http://en.wikipedia.org/wiki/MIDI#Standard_MIDI_files
:param track_count: the number of tracks specified by the header
:param resolution: the PPQN http://en.wikipedia.org/wiki/Pulses_per_quarter_note
:returns
a :class:`bytes` object of length 14
'''
try:
rslt_li = []
# First four bytes are MIDI header declaration
rslt_li.append(b'MThd')
# next four bytes are header size in bytes
# the standard header body is 6 bytes (format, track count, resolution)
rslt_li.append(struct.pack(">L", 6))
# next two bytes specify the format version
if midi_format not in (0, 1, 2):
err_text = "{} is not a valid midi format - only (0, 1 or 2)".format(midi_format)
log.critical(err_text)
raise TypeError(err_text)
rslt_li.append(struct.pack(">H", midi_format))
# next two bytes specify the number of tracks
rslt_li.append(struct.pack(">H", track_ct))
# next two bytes specify the resolution/PPQ/Parts Per Quarter
# (in other words, how many ticks per quarter note)
rslt_li.append(struct.pack(">H", resolution))
except Exception as ex:
log.critical(ex)
raise ex
byte_li = b''.join(rslt_li)
return byte_li
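if __name__ == "__main__":
# Minimal round-trip sketch (illustrative values): build a header for a
# format-1 file with two tracks at 480 PPQN, then parse it back from an
# in-memory stream.
import io
raw = build_file_header(1, 2, 480)
assert raw[:4] == b'MThd' and len(raw) == DEFAULT_MIDI_HEADER_LEN
print(parse_file_header(io.BytesIO(raw)))  # -> (6, 1, 2, 480)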
|
{
"content_hash": "e2b27ea8b0128b5ad4a4d73edf7d1b46",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 105,
"avg_line_length": 36.13084112149533,
"alnum_prop": 0.6259699948266942,
"repo_name": "sjzabel/snail",
"id": "2468f35077c8a7ac5c1f610aa5cb8b4770554b23",
"size": "3866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snail/file.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "21496"
}
],
"symlink_target": ""
}
|
from hwt.interfaces.utils import addClkRstn
from hwt.synthesizer.param import Param
from hwtHls.hlsStreamProc.streamProc import HlsStreamProc
from hwtLib.examples.statements.ifStm import SimpleIfStatement
class SimpleIfStatementHls(SimpleIfStatement):
def _config(self):
self.CLK_FREQ = Param(int(100e6))
def _declr(self):
addClkRstn(self)
self.clk.FREQ = self.CLK_FREQ
super(SimpleIfStatementHls, self)._declr()
def _impl(self):
hls = HlsStreamProc(self)
r = hls.read
a = r(self.a)
b = r(self.b)
c = r(self.c)
tmp = hls.var("tmp", self.d._dtype)
hls.thread(
hls.While(True,
a, b, c,
hls.If(a,
tmp(b),
).Elif(b,
tmp(c),
).Else(
tmp(c)
),
hls.write(tmp, self.d)
)
)
hls.compile()
if __name__ == "__main__":
from hwtHls.platform.virtual import VirtualHlsPlatform, makeDebugPasses
from hwt.synthesizer.utils import to_rtl_str
u = SimpleIfStatementHls()
p = VirtualHlsPlatform(**makeDebugPasses("tmp"))
print(to_rtl_str(u, target_platform=p))
|
{
"content_hash": "3c25199fc51fcfe99f2656db43a6dddf",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 75,
"avg_line_length": 26.893617021276597,
"alnum_prop": 0.5522151898734177,
"repo_name": "Nic30/hwtHls",
"id": "90e5fde708adbeb6098de2b2e0bead2f95875b6a",
"size": "1312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/syntaxElements/ifstm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1001"
},
{
"name": "C++",
"bytes": "131805"
},
{
"name": "Dockerfile",
"bytes": "1731"
},
{
"name": "LLVM",
"bytes": "74517"
},
{
"name": "Meson",
"bytes": "2683"
},
{
"name": "Python",
"bytes": "739246"
}
],
"symlink_target": ""
}
|
import itertools
import random
import string
import uuid
from debtcollector import removals
import netaddr
from oslo_utils import netutils
from oslo_utils import uuidutils
import six.moves
def rand_uuid():
"""Generate a random UUID string
:return: a random UUID (e.g. '1dc12c7d-60eb-4b61-a7a2-17cf210155b6')
:rtype: string
"""
return uuidutils.generate_uuid()
def rand_uuid_hex():
"""Generate a random UUID hex string
:return: a random UUID (e.g. '0b98cf96d90447bda4b46f31aeb1508c')
:rtype: string
"""
return uuid.uuid4().hex
def rand_name(name='', prefix='tempest'):
"""Generate a random name that includes a random number
:param str name: The name that you want to include
:param str prefix: The prefix that you want to include
:return: a random name. The format is
'<prefix>-<name>-<random number>'.
(e.g. 'prefixfoo-namebar-154876201')
:rtype: string
"""
randbits = str(random.randint(1, 0x7fffffff))
rand_name = randbits
if name:
rand_name = name + '-' + rand_name
if prefix:
rand_name = prefix + '-' + rand_name
return rand_name
def rand_password(length=15):
"""Generate a random password
:param int length: The length of password that you expect to set
(If it's smaller than 3, it's treated as 3.)
:return: a random password. The format is
'<random upper letter>-<random number>-<random special character>
-<random ascii letters or digit characters or special symbols>'
(e.g. 'G2*ac8&lKFFgh%2')
:rtype: string
"""
upper = random.choice(string.ascii_uppercase)
ascii_char = string.ascii_letters
digits = string.digits
digit = random.choice(string.digits)
puncs = '~!@#%^&*_=+'
punc = random.choice(puncs)
seed = ascii_char + digits + puncs
pre = upper + digit + punc
password = pre + ''.join(random.choice(seed) for x in range(length - 3))
return password
def rand_url():
"""Generate a random url that includes a random number
:return: a random url. The format is 'https://url-<random number>.com'.
(e.g. 'https://url-154876201.com')
:rtype: string
"""
randbits = str(random.randint(1, 0x7fffffff))
return 'https://url-' + randbits + '.com'
def rand_int_id(start=0, end=0x7fffffff):
"""Generate a random integer value
:param int start: The value that you expect to start here
:param int end: The value that you expect to end here
:return: a random integer value
:rtype: int
"""
return random.randint(start, end)
def rand_mac_address():
"""Generate an Ethernet MAC address
:return: a random Ethernet MAC address
:rtype: string
"""
# NOTE(vish): We would prefer to use 0xfe here to ensure that linux
# bridge mac addresses don't change, but it appears to
# conflict with libvirt, so we use the next highest octet
# that has the unicast and locally administered bits set
# properly: 0xfa.
# Discussion: https://bugs.launchpad.net/nova/+bug/921838
mac = [0xfa, 0x16, 0x3e,
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return ':'.join(["%02x" % x for x in mac])
def rand_infiniband_guid_address():
"""Generate an Infiniband GUID address
:return: a random Infiniband GUID address
:rtype: string
"""
guid = []
for i in range(8):
guid.append("%02x" % random.randint(0x00, 0xff))
return ':'.join(guid)
def parse_image_id(image_ref):
"""Return the image id from a given image ref
    This function returns the last component of the given image ref string,
    splitting on '/'.
:param str image_ref: a string that includes the image id
:return: the image id string
:rtype: string
"""
return image_ref.rsplit('/')[-1]
def arbitrary_string(size=4, base_text=None):
"""Return size characters from base_text
This generates a string with an arbitrary number of characters, generated
by looping the base_text string. If the size is smaller than the size of
base_text, returning string is shrunk to the size.
    :param int size: the number of characters to return
:param str base_text: a string you want to repeat
:return: size string
:rtype: string
"""
if not base_text:
base_text = 'test'
return ''.join(itertools.islice(itertools.cycle(base_text), size))
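# A minimal usage sketch (illustrative helper, not part of the tempest API):
# the base text is cycled to grow the string and truncated to shrink it.
def _example_arbitrary_string():
    assert arbitrary_string(10, 'ab') == 'ababababab'
    assert arbitrary_string(2, 'test') == 'te'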
def random_bytes(size=1024):
"""Return size randomly selected bytes as a string
    :param int size: the number of bytes to return
    :return: size randomly selected bytes
:rtype: string
"""
return b''.join([six.int2byte(random.randint(0, 255))
for i in range(size)])
@removals.remove(
message="use get_ipv6_addr_by_EUI64 from oslo_utils.netutils",
version="Newton",
removal_version="Ocata")
def get_ipv6_addr_by_EUI64(cidr, mac):
"""Generate a IPv6 addr by EUI-64 with CIDR and MAC
:param str cidr: a IPv6 CIDR
:param str mac: a MAC address
:return: an IPv6 Address
:rtype: netaddr.IPAddress
"""
# Check if the prefix is IPv4 address
is_ipv4 = netutils.is_valid_ipv4(cidr)
if is_ipv4:
msg = "Unable to generate IP address by EUI64 for IPv4 prefix"
raise TypeError(msg)
try:
eui64 = int(netaddr.EUI(mac).eui64())
prefix = netaddr.IPNetwork(cidr)
return netaddr.IPAddress(prefix.first + eui64 ^ (1 << 57))
except (ValueError, netaddr.AddrFormatError):
raise TypeError('Bad prefix or mac format for generating IPv6 '
'address by EUI-64: %(prefix)s, %(mac)s:'
% {'prefix': cidr, 'mac': mac})
except TypeError:
        raise TypeError('Bad prefix type for generating IPv6 address by '
                        'EUI-64: %s' % cidr)
# Courtesy of http://stackoverflow.com/a/312464
def chunkify(sequence, chunksize):
"""Yield successive chunks from `sequence`."""
for i in range(0, len(sequence), chunksize):
yield sequence[i:i + chunksize]
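# A minimal usage sketch (illustrative helper, not part of the tempest API):
# the final chunk may be shorter than `chunksize`.
def _example_chunkify():
    assert list(chunkify('abcdefg', 3)) == ['abc', 'def', 'g']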
|
{
"content_hash": "a15c9540ce974828bed4c175655635ab",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 78,
"avg_line_length": 31.185929648241206,
"alnum_prop": 0.6342249436029649,
"repo_name": "vedujoshi/tempest",
"id": "a0941ef50b09db16f06c56b5f2ead4c0ada2ed60",
"size": "6842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/lib/common/utils/data_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4036844"
},
{
"name": "Shell",
"bytes": "11449"
}
],
"symlink_target": ""
}
|
from sklearn.svm import LinearSVC, SVC
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
from sklearn import feature_selection
import warnings
import json
import os
from sklearn.externals import joblib
import pickle
import numpy as np
# My helpers
from preprocess import extractFeatures
from preprocess import sampleSet
def readPredMeta(sensor):
path = predictionDir(sensor) + 'meta.json'
meta = json.loads(open(path,'r').read())
return meta
def generatePredictionData(meta,dataDir):
    allFeatures = None
    for sample in meta:
        # Extract features from the raw sample
        rawData = np.genfromtxt(dataDir + sample['filename'], delimiter=',')
        features = extractFeatures(rawData)
        # Stack features; comparing an ndarray to [] is ambiguous in numpy,
        # so track the empty case with None instead
        if allFeatures is None:
            allFeatures = features
        else:
            allFeatures = np.vstack((allFeatures, features))
return allFeatures
def saveObject(obj, filename):
with open(filename, 'wb') as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def modelDir(sensor):
return 'sensors/' + sensor + '/model/'
def dataDir(sensor):
return 'sensors/' + sensor + '/samples/'
def predictionDir(sensor):
return 'sensors/' + sensor + '/predictions/'
# Read a meta file
def readMeta(sensor):
path = dataDir(sensor) + 'meta.json'
meta = json.loads(open(path,'r').read())
return meta
# Generate a list of labels from meta.json
def generateLabels(meta):
labels = []
for sample in meta:
labels.append(sample['label'])
    # Remove duplicates while preserving order
output = []
for i in labels:
if not i in output:
output.append(i)
return output
def generateData(meta,labels,dataDir):
    allFeatures = None
    allLabels = []
    for sample in meta:
        # Extract features and a label
        rawData = np.genfromtxt(dataDir + sample['filename'], delimiter=',')
        features = extractFeatures(rawData)
        indexedLabel = labels.index(sample['label'])
        # Stack features; use None rather than comparing an ndarray to []
        if allFeatures is None:
            allFeatures = features
        else:
            allFeatures = np.vstack((allFeatures, features))
        allLabels.append(indexedLabel)
data = sampleSet(allFeatures,allLabels)
return data
# Train a classifer for a given sensor and save it
def trainClassifier(sensor):
    # Load meta information about the samples
meta = readMeta(sensor)
    # Generate a training set
labels = generateLabels(meta)
data = generateData(meta, labels, dataDir(sensor))
# Prescaling
    # StandardScaler is the current name for the old preprocessing.Scaler
    scaler = preprocessing.StandardScaler().fit(data.features)
scaledFeatures = scaler.transform(data.features)
# Feature selection
selector = feature_selection.SelectKBest(feature_selection.f_regression).fit(scaledFeatures, data.labels)
selectedFeatures = selector.transform(scaledFeatures)
# Train a classifier
clf = SVC(kernel='linear', C=1).fit(selectedFeatures, data.labels)
# Save to files
if not os.path.exists(modelDir(sensor)):
os.makedirs(modelDir(sensor))
joblib.dump(clf, modelDir(sensor) + 'model.pkl')
joblib.dump(scaler, modelDir(sensor) + 'scaler.pkl')
joblib.dump(selector, modelDir(sensor) + 'selector.pkl')
saveObject(labels, modelDir(sensor)+'labels.pkl')
def crossValidation(sensor):
meta = readMeta(sensor)
    # Generate a training set
labels = generateLabels(meta)
data = generateData(meta, labels, dataDir(sensor))
# Prescaling
    # StandardScaler is the current name for the old preprocessing.Scaler
    scaler = preprocessing.StandardScaler().fit(data.features)
scaledFeatures = scaler.transform(data.features)
# Feature selection
selector = feature_selection.SelectKBest(feature_selection.f_regression).fit(scaledFeatures, data.labels)
selectedFeatures = selector.transform(scaledFeatures)
# Train a classifier
clf = SVC(kernel='linear', C=1)
scores = cross_val_score(clf, selectedFeatures, data.labels, cv=5)
return scores
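# A minimal prediction sketch, assuming the artifacts written by
# trainClassifier() and a predictions meta.json as read by readPredMeta().
# The helper name `predictSamples` is hypothetical; the original module does
# not define it.
def predictSamples(sensor):
    clf = joblib.load(modelDir(sensor) + 'model.pkl')
    scaler = joblib.load(modelDir(sensor) + 'scaler.pkl')
    selector = joblib.load(modelDir(sensor) + 'selector.pkl')
    with open(modelDir(sensor) + 'labels.pkl', 'rb') as f:
        labels = pickle.load(f)
    meta = readPredMeta(sensor)
    features = generatePredictionData(meta, predictionDir(sensor))
    # Apply the same scaling and feature selection used during training
    selected = selector.transform(scaler.transform(features))
    return [labels[i] for i in clf.predict(selected)]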
# Main function
if __name__ == "__main__":
warnings.filterwarnings('ignore')
sensor = 'knocking'
trainClassifier(sensor)
crossValidation(sensor)
print "Finished training"
|
{
"content_hash": "9f6706177cf86a9be9b588ee7ffdff3c",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 109,
"avg_line_length": 28.038961038961038,
"alnum_prop": 0.6850393700787402,
"repo_name": "IoT-Expedition/Edge-Analytics",
"id": "3eda3135cb7c7efc87b237f6c0e8da19d349dcd5",
"size": "4338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "virtual_sensor/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "2301"
},
{
"name": "Python",
"bytes": "99164"
}
],
"symlink_target": ""
}
|
from setuptools import find_packages, setup
from channels_api import __version__
setup(
name='channels_api',
version=__version__,
url='https://github.com/linuxlewis/channels-api',
author='Sam Bolgert',
author_email='sbolgert@gmail.com',
description="Helps build a RESTful API on top of WebSockets using channels.",
long_description=open('README.rst').read(),
license='MIT',
packages=find_packages(),
include_package_data=True,
install_requires=[
'Django>=1.8',
'channels<=1.1.8.1',
'djangorestframework>=3.0'
],
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
|
{
"content_hash": "f968d3984e27389b6b549464ab98702d",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 81,
"avg_line_length": 30.96153846153846,
"alnum_prop": 0.6211180124223602,
"repo_name": "linuxlewis/channels-api",
"id": "4c7148edc36152fecf84b528fae7f5f3437f8b1f",
"size": "805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "694"
},
{
"name": "Python",
"bytes": "29661"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import distutils.util
try:
from importlib.machinery import EXTENSION_SUFFIXES
except ImportError: # pragma: no cover
import imp
EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()]
del imp
import logging
import os
import platform
import re
import struct
import sys
import sysconfig
import warnings
from ._typing import MYPY_CHECK_RUNNING, cast
if MYPY_CHECK_RUNNING: # pragma: no cover
from typing import (
Dict,
FrozenSet,
IO,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
)
PythonVersion = Sequence[int]
MacVersion = Tuple[int, int]
GlibcVersion = Tuple[int, int]
logger = logging.getLogger(__name__)
INTERPRETER_SHORT_NAMES = {
"python": "py", # Generic.
"cpython": "cp",
"pypy": "pp",
"ironpython": "ip",
"jython": "jy",
} # type: Dict[str, str]
_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
class Tag(object):
__slots__ = ["_interpreter", "_abi", "_platform"]
def __init__(self, interpreter, abi, platform):
# type: (str, str, str) -> None
self._interpreter = interpreter.lower()
self._abi = abi.lower()
self._platform = platform.lower()
@property
def interpreter(self):
# type: () -> str
return self._interpreter
@property
def abi(self):
# type: () -> str
return self._abi
@property
def platform(self):
# type: () -> str
return self._platform
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, Tag):
return NotImplemented
return (
(self.platform == other.platform)
and (self.abi == other.abi)
and (self.interpreter == other.interpreter)
)
def __hash__(self):
# type: () -> int
return hash((self._interpreter, self._abi, self._platform))
def __str__(self):
# type: () -> str
return "{}-{}-{}".format(self._interpreter, self._abi, self._platform)
def __repr__(self):
# type: () -> str
return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
def parse_tag(tag):
# type: (str) -> FrozenSet[Tag]
tags = set()
interpreters, abis, platforms = tag.split("-")
for interpreter in interpreters.split("."):
for abi in abis.split("."):
for platform_ in platforms.split("."):
tags.add(Tag(interpreter, abi, platform_))
return frozenset(tags)
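# A minimal usage sketch (the `_example_*` helpers in this file are
# illustrative only, not part of the packaging API): a compressed tag string
# expands to the cross product of its dot-separated components.
def _example_parse_tag():
    # type: () -> None
    tags = parse_tag("cp37-cp37m.abi3-manylinux1_x86_64")
    assert sorted(str(t) for t in tags) == [
        "cp37-abi3-manylinux1_x86_64",
        "cp37-cp37m-manylinux1_x86_64",
    ]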
def _warn_keyword_parameter(func_name, kwargs):
# type: (str, Dict[str, bool]) -> bool
"""
Backwards-compatibility with Python 2.7 to allow treating 'warn' as keyword-only.
"""
if not kwargs:
return False
elif len(kwargs) > 1 or "warn" not in kwargs:
kwargs.pop("warn", None)
arg = next(iter(kwargs.keys()))
raise TypeError(
"{}() got an unexpected keyword argument {!r}".format(func_name, arg)
)
return kwargs["warn"]
def _get_config_var(name, warn=False):
# type: (str, bool) -> Union[int, str, None]
value = sysconfig.get_config_var(name)
if value is None and warn:
logger.debug(
"Config variable '%s' is unset, Python ABI tag may be incorrect", name
)
return value
def _normalize_string(string):
# type: (str) -> str
return string.replace(".", "_").replace("-", "_")
def _abi3_applies(python_version):
# type: (PythonVersion) -> bool
"""
Determine if the Python version supports abi3.
PEP 384 was first implemented in Python 3.2.
"""
return len(python_version) > 1 and tuple(python_version) >= (3, 2)
def _cpython_abis(py_version, warn=False):
# type: (PythonVersion, bool) -> List[str]
py_version = tuple(py_version) # To allow for version comparison.
abis = []
version = "{}{}".format(*py_version[:2])
debug = pymalloc = ucs4 = ""
with_debug = _get_config_var("Py_DEBUG", warn)
has_refcount = hasattr(sys, "gettotalrefcount")
# Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
# extension modules is the best option.
# https://github.com/pypa/pip/issues/3383#issuecomment-173267692
has_ext = "_d.pyd" in EXTENSION_SUFFIXES
if with_debug or (with_debug is None and (has_refcount or has_ext)):
debug = "d"
if py_version < (3, 8):
with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
if with_pymalloc or with_pymalloc is None:
pymalloc = "m"
if py_version < (3, 3):
unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
if unicode_size == 4 or (
unicode_size is None and sys.maxunicode == 0x10FFFF
):
ucs4 = "u"
elif debug:
# Debug builds can also load "normal" extension modules.
# We can also assume no UCS-4 or pymalloc requirement.
abis.append("cp{version}".format(version=version))
abis.insert(
0,
"cp{version}{debug}{pymalloc}{ucs4}".format(
version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
),
)
return abis
def cpython_tags(
python_version=None, # type: Optional[PythonVersion]
abis=None, # type: Optional[Iterable[str]]
platforms=None, # type: Optional[Iterable[str]]
**kwargs # type: bool
):
# type: (...) -> Iterator[Tag]
"""
Yields the tags for a CPython interpreter.
The tags consist of:
- cp<python_version>-<abi>-<platform>
- cp<python_version>-abi3-<platform>
- cp<python_version>-none-<platform>
- cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
    If python_version only specifies a major version then user-provided ABIs and
    the 'none' ABI tag will be used.
If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
their normal position and not at the beginning.
"""
warn = _warn_keyword_parameter("cpython_tags", kwargs)
if not python_version:
python_version = sys.version_info[:2]
if len(python_version) < 2:
interpreter = "cp{}".format(python_version[0])
else:
interpreter = "cp{}{}".format(*python_version[:2])
if abis is None:
if len(python_version) > 1:
abis = _cpython_abis(python_version, warn)
else:
abis = []
abis = list(abis)
# 'abi3' and 'none' are explicitly handled later.
for explicit_abi in ("abi3", "none"):
try:
abis.remove(explicit_abi)
except ValueError:
pass
platforms = list(platforms or _platform_tags())
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
if _abi3_applies(python_version):
for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms):
yield tag
for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms):
yield tag
if _abi3_applies(python_version):
for minor_version in range(python_version[1] - 1, 1, -1):
for platform_ in platforms:
interpreter = "cp{major}{minor}".format(
major=python_version[0], minor=minor_version
)
yield Tag(interpreter, "abi3", platform_)
def _generic_abi():
# type: () -> Iterator[str]
abi = sysconfig.get_config_var("SOABI")
if abi:
yield _normalize_string(abi)
def generic_tags(
interpreter=None, # type: Optional[str]
abis=None, # type: Optional[Iterable[str]]
platforms=None, # type: Optional[Iterable[str]]
**kwargs # type: bool
):
# type: (...) -> Iterator[Tag]
"""
Yields the tags for a generic interpreter.
The tags consist of:
- <interpreter>-<abi>-<platform>
The "none" ABI will be added if it was not explicitly provided.
"""
warn = _warn_keyword_parameter("generic_tags", kwargs)
if not interpreter:
interp_name = interpreter_name()
interp_version = interpreter_version(warn=warn)
interpreter = "".join([interp_name, interp_version])
if abis is None:
abis = _generic_abi()
platforms = list(platforms or _platform_tags())
abis = list(abis)
if "none" not in abis:
abis.append("none")
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
def _py_interpreter_range(py_version):
# type: (PythonVersion) -> Iterator[str]
"""
Yields Python versions in descending order.
After the latest version, the major-only version will be yielded, and then
all previous versions of that major version.
"""
if len(py_version) > 1:
yield "py{major}{minor}".format(major=py_version[0], minor=py_version[1])
yield "py{major}".format(major=py_version[0])
if len(py_version) > 1:
for minor in range(py_version[1] - 1, -1, -1):
yield "py{major}{minor}".format(major=py_version[0], minor=minor)
def compatible_tags(
python_version=None, # type: Optional[PythonVersion]
interpreter=None, # type: Optional[str]
platforms=None, # type: Optional[Iterable[str]]
):
# type: (...) -> Iterator[Tag]
"""
Yields the sequence of tags that are compatible with a specific version of Python.
The tags consist of:
- py*-none-<platform>
- <interpreter>-none-any # ... if `interpreter` is provided.
- py*-none-any
"""
if not python_version:
python_version = sys.version_info[:2]
platforms = list(platforms or _platform_tags())
for version in _py_interpreter_range(python_version):
for platform_ in platforms:
yield Tag(version, "none", platform_)
if interpreter:
yield Tag(interpreter, "none", "any")
for version in _py_interpreter_range(python_version):
yield Tag(version, "none", "any")
def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER):
# type: (str, bool) -> str
if not is_32bit:
return arch
if arch.startswith("ppc"):
return "ppc"
return "i386"
def _mac_binary_formats(version, cpu_arch):
# type: (MacVersion, str) -> List[str]
formats = [cpu_arch]
if cpu_arch == "x86_64":
if version < (10, 4):
return []
formats.extend(["intel", "fat64", "fat32"])
elif cpu_arch == "i386":
if version < (10, 4):
return []
formats.extend(["intel", "fat32", "fat"])
elif cpu_arch == "ppc64":
# TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
if version > (10, 5) or version < (10, 4):
return []
formats.append("fat64")
elif cpu_arch == "ppc":
if version > (10, 6):
return []
formats.extend(["fat32", "fat"])
formats.append("universal")
return formats
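# A minimal usage sketch (illustrative helper): x86_64 on macOS 10.4+ also
# accepts the fat/universal aliases in addition to the native architecture.
def _example_mac_binary_formats():
    # type: () -> None
    assert _mac_binary_formats((10, 15), "x86_64") == [
        "x86_64", "intel", "fat64", "fat32", "universal",
    ]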
def mac_platforms(version=None, arch=None):
# type: (Optional[MacVersion], Optional[str]) -> Iterator[str]
"""
Yields the platform tags for a macOS system.
The `version` parameter is a two-item tuple specifying the macOS version to
generate platform tags for. The `arch` parameter is the CPU architecture to
generate platform tags for. Both parameters default to the appropriate value
for the current system.
"""
    version_str, _, cpu_arch = platform.mac_ver()  # type: ignore
    if version is None:
        version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
    if arch is None:
        arch = _mac_arch(cpu_arch)
for minor_version in range(version[1], -1, -1):
compat_version = version[0], minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
)
# From PEP 513.
def _is_manylinux_compatible(name, glibc_version):
# type: (str, GlibcVersion) -> bool
# Check for presence of _manylinux module.
try:
import _manylinux # noqa
return bool(getattr(_manylinux, name + "_compatible"))
except (ImportError, AttributeError):
# Fall through to heuristic check below.
pass
return _have_compatible_glibc(*glibc_version)
def _glibc_version_string():
# type: () -> Optional[str]
# Returns glibc version string, or None if not using glibc.
return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
def _glibc_version_string_confstr():
# type: () -> Optional[str]
"""
Primary implementation of glibc_version_string using os.confstr.
"""
# os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
# to be broken or missing. This strategy is used in the standard library
# platform module.
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
try:
# os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
version_string = os.confstr( # type: ignore[attr-defined] # noqa: F821
"CS_GNU_LIBC_VERSION"
)
assert version_string is not None
_, version = version_string.split() # type: Tuple[str, str]
except (AssertionError, AttributeError, OSError, ValueError):
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
return None
return version
def _glibc_version_string_ctypes():
# type: () -> Optional[str]
"""
Fallback implementation of glibc_version_string using ctypes.
"""
try:
import ctypes
except ImportError:
return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
#
# Note: typeshed is wrong here so we are ignoring this line.
process_namespace = ctypes.CDLL(None) # type: ignore
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str = gnu_get_libc_version() # type: str
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str
# Separated out from have_compatible_glibc for easier unit testing.
def _check_glibc_version(version_str, required_major, minimum_minor):
# type: (str, int, int) -> bool
# Parse string and check against requested version.
#
# We use a regexp instead of str.split because we want to discard any
# random junk that might come after the minor version -- this might happen
# in patched/forked versions of glibc (e.g. Linaro's version of glibc
# uses version strings like "2.20-2014.11"). See gh-3588.
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn(
"Expected glibc version with 2 components major.minor,"
" got: %s" % version_str,
RuntimeWarning,
)
return False
return (
int(m.group("major")) == required_major
and int(m.group("minor")) >= minimum_minor
)
def _have_compatible_glibc(required_major, minimum_minor):
# type: (int, int) -> bool
version_str = _glibc_version_string()
if version_str is None:
return False
return _check_glibc_version(version_str, required_major, minimum_minor)
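# A minimal usage sketch (illustrative helper): the regexp tolerates vendor
# suffixes such as Linaro's "2.20-2014.11" while still comparing major.minor.
def _example_check_glibc_version():
    # type: () -> None
    assert _check_glibc_version("2.20-2014.11", 2, 17) is True
    assert _check_glibc_version("2.5", 2, 17) is False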
# Python does not provide platform information at sufficient granularity to
# identify the architecture of the running executable in some cases, so we
# determine it dynamically by reading the information from the running
# process. This only applies on Linux, which uses the ELF format.
class _ELFFileHeader(object):
# https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
class _InvalidELFFileHeader(ValueError):
"""
An invalid ELF file header was found.
"""
ELF_MAGIC_NUMBER = 0x7F454C46
ELFCLASS32 = 1
ELFCLASS64 = 2
ELFDATA2LSB = 1
ELFDATA2MSB = 2
EM_386 = 3
EM_S390 = 22
EM_ARM = 40
EM_X86_64 = 62
EF_ARM_ABIMASK = 0xFF000000
EF_ARM_ABI_VER5 = 0x05000000
EF_ARM_ABI_FLOAT_HARD = 0x00000400
def __init__(self, file):
# type: (IO[bytes]) -> None
def unpack(fmt):
# type: (str) -> int
try:
result, = struct.unpack(
fmt, file.read(struct.calcsize(fmt))
) # type: (int, )
except struct.error:
raise _ELFFileHeader._InvalidELFFileHeader()
return result
self.e_ident_magic = unpack(">I")
if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
raise _ELFFileHeader._InvalidELFFileHeader()
self.e_ident_class = unpack("B")
if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
raise _ELFFileHeader._InvalidELFFileHeader()
self.e_ident_data = unpack("B")
if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
raise _ELFFileHeader._InvalidELFFileHeader()
self.e_ident_version = unpack("B")
self.e_ident_osabi = unpack("B")
self.e_ident_abiversion = unpack("B")
self.e_ident_pad = file.read(7)
format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
self.e_type = unpack(format_h)
self.e_machine = unpack(format_h)
self.e_version = unpack(format_i)
self.e_entry = unpack(format_p)
self.e_phoff = unpack(format_p)
self.e_shoff = unpack(format_p)
self.e_flags = unpack(format_i)
self.e_ehsize = unpack(format_h)
self.e_phentsize = unpack(format_h)
self.e_phnum = unpack(format_h)
self.e_shentsize = unpack(format_h)
self.e_shnum = unpack(format_h)
self.e_shstrndx = unpack(format_h)
def _get_elf_header():
# type: () -> Optional[_ELFFileHeader]
try:
with open(sys.executable, "rb") as f:
elf_header = _ELFFileHeader(f)
except (IOError, OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
return None
return elf_header
def _is_linux_armhf():
# type: () -> bool
# hard-float ABI can be detected from the ELF header of the running
# process
# https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
elf_header = _get_elf_header()
if elf_header is None:
return False
result = elf_header.e_ident_class == elf_header.ELFCLASS32
result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
result &= elf_header.e_machine == elf_header.EM_ARM
result &= (
elf_header.e_flags & elf_header.EF_ARM_ABIMASK
) == elf_header.EF_ARM_ABI_VER5
result &= (
elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
) == elf_header.EF_ARM_ABI_FLOAT_HARD
return result
def _is_linux_i686():
# type: () -> bool
elf_header = _get_elf_header()
if elf_header is None:
return False
result = elf_header.e_ident_class == elf_header.ELFCLASS32
result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
result &= elf_header.e_machine == elf_header.EM_386
return result
def _have_compatible_manylinux_abi(arch):
# type: (str) -> bool
if arch == "armv7l":
return _is_linux_armhf()
if arch == "i686":
return _is_linux_i686()
return True
def _linux_platforms(is_32bit=_32_BIT_INTERPRETER):
# type: (bool) -> Iterator[str]
linux = _normalize_string(distutils.util.get_platform())
if linux == "linux_x86_64" and is_32bit:
linux = "linux_i686"
manylinux_support = []
_, arch = linux.split("_", 1)
if _have_compatible_manylinux_abi(arch):
if arch in {"x86_64", "i686", "aarch64", "armv7l", "ppc64", "ppc64le", "s390x"}:
manylinux_support.append(
("manylinux2014", (2, 17))
) # CentOS 7 w/ glibc 2.17 (PEP 599)
if arch in {"x86_64", "i686"}:
manylinux_support.append(
("manylinux2010", (2, 12))
) # CentOS 6 w/ glibc 2.12 (PEP 571)
manylinux_support.append(
("manylinux1", (2, 5))
) # CentOS 5 w/ glibc 2.5 (PEP 513)
manylinux_support_iter = iter(manylinux_support)
for name, glibc_version in manylinux_support_iter:
if _is_manylinux_compatible(name, glibc_version):
yield linux.replace("linux", name)
break
# Support for a later manylinux implies support for an earlier version.
for name, _ in manylinux_support_iter:
yield linux.replace("linux", name)
yield linux
def _generic_platforms():
# type: () -> Iterator[str]
yield _normalize_string(distutils.util.get_platform())
def _platform_tags():
# type: () -> Iterator[str]
"""
Provides the platform tags for this installation.
"""
if platform.system() == "Darwin":
return mac_platforms()
elif platform.system() == "Linux":
return _linux_platforms()
else:
return _generic_platforms()
def interpreter_name():
# type: () -> str
"""
Returns the name of the running interpreter.
"""
try:
name = sys.implementation.name # type: ignore
except AttributeError: # pragma: no cover
# Python 2.7 compatibility.
name = platform.python_implementation().lower()
return INTERPRETER_SHORT_NAMES.get(name) or name
def interpreter_version(**kwargs):
# type: (bool) -> str
"""
Returns the version of the running interpreter.
"""
warn = _warn_keyword_parameter("interpreter_version", kwargs)
version = _get_config_var("py_version_nodot", warn=warn)
if version:
version = str(version)
else:
version = "".join(map(str, sys.version_info[:2]))
return version
def sys_tags(**kwargs):
# type: (bool) -> Iterator[Tag]
"""
Returns the sequence of tag triples for the running interpreter.
The order of the sequence corresponds to priority order for the
interpreter, from most to least important.
"""
warn = _warn_keyword_parameter("sys_tags", kwargs)
interp_name = interpreter_name()
if interp_name == "cp":
for tag in cpython_tags(warn=warn):
yield tag
else:
for tag in generic_tags():
yield tag
for tag in compatible_tags():
yield tag
|
{
"content_hash": "dc8f05d87ea838eedb3bba3f240cdd67",
"timestamp": "",
"source": "github",
"line_count": 726,
"max_line_length": 111,
"avg_line_length": 32.133608815426996,
"alnum_prop": 0.6074842470744567,
"repo_name": "kenxwagner/PythonPlay",
"id": "60a69d8f943667d32d9012e3d6219e445ffa6188",
"size": "23510",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Project/webscrap/websc/Lib/site-packages/pip/_vendor/packaging/tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1502"
},
{
"name": "PowerShell",
"bytes": "1755"
},
{
"name": "Python",
"bytes": "2564579"
},
{
"name": "Shell",
"bytes": "3113"
}
],
"symlink_target": ""
}
|
from typing import Optional
from src.settings import Colors
def league_color(league: str) -> Optional[Colors]:
if league in [
"1 HNL (Croatia)",
"A Grupa (Bulgaria)",
"A Lyga (Lithuania)",
"Adeccoligaen (Norway)",
"A-League (Australia)",
"Allsvenskan (Sweden)",
"Bundesliga (Austria)",
"Bundesliga 2 (Germany)",
"Cempionat (Belarus)",
"Challenge League (Switzerland)",
"Championship (Scotland)",
"Eerste Divisie (Netherlands)",
"Ekstraklasa (Poland)",
"Eredivisie (Netherlands)",
"Erste Liga (Austria)",
"First Division (Cyprus)",
"J-League (Japan)",
"J-League 2 (Japan)",
"Jupiler League (Belgium)",
"Kategoria Superiore (Albania)",
"K-League (South Korea)",
"League One (England)",
"Liga 1 (Romania)",
"Ligat ha'Al (Israel)",
"Ligue 1 (France)",
"Ligue 2 (France)",
"Meistriliiga (Estonia)",
"National (France)",
"OTP Bank Liga (Hungary)",
"Premier Division (Ireland)",
"Premier League (England)",
"Premier League (Wales)",
"Primeira Liga (Portugal)",
"Primera A (Colombia)",
"Primera Division (Chile)",
"Primera Division (Mexico)",
"Primera Division (Peru)",
"Proximus League (Belgium)",
"Prva Liga (Slovenia)",
"Segunda Division (Spain)",
"Segunda Liga (Portugal)",
"Serie A (Brazil)",
"Serie A (Italy)",
"Serie B (Brazil)",
"S-League (Singapore)",
"Soccer League (Canada)",
"Super Lig (Turkey)",
"Super Liga (Slovakia)",
"Superleague (Greece)",
"Superliga (Serbia)",
"Superligaen (Denmark)",
"Tippeligaen (Norway)",
"Viasat Sport Divisionen (Denmark)",
"Virsliga (Latvia)",
"Vushaya Liga (Russia)",
"Vyscha Liga (Ukraine)",
"Úrvalsdeild (Iceland)",
]:
return Colors.GREEN
if league in [
"1 CFL (Montenegro)",
"1. Division (Russia)",
"Bundesliga 1 (Germany)",
"Campeonato (Ecuador)",
"Championship (England)",
"Conference (England)",
"I Liga (Poland)",
"Premier League (Scotland)",
"Primera Division (Argentina)",
"Veikkausliiga (Finland)",
"Ykkonen (Finland)",
]:
return Colors.RED
if league in [
"Gambrinus League (Czech Republic)",
"League Two (England)",
"Major League Soccer (United States)",
"Primera Division (Spain)",
"Primera Division (Uruguay)",
"Serie B (Italy)",
"Super League (Switzerland)",
"Superettan (Sweden)",
"TFF Lig A (Turkey)",
]:
return Colors.YELLOW
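# A minimal usage sketch (illustrative helper, not part of the original
# module): leagues outside the three lists fall through and return None.
def _example_league_color():
    assert league_color("Serie A (Italy)") == Colors.GREEN
    assert league_color("Championship (England)") == Colors.RED
    assert league_color("Serie B (Italy)") == Colors.YELLOW
    assert league_color("Unknown League") is None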
|
{
"content_hash": "7a44e6c584977703dd6e4f91e5d7ae23",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 50,
"avg_line_length": 30.92391304347826,
"alnum_prop": 0.5332161687170475,
"repo_name": "vapkarian/soccer-analyzer",
"id": "eb60760ebe18e61c041cc3b30d0fdb3d28c1e4ae",
"size": "2846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/colors/v16/default.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8543"
},
{
"name": "HTML",
"bytes": "182212"
},
{
"name": "Python",
"bytes": "432305"
},
{
"name": "Shell",
"bytes": "1712"
}
],
"symlink_target": ""
}
|
"""Tests for record_input_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RecordInputOpTest(test.TestCase):
def generateTestData(self, prefix, n, m,
compression_type=tf_record.TFRecordCompressionType.NONE):
options = tf_record.TFRecordOptions(compression_type)
for i in range(n):
f = os.path.join(self.get_temp_dir(), prefix + "." + str(i))
w = tf_record.TFRecordWriter(f, options=options)
for j in range(m):
w.write("{0:0{width}}".format(i * m + j, width=10).encode("utf-8"))
w.close()
def testRecordInputSimple(self):
with self.test_session() as sess:
self.generateTestData("basic", 1, 1)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input").get_yield_op()
self.assertEqual(sess.run(yield_op), b"0000000000")
def testRecordInputSimpleGzip(self):
with self.test_session() as sess:
self.generateTestData("basic", 1, 1,
compression_type=tf_record.TFRecordCompressionType.GZIP)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input",
compression_type=
tf_record.TFRecordCompressionType.GZIP).get_yield_op()
self.assertEqual(sess.run(yield_op), b"0000000000")
def testRecordInputSimpleZlib(self):
with self.test_session() as sess:
self.generateTestData("basic", 1, 1,
compression_type=tf_record.TFRecordCompressionType.ZLIB)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input",
compression_type=
tf_record.TFRecordCompressionType.ZLIB).get_yield_op()
self.assertEqual(sess.run(yield_op), b"0000000000")
def testRecordInputEpochs(self):
files = 100
records_per_file = 100
batches = 2
with self.test_session() as sess:
self.generateTestData("basic", files, records_per_file)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=2,
buffer_size=2000,
batch_size=1,
shift_ratio=0.33,
seed=10,
name="record_input",
batches=batches)
yield_op = records.get_yield_op()
# cycle over 3 epochs and make sure we never duplicate
for _ in range(3):
epoch_set = set()
for _ in range(int(files * records_per_file / batches)):
op_list = sess.run(yield_op)
          self.assertEqual(batches, len(op_list))
for r in op_list:
self.assertTrue(r[0] not in epoch_set)
epoch_set.add(r[0])
def testDoesNotDeadlock(self):
# Iterate multiple times to cause deadlock if there is a chance it can occur
for _ in range(30):
with self.test_session() as sess:
self.generateTestData("basic", 1, 1)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=100,
batch_size=1,
name="record_input")
yield_op = records.get_yield_op()
for _ in range(50):
sess.run(yield_op)
def testEmptyGlob(self):
with self.test_session() as sess:
record_input = data_flow_ops.RecordInput(file_pattern="foo")
yield_op = record_input.get_yield_op()
sess.run(variables.global_variables_initializer())
with self.assertRaises(NotFoundError):
sess.run(yield_op)
def testBufferTooSmall(self):
files = 10
records_per_file = 10
batches = 2
with self.test_session() as sess:
self.generateTestData("basic", files, records_per_file)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=2,
buffer_size=2000,
batch_size=1,
shift_ratio=0.33,
seed=10,
name="record_input",
batches=batches)
yield_op = records.get_yield_op()
# cycle over 3 epochs and make sure we never duplicate
for _ in range(3):
epoch_set = set()
for _ in range(int(files * records_per_file / batches)):
op_list = sess.run(yield_op)
          self.assertEqual(batches, len(op_list))
for r in op_list:
self.assertTrue(r[0] not in epoch_set)
epoch_set.add(r[0])
if __name__ == "__main__":
test.main()
|
{
"content_hash": "f5cd3902df85636565e01d1e6f89d074",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 80,
"avg_line_length": 32.462025316455694,
"alnum_prop": 0.6145447455644375,
"repo_name": "hsaputra/tensorflow",
"id": "0945ed24bf9ac36c508d5da5f66bcc1a3e034083",
"size": "5818",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/record_input_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8913"
},
{
"name": "C",
"bytes": "320101"
},
{
"name": "C++",
"bytes": "35737831"
},
{
"name": "CMake",
"bytes": "188490"
},
{
"name": "Go",
"bytes": "1055853"
},
{
"name": "Java",
"bytes": "541818"
},
{
"name": "Jupyter Notebook",
"bytes": "1940884"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "44805"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94716"
},
{
"name": "PHP",
"bytes": "1429"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "31102235"
},
{
"name": "Ruby",
"bytes": "547"
},
{
"name": "Shell",
"bytes": "402584"
}
],
"symlink_target": ""
}
|
from functools import wraps
from flask import jsonify, request, make_response
from app.views.api import api
from werkzeug.exceptions import BadRequest
@api.route('/')
def api_default():
return jsonify({
"Response": "No method requested"
})
# http://flask.pocoo.org/docs/0.10/patterns/viewdecorators/
def expect_json_body(f):
@wraps(f)
    def decorated_function(*args, **kwargs):
        # Keep the try block narrow so exceptions raised inside the wrapped
        # view are not misreported as JSON parse errors.
        try:
            kwargs['body'] = request.get_json(force=True)
        except BadRequest as e:
            return make_response(jsonify({
                "error": {
                    "msg": "Could not parse JSON. Is it well-formed?",
                    "type": str(e)
                }
            }), 400)
        if not kwargs['body']:
            return make_response(jsonify({
                "error": {
                    "msg": "Could not parse JSON. Is it well-formed?"
                }
            }), 400)
        return f(*args, **kwargs)
    return decorated_function
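# A minimal usage sketch, assuming a hypothetical '/echo' route (not part of
# the original API): the decorator injects the parsed JSON body as the
# `body` keyword argument of the wrapped view.
@api.route('/echo', methods=['POST'])
@expect_json_body
def api_echo(body):
    return jsonify({"received": body})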
|
{
"content_hash": "03762a2f21107001c38d34ebb527546e",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 74,
"avg_line_length": 30.4,
"alnum_prop": 0.5216165413533834,
"repo_name": "krrg/gnomon",
"id": "c8c5da3fb44dec007962799b87883aad91484ded",
"size": "1064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views/api/apiwrappers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3175"
},
{
"name": "JavaScript",
"bytes": "17605"
},
{
"name": "Python",
"bytes": "44037"
}
],
"symlink_target": ""
}
|
"""Tests for the gridfs package.
"""
import datetime
import threading
import time
import unittest
import gridfs
from bson.binary import Binary
from bson.py3compat import StringIO, string_type
from gridfs.errors import NoFile, CorruptGridFile
from pymongo.errors import (ConfigurationError,
ConnectionFailure,
ServerSelectionTimeoutError,
OperationFailure)
from pymongo.mongo_client import MongoClient
from pymongo.read_preferences import ReadPreference
from test import (client_context,
IntegrationTest)
from test.test_replica_set_client import TestReplicaSetClientBase
from test.utils import (joinall,
single_client,
one,
rs_client,
rs_or_single_client,
rs_or_single_client_noauth,
remove_all_users)
class JustWrite(threading.Thread):
def __init__(self, gfs, num):
threading.Thread.__init__(self)
self.gfs = gfs
self.num = num
self.setDaemon(True)
def run(self):
for _ in range(self.num):
file = self.gfs.open_upload_stream("test")
file.write(b"hello")
file.close()
class JustRead(threading.Thread):
def __init__(self, gfs, num, results):
threading.Thread.__init__(self)
self.gfs = gfs
self.num = num
self.results = results
self.setDaemon(True)
def run(self):
for _ in range(self.num):
file = self.gfs.open_download_stream_by_name("test")
data = file.read()
self.results.append(data)
assert data == b"hello"
class TestGridfs(IntegrationTest):
@classmethod
def setUpClass(cls):
super(TestGridfs, cls).setUpClass()
cls.fs = gridfs.GridFSBucket(cls.db)
cls.alt = gridfs.GridFSBucket(
cls.db, bucket_name="alt")
def setUp(self):
self.db.drop_collection("fs.files")
self.db.drop_collection("fs.chunks")
self.db.drop_collection("alt.files")
self.db.drop_collection("alt.chunks")
def test_basic(self):
oid = self.fs.upload_from_stream("test_filename",
b"hello world")
self.assertEqual(b"hello world",
self.fs.open_download_stream(oid).read())
self.assertEqual(1, self.db.fs.files.count())
self.assertEqual(1, self.db.fs.chunks.count())
self.fs.delete(oid)
self.assertRaises(NoFile, self.fs.open_download_stream, oid)
self.assertEqual(0, self.db.fs.files.count())
self.assertEqual(0, self.db.fs.chunks.count())
def test_multi_chunk_delete(self):
self.db.fs.drop()
self.assertEqual(0, self.db.fs.files.count())
self.assertEqual(0, self.db.fs.chunks.count())
gfs = gridfs.GridFSBucket(self.db)
oid = gfs.upload_from_stream("test_filename",
b"hello",
chunk_size_bytes=1)
self.assertEqual(1, self.db.fs.files.count())
self.assertEqual(5, self.db.fs.chunks.count())
gfs.delete(oid)
self.assertEqual(0, self.db.fs.files.count())
self.assertEqual(0, self.db.fs.chunks.count())
def test_empty_file(self):
oid = self.fs.upload_from_stream("test_filename",
b"")
self.assertEqual(b"", self.fs.open_download_stream(oid).read())
self.assertEqual(1, self.db.fs.files.count())
self.assertEqual(0, self.db.fs.chunks.count())
raw = self.db.fs.files.find_one()
self.assertEqual(0, raw["length"])
self.assertEqual(oid, raw["_id"])
self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime))
self.assertEqual(255 * 1024, raw["chunkSize"])
self.assertTrue(isinstance(raw["md5"], string_type))
def test_corrupt_chunk(self):
files_id = self.fs.upload_from_stream("test_filename",
b'foobar')
self.db.fs.chunks.update_one({'files_id': files_id},
{'$set': {'data': Binary(b'foo', 0)}})
try:
out = self.fs.open_download_stream(files_id)
self.assertRaises(CorruptGridFile, out.read)
out = self.fs.open_download_stream(files_id)
self.assertRaises(CorruptGridFile, out.readline)
finally:
self.fs.delete(files_id)
def test_upload_ensures_index(self):
# setUp has dropped collections.
names = self.db.collection_names()
self.assertFalse([name for name in names if name.startswith('fs')])
chunks = self.db.fs.chunks
files = self.db.fs.files
self.fs.upload_from_stream("filename", b"junk")
self.assertTrue(any(
info.get('key') == [('files_id', 1), ('n', 1)]
for info in chunks.index_information().values()))
self.assertTrue(any(
info.get('key') == [('filename', 1), ('uploadDate', 1)]
for info in files.index_information().values()))
def test_alt_collection(self):
oid = self.alt.upload_from_stream("test_filename",
b"hello world")
self.assertEqual(b"hello world",
self.alt.open_download_stream(oid).read())
self.assertEqual(1, self.db.alt.files.count())
self.assertEqual(1, self.db.alt.chunks.count())
self.alt.delete(oid)
self.assertRaises(NoFile, self.alt.open_download_stream, oid)
self.assertEqual(0, self.db.alt.files.count())
self.assertEqual(0, self.db.alt.chunks.count())
self.assertRaises(NoFile, self.alt.open_download_stream, "foo")
self.alt.upload_from_stream("foo",
b"hello world")
self.assertEqual(b"hello world",
self.alt.open_download_stream_by_name("foo").read())
self.alt.upload_from_stream("mike", b"")
self.alt.upload_from_stream("test", b"foo")
self.alt.upload_from_stream("hello world", b"")
self.assertEqual(set(["mike", "test", "hello world", "foo"]),
set(k["filename"] for k in list(
self.db.alt.files.find())))
def test_threaded_reads(self):
self.fs.upload_from_stream("test", b"hello")
threads = []
results = []
for i in range(10):
threads.append(JustRead(self.fs, 10, results))
threads[i].start()
joinall(threads)
self.assertEqual(
100 * [b'hello'],
results
)
def test_threaded_writes(self):
threads = []
for i in range(10):
threads.append(JustWrite(self.fs, 10))
threads[i].start()
joinall(threads)
fstr = self.fs.open_download_stream_by_name("test")
self.assertEqual(fstr.read(), b"hello")
# Should have created 100 versions of 'test' file
self.assertEqual(
100,
self.db.fs.files.find({'filename': 'test'}).count()
)
def test_get_last_version(self):
one = self.fs.upload_from_stream("test", b"foo")
time.sleep(0.01)
two = self.fs.open_upload_stream("test")
two.write(b"bar")
two.close()
time.sleep(0.01)
two = two._id
three = self.fs.upload_from_stream("test", b"baz")
self.assertEqual(b"baz",
self.fs.open_download_stream_by_name("test").read())
self.fs.delete(three)
self.assertEqual(b"bar",
self.fs.open_download_stream_by_name("test").read())
self.fs.delete(two)
self.assertEqual(b"foo",
self.fs.open_download_stream_by_name("test").read())
self.fs.delete(one)
self.assertRaises(NoFile,
self.fs.open_download_stream_by_name, "test")
def test_get_version(self):
self.fs.upload_from_stream("test", b"foo")
time.sleep(0.01)
self.fs.upload_from_stream("test", b"bar")
time.sleep(0.01)
self.fs.upload_from_stream("test", b"baz")
time.sleep(0.01)
self.assertEqual(b"foo", self.fs.open_download_stream_by_name(
"test", revision=0).read())
self.assertEqual(b"bar", self.fs.open_download_stream_by_name(
"test", revision=1).read())
self.assertEqual(b"baz", self.fs.open_download_stream_by_name(
"test", revision=2).read())
self.assertEqual(b"baz", self.fs.open_download_stream_by_name(
"test", revision=-1).read())
self.assertEqual(b"bar", self.fs.open_download_stream_by_name(
"test", revision=-2).read())
self.assertEqual(b"foo", self.fs.open_download_stream_by_name(
"test", revision=-3).read())
self.assertRaises(NoFile, self.fs.open_download_stream_by_name,
"test", revision=3)
self.assertRaises(NoFile, self.fs.open_download_stream_by_name,
"test", revision=-4)
def test_upload_from_stream_filelike(self):
oid = self.fs.upload_from_stream("test_file",
StringIO(b"hello world"),
chunk_size_bytes=1)
self.assertEqual(11, self.db.fs.chunks.count())
self.assertEqual(b"hello world",
self.fs.open_download_stream(oid).read())
def test_missing_length_iter(self):
# Test fix that guards against PHP-237
self.fs.upload_from_stream("empty", b"")
doc = self.db.fs.files.find_one({"filename": "empty"})
doc.pop("length")
self.db.fs.files.replace_one({"_id": doc["_id"]}, doc)
fstr = self.fs.open_download_stream_by_name("empty")
def iterate_file(grid_file):
for _ in grid_file:
pass
return True
self.assertTrue(iterate_file(fstr))
def test_gridfs_lazy_connect(self):
client = MongoClient('badhost', connect=False,
serverSelectionTimeoutMS=0)
cdb = client.db
gfs = gridfs.GridFSBucket(cdb)
self.assertRaises(ServerSelectionTimeoutError, gfs.delete, 0)
gfs = gridfs.GridFSBucket(cdb)
self.assertRaises(
ServerSelectionTimeoutError,
gfs.upload_from_stream, "test", b"") # Still no connection.
def test_gridfs_find(self):
self.fs.upload_from_stream("two", b"test2")
time.sleep(0.01)
self.fs.upload_from_stream("two", b"test2+")
time.sleep(0.01)
self.fs.upload_from_stream("one", b"test1")
time.sleep(0.01)
self.fs.upload_from_stream("two", b"test2++")
self.assertEqual(3, self.fs.find({"filename": "two"}).count())
self.assertEqual(4, self.fs.find({}).count())
cursor = self.fs.find(
{}, no_cursor_timeout=False, sort=[("uploadDate", -1)],
skip=1, limit=2)
gout = next(cursor)
self.assertEqual(b"test1", gout.read())
cursor.rewind()
gout = next(cursor)
self.assertEqual(b"test1", gout.read())
gout = next(cursor)
self.assertEqual(b"test2+", gout.read())
self.assertRaises(StopIteration, cursor.__next__)
cursor.close()
self.assertRaises(TypeError, self.fs.find, {}, {"_id": True})
def test_grid_in_non_int_chunksize(self):
# Lua, and perhaps other buggy GridFS clients, store size as a float.
data = b'data'
self.fs.upload_from_stream('f', data)
self.db.fs.files.update_one({'filename': 'f'},
{'$set': {'chunkSize': 100.0}})
self.assertEqual(data,
self.fs.open_download_stream_by_name('f').read())
def test_unacknowledged(self):
# w=0 is prohibited.
with self.assertRaises(ConfigurationError):
gridfs.GridFSBucket(rs_or_single_client(w=0).pymongo_test)
def test_rename(self):
_id = self.fs.upload_from_stream("first_name", b'testing')
self.assertEqual(b'testing', self.fs.open_download_stream_by_name(
"first_name").read())
self.fs.rename(_id, "second_name")
self.assertRaises(NoFile, self.fs.open_download_stream_by_name,
"first_name")
self.assertEqual(b"testing", self.fs.open_download_stream_by_name(
"second_name").read())
def test_abort(self):
gin = self.fs.open_upload_stream("test_filename",
chunk_size_bytes=5)
gin.write(b"test1")
gin.write(b"test2")
gin.write(b"test3")
self.assertEqual(3, self.db.fs.chunks.count(
{"files_id": gin._id}))
gin.abort()
self.assertTrue(gin.closed)
self.assertRaises(ValueError, gin.write, b"test4")
self.assertEqual(0, self.db.fs.chunks.count(
{"files_id": gin._id}))
class TestGridfsBucketReplicaSet(TestReplicaSetClientBase):
def test_gridfs_replica_set(self):
rsc = rs_client(
w=self.w, wtimeout=5000,
read_preference=ReadPreference.SECONDARY)
gfs = gridfs.GridFSBucket(rsc.pymongo_test)
oid = gfs.upload_from_stream("test_filename", b'foo')
content = gfs.open_download_stream(oid).read()
self.assertEqual(b'foo', content)
def test_gridfs_secondary(self):
primary_host, primary_port = self.primary
primary_connection = single_client(primary_host, primary_port)
secondary_host, secondary_port = one(self.secondaries)
secondary_connection = single_client(
secondary_host, secondary_port,
read_preference=ReadPreference.SECONDARY)
primary_connection.pymongo_test.drop_collection("fs.files")
primary_connection.pymongo_test.drop_collection("fs.chunks")
# Should detect it's connected to secondary and not attempt to
# create index
gfs = gridfs.GridFSBucket(secondary_connection.pymongo_test)
# This won't detect secondary, raises error
self.assertRaises(ConnectionFailure, gfs.upload_from_stream,
"test_filename", b'foo')
def test_gridfs_secondary_lazy(self):
# Should detect it's connected to secondary and not attempt to
# create index.
secondary_host, secondary_port = one(self.secondaries)
client = single_client(
secondary_host,
secondary_port,
read_preference=ReadPreference.SECONDARY,
connect=False)
# Still no connection.
gfs = gridfs.GridFSBucket(client.test_gridfs_secondary_lazy)
# Connects, doesn't create index.
self.assertRaises(NoFile, gfs.open_download_stream_by_name,
"test_filename")
self.assertRaises(ConnectionFailure, gfs.upload_from_stream,
"test_filename", b'data')
def tearDown(self):
rsc = client_context.rs_client
rsc.pymongo_test.drop_collection('fs.files')
rsc.pymongo_test.drop_collection('fs.chunks')
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "cb0fe4a7c15c4cbcabdaa2cc7c1dbfea",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 77,
"avg_line_length": 37.432367149758456,
"alnum_prop": 0.5702394011744208,
"repo_name": "develf/mongo-python-driver",
"id": "42faa1aacee18b64b9f57db272d002a3e7efd8fc",
"size": "16097",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "test/test_gridfs_bucket.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "172869"
},
{
"name": "Python",
"bytes": "1497688"
}
],
"symlink_target": ""
}
|
import sys, os
# At the top.
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'vgl'
copyright = u'2014, vgl'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Activate the theme.
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "VGL",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
("GitHub", "https://github.com/OpenGeoscience/vgl", True),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': True,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar navbar-inverse",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': "spacelab",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'vgldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'vgl.tex', u'vgl Documentation',
u'vgl', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'vgl', u'vgl Documentation',
[u'vgl'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'vgl', u'vgl Documentation',
u'vgl', 'vgl', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
{
"content_hash": "62f20c6ba802eccf8fd581ae805c9932",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 80,
"avg_line_length": 32.06802721088435,
"alnum_prop": 0.6813746287653797,
"repo_name": "OpenGeoscience/vgl",
"id": "4f8e45a98fe53afb5f5029fd6e8d9cf7d20e0a2d",
"size": "9842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf_jsdoc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "10517"
},
{
"name": "CSS",
"bytes": "18378"
},
{
"name": "GLSL",
"bytes": "4194"
},
{
"name": "HTML",
"bytes": "15670"
},
{
"name": "JavaScript",
"bytes": "881753"
},
{
"name": "Python",
"bytes": "16143"
},
{
"name": "Shell",
"bytes": "17679"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import pandas as pd
import argparse
from os import path
parser = argparse.ArgumentParser(description='Plot data from output of the black hole simulation.')
parser.add_argument('--index', type=int, default=1,
help='Index to plot (default: %(default)s)')
parser.add_argument('--map-dir', type=str, default='maps', dest='map_dir',
                    help='Directory containing the map files (default: %(default)s)')
args = parser.parse_args()
if __name__ == '__main__':
filename = path.join(args.map_dir, 'map_{:0>5}.dat'.format(args.index))
with open(filename, 'r') as f:
f.readline()
nb_T = int(f.readline().split()[0])
f.readline()
nb_S = int(f.readline().split()[0])
f.readline()
T = [float(i) for i in f.readline().split()]
f.readline()
Sigma = [float(i) for i in f.readline().split()]
lines = ''.join(f.readlines())
# Read T, Sigma, Q and tau (one liner yeah!)
Q, tau = [pd.DataFrame([fline.split() for fline in l.split('\n')[1:-1]],
dtype=float)
for l in lines.split('#')][1:]
extents = Sigma[0], Sigma[1], T[0], T[1]
X = np.logspace(np.log10(Sigma[0]), np.log10(Sigma[1]), nb_S)
Y = np.logspace(np.log10(T[1]), np.log10(T[0]), nb_T)
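    # Log-spaced sample grids matching the map bounds; Y runs from T[1] down to
    # T[0], presumably to match the top-to-bottom row order used by imshow.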
#fig, ax = plt.subplots()
for element in ({'var': 'Q', 'data': Q, 'title': u"Chauffage spécifique $Q^+ - Q^-$ (erg·g$^{-1}$·s$^{-1}$)", 'threshold': 1e15, 'ticks': [-1e20,0,1e20]},
{'var': 'tau', 'data': tau, 'title': u"Opacité $\\tau$", 'threshold': 1e-5, 'ticks': [0.06, 1, 10]}):
plt.figure()
plt.title(element['title'])
plt.xscale('log')
plt.yscale('log')
CSQ = plt.contour(X, Y, Q, origin='image', levels=[0], colors=('w'), extent=extents)
plt.clabel(CSQ, inline=1, fontsize=10)
CStau = plt.contour(X, Y, tau, origin='image', levels=[0.06,1, 10], colors=('r','m','k'), extent=extents)
plt.clabel(CStau, inline=1, fontsize=10)
plt.imshow(element['data'], interpolation='none', cmap='viridis', extent=extents, aspect='equal',
norm=matplotlib.colors.SymLogNorm(element['threshold']))
plt.colorbar(ticks=element['ticks'],shrink=1)
plt.xlabel(u"Densité surfacique $\Sigma$ (g·cm$^{-2}$)")
plt.ylabel(u"Température $T$ (K)")
plt.tight_layout()
plt.savefig('maps/'+element['var']+'_map.pdf', transparent=True, dpi=300, bbox_inches='tight', pad_inches=0)
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.show()
|
{
"content_hash": "71381c45fa35aa800068583f324a64ff",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 158,
"avg_line_length": 44.49180327868852,
"alnum_prop": 0.5700073691967575,
"repo_name": "M2-AAIS/BAD",
"id": "a7e0ffc427e1c7deaceaeb5e7de0d1ca22c4df3c",
"size": "2768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plot_map.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "FORTRAN",
"bytes": "69857"
},
{
"name": "Makefile",
"bytes": "1524"
},
{
"name": "Python",
"bytes": "22230"
}
],
"symlink_target": ""
}
|
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdip...@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Attention: Requires Chrome or Safari. For IE or Firefox you need https://github.com/gimite/web-socket-js
1) install tornado (requires Tornado 3.0 or later)
easy_install tornado
2) start this app:
python gluon/contrib/websocket_messaging.py -k mykey -p 8888
3) from any web2py app you can post messages with
from gluon.contrib.websocket_messaging import websocket_send
websocket_send('http://127.0.0.1:8888', 'Hello World', 'mykey', 'mygroup')
4) from any template you can receive them with
<script>
$(document).ready(function(){
if(!$.web2py.web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup', function(e){alert(e.data)}))
alert("html5 websocket not supported by your browser, try Google Chrome");
});
</script>
When the server posts a message, all clients connected to the page will pop up an alert message
Or if you want to send json messages and store evaluated json in a var called data:
<script>
$(document).ready(function(){
var data;
$.web2py.web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup', function(e){data=eval('('+e.data+')')});
});
</script>
- All communications between web2py and websocket_messaging will be digitally signed with hmac.
- All validation is handled on the web2py side and there is no need to modify websocket_messaging.py
- Multiple web2py instances can talk with one or more websocket_messaging servers.
- "ws://127.0.0.1:8888/realtime/" must be contain the IP of the websocket_messaging server.
- Via group='mygroup' name you can support multiple groups of clients (think of many chat-rooms)
Here is a complete sample web2py action:
def index():
form=LOAD('default', 'ajax_form', ajax=True)
script=SCRIPT('''
jQuery(document).ready(function(){
var callback=function(e){alert(e.data)};
if(!$.web2py.web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup', callback))
alert("html5 websocket not supported by your browser, try Google Chrome");
});
''')
return dict(form=form, script=script)
def ajax_form():
form=SQLFORM.factory(Field('message'))
if form.accepts(request,session):
from gluon.contrib.websocket_messaging import websocket_send
websocket_send(
'http://127.0.0.1:8888', form.vars.message, 'mykey', 'mygroup')
return form
https is possible too, using 'https://127.0.0.1:8888' instead of 'http://127.0.0.1:8888', but the
server needs to be started with
python gluon/contrib/websocket_messaging.py -k mykey -p 8888 -s keyfile.pem -c certfile.pem
for a secure websocket, do:
web2py_websocket('wss://127.0.0.1:8888/realtime/mygroup',callback)
Acknowledgements:
Tornado code inspired by http://thomas.pelletier.im/2010/08/websocket-tornado-redis/
"""
from __future__ import print_function
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import hmac
import sys
import optparse
import time
import gluon.utils
from gluon._compat import to_native, to_bytes, urlencode, urlopen
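# Shared state: `listeners` maps a group name to its connected websocket
# handlers, `names` maps a handler to its display name, and `tokens` tracks
# one-time join tokens when the server runs with -t.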
listeners, names, tokens = {}, {}, {}
def websocket_send(url, message, hmac_key=None, group='default'):
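    # Sign the message with HMAC (MD5, hmac.new's historical default) so the
    # tornado server can verify the sender knows the shared key.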
sig = hmac_key and hmac.new(to_bytes(hmac_key), to_bytes(message)).hexdigest() or ''
params = urlencode(
{'message': message, 'signature': sig, 'group': group})
f = urlopen(url, to_bytes(params))
data = f.read()
f.close()
return data
class PostHandler(tornado.web.RequestHandler):
"""
only authorized parties can post messages
"""
def post(self):
        if hmac_key and 'signature' not in self.request.arguments:
            return self.send_error(401)
if 'message' in self.request.arguments:
message = self.request.arguments['message'][0].decode(encoding='UTF-8')
group = self.request.arguments.get('group', ['default'])[0].decode(encoding='UTF-8')
print('%s:MESSAGE to %s:%s' % (time.time(), group, message))
if hmac_key:
signature = self.request.arguments['signature'][0]
actual_signature = hmac.new(to_bytes(hmac_key), to_bytes(message)).hexdigest()
if not gluon.utils.compare(to_native(signature), actual_signature):
                    return self.send_error(401)
for client in listeners.get(group, []):
client.write_message(message)
class TokenHandler(tornado.web.RequestHandler):
"""
    if running with -t, post a token to allow a client to join using that token
    the message here is the token (any uuid)
    this allows only authorized parties to join, for example, a chat
"""
def post(self):
        if hmac_key and 'message' not in self.request.arguments:
            return self.send_error(401)
if 'message' in self.request.arguments:
message = self.request.arguments['message'][0]
if hmac_key:
signature = self.request.arguments['signature'][0]
actual_signature = hmac.new(to_bytes(hmac_key), to_bytes(message)).hexdigest()
if not gluon.utils.compare(to_native(signature), actual_signature):
                    return self.send_error(401)
tokens[message] = None
class DistributeHandler(tornado.websocket.WebSocketHandler):
def check_origin(self, origin):
return True
def open(self, params):
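        # `params` is everything after /realtime/, expected as
        # <group>[/<token>[/<name>]]; missing parts fall back to defaults below.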
        group, token, name = (params.split('/') + [None, None])[:3]
self.group = group or 'default'
self.token = token or 'none'
self.name = name or 'anonymous'
# only authorized parties can join
if DistributeHandler.tokens:
            if self.token not in tokens or tokens[self.token] is not None:
self.close()
else:
tokens[self.token] = self
if not self.group in listeners:
listeners[self.group] = []
# notify clients that a member has joined the groups
for client in listeners.get(self.group, []):
client.write_message('+' + self.name)
listeners[self.group].append(self)
names[self] = self.name
print('%s:CONNECT to %s' % (time.time(), self.group))
def on_message(self, message):
pass
def on_close(self):
if self.group in listeners:
listeners[self.group].remove(self)
del names[self]
# notify clients that a member has left the groups
for client in listeners.get(self.group, []):
client.write_message('-' + self.name)
print('%s:DISCONNECT from %s' % (time.time(), self.group))
# if your webserver is different from tornado server uncomment this
# or override using something more restrictive:
# http://tornado.readthedocs.org/en/latest/websocket.html#tornado.websocket.WebSocketHandler.check_origin
# def check_origin(self, origin):
# return True
if __name__ == "__main__":
usage = __doc__
version = ""
parser = optparse.OptionParser(usage, None, optparse.Option, version)
parser.add_option('-p',
'--port',
default='8888',
dest='port',
help='socket')
parser.add_option('-l',
'--listen',
default='0.0.0.0',
dest='address',
help='listener address')
parser.add_option('-k',
'--hmac_key',
default='',
dest='hmac_key',
help='hmac_key')
parser.add_option('-t',
'--tokens',
action='store_true',
default=False,
dest='tokens',
                      help='require tokens to join')
parser.add_option('-s',
'--sslkey',
default=False,
dest='keyfile',
help='require ssl keyfile full path')
parser.add_option('-c',
'--sslcert',
default=False,
dest='certfile',
help='require ssl certfile full path')
(options, args) = parser.parse_args()
hmac_key = options.hmac_key
DistributeHandler.tokens = options.tokens
urls = [
(r'/', PostHandler),
(r'/token', TokenHandler),
(r'/realtime/(.*)', DistributeHandler)]
application = tornado.web.Application(urls, auto_reload=True)
if options.keyfile and options.certfile:
ssl_options = dict(certfile=options.certfile, keyfile=options.keyfile)
else:
ssl_options = None
http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options)
http_server.listen(int(options.port), address=options.address)
tornado.ioloop.IOLoop.instance().start()
|
{
"content_hash": "c27eec7241d813d61348ff5ae2b01e60",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 112,
"avg_line_length": 37.80753138075314,
"alnum_prop": 0.6134351482957061,
"repo_name": "xiang12835/python_web",
"id": "962e42a25e7b23686f0a3306ca47f5fc78bc2c9c",
"size": "9083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py2_web2py/web2py/gluon/contrib/websocket_messaging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3341"
},
{
"name": "Python",
"bytes": "17420"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='mainChat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(null=True)),
('message', models.TextField(blank=True, max_length=140)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
{
"content_hash": "d65409d4472904bca1c93984dbdbe49e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 118,
"avg_line_length": 30.846153846153847,
"alnum_prop": 0.6221945137157108,
"repo_name": "AtenrevCode/scChat",
"id": "b5b72cb6b1421104033dbc98e53552b80151fd0d",
"size": "875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chat/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "847"
},
{
"name": "HTML",
"bytes": "24679"
},
{
"name": "Python",
"bytes": "27069"
}
],
"symlink_target": ""
}
|
import numpy as np
from opensfm import features
from opensfm.test import data_generation
def test_dataset_load_features_sift(tmpdir) -> None:
data = data_generation.create_berlin_test_folder(tmpdir)
assert len(data.images()) == 3
data.config["feature_type"] = "SIFT"
image = data.images()[0]
points = np.random.random((3, 4))
descriptors = np.random.random((128, 4))
colors = np.random.random((3, 4))
segmentations = np.random.randint(low=0, high=255, size=(3, 4))
instances = np.random.randint(low=0, high=255, size=(3, 4))
semantic_data = features.SemanticData(
segmentations, instances, data.segmentation_labels()
)
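    # Round-trip: write the features to the dataset on disk, read them back,
    # and check that every field survives unchanged.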
before = features.FeaturesData(points, descriptors, colors, semantic_data)
data.save_features(image, before)
after = data.load_features(image)
assert after
assert np.allclose(points, after.points)
assert np.allclose(descriptors, after.descriptors)
assert np.allclose(colors, after.colors)
semantic = after.semantic
assert semantic
assert np.allclose(
segmentations,
semantic.segmentation,
)
assert np.allclose(instances, semantic.instances)
|
{
"content_hash": "c29ae3661e9b86a4316679d036d2fcee",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.6912013536379019,
"repo_name": "mapillary/OpenSfM",
"id": "02202e45d0d09748b26824826a8a0cfb74ec87b6",
"size": "1182",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "opensfm/test/test_dataset.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "396"
},
{
"name": "C++",
"bytes": "648986"
},
{
"name": "CMake",
"bytes": "78367"
},
{
"name": "CSS",
"bytes": "6426"
},
{
"name": "Dockerfile",
"bytes": "642"
},
{
"name": "HTML",
"bytes": "63144"
},
{
"name": "JavaScript",
"bytes": "1054984"
},
{
"name": "Python",
"bytes": "1141169"
},
{
"name": "Shell",
"bytes": "4006"
}
],
"symlink_target": ""
}
|
import os
import sys
import unittest
from django.conf import settings
from htmlmin.middleware import HtmlMinifyMiddleware, MarkRequestMiddleware
from htmlmin.tests import TESTS_DIR
from htmlmin.tests.mocks import (RequestBareMock, RequestMock, ResponseMock,
ResponseWithCommentMock)
class TestMiddleware(unittest.TestCase):
@classmethod
def setUpClass(cls):
sys.path.insert(0, TESTS_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'mock_settings'
@classmethod
def tearDownClass(cls):
sys.path.remove(TESTS_DIR)
del os.environ['DJANGO_SETTINGS_MODULE']
def test_should_minify_response_when_mime_type_is_html(self):
response_mock = ResponseMock()
response = HtmlMinifyMiddleware().process_response(
RequestMock(), response_mock,
)
minified = "<html><head></head><body>some text here</body></html>"
self.assertEqual(minified, response.content)
def test_should_minify_with_any_charset(self):
response_mock = ResponseMock()
response_mock['Content-Type'] = 'text/html; charset=utf-8'
response = HtmlMinifyMiddleware().process_response(
RequestMock(), response_mock,
)
minified = "<html><head></head><body>some text here</body></html>"
self.assertEqual(minified, response.content)
def test_should_not_minify_without_content(self):
response_mock = ResponseMock()
del response_mock['Content-Type']
response = HtmlMinifyMiddleware().process_response(
RequestMock(), response_mock,
)
html_not_minified = "<html> <body>some text here</body> </html>"
self.assertEqual(html_not_minified, response.content)
def test_should_not_minify_not_html_content(self):
response_mock = ResponseMock()
response_mock['Content-Type'] = 'application/json'
response = HtmlMinifyMiddleware().process_response(
RequestMock(), response_mock,
)
html_not_minified = "<html> <body>some text here</body> </html>"
self.assertEqual(html_not_minified, response.content)
def test_should_not_minify_url_marked_as_not_minifiable(self):
html_not_minified = "<html> <body>some text here</body> </html>"
response_mock = ResponseMock()
response = HtmlMinifyMiddleware().process_response(
RequestMock('/raw/'), response_mock,
)
self.assertEqual(html_not_minified, response.content)
def test_should_minify_if_exclude_from_minifying_is_unset(self):
old = settings.EXCLUDE_FROM_MINIFYING
del settings.EXCLUDE_FROM_MINIFYING
minified = "<html><head></head><body>some text here</body></html>"
response = HtmlMinifyMiddleware().process_response(
RequestMock(), ResponseMock(),
)
self.assertEqual(minified, response.content)
settings.EXCLUDE_FROM_MINIFYING = old
def test_should_not_minify_response_with_minify_response_false(self):
html_not_minified = "<html> <body>some text here</body> </html>"
response_mock = ResponseMock()
response_mock.minify_response = False
response = HtmlMinifyMiddleware().process_response(
RequestMock(), response_mock,
)
self.assertEqual(html_not_minified, response.content)
def test_should_minify_response_with_minify_response_true(self):
minified = "<html><head></head><body>some text here</body></html>"
response_mock = ResponseMock()
response_mock.minify_response = True
response = HtmlMinifyMiddleware().process_response(
RequestMock(), response_mock,
)
self.assertEqual(minified, response.content)
def test_should_keep_comments_when_they_are_enabled(self):
old = settings.KEEP_COMMENTS_ON_MINIFYING
settings.KEEP_COMMENTS_ON_MINIFYING = True
minified = "<html><!-- some comment --><head></head><body>" + \
"some text here</body></html>"
response_mock = ResponseWithCommentMock()
response = HtmlMinifyMiddleware().process_response(
RequestMock(), response_mock,
)
self.assertEqual(minified, response.content)
settings.KEEP_COMMENTS_ON_MINIFYING = old
    def test_should_remove_comments_when_they_are_disabled(self):
old = settings.KEEP_COMMENTS_ON_MINIFYING
settings.KEEP_COMMENTS_ON_MINIFYING = False
minified = "<html><head></head><body>some text here</body></html>"
response_mock = ResponseWithCommentMock()
response = HtmlMinifyMiddleware().process_response(
RequestMock(), response_mock,
)
self.assertEqual(minified, response.content)
settings.KEEP_COMMENTS_ON_MINIFYING = old
def test_should_remove_comments_when_the_setting_is_not_specified(self):
old = settings.KEEP_COMMENTS_ON_MINIFYING
del settings.KEEP_COMMENTS_ON_MINIFYING
minified = "<html><head></head><body>some text here</body></html>"
response_mock = ResponseWithCommentMock()
response = HtmlMinifyMiddleware().process_response(
RequestMock(), response_mock,
)
self.assertEqual(minified, response.content)
settings.KEEP_COMMENTS_ON_MINIFYING = old
def test_should_not_minify_if_the_HTML_MINIFY_setting_is_false(self):
old = settings.HTML_MINIFY
settings.HTML_MINIFY = False
expected_output = "<html> <body>some text here</body> </html>"
response = HtmlMinifyMiddleware().process_response(
RequestMock(), ResponseMock(),
)
self.assertEqual(expected_output, response.content)
settings.HTML_MINIFY = old
def test_should_not_minify_when_DEBUG_is_enabled(self):
old = settings.HTML_MINIFY
old_debug = settings.DEBUG
del settings.HTML_MINIFY
settings.DEBUG = True
expected_output = "<html> <body>some text here</body> </html>"
response = HtmlMinifyMiddleware().process_response(
RequestMock(), ResponseMock(),
)
self.assertEqual(expected_output, response.content)
settings.DEBUG = old_debug
settings.HTML_MINIFY = old
def test_should_minify_when_DEBUG_is_false_and_MINIFY_is_unset(self):
old = settings.HTML_MINIFY
old_debug = settings.DEBUG
del settings.HTML_MINIFY
settings.DEBUG = False
minified = "<html><head></head><body>some text here</body></html>"
response = HtmlMinifyMiddleware().process_response(
RequestMock(), ResponseMock(),
)
self.assertEqual(minified, response.content)
settings.DEBUG = old_debug
settings.HTML_MINIFY = old
def test_should_set_flag_when_request_hits_middleware(self):
request_mock = RequestBareMock()
MarkRequestMiddleware().process_request(request_mock)
self.assertTrue(request_mock._hit_htmlmin)
def test_should_not_minify_when_request_did_not_hit_middleware(self):
expected_output = "<html> <body>some text here</body> </html>"
request_mock = RequestBareMock()
response = HtmlMinifyMiddleware().process_response(
request_mock, ResponseMock(),
)
self.assertEqual(expected_output, response.content)
def test_content_length_header_should_contain_minified_size(self):
response_mock = ResponseMock()
response = HtmlMinifyMiddleware().process_response(
RequestMock(), response_mock,
)
minified = "<html><head></head><body>some text here</body></html>"
self.assertEqual(len(minified), response['Content-Length'])
|
{
"content_hash": "95134a8f191370d5e63bfab930e73577",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 77,
"avg_line_length": 37.859223300970875,
"alnum_prop": 0.6453391460443647,
"repo_name": "cobrateam/django-htmlmin",
"id": "38aa8a1b8bc77ce359f6f59a973c7889d087f9f9",
"size": "7964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "htmlmin/tests/test_middleware.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "30990"
},
{
"name": "Makefile",
"bytes": "432"
},
{
"name": "Python",
"bytes": "38977"
}
],
"symlink_target": ""
}
|
import json
import re
from os import path, getcwd
from random import randint
from typing import Union
from discord import User, Message, Embed, Server, Game, Permissions, Channel, ChannelType, HTTPException, Forbidden, InvalidArgument, NotFound
from discord.ext.commands import Bot, Context, check
from bot.config import CONFIG
from bot.utils import build_embed, OpStatus, permissions
VERSION = '3.5'
PARENTS = [
'202163416083726338', # _HellPie
'225511609567543297', # Kirei
'210279248685039616' # Amanda.
]
ADORABLE_PEOPLE = PARENTS + [
'245997507606216704', # sejin.
'157725308127150081' # Bjorn
]
ZANTOMODE_PEOPLE = ADORABLE_PEOPLE + [
'172331493828460545', # zanto
'88792744587169792' # SakuraJinkyu
]
MERCY_MAINS = [
'210279248685039616', # Amanda.
'245997507606216704', # sejin.
'117349345296121864', # Halo
'296120625972379648', # KrisPbaecon
'186626292966359040' # Pastelle
]
LESSER_CREATURES = [
'228162606580367370', # Sageth
'280096430356430848' # Pmik
]
GUILDS_BLACKLIST = [
'291290703021998080' # OW Road to Grandmaster+ (they disabled send msg perm for the bot, I wont waste CPU on reets)
]
REPLIES_STATUS = True
SIMULATE_USER = None
SIMULATE_COUNT = 0
SIMULATE_CONFIG = {
'USER': None,
'COUNT': 0
}
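# While COUNT > 0, on_message treats USER as the author of incoming messages
# (driven by the `simulate` command under the debug group below).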
ZANTOCONF_PATH = path.join(path.realpath(getcwd()), 'assets', f'{CONFIG["STORAGE"]["ZANTOCONF"]}.json')
ZANTOCONF = {}
ZANTOCONF_BLACKLIST = [' ', '!', '?']
HACKERCONF_PATH = path.join(path.realpath(getcwd()), 'assets', f'{CONFIG["STORAGE"]["HACKERCONF"]}.json')
HACKERCONF = {}
LOG_LEVELS = {
'LOG': '\N{PAGE FACING UP}',
'INFO': '\N{BELL}',
'SUCCESS': '\N{WHITE HEAVY CHECK MARK}',
'WARNING': '\N{WARNING SIGN}',
'ERROR': '\N{CROSS MARK}'
}
bot = Bot(CONFIG.get(section='BOT', option='PREFIX').split(' '), description=CONFIG['BOT']['DESCRIPTION'])
@bot.event
async def on_ready():
print('\n#------------------------------------------------------------------------------#')
print(f'\tLOGIN: {bot.user.name}#{bot.user.discriminator} ({bot.user.id})')
print('#------------------------------------------------------------------------------#\n')
if not path.exists(ZANTOCONF_PATH):
with open(ZANTOCONF_PATH, 'w') as zanto_conf:
json.dump({}, zanto_conf)
else:
with open(ZANTOCONF_PATH, 'r') as zanto_conf:
global ZANTOCONF
ZANTOCONF = json.load(zanto_conf)
if not path.exists(HACKERCONF_PATH):
with open(HACKERCONF_PATH, 'w') as zanto_conf:
json.dump({}, zanto_conf)
else:
with open(HACKERCONF_PATH, 'r') as zanto_conf:
global HACKERCONF
HACKERCONF = json.load(zanto_conf)
@bot.event
async def on_command_error(*args):
pass
@bot.event
async def on_message(message: Message):
def match(_expr):
return re.compile(_expr, re.IGNORECASE).match(message.content)
if message.author.id == bot.user.id or message.author.bot or message.server.id in GUILDS_BLACKLIST:
return
if not REPLIES_STATUS and message.author.id not in ADORABLE_PEOPLE: # Bypass disabled replies for the selected few
await bot.process_commands(message)
return
author = message.author
if SIMULATE_CONFIG['USER'] is not None and SIMULATE_CONFIG['COUNT'] > 0:
author = SIMULATE_CONFIG['USER']
SIMULATE_CONFIG['COUNT'] -= 1
if match('^ay[y]+$'):
reply = 'lmao'
elif match('^wew$'):
reply = 'lad'
elif match('^dead$'):
reply = 'ass'
elif match('^frigg$'):
reply = 'off'
elif match('^oh? ?shit$'):
reply = 'waddup'
elif match('^shut ?up!?( @.+#\d+!?)?$') and author.id not in ADORABLE_PEOPLE:
reply = 'wow, such a Sageth' # Gone, but I like this too much to remove it
elif match('^k( .*)?$') and author.id == '170985598297964544': # DMX
_random = randint(0, 3)
if _random == 0:
reply = 'fuck off DMX'
elif _random == 1:
reply = 'Wow... how original...'
elif _random == 2:
reply = 'https://cdn.discordapp.com/attachments/313911775500173313/344152842614865923/6e3.png'
else:
reply = 'k, nard'
elif match('^fuck off( [a-z#0-9]+)?$'):
reply = f'{author.name} has got work to do'
elif match('^smokes$'):
reply = 'let\'s go'
elif match('^safety$'):
reply = 'always off'
elif match('^sh?leep ti(ght|te)(,? [a-z#0-9]+)?$'):
reply = 'don\'t let the genjis bite'
elif match('^i( ?(ly|lu|((love|luv) (yo)?u))),? (@?(Not)?Hime|(@auto-reply-bot#9347|<@!311154146969518083>))!?$'):
if author.id == '208286812089614337': # Obi
reply = 'ily too obi <3'
elif author.id == '225511609567543297': # Kirei
reply = 'ily a lot mommy :two_hearts: :two_hearts: ^~^'
elif author.id == '202163416083726338': # _HellPie
reply = 'ily too dad <3 <3 ^~^'
elif author.id == '210279248685039616': # Amanda.
reply = 'ily too sweetheart <:valeLove:367687853720731658>'
elif author.id == '157725308127150081': # Bjorn
reply = 'ily too darlin\' c:'
elif author.id == '133006275305930753': # Lotus
            reply = 'Same, but I\'d love you more if you switched off widow...'
elif author.id in MERCY_MAINS:
reply = 'cute angel main, ily too \\*-\\*'
elif author.id in LESSER_CREATURES:
reply = '1. wow, pedo 2. I have a boyfriend'
else:
reply = 'ty, have a nice day darlin\''
elif match('^suc[ck] an? egg$') and message.author.id not in ADORABLE_PEOPLE:
reply = 'no, you, eggsucker'
elif match('^wo[ah]{2}!*$'):
reply = 'WOW'
elif match('^no((rmie)|(o+b))s?!*$'):
reply = 'reeeeeeeeeeeeeeeeeeee, just like obi'
elif match('^(((@?(Not)?Hime)|(<@!311154146969518083>))|(@auto-reply-bot#9347)) ?<:.*Hug:\d+>'):
hug_emote = '<:calvinHugged:367687722024042497>'
if author.id in MERCY_MAINS:
hug_emote = '<:valeHugged:367687799270408202>'
final_emote = ''
if author.id in PARENTS:
final_emote = '<:nomyHeart:367687902433509397>'
elif author.id in ADORABLE_PEOPLE:
final_emote = '<:jay3Kiss:367687910436110336>'
reply = f'<@{author.id}> {hug_emote} {final_emote}'
elif match('^(((@?(Not)?Hime)|(<@!311154146969518083>))|(@auto-reply-bot#9347)) ?<:.*Hugged:\d+>'):
hug_emote = '<:calvinHug:326950539524964352>'
if author.id in MERCY_MAINS:
hug_emote = '<:valeHug:367687799488512010>'
final_emote = ''
if author.id in PARENTS:
final_emote = '<:nomyHeart:367687902433509397>'
elif author.id in ADORABLE_PEOPLE:
final_emote = '<:jay3Kiss:367687910436110336>'
reply = f'<@{author.id}> {hug_emote} {final_emote}'
else:
await bot.process_commands(message)
return
if reply is not None:
return await bot.send_message(message.channel, reply)
@bot.command(pass_context=True)
@check(lambda ctx: ctx.message.server.id not in GUILDS_BLACKLIST and ctx.message.author.id in ZANTOMODE_PEOPLE)
async def zantoconf(ctx: Context, character: str, emote: str):
if character in ZANTOCONF_BLACKLIST:
return await bot.send_message(ctx.message.channel, f'{character} is special and cannot be changed.')
ZANTOCONF[str(character)] = emote
with open(ZANTOCONF_PATH, 'w') as zanto_conf:
json.dump(ZANTOCONF, zanto_conf)
return await bot.send_message(ctx.message.channel, f'{character} => {emote} - Configured')
@bot.command(pass_context=True)
@check(lambda ctx: ctx.message.server.id not in GUILDS_BLACKLIST and ctx.message.author.id in ZANTOMODE_PEOPLE)
async def zantomode(ctx, *sentence):
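    # Re-read the per-character emote map from disk so the latest `zantoconf`
    # changes are applied before translating.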
global ZANTOCONF
with open(ZANTOCONF_PATH, 'r') as zanto_conf:
ZANTOCONF = json.load(zanto_conf)
space = '<:jay3Thinking:368487066755006465> '
message = ' '
sentence = ' '.join(sentence)
for c in sentence:
c = c.upper()
if c in ZANTOCONF:
message += f'{str(ZANTOCONF[c])} '
elif c == ' ':
message += space
elif c == '?':
message += ':question: '
elif c == '!':
message += ':exclamation: '
elif re.compile('[a-z]', re.IGNORECASE).match(c):
message += f':regional_indicator_{c.lower()}: '
if message != ' ':
return await bot.send_message(ctx.message.channel, message)
return await bot.send_message(ctx.message.channel, 'Unable to emojify message :(')
@bot.command(pass_context=True)
@check(lambda ctx: ctx.message.server.id not in GUILDS_BLACKLIST)
async def hackermode(ctx, *sentence):
message = ' '
sentence = ' '.join(sentence)
for c in sentence:
c = c.upper()
if c in HACKERCONF:
message += f'{str(HACKERCONF[c])} '
if message != ' ':
return await bot.send_message(ctx.message.channel, message)
return await bot.send_message(ctx.message.channel, 'Unable to hack message :(')
@bot.group()
@check(lambda ctx: ctx.message.author.id == CONFIG['BOT']['OWNER'] or ctx.message.author.id in PARENTS)
async def hime():
pass
@hime.command(pass_context=True, aliases=['presence'])
async def status(ctx: Context, game: str = None, url: str = None) -> Message:
is_empty = game is None or game.isspace() or len(game) < 1
is_stream = not is_empty and url is not None and len(url) > 0 and not url.isspace()
await bot.change_presence(game=None if is_empty else Game(
name=game,
type=1 if is_stream else 0,
url=url if is_stream else None
))
return await bot.send_message(ctx.message.channel, embed=build_embed(ctx, '{} status{}{}.'.format(
'Cleaned' if is_empty else 'Changed',
'' if is_empty else f' to: `{"Streaming" if is_stream else "Playing"} {game}`',
f'at `{url}`' if is_stream else ''
)))
@hime.command(pass_context=True, aliases=['nick'])
async def nickname(ctx: Context, nick: str = None) -> Message:
is_empty = nick is None or nick.isspace() or len(nick) < 1
try:
await ctx.bot.change_nickname(member=ctx.message.server.me, nickname=None if is_empty else nick[:32])
except Forbidden:
return await bot.send_message(
ctx.message.channel,
embed=build_embed(ctx, 'Permission `Change Nickname` not granted on this server.', status=OpStatus.FAILURE)
)
except HTTPException:
return await bot.send_message(
ctx.message.channel,
embed=build_embed(ctx, 'Nickname change denied by the Discord API.', status=OpStatus.FAILURE)
)
return await bot.send_message(ctx.message.channel, embed=build_embed(ctx, '{} nickname{}.'.format(
'Cleaned' if is_empty else 'Changed',
'' if is_empty else f' to `{nick[:32]}`'
)))
@hime.command(pass_context=True)
async def invite(ctx: Context, dest: Union[Channel, Server], time: int = 0, use: int = 0, tmp: bool = False) -> Message:
options = {
'max_age': time,
'max_uses': use,
'temporary': tmp,
'unique': True
}
try:
created = await bot.create_invite(destination=dest, options=options)
except HTTPException:
if isinstance(dest, Server):
has_permission = Permissions.create_instant_invite in dest.me.server_permissions
elif isinstance(dest, Channel):
has_permission = Permissions.create_instant_invite in dest.permissions_for(dest.server.me)
else:
return await bot.send_message(ctx.message.channel, embed=build_embed(
ctx,
f'Destination (`{dest}`) is not a valid channel or server.',
status=OpStatus.FAILURE
))
if has_permission:
return await bot.send_message(
ctx.message.channel,
embed=build_embed(ctx, 'Invite creation denied by the Discord API.', status=OpStatus.FAILURE)
)
return await bot.send_message(ctx.message.channel, embed=build_embed(
ctx,
'Permission `Create Instant Invite` not granted on {} `{}`'.format(
'server' if isinstance(dest, Server) else 'channel',
dest.name
),
status=OpStatus.FAILURE
))
return await bot.send_message(ctx.message.channel, embed=build_embed(
ctx,
'Created invite: {} to `{}` (can be used `{}` times and expires in `{}` seconds).'.format(
created.url,
dest.name,
created.max_uses,
created.max_age
)
))
@hime.group()
@check(lambda ctx: ctx.message.author.id == CONFIG['BOT']['OWNER'])
async def feature():
pass
@feature.command(pass_context=True)
async def replies(ctx: Context, toggle: bool = not REPLIES_STATUS) -> Message:
global REPLIES_STATUS
REPLIES_STATUS = toggle
return await bot.send_message(
ctx.message.channel,
embed=build_embed(ctx, f'{"Enabled" if toggle else "Disabled"} replies to messages.')
)
@bot.group()
@check(lambda ctx: ctx.message.author.id == CONFIG['BOT']['OWNER'])
async def debug():
pass
@debug.command(pass_context=True)
async def simulate(ctx: Context, user: User = None, count: int = 0) -> Message:
if user is None or count < 1:
if user is not None:
count = 1
await bot.send_message(ctx.message.channel, embed=build_embed(
ctx,
'Simulation mode will be enabled for one message.',
status=OpStatus.WARNING
))
else:
user = SIMULATE_CONFIG['USER']
count = SIMULATE_CONFIG['COUNT']
SIMULATE_CONFIG['USER'] = None
SIMULATE_CONFIG['COUNT'] = 0
if user is None:
return await bot.send_message(
ctx.message.channel,
embed=build_embed(ctx, 'No simulation running.', status=OpStatus.FAILURE)
)
return await bot.send_message(ctx.message.channel, embed=build_embed(
ctx,
'Stopped simulating user `{}` ({}) with `{}` messages left.'.format(
user.name,
user.mention,
count
)
))
SIMULATE_CONFIG['USER'] = user
SIMULATE_CONFIG['COUNT'] = count
return await bot.send_message(ctx.message.channel, embed=build_embed(
ctx,
'Simulating user `{}` ({}) for the next `{}` messages. Use `{}debug simulate` to end prematurely.'.format(
user.name,
user.mention,
count,
bot.command_prefix[0]
),
status=OpStatus.WARNING
))
@debug.command(pass_context=True)
async def log(ctx: Context, dest: str, message: str, level: str = 'INFO') -> Message:
    if isinstance(dest, str):
        dest_id = dest
        dest = bot.get_channel(dest_id)
        if dest is None:
            # Not a channel ID; fall back to looking the ID up as a member.
            for server in bot.servers:
                member = server.get_member(dest_id)
                if member is not None:
                    dest = member
                    break
level = level.upper()
    if level not in LOG_LEVELS:
        return await bot.send_message(ctx.message.channel, embed=build_embed(
            ctx,
            f'Log level can be one of: `{", ".join(LOG_LEVELS)}`',
            status=OpStatus.FAILURE
        ))
prefix = LOG_LEVELS[level]
if message.isspace() or len(message) < 1:
return await bot.send_message(ctx.message.channel, embed=build_embed(
ctx,
'Cannot send an empty message',
status=OpStatus.FAILURE
))
try:
await bot.send_message(dest, embed=build_embed(ctx, f'{prefix} - {message}'))
except InvalidArgument:
return await bot.send_message(ctx.message.channel, embed=build_embed(
ctx,
            f'Channel `{getattr(dest, "name", "N/A")}` ({dest}) is not a valid destination.',
status=OpStatus.FAILURE
))
except NotFound:
return await bot.send_message(ctx.message.channel, embed=build_embed(
ctx,
f'Channel `{dest.name}` (<#{dest.id}>) could not be found.',
status=OpStatus.FAILURE
))
except Forbidden:
return await bot.send_message(ctx.message.channel, embed=build_embed(
ctx,
'Permission `Send Messages` not granted on server `{}` at channel `{}` (<#{}>).'.format(
dest.server.name,
dest.name,
dest.id
),
status=OpStatus.FAILURE
))
except HTTPException:
return await bot.send_message(ctx.message.channel, embed=build_embed(
ctx,
'Message operation was denied by the Discord API.',
status=OpStatus.FAILURE
))
return await bot.send_message(ctx.message.channel, embed=build_embed(
ctx,
        'Sent message to server `{}` on channel `{}` (<#{}>).\n\n{}'.format(
dest.server.name,
dest.name,
dest.id,
message
)
))
@debug.command(pass_context=True)
async def broadcast(ctx: Context, message: str, level: str = 'log') -> Message:
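    # Best-effort broadcast: relay the message to every text/group channel
    # whose name contains 'general' or 'off-topic', across all servers.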
count = 0
for server in bot.servers:
for channel in server.channels:
if channel.type not in [ChannelType.text, ChannelType.group]:
continue
if 'general' in channel.name or 'off-topic' in channel.name:
await log(ctx, channel, message, level)
count += 1
return await bot.send_message(
ctx.message.channel,
embed=build_embed(ctx, f'Broadcasted message to `{count}` servers.\n\n{message}', status=OpStatus.WARNING)
)
@debug.group(name='list')
async def data():
pass
@data.command(pass_context=True)
async def servers(ctx: Context) -> Message:
name = ctx.message.server.me.nick if ctx.message.server.me.nick is not None else bot.user.name
template = Embed().set_author(name=name, icon_url=bot.user.avatar_url).to_dict()
first = template.copy()
first['title'] = 'List of servers'
first['description'] = f'Counting `{len(bot.servers)}` servers for this Bot instance.'
await bot.send_message(ctx.message.channel, embed=Embed.from_data(first))
embed = Embed.from_data(template)
for server in bot.servers:
value = f'- Owner: `{server.owner.name}` ({server.owner.mention} - `{server.owner.id}`)'
value += f'\n- Created: `{server.created_at.strftime("%Y-%m-%d %H:%M:%S")}`'
value += f'\n- Icon: `{server.icon_url}`\n'
value += f'\n- Region: {server.region if isinstance(server.region, str) else server.region.value}'
value += f'\n- Channels: `{len(server.channels)}`\n- Members: `{len(server.members)}`'
if len(server.features) > 0:
value += f'\n- Features: `{server.features}`'
if 'INVITE_SPLASH' in server.features:
            value += f'\n- Splash: `{server.splash}`\n- Splash URL: `{server.splash_url}`'
value += f'\n- Permissions:{permissions(server.me.server_permissions.value)}'
embed.add_field(name=f'(`{server.id}`) - {server.name}', value=value)
if len(embed.fields) == 25 or len(str(embed.to_dict())) > 5000:
await bot.send_message(ctx.message.channel, embed=embed)
embed = Embed.from_data(template)
if len(embed.fields) > 0:
await bot.send_message(ctx.message.channel, embed=embed)
return await bot.send_message(ctx.message.channel, embed=build_embed(ctx, 'Listed all servers with details.'))
@data.command(pass_context=True)
async def channels(ctx: Context, server_id: str) -> Message:
server = bot.get_server(server_id)
name = ctx.message.server.me.nick if ctx.message.server.me.nick is not None else bot.user.name
template = Embed().set_author(name=name, icon_url=bot.user.avatar_url).to_dict()
first = template.copy()
first['title'] = f'List of channels for server `{server.name}` (`{server.id}`)'
first['description'] = f'Counting `{len(server.channels)}` channels for this server.'
await bot.send_message(ctx.message.channel, embed=Embed.from_data(first))
embed = Embed.from_data(template)
for channel in server.channels:
embed.add_field(
            name=f'`{channel.name}` ({channel.mention} - `{channel.id}`)',
value=f'{channel.topic}\n\nPermissions:{permissions(channel.permissions_for(server.me).value)}'
)
if len(embed.fields) == 25 or len(str(embed.to_dict())) > 5000:
await bot.send_message(ctx.message.channel, embed=embed)
embed = Embed.from_data(template)
if len(embed.fields) > 0:
await bot.send_message(ctx.message.channel, embed=embed)
return await bot.send_message(ctx.message.channel, embed=build_embed(ctx, 'Listed all channels with details.'))
def start():
bot.run(CONFIG.get(section='LOGIN', option='TOKEN'))
|
{
"content_hash": "b4ad1bc961746c53acff9047072e6f3c",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 142,
"avg_line_length": 34.91573033707865,
"alnum_prop": 0.6781442746044516,
"repo_name": "HellPie/discord-reply-bot",
"id": "e0f39efb3a6455df9a9623cda01dbc81fd70aae5",
"size": "18645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21812"
}
],
"symlink_target": ""
}
|
import os
import sys
from django.core.management import call_command
# Main entrypoint for barsystem-installer executable
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'barsystem.local_settings')
import warnings
warnings.filterwarnings('ignore') # ignore the warning about settings
    call_command('install_barsystem', *argv)
if __name__ == '__main__':
main()
|
{
"content_hash": "c66b3c5d3739bec5d276c5665b1b8ebf",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 23.94736842105263,
"alnum_prop": 0.701098901098901,
"repo_name": "TkkrLab/barsystem",
"id": "eda16db54ea18ab0b3d376fe55bef45e3ad42755",
"size": "455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barsystem/src/barsystem/install.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1578"
},
{
"name": "HTML",
"bytes": "19022"
},
{
"name": "JavaScript",
"bytes": "35128"
},
{
"name": "Python",
"bytes": "95526"
}
],
"symlink_target": ""
}
|
from .client import ServiceMonitoringServiceClient
from .async_client import ServiceMonitoringServiceAsyncClient
__all__ = (
"ServiceMonitoringServiceClient",
"ServiceMonitoringServiceAsyncClient",
)
|
{
"content_hash": "ca85a8b623fac05321ea9ae3dd0b9cf3",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 61,
"avg_line_length": 29.857142857142858,
"alnum_prop": 0.8133971291866029,
"repo_name": "googleapis/python-monitoring",
"id": "775fda5cc28cc2e994595987125f18f036c54001",
"size": "809",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/monitoring_v3/services/service_monitoring_service/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2375818"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
}
|
"""Custom TestCase and helpers for connectmessages tests."""
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.messages.storage.fallback import FallbackStorage
from django.test import RequestFactory
from django.utils.timezone import now
from model_mommy import mommy
from open_connect.connect_core.utils.basetests import ConnectTestCase
from open_connect.groups.models import Group
from open_connect.connectmessages.models import Message, Thread, UserThread
USER_MODEL = get_user_model()
MESSAGE_TEXT = (
'This has been a test. This has been a test.'
' This has been a test. This has been a test.'
' This has been a test. This has been a test.'
' This has been a test. This has been a test.'
)
THREAD_SUBJECT = 'Test message'
class ConnectMessageTestCase(ConnectTestCase):
"""Helper TestCase for connectmessages app."""
# pylint: disable=invalid-name
@classmethod
def setUpClass(cls):
"""Setup the TestCase class"""
super(ConnectMessageTestCase, cls).setUpClass()
cls.group1 = mommy.make(
Group, tos_accepted_at=now())
cls.group2 = mommy.make(Group)
cls.superuser.add_to_group(cls.group1.pk)
cls.superuser.add_to_group(cls.group2.pk)
cls.normal_user.add_to_group(cls.group1.pk)
cls.staff_user.add_to_group(cls.group1.pk)
cls.thread1 = mommy.make(
Thread, group=cls.group1, subject=THREAD_SUBJECT)
cls.message1 = mommy.make(
Message, thread=cls.thread1, sender=cls.superuser,
text=MESSAGE_TEXT, status='approved')
cls.message2 = mommy.make(
Message, thread=cls.thread1, sender=cls.normal_user,
text=MESSAGE_TEXT, status='approved')
cls.thread2 = mommy.make(
Thread, group=cls.group2, subject=THREAD_SUBJECT)
cls.message3 = mommy.make(
Message, thread=cls.thread2, sender=cls.superuser,
text=MESSAGE_TEXT, status='approved')
cls.directthread1 = mommy.make(
Thread, thread_type='direct', subject=THREAD_SUBJECT)
cls.directmessage1 = mommy.make(
Message,
thread=cls.directthread1,
sender=cls.user1,
text=MESSAGE_TEXT,
status='approved'
)
mommy.make(UserThread, user=cls.normal_user, thread=cls.directthread1)
mommy.make(UserThread, user=cls.staff_user, thread=cls.directthread1)
cls.request_factory = RequestFactory()
cls.request = cls.request_factory.get('/')
setattr(cls.request, 'session', 'session')
messages = FallbackStorage(cls.request)
setattr(cls.request, '_messages', messages)
cls.request.user = cls.superuser
cls._group = None
# pylint: disable=invalid-name
def setUp(self):
"""Setup the test"""
self.client.post(
reverse('login'), {'username': 'bo@dj.local', 'password': 'moo'})
def message(self, **kwargs):
"""Create a new non-persistent Message."""
return mommy.prepare(
Message,
thread=kwargs.get('thread', self.thread1),
sender=kwargs.get('user', self.superuser),
text=kwargs.get('message', MESSAGE_TEXT),
status=kwargs.get('status', 'approved')
)
@property
def group(self):
"""Cache and return the test group."""
if not self._group:
self._group = mommy.make(
Group, group__name='Test group', published=True)
return self._group
# pylint: disable=invalid-name
def assertSuccess(self, response):
"""Helper method for asserting a response object was successful."""
self.assertEqual(response.status_code, 200)
|
{
"content_hash": "0a52768bc259786935c675a232468217",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 78,
"avg_line_length": 36.24528301886792,
"alnum_prop": 0.6387298282144717,
"repo_name": "lpatmo/actionify_the_news",
"id": "4cd4b22554cedb6c5b7c5dd2bcc48a2fe19eb545",
"size": "3842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open_connect/connectmessages/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "546928"
},
{
"name": "HTML",
"bytes": "151617"
},
{
"name": "JavaScript",
"bytes": "211965"
},
{
"name": "Python",
"bytes": "882989"
}
],
"symlink_target": ""
}
|
"""gcloud dns record-sets changes describe command."""
from googlecloudsdk.api_lib.dns import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import resolvers
class Describe(base.DescribeCommand):
"""View the details of a change.
This command displays the details of the specified change.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To display the details of a change, run:
$ {command} change_id
""",
}
@staticmethod
def Args(parser):
util.ZONE_FLAG.AddToParser(parser)
parser.add_argument(
'change_id', metavar='CHANGE_ID',
help='The ID of the change you want details for.')
def Run(self, args):
dns = self.context['dns_client']
resources = self.context['dns_resources']
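    # Resolve CHANGE_ID against the --zone flag into a full dns.changes
    # resource reference (project/managedZone/changeId).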
change_ref = resources.Parse(
args.change_id,
params={'managedZone': resolvers.FromArgument('--zone', args.zone)},
collection='dns.changes')
return dns.changes.Get(
dns.MESSAGES_MODULE.DnsChangesGetRequest(
project=change_ref.project,
managedZone=change_ref.managedZone,
changeId=change_ref.changeId))
|
{
"content_hash": "36f8cd50e7416e635596fe2ebd2d5990",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 76,
"avg_line_length": 28.595238095238095,
"alnum_prop": 0.6519567027477102,
"repo_name": "KaranToor/MA450",
"id": "f6d9b0af6f8be36f482bd46935fb7d7e5c8436a9",
"size": "1797",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/surface/dns/record_sets/changes/describe.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, absolute_import
import itertools
import numpy as np
import numba.unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import types, typeof, njit
from numba import lowering
from .support import TestCase
def return_double_or_none(x):
if x:
ret = None
else:
ret = 1.2
return ret
def return_different_statement(x):
if x:
return None
else:
return 1.2
def return_bool_optional_or_none(x, y):
if y:
z = False
else:
z = None
if x == 2:
# A boolean
return True
elif x == 1:
# A runtime optional
return z
else:
# None
return None
def is_this_a_none(x):
if x:
val_or_none = None
else:
val_or_none = x
if val_or_none is None:
return x - 1
if val_or_none is not None:
return x + 1
def a_is_b(a, b):
"""
    Note that in nopython mode this operation does not make much sense,
    because we don't have objects anymore.
    `a is b` is always False unless operating on None or an Optional type
"""
return a is b
def a_is_not_b(a, b):
"""
This is `not (a is b)`
"""
return a is not b
class TestOptional(TestCase):
def test_return_double_or_none(self):
pyfunc = return_double_or_none
cres = compile_isolated(pyfunc, [types.boolean])
cfunc = cres.entry_point
for v in [True, False]:
self.assertPreciseEqual(pyfunc(v), cfunc(v))
def test_return_different_statement(self):
pyfunc = return_different_statement
cres = compile_isolated(pyfunc, [types.boolean])
cfunc = cres.entry_point
for v in [True, False]:
self.assertPreciseEqual(pyfunc(v), cfunc(v))
def test_return_bool_optional_or_none(self):
pyfunc = return_bool_optional_or_none
cres = compile_isolated(pyfunc, [types.int32, types.int32])
cfunc = cres.entry_point
for x, y in itertools.product((0, 1, 2), (0, 1)):
self.assertPreciseEqual(pyfunc(x, y), cfunc(x, y))
def test_is_this_a_none(self):
pyfunc = is_this_a_none
cres = compile_isolated(pyfunc, [types.intp])
cfunc = cres.entry_point
for v in [-1, 0, 1, 2]:
self.assertPreciseEqual(pyfunc(v), cfunc(v))
def test_is_this_a_none_objmode(self):
pyfunc = is_this_a_none
flags = Flags()
flags.set('force_pyobject')
cres = compile_isolated(pyfunc, [types.intp], flags=flags)
cfunc = cres.entry_point
self.assertTrue(cres.objectmode)
for v in [-1, 0, 1, 2]:
self.assertPreciseEqual(pyfunc(v), cfunc(v))
def test_a_is_b_intp(self):
pyfunc = a_is_b
cres = compile_isolated(pyfunc, [types.intp, types.intp])
cfunc = cres.entry_point
# integer identity relies on `==`
self.assertTrue(cfunc(1, 1))
self.assertFalse(cfunc(1, 2))
def test_a_is_not_b_intp(self):
pyfunc = a_is_not_b
cres = compile_isolated(pyfunc, [types.intp, types.intp])
cfunc = cres.entry_point
# integer identity relies on `==`
self.assertFalse(cfunc(1, 1))
self.assertTrue(cfunc(1, 2))
def test_optional_float(self):
def pyfunc(x, y):
if y is None:
return x
else:
return x + y
cfunc = njit("(float64, optional(float64))")(pyfunc)
self.assertAlmostEqual(pyfunc(1., 12.3), cfunc(1., 12.3))
self.assertAlmostEqual(pyfunc(1., None), cfunc(1., None))
def test_optional_array(self):
def pyfunc(x, y):
if y is None:
return x
else:
y[0] += x
return y[0]
cfunc = njit("(float32, optional(float32[:]))")(pyfunc)
cy = np.array([12.3], dtype=np.float32)
py = cy.copy()
self.assertAlmostEqual(pyfunc(1., py), cfunc(1., cy))
np.testing.assert_almost_equal(py, cy)
self.assertAlmostEqual(pyfunc(1., None), cfunc(1., None))
def test_optional_array_error(self):
def pyfunc(y):
return y[0]
cfunc = njit("(optional(int32[:]),)")(pyfunc)
with self.assertRaises(TypeError) as raised:
cfunc(None)
self.assertIn('expected array(int32, 1d, A), got None',
str(raised.exception))
y = np.array([0xabcd], dtype=np.int32)
self.assertEqual(cfunc(y), pyfunc(y))
def test_optional_array_attribute(self):
"""
        Check that we can access an attribute of an Optional
"""
def pyfunc(arr, do_it):
opt = None
if do_it: # forces `opt` to be an optional of arr
opt = arr
return opt.shape[0]
cfunc = njit(pyfunc)
arr = np.arange(5)
self.assertEqual(pyfunc(arr, True), cfunc(arr, True))
def test_assign_to_optional(self):
"""
Check that we can assign to a variable of optional type
"""
@njit
def make_optional(val, get_none):
if get_none:
ret = None
else:
ret = val
return ret
@njit
def foo(val, run_second):
a = make_optional(val, True)
if run_second:
a = make_optional(val, False)
return a
self.assertIsNone(foo(123, False))
self.assertEqual(foo(231, True), 231)
def test_optional_thru_omitted_arg(self):
"""
Issue 1868
"""
def pyfunc(x=None):
if x is None:
x = 1
return x
cfunc = njit(pyfunc)
self.assertEqual(pyfunc(), cfunc())
self.assertEqual(pyfunc(3), cfunc(3))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "baeca7637e5b55887b01caed34a86c90",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 71,
"avg_line_length": 26.44,
"alnum_prop": 0.5505126912086065,
"repo_name": "stefanseefeld/numba",
"id": "314d8829f2c92452647271b1fe5bed8cd2fd7833",
"size": "5949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/tests/test_optional.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5535"
},
{
"name": "C",
"bytes": "303376"
},
{
"name": "C++",
"bytes": "17024"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "Jupyter Notebook",
"bytes": "110325"
},
{
"name": "Python",
"bytes": "3946372"
},
{
"name": "Shell",
"bytes": "2414"
}
],
"symlink_target": ""
}
|
from openelex.tests.mongo_test_case import MongoTestCase
from openelex.models import RawResult
from openelex.us.md.load import MDLoader2008Special
class TestMDLoader2008Special(MongoTestCase):
def setUp(self):
super(TestMDLoader2008Special, self).setUp()
self.loader = MDLoader2008Special()
self.mapping = self._get_mapping()
# HACK: set loader's mapping attribute
# so we can test if loader._file_handle exists. This
# usually happens in the loader's run() method.
self.loader.source = self.mapping['generated_filename']
try:
fh = self.loader._file_handle
except IOError:
self.skipTest("Cached file for 2008 special election not found. "
"Run 'invoke fetch --state=md --datefilter=2008' first.")
def _get_mapping(self):
for mapping in self.loader.datasource.mappings('2008'):
if 'special' in mapping['election']:
return mapping
else:
raise Exception("Mapping for 2008 special election expected")
def test_parse_html_table(self):
table = self.loader._get_html_table()
rows = self.loader._parse_html_table(table)
self.assertEqual(len(rows), 4)
for row in rows:
self.assertEqual(len(row), 7)
def test_run(self):
election_id = self.mapping['election']
self.loader.run(self.mapping)
self.assertEqual(
RawResult.objects.filter(election_id=election_id).count(), 12)
|
{
"content_hash": "e87fcd23020d346f7ba06c0f95f36341",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 77,
"avg_line_length": 39.282051282051285,
"alnum_prop": 0.6377284595300261,
"repo_name": "cathydeng/openelections-core",
"id": "90a37a5fde63be6889def81b3b158f6995d46228",
"size": "1533",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "openelex/tests/test_md_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "57395"
},
{
"name": "Python",
"bytes": "752866"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(name='rhyme',
version='0.1',
description='A rhyming library',
author='Ian Overgard',
author_email='ian.overgard@gmail.com',
url='http://github.com/fathat/pyrhyme',
py_modules=['rhyme'],
data_files=[('data', ['data/rhyme.db'])]
)
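# Typical distutils workflow (the commands below are standard distutils, shown
# for reference): `python setup.py sdist` builds a source tarball that bundles
# data/rhyme.db; `python setup.py install` installs the rhyme module.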
|
{
"content_hash": "7748026ea411f2de5b20007d6ec31465",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 46,
"avg_line_length": 28.727272727272727,
"alnum_prop": 0.6044303797468354,
"repo_name": "fathat/pyrhyme",
"id": "d8afe0b5d0f6ab7f89918cd8480ea69bf89f9666",
"size": "339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2178"
}
],
"symlink_target": ""
}
|
import json
import yaml
from .patched import Api
from flask import Blueprint, make_response, current_app
from datatypes import ns
api_blueprint = Blueprint('api', __name__)
@api_blueprint.route('/swagger.yml')
def swagger():
    resp = make_response(yaml.dump(yaml.safe_load(json.dumps(current_app.api.__schema__)), default_flow_style=False))
resp.mimetype = 'text/x-yaml'
return resp
class Rest(object):
def __init__(self, app=None, **kwargs):
self.api = Api(title='IF+ Restful API',
version='1.0',
description='IF+ Restful API')
self.api.add_namespace(ns)
self.app = None
self.url_prefix = None
self.kwargs = {}
if app is not None:
self.init_app(app, **kwargs)
def init_app(self, app, **kwargs):
setattr(app, 'api', self.api)
self.app = app
        # pop() so url_prefix is not passed twice to register_blueprint below
        self.url_prefix = kwargs.pop('url_prefix', '/api')
self.kwargs = kwargs
def register(self):
        # Generate and register the blueprint
self.app.api.init_app(api_blueprint)
self.app.register_blueprint(api_blueprint, url_prefix=self.url_prefix, **self.kwargs)
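# Minimal usage sketch (the Flask app and url_prefix below are assumptions,
# not part of this module):
def _example_mount():
    from flask import Flask
    app = Flask(__name__)
    rest = Rest(app, url_prefix='/api')
    rest.register()  # mounts api_blueprint; /api/swagger.yml serves the spec
    return app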
|
{
"content_hash": "8eb71c9fbefa84e2077079394807f355",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 112,
"avg_line_length": 28.097560975609756,
"alnum_prop": 0.6024305555555556,
"repo_name": "hitakaken/ifplus",
"id": "537befeb3a0c4d7a1d8c007f126ceab5cc4609a1",
"size": "1184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ifplus/restful/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "243576"
}
],
"symlink_target": ""
}
|
import abc
from neutron_lib.api import validators
from neutron_lib import exceptions
import six
import webob.exc
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron.api.v2 import resource as api_resource
from neutron import manager
from neutron.services import service_base
TAG = 'tag'
TAGS = TAG + 's'
MAX_TAG_LEN = 60
TAG_PLUGIN_TYPE = 'TAG'
TAG_SUPPORTED_RESOURCES = {
attributes.NETWORKS: attributes.NETWORK,
# other resources can be added
}
TAG_ATTRIBUTE_MAP = {
TAGS: {'allow_post': False, 'allow_put': False, 'is_visible': True}
}
class TagResourceNotFound(exceptions.NotFound):
message = _("Resource %(resource)s %(resource_id)s could not be found.")
class TagNotFound(exceptions.NotFound):
message = _("Tag %(tag)s could not be found.")
def get_parent_resource_and_id(kwargs):
for key in kwargs:
for resource in TAG_SUPPORTED_RESOURCES:
if key == TAG_SUPPORTED_RESOURCES[resource] + '_id':
return resource, kwargs[key]
return None, None
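# Hedged sketch of how router kwargs resolve to a parent resource; the UUID is
# illustrative and the kwarg name follows TAG_SUPPORTED_RESOURCES above:
def _example_parent_lookup():
    resource, rid = get_parent_resource_and_id({'network_id': 'fake-uuid'})
    # resource == attributes.NETWORKS, rid == 'fake-uuid'
    return resource, rid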
def validate_tag(tag):
msg = validators.validate_string(tag, MAX_TAG_LEN)
if msg:
raise exceptions.InvalidInput(error_message=msg)
def validate_tags(body):
if 'tags' not in body:
raise exceptions.InvalidInput(error_message="Invalid tags body.")
msg = validators.validate_list_of_unique_strings(body['tags'], MAX_TAG_LEN)
if msg:
raise exceptions.InvalidInput(error_message=msg)
class TagController(object):
def __init__(self):
self.plugin = (manager.NeutronManager.get_service_plugins()
[TAG_PLUGIN_TYPE])
def index(self, request, **kwargs):
# GET /v2.0/networks/{network_id}/tags
parent, parent_id = get_parent_resource_and_id(kwargs)
return self.plugin.get_tags(request.context, parent, parent_id)
def show(self, request, id, **kwargs):
# GET /v2.0/networks/{network_id}/tags/{tag}
# id == tag
validate_tag(id)
parent, parent_id = get_parent_resource_and_id(kwargs)
return self.plugin.get_tag(request.context, parent, parent_id, id)
def create(self, request, **kwargs):
# not supported
# POST /v2.0/networks/{network_id}/tags
raise webob.exc.HTTPNotFound("not supported")
def update(self, request, id, **kwargs):
# PUT /v2.0/networks/{network_id}/tags/{tag}
# id == tag
validate_tag(id)
parent, parent_id = get_parent_resource_and_id(kwargs)
return self.plugin.update_tag(request.context, parent, parent_id, id)
def update_all(self, request, body, **kwargs):
# PUT /v2.0/networks/{network_id}/tags
# body: {"tags": ["aaa", "bbb"]}
validate_tags(body)
parent, parent_id = get_parent_resource_and_id(kwargs)
return self.plugin.update_tags(request.context, parent, parent_id,
body)
def delete(self, request, id, **kwargs):
# DELETE /v2.0/networks/{network_id}/tags/{tag}
# id == tag
validate_tag(id)
parent, parent_id = get_parent_resource_and_id(kwargs)
return self.plugin.delete_tag(request.context, parent, parent_id, id)
def delete_all(self, request, **kwargs):
# DELETE /v2.0/networks/{network_id}/tags
parent, parent_id = get_parent_resource_and_id(kwargs)
return self.plugin.delete_tags(request.context, parent, parent_id)
class Tag(extensions.ExtensionDescriptor):
"""Extension class supporting tags."""
@classmethod
def get_name(cls):
return "Tag support"
@classmethod
def get_alias(cls):
return "tag"
@classmethod
def get_description(cls):
return "Enables to set tag on resources."
@classmethod
def get_updated(cls):
return "2016-01-01T00:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
action_status = {'index': 200, 'show': 204, 'update': 201,
'update_all': 200, 'delete': 204, 'delete_all': 204}
controller = api_resource.Resource(TagController(),
base.FAULT_MAP,
action_status=action_status)
collection_methods = {"delete_all": "DELETE",
"update_all": "PUT"}
for collection_name, member_name in TAG_SUPPORTED_RESOURCES.items():
parent = {'member_name': member_name,
'collection_name': collection_name}
exts.append(extensions.ResourceExtension(
TAGS, controller, parent,
collection_methods=collection_methods))
return exts
def get_extended_resources(self, version):
if version != "2.0":
return {}
EXTENDED_ATTRIBUTES_2_0 = {}
for collection_name in TAG_SUPPORTED_RESOURCES:
EXTENDED_ATTRIBUTES_2_0[collection_name] = TAG_ATTRIBUTE_MAP
return EXTENDED_ATTRIBUTES_2_0
@six.add_metaclass(abc.ABCMeta)
class TagPluginBase(service_base.ServicePluginBase):
"""REST API to operate the Tag."""
def get_plugin_description(self):
return "Tag support"
@classmethod
def get_plugin_type(cls):
return TAG_PLUGIN_TYPE
@abc.abstractmethod
def get_tags(self, context, resource, resource_id):
pass
@abc.abstractmethod
def get_tag(self, context, resource, resource_id, tag):
pass
@abc.abstractmethod
def update_tags(self, context, resource, resource_id, body):
pass
@abc.abstractmethod
def update_tag(self, context, resource, resource_id, tag):
pass
@abc.abstractmethod
def delete_tags(self, context, resource, resource_id):
pass
@abc.abstractmethod
def delete_tag(self, context, resource, resource_id, tag):
pass
|
{
"content_hash": "bbd8544f303be5aeaa8c8e38cbe8af0a",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 79,
"avg_line_length": 31.243523316062177,
"alnum_prop": 0.6233830845771144,
"repo_name": "igor-toga/local-snat",
"id": "a09dfa97796de5a08b42c79cd388c9fa608e2e99",
"size": "6605",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/extensions/tag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9636936"
},
{
"name": "Shell",
"bytes": "14072"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cms', '0016_auto_20160608_1535'),
('videos', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='video',
name='cms_page',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='cms.Page'),
),
]
|
{
"content_hash": "9865b76b1da593977df0070e70338657",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 123,
"avg_line_length": 25.75,
"alnum_prop": 0.6194174757281553,
"repo_name": "rouxcode/django-cms-plugins",
"id": "89f30daca5dbdb2eb010ccd93bc12550003435d6",
"size": "588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugins/videos/migrations/0002_video_cms_page.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8364"
},
{
"name": "HTML",
"bytes": "13621"
},
{
"name": "JavaScript",
"bytes": "10749"
},
{
"name": "Python",
"bytes": "137096"
}
],
"symlink_target": ""
}
|
"""Tests for db.task layer."""
import datetime as dt
import json
import ddt
import jsonschema
import mock
from rally.common import objects
from rally import consts
from rally import exceptions
from tests.unit import test
@ddt.ddt
class TaskTestCase(test.TestCase):
def setUp(self):
super(TaskTestCase, self).setUp()
self.task = {
"uuid": "00ef46a2-c5b8-4aea-a5ca-0f54a10cbca1",
"status": consts.TaskStatus.INIT,
"verification_log": "",
}
@mock.patch("rally.common.objects.task.db.task_create")
def test_init_with_create(self, mock_task_create):
mock_task_create.return_value = self.task
task = objects.Task(status=consts.TaskStatus.FAILED)
mock_task_create.assert_called_once_with({
"status": consts.TaskStatus.FAILED})
self.assertEqual(task["uuid"], self.task["uuid"])
@mock.patch("rally.common.objects.task.db.task_create")
def test_init_without_create(self, mock_task_create):
task = objects.Task(task=self.task)
self.assertFalse(mock_task_create.called)
self.assertEqual(task["uuid"], self.task["uuid"])
@mock.patch("rally.common.objects.task.uuid.uuid4",
return_value="some_uuid")
@mock.patch("rally.common.objects.task.db.task_create")
def test_init_with_fake_true(self, mock_task_create, mock_uuid4):
task = objects.Task(temporary=True)
self.assertFalse(mock_task_create.called)
self.assertTrue(mock_uuid4.called)
self.assertEqual(task["uuid"], mock_uuid4.return_value)
@mock.patch("rally.common.objects.task.db.task_get")
def test_get(self, mock_task_get):
mock_task_get.return_value = self.task
task = objects.Task.get(self.task["uuid"])
mock_task_get.assert_called_once_with(self.task["uuid"])
self.assertEqual(task["uuid"], self.task["uuid"])
@mock.patch("rally.common.objects.task.db.task_get_status")
def test_get_status(self, mock_task_get_status):
task = objects.Task(task=self.task)
status = task.get_status(task["uuid"])
self.assertEqual(status, mock_task_get_status.return_value)
@mock.patch("rally.common.objects.task.db.task_delete")
@mock.patch("rally.common.objects.task.db.task_create")
def test_create_and_delete(self, mock_task_create, mock_task_delete):
mock_task_create.return_value = self.task
task = objects.Task()
task.delete()
mock_task_delete.assert_called_once_with(
self.task["uuid"], status=None)
@mock.patch("rally.common.objects.task.db.task_delete")
@mock.patch("rally.common.objects.task.db.task_create")
def test_create_and_delete_status(self, mock_task_create,
mock_task_delete):
mock_task_create.return_value = self.task
task = objects.Task()
task.delete(status=consts.TaskStatus.FINISHED)
mock_task_delete.assert_called_once_with(
self.task["uuid"], status=consts.TaskStatus.FINISHED)
@mock.patch("rally.common.objects.task.db.task_delete")
def test_delete_by_uuid(self, mock_task_delete):
objects.Task.delete_by_uuid(self.task["uuid"])
mock_task_delete.assert_called_once_with(
self.task["uuid"], status=None)
@mock.patch("rally.common.objects.task.db.task_delete")
def test_delete_by_uuid_status(self, mock_task_delete):
objects.Task.delete_by_uuid(self.task["uuid"],
consts.TaskStatus.FINISHED)
mock_task_delete.assert_called_once_with(
self.task["uuid"], status=consts.TaskStatus.FINISHED)
@mock.patch("rally.common.objects.task.db.task_list",
return_value=[{"uuid": "a",
"created_at": "b",
"status": consts.TaskStatus.FAILED,
"tag": "d",
"deployment_name": "some_name"}])
    def test_list(self, mock_db_task_list):
        tasks = objects.Task.list(status="somestatus")
        mock_db_task_list.assert_called_once_with("somestatus", None)
        self.assertIs(type(tasks), list)
        self.assertIsInstance(tasks[0], objects.Task)
        self.assertEqual(mock_db_task_list.return_value[0]["uuid"],
                         tasks[0]["uuid"])
@mock.patch("rally.common.objects.deploy.db.task_update")
@mock.patch("rally.common.objects.task.db.task_create")
def test_update(self, mock_task_create, mock_task_update):
mock_task_create.return_value = self.task
mock_task_update.return_value = {"opt": "val2"}
deploy = objects.Task(opt="val1")
deploy._update({"opt": "val2"})
mock_task_update.assert_called_once_with(
self.task["uuid"], {"opt": "val2"})
self.assertEqual(deploy["opt"], "val2")
@ddt.data(
{
"status": "some_status", "allowed_statuses": ("s_1", "s_2")
},
{
"status": "some_status", "allowed_statuses": None
}
)
@ddt.unpack
@mock.patch("rally.common.objects.task.db.task_update_status")
@mock.patch("rally.common.objects.task.db.task_update")
def test_update_status(self, mock_task_update, mock_task_update_status,
status, allowed_statuses):
task = objects.Task(task=self.task)
task.update_status(consts.TaskStatus.FINISHED, allowed_statuses)
if allowed_statuses:
self.assertFalse(mock_task_update.called)
mock_task_update_status.assert_called_once_with(
self.task["uuid"],
consts.TaskStatus.FINISHED,
allowed_statuses
)
else:
self.assertFalse(mock_task_update_status.called)
mock_task_update.assert_called_once_with(
self.task["uuid"],
{"status": consts.TaskStatus.FINISHED},
)
@mock.patch("rally.common.objects.task.db.task_update")
def test_update_verification_log(self, mock_task_update):
mock_task_update.return_value = self.task
task = objects.Task(task=self.task)
task.update_verification_log({"a": "fake"})
mock_task_update.assert_called_once_with(
self.task["uuid"],
{"verification_log": json.dumps({"a": "fake"})}
)
@mock.patch("rally.common.objects.task.charts")
def test_extend_results(self, mock_charts):
self.assertRaises(TypeError, objects.Task.extend_results)
mock_stat = mock.Mock()
mock_stat.render.return_value = "durations_stat"
mock_charts.MainStatsTable.return_value = mock_stat
now = dt.datetime.now()
iterations = [
{"timestamp": i + 2, "duration": i + 5,
"scenario_output": {"errors": "", "data": {}},
"error": [], "idle_duration": i,
"atomic_actions": {
"keystone.create_user": i + 10}} for i in range(10)]
obsolete = [
{"task_uuid": "foo_uuid", "created_at": now, "updated_at": None,
"id": 11, "key": {"kw": {"foo": 42},
"name": "Foo.bar", "pos": 0},
"data": {"raw": iterations, "sla": [],
"full_duration": 40, "load_duration": 32}}]
expected = [
{"iterations": "foo_iterations", "sla": [],
"key": {"kw": {"foo": 42}, "name": "Foo.bar", "pos": 0},
"info": {
"atomic": {"keystone.create_user": {"max_duration": 19,
"min_duration": 10}},
"iterations_count": 10, "iterations_failed": 0,
"max_duration": 14, "min_duration": 5, "tstamp_start": 2,
"full_duration": 40, "load_duration": 32,
"stat": "durations_stat"}}]
# serializable is default
results = objects.Task.extend_results(obsolete)
self.assertIsInstance(results[0]["iterations"], type(iter([])))
self.assertEqual(list(results[0]["iterations"]), iterations)
results[0]["iterations"] = "foo_iterations"
self.assertEqual(results, expected)
# serializable is False
results = objects.Task.extend_results(obsolete, serializable=False)
self.assertIsInstance(results[0]["iterations"], type(iter([])))
self.assertEqual(list(results[0]["iterations"]), iterations)
results[0]["iterations"] = "foo_iterations"
self.assertEqual(results, expected)
# serializable is True
results = objects.Task.extend_results(obsolete, serializable=True)
self.assertEqual(list(results[0]["iterations"]), iterations)
expected[0]["created_at"] = now.strftime("%Y-%d-%mT%H:%M:%S")
expected[0]["updated_at"] = None
jsonschema.validate(results[0],
objects.task.TASK_EXTENDED_RESULT_SCHEMA)
results[0]["iterations"] = "foo_iterations"
self.assertEqual(results, expected)
@mock.patch("rally.common.objects.task.db.task_result_get_all_by_uuid",
return_value="foo_results")
def test_get_results(self, mock_task_result_get_all_by_uuid):
task = objects.Task(task=self.task)
results = task.get_results()
mock_task_result_get_all_by_uuid.assert_called_once_with(
self.task["uuid"])
self.assertEqual(results, "foo_results")
@mock.patch("rally.common.objects.task.db.task_result_create")
def test_append_results(self, mock_task_result_create):
task = objects.Task(task=self.task)
task.append_results("opt", "val")
mock_task_result_create.assert_called_once_with(
self.task["uuid"], "opt", "val")
@mock.patch("rally.common.objects.task.db.task_update")
def test_set_failed(self, mock_task_update):
mock_task_update.return_value = self.task
task = objects.Task(task=self.task)
task.set_failed("foo_type", "foo_error_message", "foo_trace")
mock_task_update.assert_called_once_with(
self.task["uuid"],
{"status": consts.TaskStatus.FAILED,
"verification_log": json.dumps({"etype": "foo_type",
"msg": "foo_error_message",
"trace": "foo_trace"})},
)
@ddt.data(
{
"soft": True, "status": consts.TaskStatus.INIT
},
{
"soft": True, "status": consts.TaskStatus.VERIFYING
},
{
"soft": False, "status": consts.TaskStatus.INIT
},
{
"soft": False, "status": consts.TaskStatus.VERIFYING
}
)
@ddt.unpack
def test_abort_with_init_and_verifying_states(self, soft, status):
task = objects.Task(mock.MagicMock(), fake=True)
task.get_status = mock.MagicMock(
side_effect=(status, status, "running"))
task._update_status_in_abort = mock.MagicMock()
self.assertRaises(exceptions.RallyException, task.abort, soft)
self.assertEqual(1, task.get_status.call_count)
self.assertFalse(task._update_status_in_abort.called)
@ddt.data(
{
"soft": True, "status": consts.TaskStatus.ABORTED
},
{
"soft": True, "status": consts.TaskStatus.FINISHED
},
{
"soft": True, "status": consts.TaskStatus.FAILED
},
{
"soft": False, "status": consts.TaskStatus.ABORTED
},
{
"soft": False, "status": consts.TaskStatus.FINISHED
},
{
"soft": False, "status": consts.TaskStatus.FAILED
}
)
@ddt.unpack
def test_abort_with_finished_states(self, soft, status):
task = objects.Task(mock.MagicMock(), fake=True)
task.get_status = mock.MagicMock(return_value=status)
task.update_status = mock.MagicMock()
self.assertRaises(exceptions.RallyException, task.abort, soft)
self.assertEqual(1, task.get_status.call_count)
self.assertFalse(task.update_status.called)
@ddt.data(True, False)
def test_abort_with_running_state(self, soft):
task = objects.Task(mock.MagicMock(), fake=True)
task.get_status = mock.MagicMock(return_value="running")
task.update_status = mock.MagicMock()
task.abort(soft)
if soft:
status = consts.TaskStatus.SOFT_ABORTING
else:
status = consts.TaskStatus.ABORTING
task.update_status.assert_called_once_with(
status,
allowed_statuses=(consts.TaskStatus.RUNNING,
consts.TaskStatus.SOFT_ABORTING)
)
|
{
"content_hash": "8d51839f156aae7e581799fd7b214fb1",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 76,
"avg_line_length": 40.9171974522293,
"alnum_prop": 0.5849937733499377,
"repo_name": "vganapath/rally",
"id": "884b5a670304c1272828457f3e93d3baae96cc19",
"size": "13478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/common/objects/test_task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "52008"
},
{
"name": "JavaScript",
"bytes": "8550"
},
{
"name": "Mako",
"bytes": "18645"
},
{
"name": "Python",
"bytes": "3621510"
},
{
"name": "Shell",
"bytes": "43808"
}
],
"symlink_target": ""
}
|
"""A simple web crawler -- classes implementing crawling logic."""
import asyncio
import cgi
from http.client import BadStatusLine
import logging
import re
import time
import urllib.parse
logger = logging.getLogger(__name__)
def unescape(s):
"""The inverse of cgi.escape()."""
s = s.replace('"', '"').replace('>', '>').replace('<', '<')
return s.replace('&', '&') # Must be last.
class ConnectionPool:
"""A connection pool.
To open a connection, use reserve(). To recycle it, use unreserve().
The pool is mostly just a mapping from (host, port, ssl) tuples to
lists of Connections. The currently active connections are *not*
in the data structure; get_connection() takes the connection out,
and recycle_connection() puts it back in. To recycle a
connection, call conn.close(recycle=True).
There are limits to both the overall pool and the per-key pool.
"""
def __init__(self, max_pool=10, max_tasks=5):
self.max_pool = max_pool # Overall limit.
self.max_tasks = max_tasks # Per-key limit.
self.loop = asyncio.get_event_loop()
self.connections = {} # {(host, port, ssl): [Connection, ...], ...}
self.queue = [] # [Connection, ...]
def close(self):
"""Close all connections available for reuse."""
for conns in self.connections.values():
for conn in conns:
conn.close()
self.connections.clear()
self.queue.clear()
@asyncio.coroutine
def get_connection(self, host, port, ssl):
"""Create or reuse a connection."""
port = port or (443 if ssl else 80)
try:
ipaddrs = yield from self.loop.getaddrinfo(host, port)
except Exception as exc:
logger.error('Exception %r for (%r, %r)', exc, host, port)
raise
logger.warn('* %s resolves to %s',
host, ', '.join(ip[4][0] for ip in ipaddrs))
# Look for a reusable connection.
for _, _, _, _, (h, p, *_) in ipaddrs:
key = h, p, ssl
conn = None
conns = self.connections.get(key)
while conns:
conn = conns.pop(0)
self.queue.remove(conn)
if not conns:
del self.connections[key]
if conn.stale():
logger.warn('closing stale connection %r', key)
conn.close() # Just in case.
else:
logger.warn('* Reusing pooled connection %r', key)
return conn
# Create a new connection.
conn = Connection(self, host, port, ssl)
yield from conn.connect()
logger.warn('* New connection %r', conn.key)
return conn
def recycle_connection(self, conn):
"""Make a connection available for reuse.
This also prunes the pool if it exceeds the size limits.
"""
conns = self.connections.setdefault(conn.key, [])
conns.append(conn)
self.queue.append(conn)
if len(conns) > self.max_tasks:
victims = conns # Prune one connection for this key.
elif len(self.queue) > self.max_pool:
victims = self.queue # Prune one connection for any key.
else:
return
for victim in victims:
if victim.stale(): # Prefer pruning the oldest stale connection.
logger.warn('closing stale connection %r', victim.key)
break
else:
victim = victims[0]
logger.warn('closing oldest connection %r', victim.key)
conns = self.connections[victim.key]
conns.remove(victim)
if not conns:
del self.connections[victim.key]
self.queue.remove(victim)
victim.close()
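# Hedged sketch of the reserve/recycle cycle described in the class docstring
# above (host and port are illustrative; must run inside the event loop):
@asyncio.coroutine
def _example_pool_cycle():
    pool = ConnectionPool(max_pool=10, max_tasks=5)
    conn = yield from pool.get_connection('example.com', 80, False)
    # ... use conn.reader / conn.writer here ...
    conn.close(recycle=True)  # puts the connection back in pool.connections
    pool.close()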
class Connection:
"""A connection that can be recycled to the pool."""
def __init__(self, pool, host, port, ssl):
self.pool = pool
self.host = host
self.port = port
self.ssl = ssl
self.reader = None
self.writer = None
self.key = None
def stale(self):
return self.reader is None or self.reader.at_eof()
@asyncio.coroutine
def connect(self):
self.reader, self.writer = yield from asyncio.open_connection(
self.host, self.port, ssl=self.ssl)
peername = self.writer.get_extra_info('peername')
if peername:
self.host, self.port = peername[:2]
else:
logger.warn('NO PEERNAME %r %r %r', self.host, self.port, self.ssl)
self.key = self.host, self.port, self.ssl
def close(self, recycle=False):
if recycle and not self.stale():
self.pool.recycle_connection(self)
else:
self.writer.close()
self.pool = self.reader = self.writer = None
@asyncio.coroutine
def make_request(url, pool, *, method='GET', headers=None, version='1.1'):
"""Start an HTTP request. Return a Connection."""
parts = urllib.parse.urlparse(url)
assert parts.scheme in ('http', 'https'), repr(url)
ssl = parts.scheme == 'https'
port = parts.port or (443 if ssl else 80)
path = parts.path or '/'
path = '%s?%s' % (path, parts.query) if parts.query else path
logger.warn('* Connecting to %s:%s using %s for %s',
parts.hostname, port, 'ssl' if ssl else 'tcp', url)
conn = yield from pool.get_connection(parts.hostname, port, ssl)
headers = dict(headers) if headers else {} # Must use Cap-Words.
headers.setdefault('User-Agent', 'asyncio-example-crawl/0.0')
headers.setdefault('Host', parts.netloc)
headers.setdefault('Accept', '*/*')
lines = ['%s %s HTTP/%s' % (method, path, version)]
lines.extend('%s: %s' % kv for kv in headers.items())
for line in lines + ['']:
logger.info('> %s', line)
# TODO: close conn if this fails.
conn.writer.write('\r\n'.join(lines + ['', '']).encode('latin-1'))
return conn # Caller must send body if desired, then call read_response().
@asyncio.coroutine
def read_response(conn):
"""Read an HTTP response from a connection."""
@asyncio.coroutine
def getline():
line = (yield from conn.reader.readline()).decode('latin-1').rstrip()
logger.info('< %s', line)
return line
status_line = yield from getline()
status_parts = status_line.split(None, 2)
if len(status_parts) != 3 or not status_parts[1].isdigit():
logger.error('bad status_line %r', status_line)
raise BadStatusLine(status_line)
http_version, status, reason = status_parts
status = int(status)
headers = {}
while True:
header_line = yield from getline()
if not header_line:
break
key, value = header_line.split(':', 1)
        # TODO: Continuation lines; multiple header lines per key.
headers[key.lower()] = value.lstrip()
if 'content-length' in headers:
nbytes = int(headers['content-length'])
output = asyncio.StreamReader()
        asyncio.ensure_future(length_handler(nbytes, conn.reader, output))
elif headers.get('transfer-encoding') == 'chunked':
output = asyncio.StreamReader()
        asyncio.ensure_future(chunked_handler(conn.reader, output))
else:
output = conn.reader
return http_version[5:], status, reason, headers, output
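# Hedged end-to-end sketch combining make_request() and read_response()
# (the URL is illustrative; run inside the event loop):
@asyncio.coroutine
def _example_fetch(pool):
    conn = yield from make_request('http://example.com/', pool)
    _, status, _, headers, output = yield from read_response(conn)
    body = yield from output.read()
    conn.close(recycle=headers.get('connection', '').lower() != 'close')
    return status, body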
@asyncio.coroutine
def length_handler(nbytes, input, output):
"""Async handler for reading a body given a Content-Length header."""
while nbytes > 0:
buffer = yield from input.read(min(nbytes, 256*1024))
if not buffer:
logger.error('premature end for content-length')
output.set_exception(EOFError())
return
output.feed_data(buffer)
nbytes -= len(buffer)
output.feed_eof()
@asyncio.coroutine
def chunked_handler(input, output):
"""Async handler for reading a body using Transfer-Encoding: chunked."""
logger.info('parsing chunked response')
nblocks = 0
nbytes = 0
while True:
size_header = yield from input.readline()
if not size_header:
logger.error('premature end of chunked response')
output.set_exception(EOFError())
return
logger.debug('size_header = %r', size_header)
parts = size_header.split(b';')
size = int(parts[0], 16)
nblocks += 1
nbytes += size
if size:
logger.debug('reading chunk of %r bytes', size)
block = yield from input.readexactly(size)
assert len(block) == size, (len(block), size)
output.feed_data(block)
crlf = yield from input.readline()
assert crlf == b'\r\n', repr(crlf)
if not size:
break
logger.warn('chunked response had %r bytes in %r blocks', nbytes, nblocks)
output.feed_eof()
class Fetcher:
"""Logic and state for one URL.
When found in crawler.busy, this represents a URL to be fetched or
in the process of being fetched; when found in crawler.done, this
holds the results from fetching it.
This is usually associated with a task. This references the
crawler for the connection pool and to add more URLs to its todo
list.
Call fetch() to do the fetching; results are in instance variables.
"""
def __init__(self, url, crawler, max_redirect=10, max_tries=4):
self.url = url
self.crawler = crawler
# We don't loop resolving redirects here -- we just use this
# to decide whether to add the redirect URL to crawler.todo.
self.max_redirect = max_redirect
# But we do loop to retry on errors a few times.
self.max_tries = max_tries
# Everything we collect from the response goes here.
self.task = None
self.exceptions = []
self.tries = 0
self.conn = None
self.status = None
self.headers = None
self.body = None
self.next_url = None
self.ctype = None
self.pdict = None
self.encoding = None
self.urls = None
self.new_urls = None
@asyncio.coroutine
def fetch(self):
"""Attempt to fetch the contents of the URL.
If successful, and the data is HTML, extract further links and
add them to the crawler. Redirects are also added back there.
"""
while self.tries < self.max_tries:
self.tries += 1
conn = None
try:
conn = yield from make_request(self.url, self.crawler.pool)
_, status, _, headers, output = yield from read_response(conn)
self.status, self.headers = status, headers
self.body = yield from output.read()
h_conn = headers.get('connection', '').lower()
if h_conn != 'close':
conn.close(recycle=True)
conn = None
if self.tries > 1:
logger.warn('try %r for %r success', self.tries, self.url)
break
except (BadStatusLine, OSError) as exc:
self.exceptions.append(exc)
logger.warn('try %r for %r raised %r',
self.tries, self.url, exc)
finally:
if conn is not None:
conn.close()
else:
# We never broke out of the while loop, i.e. all tries failed.
logger.error('no success for %r in %r tries',
self.url, self.max_tries)
return
if status in (300, 301, 302, 303, 307) and headers.get('location'):
next_url = headers['location']
self.next_url = urllib.parse.urljoin(self.url, next_url)
if self.max_redirect > 0:
logger.warn('redirect to %r from %r', self.next_url, self.url)
self.crawler.add_url(self.next_url, self.max_redirect-1)
else:
logger.error('redirect limit reached for %r from %r',
self.next_url, self.url)
else:
if status == 200:
self.ctype = headers.get('content-type')
self.pdict = {}
if self.ctype:
self.ctype, self.pdict = cgi.parse_header(self.ctype)
self.encoding = self.pdict.get('charset', 'utf-8')
if self.ctype == 'text/html':
body = self.body.decode(self.encoding, 'replace')
# Replace href with (?:href|src) to follow image links.
self.urls = set(re.findall(r'(?i)href=["\']?([^\s"\'<>]+)',
body))
if self.urls:
logger.warn('got %r distinct urls from %r',
len(self.urls), self.url)
self.new_urls = set()
for url in self.urls:
url = unescape(url)
url = urllib.parse.urljoin(self.url, url)
url, frag = urllib.parse.urldefrag(url)
if self.crawler.add_url(url):
self.new_urls.add(url)
class Crawler:
"""Crawl a set of URLs.
This manages three disjoint sets of URLs (todo, busy, done). The
data structures actually store dicts -- the values in todo give
the redirect limit, while the values in busy and done are Fetcher
instances.
"""
def __init__(self, roots,
exclude=None, strict=True, # What to crawl.
max_redirect=10, max_tries=4, # Per-url limits.
max_tasks=10, max_pool=10, # Global limits.
):
self.roots = roots
self.exclude = exclude
self.strict = strict
self.max_redirect = max_redirect
self.max_tries = max_tries
self.max_tasks = max_tasks
self.max_pool = max_pool
self.todo = {}
self.busy = {}
self.done = {}
self.pool = ConnectionPool(max_pool, max_tasks)
self.root_domains = set()
for root in roots:
parts = urllib.parse.urlparse(root)
host, port = urllib.parse.splitport(parts.netloc)
if not host:
continue
if re.match(r'\A[\d\.]*\Z', host):
self.root_domains.add(host)
            else:
                host = host.lower()
                if self.strict:
                    self.root_domains.add(host)
                else:
                    # lenient mode keys the domain set on the last two host
                    # components (e.g. 'foo.example.com' -> 'example.com')
                    self.root_domains.add('.'.join(host.split('.')[-2:]))
for root in roots:
self.add_url(root)
self.governor = asyncio.Semaphore(max_tasks)
self.termination = asyncio.Condition()
self.t0 = time.time()
self.t1 = None
def close(self):
"""Close resources (currently only the pool)."""
self.pool.close()
def host_okay(self, host):
"""Check if a host should be crawled.
A literal match (after lowercasing) is always good. For hosts
that don't look like IP addresses, some approximate matches
are okay depending on the strict flag.
"""
host = host.lower()
if host in self.root_domains:
return True
if re.match(r'\A[\d\.]*\Z', host):
return False
if self.strict:
return self._host_okay_strictish(host)
else:
return self._host_okay_lenient(host)
def _host_okay_strictish(self, host):
"""Check if a host should be crawled, strict-ish version.
This checks for equality modulo an initial 'www.' component.
"""
host = host[4:] if host.startswith('www.') else 'www.' + host
return host in self.root_domains
    def _host_okay_lenient(self, host):
        """Check if a host should be crawled, lenient version.
        This compares the last two components of the host.
        """
        return '.'.join(host.split('.')[-2:]) in self.root_domains
def add_url(self, url, max_redirect=None):
"""Add a URL to the todo list if not seen before."""
if self.exclude and re.search(self.exclude, url):
return False
parts = urllib.parse.urlparse(url)
if parts.scheme not in ('http', 'https'):
logger.info('skipping non-http scheme in %r', url)
return False
host, port = urllib.parse.splitport(parts.netloc)
if not self.host_okay(host):
logger.info('skipping non-root host in %r', url)
return False
if max_redirect is None:
max_redirect = self.max_redirect
if url in self.todo or url in self.busy or url in self.done:
return False
logger.warn('adding %r %r', url, max_redirect)
self.todo[url] = max_redirect
return True
@asyncio.coroutine
def crawl(self):
"""Run the crawler until all finished."""
with (yield from self.termination):
while self.todo or self.busy:
if self.todo:
url, max_redirect = self.todo.popitem()
fetcher = Fetcher(url,
crawler=self,
max_redirect=max_redirect,
max_tries=self.max_tries,
)
self.busy[url] = fetcher
fetcher.task = asyncio.Task(self.fetch(fetcher))
else:
yield from self.termination.wait()
self.t1 = time.time()
@asyncio.coroutine
def fetch(self, fetcher):
"""Call the Fetcher's fetch(), with a limit on concurrency.
Once this returns, move the fetcher from busy to done.
"""
url = fetcher.url
with (yield from self.governor):
try:
yield from fetcher.fetch() # Fetcher gonna fetch.
finally:
# Force GC of the task, so the error is logged.
fetcher.task = None
with (yield from self.termination):
self.done[url] = fetcher
del self.busy[url]
self.termination.notify()
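# Minimal driver sketch for the Crawler (the root URL is illustrative):
def _example_crawl():
    loop = asyncio.get_event_loop()
    crawler = Crawler(['http://example.com/'], max_tasks=5)
    try:
        loop.run_until_complete(crawler.crawl())
    finally:
        crawler.close()
    return crawler.done  # maps url -> Fetcher with status/headers/body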
|
{
"content_hash": "c1ddc97cc6ddd7b72ffb3eccaa31c0bd",
"timestamp": "",
"source": "github",
"line_count": 501,
"max_line_length": 79,
"avg_line_length": 36.64670658682635,
"alnum_prop": 0.5557734204793028,
"repo_name": "mikar/60-days-of-python",
"id": "9bc735d57bccc39530a6b5c7f8e950086d3ef0e0",
"size": "18360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webcrawler/crawling.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10104"
},
{
"name": "Python",
"bytes": "349650"
},
{
"name": "Shell",
"bytes": "76"
}
],
"symlink_target": ""
}
|
"""Fichier contenant le type orbe."""
from bases.objet.attribut import Attribut
from .base import BaseType
class Orbe(BaseType):
"""Type d'objet: orbe.
"""
nom_type = "orbe"
def __init__(self, cle=""):
"""Constructeur de l'objet"""
BaseType.__init__(self, cle)
# Attributs propres à l'objet (non au prototype)
self._attributs = {
"nom_orbe": Attribut(str),
}
def get_nom(self, nombre=1, pluriels=True):
"""Retourne le nom complet en fonction du nombre.
Par exemple :
Si nombre == 1 : retourne le nom singulier
Sinon : retourne le nombre et le nom pluriel
"""
ajout = ""
if nombre <= 0:
raise ValueError("la fonction get_nom a été appelée " \
"avec un nombre négatif ou nul.")
elif nombre == 1:
nom = getattr(self, "nom_orbe", "")
ajout = nom and " (nommé " + nom + ")" or "(sans nom)"
return self.nom_singulier + ajout
else:
return BaseType.get_nom(self, nombre, pluriels)
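# Illustrative sketch of get_nom() behaviour (the attribute values below are
# assumptions; nom_singulier normally comes from the prototype via BaseType):
def _example_nom():
    orbe = Orbe("orbe_test")
    orbe.nom_singulier = "an orb"
    orbe.nom_orbe = "Light"
    return orbe.get_nom(1)  # -> "an orb (named Light)"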
|
{
"content_hash": "e2353fe19dac64bae74fbf95e088a2f4",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 67,
"avg_line_length": 27.675,
"alnum_prop": 0.5438121047877146,
"repo_name": "vlegoff/tsunami",
"id": "9460c3b0594ff006000c626cab6f633afec96415",
"size": "2680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/objet/types/orbe.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
import ctypes
# The generated _FILE_RENAME_INFORMATION base structure is expected to already
# be in scope when the generator pastes in this extended definition.
INITIAL_FILE_RENAME_INFORMATION = _FILE_RENAME_INFORMATION
class _FILE_RENAME_INFORMATION(INITIAL_FILE_RENAME_INFORMATION):
@property
def filename(self):
filename_addr = ctypes.addressof(self) + type(self).FileName.offset
if getattr(self, "_target", None) is not None: #remote ctypes :D -> TRICKS OF THE YEAR
raw_data = self._target.read_memory(filename_addr, self.FileNameLength)
return raw_data.decode("utf16")
size = int(self.FileNameLength / 2)
return (ctypes.c_wchar * size).from_address(filename_addr)[:]
|
{
"content_hash": "00d472c7668ad5461696cc2cd5cd60eb",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 94,
"avg_line_length": 52.36363636363637,
"alnum_prop": 0.6788194444444444,
"repo_name": "hakril/PythonForWindows",
"id": "4402d6908974fcacd0012e84b75cc0938f315048",
"size": "576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ctypes_generation/extended_structs/_FILE_RENAME_INFORMATION.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4087889"
}
],
"symlink_target": ""
}
|
import datetime
import uuid
from oslo.config import cfg
from nova.api.openstack import compute
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import servers
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
import nova.db.api
from nova.network import manager
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests.image import fake
CONF = cfg.CONF
FAKE_UUID = fakes.FAKE_UUID
def fake_gen_uuid():
return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
pass
class SchedulerHintsTestCase(test.TestCase):
def setUp(self):
super(SchedulerHintsTestCase, self).setUp()
self.fake_instance = fakes.stub_instance(1, uuid=FAKE_UUID)
self.app = compute.APIRouterV3(init_only=('servers',
'os-scheduler-hints'))
def test_create_server_without_hints(self):
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], {})
return ([self.fake_instance], '')
self.stubs.Set(nova.compute.api.API, 'create', fake_create)
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavor_ref': '1',
}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_with_hints(self):
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], {'a': 'b'})
return ([self.fake_instance], '')
self.stubs.Set(nova.compute.api.API, 'create', fake_create)
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {
'server': {
'name': 'server_test',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavor_ref': '1',
'os-scheduler-hints:scheduler_hints': {'a': 'b'},
},
}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_bad_hints(self):
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {
'server': {
'name': 'server_test',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavor_ref': '1',
'os-scheduler-hints:scheduler_hints': 'non-dict',
},
}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
class ServersControllerCreateTest(test.TestCase):
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTest, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
CONF.set_override('extensions_blacklist', 'os-scheduler-hints',
'osapi_v3')
self.no_scheduler_hints_controller = servers.ServersController(
extension_info=ext_info)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': dict(inst_type),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"config_drive": None,
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update(context, instance_uuid, params):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
def fake_method(*args, **kwargs):
pass
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
def queue_get_for(context, *args):
return 'network_topic'
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_add_security_group',
return_security_group)
self.stubs.Set(db, 'project_get_networks',
project_get_networks)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(db, 'instance_update_and_get_original',
server_update)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
def _test_create_extra(self, params, no_image=False,
override_controller=None):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2)
if no_image:
server.pop('image_ref', None)
server.update(params)
body = dict(server=server)
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
if override_controller:
server = override_controller.create(req, body).obj['server']
else:
server = self.controller.create(req, body).obj['server']
def test_create_instance_with_scheduler_hints_disabled(self):
hints = {'a': 'b'}
params = {'os-scheduler-hints:scheduler_hints': hints}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertNotIn('scheduler_hints', kwargs)
# self.assertEqual(kwargs['scheduler_hints'], {})
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params,
override_controller=self.no_scheduler_hints_controller)
def test_create_instance_with_scheduler_hints_enabled(self):
hints = {'a': 'b'}
params = {'os-scheduler-hints:scheduler_hints': hints}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], hints)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
|
{
"content_hash": "ae17697e17824eb0e9f79ca228f2b20c",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 77,
"avg_line_length": 36.88655462184874,
"alnum_prop": 0.5776284314842237,
"repo_name": "OpenAcademy-OpenStack/nova-scheduler",
"id": "565fe38b4c68b5d8c400263a95818835429d6b73",
"size": "9415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/compute/plugins/v3/test_scheduler_hints.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13220667"
},
{
"name": "Shell",
"bytes": "16180"
}
],
"symlink_target": ""
}
|
import logging
import random
import time
import asyncio
import pathlib
import importlib
import collections
import discord
import aiohttp
import uvloop
from discord.ext import commands
import joseconfig as config
from ext.common import SayException
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
log = logging.getLogger(__name__)
extensions = [
'channel_logging', # loading at start to get the logger to run
'config',
'admin',
'exec',
'state',
]
CHECK_FAILURE_PHRASES = [
'br?',
'u died [real] [Not ClickBait]',
'rEEEEEEEEEEEEE',
'not enough permissions lul',
'you sure you can run this?',
]
BAD_ARG_MESSAGES = [
'dude give me the right thing',
"u can't give me this and think i can do something",
'succ',
"i'm not a god, fix your args",
'why. just why',
]
class JoseContext(commands.Context):
@property
def member(self):
if self.guild is None:
return None
return self.guild.get_member(self.author.id)
async def ok(self):
try:
await self.message.add_reaction('👌')
except discord.Forbidden:
await self.message.channel.send('ok')
async def not_ok(self):
try:
await self.message.add_reaction('❌')
except discord.Forbidden:
await self.message.channel.send('not ok')
async def success(self, flag):
if flag:
await self.ok()
else:
await self.not_ok()
async def status(self, flag):
await self.success(flag)
async def err(self, msg):
await self.send(f'\N{POLICE CARS REVOLVING LIGHT} {msg}')
def send(self, content='', **kwargs):
# FUCK EACH AND @EVERYONE OF YOU
# specially mary and gerd
# i hope this saves my life, forever.
nc = self.bot.clean_content(content, normal_send=True)
return super().send(nc, **kwargs)
class JoseBot(commands.Bot):
"""Main bot subclass."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_time = time.time()
self.config = config
self.session = aiohttp.ClientSession()
#: Exceptions that will be simplified
# to WARN logging instead of ERROR logging
self.simple_exc = [SayException]
#: used by ext.channel_logging
self.channel_handlers = []
#: blocking stuff
self.block_coll = None
self.block_cache = {}
async def on_ready(self):
"""Bot ready handler"""
log.info(f'Logged in! {self.user!s}')
async def is_blocked(self, user_id: int, key: str = 'user_id') -> bool:
"""Returns if something blocked to use José. Uses cache"""
if user_id in self.block_cache:
return self.block_cache[user_id]
blocked = await self.block_coll.find_one({key: user_id})
is_blocked = bool(blocked)
self.block_cache[user_id] = is_blocked
return is_blocked
async def is_blocked_guild(self, guild_id: int) -> bool:
"""Returns if a guild is blocked to use José. Uses cache"""
return await self.is_blocked(guild_id, 'guild_id')
def clean_content(self, content: str, **kwargs) -> str:
"""Make a string clean of mentions and not breaking codeblocks"""
content = str(content)
# only escape codeblocks when we are not normal_send
# only escape single person pings when we are not normal_send
if not kwargs.get('normal_send', False):
content = content.replace('`', r'\`')
content = content.replace('<@', '<@\u200b')
content = content.replace('<#', '<#\u200b')
# always escape role pings (@everyone) and @here
content = content.replace('<@&', '<@&\u200b')
content = content.replace('@here', '@\u200bhere')
content = content.replace('@everyone', '@\u200beveryone')
return content
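    # Illustrative escaping behaviour of clean_content() (example strings are
    # assumptions, not taken from the test suite):
    #   clean_content('@everyone <@123> `hi`')
    #       -> '@\u200beveryone <@\u200b123> \`hi\`'
    #   clean_content('@everyone', normal_send=True)
    #       -> '@\u200beveryone'  # pings neutralised, backticks kept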
async def on_command(self, ctx):
"""Log command usage"""
# thanks dogbot ur a good
content = ctx.message.content
content = self.clean_content(content)
author = ctx.message.author
guild = ctx.guild
checks = [c.__qualname__.split('.')[0] for c in ctx.command.checks]
location = '[DM]' if isinstance(ctx.channel, discord.DMChannel) else \
f'[Guild {guild.name} {guild.id}]'
log.info('%s [cmd] %s(%d) "%s" checks=%s', location, author, author.id,
content, ','.join(checks) or '(none)')
async def on_error(self, event_method, *args, **kwargs):
# TODO: analyze current exception
# and simplify the logging to WARN
# if it is on self.simple_exc
log.exception(f'evt error ({event_method}) '
f'args={args!r} kwargs={kwargs!r}')
async def on_message(self, message):
if message.author.bot:
return
author_id = message.author.id
if await self.is_blocked(author_id):
return
if message.guild is not None:
guild_id = message.guild.id
if await self.is_blocked_guild(guild_id):
return
ctx = await self.get_context(message, cls=JoseContext)
await self.invoke(ctx)
def load_extension(self, name: str):
"""wrapper for the Bot.load_extension"""
log.debug(f'[load:loading] {name}')
t_start = time.monotonic()
super().load_extension(name)
t_end = time.monotonic()
delta = round((t_end - t_start) * 1000, 2)
log.info(f'[load] {name} took {delta}ms')
    def add_jose_cog(self, cls: type):
"""Add a cog but load its requirements first."""
requires = cls._cog_metadata.get('requires', [])
log.debug('requirements for %s: %r', cls, requires)
if not requires:
log.debug(f'no requirements for {cls}')
for _req in requires:
req = f'ext.{_req}'
            if req not in self.extensions:
log.debug('loading %r from requirements', req)
self.load_extension(req)
else:
log.debug('%s is already loaded', req)
# We instantiate here because
# instantiating on the old add_cog
# is exactly the cause of the problem
cog = cls(self)
super().add_cog(cog)
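    # Illustrative cog shape consumed by add_jose_cog (the class name, base
    # class and 'requires' entry below are assumptions, not from this repo):
    #
    #     class Coins:
    #         _cog_metadata = {'requires': ['config']}
    #         def __init__(self, bot): ...
    #
    #     bot.add_jose_cog(Coins)  # loads ext.config first when missing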
def load_all(self):
"""Load all extensions in the extensions folder.
Thanks FrostLuma for code!
"""
for extension in extensions:
self.load_extension(f'ext.{extension}')
path = pathlib.Path('ext/')
files = path.glob('**/*.py')
for fileobj in files:
if fileobj.stem == '__init__':
name = str(fileobj)[:-12]
else:
name = str(fileobj)[:-3]
name = name.replace('/', '.')
module = importlib.import_module(name)
if not hasattr(module, 'setup'):
# ignore extensions that do not have a setup() function
continue
            if name in self.extensions:
                log.debug(f'ignoring already loaded {name}')
                continue
            self.load_extension(name)
async def get_prefix(bot, message) -> list:
"""Get the preferred list of prefixes for a determined guild/dm."""
if not message.guild:
return bot.config.prefix
config_cog = bot.get_cog('Config')
if not config_cog:
log.warning('config cog not found')
return [config.prefix]
custom = await config_cog.cfg_get(message.guild, 'prefix')
if custom == bot.config.prefix:
return custom
# sort backwards due to the command parser taking the first match
return sorted([bot.config.prefix, custom], reverse=True)
def main():
"""Main entry point"""
jose = JoseBot(
command_prefix=get_prefix,
description='henlo dis is jose',
pm_help=None,
owner_id=getattr(config, 'owner_id', None),
)
jose.load_all()
jose.run(config.token)
if __name__ == '__main__':
main()
|
{
"content_hash": "602a11bb27bc142ebed6f565e4dfe1f5",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 79,
"avg_line_length": 29.060931899641577,
"alnum_prop": 0.5786877158362111,
"repo_name": "lnmds/jose",
"id": "1023f11dc7281fc571e70d01643c3ea9ae8708bf",
"size": "8115",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jose.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "420261"
}
],
"symlink_target": ""
}
|
from apps.common.hasher import get_hasher
from django.db import models
import time
class EditMixin(models.Model):
"""
Abstract class for use in models
"""
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
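# Sketch of a concrete model using the mixin (the model name and field below
# are illustrative):
#     class Photo(EditMixin):
#         title = models.CharField(max_length=100)
#         # created_at / modified_at audit columns come from EditMixin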
class HashidManagerMixin(object):
def get_by_hashid(self, hashid):
"""
Get an object by a hashed pk
:param hashid: hashed pk value
:return: None
"""
hasher = get_hasher()
return self.get(pk=hasher.decode_single(hashid))
def get_uploaded_file_path(instance, filename):
"""
Function that determines file path for specified file
:param instance: instance of db object for which file is being saved
:param filename: name of file
:return: path to file
"""
# Seconds since epoch
current_time = str(round(time.time(), 0)).split('.')[0]
# Get file extension
file_pieces = filename.split('.')
file_extension = file_pieces[len(file_pieces) - 1]
return 'images/{}.{}'.format(current_time, file_extension)
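# Illustrative result of get_uploaded_file_path() (the epoch timestamp is an
# assumption about when the call is made):
def _example_upload_path():
    # e.g. returns 'images/1500000000.jpg' for a call made at that instant
    return get_uploaded_file_path(None, 'photo.jpg')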
|
{
"content_hash": "2569f682cd98ea68e5523c9ba1c4b851",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 72,
"avg_line_length": 26.27906976744186,
"alnum_prop": 0.6495575221238938,
"repo_name": "RonquilloAeon/django-golden-image",
"id": "ea9fd2ff0bfbe3a958b01b409ebe7fe724aa88b2",
"size": "1130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apps/common/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "324"
},
{
"name": "Python",
"bytes": "27259"
},
{
"name": "Shell",
"bytes": "128"
}
],
"symlink_target": ""
}
|
import roslib
roslib.load_manifest('smart_arm_controller')
import rospy
from std_msgs.msg import Float64
from numpy import matrix, array
import numpy as np
import sys
import sensor_msgs.msg
from dynamixel_msgs.msg import JointState
joint_names = ('shoulder_pitch_controller',
'elbow_flex_controller',
'wrist_roll_controller',
'claw_controller')
#Poles should be on the real axis to avoid overshoot and oscillations
r = 0.95
alpha = r*r
k = 1+r*r-2*r*0.9986
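# The update in tick() below implements the 2nd-order discrete low-pass
#   p[n] = (1 + alpha - k) * p[n-1] - alpha * p[n-2] + k * p_cmd
# whose poles have radius r (since alpha = r^2) and sit almost on the real
# axis (the pole angle theta satisfies cos(theta) = 0.9986). The DC gain is
# k / (1 - (1 + alpha - k) + alpha) = 1, so the pose settles at p_cmd.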
class PropControllerState:
# The possible states we can be in
LISTENING = 0
CONTROLLING = 1
def __init__(self):
# P_J - the pose we are commanded to move to
self.commanded_pose = np.ones(4) * np.nan
# P_C - the current measured pose of the arm
self.measured_pose = np.ones(4) * np.nan
# A list containing the last three measured states.
self.prev_poses = []
# What state we're in
self.state = PropControllerState.LISTENING
# A list of publishers to publish state to
self.pubs = None
def tick(self):
"""Called every so often to update our state."""
if self.state == PropControllerState.LISTENING:
# If we have all of one pose...
if np.all(np.isfinite(self.measured_pose)):
# Record the current pose as element 0 of self.prev_poses
# shuffling the other poses to the right. Then, truncate the
# list to be a maximum of 3 poses long.
self.prev_poses = [np.copy(self.measured_pose)] + self.prev_poses
self.prev_poses = self.prev_poses[:3]
elif self.state == PropControllerState.CONTROLLING:
# Calculate new commanded pose
p0, p1, p2 = self.prev_poses[:3]
p0 = (1+alpha-k)*p0 - alpha*p1 + k*self.commanded_pose
#p0 = 0.8*p1 + 0.2*self.commanded_pose
# Shuffle prev poses array
self.prev_poses = [p0] + self.prev_poses
self.prev_poses = self.prev_poses[:3]
delta = self.commanded_pose - p0
delta_mag = np.sqrt(np.sum(delta * delta))
epsilon = 0.02
#rospy.logerr('p0: '+ str(p0))
#rospy.logerr('p1: '+ str(p1))
#rospy.logerr('p2: '+ str(p2))
#rospy.logerr('Cmded pose: ' + str(self.commanded_pose))
#rospy.logerr('delta: ' +str(delta))
#rospy.logerr('Controlling with prev poses: ' + str(self.prev_poses))
# If we're sufficiently close, or have gone mad, transition to listening
# state and reset the commanded pose.
#The IK works for point (0.1, 0, 0) but it is not publishing
if delta_mag <= epsilon or delta_mag >= 6:
#rospy.logerr('Finished with delta mag: ' + str(delta_mag))
self.commanded_pose = np.ones(4) * np.nan
self.state = PropControllerState.LISTENING
elif self.pubs is not None:
# Actually publish *IF* we have some publishers
for i in range(len(self.pubs)):
self.pubs[i].publish(p0[i])
def commanded_pose_updated(self):
# We transition to the controlling state when we have a full commanded
# pose *AND* at least three previous poses
if np.all(np.isfinite(self.commanded_pose)) and len(self.prev_poses) >= 3:
self.state = PropControllerState.CONTROLLING
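# Control flow, as I read it: the node idles in LISTENING, buffering the three
# most recent measured poses. Once a complete 4-DOF commanded pose arrives and
# three history samples exist, it switches to CONTROLLING and iterates the
# second-order filter, publishing each intermediate pose, until the output is
# within epsilon of the target (or has diverged), then resets to LISTENING.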
# A global variable holding the controller state
c_state = PropControllerState()
def get_shoulder_pitch(event):
"""Called when a new command is sent to the arm."""
c_state.commanded_pose[0] = event.data
c_state.commanded_pose_updated()
def get_elbow_flex(event):
"""Called when a new command is sent to the arm."""
c_state.commanded_pose[1] = event.data
c_state.commanded_pose_updated()
def get_wrist_roll(event):
"""Called when a new command is sent to the arm."""
c_state.commanded_pose[2] = event.data
c_state.commanded_pose_updated()
def get_claw(event):
"""Called when a new command is sent to the arm."""
#Claw should always be closed
c_state.commanded_pose[3] = 0.98
c_state.commanded_pose_updated()
def current_shoulder_pitch(event):
c_state.measured_pose[0] = event.current_pos
def current_elbow_flex(event):
c_state.measured_pose[1] = event.current_pos
def current_wrist_roll(event):
c_state.measured_pose[2] = event.current_pos
def current_claw(event):
c_state.measured_pose[3] = event.current_pos
if __name__ == '__main__':
pubs = [rospy.Publisher(name + '/command', Float64) for name in joint_names]
rospy.init_node('make_goal_pose', anonymous=True)
rospy.Subscriber('shoulder_pitch_controller/state', JointState, current_shoulder_pitch)
rospy.Subscriber('elbow_flex_controller/state', JointState, current_elbow_flex)
rospy.Subscriber('wrist_roll_controller/state', JointState, current_wrist_roll)
rospy.Subscriber('claw_controller/state', JointState, current_claw)
rospy.Subscriber('shoulder_pitch_controller/intermediate_command', Float64, get_shoulder_pitch)
rospy.Subscriber('elbow_flex_controller/intermediate_command', Float64, get_elbow_flex)
rospy.Subscriber('wrist_roll_controller/intermediate_command', Float64, get_wrist_roll)
rospy.Subscriber('claw_controller/intermediate_command', Float64, get_claw)
c_state.pubs = pubs
r = rospy.Rate(30)
while not rospy.is_shutdown():
c_state.tick()
r.sleep()
|
{
"content_hash": "f9cde283790028087c18ddc48f73b3cc",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 99,
"avg_line_length": 37.52980132450331,
"alnum_prop": 0.629610022939827,
"repo_name": "sigproc/robotic_surgery",
"id": "39b421efdcb7abc672923da0df3523dea423b141",
"size": "5690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ros/crustcrawler_smart_arm/smart_arm_controller/scripts/prop_closed_2nd_order_static.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "95098"
},
{
"name": "CMake",
"bytes": "87588"
},
{
"name": "Makefile",
"bytes": "6460"
},
{
"name": "Python",
"bytes": "208268"
},
{
"name": "Shell",
"bytes": "1502"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.contrib.contenttypes.models import ContentType
from src.utils.utils import to_str
from src.utils.dbserialize import to_pickle
try:
import cPickle as pickle
except ImportError:
import pickle
CTYPEGET = ContentType.objects.get
GA = object.__getattribute__
SA = object.__setattr__
DA = object.__delattr__
# overloading pickle to have it find the PackedDBobj in this module
import pickle
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
renametable = {
'src.typeclasses.models': 'src.players.migrations.0018_convert_attrdata',
'PackedDBobject': 'PackedDBobject',
}
def mapname(name):
if name in renametable:
return renametable[name]
return name
def mapped_load_global(self):
module = mapname(self.readline()[:-1])
name = mapname(self.readline()[:-1])
klass = self.find_class(module, name)
self.append(klass)
def loads(str):
file = StringIO(str)
unpickler = pickle.Unpickler(file)
unpickler.dispatch[pickle.GLOBAL] = mapped_load_global
return unpickler.load()
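# Example of the remap (my reading of the mechanism): a legacy pickle stream
# whose GLOBAL opcode names "src.typeclasses.models" / "PackedDBobject" is
# resolved through renametable as if it named this migration module, so old
# attribute pickles unpickle against the PackedDBobject class defined below.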
class PackedDBobject(object):
"""
Attribute helper class.
A container for storing and easily identifying database objects in
    the database (which doesn't support storing db_objects directly).
"""
def __init__(self, ID, db_model, db_key):
self.id = ID
self.db_model = db_model
self.key = db_key
def __str__(self):
return "%s(#%s)" % (self.key, self.id)
def __unicode__(self):
return u"%s(#%s)" % (self.key, self.id)
class PackedDict(dict):
"""
Attribute helper class.
A variant of dict that stores itself to the database when
updating one of its keys. This is called and handled by
Attribute.validate_data().
"""
def __init__(self, db_obj, *args, **kwargs):
"""
Sets up the packing dict. The db_store variable
is set by Attribute.validate_data() when returned in
order to allow custom updates to the dict.
db_obj - the Attribute object storing this dict.
The 'parent' property is set to 'init' at creation,
this stops the system from saving itself over and over
when first assigning the dict. Once initialization
is over, the Attribute from_attr() method will assign
the parent (or None, if at the root)
"""
self.db_obj = db_obj
self.parent = 'init'
super(PackedDict, self).__init__(*args, **kwargs)
def __str__(self):
return "{%s}" % ", ".join("%s:%s" % (key, str(val)) for key, val in self.items())
def save(self):
"Relay save operation upwards in tree until we hit the root."
if self.parent == 'init':
pass
elif self.parent:
self.parent.save()
else:
self.db_obj.value = self
def __setitem__(self, *args, **kwargs):
"assign item to this dict"
super(PackedDict, self).__setitem__(*args, **kwargs)
self.save()
def __delitem__(self, *args, **kwargs):
"delete with del self[key]"
super(PackedDict, self).__delitem__(*args, **kwargs)
self.save()
def clear(self, *args, **kwargs):
"Custom clear"
super(PackedDict, self).clear(*args, **kwargs)
self.save()
def pop(self, *args, **kwargs):
"Custom pop"
ret = super(PackedDict, self).pop(*args, **kwargs)
self.save()
return ret
def popitem(self, *args, **kwargs):
"Custom popitem"
ret = super(PackedDict, self).popitem(*args, **kwargs)
self.save()
return ret
def setdefault(self, *args, **kwargs):
"Custom setdefault"
        ret = super(PackedDict, self).setdefault(*args, **kwargs)
        self.save()
        return ret
def update(self, *args, **kwargs):
"Custom update"
super(PackedDict, self).update(*args, **kwargs)
self.save()
class PackedList(list):
"""
Attribute helper class.
A variant of list that stores itself to the database when
updating one of its keys. This is called and handled by
Attribute.validate_data().
"""
def __init__(self, db_obj, *args, **kwargs):
"""
sets up the packing list.
db_obj - the attribute object storing this list.
the 'parent' property is set to 'init' at creation,
this stops the system from saving itself over and over
when first assigning the dict. once initialization
is over, the attribute from_attr() method will assign
the parent (or none, if at the root)
"""
self.db_obj = db_obj
self.parent = 'init'
super(PackedList, self).__init__(*args, **kwargs)
def __str__(self):
return "[%s]" % ", ".join(str(val) for val in self)
def save(self):
"relay save operation upwards in tree until we hit the root."
if self.parent == 'init':
pass
elif self.parent:
self.parent.save()
else:
self.db_obj.value = self
def __setitem__(self, *args, **kwargs):
"Custom setitem that stores changed list to database."
super(PackedList, self).__setitem__(*args, **kwargs)
self.save()
def __delitem__(self, *args, **kwargs):
"delete with del self[index]"
super(PackedList, self).__delitem__(*args, **kwargs)
self.save()
def append(self, *args, **kwargs):
"Custom append"
super(PackedList, self).append(*args, **kwargs)
self.save()
def extend(self, *args, **kwargs):
"Custom extend"
super(PackedList, self).extend(*args, **kwargs)
self.save()
def insert(self, *args, **kwargs):
"Custom insert"
super(PackedList, self).insert(*args, **kwargs)
self.save()
def remove(self, *args, **kwargs):
"Custom remove"
super(PackedList, self).remove(*args, **kwargs)
self.save()
def pop(self, *args, **kwargs):
"Custom pop"
ret = super(PackedList, self).pop(*args, **kwargs)
self.save()
return ret
def reverse(self, *args, **kwargs):
"Custom reverse"
super(PackedList, self).reverse(*args, **kwargs)
self.save()
def sort(self, *args, **kwargs):
"Custom sort"
super(PackedList, self).sort(*args, **kwargs)
self.save()
class PackedSet(set):
"""
    A variant of set that stores new updates to the database.
"""
def __init__(self, db_obj, *args, **kwargs):
"""
sets up the packing set.
db_obj - the attribute object storing this set
the 'parent' property is set to 'init' at creation,
this stops the system from saving itself over and over
when first assigning the dict. once initialization
is over, the attribute from_attr() method will assign
the parent (or none, if at the root)
"""
self.db_obj = db_obj
self.parent = 'init'
super(PackedSet, self).__init__(*args, **kwargs)
def __str__(self):
return "{%s}" % ", ".join(str(val) for val in self)
def save(self):
"relay save operation upwards in tree until we hit the root."
if self.parent == 'init':
pass
elif self.parent:
self.parent.save()
else:
self.db_obj.value = self
def add(self, *args, **kwargs):
"Add an element to the set"
super(PackedSet, self).add(*args, **kwargs)
self.save()
def clear(self, *args, **kwargs):
"Remove all elements from this set"
super(PackedSet, self).clear(*args, **kwargs)
self.save()
def difference_update(self, *args, **kwargs):
"Remove all elements of another set from this set."
super(PackedSet, self).difference_update(*args, **kwargs)
self.save()
def discard(self, *args, **kwargs):
"Remove an element from a set if it is a member.\nIf not a member, do nothing."
super(PackedSet, self).discard(*args, **kwargs)
self.save()
def intersection_update(self, *args, **kwargs):
"Update a set with the intersection of itself and another."
super(PackedSet, self).intersection_update(*args, **kwargs)
self.save()
def pop(self, *args, **kwargs):
"Remove and return an arbitrary set element.\nRaises KeyError if the set is empty."
        ret = super(PackedSet, self).pop(*args, **kwargs)
        self.save()
        return ret
def remove(self, *args, **kwargs):
"Remove an element from a set; it must be a member.\nIf the element is not a member, raise a KeyError."
super(PackedSet, self).remove(*args, **kwargs)
self.save()
def symmetric_difference_update(self, *args, **kwargs):
"Update a set with the symmetric difference of itself and another."
super(PackedSet, self).symmetric_difference_update(*args, **kwargs)
self.save()
def update(self, *args, **kwargs):
"Update a set with the union of itself and others."
super(PackedSet, self).update(*args, **kwargs)
self.save()
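# All three Packed* containers share one save-relay pattern: every mutating
# method calls save(), which bubbles up through nested parents until the root
# reassigns itself to db_obj.value, re-pickling the whole structure. The
# sentinel parent value 'init' suppresses that relay during construction.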
class Migration(DataMigration):
depends_on = (
("objects", "0018_add_picklefield"),
)
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
# modified for migration - converts to plain python properties
def from_attr(datatuple):
"""
Retrieve data from a previously stored attribute. This
is always a dict with keys type and data.
datatuple comes from the database storage and has
the following format:
(simple|dbobj|iter, <data>)
where
simple - a single non-db object, like a string. is returned as-is.
dbobj - a single dbobj-id. This id is retrieved back from the database.
iter - an iterable. This is traversed iteratively, converting all found
dbobj-ids back to objects. Also, all lists and dictionaries are
returned as their PackedList/PackedDict counterparts in order to
allow in-place assignment such as obj.db.mylist[3] = val. Mylist
is then a PackedList that saves the data on the fly.
"""
# nested functions
def id2db(data):
"""
Convert db-stored dbref back to object
"""
mclass = CTYPEGET(model=data.db_model).model_class()
                try:
                    return mclass.objects.get(id=data.id)
                except mclass.DoesNotExist:
                    # could happen if object was deleted in the interim
                    return None
def iter_id2db(item):
"""
Recursively looping through stored iterables, replacing ids with actual objects.
We return PackedDict and PackedLists instead of normal lists; this is needed in order for
the user to do dynamic saving of nested in-place, such as obj.db.attrlist[2]=3. What is
stored in the database are however always normal python primitives.
"""
dtype = type(item)
                if dtype in (str, unicode, int, float, long, bool): # check the most common types first, for speed
return item
elif dtype == PackedDBobject or hasattr(item, '__class__') and item.__class__.__name__ == "PackedDBobject":
return id2db(item)
elif dtype == tuple:
return tuple([iter_id2db(val) for val in item])
elif dtype in (dict, PackedDict):
return dict(zip([key for key in item.keys()],
[iter_id2db(val) for val in item.values()]))
elif hasattr(item, '__iter__'):
return list(iter_id2db(val) for val in item)
else:
return item
typ, data = datatuple
if typ == 'simple':
# single non-db objects
return data
elif typ == 'dbobj':
# a single stored dbobj
return id2db(data)
elif typ == 'iter':
# all types of iterables
return iter_id2db(data)
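            # Illustrative datatuples (values hypothetical):
            #   ('simple', 42) -> 42
            #   ('dbobj', PackedDBobject(7, 'objectdb', 'Ball')) -> <ObjectDB #7>
            #   ('iter', ['a', PackedDBobject(7, 'objectdb', 'Ball')]) -> ['a', <ObjectDB #7>]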
if not db.dry_run:
for attr in orm['players.PlayerAttribute'].objects.all():
# repack attr into new format and reimport
datatuple = loads(to_str(attr.db_value))
python_data = from_attr(datatuple)
new_data = to_pickle(python_data)
attr.db_value2 = new_data # new pickleObjectField
attr.save()
def backwards(self, orm):
"Write your backwards methods here."
raise RuntimeError("This migration cannot be reversed.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'objects.objectdb': {
'Meta': {'object_name': 'ObjectDB'},
'db_cmdset_storage': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_destination': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'destinations_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
'db_home': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'homes_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'db_permissions': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'db_player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['players.PlayerDB']", 'null': 'True', 'blank': 'True'}),
'db_sessid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'db_typeclass_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'players.playerattribute': {
'Meta': {'object_name': 'PlayerAttribute'},
'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['players.PlayerDB']"}),
'db_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'db_value2': ('src.utils.picklefield.PickledObjectField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'players.playerdb': {
'Meta': {'object_name': 'PlayerDB'},
'db_cmdset_storage': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_is_connected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'db_permissions': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'db_typeclass_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'players.playernick': {
'Meta': {'unique_together': "(('db_nick', 'db_type', 'db_obj'),)", 'object_name': 'PlayerNick'},
'db_nick': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['players.PlayerDB']"}),
'db_real': ('django.db.models.fields.TextField', [], {}),
'db_type': ('django.db.models.fields.CharField', [], {'default': "'inputline'", 'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['players']
symmetrical = True
|
{
"content_hash": "21e5ab6a3024491c4b66d6c18ef7297e",
"timestamp": "",
"source": "github",
"line_count": 444,
"max_line_length": 187,
"avg_line_length": 46.630630630630634,
"alnum_prop": 0.5660258887171561,
"repo_name": "TaliesinSkye/evennia",
"id": "343bd030a212a888dbc122e6830ad3874fd59caa",
"size": "20728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/players/migrations/0018_convert_attrdata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "59698"
},
{
"name": "D",
"bytes": "9343933"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "JavaScript",
"bytes": "91190"
},
{
"name": "Python",
"bytes": "2840755"
},
{
"name": "Shell",
"bytes": "4577"
}
],
"symlink_target": ""
}
|
from tornado import gen
import rethinkdb as r
from datetime import datetime, timedelta
from ..config import CONFIG
from .utils import dump_cursor
from .connection import connection
@gen.coroutine
def get_reservation_for_user(id):
conn = yield connection()
result = yield r.table('reservations').\
filter({"user_id": id}).run(conn)
if result.items:
return result.items[0]
return None
@gen.coroutine
def create_ticket_reservation(showtime_id, user_id, is_shitty=False):
conn = yield connection()
data = {
"showtime_id": showtime_id,
"user_id": user_id,
"confirmation_code": "",
"reserved_on": r.now(),
"is_shitty": is_shitty
}
reservations = r.table('reservations')
yield reservations.filter({"user_id": user_id}).delete().run(conn)
result = yield reservations.insert(
data,
conflict="update").run(conn)
return result
@gen.coroutine
def remove_expired_tickets():
conn = yield connection()
expiration_time = int(CONFIG.get('ticket_expiration'))
safeDate = datetime.now() - timedelta(seconds=expiration_time)
    safeDate = r.epoch_time(safeDate.timestamp())  # POSIX timestamp of the cutoff
result = yield r.table('reservations').\
filter((r.row['confirmation_code'] == '') &
(r.row['reserved_on'] < safeDate)).delete().run(conn)
return result
@gen.coroutine
def confirm_ticket_reservation(id, confirmation_code, is_shitty=False):
conn = yield connection()
result = yield r.table('reservations').get(id).update({
"confirmation_code": confirmation_code,
"is_shitty": is_shitty
}).run(conn)
return result
@gen.coroutine
def change_reservation_showtime(reservation_id, showtime_id):
conn = yield connection()
result = yield r.table('reservations').get(reservation_id).update({
"showtime_id": showtime_id
}).run(conn)
return result
@gen.coroutine
def get_reservations_for_showtime(id):
conn = yield connection()
result = yield r.table('reservations').\
filter({"showtime_id": id}).run(conn)
result = yield dump_cursor(result)
return result
@gen.coroutine
def get_reservations():
conn = yield connection()
result = yield r.table('reservations').run(conn)
result = yield dump_cursor(result)
return result
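# Usage sketch (handler class and route are hypothetical, not part of this
# module):
#
#     class ReservationHandler(tornado.web.RequestHandler):
#         @gen.coroutine
#         def get(self, user_id):
#             reservation = yield get_reservation_for_user(user_id)
#             self.write({'reservation': reservation})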
|
{
"content_hash": "3836c2cfb5e163b938eb49aa7f365162",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 71,
"avg_line_length": 28.301204819277107,
"alnum_prop": 0.6564495530012772,
"repo_name": "wannabeCitizen/quantifiedSelf",
"id": "61a46ad4147e9eb7423a680b1401a32c1a501b76",
"size": "2349",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/database/reservations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4960"
},
{
"name": "HTML",
"bytes": "47436"
},
{
"name": "JavaScript",
"bytes": "127178"
},
{
"name": "Python",
"bytes": "52102"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser
)
class MyUserManager(BaseUserManager):
def create_user(self, email, is_shop_owner, password=None):
"""
        Creates and saves a User with the given email, shop-owner flag
        and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
is_shop_owner=is_shop_owner,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, is_shop_owner, password):
"""
        Creates and saves a superuser with the given email, shop-owner
        flag and password.
"""
user = self.create_user(
email,
password=password,
is_shop_owner=is_shop_owner,
)
user.is_admin = True
user.save(using=self._db)
return user
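# Wiring sketch (the model below is hypothetical; the manager is the one
# defined above):
#
#     class MyUser(AbstractBaseUser):
#         email = models.EmailField(unique=True)
#         is_shop_owner = models.BooleanField(default=False)
#         is_admin = models.BooleanField(default=False)
#         objects = MyUserManager()
#         USERNAME_FIELD = 'email'
#         REQUIRED_FIELDS = ['is_shop_owner']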
|
{
"content_hash": "5f83526ee6efa02e920d6ec9bc296c27",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 67,
"avg_line_length": 26.58974358974359,
"alnum_prop": 0.5853423336547734,
"repo_name": "VikasSherawat/OrderFood",
"id": "975e126bce8ab963bb690a11a2cdd69751c7586e",
"size": "1038",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "main/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "299141"
},
{
"name": "HTML",
"bytes": "27448"
},
{
"name": "JavaScript",
"bytes": "849578"
},
{
"name": "Python",
"bytes": "38989"
},
{
"name": "Shell",
"bytes": "249"
}
],
"symlink_target": ""
}
|
"""List summary info for the test files in the charset directory"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import logging
logging.basicConfig(level=logging.INFO,
format='%(message)s')
if __name__ == "__main__":
from glob import glob
import dicom
# Get list of all DICOM files
names = glob("*.dcm")
# Collect summary information from the files
files_info = []
for name in names:
ds = dicom.read_file(name)
ds.decode()
files_info.append((name, ds.SpecificCharacterSet, ds.PatientsName))
# Show the information
format = "%-16s %-40s %s"
logging.info(format % ("Filename", "Character Sets", "Patient's Name"))
logging.info(format % ("--------", "--------------", "--------------"))
for file_info in files_info:
logging.info(format % file_info)
if "chrFrenMulti.dcm" in names:
logging.info("\nOther\n=====")
logging.info(
"chrFrenMulti.dcm is a modified version of chrFren.dcm"
" with multi-valued PN and LO for testing decoding"
)
|
{
"content_hash": "f901afe4004726e0f077a9237b85989b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 75,
"avg_line_length": 34.13513513513514,
"alnum_prop": 0.611243072050673,
"repo_name": "njvack/yadda",
"id": "f32cb7cb256e467a529b3717c04ef0990450674c",
"size": "1277",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "dicom/testcharsetfiles/charlist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1729924"
},
{
"name": "Shell",
"bytes": "1250"
}
],
"symlink_target": ""
}
|
"""
Generate grid of hyperparameters
"""
from sys import stdout
from copy import deepcopy
from yaml import dump
base_hyperparameters = {
##########################################
# ENSEMBLE SIZE
##########################################
"n_models": 4,
##########################################
# OPTIMIZATION
##########################################
"max_epochs": 500,
"patience": 20,
"early_stopping": True,
"validation_split": 0.1,
"minibatch_size": None,
"loss": "custom:mse_with_inequalities",
##########################################
# RANDOM NEGATIVE PEPTIDES
##########################################
"random_negative_rate": 0.1,
"random_negative_constant": 25,
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
##########################################
# PEPTIDE REPRESENTATION
##########################################
# One of "one-hot", "embedding", or "BLOSUM62".
"peptide_amino_acid_encoding": "BLOSUM62",
"use_embedding": False, # maintained for backward compatability
"embedding_output_dim": 8, # only used if using embedding
"kmer_size": 15,
##########################################
# NEURAL NETWORK ARCHITECTURE
##########################################
"locally_connected_layers": [
{
"filters": 8,
"activation": "tanh",
"kernel_size": 3
}
],
"activation": "tanh",
"output_activation": "sigmoid",
"layer_sizes": [16],
"dense_layer_l1_regularization": None,
"batch_normalization": False,
"dropout_probability": 0.0,
##########################################
# TRAINING Data
##########################################
"train_data": {"subset": "all", "pretrain_min_points": 1000},
}
grid = []
for train_subset in ["all", "quantitative"]:
for minibatch_size in [128]:
for dense_layer_size in [8, 16, 32, 64]:
for l1 in [0.0, 0.001]:
for num_lc in [0, 1, 2]:
for lc_kernel_size in [3, 5]:
new = deepcopy(base_hyperparameters)
new["minibatch_size"] = minibatch_size
new["train_data"]["subset"] = train_subset
new["layer_sizes"] = [dense_layer_size]
new["dense_layer_l1_regularization"] = l1
(lc_layer,) = new["locally_connected_layers"]
lc_layer['kernel_size'] = lc_kernel_size
if num_lc == 0:
new["locally_connected_layers"] = []
elif num_lc == 1:
new["locally_connected_layers"] = [lc_layer]
elif num_lc == 2:
new["locally_connected_layers"] = [lc_layer, deepcopy(lc_layer)]
if not grid or new not in grid:
grid.append(new)
dump(grid, stdout)
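# By my count this emits 80 unique configurations: 2 (train subset) x 4 (dense
# layer size) x 2 (l1) x (1 + 2 + 2) locally-connected variants, since kernel
# size is irrelevant when there are no locally connected layers and those
# duplicates collapse in the membership check above.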
|
{
"content_hash": "85e42c244aba3252d818a20f34dcee81",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 92,
"avg_line_length": 34.93181818181818,
"alnum_prop": 0.43103448275862066,
"repo_name": "hammerlab/mhcflurry",
"id": "5b15e104d4ce6ef155cc03bff33a923d6c5b27e1",
"size": "3074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "downloads-generation/models_class1_unselected_with_mass_spec/generate_hyperparameters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "144467"
},
{
"name": "Shell",
"bytes": "10008"
}
],
"symlink_target": ""
}
|
"""distutils.command.build_clib
Implements the Distutils 'build_clib' command, to build a C/C++ library
that is included in the module distribution and needed by an extension
module."""
# This module should be kept compatible with Python 1.5.2.
__revision__ = "$Id: build_clib.py,v 1.27 2002/11/19 13:12:28 akuchling Exp $"
# XXX this module has *lots* of code ripped-off quite transparently from
# build_ext.py -- not surprisingly really, as the work required to build
# a static library from a collection of C source files is not really all
# that different from what's required to build a shared object file from
# a collection of C source files. Nevertheless, I haven't done the
# necessary refactoring to account for the overlap in code between the
# two modules, mainly because a number of subtle details changed in the
# cut 'n paste. Sigh.
import os, string
from types import *
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler
from distutils import log
def show_compilers ():
from distutils.ccompiler import show_compilers
show_compilers()
class build_clib (Command):
description = "build C/C++ libraries used by Python extensions"
user_options = [
('build-clib', 'b',
"directory to build C/C++ libraries to"),
('build-temp', 't',
"directory to put temporary build by-products"),
('debug', 'g',
"compile with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options (self):
self.build_clib = None
self.build_temp = None
# List of libraries to build
self.libraries = None
# Compilation options for all libraries
self.include_dirs = None
self.define = None
self.undef = None
self.debug = None
self.force = 0
self.compiler = None
# initialize_options()
def finalize_options (self):
# This might be confusing: both build-clib and build-temp default
# to build-temp as defined by the "build" command. This is because
# I think that C libraries are really just temporary build
# by-products, at least from the point of view of building Python
# extensions -- but I want to keep my options open.
self.set_undefined_options('build',
('build_temp', 'build_clib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'))
self.libraries = self.distribution.libraries
if self.libraries:
self.check_library_list(self.libraries)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if type(self.include_dirs) is StringType:
self.include_dirs = string.split(self.include_dirs,
os.pathsep)
# XXX same as for build_ext -- what about 'self.define' and
# 'self.undef' ?
# finalize_options()
def run (self):
if not self.libraries:
return
# Yech -- this is cut 'n pasted from build_ext.py!
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
self.build_libraries(self.libraries)
# run()
def check_library_list (self, libraries):
"""Ensure that the list of libraries (presumably provided as a
command option 'libraries') is valid, i.e. it is a list of
2-tuples, where the tuples are (library_name, build_info_dict).
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise."""
# Yechh, blecch, ackk: this is ripped straight out of build_ext.py,
# with only names changed to protect the innocent!
if type(libraries) is not ListType:
raise DistutilsSetupError, \
"'libraries' option must be a list of tuples"
for lib in libraries:
            if type(lib) is not TupleType or len(lib) != 2:
                raise DistutilsSetupError, \
                      "each element of 'libraries' must be a 2-tuple"
if type(lib[0]) is not StringType:
raise DistutilsSetupError, \
"first element of each tuple in 'libraries' " + \
"must be a string (the library name)"
if '/' in lib[0] or (os.sep != '/' and os.sep in lib[0]):
raise DistutilsSetupError, \
("bad library name '%s': " +
"may not contain directory separators") % \
lib[0]
if type(lib[1]) is not DictionaryType:
raise DistutilsSetupError, \
"second element of each tuple in 'libraries' " + \
"must be a dictionary (build info)"
# for lib
# check_library_list ()
def get_library_names (self):
# Assume the library list is valid -- 'check_library_list()' is
# called from 'finalize_options()', so it should be!
if not self.libraries:
return None
lib_names = []
for (lib_name, build_info) in self.libraries:
lib_names.append(lib_name)
return lib_names
# get_library_names ()
def get_source_files (self):
self.check_library_list(self.libraries)
filenames = []
for (lib_name, build_info) in self.libraries:
sources = build_info.get('sources')
if (sources is None or
type(sources) not in (ListType, TupleType) ):
raise DistutilsSetupError, \
("in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames") % lib_name
filenames.extend(sources)
return filenames
# get_source_files ()
def build_libraries (self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources')
if sources is None or type(sources) not in (ListType, TupleType):
raise DistutilsSetupError, \
("in 'libraries' option (library '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % lib_name
sources = list(sources)
log.info("building '%s' library", lib_name)
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(objects, lib_name,
output_dir=self.build_clib,
debug=self.debug)
# for libraries
# build_libraries ()
# class build_lib
|
{
"content_hash": "6b10abc5b8c11a251b7f237ed70f11e2",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 78,
"avg_line_length": 36.42016806722689,
"alnum_prop": 0.561259806183664,
"repo_name": "OS2World/APP-INTERNET-torpak_2",
"id": "66dcdf111fa4091d97c151e7d15e15a865c62208",
"size": "8668",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Lib/distutils/command/build_clib.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
Created on 2015-05-22
@author: lujin
"""
from django import template
register = template.Library()
def trans_underline(obj, key):
    """Return obj[key], or an empty string if the lookup fails."""
    try:
        return obj[key]
    except (KeyError, IndexError, TypeError):
        return ""
register.filter(trans_underline)
|
{
"content_hash": "cced4a9752c88e0f0008e67c46beb582",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 32,
"avg_line_length": 13.823529411764707,
"alnum_prop": 0.6510638297872341,
"repo_name": "myangeline/rorobot",
"id": "1913cd95fb5fc970999e7405c999835ab56314a2",
"size": "258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/article/templatetags/myfilter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "555857"
},
{
"name": "HTML",
"bytes": "114250"
},
{
"name": "JavaScript",
"bytes": "2117838"
},
{
"name": "PHP",
"bytes": "43361"
},
{
"name": "Python",
"bytes": "19374"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('muse_api', '0005_remove_document_path'),
]
operations = [
migrations.AlterField(
model_name='binder',
name='first_child',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='binder_root', to='muse_api.Document'),
),
migrations.AlterField(
model_name='document',
name='first_child',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='parent_node', to='muse_api.Document'),
),
]
|
{
"content_hash": "efa1f6f266dab2fea19d0f728e074e70",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 159,
"avg_line_length": 33.208333333333336,
"alnum_prop": 0.6386449184441656,
"repo_name": "ltouroumov/muse-writer",
"id": "585bf500acec294c6b3ab0c25be316e86bb53e4e",
"size": "870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/muse_api/migrations/0006_auto_20170712_1512.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6223"
},
{
"name": "HTML",
"bytes": "8948"
},
{
"name": "JavaScript",
"bytes": "2390"
},
{
"name": "Python",
"bytes": "21342"
},
{
"name": "Shell",
"bytes": "768"
},
{
"name": "TypeScript",
"bytes": "32206"
}
],
"symlink_target": ""
}
|
import dataset
import time
class Flight(object):
db = dataset.connect('sqlite:///db_flight')
table = db['tb_flight']
__necessary_fields__ = ['from_airport', 'to_airport', 'from_city',
'to_city', 'from_time', 'to_time', 'website',
'price', 'flight_date', 'fetch_time']
__fields__ = ['id', 'from_airport', 'to_airport', 'from_city', 'to_city',
'from_time', 'to_time', 'website', 'price', 'flight_date',
'airline', 'flight_no', 'url', 'fetch_time']
@classmethod
def insert(cls, **kwargs):
for field in ['airline', 'flight_no', 'url']:
kwargs[field] = kwargs.get(field) if kwargs.get(field) else ''
        for f in Flight.__necessary_fields__:
            if f not in kwargs:
                raise ValueError('missing required field: %s' % f)
        for key in kwargs:
            if key not in Flight.__fields__:
                raise ValueError('unexpected field: %s' % key)
kwargs['created_at'] = time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime())
return Flight.table.insert(kwargs)
@classmethod
def select(cls, **kwargs):
flights = Flight.table.find(**kwargs)
result = list()
for flight in flights:
tmp = dict()
for key in Flight.__fields__:
tmp[key] = flight[key]
result.append(tmp)
return result
@classmethod
def get_last_fetch_time(cls):
        flights = Flight.table.find(order_by=['-fetch_time'], _limit=1)
        for flight in flights:
            return flight['fetch_time']
        return None
@classmethod
def delete(cls, **kwargs):
Flight.table.delete(**kwargs)
|
{
"content_hash": "410e05f03670468cc65be6ad73b89826",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 77,
"avg_line_length": 34.36538461538461,
"alnum_prop": 0.514829322887521,
"repo_name": "daijia/fetch-flight",
"id": "52118a4d8deb72db6469d3494e38f27248632267",
"size": "1833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/flight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40153"
}
],
"symlink_target": ""
}
|
"""
Square digit chains
Problem 92
A number chain is created by continuously adding the square of the digits in a number to form a new number until it has been seen before.
For example,
44 → 32 → 13 → 10 → 1 → 1
85 → 89 → 145 → 42 → 20 → 4 → 16 → 37 → 58 → 89
Therefore any chain that arrives at 1 or 89 will become stuck in an endless loop. What is most amazing is that EVERY starting number will eventually arrive at 1 or 89.
How many starting numbers below ten million will arrive at 89?
"""
# answer: 8581146
# first algorithm: ~230s (very simple brute force)
# another version using a list: 314s
# using a dict (current): 65s
def sequence_end(n, finalnum):
start = 0
dic = {}
end_count = 0
while start + 1 < n:
start += 1
if start in dic:
if dic[start] == finalnum:
end_count += 1
continue
else:
continue
end = start
nums = [end]
while end != 1 and end != 89:
end = sum(map(lambda x: x * x, (int(x) for x in str(end))))
if end in dic:
end = dic[end]
break
else:
nums.append(end)
for num in nums:
dic[num] = end
if end == finalnum:
end_count += 1
return end_count
total = sequence_end(10 ** 7, 89)
print(total)
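# Worked step: 145 -> 1**2 + 4**2 + 5**2 = 42, matching the chain in the
# problem statement above.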
|
{
"content_hash": "662883a021685f23c3aba20221d05fa4",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 167,
"avg_line_length": 25.61111111111111,
"alnum_prop": 0.5589298626174982,
"repo_name": "DestructHub/ProjectEuler",
"id": "041c3485ad6fc03573320630cb433c902342ffe7",
"size": "1506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Problem092/Python/solution_slow_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11936"
},
{
"name": "C++",
"bytes": "8819"
},
{
"name": "Clojure",
"bytes": "13559"
},
{
"name": "Common Lisp",
"bytes": "15839"
},
{
"name": "D",
"bytes": "602"
},
{
"name": "Dockerfile",
"bytes": "1052"
},
{
"name": "Elixir",
"bytes": "42013"
},
{
"name": "Erlang",
"bytes": "165"
},
{
"name": "Go",
"bytes": "5903"
},
{
"name": "Haskell",
"bytes": "27558"
},
{
"name": "Java",
"bytes": "209"
},
{
"name": "JavaScript",
"bytes": "1414"
},
{
"name": "Kotlin",
"bytes": "166"
},
{
"name": "Lua",
"bytes": "9561"
},
{
"name": "Makefile",
"bytes": "3474"
},
{
"name": "OCaml",
"bytes": "2186"
},
{
"name": "Objective-C",
"bytes": "324"
},
{
"name": "PHP",
"bytes": "835"
},
{
"name": "Python",
"bytes": "156669"
},
{
"name": "R",
"bytes": "59"
},
{
"name": "Racket",
"bytes": "199"
},
{
"name": "Ruby",
"bytes": "11189"
},
{
"name": "Rust",
"bytes": "232"
},
{
"name": "Scheme",
"bytes": "10272"
},
{
"name": "Shell",
"bytes": "3508"
},
{
"name": "Swift",
"bytes": "4740"
},
{
"name": "TeX",
"bytes": "1226"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('officehours', '0005_auto_20170209_2306'),
]
operations = [
migrations.AlterField(
model_name='course',
name='start_time',
field=models.TimeField(blank=True, null=True),
),
]
|
{
"content_hash": "6002c49c0e20af0fab95e24c3f8d748f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 58,
"avg_line_length": 21.88888888888889,
"alnum_prop": 0.5989847715736041,
"repo_name": "izzyalonso/tndata_backend",
"id": "94b7143090b3b0c3d929dd675d44b84f125b8390",
"size": "467",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tndata_backend/officehours/migrations/0006_auto_20170209_2309.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29078"
},
{
"name": "HTML",
"bytes": "680433"
},
{
"name": "JavaScript",
"bytes": "186991"
},
{
"name": "Makefile",
"bytes": "393"
},
{
"name": "Python",
"bytes": "2023392"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
try:
here = os.path.dirname(os.path.abspath(__file__))
description = file(os.path.join(here, 'README.md')).read()
except IOError:
description = None
PACKAGE_VERSION = '1.4'
deps = ['mozinfo == 0.4',
'mozfile'
]
setup(name='mozInstall',
version=PACKAGE_VERSION,
description="This is a utility package for installing and uninstalling "
"Mozilla applications on various platforms.",
long_description=description,
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=['Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='mozilla',
author='Mozilla Automation and Tools team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/MozBase',
license='MPL 2.0',
packages=['mozinstall'],
include_package_data=True,
zip_safe=False,
install_requires=deps,
entry_points="""
# -*- Entry points: -*-
[console_scripts]
mozinstall = mozinstall:install_cli
mozuninstall = mozinstall:uninstall_cli
""",
)
|
{
"content_hash": "69cd4fe59a28c733ebb05a54ad0fa32b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 85,
"avg_line_length": 34.6,
"alnum_prop": 0.5953757225433526,
"repo_name": "sergecodd/FireFox-OS",
"id": "13fa8c5b1670b67589a14001832bf09e537e5e7e",
"size": "1757",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "B2G/gecko/testing/mozbase/mozinstall/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "443"
},
{
"name": "ApacheConf",
"bytes": "85"
},
{
"name": "Assembly",
"bytes": "5123438"
},
{
"name": "Awk",
"bytes": "46481"
},
{
"name": "Batchfile",
"bytes": "56250"
},
{
"name": "C",
"bytes": "101720951"
},
{
"name": "C#",
"bytes": "38531"
},
{
"name": "C++",
"bytes": "148896543"
},
{
"name": "CMake",
"bytes": "23541"
},
{
"name": "CSS",
"bytes": "2758664"
},
{
"name": "DIGITAL Command Language",
"bytes": "56757"
},
{
"name": "Emacs Lisp",
"bytes": "12694"
},
{
"name": "Erlang",
"bytes": "889"
},
{
"name": "FLUX",
"bytes": "34449"
},
{
"name": "GLSL",
"bytes": "26344"
},
{
"name": "Gnuplot",
"bytes": "710"
},
{
"name": "Groff",
"bytes": "447012"
},
{
"name": "HTML",
"bytes": "43343468"
},
{
"name": "IDL",
"bytes": "1455122"
},
{
"name": "Java",
"bytes": "43261012"
},
{
"name": "JavaScript",
"bytes": "46646658"
},
{
"name": "Lex",
"bytes": "38358"
},
{
"name": "Logos",
"bytes": "21054"
},
{
"name": "Makefile",
"bytes": "2733844"
},
{
"name": "Matlab",
"bytes": "67316"
},
{
"name": "Max",
"bytes": "3698"
},
{
"name": "NSIS",
"bytes": "421625"
},
{
"name": "Objective-C",
"bytes": "877657"
},
{
"name": "Objective-C++",
"bytes": "737713"
},
{
"name": "PHP",
"bytes": "17415"
},
{
"name": "Pascal",
"bytes": "6780"
},
{
"name": "Perl",
"bytes": "1153180"
},
{
"name": "Perl6",
"bytes": "1255"
},
{
"name": "PostScript",
"bytes": "1139"
},
{
"name": "PowerShell",
"bytes": "8252"
},
{
"name": "Protocol Buffer",
"bytes": "26553"
},
{
"name": "Python",
"bytes": "8453201"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3481"
},
{
"name": "Ruby",
"bytes": "5116"
},
{
"name": "Scilab",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "3383832"
},
{
"name": "SourcePawn",
"bytes": "23661"
},
{
"name": "TeX",
"bytes": "879606"
},
{
"name": "WebIDL",
"bytes": "1902"
},
{
"name": "XSLT",
"bytes": "13134"
},
{
"name": "Yacc",
"bytes": "112744"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import six
import iso639
from collections import defaultdict
from dash.orgs.views import OrgPermsMixin, OrgObjPermsMixin
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse
from django.utils.timesince import timesince
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from el_pagination.paginators import LazyPaginator
from smartmin.mixins import NonAtomicMixin
from smartmin.views import SmartCRUDL, SmartTemplateView
from smartmin.views import (SmartListView, SmartCreateView, SmartUpdateView, SmartDeleteView, SmartReadView,
SmartCSVImportView)
from smartmin.csv_imports.models import ImportTask
from temba_client.utils import parse_iso8601
from django.utils import timezone
from casepro.rules.mixins import RuleFormMixin
from casepro.statistics.models import DailyCount
from casepro.utils import parse_csv, str_to_bool, JSONEncoder, json_encode, month_range
from casepro.utils.export import BaseDownloadView
from .forms import LabelForm, FaqForm
from .models import Label, FAQ, Message, MessageExport, MessageFolder, Outgoing, OutgoingFolder, ReplyExport
from .tasks import message_export, reply_export
RESPONSE_DELAY_WARN_SECONDS = 24 * 60 * 60 # show response delays > 1 day as warning
# Override the ImportTask start method so we can use our self-defined task
def override_start(self, org): # pragma: no cover
from .tasks import faq_csv_import
self.log("Queued import at %s" % now())
self.save(update_fields=['import_log'])
# trigger task
result = faq_csv_import.delay(org, self.pk)
self.task_id = result.task_id
self.save(update_fields=['task_id'])
ImportTask.start = override_start
class LabelCRUDL(SmartCRUDL):
actions = ('create', 'update', 'read', 'delete', 'list', 'watch', 'unwatch')
model = Label
class Create(RuleFormMixin, OrgPermsMixin, SmartCreateView):
form_class = LabelForm
def get_form_kwargs(self):
kwargs = super(LabelCRUDL.Create, self).get_form_kwargs()
kwargs['org'] = self.request.org
kwargs['is_create'] = True
return kwargs
def derive_initial(self):
# label created manually in casepro aren't synced by default
initial = super(LabelCRUDL.Create, self).derive_initial()
initial['is_synced'] = False
return initial
def save(self, obj):
data = self.form.cleaned_data
org = self.request.org
name = data['name']
description = data['description']
tests = self.construct_tests()
is_synced = data['is_synced']
self.object = Label.create(org, name, description, tests, is_synced)
def get_success_url(self):
return reverse('msgs.label_read', args=[self.object.pk])
class Update(RuleFormMixin, OrgObjPermsMixin, SmartUpdateView):
form_class = LabelForm
success_url = 'id@msgs.label_read'
def get_form_kwargs(self):
kwargs = super(LabelCRUDL.Update, self).get_form_kwargs()
kwargs['org'] = self.request.org
kwargs['is_create'] = False
return kwargs
def post_save(self, obj):
obj = super(LabelCRUDL.Update, self).post_save(obj)
tests = self.construct_tests()
obj.update_tests(tests)
return obj
class Read(OrgObjPermsMixin, SmartReadView):
def get_queryset(self):
return Label.get_all(self.request.org, self.request.user)
def get_context_data(self, **kwargs):
context = super(LabelCRUDL.Read, self).get_context_data(**kwargs)
# augment usual label JSON
label_json = self.object.as_json()
label_json['watching'] = self.object.is_watched_by(self.request.user)
# angular app requires context data in JSON format
context['context_data_json'] = json_encode({
'label': label_json
})
return context
class Delete(OrgObjPermsMixin, SmartDeleteView):
cancel_url = '@msgs.label_list'
def post(self, request, *args, **kwargs):
label = self.get_object()
label.release()
return HttpResponse(status=204)
class List(OrgPermsMixin, SmartListView):
def get(self, request, *args, **kwargs):
with_activity = str_to_bool(self.request.GET.get('with_activity', ''))
labels = list(Label.get_all(self.request.org, self.request.user).order_by('name'))
Label.bulk_cache_initialize(labels)
if with_activity:
# get message statistics
this_month = DailyCount.get_by_label(labels, DailyCount.TYPE_INCOMING, *month_range(0)).scope_totals()
last_month = DailyCount.get_by_label(labels, DailyCount.TYPE_INCOMING, *month_range(-1)).scope_totals()
def as_json(label):
obj = label.as_json()
if with_activity:
obj['activity'] = {
'this_month': this_month.get(label, 0),
'last_month': last_month.get(label, 0),
}
return obj
return JsonResponse({'results': [as_json(l) for l in labels]})
class Watch(OrgObjPermsMixin, SmartReadView):
"""
Endpoint for watching a label
"""
permission = 'msgs.label_read'
def post(self, request, *args, **kwargs):
self.get_object().watch(request.user)
return HttpResponse(status=204)
class Unwatch(OrgObjPermsMixin, SmartReadView):
"""
Endpoint for unwatching a label
"""
permission = 'msgs.label_read'
def post(self, request, *args, **kwargs):
self.get_object().unwatch(request.user)
return HttpResponse(status=204)
class MessageSearchMixin(object):
def derive_search(self):
"""
Collects and prepares message search parameters into JSON serializable dict
"""
folder = MessageFolder[self.request.GET['folder']]
label_id = self.request.GET.get('label', None)
include_archived = str_to_bool(self.request.GET.get('archived', ''))
text = self.request.GET.get('text', None)
contact_id = self.request.GET.get('contact', None)
group_ids = parse_csv(self.request.GET.get('groups', ''), as_ints=True)
after = parse_iso8601(self.request.GET.get('after', None))
before = parse_iso8601(self.request.GET.get('before', None))
return {
'folder': folder,
'label': label_id,
'include_archived': include_archived, # only applies to flagged folder
'text': text,
'contact': contact_id,
'groups': group_ids,
'after': after,
'before': before
}
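        # Illustrative return value (all values hypothetical):
        #   {'folder': MessageFolder.inbox, 'label': '3', 'include_archived': False,
        #    'text': 'rain', 'contact': None, 'groups': [1, 2],
        #    'after': <datetime>, 'before': None}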
class MessageCRUDL(SmartCRUDL):
actions = ('search', 'lock', 'action', 'label', 'bulk_reply', 'forward', 'history')
model = Message
class Search(OrgPermsMixin, MessageSearchMixin, SmartTemplateView):
"""
JSON endpoint for fetching incoming messages
"""
def get_context_data(self, **kwargs):
context = super(MessageCRUDL.Search, self).get_context_data(**kwargs)
org = self.request.org
user = self.request.user
page = int(self.request.GET.get('page', 1))
search = self.derive_search()
# this is a refresh of messages
if self.request.GET.get('last_refresh', None):
new_messages = Message.search(org, user, search)
search['last_refresh'] = self.request.GET['last_refresh']
updated_messages = Message.search(org, user, search)
context['object_list'] = list(new_messages) + list(set(updated_messages) - set(new_messages))
context['has_more'] = False
return context
messages = Message.search(org, user, search)
paginator = LazyPaginator(messages, per_page=50)
context['object_list'] = paginator.page(page)
context['has_more'] = paginator.num_pages > page
return context
def render_to_response(self, context, **response_kwargs):
results = []
for m in context['object_list']:
msg = m.as_json()
msg['lock'] = m.get_lock(self.request.user)
results.append(msg)
return JsonResponse({
'results': results,
'has_more': context['has_more']
}, encoder=JSONEncoder)
class Lock(OrgPermsMixin, SmartTemplateView):
"""
        AJAX endpoint for locking or unlocking messages for a user.
        Takes a list of message backend ids.
"""
@classmethod
def derive_url_pattern(cls, path, action):
return r'^message/lock/(?P<action>\w+)/$'
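        # Locking semantics, as I read the handler below: a 'lock' request
        # first checks every requested message; if any are already held by
        # another user their backend ids are returned and nothing new is
        # locked. Only when the whole batch is free is the lock stamped on
        # each message. An 'unlock' request clears locked_by while refreshing
        # the timestamps.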
def post(self, request, *args, **kwargs):
org = request.org
user = request.user
action = kwargs['action']
message_ids = request.json['messages']
messages = org.incoming_messages.filter(org=org, backend_id__in=message_ids)
lock_messages = []
if action == 'lock':
for message in messages:
if message.get_lock(request.user):
lock_messages.append(message.backend_id)
if not lock_messages:
for message in messages:
message.locked_on = timezone.now()
message.locked_by = user
message.modified_on = timezone.now()
message.save(update_fields=['locked_on', 'locked_by', 'modified_on'])
elif action == 'unlock':
for message in messages:
message.locked_on = timezone.now()
message.locked_by = None
message.modified_on = timezone.now()
message.save(update_fields=['locked_on', 'locked_by', 'modified_on'])
else: # pragma: no cover
return HttpResponseBadRequest("Invalid action: %s", action)
return JsonResponse({'messages': lock_messages}, encoder=JSONEncoder)
class Action(OrgPermsMixin, SmartTemplateView):
"""
AJAX endpoint for bulk message actions. Takes a list of message ids.
"""
@classmethod
def derive_url_pattern(cls, path, action):
return r'^message/action/(?P<action>\w+)/$'
def post(self, request, *args, **kwargs):
org = request.org
user = request.user
action = kwargs['action']
message_ids = request.json['messages']
messages = org.incoming_messages.filter(org=org, backend_id__in=message_ids)
label_id = request.json.get('label')
label = Label.get_all(org, user).get(pk=label_id) if label_id else None
if action == 'flag':
Message.bulk_flag(org, user, messages)
elif action == 'unflag':
Message.bulk_unflag(org, user, messages)
elif action == 'label':
Message.bulk_label(org, user, messages, label)
elif action == 'unlabel':
Message.bulk_unlabel(org, user, messages, label)
elif action == 'archive':
Message.bulk_archive(org, user, messages)
elif action == 'restore':
Message.bulk_restore(org, user, messages)
else: # pragma: no cover
                return HttpResponseBadRequest("Invalid action: %s" % action)
return HttpResponse(status=204)
class Label(OrgPermsMixin, SmartTemplateView):
"""
AJAX endpoint for labelling a message.
"""
@classmethod
def derive_url_pattern(cls, path, action):
return r'^message/label/(?P<id>\d+)/$'
def post(self, request, *args, **kwargs):
org = request.org
user = request.user
            user_labels = Label.get_all(org, user)
message_id = int(kwargs['id'])
message = org.incoming_messages.filter(org=org, backend_id=message_id).first()
label_ids = request.json['labels']
specified_labels = list(user_labels.filter(pk__in=label_ids))
# user can't remove labels that they can't see
unseen_labels = [l for l in message.labels.all() if l not in user_labels]
message.update_labels(user, specified_labels + unseen_labels)
return HttpResponse(status=204)
class BulkReply(OrgPermsMixin, SmartTemplateView):
"""
JSON endpoint for bulk messages replies
"""
@classmethod
def derive_url_pattern(cls, path, action):
return r'^message/bulk_reply/$'
def post(self, request, *args, **kwargs):
text = request.json['text']
message_ids = request.json['messages']
messages = Message.objects.filter(org=request.org, backend_id__in=message_ids).select_related('contact')
# organize messages by contact
messages_by_contact = defaultdict(list)
for msg in messages:
messages_by_contact[msg.contact].append(msg)
            # the actual message that will be replied to is the most recent selected message for each contact
reply_tos = []
for contact, contact_messages in six.iteritems(messages_by_contact):
contact_messages = sorted(contact_messages, key=lambda m: m.created_on, reverse=True)
reply_tos.append(contact_messages[0])
outgoing = Outgoing.create_bulk_replies(request.org, request.user, text, reply_tos)
return JsonResponse({'messages': len(outgoing)})
class Forward(OrgPermsMixin, SmartTemplateView):
"""
JSON endpoint for forwarding a message to a URN
"""
@classmethod
def derive_url_pattern(cls, path, action):
return r'^message/forward/(?P<id>\d+)/$'
def post(self, request, *args, **kwargs):
text = request.json['text']
message = Message.objects.get(org=request.org, backend_id=int(kwargs['id']))
urns = request.json['urns']
outgoing = Outgoing.create_forwards(request.org, request.user, text, urns, message)
return JsonResponse({'messages': len(outgoing)})
class History(OrgPermsMixin, SmartTemplateView):
"""
JSON endpoint for fetching message history. Takes a message backend id
"""
@classmethod
def derive_url_pattern(cls, path, action):
return r'^message/history/(?P<id>\d+)/$'
def get(self, request, *args, **kwargs):
message = Message.objects.get(org=request.org, backend_id=int(kwargs['id']))
actions = [a.as_json() for a in message.get_history()]
return JsonResponse({'actions': actions}, encoder=JSONEncoder)
class MessageExportCRUDL(SmartCRUDL):
model = MessageExport
actions = ('create', 'read')
class Create(NonAtomicMixin, OrgPermsMixin, MessageSearchMixin, SmartCreateView):
def post(self, request, *args, **kwargs):
search = self.derive_search()
export = MessageExport.create(self.request.org, self.request.user, search)
message_export.delay(export.pk)
return JsonResponse({'export_id': export.pk})
class Read(BaseDownloadView):
title = _("Download Messages")
filename = 'message_export.xls'
class ReplySearchMixin(object):
def derive_search(self):
"""
Collects and prepares reply search parameters into JSON serializable dict
"""
params = self.request.GET
partner = params.get('partner')
after = parse_iso8601(params.get('after'))
before = parse_iso8601(params.get('before'))
return {'partner': partner, 'after': after, 'before': before}
class OutgoingCRUDL(SmartCRUDL):
actions = ('search', 'search_replies')
model = Outgoing
class Search(OrgPermsMixin, SmartTemplateView):
"""
JSON endpoint for fetching outgoing messages
"""
def derive_search(self):
folder = OutgoingFolder[self.request.GET['folder']]
text = self.request.GET.get('text', None)
contact = self.request.GET.get('contact', None)
return {'folder': folder, 'text': text, 'contact': contact}
def get_context_data(self, **kwargs):
context = super(OutgoingCRUDL.Search, self).get_context_data(**kwargs)
org = self.request.org
user = self.request.user
page = int(self.request.GET.get('page', 1))
search = self.derive_search()
messages = Outgoing.search(org, user, search)
paginator = LazyPaginator(messages, per_page=50)
context['object_list'] = paginator.page(page)
context['has_more'] = paginator.num_pages > page
return context
def render_to_response(self, context, **response_kwargs):
return JsonResponse({
'results': [m.as_json() for m in context['object_list']],
'has_more': context['has_more']
}, encoder=JSONEncoder)
class SearchReplies(OrgPermsMixin, ReplySearchMixin, SmartTemplateView):
"""
JSON endpoint to fetch replies made by users
"""
def get(self, request, *args, **kwargs):
org = self.request.org
user = self.request.user
page = int(self.request.GET.get('page', 1))
search = self.derive_search()
items = Outgoing.search_replies(org, user, search)
paginator = LazyPaginator(items, 50)
outgoing = paginator.page(page)
has_more = paginator.num_pages > page
def as_json(msg):
delay = (msg.created_on - msg.reply_to.created_on).total_seconds()
obj = msg.as_json()
obj.update({
'reply_to': {
'text': msg.reply_to.text,
'flagged': msg.reply_to.is_flagged,
'labels': [l.as_json(full=False) for l in msg.reply_to.labels.all()],
},
'response': {
'delay': timesince(msg.reply_to.created_on, now=msg.created_on),
'warning': delay > RESPONSE_DELAY_WARN_SECONDS
}
})
return obj
return JsonResponse({'results': [as_json(o) for o in outgoing], 'has_more': has_more}, encoder=JSONEncoder)
class ReplyExportCRUDL(SmartCRUDL):
model = ReplyExport
actions = ('create', 'read')
class Create(NonAtomicMixin, OrgPermsMixin, ReplySearchMixin, SmartCreateView):
def post(self, request, *args, **kwargs):
search = self.derive_search()
export = self.model.create(self.request.org, self.request.user, search)
reply_export.delay(export.pk)
return JsonResponse({'export_id': export.pk})
class Read(BaseDownloadView):
title = _("Download Replies")
filename = 'reply_export.xls'
class FaqSearchMixin(object):
def derive_search(self):
"""
Collects and prepares FAQ search parameters into JSON serializable dict
"""
label = self.request.GET.get('label', None)
text = self.request.GET.get('text', None)
language = self.request.GET.get('language', None)
return {
'label': label,
'text': text,
'language': language,
}
class FaqCRUDL(SmartCRUDL):
model = FAQ
actions = ('list', 'create', 'read', 'update', 'delete', 'search', 'import', 'languages')
class List(OrgPermsMixin, SmartListView):
fields = ('question', 'answer', 'language', 'parent')
default_order = ('-parent', 'question')
def derive_queryset(self, **kwargs):
return FAQ.get_all(self.request.org)
class Create(OrgPermsMixin, SmartCreateView):
form_class = FaqForm
def get_form_kwargs(self):
kwargs = super(FaqCRUDL.Create, self).get_form_kwargs()
# Get the data for post requests that didn't come through a form
if self.request.method == 'POST' and not self.request.POST and hasattr(self.request, 'json'):
kwargs['data'] = self.request.json
kwargs['org'] = self.request.org
return kwargs
def save(self, obj):
data = self.form.cleaned_data
org = self.request.org
question = data['question']
answer = data['answer']
language = data['language']
parent = data['parent']
labels = data['labels']
faq = FAQ.create(org, question, answer, language, parent, labels)
self.object = faq
class Read(OrgPermsMixin, SmartReadView):
fields = ['question', 'answer', 'language', 'parent']
def derive_queryset(self, **kwargs):
return FAQ.get_all(self.request.org)
def get_context_data(self, **kwargs):
context = super(FaqCRUDL.Read, self).get_context_data(**kwargs)
edit_button_url = reverse('msgs.faq_update', args=[self.object.pk])
context['context_data_json'] = json_encode({'faq': self.object.as_json()})
context['edit_button_url'] = edit_button_url
context['can_delete'] = True
labels = []
for label in self.object.labels.all():
labels.append(label.name)
context['labels'] = ', '.join(labels)
return context
class Update(OrgPermsMixin, SmartUpdateView):
form_class = FaqForm
def get_form_kwargs(self):
kwargs = super(FaqCRUDL.Update, self).get_form_kwargs()
# Get the data for post requests that didn't come through a form
if self.request.method == 'POST' and not self.request.POST and hasattr(self.request, 'json'):
kwargs['data'] = self.request.json
kwargs['org'] = self.request.org
return kwargs
def derive_initial(self):
initial = super(FaqCRUDL.Update, self).derive_initial()
initial['labels'] = self.object.labels.all()
return initial
def derive_fields(self):
fields = ['question', 'answer', 'language', 'parent']
if not self.object.parent:
fields.append('labels')
return tuple(fields)
class Delete(OrgPermsMixin, SmartDeleteView):
cancel_url = '@msgs.faq_list'
def post(self, request, *args, **kwargs):
faq = self.get_object()
faq.delete()
return HttpResponse(status=204)
class Search(OrgPermsMixin, FaqSearchMixin, SmartTemplateView):
"""
JSON endpoint for searching FAQs
"""
def get_context_data(self, **kwargs):
context = super(FaqCRUDL.Search, self).get_context_data(**kwargs)
org = self.request.org
user = self.request.user
search = self.derive_search()
faqs = FAQ.search(org, user, search)
context['object_list'] = faqs
return context
def render_to_response(self, context, **response_kwargs):
return JsonResponse({
'results': [m.as_json() for m in context['object_list']],
}, encoder=JSONEncoder)
class Import(OrgPermsMixin, SmartCSVImportView):
model = ImportTask
fields = ('csv_file',)
success_message = "File uploaded successfully. If your FAQs don't appear here soon, something went wrong."
success_url = '@msgs.faq_list'
def post_save(self, task):
task.start(self.org)
return task
class Languages(OrgPermsMixin, SmartTemplateView):
"""
JSON endpoint for getting a list of currently all available languages
"""
def get_context_data(self, **kwargs):
context = super(FaqCRUDL.Languages, self).get_context_data(**kwargs)
org = self.request.org
langs = FAQ.get_all_languages(org)
lang_list = []
for lang in langs:
lang_list.append(FAQ.get_language_from_code(lang['language']))
context['language_list'] = lang_list
iso_list = iso639._load_data()
# remove unwanted keys and only show name up to the first semicolon
            for key in iso_list:
                del key['iso639_2_t'], key['native'], key['iso639_1']
                if 'name' in key:
                    key['name'] = key['name'].split(';')[0]
context['iso_list'] = iso_list
return context
def render_to_response(self, context, **response_kwargs):
return JsonResponse({
'results': context['language_list'],
'iso_list': context['iso_list'],
}, encoder=JSONEncoder)
|
{
"content_hash": "4f260caf6f63ee510bd1948c9359a51f",
"timestamp": "",
"source": "github",
"line_count": 688,
"max_line_length": 119,
"avg_line_length": 37.1656976744186,
"alnum_prop": 0.5832225263981228,
"repo_name": "xkmato/casepro",
"id": "bf3a8beb9d19d42965b24d0feccf1ff6023e46f6",
"size": "25570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "casepro/msgs/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3475"
},
{
"name": "CoffeeScript",
"bytes": "220522"
},
{
"name": "HTML",
"bytes": "104527"
},
{
"name": "PLpgSQL",
"bytes": "6012"
},
{
"name": "Python",
"bytes": "878626"
}
],
"symlink_target": ""
}
|
from panda3d.core import NodePath
from toontown.toonbase import ToontownIntervals
from toontown.toonbase.ToontownTimer import ToontownTimer
from CogdoFlyingGameGuis import CogdoFlyingFuelGui, CogdoFlyingProgressGui
from CogdoGameMessageDisplay import CogdoGameMessageDisplay
from CogdoMemoGui import CogdoMemoGui
import CogdoFlyingGameGlobals as Globals
class CogdoFlyingGuiManager:
ClearMessageDisplayEventName = 'ClearMessageDisplayEvent'
EagleTargetingLocalPlayerEventName = 'EagleTargetingLocalPlayerEvent'
EagleAttackingLocalPlayerEventName = 'EagleAttackingLocalPlayerEvent'
FirstPressOfCtrlEventName = 'FirstPressOfCtrlEvent'
PickedUpFirstPropellerEventName = 'PickedUpFirstPropellerEvent'
InvulnerableEventName = 'InvulnerableEvent'
StartRunningOutOfTimeMusicEventName = 'StartRunningOutOfTimeEvent'
def __init__(self, level):
self._level = level
self.root = NodePath('CogdoFlyingGui')
self.root.reparentTo(aspect2d)
self.root.stash()
self.fuelGui = NodePath('CogdoFlyingFuelGui')
self.fuelGui.reparentTo(base.a2dBottomLeft)
self.fuelGui.stash()
self.progressGui = NodePath('CogdoFlyingProgressGui')
self.progressGui.reparentTo(base.a2dBottomRight)
self.progressGui.stash()
self._initTimer()
self._initHud()
self._initMessageDisplay()
self.sentTimeRunningOutMessage = False
self._refuelGui = CogdoFlyingFuelGui(self.fuelGui)
self._progressGui = CogdoFlyingProgressGui(self.progressGui, self._level)
def _initHud(self):
self._memoGui = CogdoMemoGui(self.root, 'memo_card')
self._memoGui.posNextToLaffMeter()
def _initTimer(self):
self._timer = ToontownTimer()
self._timer.hide()
self._timer.posInTopRightCorner()
def _initMessageDisplay(self):
audioMgr = base.cogdoGameAudioMgr
sound = audioMgr.createSfx('popupHelpText')
self._messageDisplay = CogdoGameMessageDisplay('CogdoFlyingMessageDisplay', self.root, sfx=sound)
def destroyTimer(self):
if self._timer is not None:
self._timer.stop()
self._timer.destroy()
self._timer = None
return
def onstage(self):
self.root.unstash()
self.fuelGui.unstash()
self.progressGui.unstash()
self._refuelGui.hide()
self._progressGui.hide()
def presentProgressGui(self):
ToontownIntervals.start(ToontownIntervals.getPresentGuiIval(self._progressGui, 'present_progress_gui'))
def presentRefuelGui(self):
ToontownIntervals.start(ToontownIntervals.getPresentGuiIval(self._refuelGui, 'present_fuel_gui'))
def presentTimerGui(self):
ToontownIntervals.start(ToontownIntervals.getPresentGuiIval(self._timer, 'present_timer_gui'))
def presentMemoGui(self):
ToontownIntervals.start(ToontownIntervals.getPresentGuiIval(self._memoGui, 'present_memo_gui'))
def offstage(self):
self.root.stash()
self.fuelGui.stash()
self.progressGui.stash()
self._refuelGui.hide()
self._progressGui.hide()
self.hideTimer()
def getTimeLeft(self):
return Globals.Gameplay.SecondsUntilGameOver - self._timer.getElapsedTime()
def isTimeRunningOut(self):
return self.getTimeLeft() < Globals.Gameplay.TimeRunningOutSeconds
def startTimer(self, duration, timerExpiredCallback = None, keepHidden = False):
if self._timer is None:
self._initTimer()
self._timer.setTime(duration)
self._timer.countdown(duration, timerExpiredCallback)
if keepHidden:
self.hideTimer()
else:
self.showTimer()
return
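    # Illustrative call pattern (the callback name is an assumption, not part
    # of this class):
    #   guiMgr.startTimer(Globals.Gameplay.SecondsUntilGameOver,
    #                     timerExpiredCallback=self._handleTimeout)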
def stopTimer(self):
if hasattr(self, '_timer') and self._timer is not None:
self.hideTimer()
self._timer.stop()
return
def showTimer(self):
self._timer.show()
def hideTimer(self):
self._timer.hide()
def forceTimerDone(self):
        if self._timer.countdownTask is not None:
self._timer.countdownTask.duration = 0
return
def showRefuelGui(self):
self._refuelGui.show()
def hideRefuelGui(self):
self._refuelGui.hide()
def setMessage(self, text, color = None, transition = 'fade'):
self._messageDisplay.updateMessage(text, color, transition)
def setTemporaryMessage(self, text, duration = 3.0, color = None):
self._messageDisplay.showMessageTemporarily(text, duration, color)
def setFuel(self, fuel):
self._refuelGui.setFuel(fuel)
def resetBlades(self):
self._refuelGui.resetBlades()
def setBlades(self, fuelState):
self._refuelGui.setBlades(fuelState)
def bladeLost(self):
self._refuelGui.bladeLost()
def setPropellerSpinRate(self, newRate):
self._refuelGui.setPropellerSpinRate(newRate)
def setMemoCount(self, score):
self._memoGui.setCount(score)
def addToonToProgressMeter(self, toon):
self._progressGui.addToon(toon)
def removeToonFromProgressMeter(self, toon):
self._progressGui.removeToon(toon)
def update(self):
if self.isTimeRunningOut() and not self.sentTimeRunningOutMessage:
messenger.send(CogdoFlyingGuiManager.StartRunningOutOfTimeMusicEventName)
self.sentTimeRunningOutMessage = True
self._refuelGui.update()
self._progressGui.update()
def destroy(self):
ToontownIntervals.cleanup('present_fuel_gui')
ToontownIntervals.cleanup('present_timer_gui')
ToontownIntervals.cleanup('present_memo_gui')
ToontownIntervals.cleanup('present_progress_gui')
self._refuelGui.destroy()
self._refuelGui = None
self._memoGui.destroy()
self._memoGui = None
self._progressGui.destroy()
self._progressGui = None
self.destroyTimer()
self._messageDisplay.destroy()
self._messageDisplay = None
self.root.removeNode()
self.root = None
self.fuelGui.removeNode()
self.fuelGui = None
self.progressGui.removeNode()
self.progressGui = None
return
|
{
"content_hash": "82ee2f29fc9dac8cb863828c4a6b6e55",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 111,
"avg_line_length": 35.569060773480665,
"alnum_prop": 0.6599875737806772,
"repo_name": "DedMemez/ODS-August-2017",
"id": "089f5cebd9c22ed383755f64dd70799d6917f128",
"size": "6541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cogdominium/CogdoFlyingGuiManager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10152014"
},
{
"name": "Shell",
"bytes": "707"
}
],
"symlink_target": ""
}
|
#!/usr/common/bin/python
# apiutil.py
#
# This file defines a bunch of utility functions for OpenGL API code
# generation.
import sys, string, re
#======================================================================
def CopyrightC( ):
print """
"""
def CopyrightDef( ):
print """; Copyright (c) 2001, Stanford University
; All rights reserved.
;
; See the file LICENSE.txt for information on redistributing this software.
"""
#======================================================================
class APIFunction:
"""Class to represent a GL API function (name, return type,
parameters, etc)."""
def __init__(self):
self.name = ''
self.returnType = ''
self.category = ''
self.offset = -1
self.alias = ''
self.vectoralias = ''
self.params = []
self.paramlist = []
self.paramvec = []
self.paramaction = []
self.paramprop = []
self.paramset = []
self.props = []
self.chromium = []
self.chrelopcode = -1
def ProcessSpecFile(filename, userFunc):
"""Open the named API spec file and call userFunc(record) for each record
processed."""
specFile = open(filename, "r")
if not specFile:
print "Error: couldn't open %s file!" % filename
sys.exit()
record = APIFunction()
for line in specFile.readlines():
# split line into tokens
tokens = string.split(line)
if len(tokens) > 0 and line[0] != '#':
if tokens[0] == 'name':
if record.name != '':
# process the function now
userFunc(record)
# reset the record
record = APIFunction()
record.name = tokens[1]
elif tokens[0] == 'return':
record.returnType = string.join(tokens[1:], ' ')
elif tokens[0] == 'param':
name = tokens[1]
type = string.join(tokens[2:], ' ')
vecSize = 0
record.params.append((name, type, vecSize))
elif tokens[0] == 'paramprop':
name = tokens[1]
str = tokens[2:]
enums = []
for i in range(len(str)):
enums.append(str[i])
record.paramprop.append((name, enums))
elif tokens[0] == 'paramlist':
name = tokens[1]
str = tokens[2:]
list = []
for i in range(len(str)):
list.append(str[i])
record.paramlist.append((name,list))
elif tokens[0] == 'paramvec':
name = tokens[1]
str = tokens[2:]
vec = []
for i in range(len(str)):
vec.append(str[i])
record.paramvec.append((name,vec))
elif tokens[0] == 'paramset':
line = tokens[1:]
result = []
for i in range(len(line)):
tset = line[i]
if tset == '[':
nlist = []
elif tset == ']':
result.append(nlist)
nlist = []
else:
nlist.append(tset)
if result != []:
record.paramset.append(result)
elif tokens[0] == 'paramaction':
name = tokens[1]
str = tokens[2:]
list = []
for i in range(len(str)):
list.append(str[i])
record.paramaction.append((name,list))
elif tokens[0] == 'category':
record.category = tokens[1]
elif tokens[0] == 'offset':
if tokens[1] == '?':
record.offset = -2
else:
record.offset = int(tokens[1])
elif tokens[0] == 'alias':
record.alias = tokens[1]
elif tokens[0] == 'vectoralias':
record.vectoralias = tokens[1]
elif tokens[0] == 'props':
record.props = tokens[1:]
elif tokens[0] == 'chromium':
record.chromium = tokens[1:]
elif tokens[0] == 'vector':
vecName = tokens[1]
vecSize = int(tokens[2])
for i in range(len(record.params)):
(name, type, oldSize) = record.params[i]
if name == vecName:
record.params[i] = (name, type, vecSize)
break
elif tokens[0] == 'chrelopcode':
record.chrelopcode = int(tokens[1])
else:
print 'Invalid token %s after function %s' % (tokens[0], record.name)
#endif
#endif
#endfor
    # flush the final record; there is no trailing 'name' line to trigger it
    if record.name != '':
        userFunc(record)
    specFile.close()
#enddef
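# Illustrative sketch of the record syntax ProcessSpecFile() expects; the
# tokens below are made up for illustration, not copied from APIspec.txt:
#
#   name Color3f
#   return void
#   param red GLfloat
#   param green GLfloat
#   param blue GLfloat
#   category 1.0
#   props pervertex
#   chromium pack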
# Dictionary [name] of APIFunction:
__FunctionDict = {}
# Dictionary [name] of name
__VectorVersion = {}
# Reverse mapping of function name aliases
__ReverseAliases = {}
def AddFunction(record):
assert not __FunctionDict.has_key(record.name)
#if not "omit" in record.chromium:
__FunctionDict[record.name] = record
def GetFunctionDict(specFile = ""):
if not specFile:
specFile = sys.argv[1]+"/APIspec.txt"
if len(__FunctionDict) == 0:
ProcessSpecFile(specFile, AddFunction)
# Look for vector aliased functions
for func in __FunctionDict.keys():
va = __FunctionDict[func].vectoralias
if va != '':
__VectorVersion[va] = func
#endif
# and look for regular aliases (for glloader)
a = __FunctionDict[func].alias
if a:
__ReverseAliases[a] = func
#endif
#endfor
#endif
return __FunctionDict
def GetAllFunctions(specFile = ""):
"""Return sorted list of all functions known to Chromium."""
d = GetFunctionDict(specFile)
funcs = []
for func in d.keys():
rec = d[func]
if not "omit" in rec.chromium:
funcs.append(func)
funcs.sort()
return funcs
def GetAllFunctionsAndOmittedAliases(specFile = ""):
"""Return sorted list of all functions known to Chromium."""
d = GetFunctionDict(specFile)
funcs = []
for func in d.keys():
rec = d[func]
if (not "omit" in rec.chromium or
rec.alias != ''):
funcs.append(func)
funcs.sort()
return funcs
def GetDispatchedFunctions(specFile = ""):
"""Return sorted list of all functions handled by SPU dispatch table."""
d = GetFunctionDict(specFile)
funcs = []
for func in d.keys():
rec = d[func]
if (not "omit" in rec.chromium and
not "stub" in rec.chromium and
rec.alias == ''):
funcs.append(func)
funcs.sort()
return funcs
#======================================================================
def ReturnType(funcName):
"""Return the C return type of named function.
Examples: "void" or "const GLubyte *". """
d = GetFunctionDict()
return d[funcName].returnType
def Parameters(funcName):
"""Return list of tuples (name, type, vecSize) of function parameters.
Example: if funcName=="ClipPlane" return
[ ("plane", "GLenum", 0), ("equation", "const GLdouble *", 4) ] """
d = GetFunctionDict()
return d[funcName].params
def ParamAction(funcName):
"""Return list of names of actions for testing.
For PackerTest only."""
d = GetFunctionDict()
return d[funcName].paramaction
def ParamList(funcName):
"""Return list of tuples (name, list of values) of function parameters.
For PackerTest only."""
d = GetFunctionDict()
return d[funcName].paramlist
def ParamVec(funcName):
"""Return list of tuples (name, vector of values) of function parameters.
For PackerTest only."""
d = GetFunctionDict()
return d[funcName].paramvec
def ParamSet(funcName):
"""Return list of tuples (name, list of values) of function parameters.
For PackerTest only."""
d = GetFunctionDict()
return d[funcName].paramset
def Properties(funcName):
"""Return list of properties of the named GL function."""
d = GetFunctionDict()
return d[funcName].props
def AllWithProperty(property):
"""Return list of functions that have the named property."""
funcs = []
for funcName in GetDispatchedFunctions():
if property in Properties(funcName):
funcs.append(funcName)
return funcs
def Category(funcName):
"""Return the category of the named GL function."""
d = GetFunctionDict()
return d[funcName].category
def ChromiumProps(funcName):
"""Return list of Chromium-specific properties of the named GL function."""
d = GetFunctionDict()
return d[funcName].chromium
def ChromiumRelOpCode(funcName):
"""Return list of Chromium-specific properties of the named GL function."""
d = GetFunctionDict()
return d[funcName].chrelopcode
def ParamProps(funcName):
"""Return list of Parameter-specific properties of the named GL function."""
d = GetFunctionDict()
return d[funcName].paramprop
def Alias(funcName):
"""Return the function that the named function is an alias of.
Ex: Alias('DrawArraysEXT') = 'DrawArrays'.
"""
d = GetFunctionDict()
return d[funcName].alias
def ReverseAlias(funcName):
"""Like Alias(), but the inverse."""
d = GetFunctionDict()
if funcName in __ReverseAliases.keys():
return __ReverseAliases[funcName]
else:
return ''
def NonVectorFunction(funcName):
"""Return the non-vector version of the given function, or ''.
For example: NonVectorFunction("Color3fv") = "Color3f"."""
d = GetFunctionDict()
return d[funcName].vectoralias
def VectorFunction(funcName):
"""Return the vector version of the given non-vector-valued function,
or ''.
For example: VectorVersion("Color3f") = "Color3fv"."""
d = GetFunctionDict()
if funcName in __VectorVersion.keys():
return __VectorVersion[funcName]
else:
return ''
def GetCategoryWrapper(func_name):
"""Return a C preprocessor token to test in order to wrap code.
This handles extensions.
Example: GetTestWrapper("glActiveTextureARB") = "CR_multitexture"
Example: GetTestWrapper("glBegin") = ""
"""
cat = Category(func_name)
if (cat == "1.0" or
cat == "1.1" or
cat == "1.2" or
cat == "Chromium" or
cat == "GL_chromium" or
cat == "VBox"):
return ''
elif (cat == '1.3' or
cat == '1.4' or
cat == '1.5' or
cat == '2.0' or
cat == '2.1'):
# i.e. OpenGL 1.3 or 1.4 or 1.5
return "OPENGL_VERSION_" + string.replace(cat, ".", "_")
else:
assert cat != ''
return string.replace(cat, "GL_", "")
def CanCompile(funcName):
"""Return 1 if the function can be compiled into display lists, else 0."""
props = Properties(funcName)
if ("nolist" in props or
"get" in props or
"setclient" in props):
return 0
else:
return 1
def HasChromiumProperty(funcName, propertyList):
"""Return 1 if the function or any alias has any property in the
propertyList"""
for funcAlias in [funcName, NonVectorFunction(funcName), VectorFunction(funcName)]:
if funcAlias:
props = ChromiumProps(funcAlias)
for p in propertyList:
if p in props:
return 1
return 0
def CanPack(funcName):
"""Return 1 if the function can be packed, else 0."""
return HasChromiumProperty(funcName, ['pack', 'extpack', 'expandpack'])
def HasPackOpcode(funcName):
"""Return 1 if the function has a true pack opcode"""
return HasChromiumProperty(funcName, ['pack', 'extpack'])
def SetsState(funcName):
"""Return 1 if the function sets server-side state, else 0."""
props = Properties(funcName)
# Exceptions. The first set of these functions *do* have
# server-side state-changing effects, but will be missed
# by the general query, because they either render (e.g.
# Bitmap) or do not compile into display lists (e.g. all the others).
#
# The second set do *not* have server-side state-changing
# effects, despite the fact that they do not render
# and can be compiled. They are control functions
# that are not trackable via state.
if funcName in ['Bitmap', 'DeleteTextures', 'FeedbackBuffer',
'RenderMode', 'BindBufferARB', 'DeleteFencesNV']:
return 1
elif funcName in ['ExecuteProgramNV']:
return 0
# All compilable functions that do not render and that do
# not set or use client-side state (e.g. DrawArrays, et al.), set
# server-side state.
if CanCompile(funcName) and "render" not in props and "useclient" not in props and "setclient" not in props:
return 1
# All others don't set server-side state.
return 0
def SetsClientState(funcName):
"""Return 1 if the function sets client-side state, else 0."""
props = Properties(funcName)
if "setclient" in props:
return 1
return 0
def SetsTrackedState(funcName):
"""Return 1 if the function sets state that is tracked by
the state tracker, else 0."""
# These functions set state, but aren't tracked by the state
# tracker for various reasons:
# - because the state tracker doesn't manage display lists
# (e.g. CallList and CallLists)
# - because the client doesn't have information about what
# the server supports, so the function has to go to the
# server (e.g. CompressedTexImage calls)
# - because they require a round-trip to the server (e.g.
# the CopyTexImage calls, SetFenceNV, TrackMatrixNV)
if funcName in [
'CopyTexImage1D', 'CopyTexImage2D',
'CopyTexSubImage1D', 'CopyTexSubImage2D', 'CopyTexSubImage3D',
'CallList', 'CallLists',
'CompressedTexImage1DARB', 'CompressedTexSubImage1DARB',
'CompressedTexImage2DARB', 'CompressedTexSubImage2DARB',
'CompressedTexImage3DARB', 'CompressedTexSubImage3DARB',
'SetFenceNV'
]:
return 0
# Anything else that affects client-side state is trackable.
if SetsClientState(funcName):
return 1
# Anything else that doesn't set state at all is certainly
# not trackable.
if not SetsState(funcName):
return 0
# Per-vertex state isn't tracked the way other state is
# tracked, so it is specifically excluded.
if "pervertex" in Properties(funcName):
return 0
# Everything else is fine
return 1
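# For example, SetsTrackedState('CallList') == 0 (display lists are excluded
# above), and any record carrying the 'pervertex' property also returns 0; a
# compilable, non-rendering state setter with neither exclusion returns 1.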
def UsesClientState(funcName):
"""Return 1 if the function uses client-side state, else 0."""
props = Properties(funcName)
if "pixelstore" in props or "useclient" in props:
return 1
return 0
def IsQuery(funcName):
"""Return 1 if the function returns information to the user, else 0."""
props = Properties(funcName)
if "get" in props:
return 1
return 0
def FuncGetsState(funcName):
"""Return 1 if the function gets GL state, else 0."""
d = GetFunctionDict()
props = Properties(funcName)
if "get" in props:
return 1
else:
return 0
def IsPointer(dataType):
"""Determine if the datatype is a pointer. Return 1 or 0."""
if string.find(dataType, "*") == -1:
return 0
else:
return 1
def PointerType(pointerType):
"""Return the type of a pointer.
Ex: PointerType('const GLubyte *') = 'GLubyte'
"""
t = string.split(pointerType, ' ')
if t[0] == "const":
t[0] = t[1]
return t[0]
def OpcodeName(funcName):
"""Return the C token for the opcode for the given function."""
return "CR_" + string.upper(funcName) + "_OPCODE"
def ExtendedOpcodeName(funcName):
"""Return the C token for the extended opcode for the given function."""
return "CR_" + string.upper(funcName) + "_EXTEND_OPCODE"
#======================================================================
def MakeCallString(params):
"""Given a list of (name, type, vectorSize) parameters, make a C-style
formal parameter string.
Ex return: 'index, x, y, z'.
"""
result = ''
i = 1
n = len(params)
for (name, type, vecSize) in params:
result += name
if i < n:
result = result + ', '
i += 1
#endfor
return result
#enddef
def MakeDeclarationString(params):
"""Given a list of (name, type, vectorSize) parameters, make a C-style
parameter declaration string.
Ex return: 'GLuint index, GLfloat x, GLfloat y, GLfloat z'.
"""
n = len(params)
if n == 0:
return 'void'
else:
result = ''
i = 1
for (name, type, vecSize) in params:
result = result + type + ' ' + name
if i < n:
result = result + ', '
i += 1
#endfor
return result
#endif
#enddef
def MakeDeclarationStringWithContext(ctx_macro_prefix, params):
"""Same as MakeDeclarationString, but adds a context macro
"""
n = len(params)
if n == 0:
return ctx_macro_prefix + '_ARGSINGLEDECL'
else:
result = MakeDeclarationString(params)
return ctx_macro_prefix + '_ARGDECL ' + result
#endif
#enddef
def MakePrototypeString(params):
"""Given a list of (name, type, vectorSize) parameters, make a C-style
parameter prototype string (types only).
Ex return: 'GLuint, GLfloat, GLfloat, GLfloat'.
"""
n = len(params)
if n == 0:
return 'void'
else:
result = ''
i = 1
for (name, type, vecSize) in params:
result = result + type
# see if we need a comma separator
if i < n:
result = result + ', '
i += 1
#endfor
return result
#endif
#enddef
#======================================================================
__lengths = {
'GLbyte': 1,
'GLubyte': 1,
'GLshort': 2,
'GLushort': 2,
'GLint': 4,
'GLuint': 4,
'GLfloat': 4,
'GLclampf': 4,
'GLdouble': 8,
'GLclampd': 8,
'GLenum': 4,
'GLboolean': 1,
'GLsizei': 4,
'GLbitfield': 4,
'void': 0, # XXX why?
'int': 4,
'GLintptrARB': 4, # XXX or 8 bytes?
'GLsizeiptrARB': 4, # XXX or 8 bytes?
'VBoxGLhandleARB': 4,
'GLcharARB': 1,
'uintptr_t': 4
}
def sizeof(type):
"""Return size of C datatype, in bytes."""
if not type in __lengths.keys():
print >>sys.stderr, "%s not in lengths!" % type
return __lengths[type]
#======================================================================
align_types = 1
def FixAlignment( pos, alignment ):
# if we want double-alignment take word-alignment instead,
# yes, this is super-lame, but we know what we are doing
if alignment > 4:
alignment = 4
if align_types and alignment and ( pos % alignment ):
pos += alignment - ( pos % alignment )
return pos
def WordAlign( pos ):
return FixAlignment( pos, 4 )
def PointerSize():
return 8 # Leave room for a 64 bit pointer
def PacketLength( params ):
len = 0
for (name, type, vecSize) in params:
if IsPointer(type):
size = PointerSize()
else:
assert string.find(type, "const") == -1
size = sizeof(type)
len = FixAlignment( len, size ) + size
len = WordAlign( len )
return len
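# Worked example (illustrative parameter list, not from the spec): for
# params = [('x', 'GLfloat', 0), ('ptr', 'const GLubyte *', 0)] the float
# contributes 4 bytes and the pointer slot is reserved at PointerSize() == 8
# (its alignment is capped at 4 above), so PacketLength(params) == 12.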
#======================================================================
__specials = {}
def LoadSpecials( filename ):
table = {}
try:
f = open( filename, "r" )
except:
# try:
f = open( sys.argv[2]+"/"+filename, "r")
# except:
# __specials[filename] = {}
# print "%s not present" % filename
# return {}
for line in f.readlines():
line = string.strip(line)
if line == "" or line[0] == '#':
continue
table[line] = 1
__specials[filename] = table
return table
def FindSpecial( table_file, glName ):
table = {}
filename = table_file + "_special"
try:
table = __specials[filename]
except KeyError:
table = LoadSpecials( filename )
try:
if (table[glName] == 1):
return 1
else:
return 0 #should never happen
except KeyError:
return 0
def AllSpecials( table_file ):
    """Return a sorted list of the special functions named in table_file."""
    filename = table_file + "_special"
    table = {}
    try:
        table = __specials[filename]
    except KeyError:
        table = LoadSpecials( filename )
    keys = table.keys()
    keys.sort()
    return keys
def NumSpecials( table_file ):
filename = table_file + "_special"
table = {}
try:
table = __specials[filename]
except KeyError:
table = LoadSpecials(filename)
return len(table.keys())
def PrintRecord(record):
argList = MakeDeclarationString(record.params)
if record.category == "Chromium":
prefix = "cr"
else:
prefix = "gl"
print '%s %s%s(%s);' % (record.returnType, prefix, record.name, argList )
if len(record.props) > 0:
print ' /* %s */' % string.join(record.props, ' ')
#ProcessSpecFile("APIspec.txt", PrintRecord)
|
{
"content_hash": "66e9fef1dde895ff03e95ebe978460b3",
"timestamp": "",
"source": "github",
"line_count": 777,
"max_line_length": 109,
"avg_line_length": 24.207207207207208,
"alnum_prop": 0.6485724918921792,
"repo_name": "egraba/vbox_openbsd",
"id": "bce38f1886961e4c60a8a97408fecbc39cd5597a",
"size": "18952",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "VirtualBox-5.0.0/src/VBox/GuestHost/OpenGL/glapi_parser/apiutil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "88714"
},
{
"name": "Assembly",
"bytes": "4303680"
},
{
"name": "AutoIt",
"bytes": "2187"
},
{
"name": "Batchfile",
"bytes": "95534"
},
{
"name": "C",
"bytes": "192632221"
},
{
"name": "C#",
"bytes": "64255"
},
{
"name": "C++",
"bytes": "83842667"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "6041"
},
{
"name": "CSS",
"bytes": "26756"
},
{
"name": "D",
"bytes": "41844"
},
{
"name": "DIGITAL Command Language",
"bytes": "56579"
},
{
"name": "DTrace",
"bytes": "1466646"
},
{
"name": "GAP",
"bytes": "350327"
},
{
"name": "Groff",
"bytes": "298540"
},
{
"name": "HTML",
"bytes": "467691"
},
{
"name": "IDL",
"bytes": "106734"
},
{
"name": "Java",
"bytes": "261605"
},
{
"name": "JavaScript",
"bytes": "80927"
},
{
"name": "Lex",
"bytes": "25122"
},
{
"name": "Logos",
"bytes": "4941"
},
{
"name": "Makefile",
"bytes": "426902"
},
{
"name": "Module Management System",
"bytes": "2707"
},
{
"name": "NSIS",
"bytes": "177212"
},
{
"name": "Objective-C",
"bytes": "5619792"
},
{
"name": "Objective-C++",
"bytes": "81554"
},
{
"name": "PHP",
"bytes": "58585"
},
{
"name": "Pascal",
"bytes": "69941"
},
{
"name": "Perl",
"bytes": "240063"
},
{
"name": "PowerShell",
"bytes": "10664"
},
{
"name": "Python",
"bytes": "9094160"
},
{
"name": "QMake",
"bytes": "3055"
},
{
"name": "R",
"bytes": "21094"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Shell",
"bytes": "1460572"
},
{
"name": "SourcePawn",
"bytes": "4139"
},
{
"name": "TypeScript",
"bytes": "142342"
},
{
"name": "Visual Basic",
"bytes": "7161"
},
{
"name": "XSLT",
"bytes": "1034475"
},
{
"name": "Yacc",
"bytes": "22312"
}
],
"symlink_target": ""
}
|
from flask import g
from backend.app import app, db
from backend.routes import auth_required, dump_with_schema, load_with_schema
from backend.schema import UserSchema
@app.route('/user')
@auth_required
@dump_with_schema(UserSchema)
def get_user():
return g.current_user
@app.route('/user', methods=['PATCH'])
@auth_required
@load_with_schema(UserSchema)
@dump_with_schema(UserSchema)
def update_user(data):
user = g.current_user
    if data.get('password'):
user.change_password(data['password'])
db.session.commit()
return user
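# Illustrative request sketch (the header format depends on auth_required,
# which is defined elsewhere in this skeleton):
#   PATCH /user  {"password": "new-secret"}
# responds with the updated user serialized through UserSchema.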
|
{
"content_hash": "921e24d9509e7791d3f2f812cc9757e2",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 20.62962962962963,
"alnum_prop": 0.7163375224416517,
"repo_name": "ianunruh/flask-api-skeleton",
"id": "20c21af3b7c7c8b3bd5123edf6356cfeea36c447",
"size": "557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/routes/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "27107"
}
],
"symlink_target": ""
}
|
import torch
import torch.nn.functional as function
import torch.nn as nn
class AlexNet(nn.Module):
def __init__(self, num_classes = 10):
super(AlexNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=11, stride=4, padding=2)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=192, padding=2, kernel_size=5)
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(in_channels=192, out_channels=384, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(in_channels=384, out_channels=256, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1)
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2)
self.dropout1 = nn.Dropout()
self.fc1 = nn.Linear(in_features=256 * 6 * 6, out_features=4096)
self.dropout2 = nn.Dropout()
self.fc2 = nn.Linear(in_features=4096, out_features=4096)
self.fc3 = nn.Linear(in_features=4096, out_features=num_classes)
def forward(self, x):
x = self.pool1(function.relu(self.conv1(x), inplace=True))
x = self.pool2(function.relu(self.conv2(x), inplace=True))
x = function.relu(self.conv3(x), inplace=True)
x = function.relu(self.conv4(x), inplace=True)
x = self.pool3(function.relu(self.conv5(x), inplace=True))
x = x.view(x.size(0), 256*6*6)
x = function.relu(self.fc1(self.dropout1(x)), inplace=True)
x = function.relu(self.fc2(self.dropout2(x)), inplace=True)
x = self.fc3(x)
return x
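# Illustrative smoke test (not part of the model definition); the classic
# AlexNet geometry expects 224x224 inputs, which this layer stack assumes:
#   model = AlexNet(num_classes=10)
#   out = model(torch.randn(1, 3, 224, 224))
#   assert out.shape == (1, 10)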
|
{
"content_hash": "4bbff2b44c510b23d4f1e008d5957144",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 102,
"avg_line_length": 50.26470588235294,
"alnum_prop": 0.6454066705675834,
"repo_name": "yt4766269/pytorch_zoo",
"id": "f664f3f87570fa5317152b70f0e261cf60d63c1e",
"size": "1709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AlexNet/Alexnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20186"
}
],
"symlink_target": ""
}
|
"""
Django settings for click2pass project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
from secret_settings import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'service'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'click2pass.urls'
WSGI_APPLICATION = 'click2pass.wsgi.application'
TEMPLATE_DIRS = (
'/home/iwagaki/work/django/click2pass/service/templates',
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
#LANGUAGE_CODE = 'utf-8'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# logger
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
},
}
|
{
"content_hash": "0e72961fdc612b9a6275f9c56345ab60",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 71,
"avg_line_length": 22.60185185185185,
"alnum_prop": 0.6759524784924211,
"repo_name": "iwagaki/click2pass",
"id": "6068d7a1bef76d43817bd2210a6f24976642e98f",
"size": "2441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "click2pass/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "12761"
},
{
"name": "JavaScript",
"bytes": "4260"
},
{
"name": "Python",
"bytes": "7435"
},
{
"name": "Shell",
"bytes": "37"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from sentry.models import ProjectRedirect
from sentry.testutils import TestCase
class ProjectRedirectTest(TestCase):
def test_record(self):
org = self.create_organization()
project = self.create_project(organization=org)
ProjectRedirect.record(project, "old_slug")
assert ProjectRedirect.objects.filter(redirect_slug="old_slug", project=project).exists()
# Recording the same historic slug on a different project updates the
# project pointer.
project2 = self.create_project(organization=org)
ProjectRedirect.record(project2, "old_slug")
assert not ProjectRedirect.objects.filter(
redirect_slug="old_slug", project=project
).exists()
assert ProjectRedirect.objects.filter(redirect_slug="old_slug", project=project2).exists()
|
{
"content_hash": "eb25f19356643f0ba1a2e39b8ce7396e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 98,
"avg_line_length": 36.458333333333336,
"alnum_prop": 0.7062857142857143,
"repo_name": "beeftornado/sentry",
"id": "fee740dea91c79fae01dae582da44c0943d59f0d",
"size": "900",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/sentry/models/test_projectredirect.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
}
|
"""
A PTransform that provides an unbounded, streaming source of empty byte arrays.
This can only be used with the flink runner.
"""
# pytype: skip-file
from __future__ import absolute_import
import json
from typing import Any
from typing import Dict
from apache_beam import PTransform
from apache_beam import Windowing
from apache_beam import pvalue
from apache_beam.transforms.window import GlobalWindows
class FlinkStreamingImpulseSource(PTransform):
URN = "flink:transform:streaming_impulse:v1"
config = {} # type: Dict[str, Any]
def expand(self, pbegin):
assert isinstance(pbegin, pvalue.PBegin), (
'Input to transform must be a PBegin but found %s' % pbegin)
return pvalue.PCollection(pbegin.pipeline, is_bounded=False)
def get_windowing(self, inputs):
return Windowing(GlobalWindows())
def infer_output_type(self, unused_input_type):
return bytes
def to_runner_api_parameter(self, context):
    assert isinstance(self, FlinkStreamingImpulseSource), \
        "expected instance of FlinkStreamingImpulseSource, but got %s" % self.__class__
return (self.URN, json.dumps(self.config))
def set_interval_ms(self, interval_ms):
"""Sets the interval (in milliseconds) between messages in the stream.
"""
self.config["interval_ms"] = interval_ms
return self
def set_message_count(self, message_count):
"""If non-zero, the stream will produce only this many total messages.
Otherwise produces an unbounded number of messages.
"""
self.config["message_count"] = message_count
return self
@staticmethod
@PTransform.register_urn("flink:transform:streaming_impulse:v1", None)
def from_runner_api_parameter(spec_parameter, _unused_context):
config = json.loads(spec_parameter)
instance = FlinkStreamingImpulseSource()
if "interval_ms" in config:
instance.set_interval_ms(config["interval_ms"])
if "message_count" in config:
instance.set_message_count(config["message_count"])
return instance
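# Illustrative usage sketch (the pipeline options are assumptions; as the
# module docstring notes, this transform only runs on the Flink runner):
#   p = beam.Pipeline(options=PipelineOptions(streaming=True))
#   _ = (p
#        | FlinkStreamingImpulseSource().set_interval_ms(500).set_message_count(10)
#        | beam.Map(lambda _: b''))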
|
{
"content_hash": "2e4f248abf5254ea749c093a60887074",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 80,
"avg_line_length": 31.4375,
"alnum_prop": 0.7261431411530815,
"repo_name": "iemejia/incubator-beam",
"id": "0dac8512fea22ac2993eaf64af068637f53f8cc0",
"size": "2797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/flink/flink_streaming_impulse_source.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
}
|
import a10_neutron_lbaas.tests.unit.test_base as test_base
class UnitTestBase(test_base.UnitTestBase):
def __init__(self, *args):
super(UnitTestBase, self).__init__(*args)
self.version = 'v2'
def print_mocks(self):
super(UnitTestBase, self).print_mocks()
print("NEUTRON ", self.a.neutron.mock_calls)
|
{
"content_hash": "6e991319917cecee08f4036cb605d772",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 58,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.6482558139534884,
"repo_name": "dougwig/a10-neutron-lbaas",
"id": "556ab9abbb0b404ccb689704713d3eb521353ccd",
"size": "974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "a10_neutron_lbaas/tests/unit/v2/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "324867"
},
{
"name": "Shell",
"bytes": "6670"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management.libraries.functions import check_process_status
from resource_management.libraries.script import Script
from resource_management.libraries.functions import format
from resource_management.libraries.functions import stack_select
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
from storm import storm
from service import service
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_JAAS_CONF
from setup_ranger_storm import setup_ranger_storm
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from resource_management.core.resources.service import Service
class Nimbus(Script):
def install(self, env):
self.install_packages(env)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
storm("nimbus")
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class NimbusDefault(Nimbus):
def pre_upgrade_restart(self, env, upgrade_type=None):
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
stack_select.select_packages(params.version)
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env)
setup_ranger_storm(upgrade_type=upgrade_type)
service("nimbus", action="start")
if "SUPERVISOR" not in params.config['localComponents']:
service("logviewer", action="start")
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
service("nimbus", action="stop")
if "SUPERVISOR" not in params.config['localComponents']:
service("logviewer", action="stop")
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.pid_nimbus)
def get_log_folder(self):
import params
return params.log_dir
def get_user(self):
import params
return params.storm_user
def get_pid_files(self):
import status_params
return [status_params.pid_nimbus]
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class NimbusWindows(Nimbus):
def start(self, env):
import status_params
env.set_params(status_params)
Service(status_params.nimbus_win_service_name, action="start")
def stop(self, env):
import status_params
env.set_params(status_params)
Service(status_params.nimbus_win_service_name, action="stop")
def status(self, env):
import status_params
from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
env.set_params(status_params)
check_windows_service_status(status_params.nimbus_win_service_name)
if __name__ == "__main__":
Nimbus().execute()
|
{
"content_hash": "d2eb7c491e23681ddf18c03a047bf5b9",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 106,
"avg_line_length": 33.526315789473685,
"alnum_prop": 0.7616431187859759,
"repo_name": "radicalbit/ambari",
"id": "1d000bfbf900e422e5eb4798ab728c0744bb898a",
"size": "3844",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/nimbus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "42212"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "1287531"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "88056"
},
{
"name": "HTML",
"bytes": "5098825"
},
{
"name": "Java",
"bytes": "29006663"
},
{
"name": "JavaScript",
"bytes": "17274453"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLSQL",
"bytes": "2160"
},
{
"name": "PLpgSQL",
"bytes": "314333"
},
{
"name": "PowerShell",
"bytes": "2087991"
},
{
"name": "Python",
"bytes": "14584206"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "14478"
},
{
"name": "SQLPL",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "741459"
},
{
"name": "Vim script",
"bytes": "5813"
}
],
"symlink_target": ""
}
|
'''Process G6 JSON files into elasticsearch
This version reads G6 JSON from disk or DM API.
Usage:
process-g6-into-elastic-search.py <es_endpoint> <dir_or_endpoint> [<token>]
Arguments:
es_endpoint Full ES index URL
dir_or_endpoint Directory path to import or an API URL if token is given
token Digital Marketplace API token
'''
import os
import sys
import json
import urllib2
def post_to_es(es_endpoint, json_data):
handler = urllib2.HTTPHandler()
opener = urllib2.build_opener(handler)
if not es_endpoint.endswith('/'):
es_endpoint += '/'
request = urllib2.Request(es_endpoint + str(json_data['id']),
data=json.dumps(json_data))
request.add_header("Content-Type", 'application/json')
print request.get_full_url()
# print request.get_data()
try:
connection = opener.open(request)
except urllib2.HTTPError, e:
connection = e
print connection
    # NB: Elasticsearch returns 200 when updating an existing document but 201
    # when a document is first created, so 201 should arguably be treated as
    # success here as well.
    if connection.code == 200:
data = connection.read()
print str(connection.code) + " " + data
else:
print "connection.code = " + str(connection.code)
def request_services(endpoint, token):
handler = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(handler)
page_url = endpoint
while page_url:
print "requesting {}".format(page_url)
request = urllib2.Request(page_url)
request.add_header("Authorization", "Bearer {}".format(token))
response = opener.open(request).read()
data = json.loads(response)
for service in data["services"]:
yield service
page_url = filter(lambda l: l['rel'] == 'next', data['links'])
if page_url:
page_url = page_url[0]['href']
def process_json_files_in_directory(dirname):
for filename in os.listdir(dirname):
with open(os.path.join(dirname, filename)) as f:
data = json.loads(f.read())
print "doing " + filename
yield data
def main():
if len(sys.argv) == 4:
es_endpoint, endpoint, token = sys.argv[1:]
for data in request_services(endpoint, token):
post_to_es(es_endpoint, data)
elif len(sys.argv) == 3:
es_endpoint, listing_dir = sys.argv[1:]
for data in process_json_files_in_directory(listing_dir):
post_to_es(es_endpoint, data)
else:
print __doc__
if __name__ == '__main__':
main()
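# Example invocations (the endpoints and token are placeholders):
#   python process-g6-into-elastic-search.py http://localhost:9200/g6/services listings/
#   python process-g6-into-elastic-search.py http://localhost:9200/g6/services \
#       https://api.example.com/services my-api-token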
|
{
"content_hash": "a8e029629ec82f3dd0147f632bc2c446",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 28.144444444444446,
"alnum_prop": 0.6123174101855507,
"repo_name": "RichardKnop/digitalmarketplace-search-api",
"id": "6fcc100b4a478ac6eaad15e0c6137cb778545f0d",
"size": "2551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/index-g6-in-elasticsearch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6714"
},
{
"name": "JavaScript",
"bytes": "3383"
},
{
"name": "Python",
"bytes": "71192"
},
{
"name": "Shell",
"bytes": "2015"
}
],
"symlink_target": ""
}
|
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <michi@uiae.at>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}

def prepstyle(filename):
    zin = zipfile.ZipFile(filename)
    styles = zin.read("styles.xml")

    root = etree.fromstring(styles)
    for el in root.xpath("//style:page-layout-properties",
                         namespaces=NAMESPACES):
        # Copy the keys first: deleting from el.attrib while iterating
        # over it directly is unsafe.
        for attr in list(el.attrib):
            if attr.startswith("{%s}" % NAMESPACES["fo"]):
                del el.attrib[attr]

    tempname = mkstemp()
    # Zip archives are binary; open the temporary file in binary mode.
    zout = zipfile.ZipFile(os.fdopen(tempname[0], "wb"), "w",
                           zipfile.ZIP_DEFLATED)

    for item in zin.infolist():
        if item.filename == "styles.xml":
            zout.writestr(item, etree.tostring(root))
        else:
            zout.writestr(item, zin.read(item.filename))

    zout.close()
    zin.close()
    shutil.move(tempname[1], filename)

def main():
    args = sys.argv[1:]
    if len(args) != 1:
        print >> sys.stderr, __doc__
        print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0]
        sys.exit(1)
    filename = args[0]
    prepstyle(filename)


if __name__ == '__main__':
    main()

# vim:tw=78:sw=4:sts=4:et:
|
{
"content_hash": "b8b5081826b594c01ccee1efe147838d",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 24.672131147540984,
"alnum_prop": 0.6132890365448505,
"repo_name": "Titulacion-Sistemas/PythonTitulacion-EV",
"id": "30520fdf285fb9497590ad9f51a40deef78f292c",
"size": "1746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/rst2odt_prepstyles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "2117"
},
{
"name": "C",
"bytes": "469338"
},
{
"name": "C++",
"bytes": "93276"
},
{
"name": "CSS",
"bytes": "173812"
},
{
"name": "JavaScript",
"bytes": "203291"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "17198855"
},
{
"name": "Shell",
"bytes": "2237"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "Visual Basic",
"bytes": "904"
},
{
"name": "XSLT",
"bytes": "154751"
}
],
"symlink_target": ""
}
|
BROWSER = 'chrome'
START_URL = 'https://webtools-test.kent.ac.uk/site-editor'
SITE_EDITOR_USER = ''
SITE_EDITOR_PASSWORD = ''
DEFAULT_SITE_ID = 1
|
{
"content_hash": "5fa2b26d92ee53dcbced31c774344eae",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 58,
"avg_line_length": 18.5,
"alnum_prop": 0.6891891891891891,
"repo_name": "unikent/astro",
"id": "d4ca7e736d78fe7cc9011130d974cc1d04d7b5c1",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/Functional/config.sample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Blade",
"bytes": "14293"
},
{
"name": "HTML",
"bytes": "252"
},
{
"name": "JavaScript",
"bytes": "173017"
},
{
"name": "PHP",
"bytes": "791051"
},
{
"name": "Python",
"bytes": "148"
},
{
"name": "RobotFramework",
"bytes": "15650"
},
{
"name": "Shell",
"bytes": "8659"
},
{
"name": "Vue",
"bytes": "214886"
}
],
"symlink_target": ""
}
|
import rospy
from std_msgs.msg import String
from ihmc_msgs.msg import ChestOrientationPacketMessage

import lcm
from bot_core.pose_t import pose_t

lc = lcm.LCM()
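
# Bridge: subscribe to the IHMC chest orientation topic on ROS and republish
# each quaternion as a bot_core pose_t message on the LCM channel
# POSE_BODY_ALT.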

def callback(m):
    rospy.loginfo(rospy.get_caller_id() + " I heard %f", m.orientation.x)
    # rospy.loginfo(rospy.get_caller_id() + " I heard %s", data.data)
    msg = pose_t()
    msg.utime = 0
    msg.pos = [0, 0, 0]
    # ROS quaternions expose x, y, z, w fields; pose_t is assumed to expect
    # [w, x, y, z], hence the reordering.
    msg.orientation = [m.orientation.w, m.orientation.x, m.orientation.y,
                       m.orientation.z]
    lc.publish('POSE_BODY_ALT', msg.encode())

def listener():
    # In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. The
    # anonymous=True flag means that rospy will choose a unique
    # name for our 'listener' node so that multiple listeners can
    # run simultaneously.
    rospy.init_node('listener', anonymous=True)
    rospy.Subscriber("/ihmc_ros/valkyrie/output/chest_orientation",
                     ChestOrientationPacketMessage, callback)

    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()


if __name__ == '__main__':
    listener()
{
"content_hash": "aee3ae5620dec534d3dfd37e1ed726a3",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 108,
"avg_line_length": 30.405405405405407,
"alnum_prop": 0.6871111111111111,
"repo_name": "openhumanoids/oh-distro",
"id": "8da3482c28ef951e67978d9991686043fb5b06fc",
"size": "1147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catkin_ws/scripts/ros2lcm_ihmc.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "131738"
},
{
"name": "C++",
"bytes": "2773796"
},
{
"name": "CMake",
"bytes": "1099155"
},
{
"name": "GLSL",
"bytes": "5320"
},
{
"name": "Java",
"bytes": "233603"
},
{
"name": "JavaScript",
"bytes": "232"
},
{
"name": "M",
"bytes": "3971"
},
{
"name": "Makefile",
"bytes": "82095"
},
{
"name": "Matlab",
"bytes": "1946915"
},
{
"name": "Mercury",
"bytes": "1487"
},
{
"name": "Objective-C",
"bytes": "10657"
},
{
"name": "Pascal",
"bytes": "3353"
},
{
"name": "Perl",
"bytes": "18915"
},
{
"name": "Python",
"bytes": "378988"
},
{
"name": "Shell",
"bytes": "35631"
},
{
"name": "XSLT",
"bytes": "73426"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals

import os
import platform
import shutil
import six
import tempfile
import unittest

import fs
from fs.osfs import OSFS

if platform.system() != "Windows":

    @unittest.skipIf(platform.system() == "Darwin", "Bad unicode not possible on OSX")
    class TestEncoding(unittest.TestCase):

        TEST_FILENAME = b"foo\xb1bar"
        # fsdecode throws error on Windows
        TEST_FILENAME_UNICODE = fs.fsdecode(TEST_FILENAME)

        def setUp(self):
            dir_path = self.dir_path = tempfile.mkdtemp()
            if six.PY2:
                with open(os.path.join(dir_path, self.TEST_FILENAME), "wb") as f:
                    f.write(b"baz")
            else:
                with open(
                    os.path.join(dir_path, self.TEST_FILENAME_UNICODE), "wb"
                ) as f:
                    f.write(b"baz")

        def tearDown(self):
            shutil.rmtree(self.dir_path)

        def test_open(self):
            with OSFS(self.dir_path) as test_fs:
                self.assertTrue(test_fs.exists(self.TEST_FILENAME_UNICODE))
                self.assertTrue(test_fs.isfile(self.TEST_FILENAME_UNICODE))
                self.assertFalse(test_fs.isdir(self.TEST_FILENAME_UNICODE))
                with test_fs.open(self.TEST_FILENAME_UNICODE, "rb") as f:
                    self.assertEqual(f.read(), b"baz")
                self.assertEqual(test_fs.readtext(self.TEST_FILENAME_UNICODE), "baz")
                test_fs.remove(self.TEST_FILENAME_UNICODE)
                self.assertFalse(test_fs.exists(self.TEST_FILENAME_UNICODE))

        def test_listdir(self):
            with OSFS(self.dir_path) as test_fs:
                dirlist = test_fs.listdir("/")
                self.assertEqual(dirlist, [self.TEST_FILENAME_UNICODE])
                self.assertEqual(test_fs.readtext(dirlist[0]), "baz")

        def test_scandir(self):
            with OSFS(self.dir_path) as test_fs:
                for info in test_fs.scandir("/"):
                    self.assertIsInstance(info.name, six.text_type)
                    self.assertEqual(info.name, self.TEST_FILENAME_UNICODE)
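
# For illustration (behavior sketched from CPython's os.fsdecode, which
# fs.fsdecode is assumed to mirror): on a UTF-8 Linux filesystem the invalid
# byte is surrogate-escaped, so b"foo\xb1bar" decodes to u"foo\udcb1bar" and
# round-trips back to the original bytes when the file is opened.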
|
{
"content_hash": "3e5e3a93efa4035281896755cc0fe2f2",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 86,
"avg_line_length": 37.73684210526316,
"alnum_prop": 0.5801952580195258,
"repo_name": "PyFilesystem/pyfilesystem2",
"id": "6791e39682cf78f76dac0149bd34cc55b6ece3e3",
"size": "2151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_encoding.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "692"
},
{
"name": "Python",
"bytes": "729117"
}
],
"symlink_target": ""
}
|