| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
import sys
import ctypes
try:
    from collections.abc import MutableSequence
except ImportError:  # Python 2
    from collections import MutableSequence
from functools import total_ordering
from ._compat import PY2, string_types
from ._fsnative import is_win, _fsn2legacy, path2fsn
from . import _winapi as winapi
def _get_win_argv():
"""Returns a unicode argv under Windows and standard sys.argv otherwise
Returns:
List[`fsnative`]
"""
assert is_win
argc = ctypes.c_int()
try:
argv = winapi.CommandLineToArgvW(
winapi.GetCommandLineW(), ctypes.byref(argc))
except WindowsError:
return []
if not argv:
return []
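    # Slice off any leading interpreter arguments (the Python executable and
    # interpreter flags) so the result lines up with sys.argv, which starts
    # at the script name.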
res = argv[max(0, argc.value - len(sys.argv)):argc.value]
winapi.LocalFree(argv)
return res
@total_ordering
class Argv(MutableSequence):
    """List[`fsnative`]: Like `sys.argv` but contains unicode
    entries under Windows + Python 2.
Any changes made will be forwarded to `sys.argv`.
"""
def __init__(self):
if PY2 and is_win:
self._argv = _get_win_argv()
else:
self._argv = sys.argv
def __getitem__(self, index):
return self._argv[index]
def __setitem__(self, index, value):
if isinstance(value, string_types):
value = path2fsn(value)
self._argv[index] = value
if sys.argv is not self._argv:
try:
if isinstance(value, string_types):
sys.argv[index] = _fsn2legacy(value)
else:
sys.argv[index] = [_fsn2legacy(path2fsn(v)) for v in value]
except IndexError:
pass
def __delitem__(self, index):
del self._argv[index]
try:
del sys.argv[index]
except IndexError:
pass
def __eq__(self, other):
return self._argv == other
def __lt__(self, other):
return self._argv < other
def __len__(self):
return len(self._argv)
def __repr__(self):
return repr(self._argv)
def insert(self, index, value):
value = path2fsn(value)
self._argv.insert(index, value)
if sys.argv is not self._argv:
sys.argv.insert(index, _fsn2legacy(value))
argv = Argv()
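# A minimal usage sketch (hypothetical, not part of the module): mutating this
# `argv` forwards the change to `sys.argv`, converting to the legacy
# representation where needed.
#
#     from senf import argv
#     argv[0] = u"myprog"        # also sets sys.argv[0]
#     argv.insert(1, u"--flag")  # also inserted into sys.argv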
|
{
"content_hash": "b8b61a5247d09b6b5b202fdfce9c4ff8",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 79,
"avg_line_length": 23.4,
"alnum_prop": 0.5704003598740441,
"repo_name": "lazka/senf",
"id": "c335b598faeecb2b155a501b63dea90d7a27947b",
"size": "3341",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "senf/_argv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108302"
},
{
"name": "Shell",
"bytes": "2839"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from timepiece.tests import factories
from timepiece.tests.base import ViewTestMixin
class TestQuickSearchView(ViewTestMixin, TestCase):
url_name = 'quick_search'
template_name = 'timepiece/quick_search.html'
def setUp(self):
super(TestQuickSearchView, self).setUp()
self.user = factories.User()
self.login_user(self.user)
def test_search_user(self):
user = factories.User()
response = self._post(data={
'quick_search_1': 'user-{0}'.format(user.pk),
})
self.assertRedirectsNoFollow(response, user.get_absolute_url())
def test_search_no_such_user(self):
response = self._post(data={
'quick_search_1': 'user-12345',
})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, self.template_name)
self.assertFalse(response.context['form'].is_valid())
def test_search_business(self):
business = factories.Business()
response = self._post(data={
'quick_search_1': 'business-{0}'.format(business.pk),
})
self.assertRedirectsNoFollow(response, business.get_absolute_url())
def test_search_no_such_business(self):
response = self._post(data={
'quick_search_1': 'business-12345',
})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, self.template_name)
self.assertFalse(response.context['form'].is_valid())
def test_search_project(self):
project = factories.Project()
response = self._post(data={
'quick_search_1': 'project-{0}'.format(project.pk),
})
self.assertRedirectsNoFollow(response, project.get_absolute_url())
def test_search_no_such_project(self):
response = self._post(data={
'quick_search_1': 'project-12345',
})
        self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, self.template_name)
self.assertFalse(response.context['form'].is_valid())
def test_malformed_search(self):
response = self._post(data={
'quick_search_1': 'project no dash 12345',
})
        self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, self.template_name)
self.assertFalse(response.context['form'].is_valid())
def test_bad_result_type(self):
response = self._post(data={
'quick_search_1': 'hello-12345',
})
        self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, self.template_name)
self.assertFalse(response.context['form'].is_valid())
def test_no_search(self):
response = self._post(data={
'quick_search_1': '',
})
        self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, self.template_name)
self.assertFalse(response.context['form'].is_valid())
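# The invalid-search tests above repeat one assertion pattern; a hedged
# refactoring sketch (hypothetical helper, not part of the original suite):
#
#     def assert_invalid_search(self, term):
#         response = self._post(data={'quick_search_1': term})
#         self.assertEqual(response.status_code, 200)
#         self.assertTemplateUsed(response, self.template_name)
#         self.assertFalse(response.context['form'].is_valid())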
|
{
"content_hash": "7f429de18b7717ed7985d7ff6b15b789",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 75,
"avg_line_length": 36.21686746987952,
"alnum_prop": 0.6284098469727212,
"repo_name": "josesanch/django-timepiece",
"id": "6840ed14505a6ee181de0e3c634a036653c39544",
"size": "3006",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "timepiece/crm/tests/test_quick_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24547"
},
{
"name": "JavaScript",
"bytes": "203329"
},
{
"name": "Makefile",
"bytes": "5604"
},
{
"name": "Python",
"bytes": "594796"
},
{
"name": "Shell",
"bytes": "5116"
}
],
"symlink_target": ""
}
|
"""
raven.utils.serializer.base
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import itertools
import types
from raven.utils.compat import text_type, binary_type, string_types, iteritems, \
class_types, PY2, PY3
from raven.utils.encoding import to_unicode
from .manager import manager as serialization_manager
__all__ = ('Serializer',)
def has_sentry_metadata(value):
try:
return callable(value.__getattribute__('__sentry__'))
except Exception:
return False
class Serializer(object):
types = ()
def __init__(self, manager):
self.manager = manager
def can(self, value):
"""
Given ``value``, return a boolean describing whether this
serializer can operate on the given type
"""
return isinstance(value, self.types)
def serialize(self, value, **kwargs):
"""
Given ``value``, coerce into a JSON-safe type.
"""
return value
def recurse(self, value, max_depth=6, _depth=0, **kwargs):
"""
Given ``value``, recurse (using the parent serializer) to handle
coercing of newly defined values.
"""
string_max_length = kwargs.get('string_max_length', None)
_depth += 1
if _depth >= max_depth:
try:
value = text_type(repr(value))[:string_max_length]
except Exception as e:
import traceback
traceback.print_exc()
self.manager.logger.exception(e)
return text_type(type(value))
return self.manager.transform(value, max_depth=max_depth,
_depth=_depth, **kwargs)
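# Illustration of the depth cut-off above: once _depth reaches max_depth, the
# value is replaced by its repr() (truncated to string_max_length) and
# transformed as a plain string, so deeply nested structures degrade to
# strings instead of recursing without bound.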
class IterableSerializer(Serializer):
types = (tuple, list, set, frozenset)
def serialize(self, value, **kwargs):
list_max_length = kwargs.get('list_max_length') or float('inf')
return tuple(
self.recurse(o, **kwargs)
for n, o
in itertools.takewhile(lambda x: x[0] < list_max_length,
enumerate(value))
)
class DictSerializer(Serializer):
types = (dict,)
def make_key(self, key):
if not isinstance(key, string_types):
return to_unicode(key)
return key
def serialize(self, value, **kwargs):
list_max_length = kwargs.get('list_max_length') or float('inf')
return dict(
(self.make_key(self.recurse(k, **kwargs)), self.recurse(v, **kwargs))
for n, (k, v)
in itertools.takewhile(lambda x: x[0] < list_max_length, enumerate(
iteritems(value)))
)
class UnicodeSerializer(Serializer):
types = (text_type,)
def serialize(self, value, **kwargs):
# try to return a reasonable string that can be decoded
# correctly by the server so it doesn't show up as \uXXX for each
# unicode character
# e.g. we want the output to be like: "u'רונית מגן'"
string_max_length = kwargs.get('string_max_length', None)
return repr(text_type('%s')) % (value[:string_max_length],)
class StringSerializer(Serializer):
types = (binary_type,)
def serialize(self, value, **kwargs):
string_max_length = kwargs.get('string_max_length', None)
if PY3:
return repr(value[:string_max_length])
try:
# Python2 madness: let's try to recover from developer's issues
# Try to process the string as if it was a unicode.
return "'" + value.decode('utf8')[:string_max_length] \
.encode('utf8') + "'"
except UnicodeDecodeError:
pass
return repr(value[:string_max_length])
class TypeSerializer(Serializer):
types = class_types
def can(self, value):
return not super(TypeSerializer, self).can(value) \
and has_sentry_metadata(value)
def serialize(self, value, **kwargs):
return self.recurse(value.__sentry__(), **kwargs)
class BooleanSerializer(Serializer):
types = (bool,)
def serialize(self, value, **kwargs):
return bool(value)
class FloatSerializer(Serializer):
types = (float,)
def serialize(self, value, **kwargs):
return float(value)
class IntegerSerializer(Serializer):
types = (int,)
def serialize(self, value, **kwargs):
return int(value)
class FunctionSerializer(Serializer):
types = (types.FunctionType,)
def serialize(self, value, **kwargs):
return '<function %s from %s at 0x%x>' % (
value.__name__, value.__module__, id(value))
if PY2:
class LongSerializer(Serializer):
types = (long,) # noqa
def serialize(self, value, **kwargs):
return long(value) # noqa
# register all serializers, order matters
serialization_manager.register(IterableSerializer)
serialization_manager.register(DictSerializer)
serialization_manager.register(UnicodeSerializer)
serialization_manager.register(StringSerializer)
serialization_manager.register(TypeSerializer)
serialization_manager.register(BooleanSerializer)
serialization_manager.register(FloatSerializer)
serialization_manager.register(IntegerSerializer)
serialization_manager.register(FunctionSerializer)
if PY2:
serialization_manager.register(LongSerializer)
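# A minimal usage sketch (`transform` is assumed to be the public entry point
# exposed by this package's manager module): the manager walks the registered
# serializers in the order above and uses the first whose can() accepts the
# value, which is why registration order matters.
#
#     from raven.utils.serializer import transform
#     transform({"key": [1, 2.5, u"text"]})
#     # -> {u'key': (1, 2.5, u'text')}  (lists become tuples via IterableSerializer)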
|
{
"content_hash": "48a1c58bba1b8bc4bad18924e1e9c180",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 81,
"avg_line_length": 29.164893617021278,
"alnum_prop": 0.618639430968448,
"repo_name": "mottosso/mindbender-setup",
"id": "06753b228f15a67d5507ce9f4f1693730030df00",
"size": "5515",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "bin/pythonpath/raven/utils/serializer/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3519"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "JavaScript",
"bytes": "13629"
},
{
"name": "PowerShell",
"bytes": "1447"
},
{
"name": "Python",
"bytes": "14346555"
},
{
"name": "QML",
"bytes": "2133450"
},
{
"name": "Shell",
"bytes": "4495"
}
],
"symlink_target": ""
}
|
hasJoblib = False
try:
from joblib import Parallel, delayed
hasJoblib = True
except ImportError:
pass
import itertools
import subprocess
import os, re, gzip
options = {}
options["compact"] = True # only show pages with diff
options["compact-surround"] = 0 # how many pages to add before and after compact diff
options["combine-rects"] = 0.0001 # combine diff rects
OLD = False
NEW = True
dirs = ["old", "new"]
texNames = ["main.tex", "main.tex"]
synctexNames = ["main.synctex.gz", "main.synctex.gz"]
pdfNames = ["main.pdf", "main.pdf"]
outputDir = "diff"
texFiles = [os.path.abspath(os.path.join(d,f)) for (d, f) in zip(dirs, texNames)]
synctexFiles = [os.path.abspath(os.path.join(d,f)) for (d, f) in zip(dirs, synctexNames)]
pdfFiles = [os.path.abspath(os.path.join(d,f)) for (d, f) in zip(dirs, pdfNames)]
header = (
r"""
\coordinate (a) at ($ (current page.north east) + (0, 32) $); %
\path [fill=olive, fill opacity=0.2] (0, 0) rectangle (a); %
\node [] at ($ (0,0)!0.5!(a) $) {""" + dirs[OLD] + r"""}; %
""",
r"""
\coordinate (a) at ($ (current page.north east) + (0, 32) $); %
\path [fill=blue, fill opacity=0.2] (0, 0) rectangle (a); %
\node [] at ($ (0,0)!0.5!(a) $) {""" + dirs[NEW] + r"""}; %
""")
diffTemplate = (r"""
\documentclass[12pt]{article}
\usepackage{pdfpages}
\usepackage{tikz}
\usetikzlibrary{calc}
\begin{document}
""",
r"""
\end{document}
""")
merge = (r"""
\documentclass[12pt]{article}
\usepackage{pdfpages}
\usepackage[a4paper]{geometry}
\begin{document}
\includepdf[pages=-, nup=1x2, landscape, frame]{""",
r"""}
\end{document}
""")
# get number of pages from synctex file
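# (relies on the page count sitting at a fixed offset -- seven lines from the
# end -- in the uncompressed synctex postamble; this is an assumption about
# synctex's file layout rather than a documented interface)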
def numPages(synctexFile):
with gzip.open(synctexFile, 'rb') as f:
return int(f.readlines()[-7][1:])
numberOfPages = [numPages(synctexFiles[age]) for age in (OLD, NEW)]
maxPages = max(numberOfPages[OLD], numberOfPages[NEW])
def findDocumentRange(tex):
begin = 0
end = 0
with open(tex, "r") as f:
for i, l in enumerate(f):
if begin == 0 and r"\begin{document}" in l:
begin = i
if end == 0 and r"\end{document}" in l:
end = i
return set(range(begin+2, end+1))
documentRanges = [findDocumentRange(tex) for tex in texFiles]
class Rect:
def __init__(self, page = 0, x1 = 0, y1 = 0, x2 = 0, y2 = 0):
self.p = page
self.x1 = min(x1, x2)
self.y1 = min(y1, y2)
self.x2 = max(x1, x2)
self.y2 = max(y1, y2)
# def __cmp__(self, r):
# return self.x1 == r.x1 and self.y1 == r.y1 and self.x2 == r.x2 and self.y2 == r.y2
def __add__(self, r):
if self.p != r.p:
raise Exception("Cannot add rects on different pages")
if self.area() == 0:
return r
if r.area() == 0:
return self
return Rect(self.p, min(self.x1, r.x1), min(self.y1, r.y1), max(self.x2, r.x2), max(self.y2, r.y2))
def area(self):
return (self.x2 - self.x1) * (self.y2 - self.y1)
def tex(self):
return "({0}, {1}) rectangle ({2}, {3})".format(self.x1, self.y1, self.x2, self.y2)
class Hunk:
    def __init__(self, rects=(), age=OLD):  # tuple default avoids shared mutable state
self.rects = []
self.age = age
for r in rects:
self.addRect(r)
def addRect(self, rect):
if options["combine-rects"]:
n = len(self.rects)
matching = []
for i, r in enumerate(self.rects):
if(r.p == rect.p):
#if (not (0 < options["combine-rects"] < 1)) or ( r.area() + rect.area() > (1 - options["combine-rects"]) * (r + rect).area()):
if r.area() + rect.area() > (r + rect).area():
matching.insert(0, (i, r))
for (i, r) in matching:
rect = rect + r
del self.rects[i]
self.rects.append(rect)
def affectedPages(self):
return set([r.p for r in self.rects])
def tex(self):
return ""
class AddedHunk(Hunk):
def tex(self, page):
return "".join([r"\path [fill=green, fill opacity=0.2] " + r.tex() + "; % \n" for r in self.rects if r.p == page])
class DeletedHunk(Hunk):
def tex(self, page):
return "".join([r"\path [fill=red, fill opacity=0.2] " + r.tex() + "; % \n" for r in self.rects if r.p == page])
class ChangedHunk(Hunk):
def tex(self, page):
return "".join([r"\path [fill=yellow, fill opacity=0.2] " + r.tex() + "; % \n" for r in self.rects if r.p == page])
def rectsFromPdf(age, line, char):
result = subprocess.check_output(["synctex", "view", "-i", "{0}:{1}:{2}".format(line, char, texFiles[age]), "-o", pdfFiles[age]], shell=False)
for m in re.finditer(r"^Page:(?P<p>[0-9]+).*?h:(?P<h>[0-9.]+).*?v:(?P<v>[0-9.]+).*?W:(?P<W>[0-9.]+).*?H:(?P<H>[0-9.]+)", result, flags=re.MULTILINE | re.DOTALL):
p = int(m.group("p"))
h = float(m.group("h"))
v = float(m.group("v"))
W = float(m.group("W"))
H = float(m.group("H"))
yield Rect(p-1, h, v - H, h + W, v)
def createHunkPair(mode, oldLineRange, newLineRange):
oldLineRange = documentRanges[OLD].intersection(oldLineRange)
newLineRange = documentRanges[NEW].intersection(newLineRange)
if mode == "a":
return (AddedHunk(), #[rect for line in oldLineRange for rect in rectsFromPdf(OLD, line, 0)], OLD),
AddedHunk([rect for line in newLineRange for rect in rectsFromPdf(NEW, line, 0)], NEW))
elif mode == "d":
return (DeletedHunk([rect for line in oldLineRange for rect in rectsFromPdf(OLD, line, 0)], OLD),
DeletedHunk())#[rect for line in newLineRange for rect in rectsFromPdf(NEW, line, 0)], NEW))
elif mode == "c":
return (ChangedHunk([rect for line in oldLineRange for rect in rectsFromPdf(OLD, line, 0)], OLD),
ChangedHunk([rect for line in newLineRange for rect in rectsFromPdf(NEW, line, 0)], NEW))
def writeTexFile(outputFile, hunkPairs):
n = options["compact-surround"]
texAtPage = [ [""] * maxPages, [""] * maxPages ]
for (old, new) in hunkPairs:
for hunk, tex in ((old, texAtPage[OLD]), (new, texAtPage[NEW])):
for p in range(maxPages):
tex[p] += hunk.tex(p)
with open(outputFile, "w") as f:
f.write(diffTemplate[0])
for p in range(maxPages):
if options["compact"] and all([(texAtPage[OLD][i] == "" ) and (texAtPage[NEW][i] == "") for i in range(max(p - n, 0), min(p + n + 1, maxPages))]):
continue
# cycle between old and new
for (tex, pdf, pages, h) in ((texAtPage[OLD][p], pdfFiles[OLD], numberOfPages[OLD], header[OLD]), (texAtPage[NEW][p], pdfFiles[NEW], numberOfPages[NEW], header[NEW])):
if p < pages:
#if tex == None:
#f.write(r"\includepdf[fitpaper=true, pages={" + str(p+1) + "}]{" + pdf + "}" + "\n")
#else:
f.write(r"\includepdf[fitpaper=true, pagecommand={\thispagestyle{empty}\begin{tikzpicture}[x=1pt, y=-1pt, remember picture, overlay, shift={(current page.north west)}] %")
                    f.write(h)
f.write(tex)
f.write(r"\end{tikzpicture}}, pages={" + str(p+1) + "}]{" + pdf + "}" + "\n")
else: # out of range
f.write(r"\mbox{}\newpage")
#f.write(r"\includepdf[pages={}]{" + pdf + "}" + "\n")
f.write(diffTemplate[1])
# takes "nr" or "nr, nr" to a range
# TODO : intersection with document range
def stringToRange(s):
l = s.split(",")
if len(l) == 1:
return [int(l[0])]
else:
return range(int(l[0]), int(l[1])+1)
if __name__ == "__main__":
diff = None
print "-> Finding diffs"
try:
subprocess.check_output("diff " + texFiles[OLD] + " " + texFiles[NEW], shell=True)
except subprocess.CalledProcessError, e:
diff = e.output
matches = re.finditer(r"^(?P<old>[0-9,]+)(?P<mode>[adc])(?P<new>[0-9,]+)", diff, re.MULTILINE)
if hasJoblib:
print "-> Making diff rects using SyncTeX (in parallel)"
hunkPairs = Parallel(n_jobs=4, verbose=5)(delayed(createHunkPair)(m.group("mode"), stringToRange(m.group("old")), stringToRange(m.group("new"))) for m in matches)
else:
print "-> Making diff rects using SyncTeX (install joblib to run in parallel)"
hunkPairs = [createHunkPair(m.group("mode"), stringToRange(m.group("old")), stringToRange(m.group("new"))) for m in matches]
if not os.path.exists(outputDir):
os.makedirs(outputDir)
writeTexFile(outputDir + "/diff.tex", hunkPairs)
with open(outputDir + "/merge.tex", "w") as f:
f.write(merge[0] + "diff.pdf" + merge[1])
print "-> Making PDF"
subprocess.call(["pdflatex", "diff.tex"], cwd = outputDir, stdout=open(os.devnull, 'wb'))
subprocess.call(["pdflatex", "diff.tex"], cwd = outputDir, stdout=open(os.devnull, 'wb'))
subprocess.call(["pdflatex", "merge.tex"], cwd = outputDir, stdout=open(os.devnull, 'wb'))
|
{
"content_hash": "ec110edb56f39cfeffe8bf9390530330",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 191,
"avg_line_length": 35.33976833976834,
"alnum_prop": 0.5622200371462909,
"repo_name": "hgustafsson/skillnad",
"id": "ea2a99a591d9f2ec25d8831858b841b3e8e6a835",
"size": "9176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skillnad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8829"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
import sys
import signal
import psutil
from oslo_config import cfg
import st2tests.config
from st2common.util import concurrency
from st2common.models.db import db_setup
from st2reactor.container.process_container import PROCESS_EXIT_TIMEOUT
from st2common.util.green.shell import run_command
from st2common.bootstrap.sensorsregistrar import register_sensors
from st2tests.base import IntegrationTestCase
__all__ = ["SensorContainerTestCase"]
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ST2_CONFIG_PATH = os.path.join(BASE_DIR, "../../../conf/st2.tests.conf")
ST2_CONFIG_PATH = os.path.abspath(ST2_CONFIG_PATH)
PYTHON_BINARY = sys.executable
BINARY = os.path.join(BASE_DIR, "../../../st2reactor/bin/st2sensorcontainer")
BINARY = os.path.abspath(BINARY)
PACKS_BASE_PATH = os.path.abspath(os.path.join(BASE_DIR, "../../../contrib"))
DEFAULT_CMD = [
PYTHON_BINARY,
BINARY,
"--config-file",
ST2_CONFIG_PATH,
"--sensor-ref=examples.SamplePollingSensor",
]
class SensorContainerTestCase(IntegrationTestCase):
"""
Note: For those tests MongoDB must be running, virtualenv must exist for
examples pack and sensors from the example pack must be registered.
"""
print_stdout_stderr_on_teardown = True
@classmethod
def setUpClass(cls):
super(SensorContainerTestCase, cls).setUpClass()
st2tests.config.parse_args()
username = (
cfg.CONF.database.username
if hasattr(cfg.CONF.database, "username")
else None
)
password = (
cfg.CONF.database.password
if hasattr(cfg.CONF.database, "password")
else None
)
cls.db_connection = db_setup(
cfg.CONF.database.db_name,
cfg.CONF.database.host,
cfg.CONF.database.port,
username=username,
password=password,
ensure_indexes=False,
)
# NOTE: We need to perform this patching because test fixtures are located outside of the
# packs base paths directory. This will never happen outside the context of test fixtures.
cfg.CONF.content.packs_base_paths = PACKS_BASE_PATH
# Register sensors
register_sensors(packs_base_paths=[PACKS_BASE_PATH], use_pack_cache=False)
# Create virtualenv for examples pack
virtualenv_path = "/tmp/virtualenvs/examples"
run_command(cmd=["rm", "-rf", virtualenv_path])
cmd = [
"virtualenv",
"--system-site-packages",
"--python",
PYTHON_BINARY,
virtualenv_path,
]
run_command(cmd=cmd)
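        # The virtualenv created above matters because st2 runs a pack's
        # sensors inside that pack's virtualenv; without it the container
        # could not spawn the examples sensors (an inference from this test
        # setup, not st2 documentation).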
def test_child_processes_are_killed_on_sigint(self):
process = self._start_sensor_container()
# Give it some time to start up
concurrency.sleep(7)
# Assert process has started and is running
self.assertProcessIsRunning(process=process)
# Verify container process and children sensor / wrapper processes are running
pp = psutil.Process(process.pid)
children_pp = pp.children()
self.assertEqual(pp.cmdline()[1:], DEFAULT_CMD[1:])
self.assertEqual(len(children_pp), 1)
# Send SIGINT
process.send_signal(signal.SIGINT)
        # SIGINT causes graceful shutdown so give it some time to gracefully shut down the sensor
# child processes
concurrency.sleep(PROCESS_EXIT_TIMEOUT + 1)
# Verify parent and children processes have exited
self.assertProcessExited(proc=pp)
self.assertProcessExited(proc=children_pp[0])
self.remove_process(process=process)
def test_child_processes_are_killed_on_sigterm(self):
process = self._start_sensor_container()
# Give it some time to start up
concurrency.sleep(5)
# Verify container process and children sensor / wrapper processes are running
pp = psutil.Process(process.pid)
children_pp = pp.children()
self.assertEqual(pp.cmdline()[1:], DEFAULT_CMD[1:])
self.assertEqual(len(children_pp), 1)
# Send SIGTERM
process.send_signal(signal.SIGTERM)
        # SIGTERM causes graceful shutdown so give it some time to gracefully shut down the sensor
# child processes
concurrency.sleep(PROCESS_EXIT_TIMEOUT + 8)
# Verify parent and children processes have exited
self.assertProcessExited(proc=pp)
self.assertProcessExited(proc=children_pp[0])
self.remove_process(process=process)
def test_child_processes_are_killed_on_sigkill(self):
process = self._start_sensor_container()
# Give it some time to start up
concurrency.sleep(5)
# Verify container process and children sensor / wrapper processes are running
pp = psutil.Process(process.pid)
children_pp = pp.children()
self.assertEqual(pp.cmdline()[1:], DEFAULT_CMD[1:])
self.assertEqual(len(children_pp), 1)
# Send SIGKILL
process.send_signal(signal.SIGKILL)
# Note: On SIGKILL processes should be killed instantly
concurrency.sleep(1)
# Verify parent and children processes have exited
self.assertProcessExited(proc=pp)
self.assertProcessExited(proc=children_pp[0])
self.remove_process(process=process)
def test_single_sensor_mode(self):
# 1. --sensor-ref not provided
cmd = [
PYTHON_BINARY,
BINARY,
"--config-file",
ST2_CONFIG_PATH,
"--single-sensor-mode",
]
process = self._start_sensor_container(cmd=cmd)
pp = psutil.Process(process.pid)
# Give it some time to start up
concurrency.sleep(5)
stdout = process.stdout.read()
self.assertTrue(
(
b"--sensor-ref argument must be provided when running in single sensor "
b"mode"
)
in stdout
)
self.assertProcessExited(proc=pp)
self.remove_process(process=process)
# 2. sensor ref provided
cmd = [
BINARY,
"--config-file",
ST2_CONFIG_PATH,
"--single-sensor-mode",
"--sensor-ref=examples.SampleSensorExit",
]
process = self._start_sensor_container(cmd=cmd)
pp = psutil.Process(process.pid)
# Give it some time to start up
concurrency.sleep(1)
# Container should exit and not respawn a sensor in single sensor mode
stdout = process.stdout.read()
        self.assertIn(
            b"Process for sensor examples.SampleSensorExit has exited with code 110",
            stdout,
        )
        self.assertIn(
            b"Not respawning a sensor since running in single sensor mode", stdout
        )
        self.assertIn(b"Process container quit with exit_code 110.", stdout)
concurrency.sleep(2)
self.assertProcessExited(proc=pp)
self.remove_process(process=process)
def _start_sensor_container(self, cmd=DEFAULT_CMD):
subprocess = concurrency.get_subprocess_module()
print("Using command: %s" % (" ".join(cmd)))
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
preexec_fn=os.setsid,
)
self.add_process(process=process)
return process
|
{
"content_hash": "c6c48b885a9d2a5144984c27f761951f",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 98,
"avg_line_length": 31.384937238493723,
"alnum_prop": 0.6293827489668045,
"repo_name": "StackStorm/st2",
"id": "687c9f91025f774794da8ddd5e1ef6e62d6d864d",
"size": "8129",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2reactor/tests/integration/test_sensor_container.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
# Third-party
import astropy.units as u
from astropy.utils import isiterable
from astropy.coordinates import SkyCoord, frame_transform_graph
from flask import Flask, request, render_template, session, redirect, abort, flash, jsonify
with open(".env") as f:
for line in f:
key, val = line.strip().split("=")
os.environ[key] = val
app = Flask(__name__) # create our flask app
app.secret_key = os.environ.get('SECRET_KEY')
@app.route('/')
def index():
return render_template('index.html')
# @app.errorhandler(404)
# def page_not_found(error):
# return render_template('404.html'), 404
import coordinates
import units
import time
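# The three imports above appear to be sibling modules of this package,
# imported for their side effects (e.g. registering routes on `app` through
# Python 2's implicit relative imports); an inference from this snippet, as
# those modules are not shown here.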
|
{
"content_hash": "fd8a5cf17471a7d2ea0bd19a1cfcfb85",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 91,
"avg_line_length": 24.59375,
"alnum_prop": 0.7026683608640406,
"repo_name": "eteq/Astropy-WebAPI",
"id": "43d6bb60170ee85ba948d7ab1196ee74abe88c81",
"size": "804",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "www/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5738"
},
{
"name": "HTML",
"bytes": "5406"
},
{
"name": "JavaScript",
"bytes": "1418"
},
{
"name": "Python",
"bytes": "18226"
}
],
"symlink_target": ""
}
|
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
from restkit.util import to_bytestring
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
from restkit.version import __version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
Raise TypeError if x is a str or if x is an iterable which
contains a str.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = parse_qs(query)
for k, v in self.items():
if isinstance(v, unicode):
v = v.encode("utf-8")
query.setdefault(k, []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
# Encode signature parameters per Oauth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
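    # Illustration (assumed values, URL with no query string): for parameters
    # {'a': '1', 'b': '2', 'oauth_nonce': '4572616e'} this returns
    # "a=1&b=2&oauth_nonce=4572616e" -- sorted, utf-8 encoded, with spaces as
    # %20 and "~" left unescaped.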
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
            except Exception:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return to_bytestring(key), raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(to_bytestring(key), raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
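# A minimal signing sketch (key, secret and URL are hypothetical):
#
#     consumer = Consumer(key='my-key', secret='my-secret')
#     req = Request.from_consumer_and_token(
#         consumer, http_method='GET', http_url='http://example.com/resource')
#     req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, None)
#     req.to_header()  # {'Authorization': 'OAuth realm="", oauth_nonce="..." ...'}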
|
{
"content_hash": "30e51d820002aabbb335141c1afa34cb",
"timestamp": "",
"source": "github",
"line_count": 670,
"max_line_length": 265,
"avg_line_length": 32.876119402985076,
"alnum_prop": 0.5959958233077587,
"repo_name": "benoitc/restkit",
"id": "1591574ca3146e2e58acfe0ffc168ef29f6808de",
"size": "22155",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "restkit/oauth2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187462"
}
],
"symlink_target": ""
}
|
"""Fuzzing module.
The main entry point to GraphicsFuzz Auto.
"""
import argparse
import enum
import os
import random
import secrets
import shutil
import sys
from pathlib import Path
from typing import List, Optional
from gfauto import (
artifact_util,
binaries_util,
devices_util,
download_cts_gf_tests,
fuzz_glsl_amber_test,
fuzz_spirv_amber_test,
fuzz_test_util,
gflogging,
interrupt_util,
run_cts_gf_tests,
settings_util,
shader_job_util,
test_util,
util,
)
from gfauto.device_pb2 import Device, DevicePreprocess
from gfauto.gflogging import log
from gfauto.settings_pb2 import Settings
from gfauto.util import check_dir_exists
# Root:
# - donors/ (contains GLSL shader jobs)
# - temp/ (contains staging directories with random names)
# - reports/ (contains reports)
# Staging directory.
# - source_template/ (source directory but with no test metadata yet)
# - test_1/, test_2/, etc. (test directories)
# Test directory:
# - source/ (source directory, with test.json and other files)
# - results/ (results)
# Report: a test directory with a reduction for a specific device.
# E.g. reports/crashes/Null_point/123_opt_pixel/
#      (crash signature "Null_point"; device name "pixel" appended to the test name)
# - source/
# - results/
# - laptop/
# - reference/ variant/
# - ... (see below for a more detailed example)
# - pixel/
# - reference/ variant/
# - ... (see below for a more detailed example)
# - reductions/ (since this is a report for a pixel device, we have reductions)
# - ... (see below for a more detailed example)
# - temp/123/ (a staging directory; not a proper test_dir, as it only has "source_template", not "source".)
# - source_template/
# - --test.json-- this will NOT be present because this is just a source template directory.
# - reference/ variant/
# - shader.json, shader.{comp,frag}
# - 123_no_opt/ 123_opt_O/ 123_opt_Os/ 123_opt_rand_1/ etc. (Proper test_dirs, as they have "source". These may be
# copied to become a report if a bug is found.)
# - source/ (same as source_template, but with test.json)
# - results/
# - pixel/ other_phone/ laptop/ etc.
# - reference/ variant/
# - test.amber
# - image.png
# - STATUS
# - log.txt
# - (all other result files and intermediate files for running the shader on the device)
# - reductions/ (reductions are only added once the staging directory is copied to the reports directory)
# - reduction_1/ reduction_blah/ etc. (reduction name; also a test_dir)
# - source/ (same as other source dirs, but with the final reduced shader source)
# - reduction_work/
# - reference/ variant/
# - shader.json, shader_reduction_001_success.json,
# shader_reduction_002_failed.json, etc., shader_reduced_final.json
# - shader/ shader_reduction_001/
# (these are the result directories for each step, containing STATUS, etc.)
#
DONORS_DIR = "donors"
REFERENCES_DIR = "references"
SPIRV_DONORS_DIR = "spirv_fuzz_donors"
SPIRV_REFERENCES_DIR = "spirv_fuzz_references"
REFERENCE_IMAGE_FILE_NAME = "reference.png"
VARIANT_IMAGE_FILE_NAME = "variant.png"
BUFFER_FILE_NAME = "buffer.bin"
BEST_REDUCTION_NAME = "best"
AMBER_RUN_TIME_LIMIT = 30
STATUS_TOOL_CRASH = "TOOL_CRASH"
STATUS_CRASH = "CRASH"
STATUS_UNRESPONSIVE = "UNRESPONSIVE"
STATUS_TOOL_TIMEOUT = "TOOL_TIMEOUT"
STATUS_TIMEOUT = "TIMEOUT"
STATUS_SUCCESS = "SUCCESS"
# Number of bits for seeding the RNG.
# Python normally uses 256 bits internally when seeding its RNG, hence this choice.
ITERATION_SEED_BITS = 256
FUZZ_FAILURES_DIR_NAME = "fuzz_failures"
class FuzzingTool(enum.Enum):
GLSL_FUZZ = "GLSL_FUZZ"
SPIRV_FUZZ = "SPIRV_FUZZ"
def get_random_name() -> str:
# TODO: could change to human-readable random name or the date.
return util.get_random_name()
def get_fuzzing_tool_pattern(
glsl_fuzz_iterations: int, spirv_fuzz_iterations: int
) -> List[FuzzingTool]:
fuzzing_tool_pattern = [FuzzingTool.GLSL_FUZZ] * glsl_fuzz_iterations
fuzzing_tool_pattern += [FuzzingTool.SPIRV_FUZZ] * spirv_fuzz_iterations
# If empty, we default to just running GLSL_FUZZ repeatedly.
if not fuzzing_tool_pattern:
fuzzing_tool_pattern = [FuzzingTool.GLSL_FUZZ]
return fuzzing_tool_pattern
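# Illustration: get_fuzzing_tool_pattern(glsl_fuzz_iterations=2,
# spirv_fuzz_iterations=1) returns [GLSL_FUZZ, GLSL_FUZZ, SPIRV_FUZZ];
# the main fuzzing loop cycles through this list, one tool per iteration.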
def main() -> None:
parser = argparse.ArgumentParser(
description="Fuzz devices using glsl-fuzz and/or spirv-fuzz to generate tests. "
"By default, repeatedly generates tests using glsl-fuzz. "
"You can instead specify the number of times each tool will run; "
"glsl-fuzz runs G times, then spirv-fuzz runs S times, then the pattern repeats. "
"By default, G=0 and S=0, in which case glsl-fuzz is hardcoded to run. "
'Each run of glsl-fuzz/spirv-fuzz uses a random "iteration seed", which can be used to replay the invocation of the tool and the steps that follow. ',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--settings",
help="Path to the settings JSON file for this fuzzing instance.",
default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
)
parser.add_argument(
"--iteration_seed",
help="The seed to use for one fuzzing iteration (useful for reproducing an issue).",
)
parser.add_argument(
"--glsl_fuzz_iterations",
metavar="G",
help="Run glsl-fuzz G times to generate some tests, before moving on to the next tool.",
action="store",
default=0,
type=int,
)
parser.add_argument(
"--spirv_fuzz_iterations",
metavar="S",
help="Run spirv-fuzz S times to generate some tests, before moving on to the next tool.",
action="store",
default=0,
type=int,
)
parser.add_argument(
"--allow_no_stack_traces",
help="Continue even if we cannot get stack traces (using catchsegv or cdb).",
action="store_true",
)
parser.add_argument(
"--active_device",
help="Add an active device name, overriding those in the settings.json file. "
"Ignored when --update_ignored_crash_signatures is passed."
"Can be used multiple times to add multiple devices. "
"E.g. --active_device host --active_device host_with_alternative_icd. "
"This allows sharing a single settings.json file between multiple instances of gfauto_fuzz. "
"Note that a host_preprocessor device will automatically be added as the first active device, if it is missing. ",
action="append",
)
parser.add_argument(
"--update_ignored_crash_signatures",
metavar="GERRIT_COOKIE",
help="When passed, gfauto will download and run the existing GraphicsFuzz AmberScript tests from Khronos vk-gl-cts on "
"the active devices listed in the settings.json file. "
"It will then update the ignored_crash_signatures field for each active device in the settings.json file based on the crash signatures seen. "
"Requires Git. Requires Khronos membership. Obtain the Gerrit cookie as follows. "
+ download_cts_gf_tests.GERRIT_COOKIE_INSTRUCTIONS,
action="store",
default=None,
type=str,
)
parser.add_argument(
"--iteration_limit",
help="Stop after this many fuzzing iterations.",
action="store",
default=None,
type=int,
)
parser.add_argument(
"--keep_temp",
help="Keep temp directories. Useful for debugging with --iteration_seed.",
action="store_true",
)
parsed_args = parser.parse_args(sys.argv[1:])
settings_path = Path(parsed_args.settings)
iteration_seed: Optional[int] = None if parsed_args.iteration_seed is None else int(
parsed_args.iteration_seed
)
glsl_fuzz_iterations: int = parsed_args.glsl_fuzz_iterations
spirv_fuzz_iterations: int = parsed_args.spirv_fuzz_iterations
allow_no_stack_traces: bool = parsed_args.allow_no_stack_traces
active_device_names: Optional[List[str]] = parsed_args.active_device
update_ignored_crash_signatures_gerrit_cookie: Optional[str] = (
parsed_args.update_ignored_crash_signatures
)
iteration_limit: Optional[int] = parsed_args.iteration_limit
keep_temp: bool = parsed_args.keep_temp
# E.g. [GLSL_FUZZ, GLSL_FUZZ, SPIRV_FUZZ] will run glsl-fuzz twice, then spirv-fuzz once, then repeat.
fuzzing_tool_pattern = get_fuzzing_tool_pattern(
glsl_fuzz_iterations=glsl_fuzz_iterations,
spirv_fuzz_iterations=spirv_fuzz_iterations,
)
with util.file_open_text(Path(f"log_{get_random_name()}.txt"), "w") as log_file:
gflogging.push_stream_for_logging(log_file)
try:
main_helper(
settings_path,
iteration_seed,
fuzzing_tool_pattern,
allow_no_stack_traces,
active_device_names=active_device_names,
update_ignored_crash_signatures_gerrit_cookie=update_ignored_crash_signatures_gerrit_cookie,
iteration_limit=iteration_limit,
keep_temp=keep_temp,
)
except settings_util.NoSettingsFile as exception:
log(str(exception))
finally:
gflogging.pop_stream_for_logging()
def try_get_root_file() -> Path:
try:
return artifact_util.artifact_path_get_root()
except FileNotFoundError:
log(
"Could not find ROOT file (in the current directory or above) to mark where binaries should be stored. "
"Creating a ROOT file in the current directory."
)
return util.file_write_text(Path(artifact_util.ARTIFACT_ROOT_FILE_NAME), "")
def main_helper( # pylint: disable=too-many-locals, too-many-branches, too-many-statements;
settings_path: Path,
iteration_seed_override: Optional[int] = None,
fuzzing_tool_pattern: Optional[List[FuzzingTool]] = None,
allow_no_stack_traces: bool = False,
override_sigint: bool = True,
use_amber_vulkan_loader: bool = False,
active_device_names: Optional[List[str]] = None,
update_ignored_crash_signatures_gerrit_cookie: Optional[str] = None,
iteration_limit: Optional[int] = None,
keep_temp: bool = False,
) -> None:
if not fuzzing_tool_pattern:
fuzzing_tool_pattern = [FuzzingTool.GLSL_FUZZ]
util.update_gcov_environment_variable_if_needed()
if override_sigint:
interrupt_util.override_sigint()
try_get_root_file()
settings = settings_util.read_or_create(settings_path)
binary_manager = binaries_util.get_default_binary_manager(settings=settings)
temp_dir = Path() / "temp"
# Note: we use "is not None" so that if the user passes an empty Gerrit cookie, we still try to execute this code.
if update_ignored_crash_signatures_gerrit_cookie is not None:
git_tool = util.tool_on_path("git")
downloaded_graphicsfuzz_tests_dir = (
temp_dir / f"graphicsfuzz_cts_tests_{get_random_name()[:8]}"
)
work_dir = temp_dir / f"graphicsfuzz_cts_run_{get_random_name()[:8]}"
download_cts_gf_tests.download_cts_graphicsfuzz_tests(
git_tool=git_tool,
cookie=update_ignored_crash_signatures_gerrit_cookie,
output_tests_dir=downloaded_graphicsfuzz_tests_dir,
)
download_cts_gf_tests.extract_shaders(
tests_dir=downloaded_graphicsfuzz_tests_dir, binaries=binary_manager
)
with util.file_open_text(work_dir / "results.csv", "w") as results_out_handle:
run_cts_gf_tests.main_helper(
tests_dir=downloaded_graphicsfuzz_tests_dir,
work_dir=work_dir,
binaries=binary_manager,
settings=settings,
active_devices=devices_util.get_active_devices(settings.device_list),
results_out_handle=results_out_handle,
updated_settings_output_path=settings_path,
)
return
active_devices = devices_util.get_active_devices(
settings.device_list, active_device_names=active_device_names
)
# Add host_preprocessor device from device list if it is missing.
if not active_devices[0].HasField("preprocess"):
for device in settings.device_list.devices:
if device.HasField("preprocess"):
active_devices.insert(0, device)
break
# Add host_preprocessor device (from scratch) if it is still missing.
if not active_devices[0].HasField("preprocess"):
active_devices.insert(
0, Device(name="host_preprocessor", preprocess=DevicePreprocess())
)
reports_dir = Path() / "reports"
fuzz_failures_dir = reports_dir / FUZZ_FAILURES_DIR_NAME
references_dir = Path() / REFERENCES_DIR
donors_dir = Path() / DONORS_DIR
spirv_fuzz_references_dir = Path() / SPIRV_REFERENCES_DIR
spirv_fuzz_donors_dir = Path() / SPIRV_DONORS_DIR
# Log a warning if there is no tool on the PATH for printing stack traces.
prepended = util.prepend_catchsegv_if_available([], log_warning=True)
if not allow_no_stack_traces and not prepended:
raise AssertionError("Stopping because we cannot get stack traces.")
spirv_fuzz_reference_shaders: List[Path] = []
spirv_fuzz_donor_shaders: List[Path] = []
references: List[Path] = []
if FuzzingTool.SPIRV_FUZZ in fuzzing_tool_pattern:
check_dir_exists(spirv_fuzz_references_dir)
check_dir_exists(spirv_fuzz_donors_dir)
spirv_fuzz_reference_shaders = sorted(spirv_fuzz_references_dir.rglob("*.json"))
spirv_fuzz_donor_shaders = sorted(spirv_fuzz_donors_dir.rglob("*.json"))
if FuzzingTool.GLSL_FUZZ in fuzzing_tool_pattern:
check_dir_exists(references_dir)
check_dir_exists(donors_dir)
# TODO: make GraphicsFuzz find donors recursively.
references = sorted(references_dir.rglob("*.json"))
# Filter to only include .json files that have at least one shader (.frag, .vert, .comp) file.
references = [
ref for ref in references if shader_job_util.get_related_files(ref)
]
if use_amber_vulkan_loader:
library_path = binary_manager.get_binary_path_by_name(
binaries_util.AMBER_VULKAN_LOADER_NAME
).path.parent
util.add_library_paths_to_environ([library_path], os.environ)
fuzzing_tool_index = 0
iteration_count = 0
while True:
# We use "is not None" because iteration_limit could be 0.
if iteration_limit is not None and iteration_count >= iteration_limit:
log(f"Stopping after {iteration_count} iterations.")
break
interrupt_util.interrupt_if_needed()
# We have to use "is not None" because the seed could be 0.
if iteration_seed_override is not None:
iteration_seed = iteration_seed_override
else:
iteration_seed = secrets.randbits(ITERATION_SEED_BITS)
log(f"Iteration seed: {iteration_seed}")
random.seed(iteration_seed)
staging_name = get_random_name()[:8]
staging_dir = temp_dir / staging_name
try:
util.mkdir_p_new(staging_dir)
except FileExistsError:
if iteration_seed_override is not None:
raise
log(f"Staging directory already exists: {str(staging_dir)}")
log("Starting new iteration.")
continue
# Pseudocode:
# - Create test_dir(s) in staging directory.
# - Run test_dir(s) on all active devices (stop early if appropriate).
# - For each test failure on each device, copy the test to reports_dir, adding the device and crash signature.
# - Reduce each report (on the given device).
# - Produce a summary for each report.
fuzzing_tool = fuzzing_tool_pattern[fuzzing_tool_index]
fuzzing_tool_index = (fuzzing_tool_index + 1) % len(fuzzing_tool_pattern)
if fuzzing_tool == FuzzingTool.SPIRV_FUZZ:
fuzz_spirv_amber_test.fuzz_spirv(
staging_dir,
reports_dir,
fuzz_failures_dir,
active_devices,
spirv_fuzz_reference_shaders,
spirv_fuzz_donor_shaders,
settings,
binary_manager,
)
elif fuzzing_tool == FuzzingTool.GLSL_FUZZ:
fuzz_glsl_amber_test.fuzz_glsl(
staging_dir,
reports_dir,
fuzz_failures_dir,
active_devices,
references,
donors_dir,
settings,
binary_manager,
)
else:
raise AssertionError(f"Unknown fuzzing tool: {fuzzing_tool}")
if not keep_temp:
shutil.rmtree(staging_dir)
if iteration_seed_override is not None:
log("Stopping due to iteration_seed")
break
iteration_count += 1
def create_summary_and_reproduce(
test_dir: Path, binary_manager: binaries_util.BinaryManager, settings: Settings,
) -> None:
util.mkdirs_p(test_dir / "summary")
test_metadata = test_util.metadata_read(test_dir)
# noinspection PyTypeChecker
if test_metadata.HasField("glsl") or test_metadata.HasField("spirv_fuzz"):
fuzz_test_util.create_summary_and_reproduce(test_dir, binary_manager, settings)
else:
raise AssertionError("Unrecognized test type")
if __name__ == "__main__":
main()
|
{
"content_hash": "5bef1e1bb69bcf990cb5fcdeec433f9a",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 158,
"avg_line_length": 37.13900414937759,
"alnum_prop": 0.6390704429920117,
"repo_name": "google/graphicsfuzz",
"id": "f6d837950280029a8b72f343f8f2c6fad34486d8",
"size": "18524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gfauto/gfauto/fuzz.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "21057"
},
{
"name": "Batchfile",
"bytes": "18712"
},
{
"name": "C",
"bytes": "1261"
},
{
"name": "C++",
"bytes": "112737"
},
{
"name": "CMake",
"bytes": "3664"
},
{
"name": "CSS",
"bytes": "6774"
},
{
"name": "Dockerfile",
"bytes": "4035"
},
{
"name": "GLSL",
"bytes": "570713"
},
{
"name": "HTML",
"bytes": "9966"
},
{
"name": "Java",
"bytes": "3314649"
},
{
"name": "JavaScript",
"bytes": "75538"
},
{
"name": "Python",
"bytes": "709540"
},
{
"name": "Shell",
"bytes": "62877"
},
{
"name": "Thrift",
"bytes": "7878"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.core.validators import MaxLengthValidator
from django.utils import timezone
from datetime import datetime, timedelta
from webtools import settings
from django.utils.translation import ugettext_lazy as _
import hashlib
import uuid
EXPIRE_CHOICES = (
(0, _('Never expire')),
(5, _('5 minutes')),
(30, _('30 minutes')),
(60, _('1 hour')),
(1440, _('1 day')),
(10080, _('1 week')),
)
class Language(models.Model):
"""Language object."""
name = models.CharField(max_length=200, unique=True)
slug = models.SlugField(max_length=200, unique=True)
def __unicode__(self):
"""String representation."""
return _(self.name)
class Paste(models.Model):
"""Paste object."""
language = models.ForeignKey(Language, default=13)
slug = models.SlugField(unique=True, editable=False)
title = models.CharField(max_length=200, blank=True)
content = models.TextField(validators=[MaxLengthValidator(settings.MAX_CHARACTERS)])
size = models.IntegerField(default=0, editable=False)
paste_time = models.DateTimeField(default=datetime.now, editable=False)
paste_ip = models.IPAddressField(editable=False)
paste_agent = models.CharField(max_length=200, editable=False)
lifetime = models.IntegerField(default=0, choices=EXPIRE_CHOICES)
lifecount = models.IntegerField(default=0)
viewcount = models.IntegerField(default=0, editable=False)
expired = models.BooleanField(default=False, editable=False)
private = models.BooleanField(default=False)
password = models.CharField(max_length=128, blank=True)
salt = models.CharField(max_length=36, blank=True)
def compute_size(self):
"""Computes size."""
self.size = len(self.content)
def is_expired(self):
"""Return expiration status."""
if self.expired or self.time_expired() or self.view_expired():
return True
return False
def time_expired(self):
"""Check if paste lifetime is over."""
if not self.lifetime or self.lifetime - self.get_age() > 0:
return False
self.mark_expired()
return True
def get_age(self):
"""Return age in minutes"""
delta = timezone.now() - self.paste_time
return divmod(delta.days * 86400 + delta.seconds, 60)[0]
def expiration_time(self):
"""Return expiration time"""
if not self.lifetime:
return None
delta = timedelta(minutes=self.lifetime)
return self.paste_time + delta
def mark_expired(self):
"""Mark paste expired."""
self.expired = True
self.save()
def incr_viewcount(self):
"""Increment view counter."""
self.viewcount = self.viewcount + 1
self.save()
def view_expired(self):
"""Check if paste view count is over."""
if not self.lifecount:
return False
if self.lifecount <= self.viewcount:
self.mark_expired()
return True
return False
def _hash(self, raw):
"""Return hashed string."""
if not self.salt:
self.salt = str(uuid.uuid1())
return hashlib.sha512(raw+self.salt).hexdigest()
def set_password(self, raw):
"""Define a hashed password."""
self.password = self._hash(raw)
def pwd_match(self, password):
"""Compare provided password to paste's one."""
if not password or not self._hash(password) == self.password:
return False
return True
def __unicode__(self):
"""String representation."""
return self.slug
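# Illustrative usage sketch (not part of the original module); the field
# values below are assumptions chosen for the example.
def _example_paste_lifecycle():
    paste = Paste(content="print 'hello'", lifetime=5,
                  paste_ip="127.0.0.1", paste_agent="curl/7.35")
    paste.compute_size()           # size == len(content)
    paste.set_password("s3cret")   # stores a salted sha512 digest
    assert paste.pwd_match("s3cret")
    # Expires once 5 minutes elapse (time_expired) or, when lifecount is set,
    # once viewcount reaches it (view_expired).
    return paste.is_expired()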
|
{
"content_hash": "e933b99f470d242284866495498e98d5",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 88,
"avg_line_length": 31.88695652173913,
"alnum_prop": 0.6304881374420507,
"repo_name": "setsuna-/pasteque",
"id": "b1a1aa90996dc98d407c1f12fcab397c670e752f",
"size": "3667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paste/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "63625"
},
{
"name": "Python",
"bytes": "15184"
},
{
"name": "Shell",
"bytes": "275"
}
],
"symlink_target": ""
}
|
from httplang import *
import sys
import os
if len(sys.argv) < 2:
sys.exit("Usage: python httplang.py <file>.httpl")
if not os.path.exists(sys.argv[1]):
sys.exit("No file names {}".format(sys.argv[1]))
with open(sys.argv[1]) as source:
    evaluate.evaluate(parse.program(tokenize.getTokens(source)))
|
{
"content_hash": "3b90fe79ea3c36ffab25b595858b8703",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 71,
"avg_line_length": 25.90909090909091,
"alnum_prop": 0.6982456140350877,
"repo_name": "Max00355/HTTPLang",
"id": "04d087bde266a42f9b1131b72c5554c459b8049b",
"size": "285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "httplang.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9834"
}
],
"symlink_target": ""
}
|
import tests.missing_data.test_missing_data_air_passengers_generic as gen
gen.test_air_passengers_missing_data(None, 'DiscardRow')
|
{
"content_hash": "3e73cbcda2e794eb1a58acef3a69ce61",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 73,
"avg_line_length": 44,
"alnum_prop": 0.8106060606060606,
"repo_name": "antoinecarme/pyaf",
"id": "87005d286f810387288915680747a9ef8af840f0",
"size": "132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/missing_data/test_missing_data_air_passengers_None_DiscardRow.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class PacketCaptureParameters(Model):
"""Parameters that define the create packet capture operation.
:param target: The ID of the targeted resource, only VM is currently
supported.
:type target: str
:param bytes_to_capture_per_packet: Number of bytes captured per packet,
the remaining bytes are truncated. Default value: 0 .
:type bytes_to_capture_per_packet: int
:param total_bytes_per_session: Maximum size of the capture output.
Default value: 1073741824 .
:type total_bytes_per_session: int
:param time_limit_in_seconds: Maximum duration of the capture session in
seconds. Default value: 18000 .
:type time_limit_in_seconds: int
:param storage_location:
:type storage_location:
~azure.mgmt.network.v2016_09_01.models.PacketCaptureStorageLocation
:param filters:
:type filters:
list[~azure.mgmt.network.v2016_09_01.models.PacketCaptureFilter]
"""
_validation = {
'target': {'required': True},
'storage_location': {'required': True},
}
_attribute_map = {
'target': {'key': 'target', 'type': 'str'},
'bytes_to_capture_per_packet': {'key': 'bytesToCapturePerPacket', 'type': 'int'},
'total_bytes_per_session': {'key': 'totalBytesPerSession', 'type': 'int'},
'time_limit_in_seconds': {'key': 'timeLimitInSeconds', 'type': 'int'},
'storage_location': {'key': 'storageLocation', 'type': 'PacketCaptureStorageLocation'},
'filters': {'key': 'filters', 'type': '[PacketCaptureFilter]'},
}
def __init__(self, target, storage_location, bytes_to_capture_per_packet=0, total_bytes_per_session=1073741824, time_limit_in_seconds=18000, filters=None):
super(PacketCaptureParameters, self).__init__()
self.target = target
self.bytes_to_capture_per_packet = bytes_to_capture_per_packet
self.total_bytes_per_session = total_bytes_per_session
self.time_limit_in_seconds = time_limit_in_seconds
self.storage_location = storage_location
self.filters = filters
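# Illustrative construction sketch (not part of the original module). The
# target resource ID is a placeholder, and storage_location would be a
# PacketCaptureStorageLocation instance supplied by the caller.
def _example_parameters(storage_location):
    return PacketCaptureParameters(
        target="/subscriptions/<sub-id>/.../virtualMachines/<vm-name>",
        storage_location=storage_location,
        bytes_to_capture_per_packet=96,   # keep only the first 96 bytes per packet
        time_limit_in_seconds=600,        # stop the session after 10 minutes
    )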
|
{
"content_hash": "ed36494700922eeec7dd412b3e191c4d",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 159,
"avg_line_length": 44.020833333333336,
"alnum_prop": 0.6691907240889731,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "e643fc93902fbfffc30b961eb789b1aa6aa61eac",
"size": "2587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/packet_capture_parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.core.exceptions import ImproperlyConfigured
from bootstrapper.settings.base import *
def get_env_variable(var_name):
try:
return os.environ[var_name]
except KeyError: # pragma: no cover
error_msg = "Set the %s environment variable" % var_name
raise ImproperlyConfigured(error_msg)
try:
DMP_PERSISTENT_STORAGE_LOCATION = \
get_env_variable(
"DMP_PERSISTENT_STORAGE_LOCATION")
except ImproperlyConfigured:
DMP_PERSISTENT_STORAGE_LOCATION = \
os.path.join(ROOT_DIR, "dmp_persistent_storage")
if not os.path.exists(DMP_PERSISTENT_STORAGE_LOCATION):
    raise ImproperlyConfigured(
        "The path specified by 'DMP_PERSISTENT_STORAGE_LOCATION' is not valid")
DMP_VENVS_ROOT_PATH = os.path.join(DMP_PERSISTENT_STORAGE_LOCATION, "virtualenvs")
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DMP_PERSISTENT_STORAGE_LOCATION,
'dmp_db.sqlite3'),
}
}
try:
DMP_CONFIG_PATH = get_env_variable("DMP_CONFIG_PATH")
except ImproperlyConfigured:
DMP_CONFIG_PATH = os.path.join(ROOT_DIR, "dmp_config")
if not os.path.exists(DMP_CONFIG_PATH):
raise ImproperlyConfigured(
"The path specified by 'DMP_CONFIG_PATH' setting is not valid")
if not os.path.exists(os.path.join(DMP_CONFIG_PATH, "packs")):
raise ImproperlyConfigured(
"'DMP_CONFIG_PATH' incorrect, doesn't contain a packs directory")
DMP_PACKS_PATH = os.path.join(DMP_CONFIG_PATH, "packs")
if not os.path.exists(os.path.join(DMP_CONFIG_PATH, "ui")):
raise ImproperlyConfigured(
"'DMP_CONFIG_PATH' incorrect, doesn't contain a ui directory")
DMP_UI_PATH = os.path.join(DMP_CONFIG_PATH, "ui")
INSTALLED_APPS += (
"dmp_ui",
"dmp_packs",
"dmp_packs.actions",
)
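# Illustrative environment setup for this settings module (assumed paths, not
# part of the original file):
#
#   export DMP_PERSISTENT_STORAGE_LOCATION=/var/lib/dmp   # directory must exist
#   export DMP_CONFIG_PATH=/etc/dmp_config                # needs packs/ and ui/
#
# When either variable is unset, the fallback locations under ROOT_DIR are
# used instead.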
|
{
"content_hash": "a4c747e252de3c74bf74363c9d32ffed",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 82,
"avg_line_length": 29.524590163934427,
"alnum_prop": 0.6785119378123264,
"repo_name": "rjusher/docker-container-manager",
"id": "2fa0d2d19b6dfd1f789cbb2f2b4769ad3ea9f4c4",
"size": "1825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/bootstrapper/settings/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "73"
},
{
"name": "Python",
"bytes": "42312"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
import tempfile
import os
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Section(models.Model):
"""
A simple section that links to articles, to test linking to related items
in admin views.
"""
name = models.CharField(max_length=100)
@python_2_unicode_compatible
class Article(models.Model):
"""
A simple article to test admin views. Test backwards compatibility.
"""
title = models.CharField(max_length=100)
content = models.TextField()
date = models.DateTimeField()
section = models.ForeignKey(Section, null=True, blank=True)
def __str__(self):
return self.title
def model_year(self):
return self.date.year
model_year.admin_order_field = 'date'
model_year.short_description = ''
@python_2_unicode_compatible
class Book(models.Model):
"""
A simple book that has chapters.
"""
name = models.CharField(max_length=100, verbose_name='¿Name?')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Promo(models.Model):
name = models.CharField(max_length=100, verbose_name='¿Name?')
book = models.ForeignKey(Book)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Chapter(models.Model):
title = models.CharField(max_length=100, verbose_name='¿Title?')
content = models.TextField()
book = models.ForeignKey(Book)
def __str__(self):
return self.title
class Meta:
# Use a utf-8 bytestring to ensure it works (see #11710)
verbose_name = '¿Chapter?'
@python_2_unicode_compatible
class ChapterXtra1(models.Model):
chap = models.OneToOneField(Chapter, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
def __str__(self):
return '¿Xtra1: %s' % self.xtra
@python_2_unicode_compatible
class ChapterXtra2(models.Model):
chap = models.OneToOneField(Chapter, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
def __str__(self):
return '¿Xtra2: %s' % self.xtra
class RowLevelChangePermissionModel(models.Model):
name = models.CharField(max_length=100, blank=True)
class CustomArticle(models.Model):
content = models.TextField()
date = models.DateTimeField()
@python_2_unicode_compatible
class ModelWithStringPrimaryKey(models.Model):
string_pk = models.CharField(max_length=255, primary_key=True)
def __str__(self):
return self.string_pk
def get_absolute_url(self):
return '/dummy/%s/' % self.string_pk
@python_2_unicode_compatible
class Color(models.Model):
value = models.CharField(max_length=10)
warm = models.BooleanField()
def __str__(self):
return self.value
# we replicate Color to register with another ModelAdmin
class Color2(Color):
class Meta:
proxy = True
@python_2_unicode_compatible
class Thing(models.Model):
title = models.CharField(max_length=20)
color = models.ForeignKey(Color, limit_choices_to={'warm': True})
pub_date = models.DateField(blank=True, null=True)
def __str__(self):
return self.title
@python_2_unicode_compatible
class Actor(models.Model):
name = models.CharField(max_length=50)
age = models.IntegerField()
def __str__(self):
return self.name
@python_2_unicode_compatible
class Inquisition(models.Model):
expected = models.BooleanField()
leader = models.ForeignKey(Actor)
country = models.CharField(max_length=20)
def __str__(self):
return "by %s from %s" % (self.leader, self.country)
@python_2_unicode_compatible
class Sketch(models.Model):
title = models.CharField(max_length=100)
inquisition = models.ForeignKey(Inquisition, limit_choices_to={'leader__name': 'Palin',
'leader__age': 27,
'expected': False,
})
def __str__(self):
return self.title
class Fabric(models.Model):
NG_CHOICES = (
('Textured', (
('x', 'Horizontal'),
('y', 'Vertical'),
)
),
('plain', 'Smooth'),
)
surface = models.CharField(max_length=20, choices=NG_CHOICES)
@python_2_unicode_compatible
class Person(models.Model):
GENDER_CHOICES = (
(1, "Male"),
(2, "Female"),
)
name = models.CharField(max_length=100)
gender = models.IntegerField(choices=GENDER_CHOICES)
age = models.IntegerField(default=21)
alive = models.BooleanField(default=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Persona(models.Model):
"""
A simple persona associated with accounts, to test inlining of related
accounts which inherit from a common accounts class.
"""
name = models.CharField(blank=False, max_length=80)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Account(models.Model):
"""
A simple, generic account encapsulating the information shared by all
types of accounts.
"""
username = models.CharField(blank=False, max_length=80)
persona = models.ForeignKey(Persona, related_name="accounts")
servicename = 'generic service'
def __str__(self):
return "%s: %s" % (self.servicename, self.username)
class FooAccount(Account):
"""A service-specific account of type Foo."""
servicename = 'foo'
class BarAccount(Account):
"""A service-specific account of type Bar."""
servicename = 'bar'
@python_2_unicode_compatible
class Subscriber(models.Model):
name = models.CharField(blank=False, max_length=80)
email = models.EmailField(blank=False, max_length=175)
def __str__(self):
return "%s (%s)" % (self.name, self.email)
class ExternalSubscriber(Subscriber):
pass
class OldSubscriber(Subscriber):
pass
class Media(models.Model):
name = models.CharField(max_length=60)
class Podcast(Media):
release_date = models.DateField()
class Meta:
ordering = ('release_date',) # overridden in PodcastAdmin
class Vodcast(Media):
media = models.OneToOneField(Media, primary_key=True, parent_link=True)
released = models.BooleanField(default=False)
class Parent(models.Model):
name = models.CharField(max_length=128)
class Child(models.Model):
parent = models.ForeignKey(Parent, editable=False)
name = models.CharField(max_length=30, blank=True)
@python_2_unicode_compatible
class EmptyModel(models.Model):
def __str__(self):
return "Primary key = %s" % self.id
temp_storage = FileSystemStorage(tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR']))
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class Gallery(models.Model):
name = models.CharField(max_length=100)
class Picture(models.Model):
name = models.CharField(max_length=100)
image = models.FileField(storage=temp_storage, upload_to='test_upload')
gallery = models.ForeignKey(Gallery, related_name="pictures")
class Language(models.Model):
iso = models.CharField(max_length=5, primary_key=True)
name = models.CharField(max_length=50)
english_name = models.CharField(max_length=50)
shortlist = models.BooleanField(default=False)
class Meta:
ordering = ('iso',)
# a base class for Recommender and Recommendation
class Title(models.Model):
pass
class TitleTranslation(models.Model):
title = models.ForeignKey(Title)
text = models.CharField(max_length=100)
class Recommender(Title):
pass
class Recommendation(Title):
recommender = models.ForeignKey(Recommender)
class Collector(models.Model):
name = models.CharField(max_length=100)
class Widget(models.Model):
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class DooHickey(models.Model):
code = models.CharField(max_length=10, primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Grommet(models.Model):
code = models.AutoField(primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Whatsit(models.Model):
index = models.IntegerField(primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Doodad(models.Model):
name = models.CharField(max_length=100)
class FancyDoodad(Doodad):
owner = models.ForeignKey(Collector)
expensive = models.BooleanField(default=True)
@python_2_unicode_compatible
class Category(models.Model):
collector = models.ForeignKey(Collector)
order = models.PositiveIntegerField()
class Meta:
ordering = ('order',)
def __str__(self):
return '%s:o%s' % (self.id, self.order)
class Link(models.Model):
posted = models.DateField(
default=lambda: datetime.date.today() - datetime.timedelta(days=7)
)
url = models.URLField()
post = models.ForeignKey("Post")
class PrePopulatedPost(models.Model):
title = models.CharField(max_length=100)
published = models.BooleanField()
slug = models.SlugField()
class PrePopulatedSubPost(models.Model):
post = models.ForeignKey(PrePopulatedPost)
subtitle = models.CharField(max_length=100)
subslug = models.SlugField()
class Post(models.Model):
title = models.CharField(max_length=100, help_text="Some help text for the title (with unicode ŠĐĆŽćžšđ)")
content = models.TextField(help_text="Some help text for the content (with unicode ŠĐĆŽćžšđ)")
posted = models.DateField(
default=datetime.date.today,
help_text="Some help text for the date (with unicode ŠĐĆŽćžšđ)"
)
public = models.NullBooleanField()
def awesomeness_level(self):
return "Very awesome."
@python_2_unicode_compatible
class Gadget(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Villain(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class SuperVillain(Villain):
pass
@python_2_unicode_compatible
class FunkyTag(models.Model):
"Because we all know there's only one real use case for GFKs."
name = models.CharField(max_length=25)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Plot(models.Model):
name = models.CharField(max_length=100)
team_leader = models.ForeignKey(Villain, related_name='lead_plots')
contact = models.ForeignKey(Villain, related_name='contact_plots')
tags = generic.GenericRelation(FunkyTag)
def __str__(self):
return self.name
@python_2_unicode_compatible
class PlotDetails(models.Model):
details = models.CharField(max_length=100)
plot = models.OneToOneField(Plot)
def __str__(self):
return self.details
@python_2_unicode_compatible
class SecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
villain = models.ForeignKey(Villain)
def __str__(self):
return self.location
@python_2_unicode_compatible
class SuperSecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
supervillain = models.ForeignKey(SuperVillain)
def __str__(self):
return self.location
@python_2_unicode_compatible
class CyclicOne(models.Model):
name = models.CharField(max_length=25)
two = models.ForeignKey('CyclicTwo')
def __str__(self):
return self.name
@python_2_unicode_compatible
class CyclicTwo(models.Model):
name = models.CharField(max_length=25)
one = models.ForeignKey(CyclicOne)
def __str__(self):
return self.name
class Topping(models.Model):
name = models.CharField(max_length=20)
class Pizza(models.Model):
name = models.CharField(max_length=20)
toppings = models.ManyToManyField('Topping')
class Album(models.Model):
owner = models.ForeignKey(User)
title = models.CharField(max_length=30)
class Employee(Person):
code = models.CharField(max_length=20)
class WorkHour(models.Model):
datum = models.DateField()
employee = models.ForeignKey(Employee)
class Question(models.Model):
question = models.CharField(max_length=20)
@python_2_unicode_compatible
class Answer(models.Model):
question = models.ForeignKey(Question, on_delete=models.PROTECT)
answer = models.CharField(max_length=20)
def __str__(self):
return self.answer
class Reservation(models.Model):
start_date = models.DateTimeField()
price = models.IntegerField()
DRIVER_CHOICES = (
('bill', 'Bill G'),
('steve', 'Steve J'),
)
RESTAURANT_CHOICES = (
('indian', 'A Taste of India'),
('thai', 'Thai Pography'),
('pizza', 'Pizza Mama'),
)
class FoodDelivery(models.Model):
reference = models.CharField(max_length=100)
driver = models.CharField(max_length=100, choices=DRIVER_CHOICES, blank=True)
restaurant = models.CharField(max_length=100, choices=RESTAURANT_CHOICES, blank=True)
class Meta:
unique_together = (("driver", "restaurant"),)
@python_2_unicode_compatible
class CoverLetter(models.Model):
author = models.CharField(max_length=30)
date_written = models.DateField(null=True, blank=True)
def __str__(self):
return self.author
class Paper(models.Model):
title = models.CharField(max_length=30)
author = models.CharField(max_length=30, blank=True, null=True)
class ShortMessage(models.Model):
content = models.CharField(max_length=140)
timestamp = models.DateTimeField(null=True, blank=True)
@python_2_unicode_compatible
class Telegram(models.Model):
title = models.CharField(max_length=30)
date_sent = models.DateField(null=True, blank=True)
def __str__(self):
return self.title
class Story(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class OtherStory(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class ComplexSortedPerson(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
is_employee = models.NullBooleanField()
class PrePopulatedPostLargeSlug(models.Model):
"""
Regression test for #15938: a large max_length for the slugfield must not
be localized in prepopulated_fields_js.html or it might end up breaking
the javascript (ie, using THOUSAND_SEPARATOR ends up with maxLength=1,000)
"""
title = models.CharField(max_length=100)
published = models.BooleanField()
slug = models.SlugField(max_length=1000)
class AdminOrderedField(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedModelMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
def some_order(self):
return self.order
some_order.admin_order_field = 'order'
class AdminOrderedAdminMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedCallable(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
@python_2_unicode_compatible
class Report(models.Model):
title = models.CharField(max_length=100)
def __str__(self):
return self.title
class MainPrepopulated(models.Model):
name = models.CharField(max_length=100)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField()
slug2 = models.SlugField()
class RelatedPrepopulated(models.Model):
parent = models.ForeignKey(MainPrepopulated)
name = models.CharField(max_length=75)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField(max_length=50)
slug2 = models.SlugField(max_length=60)
class UnorderedObject(models.Model):
"""
Model without any defined `Meta.ordering`.
Refs #16819.
"""
name = models.CharField(max_length=255)
bool = models.BooleanField(default=True)
class UndeletableObject(models.Model):
"""
Model whose show_delete in admin change_view has been disabled
Refs #10057.
"""
name = models.CharField(max_length=255)
class UserMessenger(models.Model):
"""
Dummy class for testing message_user functions on ModelAdmin
"""
class Simple(models.Model):
"""
Simple model with nothing on it for use in testing
"""
class Choice(models.Model):
choice = models.IntegerField(blank=True, null=True,
choices=((1, 'Yes'), (0, 'No'), (None, 'No opinion')))
|
{
"content_hash": "bfe3f90df73df944f16b805526bb12f5",
"timestamp": "",
"source": "github",
"line_count": 679,
"max_line_length": 110,
"avg_line_length": 25.98821796759941,
"alnum_prop": 0.6781706902414145,
"repo_name": "hellhovnd/django",
"id": "1916949f63fc414740f63c6405b2bbee231d8ac8",
"size": "17704",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/admin_views/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42926"
},
{
"name": "HTML",
"bytes": "168830"
},
{
"name": "JavaScript",
"bytes": "83748"
},
{
"name": "Python",
"bytes": "8206211"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.contrib.auth.models import User
from robocrm.models import RoboUser
class RoboUserLabelTests(TestCase):
def test_get_label_superuser_admin(self):
"""
Should be able to get label through admin interface.
"""
password = 'test'
user = User.objects.create_superuser(username="bstrysko", email="test@gmail.com", password=password)
robouser = RoboUser.objects.create(user=user)
self.assertTrue(self.client.login(username=user.username, password=password))
response = self.client.get("/admin/auth/user/{}/tools/create_robouser_label/".format(robouser.user.id))
self.assertEqual(response.status_code, 200)
|
{
"content_hash": "b9839050605fbf9d441f9dae3c88b7f9",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 107,
"avg_line_length": 36.421052631578945,
"alnum_prop": 0.7369942196531792,
"repo_name": "sreidy/roboticsclub.org",
"id": "e9f30f45b503a011e0653cc1cae30b8ccaf1bb08",
"size": "692",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "robocrm/tests/test_user_label.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "87807"
},
{
"name": "HTML",
"bytes": "32573"
},
{
"name": "JavaScript",
"bytes": "5052"
},
{
"name": "Python",
"bytes": "239652"
}
],
"symlink_target": ""
}
|
# Copyright 2011 OpenStack LLC.
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import pyrax.exceptions as exc
import pyrax.utils as utils
# Python 2.4 compat
try:
all
except NameError:
def all(iterable):
return True not in (not x for x in iterable)
class BaseManager(object):
"""
Managers interact with a particular type of API (servers, databases, dns,
etc.) and provide CRUD operations for them.
"""
resource_class = None
response_key = None
plural_response_key = None
uri_base = None
_hooks_map = {}
def __init__(self, api, resource_class=None, response_key=None,
plural_response_key=None, uri_base=None):
self.api = api
self.resource_class = resource_class
self.response_key = response_key
self.plural_response_key = plural_response_key
if plural_response_key is None and response_key is not None:
# Default to adding 's'
self.plural_response_key = "%ss" % response_key
self.uri_base = uri_base
def list(self, limit=None, marker=None, return_raw=False, other_keys=None):
"""
Returns a list of resource objects. Pagination is supported through the
optional 'marker' and 'limit' parameters.
Some APIs do not follow the typical pattern in their responses, and the
BaseManager subclasses will have to parse the raw response to get the
desired information. For those cases, pass 'return_raw=True', and the
response and response_body will be returned unprocessed.
Another use case is when additional information is returned in the
response body. To have that returned, use the 'other_keys' parameter.
This can be either a single string or a list of strings that correspond
to keys in the response body. If specified, a 2-tuple is returned, with
the first element being the list of resources, and the second a dict
whose keys are the 'other_keys' items, and whose values are the
corresponding values in the response body, or None if no such key is
present.
"""
uri = "/%s" % self.uri_base
pagination_items = []
if limit is not None:
pagination_items.append("limit=%s" % limit)
if marker is not None:
pagination_items.append("marker=%s" % marker)
pagination = "&".join(pagination_items)
if pagination:
uri = "%s?%s" % (uri, pagination)
return self._list(uri, return_raw=return_raw, other_keys=other_keys)
def head(self, item):
"""Makes a HEAD request on a specific item."""
uri = "/%s/%s" % (self.uri_base, utils.get_id(item))
return self._head(uri)
def get(self, item):
"""Gets a specific item."""
uri = "/%s/%s" % (self.uri_base, utils.get_id(item))
return self._get(uri)
def create(self, name, *args, **kwargs):
"""
Subclasses need to implement the _create_body() method to return a dict
that will be used for the API request body.
For cases where no response is returned from the API on creation, pass
`return_none=True` so that the _create method doesn't expect one.
For cases where you do not want the _create method to attempt to parse
the response, but instead have it returned directly, pass
`return_raw=True`.
For cases where the API returns information in the response and not the
response_body, pass `return_response=True`.
"""
return_none = kwargs.pop("return_none", False)
return_raw = kwargs.pop("return_raw", False)
return_response = kwargs.pop("return_response", False)
body = self._create_body(name, *args, **kwargs)
return self._create("/%s" % self.uri_base, body,
return_none=return_none, return_raw=return_raw,
return_response=return_response)
def _create_body(self, name, *args, **kwargs):
"""
Creates the dictionary that is passed in the POST call to create a new
resource. Must be defined in each subclass.
"""
raise NotImplementedError("Managers must define their _create_body() "
"method.")
def delete(self, item):
"""Deletes the specified item."""
uri = "/%s/%s" % (self.uri_base, utils.get_id(item))
return self._delete(uri)
def _list(self, uri, obj_class=None, body=None, return_raw=False,
other_keys=None):
"""
Handles the communication with the API when getting
a full listing of the resources managed by this class.
"""
if body:
resp, resp_body = self.api.method_post(uri, body=body)
else:
resp, resp_body = self.api.method_get(uri)
if return_raw:
return (resp, resp_body)
if obj_class is None:
obj_class = self.resource_class
data = self._data_from_response(resp_body)
ret = [obj_class(self, res, loaded=False) for res in data if res]
if other_keys:
keys = utils.coerce_to_list(other_keys)
other = [self._data_from_response(resp_body, key) for key in keys]
return (ret, other)
else:
return ret
def _data_from_response(self, resp_body, key=None):
"""
This works for most API responses, but some don't structure their
listing responses the same way, so overriding this method allows
subclasses to handle extraction for those outliers.
"""
        # occasionally, it's just a list (rackconnect, for example)
if isinstance(resp_body, list):
return resp_body
if key:
data = resp_body.get(key)
else:
data = resp_body.get(self.plural_response_key, resp_body)
        # NOTE(ja): some services, such as keystone, return values as a list in
        #           {"values": [ ... ]}, unlike other services which just return
        #           the list.
if isinstance(data, dict):
try:
data = data["values"]
except KeyError:
pass
return data
def _head(self, uri):
"""
Handles the communication with the API when performing a HEAD request
on a specific resource managed by this class. Returns the headers
contained in the response.
"""
resp, resp_body = self.api.method_head(uri)
return resp
def _get(self, uri):
"""
Handles the communication with the API when getting
a specific resource managed by this class.
"""
resp, resp_body = self.api.method_get(uri)
return self.resource_class(self, resp_body, self.response_key,
loaded=True)
def _create(self, uri, body, return_none=False, return_raw=False,
return_response=None, **kwargs):
"""
Handles the communication with the API when creating a new
resource managed by this class.
"""
self.run_hooks("modify_body_for_create", body, **kwargs)
resp, resp_body = self.api.method_post(uri, body=body)
if return_none:
# No response body
return
elif return_response:
return resp
elif return_raw:
if self.response_key:
return resp_body[self.response_key]
else:
return resp_body
return self.resource_class(self, resp_body, self.response_key)
def _delete(self, uri):
"""
Handles the communication with the API when deleting
a specific resource managed by this class.
"""
_resp, _body = self.api.method_delete(uri)
def _update(self, uri, body, **kwargs):
"""
Handles the communication with the API when updating
a specific resource managed by this class.
"""
self.run_hooks("modify_body_for_update", body, **kwargs)
resp, resp_body = self.api.method_put(uri, body=body)
return resp_body
    def action(self, item, action_type, body=None):
        """
        Several API calls are lumped under the 'action' API. This
        is the generic handler for such calls.
        """
        uri = "/%s/%s/action" % (self.uri_base, utils.get_id(item))
        # Treat a missing body as an empty dict (avoids a mutable default arg).
        action_body = {action_type: body or {}}
        return self.api.method_post(uri, body=action_body)
def find(self, **kwargs):
"""
Finds a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
matches = self.findall(**kwargs)
num_matches = len(matches)
if not num_matches:
msg = "No %s matching: %s." % (self.resource_class.__name__, kwargs)
raise exc.NotFound(404, msg)
if num_matches > 1:
msg = "More than one %s matching: %s." % (
self.resource_class.__name__, kwargs)
raise exc.NoUniqueMatch(400, msg)
else:
return matches[0]
def findall(self, **kwargs):
"""
Finds all items with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
found = []
searches = kwargs.items()
for obj in self.list():
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
continue
return found
@classmethod
def add_hook(cls, hook_type, hook_func):
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
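# Illustrative usage sketch (not part of the original module). The resource
# class, response key, URI base and the "flavors_links" key are assumptions
# chosen for the example.
def _example_manager_usage(api, FlavorResource):
    mgr = BaseManager(api, resource_class=FlavorResource,
                      response_key="flavor", uri_base="flavors")
    page = mgr.list(limit=20, marker="abc123")  # GET /flavors?limit=20&marker=abc123
    # 'other_keys' additionally extracts named keys from the response body:
    flavors, extra = mgr.list(other_keys=["flavors_links"])
    exact = mgr.find(name="m1.small")  # loads the full list, filters client-side
    return page, flavors, extra, exact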
|
{
"content_hash": "7c3b0778407473b9ec466841664ed0af",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 80,
"avg_line_length": 34.57827476038339,
"alnum_prop": 0.598724937632819,
"repo_name": "rackerlabs/heat-pyrax",
"id": "3610b8c09c092198673c573400f34417de51eb86",
"size": "10858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrax/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "Python",
"bytes": "1342641"
}
],
"symlink_target": ""
}
|
import math
from pandac.PandaModules import *
from direct.showbase import DirectObject
import direct.directbase.DirectStart
class InitODE(DirectObject.DirectObject):
"""This creates the various ODE core objects,
and exposes them to other plugins. Should be called ode."""
def __init__(self,manager,xml):
# Setup the physics world...
erp = float(xml.find('param').get('erp',0.8))
cfm = float(xml.find('param').get('cfm',1e-3))
slip = float(xml.find('param').get('slip',0.0))
dampen = float(xml.find('param').get('dampen',0.1))
self.world = OdeWorld()
self.world.setGravity(float(xml.find('gravity').get('x',0.0)),
float(xml.find('gravity').get('y',0.0)),
float(xml.find('gravity').get('z',-9.81)))
self.world.setErp(erp)
self.world.setCfm(cfm)
self.world.setAutoDisableFlag(True)
# Create a surface table - contains interactions between different surface types - loaded from config file...
        surElem = xml.findall('surface')
self.world.initSurfaceTable(len(surElem))
self.surFromName = dict()
for a in xrange(len(surElem)):
self.surFromName[surElem[a].get('name')] = a
# Maths used below is obviously wrong - should probably work out something better.
# Interaction with same surface...
mu = float(surElem[a].get('mu'))
bounce = float(surElem[a].get('bounce'))
absorb = float(surElem[a].get('absorb'))
self.world.setSurfaceEntry(a,a,mu,bounce,absorb,erp,cfm,slip,dampen)
# Interaction with other surfaces...
for b in xrange(a+1,len(surElem)):
mu = float(surElem[a].get('mu')) * float(surElem[b].get('mu'))
bounce = float(surElem[a].get('bounce')) * float(surElem[b].get('bounce'))
absorb = float(surElem[a].get('absorb')) + float(surElem[b].get('absorb'))
self.world.setSurfaceEntry(a,b,mu,bounce,absorb,erp,cfm,slip,dampen)
# Create a space to manage collisions...
self.space = OdeHashSpace()
self.space.setAutoCollideWorld(self.world)
# Setup a contact group to handle collision events...
self.contactGroup = OdeJointGroup()
self.space.setAutoCollideJointGroup(self.contactGroup)
# Create the synch database - this is a database of NodePath and
# ODEBodys - each frame the NodePaths have their positions synched with the ODEBodys...
        self.synch = dict() # dict of tuples (node,body), indexed by an integer that
        # is written to the NodePath as an integer using setPythonTag under 'ode_key'
self.nextKey = 0
self.nextDampKey = 0
# Create the extra function databases - pre- and post- functions for before and after each collision step...
self.preCollide = dict() # id(func) -> func
self.postCollide = dict()
# Create the damping database - damps objects so that they slow down over time, which is very good for stability...
self.damping = dict() # id(body) -> (body,amount)
# Variables for the physics simulation to run on automatic - start and stop are used to enable/disable it however...
self.timeRem = 0.0
self.step = 1.0/50.0
# Arrange variables for collision callback, enable the callbacks...
        self.collCB = dict() # OdeGeom to func(entry,flag), where flag is False if it's geom1, True if it's geom2.
self.space.setCollisionEvent("collision")
def reload(self,manager,xml):
pass # No-op: This makes this module incorrect, but only because you can't
# change the configuration during runtime without unloading it first.
# Physics setup tends to remain constant however.
def simulationTask(self,task):
# Step the simulation and set the new positions - fixed time step...
self.timeRem += globalClock.getDt()
while self.timeRem>self.step:
# Call the pre-collision functions...
for ident,func in self.preCollide.iteritems():
func()
# Apply damping to all objects in damping db...
for key,data in self.damping.iteritems():
if data[0].isEnabled():
vel = data[0].getLinearVel()
if vel.length()>1e3: # Cap dangerous motion.
data[0].setLinearVel(vel*(1e3/vel.length()))
else:
vel *= -data[1]
data[0].addForce(vel)
rot = data[0].getAngularVel()
if rot.length()>1e3: # Cap dangerous rotation.
data[0].setAngularVel(rot*(1e3/rot.length()))
else:
rot *= -data[2]
data[0].addTorque(rot)
# A single step of collision detection...
self.space.autoCollide() # Setup the contact joints
self.world.quickStep(self.step)
self.timeRem -= self.step
self.contactGroup.empty() # Clear the contact joints
# Call the post-collision functions...
for ident,func in self.postCollide.iteritems():
func()
# Update all objects registered with this class to have their positions updated...
for key, data in self.synch.items():
node, body = data
node.setPosQuat(render,body.getPosition(),Quat(body.getQuaternion()))
return task.cont
def onCollision(self,entry):
geom1 = entry.getGeom1()
geom2 = entry.getGeom2()
for geom,func in self.collCB.iteritems():
if geom==geom1:
func(entry,False)
if geom==geom2:
func(entry,True)
def start(self):
self.task = taskMgr.add(self.simulationTask,'Physics Sim',sort=100)
self.accept("collision",self.onCollision)
def stop(self):
taskMgr.remove(self.task)
del self.task
self.timeRem = 0.0
self.ignoreAll()
def getWorld(self):
"""Retuns the ODE world"""
return self.world
def getSpace(self):
"""Returns the ODE space used for automatic collisions."""
return self.space
def getSurface(self,name):
"""This returns the surface number given the surface name.
If it doesn't exist it prints a warning and returns 0 instead of failing."""
if self.surFromName.has_key(name):
return self.surFromName[name]
else:
print 'Warning: Surface %s does not exist'%name
return 0
def getDt(self):
return self.step
def getRemTime(self):
return self.timeRem
def regBodySynch(self,node,body):
"""Given a NodePath and a Body this arranges that the NodePath tracks the Body."""
body.setData(node)
self.synch[node.getKey()] = (node,body)
def unregBodySynch(self,node):
"""Removes a NodePath/Body pair from the synchronisation database,
so the NodePath will stop automatically tracking the Body."""
if self.synch.has_key(node.getKey()):
self.synch[node.getKey()][1].setData(None)
del self.synch[node.getKey()]
def regPreFunc(self,name,func):
"""Registers a function under a unique name to be called before every step
        of the physics simulation - this is different from every frame, being entirely regular."""
self.preCollide[name] = func
def unregPreFunc(self,name):
"""Unregisters a function to be called every step, by name."""
if self.preCollide.has_key(name):
del self.preCollide[name]
def regPostFunc(self,name,func):
"""Registers a function under a unique name to be called after every step
        of the physics simulation - this is different from every frame, being entirely regular."""
self.postCollide[name] = func
def unregPostFunc(self,name):
"""Unregisters a function to be called every step, by name."""
if self.postCollide.has_key(name):
del self.postCollide[name]
def regCollisionCB(self,geom,func):
"""Registers a callback that will be called whenever the given geom collides.
The function must take an OdeCollisionEntry followed by a flag,
        which will be False if geom1 is the given geom, True if it's geom2."""
self.collCB[geom] = func
def unregCollisionCB(self,geom):
"""Unregisters the collision callback for a given geom."""
if self.collCB.has_key(geom):
del self.collCB[geom]
def regDamping(self,body,linear,angular):
"""Given a body this applies a damping force, such that the velocity and
rotation will be reduced in time. If the body is already registered
this will update the current setting."""
self.damping[body.getData().getKey()] = (body,linear,angular)
    def unregDamping(self,body):
        """Unregisters a body from damping."""
        key = body.getData().getKey()
        if self.damping.has_key(key):
            del self.damping[key]
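# Illustrative usage sketch (not part of the original plugin); the manager,
# xml and node arguments are assumptions for the example.
def _exampleOdeUsage(manager,xml,node):
    ode = InitODE(manager,xml)
    body = OdeBody(ode.getWorld())
    body.setPosition(node.getPos(render))
    ode.regBodySynch(node,body)   # node now tracks the body every step
    ode.regDamping(body,0.1,0.1)  # bleed off linear/angular velocity over time
    ode.start()                   # begin the fixed-step simulation task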
|
{
"content_hash": "38a577655fc910f2c1cf40ebbfd915bf",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 120,
"avg_line_length": 36.869565217391305,
"alnum_prop": 0.6641509433962264,
"repo_name": "frainfreeze/FPS-kit",
"id": "0be0dc0e2e09e97c78e9c2053c41e839f4e96ae7",
"size": "9078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/initode/initode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "148345"
},
{
"name": "Shell",
"bytes": "75"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.decorators import login_required, user_passes_test
from ..models import Pessoa, Procedimento, Solicita, Realiza
from django.http import *
from django.shortcuts import render_to_response
from django.template import RequestContext
from time import strftime
from pymongo import MongoClient
import hashlib
@login_required
def insere_procedimento(request):
paciente = Pessoa.objects.get(cpf=request.user.username)
loop = range(40)
valid = False
solicita = None
solicita_id = "-1"
if request.POST:
solicita_id = request.POST['solicitacao_id']
if solicita_id != "-1":
try:
solicita = Solicita.objects.get(id=solicita_id, atendimento__paciente=request.user)
procedimento = solicita.procedimento
valid = True
except Solicita.DoesNotExist:
pass
else:
proc_nome = request.POST['proc_nome']
try:
procedimento = Procedimento.objects.get(nome=proc_nome)
valid = True
except Procedimento.DoesNotExist:
pass
        if valid and 'campo_count' in request.POST:
cpf_paciente = request.user.username
data_realizacao = request.POST['data_realizacao'] if 'data_realizacao' in request.POST else strftime("%Y-%m-%d")
realiza = Realiza()
realiza.procedimento = procedimento
realiza.solicitacao = solicita
realiza.paciente = paciente
realiza.data = data_realizacao
realiza.save()
fields = {
'sql_id': realiza.id,
'data': str(realiza.data),
'horario': str(realiza.horario),
'campos': []
}
campo_count = int(request.POST['campo_count'])
for i in range(1, campo_count+1):
campo = request.POST["campo%d" % i].encode('utf-8')
conteudo = request.POST["conteudo%d" % i].encode('utf-8')
unidade = request.POST["unidade%d" % i].encode('utf-8')
fields['campos'].append({
'nome': campo,
'conteudo': conteudo,
'unidade': unidade
})
fields["%s_conteudo" % campo] = conteudo
fields["%s_unidade" % campo] = unidade
client = MongoClient()
db = client.test
db.realiza.save(fields)
client.close()
return HttpResponseRedirect(".")
if valid:
context = {'solicita_id': solicita_id, 'solicitacao': solicita, 'procedimento': procedimento, 'loop': loop}
else:
procedimentos = Procedimento.objects.all()
solicita = Solicita.objects.filter(atendimento__paciente=request.user).order_by('-atendimento__data', '-atendimento__horario')
context = {'solicitacoes': solicita, 'procedimentos': procedimentos}
return render_to_response('novo_procedimento.html',
context,
context_instance=RequestContext(request))
@login_required
def visualiza_procedimento(request, id):
try:
realiza = Realiza.objects.get(id=id)
except Realiza.DoesNotExist:
return HttpResponseRedirect("/404/")
if realiza.paciente.cpf == request.user.username or request.user.is_staff:
client = MongoClient()
db = client.test
conteudo = db.realiza.find_one({'sql_id': int(id)})
client.close()
context = {'realizacao': realiza, 'conteudo': conteudo, }
return render_to_response('visualiza_procedimento.html',
context,
context_instance=RequestContext(request))
else:
return HttpResponseRedirect("/404/")
@login_required
@user_passes_test(lambda u: u.is_staff)
def pesquisa(request):
realizados = None
loop = range(1, 41)
if request.POST:
realizados = []
if request.POST['and'] == 'and':
campo_count = int(request.POST['campo_count'])
fields = {'$and': []}
for i in range(1, campo_count+1):
campo = request.POST["campo%d" % i].encode('utf-8')
conteudo = request.POST["conteudo%d" % i].encode('utf-8')
unidade = request.POST["unidade%d" % i].encode('utf-8')
fields['$and'].append({"%s_conteudo" % campo: conteudo})
fields['$and'].append({"%s_unidade" % campo: unidade})
client = MongoClient()
db = client.test
resultados = db.realiza.find(fields)
client.close()
for resultado in resultados:
realizados.append(Realiza.objects.get(id=resultado['sql_id']))
else:
campo_count = int(request.POST['campo_count'])
fields = {'$or': []}
for i in range(1, campo_count+1):
campo = request.POST["campo%d" % i].encode('utf-8')
conteudo = request.POST["conteudo%d" % i].encode('utf-8')
unidade = request.POST["unidade%d" % i].encode('utf-8')
fields['$or'].append({"$and": [
{"%s_conteudo" % campo: conteudo},
{"%s_unidade" % campo: unidade}
]})
client = MongoClient()
db = client.test
resultados = db.realiza.find(fields)
client.close()
for resultado in resultados:
realizados.append(Realiza.objects.get(id=resultado['sql_id']))
context = {'loop': loop, 'realizados': realizados}
return render_to_response('pesquisa.html',
context,
context_instance=RequestContext(request))
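# Illustrative sketch of the query document that 'pesquisa' builds (not part
# of the original module). The field names are assumptions; for two fields
# submitted in AND mode, db.realiza is queried with roughly:
#
#   {'$and': [{'Peso_conteudo': '70'}, {'Peso_unidade': 'kg'},
#             {'Altura_conteudo': '175'}, {'Altura_unidade': 'cm'}]}
#
# OR mode instead pairs each field's content and unit inside a nested '$and',
# so a document matches when any one complete field/unit pair matches.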
|
{
"content_hash": "aef7b1098d5051ff3755776e14314ecc",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 134,
"avg_line_length": 42.86029411764706,
"alnum_prop": 0.5532681420483788,
"repo_name": "andredalton/imecare",
"id": "10bebf3d29c25da5ac02e537173488d838c0d639",
"size": "5845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imecare/views/mongo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1105"
},
{
"name": "HTML",
"bytes": "69801"
},
{
"name": "JavaScript",
"bytes": "1178"
},
{
"name": "PLpgSQL",
"bytes": "79779"
},
{
"name": "Python",
"bytes": "37829"
}
],
"symlink_target": ""
}
|
"""Default Formatter for Google."""
from .base import Base
class GoogleFormatter(Base):
"""Documentation Formatter Class."""
name = 'google'
def decorators(self, attributes):
"""Create snippet string for a list of decorators."""
return ''
def extends(self, attributes):
"""Create snippet string for a list of extended objects."""
return ''
def arguments(self, attributes):
"""Create snippet string for a list of arguments."""
section = '\nArgs:\n'
template = '\t{name}: {description}\n'
for attr in attributes['arguments']:
section += template.format(
name=self._generate_field('name', attr['name']),
description=self._generate_field('description'),
)
section += self.keyword_arguments(attributes['keyword_arguments'])
if len(attributes['arguments']) == 0 and len(attributes['keyword_arguments']) == 0:
section = ''
return section
def keyword_arguments(self, attributes):
"""Create snippet string for a list of keyword arguments."""
section = ''
template = '\t{name}: {description} (default: {{{default}}})\n'
if len(attributes) == 0:
return ''
for attr in attributes:
section += template.format(
name=self._generate_field('name', attr['name']),
description=self._generate_field('description'),
default=self._generate_field('default', attr['default']),
)
return section
def returns(self, attribute):
"""Create snippet string for a list of return values."""
section = '\nReturns:\n'
template = '\t{description}\n\t{type}\n'
section += template.format(
description=self._generate_field('description'),
type=self._generate_field('type', attribute['type']),
)
return section
def yields(self, attribute):
"""Create snippet string for a list of yielded results."""
section = '\nYields:\n'
template = '\t{description}\n\t{type}\n'
section += template.format(
description=self._generate_field('description'),
type=self._generate_field('type', attribute['type']),
)
return section
def raises(self, attributes):
"""Create snippet string for a list of raiased exceptions."""
section = '\nRaises:\n'
template = '\t{name}: {description}\n'
for attr in attributes:
section += template.format(
name=self._generate_field('name', attr),
description=self._generate_field('description'),
)
return section
def variables(self, attributes):
"""Create snippet string for a list of variables."""
section = '\nAttributes:\n'
template = '\t{name}: {description}\n'
for attr in attributes:
section += template.format(
name=self._generate_field('name', attr['name']),
description=self._generate_field('description'),
)
return section
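# A minimal usage sketch (not part of the package; it assumes Base needs no
# constructor arguments and that _generate_field renders a snippet
# placeholder; both are assumptions of this sketch):
#
#   formatter = GoogleFormatter()
#   snippet = formatter.arguments({
#       'arguments': [{'name': 'path'}],
#       'keyword_arguments': [],
#   })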
|
{
"content_hash": "55397ad08fa693986057092b88ffc8eb",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 91,
"avg_line_length": 31.643564356435643,
"alnum_prop": 0.5682102628285357,
"repo_name": "adambullmer/sublime-docblockr-python",
"id": "64d24e2458ed4b23c3dbc5a10121892b5dd28c73",
"size": "3196",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "formatters/google.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "4273"
},
{
"name": "Python",
"bytes": "44874"
}
],
"symlink_target": ""
}
|
from six.moves import urllib_parse as urlparse
import jsonpointer
from flex.exceptions import (
ValidationError,
)
from flex.datastructures import (
ValidationDict,
)
from flex.error_messages import MESSAGES
from flex.constants import (
OBJECT,
STRING,
)
from flex.decorators import (
skip_if_not_of_type,
)
from flex.validation.common import (
generate_object_validator,
)
reference_object_schema = {
'type': OBJECT,
'additionalProperties': False,
'required': [
'$ref',
],
'properties': {
'$ref': {
'type': STRING,
},
},
}
@skip_if_not_of_type(STRING)
def validate_reference_pointer(reference, context, **kwargs):
parts = urlparse.urlparse(reference)
if any((parts.scheme, parts.netloc, parts.path, parts.params, parts.query)):
raise ValidationError(
MESSAGES['reference']['unsupported'].format(reference),
)
try:
jsonpointer.resolve_pointer(context, parts.fragment)
except jsonpointer.JsonPointerException:
raise ValidationError(
MESSAGES['reference']['undefined'].format(reference),
)
non_field_validators = ValidationDict()
non_field_validators.add_property_validator('$ref', validate_reference_pointer)
reference_object_validator = generate_object_validator(
schema=reference_object_schema,
non_field_validators=non_field_validators,
)
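# Illustrative calls (assuming the generated validator is invoked with the
# parsed swagger document as ``context``; that calling convention is an
# assumption of this sketch):
#
#   reference_object_validator({'$ref': '#/definitions/Pet'}, context=swagger)
#   # passes when '#/definitions/Pet' resolves inside ``swagger``
#
#   reference_object_validator({'$ref': 'http://example.com/#/defs/Pet'}, context=swagger)
#   # raises ValidationError: non-local references are unsupported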
|
{
"content_hash": "c1d2c275aceccc450e8a8ff06edab437",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 80,
"avg_line_length": 24.10169491525424,
"alnum_prop": 0.6765119549929677,
"repo_name": "pipermerriam/flex",
"id": "50ce601b62d7c7d8a8e336a8577885d63585dc67",
"size": "1422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flex/loading/common/reference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1187"
},
{
"name": "Python",
"bytes": "510857"
}
],
"symlink_target": ""
}
|
"""
Contains various network topologies and scenarios.
See help of individual scenarios for more info.
"""
|
{
"content_hash": "5440aa20a0140a1fc66e0daa2500e884",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 50,
"avg_line_length": 26.75,
"alnum_prop": 0.7757009345794392,
"repo_name": "zhaoyan1117/RoutingProtocols",
"id": "a352f953f0d7381efe6f85054a360bad22c65eb3",
"size": "107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scenarios/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3569"
},
{
"name": "Java",
"bytes": "635164"
},
{
"name": "Processing",
"bytes": "105209"
},
{
"name": "Python",
"bytes": "92117"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
}
|
__author__ = 'Jonathan Brodie'
from hzclient.codec import alongcodec
from hzclient.codec import proxycodec
class ALongProxy(object):
def __init__(self,title,conn):
self.title=title
self.connection=conn
firstpack=proxycodec.createProxy(self.title,"hz:impl:atomicLongService")
self.connection.adjustCorrelationId(firstpack)
self.connection.sendPackage(firstpack.encodeMessage())
response=self.connection.getPackageWithCorrelationId(firstpack.correlation,True)
if response is not None:
print "Initialized and connected proxy!"
else:
print "Unable to connect to server."
def getAndIncrement(self):
pack=alongcodec.getandincrementEncode(self.title)
self.connection.adjustCorrelationId(pack)
self.connection.sendPackage(pack.encodeMessage())
response=self.connection.getPackageWithCorrelationId(pack.correlation,True)
decoded=alongcodec.getandincrementDecode(response)
return decoded
def get(self):
pack=alongcodec.getEncode(self.title)
self.connection.adjustCorrelationId(pack)
self.connection.sendPackage(pack.encodeMessage())
response=self.connection.getPackageWithCorrelationId(pack.correlation,True)
decoded=alongcodec.getDecode(response)
return decoded
def addAndGet(self,delta):
pack=alongcodec.addandgetEncode(self.title,delta)
self.connection.sendPackage(pack.encodeMessage())
response=self.connection.waitAndGetPackage()
decoded=alongcodec.addandgetDecode(response)
return decoded
def compareAndSet(self,expected,updated):
pack=alongcodec.compareandsetEncode(self.title,expected,updated)
self.connection.sendPackage(pack.encodeMessage())
response=self.connection.waitAndGetPackage()
decoded=alongcodec.compareandsetDecode(response)
return decoded
def getAndAdd(self,delta):
pack=alongcodec.getandaddEncode(self.title, delta)
self.connection.sendPackage(pack.encodeMessage())
response=self.connection.waitAndGetPackage()
decoded=alongcodec.getandaddDecode(response)
return decoded
def getAndSet(self,new):
pack=alongcodec.getandsetEncode(self.title,new)
self.connection.sendPackage(pack.encodeMessage())
response=self.connection.waitAndGetPackage()
decoded=alongcodec.getandsetDecode(response)
return decoded
def incrementAndGet(self):
pack=alongcodec.incrementandgetEncode(self.title)
self.connection.sendPackage(pack.encodeMessage())
response=self.connection.waitAndGetPackage()
decoded=alongcodec.incrementandgetDecode(response)
return decoded
def set(self,new):
pack=alongcodec.setEncode(self.title,new)
self.connection.sendPackage(pack.encodeMessage())
response=self.connection.waitAndGetPackage()
decoded=alongcodec.setDecode(response)
return decoded
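# Usage sketch (assumes ``conn`` is an established hzclient connection object;
# results mirror Hazelcast's IAtomicLong semantics):
#
#   counter = ALongProxy("my-counter", conn)
#   counter.set(0)
#   counter.incrementAndGet()    # -> 1
#   counter.addAndGet(5)         # -> 6
#   counter.compareAndSet(6, 0)  # -> True, counter is reset to 0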
|
{
"content_hash": "ccdb4afb70a8f2d88b4135338ccdfaf6",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 88,
"avg_line_length": 38.87179487179487,
"alnum_prop": 0.7180079155672823,
"repo_name": "hazelcast-incubator/pyhzclient",
"id": "ed7bd32d8153bf1045a45c1d89b957951adbb529",
"size": "3032",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hzclient/proxy/proxy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "567231"
}
],
"symlink_target": ""
}
|
'''
INSTRUCTION:
Configure the parameters in __main__, especially the variable "trainFlag",
which is the SWITCH between TRAINING_MODE and PREDICTION_MODE.
TRAINING_MODE generates two files, a User_Feature_Model and a Movie_Feature_Model,
which can later be used for prediction.
PREDICTION_MODE loads the saved model files and predicts from them.
'''
import numpy as np
import time
import pickle
class PMFPredictor:
def __init__(self, inputTrain, num_features=5, train=True, thresholdLikelihood=0.1, thresholdLearningRate=1e-5):
self.num_features = num_features
# initialize
self.learning_rate = .0001
self.threshold_likelihood = thresholdLikelihood
self.threshold_learning_rate = thresholdLearningRate
if train:
self.ratings = np.array(self.loadData(inputTrain))
self.converged = False
self.num_users = int(np.max(self.ratings[:, 0]))
self.num_movies = int(np.max(self.ratings[:, 1]))
self.users = np.random.random((self.num_users, self.num_features))
self.movies = np.random.random((self.num_movies, self.num_features))
self.sigma = np.std(self.ratings, axis=0)[2]
self.lambda_U = 0.1
self.lambda_M = 0.1
self.new_users = np.random.random((self.num_users, self.num_features))
self.new_movies = np.random.random((self.num_movies, self.num_features))
def loadData(self, inputTrain):
count = 0
mylist = []
with file(inputTrain, 'r') as f:
for line in f.readlines():
user, movie, rating = line.split()
dataformat = [int(user), int(movie), float(rating)]
mylist.append(dataformat)
count += 1
if count % 100000 == 0:
print "loading data %d" % count
return mylist
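    # Expected input format for loadData: one whitespace-separated
    # "user movie rating" triple per line, for example (hypothetical values):
    #   1 31 2.5
    #   1 1029 3.0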
def objectiveFunc(self, users=None, movies=None):
        if users is None:
users = self.users
else:
sigma_U = np.std(users)
self.lambda_U = np.square(self.sigma/sigma_U)
        if movies is None:
movies = self.movies
else:
sigma_M = np.std(movies)
self.lambda_M = np.square(self.sigma/sigma_M)
# compute squared error
error = 0
for rating_tuple in self.ratings:
            i, j, rating = rating_tuple
            i, j = int(i), int(j)  # rows come from a float array; indices must be ints
            r_hat = np.sum(users[i-1] * movies[j-1])
error += (rating - r_hat)**2
# compute regularization
users_i_norm = 0
movies_j_norm = 0
for i in range(self.num_users):
for d in range(self.num_features):
users_i_norm += users[i, d]**2
for i in range(self.num_movies):
for d in range(self.num_features):
movies_j_norm += movies[i, d]**2
return error + self.lambda_U * users_i_norm + self.lambda_M * movies_j_norm
def train(self):
updated_users = np.zeros((self.num_users, self.num_features))
updated_movies = np.zeros((self.num_movies, self.num_features))
while (not self.converged):
cur_likelihood = self.objectiveFunc()
print "[", time.ctime(), "]", "current likelihood =", cur_likelihood, " learning rate =", self.learning_rate, " lambda U&M =", self.lambda_U, " ", self.lambda_M
self.updates_test(updated_users, updated_movies)
new_likelihood = self.objectiveFunc(self.new_users, self.new_movies)
if new_likelihood < cur_likelihood:
# on the right direction, speed up
self.updates()
self.learning_rate *= 1.25
if cur_likelihood - new_likelihood < self.threshold_likelihood:
# convergence threshold for likelihood
self.converged = True
else:
# just passed the objective, slow down
self.learning_rate *= 0.5
pass
if self.learning_rate <= self.threshold_learning_rate:
# convergence threshold for learning rate
self.converged = True
def updates(self):
for i in range(self.num_users):
for d in range(self.num_features):
self.users[i, d] = self.new_users[i, d]
for i in range(self.num_movies):
for d in range(self.num_features):
self.movies[i, d] = self.new_movies[i, d]
def updates_test(self, updated_users, updated_movies):
for rating_tuple in self.ratings:
            (i, j, rating) = rating_tuple
            i, j = int(i), int(j)  # rows come from a float array; indices must be ints
            # estimated rating for user i on movie j
            r_hat = np.sum(self.users[i-1] * self.movies[j-1])
# update each feature
for d in range(self.num_features):
updated_movies[j-1, d] += self.users[i-1, d] * (r_hat - rating)
updated_users[i-1, d] += self.movies[j-1, d] * (r_hat - rating)
for i in range(self.num_users):
for d in range(self.num_features):
self.new_users[i, d] = self.users[i, d] - self.learning_rate * (updated_users[i, d] + self.lambda_U * self.users[i, d])
for i in range(self.num_movies):
for d in range(self.num_features):
self.new_movies[i, d] = self.movies[i, d] - self.learning_rate * (updated_movies[i, d] + self.lambda_M * self.movies[i, d])
def save_model(self, trainedUsers, trainedMovies):
self.users.dump(trainedUsers)
self.movies.dump(trainedMovies)
def predict(self, trainedUsers, trainedMovies, inputTest, output):
with file(trainedUsers, 'r') as f:
users = pickle.load(f)
with file(trainedMovies, 'r') as f:
movies = pickle.load(f)
predicted_matrix = np.matrix(users) * np.matrix(movies).transpose()
predicted_matrix = np.array(predicted_matrix)
predicted_ratings = []
with file(inputTest, 'r') as f:
for line in f.readlines():
(user, movie) = line.strip().split()
predicted_ratings.append(predicted_matrix[int(user)-1][int(movie)-1])
with file(output, 'w') as f:
for rating in predicted_ratings:
f.write("%.3f\n" % rating)
if __name__ == "__main__":
# configurations
# latent features
numFeatures = 10
thresholdLikelihood = 0.1
thresholdLearningRate = 1e-5
# path of input dataset
inputTrain = "../dataset/train.txt"
inputTest = "../dataset/test.txt"
# path of output scores
outputPrediction = "./scores.txt"
# path of saved model
trainedUsers = "./trained_users_%sfeatures.pickle" % numFeatures
trainedMovies = "./trained_movies_%sfeatures.pickle" % numFeatures
    # TODO: choose an operation mode:
# True - training model, False - load saved model and predict
# trainFlag = True
trainFlag = False
# load data
myPredictor = PMFPredictor(inputTrain, num_features=numFeatures, \
train=trainFlag, thresholdLikelihood=thresholdLikelihood, thresholdLearningRate=thresholdLearningRate)
if trainFlag:
# train model
myPredictor.train()
# save model
myPredictor.save_model(trainedUsers, trainedMovies)
else:
# predict scores based on trained model
myPredictor.predict(trainedUsers, trainedMovies, inputTest, outputPrediction)
|
{
"content_hash": "76251e414ef264966115680f84d70a44",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 172,
"avg_line_length": 41.68681318681319,
"alnum_prop": 0.5795439567681561,
"repo_name": "gypleon/codesCloud",
"id": "b5b747da798c87c82183c23d864d34c2c33a7a89",
"size": "7629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assignments/3/B/PMFPredictor.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4603"
},
{
"name": "C++",
"bytes": "356781"
},
{
"name": "CMake",
"bytes": "12216"
},
{
"name": "HTML",
"bytes": "622"
},
{
"name": "Makefile",
"bytes": "237"
},
{
"name": "Python",
"bytes": "156015"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
}
|
debug = False
|
{
"content_hash": "55325e07fdb61634673f299675b9ecfc",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 13,
"avg_line_length": 14,
"alnum_prop": 0.7142857142857143,
"repo_name": "Akuryou/tornado-project-template",
"id": "a2601efb61a73043f2dfa595b607e6ab49141f5e",
"size": "14",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "settings/production.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "734"
},
{
"name": "Python",
"bytes": "1293"
}
],
"symlink_target": ""
}
|
from abc import ABC, abstractproperty
import torch
from .. import settings
from ..distributions import Delta, MultivariateNormal
from ..module import Module
from ..utils.broadcasting import _mul_broadcast_shape
from ..utils.memoize import cached, clear_cache_hook
class _VariationalStrategy(Module, ABC):
"""
Abstract base class for all Variational Strategies.
"""
def __init__(self, model, inducing_points, variational_distribution, learn_inducing_locations=True):
super().__init__()
# Model
object.__setattr__(self, "model", model)
# Inducing points
inducing_points = inducing_points.clone()
if inducing_points.dim() == 1:
inducing_points = inducing_points.unsqueeze(-1)
if learn_inducing_locations:
self.register_parameter(name="inducing_points", parameter=torch.nn.Parameter(inducing_points))
else:
self.register_buffer("inducing_points", inducing_points)
# Variational distribution
self._variational_distribution = variational_distribution
self.register_buffer("variational_params_initialized", torch.tensor(0))
def _expand_inputs(self, x, inducing_points):
"""
Pre-processing step in __call__ to make x the same batch_shape as the inducing points
"""
batch_shape = _mul_broadcast_shape(inducing_points.shape[:-2], x.shape[:-2])
inducing_points = inducing_points.expand(*batch_shape, *inducing_points.shape[-2:])
x = x.expand(*batch_shape, *x.shape[-2:])
return x, inducing_points
@abstractproperty
@cached(name="prior_distribution_memo")
def prior_distribution(self):
r"""
The :func:`~gpytorch.variational.VariationalStrategy.prior_distribution` method determines how to compute the
GP prior distribution of the inducing points, e.g. :math:`p(u) \sim N(\mu(X_u), K(X_u, X_u))`. Most commonly,
this is done simply by calling the user defined GP prior on the inducing point data directly.
:rtype: :obj:`~gpytorch.distributions.MultivariateNormal`
:return: The distribution :math:`p( \mathbf u)`
"""
raise NotImplementedError
@property
@cached(name="variational_distribution_memo")
def variational_distribution(self):
return self._variational_distribution()
def forward(self, x, inducing_points, inducing_values, variational_inducing_covar=None):
r"""
The :func:`~gpytorch.variational.VariationalStrategy.forward` method determines how to marginalize out the
inducing point function values. Specifically, forward defines how to transform a variational distribution
        over the inducing point values, :math:`q(u)`, into a variational distribution over the function values at
specified locations x, :math:`q(f|x)`, by integrating :math:`\int p(f|x, u)q(u)du`
:param torch.Tensor x: Locations :math:`\mathbf X` to get the
variational posterior of the function values at.
:param torch.Tensor inducing_points: Locations :math:`\mathbf Z` of the inducing points
:param torch.Tensor inducing_values: Samples of the inducing function values :math:`\mathbf u`
            (or the mean of the distribution :math:`q(\mathbf u)` if q is a Gaussian).
        :param ~gpytorch.lazy.LazyTensor variational_inducing_covar: If the distribution :math:`q(\mathbf u)`
is Gaussian, then this variable is the covariance matrix of that Gaussian. Otherwise, it will be
:attr:`None`.
:rtype: :obj:`~gpytorch.distributions.MultivariateNormal`
:return: The distribution :math:`q( \mathbf f(\mathbf X))`
"""
raise NotImplementedError
def kl_divergence(self):
r"""
Compute the KL divergence between the variational inducing distribution :math:`q(\mathbf u)`
and the prior inducing distribution :math:`p(\mathbf u)`.
:rtype: torch.Tensor
"""
with settings.max_preconditioner_size(0):
kl_divergence = torch.distributions.kl.kl_divergence(self.variational_distribution, self.prior_distribution)
return kl_divergence
def train(self, mode=True):
# Make sure we are clearing the cache if we change modes
if (self.training and not mode) or mode:
clear_cache_hook(self)
return super().train(mode=mode)
def __call__(self, x, prior=False):
# If we're in prior mode, then we're done!
if prior:
return self.model.forward(x)
# Delete previously cached items from the training distribution
if self.training:
clear_cache_hook(self)
# (Maybe) initialize variational distribution
if not self.variational_params_initialized.item():
prior_dist = self.prior_distribution
self._variational_distribution.initialize_variational_distribution(prior_dist)
self.variational_params_initialized.fill_(1)
# Ensure inducing_points and x are the same size
inducing_points = self.inducing_points
if inducing_points.shape[:-2] != x.shape[:-2]:
x, inducing_points = self._expand_inputs(x, inducing_points)
# Get p(u)/q(u)
variational_dist_u = self.variational_distribution
# Get q(f)
if isinstance(variational_dist_u, MultivariateNormal):
return super().__call__(
x,
inducing_points,
inducing_values=variational_dist_u.mean,
variational_inducing_covar=variational_dist_u.lazy_covariance_matrix,
)
elif isinstance(variational_dist_u, Delta):
return super().__call__(
x, inducing_points, inducing_values=variational_dist_u.mean, variational_inducing_covar=None
)
else:
raise RuntimeError(
f"Invalid variational distribuition ({type(variational_dist_u)}). "
"Expected a multivariate normal or a delta distribution."
)
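# Usage sketch for a concrete subclass (the names mirror gpytorch's public
# API, but treat the exact constructor signatures as assumptions):
#
#   variational_distribution = CholeskyVariationalDistribution(num_inducing)
#   strategy = VariationalStrategy(model, inducing_points,
#                                  variational_distribution,
#                                  learn_inducing_locations=True)
#   q_f = strategy(train_x)         # q(f) at the training inputs
#   kl = strategy.kl_divergence()   # KL(q(u) || p(u)) term of the ELBO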
|
{
"content_hash": "cdd6347fd5f7e31031980366bc9f56ab",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 120,
"avg_line_length": 43.8705035971223,
"alnum_prop": 0.6510331256149557,
"repo_name": "jrg365/gpytorch",
"id": "bbba3229e1ed2b83cbdad549dbbed266f2c79ad3",
"size": "6122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpytorch/variational/_variational_strategy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6005"
},
{
"name": "C++",
"bytes": "242"
},
{
"name": "Python",
"bytes": "338860"
}
],
"symlink_target": ""
}
|
import atexit
import logging
import subprocess
from pyroute2.remote import Transport
from pyroute2.remote import RemoteSocket
from pyroute2.iproute import RTNL_API
from pyroute2.netlink.rtnl.iprsocket import MarshalRtnl
log = logging.getLogger(__name__)
class ShellIPR(RTNL_API, RemoteSocket):
def __init__(self, target):
self.target = target
cmd = '%s python -m pyroute2.remote' % target
self.shell = subprocess.Popen(cmd.split(),
bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
self.trnsp_in = Transport(self.shell.stdout)
self.trnsp_out = Transport(self.shell.stdin)
try:
super(ShellIPR, self).__init__()
except Exception:
self.close()
raise
atexit.register(self.close)
self.marshal = MarshalRtnl()
def clone(self):
return type(self)(self.target)
def _cleanup_atexit(self):
if hasattr(atexit, 'unregister'):
atexit.unregister(self.close)
else:
try:
atexit._exithandlers.remove((self.close, (), {}))
except ValueError:
pass
def close(self):
self._cleanup_atexit()
try:
super(ShellIPR, self).close()
except:
# something went wrong, force server shutdown
try:
self.trnsp_out.send({'stage': 'shutdown'})
except Exception:
pass
log.error('forced shutdown procedure, clean up netns manually')
# force cleanup command channels
for close in (self.trnsp_in.close, self.trnsp_out.close):
try:
close()
except Exception:
pass # Maybe already closed in remote.Client.close
self.shell.kill()
self.shell.wait()
def post_init(self):
pass
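# Usage sketch (assumes passwordless SSH to the target host and pyroute2
# installed there; the address is hypothetical):
#
#   ipr = ShellIPR('ssh root@192.0.2.10')
#   for link in ipr.get_links():
#       print(link.get_attr('IFLA_IFNAME'))
#   ipr.close()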
|
{
"content_hash": "57ac17563941d0090782cc2b077634d6",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 75,
"avg_line_length": 29.746268656716417,
"alnum_prop": 0.5524335173105871,
"repo_name": "craneworks/python-pyroute2",
"id": "24e0502d41451d9fb4d8c49dda5fd788c6362da6",
"size": "1993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyroute2/remote/shell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "860914"
}
],
"symlink_target": ""
}
|
from thumbnails.conf import settings
from thumbnails.engines import DummyEngine
from thumbnails.helpers import get_engine, generate_filename, get_cache_backend
from thumbnails.images import SourceFile, Thumbnail
__version__ = '0.5.1'
def get_thumbnail(original, size, **options):
"""
Creates or gets an already created thumbnail for the given image with the given size and
options.
    :param original: File-path, url or base64-encoded string of the image that you want a
                     thumbnail of.
:param size: String with the wanted thumbnail size. On the form: ``200x200``, ``200`` or
``x200``.
:param crop: Crop settings, should be ``center``, ``top``, ``right``, ``bottom``, ``left``.
:param force: If set to ``True`` the thumbnail will be created even if it exists before.
:param quality: Overrides ``THUMBNAIL_QUALITY``, will set the quality used by the backend while
saving the thumbnail.
:param scale_up: Overrides ``THUMBNAIL_SCALE_UP``, if set to ``True`` the image will be scaled
up if necessary.
:param colormode: Overrides ``THUMBNAIL_COLORMODE``, The default colormode for thumbnails.
Supports all values supported by pillow. In other engines there is a best
effort translation from pillow modes to the modes supported by the current
engine.
    :param format: Overrides the format the thumbnail will be saved in. This will override both
                   the detected file type and the one specified in ``THUMBNAIL_FALLBACK_FORMAT``.
:return: A Thumbnail object
"""
engine = get_engine()
cache = get_cache_backend()
original = SourceFile(original)
crop = options.get('crop', None)
options = engine.evaluate_options(options)
thumbnail_name = generate_filename(original, size, crop)
if settings.THUMBNAIL_DUMMY:
engine = DummyEngine()
return engine.get_thumbnail(thumbnail_name, engine.parse_size(size), crop, options)
cached = cache.get(thumbnail_name)
force = options is not None and 'force' in options and options['force']
if not force and cached:
return cached
thumbnail = Thumbnail(thumbnail_name, engine.get_format(original, options))
if force or not thumbnail.exists:
size = engine.parse_size(size)
thumbnail.image = engine.get_thumbnail(original, size, crop, options)
thumbnail.save(options)
for resolution in settings.THUMBNAIL_ALTERNATIVE_RESOLUTIONS:
resolution_size = engine.calculate_alternative_resolution_size(resolution, size)
image = engine.get_thumbnail(original, resolution_size, crop, options)
thumbnail.save_alternative_resolution(resolution, image, options)
cache.set(thumbnail)
return thumbnail
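if __name__ == '__main__':
    # Illustrative only: the image path is hypothetical and a configured
    # engine, cache backend and storage are assumed.
    thumbnail = get_thumbnail('photos/example.jpg', '200x200', crop='center')
    print(thumbnail)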
|
{
"content_hash": "7232c9590b9e5081b63874d1a67252ae",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 100,
"avg_line_length": 45.61904761904762,
"alnum_prop": 0.675365344467641,
"repo_name": "python-thumbnails/python-thumbnails",
"id": "412425d9e6174c8adcdee1a34ac95bdce60f035c",
"size": "2899",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "thumbnails/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91933"
}
],
"symlink_target": ""
}
|
from django.contrib.sites.models import Site
from django.db.models import Prefetch
from django.db.models.signals import post_delete, post_save
from django.utils.html import escape
from django.utils.safestring import mark_safe
from cms.cache.choices import _page_cache_key, _site_cache_key, clean_page_choices_cache, clean_site_choices_cache
from cms.models import Page, Title
from cms.utils import i18n
def get_sites():
sites = (
Site
.objects
.filter(djangocms_nodes__isnull=False)
.order_by('name')
.distinct()
)
return sites
def get_page_choices_for_site(site, language):
fallbacks = i18n.get_fallback_languages(language, site_id=site.pk)
languages = [language] + fallbacks
translation_lookup = Prefetch(
'title_set',
to_attr='filtered_translations',
queryset=Title.objects.filter(language__in=languages).only('pk', 'page', 'language', 'title')
)
pages = (
Page
.objects
.drafts()
.on_site(site)
.select_related('node')
.prefetch_related(translation_lookup)
.order_by('node__path')
.only('pk', 'node')
)
for page in pages:
translations = page.filtered_translations
titles_by_language = {trans.language: trans.title for trans in translations}
for language in languages:
# EmptyTitle is used to prevent the cms from trying
# to find a translation in the database
if language in titles_by_language:
title = titles_by_language[language]
indent = " " * (page.node.depth - 1)
label = mark_safe(f"{indent}{escape(title)}")
yield (page.pk, label)
break
def update_site_and_page_choices(language=None):
if language is None:
language = i18n.get_current_language()
site_choices = []
page_choices = [('', '----')]
site_choices_key = _site_cache_key(language)
page_choices_key = _page_cache_key(language)
for site in get_sites():
_page_choices = list(get_page_choices_for_site(site, language))
site_choices.append((site.pk, site.name))
page_choices.append((site.name, _page_choices))
from django.core.cache import cache
# We set it to 1 day here because we actively invalidate this cache.
cache.set(site_choices_key, site_choices, 86400)
cache.set(page_choices_key, page_choices, 86400)
return site_choices, page_choices
def get_site_choices(lang=None):
from django.core.cache import cache
lang = lang or i18n.get_current_language()
site_choices = cache.get(_site_cache_key(lang))
if site_choices is None:
site_choices = update_site_and_page_choices(lang)[0]
return site_choices
def get_page_choices(lang=None):
from django.core.cache import cache
lang = lang or i18n.get_current_language()
page_choices = cache.get(_page_cache_key(lang))
if page_choices is None:
page_choices = update_site_and_page_choices(lang)[1]
return page_choices
post_save.connect(clean_page_choices_cache, sender=Page)
post_save.connect(clean_site_choices_cache, sender=Site)
post_delete.connect(clean_page_choices_cache, sender=Page)
post_delete.connect(clean_site_choices_cache, sender=Site)
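# Usage sketch (these helpers typically back the CMS page/site select fields):
#
#   site_choices = get_site_choices('en')  # [(site_pk, site_name), ...]
#   page_choices = get_page_choices('en')  # [('', '----'), (site_name, [(pk, label), ...]), ...]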
|
{
"content_hash": "e6e7c7305820cabca1e3653c28efb6c3",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 114,
"avg_line_length": 33.29,
"alnum_prop": 0.6566536497446681,
"repo_name": "rsalmaso/django-cms",
"id": "e1fdd329a2155e7691c00fe2dd958a1f07cd6e7f",
"size": "3329",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/forms/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "204223"
},
{
"name": "JavaScript",
"bytes": "1250281"
},
{
"name": "Python",
"bytes": "2386268"
},
{
"name": "SCSS",
"bytes": "137693"
},
{
"name": "Shell",
"bytes": "22511"
}
],
"symlink_target": ""
}
|
"""
Clustertools offers the possibility to save partial results. This is handy with
long computations that can get interrupted (by a job scheduler like Slurm for
instance).
You just need to use the :meth:`save_result` method from the `Computation`
class.
So far, there is no way to restart an interrupted computation from the last
save point. Pull requests are welcome(d).
To run this example, open two terminals. In the first one, run this example
(`python 009_saving_progress.py front-end`). In the second, run
`python 009_partial_results.py`. Every few seconds, you should see the root
estimate gain precision.
"""
from clustertools import Computation, CTParser, ParameterSet, \
Experiment, set_stdout_logging
class MyComputation(Computation):
"""
    Inherit from `Computation` and redefine the `run` method as you wish.
This computation will perform steps of the bisection method to find a
    root for x^3 - x - 2 on [a, b] up to a precision of 1e-50 (which takes
    a few hundred halvings of the interval)
"""
def run(self, result, a, b, **parameters):
import time
# The polynome for which we are trying to find the roots
def P(x):
return x**3 - x - 2
# Bisection algorithm
m = i = 0
while abs(a-b) >= 1e-50:
m = (a+b)/2.
if P(a)*P(m) < 0:
b = m
else:
a = m
# Update result at each iteration. Saving the iteration number is
# just there for nicer formatting on the result side
result["root"] = m
result["iteration"] = i
if i % 10 == 0:
# Saving the result every 10 iterations
self.save_result()
# Wait some time to be able to see the update when running
# `python 009_partial_results.py`
time.sleep(1)
i += 1
if __name__ == "__main__":
set_stdout_logging()
parser = CTParser()
environment, _ = parser.parse()
param_set = ParameterSet()
param_set.add_parameters(a=1, b=2)
experiment = Experiment("BasicUsagePartialSave", param_set, MyComputation)
environment.run(experiment)
|
{
"content_hash": "94e4f5fd9c08a4531b1f6a8880b1b153",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 79,
"avg_line_length": 29.743243243243242,
"alnum_prop": 0.6201726487960019,
"repo_name": "jm-begon/clustertools",
"id": "21fefebf196f09bd684be6b6b93a63c904844a7c",
"size": "2248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/009_saving_progress.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "311"
},
{
"name": "Python",
"bytes": "225823"
},
{
"name": "Shell",
"bytes": "571"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('token_blacklist', '0005_remove_outstandingtoken_jti'),
]
operations = [
migrations.RenameField(
model_name='outstandingtoken',
old_name='jti_hex',
new_name='jti',
),
]
|
{
"content_hash": "83e735de9a4bbdfa687143ac982300e5",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 64,
"avg_line_length": 21.11111111111111,
"alnum_prop": 0.5947368421052631,
"repo_name": "kawamon/hue",
"id": "7a4067bb5ce7f6c314eb06ba9d893033c8d06401",
"size": "453",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/djangorestframework_simplejwt-3.3/rest_framework_simplejwt/token_blacklist/migrations/0006_auto_20171017_2113.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
"""Environments used for testing and benchmarking.
These are not a core part of the imitation package. They are relatively lightly tested,
and may be changed without warning.
"""
# Register environments with Gym
from imitation.envs.examples import airl_envs, model_envs # noqa: F401
|
{
"content_hash": "b5b6f62f6e24cfac7a97a9a3bc03ffa8",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 87,
"avg_line_length": 35.75,
"alnum_prop": 0.7832167832167832,
"repo_name": "humancompatibleai/imitation",
"id": "e836e5c463b43076c4fe12d3cc7a190ddac40dcf",
"size": "286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/imitation/envs/examples/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2671"
},
{
"name": "Python",
"bytes": "462051"
},
{
"name": "Shell",
"bytes": "10972"
}
],
"symlink_target": ""
}
|
import sys, itertools, functools
__izip_longest__ = itertools.izip_longest if sys.version_info.major < 3 else itertools.zip_longest
intofdata = lambda data: functools.reduce(lambda t, c: t * 256 | c, bytearray(data), 0)
dataofint = lambda integer: ((integer == 0) and b'\0') or (dataofint(integer // 256).lstrip(b'\0') + bytes(bytearray([integer % 256])[:1]))
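# Worked example: the two helpers are big-endian inverses of each other,
# e.g. intofdata(b'\x01\x00') == 256 and dataofint(256) == b'\x01\x00'.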
from . import jp2, jfif
|
{
"content_hash": "92509201d44a17d0af088eb3bae4e6db",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 139,
"avg_line_length": 48.375,
"alnum_prop": 0.6976744186046512,
"repo_name": "arizvisa/syringe",
"id": "3ed8a44ff9095e6f25cd5065dcdf41f4002edf01",
"size": "387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "template/image/jpeg/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "22844"
},
{
"name": "C",
"bytes": "11095"
},
{
"name": "HTML",
"bytes": "1761"
},
{
"name": "Makefile",
"bytes": "1228"
},
{
"name": "Perl",
"bytes": "9176"
},
{
"name": "Python",
"bytes": "4312979"
},
{
"name": "Shell",
"bytes": "171"
},
{
"name": "XQuery",
"bytes": "1884"
},
{
"name": "XSLT",
"bytes": "10518"
}
],
"symlink_target": ""
}
|
"""Custom mail app exceptions"""
class MultiEmailValidationError(Exception):
"""
General exception for failures while validating multiple emails
"""
def __init__(self, invalid_emails, msg=None):
"""
Args:
invalid_emails (set of str): All email addresses that failed validation
msg (str): A custom exception message
"""
self.invalid_emails = invalid_emails
super().__init__(msg)
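# Usage sketch (the addresses are hypothetical):
#
#   raise MultiEmailValidationError(
#       {"not-an-email", "missing-domain@"},
#       msg="Some recipient addresses failed validation",
#   )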
|
{
"content_hash": "bf3cae228703456a34dab6623a69810a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 83,
"avg_line_length": 28.8125,
"alnum_prop": 0.6117136659436009,
"repo_name": "mitodl/bootcamp-ecommerce",
"id": "51b931d53004a5b305941659f5ab316c3fcd7489",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mail/v2/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "325"
},
{
"name": "Dockerfile",
"bytes": "998"
},
{
"name": "HTML",
"bytes": "70605"
},
{
"name": "JavaScript",
"bytes": "491664"
},
{
"name": "Procfile",
"bytes": "293"
},
{
"name": "Python",
"bytes": "1236492"
},
{
"name": "SCSS",
"bytes": "72463"
},
{
"name": "Shell",
"bytes": "7329"
}
],
"symlink_target": ""
}
|
import sys
import time
from core import pycore
from core.constants import *
from core.api import coreapi
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
msg_types = {
'node': (1, coreapi.CORE_API_NODE_MSG, 'NODE'),
    'link': (2, coreapi.CORE_API_LINK_MSG, 'LINK'),
'execute': (3, coreapi.CORE_API_EXEC_MSG, 'EXEC'),
'register': (4, coreapi.CORE_API_REG_MSG, 'REG'),
'configuration': (5, coreapi.CORE_API_CONF_MSG, 'CONF'),
'file': (6, coreapi.CORE_API_FILE_MSG, 'FILE'),
'interface': (7, coreapi.CORE_API_IFACE_MSG, 'IFACE'),
'event': (8, coreapi.CORE_API_EVENT_MSG, 'EVENT'),
'session': (9, coreapi.CORE_API_SESS_MSG, 'SESS'),
'exception': (10, coreapi.CORE_API_EXCP_MSG, 'EXCP')
}
class MsgHandler():
master = None
localport = None
socket = None
handled_msg_types = None
def __init__(self):
self.master = False
self.localport = None
self.socket = None
self.setup()
def setup(self):
self.handled_msg_types = {
coreapi.CORE_API_SESS_MSG:
{'handler': None, 'callbacks': []},
coreapi.CORE_API_CONF_MSG:
{'handler': None, 'callbacks': []},
coreapi.CORE_API_EVENT_MSG:
{'handler': None, 'callbacks': []},
coreapi.CORE_API_EXCP_MSG:
{'handler': None, 'callbacks': []},
coreapi.CORE_API_EXEC_MSG:
{'handler': None, 'callbacks': []},
coreapi.CORE_API_FILE_MSG:
{'handler': None, 'callbacks': []},
coreapi.CORE_API_IFACE_MSG:
{'handler': None, 'callbacks': []},
coreapi.CORE_API_LINK_MSG:
{'handler': None, 'callbacks': []},
coreapi.CORE_API_NODE_MSG:
{'handler': None, 'callbacks': []},
coreapi.CORE_API_REG_MSG:
{'handler': None, 'callbacks': []},
}
def set_local_port(self, port):
self.localport = port
def set_socket(self, socket):
self.socket = socket
def set_msg_callback(self, msg_type, callback):
        if msg_type not in self.handled_msg_types:
print('[ERROR] unsupported msg_type: %s' % str(msg_type))
return False
        if callback in self.handled_msg_types[msg_type]['callbacks']:
print('[ERROR] callback already registered')
return False
self.handled_msg_types[msg_type]['callbacks'].append(callback)
return True
def decode_message(self, data):
''' Retrieve a message from a socket and return the CoreMessage object or
None upon disconnect. Socket data beyond the first message is dropped.
'''
msghdr = data[:coreapi.CoreMessage.hdrsiz]
if len(msghdr) == 0:
return None
msgdata = None
msgtype, msgflags, msglen = coreapi.CoreMessage.unpackhdr(msghdr)
if msglen:
msgdata = data[coreapi.CoreMessage.hdrsiz:]
try:
msgcls = coreapi.msg_class(msgtype)
except KeyError:
msg = coreapi.CoreMessage(msgflags, msghdr, msgdata)
msg.msgtype = msgtype
print("unimplemented CORE message type: %s" % msg.typestr())
return msg
if len(data) > msglen + coreapi.CoreMessage.hdrsiz:
print("received a message of type %d, dropping %d bytes of extra data" \
% (msgtype, len(data) - (msglen + coreapi.CoreMessage.hdrsiz)))
return msgcls(msgflags, msghdr, msgdata)
def sendall(self, msg):
        # CORE's Broker calls sendall() on its handler; recv_msg() describes
        # the actual behaviour better, so simply forward to it.
self.recv_msg(msg)
def recv_msg(self, msg):
msg = self.decode_message(msg)
if msg.msgtype in self.handled_msg_types:
if not self.handled_msg_types[msg.msgtype]['handler'] is None:
self.handled_msg_types[msg.msgtype]['handler'](msg)
for callback in self.handled_msg_types[msg.msgtype]['callbacks']:
callback(msg)
else:
print('[ERROR] message type not supported: %d: %s' %
                  (msg.msgtype, msg.typestr()))
class TLVHelper():
@staticmethod
def str_to_msgtypenum(s):
""" Convert a shorthand string into a message type number """
        # expand shorthand like 'node' into 'CORE_API_NODE_MSG'
        fulltypestr = "CORE_API_%s_MSG" % s.upper()
for k, v in coreapi.message_types.items():
if v == fulltypestr:
return k
return None
@staticmethod
def str_to_msgflagnum(s):
flagname = "CORE_API_%s_FLAG" % s.upper()
for (k, v) in coreapi.message_flags.items():
if v == flagname:
return k
return None
@staticmethod
def tlvname_to_num(tlv_cls, name):
""" Convert the given TLV Type class and TLV name to the TLV number """
for (k, v) in tlv_cls.tlvtypemap.items():
if v == name:
return k
return None
@staticmethod
def str_to_tlvname(t, s):
""" Convert the given TLV type t and string s to a TLV name """
return "CORE_TLV_%s_%s" % (t.upper(), s.upper())
@staticmethod
def parse_tlv_stringlist(args):
if len(args) < 3:
print(('[ERROR] message too short. specify at least msgtype, tlvtype and '
'tlvdata'))
return (False, None, 0, None)
msg_type = args[0].lower()
        if msg_type not in msg_types:
print('[ERROR] unsupported msg_type: %s' % msg_type)
return (False, None, 0, None)
if msg_types[msg_type][1] is None:
print('[ERROR] msg_type: %s not implemented' % msg_type)
return (False, None, 0, None)
msg_cls = coreapi.msgclsmap[msg_types[msg_type][0]]
tlv_cls = msg_cls.tlvcls
# pop msg_type from arg list
args.pop(0)
flags_or_tlvtype = args[0].lower()
flags = 0
if flags_or_tlvtype.startswith('flags='):
# build a message consisting of TLVs from 'type=value' arguments
flagstr = flags_or_tlvtype.split('=')[1]
for f in flagstr.split(","):
if f == '':
continue
n = TLVHelper.str_to_msgflagnum(f)
if n is None:
print('[ERROR] Invalid flag "%s"' % f)
return (False, None, 0, None)
flags |= n
# pop flags from arg list
args.pop(0)
tlv_data_list = []
while len(args) >= 2:
tlv_type_raw = args.pop(0)
tlv_value = args.pop(0)
tlv_name = TLVHelper.str_to_tlvname(msg_types[msg_type][2], tlv_type_raw)
tlv_type = TLVHelper.tlvname_to_num(tlv_cls, tlv_name)
if tlv_name not in list(tlv_cls.tlvtypemap.values()):
print("[ERROR] Unknown TLV: '%s' / %s:%s" % (tlv_type_raw, tlv_name, str(args)))
return (False, None, 0, None)
tlv_data_list.append(tlv_cls.packstring(tlv_type, tlv_value))
tlv_data = b''.join(tlv_data_list)
return (True, msg_cls, flags, tlv_data)
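    # Illustrative call, turning shorthand CLI arguments into message parts:
    #
    #   ok, msg_cls, flags, tlv_data = TLVHelper.parse_tlv_stringlist(
    #       ['node', 'flags=add', 'number', '1'])
    #
    # assuming 'number' maps to a valid CORE_TLV_NODE_* name; the caller can
    # then pack msg_cls with the returned flags and TLV data.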
class CoreConnection():
session = None
server = None
port = None
localport = None
socket = None
msg_handler = None
requested_session = None
requested_session_connected = None
def __init__(self, persistent = False):
self.session = pycore.Session(persistent = persistent)
self.session.verbose = False
self.session.broker.verbose = False
# TODO: mv to config-file
self.session.cfg['clientlogfile'] = '/var/log/core-client.log'
self.server = None
self.port = None
self.localport = None
self.socket = None
self.set_message_handler()
self.requested_session = None
self.requested_session_connected = False
# declare classes for use with Broker
coreapi.add_node_class("CORE_NODE_DEF",
coreapi.CORE_NODE_DEF, pycore.nodes.CoreNode)
coreapi.add_node_class("CORE_NODE_PHYS",
coreapi.CORE_NODE_PHYS, pycore.pnodes.PhysicalNode)
try:
coreapi.add_node_class("CORE_NODE_XEN",
coreapi.CORE_NODE_XEN, pycore.xen.XenNode)
except Exception:
pass
coreapi.add_node_class("CORE_NODE_TBD",
coreapi.CORE_NODE_TBD, None)
coreapi.add_node_class("CORE_NODE_SWITCH",
coreapi.CORE_NODE_SWITCH, pycore.nodes.SwitchNode)
coreapi.add_node_class("CORE_NODE_HUB",
coreapi.CORE_NODE_HUB, pycore.nodes.HubNode)
coreapi.add_node_class("CORE_NODE_WLAN",
coreapi.CORE_NODE_WLAN, pycore.nodes.WlanNode)
coreapi.add_node_class("CORE_NODE_RJ45",
coreapi.CORE_NODE_RJ45, pycore.nodes.RJ45Node)
coreapi.add_node_class("CORE_NODE_TUNNEL",
coreapi.CORE_NODE_TUNNEL, pycore.nodes.TunnelNode)
coreapi.add_node_class("CORE_NODE_EMANE",
coreapi.CORE_NODE_EMANE, pycore.nodes.EmaneNode)
def set_server(self, server, port = None):
self.server = server
self.port = port
def set_local_port(self, port):
self.localport = port
        if self.msg_handler is not None:
self.msg_handler.set_local_port(port)
def set_socket(self, socket):
self.socket = socket
        if self.msg_handler is not None:
self.msg_handler.set_socket(socket)
def connect(self):
if self.server is None:
return
if self.port is None:
port = coreapi.CORE_API_PORT
else:
port = self.port
self.session.broker.addserver(self.server, self.server, port)
if self.session.broker.servers[self.server][2] is None:
self.session.broker.delserver(self.server)
return False
self.set_local_port(self.session.broker.servers[self.server][2].getsockname()[1])
self.set_socket(self.session.broker.servers[self.server][2])
return True
def disconnect(self):
self.session.broker.delserver(self.server)
self.socket = None
self.localport = None
self.msg_handler.set_socket(None)
self.msg_handler.set_local_port(None)
def set_message_handler(self, callback = None):
if callback is None:
self.msg_handler = MsgHandler()
else:
self.msg_handler = callback
self.session.connect(self.msg_handler)
return self.msg_handler
|
{
"content_hash": "69f1d41a6d380ca5198e5fa65d8275fc",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 104,
"avg_line_length": 35.08571428571429,
"alnum_prop": 0.5509410061527326,
"repo_name": "Benocs/core",
"id": "f9f1ec541db0a67e8440ec805be80281abd14d0a",
"size": "11257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/daemon/core/connection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "215246"
},
{
"name": "C++",
"bytes": "291678"
},
{
"name": "CSS",
"bytes": "549"
},
{
"name": "D",
"bytes": "3559"
},
{
"name": "Java",
"bytes": "427277"
},
{
"name": "Makefile",
"bytes": "10426"
},
{
"name": "Python",
"bytes": "1193207"
},
{
"name": "Shell",
"bytes": "33396"
},
{
"name": "Tcl",
"bytes": "1041249"
}
],
"symlink_target": ""
}
|
import os
import ssl
import gzip
from six.moves import cPickle  # pickle on Python 3, cPickle on Python 2
from six.moves import urllib
def load_pickle(data_path):
data_dir, data_file = os.path.split(data_path)
if data_dir == "" and not os.path.isfile(data_path):
new_path = os.path.join(os.path.split(__file__)[0], data_path)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
data_path = new_path
# Download the MNIST dataset if it is not present
if (not os.path.isfile(data_path)) and data_file == 'mnist.pkl.gz':
origin = ('http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz')
print('Downloading data from %s' % origin)
        # urlretrieve does not accept an SSL context, so install an
        # unverified default context for this download.
        ssl._create_default_https_context = ssl._create_unverified_context
        urllib.request.urlretrieve(origin, data_path)
print('... loading data')
# Load the data_path
with gzip.open(data_path, 'rb') as f:
try:
data = cPickle.load(f, encoding='latin1')
except:
data = cPickle.load(f)
return data
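# Usage sketch: the MNIST pickle conventionally holds three (data, labels)
# splits, so a typical call is:
#
#   train_set, valid_set, test_set = load_pickle('mnist.pkl.gz')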
|
{
"content_hash": "221bb268bc3f7b9566d18e4a1cfa877f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 28.677419354838708,
"alnum_prop": 0.6895388076490439,
"repo_name": "mingkaic/rocnnet",
"id": "75f31873118c00c5cf6161598033bd39450b4f96",
"size": "911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/boostpy/python/pickle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "648938"
},
{
"name": "CMake",
"bytes": "22870"
},
{
"name": "Jupyter Notebook",
"bytes": "6228"
},
{
"name": "Python",
"bytes": "36976"
},
{
"name": "Shell",
"bytes": "9139"
}
],
"symlink_target": ""
}
|
from electrum_cesc.util import print_error
from urlparse import urlparse, parse_qs
from PyQt4.QtGui import QPushButton, QMessageBox, QDialog, QVBoxLayout, QHBoxLayout, QGridLayout, QLabel, QLineEdit, QComboBox
from PyQt4.QtCore import Qt
from electrum_cesc.i18n import _
import re
import os
from electrum_cesc import Transaction
from electrum_cesc.bitcoin import MIN_RELAY_TX_FEE, is_valid
from electrum_cesc_gui.qt.qrcodewidget import QRCodeWidget
from electrum_cesc import bmp
from electrum_cesc_gui.qt import HelpButton, EnterButton
import json
try:
import zbar
except ImportError:
zbar = None
from electrum_cesc import BasePlugin
class Plugin(BasePlugin):
def fullname(self): return 'QR scans'
def description(self): return "QR Scans.\nInstall the zbar package (http://zbar.sourceforge.net/download.html) to enable this plugin"
def __init__(self, gui, name):
BasePlugin.__init__(self, gui, name)
self._is_available = self._init()
def _init(self):
if not zbar:
return False
try:
proc = zbar.Processor()
proc.init(video_device=self.video_device())
except zbar.SystemError:
# Cannot open video device
pass
#return False
return True
def load_wallet(self, wallet):
b = QPushButton(_("Scan QR code"))
b.clicked.connect(self.fill_from_qr)
self.send_tab_grid.addWidget(b, 1, 5)
b2 = QPushButton(_("Scan TxQR"))
b2.clicked.connect(self.read_raw_qr)
if not wallet.seed:
b3 = QPushButton(_("Show unsigned TxQR"))
b3.clicked.connect(self.show_raw_qr)
self.send_tab_grid.addWidget(b3, 7, 1)
self.send_tab_grid.addWidget(b2, 7, 2)
else:
self.send_tab_grid.addWidget(b2, 7, 1)
def is_available(self):
return self._is_available
def create_send_tab(self, grid):
self.send_tab_grid = grid
def scan_qr(self):
proc = zbar.Processor()
try:
proc.init(video_device=self.video_device())
except zbar.SystemError, e:
            QMessageBox.warning(self.gui.main_window, _('Error'), str(e), _('OK'))
return
proc.visible = True
while True:
try:
proc.process_one()
except Exception:
# User closed the preview window
return {}
for r in proc.results:
if str(r.type) != 'QRCODE':
continue
return r.data
def show_raw_qr(self):
r = unicode( self.gui.main_window.payto_e.text() )
r = r.strip()
# label or alias, with address in brackets
m = re.match('(.*?)\s*\<([1-9A-HJ-NP-Za-km-z]{26,})\>', r)
to_address = m.group(2) if m else r
if not is_valid(to_address):
QMessageBox.warning(self.gui.main_window, _('Error'), _('Invalid Cryptoescudo Address') + ':\n' + to_address, _('OK'))
return
try:
amount = self.gui.main_window.read_amount(unicode( self.gui.main_window.amount_e.text()))
except Exception:
QMessageBox.warning(self.gui.main_window, _('Error'), _('Invalid Amount'), _('OK'))
return
try:
fee = self.gui.main_window.read_amount(unicode( self.gui.main_window.fee_e.text()))
except Exception:
QMessageBox.warning(self.gui.main_window, _('Error'), _('Invalid Fee'), _('OK'))
return
try:
tx = self.gui.main_window.wallet.mktx( [(to_address, amount)], None, fee)
except Exception as e:
self.gui.main_window.show_message(str(e))
return
if fee < tx.required_fee(self.gui.main_window.wallet.verifier):
QMessageBox.warning(self.gui.main_window, _('Error'), _("This transaction requires a higher fee, or it will not be propagated by the network."), _('OK'))
return
try:
out = {
"hex" : tx.hash(),
"complete" : "false"
}
input_info = []
except Exception as e:
self.gui.main_window.show_message(str(e))
try:
json_text = json.dumps(tx.as_dict()).replace(' ', '')
self.show_tx_qrcode(json_text, 'Unsigned Transaction')
except Exception as e:
self.gui.main_window.show_message(str(e))
def show_tx_qrcode(self, data, title):
if not data: return
d = QDialog(self.gui.main_window)
d.setModal(1)
d.setWindowTitle(title)
d.setMinimumSize(250, 525)
vbox = QVBoxLayout()
qrw = QRCodeWidget(data)
vbox.addWidget(qrw, 0)
hbox = QHBoxLayout()
hbox.addStretch(1)
def print_qr(self):
filename = "qrcode.bmp"
            bmp.save_qrcode(qrw.qr, filename)
QMessageBox.information(None, _('Message'), _("QR code saved to file") + " " + filename, _('OK'))
b = QPushButton(_("Save"))
hbox.addWidget(b)
b.clicked.connect(print_qr)
b = QPushButton(_("Close"))
hbox.addWidget(b)
b.clicked.connect(d.accept)
b.setDefault(True)
vbox.addLayout(hbox, 1)
d.setLayout(vbox)
d.exec_()
def read_raw_qr(self):
qrcode = self.scan_qr()
if qrcode:
tx = self.gui.main_window.tx_from_text(qrcode)
if tx:
self.create_transaction_details_window(tx)
def create_transaction_details_window(self, tx):
dialog = QDialog(self.gui.main_window)
dialog.setMinimumWidth(500)
dialog.setWindowTitle(_('Process Offline transaction'))
dialog.setModal(1)
l = QGridLayout()
dialog.setLayout(l)
l.addWidget(QLabel(_("Transaction status:")), 3,0)
l.addWidget(QLabel(_("Actions")), 4,0)
        if not tx.is_complete:
l.addWidget(QLabel(_("Unsigned")), 3,1)
if self.gui.main_window.wallet.seed :
b = QPushButton("Sign transaction")
b.clicked.connect(lambda: self.sign_raw_transaction(tx, tx.inputs, dialog))
l.addWidget(b, 4, 1)
else:
l.addWidget(QLabel(_("Wallet is de-seeded, can't sign.")), 4,1)
else:
l.addWidget(QLabel(_("Signed")), 3,1)
b = QPushButton("Broadcast transaction")
def broadcast(tx):
result, result_message = self.gui.main_window.wallet.sendtx( tx )
if result:
self.gui.main_window.show_message(_("Transaction successfully sent:")+' %s' % (result_message))
if dialog:
dialog.done(0)
else:
self.gui.main_window.show_message(_("There was a problem sending your transaction:") + '\n %s' % (result_message))
b.clicked.connect(lambda: broadcast( tx ))
l.addWidget(b,4,1)
closeButton = QPushButton(_("Close"))
closeButton.clicked.connect(lambda: dialog.done(0))
l.addWidget(closeButton, 4,2)
dialog.exec_()
def do_protect(self, func, args):
if self.gui.main_window.wallet.use_encryption:
password = self.gui.main_window.password_dialog()
if not password:
return
else:
password = None
if args != (False,):
args = (self,) + args + (password,)
else:
args = (self,password)
apply( func, args)
def protected(func):
return lambda s, *args: s.do_protect(func, args)
@protected
def sign_raw_transaction(self, tx, input_info, dialog ="", password = ""):
try:
self.gui.main_window.wallet.signrawtransaction(tx, input_info, [], password)
txtext = json.dumps(tx.as_dict()).replace(' ', '')
self.show_tx_qrcode(txtext, 'Signed Transaction')
except Exception as e:
self.gui.main_window.show_message(str(e))
def fill_from_qr(self):
qrcode = parse_uri(self.scan_qr())
if not qrcode:
return
if 'address' in qrcode:
self.gui.main_window.payto_e.setText(qrcode['address'])
if 'amount' in qrcode:
self.gui.main_window.amount_e.setText(str(qrcode['amount']))
if 'label' in qrcode:
self.gui.main_window.message_e.setText(qrcode['label'])
if 'message' in qrcode:
self.gui.main_window.message_e.setText("%s (%s)" % (self.gui.main_window.message_e.text(), qrcode['message']))
def video_device(self):
device = self.config.get("video_device", "default")
if device == 'default':
device = ''
return device
def requires_settings(self):
return True
def settings_widget(self, window):
return EnterButton(_('Settings'), self.settings_dialog)
def _find_system_cameras(self):
device_root = "/sys/class/video4linux"
devices = {} # Name -> device
if os.path.exists(device_root):
for device in os.listdir(device_root):
name = open(os.path.join(device_root, device, 'name')).read()
devices[name] = os.path.join("/dev",device)
return devices
def settings_dialog(self):
system_cameras = self._find_system_cameras()
d = QDialog()
layout = QGridLayout(d)
layout.addWidget(QLabel("Choose a video device:"),0,0)
# Create a combo box with the available video devices:
combo = QComboBox()
# on change trigger for video device selection, makes the
# manual device selection only appear when needed:
def on_change(x):
combo_text = str(combo.itemText(x))
combo_data = combo.itemData(x)
if combo_text == "Manually specify a device":
custom_device_label.setVisible(True)
self.video_device_edit.setVisible(True)
if self.config.get("video_device") == "default":
self.video_device_edit.setText("")
else:
self.video_device_edit.setText(self.config.get("video_device"))
else:
custom_device_label.setVisible(False)
self.video_device_edit.setVisible(False)
self.video_device_edit.setText(combo_data.toString())
# on save trigger for the video device selection window,
# stores the chosen video device on close.
def on_save():
device = str(self.video_device_edit.text())
self.config.set_key("video_device", device)
d.accept()
custom_device_label = QLabel("Video device: ")
custom_device_label.setVisible(False)
layout.addWidget(custom_device_label,1,0)
self.video_device_edit = QLineEdit()
self.video_device_edit.setVisible(False)
layout.addWidget(self.video_device_edit, 1,1,2,2)
combo.currentIndexChanged.connect(on_change)
combo.addItem("Default","default")
for camera, device in system_cameras.items():
combo.addItem(camera, device)
combo.addItem("Manually specify a device",self.config.get("video_device"))
# Populate the previously chosen device:
index = combo.findData(self.config.get("video_device"))
combo.setCurrentIndex(index)
layout.addWidget(combo,0,1)
self.accept = QPushButton(_("Done"))
self.accept.clicked.connect(on_save)
layout.addWidget(self.accept,4,2)
if d.exec_():
return True
else:
return False
def parse_uri(uri):
if ':' not in uri:
# It's just an address (not BIP21)
return {'address': uri}
if '//' not in uri:
        # Workaround for urlparse: it doesn't handle bitcoin: URIs properly
uri = uri.replace(':', '://')
uri = urlparse(uri)
result = {'address': uri.netloc}
if uri.query.startswith('?'):
params = parse_qs(uri.query[1:])
else:
params = parse_qs(uri.query)
for k,v in params.items():
if k in ('amount', 'label', 'message'):
result[k] = v[0]
return result
if __name__ == '__main__':
# Run some tests
assert(parse_uri('LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx') ==
{'address': 'LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx'})
assert(parse_uri('cryptoescudo://LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx') ==
{'address': 'LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx'})
assert(parse_uri('cryptoescudo:LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx') ==
{'address': 'LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx'})
assert(parse_uri('cryptoescudo:LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx?amount=10') ==
{'amount': '10', 'address': 'LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx'})
assert(parse_uri('cryptoescudo:LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx?amount=10&label=devfund&message=Donation%20to%20the%20dev%20fund') ==
{'amount': '10', 'label': 'devfund', 'message': 'Donation to the dev fund', 'address': 'LcUP7ZU3Xpk1BUR3qut3dTjC3aK5JoZMYx'})
|
{
"content_hash": "d21ca88d9fa1472dbe7b3356591f9308",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 165,
"avg_line_length": 34.6580310880829,
"alnum_prop": 0.5746000896995066,
"repo_name": "Marcdnd/electrum-cesc",
"id": "a92f2fc5337e19b40490a924e07607d565f5f4a8",
"size": "13378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/qrscanner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3536"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3354"
},
{
"name": "Makefile",
"bytes": "849"
},
{
"name": "NSIS",
"bytes": "6970"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Protocol Buffer",
"bytes": "2354"
},
{
"name": "Python",
"bytes": "2163404"
},
{
"name": "Shell",
"bytes": "7908"
}
],
"symlink_target": ""
}
|
from pin.lib import p, util, ui
import unittest
from mock import Mock, patch
from tests import fixtures
class TestImage(unittest.TestCase):
def setUp(self):
fixtures.reset()
image = Mock()
image.get_width = Mock()
image.get_width.return_value = 20
image.get_height = Mock()
image.get_height.return_value = 10
p.images["test"] = image
@patch("pin.lib.p.dmd.create_frame")
def test_auto_size(self, patch):
image = ui.Image("test")
image.revalidate()
self.assertEquals(20, image.width)
self.assertEquals(10, image.height)
@patch("pin.lib.p.dmd.create_frame")
def test_auto_size_empty(self, patch):
image = ui.Image()
image.revalidate()
self.assertEquals(0, image.width)
self.assertEquals(0, image.height)
|
{
"content_hash": "287dcddaa9b2c173b7612ee9b9bdbcec",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 43,
"avg_line_length": 25.757575757575758,
"alnum_prop": 0.6188235294117647,
"repo_name": "town-hall-pinball/project-omega",
"id": "cb9e60c7758f129b97909c1519f31e5ebac288da",
"size": "1956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/lib/ui/test_image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "151"
},
{
"name": "CSS",
"bytes": "116351"
},
{
"name": "HTML",
"bytes": "6419"
},
{
"name": "JavaScript",
"bytes": "25150"
},
{
"name": "Python",
"bytes": "539013"
},
{
"name": "Shell",
"bytes": "2713"
}
],
"symlink_target": ""
}
|
import os
import shutil
import sys
# Ensures that the current version matches the last-produced version, which is
# stored in the version_file. If it does not, then the framework_root_dir is
# obliterated.
# Usage: python prepare_framework_version.py out/obj/version_file \
# out/Framework.framework \
# 'A'
def PrepareFrameworkVersion(version_file, framework_root_dir, version):
# Test what the current framework version is. Stop if it is up-to-date.
try:
with open(version_file, 'r') as f:
current_version = f.read()
if current_version == version:
return
except IOError:
pass
# The framework version has changed, so clobber the framework.
if os.path.exists(framework_root_dir):
shutil.rmtree(framework_root_dir)
# Write out the new framework version file, making sure its containing
# directory exists.
dirname = os.path.dirname(version_file)
if not os.path.isdir(dirname):
os.makedirs(dirname, 0700)
with open(version_file, 'w+') as f:
f.write(version)
if __name__ == '__main__':
PrepareFrameworkVersion(sys.argv[1], sys.argv[2], sys.argv[3])
sys.exit(0)
|
{
"content_hash": "15a88ad0d975f4f4d196983459b4dd6d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 78,
"avg_line_length": 31.973684210526315,
"alnum_prop": 0.6567901234567901,
"repo_name": "google-ar/WebARonARCore",
"id": "5e8a53f20ab7e6fda29012aa871455f75ae99e1a",
"size": "1378",
"binary": false,
"copies": "10",
"ref": "refs/heads/webarcore_57.0.2987.5",
"path": "build/config/mac/prepare_framework_version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
'''
Created on 25.01.2015
@author: patrick
'''
import json
import logging
import raumfeld
import threading
from bottle import route, run
from urllib import quote, unquote
updateAvailableEvent = threading.Event()
def __getSingleZone(name_udn):
"""Tries to find the first occurring Zone with the specified name or UDN"""
zone = None
if (name_udn.startswith("uuid:")):
zone = raumfeld.getZoneByUDN(name_udn)
else:
zones = raumfeld.getZonesByName(name_udn.decode('utf-8'))
if (len(zones) > 0):
zone = zones[0]
return zone
def __getSingleRoom(name_udn):
"""Tries to find the first occurring Room with the specified name or UDN"""
room = None
if (name_udn.startswith("uuid:")):
room = raumfeld.getRoomByUDN(name_udn)
else:
rooms = raumfeld.getRoomsByName(name_udn.decode('utf-8'))
if (len(rooms) > 0):
room = rooms[0]
return room
@route('/')
def index():
returndata = u'<html><body>'
returndata += u'<b>Global actions:</b>'
returndata += u'<ul>'
returndata += u'<li>/ - this site</li>'
returndata += u'<li>/zones - list zones</li>'
returndata += u'<li>/unassignedRooms - list unassigned rooms</li>'
returndata += u'<li>/waitForChanges - returns the request when something changed in the zone structure</li>'
returndata += u'<li>/update - updates the internal device and zone data</li>'
returndata += u'</ul>'
returndata += u'<b>Zone actions:</b>'
returndata += u'<ul>'
returndata += u'<li>/zone/<name_udn>/volume - get volume from the zone defined by the <name> or <udn></li>'
returndata += u'<li>/zone/<name_udn>/volume/<volume> - set the volume of the zone defined by the <name> or <udn> to <volume></li>'
returndata += u'<li>/zone/<name_udn>/volume/[+/-]<amount> - changes the volume of the zone defined by the <name> or <udn> by [+/-]<amount> percent</li>'
returndata += u'<li>/zone/<name_udn>/rooms - list the rooms in a zone defined by the <name> or <udn></li>'
returndata += u'<li>/zone/<name_udn>/play/<uri> - plays <uri> in the given zone</li>'
returndata += u'<li>/zone/<name_udn>/play - start to play in the given zone</li>'
returndata += u'<li>/zone/<name_udn>/pause - pause the given zone</li>'
returndata += u'<li>/zone/<name_udn>/play_pause - toggle between play and pause for the given zone</li>'
returndata += u'<li>/zone/<name_udn>/next - play next song in the given zone</li>'
returndata += u'<li>/zone/<name_udn>/previous - play previous song in the given zone</li>'
returndata += u'<li>/zone/<name_udn>/stop - stop the given zone</li>'
returndata += u'<li>/zone/<name_udn>/transport_info - show transport information of the given zone</li>'
returndata += u'</ul>'
returndata += u'<b>Room actions:</b>'
returndata += u'<ul>'
returndata += u'<li>/room/<name_udn> - Returns the Room name and UDN in JSON format</li>'
returndata += u'<li>/room/<name_udn>/volume - get the volume of the given room</li>'
returndata += u'<li>/room/<name_udn>/volume/<volume> - set the volume of the given room</li>'
returndata += u'<li>/room/<name_udn>/zone - get the zone associated to the given room</li>'
    returndata += u'<li>/room/<name_udn>/separate - Separates the Room defined by the name or UDN from its zone</li>'
returndata += u'</ul>'
returndata += u'</body></html>'
return returndata
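# Client sketch (not part of the original API): a minimal, hypothetical helper
# showing how the JSON endpoints listed above can be queried from Python. The
# host and port are assumptions matching the run() call at the bottom of this
# file.
def _example_list_zones(host='localhost', port=8080):
    """Illustrative only: fetch /zones and return the decoded zone list."""
    import urllib2
    response = urllib2.urlopen('http://%s:%s/zones' % (host, port)).read()
    data = json.loads(response)
    return data['data'] if data['success'] else []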
################
# Global actions
################
@route('/zones')
def getZones():
"""Returns the Zone names and UDNs in JSON format"""
returndata = {}
returndata["data"] = []
returndata["success"] = False
for zone in raumfeld.getZones():
z = {}
z['name'] = zone.Name
z['udn'] = zone.UDN
returndata["data"].append(z)
returndata["success"] = True
return json.dumps(returndata)
@route('/unassignedRooms')
def getUnassignedRooms():
"""Returns the unassigned room names and UDNs in JSON format"""
returndata = {}
returndata["data"] = []
returndata["success"] = False
for room in raumfeld.getUnassignedRooms():
r = {}
r['name'] = room.Name
r['udn'] = room.UDN
returndata["data"].append(r)
returndata["success"] = True
return json.dumps(returndata)
################
# Zone actions
################
@route('/zone/<name_udn>')
def getZone(name_udn):
"""Returns the Zone name and UDN in JSON format"""
returndata = {}
returndata["success"] = False
zone = __getSingleZone(name_udn)
if zone != None:
z = {}
z['name'] = zone.Name
z['udn'] = zone.UDN
returndata["data"] = z
returndata["success"] = True
return json.dumps(returndata)
@route('/zone/<name_udn>/volume')
def getZoneVolume(name_udn):
"""Gets the volume of the Zone defined by the name or UDN"""
returndata = {}
returndata["success"] = False
zone = __getSingleZone(name_udn)
if zone != None:
returndata["data"] = zone.volume
returndata["success"] = True
return json.dumps(returndata)
@route('/zone/<name_udn>/volume/<volume:re:\d+>')
def setZoneVolume(name_udn, volume):
"""Sets the volume of the Zone defined by the name or UDN"""
returndata = {}
returndata["success"] = False
zone = __getSingleZone(name_udn)
if zone != None:
zone.volume = volume
returndata["success"] = True
return json.dumps(returndata)
@route('/zone/<name_udn>/volume/<amount:re:[+-]\d+>')
def changeZoneVolume(name_udn, amount):
"""Changes the volume of the Zone defined by the name or UDN"""
returndata = {}
returndata["success"] = False
zone = __getSingleZone(name_udn)
if zone != None:
zone.changeVolume(int(amount))
returndata["success"] = True
return json.dumps(returndata)
@route('/zone/<name_udn>/rooms')
def getZoneRooms(name_udn):
"""Gets the rooms of the Zone defined by the name or UDN"""
returndata = {}
returndata["data"] = []
returndata["success"] = False
zone = __getSingleZone(name_udn)
if zone != None:
for room in zone.getRooms():
r = {}
r['name'] = room.Name
r['udn'] = room.UDN
returndata["data"].append(r)
returndata["success"] = True
return json.dumps(returndata)
@route('/zone/<name_udn>/play/<uri:path>')
def zonePlayURI(name_udn, uri):
returndata = {}
returndata["success"] = False
zone = __getSingleZone(name_udn)
if zone != None:
#zone.play("dlna-playcontainer://{udn}?sid={sid}&cid={cid}&md=0".format(udn=quote(raumfeld.getMediaServerUDN()), sid=quote("urn:upnp-org:serviceId:ContentDirectory"), cid=quote("0/Playlists/MyPlaylists/Radio/" + unicode(index))))
zone.play(unquote(uri))
returndata["success"] = True
return json.dumps(returndata)
@route('/zone/<name_udn>/play')
def zonePlay(name_udn):
returndata = {}
returndata["success"] = False
zone = __getSingleZone(name_udn)
if zone != None:
zone.play()
returndata["success"] = True
return json.dumps(returndata)
@route('/zone/<name_udn>/pause')
def zonePause(name_udn):
returndata = {}
returndata["success"] = False
zone = __getSingleZone(name_udn)
if zone != None:
zone.pause()
returndata["success"] = True
return json.dumps(returndata)
@route('/zone/<name_udn>/stop')
def zoneStop(name_udn):
returndata = {}
returndata["success"] = False
zone = __getSingleZone(name_udn)
if zone != None:
zone.stop()
returndata["success"] = True
return json.dumps(returndata)
@route('/zone/<name_udn>/play_pause')
def zonePlay_Pause(name_udn):
returndata = {}
returndata["success"] = False
zone = __getSingleZone(name_udn)
if zone != None:
        TState = str(zone.transport_info['CurrentTransportState'])
        if TState in ("STOPPED", "PAUSED_PLAYBACK"):
zone.mute = False
zone.play()
else:
zone.pause()
returndata["success"] = True
return json.dumps(returndata)
@route('/zone/<name_udn>/next')
def zoneNext(name_udn):
returndata = {}
returndata["success"] = False
zone = __getSingleZone(name_udn)
if zone != None:
zone.next()
returndata["success"] = True
return json.dumps(returndata)
@route('/zone/<name_udn>/previous')
def zonePrevious(name_udn):
returndata = {}
returndata["success"] = False
zone = __getSingleZone(name_udn)
if zone != None:
zone.previous()
returndata["success"] = True
return json.dumps(returndata)
@route('/zone/<name_udn>/transport_info')
def getTransportInfo(name_udn):
"""Get the transport information of the Zone defined by the name or UDN"""
returndata = {}
returndata["data"] = []
returndata["success"] = False
zone = __getSingleZone(name_udn)
if zone != None:
returndata["data"].append(zone.transport_info_CurrentTransportState(zone))
returndata["success"] = True
return json.dumps(returndata)
################
# Room actions
################
@route('/room/<name_udn>')
def getRoom(name_udn):
"""Returns the Room name and UDN in JSON format"""
returndata = {}
returndata["success"] = False
room = __getSingleRoom(name_udn)
if room != None:
z = {}
z['name'] = room.Name
z['udn'] = room.UDN
returndata["data"] = z
returndata["success"] = True
return json.dumps(returndata)
@route('/room/<name_udn>/volume')
def getRoomVolume(name_udn):
"""Gets the volume of the Room defined by the name or UDN"""
returndata = {}
returndata["success"] = False
room = __getSingleRoom(name_udn)
if room != None:
returndata["data"] = room.volume
returndata["success"] = True
return json.dumps(returndata)
@route('/room/<name_udn>/volume/<volume:int>')
def setRoomVolume(name_udn, volume):
"""Sets the volume of the Room defined by the name or UDN"""
returndata = {}
returndata["success"] = False
room = __getSingleRoom(name_udn)
if room != None:
room.volume = volume
returndata["success"] = True
return json.dumps(returndata)
@route('/room/<name_udn>/zone')
def getRoomZone(name_udn):
"""Gets the zone json of the Room defined by the name or UDN"""
returndata = {}
returndata["success"] = False
room = __getSingleRoom(name_udn)
if room != None:
zone = raumfeld.getZoneWithRoomUDN(room.UDN)
if zone != None:
returndata["data"] = {}
returndata["data"]["udn"] = zone.UDN
returndata["data"]["name"] = zone.Name
returndata["success"] = True
return json.dumps(returndata)
@route('/room/<name_udn>/separate')
def separateRoom(name_udn):
"""Separates the the Room defined by the name or UDN from its zone"""
global updateAvailableEvent
returndata = {}
returndata["success"] = False
room = __getSingleRoom(name_udn)
if room != None:
raumfeld.connectRoomToZone(room.UDN)
if updateAvailableEvent.wait(10):
returndata["success"] = True
return json.dumps(returndata)
##################
# Wait for Changes
##################
@route('/waitForChanges')
def waitForChanges():
"""Returns when an update in the DataStructure happened"""
global updateAvailableEvent
returndata = []
if updateAvailableEvent.wait(10):
r = {}
r['changes'] = True
returndata.append(r)
return json.dumps(returndata)
#################
# Update Data
##################
@route('/update')
def updateData():
raumfeld.updateData()
def __updateAvailableCallback():
global updateAvailableEvent
updateAvailableEvent.set()
def __resetUpdateAvailableEventThread():
global updateAvailableEvent
while True:
updateAvailableEvent.wait()
updateAvailableEvent.clear()
raumfeld.setLogging(logging.INFO)
raumfeld.registerChangeCallback(__updateAvailableCallback)
raumfeld.init()
print("Host URL: " +raumfeld.hostBaseURL)
# Start observing the device list
resetUpdateAvailableEventThread = threading.Thread(target=__resetUpdateAvailableEventThread)
resetUpdateAvailableEventThread.daemon = True
resetUpdateAvailableEventThread.start()
run(host='0.0.0.0', port=8080, debug=True)
|
{
"content_hash": "c4ed6a6ec5b3ed95386eb78b7a40b21b",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 237,
"avg_line_length": 33.51193633952255,
"alnum_prop": 0.6230805762228906,
"repo_name": "maierp/PyRaumfeld",
"id": "7f13552d5cf4ab981382caee15c677347c1cac03",
"size": "12658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RaumfeldControl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53217"
}
],
"symlink_target": ""
}
|
import numpy as np
from matplotlib.backends.backend_pdf import FigureCanvasPdf as FigureCanvas
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.figure import Figure
#import matplotlib.mlab as mlab
import mpl_toolkits.axisartist as axisartist
from matplotlib import rcParams
def setup_page(outputpdf, pageorientation):
### Set up default parameters for matplotlib
### rcParams are the default parameters for matplotlib
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Century Gothic']
rcParams['font.size'] = 10.
rcParams['axes.labelsize'] = 10.
rcParams['xtick.labelsize'] = 8.
rcParams['ytick.labelsize'] = 12.
if pageorientation == 'portrait':
pagelength = 11
pagewidth = 8.5
#Set margins
#top_mar = 0.95
#bot_mar = 0.15
#left_mar = 0.15
#right_mar = 0.85
#fig.subplots_adjust(bottom=bot_mar,top=top_mar,left=left_mar,right=right_mar)
if pageorientation == 'landscape':
pagelength = 8.5
pagewidth = 11
#top_mar = 0.95
#bot_mar = 0.15
#left_mar = 0.10
#right_mar = 0.95
fig = Figure(figsize=(pagewidth,pagelength))
pp = PdfPages(outputpdf)
# Create a canvas and add the figure to it.
#canvas = FigureCanvas(fig)
FigureCanvas(fig)
#fig.subplots_adjust(bottom=bot_mar,top=top_mar,left=left_mar,right=right_mar)
return fig, pp, axisartist
def next_page(pageorientation):
### Set up default parameters for matplotlib
### rcParams are the default parameters for matplotlib
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Century Gothic']
rcParams['font.size'] = 10.
rcParams['axes.labelsize'] = 10.
rcParams['xtick.labelsize'] = 8.
rcParams['ytick.labelsize'] = 12.
if pageorientation == 'portrait':
pagelength = 11
pagewidth = 8.5
#Set margins
#top_mar = 0.95
#bot_mar = 0.15
#left_mar = 0.15
#right_mar = 0.85
if pageorientation == 'landscape':
pagelength = 8.5
pagewidth = 11
#top_mar = 0.95
#bot_mar = 0.15
#left_mar = 0.10
#right_mar = 0.95
fig = Figure(figsize=(pagewidth,pagelength))
#pp = PdfPages(outputpdf)
# Create a canvas and add the figure to it.
#canvas = FigureCanvas(fig)
FigureCanvas(fig)
#You can do margin adjustments in your main deck (no need to pass the margin variables)
#fig.subplots_adjust(bottom=bot_mar,top=top_mar,left=left_mar,right=right_mar)
return fig, axisartist
def setup_axes(ax, x_range, y_range, x_label, y_label, x_tickinterval = np.nan, y_tickinterval = np.nan, x_labelsize = 8, y_labelsize = 8):
#More on keyword arguments https://docs.python.org/release/1.5.1p1/tut/keywordArgs.html
ax.set_ylim(y_range)
ax.set_xlim(x_range)
# Set the X Axis label.
ax.axis["bottom"].label.set_text(x_label)
ax.axis["bottom"].label.set_weight('bold')
ax.axis["bottom"].label.set_pad(15) #specifies number of points between axis title and axis
ax.axis["bottom"].label.set_size(x_labelsize)
# Set the Y Axis label.
ax.axis["left"].label.set_text(y_label)
#ax.axis["left"].label.set_size(8)
ax.axis["left"].label.set_size(y_labelsize)
ax.axis["left"].label.set_weight('bold')
ax.axis["left"].label.set_pad(7)
#ax.tick_params(axis='both', which='major', labelsize=x_fontsize)
if np.isfinite(y_tickinterval):
ax.set_yticks(range(y_range[0],y_range[1],y_tickinterval))
if np.isfinite(x_tickinterval):
ax.set_xticks(range(x_range[0],x_range[1],x_tickinterval))
def make_png(outputpdf, resolution = 100):
from wand.image import Image
    # For converting PDF to PNG (Matplotlib can't do this natively; it can
    # only export PNGs, JPGs, etc. one image at a time).
    # For wand.image to work you need to install the ghostscript package
    # (e.g. easy_install ghostscript).
outputpng = ''.join(outputpdf.split('.')[0:-1])+'.png'
with Image(filename=outputpdf,resolution = resolution) as img:
img.compression_quality = 99
img.save(filename=outputpng)
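# Flow sketch tying the helpers above together (the output file name is
# illustrative; real use adds plotting calls before saving):
#
#     fig, pp, aa = setup_page('report.pdf', 'landscape')
#     ax = aa.Subplot(fig, 111)
#     fig.add_subplot(ax)
#     setup_axes(ax, (0, 10), (0, 100), 'x label', 'y label')
#     pp.savefig(fig)
#     pp.close()
#     make_png('report.pdf')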
|
{
"content_hash": "b7205f32882c8c6937c332eb3a3f7a75",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 139,
"avg_line_length": 35.295081967213115,
"alnum_prop": 0.6400371574547143,
"repo_name": "Harefoot/TurboQUALTX",
"id": "7246d7826e8cb40e824b43dba637c1aee9f8047f",
"size": "4306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ET_Utils/Plot_Utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3656269"
},
{
"name": "Python",
"bytes": "35946"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from google.appengine.ext import ndb
from categoria.categoria_model import Categoria
from config.template_middleware import TemplateResponse
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_required, permissions
from routes.categorias import edit
from routes.categorias.new import salvar
from tekton.gae.middleware.redirect import RedirectResponse
from tekton.router import to_path
@no_csrf
def index():
query = Categoria.query_ordenada_por_nome()
edit_path_base = to_path(edit)
deletar_path_base = to_path(deletar)
categorias = query.fetch()
for cat in categorias:
key = cat.key
key_id = key.id()
cat.edit_path = to_path(edit_path_base, key_id)
cat.deletar_path = to_path(deletar_path_base, key_id)
ctx = {'salvar_path': to_path(salvar),
'categorias': categorias}
return TemplateResponse(ctx, 'categorias/categorias_home.html')
def deletar(categoria_id):
key = ndb.Key(Categoria, int(categoria_id))
key.delete()
return RedirectResponse(index)
|
{
"content_hash": "00418aea7bde5711002aa9cab57b861b",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 67,
"avg_line_length": 33.96969696969697,
"alnum_prop": 0.7314897413024085,
"repo_name": "SamaraCardoso27/eMakeup",
"id": "d3fa6614635f6d2a22482f4bd29dcd808ea072b0",
"size": "1145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/appengine/routes/categorias/home.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1129"
},
{
"name": "CSS",
"bytes": "130870"
},
{
"name": "HTML",
"bytes": "137342"
},
{
"name": "JavaScript",
"bytes": "134976"
},
{
"name": "Python",
"bytes": "2581352"
},
{
"name": "Shell",
"bytes": "4663"
}
],
"symlink_target": ""
}
|
"""This function makes tests to the views"""
from unittest import TestCase
from django.http import HttpRequest
from django.test import Client
from app.views import index
class HomePageViewTest(TestCase):
def test_home_page_returns_correct_html(self):
request = HttpRequest()
response = index(request)
self.assertTrue(response.content.startswith(b'<!DOCTYPE html>'))
self.assertTrue(response.content.endswith(b'</html>'),
msg=str(response.content))
self.assertIn(b'<title>RD Emprende</title>', response.content)
class SignUpTest(TestCase):
def setUp(self):
"""initialize a client (meaning a browser) and visits the page"""
self.client = Client()
def test_access(self):
# create a get request
response = self.client.get('/signup/')
        #check that the response is 200 OK
self.assertEqual(response.status_code, 200)
def test_dashboard_not_signed(self):
"""Test if the browser redirects the login_required protected views"""
views_url = ('/dashboard/',
'/accounts/picture/')
#create a get request
for view in views_url:
response = self.client.get(view)
            #the user is not logged in, so they should be redirected
self.assertEqual(response.status_code, 302,
msg=str(response.request))
|
{
"content_hash": "60df8deb6f1568e3cf36608ad8af1cda",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 78,
"avg_line_length": 35.8,
"alnum_prop": 0.6319832402234636,
"repo_name": "xpostudio4/red-de-emprendimiento",
"id": "40cf6fa691c69ee94688ac2bb73c33a9ee0ef632",
"size": "1432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/institutions/tests/tests_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "108204"
},
{
"name": "HTML",
"bytes": "71892"
},
{
"name": "JavaScript",
"bytes": "48571"
},
{
"name": "Python",
"bytes": "62458"
}
],
"symlink_target": ""
}
|
"""Classes and functions for creating and comparing hash digests
Todo:
* Implement SHAKE and BLAKE
* Resist urge to call it "SHAKE'N BLAKE"
    * Write a better method for dealing with digest references that weren't
correctly provided.
"""
# ----------------------------Compatibility Imports----------------------------
from __future__ import print_function
from six.moves import range
import sys
import hashlib
if sys.version_info < (3, 6):
# noinspection PyUnresolvedReferences
import sha3
# -----------------------------------------------------------------------------
import os
import hmac
class Digest(object):
"""Class for determining what hash generation method to use based off a
provided reference digest.
Attributes:
reference_digest (str): Either a filename containing a generated hash
digest, or the actual hash digest itself
sha3 (bool): Designates whether SHA3 should be used over SHA2
"""
def __init__(self, reference_digest, sha3=False):
self.reference_digest = self.process_reference(reference_digest)
self.sha3 = sha3
@staticmethod
def process_reference(source):
"""Determines if source of digest is stored in a text file, or if it's a
string provided by user.
Args:
source (str): Filename or string containing a generated hash digest.
Returns:
str: Hash source digest stripped of leading and trailing whitespace.
"""
if os.path.isfile(source):
with open(source, 'r') as f:
return f.read().split(' ')[0]
else:
return source.strip()
@property
def hash_method(self):
"""str: Exact name of built-in hashlib method as a string."""
standard_hash_methods = {
32: 'md5', 40: 'sha1', 56: 'sha224', 64: 'sha256', 96: 'sha384',
128: 'sha512'
}
sha3_methods = {
56: 'sha3_224', 64: 'sha3_256', 96: 'sha3_384', 128: 'sha3_512'
}
family = standard_hash_methods if not self.sha3 else sha3_methods
digest_length = len(self.reference_digest)
try:
return family[digest_length]
except KeyError:
deviations = [(abs(x - digest_length), x) for x in family]
return family[min(deviations)[1]]
def generate_digest(filename, hash_method):
"""
Args:
filename (str): Filename of binary file.
hash_method (str): exact name of hashlib method used for digest
generation.
Returns:
str: Hash digest generated from binary file.
"""
    # Buffer used to read the file into memory in smaller blocks
buffer_size = 65536
blocks = (os.path.getsize(filename) // buffer_size) + 1
hash_digest = getattr(hashlib, hash_method)()
with open(filename, 'rb') as f:
generator = (f.read(buffer_size) for _ in range(blocks))
for data in generator:
hash_digest.update(data)
return hash_digest.hexdigest()
def compare_digests(digest_1, digest_2):
"""
Args:
digest_1 (str):
digest_2 (str):
Returns:
bool: Result of comparison between digest_1 and digest_2
"""
return hmac.compare_digest(digest_1, digest_2)
if __name__ == '__main__':
pass
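    # Minimal usage sketch of the helpers above; 'example.bin' and
    # 'example.sha256' are hypothetical file names used purely for
    # illustration, and the guard skips the demo when they are absent.
    if os.path.isfile('example.bin') and os.path.isfile('example.sha256'):
        reference = Digest('example.sha256')
        generated = generate_digest('example.bin', reference.hash_method)
        print(compare_digests(reference.reference_digest, generated))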
|
{
"content_hash": "eaa274fbb240f93ed91088ab52c30c06",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 80,
"avg_line_length": 27.42622950819672,
"alnum_prop": 0.588762701733413,
"repo_name": "jim-hart/sealant",
"id": "612282d9aa945663e4fa6127671a59703a268acd",
"size": "3346",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "sealant/hashchk/hashchk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35995"
}
],
"symlink_target": ""
}
|
"""
PySpark is the Python API for Spark.
Public classes:
- L{SparkContext<pyspark.context.SparkContext>}
Main entry point for Spark functionality.
- L{RDD<pyspark.rdd.RDD>}
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
- L{Broadcast<pyspark.broadcast.Broadcast>}
A broadcast variable that gets reused across tasks.
- L{Accumulator<pyspark.accumulators.Accumulator>}
An "add-only" shared variable that tasks can only add values to.
- L{SparkConf<pyspark.conf.SparkConf>}
For configuring Spark.
- L{SparkFiles<pyspark.files.SparkFiles>}
Access files shipped with jobs.
- L{StorageLevel<pyspark.storagelevel.StorageLevel>}
Finer-grained cache persistence levels.
Spark SQL:
- L{SQLContext<pyspark.sql.SQLContext>}
Main entry point for SQL functionality.
- L{SchemaRDD<pyspark.sql.SchemaRDD>}
A Resilient Distributed Dataset (RDD) with Schema information for the data contained. In
addition to normal RDD operations, SchemaRDDs also support SQL.
- L{Row<pyspark.sql.Row>}
A Row of data returned by a Spark SQL query.
Hive:
- L{HiveContext<pyspark.context.HiveContext>}
      Main entry point for accessing data stored in Apache Hive.
"""
import sys
import os
sys.path.insert(0, os.path.join(os.environ["SPARK_HOME"], "python/lib/py4j-0.8.1-src.zip"))
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.sql import SQLContext
from pyspark.rdd import RDD
from pyspark.sql import SchemaRDD
from pyspark.sql import Row
from pyspark.files import SparkFiles
from pyspark.storagelevel import StorageLevel
__all__ = ["SparkConf", "SparkContext", "SQLContext", "RDD", "SchemaRDD", "SparkFiles", "StorageLevel", "Row"]
|
{
"content_hash": "93027569c77f45634e27a62700757e09",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 110,
"avg_line_length": 33.98076923076923,
"alnum_prop": 0.7408036219581211,
"repo_name": "zhangjunfang/eclipse-dir",
"id": "73fe7378ffa631b48d9322f2f0156491f584295e",
"size": "2552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spark/python/pyspark/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "341592"
},
{
"name": "Assembly",
"bytes": "11762"
},
{
"name": "CSS",
"bytes": "5838098"
},
{
"name": "ColdFusion",
"bytes": "998340"
},
{
"name": "Java",
"bytes": "11143836"
},
{
"name": "JavaScript",
"bytes": "37580201"
},
{
"name": "Lasso",
"bytes": "140040"
},
{
"name": "PHP",
"bytes": "610596"
},
{
"name": "Perl",
"bytes": "280108"
},
{
"name": "Python",
"bytes": "389792"
},
{
"name": "Scala",
"bytes": "652118"
},
{
"name": "Shell",
"bytes": "18696"
},
{
"name": "Slash",
"bytes": "4374192"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import url, patterns, include, handler404, handler500
urlpatterns = patterns("dialogos.views",
url(r"^comment/(?P<comment_id>.+?)/edit/$", "edit_comment",
name="edit_comment"),
url(r"^comment/(?P<comment_id>.+?)/delete/$", "delete_comment",
name="delete_comment"),
url(r"^comment/(?P<content_type_id>.+?)/(?P<object_id>.+?)/$", "post_comment",
name="post_comment"),
)
|
{
"content_hash": "a786fde227549acefcdf9cad1e05d470",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 84,
"avg_line_length": 39.72727272727273,
"alnum_prop": 0.6178489702517163,
"repo_name": "georgedorn/dialogos",
"id": "a0849348e17f12022bc3ed3e28b096d7a23b381a",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dialogos/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20971"
}
],
"symlink_target": ""
}
|
import numpy as np
from astropy import units as u
from astropy.coordinates.angle_utilities import angular_separation
from astropy.coordinates.builtin_frames import FK4, Galactic
from astropy.table import Table
from astropy.time import Time
from astropy.utils.data import get_pkg_data_contents
# the number of tests to run
from . import N_ACCURACY_TESTS
TOLERANCE = 0.3 # arcseconds
def test_galactic_fk4():
lines = get_pkg_data_contents('data/galactic_fk4.csv').split('\n')
t = Table.read(lines, format='ascii', delimiter=',', guess=False)
if N_ACCURACY_TESTS >= len(t):
idxs = range(len(t))
else:
idxs = np.random.randint(len(t), size=N_ACCURACY_TESTS)
diffarcsec1 = []
diffarcsec2 = []
for i in idxs:
# Extract row
r = t[int(i)] # int here is to get around a py 3.x astropy.table bug
# Galactic to FK4
c1 = Galactic(l=r['lon_in']*u.deg, b=r['lat_in']*u.deg)
c2 = c1.transform_to(FK4(equinox=Time(r['equinox_fk4'])))
# Find difference
diff = angular_separation(c2.ra.radian, c2.dec.radian,
np.radians(r['ra_fk4']),
np.radians(r['dec_fk4']))
diffarcsec1.append(np.degrees(diff) * 3600.)
# FK4 to Galactic
c1 = FK4(ra=r['lon_in']*u.deg, dec=r['lat_in']*u.deg,
obstime=Time(r['obstime']),
equinox=Time(r['equinox_fk4']))
c2 = c1.transform_to(Galactic())
# Find difference
diff = angular_separation(c2.l.radian, c2.b.radian,
np.radians(r['lon_gal']),
np.radians(r['lat_gal']))
diffarcsec2.append(np.degrees(diff) * 3600.)
np.testing.assert_array_less(diffarcsec1, TOLERANCE)
np.testing.assert_array_less(diffarcsec2, TOLERANCE)
|
{
"content_hash": "878ddc9540b5b2fec61570dda0a105da",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 33.482142857142854,
"alnum_prop": 0.5914666666666667,
"repo_name": "larrybradley/astropy",
"id": "3e4733f438f7c33fa204173a0f3f956b08f26b8d",
"size": "1941",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "astropy/coordinates/tests/accuracy/test_galactic_fk4.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12335716"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
"""
Class for polling Ceilometer
This class provides the means to request authentication tokens to be used with OpenStack's Ceilometer, Nova and RabbitMQ
"""
############# NOTICE ######################
# ProZaC is a fork of ZabbixCeilometer-Proxy (aka ZCP),
# which is Copyright of OneSource Consultoria Informatica (http://www.onesource.pt).
# For further information about ZCP, check its github :
# https://github.com/clmarques/ZabbixCeilometer-Proxy
##########################################################
### ProZaC added functionalities (in this module) ########
#
# - support to token renewal : proxy restart is no longer needed each hour
# - support to logging
#
### --------------------------- ##########################
__copyright__ = "Istituto Nazionale di Fisica Nucleare (INFN)"
__license__ = "Apache 2"
__contact__ = "emidio.giorgio@ct.infn.it"
__date__ = "15/11/2014"
__version__ = "0.9"
import struct
import urllib2
import json
import socket
import time
from threading import Timer
class CeilometerHandler:
def __init__(self, ceilometer_api_port, polling_interval, template_name, ceilometer_api_host, zabbix_host,
zabbix_port, zabbix_proxy_name, keystone_auth):
"""
TODO
:type self: object
"""
self.ceilometer_api_port = ceilometer_api_port
self.polling_interval = int(polling_interval)
self.template_name = template_name
self.ceilometer_api_host = ceilometer_api_host
self.zabbix_host = zabbix_host
self.zabbix_port = zabbix_port
self.zabbix_proxy_name = zabbix_proxy_name
self.keystone_auth = keystone_auth
#self.token = self.keystone_auth.getToken()
full_token= self.keystone_auth.getTokenV2()
self.token = full_token['id']
self.token_expires = full_token['expires']
self.logger=keystone_auth.logger
self.logger.info("Ceilometer handler initialized")
def run(self):
Timer(self.polling_interval, self.run, ()).start()
host_list = self.get_hosts_ID()
self.update_values(host_list)
def get_hosts_ID(self):
"""
Method used do query Zabbix API in order to fill an Array of hosts
:return: returns a array of servers and items to monitor by server
"""
data = {"request": "proxy config", "host": self.zabbix_proxy_name}
payload = self.set_proxy_header(data)
response = self.connect_zabbix(payload)
hosts_id = []
items = []
for line in response['hosts']['data']:
for line2 in response['items']['data']:
if line2[4] == line[0]:
items.append(line2[5])
hosts_id.append([line[0], line[1], items, line[7]])
items = []
return hosts_id
def update_values(self, hosts_id):
"""
TODO
:param hosts_id:
"""
self.check_token_lifetime(self.token_expires)
for host in hosts_id:
links = []
if not host[1] == self.template_name:
self.logger.info("Checking host %s" %(host[3]))
#Get links for instance compute metrics
request = urllib2.urlopen(urllib2.Request(
"http://" + self.ceilometer_api_host + ":" + self.ceilometer_api_port +
"/v2/resources?q.field=resource_id&q.value=" + host[1],
headers={"Accept": "application/json", "Content-Type": "application/json",
"X-Auth-Token": self.token})).read()
# Filter the links to an array
for line in json.loads(request):
for line2 in line['links']:
if line2['rel'] in ('cpu', 'cpu_util', 'disk.read.bytes', 'disk.read.requests',
'disk.write.bytes', 'disk.write.requests'):
links.append(line2)
# Get the links regarding network metrics
request = urllib2.urlopen(urllib2.Request(
"http://" + self.ceilometer_api_host + ":" + self.ceilometer_api_port +
"/v2/resources?q.field=metadata.instance_id&q.value=" + host[1],
headers={"Accept": "application/json","Content-Type": "application/json",
"X-Auth-Token": self.token})).read()
# Add more links to the array
for line in json.loads(request):
for line2 in line['links']:
if line2['rel'] in ('network.incoming.bytes', 'network.incoming.packets', 'network.outgoing.bytes.rate', 'network.incoming.bytes.rate',
'network.outgoing.bytes', 'network.outgoing.packets'):
links.append(line2)
# Query ceilometer API using the array of links
for line in links:
self.query_ceilometer(host[1], line['rel'], line['href'])
self.logger.debug (" - Item %s" %(line['rel']))
def query_ceilometer(self, resource_id, item_key, link):
"""
TODO
:param resource_id:
:param item_key:
:param link:
"""
self.check_token_lifetime(self.token_expires)
        try:
            contents = urllib2.urlopen(urllib2.Request(link + "&limit=1",
                                       headers={"Accept": "application/json",
                                                "Content-Type": "application/json",
                                                "X-Auth-Token": self.token})).read()
        except urllib2.HTTPError, e:
            if e.code == 401:
                self.logger.error("Error 401... Token refused! Please check your credentials")
            elif e.code == 404:
                self.logger.error("%s not found" % (link))
            elif e.code == 503:
                self.logger.error("service %s unavailable" % (link))
            else:
                self.logger.error("unknown error opening %s" % (link))
            # without a response body there is nothing to parse or send
            return
        response = json.loads(contents)
try:
counter_volume = response[0]['counter_volume']
self.send_data_zabbix(counter_volume, resource_id, item_key)
except:
pass
def connect_zabbix(self, payload):
"""
Method used to send information to Zabbix
:param payload: refers to the json message prepared to send to Zabbix
:rtype : returns the response received by the Zabbix API
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.zabbix_host, int(self.zabbix_port)))
s.send(payload)
# read its response, the first five bytes are the header again
response_header = s.recv(5, socket.MSG_WAITALL)
if not response_header == 'ZBXD\1':
raise ValueError('Got invalid response')
# read the data header to get the length of the response
response_data_header = s.recv(8, socket.MSG_WAITALL)
response_data_header = response_data_header[:4]
response_len = struct.unpack('i', response_data_header)[0]
# read the whole rest of the response now that we know the length
response_raw = s.recv(response_len, socket.MSG_WAITALL)
s.close()
response = json.loads(response_raw)
return response
    def set_proxy_header(self, data):
        """
        Method used to simplify constructing the protocol to communicate with Zabbix
        :param data: refers to the json message
        :rtype : returns the message ready to send to Zabbix server with the right header
        """
        payload = json.dumps(data)
        # the length field must describe the serialized payload, and the header
        # has to be part of the returned message (as the docstring promises)
        data_header = struct.pack('i', len(payload)) + '\0\0\0\0'
        HEADER = '''ZBXD\1%s%s'''
        return HEADER % (data_header, payload)
def send_data_zabbix(self, counter_volume, resource_id, item_key):
"""
Method used to prepare the body with data from Ceilometer and send it to Zabbix using connect_zabbix method
:param counter_volume: the actual measurement
:param resource_id: refers to the resource ID
:param item_key: refers to the item key
"""
tmp = json.dumps(counter_volume)
data = {"request": "history data", "host": self.zabbix_proxy_name,
"data": [{"host": resource_id,
"key": item_key,
"value": tmp}]}
payload = self.set_proxy_header(data)
self.connect_zabbix(payload)
def check_token_lifetime(self,expires_timestamp,threshold=300):
"""
check time (in seconds) left before token expiration
if time left is below threshold, provides token renewal
"""
now_timestamp_utc=time.time()+time.timezone
timeleft=expires_timestamp - now_timestamp_utc
if timeleft < threshold: # default, less than five minutes
full_token=self.keystone_auth.getTokenV2()
self.token=full_token['id']
self.token_expires=full_token['expires']
self.logger.info("ceilometer token has been renewed")
|
{
"content_hash": "f787f82fcbee6554a3044fc834053c87",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 159,
"avg_line_length": 39.66244725738397,
"alnum_prop": 0.5591489361702128,
"repo_name": "hocchudong/ZabbixCeilometer-Proxy",
"id": "25f1c637e67bd9bf8fcea51e00fb24744324dc34",
"size": "9400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "65720"
},
{
"name": "Shell",
"bytes": "676"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import os
import shutil
import sys
from commitsan.git import (REPOS_PATH, CalledProcessError,
git_cmd, git_revlist, mkdir_p)
from commitsan.worker import job
from commitsan.checks import check_all
def output(*args, **kwargs):
kwargs.setdefault('file', sys.stderr)
print(*args, **kwargs)
@job()
def update_repo(repo, clone_url):
try:
out = git_cmd(repo, ['remote', 'update'])
except (OSError, CalledProcessError):
repo_path = os.path.join(REPOS_PATH, repo)
shutil.rmtree(repo_path, ignore_errors=True)
mkdir_p(repo_path)
out = git_cmd(repo, ['clone', '--mirror', clone_url, '.'],
no_git_dir=True)
output(out)
@job()
def process_commit_range(repo, *commits):
for commit in git_revlist(repo, *commits):
check_all(repo, commit)
|
{
"content_hash": "90b2070bb363347983f5b7e84d3b0eb4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 66,
"avg_line_length": 25.973684210526315,
"alnum_prop": 0.6220871327254306,
"repo_name": "abusalimov/commitsan",
"id": "6112fd906ab8bdf14a1350997500d66b125a7688",
"size": "987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commitsan/repos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26515"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from indico.core.db.sqlalchemy import db
from indico.modules.events.models.persons import PersonLinkBase
from indico.util.string import format_repr, return_ascii
class SessionBlockPersonLink(PersonLinkBase):
"""Association between EventPerson and SessionBlock.
Also known as a 'session convener'
"""
__tablename__ = 'session_block_person_links'
__auto_table_args = {'schema': 'events'}
person_link_backref_name = 'session_block_links'
person_link_unique_columns = ('session_block_id',)
object_relationship_name = 'session_block'
session_block_id = db.Column(
db.Integer,
db.ForeignKey('events.session_blocks.id'),
index=True,
nullable=False
)
# relationship backrefs:
# - session_block (SessionBlock.person_links)
@return_ascii
def __repr__(self):
return format_repr(self, 'id', 'person_id', 'session_block_id', _text=self.full_name)
|
{
"content_hash": "8cc0db09a33ff0f041281a244707e6dc",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 93,
"avg_line_length": 30.5625,
"alnum_prop": 0.6840490797546013,
"repo_name": "mic4ael/indico",
"id": "63224c585bd113de16a787135a50908bac51d3e6",
"size": "1192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/events/sessions/models/persons.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "553825"
},
{
"name": "HTML",
"bytes": "1375160"
},
{
"name": "JavaScript",
"bytes": "1852830"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4612709"
},
{
"name": "Shell",
"bytes": "2665"
},
{
"name": "TeX",
"bytes": "23292"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
"""Fix incompatible imports and module references."""
# Authors: Collin Winter, Nick Edds
# Local imports
from .. import fixer_base
from ..fixer_util import Name, attr_chain
MAPPING = {'StringIO': 'io',
'cStringIO': 'io',
'cPickle': 'pickle',
'__builtin__' : 'builtins',
'copy_reg': 'copyreg',
'Queue': 'queue',
'SocketServer': 'socketserver',
'ConfigParser': 'configparser',
'repr': 'reprlib',
'FileDialog': 'tkinter.filedialog',
'tkFileDialog': 'tkinter.filedialog',
'SimpleDialog': 'tkinter.simpledialog',
'tkSimpleDialog': 'tkinter.simpledialog',
'tkColorChooser': 'tkinter.colorchooser',
'tkCommonDialog': 'tkinter.commondialog',
'Dialog': 'tkinter.dialog',
'Tkdnd': 'tkinter.dnd',
'tkFont': 'tkinter.font',
'tkMessageBox': 'tkinter.messagebox',
'ScrolledText': 'tkinter.scrolledtext',
'Tkconstants': 'tkinter.constants',
'Tix': 'tkinter.tix',
'ttk': 'tkinter.ttk',
'Tkinter': 'tkinter',
'markupbase': '_markupbase',
'_winreg': 'winreg',
'thread': '_thread',
'dummy_thread': '_dummy_thread',
# anydbm and whichdb are handled by fix_imports2
'dbhash': 'dbm.bsd',
'dumbdbm': 'dbm.dumb',
'dbm': 'dbm.ndbm',
'gdbm': 'dbm.gnu',
'xmlrpclib': 'xmlrpc.client',
'DocXMLRPCServer': 'xmlrpc.server',
'SimpleXMLRPCServer': 'xmlrpc.server',
'httplib': 'http.client',
'htmlentitydefs' : 'html.entities',
'HTMLParser' : 'html.parser',
'Cookie': 'http.cookies',
'cookielib': 'http.cookiejar',
'BaseHTTPServer': 'http.server',
'SimpleHTTPServer': 'http.server',
'CGIHTTPServer': 'http.server',
#'test.test_support': 'test.support',
'commands': 'subprocess',
'UserString' : 'collections',
'UserList' : 'collections',
'urlparse' : 'urllib.parse',
'robotparser' : 'urllib.robotparser',
}
def alternates(members):
return "(" + "|".join(map(repr, members)) + ")"
def build_pattern(mapping=MAPPING):
mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
bare_names = alternates(mapping.keys())
yield """name_import=import_name< 'import' ((%s) |
multiple_imports=dotted_as_names< any* (%s) any* >) >
""" % (mod_list, mod_list)
yield """import_from< 'from' (%s) 'import' ['(']
( any | import_as_name< any 'as' any > |
import_as_names< any* >) [')'] >
""" % mod_list
yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
multiple_imports=dotted_as_names<
any* dotted_as_name< (%s) 'as' any > any* >) >
""" % (mod_list, mod_list)
# Find usages of module members in code e.g. thread.foo(bar)
yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
class FixImports(fixer_base.BaseFix):
# This is overridden in fix_imports2.
mapping = MAPPING
# We want to run this fixer late, so fix_import doesn't try to make stdlib
# renames into relative imports.
run_order = 6
def build_pattern(self):
return "|".join(build_pattern(self.mapping))
def compile_pattern(self):
        # We override this, so MAPPING can be programmatically altered and the
# changes will be reflected in PATTERN.
self.PATTERN = self.build_pattern()
super(FixImports, self).compile_pattern()
# Don't match the node if it's within another match.
def match(self, node):
match = super(FixImports, self).match
results = match(node)
if results:
# Module usage could be in the trailer of an attribute lookup, so we
# might have nested matches when "bare_with_attr" is present.
if "bare_with_attr" not in results and \
any(match(obj) for obj in attr_chain(node, "parent")):
return False
return results
return False
def start_tree(self, tree, filename):
super(FixImports, self).start_tree(tree, filename)
self.replace = {}
def transform(self, node, results):
import_mod = results.get("module_name")
if import_mod:
mod_name = import_mod.value
new_name = unicode(self.mapping[mod_name])
import_mod.replace(Name(new_name, prefix=import_mod.prefix))
if "name_import" in results:
# If it's not a "from x import x, y" or "import x as y" import,
                # mark its usage to be replaced.
self.replace[mod_name] = new_name
if "multiple_imports" in results:
# This is a nasty hack to fix multiple imports on a line (e.g.,
# "import StringIO, urlparse"). The problem is that I can't
# figure out an easy way to make a pattern recognize the keys of
# MAPPING randomly sprinkled in an import statement.
results = self.match(node)
if results:
self.transform(node, results)
else:
# Replace usage of the module.
bare_name = results["bare_with_attr"][0]
new_name = self.replace.get(bare_name.value)
if new_name:
bare_name.replace(Name(new_name, prefix=bare_name.prefix))
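# Usage sketch (illustrative; run from a separate script rather than inside
# this module): applying just this fixer through lib2to3's refactoring
# machinery rewrites the old module names listed in MAPPING.
#
#     from lib2to3.refactor import RefactoringTool
#     rt = RefactoringTool(['lib2to3.fixes.fix_imports'])
#     print(rt.refactor_string(u'import StringIO\n', '<example>'))
#     # prints the refactored tree, i.e. "import io"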
|
{
"content_hash": "4856ceda518e86bffcd7a59019089cc1",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 80,
"avg_line_length": 39.44755244755245,
"alnum_prop": 0.5525616025527389,
"repo_name": "kpkhxlgy0/SublimeText3",
"id": "e1ad667f7bfab9f72e5d2a2f925fe4612e9f87c0",
"size": "5641",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Packages/Python PEP8 Autoformat/libs/py26/lib2to3/fixes/fix_imports.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "1180"
},
{
"name": "Go",
"bytes": "366735"
},
{
"name": "JavaScript",
"bytes": "7834"
},
{
"name": "Lua",
"bytes": "277100"
},
{
"name": "Python",
"bytes": "5872395"
}
],
"symlink_target": ""
}
|
import types
import sys
import traceback
from odict import odict
from node.interfaces import IRoot
from zope.interface import implementer
from zope.component import (
getUtility,
queryUtility,
getUtilitiesFor,
provideUtility,
)
from zope.component.interfaces import ComponentLookupError
from agx.core.interfaces import (
IConfLoader,
IController,
IProcessor,
ITransform,
IGenerator,
ITargetHandler,
IDispatcher,
IHandler,
IScope,
IToken,
)
from agx.core.util import (
readsourcepath,
writesourcepath,
write_source_to_target_mapping,
)
@implementer(IController)
class Controller(object):
"""AGX standalone main controller.
"""
def __call__(self, sourcepath, targetpath):
confloader = getUtility(IConfLoader)
confloader()
source = None
target = None
for name in confloader.transforms:
transform = getUtility(ITransform, name=name)
source = transform.source(sourcepath)
if source is None:
# case continuation, expects None from transform.source
source = target
target = transform.target(targetpath)
processor = Processor(name)
target = processor(source, target)
target()
return target
@implementer(IProcessor)
class Processor(object):
"""Default processor.
"""
def __init__(self, transform):
"""@param transform: The transform name
"""
self.transform = transform
def __call__(self, source, target):
generators = self.lookup_generators()
targethandler = None
for generator in generators:
targethandler = getUtility(ITargetHandler, name=generator.name)
targethandler.anchor = None
targethandler.__init__(target)
generator(source, targethandler)
if targethandler is None:
# no generators registered:
return target
return targethandler.anchor.root
def lookup_generators(self):
generators = list()
for genname, generator in getUtilitiesFor(IGenerator):
transformname = genname[:genname.find('.')]
if transformname == self.transform:
generators.append(generator)
generators = self._sortgenerators(generators)
return generators
def _sortgenerators_j(self, generators):
# jensens flavor of a valid dependency chain sorter
# it breaks the test, nevertheless its output is valid
lookup = odict([(g.name[g.name.find('.')+1:], g) for g in generators])
inkeys = lookup.keys()
outkeys = ['NO']
while len(inkeys) != 0:
iterkeys = [k for k in inkeys]
for inkey in iterkeys:
if lookup[inkey].depends not in outkeys:
continue
outkeys.insert(outkeys.index(lookup[inkey].depends)+1, inkey)
inkeys.remove(inkey)
if len(iterkeys) == len(inkeys):
raise ValueError, 'Broken dependency chain.'
return [lookup[key] for key in outkeys[1:]]
def _sortgenerators_r(self, generators):
dtree = {'NO': ([], {})}
self._makedtree(generators, dtree)
sortedgen = list()
self._fillsorted(sortedgen, dtree)
return sortedgen
_sortgenerators = _sortgenerators_r
def _fillsorted(self, sortedgen, dtree):
"""Flatten dependency tree.
"""
for key in sorted(dtree.keys()):
for gen in dtree[key][0]:
sortedgen.append(gen)
self._fillsorted(sortedgen, dtree[key][1])
def _makedtree(self, generators, dtree):
"""Sort list of generators by generator.dependency.
"""
children = list()
for generator in generators:
if not generator.depends in dtree.keys():
children.append(generator)
continue
genname = generator.name[generator.name.find('.') + 1:]
if dtree[generator.depends][1].get(genname, None) is None:
dtree[generator.depends][1][genname] = ([], {})
for gen in dtree[generator.depends][0]:
if gen.name == generator.name:
continue
dtree[generator.depends][0].append(generator)
for child in dtree.values():
self._makedtree(children, child[1])
def _printdtree(self, dtree, indent=0):
"""Debug function.
"""
keys = dtree.keys()
keys.sort()
for key in keys:
if dtree[key][1].keys():
print indent * ' ' + '%s dependencies:' % key
for gen in dtree[key][0]:
print (indent) * ' ' + ' - %s' % gen.name
self._printdtree(dtree[key][1], indent + 4)
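    # Ordering sketch (transform and generator names are illustrative): three
    # generators whose 'depends' fields form a chain come out parents-first,
    # starting from the 'NO' (no-dependency) root:
    #
    #     gens = [Generator('t.c', 'b'), Generator('t.a', 'NO'),
    #             Generator('t.b', 'a')]
    #     Processor('t')._sortgenerators(gens)  # -> [t.a, t.b, t.c]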
@implementer(IGenerator)
class Generator(object):
"""Default Generator.
"""
def __init__(self, name, depends, description=u''):
self.name = name
self.depends = depends
self.description = description
self.backup = False
def __call__(self, source, target):
self.source = source
self.target = target
self._dispatch([source])
def _dispatch(self, children):
dispatcher = getUtility(IDispatcher, name=self.name)
for child in children:
self.target(child)
dispatcher(child, self.target)
self._dispatch([node for name, node in child.items()])
@implementer(ITargetHandler)
class TargetHandler(object):
"""Abstract target handler.
"""
anchor = None
def __init__(self, root):
self.target = root
if self.anchor is None:
self.anchor = root
def __call__(self, source):
raise NotImplementedError(u"Abstract target handler does not "
"implement ``__call__``.")
def setanchor(self, path):
node = self.target
self._setanchor([node], path)
def _setanchor(self, children, path):
name = path[0]
for child in children:
if name == child.__name__:
if len(path) > 1:
self._setanchor(child.values(), path[1:])
else:
self.anchor = child
return
raise KeyError(u"Target node does not exist.")
class NullTargetHandler(TargetHandler):
"""A target handler which does nothing.
Used as default target handler if no one is defined for a generator.
"""
def __call__(self, source):
pass
class TreeSyncPreperator(TargetHandler):
"""Sync anchor by sourcepath.
"""
def __call__(self, source):
if len(source.path) <= len(readsourcepath(self.anchor)):
elem = self.anchor
while len(readsourcepath(elem)) >= len(source.path):
elem = elem.__parent__
self.anchor = elem
def finalize(self, source, target, set_anchor=True):
writesourcepath(source, target)
write_source_to_target_mapping(source, target)
if set_anchor:
self.anchor = target
@implementer(IScope)
class Scope(object):
"""Scope mapping against interfaces.
"""
def __init__(self, name, interfaces):
if not isinstance(interfaces, list):
interfaces = [interfaces]
self.name = name
self.interfaces = interfaces
def __call__(self, node):
for iface in self.interfaces:
if iface is None:
raise ValueError('The Scope "%s" contains a None-Interface' % (self.name))
if iface.providedBy(node):
return True
return False
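# Hedged sketch (all names illustrative): registering a scope for a
# hypothetical transform called 'uml2fs', so that a handler declaring
# ``scope='example'`` inside that transform only fires for nodes providing
# ``IExampleNode``.
def _example_register_scope():
    from zope.interface import Interface

    class IExampleNode(Interface):
        pass

    scope = Scope('uml2fs.example', [IExampleNode])
    provideUtility(scope, provides=IScope, name=scope.name)
    return scope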
@implementer(IDispatcher)
class Dispatcher(object):
"""Default dispatcher.
"""
def __init__(self, generator):
self.generator = self.name = generator
self.transform = generator[:generator.find('.')]
def __call__(self, source, targethandler):
handlers = self.lookup_handlers()
for handler in handlers:
if handler.scope:
scopename = '%s.%s' % (self.transform, handler.scope)
scope = queryUtility(IScope, name=scopename)
if scope is None:
func = handler._callfunc
dottedpack = func.func_globals['__package__']
print >> sys.stderr, 'No Scope defined with name %s for handler %s, defined in %s' % (scopename, func.__name__, dottedpack)
continue
if not scope(source):
continue
handler(source, targethandler)
def lookup_handlers(self):
handlers = getUtilitiesFor(IHandler)
handlers = [util for name, util in handlers]
unordered = list()
for handler in handlers:
if handler.order == -1:
unordered.append(handler)
handlers = [handler for handler in handlers if handler.order > -1]
handlers.sort(key=lambda handler: handler.order)
handlers = handlers + unordered
handlers = [handler for handler in handlers \
if handler.name.startswith(self.name)]
return handlers
@implementer(IHandler)
class Handler(object):
"""Base handler, can be registered by ``@handler`` decorator.
"""
def __init__(self, name, scope, order):
self.name = name
self.scope = scope
self.order = order
self._callfunc = None
def __call__(self, source, target):
    try:
        self._callfunc(self, source, target)
    except:
        # Check whether the handler belongs to a bleeding edge generator
        # package. If so, print the exception and continue. A generator
        # package is marked as bleeding edge by setting
        # ``_bleeding_edge_ = True`` in its ``__init__.py``.
        func = self._callfunc
        dottedpack = func.func_globals['__package__']
        # ``__import__(dottedpack)`` should return the generator package,
        # but it returns the top-level package instead, so walk down the
        # dotted path manually:
        packnames = dottedpack.split('.')
        pack = __import__(packnames[0])
        for packname in packnames[1:]:
            pack = getattr(pack, packname)
        if getattr(pack, '_bleeding_edge_', None):
            print >> sys.stderr, 'Error in', dottedpack
            print >> sys.stderr, '===================================='
            print >> sys.stderr, 'Generator execution continues, ' + \
                'since this generator is marked as bleeding edge'
            traceback.print_exc(None)
        else:
            # The generator is not bleeding edge, so stop the chain.
            raise
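# Hedged illustration of the bleeding edge escape hatch used above: a
# generator package opts in by setting the flag in its ``__init__.py``
# (package name hypothetical), e.g.
#
#     # mygenerators/__init__.py
#     _bleeding_edge_ = True
#
# Handlers defined in such a package then log their tracebacks instead of
# aborting the whole generator chain.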
def token(name, create, reset=False, **kw):
"""Create or lookup a token by name.
"""
if isinstance(name, list):
name = '.'.join(name)
kw['name'] = name
try:
token = getUtility(IToken, name=name)
if reset:
token.__init__(**kw)
for k in kw:
if not hasattr(token, k): setattr(token, k, kw[k])
except ComponentLookupError, e:
if not create:
raise e
token = Token(**kw)
provideUtility(token, provides=IToken, name=name)
return token
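# Hedged usage sketch (token name and attributes illustrative): create a
# named token on first use, mutate it, and fetch the same instance later.
def _example_token_usage():
    tok = token('example.counter', True, count=0)
    tok.count += 1
    same = token('example.counter', False)
    assert same is tok and same.count == 1
    return same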
@implementer(IToken)
class Token(object):
"""A token.
"""
def __init__(self, **kw):
self.__dict__.update(kw)
|
{
"content_hash": "3503f288d36c088c4a3a35c9e353ce64",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 155,
"avg_line_length": 31.88736263736264,
"alnum_prop": 0.5700008615490653,
"repo_name": "bluedynamics/agx.core",
"id": "002d222728126cecbe7d6050e7741df3d8130615",
"size": "11607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/agx/core/_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "48016"
}
],
"symlink_target": ""
}
|
"""Demo platform that has two fake binary sensors."""
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.helpers.entity import DeviceInfo
from . import DOMAIN
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Demo binary sensor platform."""
async_add_entities(
[
DemoBinarySensor(
"binary_1",
"Basement Floor Wet",
False,
BinarySensorDeviceClass.MOISTURE,
),
DemoBinarySensor(
"binary_2", "Movement Backyard", True, BinarySensorDeviceClass.MOTION
),
]
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo config entry."""
await async_setup_platform(hass, {}, async_add_entities)
class DemoBinarySensor(BinarySensorEntity):
"""representation of a Demo binary sensor."""
def __init__(
self,
unique_id: str,
name: str,
state: bool,
device_class: BinarySensorDeviceClass,
) -> None:
"""Initialize the demo sensor."""
self._unique_id = unique_id
self._name = name
self._state = state
self._sensor_type = device_class
@property
def device_info(self) -> DeviceInfo:
"""Return device info."""
return DeviceInfo(
identifiers={
# Serial numbers are unique identifiers within a specific domain
(DOMAIN, self.unique_id)
},
name=self.name,
)
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def device_class(self) -> BinarySensorDeviceClass:
"""Return the class of this sensor."""
return self._sensor_type
@property
def should_poll(self):
"""No polling needed for a demo binary sensor."""
return False
@property
def name(self):
"""Return the name of the binary sensor."""
return self._name
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
|
{
"content_hash": "fd8f6ecacf376a308a49afc9549d886c",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 86,
"avg_line_length": 27.301204819277107,
"alnum_prop": 0.5856134157105031,
"repo_name": "home-assistant/home-assistant",
"id": "8710308fc678e913a2ccd8abd77d881c53823fcb",
"size": "2266",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/demo/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
}
|
import time
import json
import socket
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler, FileModifiedEvent
with open('config.json') as json_file:
config = json.load(json_file)
client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_sock.connect((config['ip'], config['port']))
connection_msg = '< ' + config['name'] + ' connected !*{&c82-492832> '
client_sock.sendall(str.encode(connection_msg))
chat = []
options = []
my_messages = [connection_msg]
template_reset = ''
users = []
user_str = ''
with open('template.txt') as file:
template_reset = file.read()
with open('chat.txt', 'w') as file:
file.write('<! You logged in as ' + config['name'] + ' !>\n')
file.write(template_reset)
class MyHandler(FileSystemEventHandler):
event = FileModifiedEvent('chat.txt')
def on_modified(self, event):
with open('chat.txt') as file:
lines = file.readlines()
# name_str = lines[1][12:].rstrip()
name_str = config['name']
try:
text_len = 1
while True:
if lines[len(lines)-(4 + text_len)] != '--------------------------\n':
text_len += 1
else:
break
text_str = ''
for text in lines[len(lines)-(3 + text_len):len(lines)-3]:
text_str += '\t\t\t\t\t\t\t ' + text
text_str = text_str.strip()
options = lines[len(lines)-2].strip()
except IndexError:
text_str = ''
options = ''
# 'выход' is Russian for 'exit'
if ('disconnect' in options or 'выход' in options) and my_messages[-1] != '< ' + config['name'] + ' disconected !*{&dc82-492832>':
disconnection_msg = '< ' + config['name'] + ' disconected !*{&dc82-492832>'
client_sock.sendall(str.encode(disconnection_msg))
my_messages.append(disconnection_msg)
elif text_str != my_messages[-1] and name_str != '' and text_str != '' and my_messages[-1] != '< ' + config['name'] + ' disconected !*{&dc82-492832>':
if len(name_str) <= 5:
client_sock.sendall(str.encode(name_str + '\t\t\t\t > ' + text_str))
elif 5 < len(name_str) <= 8:
client_sock.sendall(str.encode(name_str + '\t\t\t > ' + text_str))
elif len(name_str) > 8:
point = len(name_str) - 8
client_sock.sendall(str.encode(name_str[:-point] + '...\t > ' + text_str))
my_messages.append(text_str)
if __name__ == "__main__":
event_handler = MyHandler()
observer = Observer()
observer.schedule(event_handler, path='.', recursive=False)
observer.start()
try:
while True:
time.sleep(1)
if my_messages[-1] == '< ' + config['name'] + ' disconected !*{&dc82-492832>':
raise KeyboardInterrupt
data = client_sock.recv(1024)
if not data:
raise KeyboardInterrupt
# print('server.log > ', data.decode("utf-8"))
if '!*{&u82-492832>' not in data.decode("utf-8"):
chat.append(data.decode("utf-8"))
else:
user_str = data.decode("utf-8")[:-15]
print("\a")
with open('chat.txt', 'w') as file:
file.write('<! You logged in as ' + config['name'] + ' !>\n\n')
file.write('< tChat >\n')
file.write('--------------------------\n')
for msg in chat:
if '!*{&c82-492832>' not in msg and '!*{&dc82-492832>' not in msg and 'shared !*{&file82-492832>' not in msg and len(msg) <= 2048:
if msg != chat[-1]:
if len(config['name']) <= 4:
file.write(msg.replace(config['name']+'\t', 'You\t\t', 1) + '\n\n')
elif 4 < len(config['name']) <= 8:
file.write(msg.replace(config['name']+'\t', 'You\t\t\t', 1) + '\n\n')
elif len(config['name']) > 8:
point = len(config['name']) - 8
file.write(msg.replace(config['name'][:-point]+'...\t', 'You\t\t\t\t\t', 1) + '\n\n')
else:
if len(config['name']) <= 4:
file.write(msg.replace(config['name']+'\t', 'You\t\t', 1) + '\n')
elif 4 < len(config['name']) <= 8:
file.write(msg.replace(config['name']+'\t', 'You\t\t\t', 1) + '\n')
elif len(config['name']) > 8:
point = len(config['name']) - 8
file.write(msg.replace(config['name'][:-point]+'...\t', 'You\t\t\t\t\t', 1) + '\n')
file.write(template_reset)
if user_str:
file.write(user_str + '\n')
except KeyboardInterrupt:
disconnection_msg = '< ' + config['name'] + ' disconected !*{&dc82-492832>'
client_sock.sendall(str.encode(disconnection_msg))
my_messages.append(disconnection_msg)
client_sock.close()
with open('chat.txt', 'w') as file:
file.write('')
observer.stop()
observer.join()
|
{
"content_hash": "5624431a4d28d19f31fda0c1e8fd508a",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 163,
"avg_line_length": 41.45112781954887,
"alnum_prop": 0.47088699437692727,
"repo_name": "FarScripter/tChat",
"id": "c9fa31c972146a1a00e0c435ceb8af55e195f785",
"size": "5518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "last_v/client/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9516"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from future.utils import viewitems, viewkeys
def recurrent_net(
net, cell_net, inputs, initial_cell_inputs,
links, timestep=None, scope=None, outputs_with_grads=(0,),
recompute_blobs_on_backward=None, forward_only=False,
):
'''
net: the main net operator should be added to
cell_net: cell_net which is executed in a recurrent fashion
inputs: sequences to be fed into the recurrent net. Currently only one input
is supported. It has to be in the format T x N x (D1...Dk) where T is the
length of the sequence, N is the batch size and (D1...Dk) are the remaining
dimensions
initial_cell_inputs: inputs of the cell_net for timestep 0.
Format for each input is:
(cell_net_input_name, external_blob_with_data)
links: a dictionary from cell_net input names in moment t+1 and
output names of moment t. Currently we assume that each output becomes
an input for the next timestep.
timestep: name of the timestep blob to be used. If not provided "timestep"
is used.
scope: Internal blobs are going to be scoped in a format
<scope_name>/<blob_name>
If not provided we generate a scope name automatically
outputs_with_grads : position indices of output blobs which will receive
error gradient (from outside recurrent network) during backpropagation
recompute_blobs_on_backward: specify a list of blobs that will be
recomputed for the backward pass, and thus need not be
stored for each forward timestep.
forward_only: if True, only forward steps are executed
'''
assert len(inputs) == 1, "Only one input blob is supported so far"
input_blobs = [str(i[0]) for i in inputs]
initial_input_blobs = [str(x[1]) for x in initial_cell_inputs]
op_name = net.NextName('recurrent')
def s(name):
# We have to manually scope due to our internal/external blob
# relationships.
scope_name = op_name if scope is None else scope
return "{}/{}".format(str(scope_name), str(name))
# determine inputs that are considered to be references
# it is those that are not referred to in inputs or initial_cell_inputs
known_inputs = [str(b) for b in input_blobs + initial_input_blobs]
known_inputs += [str(x[0]) for x in initial_cell_inputs]
if timestep is not None:
known_inputs.append(str(timestep))
references = [
core.BlobReference(b) for b in cell_net.Proto().external_input
if b not in known_inputs]
inner_outputs = list(cell_net.Proto().external_output)
# These gradients are expected to be available during the backward pass
inner_outputs_map = {o: o + '_grad' for o in inner_outputs}
# compute the backward pass of the cell net
if not forward_only:
backward_ops, backward_mapping = core.GradientRegistry.GetBackwardPass(
cell_net.Proto().op, inner_outputs_map)
backward_mapping = {str(k): v for k, v in viewitems(backward_mapping)}
backward_cell_net = core.Net("RecurrentBackwardStep")
del backward_cell_net.Proto().op[:]
if recompute_blobs_on_backward is not None:
# Insert operators to re-compute the specified blobs.
# They are added in the same order as for the forward pass, thus
# the order is correct.
recompute_blobs_on_backward = {str(b) for b in
recompute_blobs_on_backward}
for op in cell_net.Proto().op:
if not recompute_blobs_on_backward.isdisjoint(set(op.output)):
backward_cell_net.Proto().op.extend([op])
# This assertion fires if the recomputed ops produce outputs
# other than the ones declared in recompute_blobs_on_backward
assert set(op.output).issubset(recompute_blobs_on_backward)
backward_cell_net.Proto().op.extend(backward_ops)
# compute blobs used but not defined in the backward pass
backward_ssa, backward_blob_versions = core.get_ssa(
backward_cell_net.Proto())
undefined = core.get_undefined_blobs(backward_ssa)
# also add to the output list the intermediate outputs of fwd_step that
# are used by backward.
ssa, blob_versions = core.get_ssa(cell_net.Proto())
scratches = [
blob
for blob, ver in viewitems(blob_versions)
if (ver > 0 and
blob in undefined and
blob not in cell_net.Proto().external_output)
]
backward_cell_net.Proto().external_input.extend(scratches)
backward_cell_net.Proto().type = 'simple'
else:
backward_cell_net = None
all_inputs = [i[1] for i in inputs] + [
x[1] for x in initial_cell_inputs] + references
all_outputs = []
cell_net.Proto().type = 'simple'
# Internal arguments used by RecurrentNetwork operator
# Links are in the format blob_name, recurrent_states, offset.
# In the moment t we know that corresponding data block is at
# t + offset position in the recurrent_states tensor
forward_links = []
backward_links = []
# Aliases are used to expose outputs to external world
# Format (internal_blob, external_blob, offset)
# Negative offset stands for going from the end,
# positive - from the beginning
aliases = []
# Recurrent states hold the inputs to the cell net over time
recurrent_states = []
for cell_input, _ in initial_cell_inputs:
cell_input = str(cell_input)
# Recurrent_states is going to be (T + 1) x ...
# It stores all inputs and outputs of the cell net over time.
# Or their gradients in the case of the backward pass.
state = s(cell_input + "_states")
states_grad = state + "_grad"
cell_output = links[str(cell_input)]
forward_links.append((cell_input, state, 0))
forward_links.append((cell_output, state, 1))
aliases.append((state, cell_output + "_all", 1))
aliases.append((state, cell_output + "_last", -1))
all_outputs.extend([cell_output + "_all", cell_output + "_last"])
recurrent_states.append(state)
if backward_cell_net is not None:
backward_links.append((cell_output + "_grad", states_grad, 1))
backward_cell_net.Proto().external_input.append(
str(cell_output) + "_grad")
recurrent_input_grad = cell_input + "_grad"
if not backward_blob_versions.get(recurrent_input_grad, 0):
# If nobody writes to this recurrent input gradient, we need
# to make sure it gets to the states grad blob after all.
# We do this by using backward_links which triggers an alias
# This logic is being used for example in a SumOp case
backward_links.append(
(backward_mapping[cell_input], states_grad, 0))
else:
backward_links.append((recurrent_input_grad, states_grad, 0))
for input_t, input_blob in inputs:
forward_links.append((str(input_t), str(input_blob), 0))
if backward_cell_net is not None:
for input_t, input_blob in inputs:
backward_links.append((
backward_mapping[str(input_t)], str(input_blob) + "_grad", 0
))
backward_cell_net.Proto().external_input.extend(
cell_net.Proto().external_input)
backward_cell_net.Proto().external_input.extend(
cell_net.Proto().external_output)
def unpack_triple(x):
if x:
a, b, c = zip(*x)
return a, b, c
return [], [], []
# Splitting into separate lists so we can pass them to C++,
# where we assemble them back
link_internal, link_external, link_offset = unpack_triple(forward_links)
alias_src, alias_dst, alias_offset = unpack_triple(aliases)
recurrent_inputs = [str(x[1]) for x in initial_cell_inputs]
# Make sure that recurrent gradients accumulate with internal gradients
# (if a blob in the backward_cell_net receives gradient from both an
# external connection as well as from within the backward_cell_net,
# those gradients need to be added together, rather than one overwriting
# the other)
if backward_cell_net is not None:
proto = backward_cell_net.Proto()
operators = []
while len(proto.op) > 0:
op = proto.op[-1]
proto.op.remove(op)
operators.append(op)
for op in operators[::-1]:
proto.op.extend([op])
for j, output_blob in enumerate(op.output):
if output_blob in proto.external_input:
# In place operation won't cause issues because it takes
# existing value of a blob into account
if output_blob in op.input:
continue
output_blob = core.BlobReference(output_blob)
accum_blob = output_blob + "_accum"
proto.op[-1].output[j] = str(accum_blob)
backward_cell_net.Sum(
[output_blob, accum_blob],
[output_blob],
)
def map_to_dual_list(m):
return [str(x) for x in list(m.keys())] + \
[str(x) for x in list(m.values())]
backward_args = {}
if backward_cell_net is not None:
backward_mapping_keys = set(viewkeys(backward_mapping))
backward_link_internal, backward_link_external, backward_link_offset = \
unpack_triple(backward_links)
params = [x for x in references if x in backward_mapping_keys]
param_grads = [
str(backward_mapping[x])
for x in references
if x in backward_mapping_keys
]
if recompute_blobs_on_backward is None:
recompute_blobs_on_backward = set()
backward_args = {
'param': [all_inputs.index(p) for p in params],
'backward_link_internal': [str(l) for l in backward_link_internal],
'backward_link_external': [str(l) for l in backward_link_external],
'backward_link_offset': backward_link_offset,
'outputs_with_grads': outputs_with_grads,
'recompute_blobs_on_backward': [
str(b) for b in recompute_blobs_on_backward
],
'param_grads': param_grads,
}
if len(backward_cell_net.Proto().op) != 0:
backward_args['backward_step_net'] = backward_cell_net.Proto()
results = net.RecurrentNetwork(
all_inputs,
all_outputs + [s("step_workspaces")],
alias_src=alias_src,
alias_dst=[str(a) for a in alias_dst],
alias_offset=alias_offset,
recurrent_states=recurrent_states,
initial_recurrent_state_ids=[
all_inputs.index(i) for i in recurrent_inputs
],
link_internal=[str(l) for l in link_internal],
link_external=[str(l) for l in link_external],
link_offset=link_offset,
enable_rnn_executor=1,
step_net=cell_net.Proto(),
timestep="timestep" if timestep is None else str(timestep),
**backward_args
)
# Restore net type since 'rnn' is not recognized outside RNNs
cell_net.Proto().type = 'simple'
# The last output is a list of step workspaces,
# which is only needed internally for gradient propagation
return results[:-1]
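def _example_recurrent_net_usage():
    # Hedged, forward-only usage sketch of ``recurrent_net`` (all blob names
    # here are illustrative, not part of the API). The cell net simply adds
    # the current input to the previous hidden state.
    step_net = core.Net("example_rnn_cell")
    hidden_prev, input_t = step_net.AddExternalInput("hidden_prev", "input_t")
    step_net.Add([hidden_prev, input_t], "hidden")

    net = core.Net("example_rnn")
    hidden_all, hidden_last = recurrent_net(
        net=net,
        cell_net=step_net,
        inputs=[(input_t, "seq_input")],  # a T x N x D sequence blob
        initial_cell_inputs=[(hidden_prev, "hidden_init")],
        links={"hidden_prev": "hidden"},
        forward_only=True,
    )
    return hidden_all, hidden_last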
def set_rnn_executor_config(rnn_op, num_threads=None, max_cuda_streams=None):
from caffe2.proto import caffe2_pb2
assert rnn_op.type in {'RecurrentNetwork', 'RecurrentNetworkGradient'}
def add_arg(s, v):
a = caffe2_pb2.Argument()
a.name = "rnn_executor." + s
a.i = v
rnn_op.arg.extend([a])
if num_threads is not None:
add_arg('num_threads', num_threads)
if max_cuda_streams is not None:
add_arg('max_cuda_streams', max_cuda_streams)
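def _example_tune_rnn_executor(net):
    # Hedged sketch: walk an existing net and cap the RNN executor of every
    # RecurrentNetwork op at 4 threads (the number is illustrative).
    for op in net.Proto().op:
        if op.type == 'RecurrentNetwork':
            set_rnn_executor_config(op, num_threads=4)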
def retrieve_step_blobs(net, prefix='rnn'):
'''
Retrieves blobs from step workspaces (which contain intermediate recurrent
network computation for each timestep) and puts them in the global
workspace. This allows access to the contents of this intermediate
computation in python. Returns the list of extracted blob names.
net: the net from which the step workspace blobs should be extracted
prefix: prefix to append to extracted blob names when placing them in the
global workspace
'''
count = 1
output_list = []
for op in net.Proto().op:
if op.type == "RecurrentNetwork":
blob_name = prefix + "_" + str(count)
count = count + 1
scratch_workspaces_blob_name = op.output[-1]
workspace.RunOperatorOnce(
core.CreateOperator(
"RecurrentNetworkBlobFetcher",
[scratch_workspaces_blob_name],
[blob_name],
prefix=prefix
)
)
output_list += workspace.FetchBlob(blob_name).tolist()
return output_list
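def _example_retrieve_step_blobs(net):
    # Hedged sketch: after running a net that contains a RecurrentNetwork op,
    # pull the per-timestep intermediate blobs into the global workspace and
    # fetch them for inspection.
    workspace.RunNetOnce(net)
    names = retrieve_step_blobs(net, prefix='debug_rnn')
    return {name: workspace.FetchBlob(name) for name in names}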
|
{
"content_hash": "290e8f26ed7d3d960dce23c31898dbef",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 80,
"avg_line_length": 40.4441087613293,
"alnum_prop": 0.6136550384701576,
"repo_name": "xzturn/caffe2",
"id": "e5b48894efbce2f16fcc5d441862e2e2a49f31bc",
"size": "13442",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "caffe2/python/recurrent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3296"
},
{
"name": "C",
"bytes": "678918"
},
{
"name": "C++",
"bytes": "5480393"
},
{
"name": "CMake",
"bytes": "323261"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "2013333"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Makefile",
"bytes": "15290"
},
{
"name": "Metal",
"bytes": "41257"
},
{
"name": "Objective-C",
"bytes": "4053"
},
{
"name": "Objective-C++",
"bytes": "249566"
},
{
"name": "Python",
"bytes": "3658352"
},
{
"name": "Shell",
"bytes": "65206"
}
],
"symlink_target": ""
}
|
import numpy as np
import astropy.units as u
import astropy.coordinates as coord
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose
__all__ = ["JhelumBonaca19"]
class JhelumBonaca19(coord.BaseCoordinateFrame):
"""
A Heliocentric spherical coordinate system defined by the orbit
of the Jhelum stream, as described in
Bonaca et al. 2019.
For more information about this class, see the Astropy documentation
on coordinate frames in :mod:`~astropy.coordinates`.
Parameters
----------
representation : :class:`~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
phi1 : angle_like, optional, must be keyword
    The longitude-like angle corresponding to Jhelum's orbit.
phi2 : angle_like, optional, must be keyword
    The latitude-like angle corresponding to Jhelum's orbit.
distance : :class:`~astropy.units.Quantity`, optional, must be keyword
    The Distance for this object along the line-of-sight.
pm_phi1_cosphi2 : :class:`~astropy.units.Quantity`, optional, must be keyword
    The proper motion in the longitude-like direction corresponding to
    the Jhelum stream's orbit.
pm_phi2 : :class:`~astropy.units.Quantity`, optional, must be keyword
    The proper motion in the latitude-like direction perpendicular to the
    Jhelum stream's orbit.
radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
    The radial velocity of this object along the line-of-sight.
"""
default_representation = coord.SphericalRepresentation
default_differential = coord.SphericalCosLatDifferential
frame_specific_representation_info = {
coord.SphericalRepresentation: [
coord.RepresentationMapping('lon', 'phi1'),
coord.RepresentationMapping('lat', 'phi2'),
coord.RepresentationMapping('distance', 'distance')],
}
_default_wrap_angle = 180*u.deg
def __init__(self, *args, **kwargs):
wrap = kwargs.pop('wrap_longitude', True)
super().__init__(*args, **kwargs)
if wrap and isinstance(self._data, (coord.UnitSphericalRepresentation,
coord.SphericalRepresentation)):
self._data.lon.wrap_angle = self._default_wrap_angle
# TODO: remove this. This is a hack required as of astropy v3.1 in order
# to have the longitude components wrap at the desired angle
def represent_as(self, base, s='base', in_frame_units=False):
r = super().represent_as(base, s=s, in_frame_units=in_frame_units)
r.lon.wrap_angle = self._default_wrap_angle
return r
represent_as.__doc__ = coord.BaseCoordinateFrame.represent_as.__doc__
# Rotation matrix as defined in Bonaca+2019
R = np.array([[0.6173151074, -0.0093826715, -0.7866600433],
[-0.0151801852, -0.9998847743, 0.0000135163],
[-0.7865695266, 0.0119333013, -0.6173864075]])
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.ICRS,
JhelumBonaca19)
def icrs_to_jhelum():
""" Compute the transformation from Galactic spherical to
heliocentric Jhelum coordinates.
"""
return R
@frame_transform_graph.transform(coord.StaticMatrixTransform, JhelumBonaca19,
coord.ICRS)
def jhelum_to_icrs():
""" Compute the transformation from heliocentric Jhelum coordinates to
spherical ICRS.
"""
return matrix_transpose(icrs_to_jhelum())
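def _example_icrs_to_jhelum():
    # Hedged usage sketch (the coordinates are made up): transform an ICRS
    # position into the Jhelum stream frame defined above and read off the
    # stream longitude/latitude.
    c = coord.SkyCoord(ra=30.0 * u.deg, dec=-50.0 * u.deg, frame='icrs')
    j = c.transform_to(JhelumBonaca19())
    return j.phi1, j.phi2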
|
{
"content_hash": "92243c1b6e8829bb928bd7d813bbb73d",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 83,
"avg_line_length": 40.87640449438202,
"alnum_prop": 0.6767454645409565,
"repo_name": "adrn/gary",
"id": "f61bdc9a9c03faf79081d33864e6f6c89a26295d",
"size": "3652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gala/coordinates/jhelum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "61297"
},
{
"name": "C++",
"bytes": "7004"
},
{
"name": "Python",
"bytes": "523293"
}
],
"symlink_target": ""
}
|
from distutils.version import StrictVersion
def test_project_metadata():
import circle_asset.version as v
StrictVersion(v.VERSION)
|
{
"content_hash": "c74b308828b10c87bea7aecb5992113d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 43,
"avg_line_length": 28,
"alnum_prop": 0.7785714285714286,
"repo_name": "prophile/circle-asset",
"id": "766e708a007e67feb3f0d8504340c43a62f41954",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_metadata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7203"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from allauth.account.signals import user_signed_up
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from crumpet.profiles import constants
from crumpet.profiles.fields import PriceField, PercentField, AmountField
from crumpet.profiles.models import UserAccount
@python_2_unicode_compatible
class TradingStrategyProfile(models.Model):
"""Base trading strategy configuration models class."""
name = models.CharField(max_length=100)
note = models.CharField(max_length=255, blank=True)
account = models.ForeignKey(UserAccount)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)

def __str__(self):
    # @python_2_unicode_compatible raises at class definition time under
    # Python 2 when __str__ is missing, so the base class needs one
    return self.name
@python_2_unicode_compatible
class FixedStrategyProfile(TradingStrategyProfile):
"""Configuration for fixed trading strategy."""
buy = PriceField()
sell = PriceField()
class Meta:
db_table = 'strategy_profile_fixed'
def __str__(self):
return 'fixed buy at ${buy}, sell at ${sell}'.format(
buy=self.buy, sell=self.sell)
@python_2_unicode_compatible
class RelativeStrategyProfile(TradingStrategyProfile):
"""Configuration for relative trading strategy."""
buy = PercentField()
sell = PercentField()
class Meta:
db_table = 'strategy_profile_relative'
def __str__(self):
return 'relative buy at {buy}%, sell at {sell}%'.format(
buy=self.buy, sell=self.sell)
def save(self, *args, **kwargs):
# TODO: Check what the fees are for poloniex
min_fee = .2
# NEVER BUY OR SELL UNLESS THESE ASSERTS PASS
assert self.buy < 100 - min_fee
assert self.sell > 100 + min_fee
return super().save(*args, **kwargs)
class SimpleMovingAverage(models.Model):
period = models.CharField(max_length=100, default=25)
class Meta:
db_table = 'simple_moving_average_indicator'
def __str__(self):
return 'Simple moving average with a period of {period}'.format(
period=self.period)
class ExponentialMovingAverage(models.Model):
    period = models.CharField(max_length=100, default=25)

    class Meta:
        db_table = 'exponential_moving_average_indicator'

    def __str__(self):
        return 'Exponential moving average with a period of {period}'.format(
            period=self.period)
class IndicatorParameter(models.Model):
name = models.CharField(max_length=100)
parameter = AmountField()
class Strategy(TradingStrategyProfile):
indicator = models.ForeignKey(IndicatorParameter, related_name="indicator")
class Meta:
db_table = 'to_the_moon_strategy'
def __str__(self):
return 'A simple buy/sell strategy that uses SMA, EMA and Momentum Oscillation as its key indicators.'
###############################################################################
# Poloniex API-based models
# https://poloniex.com/support/api/
###############################################################################
class Ticker(models.Model):
"""
{
'BTC_BCN':
{
'id': 7,
'last': '0.00000157',
'lowestAsk': '0.00000157',
'highestBid': '0.00000156',
'percentChange': '0.34188034',
'baseVolume': '8431.59544575',
'quoteVolume': '5818665335.00883484',
'isFrozen': '0',
'high24hr': '0.00000173',
'low24hr': '0.00000111'
}
}
"""
symbol = models.CharField(max_length=30, verbose_name="Ticker Symbol")
ticker_id = models.CharField(max_length=20)
last = AmountField()
lowest_ask = AmountField()
highest_bid = AmountField()
percent_change = PercentField()
base_volume = AmountField()
quote_volume = AmountField()
is_frozen = models.BooleanField()
high_24_hour = AmountField()
low_24_hour = AmountField()
class Meta:
ordering = ['-percent_change']
get_latest_by = 'percent_change'
db_table = 'poloniex_ticker'
def __str__(self):
return 'last={last}, percent_change={percent_change}'.format(**self.__dict__)
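def _example_ticker_from_api(symbol, payload):
    # Hedged sketch (unsaved instance only): map one entry of the public
    # ticker payload shown in the docstring above onto the Ticker fields;
    # coercion of the string amounts is left to the custom field types.
    return Ticker(
        symbol=symbol,
        ticker_id=str(payload['id']),
        last=payload['last'],
        lowest_ask=payload['lowestAsk'],
        highest_bid=payload['highestBid'],
        percent_change=payload['percentChange'],
        base_volume=payload['baseVolume'],
        quote_volume=payload['quoteVolume'],
        is_frozen=payload['isFrozen'] == '1',
        high_24_hour=payload['high24hr'],
        low_24_hour=payload['low24hr'],
    )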
class Balance(models.Model):
"""
{
"LTC": {
"available": "5.015",
"onOrders": "1.0025",
"btcValue": "0.078"
}
}
"""
created = models.DateTimeField(auto_now_add=True)
account = models.ForeignKey(UserAccount, related_name='balances')
# API fields
available = AmountField()
on_orders = AmountField()
btc_value = AmountField()
class Meta:
get_latest_by = 'created'
ordering = ['-created']
db_table = 'poloniex_balance'
# def __str__(self):
# return '{usd:0>6} US$ | {btc:0>10} BTC'.format(
# usd=self.usd_balance,
# btc=self.btc_balance
# )
"""
BUY/SELL ORDER
{
"orderNumber": 31226040,
"resultingTrades": [
{
"amount": "338.8732",
"date": "2014-10-18 23:03:21",
"rate": "0.00000173",
"total": "0.00058625",
"tradeID": "16164",
"type": "buy"
}
]
}
"""
class Order(models.Model):
type = models.IntegerField(
    choices=constants.ORDER_TYPES,
    db_index=True
)
order_number = models.CharField(
null=True,
blank=True,
max_length=50,
verbose_name="Order Number"
)
amount = AmountField()
price = AmountField()
date = models.DateTimeField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
user_account = models.ForeignKey(UserAccount, related_name='orders')
ticker = models.CharField(verbose_name='Ticker Symbol', max_length=30)
status = models.CharField(
default=None,
choices=constants.ORDER_STATES,
max_length=255,
db_index=True
)
class Meta:
ordering = ['-date']
get_latest_by = 'date'
db_table = 'bot_order'
def __str__(self):
return '{type} {amount} BTC at {price} US$'.format(
type=self.get_type_display(),
amount=self.amount,
price=self.price
)
# class Trade(models.Model):
# total = AmountField()
# price = AmountField()
# amount = AmountField()
# type = models.IntegerField(
# choices=constants.ORDER_TYPES,
# max_length=255,
# db_index=True
# )
# date = models.DateTimeField()
# order = models.ForeignKey(Order, related_name='trades')
# trade_id = models.CharField(max_length=30)
# # Add category
#
# def __str__(self):
# return '{type} {amount} BTC at {price} US$'.format(
# type=self.get_type_display(),
# amount=self.amount,
# price=self.price
# )
def save_user_account_data(sender, request, user, **kwargs):
user_account = UserAccount(
user=user,
created=datetime.now()
)
user_account.save()
user_signed_up.connect(save_user_account_data)
|
{
"content_hash": "501b733a19617114a1bed0dc297d3771",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 110,
"avg_line_length": 27.614503816793892,
"alnum_prop": 0.5828610919143055,
"repo_name": "chrislombaard/cryptopuppet",
"id": "ee0bdd59ccd151f26e1f0e25d57505b13251c9af",
"size": "7235",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "crumpet/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20278"
},
{
"name": "HTML",
"bytes": "24665"
},
{
"name": "JavaScript",
"bytes": "769251"
},
{
"name": "Makefile",
"bytes": "1813"
},
{
"name": "Python",
"bytes": "58382"
}
],
"symlink_target": ""
}
|
import simplejson as json
import ijson
import requests
import copy
import os
import datetime
from PyQt4.QtCore import QObject, pyqtSignal
from path import Path
from ramed.app_logging import logger
from ramed.static import Constants
from ramed.tools.ramed_instance import RamedInstance
from ramed.tools.ramed_form_pdf_export import gen_pdf_export
requests.packages.urllib3.disable_warnings()
class RamedExporter(QObject):
check_started = pyqtSignal(name='checkStarted')
check_ended = pyqtSignal(bool, str, name='checkEnded')
parsing_started = pyqtSignal(name='parsingStarted')
# succeeded, nbInstances, errorMessage
parsing_ended = pyqtSignal(bool, int, str, name='parsingEnded')
export_started = pyqtSignal(name='exportStarted')
# succeeded, index
instance_completed = pyqtSignal(bool, int, name='instanceCompleted')
# ident-string, index
exporting_instance = pyqtSignal(str, int, name='exportingInstance')
# number of successful exports, number of errors
# export_ended = pyqtSignal(int, int, name='exportEnded')
# nb exports success, nb exports failed,
# nb medias success, nb medias failed
export_ended = pyqtSignal(int, int, int, int, name='exportEnded')
export_canceled = pyqtSignal(name='exportCanceled')
# error message
error_raised = pyqtSignal(str, name='ErrorRaised')
def __init__(self, main_window):
super(RamedExporter, self).__init__()
self.main_window = main_window
self.nb_instances = 0
self.cancel_requested = None
self.is_running = False
# connect signals
self.check_started.connect(main_window.check_started)
self.check_ended.connect(main_window.check_ended)
self.parsing_started.connect(main_window.parsing_started)
self.parsing_ended.connect(main_window.parsing_ended)
self.parsing_ended.connect(main_window.view_widget.parsing_ended)
self.export_started.connect(main_window.export_started)
self.exporting_instance.connect(main_window.exporting_instance)
self.exporting_instance.connect(
main_window.view_widget.exporting_instance)
self.instance_completed.connect(
main_window.view_widget.instance_completed)
self.export_ended.connect(main_window.export_ended)
self.export_ended.connect(main_window.view_widget.export_ended)
self.export_canceled.connect(main_window.view_widget.export_canceled)
self.export_canceled.connect(main_window.export_canceled)
self.error_raised.connect(main_window.export_error_raised)
def path_for(self, path):
return os.path.join(self.destination_folder, path)
def submission_filter(self, instance_dict):
try:
instance_id = instance_dict.get('instanceID') or None
instance_date = datetime.date(
*[int(x) for x in instance_dict.get('date').split('-')[:3]])
assert instance_id
assert instance_date >= self.from_date
assert instance_date <= self.to_date
return True
except:
return False
def check_aggregate_presence(self):
self.check_started.emit()
try:
req = requests.get(Constants.AGGREGATE_URL,
timeout=Constants.ODK_TIMEOUT)
assert req.status_code in (200, 201, 301)
success = True
error_message = ""
except (requests.exceptions.RequestException, Exception) as e:
error_message = repr(e)
success = False
finally:
self.check_ended.emit(success, error_message)
def parse(self, destination_folder, fname, from_date, to_date):
self.parsing_started.emit()
self.fname = fname
self.destination_folder = destination_folder
Path(destination_folder).makedirs_p()
self.from_date = from_date
self.to_date = to_date
nb_instances = 0
success = False
error_message = ""
try:
with open(self.fname, encoding="UTF-8", mode='r') as f:
items = ijson.items(f, 'item')
nb_instances = len(list(filter(self.submission_filter, items)))
except IOError:
    error_message = "Unable to read the file."
except ValueError:
    error_message = "The file is not a valid JSON file."
except Exception as e:
error_message = repr(e)
else:
success = True
finally:
self.nb_instances = nb_instances
self.parsing_ended.emit(success, nb_instances, error_message)
def start(self):
self.export_started.emit()
self.is_running = True
exported_instances = []
nb_instances_successful = 0
nb_instances_failed = 0
nb_medias_successful = 0
nb_medias_failed = 0
counter = 0
with open(self.fname, encoding="UTF-8", mode='r') as f:
for instance_dict in filter(self.submission_filter,
ijson.items(f, 'item')):
if self.cancel_requested:
break
# track progression over all
counter += 1
try:
instance = RamedInstance(instance_dict)
except:
# unable to parse instance. Ouch.
nb_instances_failed += 1
continue
try:
    medias = self.export_instance_medias(instance)
except:
    # unable to guess how much succeeded/failed
    medias = {}
finally:
    # count per-instance successes so the running totals do not
    # double-count earlier instances
    nb_ok = len([1 for m in medias.values()
                 if m.get('success') or False])
    nb_medias_successful += nb_ok
    nb_medias_failed += len(medias) - nb_ok
exported_instances.append(instance_dict)
if self.cancel_requested:
break
self.exporting_instance.emit(instance.ident, counter)
try:
    self.export_single_instance(instance)
except:
    nb_instances_failed += 1
    # don't fetch medias for failed exports
    self.instance_completed.emit(False, counter)
    continue
else:
nb_instances_successful += 1
if self.cancel_requested:
break
self.instance_completed.emit(True, counter)
if self.cancel_requested:
self.cleanup_canceled_export(exported_instances)
self.export_canceled.emit()
return
# copy JSON file to destination
fpath = os.path.join(self.destination_folder, "odk_data.json")
with open(fpath, encoding='UTF-8', mode='w') as f:
json.dump(exported_instances, f)
self.is_running = False
self.export_ended.emit(nb_instances_successful, nb_instances_failed,
nb_medias_successful, nb_medias_failed)
def export_single_instance(self, instance):
fname, fpath = gen_pdf_export(self.destination_folder, instance)
def export_instance_medias(self, instance):
medias = copy.deepcopy(instance.medias)
output_dir = os.path.join(self.destination_folder,
instance.folder_name)
Path(output_dir).makedirs_p()
for key, media in medias.items():
url = media.get('url').replace('http://aggregate.defaultdomain',
Constants.AGGREGATE_URL)
fname = "{key}_{fname}".format(key=key,
fname=media.get('filename'))
fpath = os.path.join(output_dir, fname)
try:
assert self.cancel_requested is not True
req = requests.get(url, timeout=Constants.ODK_TIMEOUT)
assert req.status_code == 200
with open(fpath, 'wb') as f:
f.write(req.content)
except Exception as ex:
logger.debug(repr(ex))
# logger.exception(ex)
success = False
else:
success = True
medias[key].update({'success': success})
return medias
def cancel(self):
self.cancel_requested = True
def cleanup_canceled_export(self, instances=[]):
logger.debug("cleanup_canceled_export")
# remove every instance's individual folder
for instance_dict in instances:
instance = RamedInstance(instance_dict)
Path(self.path_for(instance.folder_name)).rmtree_p()
# remove other static folders and files
Path(self.path_for("PDF")).rmtree_p()
Path(self.path_for('odk_data.json')).remove_p()
# try to remove destination folder (if empty)
Path(self.destination_folder).rmdir_p()
self.cancel_requested = None
self.is_running = False
|
{
"content_hash": "4fd2bc6a83a9074008a59feb135bd338",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 79,
"avg_line_length": 36.39688715953307,
"alnum_prop": 0.5839213170836006,
"repo_name": "yeleman/ramed-desktop",
"id": "2800973ac9afd9e82ebc3eff9258ac7564d9dfb6",
"size": "9433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ramed/tools/ramed_export.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "NSIS",
"bytes": "2739"
},
{
"name": "Python",
"bytes": "56448"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
from threading import Thread
import npyscreen
from docker.errors import DockerException
from npyscreen import notify_confirm
from vent.api.system import System
from vent.helpers.logs import Logger
from vent.helpers.meta import Containers
from vent.helpers.meta import Cpu
from vent.helpers.meta import DropLocation
from vent.helpers.meta import Gpu
from vent.helpers.meta import Timestamp
from vent.helpers.meta import Uptime
from vent.helpers.paths import PathDirs
from vent.menus.add import AddForm
from vent.menus.backup import BackupForm
from vent.menus.editor import EditorForm
from vent.menus.inventory_forms import InventoryToolsForm
from vent.menus.services import ServicesForm
from vent.menus.tools import ToolForm
class MainForm(npyscreen.FormBaseNewWithMenus):
""" Main information landing form for the Vent CLI """
@staticmethod
def exit(*args, **kwargs):
os.system('reset')
os.system('stty sane')
try:
sys.exit(0)
except SystemExit: # pragma: no cover
os._exit(0)
def while_waiting(self):
""" Update fields periodically if nothing is happening """
# give a little extra time for file descriptors to close
time.sleep(0.1)
self.addfield.value = Timestamp()
self.addfield.display()
self.addfield2.value = Uptime()
self.addfield2.display()
self.addfield3.value = str(len(Containers()))+' running'
if len(Containers()) > 0:
self.addfield3.labelColor = 'GOOD'
else:
self.addfield3.labelColor = 'DEFAULT'
self.addfield3.display()
# if file drop location changes deal with it
logger = Logger(__name__)
if self.file_drop.value != DropLocation()[1]:
logger.info('Starting: file drop restart')
try:
self.file_drop.value = DropLocation()[1]
logger.info('Path given: ' + str(self.file_drop.value))
except Exception as e: # pragma: no cover
logger.error('file drop restart failed with error: ' + str(e))
logger.info('Finished: file drop restart')
self.file_drop.display()
return
def add_form(self, form, form_name, form_args):
""" Add new form and switch to it """
self.parentApp.addForm(form_name, form, **form_args)
self.parentApp.change_form(form_name)
return
def remove_forms(self, form_names):
""" Remove all forms supplied """
for form in form_names:
try:
self.parentApp.removeForm(form)
except Exception as e: # pragma: no cover
pass
return
def perform_action(self, action):
""" Perform actions in the api from the CLI """
form = ToolForm
s_action = form_action = action.split('_')[0]
form_name = s_action.title() + ' tools'
cores = False
a_type = 'containers'
forms = [action.upper() + 'TOOLS']
form_args = {'color': 'CONTROL',
'names': [s_action],
'name': form_name,
'action_dict': {'action_name': s_action,
'present_t': s_action + 'ing ' + a_type,
'past_t': s_action.title() + ' ' + a_type,
'action': form_action,
'type': a_type,
'cores': cores}}
# grammar rules
vowels = ['a', 'e', 'i', 'o', 'u']
# consonant-vowel-consonant ending
# e.g. stop -> stopping
if s_action[-1] not in vowels and \
s_action[-2] in vowels and \
s_action[-3] not in vowels:
form_args['action_dict']['present_t'] = s_action + \
s_action[-1] + 'ing ' + a_type
# word ends with an 'e'
# e.g. remove -> removing
if s_action[-1] == 'e':
form_args['action_dict']['present_t'] = s_action[:-1] \
+ 'ing ' + a_type
if s_action == 'configure':
form_args['names'].pop()
form_args['names'].append('get_configure')
form_args['names'].append('save_configure')
form_args['names'].append('restart_tools')
if action == 'add':
form = AddForm
forms = ['ADD', 'ADDOPTIONS', 'CHOOSETOOLS']
form_args['name'] = 'Add plugins'
form_args['name'] += '\t'*6 + '^Q to quit'
elif action == 'inventory':
form = InventoryToolsForm
forms = ['INVENTORY']
form_args = {'color': 'STANDOUT', 'name': 'Inventory of tools'}
elif action == 'services':
form = ServicesForm
forms = ['SERVICES']
form_args = {'color': 'STANDOUT',
'name': 'Plugin Services',
'core': True}
elif action == 'services_external':
form = ServicesForm
forms = ['SERVICES']
form_args = {'color': 'STANDOUT',
'name': 'External Services',
'core': True,
'external': True}
form_args['name'] += '\t'*8 + '^T to toggle main'
if s_action in self.view_togglable:
form_args['name'] += '\t'*8 + '^V to toggle group view'
try:
self.remove_forms(forms)
thr = Thread(target=self.add_form, args=(),
kwargs={'form': form,
'form_name': forms[0],
'form_args': form_args})
thr.start()
while thr.is_alive():
npyscreen.notify('Please wait, loading form...',
title='Loading')
time.sleep(1)
except Exception as e: # pragma: no cover
pass
return
def switch_tutorial(self, action):
""" Tutorial forms """
if action == 'background':
self.parentApp.change_form('TUTORIALBACKGROUND')
elif action == 'terminology':
self.parentApp.change_form('TUTORIALTERMINOLOGY')
elif action == 'setup':
self.parentApp.change_form('TUTORIALGETTINGSETUP')
elif action == 'starting_tools':
self.parentApp.change_form('TUTORIALSTARTINGCORES')
elif action == 'adding_tools':
self.parentApp.change_form('TUTORIALADDINGPLUGINS')
elif action == 'adding_files':
self.parentApp.change_form('TUTORIALADDINGFILES')
elif action == 'basic_troubleshooting':
self.parentApp.change_form('TUTORIALTROUBLESHOOTING')
return
def system_commands(self, action):
""" Perform system commands """
if action == 'backup':
status = self.api_action.backup()
if status[0]:
notify_confirm('Vent backup successful')
else:
notify_confirm('Vent backup could not be completed')
elif action == 'start':
status = self.api_action.start()
if status[0]:
notify_confirm('System start complete. '
'Press OK.')
else:
notify_confirm(status[1])
elif action == 'stop':
status = self.api_action.stop()
if status[0]:
notify_confirm('System stop complete. '
'Press OK.')
else:
notify_confirm(status[1])
elif action == 'configure':
# TODO
form_args = {'name': 'Change vent configuration',
'get_configure': self.api_action.get_configure,
'save_configure': self.api_action.save_configure,
'restart_tools': self.api_action.restart_tools,
'vent_cfg': True}
add_kargs = {'form': EditorForm,
'form_name': 'CONFIGUREVENT',
'form_args': form_args}
self.add_form(**add_kargs)
elif action == 'reset':
okay = npyscreen.notify_ok_cancel(
"This factory reset will remove ALL of Vent's user data, "
'containers, and images. Are you sure?',
title='Confirm system command')
if okay:
status = self.api_action.reset()
if status[0]:
notify_confirm('Vent reset complete. '
'Press OK to exit Vent Manager console.')
else:
notify_confirm(status[1])
MainForm.exit()
elif action == 'gpu':
gpu = Gpu(pull=True)
if gpu[0]:
notify_confirm('GPU detection successful. '
'Found: ' + gpu[1])
else:
if gpu[1] == 'Unknown':
notify_confirm('Unable to detect GPUs, try `make gpu` '
'from the vent repository directory. '
'Error: ' + str(gpu[2]))
else:
notify_confirm('No GPUs detected.')
elif action == 'restore':
backup_dir_home = os.path.expanduser('~')
backup_dirs = [f for f in os.listdir(backup_dir_home) if
f.startswith('.vent-backup')]
form_args = {'restore': self.api_action.restore,
'dirs': backup_dirs,
'name': 'Pick a version to restore from' + '\t'*8 +
'^T to toggle main',
'color': 'CONTROL'}
add_kargs = {'form': BackupForm,
'form_name': 'CHOOSEBACKUP',
'form_args': form_args}
self.add_form(**add_kargs)
return
def create(self):
""" Override method for creating FormBaseNewWithMenu form """
try:
self.api_action = System()
except DockerException as de: # pragma: no cover
notify_confirm(str(de),
title='Docker Error',
form_color='DANGER',
wrap=True)
MainForm.exit()
self.add_handlers({'^T': self.help_form, '^Q': MainForm.exit})
# all forms that can toggle view by group
self.view_togglable = ['inventory', 'remove']
#######################
# MAIN SCREEN WIDGETS #
#######################
self.addfield = self.add(npyscreen.TitleFixedText, name='Date:',
labelColor='DEFAULT', value=Timestamp())
self.addfield2 = self.add(npyscreen.TitleFixedText, name='Uptime:',
labelColor='DEFAULT', value=Uptime())
self.cpufield = self.add(npyscreen.TitleFixedText,
name='Logical CPUs:',
labelColor='DEFAULT', value=Cpu())
self.gpufield = self.add(npyscreen.TitleFixedText, name='GPUs:',
labelColor='DEFAULT', value=Gpu()[1])
self.location = self.add(npyscreen.TitleFixedText,
name='User Data:',
value=PathDirs().meta_dir,
labelColor='DEFAULT')
self.file_drop = self.add(npyscreen.TitleFixedText,
name='File Drop:',
value=DropLocation()[1],
labelColor='DEFAULT')
self.addfield3 = self.add(npyscreen.TitleFixedText, name='Containers:',
labelColor='DEFAULT',
value='0 '+' running')
self.multifield1 = self.add(npyscreen.MultiLineEdit, max_height=22,
editable=False, value="""
'.,
'b *
'$ #.
$: #:
*# @):
:@,@): ,.**:'
, :@@*: ..**'
'#o. .:(@'.@*"'
'bq,..:,@@*' ,*
,p$q8,:@)' .p*'
' '@@Pp@@*'
Y7'.'
:@):.
.:@:'.
.::(@:.
_
__ _____ _ __ | |_
\ \ / / _ \ '_ \| __|
\ V / __/ | | | |_
\_/ \___|_| |_|\__|
""")
################
# MENU OPTIONS #
################
# Tool Menu Items
self.m3 = self.add_menu(name='Tools', shortcut='p')
self.m3.addItem(text='Add New Tool',
onSelect=self.perform_action,
arguments=['add'], shortcut='a')
self.m3.addItem(text='Configure Tools',
onSelect=self.perform_action,
arguments=['configure'], shortcut='t')
self.m3.addItem(text='Inventory',
onSelect=self.perform_action,
arguments=['inventory'], shortcut='i')
self.m3.addItem(text='Remove Tools',
onSelect=self.perform_action,
arguments=['remove'], shortcut='r')
self.m3.addItem(text='Start Tools',
onSelect=self.perform_action,
arguments=['start'], shortcut='s')
self.m3.addItem(text='Stop Tools',
onSelect=self.perform_action,
arguments=['stop'], shortcut='p')
# Services Menu Items
self.m5 = self.add_menu(name='Services Running', shortcut='s')
self.m5.addItem(text='External Services', onSelect=self.perform_action,
arguments=['services_external'], shortcut='e')
self.m5.addItem(text='Tool Services',
onSelect=self.perform_action,
arguments=['services'], shortcut='t')
# System Commands Menu Items
self.m6 = self.add_menu(name='System Commands', shortcut='y')
self.m6.addItem(text='Backup', onSelect=self.system_commands,
arguments=['backup'], shortcut='b')
self.m6.addItem(text='Change Vent Configuration',
onSelect=self.system_commands, arguments=['configure'],
shortcut='c')
self.m6.addItem(text='Detect GPUs', onSelect=self.system_commands,
arguments=['gpu'], shortcut='g')
self.m6.addItem(text='Factory Reset', onSelect=self.system_commands,
arguments=['reset'], shortcut='r')
self.m6.addItem(text='Restore (To Be Implemented...)', onSelect=self.system_commands,
arguments=['restore'], shortcut='t')
# TODO this should be either or depending on whether or not it's running already
self.m6.addItem(text='Start', onSelect=self.system_commands,
arguments=['start'], shortcut='s')
self.m6.addItem(text='Stop', onSelect=self.system_commands,
arguments=['stop'], shortcut='o')
self.m6.addItem(text='Upgrade (To Be Implemented...)',
onSelect=self.system_commands,
arguments=['upgrade'], shortcut='u')
# Tutorial Menu Items
self.m7 = self.add_menu(name='Tutorials', shortcut='t')
self.s1 = self.m7.addNewSubmenu(name='About Vent', shortcut='v')
self.s1.addItem(text='Background', onSelect=self.switch_tutorial,
arguments=['background'], shortcut='b')
self.s1.addItem(text='Terminology', onSelect=self.switch_tutorial,
arguments=['terminology'], shortcut='t')
self.s1.addItem(text='Getting Setup', onSelect=self.switch_tutorial,
arguments=['setup'], shortcut='s')
self.s2 = self.m7.addNewSubmenu(name='Working with Tools',
shortcut='c')
self.s2.addItem(text='Starting Tools', onSelect=self.switch_tutorial,
arguments=['starting_tools'], shortcut='s')
self.s3 = self.m7.addNewSubmenu(name='Working with Plugins',
shortcut='p')
self.s3.addItem(text='Adding Tools', onSelect=self.switch_tutorial,
arguments=['adding_tools'], shortcut='a')
self.s4 = self.m7.addNewSubmenu(name='Files', shortcut='f')
self.s4.addItem(text='Adding Files', onSelect=self.switch_tutorial,
arguments=['adding_files'], shortcut='a')
self.s5 = self.m7.addNewSubmenu(name='Help', shortcut='s')
self.s5.addItem(text='Basic Troubleshooting',
onSelect=self.switch_tutorial,
arguments=['basic_troubleshooting'], shortcut='t')
def help_form(self, *args, **keywords):
""" Toggles to help """
self.parentApp.change_form('HELP')
|
{
"content_hash": "25238c1893911e7e839a573aa4c2eba7",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 92,
"avg_line_length": 43.069825436408976,
"alnum_prop": 0.4969602223380233,
"repo_name": "cglewis/vent",
"id": "7048641ffd8a72ce81a6324521f74da29d9be98e",
"size": "17271",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vent/menus/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "8858"
},
{
"name": "Go",
"bytes": "227"
},
{
"name": "Makefile",
"bytes": "4286"
},
{
"name": "Python",
"bytes": "337477"
},
{
"name": "Shell",
"bytes": "4107"
}
],
"symlink_target": ""
}
|
import os
import cPickle as pickle
import numpy as np
from config import *
from PreprocessedData import PreprocessedData, Seq
def load_songs(configs):
# Load 173? chord sequences from Rolling Stone "500 Greatest Songs of All Time"
print configs['corpus']
if configs['corpus'] == 'bach':
fname = os.path.join('data', 'bach_chorales_rn.pkl')
if configs['use_durations']:
fname = os.path.join('data', 'bach_chorales_rn_durs.pkl')
if configs['augmented_data']:
fname = os.path.join('data', 'bach-letters-augmented.pkl')
# fname = os.path.join('data', 'bach_chorales_rn_seqintclass_durs.pkl')
# with open(fname, 'rb') as p:
# seqs = pickle.load(p)
# durations = pickle.load(p)
# clipped_durs = []
# for durs in durations:
# durs = np.asarray(durs)
# durs[durs>2.0] = 2.0
# durs = list(durs)
# clipped_durs.append(durs)
# return seqs, clipped_durs
# fname = os.path.join('data', 'bach_chorales_rn_alone.pkl')
elif configs['corpus'] == 'rock' and not configs['use_letternames']:
fname = os.path.join('data', 'rock-rns.pkl')
elif configs['corpus'] == 'rock' and configs['use_letternames'] and not configs['transposed']:
fname = os.path.join('data', 'rock-lettername-originalKey.pkl')
elif configs['corpus'] == 'rock' and configs["use_letternames"]:
fname = os.path.join('data', 'rock_letternames_fixed.pkl')
if configs['augmented_data']:
fname = os.path.join('data', 'rock-augmented.pkl')
else:
assert False, 'ERROR: Data set configuration not available'
print 'fname', fname
path = os.path.dirname(os.path.realpath(__file__))
fname = os.path.join(path, fname)
print fname
durs = None
with open(fname, 'rb') as p:
seqs = pickle.load(p)
if configs["use_durations"]:
durs = pickle.load(p)
print 'num of songs:', len(seqs)
if configs["use_durations"]:
assert durs is not None, 'ERROR: not yet supporting duration'
# return seqs, durs
else:
return seqs
def get_segmented_songs(seqs=None, min_len=5):
if seqs is None:
from config import get_configs
configs = get_configs()
seqs = load_songs(configs)
subseqs = []
for seq in seqs:
subseq = []
for i, s in enumerate(seq):
subseq.append(s)
if (s == 'I' or s == 'i') and len(subseq) > min_len:
subseqs.append(subseq)
subseq = []
# if i + 1 < len(seq) and (seq[i+1] != 'I' or seq[i+1] != 'i'):
# subseq.append(s)
# with open('subseqs.txt', 'w') as p:
# lines = ''
# for seq in subseqs:
# line = ''
# for s in seq:
# line += '%s ' % s
# line += '\n'
# lines += line
# p.writelines(lines)
return subseqs
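def _example_segmentation():
    # Hedged sketch on toy data: a sequence is cut after each tonic chord
    # ('I' or 'i') once the running segment is longer than min_len.
    toy = [['I', 'IV', 'V', 'I', 'ii', 'V', 'vi', 'IV', 'I']]
    # -> [['I', 'IV', 'V', 'I'], ['ii', 'V', 'vi', 'IV', 'I']]
    return get_segmented_songs(seqs=toy, min_len=3)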
def get_raw_data(configs=None):
if configs is None:
configs = get_configs()
if configs['use_durations']:
seqs, durs = load_songs(configs)
sentences = [Seq(seqs[i], durs[i]) for i in range(len(seqs))]
else:
sentences = load_songs(configs)
print sentences[0]
from word2vec_utility_tools import build_vocab
print "# of sentences", len(sentences)
vocab2index, index2word = build_vocab(sentences,
configs['min_count'])
print "# of syms", len(index2word)
print "syms:", index2word
return sentences, index2word
def get_raw_encoded_data(configs=None):
if configs is None:
configs = get_configs()
sentences, syms = get_raw_data(configs)
seqs = []
for sent in sentences:
seq = [syms.index(s) for s in sent]
seqs.append(seq)
return seqs, syms
def pickle_train_test_seqs():
configs = get_configs()
data = get_data(configs)
syms = data.syms
train_seqs = data.get_train_seqs_data().seqs
test_seqs = data.get_test_seqs_data().seqs
print '# training:', len(train_seqs)
print '# testing:', len(test_seqs)
train_data = dict(seqs=train_seqs, syms=syms)
fname = 'rock-train.pkl'
path = os.path.join('data', 'chords', 'rock', 'train')
if not os.path.isdir(path):
os.makedirs(path)
fpath = os.path.join(path, fname)
print fpath
with open(fpath, 'wb') as p:
pickle.dump(train_data, p)
save_as_text(fpath, train_seqs)
test_data = dict(seqs=test_seqs, syms=syms)
fname = 'rock-test.pkl'
path = os.path.join('data', 'chords', 'rock', 'test')
if not os.path.isdir(path):
        os.makedirs(path)
fpath = os.path.join(path, fname)
print fpath
with open(fpath, 'wb') as p:
pickle.dump(test_data, p)
save_as_text(fpath, test_seqs)
def check_train_test_texts():
pickle_train_test_seqs()
path = os.path.join('data', 'chords', 'rock', 'train')
train_fname = os.path.join(path, 'rock-train.txt')
read_text(train_fname)
path = os.path.join('data', 'chords', 'rock', 'test')
test_fname = os.path.join(path, 'rock-test.txt')
read_text(test_fname)
def get_train_test_data():
configs = get_configs()
data = get_data(configs)
syms = data.syms
print '# of symbols:', len(syms)
train_seqs = data.get_train_seqs_data().seqs
test_seqs = data.get_test_seqs_data().seqs
print '# training:', len(train_seqs)
print '# testing:', len(test_seqs)
print train_seqs[0]
return train_seqs, test_seqs, syms
def get_data(configs=None):
if configs is None:
configs = get_configs()
seqs, syms = get_raw_data(configs)
print 'get data, # of syms', len(syms), len(set(syms))
assert len(syms) == len(set(syms))
window = configs["window"]
data = PreprocessedData(seqs, syms, window)
return data
def get_configs_data():
configs = get_configs()
data = get_data(configs)
return configs, data
def make_rn2letter_dict():
data = get_data()
syms = data.syms
conversion_dict = {}
from music21_chord_tools import roman2letter
for sym in syms:
# print sym
conversion_dict[sym] = roman2letter(sym)
fname = 'rn2letter.pkl'
with open(fname, 'wb') as p:
pickle.dump(conversion_dict, p)
def check_roman_vs_letters():
data = get_data()
syms = data.syms
from music21_chord_tools import roman2letter, letter2roman
mismatches = {}
for sym in syms:
letter = roman2letter(sym)
roman = letter2roman(letter)
if sym != roman:
mismatches[roman] = sym
print '------ mismatch'
print mismatches
def test_get_raw_data():
seqs, syms = get_raw_data()
count_end_or_start_with_C = 0
for seq in seqs:
if seq[0] == 'C' or seq[-1] == 'C':
count_end_or_start_with_C += 1
else:
print seq[0], seq[-1]
print 'number of songs:', len(seqs)
print 'count_end_or_start_with_C:', count_end_or_start_with_C
def save_segmented_songs():
configs = get_configs()
configs['corpus'] = 'rock'
configs['use_letternames'] = False
seqs = load_songs(configs)
seqs = get_segmented_songs(seqs, min_len=5)
print '# of segmented seqs:', len(seqs)
fpath = os.path.join('data', 'rock-rns-segmented.txt')
save_as_text(fpath, seqs)
def convert_to_pickle(fname):
fpath = os.path.join('data', fname)
seqs = read_seqs(fpath)
fname_parts = fname.split('.')
assert len(fname_parts) == 2
    pickle_fname = fname_parts[0] + '.pkl'
pickle_fpath = os.path.join('data', pickle_fname)
with open(pickle_fpath, 'wb') as p:
pickle.dump(seqs, p)
def save_as_text(fpath, seqs):
seqs_strs = []
for seq in seqs:
seq_str = ', '.join(seq) + '\n'
seqs_strs.append(seq_str)
fpath = os.path.splitext(fpath)[0] + '.txt'
print 'save fname:', fpath
with open(fpath, 'w') as p:
p.writelines(seqs_strs)
def read_text(fpath):
print fpath
with open(fpath, 'r') as p:
seqs = p.readlines()
print '# of seqs:', len(seqs)
return seqs
def read_seqs(fpath):
lines = read_text(fpath)
seqs = []
for line in lines:
if ', ' in line:
syms = line.strip().split(', ')
else:
syms = line.strip().split(' ')
        assert all(' ' not in s for s in syms)
seqs.append(syms)
return seqs
if __name__ == '__main__':
# seqs = load_songs()
# print len(seqs)
# seqs = get_segmented_songs()
# make_rn2letter_dict()
# check_roman_vs_letters()
# configs = get_configs()
# seqs, syms = get_raw_data(configs)
# print seqs[0]
# pickle_train_test_seqs()
# check_train_test_texts()
# get_train_test_data()
# test_get_raw_data()
# fname = 'rock-lettername-originalKey.txt'
# convert_to_pickle(fname)
save_segmented_songs()
|
{
"content_hash": "ab8fefb02c9d2721416c875c03205209",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 98,
"avg_line_length": 28.70793650793651,
"alnum_prop": 0.5789008072542298,
"repo_name": "czhuang/ChordRipple",
"id": "96260bf987dd82aae8d9c68d9757952c7601fa95",
"size": "9044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chord2vec/load_songs_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "107282"
},
{
"name": "HTML",
"bytes": "7412"
},
{
"name": "JavaScript",
"bytes": "1408228"
},
{
"name": "Python",
"bytes": "423696"
}
],
"symlink_target": ""
}
|
BOT_NAME = 'CNSpider'
SPIDER_MODULES = ['CNSpider.spiders']
NEWSPIDER_MODULE = 'CNSpider.spiders'
RANDOMIZE_DOWNLOAD_DELAY = True
COOKIES_ENABLED = False
RETRY_ENABLED = False
DOWNLOAD_DELAY = 2
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_PERSIST = True
DOWNLOADER_MIDDLEWARES = {
    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
    'CNSpider.rotate_useragent.RotateUserAgentMiddleware': 400
}
SPIDER_MIDDLEWARES = {
'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
}
REDIS_HOST = '192.168.1.132'
REDIS_PORT = 6379
|
{
"content_hash": "464dcfe60c8157bf377518c39129f198",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 83,
"avg_line_length": 24.08,
"alnum_prop": 0.7458471760797342,
"repo_name": "4xin/scrapy-study",
"id": "837b2f59341e1e3c0c65c583669382d684de3c32",
"size": "862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CNSpider-Redis/CNSpider/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21150"
},
{
"name": "Shell",
"bytes": "66"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Remove StaticInterface
# XXX PLEASE uncomment this in production
from django.contrib.contenttypes.models import ContentType
try:
db.delete_table('static_interface')
db.delete_table('static_inter_key_value')
db.delete_table('static_interface_views')
        except Exception:
            # the old static_interface tables may not exist yet
            pass
for content_type in ContentType.objects.filter(app_label='static_intr'):
content_type.delete()
# Adding model 'StaticReg'
db.create_table('static_reg', (
('domain', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['domain.Domain'])),
('label', self.gf('django.db.models.fields.CharField')(max_length=63, null=True, blank=True)),
('fqdn', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
('ttl', self.gf('django.db.models.fields.PositiveIntegerField')(default=3600, null=True, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=1000, null=True, blank=True)),
('ip_str', self.gf('django.db.models.fields.CharField')(max_length=39)),
('ip_upper', self.gf('django.db.models.fields.BigIntegerField')(null=True, blank=True)),
('ip_lower', self.gf('django.db.models.fields.BigIntegerField')(null=True, blank=True)),
('ip_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('reverse_domain', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='reverse_staticreg_set', null=True, to=orm['domain.Domain'])),
('system', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['systems.System'], null=True, blank=True)),
))
db.send_create_signal('static', ['StaticReg'])
# Adding unique constraint on 'StaticReg', fields ['ip_upper', 'ip_lower', 'label', 'domain']
db.create_unique('static_reg', ['ip_upper', 'ip_lower', 'label', 'domain_id'])
# Adding M2M table for field views on 'StaticReg'
m2m_table_name = db.shorten_name('static_reg_views')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('staticreg', models.ForeignKey(orm['static.staticreg'], null=False)),
('view', models.ForeignKey(orm['view.view'], null=False))
))
db.create_unique(m2m_table_name, ['staticreg_id', 'view_id'])
# Adding model 'StaticRegKeyValue'
db.create_table('static_key_value', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('key', self.gf('django.db.models.fields.CharField')(max_length=255)),
('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
('is_option', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_statement', self.gf('django.db.models.fields.BooleanField')(default=False)),
('has_validator', self.gf('django.db.models.fields.BooleanField')(default=False)),
('obj', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keyvalue_set', to=orm['static.StaticReg'])),
))
db.send_create_signal('static', ['StaticRegKeyValue'])
# Adding unique constraint on 'StaticRegKeyValue', fields ['key', 'value', 'obj']
db.create_unique('static_key_value', ['key', 'value', 'obj_id'])
def backwards(self, orm):
# Removing unique constraint on 'StaticRegKeyValue', fields ['key', 'value', 'obj']
db.delete_unique('static_key_value', ['key', 'value', 'obj_id'])
# Removing unique constraint on 'StaticReg', fields ['ip_upper', 'ip_lower', 'label', 'domain']
db.delete_unique('static_reg', ['ip_upper', 'ip_lower', 'label', 'domain_id'])
# Deleting model 'StaticReg'
db.delete_table('static_reg')
# Removing M2M table for field views on 'StaticReg'
db.delete_table(db.shorten_name('static_reg_views'))
# Deleting model 'StaticRegKeyValue'
db.delete_table('static_key_value')
models = {
'domain.domain': {
'Meta': {'object_name': 'Domain', 'db_table': "'domain'"},
'delegated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reverse': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'master_domain': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['domain.Domain']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'purgeable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'soa': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['soa.SOA']", 'null': 'True', 'blank': 'True'})
},
'soa.soa': {
'Meta': {'unique_together': "(('primary', 'contact', 'description'),)", 'object_name': 'SOA', 'db_table': "'soa'"},
'contact': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'expire': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1209600'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_signed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'minimum': ('django.db.models.fields.PositiveIntegerField', [], {'default': '180'}),
'primary': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'refresh': ('django.db.models.fields.PositiveIntegerField', [], {'default': '180'}),
'retry': ('django.db.models.fields.PositiveIntegerField', [], {'default': '86400'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2013062501'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'})
},
'static.staticreg': {
'Meta': {'unique_together': "(('ip_upper', 'ip_lower', 'label', 'domain'),)", 'object_name': 'StaticReg', 'db_table': "'static_reg'"},
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['domain.Domain']"}),
'fqdn': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_lower': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ip_str': ('django.db.models.fields.CharField', [], {'max_length': '39'}),
'ip_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'ip_upper': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'reverse_domain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reverse_staticreg_set'", 'null': 'True', 'to': "orm['domain.Domain']"}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']", 'null': 'True', 'blank': 'True'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['view.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'static.staticregkeyvalue': {
'Meta': {'unique_together': "(('key', 'value', 'obj'),)", 'object_name': 'StaticRegKeyValue', 'db_table': "'static_key_value'"},
'has_validator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_option': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_statement': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'obj': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keyvalue_set'", 'to': "orm['static.StaticReg']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.allocation': {
'Meta': {'ordering': "['name']", 'object_name': 'Allocation', 'db_table': "u'allocations'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.location': {
'Meta': {'ordering': "['name']", 'object_name': 'Location', 'db_table': "u'locations'"},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'systems.operatingsystem': {
'Meta': {'ordering': "['name', 'version']", 'object_name': 'OperatingSystem', 'db_table': "u'operating_systems'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.servermodel': {
'Meta': {'ordering': "['vendor', 'model']", 'object_name': 'ServerModel', 'db_table': "u'server_models'"},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'part_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.system': {
'Meta': {'object_name': 'System', 'db_table': "u'systems'"},
'allocation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Allocation']", 'null': 'True', 'blank': 'True'}),
'asset_tag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'change_password': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_dhcp_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_dns_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_nagios_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_puppet_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_switch': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'licenses': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'oob_ip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'oob_switch_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'operating_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.OperatingSystem']", 'null': 'True', 'blank': 'True'}),
'patch_panel_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'purchase_price': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rack_order': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'ram': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'server_model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.ServerModel']", 'null': 'True', 'blank': 'True'}),
'switch_ports': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'system_rack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemRack']", 'null': 'True', 'blank': 'True'}),
'system_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemStatus']", 'null': 'True', 'blank': 'True'}),
'system_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemType']", 'null': 'True', 'blank': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'warranty_end': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'warranty_start': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'systems.systemrack': {
'Meta': {'ordering': "['name']", 'object_name': 'SystemRack', 'db_table': "u'system_racks'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.systemstatus': {
'Meta': {'ordering': "['status']", 'object_name': 'SystemStatus', 'db_table': "u'system_statuses'"},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'color_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.systemtype': {
'Meta': {'object_name': 'SystemType', 'db_table': "u'system_types'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'view.view': {
'Meta': {'unique_together': "(('name',),)", 'object_name': 'View', 'db_table': "'view'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['static']
|
{
"content_hash": "af80297187116d9b8fb30b6ecf871024",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 189,
"avg_line_length": 76.61085972850678,
"alnum_prop": 0.5619868879570019,
"repo_name": "mozilla/inventory",
"id": "e4bd8f57b68ff489d3f92e27bd06479d9ae19fe8",
"size": "16955",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/registration/static/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5104"
},
{
"name": "CSS",
"bytes": "362837"
},
{
"name": "CoffeeScript",
"bytes": "9538"
},
{
"name": "HTML",
"bytes": "1195738"
},
{
"name": "JavaScript",
"bytes": "1300342"
},
{
"name": "Makefile",
"bytes": "14421"
},
{
"name": "PHP",
"bytes": "27273"
},
{
"name": "Python",
"bytes": "3642733"
},
{
"name": "Shell",
"bytes": "1783"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, print_function
from django.db import migrations
def forwards_func(apps, schema_editor):
CognateClass = apps.get_model('lexicon', 'CognateClass')
fields = ['gloss_in_root_lang',
'loanword',
'loan_source',
'loan_notes']
print('Copying JSON data to dedicated columns.')
for c in CognateClass.objects.all():
for f in fields:
if f in c.data:
setattr(c, f, c.data[f])
c.save()
def reverse_func(apps, schema_editor):
print('Nothing to do for reverse_func of 0056_cognateclass_unjson')
class Migration(migrations.Migration):
dependencies = [('lexicon', '0055_auto_20160504_1304')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
{
"content_hash": "2cd1720a1320b143b0a31db350cbaa17",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 28.137931034482758,
"alnum_prop": 0.625,
"repo_name": "lingdb/CoBL-public",
"id": "20ad85de78f4b478c74cd05741aba3891292f47a",
"size": "840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ielex/lexicon/migrations/0056_cognateclass_unjson.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "76222"
},
{
"name": "HTML",
"bytes": "558967"
},
{
"name": "JavaScript",
"bytes": "189642"
},
{
"name": "Python",
"bytes": "858438"
},
{
"name": "Shell",
"bytes": "1258"
},
{
"name": "TeX",
"bytes": "119143"
},
{
"name": "Vim script",
"bytes": "870"
}
],
"symlink_target": ""
}
|
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import print_function
from rplibs.six.moves import range
from panda3d.core import Camera, MatrixLens
from rpcore.pynative.shadow_atlas import ShadowAtlas
class ShadowManager(object):
""" Please refer to the native C++ implementation for docstrings and comments.
This is just the python implementation, which does not contain documentation! """
def __init__(self):
self._max_updates = 10
self._atlas = None
self._atlas_size = 4096
self._tag_state_mgr = None
self._atlas_graphics_output = None
self._display_regions = []
self._queued_updates = []
self._cameras = []
self._camera_nps = []
def set_max_updates(self, max_updates):
if max_updates == 0:
print("Warning: max_updates set to 0, no shadow updates will happen")
self._max_updates = max_updates
def set_atlas_size(self, atlas_size):
self._atlas_size = atlas_size
def get_atlas_size(self):
return self._atlas_size
atlas_size = property(get_atlas_size, set_atlas_size)
def set_scene(self, scene_parent):
self._scene_parent = scene_parent
def set_tag_state_manager(self, tag_mgr):
self._tag_state_mgr = tag_mgr
def set_atlas_graphics_output(self, graphics_output):
self._atlas_graphics_output = graphics_output
def get_num_update_slots_left(self):
return self._max_updates - len(self._queued_updates)
num_update_slots_left = property(get_num_update_slots_left)
def get_atlas(self):
return self._atlas
atlas = property(get_atlas)
def init(self):
for i in range(self._max_updates):
camera = Camera("ShadowCam-" + str(i))
camera.set_lens(MatrixLens())
camera.set_active(False)
camera.set_scene(self._scene_parent)
self._tag_state_mgr.register_camera("shadow", camera)
self._camera_nps.append(self._scene_parent.attach_new_node(camera))
self._cameras.append(camera)
region = self._atlas_graphics_output.make_display_region()
region.set_sort(1000)
region.set_clear_depth_active(True)
region.set_clear_depth(1.0)
region.set_clear_color_active(False)
region.set_camera(self._camera_nps[i])
region.set_active(False)
self._display_regions.append(region)
self._atlas = ShadowAtlas(self._atlas_size)
def update(self):
for i in range(len(self._queued_updates), self._max_updates):
self._cameras[i].set_active(False)
self._display_regions[i].set_active(False)
for i, source in enumerate(self._queued_updates):
self._cameras[i].set_active(True)
self._display_regions[i].set_active(True)
self._cameras[i].get_lens().set_user_mat(source.get_mvp())
uv = source.get_uv_region()
self._display_regions[i].set_dimensions(uv.x, uv.x + uv.z, uv.y, uv.y + uv.w)
self._queued_updates = []
def add_update(self, source):
if len(self._queued_updates) >= self._max_updates:
return False
self._queued_updates.append(source)
return True
|
{
"content_hash": "c3caed86fa2378f3180384ddd60d11e1",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 89,
"avg_line_length": 36.83606557377049,
"alnum_prop": 0.6433021806853583,
"repo_name": "croxis/SpaceDrive",
"id": "5c95c2da25495ae3bfed158a71313989bbc9bf01",
"size": "4494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spacedrive/renderpipeline/rpcore/pynative/shadow_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1288"
},
{
"name": "C",
"bytes": "21897"
},
{
"name": "C++",
"bytes": "165025"
},
{
"name": "GLSL",
"bytes": "741524"
},
{
"name": "Groff",
"bytes": "119"
},
{
"name": "Python",
"bytes": "1523574"
}
],
"symlink_target": ""
}
|
"""
Tick locating and formatting
============================
This module contains classes to support completely configurable tick locating
and formatting. Although the locators know nothing about major or minor
ticks, they are used by the Axis class to support major and minor tick
locating and formatting. Generic tick locators and formatters are provided,
as well as domain-specific custom ones.
Tick locating
-------------
The Locator class is the base class for all tick locators. The locators
handle autoscaling of the view limits based on the data limits, and the
choosing of tick locations. A useful semi-automatic tick locator is
MultipleLocator. You initialize this with a base, eg 10, and it picks axis
limits and ticks that are multiples of your base.
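For example, a usage sketch (``ax`` is assumed to be an existing Axes
instance)::
    ax.xaxis.set_major_locator( MultipleLocator(10) )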
The Locator subclasses defined here are
:class:`NullLocator`
No ticks
:class:`FixedLocator`
Tick locations are fixed
:class:`IndexLocator`
locator for index plots (eg. where x = range(len(y)))
:class:`LinearLocator`
evenly spaced ticks from min to max
:class:`LogLocator`
logarithmically ticks from min to max
:class:`MultipleLocator`
ticks and range are a multiple of base;
either integer or float
:class:`OldAutoLocator`
    choose a MultipleLocator and dynamically reassign it for
intelligent ticking during navigation
:class:`MaxNLocator`
finds up to a max number of ticks at nice locations
:class:`AutoLocator`
:class:`MaxNLocator` with simple defaults. This is the default
tick locator for most plotting.
:class:`AutoMinorLocator`
locator for minor ticks when the axis is linear and the
major ticks are uniformly spaced. It subdivides the major
tick interval into a specified number of minor intervals,
defaulting to 4 or 5 depending on the major interval.
There are a number of locators specialized for date locations - see
the dates module
You can define your own locator by deriving from Locator. You must
override the __call__ method, which returns a sequence of locations,
and you will probably want to override the autoscale method to set the
view limits from the data limits.
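For example, a minimal sketch of a custom locator (the class name and
the half-unit spacing rule are purely illustrative)::
    class HalfUnitLocator(Locator):
        'place a tick at every multiple of 0.5 in the view interval'
        def __call__(self):
            vmin, vmax = self.axis.get_view_interval()
            start = 0.5 * math.ceil(vmin / 0.5)
            return self.raise_if_exceeds(np.arange(start, vmax + 0.25, 0.5))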
If you want to override the default locator, use one of the above or a
custom locator and pass it to the x or y axis instance. The relevant
methods are::
ax.xaxis.set_major_locator( xmajorLocator )
ax.xaxis.set_minor_locator( xminorLocator )
ax.yaxis.set_major_locator( ymajorLocator )
ax.yaxis.set_minor_locator( yminorLocator )
The default minor locator is the NullLocator, i.e., no minor ticks are
shown by default.
Tick formatting
---------------
Tick formatting is controlled by classes derived from Formatter. The
formatter operates on a single tick value and returns a string to the
axis.
:class:`NullFormatter`
no labels on the ticks
:class:`IndexFormatter`
set the strings from a list of labels
:class:`FixedFormatter`
set the strings manually for the labels
:class:`FuncFormatter`
user defined function sets the labels
:class:`FormatStrFormatter`
use a sprintf format string
:class:`ScalarFormatter`
default formatter for scalars; autopick the fmt string
:class:`LogFormatter`
formatter for log axes
You can derive your own formatter from the Formatter base class by
simply overriding the ``__call__`` method. The formatter class has access
to the axis view and data limits.
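For example, a minimal sketch of a custom formatter (the name and the
percent labelling are purely illustrative)::
    class SimplePercentFormatter(Formatter):
        'label the tick value x as a percentage'
        def __call__(self, x, pos=None):
            return '%1.0f%%' % (100.0 * x)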
To control the major and minor tick label formats, use one of the
following methods::
ax.xaxis.set_major_formatter( xmajorFormatter )
ax.xaxis.set_minor_formatter( xminorFormatter )
ax.yaxis.set_major_formatter( ymajorFormatter )
ax.yaxis.set_minor_formatter( yminorFormatter )
See :ref:`pylab_examples-major_minor_demo1` for an example of setting
major and minor ticks. See the :mod:`matplotlib.dates` module for
more information and examples of using date locators and formatters.
"""
from __future__ import division, print_function
import decimal
import locale
import math
import numpy as np
from matplotlib import rcParams
from matplotlib import cbook
from matplotlib import transforms as mtransforms
class _DummyAxis(object):
def __init__(self, minpos=0):
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self._minpos = minpos
def get_view_interval(self):
return self.viewLim.intervalx
def set_view_interval(self, vmin, vmax):
self.viewLim.intervalx = vmin, vmax
def get_minpos(self):
return self._minpos
def get_data_interval(self):
return self.dataLim.intervalx
def set_data_interval(self, vmin, vmax):
self.dataLim.intervalx = vmin, vmax
class TickHelper(object):
axis = None
def set_axis(self, axis):
self.axis = axis
def create_dummy_axis(self, **kwargs):
if self.axis is None:
self.axis = _DummyAxis(**kwargs)
def set_view_interval(self, vmin, vmax):
self.axis.set_view_interval(vmin, vmax)
def set_data_interval(self, vmin, vmax):
self.axis.set_data_interval(vmin, vmax)
def set_bounds(self, vmin, vmax):
self.set_view_interval(vmin, vmax)
self.set_data_interval(vmin, vmax)
class Formatter(TickHelper):
"""
Convert the tick location to a string
"""
# some classes want to see all the locs to help format
# individual ones
locs = []
def __call__(self, x, pos=None):
"""Return the format for tick val x at position pos; pos=None
        indicates unspecified"""
raise NotImplementedError('Derived must override')
def format_data(self, value):
return self.__call__(value)
def format_data_short(self, value):
"""return a short string version"""
return self.format_data(value)
def get_offset(self):
return ''
def set_locs(self, locs):
self.locs = locs
def fix_minus(self, s):
"""
some classes may want to replace a hyphen for minus with the
proper unicode symbol as described `here
<http://sourceforge.net/tracker/index.php?func=detail&aid=1962574&group_id=80706&atid=560720>`_.
The default is to do nothing
        Note, if you use this method, eg in :meth:`format_data` or
        :meth:`__call__`, you probably don't want to use it for
:meth:`format_data_short` since the toolbar uses this for
interactive coord reporting and I doubt we can expect GUIs
across platforms will handle the unicode correctly. So for
now the classes that override :meth:`fix_minus` should have an
explicit :meth:`format_data_short` method
"""
return s
class IndexFormatter(Formatter):
"""
format the position x to the nearest i-th label where i=int(x+0.5)
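    A usage sketch::
        fmt = IndexFormatter(['a', 'b', 'c'])
        fmt(1.2)   # returns 'b', since int(1.2 + 0.5) == 1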
"""
def __init__(self, labels):
self.labels = labels
self.n = len(labels)
def __call__(self, x, pos=None):
"""Return the format for tick val x at position pos; pos=None
        indicates unspecified"""
i = int(x + 0.5)
if i < 0:
return ''
elif i >= self.n:
return ''
else:
return self.labels[i]
class NullFormatter(Formatter):
'Always return the empty string'
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return ''
class FixedFormatter(Formatter):
'Return fixed strings for tick labels'
def __init__(self, seq):
"""
*seq* is a sequence of strings. For positions ``i < len(seq)`` return
*seq[i]* regardless of *x*. Otherwise return ''
"""
self.seq = seq
self.offset_string = ''
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if pos is None or pos >= len(self.seq):
return ''
else:
return self.seq[pos]
def get_offset(self):
return self.offset_string
def set_offset_string(self, ofs):
self.offset_string = ofs
class FuncFormatter(Formatter):
"""
User defined function for formatting
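    A usage sketch (``ax`` is assumed to be an existing Axes; the
    kilo-unit labelling is purely illustrative)::
        def thousands(x, pos):
            return '%1.1fk' % (x * 1e-3)
        ax.xaxis.set_major_formatter(FuncFormatter(thousands))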
"""
def __init__(self, func):
self.func = func
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.func(x, pos)
class FormatStrFormatter(Formatter):
"""
Use a format string to format the tick
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.fmt % x
class OldScalarFormatter(Formatter):
"""
Tick location is a plain old number.
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
xmin, xmax = self.axis.get_view_interval()
d = abs(xmax - xmin)
return self.pprint_val(x, d)
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x) < 1e4 and x == int(x):
return '%d' % x
if d < 1e-2:
fmt = '%1.3e'
elif d < 1e-1:
fmt = '%1.3f'
elif d > 1e5:
fmt = '%1.1e'
elif d > 10:
fmt = '%1.1f'
elif d > 1:
fmt = '%1.2f'
else:
fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup) == 2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' % (mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class ScalarFormatter(Formatter):
"""
Tick location is a plain old number. If useOffset==True and the data range
is much smaller than the data average, then an offset will be determined
such that the tick labels are meaningful. Scientific notation is used for
data < 10^-n or data >= 10^m, where n and m are the power limits set using
set_powerlimits((n,m)). The defaults for these are controlled by the
axes.formatter.limits rc parameter.
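    A usage sketch (``ax`` is assumed to be an existing Axes)::
        fmt = ScalarFormatter(useOffset=False)
        fmt.set_powerlimits((-2, 3))
        ax.yaxis.set_major_formatter(fmt)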
"""
def __init__(self, useOffset=True, useMathText=None, useLocale=None):
# useOffset allows plotting small data ranges with large offsets: for
# example: [1+1e-9,1+2e-9,1+3e-9] useMathText will render the offset
# and scientific notation in mathtext
self.set_useOffset(useOffset)
self._usetex = rcParams['text.usetex']
if useMathText is None:
useMathText = rcParams['axes.formatter.use_mathtext']
self._useMathText = useMathText
self.orderOfMagnitude = 0
self.format = ''
self._scientific = True
self._powerlimits = rcParams['axes.formatter.limits']
if useLocale is None:
useLocale = rcParams['axes.formatter.use_locale']
self._useLocale = useLocale
def get_useOffset(self):
return self._useOffset
def set_useOffset(self, val):
if val in [True, False]:
self.offset = 0
self._useOffset = val
else:
self._useOffset = False
self.offset = val
useOffset = property(fget=get_useOffset, fset=set_useOffset)
def get_useLocale(self):
return self._useLocale
def set_useLocale(self, val):
if val is None:
self._useLocale = rcParams['axes.formatter.use_locale']
else:
self._useLocale = val
useLocale = property(fget=get_useLocale, fset=set_useLocale)
def fix_minus(self, s):
"""use a unicode minus rather than hyphen"""
if rcParams['text.usetex'] or not rcParams['axes.unicode_minus']:
return s
else:
return s.replace('-', u'\u2212')
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if len(self.locs) == 0:
return ''
else:
s = self.pprint_val(x)
return self.fix_minus(s)
def set_scientific(self, b):
'''True or False to turn scientific notation on or off
see also :meth:`set_powerlimits`
'''
self._scientific = bool(b)
def set_powerlimits(self, lims):
'''
Sets size thresholds for scientific notation.
e.g. ``formatter.set_powerlimits((-3, 4))`` sets the pre-2007 default
in which scientific notation is used for numbers less than 1e-3 or
greater than 1e4.
See also :meth:`set_scientific`.
'''
assert len(lims) == 2, "argument must be a sequence of length 2"
self._powerlimits = lims
def format_data_short(self, value):
"""return a short formatted string representation of a number"""
if self._useLocale:
return locale.format_string('%-12g', (value,))
else:
return '%-12g' % value
def format_data(self, value):
'return a formatted string representation of a number'
if self._useLocale:
s = locale.format_string('%1.10e', (value,))
else:
s = '%1.10e' % value
s = self._formatSciNotation(s)
return self.fix_minus(s)
def get_offset(self):
"""Return scientific notation, plus offset"""
if len(self.locs) == 0:
return ''
s = ''
if self.orderOfMagnitude or self.offset:
offsetStr = ''
sciNotStr = ''
if self.offset:
offsetStr = self.format_data(self.offset)
if self.offset > 0:
offsetStr = '+' + offsetStr
if self.orderOfMagnitude:
if self._usetex or self._useMathText:
sciNotStr = self.format_data(10 ** self.orderOfMagnitude)
else:
sciNotStr = '1e%d' % self.orderOfMagnitude
if self._useMathText:
if sciNotStr != '':
sciNotStr = r'\times\mathdefault{%s}' % sciNotStr
s = ''.join(('$', sciNotStr,
r'\mathdefault{', offsetStr, '}$'))
elif self._usetex:
if sciNotStr != '':
sciNotStr = r'\times%s' % sciNotStr
s = ''.join(('$', sciNotStr, offsetStr, '$'))
else:
s = ''.join((sciNotStr, offsetStr))
return self.fix_minus(s)
def set_locs(self, locs):
'set the locations of the ticks'
self.locs = locs
if len(self.locs) > 0:
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax - vmin)
if self._useOffset:
self._set_offset(d)
self._set_orderOfMagnitude(d)
self._set_format(vmin, vmax)
def _set_offset(self, range):
# offset of 20,001 is 20,000, for example
locs = self.locs
if locs is None or not len(locs) or range == 0:
self.offset = 0
return
ave_loc = np.mean(locs)
if ave_loc: # dont want to take log10(0)
ave_oom = math.floor(math.log10(np.mean(np.absolute(locs))))
range_oom = math.floor(math.log10(range))
if np.absolute(ave_oom - range_oom) >= 3: # four sig-figs
p10 = 10 ** range_oom
if ave_loc < 0:
self.offset = (math.ceil(np.max(locs) / p10) * p10)
else:
self.offset = (math.floor(np.min(locs) / p10) * p10)
else:
self.offset = 0
def _set_orderOfMagnitude(self, range):
# if scientific notation is to be used, find the appropriate exponent
        # if using a numerical offset, find the exponent after applying the
# offset
if not self._scientific:
self.orderOfMagnitude = 0
return
locs = np.absolute(self.locs)
if self.offset:
oom = math.floor(math.log10(range))
else:
if locs[0] > locs[-1]:
val = locs[0]
else:
val = locs[-1]
if val == 0:
oom = 0
else:
oom = math.floor(math.log10(val))
if oom <= self._powerlimits[0]:
self.orderOfMagnitude = oom
elif oom >= self._powerlimits[1]:
self.orderOfMagnitude = oom
else:
self.orderOfMagnitude = 0
def _set_format(self, vmin, vmax):
# set the format string to format all the ticklabels
if len(self.locs) < 2:
# Temporarily augment the locations with the axis end points.
_locs = list(self.locs) + [vmin, vmax]
else:
_locs = self.locs
locs = (np.asarray(_locs) - self.offset) / 10 ** self.orderOfMagnitude
loc_range = np.ptp(locs)
if len(self.locs) < 2:
# We needed the end points only for the loc_range calculation.
locs = locs[:-2]
loc_range_oom = int(math.floor(math.log10(loc_range)))
# first estimate:
sigfigs = max(0, 3 - loc_range_oom)
# refined estimate:
thresh = 1e-3 * 10 ** loc_range_oom
while sigfigs >= 0:
if np.abs(locs - np.round(locs, decimals=sigfigs)).max() < thresh:
sigfigs -= 1
else:
break
sigfigs += 1
self.format = '%1.' + str(sigfigs) + 'f'
if self._usetex:
self.format = '$%s$' % self.format
elif self._useMathText:
            self.format = r'$\mathdefault{%s}$' % self.format
def pprint_val(self, x):
xp = (x - self.offset) / (10 ** self.orderOfMagnitude)
if np.absolute(xp) < 1e-8:
xp = 0
if self._useLocale:
return locale.format_string(self.format, (xp,))
else:
return self.format % xp
def _formatSciNotation(self, s):
# transform 1e+004 into 1e4, for example
if self._useLocale:
decimal_point = locale.localeconv()['decimal_point']
positive_sign = locale.localeconv()['positive_sign']
else:
decimal_point = '.'
positive_sign = '+'
tup = s.split('e')
try:
significand = tup[0].rstrip('0').rstrip(decimal_point)
sign = tup[1][0].replace(positive_sign, '')
exponent = tup[1][1:].lstrip('0')
if self._useMathText or self._usetex:
if significand == '1' and exponent != '':
# reformat 1x10^y as 10^y
significand = ''
if exponent:
exponent = '10^{%s%s}' % (sign, exponent)
if significand and exponent:
return r'%s{\times}%s' % (significand, exponent)
else:
return r'%s%s' % (significand, exponent)
else:
s = ('%se%s%s' % (significand, sign, exponent)).rstrip('e')
return s
except IndexError:
return s
class LogFormatter(Formatter):
"""
Format values for log axis;
"""
def __init__(self, base=10.0, labelOnlyBase=True):
"""
*base* is used to locate the decade tick,
which will be the only one to be labeled if *labelOnlyBase*
is ``False``
"""
self._base = base + 0.0
self.labelOnlyBase = labelOnlyBase
def base(self, base):
"""change the *base* for labeling - warning: should always match the
base used for :class:`LogLocator`"""
self._base = base
def label_minor(self, labelOnlyBase):
'switch on/off minor ticks labeling'
self.labelOnlyBase = labelOnlyBase
def __call__(self, x, pos=None):
"""Return the format for tick val *x* at position *pos*"""
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax - vmin)
b = self._base
if x == 0.0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x)) / math.log(b)
isDecade = is_close_to_int(fx)
if not isDecade and self.labelOnlyBase:
s = ''
elif x > 10000:
s = '%1.0e' % x
elif x < 1:
s = '%1.0e' % x
else:
s = self.pprint_val(x, d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
def format_data(self, value):
b = self.labelOnlyBase
self.labelOnlyBase = False
value = cbook.strip_math(self.__call__(value))
self.labelOnlyBase = b
return value
def format_data_short(self, value):
'return a short formatted string representation of a number'
return '%-12g' % value
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x) < 1e4 and x == int(x):
return '%d' % x
if d < 1e-2:
fmt = '%1.3e'
elif d < 1e-1:
fmt = '%1.3f'
elif d > 1e5:
fmt = '%1.1e'
elif d > 10:
fmt = '%1.1f'
elif d > 1:
fmt = '%1.2f'
else:
fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup) == 2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' % (mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class LogFormatterExponent(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
"""Return the format for tick val *x* at position *pos*"""
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
d = abs(vmax - vmin)
b = self._base
if x == 0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x)) / math.log(b)
isDecade = is_close_to_int(fx)
if not isDecade and self.labelOnlyBase:
s = ''
#if 0: pass
elif fx > 10000:
s = '%1.0e' % fx
#elif x<1: s = '$10^{%d}$'%fx
#elif x<1: s = '10^%d'%fx
elif fx < 1:
s = '%1.0e' % fx
else:
s = self.pprint_val(fx, d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
class LogFormatterMathtext(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
b = self._base
usetex = rcParams['text.usetex']
# only label the decades
if x == 0:
if usetex:
return '$0$'
else:
                return r'$\mathdefault{0}$'
fx = math.log(abs(x)) / math.log(b)
is_decade = is_close_to_int(fx)
sign_string = '-' if x < 0 else ''
# use string formatting of the base if it is not an integer
if b % 1 == 0.0:
base = '%d' % b
else:
base = '%s' % b
if not is_decade and self.labelOnlyBase:
return ''
elif not is_decade:
if usetex:
return (r'$%s%s^{%.2f}$') % \
(sign_string, base, fx)
else:
                return (r'$\mathdefault{%s%s^{%.2f}}$') % \
(sign_string, base, fx)
else:
if usetex:
return (r'$%s%s^{%d}$') % (sign_string,
base,
nearest_long(fx))
else:
return (r'$\mathdefault{%s%s^{%d}}$') % (sign_string,
base,
nearest_long(fx))
class EngFormatter(Formatter):
"""
Formats axis values using engineering prefixes to represent powers of 1000,
plus a specified unit, eg. 10 MHz instead of 1e7.
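    A usage sketch (``ax`` is assumed to be an existing Axes)::
        ax.xaxis.set_major_formatter(EngFormatter(unit='Hz', places=1))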
"""
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: u"\u03bc", # Greek letter mu
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y"
}
def __init__(self, unit="", places=None):
self.unit = unit
self.places = places
def __call__(self, x, pos=None):
s = "%s%s" % (self.format_eng(x), self.unit)
return self.fix_minus(s)
def format_eng(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) for self.places = 0
'0'
>>> format_eng(1000000) for self.places = 1
'1.0 M'
>>> format_eng("-1e-6") for self.places = 2
u'-1.00 \u03bc'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
dnum = decimal.Decimal(str(num))
sign = 1
if dnum < 0:
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
prefix = self.ENG_PREFIXES[int(pow10)]
mant = sign * dnum / (10 ** pow10)
if self.places is None:
format_str = u"%g %s"
elif self.places == 0:
format_str = u"%i %s"
elif self.places > 0:
format_str = (u"%%.%if %%s" % self.places)
formatted = format_str % (mant, prefix)
return formatted.strip()
class Locator(TickHelper):
"""
Determine the tick locations;
Note, you should not use the same locator between different
:class:`~matplotlib.axis.Axis` because the locator stores references to
the Axis data and view limits
"""
# some automatic tick locators can generate so many ticks they
# kill the machine when you try and render them, see eg sf bug
# report
# https://sourceforge.net/tracker/index.php?func=detail&aid=2715172&group_id=80706&atid=560720.
# This parameter is set to cause locators to raise an error if too
# many ticks are generated
MAXTICKS = 1000
def tick_values(self, vmin, vmax):
"""
Return the values of the located ticks given **vmin** and **vmax**.
.. note::
To get tick locations with the vmin and vmax values defined
automatically for the associated :attr:`axis` simply call
the Locator instance::
>>> print(type(loc))
<type 'Locator'>
>>> print(loc())
[1, 2, 3, 4]
"""
raise NotImplementedError('Derived must override')
def __call__(self):
"""Return the locations of the ticks"""
# note: some locators return data limits, other return view limits,
# hence there is no *one* interface to call self.tick_values.
raise NotImplementedError('Derived must override')
def raise_if_exceeds(self, locs):
"""raise a RuntimeError if Locator attempts to create more than
MAXTICKS locs"""
if len(locs) >= self.MAXTICKS:
msg = ('Locator attempting to generate %d ticks from %s to %s: ' +
'exceeds Locator.MAXTICKS') % (len(locs), locs[0], locs[-1])
raise RuntimeError(msg)
return locs
def view_limits(self, vmin, vmax):
"""
select a scale for the range from vmin to vmax
        Normally this will be overridden.
"""
return mtransforms.nonsingular(vmin, vmax)
def autoscale(self):
"""autoscale the view limits"""
return self.view_limits(*self.axis.get_view_interval())
def pan(self, numsteps):
"""Pan numticks (can be positive or negative)"""
ticks = self()
numticks = len(ticks)
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
if numticks > 2:
step = numsteps * abs(ticks[0] - ticks[1])
else:
d = abs(vmax - vmin)
step = numsteps * d / 6.
vmin += step
vmax += step
self.axis.set_view_interval(vmin, vmax, ignore=True)
def zoom(self, direction):
"Zoom in/out on axis; if direction is >0 zoom in, else zoom out"
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
interval = abs(vmax - vmin)
step = 0.1 * interval * direction
self.axis.set_view_interval(vmin + step, vmax - step, ignore=True)
def refresh(self):
"""refresh internal information based on current lim"""
pass
class IndexLocator(Locator):
"""
Place a tick on every multiple of some base number of points
plotted, eg on every 5th point. It is assumed that you are doing
index plotting; ie the axis is 0, len(data). This is mainly
useful for x ticks.
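    A usage sketch (``ax`` is assumed to be an existing Axes)::
        ax.xaxis.set_major_locator(IndexLocator(base=5, offset=0))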
"""
def __init__(self, base, offset):
'place ticks on the i-th data points where (i-offset)%base==0'
self._base = base
self.offset = offset
def __call__(self):
"""Return the locations of the ticks"""
dmin, dmax = self.axis.get_data_interval()
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
return self.raise_if_exceeds(
np.arange(vmin + self.offset, vmax + 1, self._base))
class FixedLocator(Locator):
"""
Tick locations are fixed. If nbins is not None,
the array of possible positions will be subsampled to
keep the number of ticks <= nbins +1.
The subsampling will be done so as to include the smallest
absolute value; for example, if zero is included in the
array of possibilities, then it is guaranteed to be one of
the chosen ticks.
"""
def __init__(self, locs, nbins=None):
self.locs = np.asarray(locs)
self.nbins = nbins
if self.nbins is not None:
self.nbins = max(self.nbins, 2)
def __call__(self):
return self.tick_values(None, None)
def tick_values(self, vmin, vmax):
""""
Return the locations of the ticks.
.. note::
Because the values are fixed, vmin and vmax are not used in this
method.
"""
if self.nbins is None:
return self.locs
step = max(int(0.99 + len(self.locs) / float(self.nbins)), 1)
ticks = self.locs[::step]
for i in range(1, step):
ticks1 = self.locs[i::step]
if np.absolute(ticks1).min() < np.absolute(ticks).min():
ticks = ticks1
return self.raise_if_exceeds(ticks)
class NullLocator(Locator):
"""
No ticks
"""
def __call__(self):
return self.tick_values(None, None)
def tick_values(self, vmin, vmax):
""""
Return the locations of the ticks.
.. note::
Because the values are Null, vmin and vmax are not used in this
method.
"""
return []
class LinearLocator(Locator):
"""
Determine the tick locations
The first time this function is called it will try to set the
number of ticks to make a nice tick partitioning. Thereafter the
number of ticks will be fixed so that interactive navigation will
be nice
"""
def __init__(self, numticks=None, presets=None):
"""
Use presets to set locs based on lom. A dict mapping vmin, vmax->locs
"""
self.numticks = numticks
if presets is None:
self.presets = {}
else:
self.presets = presets
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
if vmax < vmin:
vmin, vmax = vmax, vmin
if (vmin, vmax) in self.presets:
return self.presets[(vmin, vmax)]
if self.numticks is None:
self._set_numticks()
if self.numticks == 0:
return []
ticklocs = np.linspace(vmin, vmax, self.numticks)
return self.raise_if_exceeds(ticklocs)
def _set_numticks(self):
self.numticks = 11 # todo; be smart here; this is just for dev
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
if vmax < vmin:
vmin, vmax = vmax, vmin
if vmin == vmax:
vmin -= 1
vmax += 1
exponent, remainder = divmod(math.log10(vmax - vmin), 1)
if remainder < 0.5:
exponent -= 1
scale = 10 ** (-exponent)
vmin = math.floor(scale * vmin) / scale
vmax = math.ceil(scale * vmax) / scale
return mtransforms.nonsingular(vmin, vmax)
def closeto(x, y):
if abs(x - y) < 1e-10:
return True
else:
return False
class Base:
'this solution has some hacks to deal with floating point inaccuracies'
def __init__(self, base):
assert(base > 0)
self._base = base
def lt(self, x):
'return the largest multiple of base < x'
d, m = divmod(x, self._base)
if closeto(m, 0) and not closeto(m / self._base, 1):
return (d - 1) * self._base
return d * self._base
def le(self, x):
'return the largest multiple of base <= x'
d, m = divmod(x, self._base)
if closeto(m / self._base, 1): # was closeto(m, self._base)
#looks like floating point error
return (d + 1) * self._base
return d * self._base
def gt(self, x):
'return the smallest multiple of base > x'
d, m = divmod(x, self._base)
if closeto(m / self._base, 1):
#looks like floating point error
return (d + 2) * self._base
return (d + 1) * self._base
def ge(self, x):
'return the smallest multiple of base >= x'
d, m = divmod(x, self._base)
if closeto(m, 0) and not closeto(m / self._base, 1):
return d * self._base
return (d + 1) * self._base
def get_base(self):
return self._base
class MultipleLocator(Locator):
"""
    Set a tick on every integer multiple of *base* within the
    view interval.
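    A usage sketch (``ax`` is assumed to be an existing Axes)::
        ax.yaxis.set_major_locator(MultipleLocator(0.25))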
"""
def __init__(self, base=1.0):
self._base = Base(base)
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
if vmax < vmin:
vmin, vmax = vmax, vmin
vmin = self._base.ge(vmin)
base = self._base.get_base()
n = (vmax - vmin + 0.001 * base) // base
locs = vmin + np.arange(n + 1) * base
return self.raise_if_exceeds(locs)
def view_limits(self, dmin, dmax):
"""
Set the view limits to the nearest multiples of base that
contain the data
"""
vmin = self._base.le(dmin)
vmax = self._base.ge(dmax)
if vmin == vmax:
vmin -= 1
vmax += 1
return mtransforms.nonsingular(vmin, vmax)
def scale_range(vmin, vmax, n=1, threshold=100):
dv = abs(vmax - vmin)
if dv == 0: # maxabsv == 0 is a special case of this.
return 1.0, 0.0
# Note: this should never occur because
# vmin, vmax should have been checked by nonsingular(),
# and spread apart if necessary.
meanv = 0.5 * (vmax + vmin)
if abs(meanv) / dv < threshold:
offset = 0
elif meanv > 0:
ex = divmod(math.log10(meanv), 1)[0]
offset = 10 ** ex
else:
ex = divmod(math.log10(-meanv), 1)[0]
offset = -10 ** ex
ex = divmod(math.log10(dv / n), 1)[0]
scale = 10 ** ex
return scale, offset
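# Illustrative sketch (not part of the original source): scale_range()
# picks a decade scale for the span and, when the data lie far from zero
# relative to the span, an offset to label ticks against.
#   >>> scale_range(0, 123)          # span ~1e2, centered near zero
#   (100.0, 0)
#   >>> scale_range(100000, 100123)  # mean large relative to the span
#   (100.0, 100000.0)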
class MaxNLocator(Locator):
"""
Select no more than N intervals at nice locations.
"""
default_params = dict(nbins=10,
steps=None,
trim=True,
integer=False,
symmetric=False,
prune=None)
def __init__(self, *args, **kwargs):
"""
Keyword args:
*nbins*
Maximum number of intervals; one less than max number of ticks.
*steps*
Sequence of nice numbers starting with 1 and ending with 10;
e.g., [1, 2, 4, 5, 10]
*integer*
If True, ticks will take only integer values.
*symmetric*
If True, autoscaling will result in a range symmetric
about zero.
*prune*
['lower' | 'upper' | 'both' | None]
Remove edge ticks -- useful for stacked or ganged plots
where the upper tick of one axes overlaps with the lower
tick of the axes above it.
If prune=='lower', the smallest tick will
be removed. If prune=='upper', the largest tick will be
removed. If prune=='both', the largest and smallest ticks
will be removed. If prune==None, no ticks will be removed.
"""
# I left "trim" out; it defaults to True, and it is not
# clear that there is any use case for False, so we may
# want to remove that kwarg. EF 2010/04/18
if args:
kwargs['nbins'] = args[0]
if len(args) > 1:
raise ValueError(
"Keywords are required for all arguments except 'nbins'")
self.set_params(**self.default_params)
self.set_params(**kwargs)
def set_params(self, **kwargs):
if 'nbins' in kwargs:
self._nbins = int(kwargs['nbins'])
if 'trim' in kwargs:
self._trim = kwargs['trim']
if 'integer' in kwargs:
self._integer = kwargs['integer']
if 'symmetric' in kwargs:
self._symmetric = kwargs['symmetric']
if 'prune' in kwargs:
prune = kwargs['prune']
if prune is not None and prune not in ['upper', 'lower', 'both']:
raise ValueError(
"prune must be 'upper', 'lower', 'both', or None")
self._prune = prune
if 'steps' in kwargs:
steps = kwargs['steps']
if steps is None:
self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]
else:
if int(steps[-1]) != 10:
steps = list(steps)
steps.append(10)
self._steps = steps
if 'integer' in kwargs:
self._integer = kwargs['integer']
if self._integer:
self._steps = [n for n in self._steps if divmod(n, 1)[1] < 0.001]
def bin_boundaries(self, vmin, vmax):
nbins = self._nbins
scale, offset = scale_range(vmin, vmax, nbins)
if self._integer:
scale = max(1, scale)
vmin = vmin - offset
vmax = vmax - offset
raw_step = (vmax - vmin) / nbins
scaled_raw_step = raw_step / scale
best_vmax = vmax
best_vmin = vmin
for step in self._steps:
if step < scaled_raw_step:
continue
step *= scale
best_vmin = step * divmod(vmin, step)[0]
best_vmax = best_vmin + step * nbins
if (best_vmax >= vmax):
break
if self._trim:
extra_bins = int(divmod((best_vmax - vmax), step)[0])
nbins -= extra_bins
return (np.arange(nbins + 1) * step + best_vmin + offset)
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=1e-13,
tiny=1e-14)
locs = self.bin_boundaries(vmin, vmax)
prune = self._prune
if prune == 'lower':
locs = locs[1:]
elif prune == 'upper':
locs = locs[:-1]
elif prune == 'both':
locs = locs[1:-1]
return self.raise_if_exceeds(locs)
def view_limits(self, dmin, dmax):
if self._symmetric:
maxabs = max(abs(dmin), abs(dmax))
dmin = -maxabs
dmax = maxabs
dmin, dmax = mtransforms.nonsingular(dmin, dmax, expander=1e-12,
tiny=1.e-13)
return np.take(self.bin_boundaries(dmin, dmax), [0, -1])
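# Illustrative sketch (not part of the original source): MaxNLocator
# covers the range with at most nbins "nice" intervals; array repr may
# vary with NumPy version.
#   >>> MaxNLocator(nbins=4).tick_values(0, 1)
#   array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ])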
def decade_down(x, base=10):
'floor x to the nearest lower decade'
if x == 0.0:
return -base
lx = np.floor(np.log(x) / np.log(base))
return base ** lx
def decade_up(x, base=10):
'ceil x to the nearest higher decade'
if x == 0.0:
return base
lx = np.ceil(np.log(x) / np.log(base))
return base ** lx
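# Illustrative sketch (not part of the original source): the decade
# helpers round to the surrounding powers of the base.
#   >>> decade_down(45), decade_up(45)
#   (10.0, 100.0)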
def nearest_long(x):
if x == 0:
return 0L
elif x > 0:
return long(x + 0.5)
else:
return long(x - 0.5)
def is_decade(x, base=10):
if not np.isfinite(x):
return False
if x == 0.0:
return True
lx = np.log(np.abs(x)) / np.log(base)
return is_close_to_int(lx)
def is_close_to_int(x):
if not np.isfinite(x):
return False
return abs(x - nearest_long(x)) < 1e-10
class LogLocator(Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, base=10.0, subs=[1.0], numdecs=4, numticks=15):
"""
        Place ticks at the locations base**i * subs[j].
"""
self.base(base)
self.subs(subs)
self.numticks = numticks
self.numdecs = numdecs
def base(self, base):
"""
set the base of the log scaling (major tick every base**i, i integer)
"""
self._base = base + 0.0
def subs(self, subs):
"""
        set the minor ticks of the log scaling at base**i * subs[j]
"""
if subs is None:
self._subs = None # autosub
else:
self._subs = np.asarray(subs) + 0.0
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
b = self._base
# dummy axis has no axes attribute
if hasattr(self.axis, 'axes') and self.axis.axes.name == 'polar':
vmax = math.ceil(math.log(vmax) / math.log(b))
decades = np.arange(vmax - self.numdecs, vmax)
ticklocs = b ** decades
return ticklocs
if vmin <= 0.0:
if self.axis is not None:
vmin = self.axis.get_minpos()
if vmin <= 0.0 or not np.isfinite(vmin):
raise ValueError(
"Data has no positive values, and therefore can not be "
"log-scaled.")
vmin = math.log(vmin) / math.log(b)
vmax = math.log(vmax) / math.log(b)
if vmax < vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax) - math.ceil(vmin)
if self._subs is None: # autosub
if numdec > 10:
subs = np.array([1.0])
elif numdec > 6:
subs = np.arange(2.0, b, 2.0)
else:
subs = np.arange(2.0, b)
else:
subs = self._subs
stride = 1
while numdec / stride + 1 > self.numticks:
stride += 1
decades = np.arange(math.floor(vmin),
math.ceil(vmax) + stride, stride)
if hasattr(self, '_transform'):
ticklocs = self._transform.inverted().transform(decades)
            if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = np.ravel(np.outer(subs, ticklocs))
else:
            if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = []
for decadeStart in b ** decades:
ticklocs.extend(subs * decadeStart)
else:
ticklocs = b ** decades
return self.raise_if_exceeds(np.asarray(ticklocs))
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._base
if vmax < vmin:
vmin, vmax = vmax, vmin
if self.axis.axes.name == 'polar':
vmax = math.ceil(math.log(vmax) / math.log(b))
vmin = b ** (vmax - self.numdecs)
return vmin, vmax
minpos = self.axis.get_minpos()
if minpos <= 0 or not np.isfinite(minpos):
raise ValueError(
"Data has no positive values, and therefore can not be "
"log-scaled.")
if vmin <= minpos:
vmin = minpos
if not is_decade(vmin, self._base):
vmin = decade_down(vmin, self._base)
if not is_decade(vmax, self._base):
vmax = decade_up(vmax, self._base)
if vmin == vmax:
vmin = decade_down(vmin, self._base)
vmax = decade_up(vmax, self._base)
result = mtransforms.nonsingular(vmin, vmax)
return result
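# Illustrative sketch (not part of the original source): with the default
# subs=[1.0], LogLocator places one tick per decade of the interval, so
# tick_values(1.0, 1e4) returns ticks at 1, 10, 100, 1000 and 10000.
#   >>> LogLocator(base=10.0).tick_values(1.0, 1e4)   # doctest: +SKIP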
class SymmetricalLogLocator(Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, transform, subs=None):
"""
        Place ticks at the locations base**i * subs[j].
"""
self._transform = transform
if subs is None:
self._subs = [1.0]
else:
self._subs = subs
self.numticks = 15
def __call__(self):
'Return the locations of the ticks'
# Note, these are untransformed coordinates
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
b = self._transform.base
t = self._transform.linthresh
if vmax < vmin:
vmin, vmax = vmax, vmin
# The domain is divided into three sections, only some of
# which may actually be present.
#
# <======== -t ==0== t ========>
# aaaaaaaaa bbbbb ccccccccc
#
# a) and c) will have ticks at integral log positions. The
# number of ticks needs to be reduced if there are more
# than self.numticks of them.
#
# b) has a tick at 0 and only 0 (we assume t is a small
# number, and the linear segment is just an implementation
# detail and not interesting.)
#
# We could also add ticks at t, but that seems to usually be
# uninteresting.
#
# "simple" mode is when the range falls entirely within (-t,
# t) -- it should just display (vmin, 0, vmax)
has_a = has_b = has_c = False
if vmin < -t:
has_a = True
if vmax > -t:
has_b = True
if vmax > t:
has_c = True
elif vmin < 0:
if vmax > 0:
has_b = True
if vmax > t:
has_c = True
else:
return [vmin, vmax]
elif vmin < t:
if vmax > t:
has_b = True
has_c = True
else:
return [vmin, vmax]
else:
has_c = True
def get_log_range(lo, hi):
lo = np.floor(np.log(lo) / np.log(b))
hi = np.ceil(np.log(hi) / np.log(b))
return lo, hi
# First, calculate all the ranges, so we can determine striding
if has_a:
if has_b:
a_range = get_log_range(t, -vmin + 1)
else:
a_range = get_log_range(-vmax, -vmin + 1)
else:
a_range = (0, 0)
if has_c:
if has_b:
c_range = get_log_range(t, vmax + 1)
else:
c_range = get_log_range(vmin, vmax + 1)
else:
c_range = (0, 0)
total_ticks = (a_range[1] - a_range[0]) + (c_range[1] - c_range[0])
if has_b:
total_ticks += 1
stride = max(np.floor(float(total_ticks) / (self.numticks - 1)), 1)
decades = []
if has_a:
decades.extend(-1 * (b ** (np.arange(a_range[0], a_range[1],
stride)[::-1])))
if has_b:
decades.append(0.0)
if has_c:
decades.extend(b ** (np.arange(c_range[0], c_range[1], stride)))
# Add the subticks if requested
if self._subs is None:
subs = np.arange(2.0, b)
else:
subs = np.asarray(self._subs)
if len(subs) > 1 or subs[0] != 1.0:
ticklocs = []
for decade in decades:
ticklocs.extend(subs * decade)
else:
ticklocs = decades
return self.raise_if_exceeds(np.array(ticklocs))
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._transform.base
if vmax < vmin:
vmin, vmax = vmax, vmin
if not is_decade(abs(vmin), b):
if vmin < 0:
vmin = -decade_up(-vmin, b)
else:
vmin = decade_down(vmin, b)
if not is_decade(abs(vmax), b):
if vmax < 0:
vmax = -decade_down(-vmax, b)
else:
vmax = decade_up(vmax, b)
if vmin == vmax:
if vmin < 0:
vmin = -decade_up(-vmin, b)
vmax = -decade_down(-vmax, b)
else:
vmin = decade_down(vmin, b)
vmax = decade_up(vmax, b)
result = mtransforms.nonsingular(vmin, vmax)
return result
class AutoLocator(MaxNLocator):
def __init__(self):
MaxNLocator.__init__(self, nbins=9, steps=[1, 2, 5, 10])
class AutoMinorLocator(Locator):
"""
Dynamically find minor tick positions based on the positions of
major ticks. Assumes the scale is linear and major ticks are
evenly spaced.
"""
def __init__(self, n=None):
"""
*n* is the number of subdivisions of the interval between
major ticks; e.g., n=2 will place a single minor tick midway
between major ticks.
If *n* is omitted or None, it will be set to 5 or 4.
"""
self.ndivs = n
def __call__(self):
'Return the locations of the ticks'
majorlocs = self.axis.get_majorticklocs()
try:
majorstep = majorlocs[1] - majorlocs[0]
except IndexError:
# Need at least two major ticks to find minor tick locations
# TODO: Figure out a way to still be able to display minor
# ticks without two major ticks visible. For now, just display
# no ticks at all.
majorstep = 0
if self.ndivs is None:
if majorstep == 0:
# TODO: Need a better way to figure out ndivs
ndivs = 1
else:
x = int(round(10 ** (np.log10(majorstep) % 1)))
if x in [1, 5, 10]:
ndivs = 5
else:
ndivs = 4
else:
ndivs = self.ndivs
minorstep = majorstep / ndivs
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
if len(majorlocs) > 0:
t0 = majorlocs[0]
tmin = np.ceil((vmin - t0) / minorstep) * minorstep
tmax = np.floor((vmax - t0) / minorstep) * minorstep
locs = np.arange(tmin, tmax, minorstep) + t0
cond = np.abs((locs - t0) % majorstep) > minorstep / 10.0
locs = locs.compress(cond)
else:
locs = []
return self.raise_if_exceeds(np.array(locs))
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
class OldAutoLocator(Locator):
"""
On autoscale this class picks the best MultipleLocator to set the
view limits and the tick locs.
"""
def __init__(self):
self._locator = LinearLocator()
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self.raise_if_exceeds(self._locator())
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
def refresh(self):
'refresh internal information based on current lim'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
d = abs(vmax - vmin)
self._locator = self.get_locator(d)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
d = abs(vmax - vmin)
self._locator = self.get_locator(d)
return self._locator.view_limits(vmin, vmax)
def get_locator(self, d):
'pick the best locator based on a distance'
d = abs(d)
if d <= 0:
locator = MultipleLocator(0.2)
else:
try:
ld = math.log10(d)
except OverflowError:
raise RuntimeError('AutoLocator illegal data interval range')
fld = math.floor(ld)
base = 10 ** fld
#if ld==fld: base = 10**(fld-1)
#else: base = 10**fld
if d >= 5 * base:
ticksize = base
elif d >= 2 * base:
ticksize = base / 2.0
else:
ticksize = base / 5.0
locator = MultipleLocator(ticksize)
return locator
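# Illustrative sketch (not part of the original source): for a data span
# of 12 the decade base is 10 and, since 12 < 2 * 10, the tick size
# becomes base / 5.0 = 2.0, i.e. a MultipleLocator with base 2.0.
#   >>> OldAutoLocator().get_locator(12)._base.get_base()
#   2.0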
__all__ = ('TickHelper', 'Formatter', 'FixedFormatter',
'NullFormatter', 'FuncFormatter', 'FormatStrFormatter',
'ScalarFormatter', 'LogFormatter', 'LogFormatterExponent',
'LogFormatterMathtext', 'Locator', 'IndexLocator',
'FixedLocator', 'NullLocator', 'LinearLocator',
'LogLocator', 'AutoLocator', 'MultipleLocator',
'MaxNLocator', 'AutoMinorLocator',)
|
{
"content_hash": "dcafc6449c49d741efc3ccefff96773b",
"timestamp": "",
"source": "github",
"line_count": 1792,
"max_line_length": 104,
"avg_line_length": 31.013392857142858,
"alnum_prop": 0.5401972074276666,
"repo_name": "lthurlow/Network-Grapher",
"id": "e6c7f4d2d04c7e638ec6fc843d69b422fe35ef59",
"size": "55576",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "proj/external/matplotlib-1.2.1/lib/matplotlib/ticker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6550"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('geral', '0002_auto_20170914_1207'),
]
operations = [
migrations.CreateModel(
name='Dispositivos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=64, verbose_name='Chave de identificação')),
('nome', models.CharField(blank=True, max_length=64, null=True, verbose_name='Nome do dispositivo')),
],
options={
'db_table': 'fo2_ger_dispositivos',
'verbose_name': 'dispositivo',
},
),
]
|
{
"content_hash": "52d87ccaf7113fe3eeb35b6f35d8b3f0",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 117,
"avg_line_length": 32.04,
"alnum_prop": 0.5617977528089888,
"repo_name": "anselmobd/fo2",
"id": "a9a4fe3a3124122c0227bfc41cc1771aed28ed04",
"size": "876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/geral/migrations/0003_dispositivos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from pandas.compat import range, long, zip
from pandas import compat
import re
import numpy as np
from pandas.core.algorithms import unique
from pandas.tseries.offsets import DateOffset
from pandas.util.decorators import cache_readonly
import pandas.tseries.offsets as offsets
import pandas.core.common as com
import pandas.lib as lib
import pandas.tslib as tslib
class FreqGroup(object):
FR_ANN = 1000
FR_QTR = 2000
FR_MTH = 3000
FR_WK = 4000
FR_BUS = 5000
FR_DAY = 6000
FR_HR = 7000
FR_MIN = 8000
FR_SEC = 9000
FR_MS = 10000
FR_US = 11000
FR_NS = 12000
class Resolution(object):
RESO_US = tslib.US_RESO
RESO_MS = tslib.MS_RESO
RESO_SEC = tslib.S_RESO
RESO_MIN = tslib.T_RESO
RESO_HR = tslib.H_RESO
RESO_DAY = tslib.D_RESO
_reso_str_map = {
RESO_US: 'microsecond',
RESO_MS: 'millisecond',
RESO_SEC: 'second',
RESO_MIN: 'minute',
RESO_HR: 'hour',
RESO_DAY: 'day'}
_str_reso_map = dict([(v, k) for k, v in compat.iteritems(_reso_str_map)])
_reso_freq_map = {
'year': 'A',
'quarter': 'Q',
'month': 'M',
'day': 'D',
'hour': 'H',
'minute': 'T',
'second': 'S',
'millisecond': 'L',
'microsecond': 'U',
'nanosecond': 'N'}
_freq_reso_map = dict([(v, k) for k, v in compat.iteritems(_reso_freq_map)])
@classmethod
def get_str(cls, reso):
return cls._reso_str_map.get(reso, 'day')
@classmethod
def get_reso(cls, resostr):
return cls._str_reso_map.get(resostr, cls.RESO_DAY)
@classmethod
def get_freq(cls, resostr):
return cls._reso_freq_map[resostr]
@classmethod
def get_str_from_freq(cls, freq):
return cls._freq_reso_map.get(freq, 'day')
@classmethod
def get_reso_from_freq(cls, freq):
return cls.get_reso(cls.get_str_from_freq(freq))
def get_reso_string(reso):
return Resolution.get_str(reso)
def get_to_timestamp_base(base):
if base < FreqGroup.FR_BUS:
return FreqGroup.FR_DAY
if FreqGroup.FR_HR <= base <= FreqGroup.FR_SEC:
return FreqGroup.FR_SEC
return base
def get_freq_group(freq):
if isinstance(freq, compat.string_types):
base, mult = get_freq_code(freq)
freq = base
return (freq // 1000) * 1000
def get_freq(freq):
if isinstance(freq, compat.string_types):
base, mult = get_freq_code(freq)
freq = base
return freq
def get_freq_code(freqstr):
"""
Parameters
----------
Returns
-------
"""
if isinstance(freqstr, DateOffset):
freqstr = (get_offset_name(freqstr), freqstr.n)
if isinstance(freqstr, tuple):
if (com.is_integer(freqstr[0]) and
com.is_integer(freqstr[1])):
# e.g., freqstr = (2000, 1)
return freqstr
else:
# e.g., freqstr = ('T', 5)
try:
code = _period_str_to_code(freqstr[0])
stride = freqstr[1]
except:
if com.is_integer(freqstr[1]):
raise
code = _period_str_to_code(freqstr[1])
stride = freqstr[0]
return code, stride
if com.is_integer(freqstr):
return (freqstr, 1)
base, stride = _base_and_stride(freqstr)
code = _period_str_to_code(base)
return code, stride
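# Illustrative sketch (not part of the original source): frequency
# strings are split into a period code and a stride.
#   >>> get_freq_code('3M')      # monthly, stride 3
#   (3000, 3)
#   >>> get_freq_code('W-SUN')   # weekly, anchored on Sunday
#   (4000, 1)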
def _get_freq_str(base, mult=1):
code = _reverse_period_code_map.get(base)
if mult == 1:
return code
return str(mult) + code
#----------------------------------------------------------------------
# Offset names ("time rules") and related functions
from pandas.tseries.offsets import (Nano, Micro, Milli, Second, Minute, Hour,
Day, BDay, CDay, Week, MonthBegin,
MonthEnd, BMonthBegin, BMonthEnd,
QuarterBegin, QuarterEnd, BQuarterBegin,
BQuarterEnd, YearBegin, YearEnd,
BYearBegin, BYearEnd, _make_offset
)
try:
cday = CDay()
except NotImplementedError:
cday = None
#: cache of previously seen offsets
_offset_map = {}
_offset_to_period_map = {
'WEEKDAY': 'D',
'EOM': 'M',
'BM': 'M',
'BQS': 'Q',
'QS': 'Q',
'BQ': 'Q',
'BA': 'A',
'AS': 'A',
'BAS': 'A',
'MS': 'M',
'D': 'D',
'C': 'C',
'B': 'B',
'T': 'T',
'S': 'S',
'L': 'L',
'U': 'U',
'N': 'N',
'H': 'H',
'Q': 'Q',
'A': 'A',
'W': 'W',
'M': 'M'
}
need_suffix = ['QS', 'BQ', 'BQS', 'AS', 'BA', 'BAS']
_months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',
'OCT', 'NOV', 'DEC']
for __prefix in need_suffix:
for _m in _months:
_offset_to_period_map['%s-%s' % (__prefix, _m)] = \
_offset_to_period_map[__prefix]
for __prefix in ['A', 'Q']:
for _m in _months:
_alias = '%s-%s' % (__prefix, _m)
_offset_to_period_map[_alias] = _alias
_days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for _d in _days:
_offset_to_period_map['W-%s' % _d] = 'W-%s' % _d
def get_period_alias(offset_str):
""" alias to closest period strings BQ->Q etc"""
return _offset_to_period_map.get(offset_str, None)
_rule_aliases = {
# Legacy rules that will continue to map to their original values
# essentially for the rest of time
'WEEKDAY': 'B',
'EOM': 'BM',
'W@MON': 'W-MON',
'W@TUE': 'W-TUE',
'W@WED': 'W-WED',
'W@THU': 'W-THU',
'W@FRI': 'W-FRI',
'W@SAT': 'W-SAT',
'W@SUN': 'W-SUN',
'W': 'W-SUN',
'Q@JAN': 'BQ-JAN',
'Q@FEB': 'BQ-FEB',
'Q@MAR': 'BQ-MAR',
'Q': 'Q-DEC',
'A': 'A-DEC', # YearEnd(month=12),
'AS': 'AS-JAN', # YearBegin(month=1),
'BA': 'BA-DEC', # BYearEnd(month=12),
'BAS': 'BAS-JAN', # BYearBegin(month=1),
'A@JAN': 'BA-JAN',
'A@FEB': 'BA-FEB',
'A@MAR': 'BA-MAR',
'A@APR': 'BA-APR',
'A@MAY': 'BA-MAY',
'A@JUN': 'BA-JUN',
'A@JUL': 'BA-JUL',
'A@AUG': 'BA-AUG',
'A@SEP': 'BA-SEP',
'A@OCT': 'BA-OCT',
'A@NOV': 'BA-NOV',
'A@DEC': 'BA-DEC',
# lite aliases
'Min': 'T',
'min': 'T',
'ms': 'L',
'us': 'U'
}
#TODO: Can this be killed?
for _i, _weekday in enumerate(['MON', 'TUE', 'WED', 'THU', 'FRI']):
for _iweek in range(4):
_name = 'WOM-%d%s' % (_iweek + 1, _weekday)
_rule_aliases[_name.replace('-', '@')] = _name
# Note that _rule_aliases is not 1:1 (d[BA]==d[A@DEC]), and so traversal
# order matters when constructing an inverse. we pick one. #2331
_legacy_reverse_map = dict((v, k) for k, v in
reversed(sorted(compat.iteritems(_rule_aliases))))
def to_offset(freqstr):
"""
Return DateOffset object from string representation
Examples
--------
>>> to_offset('5Min')
Minute(5)
"""
if freqstr is None:
return None
if isinstance(freqstr, DateOffset):
return freqstr
if isinstance(freqstr, tuple):
name = freqstr[0]
stride = freqstr[1]
if isinstance(stride, compat.string_types):
name, stride = stride, name
name, _ = _base_and_stride(name)
delta = get_offset(name) * stride
else:
delta = None
stride_sign = None
try:
for stride, name, _ in opattern.findall(freqstr):
offset = get_offset(name)
if stride_sign is None:
stride_sign = -1 if stride.startswith('-') else 1
if not stride:
stride = 1
stride = int(stride)
offset = offset * int(np.fabs(stride) * stride_sign)
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError("Could not evaluate %s" % freqstr)
if delta is None:
raise ValueError('Unable to understand %s as a frequency' % freqstr)
return delta
# hack to handle WOM-1MON
opattern = re.compile(r'([\-]?\d*)\s*([A-Za-z]+([\-@][\dA-Za-z\-]+)?)')
def _base_and_stride(freqstr):
"""
Return base freq and stride info from string representation
Examples
--------
    _base_and_stride('5Min') -> ('Min', 5)
"""
groups = opattern.match(freqstr)
if not groups:
raise ValueError("Could not evaluate %s" % freqstr)
stride = groups.group(1)
if len(stride):
stride = int(stride)
else:
stride = 1
base = groups.group(2)
return (base, stride)
def get_base_alias(freqstr):
"""
Returns the base frequency alias, e.g., '5D' -> 'D'
"""
return _base_and_stride(freqstr)[0]
_dont_uppercase = set(('MS', 'ms'))
def get_offset(name):
"""
Return DateOffset object associated with rule name
Examples
--------
get_offset('EOM') --> BMonthEnd(1)
"""
if name not in _dont_uppercase:
name = name.upper()
if name in _rule_aliases:
name = _rule_aliases[name]
elif name.lower() in _rule_aliases:
name = _rule_aliases[name.lower()]
else:
if name in _rule_aliases:
name = _rule_aliases[name]
if name not in _offset_map:
try:
# generate and cache offset
offset = _make_offset(name)
except (ValueError, TypeError, KeyError):
# bad prefix or suffix
raise ValueError('Bad rule name requested: %s.' % name)
_offset_map[name] = offset
return _offset_map[name]
getOffset = get_offset
def get_offset_name(offset):
"""
Return rule name associated with a DateOffset object
Examples
--------
get_offset_name(BMonthEnd(1)) --> 'EOM'
"""
if offset is None:
raise ValueError("Offset can't be none!")
# Hack because this is what it did before...
if isinstance(offset, BDay):
if offset.n != 1:
raise ValueError('Bad rule given: %s.' % 'BusinessDays')
else:
return offset.rule_code
try:
return offset.freqstr
except AttributeError:
# Bad offset, give useful error.
raise ValueError('Bad rule given: %s.' % offset)
def get_legacy_offset_name(offset):
"""
Return the pre pandas 0.8.0 name for the date offset
"""
name = offset.name
return _legacy_reverse_map.get(name, name)
def get_standard_freq(freq):
"""
Return the standardized frequency string
"""
if freq is None:
return None
if isinstance(freq, DateOffset):
return get_offset_name(freq)
code, stride = get_freq_code(freq)
return _get_freq_str(code, stride)
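# Illustrative sketch (not part of the original source): lowercase and
# legacy aliases are normalized before being rendered back to a string.
#   >>> get_standard_freq('5min')
#   '5T'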
#----------------------------------------------------------------------
# Period codes
# period frequency constants corresponding to scikits timeseries
# originals
_period_code_map = {
# Annual freqs with various fiscal year ends.
# eg, 2005 for A-FEB runs Mar 1, 2004 to Feb 28, 2005
"A-DEC": 1000, # Annual - December year end
"A-JAN": 1001, # Annual - January year end
"A-FEB": 1002, # Annual - February year end
"A-MAR": 1003, # Annual - March year end
"A-APR": 1004, # Annual - April year end
"A-MAY": 1005, # Annual - May year end
"A-JUN": 1006, # Annual - June year end
"A-JUL": 1007, # Annual - July year end
"A-AUG": 1008, # Annual - August year end
"A-SEP": 1009, # Annual - September year end
"A-OCT": 1010, # Annual - October year end
"A-NOV": 1011, # Annual - November year end
# Quarterly frequencies with various fiscal year ends.
# eg, Q42005 for Q-OCT runs Aug 1, 2005 to Oct 31, 2005
"Q-DEC": 2000, # Quarterly - December year end
"Q-JAN": 2001, # Quarterly - January year end
"Q-FEB": 2002, # Quarterly - February year end
"Q-MAR": 2003, # Quarterly - March year end
"Q-APR": 2004, # Quarterly - April year end
"Q-MAY": 2005, # Quarterly - May year end
"Q-JUN": 2006, # Quarterly - June year end
"Q-JUL": 2007, # Quarterly - July year end
"Q-AUG": 2008, # Quarterly - August year end
"Q-SEP": 2009, # Quarterly - September year end
"Q-OCT": 2010, # Quarterly - October year end
"Q-NOV": 2011, # Quarterly - November year end
"M": 3000, # Monthly
"W-SUN": 4000, # Weekly - Sunday end of week
"W-MON": 4001, # Weekly - Monday end of week
"W-TUE": 4002, # Weekly - Tuesday end of week
"W-WED": 4003, # Weekly - Wednesday end of week
"W-THU": 4004, # Weekly - Thursday end of week
"W-FRI": 4005, # Weekly - Friday end of week
"W-SAT": 4006, # Weekly - Saturday end of week
"B": 5000, # Business days
"D": 6000, # Daily
"H": 7000, # Hourly
"T": 8000, # Minutely
"S": 9000, # Secondly
"L": 10000, # Millisecondly
"U": 11000, # Microsecondly
"N": 12000, # Nanosecondly
}
_reverse_period_code_map = {}
for _k, _v in compat.iteritems(_period_code_map):
_reverse_period_code_map[_v] = _k
# Additional aliases
_period_code_map.update({
"Q": 2000, # Quarterly - December year end (default quarterly)
"A": 1000, # Annual
"W": 4000, # Weekly
})
def _period_alias_dictionary():
"""
Build freq alias dictionary to support freqs from original c_dates.c file
of the scikits.timeseries library.
"""
alias_dict = {}
M_aliases = ["M", "MTH", "MONTH", "MONTHLY"]
B_aliases = ["B", "BUS", "BUSINESS", "BUSINESSLY", 'WEEKDAY']
D_aliases = ["D", "DAY", "DLY", "DAILY"]
H_aliases = ["H", "HR", "HOUR", "HRLY", "HOURLY"]
T_aliases = ["T", "MIN", "MINUTE", "MINUTELY"]
S_aliases = ["S", "SEC", "SECOND", "SECONDLY"]
L_aliases = ["L", "ms", "MILLISECOND", "MILLISECONDLY"]
U_aliases = ["U", "US", "MICROSECOND", "MICROSECONDLY"]
N_aliases = ["N", "NS", "NANOSECOND", "NANOSECONDLY"]
for k in M_aliases:
alias_dict[k] = 'M'
for k in B_aliases:
alias_dict[k] = 'B'
for k in D_aliases:
alias_dict[k] = 'D'
for k in H_aliases:
alias_dict[k] = 'H'
for k in T_aliases:
alias_dict[k] = 'Min'
for k in S_aliases:
alias_dict[k] = 'S'
for k in L_aliases:
alias_dict[k] = 'L'
for k in U_aliases:
alias_dict[k] = 'U'
for k in N_aliases:
alias_dict[k] = 'N'
A_prefixes = ["A", "Y", "ANN", "ANNUAL", "ANNUALLY", "YR", "YEAR",
"YEARLY"]
Q_prefixes = ["Q", "QTR", "QUARTER", "QUARTERLY", "Q-E",
"QTR-E", "QUARTER-E", "QUARTERLY-E"]
month_names = [
["DEC", "DECEMBER"],
["JAN", "JANUARY"],
["FEB", "FEBRUARY"],
["MAR", "MARCH"],
["APR", "APRIL"],
["MAY", "MAY"],
["JUN", "JUNE"],
["JUL", "JULY"],
["AUG", "AUGUST"],
["SEP", "SEPTEMBER"],
["OCT", "OCTOBER"],
["NOV", "NOVEMBER"]]
seps = ["@", "-"]
for k in A_prefixes:
alias_dict[k] = 'A'
for m_tup in month_names:
for sep in seps:
m1, m2 = m_tup
alias_dict[k + sep + m1] = 'A-' + m1
alias_dict[k + sep + m2] = 'A-' + m1
for k in Q_prefixes:
alias_dict[k] = 'Q'
for m_tup in month_names:
for sep in seps:
m1, m2 = m_tup
alias_dict[k + sep + m1] = 'Q-' + m1
alias_dict[k + sep + m2] = 'Q-' + m1
W_prefixes = ["W", "WK", "WEEK", "WEEKLY"]
day_names = [
["SUN", "SUNDAY"],
["MON", "MONDAY"],
["TUE", "TUESDAY"],
["WED", "WEDNESDAY"],
["THU", "THURSDAY"],
["FRI", "FRIDAY"],
["SAT", "SATURDAY"]]
for k in W_prefixes:
alias_dict[k] = 'W'
for d_tup in day_names:
for sep in ["@", "-"]:
d1, d2 = d_tup
alias_dict[k + sep + d1] = 'W-' + d1
alias_dict[k + sep + d2] = 'W-' + d1
return alias_dict
def _infer_period_group(freqstr):
return _period_group(Resolution._reso_freq_map[freqstr])
def _period_group(freqstr):
base, mult = get_freq_code(freqstr)
return base // 1000 * 1000
_period_alias_dict = _period_alias_dictionary()
def _period_str_to_code(freqstr):
# hack
freqstr = _rule_aliases.get(freqstr, freqstr)
if freqstr not in _dont_uppercase:
freqstr = _rule_aliases.get(freqstr.lower(), freqstr)
try:
if freqstr not in _dont_uppercase:
freqstr = freqstr.upper()
return _period_code_map[freqstr]
except KeyError:
try:
alias = _period_alias_dict[freqstr]
except KeyError:
raise ValueError("Unknown freqstr: %s" % freqstr)
return _period_code_map[alias]
def infer_freq(index, warn=True):
"""
Infer the most likely frequency given the input index. If the frequency is
uncertain, a warning will be printed
Parameters
----------
index : DatetimeIndex
if passed a Series will use the values of the series (NOT THE INDEX)
warn : boolean, default True
Returns
-------
freq : string or None
None if no discernible frequency
TypeError if the index is not datetime-like
"""
import pandas as pd
if isinstance(index, com.ABCSeries):
values = index.values
if not (com.is_datetime64_dtype(index.values) or com.is_timedelta64_dtype(index.values) or values.dtype == object):
raise TypeError("cannot infer freq from a non-convertible dtype on a Series of {0}".format(index.dtype))
index = values
if com.is_period_arraylike(index):
raise TypeError("PeriodIndex given. Check the `freq` attribute "
"instead of using infer_freq.")
elif isinstance(index, pd.TimedeltaIndex):
inferer = _TimedeltaFrequencyInferer(index, warn=warn)
return inferer.get_freq()
if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
if isinstance(index, (pd.Int64Index, pd.Float64Index)):
raise TypeError("cannot infer freq from a non-convertible index type {0}".format(type(index)))
index = index.values
index = pd.DatetimeIndex(index)
inferer = _FrequencyInferer(index, warn=warn)
return inferer.get_freq()
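# Illustrative sketch (not part of the original source): a regular daily
# index is recognized as frequency 'D'.
#   >>> import pandas as pd
#   >>> infer_freq(pd.date_range('2014-01-01', periods=5, freq='D'))
#   'D'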
_ONE_MICRO = long(1000)
_ONE_MILLI = _ONE_MICRO * 1000
_ONE_SECOND = _ONE_MILLI * 1000
_ONE_MINUTE = 60 * _ONE_SECOND
_ONE_HOUR = 60 * _ONE_MINUTE
_ONE_DAY = 24 * _ONE_HOUR
class _FrequencyInferer(object):
"""
Not sure if I can avoid the state machine here
"""
def __init__(self, index, warn=True):
self.index = index
self.values = np.asarray(index).view('i8')
        # This moves the values, which are implicitly in UTC, to
        # the timezone so they are in local time
if hasattr(index,'tz'):
if index.tz is not None:
self.values = tslib.tz_convert(self.values, 'UTC', index.tz)
self.warn = warn
if len(index) < 3:
raise ValueError('Need at least 3 dates to infer frequency')
self.is_monotonic = self.index.is_monotonic
@cache_readonly
def deltas(self):
return tslib.unique_deltas(self.values)
@cache_readonly
def deltas_asi8(self):
return tslib.unique_deltas(self.index.asi8)
@cache_readonly
def is_unique(self):
return len(self.deltas) == 1
@cache_readonly
def is_unique_asi8(self):
return len(self.deltas_asi8) == 1
def get_freq(self):
if not self.is_monotonic or not self.index.is_unique:
return None
delta = self.deltas[0]
if _is_multiple(delta, _ONE_DAY):
return self._infer_daily_rule()
else:
# Possibly intraday frequency. Here we use the
# original .asi8 values as the modified values
# will not work around DST transitions. See #8772
if not self.is_unique_asi8:
return None
delta = self.deltas_asi8[0]
if _is_multiple(delta, _ONE_HOUR):
# Hours
return _maybe_add_count('H', delta / _ONE_HOUR)
elif _is_multiple(delta, _ONE_MINUTE):
# Minutes
return _maybe_add_count('T', delta / _ONE_MINUTE)
elif _is_multiple(delta, _ONE_SECOND):
# Seconds
return _maybe_add_count('S', delta / _ONE_SECOND)
elif _is_multiple(delta, _ONE_MILLI):
# Milliseconds
return _maybe_add_count('L', delta / _ONE_MILLI)
elif _is_multiple(delta, _ONE_MICRO):
# Microseconds
return _maybe_add_count('U', delta / _ONE_MICRO)
else:
# Nanoseconds
return _maybe_add_count('N', delta)
@cache_readonly
def day_deltas(self):
return [x / _ONE_DAY for x in self.deltas]
@cache_readonly
def fields(self):
return tslib.build_field_sarray(self.values)
@cache_readonly
def rep_stamp(self):
return lib.Timestamp(self.values[0])
def month_position_check(self):
# TODO: cythonize this, very slow
calendar_end = True
business_end = True
calendar_start = True
business_start = True
years = self.fields['Y']
months = self.fields['M']
days = self.fields['D']
weekdays = self.index.dayofweek
from calendar import monthrange
for y, m, d, wd in zip(years, months, days, weekdays):
wd = datetime(y, m, d).weekday()
if calendar_start:
calendar_start &= d == 1
if business_start:
business_start &= d == 1 or (d <= 3 and wd == 0)
if calendar_end or business_end:
_, daysinmonth = monthrange(y, m)
cal = d == daysinmonth
if calendar_end:
calendar_end &= cal
if business_end:
business_end &= cal or (daysinmonth - d < 3 and wd == 4)
elif not calendar_start and not business_start:
break
if calendar_end:
return 'ce'
elif business_end:
return 'be'
elif calendar_start:
return 'cs'
elif business_start:
return 'bs'
else:
return None
@cache_readonly
def mdiffs(self):
nmonths = self.fields['Y'] * 12 + self.fields['M']
return tslib.unique_deltas(nmonths.astype('i8'))
@cache_readonly
def ydiffs(self):
return tslib.unique_deltas(self.fields['Y'].astype('i8'))
def _infer_daily_rule(self):
annual_rule = self._get_annual_rule()
if annual_rule:
nyears = self.ydiffs[0]
month = _month_aliases[self.rep_stamp.month]
return _maybe_add_count('%s-%s' % (annual_rule, month), nyears)
quarterly_rule = self._get_quarterly_rule()
if quarterly_rule:
nquarters = self.mdiffs[0] / 3
mod_dict = {0: 12, 2: 11, 1: 10}
month = _month_aliases[mod_dict[self.rep_stamp.month % 3]]
return _maybe_add_count('%s-%s' % (quarterly_rule, month),
nquarters)
monthly_rule = self._get_monthly_rule()
if monthly_rule:
return monthly_rule
if self.is_unique:
days = self.deltas[0] / _ONE_DAY
if days % 7 == 0:
# Weekly
alias = _weekday_rule_aliases[self.rep_stamp.weekday()]
return _maybe_add_count('W-%s' % alias, days / 7)
else:
return _maybe_add_count('D', days)
# Business daily. Maybe
if self.day_deltas == [1, 3]:
return 'B'
wom_rule = self._get_wom_rule()
if wom_rule:
return wom_rule
def _get_annual_rule(self):
if len(self.ydiffs) > 1:
return None
if len(algos.unique(self.fields['M'])) > 1:
return None
pos_check = self.month_position_check()
return {'cs': 'AS', 'bs': 'BAS',
'ce': 'A', 'be': 'BA'}.get(pos_check)
def _get_quarterly_rule(self):
if len(self.mdiffs) > 1:
return None
if not self.mdiffs[0] % 3 == 0:
return None
pos_check = self.month_position_check()
return {'cs': 'QS', 'bs': 'BQS',
'ce': 'Q', 'be': 'BQ'}.get(pos_check)
def _get_monthly_rule(self):
if len(self.mdiffs) > 1:
return None
pos_check = self.month_position_check()
return {'cs': 'MS', 'bs': 'BMS',
'ce': 'M', 'be': 'BM'}.get(pos_check)
def _get_wom_rule(self):
# wdiffs = unique(np.diff(self.index.week))
#We also need -47, -49, -48 to catch index spanning year boundary
# if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all():
# return None
weekdays = unique(self.index.weekday)
if len(weekdays) > 1:
return None
week_of_months = unique((self.index.day - 1) // 7)
if len(week_of_months) > 1:
return None
# get which week
week = week_of_months[0] + 1
wd = _weekday_rule_aliases[weekdays[0]]
return 'WOM-%d%s' % (week, wd)
import pandas.core.algorithms as algos
class _TimedeltaFrequencyInferer(_FrequencyInferer):
def _infer_daily_rule(self):
if self.is_unique:
days = self.deltas[0] / _ONE_DAY
if days % 7 == 0:
# Weekly
alias = _weekday_rule_aliases[self.rep_stamp.weekday()]
return _maybe_add_count('W-%s' % alias, days / 7)
else:
return _maybe_add_count('D', days)
def _maybe_add_count(base, count):
if count > 1:
return '%d%s' % (count, base)
else:
return base
def is_subperiod(source, target):
"""
Returns True if downsampling is possible between source and target
frequencies
Parameters
----------
source : string
Frequency converting from
target : string
Frequency converting to
Returns
-------
is_subperiod : boolean
"""
if isinstance(source, offsets.DateOffset):
source = source.rule_code
if isinstance(target, offsets.DateOffset):
target = target.rule_code
target = target.upper()
source = source.upper()
if _is_annual(target):
if _is_quarterly(source):
return _quarter_months_conform(_get_rule_month(source),
_get_rule_month(target))
return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_quarterly(target):
return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'M':
return source in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_weekly(target):
return source in [target, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'B':
return source in ['B', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'C':
return source in ['C', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'D':
return source in ['D', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'H':
return source in ['H', 'T', 'S', 'L', 'U', 'N']
elif target == 'T':
return source in ['T', 'S', 'L', 'U', 'N']
elif target == 'S':
return source in ['S', 'L', 'U', 'N']
elif target == 'L':
return source in ['L', 'U', 'N']
elif target == 'U':
return source in ['U', 'N']
elif target == 'N':
return source in ['N']
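# Illustrative sketch (not part of the original source): daily data can
# be downsampled to monthly, but not the other way around.
#   >>> is_subperiod('D', 'M'), is_subperiod('M', 'D')
#   (True, False)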
def is_superperiod(source, target):
"""
Returns True if upsampling is possible between source and target
frequencies
Parameters
----------
source : string
Frequency converting from
target : string
Frequency converting to
Returns
-------
is_superperiod : boolean
"""
if isinstance(source, offsets.DateOffset):
source = source.rule_code
if isinstance(target, offsets.DateOffset):
target = target.rule_code
target = target.upper()
source = source.upper()
if _is_annual(source):
if _is_annual(target):
return _get_rule_month(source) == _get_rule_month(target)
if _is_quarterly(target):
smonth = _get_rule_month(source)
tmonth = _get_rule_month(target)
return _quarter_months_conform(smonth, tmonth)
return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_quarterly(source):
return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'M':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_weekly(source):
return target in [source, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'B':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'C':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'D':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'H':
return target in ['H', 'T', 'S', 'L', 'U', 'N']
elif source == 'T':
return target in ['T', 'S', 'L', 'U', 'N']
elif source == 'S':
return target in ['S', 'L', 'U', 'N']
elif source == 'L':
return target in ['L', 'U', 'N']
elif source == 'U':
return target in ['U', 'N']
elif source == 'N':
return target in ['N']
def _get_rule_month(source, default='DEC'):
source = source.upper()
if '-' not in source:
return default
else:
return source.split('-')[1]
def _is_annual(rule):
rule = rule.upper()
return rule == 'A' or rule.startswith('A-')
def _quarter_months_conform(source, target):
snum = _month_numbers[source]
tnum = _month_numbers[target]
return snum % 3 == tnum % 3
def _is_quarterly(rule):
rule = rule.upper()
return rule == 'Q' or rule.startswith('Q-')
def _is_weekly(rule):
rule = rule.upper()
return rule == 'W' or rule.startswith('W-')
DAYS = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL',
'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
_month_numbers = dict((k, i) for i, k in enumerate(MONTHS))
_weekday_rule_aliases = dict((k, v) for k, v in enumerate(DAYS))
_month_aliases = dict((k + 1, v) for k, v in enumerate(MONTHS))
def _is_multiple(us, mult):
return us % mult == 0
|
{
"content_hash": "2b9995f8ae2405836323a2b60317ee00",
"timestamp": "",
"source": "github",
"line_count": 1089,
"max_line_length": 123,
"avg_line_length": 28.52617079889807,
"alnum_prop": 0.5296314179945276,
"repo_name": "dssg/wikienergy",
"id": "54b29b164130927e4571cd1380d56e6f3c640d17",
"size": "31065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "disaggregator/build/pandas/pandas/tseries/frequencies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "21786236"
},
{
"name": "C++",
"bytes": "625951"
},
{
"name": "CSS",
"bytes": "168138"
},
{
"name": "JavaScript",
"bytes": "55363"
},
{
"name": "Makefile",
"bytes": "7243"
},
{
"name": "Python",
"bytes": "8476897"
},
{
"name": "Shell",
"bytes": "6715"
},
{
"name": "TeX",
"bytes": "70097"
}
],
"symlink_target": ""
}
|
"""
.. moduleauthor:: Ulf Krumnack
.. module:: qtgui.widgets.datasource
This module contains widgets for viewing and controlling
:py:class:`Datasource`s. It aims at providing support for all
abstract interfaces defined in
`datasource.datasource`.
"""
# standard imports
from typing import Union
import logging
# Qt imports
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QHideEvent, QFontMetrics, QDoubleValidator
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout
from PyQt5.QtWidgets import QWidget, QPushButton, QDoubleSpinBox
# toolbox imports
from toolbox import Toolbox
from dltb.datasource import Datasource, Datafetcher
from dltb.datasource import Indexed, Random, Livesource
from dltb.base.register import RegisterEntry, InstanceRegisterEntry
# GUI imports
from ..utils import QObserver, QPrepareButton, protect
from .register import ToolboxAdapter
from .register import QRegisterListWidget, QRegisterComboBox
from .register import QInstanceRegisterEntryController
from .navigation import QIndexControls as QBaseIndexControls
# logging
LOG = logging.getLogger(__name__)
class DatasourceAdapter(ToolboxAdapter, qobservables={
Toolbox: {'datasource_changed', 'datasources_changed'}}):
# pylint: disable=abstract-method
"""A :py:class:`ToolboxAdapter` that is especially interested in
:py:class:`Datasource`.
"""
datasourceSelected = pyqtSignal(object)
def updateFromToolbox(self) -> None:
"""Update the list from the :py:class:`Toolbox`.
"""
self.updateFromIterable(self._toolbox.datasources)
def toolbox_changed(self, _toolbox: Toolbox,
change: Toolbox.Change) -> None:
# pylint: disable=invalid-name
"""React to a change in the :py:class:`Toolbox`. The only change
of interest is a change of the current datasource. This
will be reflected in the list.
"""
if change.datasource_changed: # the current datasource has changed
self._formatAllItems()
elif change.datasources_changed: # the list of datasources has changed
self.updateFromToolbox()
def datasource(self) -> Datasource:
"""The currently selected Datasource in this
:py:class:`QDatasourceList`.
"""
item = self._currentItem()
if self._toolbox is not None:
# items are of type Datasource
return item
# items are of type InstanceRegisterEntry
return None if item is None else item.obj
def setDatasource(self, datasource: Datasource) -> None:
"""Set the current :py:class:`Datasource`.
"""
if datasource is None:
self._setCurrentItem(None)
else:
self._setCurrentText(datasource.key)
class QDatasourceListWidget(DatasourceAdapter, QRegisterListWidget):
"""A list displaying the Datasources of a Toolbox.
By providing a Datasource, the list becomes clickable, and
selecting a Datasource from the list will set the observed
Datasource, and vice versa, i.e. changing the observed datasource
will change the current item in the list.
Entries of this list can be of different nature:
* instances of class :py:class:`Datasource`
* ids (str) to identify registered (but potentially uninitialized)
instances of the class :py:class:`Datasource`
    * subclasses of :py:class:`Datasource`, which allow to instantiate
      a new datasource (given sufficient initialization parameters).
A :py:class:`QDatasourceList` can be run in different modes:
* display the (initialized) datasources of a :py:class:`Toolbox`
    * display the (potentially uninitialized) datasources registered
at class :py:class:`Datasource`
* display a list of subclasses of class :py:class:`Datasource`
that may be used to initialize a new datasource.
The list displayed by :py:class:`QDatasourceList` is intended to
reflect the current state of affairs, reflecting changes in the
mentioned lists or individual datasources. Hence it implements
different observer interfaces and registers as observer whenever
possible (even for individual datasources displayed in the list).
"""
def __init__(self, **kwargs) -> None:
"""
"""
super().__init__(register=Datasource.instance_register, **kwargs)
@protect
def _oncurrentIndexChanged(self, index: int) -> None:
"""A forward to map item selection to Datasource selection.
"""
self.datasourceSelected.emit(self.itemData(index))
class QDatasourceComboBox(DatasourceAdapter, QRegisterComboBox):
"""A :py:class:`QComboBox` to select a :py:class:`Datasource`.
"""
def __init__(self, **kwargs) -> None:
"""
"""
super().__init__(register=Datasource.instance_register, **kwargs)
self.currentIndexChanged.connect(self._oncurrentIndexChanged)
@protect
def _oncurrentIndexChanged(self, _index: int) -> None:
"""A forward to map item selection to Datasource selection.
"""
self.datasourceSelected.emit(self.datasource())
class QDatafetcherObserver(QObserver, qobservables={
Datafetcher: {'state_changed', 'datasource_changed'}}):
"""A QObserver observing a :py:class:`Datafetcher`. This is intended
to be inherited by classes observing a :py:class:`Datafetcher`.
Attributes
----------
_datafetcher: Datafetcher
A :py:class:`Datafetcher` used by this Button to control the
(loop mode) of the Datasource.
"""
    _interests: Datafetcher.Change = None
def __init__(self, datafetcher: Datafetcher = None,
interests: Datafetcher.Change = None, **kwargs) -> None:
super().__init__(**kwargs)
self._interests = interests or \
Datafetcher.Change('busy_changed', 'state_changed')
self.setDatafetcher(datafetcher)
def datafetcher_changed(self, _datafetcher: Datafetcher,
info: Datafetcher.Change) -> None:
# pylint: disable=invalid-name
"""React to a change in the state of the controlled
:py:class:`Datafetcher`.
"""
if info.state_changed or info.datasource_changed:
self.update()
class QLoopButton(QPushButton, QDatafetcherObserver):
"""A Button to control a :py:class:`Datasource` of type
    :py:class:`Loop`. Such a datasource can be in loop mode, meaning
    that it continuously produces new data (e.g., webcam, movies,
    etc.).
The :py:class:`QLoopButton` can observe a :py:class:`Datasource`
and adapt its appearance and function based on the state of the
datasource.
"""
def __init__(self, text: str = 'Loop', **kwargs) -> None:
super().__init__(text, **kwargs)
self.setCheckable(True)
self.clicked.connect(self.onClicked)
@protect
def onClicked(self, checked: bool) -> None:
"""Click on this :py:class:`QLoopButton` will start or stop
looping.
"""
LOG.info("QLoopButton: looping=%s", self._datafetcher.looping)
self._datafetcher.looping = checked
def update(self) -> None:
"""Update this QLoopButton based on the state of the
:py:class:`Datafetcher`.
"""
enabled = (self._datafetcher is not None and
self._datafetcher.loopable and
self._datafetcher.ready)
checked = enabled and self._datafetcher.looping
self.setEnabled(enabled)
self.setChecked(checked)
def hideEvent(self, event: QHideEvent) -> None:
"""When hiding the button, we should stop the loop,
assuming it makes no sense to waste resources by playing
it in the background.
"""
if self._datafetcher is not None and self._datafetcher.loopable:
self._datafetcher.looping = False
super().hideEvent(event)
class QFramesPerSecondEdit(QDoubleSpinBox, QDatafetcherObserver):
# FIXME[todo]: should become a Datafetcher.Observer
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self.setRange(0.5, 10.0)
self.setSingleStep(0.5)
self.setMinimumWidth(QFontMetrics(self.font()).width('8') * 4)
        # valueChanged: this signal is emitted whenever the value of
        # the spin box changes (by typing or via the arrow buttons).
self.valueChanged.connect(self.onValueChanged)
def setDatafetcher(self, datafetcher: Datafetcher) -> None:
"""Set the datafetcher whose frames per second value is to
be edited by this :py:class:`QFramesPerSecondEdit`.
"""
self._datafetcher = datafetcher
self.setValue(10.0 if datafetcher is None else
datafetcher.frames_per_second)
@protect
def onValueChanged(self, value: float) -> None:
"""React to the `EditingFinished` signal of the line editor. This
signal is emitted when the Return or Enter key is pressed or
the line edit loses focus.
"""
if self._datafetcher is not None:
frames_per_second = max(0.5, min(value, 10.0))
self._datafetcher.frames_per_second = frames_per_second
def update(self) -> None:
"""Update this QLoopButton based on the state of the
:py:class:`Datafetcher`.
"""
enabled = (self._datafetcher is not None and
self._datafetcher.ready)
self.setEnabled(enabled)
class QSnapshotButton(QPushButton, QDatafetcherObserver):
"""A Button to control a :py:class:`Datasource` of type
    :py:class:`Livesource`. Pressing this button will obtain a
snapshot from the datasource.
The :py:class:`QSnapshotButton` can observe a :py:class:`Datasource`
and adapt its appearance and function based on the state of that
datasource.
The :py:class:`QSnapshotButton` will only be enabled if a
:py:class:`Datasource` was registered with the
:py:meth:`setDatasource` method and if this datasource is not busy
(e.g., by looping).
"""
def __init__(self, text: str = 'Snapshot', **kwargs) -> None:
super().__init__(text, **kwargs)
self.clicked.connect(self.onClicked)
@protect
def onClicked(self, _checked: bool):
"""Create a snapshot as reaction to a button click.
"""
# FIXME[bug]: May throw a RuntimeError (when busy)
self._datafetcher.fetch(snapshot=True)
def update(self) -> None:
"""Update this QLoopButton based on the state of the
:py:class:`Datasource`.
"""
self.setEnabled(self._datafetcher is not None and
self._datafetcher.snapshotable and
self._datafetcher.ready and
not self._datafetcher.looping)
class QRandomButton(QPushButton, QDatafetcherObserver):
"""A Button to control a :py:class:`Datasource` of type
:py:class:`datasource.Random`. Pressing this button will
    obtain an entry from the datasource.
The :py:class:`QRandomButton` can observe a :py:class:`Datasource`
and adapt its appearance and function based on the state of that
datasource. The :py:class:`QRandomButton` will only be enabled if a
:py:class:`Datasource` was registered with the
:py:meth:`setDatasource` method and if this
datasource is not busy (e.g., by looping).
"""
def __init__(self, text: str = 'Random', **kwargs) -> None:
super().__init__(text, **kwargs)
self.clicked.connect(self.onClicked)
@protect
def onClicked(self, _checked: bool):
"""Fetch a random item from the datasource.
"""
# FIXME[bug]: May throw a RuntimeError (when busy)
self._datafetcher.fetch(random=True)
def update(self) -> None:
"""Update this QLoopButton based on the state of the
:py:class:`Datasource`.
"""
enabled = (self._datafetcher is not None and
self._datafetcher.randomable and
self._datafetcher.ready and
not self._datafetcher.looping)
self.setEnabled(enabled)
class QBatchButton(QPushButton, QDatafetcherObserver):
"""A Button to control a :py:class:`Datasource` of type
:py:class:`datasource.Random`. Pressing this button will
obtain a entry from the datasource.
The :py:class:`QRandomButton` can observe a :py:class:`Datasource`
and adapt its appearance and function based on the state of that
datasource. The :py:class:`QRandomButton` will only be enabled if
a :py:class:`Datasource` was registered with the
:py:meth:`setDatasource` method and if this datasource is not busy
(e.g., by looping).
"""
def __init__(self, text: str = 'Batch', **kwargs) -> None:
super().__init__(text, **kwargs)
self._batchSize = 8
self.clicked.connect(self.onClicked)
@protect
def onClicked(self, _checked: bool):
"""Fetch a batch of data.
"""
# FIXME[bug]: May throw a RuntimeError (when busy)
if isinstance(self._datafetcher.datasource, Random):
self._datafetcher.fetch(batch=self._batchSize, random=True)
def update(self) -> None:
"""Update this QLoopButton based on the state of the
:py:class:`Datasource`.
"""
enabled = (self._datafetcher is not None and
self._datafetcher.randomable and
self._datafetcher.ready and
not self._datafetcher.looping)
self.setEnabled(enabled)
class QIndexControls(QBaseIndexControls, QDatafetcherObserver, qobservables={
Datafetcher: {'state_changed', 'data_changed',
'datasource_changed', 'prepared_changed'}}):
"""A group of Widgets to control an :py:class:`Indexed`
:py:class:`Datasource`. The controls allow to select elements
from the datasource based on their index.
The :py:class:`QIndexControls` can observe a :py:class:`Datasource`
and adapt their appearance and function based on the state of that
datasource.
The :py:class:`QIndexControls` will only be enabled if a
:py:class:`Datasource` was registered with the
:py:meth:`setDatasource` method and if this
datasource is not busy (e.g., by fetching or looping).
"""
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self.indexChanged.connect(self.onIndexChanged)
@protect
def onIndexChanged(self, index: int) -> None:
"""React to a change of the current index by fetching
the corresponding entry.
"""
LOG.info("QIndexControls: index changed=%d", index)
# FIXME[bug]: May throw a RuntimeError (when busy)
self._datafetcher.fetch(index=index)
def datafetcher_changed(self, datafetcher: Datafetcher,
info: Datafetcher.Change) -> None:
# pylint: disable=invalid-name
"""React to a change in the state of the controlled
:py:class:`Datafetcher`.
"""
LOG.debug("QIndexControls: datafetcher %s changed %s",
datafetcher, info)
enabled = not datafetcher.looping
if info.datasource_changed or info.prepared_changed:
datasource = datafetcher.datasource
# we can obtain the length only from a prepared datasource
if datafetcher.indexable and datasource.prepared:
self.setElements(len(datasource))
else:
self.setElements(-1)
enabled = False
if info.data_changed:
data = datafetcher.data
if datafetcher.indexable and datafetcher.fetched:
# The index may have changed
index = data[0].index if data.is_batch else data.index
self.setIndex(index)
LOG.debug("QIndexControls: index=%d", index)
if info.state_changed:
enabled = enabled and datafetcher.ready
self.update(enabled)
class QDatasourceNavigator(QWidget, QObserver, qattributes={
Toolbox: False, Datasource: False}, qobservables={
Datafetcher: {'datasource_changed'}}):
"""The QDatasourceNavigator offers control widgets to navigate through
a :py:class:`Datasource`. The actual controls depend on the type
of the datasource and the arrangement can be adapted by providing
layout parameters.
The :py:class:`Datasource` to navigate can be set via the
:py:meth:`setDatasource` method. Alternatively, a :py:class:`Toolbox`
can be set using the :py:class:`setToolbox` method.
If a toolbox is set, its current datasource will be used and it
is no longer allowed to set the Datasource via :py:meth:`setDatasource`.
    Attributes
    ----------
    _indexControls: QIndexControls
        controls for an indexed datasource (Indexed)
    _randomButton: QRandomButton
        select random entry from the Datasource (Random)
    _snapshotButton: QSnapshotButton
        Snapshot button (Snapshot)
    _loopButton: QLoopButton
        start/stop looping the Datasource (Loop)
    _batchButton: QBatchButton
        fetch a batch of entries from the Datasource
    _selector: QDatasourceComboBox
        select the current Datasource
    _prepareButton: QPrepareButton
        prepare/unprepare the current Datasource
    """
def __init__(self, toolbox: Toolbox = None,
datasource: Datasource = None,
datasource_selector: bool = True,
style: str = 'narrow', **kwargs):
"""Initialization of the :py:class:`QDatasourceNavigator`.
Parameters
        ----------
datasource: Datasource
The datasource to be controlled by this
:py:class:`QDatasourceNavigator`
"""
super().__init__(**kwargs)
self._initUI(datasource_selector)
self._layoutUI(style)
self.setToolbox(toolbox)
if datasource is not None:
self.setDatasource(datasource)
elif self._selector is not None:
self.setDatasource(self._selector.datasource())
def _initUI(self, selector: bool = True) -> None:
"""Initialize the user interface.
"""
self._indexControls = QIndexControls()
self.addAttributePropagation(Datafetcher, self._indexControls)
self._randomButton = QRandomButton()
self.addAttributePropagation(Datafetcher, self._randomButton)
self._snapshotButton = QSnapshotButton()
self.addAttributePropagation(Datafetcher, self._snapshotButton)
self._loopButton = QLoopButton()
self.addAttributePropagation(Datafetcher, self._loopButton)
self._framesPerSecondEdit = QFramesPerSecondEdit()
self.addAttributePropagation(Datafetcher, self._framesPerSecondEdit)
self._batchButton = QBatchButton()
self.addAttributePropagation(Datafetcher, self._batchButton)
if selector:
self._selector = QDatasourceComboBox()
self._selector.datasourceSelected.\
connect(self._onDatasourceSelected)
self.addAttributePropagation(Toolbox, self._selector)
self._prepareButton = QPrepareButton()
            self.addAttributePropagation(Datasource, self._prepareButton)
else:
self._selector = None
self._prepareButton = None
def _layoutUI(self, style: str) -> None:
row = QHBoxLayout()
row2 = QHBoxLayout() if style == 'narrow' else row
if self._selector is not None:
row.addWidget(self._selector)
if self._prepareButton is not None:
row.addWidget(self._prepareButton)
row2.addStretch()
row2.addWidget(self._indexControls)
row.addStretch()
row.addWidget(self._randomButton)
row.addWidget(self._snapshotButton)
row.addWidget(self._loopButton)
row.addWidget(self._framesPerSecondEdit)
row.addWidget(self._batchButton)
if style == 'narrow':
layout = QVBoxLayout()
row2.addStretch()
layout.addLayout(row2)
layout.addLayout(row)
else:
layout = row
self.setLayout(layout)
def setToolbox(self, toolbox: Toolbox) -> None:
"""Set the toolbox for this :py:class:`QDatasourceNavigator`.
If not None, the :py:class:`QDatasourceNavigator` will obtain
its :py:class:`Datasource` from that toolbox. Otherwise it
will run in standalone mode and the :py:class:`Datasource`
has to be set explicitly.
"""
LOG.debug("QDatasourceNavigator.setToolbox(%s)", toolbox)
self.setDatafetcher(Datafetcher() if toolbox is None else
toolbox.datafetcher)
def setDatasource(self, datasource: Datasource) -> None:
"""Set the datasource for this :py:class:`QDatasourceNavigator`.
Depending on the type of the new datasource, controls will
become visible.
"""
LOG.debug("QDatasourceNavigator.setDatasource(%s)", datasource)
if self._datafetcher is not None:
self._datafetcher.datasource = datasource
elif datasource is not None:
# the datafetcher will notify all interested parties
# (including us) that the datasource has changed.
self.setDatafetcher(Datafetcher(datasource))
def setDatafetcher(self, datafetcher: Datafetcher) -> None:
"""Set the :py:class:`Datafetcher` for this
:py:class:`QDatasourceNavigator`.
Depending on the type of the :py:class:`Datasource` from which
data are fetched, the controls of this
:py:class:`QDatasourceNavigator` will become visible or hidden.
"""
LOG.debug("QDatasourceNavigator.setDatafetcher(%s)", datafetcher)
self._updateDatasource(datafetcher and datafetcher.datasource)
def datafetcher_changed(self, datafetcher: Datafetcher, info) -> None:
# pylint: disable=invalid-name
"""React to a change of the Toolbox datasource.
"""
LOG.debug("QDatasourceNavigator.datafetcher_changed(%s, %s)",
datafetcher, info)
if info.datasource_changed:
self._updateDatasource(datafetcher.datasource)
def _updateDatasource(self, datasource: Datasource) -> None:
"""Update the navigation controls based on a
:py:class:`Datasource`.
Some control elements will be hidden if not applicable for
that datasource.
Arguments
---------
datasource: Datasource
The datasource to which to adapt. May be `None`.
"""
self._indexControls.setVisible(isinstance(datasource, Indexed))
self._randomButton.setVisible(isinstance(datasource, Random))
self._snapshotButton.setVisible(isinstance(datasource, Livesource))
        self._loopButton.setVisible(
            isinstance(datasource, (Livesource, Indexed)))
self._batchButton.setVisible(isinstance(datasource, Datasource))
if self._prepareButton is not None:
self._prepareButton.setPreparable(datasource)
@protect
def _onDatasourceSelected(self, datasource: Datasource) -> None:
"""The signal `datasourceChanged` is sent whenever the selection of
the datasource in the QComboBox changes, either through user
interaction or programmatically.
"""
# FIXME[hack]: we need a more consistent way of what to store
# (Datasource or InstanceRegisterEntry) and what to report ...
if isinstance(datasource, InstanceRegisterEntry):
datasource = datasource.obj
self.setDatasource(datasource)
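# A minimal usage sketch for the navigator (the datasource and layout
# objects below are assumptions, not part of this module):
#
#   navigator = QDatasourceNavigator()
#   navigator.setDatasource(my_datasource)  # or navigator.setToolbox(toolbox)
#   layout.addWidget(navigator)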
class QDatasourceController(QInstanceRegisterEntryController, qobservables={
Toolbox: {'datasource_changed'}}):
"""The :py:class:`QDatasourceController` can control general
aspects of a datasource. This includes:
    * initialization (in case the datasource is registered)
    * prepare/unprepare for initialized datasources

    It also displays a description of the :py:class:`Datasource`.
Attributes
----------
**Signals:**
A :py:class:`QDatasourceController` emits different signals
corresponding to the different actions that can be initiated:
* initializeKeyClicked(str): The value is the key.
* initializeClassClicked(str): The value is the fully qualified class name.
"""
def __init__(self, **kwargs) -> None:
super().__init__(Datasource, **kwargs)
def _layoutUI(self):
# pylint: disable=attribute-defined-outside-init
super()._layoutUI()
self._toolboxButton = QPushButton("Set as Toolbox Datasource")
self._toolboxButton.clicked.connect(self._onToolboxButtonClicked)
layout = self.layout()
layout.addWidget(self._toolboxButton)
def datasource(self) -> object:
"""The :py:class:`Datasource` currently controlled by this
:py:class:`QDatabaseController` or None if no datasource is
controlled or the datasource is not yet initialized.
"""
if self._registerEntry is None:
return None
return self._registerEntry.obj
def setRegisterEntry(self, entry: Union[RegisterEntry, str]) -> None:
"""Set a new :py:class:`ClassRegisterEntry` to control.
"""
super().setRegisterEntry(entry)
self._updateToolboxButton()
def setToolbox(self, toolbox: Toolbox) -> None:
"""Set the toolbox for this :py:class:`QDatasourceController`.
"""
self._updateToolboxButton()
def _updateToolboxButton(self) -> None:
"""Update the state of the toolbox button. The button
is checked, if there is a toolbox and a datasource,
and that datasource is the current datasource of the toolbox.
"""
toolbox = self.toolbox()
datasource = self.datasource()
if toolbox is None or datasource is None:
self._toolboxButton.setChecked(False)
self._toolboxButton.setEnabled(False)
else:
checked = toolbox.datasource is datasource
self._toolboxButton.setChecked(checked)
self._toolboxButton.setEnabled(not checked)
@protect
def _onToolboxButtonClicked(self, _checked: bool):
"""React to a click on the "Set as Toolbox Datasource"-Button.
If a :py:class:`Toolbox` is present, this currently selected
:py:class:`Datasource` will be assigned as the active datasource
of the toolbox.
"""
if self._toolbox is not None:
self._toolbox.datasource = self.datasource()
def toolbox_changed(self, toolbox: Toolbox,
change: Toolbox.Change) -> None:
# pylint: disable=invalid-name
"""The resources of the :py:class:`Toolbox` have changed.
"""
LOG.debug("%s.toolbox_changed(%s, %s)",
type(self).__name__, type(toolbox).__name__, change)
if 'datasource_changed' in change:
self._updateToolboxButton()
|
{
"content_hash": "cf13af7910ca4f4bf71baa1d2dca2753",
"timestamp": "",
"source": "github",
"line_count": 716,
"max_line_length": 79,
"avg_line_length": 38.002793296089386,
"alnum_prop": 0.6489525909592062,
"repo_name": "Petr-By/qtpyvis",
"id": "d8112dd6fe99263e2eb8e9c1526cba27afc60963",
"size": "27210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qtgui/widgets/datasource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3648815"
},
{
"name": "Python",
"bytes": "243969"
}
],
"symlink_target": ""
}
|
"""Functions for TML layout that are used in the grammar to construct DOM-like
node objects used in the 164 layout engine.
"""
def createNode(name, attributes=None, children=None):
"""Creates a DOM-like node object, using the 164 representation so that
the node can be processed by the 164 layout engine.
"""
node = dict(attributes)
node['name'] = name
# Represent the list of child nodes as a dict with numeric keys.
node['children'] = dict(enumerate(children)) if children else {}
return node
def createWordNodes(text):
"""Returns a Python list of DOM-like nodes, one for each word in the given
text.
"""
return [createNode('Word', {'word': word + ' '}) for word in text.split()]
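# A small illustration (hypothetical input) of the node representation
# these helpers produce:
#
#   >>> createNode('P', {'font-size': 12}, createWordNodes('hi there'))
#   {'font-size': 12, 'name': 'P', 'children': {
#       0: {'word': 'hi ', 'name': 'Word', 'children': {}},
#       1: {'word': 'there ', 'name': 'Word', 'children': {}}}}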
|
{
"content_hash": "1dd0f802644791c2a35610b96c1a9a69",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 38.578947368421055,
"alnum_prop": 0.684856753069577,
"repo_name": "michelle/sink",
"id": "3f1a174cef5caa300f5e8dde7fdb33469797d15e",
"size": "733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "164/tml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "47213"
}
],
"symlink_target": ""
}
|
import pika
import json
import base64
import time
import os
import redis
import logging
import farm_template_py3
global connection
class Remote_Interface_server():
def __init__(self, redis_handle ):
self.redis = redis_handle
self.cmds = {}
self.cmds["PING"] = True
self.cmds["REDIS_GET"] = self.redis_get
self.cmds["REDIS_SET"] = self.redis_set
self.cmds["REDIS_LLEN"] = self.redis_llen
self.cmds["REDIS_LINDEX"] = self.redis_lindex
self.cmds["REDIS_LSET"] = self.redis_lset
self.cmds["REDIS_TRIM"] = self.redis_trim
self.cmds["REDIS_PUSH"] = self.redis_lpush
self.cmds["REDIS_POP"] = self.redis_rpop
self.cmds["REDIS_DEL"] = self.redis_del
self.cmds["REDIS_HGET"] = self.redis_hget
self.cmds["REDIS_HSET"] = self.redis_hset
self.cmds["REDIS_HGET_ALL"] = self.redis_hget_all
self.cmds["REDIS_HDEL"] = self.redis_hdel
self.cmds["REDIS_HKEYS"] = self.redis_hkeys
self.cmds["REDIS_KEYS"] = self.redis_keys
def redis_hkeys( self, command_data ):
object_data = {}
object_data["results"] = []
for i in command_data:
object_data["results"].append( self.redis.hkeys(i["hash"]).decode("utf-8") )
return object_data
def redis_keys( self, command_data):
object_data = {}
object_data["results"] = []
for i in command_data:
object_data["results"].append( self.redis.keys(i["key"]).decode("utf-8") )
return object_data
def redis_get( self, command_data ):
object_data = {}
object_data["results"] = []
for i in command_data:
object_data["results"].append({"key":i, "data": self.redis.get(i).decode("utf-8") } )
return object_data
def redis_set( self, command_data ):
object_data = {}
object_data["results"] = []
for i in command_data:
key = i["key"]
data = i["data"]
self.redis.set(key,data )
return object_data
def redis_llen( self, command_data ):
object_data = {}
object_data["results"] = []
for i in command_data:
key = i
object_data["results"].append({"key":i, "data":self.redis.llen(i)})
return object_data
def redis_lindex( self, command_data ):
object_data = {}
object_data["results"] = []
for i in command_data:
key = i["key"]
index = int(i["index"])
object_data["results"].append({"key":key, "index":index, "data":self.redis.lindex( key, index ).decode("utf-8") })
return object_data
def redis_lset( self, command_data ):
object_data = {}
object_data["results"] = []
for i in command_data:
key = i["key"]
index = int(i["index"])
value = i["data"]
self.redis.lset(key,index,value)
return object_data
def redis_trim( self, command_data ):
object_data = {}
object_data["results"] = []
for i in command_data:
#print( "i",i)
key = i["key"]
start = i["start"]
end = i["end"]
self.redis.ltrim(key,start, end)
return object_data
def redis_lpush( self, command_data ):
object_data = {}
object_data["results"] = []
for i in command_data:
key = i["key"]
for j in i["data"]:
self.redis.lpush( key, j )
return object_data
def redis_rpop( self, command_data ):
object_data = {}
object_data["results"] = []
for i in command_data:
key = i["key"]
number = i["number"]
temp1 = {"key": key,"number":number }
temp = []
            for j in range(0, number):
                temp_1 = self.redis.rpop(key)
                # rpop() returns None when the list is empty;
                # check before decoding
                if temp_1 is not None:
                    temp.append(temp_1.decode("utf-8"))
temp1["data"] = temp
object_data["results"].append(temp1)
return object_data
def redis_del( self, command_data):
object_data = {}
object_data["results"] = []
for i in command_data:
self.redis.delete(i)
return object_data
def redis_hdel( self, command_data):
object_data = {}
object_data["results"] = []
for i in command_data:
self.redis.hdel(i["hash"], i["key"] )
return object_data
    #
    # Input: a list of dictionaries, each with the keys
    #   "hash" - name of the redis hash
    #   "key"  - field within that hash
    #
    # Returns a list of dictionaries with the keys
    # "hash", "key" and "data" (the field value).
    #
def redis_hget( self, command_data):
object_data = {}
object_data["results"] = []
for i in command_data:
object_data["results"].append({"hash":i["hash"], "key":i["key"], "data": self.redis.hget(i["hash"],i["key"] ).decode("utf-8") })
return object_data
    #
    # Input: a list of dictionaries, each with "hash", "key" and "data".
    # Sets each hash field; the result list stays empty.
    #
def redis_hset( self, command_data):
object_data = {}
object_data["results"] = []
for i in command_data:
#print( i)
hash = i["hash"]
key = i["key"]
data = i["data"]
self.redis.hset(hash, key,data )
return object_data
    #
    # Input: a list of dictionaries, each with a "hash" key naming the
    # redis hash to dump.
    #
    # Returns a list of dictionaries with "hash" and "data" (the full
    # contents of that hash).
def redis_hget_all( self, command_data):
object_data = {}
object_data["results"] = []
for i in command_data:
object_data["results"].append({"hash":i["hash"], "data": self.redis.hgetall(i["hash"]) } )
return object_data
def process_commands( self, command_data ):
#print( "command ",command_data["command"])
try:
            if command_data["command"] in self.cmds:
if command_data["command"] == "PING":
object_data = command_data
object_data["reply"] = command_data["command"]
else:
object_data = self.cmds[ command_data["command"] ]( command_data["data"] )
object_data["reply"] = command_data["command"]
object_data["command"] = command_data["command"]
else:
object_data = {}
object_data["reply"] = "BAD_COMMAND"
except:
print ("exception")
object_data = {}
object_data["reply"] = "BAD_COMMAND"
object_data["results"] = None
return object_data
def on_request(self, ch, method, props, body):
try:
input_data = json.loads( base64.b64decode(body))
#print( "input_data",input_data)
output_data = self.process_commands( input_data )
#print( "output_data",output_data)
except:
print( "exception")
output_data = {}
output_data["reply"] = "BAD_COMMAND"
output_data["results"] = None
        output_data = json.dumps(output_data)
        #print( "data output = ", output_data)
        # b64encode() requires bytes under Python 3
        response = base64.b64encode(output_data.encode("utf-8"))
        ch.basic_publish(exchange='',
                         routing_key=props.reply_to,
                         properties=pika.BasicProperties(
                             correlation_id=props.correlation_id),
                         body=response)
ch.basic_ack(delivery_tag = method.delivery_tag)
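    # Request/response sketch (the key names below are illustrative):
    # a client publishes a base64-encoded JSON command such as
    #
    #   {"command": "REDIS_GET", "data": ["sensor:temperature"]}
    #
    # and receives back
    #
    #   {"reply": "REDIS_GET", "command": "REDIS_GET",
    #    "results": [{"key": "sensor:temperature", "data": "..."}]}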
if __name__ == "__main__":
gm = farm_template_py3.Graph_Management("PI_1","main_remote","LaCima_DataStore")
#
# Now Find Data Stores
#
#
#
data_store_nodes = gm.find_data_stores()
# find ip and port for redis data store
data_server_ip = data_store_nodes[0]["ip"]
data_server_port = data_store_nodes[0]["port"]
# find ip and port for ip server
print( "data_server_ip",data_server_ip,data_server_port)
redis_handle = redis.StrictRedis( host = data_server_ip, port=data_server_port, db = 2 )
user_name = redis_handle.hget("redis_gateway", "user_name" ).decode("utf-8")
password = redis_handle.hget("redis_gateway", "password" ).decode("utf-8")
vhost = redis_handle.hget("redis_gateway", "vhost" ).decode("utf-8")
queue = redis_handle.hget("redis_gateway", "queue" ).decode("utf-8")
port = int(redis_handle.hget("redis_gateway", "port" ))
server = redis_handle.hget("redis_gateway", "server" ).decode("utf-8")
print( "server",server)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.CRITICAL)
command_handler = Remote_Interface_server(redis_handle)
credentials = pika.PlainCredentials( user_name, password )
parameters = pika.ConnectionParameters( server,
port, #ssl port
vhost,
credentials,
ssl = True ,
)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
#channel.queue_delete(queue=queue)
channel.queue_declare(queue=queue)
channel.basic_qos(prefetch_count=1)
channel.basic_consume( command_handler.on_request, queue=queue)
print (" [x] Awaiting RPC requests")
channel.start_consuming()
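    # Provisioning sketch: the broker settings above are read from the
    # "redis_gateway" hash, so something like the following (placeholder
    # values) must be set up beforehand:
    #
    #   redis_handle.hset("redis_gateway", "server", "rabbit.example.com")
    #   redis_handle.hset("redis_gateway", "port", "5671")
    #   redis_handle.hset("redis_gateway", "vhost", "/farm")
    #   redis_handle.hset("redis_gateway", "queue", "redis_rpc")
    #   redis_handle.hset("redis_gateway", "user_name", "...")
    #   redis_handle.hset("redis_gateway", "password", "...")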
|
{
"content_hash": "9c9777e189b849e602267ae09f6d7507",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 141,
"avg_line_length": 31.56656346749226,
"alnum_prop": 0.5139270302079246,
"repo_name": "glenn-edgar/local_controller_2",
"id": "6185602cf6db908d922a4de113762b6d76c9fb1d",
"size": "10218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rabbit_redis_access_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1392"
},
{
"name": "Batchfile",
"bytes": "2452"
},
{
"name": "CSS",
"bytes": "3169864"
},
{
"name": "HTML",
"bytes": "1762520"
},
{
"name": "JavaScript",
"bytes": "7044628"
},
{
"name": "Makefile",
"bytes": "5136"
},
{
"name": "PHP",
"bytes": "93357"
},
{
"name": "Python",
"bytes": "3189928"
},
{
"name": "Shell",
"bytes": "532"
},
{
"name": "Smalltalk",
"bytes": "189"
},
{
"name": "TeX",
"bytes": "3153"
}
],
"symlink_target": ""
}
|
value = True
if value:
print("Got true")
else:
print("Got else")
value = False
if value:
print("Got true")
else:
print("Got else")
result = 3
if result == 1:
print("Got true")
elif result == 2:
print("Got else")
else:
print("Got if else")
|
{
"content_hash": "286a35aa3c9c65994115adc4a75a3d96",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 24,
"avg_line_length": 14.105263157894736,
"alnum_prop": 0.5895522388059702,
"repo_name": "Bigsby/HelloLanguages",
"id": "4a382aa43cd52189dcb3b9f284c202f75a1d6689",
"size": "268",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/py/ifelse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "220"
},
{
"name": "C#",
"bytes": "9372"
},
{
"name": "C++",
"bytes": "10264"
},
{
"name": "Erlang",
"bytes": "7698"
},
{
"name": "F#",
"bytes": "4513"
},
{
"name": "Forth",
"bytes": "162"
},
{
"name": "Go",
"bytes": "5672"
},
{
"name": "Haskell",
"bytes": "1802"
},
{
"name": "Java",
"bytes": "9194"
},
{
"name": "JavaScript",
"bytes": "8099"
},
{
"name": "Kotlin",
"bytes": "3315"
},
{
"name": "PHP",
"bytes": "5136"
},
{
"name": "Perl",
"bytes": "3877"
},
{
"name": "Perl6",
"bytes": "910"
},
{
"name": "PowerShell",
"bytes": "4518"
},
{
"name": "Python",
"bytes": "4329"
},
{
"name": "Ruby",
"bytes": "3932"
},
{
"name": "TypeScript",
"bytes": "6223"
},
{
"name": "Visual Basic",
"bytes": "8173"
}
],
"symlink_target": ""
}
|
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.model import (Personality, PersonalityParameter,
ParamDefinition, ArchetypeParamDef, Feature)
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.change_management import ChangeManagement
from aquilon.worker.dbwrappers.feature import get_affected_plenaries
from aquilon.worker.dbwrappers.parameter import (set_parameter,
lookup_paramdef)
class CommandAddParameter(BrokerCommand):
requires_plenaries = True
required_parameters = ['personality', 'path', 'value']
def process_parameter(self, session, dbstage, db_paramdef, path, value,
plenaries):
try:
parameter = dbstage.parameters[db_paramdef.holder]
except KeyError:
parameter = PersonalityParameter(param_def_holder=db_paramdef.holder,
value={})
dbstage.parameters[db_paramdef.holder] = parameter
# Since the parameter is new, the PlenaryPersonality collection does
# not have it, so we need to add it explicitly for the template to
# get created on the disk. It would be nice if PlanaryPersonality
# would be able to handle this internally, but that would need
# deeper surgery.
if isinstance(db_paramdef.holder, ArchetypeParamDef):
plenaries.add(parameter)
set_parameter(session, parameter, db_paramdef, path, value)
def render(self, session, logger, plenaries, archetype, personality,
personality_stage, feature, type, path, user, value=None,
justification=None, reason=None, **arguments):
dbpersonality = Personality.get_unique(session, name=personality,
archetype=archetype, compel=True)
dbpersonality.archetype.require_compileable("parameters are not supported")
dbstage = dbpersonality.active_stage(personality_stage)
cm = ChangeManagement(session, user, justification, reason, logger, self.command, **arguments)
cm.consider(dbstage)
cm.validate()
path = ParamDefinition.normalize_path(path, strict=False)
plenaries.add(dbstage)
if feature:
dbfeature = Feature.get_unique(session, name=feature, feature_type=type,
compel=True)
if dbfeature not in dbstage.param_features:
raise ArgumentError("{0} is not bound to {1:l}, or it does not "
"have any parameters defined."
.format(dbfeature, dbstage))
holder_object = dbfeature
for link in dbstage.features:
if link.feature != dbfeature:
continue
get_affected_plenaries(session, dbfeature, plenaries, dbstage,
None, link.model, link.interface_name)
else:
holder_object = dbpersonality.archetype
db_paramdef, rel_path = lookup_paramdef(holder_object, path, False)
self.process_parameter(session, dbstage, db_paramdef, rel_path, value,
plenaries)
session.flush()
plenaries.write()
return
|
{
"content_hash": "0d418f295595fa213c314b3be85c986e",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 102,
"avg_line_length": 43.35443037974684,
"alnum_prop": 0.6058394160583942,
"repo_name": "quattor/aquilon",
"id": "4e367fcb97cb421a4ee733a922d11e829291e6a5",
"size": "4134",
"binary": false,
"copies": "1",
"ref": "refs/heads/upstream",
"path": "lib/aquilon/worker/commands/add_parameter.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "1823"
},
{
"name": "Makefile",
"bytes": "5732"
},
{
"name": "Mako",
"bytes": "4178"
},
{
"name": "PLSQL",
"bytes": "102109"
},
{
"name": "PLpgSQL",
"bytes": "8091"
},
{
"name": "Pan",
"bytes": "1058"
},
{
"name": "Perl",
"bytes": "6057"
},
{
"name": "Python",
"bytes": "5884984"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "33547"
},
{
"name": "Smarty",
"bytes": "4603"
}
],
"symlink_target": ""
}
|
import ast
from contextlib import contextmanager
def add_scope_context(node):
"""Provide to scope context to all nodes"""
return ScopeTransformer().visit(node)
class ScopeMixin(object):
"""
Adds a scope property with the current scope (function, module)
a node is part of.
"""
scopes = []
@contextmanager
def enter_scope(self, node):
if self._is_scopable_node(node):
self.scopes.append(node)
yield
self.scopes.pop()
else:
yield
@property
def scope(self):
try:
return self.scopes[-1]
except IndexError:
return None
def _is_scopable_node(self, node):
scopes = [ast.Module, ast.FunctionDef, ast.For, ast.If, ast.With]
return len([s for s in scopes if isinstance(node, s)]) > 0
class ScopeList(list):
"""
Wraps around list of scopes and provides find method for finding
the definition of a variable
"""
def find(self, lookup):
"""Find definition of variable lookup."""
def is_match(var):
return ((isinstance(var, ast.alias) and var.name == lookup) or
(isinstance(var, ast.Name) and var.id == lookup))
def find_definition(scope, var_attr="vars"):
for var in getattr(scope, var_attr):
if is_match(var):
return var
for scope in self:
defn = find_definition(scope)
if not defn and hasattr(scope, "body_vars"):
defn = find_definition(scope, "body_vars")
if not defn and hasattr(scope, "orelse_vars"):
defn = find_definition(scope, "orelse_vars")
if defn:
return defn
def find_import(self, lookup):
for scope in reversed(self):
if hasattr(scope, "imports"):
for imp in scope.imports:
if imp.name == lookup:
return imp
class ScopeTransformer(ast.NodeTransformer, ScopeMixin):
"""
Adds a scope attribute to each node.
The scope contains the current scope (function, module, for loop)
a node is part of.
"""
def visit(self, node):
with self.enter_scope(node):
node.scopes = ScopeList(self.scopes)
return super(ScopeTransformer, self).visit(node)
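# Usage sketch (the snippet below is illustrative):
#
#   tree = ast.parse("x = 1\ndef f():\n    return x")
#   tree = add_scope_context(tree)
#   # every visited node now carries a ``scopes`` ScopeList whose last
#   # element is the innermost enclosing scope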
|
{
"content_hash": "871823b74227f662dcb9932b879c01ce",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 74,
"avg_line_length": 29.432098765432098,
"alnum_prop": 0.5708892617449665,
"repo_name": "lukasmartinelli/py14",
"id": "b6bc8b5f6505cecbf99d8aa649e08b2f74044d01",
"size": "2384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py14/scope.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "324679"
},
{
"name": "HTML",
"bytes": "9412"
},
{
"name": "Makefile",
"bytes": "201"
},
{
"name": "Python",
"bytes": "46460"
}
],
"symlink_target": ""
}
|
from jim.protocol import JimMessage, JimResponse
from jim.errors import MandatoryKeyError
from jim.config import MESSAGE
from PyQt5.QtCore import QObject, pyqtSignal
class Receiver:
    ''' Receiver class that reads data from a socket
'''
def __init__(self, sock, request_queue):
        # remember the response queue
self.request_queue = request_queue
        # remember the socket
self.sock = sock
self.is_alive = False
def process_message(self, message):
pass
def poll(self):
self.is_alive = True
while True:
if not self.is_alive:
break
data = self.sock.recv(1024)
if data:
try:
                    # A message arrived over the socket
jm = JimMessage.create_from_bytes(data)
                    # If it is a chat message
if MESSAGE in jm:
                        # display it in readable form
self.process_message(jm)
else:
                        # queue it, since this is a server message
self.request_queue.put(jm)
except MandatoryKeyError:
                    # A server response arrived; queue it for further processing
jr = JimResponse.create_from_bytes(data)
                    # the consuming thread blocks on the queue meanwhile
self.request_queue.put(jr)
else:
break
def stop(self):
self.is_alive = False
class ConsoleReciever(Receiver):
def process_message(self, message):
print("\n>> user {}: {}".format(message.__dict__['from'], message.message))
class GuiReciever(Receiver, QObject):
gotData = pyqtSignal(str)
finished = pyqtSignal(int)
def __init__(self, sock, request_queue):
Receiver.__init__(self, sock, request_queue)
QObject.__init__(self)
def process_message(self, message):
self.gotData.emit('{} >>> {}'.format(message.__dict__['from'], message.message))
def poll(self):
super().poll()
self.finished.emit(0)
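# Usage sketch: GuiReciever is meant to run in its own QThread so that
# poll() does not block the GUI (sock, request_queue and the slot name
# are assumptions):
#
#   from PyQt5.QtCore import QThread
#   receiver = GuiReciever(sock, request_queue)
#   receiver.gotData.connect(chat_window.append_message)
#   thread = QThread()
#   receiver.moveToThread(thread)
#   thread.started.connect(receiver.poll)
#   thread.start()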
|
{
"content_hash": "37dbc46cc745355f2fa71cc1adf2e29b",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 106,
"avg_line_length": 31.814285714285713,
"alnum_prop": 0.5383924562191289,
"repo_name": "OOPSA45/Python-learn-",
"id": "3e3fe9723cdbd61be9f77ac6d8474c1bdfb2d7f9",
"size": "2501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "my_package/handlers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "54282"
}
],
"symlink_target": ""
}
|
"""Stream-related things."""
__all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol',
'open_connection', 'start_server',
'IncompleteReadError',
]
import socket
if hasattr(socket, 'AF_UNIX'):
__all__.extend(['open_unix_connection', 'start_unix_server'])
from . import coroutines
from . import events
from . import futures
from . import protocols
from .coroutines import coroutine
from .log import logger
_DEFAULT_LIMIT = 2**16
class IncompleteReadError(EOFError):
"""
Incomplete read error. Attributes:
- partial: read bytes string before the end of stream was reached
- expected: total number of expected bytes
"""
def __init__(self, partial, expected):
EOFError.__init__(self, "%s bytes read on a total of %s expected bytes"
% (len(partial), expected))
self.partial = partial
self.expected = expected
@coroutine
def open_connection(host=None, port=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""A wrapper for create_connection() returning a (reader, writer) pair.
The reader returned is a StreamReader instance; the writer is a
StreamWriter instance.
The arguments are all the usual arguments to create_connection()
except protocol_factory; most common are positional host and port,
with various optional keyword arguments following.
Additional optional keyword arguments are loop (to set the event loop
instance to use) and limit (to set the buffer limit passed to the
StreamReader).
(If you want to customize the StreamReader and/or
StreamReaderProtocol classes, just copy the code -- there's
really nothing special here except some convenience.)
"""
if loop is None:
loop = events.get_event_loop()
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, loop=loop)
transport, _ = yield from loop.create_connection(
lambda: protocol, host, port, **kwds)
writer = StreamWriter(transport, protocol, reader, loop)
return reader, writer
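# A minimal usage sketch (host and port are placeholders), written in
# the pre-3.5 generator style used throughout this module:
#
#   @coroutine
#   def echo_once():
#       reader, writer = yield from open_connection('127.0.0.1', 8888)
#       writer.write(b'ping\n')
#       yield from writer.drain()
#       reply = yield from reader.readline()
#       writer.close()
#       return reply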
@coroutine
def start_server(client_connected_cb, host=None, port=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Start a socket server, call back for each client connected.
The first parameter, `client_connected_cb`, takes two parameters:
client_reader, client_writer. client_reader is a StreamReader
object, while client_writer is a StreamWriter object. This
parameter can either be a plain callback function or a coroutine;
if it is a coroutine, it will be automatically converted into a
Task.
The rest of the arguments are all the usual arguments to
loop.create_server() except protocol_factory; most common are
positional host and port, with various optional keyword arguments
following. The return value is the same as loop.create_server().
Additional optional keyword arguments are loop (to set the event loop
instance to use) and limit (to set the buffer limit passed to the
StreamReader).
The return value is the same as loop.create_server(), i.e. a
Server object which can be used to stop the service.
"""
if loop is None:
loop = events.get_event_loop()
def factory():
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, client_connected_cb,
loop=loop)
return protocol
return (yield from loop.create_server(factory, host, port, **kwds))
if hasattr(socket, 'AF_UNIX'):
# UNIX Domain Sockets are supported on this platform
@coroutine
def open_unix_connection(path=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Similar to `open_connection` but works with UNIX Domain Sockets."""
if loop is None:
loop = events.get_event_loop()
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, loop=loop)
transport, _ = yield from loop.create_unix_connection(
lambda: protocol, path, **kwds)
writer = StreamWriter(transport, protocol, reader, loop)
return reader, writer
@coroutine
def start_unix_server(client_connected_cb, path=None, *,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Similar to `start_server` but works with UNIX Domain Sockets."""
if loop is None:
loop = events.get_event_loop()
def factory():
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, client_connected_cb,
loop=loop)
return protocol
return (yield from loop.create_unix_server(factory, path, **kwds))
class FlowControlMixin(protocols.Protocol):
"""Reusable flow control logic for StreamWriter.drain().
This implements the protocol methods pause_writing(),
resume_reading() and connection_lost(). If the subclass overrides
these it must call the super methods.
StreamWriter.drain() must wait for _drain_helper() coroutine.
"""
def __init__(self, loop=None):
self._loop = loop # May be None; we may never need it.
self._paused = False
self._drain_waiter = None
self._connection_lost = False
def pause_writing(self):
assert not self._paused
self._paused = True
if self._loop.get_debug():
logger.debug("%r pauses writing", self)
def resume_writing(self):
assert self._paused
self._paused = False
if self._loop.get_debug():
logger.debug("%r resumes writing", self)
waiter = self._drain_waiter
if waiter is not None:
self._drain_waiter = None
if not waiter.done():
waiter.set_result(None)
def connection_lost(self, exc):
self._connection_lost = True
# Wake up the writer if currently paused.
if not self._paused:
return
waiter = self._drain_waiter
if waiter is None:
return
self._drain_waiter = None
if waiter.done():
return
if exc is None:
waiter.set_result(None)
else:
waiter.set_exception(exc)
@coroutine
def _drain_helper(self):
if self._connection_lost:
raise ConnectionResetError('Connection lost')
if not self._paused:
return
waiter = self._drain_waiter
assert waiter is None or waiter.cancelled()
waiter = futures.Future(loop=self._loop)
self._drain_waiter = waiter
yield from waiter
class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
"""Helper class to adapt between Protocol and StreamReader.
(This is a helper class instead of making StreamReader itself a
Protocol subclass, because the StreamReader has other potential
uses, and to prevent the user of the StreamReader to accidentally
call inappropriate methods of the protocol.)
"""
def __init__(self, stream_reader, client_connected_cb=None, loop=None):
super().__init__(loop=loop)
self._stream_reader = stream_reader
self._stream_writer = None
self._client_connected_cb = client_connected_cb
def connection_made(self, transport):
self._stream_reader.set_transport(transport)
if self._client_connected_cb is not None:
self._stream_writer = StreamWriter(transport, self,
self._stream_reader,
self._loop)
res = self._client_connected_cb(self._stream_reader,
self._stream_writer)
if coroutines.iscoroutine(res):
self._loop.create_task(res)
def connection_lost(self, exc):
if exc is None:
self._stream_reader.feed_eof()
else:
self._stream_reader.set_exception(exc)
super().connection_lost(exc)
def data_received(self, data):
self._stream_reader.feed_data(data)
def eof_received(self):
self._stream_reader.feed_eof()
class StreamWriter:
"""Wraps a Transport.
This exposes write(), writelines(), [can_]write_eof(),
get_extra_info() and close(). It adds drain() which returns an
optional Future on which you can wait for flow control. It also
adds a transport property which references the Transport
directly.
"""
def __init__(self, transport, protocol, reader, loop):
self._transport = transport
self._protocol = protocol
        # drain() expects that the reader has an exception() method
assert reader is None or isinstance(reader, StreamReader)
self._reader = reader
self._loop = loop
def __repr__(self):
info = [self.__class__.__name__, 'transport=%r' % self._transport]
if self._reader is not None:
info.append('reader=%r' % self._reader)
return '<%s>' % ' '.join(info)
@property
def transport(self):
return self._transport
def write(self, data):
self._transport.write(data)
def writelines(self, data):
self._transport.writelines(data)
def write_eof(self):
return self._transport.write_eof()
def can_write_eof(self):
return self._transport.can_write_eof()
def close(self):
return self._transport.close()
def get_extra_info(self, name, default=None):
return self._transport.get_extra_info(name, default)
@coroutine
def drain(self):
"""Flush the write buffer.
The intended use is to write
w.write(data)
yield from w.drain()
"""
if self._reader is not None:
exc = self._reader.exception()
if exc is not None:
raise exc
yield from self._protocol._drain_helper()
class StreamReader:
def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
# The line length limit is a security feature;
# it also doubles as half the buffer limit.
self._limit = limit
if loop is None:
loop = events.get_event_loop()
self._loop = loop
self._buffer = bytearray()
self._eof = False # Whether we're done.
self._waiter = None # A future.
self._exception = None
self._transport = None
self._paused = False
def exception(self):
return self._exception
def set_exception(self, exc):
self._exception = exc
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.cancelled():
waiter.set_exception(exc)
def set_transport(self, transport):
assert self._transport is None, 'Transport already set'
self._transport = transport
def _maybe_resume_transport(self):
if self._paused and len(self._buffer) <= self._limit:
self._paused = False
self._transport.resume_reading()
def feed_eof(self):
self._eof = True
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.cancelled():
waiter.set_result(True)
def at_eof(self):
"""Return True if the buffer is empty and 'feed_eof' was called."""
return self._eof and not self._buffer
def feed_data(self, data):
assert not self._eof, 'feed_data after feed_eof'
if not data:
return
self._buffer.extend(data)
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.cancelled():
waiter.set_result(False)
if (self._transport is not None and
not self._paused and
len(self._buffer) > 2*self._limit):
try:
self._transport.pause_reading()
except NotImplementedError:
# The transport can't be paused.
# We'll just have to buffer all data.
# Forget the transport so we don't keep trying.
self._transport = None
else:
self._paused = True
def _create_waiter(self, func_name):
# StreamReader uses a future to link the protocol feed_data() method
# to a read coroutine. Running two read coroutines at the same time
        # would have unexpected behaviour. It would not be possible to know
# which coroutine would get the next data.
if self._waiter is not None:
raise RuntimeError('%s() called while another coroutine is '
'already waiting for incoming data' % func_name)
return futures.Future(loop=self._loop)
@coroutine
def readline(self):
if self._exception is not None:
raise self._exception
line = bytearray()
not_enough = True
while not_enough:
while self._buffer and not_enough:
ichar = self._buffer.find(b'\n')
if ichar < 0:
line.extend(self._buffer)
self._buffer.clear()
else:
ichar += 1
line.extend(self._buffer[:ichar])
del self._buffer[:ichar]
not_enough = False
if len(line) > self._limit:
self._maybe_resume_transport()
raise ValueError('Line is too long')
if self._eof:
break
if not_enough:
self._waiter = self._create_waiter('readline')
try:
yield from self._waiter
finally:
self._waiter = None
self._maybe_resume_transport()
return bytes(line)
@coroutine
def read(self, n=-1):
if self._exception is not None:
raise self._exception
if not n:
return b''
if n < 0:
# This used to just loop creating a new waiter hoping to
# collect everything in self._buffer, but that would
# deadlock if the subprocess sends more than self.limit
# bytes. So just call self.read(self._limit) until EOF.
blocks = []
while True:
block = yield from self.read(self._limit)
if not block:
break
blocks.append(block)
return b''.join(blocks)
else:
if not self._buffer and not self._eof:
self._waiter = self._create_waiter('read')
try:
yield from self._waiter
finally:
self._waiter = None
if n < 0 or len(self._buffer) <= n:
data = bytes(self._buffer)
self._buffer.clear()
else:
# n > 0 and len(self._buffer) > n
data = bytes(self._buffer[:n])
del self._buffer[:n]
self._maybe_resume_transport()
return data
@coroutine
def readexactly(self, n):
if self._exception is not None:
raise self._exception
# There used to be "optimized" code here. It created its own
# Future and waited until self._buffer had at least the n
# bytes, then called read(n). Unfortunately, this could pause
# the transport if the argument was larger than the pause
# limit (which is twice self._limit). So now we just read()
# into a local buffer.
blocks = []
while n > 0:
block = yield from self.read(n)
if not block:
partial = b''.join(blocks)
raise IncompleteReadError(partial, len(partial) + n)
blocks.append(block)
n -= len(block)
return b''.join(blocks)
|
{
"content_hash": "576c649cda43374b88d5de4d2d593fdd",
"timestamp": "",
"source": "github",
"line_count": 485,
"max_line_length": 79,
"avg_line_length": 33.338144329896906,
"alnum_prop": 0.5854412765167913,
"repo_name": "OptimusGitEtna/RestSymf",
"id": "c77eb606c2f9f1f8d1e71d08629eeaceaed8a9e8",
"size": "16169",
"binary": false,
"copies": "61",
"ref": "refs/heads/master",
"path": "Python-3.4.2/Lib/asyncio/streams.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "594205"
},
{
"name": "C",
"bytes": "15348597"
},
{
"name": "C++",
"bytes": "65109"
},
{
"name": "CSS",
"bytes": "12039"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "JavaScript",
"bytes": "10597"
},
{
"name": "Makefile",
"bytes": "9444"
},
{
"name": "Objective-C",
"bytes": "1390141"
},
{
"name": "PHP",
"bytes": "93070"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "557"
},
{
"name": "Python",
"bytes": "24018306"
},
{
"name": "Shell",
"bytes": "440753"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import sys
import wx
from Experimenter.GUI.STAverage import PyroSTAFrame
def launch_sta_app(port=6878):
app = wx.PySimpleApp()
frame = PyroSTAFrame(pyro_port=port)
frame.Show()
app.SetTopWindow(frame)
app.MainLoop()
if __name__ == '__main__':
port = int(sys.argv[-1])
launch_sta_app(port)
|
{
"content_hash": "7a7e279fccbdebf1cc7a6e4db2ddc831",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 51,
"avg_line_length": 22.785714285714285,
"alnum_prop": 0.664576802507837,
"repo_name": "chrox/RealTimeElectrophy",
"id": "7c63edb6e45f8802bb46649973db3fa31284c7e4",
"size": "435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Experimenter/Experiments/app/pyro_sta.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "24301"
},
{
"name": "Python",
"bytes": "681188"
},
{
"name": "Shell",
"bytes": "73"
}
],
"symlink_target": ""
}
|
import foauth.providers
class TwentyThreeAndMe(foauth.providers.OAuth2):
# General info about the provider
alias = '23andme'
name = '23andMe'
provider_url = 'https://23andme.com/'
docs_url = 'https://api.23andme.com/docs/'
category = 'Genealogy'
# URLs to interact with the API
authorize_url = 'https://api.23andme.com/authorize/'
access_token_url = 'https://api.23andme.com/token/'
api_domain = 'api.23andme.com'
available_permissions = [
(None, 'anonymously tell whether each profile in your account is genotyped'),
('profile:read', 'read your profile information, including your picture'),
('profile:write', 'write to your profile information, including your picture'),
('names', 'read the full name of every profile in your account'),
('haplogroups', 'read your maternal and paternal haplogroups'),
('ancestry', 'access the full ancestral breakdown for all your profiles'),
('relatives', 'access your relatives who have also been genotyped'),
('relatives:write', 'add notes about and update relationships with relatives'),
('publish', 'publish shareable results so that anyone can read them'),
('analyses', 'access your analyzed genomes, including traits and health information'),
('genomes', 'read your entire genetic profile, raw and unanalyzed')
]
def get_authorize_params(self, redirect_uri, scopes):
scopes.append('basic')
return super(TwentyThreeAndMe, self).get_authorize_params(redirect_uri, scopes)
def get_user_id(self, key):
r = self.api(key, self.api_domain, u'/1/user')
return unicode(r.json()[u'id'])
|
{
"content_hash": "f55dc0f909e2dbea9e2440248f04fdd9",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 94,
"avg_line_length": 47.22222222222222,
"alnum_prop": 0.6705882352941176,
"repo_name": "foauth/oauth-proxy",
"id": "8064af15eaa7f1f02d1cb802c904b912fcc56b8d",
"size": "1700",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "services/twentythreeandme.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "107029"
}
],
"symlink_target": ""
}
|
'''
A library of parsing functions for fixlets.
'''
import json
import cgi
import re
'''
The regular expression for matching the format of an attribute in a directory
entry.
'''
ATTR_REGEX = '\w+: (.*)'
'''
The attributes present in a file's entry on the gather site, in order.
See the function 'parse_directory(text)'.
'''
FILE_ATTRS = ('url', 'name', 'modified', 'size', 'type', 'hash', 'hashinfo')
'''
Same as FILE_ATTRS but without 'hashinfo', which is missing on some sites.
'''
FILE_ATTRS0 = ('url', 'name', 'modified', 'size', 'type', 'hash')
'''
The regular expression for matching a multipart boundary in a fixfile.
'''
BOUND_REGEX = '.*boundary="(.*)"'
'''
Regular expression for a HTML omment.
'''
COMMENT_REGEX = '<!--.*?-->'
class FixletParsingException(Exception):
pass
def parse_directory(text):
'''
Returns a 'directory': a list of file entries.
A file entry is a dictionary representing a mapping from attributes in
'FILE_ATTRS' to their actual values, parsed.
'''
directory = []
lines = text.split('\n')
lines = map(lambda x: x.strip(), lines)
# go to first entry
while not lines[2].startswith('URL: '):
lines = lines[1:]
if len(lines) < 3:
return []
# figure out whether the hashinfo attribute exists in our file entry
try:
content = map(lambda x: re.findall(ATTR_REGEX, x)[0], lines[2:9])
target_attrs = FILE_ATTRS
except Exception:
assert lines[8] == ''
target_attrs = FILE_ATTRS0
while lines[2].startswith('URL: '):
content = lines[2:2+len(target_attrs)]
content = map(lambda x: re.findall(ATTR_REGEX, x)[0], content)
attrs = dict(zip(target_attrs, content))
directory.append(attrs)
lines = lines[len(target_attrs)+3:]
assert len(lines) >= 3
return directory
def parse_directory_metadata(text):
properties = [] # not dictionary because duplicates exist (e.g. relevance)
lines = text.split('\n')
lines = map(lambda x: x.strip(), lines)
try:
# find boundary of document
while not lines[0].startswith('Content-Type:'):
lines = lines[1:]
# go to header
header = re.findall(BOUND_REGEX, lines[0])[0]
lines = lines[1:]
while not (header in lines[0]):
lines = lines[1:]
# parse MIME-properties
lines = lines[1:]
while not lines[0] == '':
separator = lines[0].find(":")
key = lines[0][:separator]
value = lines[0][separator+2:]
properties.append((key, value))
lines = lines[1:]
except IndexError:
return properties
return properties
def flatten(lists):
'''
Flatten nested lists into one list.
'''
flattened = []
for l in lists:
if not type(l) == list: # cannot flatten
flattened.append(l)
else:
flattened += flatten(l)
return flattened
def extract_site_name(url):
return re.findall('/([^/]*)$', url)[0]
def parse_fixlet(sections):
'''
Parse the smallest partition of a fixfile (contains ActionScript
or the text description).
TODO we currently only parse fixlets - add support for analyses, tasks, etc.
'''
text = None
actions = []
for section in sections:
lines = section.split('\n')
while not lines[0].startswith('Content-Type: '):
lines = lines[1:]
if (lines[0].split('Content-Type: ')[1].strip()
== 'text/html; charset=us-ascii'):
lines = lines[1:]
section = '\n'.join(lines)
text = re.sub(COMMENT_REGEX, '', section).strip()
elif (lines[0].split('Content-Type: ')[1].strip()
== 'application/x-Fixlet-Windows-Shell'):
lines = lines[1:]
section = '\n'.join(lines)
actions.append(section.strip())
        elif (lines[0].split('Content-Type: ')[1].strip() in
              ('application/x-bigfix-analysis-template',
               'application/x-bigfix-itclient-property')):
# TODO add analysis parsing later (look for x-fixlet-type)
raise FixletParsingException("not fixlet, is analysis")
elif (lines[0].split('Content-Type: ')[1].strip()
== 'application/x-Task-Windows-Shell'):
# TODO add task parsing later (look for x-fixlet-type)
raise FixletParsingException("not fixlet, is task")
else:
raise FixletParsingException("couldn't recognize " + lines[0].split('Content-Type: ')[1].strip())
return (text, actions)
class Relevance:
def __init__(self, clauses, parent):
self.clauses = clauses
self.parent = parent
def compressed_str_list(self):
c = []
if self.parent:
c += self.parent.compressed_str_list()
return c + self.clauses
def _to_dict(self):
info = {'clauses': self.clauses}
if self.parent:
info['parent'] = self.parent._to_dict()
return info
class Fixlet:
def __init__(self, fid, relevance, title, modified, text, actions):
self.fid = fid
self.relevance = relevance
self.title = title
self.modified = modified
self.text = text
self.actions = actions
@property
def contents(self):
        escape = lambda text: cgi.escape(text, True) if text is not None else None
r = list(map(escape, self.relevance.compressed_str_list()))
a = list(map(escape, self.actions))
d = {'relevance': r, 'text': [escape(self.text)], 'actions': a}
return json.dumps(d)
def rsplit_fixfile(text, parent=None):
'''
Recursively split a fixfile between boundaries, scraping relevance and
actionscript as we go.
'''
relevance = []
modified = 'unknown'
lines = text.split('\n')
while not lines[0].startswith('Content-Type: multipart/'):
if lines[0].startswith('X-Relevant-When: '):
relevance.append(lines[0].split('X-Relevant-When: ')[1].strip() + '\n')
elif lines[0].startswith('X-Fixlet-ID: '):
fid = int(lines[0].split('X-Fixlet-ID: ')[1].strip())
elif lines[0].startswith('Subject: '):
title = lines[0].split('Subject: ')[1].strip()
elif lines[0].startswith('X-Fixlet-Modification-Time: '):
modified = lines[0].split('X-Fixlet-Modification-Time: ')[1].strip()
lines = lines[1:]
splitter = re.findall(BOUND_REGEX, lines[0])[0]
assert len(text.split('--{}--'.format(splitter))) == 2
main_section = text.split('--{}--'.format(splitter))[0]
subsections = main_section.split('--{}'.format(splitter))[1:] # remove header
relevance = Relevance(relevance, parent)
if lines[0].startswith('Content-Type: multipart/digest'):
return filter(lambda x: x is not None,
map(lambda x: rsplit_fixfile(x, relevance), subsections))
elif lines[0].startswith('Content-Type: multipart/related'):
try:
text, actions = parse_fixlet(subsections)
return Fixlet(fid, relevance, title, modified, text, actions)
except FixletParsingException as e: # TODO handle properly
try:
print 'skipped parsing fixlet {} (id {})'.format(title, str(fid))
except UnicodeEncodeError:
print 'skipped parsing fixlet <UnicodeEncodeError> (id {})'.format(str(fid))
return None
else:
assert False
def parse_fxffile(text):
fixlets = flatten(filter(lambda x: x is not None, rsplit_fixfile(text)))
return dict(map(lambda fxf: (fxf.fid, fxf), fixlets))
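# Usage sketch (the file name is a placeholder):
#
#   with open('site.fxf') as f:
#       fixlets = parse_fxffile(f.read())
#   for fid, fixlet in fixlets.items():
#       print fid, fixlet.title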
|
{
"content_hash": "10b6d37078784d369afdc67486f63aaf",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 109,
"avg_line_length": 33.489177489177486,
"alnum_prop": 0.5900982419855222,
"repo_name": "bigfix/fixlet-historian",
"id": "8313bedfc3f2f8e2eb07be45852b9d7eac6f4b46",
"size": "7759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixlet_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2213"
},
{
"name": "HTML",
"bytes": "6714"
},
{
"name": "JavaScript",
"bytes": "8185"
},
{
"name": "Nix",
"bytes": "241"
},
{
"name": "Python",
"bytes": "39431"
}
],
"symlink_target": ""
}
|
from classes_pong import Paddle, Ball, ScoreArea
import pygame
from pygame.locals import *
#Screen Constants
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_SIZE = (SCREEN_WIDTH, SCREEN_HEIGHT)
#Screen area constants
SCORES_HEIGHT = 100
PLAY_AREA = (0, SCREEN_WIDTH, SCORES_HEIGHT, SCREEN_HEIGHT)
#Other Game constants
FRAME_RATE = 60
WHITE = (255, 255, 255)
def get_objects():
"""Gives the 3 objects in the game"""
player = Paddle(SCREEN_SIZE[0] // 20, SCREEN_SIZE[1] // 2, WHITE)
comp = Paddle((18.7 * SCREEN_SIZE[0]) // 20, SCREEN_SIZE[1] // 2, WHITE)
ball = Ball(SCREEN_SIZE[0] // 2, SCREEN_SIZE[1] // 2, 10, WHITE)
score_area = ScoreArea(SCORES_HEIGHT, SCREEN_WIDTH)
return player, comp, ball, score_area
def movement(cur_pad):
cur_pad.set_speed(0, 0)
pressed = pygame.key.get_pressed()
if pressed[K_UP] or pressed[K_w]:
cur_pad.set_speed(0, - Paddle.SPEED)
if pressed[K_DOWN] or pressed[K_s]:
cur_pad.set_speed(0, Paddle.SPEED)
cur_pad.move(*PLAY_AREA)
def main():
pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE)
    clock = pygame.time.Clock()
player, comp, ball, score_area = get_objects()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
return
screen.fill((0, 0, 0))
score_area.draw(screen)
movement(player)
player.draw(screen)
comp.draw(screen)
ball.draw(screen)
pygame.display.flip()
        clock.tick(FRAME_RATE)
if __name__ == "__main__":
main()
|
{
"content_hash": "c75d64e41477f327619a640b1835ffce",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 76,
"avg_line_length": 26.016129032258064,
"alnum_prop": 0.6174829510229386,
"repo_name": "anshbansal/general",
"id": "7db6751d5d530d3dfccdb691054f62d4c826cb0a",
"size": "1624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python3/Games/pong/pong.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "45627"
},
{
"name": "Haskell",
"bytes": "1391"
},
{
"name": "Java",
"bytes": "15545"
},
{
"name": "Python",
"bytes": "60441"
}
],
"symlink_target": ""
}
|
'''
Return data to an etcd server or cluster
:depends: - python-etcd
In order to return to an etcd server, a profile should be created in the master
configuration file:
.. code-block:: yaml
my_etcd_config:
etcd.host: 127.0.0.1
etcd.port: 4001
It is technically possible to configure etcd without using a profile, but this
is not considered to be a best practice, especially when multiple etcd servers
or clusters are available.
.. code-block:: yaml
etcd.host: 127.0.0.1
etcd.port: 4001
Additionally, two more options must be specified in the top-level configuration
in order to use the etcd returner:
.. code-block:: yaml
etcd.returner: my_etcd_config
etcd.returner_root: /salt/return
The ``etcd.returner`` option specifies which configuration profile to use. The
``etcd.returner_root`` option specifies the path inside etcd to use as the root
of the returner system.
Once the etcd options are configured, the returner may be used:
CLI Example:

.. code-block:: bash

    salt '*' test.ping --return etcd
A username and password can be set:
.. code-block:: yaml
etcd.username: larry # Optional; requires etcd.password to be set
etcd.password: 123pass # Optional; requires etcd.username to be set
You can also set a TTL (time to live) value for the returner:
.. code-block:: yaml
etcd.ttl: 5
Authentication with username and password, and ttl, currently requires the
``master`` branch of ``python-etcd``.
You may also specify different roles for read and write operations. First,
create the profiles as specified above. Then add:
.. code-block:: yaml
etcd.returner_read_profile: my_etcd_read
etcd.returner_write_profile: my_etcd_write
'''
from __future__ import absolute_import
# Import python libs
import json
import logging
# Import salt libs
try:
import salt.utils.etcd_util
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
import salt.utils
import salt.utils.jid
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'etcd'
def __virtual__():
'''
Only return if python-etcd is installed
'''
return __virtualname__ if HAS_LIBS else False
def _get_conn(opts, profile=None):
'''
Establish a connection to etcd
'''
if profile is None:
profile = opts.get('etcd.returner')
path = opts.get('etcd.returner_root', '/salt/return')
return salt.utils.etcd_util.get_conn(opts, profile), path
def returner(ret):
'''
Return data to an etcd server or cluster
'''
write_profile = __opts__.get('etcd.returner_write_profile')
if write_profile:
ttl = __opts__.get(write_profile, {}).get('etcd.ttl')
else:
ttl = __opts__.get('etcd.ttl')
client, path = _get_conn(__opts__, write_profile)
# Make a note of this minion for the external job cache
client.set(
'/'.join((path, 'minions', ret['id'])),
ret['jid'],
ttl=ttl,
)
for field in ret:
# Not using os.path.join because we're not dealing with file paths
dest = '/'.join((
path,
'jobs',
ret['jid'],
ret['id'],
field
))
client.set(dest, json.dumps(ret[field]), ttl=ttl)
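    # Resulting key layout, relative to etcd.returner_root (the jid and
    # minion id below are illustrative):
    #
    #   /salt/return/minions/minion1            -> <jid>
    #   /salt/return/jobs/<jid>/minion1/fun     -> "test.ping"
    #   /salt/return/jobs/<jid>/minion1/return  -> ...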
def save_load(jid, load, minions=None):
'''
Save the load to the specified jid
'''
write_profile = __opts__.get('etcd.returner_write_profile')
client, path = _get_conn(__opts__, write_profile)
if write_profile:
ttl = __opts__.get(write_profile, {}).get('etcd.ttl')
else:
ttl = __opts__.get('etcd.ttl')
client.set(
'/'.join((path, 'jobs', jid, '.load.p')),
json.dumps(load),
ttl=ttl,
)
def save_minions(jid, minions): # pylint: disable=unused-argument
'''
Included for API consistency
'''
pass
def get_load(jid):
'''
Return the load data that marks a specified jid
'''
read_profile = __opts__.get('etcd.returner_read_profile')
client, path = _get_conn(__opts__, read_profile)
    return json.loads(client.get('/'.join((path, 'jobs', jid, '.load.p'))).value)
def get_jid(jid):
'''
Return the information returned when the specified job id was executed
'''
client, path = _get_conn(__opts__)
jid_path = '/'.join((path, 'jobs', jid))
return client.tree(jid_path)
def get_fun():
'''
Return a dict of the last function called for all minions
'''
ret = {}
client, path = _get_conn(__opts__)
items = client.get('/'.join((path, 'minions')))
for item in items.children:
comps = str(item.key).split('/')
ret[comps[-1]] = item.value
return ret
def get_jids():
'''
Return a list of all job ids
'''
ret = {}
client, path = _get_conn(__opts__)
items = client.get('/'.join((path, 'jobs')))
for item in items.children:
if item.dir is True:
jid = str(item.key).split('/')[-1]
load = client.get('/'.join((item.key, '.load.p'))).value
ret[jid] = salt.utils.jid.format_jid_instance(jid, json.loads(load))
return ret
def get_minions():
'''
Return a list of minions
'''
ret = []
client, path = _get_conn(__opts__)
items = client.get('/'.join((path, 'minions')))
for item in items.children:
comps = str(item.key).split('/')
ret.append(comps[-1])
return ret
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
'''
Do any work necessary to prepare a JID, including sending a custom id
'''
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid()
|
{
"content_hash": "0418e8a4e7ddad691451ecf3ac8a3f71",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 80,
"avg_line_length": 25.40990990990991,
"alnum_prop": 0.6234710157773444,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "3e0e75f67bf5650ed1658c13037513a903cbb36e",
"size": "5665",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.3/salt/returners/etcd_return.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
from nose.tools import with_setup
from testconfig import config
from pyvcloud import vcloudair
from pyvcloud.vcloudair import VCA
import time
class TestCatalog:
def __init__(self):
self.vca = None
self.login_to_vcloud()
def login_to_vcloud(self):
"""Login to vCloud"""
username = config['vcloud']['username']
password = config['vcloud']['password']
service_type = config['vcloud']['service_type']
host = config['vcloud']['host']
version = config['vcloud']['version']
org = config['vcloud']['org']
service = config['vcloud']['service']
instance = config['vcloud']['instance']
self.vca = VCA(host=host, username=username, service_type=service_type, version=version, verify=True, log=True)
assert self.vca
if vcloudair.VCA_SERVICE_TYPE_STANDALONE == service_type:
result = self.vca.login(password=password, org=org)
assert result
result = self.vca.login(token=self.vca.token, org=org, org_url=self.vca.vcloud_session.org_url)
assert result
elif vcloudair.VCA_SERVICE_TYPE_SUBSCRIPTION == service_type:
result = self.vca.login(password=password)
assert result
result = self.vca.login(token=self.vca.token)
assert result
result = self.vca.login_to_org(service, org)
assert result
elif vcloudair.VCA_SERVICE_TYPE_ONDEMAND == service_type:
result = self.vca.login(password=password)
assert result
result = self.vca.login_to_instance(password=password, instance=instance, token=None, org_url=None)
assert result
result = self.vca.login_to_instance(password=None, instance=instance, token=self.vca.vcloud_session.token, org_url=self.vca.vcloud_session.org_url)
assert result
def logout_from_vcloud(self):
"""Logout from vCloud"""
print 'logout'
        self.vca.logout()
self.vca = None
assert self.vca is None
def catalog_exists(self, catalog_name, catalogs):
for catalog in catalogs:
if catalog.name == catalog_name:
return True
return False
def test_0001(self):
"""Loggin in to vCloud"""
assert self.vca.token
def test_0002(self):
"""Get VDC"""
vdc_name = config['vcloud']['vdc']
the_vdc = self.vca.get_vdc(vdc_name)
assert the_vdc
assert the_vdc.get_name() == vdc_name
def test_0009(self):
"""Validate that catalog doesn't exist"""
vdc_name = config['vcloud']['vdc']
vapp_name = config['vcloud']['vapp']
vm_name = config['vcloud']['vm']
custom_catalog = config['vcloud']['custom_catalog']
the_vdc = self.vca.get_vdc(vdc_name)
assert the_vdc
assert the_vdc.get_name() == vdc_name
catalogs = self.vca.get_catalogs()
assert not self.catalog_exists(custom_catalog, catalogs)
def test_0010(self):
"""Create Catalog"""
vdc_name = config['vcloud']['vdc']
vapp_name = config['vcloud']['vapp']
vm_name = config['vcloud']['vm']
custom_catalog = config['vcloud']['custom_catalog']
the_vdc = self.vca.get_vdc(vdc_name)
assert the_vdc
assert the_vdc.get_name() == vdc_name
task = self.vca.create_catalog(custom_catalog, custom_catalog)
assert task
result = self.vca.block_until_completed(task)
assert result
catalogs = self.vca.get_catalogs()
assert self.catalog_exists(custom_catalog, catalogs)
def test_0011(self):
"""Upload media file"""
vdc_name = config['vcloud']['vdc']
vapp_name = config['vcloud']['vapp']
vm_name = config['vcloud']['vm']
custom_catalog = config['vcloud']['custom_catalog']
media_file_name = config['vcloud']['media_file_name']
media_name = config['vcloud']['media_name']
the_vdc = self.vca.get_vdc(vdc_name)
assert the_vdc
assert the_vdc.get_name() == vdc_name
result = self.vca.upload_media(custom_catalog, media_name, media_file_name, media_file_name, True)
assert result
#todo: assert that media is uploaded
def test_0099(self):
"""Delete Catalog"""
vdc_name = config['vcloud']['vdc']
vapp_name = config['vcloud']['vapp']
vm_name = config['vcloud']['vm']
custom_catalog = config['vcloud']['custom_catalog']
the_vdc = self.vca.get_vdc(vdc_name)
assert the_vdc
assert the_vdc.get_name() == vdc_name
deleted = self.vca.delete_catalog(custom_catalog)
assert deleted
the_vdc = self.vca.get_vdc(vdc_name)
catalogs = self.vca.get_catalogs()
assert not self.catalog_exists(custom_catalog, catalogs)
|
{
"content_hash": "407f497f42f39fbe92d6ed8bd93141ac",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 159,
"avg_line_length": 35.948905109489054,
"alnum_prop": 0.5981725888324874,
"repo_name": "cloudify-cosmo/pyvcloud",
"id": "dd3469f935539d4a65d1a5064aac1e2f68c3f247",
"size": "4925",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/catalog_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16922884"
}
],
"symlink_target": ""
}
|
import logging
import os
from contextlib import contextmanager
from functools import partial
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
TextIO,
Tuple,
Union,
cast,
)
from funcy import compact, lremove
from rich.rule import Rule
from rich.syntax import Syntax
from dvc.exceptions import DvcException
from dvc.stage import PipelineStage
from dvc.stage.serialize import to_pipeline_file
from dvc.types import OptStr
from dvc.utils.serialize import dumps_yaml
if TYPE_CHECKING:
from dvc.repo import Repo
from dvc.dvcfile import DVCFile
from rich.tree import Tree
from dvc.ui import ui
PROMPTS = {
"cmd": "[b]Command[/b] to execute",
"code": "Path to a [b]code[/b] file/directory",
"data": "Path to a [b]data[/b] file/directory",
"models": "Path to a [b]model[/b] file/directory",
"params": "Path to a [b]parameters[/b] file",
"metrics": "Path to a [b]metrics[/b] file",
"plots": "Path to a [b]plots[/b] file/directory",
"live": "Path to log [b]dvclive[/b] outputs",
}
def _prompts(
keys: Iterable[str],
defaults: Dict[str, str] = None,
validator: Callable[[str, str], Union[str, Tuple[str, str]]] = None,
allow_omission: bool = True,
stream: Optional[TextIO] = None,
) -> Dict[str, OptStr]:
from dvc.ui.prompt import Prompt
defaults = defaults or {}
return {
key: Prompt.prompt_(
PROMPTS[key],
console=ui.error_console,
default=defaults.get(key),
validator=partial(validator, key) if validator else None,
allow_omission=allow_omission,
stream=stream,
)
for key in keys
}
@contextmanager
def _disable_logging(highest_level=logging.CRITICAL):
previous_level = logging.root.manager.disable
logging.disable(highest_level)
try:
yield
finally:
logging.disable(previous_level)
def build_workspace_tree(workspace: Dict[str, str]) -> "Tree":
from rich.tree import Tree
tree = Tree(
"DVC assumes the following workspace structure:",
highlight=True,
)
for value in sorted(workspace.values()):
tree.add(f"[green]{value}[/green]")
return tree
PIPELINE_FILE_LINK = "https://s.dvc.org/g/pipeline-files"
def init_interactive(
name: str,
defaults: Dict[str, str],
provided: Dict[str, str],
validator: Callable[[str, str], Union[str, Tuple[str, str]]] = None,
live: bool = False,
stream: Optional[TextIO] = None,
) -> Dict[str, str]:
command = provided.pop("cmd", None)
primary = lremove(provided.keys(), ["code", "data", "models", "params"])
secondary = lremove(
provided.keys(), ["live"] if live else ["metrics", "plots"]
)
prompts = primary + secondary
workspace = {**defaults, **provided}
if not live and "live" not in provided:
workspace.pop("live", None)
for key in ("plots", "metrics"):
if live and key not in provided:
workspace.pop(key, None)
ret: Dict[str, str] = {}
if command:
ret["cmd"] = command
if not prompts and command:
return ret
ui.error_write(
f"This command will guide you to set up a [bright_blue]{name}[/]",
"stage in [green]dvc.yaml[/].",
f"\nSee [repr.url]{PIPELINE_FILE_LINK}[/].\n",
styled=True,
)
if not command:
ret.update(
compact(_prompts(["cmd"], allow_omission=False, stream=stream))
)
if prompts:
ui.error_write(styled=True)
if not prompts:
return ret
ui.error_write(
"Enter the paths for dependencies and outputs of the command.",
styled=True,
)
if workspace:
ui.error_write(build_workspace_tree(workspace), styled=True)
ui.error_write(styled=True)
ret.update(
compact(
_prompts(prompts, defaults, validator=validator, stream=stream)
)
)
return ret
def _check_stage_exists(
dvcfile: "DVCFile", name: str, force: bool = False
) -> None:
if not force and dvcfile.exists() and name in dvcfile.stages:
from dvc.stage.exceptions import DuplicateStageName
hint = "Use '--force' to overwrite."
raise DuplicateStageName(
f"Stage '{name}' already exists in 'dvc.yaml'. {hint}"
)
def loadd_params(path: str) -> Dict[str, List[str]]:
from dvc.utils.serialize import LOADERS
_, ext = os.path.splitext(path)
return {path: list(LOADERS[ext](path))}
def validate_prompts(key: str, value: str) -> Union[Any, Tuple[Any, str]]:
from dvc.ui.prompt import InvalidResponse
if key == "params":
assert isinstance(value, str)
msg_format = (
"[prompt.invalid]'{0}' {1}. "
"Please retry with an existing parameters file."
)
if not os.path.exists(value):
raise InvalidResponse(msg_format.format(value, "does not exist"))
if os.path.isdir(value):
raise InvalidResponse(msg_format.format(value, "is a directory"))
elif key in ("code", "data"):
if not os.path.exists(value):
return value, (
f"[yellow]'{value}' does not exist in the workspace. "
'"exp run" may fail.[/]'
)
return value
def init(
repo: "Repo",
name: str = "train",
type: str = "default", # pylint: disable=redefined-builtin
defaults: Dict[str, str] = None,
overrides: Dict[str, str] = None,
interactive: bool = False,
force: bool = False,
stream: Optional[TextIO] = None,
) -> PipelineStage:
from dvc.dvcfile import make_dvcfile
dvcfile = make_dvcfile(repo, "dvc.yaml")
_check_stage_exists(dvcfile, name, force=force)
defaults = defaults.copy() if defaults else {}
overrides = overrides.copy() if overrides else {}
with_live = type == "dl"
if interactive:
defaults = init_interactive(
name,
validator=validate_prompts,
defaults=defaults,
live=with_live,
provided=overrides,
stream=stream,
)
else:
if with_live:
# suppress `metrics`/`plots` if live is selected, unless
# it is also provided via overrides/cli.
            # This also makes the output a checkpoint.
defaults.pop("metrics", None)
defaults.pop("plots", None)
else:
defaults.pop("live", None) # suppress live otherwise
context: Dict[str, str] = {**defaults, **overrides}
assert "cmd" in context
params_kv = []
params = context.get("params")
if params:
params_kv.append(loadd_params(params))
checkpoint_out = bool(context.get("live"))
models = context.get("models")
stage = repo.stage.create(
name=name,
cmd=context["cmd"],
deps=compact([context.get("code"), context.get("data")]),
params=params_kv,
metrics_no_cache=compact([context.get("metrics")]),
plots_no_cache=compact([context.get("plots")]),
live=context.get("live"),
force=force,
**{"checkpoints" if checkpoint_out else "outs": compact([models])},
)
if interactive:
ui.error_write(Rule(style="green"), styled=True)
_yaml = dumps_yaml(to_pipeline_file(cast(PipelineStage, stage)))
syn = Syntax(_yaml, "yaml", theme="ansi_dark")
ui.error_write(syn, styled=True)
from dvc.ui.prompt import Confirm
if not interactive or Confirm.ask(
"Do you want to add the above contents to dvc.yaml?",
console=ui.error_console,
default=True,
stream=stream,
):
with _disable_logging(), repo.scm_context(autostage=True, quiet=True):
stage.dump(update_lock=False)
stage.ignore_outs()
if params:
repo.scm_context.track_file(params)
else:
raise DvcException("Aborting ...")
assert isinstance(stage, PipelineStage)
return stage
|
{
"content_hash": "452b7acbbdb6146cafafc2492bd3212d",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 78,
"avg_line_length": 28.535211267605632,
"alnum_prop": 0.6027887462981244,
"repo_name": "efiop/dvc",
"id": "613906b1ae21bfa6b3395ca64f97adfeefc2b343",
"size": "8104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dvc/repo/experiments/init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "53"
},
{
"name": "Inno Setup",
"bytes": "10158"
},
{
"name": "PowerShell",
"bytes": "2686"
},
{
"name": "Python",
"bytes": "2231040"
},
{
"name": "Shell",
"bytes": "695"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('trips', '0005_auto_20150725_1629'),
]
operations = [
migrations.AlterField(
model_name='calendar',
name='service_id',
field=models.CharField(unique=True, max_length=25, verbose_name='Service ID'),
),
migrations.AlterField(
model_name='calendardate',
name='service_id',
field=models.ForeignKey(verbose_name='Service ID', to='trips.Calendar'),
),
migrations.AlterField(
model_name='trip',
name='service_id',
field=models.ForeignKey(verbose_name='Service ID', to='trips.Calendar'),
),
]
|
{
"content_hash": "a99f0c39733f2d2b436a0f1eec76179d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 90,
"avg_line_length": 28.857142857142858,
"alnum_prop": 0.5792079207920792,
"repo_name": "renanalencar/hermes",
"id": "06d659b4aa1054046b8fd0f6cb06d4ec166c043f",
"size": "832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trips/migrations/0006_auto_20150725_1630.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45248"
},
{
"name": "HTML",
"bytes": "48827"
},
{
"name": "JavaScript",
"bytes": "87491"
},
{
"name": "Python",
"bytes": "158986"
}
],
"symlink_target": ""
}
|
'''
Script "suite" for grabbing and analyzing files from the NCBI database. Generally, the program will
take a list of accession numbers and retrieve either the protein or nucleotide sequence and can save the
file in either genbank or fasta form. The file containing the accession numbers should be in csv format
with a column labelled as "Accession". Other column labels are also possible, and up to the user.
Usage:
python ncbi_sequence_grabber.py 'accession list' 'type to retrieve'* 'format type' 'path to save files'
*type to retrieve can equal 'protein' or 'nucleotide'; format type can equal 'gb'/'genbank' or 'fasta'
'''
from Bio import Entrez
from os.path import join, basename, isfile
from os import listdir
import pandas as pd
import numpy as np
import sys
#Always tell NCBI your email
Entrez.email = 'john.hayes@usask.ca'
#Fetches either individual protein or nucleotide entry from database and saves it in specified path
#Default format fetched in genbank; entry_type = 'protein' or 'nucleotide'
def Sequence_Fetch(entry_id, entry_type, format_type, path):
#Format file name appropriately
if format_type == 'gb':
file_name = entry_id + '.gb'
elif format_type == 'fasta':
file_name = entry_id + '.fasta'
    #Fetch the sequence and save it
with open(join(path, file_name), 'w') as f:
print("Fetching %s" %entry_id)
handle = Entrez.efetch(db = entry_type, id=entry_id, rettype = format_type, retmode = 'text')
f.write(handle.read())
#Reads the list of accession numbers. The file containing the accession numbers should be in csv format
#with a column labelled as "Accession Number"
def Accession_Reader(accession_file, entry_type, format_type, path):
    #Set the extension based on the file type; normalize 'genbank' to 'gb',
    #which is the rettype that Entrez.efetch and Sequence_Fetch expect
    if format_type == 'fasta':
        extension = '.fasta'
    elif format_type == 'genbank' or format_type == 'gb':
        format_type = 'gb'
        extension = '.gb'
    else:
        print("Error, not a valid file type")
        return
#Not necessary to read as dataframe, but likely easier to do this for future file name/fasta header manipulation
#Use of SeqIO and parsing through genbank files may make this unnecessary
df = pd.read_csv(accession_file)
#Make a list of the accession numbers to feed to the sequence fetcher
accession_list = []
for entry in list(df['Accession Number']):
if pd.notnull(entry):
accession_list.append(entry)
for entry in accession_list:
if not isfile(join(path, entry + extension)):
Sequence_Fetch(entry, entry_type, format_type, path)
else:
print("The file with Accession Number %s appears to have already been fetched" %entry)
#Run the program
if len(sys.argv) == 5:
    Accession_Reader(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
else:
    print("Sorry, I don't follow, there are supposed to be four arguments...")
    print("'accession list', 'type to retrieve', 'format type', 'path to save files'")
|
{
"content_hash": "b7bb6fe9c6b522126534451881eec131",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 113,
"avg_line_length": 37.554054054054056,
"alnum_prop": 0.7351565311263044,
"repo_name": "chibbargroup/CentralRepository",
"id": "9f51215b41bcd7c02fc113930451365219a32e62",
"size": "2779",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Phylogeny Scripts/ncbi_sequence_grabber.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157325"
},
{
"name": "R",
"bytes": "7377"
},
{
"name": "Shell",
"bytes": "19238"
}
],
"symlink_target": ""
}
|
import glance_store
from oslo_log import log as logging
from glance.api import authorization
from glance.api import policy
from glance.api import property_protections
from glance.common import exception
from glance.common import property_utils
from glance.common import store_utils
import glance.db
import glance.domain
from glance.i18n import _LE
import glance.location
import glance.notifier
import glance.quota
try:
import glance.search
glance_search = glance.search
except ImportError:
glance_search = None
LOG = logging.getLogger(__name__)
class Gateway(object):
def __init__(self, db_api=None, store_api=None, notifier=None,
policy_enforcer=None, es_api=None):
self.db_api = db_api or glance.db.get_api()
self.store_api = store_api or glance_store
self.store_utils = store_utils
self.notifier = notifier or glance.notifier.Notifier()
self.policy = policy_enforcer or policy.Enforcer()
if es_api:
self.es_api = es_api
else:
self.es_api = glance_search.get_api() if glance_search else None
def get_image_factory(self, context):
image_factory = glance.domain.ImageFactory()
store_image_factory = glance.location.ImageFactoryProxy(
image_factory, context, self.store_api, self.store_utils)
quota_image_factory = glance.quota.ImageFactoryProxy(
store_image_factory, context, self.db_api, self.store_utils)
policy_image_factory = policy.ImageFactoryProxy(
quota_image_factory, context, self.policy)
notifier_image_factory = glance.notifier.ImageFactoryProxy(
policy_image_factory, context, self.notifier)
if property_utils.is_property_protection_enabled():
property_rules = property_utils.PropertyRules(self.policy)
pif = property_protections.ProtectedImageFactoryProxy(
notifier_image_factory, context, property_rules)
authorized_image_factory = authorization.ImageFactoryProxy(
pif, context)
else:
authorized_image_factory = authorization.ImageFactoryProxy(
notifier_image_factory, context)
return authorized_image_factory
def get_image_member_factory(self, context):
image_factory = glance.domain.ImageMemberFactory()
quota_image_factory = glance.quota.ImageMemberFactoryProxy(
image_factory, context, self.db_api, self.store_utils)
policy_member_factory = policy.ImageMemberFactoryProxy(
quota_image_factory, context, self.policy)
authorized_image_factory = authorization.ImageMemberFactoryProxy(
policy_member_factory, context)
return authorized_image_factory
def get_repo(self, context):
image_repo = glance.db.ImageRepo(context, self.db_api)
store_image_repo = glance.location.ImageRepoProxy(
image_repo, context, self.store_api, self.store_utils)
quota_image_repo = glance.quota.ImageRepoProxy(
store_image_repo, context, self.db_api, self.store_utils)
policy_image_repo = policy.ImageRepoProxy(
quota_image_repo, context, self.policy)
notifier_image_repo = glance.notifier.ImageRepoProxy(
policy_image_repo, context, self.notifier)
if property_utils.is_property_protection_enabled():
property_rules = property_utils.PropertyRules(self.policy)
pir = property_protections.ProtectedImageRepoProxy(
notifier_image_repo, context, property_rules)
authorized_image_repo = authorization.ImageRepoProxy(
pir, context)
else:
authorized_image_repo = authorization.ImageRepoProxy(
notifier_image_repo, context)
return authorized_image_repo
def get_task_factory(self, context):
task_factory = glance.domain.TaskFactory()
policy_task_factory = policy.TaskFactoryProxy(
task_factory, context, self.policy)
notifier_task_factory = glance.notifier.TaskFactoryProxy(
policy_task_factory, context, self.notifier)
authorized_task_factory = authorization.TaskFactoryProxy(
notifier_task_factory, context)
return authorized_task_factory
def get_task_repo(self, context):
task_repo = glance.db.TaskRepo(context, self.db_api)
policy_task_repo = policy.TaskRepoProxy(
task_repo, context, self.policy)
notifier_task_repo = glance.notifier.TaskRepoProxy(
policy_task_repo, context, self.notifier)
authorized_task_repo = authorization.TaskRepoProxy(
notifier_task_repo, context)
return authorized_task_repo
def get_task_stub_repo(self, context):
task_stub_repo = glance.db.TaskRepo(context, self.db_api)
policy_task_stub_repo = policy.TaskStubRepoProxy(
task_stub_repo, context, self.policy)
notifier_task_stub_repo = glance.notifier.TaskStubRepoProxy(
policy_task_stub_repo, context, self.notifier)
authorized_task_stub_repo = authorization.TaskStubRepoProxy(
notifier_task_stub_repo, context)
return authorized_task_stub_repo
def get_task_executor_factory(self, context):
task_repo = self.get_task_repo(context)
image_repo = self.get_repo(context)
image_factory = self.get_image_factory(context)
return glance.domain.TaskExecutorFactory(task_repo,
image_repo,
image_factory)
def get_metadef_namespace_factory(self, context):
ns_factory = glance.domain.MetadefNamespaceFactory()
policy_ns_factory = policy.MetadefNamespaceFactoryProxy(
ns_factory, context, self.policy)
notifier_ns_factory = glance.notifier.MetadefNamespaceFactoryProxy(
policy_ns_factory, context, self.notifier)
authorized_ns_factory = authorization.MetadefNamespaceFactoryProxy(
notifier_ns_factory, context)
return authorized_ns_factory
def get_metadef_namespace_repo(self, context):
ns_repo = glance.db.MetadefNamespaceRepo(context, self.db_api)
policy_ns_repo = policy.MetadefNamespaceRepoProxy(
ns_repo, context, self.policy)
notifier_ns_repo = glance.notifier.MetadefNamespaceRepoProxy(
policy_ns_repo, context, self.notifier)
authorized_ns_repo = authorization.MetadefNamespaceRepoProxy(
notifier_ns_repo, context)
return authorized_ns_repo
def get_metadef_object_factory(self, context):
object_factory = glance.domain.MetadefObjectFactory()
policy_object_factory = policy.MetadefObjectFactoryProxy(
object_factory, context, self.policy)
notifier_object_factory = glance.notifier.MetadefObjectFactoryProxy(
policy_object_factory, context, self.notifier)
authorized_object_factory = authorization.MetadefObjectFactoryProxy(
notifier_object_factory, context)
return authorized_object_factory
def get_metadef_object_repo(self, context):
object_repo = glance.db.MetadefObjectRepo(context, self.db_api)
policy_object_repo = policy.MetadefObjectRepoProxy(
object_repo, context, self.policy)
notifier_object_repo = glance.notifier.MetadefObjectRepoProxy(
policy_object_repo, context, self.notifier)
authorized_object_repo = authorization.MetadefObjectRepoProxy(
notifier_object_repo, context)
return authorized_object_repo
def get_metadef_resource_type_factory(self, context):
resource_type_factory = glance.domain.MetadefResourceTypeFactory()
policy_resource_type_factory = policy.MetadefResourceTypeFactoryProxy(
resource_type_factory, context, self.policy)
notifier_resource_type_factory = (
glance.notifier.MetadefResourceTypeFactoryProxy(
policy_resource_type_factory, context, self.notifier)
)
authorized_resource_type_factory = (
authorization.MetadefResourceTypeFactoryProxy(
notifier_resource_type_factory, context)
)
return authorized_resource_type_factory
def get_metadef_resource_type_repo(self, context):
resource_type_repo = glance.db.MetadefResourceTypeRepo(
context, self.db_api)
policy_object_repo = policy.MetadefResourceTypeRepoProxy(
resource_type_repo, context, self.policy)
notifier_object_repo = glance.notifier.MetadefResourceTypeRepoProxy(
policy_object_repo, context, self.notifier)
authorized_object_repo = authorization.MetadefResourceTypeRepoProxy(
notifier_object_repo, context)
return authorized_object_repo
def get_metadef_property_factory(self, context):
prop_factory = glance.domain.MetadefPropertyFactory()
policy_prop_factory = policy.MetadefPropertyFactoryProxy(
prop_factory, context, self.policy)
notifier_prop_factory = glance.notifier.MetadefPropertyFactoryProxy(
policy_prop_factory, context, self.notifier)
authorized_prop_factory = authorization.MetadefPropertyFactoryProxy(
notifier_prop_factory, context)
return authorized_prop_factory
def get_metadef_property_repo(self, context):
prop_repo = glance.db.MetadefPropertyRepo(context, self.db_api)
policy_prop_repo = policy.MetadefPropertyRepoProxy(
prop_repo, context, self.policy)
notifier_prop_repo = glance.notifier.MetadefPropertyRepoProxy(
policy_prop_repo, context, self.notifier)
authorized_prop_repo = authorization.MetadefPropertyRepoProxy(
notifier_prop_repo, context)
return authorized_prop_repo
def get_metadef_tag_factory(self, context):
tag_factory = glance.domain.MetadefTagFactory()
policy_tag_factory = policy.MetadefTagFactoryProxy(
tag_factory, context, self.policy)
notifier_tag_factory = glance.notifier.MetadefTagFactoryProxy(
policy_tag_factory, context, self.notifier)
authorized_tag_factory = authorization.MetadefTagFactoryProxy(
notifier_tag_factory, context)
return authorized_tag_factory
def get_metadef_tag_repo(self, context):
tag_repo = glance.db.MetadefTagRepo(context, self.db_api)
policy_tag_repo = policy.MetadefTagRepoProxy(
tag_repo, context, self.policy)
notifier_tag_repo = glance.notifier.MetadefTagRepoProxy(
policy_tag_repo, context, self.notifier)
authorized_tag_repo = authorization.MetadefTagRepoProxy(
notifier_tag_repo, context)
return authorized_tag_repo
def get_catalog_search_repo(self, context):
if self.es_api is None:
LOG.error(_LE('The search and index services are not available. '
'Ensure you have the necessary prerequisite '
'dependencies installed like elasticsearch to use '
'these services.'))
raise exception.SearchNotAvailable()
search_repo = glance.search.CatalogSearchRepo(context, self.es_api)
policy_search_repo = policy.CatalogSearchRepoProxy(
search_repo, context, self.policy)
return policy_search_repo
|
{
"content_hash": "780c0f4b7a4ec2576b9c75518e1a5c0a",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 78,
"avg_line_length": 47.12244897959184,
"alnum_prop": 0.6733650931139021,
"repo_name": "vuntz/glance",
"id": "bc12665de4a31310b536606837e1750925d5217a",
"size": "12207",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "glance/gateway.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3964511"
},
{
"name": "Shell",
"bytes": "7860"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
import argparse
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
    try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
# Login with givin credentials
try:
con.login(credential)
    except Exception:
print('Login failed')
def del_all_network_sets(net):
netsets = net.get_networksets()
for netset in netsets:
print('Deleting Network Set:', netset['name'])
net.delete_networkset(netset)
def del_network_set_by_name(net, name):
found = False
netsets = net.get_networksets()
for netset in netsets:
print(netset['name'],'==', name)
if netset['name'] == name:
found = True
print('Deleting Network Set:', name)
net.delete_networkset(netset)
if not found:
print('Error, could not locate Network Set:', name)
sys.exit()
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Delete individual or ALL Network Sets
Usage: del-network-set.py -a HOST [-u USER] -p PASSWD [-c CERT] [-y PROXY] (-d | -n NAME)''')
parser.add_argument('-a', dest='host', required=True,
help='''
HP OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HP OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HP OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
Proxy (host:port format)''')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-d', dest='delete_all', action='store_true',
help='''
Delete ALL Network-sets''')
group.add_argument('-n', dest='name',
help='''
Name of the Network-set to delete''')
args = parser.parse_args()
credential = {'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
net = hpov.networking(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
if args.delete_all:
del_all_network_sets(net)
sys.exit()
del_network_set_by_name(net, args.name)
if __name__ == '__main__':
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
{
"content_hash": "18a258124847ba086c22eedb02bc3279",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 73,
"avg_line_length": 29.394957983193276,
"alnum_prop": 0.5943396226415094,
"repo_name": "miqui/python-hpOneView",
"id": "39ebd2b2511bb0845b02b76812372cc2a3b52097",
"size": "4651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/scripts/del-network-set.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "121791"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import pycurl, sys, time, logging
from os import unlink, path
from threading import Thread
from decrypt import *
if sys.version_info[0] < 3:
from StringIO import StringIO
from tkMessageBox import showinfo as msgbox
from urllib import unquote
from urllib import urlencode
else:
from io import BytesIO
from tkinter.messagebox import showinfo as msgbox
from urllib.parse import unquote
from urllib.parse import urlencode
log = logging.getLogger('main')
_cookies_file = '.adln_cookies'
_headers = """
Accept:text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
Accept-Language:es-ES,es;q=0.8\nConnection:keep-alive\nDNT:1\nHost:%s
"""
_ua = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.101 Safari/537.36'
def get_content(url, data = None):
# host determination
host = text_finder(url + '/', '//', '/')
if sys.version_info[0] < 3: buffer = StringIO()
else: buffer = BytesIO()
# cURL configuration
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.HTTPHEADER, (_headers % host).split('\n'))
c.setopt(c.USERAGENT, _ua)
c.setopt(c.COOKIEFILE, _cookies_file)
c.setopt(c.COOKIEJAR, _cookies_file)
c.setopt(c.SSL_VERIFYPEER, 0)
c.setopt(c.SSL_VERIFYHOST, 0)
c.setopt(c.WRITEDATA, buffer)
if data != None:
c.setopt(c.POSTFIELDS, urlencode(data))
# cURL: DO IT!
try:
#print('[get_content] Retrieving content from "%s"' % url)
c.perform()
status = c.getinfo(pycurl.HTTP_CODE)
c.close()
except BaseException as e:
log.error('[pycURL] Could not load data from url "%s"' % url)
log.error(e)
raise e
if status != 200:
        log.error('[pycURL] Received an HTTP ' + str(status) + ' status')
return ''
if sys.version_info[0] < 3:
cont = buffer.getvalue()
else:
body = buffer.getvalue()
cont = body.decode('iso-8859-1')
return cont
# Downloads a file by spoofing a browser to avoid bot-detection issues.
# Also calls the dlProgress function to report download progress
def download_file(url, filepath, progress_callback, error_callback = None, done_callback = None):
if filepath == None or filepath == '':
log.debug('Filepath is empty!')
return
log.debug('Downloading url "%s" to file "%s"' % (url, filepath))
try:
f = open(filepath, 'wb')
except IOError:
log.error('Could not open file to write download')
return
# host determination
host = text_finder(url + '/', '//', '/')
def df_thread():
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.HTTPHEADER, (_headers % host).split('\n'))
c.setopt(c.USERAGENT, _ua)
c.setopt(c.NOPROGRESS, 0)
c.setopt(c.FOLLOWLOCATION, 1)
c.setopt(c.SSL_VERIFYPEER, 0)
c.setopt(c.SSL_VERIFYHOST, 0)
c.setopt(c.PROGRESSFUNCTION, progress_callback)
try:
c.setopt(c.WRITEDATA, f)
c.perform()
except IOError as e:
log.error('Could not open file to write download')
log.error(e)
if error_callback != None:
error_callback(1, e)
return
except KeyboardInterrupt as e:
log.info('KeyboardInterrupt detected!')
delete_file(filepath)
if error_callback != None:
error_callback(2, e)
return
except pycurl.error as e:
log.error(e)
delete_file(filepath)
if error_callback != None:
error_callback(3, e)
return
        # check the HTTP status
status = c.getinfo(pycurl.HTTP_CODE)
c.close()
if status != 200 and status != 302:
log.error('[pycURL] Got an HTTP %s status' % str(status))
f.close()
delete_file(filepath)
return
if done_callback != None:
done_callback()
t = Thread(target=df_thread)
t.start()
return f
def delete_file(filepath):
log.debug('Deleting file "%s"' % filepath)
if path.isfile(filepath):
try:
unlink(filepath)
log.debug('File deleted')
except BaseException as e:
log.error('Could not delete file!')
log.error(e)
else:
log.error('File "%s" not found!' % filepath)
def do_tests(class_obj, search_anime = 'barakamon'):
# Search callback
def callback(res):
if len(res) < 1:
log.error('No content retrieved')
return
_,url = res[0]
test.get_info(url, callback2)
# Anime info callback
def callback2(info):
_,url = info['episodes'][0]
test.get_dd_link(url, callback3)
# DD Link callback
def callback3(link):
log.info('anime ep 1 dd link: ' + link)
log.info('anime search: ' + search_anime)
test = class_obj()
test.search_anime(search_anime, callback)
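# Returns the text between initparam and endparam (or to the end of text when
# endparam is None), e.g. text_finder('http://a.b/c', '//', '/') -> 'a.b'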
def text_finder(text, initparam, endparam = None, initreverse = False):
if text == None:
return ''
if initreverse: text_init = text.rfind(initparam) + len(initparam)
else: text_init = text.find(initparam) + len(initparam)
text_end = len(text) if endparam == None else text.find(endparam, text_init)
return text[text_init:text_end]
def print_encoded(cont):
if sys.stdout.encoding == None: print(cont)
else: print(cont.encode(sys.stdout.encoding, errors='replace'))
def final_file_name(file_name):
n_rename = 1
if path.isfile(file_name):
name, ext = file_name[:-4], file_name[-4:]
while path.isfile(name + ' (' + str(n_rename) + ')' + ext):
n_rename += 1
file_name = name + ' (' + str(n_rename) + ')' + ext
return file_name
def create_logger():
formatter = logging.Formatter(fmt='[%(asctime)s %(levelname)s][%(filename)s:%(lineno)d %(funcName)s] %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger('main')
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
return logger
if __name__ == '__main__':
log = create_logger()
# should not generate any errors
# get_content('http://www.animeflv.com')
# get_content should show an "could not connect" error
# get_content('.')
# get_content should show an "could not resolve" error
# get_content('123')
# text_finder test, should show the last segment
# print(text_finder('http://www.dailymotion.com/embed/video/x2l13qu_wol-12hh_videogames', '/', None, True))
from plugins.controllers.jka import jka
c = jka(); chapter_url = 'http://jkanime.net/k-on/1/'
def dd_callback(url):
if url == '': return
log.debug('trying to download file from ' + url)
f = download_file(url, 'test.mp4', dl_progress)
time.sleep(3)
log.debug('closing file')
f.close()
delete_file(f.name)
_tp = None
def dl_progress(dl_total, dl_prog, z, a):
if dl_total == 0: return
global _tp
if _tp == None or time.time() - _tp >= 1:
percent = (dl_prog / dl_total) * 100
print('progress: ' + ("%.2f%%" % percent))
_tp = time.time()
c.get_dd_link(chapter_url, dd_callback)
|
{
"content_hash": "e3d0317d64b6e8985d98634515428bfc",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 118,
"avg_line_length": 27.00826446280992,
"alnum_prop": 0.6739596083231334,
"repo_name": "jkcgs/anime-downloader-es",
"id": "171ec21ff5679c64508c4aafa2a723864d8676e9",
"size": "6560",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51417"
}
],
"symlink_target": ""
}
|
"""
Test the calcload.py utility and function calc_max_load()
"""
from tests.data import voyager_h5, voyager_fil
from blimpy.calcload import cmd_tool, calc_max_load
def test_calcload():
r""" Test the calcload command line tool """
args = [voyager_h5]
cmd_tool(args)
args = ['-v', voyager_fil]
cmd_tool(args)
def test_calc_max_load():
gb1 = calc_max_load(voyager_h5)
gb2 = calc_max_load(voyager_fil)
assert(gb1 == gb2 == 1.0)
if __name__ == "__main__":
test_calcload()
test_calc_max_load()
|
{
"content_hash": "36e8208262e178f98adb70e8280e454d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 57,
"avg_line_length": 25.476190476190474,
"alnum_prop": 0.6317757009345795,
"repo_name": "UCBerkeleySETI/blimpy",
"id": "cc82923ec3653486df41dc5676da4533aa7abaee",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_calcload.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "492"
},
{
"name": "Jupyter Notebook",
"bytes": "179482"
},
{
"name": "Python",
"bytes": "282017"
},
{
"name": "Shell",
"bytes": "1125"
},
{
"name": "TeX",
"bytes": "13936"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('taxbrain', '0064_auto_20150314_2132'),
]
operations = [
migrations.RenameField(
model_name='taxsaveinputs',
old_name='personal_exemp_amount',
new_name='_II_em',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='phase_out_threshold_single',
new_name='_II_em_ps_0',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='phase_out_threshold_jointly',
new_name='_II_em_ps_1',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='phase_out_threshold_head',
new_name='_II_em_ps_2',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='phase_out_threshold_separately',
new_name='_II_em_ps_3',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='phase_out',
new_name='_II_prt',
),
]
|
{
"content_hash": "69abfe88fd5c994860652355252056f4",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 54,
"avg_line_length": 28.325581395348838,
"alnum_prop": 0.5328407224958949,
"repo_name": "nolanzandi/webapp-public",
"id": "8c83030ebd587a7e6818d1f3ad6f60b36d7ff8f2",
"size": "1242",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "webapp/apps/taxbrain/migrations/0065_auto_20150314_2133.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "856744"
},
{
"name": "HTML",
"bytes": "61908"
},
{
"name": "JavaScript",
"bytes": "85905"
},
{
"name": "Python",
"bytes": "380111"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
}
|
"""
This module provides a client class for TSDB.
"""
import io
import copy
import json
import logging
import gzip
from baidubce import bce_client_configuration
from baidubce import utils
from baidubce.auth import bce_v1_signer
from baidubce.bce_base_client import BceBaseClient
from baidubce.http import bce_http_client
from baidubce.http import handler
from baidubce.http import http_content_types
from baidubce.http import http_headers
from baidubce.http import http_methods
from baidubce.services.tsdb import tsdb_handler
_logger = logging.getLogger(__name__)
class TsdbClient(BceBaseClient):
"""
    TSDB SDK client
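    Construction sketch (the access key, secret key and endpoint below are
    placeholders for your own values)::
        from baidubce.bce_client_configuration import BceClientConfiguration
        from baidubce.auth.bce_credentials import BceCredentials
        config = BceClientConfiguration(
            credentials=BceCredentials('your-ak', 'your-sk'),
            endpoint='your-tsdb-endpoint')
        client = TsdbClient(config)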
"""
def __init__(self, config):
BceBaseClient.__init__(self, config)
def write_datapoints(self, datapoints, use_gzip=True):
"""
write datapoints
:param datapoints: a list of datapoint dict
:type datapoints: list
        :param use_gzip: whether to gzip-compress the request body
:type use_gzip: boolean
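        Example datapoint (illustrative; consult the TSDB API reference
        for the full schema)::
            {
                "metric": "cpu_idle",
                "tags": {"host": "server1"},
                "timestamp": 1465376157007,
                "value": 67
            }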
"""
path = b'/v1/datapoint'
body = json.dumps({"datapoints": datapoints}).encode('utf-8')
headers={http_headers.CONTENT_TYPE: http_content_types.JSON}
if use_gzip:
body = self._gzip_compress(body)
headers[http_headers.CONTENT_ENCODING] = b'gzip'
return self._send_request(
http_methods.POST,
path=path,
body=body,
headers=headers,
body_parser=tsdb_handler.parse_json
)
def get_metrics(self):
"""
list metrics
:return: a list of metric
:rtype: baidubce.bce_response.BceResponse
"""
path = b"/v1/metric"
return self._send_request(http_methods.GET, path=path, body_parser=tsdb_handler.parse_json)
def get_fields(self, metric):
"""
get fields
:type metric: string
:param metric:
:return: field dict. {field1:{type: 'Number'},field2:{type: 'String'}}
:rtype: baidubce.bce_response.BceResponse
"""
metric = utils.convert_to_standard_string(metric)
path = b'/v1/metric/' + metric + b'/field'
return self._send_request(http_methods.GET, path=path, body_parser=tsdb_handler.parse_json)
def get_tags(self, metric):
"""
get tags
:type metric: string
:param metric:
:return: {tagk1:[tagk11,tagk21,..],tagk2:[tagk21,tagk22,..]..}
:rtype: baidubce.bce_response.BceResponse
"""
metric = utils.convert_to_standard_string(metric)
path = b'/v1/metric/' + metric + b'/tag'
return self._send_request(http_methods.GET, path=path, body_parser=tsdb_handler.parse_json)
def get_datapoints(self, query_list, disable_presampling=False):
"""
query datapoints
:param query_list: a list of query dict
:type query_list: list
        :param disable_presampling: whether to disable querying of presampled results
:type disable_presampling: boolean
:return: a list of result dict
:rtype: baidubce.bce_response.BceResponse
"""
path = b'/v1/datapoint'
params = {'query': '', 'disablePresampling': disable_presampling}
body = json.dumps({"queries": query_list})
return self._send_request(http_methods.PUT, path=path, params=params,
body=body, body_parser=tsdb_handler.parse_json)
def get_rows_with_sql(self, statement):
"""
get_rows_with_sql
:param statement: sql statement
:type statement: string
:return: {rows:[[],[],...], columns: []}
:rtype: baidubce.bce_response.BceResponse
"""
path = b'/v1/row'
params = {'sql': statement}
return self._send_request(http_methods.GET, path=path, params=params,
body_parser=tsdb_handler.parse_json)
def generate_pre_signed_url(self,
query_list,
timestamp=0,
expiration_in_seconds=1800,
disable_presampling=False,
headers=None,
headers_to_sign=None,
protocol=None,
config=None):
"""
Get an authorization url with expire time
:type timestamp: int
:param timestamp: None
:type expiration_in_seconds: int
:param expiration_in_seconds: None
:type options: dict
:param options: None
:return:
**URL string**
"""
path = b'/v1/datapoint'
params = {
'query': json.dumps({"queries": query_list}),
'disablePresampling': disable_presampling
}
return self._generate_pre_signed_url(path, timestamp, expiration_in_seconds,
params, headers, headers_to_sign, protocol, config)
def generate_pre_signed_url_with_sql(self,
statement,
timestamp=0,
expiration_in_seconds=1800,
headers=None,
headers_to_sign=None,
protocol=None,
config=None):
"""
Get an authorization url with sql
:type timestamp: int
:param timestamp: None
:type expiration_in_seconds: int
:param expiration_in_seconds: None
:type options: dict
:param options: None
:return:
**URL string**
"""
path = b'/v1/row'
params = {'sql': statement}
return self._generate_pre_signed_url(path, timestamp, expiration_in_seconds,
params, headers, headers_to_sign, protocol, config)
def _generate_pre_signed_url(
self, path, timestamp=0,
expiration_in_seconds=1800,
params=None,
headers=None,
headers_to_sign=None,
protocol=None,
config=None):
"""
Get an authorization url with expire time
:type timestamp: int
:param timestamp: None
:type expiration_in_seconds: int
:param expiration_in_seconds: None
:type options: dict
:param options: None
:return:
**URL string**
"""
config = self._merge_config(config)
headers = headers or {}
params = params or {}
# specified protocol > protocol in endpoint > default protocol
endpoint_protocol, endpoint_host, endpoint_port = utils.parse_host_port(
config.endpoint, config.protocol)
protocol = protocol or endpoint_protocol
full_host = endpoint_host
if endpoint_port != config.protocol.default_port:
full_host += b':' + str(endpoint_port)
headers[http_headers.HOST] = full_host
params[http_headers.AUTHORIZATION.lower()] = bce_v1_signer.sign(
config.credentials,
http_methods.GET,
path,
headers,
params,
timestamp,
expiration_in_seconds,
headers_to_sign)
return "%s://%s%s?%s" % (protocol.name,
full_host.decode(),
path.decode(),
utils.get_canonical_querystring(params, False).decode())
    def _gzip_compress(self, data):
        out = io.BytesIO()
        with gzip.GzipFile(fileobj=out, mode="w") as f:
            f.write(data)
        return out.getvalue()
def _merge_config(self, config):
if config is None:
return self.config
else:
new_config = copy.copy(self.config)
new_config.merge_non_none_values(config)
return new_config
def _send_request(
self, http_method, path,
body=None,
headers=None,
params=None,
config=None,
body_parser=None):
config = self._merge_config(config)
if headers is None:
headers = {http_headers.CONTENT_TYPE: http_content_types.JSON}
if body_parser is None:
body_parser = handler.parse_json
return bce_http_client.send_request(
config, bce_v1_signer.sign, [handler.parse_error, body_parser],
http_method, path, body, headers, params)
|
{
"content_hash": "119ae585ce542cc50a8aebdb1911734b",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 99,
"avg_line_length": 31.384615384615383,
"alnum_prop": 0.5510037348272643,
"repo_name": "baidubce/bce-sdk-python",
"id": "aaefbb8547610ef16d4765858ad905271c37adaa",
"size": "9136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "baidubce/services/tsdb/tsdb_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1275911"
},
{
"name": "Shell",
"bytes": "561"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.urls import path
from .export import EXPORT_MEDIA_PREFIX
from .views import ExportFileDetailView
MEDIA_PATH = settings.MEDIA_URL
# Split off domain and leading slash
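# e.g. "https://example.com/media/" -> "media/", and "/media/" -> "media/"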
if MEDIA_PATH.startswith("http"):
MEDIA_PATH = MEDIA_PATH.split("/", 3)[-1]
else:
MEDIA_PATH = MEDIA_PATH[1:]
urlpatterns = [
path(
"%s%s/<str:token>.zip" % (MEDIA_PATH, EXPORT_MEDIA_PREFIX),
ExportFileDetailView.as_view(),
name="account-download_export_token",
)
]
|
{
"content_hash": "c3a8da0d95eacfc1261dfd475dc4a78c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 67,
"avg_line_length": 24.80952380952381,
"alnum_prop": 0.6852207293666027,
"repo_name": "fin/froide",
"id": "bced463d0b2468845e8d2b5402d66fd7689e83e6",
"size": "521",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "froide/account/export_urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "302838"
},
{
"name": "JavaScript",
"bytes": "47357"
},
{
"name": "Makefile",
"bytes": "535"
},
{
"name": "Python",
"bytes": "1706123"
},
{
"name": "SCSS",
"bytes": "39397"
},
{
"name": "TypeScript",
"bytes": "57910"
},
{
"name": "Vue",
"bytes": "218866"
}
],
"symlink_target": ""
}
|
from .kernel_disk_workload import KernelDiskWorkload
from ...resources.resource import AbstractResource
from ...utils.override import overrides
from .abstract_system_board import AbstractSystemBoard
from ...isas import ISA
from m5.objects import (
Pc,
AddrRange,
X86FsLinux,
Addr,
X86SMBiosBiosInformation,
X86IntelMPProcessor,
X86IntelMPIOAPIC,
X86IntelMPBus,
X86IntelMPBusHierarchy,
X86IntelMPIOIntAssignment,
X86E820Entry,
Bridge,
IOXBar,
IdeDisk,
CowDiskImage,
RawDiskImage,
BaseXBar,
Port,
)
from m5.util.convert import toMemorySize
from ..processors.abstract_processor import AbstractProcessor
from ..memory.abstract_memory_system import AbstractMemorySystem
from ..cachehierarchies.abstract_cache_hierarchy import AbstractCacheHierarchy
from typing import List, Sequence
class X86Board(AbstractSystemBoard, KernelDiskWorkload):
"""
A board capable of full system simulation for X86.
**Limitations**
    * Currently, this board supports at most 3GB of memory (due to the 32-bit I/O hole)
* Much of the I/O subsystem is hard coded
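    Example (a construction sketch; the component objects are placeholders
    for any implementations of the abstract processor/memory/cache-hierarchy
    interfaces)::
        board = X86Board(
            clk_freq="3GHz",
            processor=my_processor,          # must report ISA.X86
            memory=my_memory,                # at most 3GB
            cache_hierarchy=my_cache_hierarchy,
        )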
"""
def __init__(
self,
clk_freq: str,
processor: AbstractProcessor,
memory: AbstractMemorySystem,
cache_hierarchy: AbstractCacheHierarchy,
) -> None:
super().__init__(
clk_freq=clk_freq,
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
)
if self.get_processor().get_isa() != ISA.X86:
raise Exception("The X86Board requires a processor using the X86 "
f"ISA. Current processor ISA: '{processor.get_isa().name}'.")
@overrides(AbstractSystemBoard)
def _setup_board(self) -> None:
self.pc = Pc()
self.workload = X86FsLinux()
# North Bridge
self.iobus = IOXBar()
# Set up all of the I/O.
self._setup_io_devices()
self.m5ops_base = 0xffff0000
def _setup_io_devices(self):
""" Sets up the x86 IO devices.
Note: This is mostly copy-paste from prior X86 FS setups. Some of it
may not be documented and there may be bugs.
"""
# Constants similar to x86_traits.hh
IO_address_space_base = 0x8000000000000000
pci_config_address_space_base = 0xC000000000000000
interrupts_address_space_base = 0xA000000000000000
APIC_range_size = 1 << 12
# Setup memory system specific settings.
if self.get_cache_hierarchy().is_ruby():
self.pc.attachIO(self.get_io_bus(), [self.pc.south_bridge.ide.dma])
else:
self.bridge = Bridge(delay="50ns")
self.bridge.mem_side_port = self.get_io_bus().cpu_side_ports
self.bridge.cpu_side_port = (
self.get_cache_hierarchy().get_mem_side_port()
)
self.bridge.ranges = [
AddrRange(0xC0000000, 0xFFFF0000),
AddrRange(
IO_address_space_base, interrupts_address_space_base - 1
),
AddrRange(pci_config_address_space_base, Addr.max),
]
self.apicbridge = Bridge(delay="50ns")
self.apicbridge.cpu_side_port = self.get_io_bus().mem_side_ports
self.apicbridge.mem_side_port = (
self.get_cache_hierarchy().get_cpu_side_port()
)
self.apicbridge.ranges = [
AddrRange(
interrupts_address_space_base,
interrupts_address_space_base
+ self.get_processor().get_num_cores() * APIC_range_size
- 1,
)
]
self.pc.attachIO(self.get_io_bus())
# Add in a Bios information structure.
self.workload.smbios_table.structures = [X86SMBiosBiosInformation()]
# Set up the Intel MP table
base_entries = []
ext_entries = []
for i in range(self.get_processor().get_num_cores()):
bp = X86IntelMPProcessor(
local_apic_id=i,
local_apic_version=0x14,
enable=True,
bootstrap=(i == 0),
)
base_entries.append(bp)
io_apic = X86IntelMPIOAPIC(
id=self.get_processor().get_num_cores(),
version=0x11,
enable=True,
address=0xFEC00000,
)
self.pc.south_bridge.io_apic.apic_id = io_apic.id
base_entries.append(io_apic)
pci_bus = X86IntelMPBus(bus_id=0, bus_type="PCI ")
base_entries.append(pci_bus)
isa_bus = X86IntelMPBus(bus_id=1, bus_type="ISA ")
base_entries.append(isa_bus)
connect_busses = X86IntelMPBusHierarchy(
bus_id=1, subtractive_decode=True, parent_bus=0
)
ext_entries.append(connect_busses)
pci_dev4_inta = X86IntelMPIOIntAssignment(
interrupt_type="INT",
polarity="ConformPolarity",
trigger="ConformTrigger",
source_bus_id=0,
source_bus_irq=0 + (4 << 2),
dest_io_apic_id=io_apic.id,
dest_io_apic_intin=16,
)
base_entries.append(pci_dev4_inta)
def assignISAInt(irq, apicPin):
assign_8259_to_apic = X86IntelMPIOIntAssignment(
interrupt_type="ExtInt",
polarity="ConformPolarity",
trigger="ConformTrigger",
source_bus_id=1,
source_bus_irq=irq,
dest_io_apic_id=io_apic.id,
dest_io_apic_intin=0,
)
base_entries.append(assign_8259_to_apic)
assign_to_apic = X86IntelMPIOIntAssignment(
interrupt_type="INT",
polarity="ConformPolarity",
trigger="ConformTrigger",
source_bus_id=1,
source_bus_irq=irq,
dest_io_apic_id=io_apic.id,
dest_io_apic_intin=apicPin,
)
base_entries.append(assign_to_apic)
assignISAInt(0, 2)
assignISAInt(1, 1)
for i in range(3, 15):
assignISAInt(i, i)
self.workload.intel_mp_table.base_entries = base_entries
self.workload.intel_mp_table.ext_entries = ext_entries
entries = [
# Mark the first megabyte of memory as reserved
X86E820Entry(addr=0, size="639kB", range_type=1),
X86E820Entry(addr=0x9FC00, size="385kB", range_type=2),
# Mark the rest of physical memory as available
X86E820Entry(
addr=0x100000,
size=f"{self.mem_ranges[0].size() - 0x100000:d}B",
range_type=1,
),
]
# Reserve the last 16kB of the 32-bit address space for m5ops
entries.append(
X86E820Entry(addr=0xFFFF0000, size="64kB", range_type=2)
)
self.workload.e820_table.entries = entries
@overrides(AbstractSystemBoard)
def has_io_bus(self) -> bool:
return True
@overrides(AbstractSystemBoard)
def get_io_bus(self) -> BaseXBar:
return self.iobus
@overrides(AbstractSystemBoard)
def has_dma_ports(self) -> bool:
return True
@overrides(AbstractSystemBoard)
def get_dma_ports(self) -> Sequence[Port]:
return [self.pc.south_bridge.ide.dma, self.iobus.mem_side_ports]
@overrides(AbstractSystemBoard)
def has_coherent_io(self) -> bool:
return True
@overrides(AbstractSystemBoard)
def get_mem_side_coherent_io_port(self) -> Port:
return self.iobus.mem_side_ports
@overrides(AbstractSystemBoard)
def _setup_memory_ranges(self):
memory = self.get_memory()
if memory.get_size() > toMemorySize("3GB"):
raise Exception(
"X86Board currently only supports memory sizes up "
"to 3GB because of the I/O hole."
)
data_range = AddrRange(memory.get_size())
memory.set_memory_range([data_range])
# Add the address range for the IO
self.mem_ranges = [
data_range, # All data
AddrRange(0xC0000000, size=0x100000), # For I/0
]
@overrides(KernelDiskWorkload)
def get_disk_device(self):
return "/dev/hda"
@overrides(KernelDiskWorkload)
def _add_disk_to_board(self, disk_image: AbstractResource):
ide_disk = IdeDisk()
ide_disk.driveID = "device0"
ide_disk.image = CowDiskImage(
child=RawDiskImage(read_only=True), read_only=False
)
ide_disk.image.child.image_file = disk_image.get_local_path()
# Attach the SimObject to the system.
self.pc.south_bridge.ide.disks = [ide_disk]
@overrides(KernelDiskWorkload)
def get_default_kernel_args(self) -> List[str]:
return [
"earlyprintk=ttyS0",
"console=ttyS0",
"lpj=7999923",
"root={root_value}",
]
|
{
"content_hash": "878f4a043070fa1666d3d782524edb9c",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 79,
"avg_line_length": 32.05821917808219,
"alnum_prop": 0.5797457536587971,
"repo_name": "gem5/gem5",
"id": "6761bdb3fa1422dfe4db7b8618c9f8657f0fa6bf",
"size": "10912",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "src/python/gem5/components/boards/x86_board.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "145626"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "C",
"bytes": "3927153"
},
{
"name": "C++",
"bytes": "42960484"
},
{
"name": "CMake",
"bytes": "133888"
},
{
"name": "Dockerfile",
"bytes": "34102"
},
{
"name": "Emacs Lisp",
"bytes": "1914"
},
{
"name": "Forth",
"bytes": "354"
},
{
"name": "Fortran",
"bytes": "15436"
},
{
"name": "HTML",
"bytes": "146414"
},
{
"name": "Hack",
"bytes": "139769"
},
{
"name": "Java",
"bytes": "6966"
},
{
"name": "M4",
"bytes": "42624"
},
{
"name": "Makefile",
"bytes": "39573"
},
{
"name": "Perl",
"bytes": "23784"
},
{
"name": "Python",
"bytes": "8079781"
},
{
"name": "Roff",
"bytes": "8754"
},
{
"name": "SCSS",
"bytes": "2971"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "5328"
},
{
"name": "Shell",
"bytes": "95638"
},
{
"name": "Starlark",
"bytes": "25668"
},
{
"name": "SuperCollider",
"bytes": "8869"
},
{
"name": "Vim Script",
"bytes": "4343"
},
{
"name": "sed",
"bytes": "3897"
}
],
"symlink_target": ""
}
|
from .error import BetTooLargeError, BetTooSmallError
class Players(object):
""" deals with the player holdings and results
all results are returned as (betting amount, is_all_in)
"""
def __init__(self, holdings):
self.cards = None
self.holdings = holdings
self.alive = True
self.in_hand = True
self.min_bet = 0
self.min_raise = 0
def deal_cards(self, cards):
self.cards = list(cards)
    def bet(self, amount, min_bet, min_raise):
        self.min_bet = min_bet
        self.min_raise = min_raise
        if amount == self.holdings:
            # Betting the whole stack is always legal: the player is all in.
            self.holdings -= amount
            return amount, self.holdings == 0
        elif amount > self.holdings:
            raise BetTooLargeError
        elif amount < min_bet or min_bet < amount < min_raise:
            # A bet must either call the current bet exactly or raise by at
            # least the minimum raise.
            raise BetTooSmallError
        else:
            self.holdings -= amount
            return amount, self.holdings == 0
def ante(self, amount):
ante_amount = min(self.holdings, amount)
self.holdings -= ante_amount
return ante_amount, self.holdings == 0
def fold(self):
self.in_hand = False
return -1, False
def reset(self, earnings):
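        # A player left with no chips after settlement is eliminated;
        # otherwise clear the hand for the next deal.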
self.holdings += earnings
if self.holdings == 0:
self.alive = False
else:
self.cards = None
self.in_hand = True
|
{
"content_hash": "c3d91ea43fde5855e57cbad3f03895f6",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 80,
"avg_line_length": 28.49019607843137,
"alnum_prop": 0.5554026152787337,
"repo_name": "tychung84/poker_tester",
"id": "5ba178ecfe45c7b16be05e301cde83a52fb990b6",
"size": "1453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poker_player/player.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15560"
}
],
"symlink_target": ""
}
|
from ._abstract import AbstractScraper
class G750g(AbstractScraper):
@classmethod
def host(cls):
return "750g.com"
def title(self):
return self.schema.title()
def total_time(self):
return self.schema.total_time()
def yields(self):
return self.schema.yields()
def image(self):
return self.schema.image()
def ingredients(self):
return self.schema.ingredients()
def instructions(self):
return self.schema.instructions()
def ratings(self):
return self.schema.ratings()
|
{
"content_hash": "dc48902f1d3ba5e0592da5221feffd92",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 41,
"avg_line_length": 20.5,
"alnum_prop": 0.632404181184669,
"repo_name": "hhursev/recipe-scraper",
"id": "4aab52bca735df0eade1ebb6c95642acc98816d6",
"size": "574",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "recipe_scrapers/g750g.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88554"
}
],
"symlink_target": ""
}
|
import traceback
from math import *
from PyQt5.QtCore import QEvent, QPoint, Qt, QPointF
from PyQt5.QtGui import QColor, QLinearGradient, QTransform, QPen, QPainter, QBrush
from PyQt5.QtWidgets import QDialog, QInputDialog, QMessageBox, QWidget
from Business.SketchActions import *
from Data.Style import BrushType
from GUI.GeometryViews.SketchView import get_sketch_view
from GUI.init import is_dark_theme
from GUI.Widgets.NewDrawers import *
class SketchEditorViewWidget(QWidget):
def __init__(self, parent, document, main_window):
QWidget.__init__(self, parent)
self._main_window = main_window
self._states = main_window.get_states()
self.setMouseTracking(True)
self._doc = document
self._is_dark_theme = is_dark_theme()
self._sketch = None
self._scale = 1.0
self._offset = Vertex()
self._mouse_position = None
self._move_ref_pos = None
self._pan_ref_pos = None
self._selected_edges = []
self._selected_key_points = []
self._selected_texts = []
self._selected_areas = []
self._selected_instances = []
self._kp_move = None
self._kp_hover = None
self._edge_hover = None
self._text_hover = None
self._area_hover = None
self._instance_hover = None
self._mouse_press_event_handlers = []
self._mouse_move_event_handlers = []
self._escape_event_handlers = []
self.installEventFilter(self)
if self._is_dark_theme:
self._axis_pen = QPen(QColor(40, 50, 80), 1)
self._gradient_color_top = QColor(80, 80, 90)
self._gradient_color_bottom = QColor(50, 50, 60)
else:
self._axis_pen = QPen(QColor(140, 150, 180), 1)
self._gradient_color_top = QColor(220, 220, 230)
self._gradient_color_bottom = QColor(170, 170, 180)
@property
def scale(self):
return self._scale
@property
def sketch(self):
return self._sketch
@property
def kp_hover(self):
return self._kp_hover
@kp_hover.setter
def kp_hover(self, value):
self._kp_hover = value
@property
def edge_hover(self):
return self._edge_hover
@edge_hover.setter
def edge_hover(self, value):
self._edge_hover = value
@property
def area_hover(self):
return self._area_hover
@area_hover.setter
def area_hover(self, value):
self._area_hover = value
@property
def instance_hover(self):
return self._instance_hover
@instance_hover.setter
def instance_hover(self, value):
self._instance_hover = value
@property
def text_hover(self):
return self._text_hover
@text_hover.setter
def text_hover(self, value):
self._text_hover = value
@property
def selected_texts(self):
return self._selected_texts
@selected_texts.setter
def selected_texts(self, value):
self._selected_texts = value
self.update()
@property
def selected_key_points(self):
return self._selected_key_points
@selected_key_points.setter
def selected_key_points(self, value):
self._selected_key_points = value
self.update()
@property
def selected_edges(self):
return self._selected_edges
@selected_edges.setter
def selected_edges(self, value):
self._selected_edges = value
self.update()
@property
def selected_areas(self):
return self._selected_areas
@selected_areas.setter
def selected_areas(self, value):
self._selected_areas = value
self.update()
@property
def selected_instances(self):
return self._selected_instances
@selected_instances.setter
def selected_instances(self, value):
self._selected_instances = value
self.update()
def eventFilter(self, obj, event):
if event.type() == QEvent.KeyPress:
if event.key() == Qt.Key_Delete:
self.on_delete()
return True
if event.key() == Qt.Key_Control:
self._states.multi_select = True
if event.key() == Qt.Key_Escape:
self.on_escape()
if event.type() == QEvent.KeyRelease:
if event.key() == Qt.Key_Control:
self._states.multi_select = False
if event.type() == QEvent.GraphicsSceneMouseDoubleClick:
print("double click")
return False
def add_mouse_press_event_handler(self, event_handler):
self._mouse_press_event_handlers.append(event_handler)
def add_mouse_move_event_handler(self, event_handler):
self._mouse_move_event_handlers.append(event_handler)
def add_escape_event_handler(self, event_handler):
self._escape_event_handlers.append(event_handler)
def on_zoom_fit(self):
if self._sketch is None:
return
limits = self._sketch.get_limits()
x_min = limits[0]
x_max = limits[2]
y_min = limits[1]
y_max = limits[3]
y_scale = 1
x_scale = 1
if (y_max - y_min) != 0:
y_scale = self.height() / (y_max - y_min)
if (x_max - x_min) != 0:
x_scale = self.width() / (x_max - x_min)
scale = min(y_scale, x_scale) * 0.9
self._offset.x = -(x_min + (x_max - x_min) / 2)
self._offset.y = -(y_min + (y_max - y_min) / 2)
self._scale = scale
self.update()
def on_delete(self):
txt = "Are you sure you want to delete these geometries?"
ret = QMessageBox.warning(self, "Delete geometries?", txt, QMessageBox.Yes | QMessageBox.Cancel)
if ret == QMessageBox.Yes:
remove_key_points(self._sketch, self._selected_key_points)
remove_edges(self._sketch, self._selected_edges)
remove_texts(self._sketch, self._selected_texts)
self._selected_key_points.clear()
self._selected_edges.clear()
def on_escape(self):
self.setCursor(Qt.ArrowCursor)
self._doc.set_status("", 0, True)
for event_handler in self._escape_event_handlers:
event_handler()
self._main_window.update_ribbon_state()
def set_sketch(self, sketch):
self._sketch = sketch
self.on_escape()
self.update()
    def mouseReleaseEvent(self, q_mouse_event):
        if q_mouse_event.button() == Qt.MiddleButton:
            self._states.middle_button_hold = False
            return
        if q_mouse_event.button() == Qt.LeftButton:
            self._states.left_button_hold = False
            self._kp_move = None
            return
def on_create_composite_area(self):
self.on_escape()
self._states.create_composite_area = True
self._doc.set_status("Select base area for new area", 0, True)
self._main_window.update_ribbon_state()
def mouseMoveEvent(self, q_mouse_event):
self.update_status()
position = q_mouse_event.pos()
update_view = False
if self._mouse_position is not None:
mouse_move_x = self._mouse_position.x() - position.x()
mouse_move_y = self._mouse_position.y() - position.y()
else:
mouse_move_x = 0
mouse_move_y = 0
self._mouse_position = position
width = self.width() / 2
height = self.height() / 2
scale = self._scale
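        # Convert the mouse position from widget pixels to sketch coordinates
        # (the view is centered on the widget, scaled, and y-flipped).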
x = (self._mouse_position.x() - width) / scale - self._offset.x
y = -((self._mouse_position.y() - height) / scale + self._offset.y)
sketch = self._sketch
if sketch is None:
return
if self._states.middle_button_hold:
self._offset.x -= mouse_move_x / scale
self._offset.y += mouse_move_y / scale
update_view = True
if self._states.left_button_hold:
if self._kp_move is not None:
if self._kp_move.get_x_parameter() is None:
self._kp_move.x = x
update_view = True
if self._kp_move.get_y_parameter() is None:
self._kp_move.y = y
update_view = True
for event_handler in self._mouse_move_event_handlers:
if event_handler(scale, x, y):
update_view = True
if update_view:
self.update()
def mousePressEvent(self, q_mouse_event):
self.setFocus()
position = q_mouse_event.pos()
        if q_mouse_event.button() == Qt.MiddleButton:
self._states.middle_button_hold = True
self._pan_ref_pos = position
return
        if q_mouse_event.button() == Qt.LeftButton:
self._states.left_button_hold = True
self._move_ref_pos = position
half_width = self.width() / 2
half_height = self.height() / 2
scale = self._scale
x = (self._mouse_position.x() - half_width) / scale - self._offset.x
y = -((self._mouse_position.y() - half_height) / scale + self._offset.y)
# **** Keypoint move ****
if self._states.left_button_hold and self._kp_hover is not None and self._states.allow_move:
if self._kp_hover in self._selected_key_points and self._kp_hover.editable:
self._kp_move = self._kp_hover
for event_handler in self._mouse_press_event_handlers:
event_handler(scale, x, y)
def wheelEvent(self, event):
if self._mouse_position is not None:
delta = event.angleDelta().y() / 8
if self._scale + self._scale * (delta * 0.01) > 0:
self._scale += self._scale * (delta * 0.01)
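                # Shift the offset toward the cursor so the zoom stays
                # centered on the mouse position.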
width = self.width() / 2
height = self.height() / 2
scale = self._scale
x = self._mouse_position.x() - width
y = self._mouse_position.y() - height
distx = x / scale
disty = y / scale
self._offset.x -= distx * (delta * 0.01)
self._offset.y += disty * (delta * 0.01)
self.update()
def paintEvent(self, event):
qp = QPainter()
qp.begin(self)
p1 = QPoint(0, 0)
p2 = QPoint(self.width(), self.height())
p3 = QPoint(0, self.height())
gradient = QLinearGradient(p1, p3)
gradient.setColorAt(0, self._gradient_color_top)
gradient.setColorAt(1, self._gradient_color_bottom)
qp.fillRect(event.rect(), gradient)
qp.setRenderHint(QPainter.HighQualityAntialiasing)
cx = self._offset.x * self._scale + self.width() / 2
cy = -self._offset.y * self._scale + self.height() / 2
qp.setPen(self._axis_pen)
if self.width() > cx > 0:
qp.drawLine(QPointF(cx, 0), QPointF(cx, self.height()))
if self.height() > cy > 0:
qp.drawLine(QPointF(0, cy), QPointF(self.width(), cy))
qp.save()
qp.translate(self.width()/2, self.height()/2)
qp.scale(self._scale, self._scale)
qp.translate(self._offset.x, -self._offset.y)
try:
self.draw_instances(event, qp)
self.draw_areas(event, qp)
self.draw_edges(event, qp)
self.draw_texts(event, qp)
except Exception as e:
print(str(e))
traceback.print_exc()
qp.restore()
qp.end()
def draw_texts(self, event, qp: QPainter):
if self._sketch is None:
return
normal_pen = QPen(QColor(0, 0, 0), 2)
kp_pen_hover = QPen(QColor(0, 120, 255), 3)
kp_pen_hl = QPen(QColor(180, 50, 0), 3)
qp.setPen(normal_pen)
for text in self._sketch.get_texts():
if type(text) is Text:
draw_text(text, qp, 1/self._scale)
elif type(text) is Attribute:
draw_attribute(text, qp, 1/self._scale)
if self._text_hover is not None:
qp.setPen(kp_pen_hover)
if type(self._text_hover) is Text:
draw_text(self._text_hover, qp, 1/self._scale)
elif type(self._text_hover) is Attribute:
draw_attribute(self._text_hover, qp, 1/self._scale)
qp.setPen(kp_pen_hl)
for text in self._selected_texts:
if type(text) is Text:
draw_text(text, qp, 1/self._scale)
elif type(text) is Attribute:
draw_attribute(text, qp, 1/self._scale)
def draw_edges(self, event, qp):
edge_thickness = 6000/self._scale
if not self._states.show_thickness:
edge_thickness = 0
pens = create_pens(self._doc, edge_thickness)
pens_hover = create_pens(self._doc, edge_thickness, QColor(100, 100, 200), 2)
pens_select_high = create_pens(self._doc, edge_thickness, QColor(255, 0, 0), 3)
pens_select = create_pens(self._doc, edge_thickness, QColor(255, 255, 255))
if self._is_dark_theme:
kp_pen = QPen(QColor(0, 200, 200), 1/self._scale)
kp_pen_hl = QPen(QColor(190, 0, 0), 3/self._scale)
kp_pen_hover = QPen(QColor(0, 60, 150), 3/self._scale)
else:
kp_pen = QPen(QColor(0, 100, 200), 1/self._scale)
kp_pen_hl = QPen(QColor(180, 50, 0), 3/self._scale)
kp_pen_hover = QPen(QColor(0, 120, 255), 3/self._scale)
if self._sketch is None:
return
edges = self._sketch.get_edges()
for edge in edges:
draw_edge(edge, qp, pens, None)
for edge in self._selected_edges:
draw_edge(edge, qp, pens_select_high, None)
for edge in self._selected_edges:
draw_edge(edge, qp, pens_select, None)
if self._edge_hover is not None:
draw_edge(self._edge_hover, qp, pens_hover, None)
qp.setPen(pens['default'])
key_points = self._sketch.get_keypoints()
for kp in key_points:
qp.setPen(kp_pen)
key_point = kp
if self._kp_hover is key_point and self._states.select_kp:
qp.setPen(kp_pen_hover)
if self._states.show_key_points or self._kp_hover is key_point or self._states.set_similar_x or self._states.set_similar_y:
draw_kp(qp, key_point, self._scale)
qp.setPen(kp_pen_hl)
for key_point in self._selected_key_points:
draw_kp(qp, key_point, self._scale)
def draw_areas(self, event, qp: QPainter):
if self._sketch is None:
return
area_brush = QBrush(QColor(150, 150, 150, 80))
area_hover_brush = QBrush(QColor(150, 150, 200, 80))
area_selected_brush = QBrush(QColor(150, 150, 200, 120))
areas = self._sketch.get_areas()
qp.setPen(Qt.NoPen)
for area in areas:
brush = area_brush
if area in self._selected_areas:
brush = area_selected_brush
if area == self._area_hover:
brush = area_hover_brush
draw_area(area, qp, self._states.show_area_names or area in self._selected_areas, brush, 1/self._scale, None)
if area.brush is not None:
if area.brush.type == BrushType.Solid:
brush = QBrush(QColor(0, 0, 0))
else:
brush = QBrush(QColor(0, 0, 0), Qt.HorPattern)
transform = QTransform().scale(1 / self._scale, 1 / self._scale).rotate(area.brush_rotation)
brush.setTransform(transform)
draw_area(area, qp, self._states.show_area_names or area in self._selected_areas, brush, 1/self._scale, None)
def draw_instances(self, event, qp):
if self._sketch is None:
return
for sketch_inst in self._sketch.sketch_instances:
si = sketch_inst.sketch
os = sketch_inst.offset/sketch_inst.scale
pens = create_pens(self._doc, 6000/(self._scale*sketch_inst.scale))
sketch_view = get_sketch_view(si)
sketch_view.draw_instance(qp, pens, sketch_inst.scale, 1 / self._scale, os, Vertex(), sketch_inst.rotation, sketch_inst.uid)
#draw_sketch(qp, si, sketch_inst.scale , 1/self._scale, os, Vertex(), sketch_inst.rotation, pens, {}, sketch_inst.uid)
if self._instance_hover is not None:
sketch_inst = self._instance_hover
si = sketch_inst.sketch
os = sketch_inst.offset / sketch_inst.scale
hover_pens = create_pens(self._doc, 6000 / (self._scale * sketch_inst.scale), QColor(100, 100, 200), 1)
sketch_view = get_sketch_view(si)
sketch_view.draw_instance(qp, hover_pens, sketch_inst.scale, 1 / self._scale, os, Vertex(), sketch_inst.rotation, sketch_inst.uid)
#draw_sketch(qp, si, sketch_inst.scale, 1 / self._scale, os, Vertex(), sketch_inst.rotation, hover_pens, {}, sketch_inst.uid)
for sketch_inst in self._selected_instances:
si = sketch_inst.sketch
os = sketch_inst.offset / sketch_inst.scale
sel_pens = create_pens(self._doc, 6000 / (self._scale * sketch_inst.scale), QColor(255, 0, 0), 2)
sketch_view = get_sketch_view(si)
sketch_view.draw_instance(qp, sel_pens, sketch_inst.scale, 1/self._scale, os, Vertex(), sketch_inst.rotation, sketch_inst.uid)
#draw_sketch(qp, si, sketch_inst.scale, 1 / self._scale, os, Vertex(), sketch_inst.rotation, sel_pens, {},sketch_inst.uid)
def update_status(self):
self._doc.set_status("")
|
{
"content_hash": "c0043df9b5027938d833bd4308d1d8d1",
"timestamp": "",
"source": "github",
"line_count": 496,
"max_line_length": 133,
"avg_line_length": 30.086693548387096,
"alnum_prop": 0.6705756215238223,
"repo_name": "pracedru/PracedruDesign",
"id": "c0ec2407b30168938bc820707515343971d96849",
"size": "14923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GUI/Widgets/SketchEditorView.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4428"
},
{
"name": "CSS",
"bytes": "3334"
},
{
"name": "Python",
"bytes": "637422"
}
],
"symlink_target": ""
}
|
import sys
IS_PYTHON3 = sys.version_info >= (3, 0)
if IS_PYTHON3:
BASESTRING_TYPES = str
else:
BASESTRING_TYPES = (str, unicode)
def _unicode(item):
if IS_PYTHON3:
return str(item)
else:
return unicode(item)
|
{
"content_hash": "cecc5e6c0158d32a3d832a0ad88255cc",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 39,
"avg_line_length": 16.142857142857142,
"alnum_prop": 0.668141592920354,
"repo_name": "GoUbiq/pyexchange",
"id": "f1ee45da131f45ac58a6c2637c716173d254f47f",
"size": "226",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pyexchange/compat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "264201"
}
],
"symlink_target": ""
}
|
import json
import select
import socket
import settings
from pymongo import MongoClient
from bson import json_util
bufferSize = 1024 # whatever you need
print("Starting ndlogger daemon")
print("UDP target IP:", settings.LOG_SERVER_IP)
print("UDP target port:", settings.LOG_SERVER_PORT)
#start
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((settings.LOG_SERVER_IP, settings.LOG_SERVER_PORT))
s.setblocking(0)
client = MongoClient(settings.LOG_SERVER_DB_HOST, settings.LOG_SERVER_DB_PORT)
db = client[settings.LOG_SERVER_DB_NAME]
def addToDB(data):
messages = db.messages
post_id = messages.insert(data)
return post_id
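# Block on the socket until a datagram arrives, then decode and persist it.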
while True:
    result = select.select([s], [], [])
    msg = result[0][0].recv(bufferSize)
    # print('%s ----> %s' % (len(msg), msg))
    print('[>]%s bytes' % len(msg))
    data = json.loads(msg, object_hook=json_util.object_hook)  # decoding can lose time here
    data['hour'] = data['timestamp'].hour
    print('[%s]%s\t%s' % (data['level'][0:1], data['timestamp'], data['message']))
    post_id = addToDB(data)
    print('[<]added to db:%s' % post_id)
|
{
"content_hash": "5486e8e40667e9dee97cbe6423adba78",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 87,
"avg_line_length": 23.1875,
"alnum_prop": 0.6720575022461814,
"repo_name": "russenreaktor/tdlogger",
"id": "b55f0a2a4b325065e0bceabe2fccf8c1afd08310",
"size": "1136",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ndlogger-daemon.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "526"
},
{
"name": "JavaScript",
"bytes": "4786"
},
{
"name": "Python",
"bytes": "19930"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'tsukkomi'
copyright = '2016, Spoqa Inc'
author = 'Spoqa Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'tsukkomidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tsukkomi.tex', 'tsukkomi Documentation',
'Spoqa Inc', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tsukkomi', 'tsukkomi Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tsukkomi', 'tsukkomi Documentation',
author, 'tsukkomi', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
{
"content_hash": "99d6a4e16dcd2090abf84179a961eb9b",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 80,
"avg_line_length": 32.55595667870036,
"alnum_prop": 0.7039254823685961,
"repo_name": "spoqa/tsukkomi",
"id": "c34c0deff24d9f375254f560c6a56c8f89bfd02b",
"size": "9462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17600"
},
{
"name": "Shell",
"bytes": "99"
}
],
"symlink_target": ""
}
|
class FacebookCanvasMiddleware(object):
def process_response(self, request, response):
if 'signed_request' in request.REQUEST:
response.set_cookie('signed_request', request.REQUEST['signed_request'])
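            # The P3P compact policy header lets older IE versions accept
            # this third-party cookie inside the Facebook canvas iframe.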
response['P3P'] = 'CP="IDC DSP COR ADM DEVi TAIi PSA PSD IVAi IVDi CONi HIS OUR IND CNT"'
return response
|
{
"content_hash": "640587c96436de445afcde9130558767",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 97,
"avg_line_length": 44.875,
"alnum_prop": 0.6601671309192201,
"repo_name": "gsiegman/python-facebook",
"id": "73c13ddaa1abf40735bc38ca513a6c780aaa6ffc",
"size": "359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "facebook/extras/django/facebook/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8989"
}
],
"symlink_target": ""
}
|
import discord
from discord.voice_client import StreamPlayer
from roboto.commands import dispatcher, TaskState, Commands
class ServerState(object):
def __init__(self, server_id):
from roboto import text
self.server_id = server_id
self._voice_channel_id = None
self._media_player = None
self.markov_model = text.MarkovModel(server_id)
self.ready_state = False
self.voice_client = None
self.media_continuous = True
self.song_id = None
async def on_connect(self):
task = TaskState(Commands.server_connect, [], server_id=self.server_id)
await dispatcher.add_task(task)
self.ready_state = True
    def has_voice(self) -> bool:
        return self._voice_channel_id is not None
def set_voice_channel(self, channel_id: str):
self._voice_channel_id = channel_id
    def set_active_media_player(self, media_player):
        self._media_player = media_player
        if media_player:
            media_player.server_state = self
def get_media_player(self) -> StreamPlayer:
return self._media_player
def get_voice_channel(self, client: discord.Client) -> discord.Channel:
channel = client.get_channel(self._voice_channel_id)
return channel
class ServerManager(object):
def __init__(self):
self._servers = dict()
    def __len__(self):
        return len(self._servers)
async def get_server(self, server_id: str) -> ServerState:
"""
:param server_id:
:return:
:rtype: roboto.state.ServerState
"""
try:
return self._servers[server_id]
except KeyError:
server = ServerState(server_id)
if not server.ready_state:
await server.on_connect()
self._servers[server_id] = server
return server
servers = ServerManager()
|
{
"content_hash": "bcfb63c715d11536f0c3903d2a70134f",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 28.043478260869566,
"alnum_prop": 0.6134366925064599,
"repo_name": "leighmacdonald/roboto",
"id": "ca1324244e6e4bc1b68df3c18c8afa8ebf0ba863",
"size": "1935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roboto/state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1578"
},
{
"name": "Python",
"bytes": "36911"
}
],
"symlink_target": ""
}
|