commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
82bc502cf7bb64236feba6e140d98bb9e555f4ca | Fix assert_raises for catching parents of exceptions. | tests/backport_assert_raises.py | tests/backport_assert_raises.py | from __future__ import unicode_literals
"""
Patch courtesy of:
https://marmida.com/blog/index.php/2012/08/08/monkey-patching-assert_raises/
"""
# code for monkey-patching
import nose.tools
# let's fix nose.tools.assert_raises (which is really unittest.assertRaises)
# so that it always supports context management
# in order for these changes to be available to other modules, you'll need
# to guarantee this module is imported by your fixture before either nose or
# unittest are imported
try:
nose.tools.assert_raises(Exception)
except TypeError:
# this version of assert_raises doesn't support the 1-arg version
class AssertRaisesContext(object):
def __init__(self, expected):
self.expected = expected
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, tb):
self.exception = exc_val
if issubclass(exc_type, self.expected):
return True
nose.tools.assert_equal(exc_type, self.expected)
# if you get to this line, the last assertion must have passed
# suppress the propagation of this exception
return True
def assert_raises_context(exc_type):
return AssertRaisesContext(exc_type)
nose.tools.assert_raises = assert_raises_context
| from __future__ import unicode_literals
"""
Patch courtesy of:
https://marmida.com/blog/index.php/2012/08/08/monkey-patching-assert_raises/
"""
# code for monkey-patching
import nose.tools
# let's fix nose.tools.assert_raises (which is really unittest.assertRaises)
# so that it always supports context management
# in order for these changes to be available to other modules, you'll need
# to guarantee this module is imported by your fixture before either nose or
# unittest are imported
try:
nose.tools.assert_raises(Exception)
except TypeError:
# this version of assert_raises doesn't support the 1-arg version
class AssertRaisesContext(object):
def __init__(self, expected):
self.expected = expected
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, tb):
self.exception = exc_val
nose.tools.assert_equal(exc_type, self.expected)
# if you get to this line, the last assertion must have passed
# suppress the propagation of this exception
return True
def assert_raises_context(exc_type):
return AssertRaisesContext(exc_type)
nose.tools.assert_raises = assert_raises_context
| Python | 0 |
bea4752dea1e7f01257b38faef9e21ba0e946983 | Implement psutil within blackbox tests | tests/blackbox/testlib/utils.py | tests/blackbox/testlib/utils.py | # Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions for blackbox testing.
"""
# isort: STDLIB
import os
import random
import string
from subprocess import PIPE, Popen
# isort: THIRDPARTY
import psutil
# Name prefix, so that we hopefully don't destroy any end user data by mistake!
TEST_PREF = os.getenv("STRATIS_UT_PREFIX", "STRATI$_DE$TROY_ME!_")
def p_n():
"""
Return a random pool name
:return: Random String
"""
return TEST_PREF + "pool" + random_string()
def fs_n():
"""
Return a random FS name
:return: Random String
"""
return TEST_PREF + "fs" + random_string()
def random_string(length=4):
"""
Generates a random string
:param length: Length of random string
:return: String
"""
return "{0}".format(
"".join(random.choice(string.ascii_uppercase) for _ in range(length))
)
def process_exists(name):
"""
Look through processes, using their pids, to find one matching 'name'.
Return None if no such process found, else return the pid.
:param name: name of process to check
:type name: str
:return: pid or None
:rtype: int or NoneType
"""
for proc in psutil.process_iter(["name"]):
try:
if proc.name() == name:
return proc.pid
except psutil.NoSuchProcess:
pass
return None
def umount_mdv():
"""
Locate and umount any stratis mdv mounts
:return: None
"""
with open("/proc/self/mounts", "r") as mounts:
for line in mounts.readlines():
if "/stratis/.mdv-" in line:
mountpoint = line.split()[1]
exec_command(["umount", mountpoint])
def exec_command(cmd):
"""
Executes the specified infrastructure command.
:param cmd: command to execute
:type cmd: list of str
:returns: standard output
:rtype: str
:raises AssertionError: if exit code is non-zero
"""
exit_code, stdout_text, stderr_text = exec_test_command(cmd)
expected_exit_code = 0
if expected_exit_code != exit_code:
print("cmd = %s [%d != %d]" % (str(cmd), expected_exit_code, exit_code))
print("STDOUT= %s" % stdout_text)
print("STDERR= %s" % stderr_text)
assert expected_exit_code == exit_code
return stdout_text
def exec_test_command(cmd):
"""
Executes the specified test command
:param cmd: Command and arguments as list
:type cmd: list of str
:returns: (exit code, std out text, std err text)
:rtype: triple of int * str * str
"""
process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ)
result = process.communicate()
return (
process.returncode,
bytes(result[0]).decode("utf-8"),
bytes(result[1]).decode("utf-8"),
)
| # Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions for blackbox testing.
"""
# isort: STDLIB
import os
import random
import string
from subprocess import PIPE, Popen
# Name prefix, so that we hopefully don't destroy any end user data by mistake!
TEST_PREF = os.getenv("STRATIS_UT_PREFIX", "STRATI$_DE$TROY_ME!_")
def p_n():
"""
Return a random pool name
:return: Random String
"""
return TEST_PREF + "pool" + random_string()
def fs_n():
"""
Return a random FS name
:return: Random String
"""
return TEST_PREF + "fs" + random_string()
def random_string(length=4):
"""
Generates a random string
:param length: Length of random string
:return: String
"""
return "{0}".format(
"".join(random.choice(string.ascii_uppercase) for _ in range(length))
)
def process_exists(name):
"""
Walk the process table looking for executable 'name', returns pid if one
found, else return None
"""
for pid in [pid for pid in os.listdir("/proc") if pid.isdigit()]:
try:
exe_name = os.readlink(os.path.join("/proc/", pid, "exe"))
except OSError:
continue
if exe_name and exe_name.endswith(os.path.join("/", name)):
return pid
return None
def umount_mdv():
"""
Locate and umount any stratis mdv mounts
:return: None
"""
with open("/proc/self/mounts", "r") as mounts:
for line in mounts.readlines():
if "/stratis/.mdv-" in line:
mountpoint = line.split()[1]
exec_command(["umount", mountpoint])
def exec_command(cmd):
"""
Executes the specified infrastructure command.
:param cmd: command to execute
:type cmd: list of str
:returns: standard output
:rtype: str
:raises AssertionError: if exit code is non-zero
"""
exit_code, stdout_text, stderr_text = exec_test_command(cmd)
expected_exit_code = 0
if expected_exit_code != exit_code:
print("cmd = %s [%d != %d]" % (str(cmd), expected_exit_code, exit_code))
print("STDOUT= %s" % stdout_text)
print("STDERR= %s" % stderr_text)
assert expected_exit_code == exit_code
return stdout_text
def exec_test_command(cmd):
"""
Executes the specified test command
:param cmd: Command and arguments as list
:type cmd: list of str
:returns: (exit code, std out text, std err text)
:rtype: triple of int * str * str
"""
process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ)
result = process.communicate()
return (
process.returncode,
bytes(result[0]).decode("utf-8"),
bytes(result[1]).decode("utf-8"),
)
| Python | 0.000002 |
fc94bda4cb840b74fbd1226d69bf0aafc5e16e61 | return when not installed (#283) | pwndbg/commands/rop.py | pwndbg/commands/rop.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import re
import subprocess
import tempfile
import gdb
import pwndbg.commands
import pwndbg.vmmap
parser = argparse.ArgumentParser(description="Dump ROP gadgets with Jon Salwan's ROPgadget tool.",
epilog="Example: rop --grep 'pop rdi' -- --nojop")
parser.add_argument('--grep', type=str,
help='String to grep the output for')
parser.add_argument('argument', nargs='*', type=str,
help='Arguments to pass to ROPgadget')
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWithFile
def rop(grep, argument):
with tempfile.NamedTemporaryFile() as corefile:
# If the process is running, dump a corefile so we get actual addresses.
if pwndbg.proc.alive:
filename = corefile.name
gdb.execute('gcore %s' % filename)
else:
filename = pwndbg.proc.exe
# Build up the command line to run
cmd = ['ROPgadget',
'--binary',
filename]
cmd += argument
try:
io = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except Exception:
print("Could not run ROPgadget. Please ensure it's installed and in $PATH.")
return
(stdout, stderr) = io.communicate()
stdout = stdout.decode('latin-1')
if not grep:
print(stdout)
return
for line in stdout.splitlines():
if re.search(grep, line):
print(line)
@pwndbg.commands.Command
def ropgadget(*a):
return rop(*a)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import re
import subprocess
import tempfile
import gdb
import pwndbg.commands
import pwndbg.vmmap
parser = argparse.ArgumentParser(description="Dump ROP gadgets with Jon Salwan's ROPgadget tool.",
epilog="Example: rop --grep 'pop rdi' -- --nojop")
parser.add_argument('--grep', type=str,
help='String to grep the output for')
parser.add_argument('argument', nargs='*', type=str,
help='Arguments to pass to ROPgadget')
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWithFile
def rop(grep, argument):
with tempfile.NamedTemporaryFile() as corefile:
# If the process is running, dump a corefile so we get actual addresses.
if pwndbg.proc.alive:
filename = corefile.name
gdb.execute('gcore %s' % filename)
else:
filename = pwndbg.proc.exe
# Build up the command line to run
cmd = ['ROPgadget',
'--binary',
filename]
cmd += argument
try:
io = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except Exception:
print("Could not run ROPgadget. Please ensure it's installed and in $PATH.")
(stdout, stderr) = io.communicate()
stdout = stdout.decode('latin-1')
if not grep:
print(stdout)
return
for line in stdout.splitlines():
if re.search(grep, line):
print(line)
@pwndbg.commands.Command
def ropgadget(*a):
return rop(*a)
| Python | 0 |
0a5b7c606a711307bdc41179cf94c0a72c15ee92 | Make BaseCommandTest automatically instantiate commands using decoration magic. | hypebot/commands/hypetest.py | hypebot/commands/hypetest.py | # Copyright 2019 The Hypebot Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing commands.
This file will be a dependency of all tests within hypebot, but will not be
included in the main binary.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from hypebot import basebot
from hypebot import hypecore
from hypebot.core import params_lib
from hypebot.interfaces import interface_factory
from hypebot.protos import channel_pb2
TEST_CHANNEL = channel_pb2.Channel(
id='#test', name='Test', visibility=channel_pb2.Channel.PUBLIC)
def ForCommand(command_cls):
"""Decorator to enable setting the command for each test class."""
def _Internal(test_cls):
test_cls._command_cls = command_cls
return test_cls
return _Internal
class BaseCommandTestCase(unittest.TestCase):
# Set the default bot params (used by core) to something sane for testing.
BOT_PARAMS = params_lib.MergeParams(basebot.BaseBot.DEFAULT_PARAMS, {
'interface': {
'type': 'CaptureInterface',
},
'storage': {
'type': 'MemStore',
'cached_type': 'MemStore',
},
'execution_mode': {
# This currently sets the command prefix to `!`. We should figure out
# a better long-term solution for the command prefix though since this
# can in theory change other behavior within core, but currently
# should have no other impacts.
'dev': False,
},
'commands': {},
'subscriptions': {},
})
@classmethod
def setUpClass(cls):
super(BaseCommandTestCase, cls).setUpClass()
if not hasattr(cls, '_command_cls'):
raise AttributeError(
('%s is missing command initializer. All BaseCommandTestCases must'
' be decorated with @ForCommand and given the command they are'
' testing. For example:\n\n@ForCommand(simple_commands.HelpCommand'
')\nclass HelpCommandTest(BaseCommandTestCase):\n ...') %
cls.__name__)
def setUp(self):
super(BaseCommandTestCase, self).setUp()
self.interface = interface_factory.CreateFromParams(
self.BOT_PARAMS.interface)
self.core = hypecore.Core(self.BOT_PARAMS, self.interface)
# We disable ratelimiting for tests.
self.command = self._command_cls({'ratelimit': {
'enabled': False
}}, self.core)
| # Copyright 2019 The Hypebot Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing commands.
This file will be a dependency of all tests within hypebot, but will not be
included in the main binary.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from hypebot import basebot
from hypebot import hypecore
from hypebot.core import params_lib
from hypebot.interfaces import interface_factory
from hypebot.protos import channel_pb2
TEST_CHANNEL = channel_pb2.Channel(
id='#test', name='Test', visibility=channel_pb2.Channel.PUBLIC)
class BaseCommandTestCase(unittest.TestCase):
# Set the default bot params (used by core) to something sane for testing.
BOT_PARAMS = params_lib.MergeParams(basebot.BaseBot.DEFAULT_PARAMS, {
'interface': {
'type': 'CaptureInterface',
},
'storage': {
'type': 'MemStore',
'cached_type': 'MemStore',
},
'execution_mode': {
# This currently sets the command prefix to `!`. We should figure out
# a better long-term solution for the command prefix though since this
# can in theory change other behavior within core, but currently
# should have no other impacts.
'dev': False,
},
'commands': {},
'subscriptions': {},
})
def setUp(self):
super(BaseCommandTestCase, self).setUp()
self.interface = interface_factory.CreateFromParams(
self.BOT_PARAMS.interface)
self.core = hypecore.Core(self.BOT_PARAMS, self.interface)
| Python | 0 |
6cef7f841fc34321d68e8c85ff7f78682c59eae2 | Add help and version text; check for IO errors | py-chrome-bookmarks.py | py-chrome-bookmarks.py | #!/usr/bin/python
# py-chrome-bookmarks
#
# A script to convert Google Chrome's bookmarks file to the standard HTML-ish
# format.
#
# (c) Benjamin Esham, 2011. See the accompanying README for this file's
# license and other information.
import json, sys, os, re
# html escaping code from http://wiki.python.org/moin/EscapingHtml
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
def html_escape(text):
return ''.join(html_escape_table.get(c,c) for c in text)
def sanitize(string):
res = ''
string = html_escape(string)
for i in range(len(string)):
if ord(string[i]) > 127:
res += '&#x%x;' % ord(string[i])
else:
res += string[i]
return res
def html_for_node(node):
if 'url' in node:
return html_for_url_node(node)
elif 'children' in node:
return html_for_parent_node(node)
else:
return ''
def html_for_url_node(node):
if not re.match("javascript:", node['url']):
return '<dt><a href="%s">%s</a>\n' % (sanitize(node['url']), sanitize(node['name']))
else:
return ''
def html_for_parent_node(node):
return '<dt><h3>%s</h3>\n<dl><p>%s</dl><p>\n' % (sanitize(node['name']),
''.join([html_for_node(n) for n in node['children']]))
def version_text():
old_out = sys.stdout
sys.stdout = sys.stderr
print "py-chrome-bookmarks"
print "(c) 2011, Benjamin Esham"
print "https://github.com/bdesham/py-chrome-bookmarks"
sys.stdout = old_out
def help_text():
version_text()
old_out = sys.stdout
sys.stdout = sys.stderr
print
print "usage: python py-chrome-bookmarks input-file output-file"
print " input-file is the Chrome bookmarks file"
print " output-file is the destination for the generated HTML bookmarks file"
sys.stdout = old_out
# check for help or version requests
if len(sys.argv) != 3 or "-h" in sys.argv or "--help" in sys.argv:
help_text()
exit()
if "-v" in sys.argv or "--version" in sys.argv:
version_text()
exit()
# the actual code here...
in_file = os.path.expanduser(sys.argv[1])
out_file = os.path.expanduser(sys.argv[2])
try:
f = open(in_file, 'r')
except IOError, e:
print >> sys.stderr, "py-chrome-bookmarks: error opening the input file."
print >> sys.stderr, e
exit()
j = json.loads(f.read())
f.close()
try:
out = open(out_file, 'w')
except IOError, e:
print >> sys.stderr, "py-chrome-bookmarks: error opening the output file."
print >> sys.stderr, e
exit()
out.write("""<!DOCTYPE NETSCAPE-Bookmark-file-1>
<meta http-equiv='Content-Type' content='text/html; charset=UTF-8' />
<title>Bookmarks</title>
<h1>Bookmarks</h1>
<dl><p>
<dl>%(bookmark_bar)s</dl>
<dl>%(other)s</dl>
"""
% {'bookmark_bar': html_for_node(j['roots']['bookmark_bar']),
'other': html_for_node(j['roots']['other'])})
out.close()
| #!/usr/bin/python
# py-chrome-bookmarks
#
# A script to convert Google Chrome's bookmarks file to the standard HTML-ish
# format.
#
# (c) Benjamin Esham, 2011. See the accompanying README for this file's
# license and other information.
import json, sys, os, re
# html escaping code from http://wiki.python.org/moin/EscapingHtml
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
def html_escape(text):
return ''.join(html_escape_table.get(c,c) for c in text)
def sanitize(string):
res = ''
string = html_escape(string)
for i in range(len(string)):
if ord(string[i]) > 127:
res += '&#x%x;' % ord(string[i])
else:
res += string[i]
return res
def html_for_node(node):
if 'url' in node:
return html_for_url_node(node)
elif 'children' in node:
return html_for_parent_node(node)
else:
return ''
def html_for_url_node(node):
if not re.match("javascript:", node['url']):
return '<dt><a href="%s">%s</a>\n' % (sanitize(node['url']), sanitize(node['name']))
else:
return ''
def html_for_parent_node(node):
return '<dt><h3>%s</h3>\n<dl><p>%s</dl><p>\n' % (sanitize(node['name']),
''.join([html_for_node(n) for n in node['children']]))
in_file = os.path.expanduser(sys.argv[1])
out_file = os.path.expanduser(sys.argv[2])
f = open(in_file, 'r')
j = json.loads(f.read())
f.close()
out = open(out_file, 'w')
out.write("""<!DOCTYPE NETSCAPE-Bookmark-file-1>
<meta http-equiv='Content-Type' content='text/html; charset=UTF-8' />
<title>Bookmarks</title>
<h1>Bookmarks</h1>
<dl><p>
<dl>%(bookmark_bar)s</dl>
<dl>%(other)s</dl>
"""
% {'bookmark_bar': html_for_node(j['roots']['bookmark_bar']),
'other': html_for_node(j['roots']['other'])})
out.close()
| Python | 0 |
98467f55ef8526d343065da7d6a896b16539fa53 | use consistent hash for etag | http_agent/utils/etag.py | http_agent/utils/etag.py | from zlib import adler32
def make_entity_tag(body):
checksum = adler32(body.encode())
return '"{checksum}"'.format(checksum=checksum)
| def make_entity_tag(body):
checksum = hash(body) + (1 << 64)
return '"{checksum}"'.format(checksum=checksum)
| Python | 0.000001 |
82f6a4cf6e1e5ceef2c48811eceb93e8a7ce13e3 | Add handler for demo. | filestore/file_readers.py | filestore/file_readers.py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .retrieve import HandlerBase
import six
import logging
import h5py
import numpy as np
import os.path
logger = logging.getLogger(__name__)
class _HDF5HandlerBase(HandlerBase):
def open(self):
if self._file:
return
self._file = h5py.File(self._filename)
def close(self):
super(HDF5HandlerBase, self).close()
self._file.close()
class HDF5DatasetSliceHandler(_HDF5HandlerBase):
"Handler for Stuart's first detector demo"
def __init__(self, filename, frame_per_point):
self._filename = filename
self.open()
def __call__(self, point_number):
dataset_name = '/entry/data/data'
# Don't read out the dataset until it is requested for the first time.
if not hasattr(self, '_dataset'):
self._dataset = self._file[dataset_name]
return self._dataset[point_number, :, :]
class _HdfMapsHandlerBase(_HDF5HandlerBase):
"""
Reader for XRF data stored in hdf5 files.
The data set is assumed to be in a group called MAPS and stored
as a 3D array ordered [energy, x, y].
Parameters
----------
filename : str
Path to physical location of file
dset_path : str
The path to the dataset inside of 'MAPS'
"""
def __init__(self, filename, dset_path):
self._filename = filename
self._dset_path = dset_path
self._file = None
self._dset = None
self.open()
def open(self):
"""
Open the file for reading.
Provided as a stand alone function to allow re-opening of the handler
"""
if self._file:
return
self._file = h5py.File(self._filename, mode='r')
self._dset = self._file['/'.join(['MAPS', self._dset_path])]
def close(self):
"""
Close the underlying file
"""
super(_HdfMapsHandlerBase, self).close()
self._file.close()
def __call__(self):
if not self._file:
raise RuntimeError("File is not open")
class HDFMapsSpectrumHandler(_HdfMapsHandlerBase):
def __call__(self, x, y):
"""
Return the spectrum at the x, y position
Parameters
----------
x : int
raster index in the x direction
y : int
raster index in the y direction
Returns
-------
spectrum : ndarray
The MCA channels
"""
super(HDFMapsSpectrumHandler, self).__call__()
return self._dset[:, x, y]
class HDFMapsEnergyHandler(_HdfMapsHandlerBase):
def __call__(self, e_index):
"""
Return the raster plane at a fixed energy
Parameters
----------
e_index : int
The index of the engery
Returns
-------
plane : ndarray
The raster image at a fixed energy.
"""
super(HDFMapsEnergyHandler, self).__call__()
return self._dset[e_index, :, :]
class NpyHandler(HandlerBase):
"""
Class to deal with reading npy files
Parameters
----------
fpath : str
Path to file
mmap_mode : {'r', 'r+', c}, optional
memmap mode to use to open file
"""
def __init__(self, filename, mmap_mode=None):
self._mmap_mode = mmap_mode
if not os.path.exists(filename):
raise IOError("the requested file {fpath} does not exst")
self._fpath = filename
def __call__(self):
return np.load(self._fpath, self._mmap_mode)
| from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .retrieve import HandlerBase
import six
import logging
import h5py
import numpy as np
import os.path
logger = logging.getLogger(__name__)
class _HdfMapsHandlerBase(HandlerBase):
"""
Reader for XRF data stored in hdf5 files.
The data set is assumed to be in a group called MAPS and stored
as a 3D array ordered [energy, x, y].
Parameters
----------
filename : str
Path to physical location of file
dset_path : str
The path to the dataset inside of 'MAPS'
"""
def __init__(self, filename, dset_path):
self._filename = filename
self._dset_path = dset_path
self._file = None
self._dset = None
self.open()
def open(self):
"""
Open the file for reading.
Provided as a stand alone function to allow re-opening of the handler
"""
if self._file:
return
self._file = h5py.File(self._filename, mode='r')
self._dset = self._file['/'.join(['MAPS', self._dset_path])]
def close(self):
"""
Close the underlying file
"""
super(_HdfMapsHandlerBase, self).close()
self._file.close()
def __call__(self):
if not self._file:
raise RuntimeError("File is not open")
class HDFMapsSpectrumHandler(_HdfMapsHandlerBase):
def __call__(self, x, y):
"""
Return the spectrum at the x, y position
Parameters
----------
x : int
raster index in the x direction
y : int
raster index in the y direction
Returns
-------
spectrum : ndarray
The MCA channels
"""
super(HDFMapsSpectrumHandler, self).__call__()
return self._dset[:, x, y]
class HDFMapsEnergyHandler(_HdfMapsHandlerBase):
def __call__(self, e_index):
"""
Return the raster plane at a fixed energy
Parameters
----------
e_index : int
The index of the engery
Returns
-------
plane : ndarray
The raster image at a fixed energy.
"""
super(HDFMapsEnergyHandler, self).__call__()
return self._dset[e_index, :, :]
class NpyHandler(HandlerBase):
"""
Class to deal with reading npy files
Parameters
----------
fpath : str
Path to file
mmap_mode : {'r', 'r+', c}, optional
memmap mode to use to open file
"""
def __init__(self, filename, mmap_mode=None):
self._mmap_mode = mmap_mode
if not os.path.exists(filename):
raise IOError("the requested file {fpath} does not exst")
self._fpath = filename
def __call__(self):
return np.load(self._fpath, self._mmap_mode)
| Python | 0 |
996e33d1662517ba8a1671d8549fa3d189f3b1d0 | reorganize script and allow user provided date ranges | get-challenges.py | get-challenges.py | #!/usr/bin/env python3
from datetime import datetime, timezone
from dateutil import parser
import click
import os.path
import os
import shutil
import praw
import sys
import re
_SITE_NAME = 'dailyprogrammer-bot'
_SUBREDDIT = 'dailyprogrammer'
_DATE_FORMAT = '%Y-%m-%d'
_RATING_PATTERN = r'(?<=\[)(?!psa)[a-z]*(?=\])'
_SANITIZE_PATTERN = r'(\[[0-9-/]+\]|\[[a-z/]+\]|[^0-9a-z\s])'
_SYMBOL_PATTERN = r'[^0-9a-z\s]'
_WEEKLY_MONTHLY_PATTERN = r'(monthly|weekly|week-long)'
_KNOWN_DIRECTORIES = [
'all',
'bonus',
'difficult',
'easy',
'extra',
'hard',
'intermediate',
'medium',
'monthly',
'special',
'unknown',
'weekly'
]
@click.group()
def cli():
"""A cli for pulling and managing challenges from r/dailyprogrammer."""
pass
@cli.command()
@click.argument('start')
@click.argument('end', required=False, default='')
def pull(start, end):
"""
Pull challenges for the specified START and END times.
START and END must be parsable by dateutil.parser.parse with the following exception.
START can be either 'all' or 'today', which will automatically pick your START and
END times. In this case END will be ignored if provided.
"""
if start == 'all':
kwargs = {}
elif start == 'today':
now = datetime.utcnow()
start = datetime(now.year, now.month, now.day, 0, 0, 0, tzinfo=timezone.utc).timestamp()
end = datetime(now.year, now.month, now.day, 23, 59, 59, tzinfo=timezone.utc).timestamp()
kwargs = {'start': start, 'end': end}
else:
try:
start = parser.parse(start).timestamp()
except ValueError:
click.echo('"{}" is unparsable as a datetime'.format(start))
sys.exit(1)
try:
end = parser.parse(end).timestamp()
except ValueError:
click.echo('"{}" is unparsable as a datetime'.format(end))
sys.exit(1)
if start > end:
click.echo('start must be before end')
sys.exit(1)
kwargs = {'start': start, 'end': end}
_get_challenges(**kwargs)
@cli.command()
def clean():
"""
Destroy the directories that challenges are located in to prepare for a clean pull.
\b
Known directories:
./all
./bonus
./difficult
./easy
./extra
./hard
./intermediate
./medium
./monthly
./special
./unknown
./weekly
"""
for d in _KNOWN_DIRECTORIES:
try:
shutil.rmtree(d)
except FileNotFoundError:
continue
def _get_challenges(start=None, end=None):
reddit = praw.Reddit(_SITE_NAME)
subreddit = reddit.subreddit(_SUBREDDIT)
for submission in subreddit.submissions(start=start, end=end):
_parse(submission)
def _parse(submission):
title = submission.title.lower()
if 'challenge #' not in title and 'weekly #' not in title:
return
sub_date = datetime.fromtimestamp(submission.created_utc).strftime(_DATE_FORMAT)
rating = _get_challenge_rating(title)
sanitized_title = _sanitize_title(title)
challenge_dir = os.path.join(os.getcwd(), '{}/{}_{}/'.format(rating, sub_date, sanitized_title))
click.echo(challenge_dir)
os.makedirs(challenge_dir, exist_ok=True)
readme = '{}\n\n{}\n\n{}\n'.format(submission.title, submission.selftext, submission.url)
with open(os.path.join(challenge_dir, 'README.md'), 'w') as f_out:
f_out.write(readme)
def _get_challenge_rating(title):
match = re.search(_RATING_PATTERN, title) or re.search(_WEEKLY_MONTHLY_PATTERN, title)
return re.sub(_SYMBOL_PATTERN, '-', match.group() if match else 'unknown')
def _sanitize_title(title):
return '-'.join(re.sub(_SANITIZE_PATTERN, ' ', title).split())
if __name__ == '__main__':
cli()
| #!/usr/bin/env python3
from datetime import datetime, timezone
import click
import os.path
import os
import shutil
import praw
import pytz
import re
_SITE_NAME = 'dailyprogrammer-bot'
_SUBREDDIT = 'dailyprogrammer'
_FIRST_SUBMISSION_DATE = datetime(2012, 2, 9, tzinfo=pytz.timezone('America/Los_Angeles'))
_DATE_FORMAT = '%Y-%m-%d'
_RATING_PATTERN = r'(?<=\[)(?!psa)[a-z]*(?=\])'
_SANITIZE_PATTERN = r'(\[[0-9-/]+\]|\[[a-z/]+\]|[^0-9a-z\s])'
_SYMBOL_PATTERN = r'[^0-9a-z\s]'
_WEEKLY_MONTHLY_PATTERN = r'(monthly|weekly|week-long)'
@click.group()
def cli():
pass
@cli.command('all')
def get_all():
reddit = praw.Reddit(_SITE_NAME)
subreddit = reddit.subreddit(_SUBREDDIT)
for submission in subreddit.submissions():
_parse(submission)
@cli.command('today')
def get_today():
reddit = praw.Reddit(_SITE_NAME)
subreddit = reddit.subreddit(_SUBREDDIT)
today = datetime.utcnow()
start, end = _get_day_boundaries(today.year, today.month, today.day)
for submission in subreddit.submissions(start=start, end=end):
_parse(submission)
@cli.command()
def clean():
known_directories = [
'all',
'bonus',
'difficult',
'easy',
'extra',
'hard',
'intermediate',
'medium',
'monthly',
'special',
'unknown',
'weekly'
]
for d in known_directories:
try:
shutil.rmtree(d)
except FileNotFoundError:
continue
def _parse(submission):
os.getcwd()
title = submission.title.lower()
if 'challenge #' not in title and 'weekly #' not in title:
return
sub_date = datetime.fromtimestamp(submission.created_utc).strftime(_DATE_FORMAT)
rating = _get_challenge_rating(title)
sanitized_title = _sanitize_title(title)
challenge_dir = os.path.join(os.getcwd(), '{}/{}_{}/'.format(rating, sub_date, sanitized_title))
os.makedirs(challenge_dir, exist_ok=True)
click.echo(challenge_dir)
readme = '{}\n\n{}\n\n{}\n'.format(submission.title, submission.selftext, submission.url)
with open(os.path.join(challenge_dir, 'README.md'), 'w') as f_out:
f_out.write(readme)
def _get_challenge_rating(title):
match = re.search(_RATING_PATTERN, title) or re.search(_WEEKLY_MONTHLY_PATTERN, title)
return re.sub(_SYMBOL_PATTERN, '-', match.group() if match else 'unknown')
def _sanitize_title(title):
return '-'.join(re.sub(_SANITIZE_PATTERN, ' ', title).split())
def _get_day_boundaries(year, month, day):
start = datetime(year, month, day, 0, 0, 0, tzinfo=timezone.utc)
end = datetime(year, month, day, 23, 59, 59, tzinfo=timezone.utc)
return start.timestamp(), end.timestamp()
if __name__ == '__main__':
cli()
| Python | 0 |
ec668c693051f70026360ac2f3bc67ced6c01a21 | fix little bug | src/fb_messenger/test/test_attachements.py | src/fb_messenger/test/test_attachements.py | import unittest
class FirstTest(unittest.TestCase):
def test_first(self):
self.assertEqual(True, True, 'incorrect types')
| import unittest
class FirstTest(unittest.TestCase):
def test_first(self):
self.assertEqual(True, False, 'incorrect types')
| Python | 0.000001 |
4979e8e5ee8ac6cb86ab260f44f052b27381eeb6 | bump version | giddy/__init__.py | giddy/__init__.py | __version__ = "2.0.0"
# __version__ has to be defined in the first line
"""
:mod:`giddy` --- Spatial Dynamics and Mobility
==============================================
"""
from . import directional
from . import ergodic
from . import markov
from . import mobility
from . import rank
from . import util | __version__ = "1.2.0"
# __version__ has to be defined in the first line
"""
:mod:`giddy` --- Spatial Dynamics and Mobility
==============================================
"""
from . import directional
from . import ergodic
from . import markov
from . import mobility
from . import rank
from . import util | Python | 0 |
522906d2842d90722776f898015fde060c967401 | Update cam.py | pyCam/build_0.3/cam.py | pyCam/build_0.3/cam.py | import cv2
import numpy as np
from twilio.rest import TwilioRestClient
import time
#importing modules ^^
body_cascade = cv2.CascadeClassifier('haarcascade_fullbody.xml')
#importing cascade-classfier ^^
vc = cv2.VideoCapture(0)
#finding default camera ^^
while -1:
ret, img = vc.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
bodies = body_cascade.detectMultiScale(gray, 1.2, 2)
#converting img frame by frame to suitable type ^^
for (x,y,w,h) in bodies:
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,171),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
client = TwilioRestClient("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") #account_sid, auth_token for twilio accaount.
client.messages.create(to="+15122997254", from_="+15125807197", #user number, twilio number
body="Alert: person(s) on property.") #messege
time.sleep(300)
#look for features, draw box on features, sends sms upon sight of features ^^
cv2.imshow('WebDetect',img)
k = cv2.waitKey(30) & 0xff
if k == 27:
vc.release()
cv2.destroyAllWindows()
#shows video feed, ESC key kills program ^^
| import cv2
import numpy as np
from twilio.rest import TwilioRestClient
import time
#importing modules ^^
body_cascade = cv2.CascadeClassifier('haarcascade_fullbody.xml')
#importing cascade-classfiers ^^
vc = cv2.VideoCapture(0)
#finding default camera ^^
while -1:
ret, img = vc.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
bodies = body_cascade.detectMultiScale(gray, 1.2, 2)
#converting img frame by frame to suitable type ^^
for (x,y,w,h) in bodies:
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,171),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
client = TwilioRestClient("AC47b13b617c5806614265237ce06fa110", "e4e74dbdf6719d769422a90225dd8814") #account_sid, auth_token for twilio accaount.
client.messages.create(to="+15122997254", from_="+15125807197", #my number, twilio number
body="Alert: person(s) on property.") #messege
time.sleep(300)
#look for features, draw box on features, sends sms upon sight of features ^^
cv2.imshow('WebDetect',img)
k = cv2.waitKey(30) & 0xff
if k == 27:
vc.release()
cv2.destroyAllWindows()
#shows video feed, ESC key kills program ^^
| Python | 0.000001 |
64c08dfc40240c7b1b4b876b12bdb57ace22d675 | remove print statement | gippy/__init__.py | gippy/__init__.py | #!/usr/bin/env python
################################################################################
# GIPPY: Geospatial Image Processing library for Python
#
# AUTHOR: Matthew Hanson
# EMAIL: matt.a.hanson@gmail.com
#
# Copyright (C) 2015 Applied Geosolutions
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from .version import __version__
def mac_update():
""" update search path on mac """
import sys
if sys.platform == 'darwin':
import os
from subprocess import check_output
lib = 'libgip.so'
path = os.path.dirname(__file__)
for f in ['_gippy.so', '_algorithms.so']:
fin = os.path.join(path, f)
cmd = ['install_name_tool', '-change', lib, os.path.join(path, lib), fin]
print cmd
check_output(cmd)
mac_update()
from gippy import init, DataType, GeoImage, GeoVector, Options
# register GDAL and OGR formats
init()
# cleanup functions
del gippy
del version
del init
del mac_update
| #!/usr/bin/env python
################################################################################
# GIPPY: Geospatial Image Processing library for Python
#
# AUTHOR: Matthew Hanson
# EMAIL: matt.a.hanson@gmail.com
#
# Copyright (C) 2015 Applied Geosolutions
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from .version import __version__
def mac_update():
""" update search path on mac """
import sys
print 'here'
if sys.platform == 'darwin':
import os
from subprocess import check_output
lib = 'libgip.so'
path = os.path.dirname(__file__)
for f in ['_gippy.so', '_algorithms.so']:
fin = os.path.join(path, f)
cmd = ['install_name_tool', '-change', lib, os.path.join(path, lib), fin]
print cmd
check_output(cmd)
mac_update()
from gippy import init, DataType, GeoImage, GeoVector, Options
# register GDAL and OGR formats
init()
# cleanup functions
del gippy
del version
del init
del mac_update
| Python | 0.999999 |
30f89eacb428af7091d238d39766d6481735c670 | fix for BASH_FUNC_module on qstat output | igf_airflow/hpc/hpc_queue.py | igf_airflow/hpc/hpc_queue.py | import json
import subprocess
from collections import defaultdict
from tempfile import TemporaryFile
def get_pbspro_job_count(job_name_prefix=''):
'''
A function for fetching running and queued job information from a PBSPro HPC cluster
:param job_name_prefix: A text to filter running jobs, default ''
:returns: A defaultdict object with the following structure
{ job_name: {'Q': counts, 'R': counts }}
'''
try:
with TemporaryFile() as tmp_file:
subprocess.\
check_call(
'qstat -t -f -F json|grep -v BASH_FUNC_module', # this can fix or break pipeline as well
shell=True,
stdout=tmp_file)
tmp_file.seek(0)
json_data = tmp_file.read()
json_data = json.loads(json_data)
jobs = json_data.get('Jobs')
active_jobs = dict()
if jobs is not None:
active_jobs = defaultdict(lambda: defaultdict(int))
if len(jobs) > 0:
for _,job_data in jobs.items():
job_name = job_data.get('Job_Name')
job_state = job_data.get('job_state')
if job_name.startswith(job_name_prefix):
if job_state == 'Q':
active_jobs[job_name]['Q'] += 1
if job_state == 'R':
active_jobs[job_name]['R'] += 1
return active_jobs
except Exception as e:
raise ValueError('Failed to get job counts from hpc, error: {0}'.format(e)) | import json
import subprocess
from collections import defaultdict
from tempfile import TemporaryFile
def get_pbspro_job_count(job_name_prefix=''):
'''
A function for fetching running and queued job information from a PBSPro HPC cluster
:param job_name_prefix: A text to filter running jobs, default ''
:returns: A defaultdict object with the following structure
{ job_name: {'Q': counts, 'R': counts }}
'''
try:
with TemporaryFile() as tmp_file:
subprocess.\
check_call(
['qstat','-t','-f','-F','json'],
stdout=tmp_file)
tmp_file.seek(0)
json_data = tmp_file.read()
json_data = json.loads(json_data)
jobs = json_data.get('Jobs')
active_jobs = dict()
if jobs is not None:
active_jobs = defaultdict(lambda: defaultdict(int))
if len(jobs) > 0:
for _,job_data in jobs.items():
job_name = job_data.get('Job_Name')
job_state = job_data.get('job_state')
if job_name.startswith(job_name_prefix):
if job_state == 'Q':
active_jobs[job_name]['Q'] += 1
if job_state == 'R':
active_jobs[job_name]['R'] += 1
return active_jobs
except Exception as e:
raise ValueError('Failed to get job counts from hpc, error: {0}'.format(e)) | Python | 0.000001 |
8998d0f617791f95b1ed6b4a1fffa0f71752b801 | Update docs/params for initialization methods. | pybo/bayesopt/inits.py | pybo/bayesopt/inits.py | """
Implementation of methods for sampling initial points.
"""
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
# global imports
import numpy as np
# local imports
from ..utils import ldsample
# exported symbols
__all__ = ['init_middle', 'init_uniform', 'init_latin', 'init_sobol']
def init_middle(bounds):
"""
Initialize using a single query in the middle of the space.
"""
return np.mean(bounds, axis=1)[None, :]
def init_uniform(bounds, n=None, rng=None):
"""
Initialize using `n` uniformly distributed query points. If `n` is `None`
then use 3D points where D is the dimensionality of the input space.
"""
n = 3*len(bounds) if (n is None) else n
X = ldsample.random(bounds, n, rng)
return X
def init_latin(bounds, n=None, rng=None):
"""
Initialize using a Latin hypercube design of size `n`. If `n` is `None`
then use 3D points where D is the dimensionality of the input space.
"""
n = 3*len(bounds) if (n is None) else n
X = ldsample.latin(bounds, n, rng)
return X
def init_sobol(bounds, n=None, rng=None):
"""
Initialize using a Sobol sequence of length `n`. If `n` is `None` then use
3D points where D is the dimensionality of the input space.
"""
n = 3*len(bounds) if (n is None) else n
X = ldsample.sobol(bounds, n, rng)
return X
| """
Implementation of methods for sampling initial points.
"""
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
# global imports
import numpy as np
# local imports
from ..utils import ldsample
# exported symbols
__all__ = ['init_middle', 'init_uniform', 'init_latin', 'init_sobol']
def init_middle(bounds):
return np.mean(bounds, axis=1)[None, :]
def init_uniform(bounds, rng=None):
n = 3*len(bounds)
X = ldsample.random(bounds, n, rng)
return X
def init_latin(bounds, rng=None):
n = 3*len(bounds)
X = ldsample.latin(bounds, n, rng)
return X
def init_sobol(bounds, rng=None):
n = 3*len(bounds)
X = ldsample.sobol(bounds, n, rng)
return X
| Python | 0 |
cfc13f7e98062a2eb5a9a96298ebc67ee79d9602 | Use path for urls | src/clarityv2/deductions/admin.py | src/clarityv2/deductions/admin.py | from django.urls import path
from django.contrib import admin
from django.db.models import Sum
from clarityv2.utils.views.private_media import PrivateMediaView
from .models import Deduction
class DeductionPrivateMediaView(PrivateMediaView):
model = Deduction
permission_required = 'invoices.can_view_invoice'
file_field = 'receipt'
@admin.register(Deduction)
class DeductionAdmin(admin.ModelAdmin):
list_display = ('name', 'date', 'amount')
search_fields = ('name', 'notes')
change_list_template = 'admin/deductions/deduction/change_list.html'
def changelist_view(self, request, extra_context=None):
response = super().changelist_view(request, extra_context=None)
if hasattr(response, 'context_data'):
cl = response.context_data.get('cl')
if cl:
queryset = cl.get_queryset(request)
amount = queryset.aggregate(Sum('amount'))['amount__sum']
response.context_data['total_amount'] = amount
return response
def get_urls(self):
extra = [
path(
'<pk>/file/',
self.admin_site.admin_view(DeductionPrivateMediaView.as_view()),
name='deductions_deduction_receipt'
),
]
return extra + super().get_urls()
| from django.conf.urls import url
from django.contrib import admin
from django.db.models import Sum
from clarityv2.utils.views.private_media import PrivateMediaView
from .models import Deduction
class DeductionPrivateMediaView(PrivateMediaView):
model = Deduction
permission_required = 'invoices.can_view_invoice'
file_field = 'receipt'
@admin.register(Deduction)
class DeductionAdmin(admin.ModelAdmin):
list_display = ('name', 'date', 'amount')
search_fields = ('name', 'notes')
change_list_template = 'admin/deductions/deduction/change_list.html'
def changelist_view(self, request, extra_context=None):
response = super().changelist_view(request, extra_context=None)
if hasattr(response, 'context_data'):
cl = response.context_data.get('cl')
if cl:
queryset = cl.get_queryset(request)
amount = queryset.aggregate(Sum('amount'))['amount__sum']
response.context_data['total_amount'] = amount
return response
def get_urls(self):
extra = [
url(
r'^(?P<pk>.*)/file/$',
self.admin_site.admin_view(DeductionPrivateMediaView.as_view()),
name='deductions_deduction_receipt'
),
]
return extra + super().get_urls()
| Python | 0.000002 |
a8d7afe076c14115f3282114cecad216e46e7353 | Update scipy_effects.py | pydub/scipy_effects.py | pydub/scipy_effects.py | """
This module provides scipy versions of high_pass_filter, and low_pass_filter
as well as an additional band_pass_filter.
Of course, you will need to install scipy for these to work.
When this module is imported the high and low pass filters from this module
will be used when calling audio_segment.high_pass_filter() and
audio_segment.high_pass_filter() instead of the slower, less powerful versions
provided by pydub.effects.
"""
from scipy.signal import butter, sosfilt
from .utils import register_pydub_effect
def _mk_butter_filter(freq, type, order):
"""
Args:
freq: The cutoff frequency for highpass and lowpass filters. For
band filters, a list of [low_cutoff, high_cutoff]
type: "lowpass", "highpass", or "band"
order: nth order butterworth filter (default: 5th order). The
attenuation is -6dB/octave beyond the cutoff frequency (for 1st
order). A Higher order filter will have more attenuation, each level
adding an additional -6dB (so a 3rd order butterworth filter would
be -18dB/octave).
Returns:
function which can filter a mono audio segment
"""
def filter_fn(seg):
assert seg.channels == 1
nyq = 0.5 * seg.frame_rate
try:
freqs = [f / nyq for f in freq]
except TypeError:
freqs = freq / nyq
sos = butter(order, freqs, btype=type, output='sos')
y = sosfilt(sos, seg.get_array_of_samples())
return seg._spawn(y.astype(seg.array_type).tostring())
return filter_fn
@register_pydub_effect
def band_pass_filter(seg, low_cutoff_freq, high_cutoff_freq, order=5):
filter_fn = _mk_butter_filter([low_cutoff_freq, high_cutoff_freq], 'band', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
@register_pydub_effect
def high_pass_filter(seg, cutoff_freq, order=5):
filter_fn = _mk_butter_filter(cutoff_freq, 'highpass', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
@register_pydub_effect
def low_pass_filter(seg, cutoff_freq, order=5):
filter_fn = _mk_butter_filter(cutoff_freq, 'lowpass', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
| """
This module provides scipy versions of high_pass_filter, and low_pass_filter
as well as an additional band_pass_filter.
Of course, you will need to install scipy for these to work.
When this module is imported the high and low pass filters are used when calling
audio_segment.high_pass_filter() and audio_segment.high_pass_filter() instead
of the slower, less powerful versions provided by pydub.effects.
"""
from scipy.signal import butter, sosfilt
from .utils import register_pydub_effect
def _mk_butter_filter(freq, type, order):
"""
Args:
freq: The cutoff frequency for highpass and lowpass filters. For
band filters, a list of [low_cutoff, high_cutoff]
type: "lowpass", "highpass", or "band"
order: nth order butterworth filter (default: 5th order). The
attenuation is -6dB/octave beyond the cutoff frequency (for 1st
order). A Higher order filter will have more attenuation, each level
adding an additional -6dB (so a 3rd order butterworth filter would
be -18dB/octave).
Returns:
function which can filter a mono audio segment
"""
def filter_fn(seg):
assert seg.channels == 1
nyq = 0.5 * seg.frame_rate
try:
freqs = [f / nyq for f in freq]
except TypeError:
freqs = freq / nyq
sos = butter(order, freqs, btype=type, output='sos')
y = sosfilt(sos, seg.get_array_of_samples())
return seg._spawn(y.astype(seg.array_type).tostring())
return filter_fn
@register_pydub_effect
def band_pass_filter(seg, low_cutoff_freq, high_cutoff_freq, order=5):
filter_fn = _mk_butter_filter([low_cutoff_freq, high_cutoff_freq], 'band', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
@register_pydub_effect
def high_pass_filter(seg, cutoff_freq, order=5):
filter_fn = _mk_butter_filter(cutoff_freq, 'highpass', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
@register_pydub_effect
def low_pass_filter(seg, cutoff_freq, order=5):
filter_fn = _mk_butter_filter(cutoff_freq, 'lowpass', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
| Python | 0.000002 |
7508d20bd3d6af0b2e5a886c8ea2f895d9e69935 | Bump version: 0.2.1 → 0.2.2 | pyfilemail/__init__.py | pyfilemail/__init__.py | # -*- coding: utf-8 -*-
__title__ = 'pyfilemail'
__version__ = '0.2.2'
__author__ = 'Daniel Flehner Heen'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Daniel Flehner Heen'
import os
import logging
from functools import wraps
import appdirs
# Init logger
logger = logging.getLogger('pyfilemail')
level = os.getenv('PYFILEMAÌL_DEBUG') and logging.DEBUG or logging.INFO
logger.setLevel(level)
# Formatter
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(format_string)
# File logger setup
datadir = appdirs.user_data_dir(appname='pyfilemail', version=__version__)
if not os.path.exists(datadir):
os.makedirs(datadir)
logfile = os.path.join(datadir, 'pyfilemail.log')
filehandler = logging.FileHandler(logfile)
filehandler.setLevel(level)
filehandler.setFormatter(formatter)
# Stream logger
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.WARNING)
# Add handler
logger.addHandler(filehandler)
logger.addHandler(streamhandler)
# Decorator to make sure user is logged in
from errors import FMBaseError
def login_required(f):
"""Decorator function to check if user is loged in.
:raises: :class:`FMBaseError` if not logged in
"""
@wraps(f)
def check_login(cls, *args, **kwargs):
if not cls.logged_in:
raise FMBaseError('Please login to use this method')
return f(cls, *args, **kwargs)
return check_login
from users import User # lint:ok
from transfer import Transfer # lint:ok | # -*- coding: utf-8 -*-
__title__ = 'pyfilemail'
__version__ = '0.2.1'
__author__ = 'Daniel Flehner Heen'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Daniel Flehner Heen'
import os
import logging
from functools import wraps
import appdirs
# Init logger
logger = logging.getLogger('pyfilemail')
level = os.getenv('PYFILEMAÌL_DEBUG') and logging.DEBUG or logging.INFO
logger.setLevel(level)
# Formatter
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(format_string)
# File logger setup
datadir = appdirs.user_data_dir(appname='pyfilemail', version=__version__)
if not os.path.exists(datadir):
os.makedirs(datadir)
logfile = os.path.join(datadir, 'pyfilemail.log')
filehandler = logging.FileHandler(logfile)
filehandler.setLevel(level)
filehandler.setFormatter(formatter)
# Stream logger
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.WARNING)
# Add handler
logger.addHandler(filehandler)
logger.addHandler(streamhandler)
# Decorator to make sure user is logged in
from errors import FMBaseError
def login_required(f):
"""Decorator function to check if user is loged in.
:raises: :class:`FMBaseError` if not logged in
"""
@wraps(f)
def check_login(cls, *args, **kwargs):
if not cls.logged_in:
raise FMBaseError('Please login to use this method')
return f(cls, *args, **kwargs)
return check_login
from users import User # lint:ok
from transfer import Transfer # lint:ok | Python | 0.000001 |
f7066d6bdd4fefbf517cd8ab44951955bb9f3a2a | Fix min/max for None types | gpaw/setup/gcc.py | gpaw/setup/gcc.py | #!/usr/bin/env python3
"""Wrapper for the GNU compiler that converts / removes incompatible
compiler options and allows for file-specific tailoring."""
import sys
from subprocess import call
# Default compiler and options
compiler = 'gcc'
args2change = {}
fragile_files = ['c/xc/tpss.c']
# Default optimisation settings
default_level = 3
default_flags = ['-funroll-loops']
fragile_level = 2
fragile_flags = []
# Puhti (Bull Sequana X1000)
if True:
compiler = 'mpicc'
default_flags += ['-march=cascadelake']
# Sisu (Cray XC40)
if not True:
compiler = 'cc'
default_flags += ['-march=haswell -mtune=haswell -mavx2']
fragile_files += ['c/xc/revtpss.c']
# Taito (HP cluster)
if not True:
compiler = 'mpicc'
default_flags += ['-ffast-math -march=sandybridge -mtune=haswell']
optimise = None # optimisation level 0/1/2/3 (None == default)
debug = False # use -g or not
fragile = False # use special flags for current file?
sandwich = True # use optimisation flag twice (= no override possible)
# process arguments
args = []
for arg in sys.argv[1:]:
arg = arg.strip()
if arg.startswith('-O'):
level = int(arg.replace('-O',''))
if not optimise or level > optimise:
optimise = level
elif arg == '-g':
debug = True
elif arg in args2change:
if args2change[arg]:
args.append(args2change[arg])
else:
if arg in fragile_files:
fragile = True
args.append(arg)
# set default optimisation level and flags
if fragile:
if optimise is not None:
optimise = min(fragile_level, optimise)
flags = fragile_flags
else:
if optimise is not None:
optimise = max(default_level, optimise)
flags = default_flags
# add optimisation level to flags
if optimise is not None:
flags.insert(0, '-O{0}'.format(optimise))
if sandwich:
args.append('-O{0}'.format(optimise))
# make sure -g is always the _first_ flag, so it doesn't mess e.g. with the
# optimisation level
if debug:
flags.insert(0, '-g')
# construct and execute the compile command
cmd = '{0} {1} {2}'.format(compiler, ' '.join(flags), ' '.join(args))
print(cmd)
call(cmd, shell=True)
| #!/usr/bin/env python3
"""Wrapper for the GNU compiler that converts / removes incompatible
compiler options and allows for file-specific tailoring."""
import sys
from subprocess import call
# Default compiler and options
compiler = 'gcc'
args2change = {}
fragile_files = ['c/xc/tpss.c']
# Default optimisation settings
default_level = 3
default_flags = ['-funroll-loops']
fragile_level = 2
fragile_flags = []
# Puhti (Bull Sequana X1000)
if True:
compiler = 'mpicc'
default_flags += ['-march=cascadelake']
# Sisu (Cray XC40)
if not True:
compiler = 'cc'
default_flags += ['-march=haswell -mtune=haswell -mavx2']
fragile_files += ['c/xc/revtpss.c']
# Taito (HP cluster)
if not True:
compiler = 'mpicc'
default_flags += ['-ffast-math -march=sandybridge -mtune=haswell']
optimise = None # optimisation level 0/1/2/3 (None == default)
debug = False # use -g or not
fragile = False # use special flags for current file?
sandwich = True # use optimisation flag twice (= no override possible)
# process arguments
args = []
for arg in sys.argv[1:]:
arg = arg.strip()
if arg.startswith('-O'):
level = int(arg.replace('-O',''))
if not optimise or level > optimise:
optimise = level
elif arg == '-g':
debug = True
elif arg in args2change:
if args2change[arg]:
args.append(args2change[arg])
else:
if arg in fragile_files:
fragile = True
args.append(arg)
# set default optimisation level and flags
if fragile:
optimise = min(fragile_level, optimise)
flags = fragile_flags
else:
optimise = max(default_level, optimise)
flags = default_flags
# add optimisation level to flags
if optimise is not None:
flags.insert(0, '-O{0}'.format(optimise))
if sandwich:
args.append('-O{0}'.format(optimise))
# make sure -g is always the _first_ flag, so it doesn't mess e.g. with the
# optimisation level
if debug:
flags.insert(0, '-g')
# construct and execute the compile command
cmd = '{0} {1} {2}'.format(compiler, ' '.join(flags), ' '.join(args))
print(cmd)
call(cmd, shell=True)
| Python | 0.000072 |
0b6b7ab518362445f3901f8d3b0d6281e2671c3f | Make code python3 compatible | drivers/python/setup.py | drivers/python/setup.py | # Copyright 2010-2012 RethinkDB, all rights reserved.
from setuptools import setup, Extension
from distutils.command.build_ext import build_ext
from distutils.errors import DistutilsPlatformError, CCompilerError, DistutilsExecError
import sys
class build_ext_nofail(build_ext):
# This class can replace the build_ext command with one that does not fail
# when the extension fails to build.
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError as e:
self._failed(e)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError) as e:
self._failed(e)
else:
try:
import google.protobuf.internal.cpp_message
except ImportError:
print >> sys.stderr, "*** WARNING: The installed protobuf library does not seem to include the C++ extension"
print >> sys.stderr, "*** WARNING: The RethinkDB driver will fallback to using the pure python implementation"
def _failed(self, e):
print >> sys.stderr, "*** WARNING: Unable to compile the C++ extension"
print >> sys.stderr, e
print >> sys.stderr, "*** WARNING: Defaulting to the python implementation"
setup(name="rethinkdb"
,version="1.7.0-2"
,description="This package provides the Python driver library for the RethinkDB database server."
,url="http://rethinkdb.com"
,maintainer="RethinkDB Inc."
,maintainer_email="bugs@rethinkdb.com"
,packages=['rethinkdb']
,install_requires=['protobuf']
,ext_modules=[Extension('rethinkdb_pbcpp', sources=['./rethinkdb/pbcpp.cpp', './rethinkdb/ql2.pb.cc'],
include_dirs=['./rethinkdb'], libraries=['protobuf'])]
,cmdclass={"build_ext":build_ext_nofail}
)
| # Copyright 2010-2012 RethinkDB, all rights reserved.
from setuptools import setup, Extension
from distutils.command.build_ext import build_ext
from distutils.errors import DistutilsPlatformError, CCompilerError, DistutilsExecError
import sys
class build_ext_nofail(build_ext):
# This class can replace the build_ext command with one that does not fail
# when the extension fails to build.
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError, e:
self._failed(e)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError), e:
self._failed(e)
else:
try:
import google.protobuf.internal.cpp_message
except ImportError:
print >> sys.stderr, "*** WARNING: The installed protobuf library does not seem to include the C++ extension"
print >> sys.stderr, "*** WARNING: The RethinkDB driver will fallback to using the pure python implementation"
def _failed(self, e):
print >> sys.stderr, "*** WARNING: Unable to compile the C++ extension"
print >> sys.stderr, e
print >> sys.stderr, "*** WARNING: Defaulting to the python implementation"
setup(name="rethinkdb"
,version="1.7.0-2"
,description="This package provides the Python driver library for the RethinkDB database server."
,url="http://rethinkdb.com"
,maintainer="RethinkDB Inc."
,maintainer_email="bugs@rethinkdb.com"
,packages=['rethinkdb']
,install_requires=['protobuf']
,ext_modules=[Extension('rethinkdb_pbcpp', sources=['./rethinkdb/pbcpp.cpp', './rethinkdb/ql2.pb.cc'],
include_dirs=['./rethinkdb'], libraries=['protobuf'])]
,cmdclass={"build_ext":build_ext_nofail}
)
| Python | 0.000222 |
7418379d959cba0e96161c9e61f340541b82d85f | clean up xor a bit | python/examples/xor.py | python/examples/xor.py | # Copyright Hugh Perkins 2016
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
"""
Simple example of xor
"""
from __future__ import print_function
import PyDeepCL
import random
import numpy as np
def go():
print('xor')
data = [
{'data': [-1, -1], 'label': 0},
{'data': [1, -1], 'label': 1},
{'data': [-1, 1], 'label': 1},
{'data': [1, 1], 'label': 0}
]
N = len(data)
batchSize = N
planes = 2
size = 1
learningRate = 0.1
numEpochs = 4000
cl = PyDeepCL.DeepCL()
net = PyDeepCL.NeuralNet(cl, planes, size)
net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())
net.addLayer(PyDeepCL.ActivationMaker().sigmoid())
net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())
net.addLayer(PyDeepCL.ActivationMaker().sigmoid())
net.addLayer(PyDeepCL.SoftMaxMaker())
print(net.asString())
images = np.zeros((N, planes, size, size), dtype=np.float32)
labels = np.zeros((N,), dtype=np.int32)
for n in range(N):
for plane in range(planes):
images[n,plane,0,0] = data[n]['data'][plane]
labels[n] = data[n]['label']
sgd = PyDeepCL.SGD(cl, learningRate, 0.0)
netLearner = PyDeepCL.NetLearner(
sgd, net,
N, images.reshape((images.size,)), labels,
N, images.reshape((images.size,)), labels,
batchSize)
netLearner.setSchedule(numEpochs)
netLearner.run()
if __name__ == '__main__':
go()
| # Copyright Hugh Perkins 2015
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function
# import sys
# import array
import PyDeepCL
import random
import numpy as np
def go():
print('xor')
random.seed(1)
cl = PyDeepCL.DeepCL()
net = PyDeepCL.NeuralNet(cl, 2, 1)
# net.addLayer(PyDeepCL.InputLayerMaker().numPlanes(2).imageSize(1))
net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())
net.addLayer(PyDeepCL.ActivationMaker().sigmoid())
net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())
net.addLayer(PyDeepCL.ActivationMaker().sigmoid())
# net.addLayer(PyDeepCL.FullyConnectedMaker().numPlanes(2).imageSize(1).biased().relu())
# net.addLayer(PyDeepCL.FullyConnectedMaker().numPlanes(2).imageSize(1).biased().relu())
# net.addLayer( PyDeepCL.FullyConnectedMaker().numPlanes(10).imageSize(1).biased().linear() )
#net.addLayer( PyDeepCL.SquareLossMaker() )
net.addLayer(PyDeepCL.SoftMaxMaker())
print(net.asString())
data = [
{'data': [-1, -1], 'label': 0},
{'data': [1, -1], 'label': 1},
{'data': [-1, 1], 'label': 1},
{'data': [1, 1], 'label': 0}
]
N = len(data)
planes = 2
size = 1
images = np.zeros((N, planes, size, size), dtype=np.float32)
labels = np.zeros((N,), dtype=np.int32)
for n in range(N):
images[n,0,0,0] = data[n]['data'][0]
images[n,1,0,0] = data[n]['data'][1]
labels[n] = data[n]['label']
sgd = PyDeepCL.SGD(cl, 0.1, 0.0)
netLearner = PyDeepCL.NetLearner(
sgd, net,
N, images.reshape((images.size,)), labels,
N, images.reshape((images.size,)), labels,
N)
netLearner.setSchedule(2000)
netLearner.run()
if __name__ == '__main__':
go()
| Python | 0 |
9729c3aecccfa8130db7b5942c423c0807726f81 | Add feature importance bar chart. | python/gbdt/_forest.py | python/gbdt/_forest.py | from libgbdt import Forest as _Forest
class Forest:
def __init__(self, forest):
if type(forest) is str or type(forest) is unicode:
self._forest = _Forest(forest)
elif type(forest) is _Forest:
self._forest = forest
else:
raise TypeError, 'Unsupported forest type: {0}'.format(type(forest))
def predict(self, data_store):
"""Computes prediction scores for data_store."""
return self._forest.predict(data_store._data_store)
def feature_importance(self):
"""Outputs list of feature importances in descending order."""
return self._forest.feature_importance()
def feature_importance_bar_chart(self, color='blue'):
try:
from matplotlib import pyplot as plt
import numpy
except ImportError:
raise ImportError('Please install matplotlib and numpy.')
fimps = self.feature_importance()
importances = [v for _, v in fimps]
features = [f for f,_ in fimps]
ind = -numpy.arange(len(fimps))
_, ax = plt.subplots()
plt.barh(ind, importances, align='center', color=color)
ax.set_yticks(ind)
ax.set_yticklabels(features)
ax.set_xlabel('Feature importance')
def __str__(self):
return self._forest.as_json()
| from libgbdt import Forest as _Forest
class Forest:
def __init__(self, forest):
if type(forest) is str or type(forest) is unicode:
self._forest = _Forest(forest)
elif type(forest) is _Forest:
self._forest = forest
else:
raise TypeError, 'Unsupported forest type: {0}'.format(type(forest))
def predict(self, data_store):
"""Computes prediction scores for data_store."""
return self._forest.predict(data_store._data_store)
def feature_importance(self):
"""Outputs list of feature importances in descending order."""
return self._forest.feature_importance()
def __str__(self):
return self._forest.as_json()
| Python | 0 |
851fe0dad512dbf9888638566135d1f8cd0dd853 | fix #4036 | gui/qt/qrtextedit.py | gui/qt/qrtextedit.py |
from electrum.i18n import _
from electrum.plugins import run_hook
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QFileDialog
from .util import ButtonsTextEdit, MessageBoxMixin, ColorScheme
class ShowQRTextEdit(ButtonsTextEdit):
def __init__(self, text=None):
ButtonsTextEdit.__init__(self, text)
self.setReadOnly(1)
self.addButton(":icons/qrcode.png", self.qr_show, _("Show as QR code"))
run_hook('show_text_edit', self)
def qr_show(self):
from .qrcodewidget import QRDialog
try:
s = str(self.toPlainText())
except:
s = self.toPlainText()
QRDialog(s).exec_()
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Show as QR code"), self.qr_show)
m.exec_(e.globalPos())
class ScanQRTextEdit(ButtonsTextEdit, MessageBoxMixin):
def __init__(self, text="", allow_multi=False):
ButtonsTextEdit.__init__(self, text)
self.allow_multi = allow_multi
self.setReadOnly(0)
self.addButton(":icons/file.png", self.file_input, _("Read file"))
icon = ":icons/qrcode_white.png" if ColorScheme.dark_scheme else ":icons/qrcode.png"
self.addButton(icon, self.qr_input, _("Read QR code"))
run_hook('scan_text_edit', self)
def file_input(self):
fileName, __ = QFileDialog.getOpenFileName(self, 'select file')
if not fileName:
return
try:
with open(fileName, "r") as f:
data = f.read()
except BaseException as e:
self.show_error(_('Error opening file') + ':\n' + str(e))
else:
self.setText(data)
def qr_input(self):
from electrum import qrscanner, get_config
try:
data = qrscanner.scan_barcode(get_config().get_video_device())
except BaseException as e:
self.show_error(str(e))
data = ''
if not data:
data = ''
if self.allow_multi:
new_text = self.text() + data + '\n'
else:
new_text = data
self.setText(new_text)
return data
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Read QR code"), self.qr_input)
m.exec_(e.globalPos())
|
from electrum.i18n import _
from electrum.plugins import run_hook
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QFileDialog
from .util import ButtonsTextEdit, MessageBoxMixin, ColorScheme
class ShowQRTextEdit(ButtonsTextEdit):
def __init__(self, text=None):
ButtonsTextEdit.__init__(self, text)
self.setReadOnly(1)
self.addButton(":icons/qrcode.png", self.qr_show, _("Show as QR code"))
run_hook('show_text_edit', self)
def qr_show(self):
from .qrcodewidget import QRDialog
try:
s = str(self.toPlainText())
except:
s = self.toPlainText()
QRDialog(s).exec_()
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Show as QR code"), self.qr_show)
m.exec_(e.globalPos())
class ScanQRTextEdit(ButtonsTextEdit, MessageBoxMixin):
def __init__(self, text="", allow_multi=False):
ButtonsTextEdit.__init__(self, text)
self.allow_multi = allow_multi
self.setReadOnly(0)
self.addButton(":icons/file.png", self.file_input, _("Read file"))
icon = ":icons/qrcode_white.png" if ColorScheme.dark_scheme else ":icons/qrcode.png"
self.addButton(icon, self.qr_input, _("Read QR code"))
run_hook('scan_text_edit', self)
def file_input(self):
fileName, __ = QFileDialog.getOpenFileName(self, 'select file')
if not fileName:
return
with open(fileName, "r") as f:
data = f.read()
self.setText(data)
def qr_input(self):
from electrum import qrscanner, get_config
try:
data = qrscanner.scan_barcode(get_config().get_video_device())
except BaseException as e:
self.show_error(str(e))
data = ''
if not data:
data = ''
if self.allow_multi:
new_text = self.text() + data + '\n'
else:
new_text = data
self.setText(new_text)
return data
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Read QR code"), self.qr_input)
m.exec_(e.globalPos())
| Python | 0.000001 |
50d44ec25eb102451a495dd645ffb2a6f77012ae | Add a shortcut for imports | queue_util/__init__.py | queue_util/__init__.py | from queue_util.consumer import Consumer
from queue_util.producer import Producer
| Python | 0.000002 | |
325c5a8f407340fa8901f406c301fa8cbdac4ff8 | bump version to 0.13.0 | gunicorn/__init__.py | gunicorn/__init__.py | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
version_info = (0, 13, 0)
__version__ = ".".join(map(str, version_info))
SERVER_SOFTWARE = "gunicorn/%s" % __version__
| # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
version_info = (0, 12, 2)
__version__ = ".".join(map(str, version_info))
SERVER_SOFTWARE = "gunicorn/%s" % __version__
| Python | 0 |
dda25cdb808259ec2a91cba74273dc6c929af0aa | Check README.md and CHANGELOG.md. | dscan/plugins/drupal.py | dscan/plugins/drupal.py | from cement.core import handler, controller
from dscan.plugins import BasePlugin
from dscan.common.update_api import GitRepo
import dscan.common.update_api as ua
import dscan.common.versions
class Drupal(BasePlugin):
plugins_base_url = [
"%ssites/all/modules/%s/",
"%ssites/default/modules/%s/",
"%smodules/contrib/%s/",
"%smodules/%s/"]
themes_base_url = [
"%ssites/all/themes/%s/",
"%ssites/default/themes/%s/",
"%sthemes/%s/"]
forbidden_url = "sites/"
regular_file_url = ["misc/drupal.js", 'core/misc/drupal.js']
module_common_file = "LICENSE.txt"
update_majors = ['6','7','8', '9']
interesting_urls = [
("CHANGELOG.txt", "Default changelog file"),
("user/login", "Default admin"),
]
interesting_module_urls = [
('CHANGELOG.txt', 'Changelog file'),
('CHANGELOG.md', 'Changelog file'),
('changelog.txt', 'Changelog file'),
('CHANGELOG.TXT', 'Changelog file'),
('README.txt', 'README file'),
('README.md', 'README file'),
('readme.txt', 'README file'),
('README.TXT', 'README file'),
('LICENSE.txt', 'License file'),
('API.txt', 'Contains API documentation for the module')
]
class Meta:
label = 'drupal'
@controller.expose(help='drupal related scanning tools')
def drupal(self):
self.plugin_init()
def update_version_check(self):
"""
@return: True if new tags have been made in the github repository.
"""
return ua.github_tags_newer('drupal/drupal/', self.versions_file,
update_majors=self.update_majors)
def update_version(self):
"""
@return: updated VersionsFile
"""
gr, versions_file, new_tags = ua.github_repo_new('drupal/drupal/',
'drupal/drupal', self.versions_file, self.update_majors)
hashes = {}
for version in new_tags:
gr.tag_checkout(version)
hashes[version] = gr.hashes_get(versions_file)
versions_file.update(hashes)
return versions_file
def update_plugins_check(self):
return ua.update_modules_check(self)
def update_plugins(self):
"""
@return: (plugins, themes) a tuple which contains two list of
strings, the plugins and the themes.
"""
plugins_url = 'https://drupal.org/project/project_module?page=%s'
plugins_css = '.node-project-module > h2 > a'
themes_url = 'https://drupal.org/project/project_theme?page=%s'
themes_css = '.node-project-theme > h2 > a'
per_page = 25
plugins = []
for elem in ua.modules_get(plugins_url, per_page, plugins_css):
plugins.append(elem['href'].split("/")[-1])
themes = []
for elem in ua.modules_get(themes_url, per_page, themes_css):
themes.append(elem['href'].split("/")[-1])
return plugins, themes
def load(app=None):
handler.register(Drupal)
| from cement.core import handler, controller
from dscan.plugins import BasePlugin
from dscan.common.update_api import GitRepo
import dscan.common.update_api as ua
import dscan.common.versions
class Drupal(BasePlugin):
plugins_base_url = [
"%ssites/all/modules/%s/",
"%ssites/default/modules/%s/",
"%smodules/contrib/%s/",
"%smodules/%s/"]
themes_base_url = [
"%ssites/all/themes/%s/",
"%ssites/default/themes/%s/",
"%sthemes/%s/"]
forbidden_url = "sites/"
regular_file_url = ["misc/drupal.js", 'core/misc/drupal.js']
module_common_file = "LICENSE.txt"
update_majors = ['6','7','8', '9']
interesting_urls = [
("CHANGELOG.txt", "Default changelog file"),
("user/login", "Default admin"),
]
interesting_module_urls = [
('CHANGELOG.txt', 'Changelog file'),
('changelog.txt', 'Changelog file'),
('CHANGELOG.TXT', 'Changelog file'),
('README.txt', 'README file'),
('readme.txt', 'README file'),
('README.TXT', 'README file'),
('LICENSE.txt', 'License file'),
('API.txt', 'Contains API documentation for the module')
]
class Meta:
label = 'drupal'
@controller.expose(help='drupal related scanning tools')
def drupal(self):
self.plugin_init()
def update_version_check(self):
"""
@return: True if new tags have been made in the github repository.
"""
return ua.github_tags_newer('drupal/drupal/', self.versions_file,
update_majors=self.update_majors)
def update_version(self):
"""
@return: updated VersionsFile
"""
gr, versions_file, new_tags = ua.github_repo_new('drupal/drupal/',
'drupal/drupal', self.versions_file, self.update_majors)
hashes = {}
for version in new_tags:
gr.tag_checkout(version)
hashes[version] = gr.hashes_get(versions_file)
versions_file.update(hashes)
return versions_file
def update_plugins_check(self):
return ua.update_modules_check(self)
def update_plugins(self):
"""
@return: (plugins, themes) a tuple which contains two list of
strings, the plugins and the themes.
"""
plugins_url = 'https://drupal.org/project/project_module?page=%s'
plugins_css = '.node-project-module > h2 > a'
themes_url = 'https://drupal.org/project/project_theme?page=%s'
themes_css = '.node-project-theme > h2 > a'
per_page = 25
plugins = []
for elem in ua.modules_get(plugins_url, per_page, plugins_css):
plugins.append(elem['href'].split("/")[-1])
themes = []
for elem in ua.modules_get(themes_url, per_page, themes_css):
themes.append(elem['href'].split("/")[-1])
return plugins, themes
def load(app=None):
handler.register(Drupal)
| Python | 0 |
fbb3df846b3b9f4a86d6238cc5605a8d771ff924 | add python 3 compatibility | dumper/logging_utils.py | dumper/logging_utils.py | from __future__ import unicode_literals
import logging
import six
class BaseLogger(object):
@classmethod
def get_logger(cls):
logger = logging.getLogger(cls.module)
logger.setLevel('DEBUG')
return logger
@classmethod
def _log(cls, message):
cls.get_logger().debug(message)
@classmethod
def _cache_action(cls, action, path=None, method=None, key=None):
log_string = action
if path:
log_string += ' path "{0}"'.format(path)
if method:
log_string += ' with method "{0}"'.format(method)
if key:
log_string += ' as key "{0}"'.format(key)
cls._log(log_string)
class MiddlewareLogger(BaseLogger):
module = 'dumper.middleware'
@classmethod
def get(cls, key, value, request):
success_text = 'found' if value else 'didnt find'
cls._cache_action(
success_text,
path=request.path,
method=request.method,
key=key
)
@classmethod
def not_get(cls, request):
cls._cache_action(
'skipped getting',
path=request.path,
)
@classmethod
def save(cls, key, request):
cls._cache_action(
'cached',
path=request.path,
method=request.method,
key=key
)
@classmethod
def not_save(cls, request):
cls._cache_action(
'not saving cache for',
path=request.path,
method=request.method,
)
class InvalidationLogger(BaseLogger):
module = 'dumper.invalidation'
@classmethod
def invalidate(cls, path, key):
cls._cache_action(
'invalidated',
path=path,
key=key
)
class SiteLogger(BaseLogger):
module = 'dumper.site'
@classmethod
def register(cls, model):
app_name = model._meta.app_label
model_name = model._meta.object_name
cls._log('registered {0}.{1}'.format(app_name, model_name))
@classmethod
def invalidate_instance(cls, instance):
instance_name = six.text_type(instance)
model = instance.__class__
app_name = model._meta.app_label
model_name = model._meta.object_name
cls._log('invalidating instance #{0} "{1}" of {2}.{3}'.format(
instance.pk,
instance_name,
app_name,
model_name
))
| from __future__ import unicode_literals
import logging
class BaseLogger(object):
@classmethod
def get_logger(cls):
logger = logging.getLogger(cls.module)
logger.setLevel('DEBUG')
return logger
@classmethod
def _log(cls, message):
cls.get_logger().debug(message)
@classmethod
def _cache_action(cls, action, path=None, method=None, key=None):
log_string = action
if path:
log_string += ' path "{0}"'.format(path)
if method:
log_string += ' with method "{0}"'.format(method)
if key:
log_string += ' as key "{0}"'.format(key)
cls._log(log_string)
class MiddlewareLogger(BaseLogger):
module = 'dumper.middleware'
@classmethod
def get(cls, key, value, request):
success_text = 'found' if value else 'didnt find'
cls._cache_action(
success_text,
path=request.path,
method=request.method,
key=key
)
@classmethod
def not_get(cls, request):
cls._cache_action(
'skipped getting',
path=request.path,
)
@classmethod
def save(cls, key, request):
cls._cache_action(
'cached',
path=request.path,
method=request.method,
key=key
)
@classmethod
def not_save(cls, request):
cls._cache_action(
'not saving cache for',
path=request.path,
method=request.method,
)
class InvalidationLogger(BaseLogger):
module = 'dumper.invalidation'
@classmethod
def invalidate(cls, path, key):
cls._cache_action(
'invalidated',
path=path,
key=key
)
class SiteLogger(BaseLogger):
module = 'dumper.site'
@classmethod
def register(cls, model):
app_name = model._meta.app_label
model_name = model._meta.object_name
cls._log('registered {0}.{1}'.format(app_name, model_name))
@classmethod
def invalidate_instance(cls, instance):
instance_name = unicode(instance)
model = instance.__class__
app_name = model._meta.app_label
model_name = model._meta.object_name
cls._log('invalidating instance #{0} "{1}" of {2}.{3}'.format(
instance.pk,
instance_name,
app_name,
model_name
))
| Python | 0.000001 |
0f4d2b75cde58f6926636563691182fb5896c894 | Add docstring to autocorr and ambiguity functions so the axes and peak location of the result is made clear. | echolect/core/coding.py | echolect/core/coding.py | # Copyright 2013 Ryan Volz
# This file is part of echolect.
# Echolect is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Echolect is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with echolect. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from echolect.filtering import filtering
__all__ = ['autocorr', 'ambiguity']
def autocorr(code, nfreq=1):
"""Calculate autocorrelation of code for nfreq frequencies.
If nfreq == 1, the result is a 1-D array with length that is
2*len(code) - 1. The peak value of sum(abs(code)**2) is located
in the middle at index len(code) - 1.
If nfreq > 1, the result is a 2-D array with the first index
corresponding to frequency shift. The code is frequency shifted
by normalized frequencies of range(nfreq)/nfreq and correlated
with the baseband code. The result acorr[0] gives the
autocorrelation with 0 frequency shift, acorr[1] with 1/nfreq
frequency shift, etc. These frequencies are the same as (and
are in the same order as) the FFT frequencies for an nfreq-
length FFT.
****Thus, the peak value is at acorr[0, len(code) - 1]****
To relocate the peak to the middle of the result, use
np.fft.fftshift(acorr, axes=0)
To relocate the peak to the [0, 0] entry, use
np.fft.ifftshift(acorr, axes=1)
"""
# special case because matched_doppler does not handle nfreq < len(code)
if nfreq == 1:
acorr = filtering.matched(code, code)
else:
acorr = filtering.matched_doppler(code, nfreq, code)
return acorr
def ambiguity(code, nfreq=1):
"""Calculate the ambiguity function of code for nfreq frequencies.
The ambiguity function is the square of the autocorrelation,
normalized so the peak value is 1.
See autocorr for details.
"""
acorr = autocorr(code, nfreq)
# normalize so answer at zero delay, zero Doppler is 1
b = len(code)
if nfreq == 1:
acorr = acorr / acorr[b - 1]
else:
acorr = acorr / acorr[0, b - 1]
amb = acorr.real**2 + acorr.imag**2
return amb | # Copyright 2013 Ryan Volz
# This file is part of echolect.
# Echolect is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Echolect is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with echolect. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from echolect.filtering import filtering
__all__ = ['autocorr', 'ambiguity']
def autocorr(code, nfreq=1):
# special case because matched_doppler does not handle nfreq < len(code)
if nfreq == 1:
acorr = filtering.matched(code, code)
else:
acorr = filtering.matched_doppler(code, nfreq, code)
return acorr
def ambiguity(code, nfreq=1):
acorr = autocorr(code, nfreq)
# normalize so answer at zero delay, zero Doppler is 1
b = len(code)
if nfreq == 1:
acorr = acorr / acorr[b - 1]
else:
acorr = acorr / acorr[0, b - 1]
amb = acorr.real**2 + acorr.imag**2
return amb | Python | 0 |
cc6bc2b9af67c064339371b43795c36ed3e5ddcb | use TemplateResponse everywhere | ella_galleries/views.py | ella_galleries/views.py | from django.http import Http404
from django.template.response import TemplateResponse
from django.utils.translation import ungettext
from django.utils.cache import patch_vary_headers
from ella.core.views import get_templates_from_publishable
def gallery_item_detail(request, context, item_slug=None):
'''
Returns ``GalleryItem`` object by its slug or first one (given by
``GalleryItem``.``order``) from ``Gallery``.
'''
gallery = context['object']
item_sorted_dict = gallery.items
count = len(item_sorted_dict)
count_str = ungettext('%(count)d object total', '%(count)d objects total',
count) % {'count': count}
next = None
previous = None
if count == 0:
# TODO: log empty gallery
raise Http404()
if item_slug is None:
item = item_sorted_dict.value_for_index(0)
if count > 1:
next = item_sorted_dict.value_for_index(1)
position = 1
else:
try:
item = item_sorted_dict[item_slug]
except KeyError:
raise Http404()
item_index = item_sorted_dict.keyOrder.index(item_slug)
if item_index > 0:
previous = item_sorted_dict.value_for_index(item_index - 1)
if (item_index + 1) < count:
next = item_sorted_dict.value_for_index(item_index + 1)
position = item_index + 1
context.update({
'gallery': gallery,
'item': item,
'item_list' : item_sorted_dict.values(),
'next' : next,
'previous' : previous,
'count' : count,
'count_str' : count_str,
'position' : position,
})
if request.is_ajax():
template_name = "item-ajax.html"
else:
template_name = "item.html"
response = TemplateResponse(
request,
get_templates_from_publishable(template_name, context['object']),
context,
)
patch_vary_headers(response, ('X-Requested-With',))
return response
| from django.http import Http404
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ungettext
from django.utils.cache import patch_vary_headers
from ella.core.views import get_templates_from_publishable
def gallery_item_detail(request, context, item_slug=None):
'''
Returns ``GalleryItem`` object by its slug or first one (given by
``GalleryItem``.``order``) from ``Gallery``.
'''
gallery = context['object']
item_sorted_dict = gallery.items
count = len(item_sorted_dict)
count_str = ungettext('%(count)d object total', '%(count)d objects total',
count) % {'count': count}
next = None
previous = None
if count == 0:
# TODO: log empty gallery
raise Http404()
if item_slug is None:
item = item_sorted_dict.value_for_index(0)
if count > 1:
next = item_sorted_dict.value_for_index(1)
position = 1
else:
try:
item = item_sorted_dict[item_slug]
except KeyError:
raise Http404()
item_index = item_sorted_dict.keyOrder.index(item_slug)
if item_index > 0:
previous = item_sorted_dict.value_for_index(item_index - 1)
if (item_index + 1) < count:
next = item_sorted_dict.value_for_index(item_index + 1)
position = item_index + 1
context.update({
'gallery': gallery,
'item': item,
'item_list' : item_sorted_dict.values(),
'next' : next,
'previous' : previous,
'count' : count,
'count_str' : count_str,
'position' : position,
})
if request.is_ajax():
template_name = "item-ajax.html"
else:
template_name = "item.html"
response = render_to_response(
get_templates_from_publishable(template_name, context['object']),
context,
context_instance=RequestContext(request),
)
patch_vary_headers(response, ('X-Requested-With',))
return response
| Python | 0 |
63587ab033a0aabd52af6b657600d2d2547f034f | Bump release version | grove/__init__.py | grove/__init__.py | ##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
__version__ = '1.3.0'
| ##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
__version__ = '1.2.0'
| Python | 0 |
e0de9a865b731f3f24bc7a42909849abc738217f | Increment version | karld/_meta.py | karld/_meta.py | version_info = (0, 2, 1)
version = '.'.join(map(str, version_info))
| version_info = (0, 2, 0)
version = '.'.join(map(str, version_info))
| Python | 0.000002 |
c39ce3485af781e8974a70200baa1f51e5c1633b | fix imports | gstat/__init__.py | gstat/__init__.py | from gstat import gstat, gstats, gstat_elapsed, gstat_event
| Python | 0.000002 | |
cbbe9d50108747b15864436de01947bc2598b1b3 | Fix bug | WindAdapter/data_provider.py | WindAdapter/data_provider.py | # -*- coding: utf-8 -*-
import pandas as pd
try:
from WindPy import w
except ImportError:
pass
class WindRunner:
def __init__(self):
try:
w.start()
except NameError:
pass
def __del__(self):
try:
w.stop()
except AttributeError:
pass
except NameError:
pass
class WindDataProvider:
WIND_RUNNER = WindRunner()
def __init__(self):
pass
@staticmethod
def force_throw_err(raw_data, func_name):
if raw_data.ErrorCode != 0:
raise ValueError('{0}: {1}'.format(raw_data.Data[0], func_name))
elif len(raw_data.Data) == 0:
raise ValueError('{0}: empty data returned'.format(func_name))
@staticmethod
def get_universe(index_id, date=None, output_weight=False):
index_id = index_id.lower()
try:
if index_id == 'fulla':
code = 'a001010100000000'
params = 'sectorid=' + code + ';field=wind_code' if date is None \
else 'date=' + str(date) + ';sectorid=' + code
raw_data = w.wset('sectorconstituent', params)
else:
short_params = 'windcode=' + index_id
params = short_params if date is None else short_params + ';date=' + str(date)
raw_data = w.wset('IndexConstituent', params)
WindDataProvider.force_throw_err(raw_data, 'WindDataProvider.get_universe')
if output_weight:
return pd.DataFrame(data=raw_data.Data[3], index=raw_data.Data[1], columns=['weight'])
else:
return raw_data.Data[1]
except NameError:
pass
@staticmethod
def advance_date(date, unit, freq):
try:
ret = w.tdaysoffset(int(unit) * -1, date, 'period=' + freq)
WindDataProvider.force_throw_err(ret, 'WindDataProvider.advance_date')
return ret.Data[0][0]
except NameError:
pass
@staticmethod
def biz_days_list(start_date, end_date, freq):
try:
dates = w.tdays(start_date, end_date, 'period=' + freq)
WindDataProvider.force_throw_err(dates, 'WindDataProvider.biz_days_list')
return dates.Data[0]
except NameError:
pass
@staticmethod
def query_data(api, sec_id, indicator, extra_params, start_date=None, end_date=None):
if api == 'w.wsd':
ret = eval(api)(sec_id, indicator, start_date, end_date, extra_params)
elif api == 'w.wss':
ret = eval(api)(sec_id, indicator, extra_params)
else:
raise ValueError('WindDataProvider.query_data: unknown type of api')
WindDataProvider.force_throw_err(ret, 'WindDataProvider.query_data')
return ret
| # -*- coding: utf-8 -*-
import pandas as pd
try:
from WindPy import w
except ImportError:
pass
class WindRunner:
def __init__(self):
try:
w.start()
except NameError:
pass
def __del__(self):
try:
w.stop()
except AttributeError:
pass
except NameError:
pass
class WindDataProvider:
WIND_RUNNER = WindRunner()
def __init__(self):
pass
@staticmethod
def force_throw_err(raw_data, func_name):
if raw_data.ErrorCode != 0:
raise ValueError('{0}: {1}'.format(raw_data.Data[0], func_name))
elif len(raw_data.Data) == 0:
raise ValueError('{0}: empty data returned'.format(func_name))
@staticmethod
def get_universe(index_id, date, output_weight=False):
index_id = index_id.lower()
try:
if index_id == 'fulla':
code = 'a001010100000000'
params = 'sectorid=' + code + ';field=wind_code' if date is None \
else 'date=' + str(date) + ';sectorid=' + code
raw_data = w.wset('sectorconstituent', params)
else:
short_params = 'windcode=' + index_id
params = short_params if date is None else short_params + ';date=' + str(date)
raw_data = w.wset('IndexConstituent', params)
WindDataProvider.force_throw_err(raw_data, 'WindDataProvider.get_universe')
if output_weight:
return pd.DataFrame(data=raw_data.Data[3], index=raw_data.Data[1], columns=['weight'])
else:
return raw_data.Data[1]
except NameError:
pass
@staticmethod
def advance_date(date, unit, freq):
try:
ret = w.tdaysoffset(int(unit) * -1, date, 'period=' + freq)
WindDataProvider.force_throw_err(ret, 'WindDataProvider.advance_date')
return ret.Data[0][0]
except NameError:
pass
@staticmethod
def biz_days_list(start_date, end_date, freq):
try:
dates = w.tdays(start_date, end_date, 'period=' + freq)
WindDataProvider.force_throw_err(dates, 'WindDataProvider.biz_days_list')
return dates.Data[0]
except NameError:
pass
@staticmethod
def query_data(api, sec_id, indicator, extra_params, start_date=None, end_date=None):
if api == 'w.wsd':
ret = eval(api)(sec_id, indicator, start_date, end_date, extra_params)
elif api == 'w.wss':
ret = eval(api)(sec_id, indicator, extra_params)
else:
raise ValueError('WindDataProvider.query_data: unknown type of api')
WindDataProvider.force_throw_err(ret, 'WindDataProvider.query_data')
return ret
| Python | 0.000001 |
2a0e3fe9c83da1d11b892c7c35e367f414329936 | Update teaching_modules.py | src/ensae_teaching_cs/automation/teaching_modules.py | src/ensae_teaching_cs/automation/teaching_modules.py | # -*- coding: utf-8 -*-
"""
@file
@brief List of modules to maintain for the teachings.
"""
def get_teaching_modules():
"""
List of teachings modules to maintain (CI + documentation).
.. runpython::
:showcode:
from ensae_teaching_cs.automation import get_teaching_modules
print('\\n'.join(sorted(get_teaching_modules())))
"""
return ["pymlbenchmark", "_benchmarks", "ensae_teaching_dl", "machinelearningext",
"lecture_citation", "botadi", "pyquickhelper", "jyquickhelper",
"python3_module_template", "mathenjeu", "pymmails", "pymyinstall",
"pyensae", "pyrsslocal", "pysqllike", "ensae_projects", "ensae_teaching_cs",
"code_beatrix", "actuariat_python", "mlstatpy", "jupytalk", "teachpyx",
"tkinterquickhelper", "cpyquickhelper", "pandas_streaming",
"lightmlboard", "lightmlrestapi", "mlinsights", "pyenbc", "mlprodict",
"papierstat", "sparkouille", "manydataapi", "csharpy", "csharpyml",
"wrapclib", "myblog", "_check_python_install"
]
| # -*- coding: utf-8 -*-
"""
@file
@brief List of modules to maintain for the teachings.
"""
def get_teaching_modules():
"""
List of teachings modules to maintain (CI + documentation).
.. runpython::
:showcode:
from ensae_teaching_cs.automation import get_teaching_modules
print('\\n'.join(sorted(get_teaching_modules())))
"""
return ["pymlbenchmark", "_benchmarks", "ensae_teaching_dl", "machinelearningext",
"lecture_citation", "botadi", "pyquickhelper", "jyquickhelper",
"python3_module_template", "mathenjeu", "pymmails", "pymyinstall",
"pyensae", "pyrsslocal", "pysqllike", "ensae_projects", "ensae_teaching_cs",
"code_beatrix", "actuariat_python", "mlstatpy", "jupytalk", "teachpyx",
"tkinterquickhelper", "cpyquickhelper", "pandas_streaming",
"lightmlboard", "lightmlrestapi", "mlinsights", "pyenbc", "mlprodict",
"papierstat", "sparkouille", "manydataapi", "csharpy", "csharpyml",
"wrapclib", "myblog", "_check_python_install", "onnxcustom"
]
| Python | 0.000001 |
bc08499fd803278ea502bafdf845dec438f951f3 | Update range-sum-query-2d-immutable.py | Python/range-sum-query-2d-immutable.py | Python/range-sum-query-2d-immutable.py | # Time: ctor: O(m * n)
# lookup: O(1)
# Space: O(m * n)
#
# Given a 2D matrix matrix, find the sum of the elements inside
# the rectangle defined by its upper left corner (row1, col1)
# and lower right corner (row2, col2).
#
# Range Sum Query 2D
# The above rectangle (with the red border) is defined by
# (row1, col1) = (2, 1) and (row2, col2) = (4, 3),
# which contains sum = 8.
#
# Example:
# Given matrix = [
# [3, 0, 1, 4, 2],
# [5, 6, 3, 2, 1],
# [1, 2, 0, 1, 5],
# [4, 1, 0, 1, 7],
# [1, 0, 3, 0, 5]
# ]
#
# sumRegion(2, 1, 4, 3) -> 8
# sumRegion(1, 1, 2, 2) -> 11
# sumRegion(1, 2, 2, 4) -> 12
# Note:
# You may assume that the matrix does not change.
# There are many calls to sumRegion function.
# You may assume that row1 <= row2 and col1 <= col2.
class NumMatrix(object):
def __init__(self, matrix):
"""
initialize your data structure here.
:type matrix: List[List[int]]
"""
if not matrix:
return
m, n = len(matrix), len(matrix[0])
self.sums = [[0 for _ in xrange(n+1)] for _ in xrange(m+1)]
for i in xrange(1, m+1):
for j in xrange(1, n+1):
self.sums[i][j] = matrix[i-1][j-1]
self.sums[i][j] += self.sums[i][j-1]
for j in xrange(1, n+1):
for i in xrange(1, m+1):
self.sums[i][j] += self.sums[i-1][j]
def sumRegion(self, row1, col1, row2, col2):
"""
sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
return self.sums[row2+1][col2+1] - self.sums[row2+1][col1] - \
self.sums[row1][col2+1] + self.sums[row1][col1]
# Your NumMatrix object will be instantiated and called as such:
# numMatrix = NumMatrix(matrix)
# numMatrix.sumRegion(0, 1, 2, 3)
# numMatrix.sumRegion(1, 2, 3, 4)
| # Time: ctor: O(m * n)
# lookup: O(1)
# Space: O(m * n)
#
# Given a 2D matrix matrix, find the sum of the elements inside
# the rectangle defined by its upper left corner (row1, col1)
# and lower right corner (row2, col2).
#
# Range Sum Query 2D
# The above rectangle (with the red border) is defined by
# (row1, col1) = (2, 1) and (row2, col2) = (4, 3),
# which contains sum = 8.
#
# Example:
# Given matrix = [
# [3, 0, 1, 4, 2],
# [5, 6, 3, 2, 1],
# [1, 2, 0, 1, 5],
# [4, 1, 0, 1, 7],
# [1, 0, 3, 0, 5]
# ]
#
# sumRegion(2, 1, 4, 3) -> 8
# sumRegion(1, 1, 2, 2) -> 11
# sumRegion(1, 2, 2, 4) -> 12
# Note:
# You may assume that the matrix does not change.
# There are many calls to sumRegion function.
# You may assume that row1 <= row2 and col1 <= col2.
class NumMatrix(object):
def __init__(self, matrix):
"""
initialize your data structure here.
:type matrix: List[List[int]]
"""
if not matrix:
return
m, n = len(matrix), len(matrix[0])
self.sums = [[0 for _ in xrange(n+1)] for _ in xrange(m+1)]
for i in xrange(1, m+1):
for j in xrange(1, n+1):
self.sums[i][j] = matrix[i-1][j-1]
self.sums[i][j] += self.sums[i][j-1]
for j in xrange(1, n+1):
for i in xrange(1, m+1):
self.sums[i][j] += self.sums[i-1][j]
def sumRegion(self, row1, col1, row2, col2):
"""
sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
return self.sums[row2+1][col2+1] - self.sums[row2+1][col1] - \
self.sums[row1][col2+1] + self.sums[row1][col1]
# Your NumMatrix object will be instantiated and called as such:
# numMatrix = NumMatrix(matrix)
# numMatrix.sumRegion(0, 1, 2, 3)
# numMatrix.sumRegion(1, 2, 3, 4)
| Python | 0.000005 |
27f8b342e1a4bea9c807b005d16f932880bb7136 | Document utils.setup_readline | jedi/utils.py | jedi/utils.py | """
Utilities for end-users.
"""
import sys
from jedi import Interpreter
def readline_complete(text, state):
"""
Function to be passed to :func:`readline.set_completer`.
Usage::
import readline
readline.set_completer(readline_complete)
"""
ns = vars(sys.modules['__main__'])
completions = Interpreter(text, [ns]).completions()
try:
return text + completions[state].complete
except IndexError:
return None
def setup_readline():
"""
Install Jedi completer to :mod:`readline`.
This function setups :mod:`readline` to use Jedi in Python interactive
shell. If you want to use custom ``PYTHONSTARTUP`` file, you can call
this function like this:
>>> from jedi.utils import setup_readline
>>> setup_readline()
"""
try:
import readline
except ImportError:
print("Module readline not available.")
else:
readline.set_completer(readline_complete)
readline.parse_and_bind("tab: complete")
# Default delimiters minus "()":
readline.set_completer_delims(' \t\n`~!@#$%^&*-=+[{]}\\|;:\'",<>/?')
| """
Utilities for end-users.
"""
import sys
from jedi import Interpreter
def readline_complete(text, state):
"""
Function to be passed to :func:`readline.set_completer`.
Usage::
import readline
readline.set_completer(readline_complete)
"""
ns = vars(sys.modules['__main__'])
completions = Interpreter(text, [ns]).completions()
try:
return text + completions[state].complete
except IndexError:
return None
def setup_readline():
"""
Install Jedi completer to :mod:`readline`.
"""
try:
import readline
except ImportError:
print("Module readline not available.")
else:
readline.set_completer(readline_complete)
readline.parse_and_bind("tab: complete")
# Default delimiters minus "()":
readline.set_completer_delims(' \t\n`~!@#$%^&*-=+[{]}\\|;:\'",<>/?')
| Python | 0.000001 |
c9dceb4dc83490ab0eebcd3efc9590d3275f53df | tidy up messytables-jts integration | ktbh/schema.py | ktbh/schema.py | import unicodecsv
from cStringIO import StringIO
import messytables
import itertools
import slugify
import jsontableschema
from messytables.types import *
from messytables_jts import celltype_as_string
def censor(dialect):
tmp = dict(dialect)
censored = [
"doublequote",
"lineterminator",
"skipinitialspace"
]
[ tmp.pop(i) for i in censored ]
return tmp
def sabotage(d):
[ d.__setitem__(k, d[k].encode('utf-8')) for k in d
if isinstance(d[k], unicode) ]
def get_type_of_column(col):
try:
return celltype_as_string(col)
except:
return "any"
def infer_schema(data, _dialect):
f = StringIO(data)
sabotage(_dialect)
d = unicodecsv.reader(f, dialect=None, **_dialect)
field_names = d.next()
f.seek(0)
dialect = censor(_dialect)
t = messytables.CSVTableSet(f, **dialect).tables[0]
sample = itertools.islice(t, 0, 9)
types = messytables.type_guess(sample)
json_table_schema_types = map(get_type_of_column,
types)
slugs = [ slugify.slugify(i) for i in field_names ]
metadata = zip(slugs, field_names, json_table_schema_types)
sch = jsontableschema.JSONTableSchema()
for field_id, label, field_type in metadata:
sch.add_field(field_id=field_id,
label=label,
field_type=field_type)
return sch.as_json()
| import unicodecsv
from cStringIO import StringIO
import messytables
import itertools
import slugify
import jsontableschema
from messytables.types import *
from messytables_jts import rowset_as_schema
def censor(dialect):
tmp = dict(dialect)
censored = [
"doublequote",
"lineterminator",
"skipinitialspace"
]
[ tmp.pop(i) for i in censored ]
return tmp
def sabotage(d):
[ d.__setitem__(k, d[k].encode('utf-8')) for k in d
if isinstance(d[k], unicode) ]
def get_type_of_column(col):
try:
return rowset_as_schema(col)
except:
return "any"
def infer_schema(data, _dialect):
f = StringIO(data)
sabotage(_dialect)
d = unicodecsv.reader(f, dialect=None, **_dialect)
field_names = d.next()
f.seek(0)
dialect = censor(_dialect)
t = messytables.CSVTableSet(f, **dialect).tables[0]
sample = itertools.islice(t, 0, 9)
types = messytables.type_guess(sample)
json_table_schema_types = map(get_type_of_column(t),
types)
slugs = [ slugify.slugify(i) for i in field_names ]
metadata = zip(slugs, field_names, json_table_schema_types)
sch = jsontableschema.JSONTableSchema()
for field_id, label, field_type in metadata:
sch.add_field(field_id=field_id,
label=label,
field_type=field_type)
return sch.as_json()
| Python | 0 |
39de531241f987daf2f417fd419c7bd63248dd9d | Bump version number. | kyokai/util.py | kyokai/util.py | """
Misc utilities.
"""
import os
import pathlib
VERSION = "1.5.0"
VERSIONT = tuple(map(int, VERSION.split('.')))
HTTP_CODES = {
200: "OK",
201: "Created",
202: "Accepted",
203: "Non-Authoritative Information",
204: "No Content",
205: "Reset Content",
301: "Moved Permanently",
302: "Found",
303: "See Other",
304: "Not Modified",
400: "Bad Request",
401: "Unauthorized",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
410: "Gone",
413: "Payload Too Large",
429: "Too Many Requests",
500: "Internal Server Error"
}
def static_filename(filename: str) -> str:
"""
Naive static filename implementation, to allow serving static files.
"""
built = ""
p = pathlib.PurePath(filename)
for part in p.parts:
if part != "..":
built += part + os.path.sep
return built[:-1]
| """
Misc utilities.
"""
import os
import pathlib
VERSION = "1.3.8"
VERSIONT = tuple(map(int, VERSION.split('.')))
HTTP_CODES = {
200: "OK",
201: "Created",
202: "Accepted",
203: "Non-Authoritative Information",
204: "No Content",
205: "Reset Content",
301: "Moved Permanently",
302: "Found",
303: "See Other",
304: "Not Modified",
400: "Bad Request",
401: "Unauthorized",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
410: "Gone",
413: "Payload Too Large",
429: "Too Many Requests",
500: "Internal Server Error"
}
def static_filename(filename: str) -> str:
"""
Naive static filename implementation, to allow serving static files.
"""
built = ""
p = pathlib.PurePath(filename)
for part in p.parts:
if part != "..":
built += part + os.path.sep
return built[:-1]
| Python | 0 |
34ca71d5db9c1f17d236e5e49471fb6f2a6e1747 | Implement Paulo tip about router. | aldryn_search/router.py | aldryn_search/router.py | # -*- coding: utf-8 -*-
from django.conf import settings
from django.utils.translation import get_language
from cms.utils.i18n import alias_from_language
from haystack import routers
from haystack.constants import DEFAULT_ALIAS
class LanguageRouter(routers.BaseRouter):
def for_read(self, **hints):
language = get_language()
alias = alias_from_language(language)
if alias not in settings.HAYSTACK_CONNECTIONS:
return DEFAULT_ALIAS
return alias
def for_write(self, **hints):
language = get_language()
alias = alias_from_language(language)
if alias not in settings.HAYSTACK_CONNECTIONS:
return DEFAULT_ALIAS
return alias
| # -*- coding: utf-8 -*-
from django.conf import settings
from cms.utils.i18n import get_current_language
from haystack import routers
from haystack.constants import DEFAULT_ALIAS
class LanguageRouter(routers.BaseRouter):
def for_read(self, **hints):
language = get_current_language()
if language not in settings.HAYSTACK_CONNECTIONS:
return DEFAULT_ALIAS
return language
def for_write(self, **hints):
language = get_current_language()
if language not in settings.HAYSTACK_CONNECTIONS:
return DEFAULT_ALIAS
return language
| Python | 0 |
9fa2214b03d3264240b46fe5368b37ffa696f50c | Allow test selection according to tags | baf/src/baf.py | baf/src/baf.py | """The main module of the Bayesian API Fuzzer."""
import sys
from time import time
from fastlog import log
from csv_reader import read_csv_as_dicts
from setup import setup, parse_tags
from cliargs import cli_parser
from fuzzer import run_test
from results import Results
from report_generator import generate_reports
VERSION_MAJOR = 1
VERSION_MINOR = 0
def run_all_loaded_tests(cfg, fuzzer_settings, tests, results):
"""Run all tests read from CSV file."""
i = 1
for test in tests:
log.info("Starting test #{n} with name '{desc}'".format(n=i, desc=test["Name"]))
with log.indent():
run_test(cfg, fuzzer_settings, test, results)
i += 1
def run_tests_with_tags(cfg, fuzzer_settings, tests, results, tags):
"""Run tests read from CSV file that are marged by any of tags provided in tags parameter."""
i = 1
for test in tests:
test_tags = parse_tags(test["Tags"])
if tags <= test_tags:
log.info("Starting test #{n} with name '{desc}'".format(n=i, desc=test["Name"]))
with log.indent():
run_test(cfg, fuzzer_settings, test, results)
i += 1
else:
log.info("Skipping test #{n} with name '{desc}'".format(n=i, desc=test["Name"]))
def start_tests(cfg, fuzzer_settings, tests, results, tags):
"""Start all tests using the already loaded configuration and fuzzer settings."""
log.info("Run tests")
with log.indent():
if not tests or len(tests) == 0:
log.error("No tests loaded!")
sys.exit(-1)
if len(tests) == 1:
log.success("Loaded 1 test")
else:
log.success("Loaded {n} tests".format(n=len(tests)))
if not tags:
run_all_loaded_tests(cfg, fuzzer_settings, tests, results)
else:
run_tests_with_tags(cfg, fuzzer_settings, tests, results, tags)
def read_fuzzer_settings(filename):
"""Read fuzzer settings from the CSV file."""
log.info("Read fuzzer settings")
with log.indent():
fuzzer_settings = read_csv_as_dicts(filename)
if len(fuzzer_settings) == 1:
log.success("Loaded 1 setting")
else:
log.success("Loaded {n} settings".format(n=len(fuzzer_settings)))
return fuzzer_settings
def show_version():
"""Show BAF version."""
print("BAF version {major}.{minor}".format(major=VERSION_MAJOR, minor=VERSION_MINOR))
def main():
"""Entry point to the Bayesian API Fuzzer."""
log.setLevel(log.INFO)
cli_arguments = cli_parser.parse_args()
if cli_arguments.version:
show_version()
sys.exit(0)
else:
cfg = setup(cli_arguments)
fuzzer_settings = read_fuzzer_settings("fuzzer_settings.csv")
results = Results()
tests = read_csv_as_dicts(cfg["input_file"])
t1 = time()
tags = cfg["tags"]
start_tests(cfg, fuzzer_settings, tests, results, tags)
t2 = time()
generate_reports(tests, results, cfg, t2 - t1)
if __name__ == "__main__":
# execute only if run as a script
main()
| """The main module of the Bayesian API Fuzzer."""
import sys
from time import time
from fastlog import log
from csv_reader import read_csv_as_dicts
from setup import setup
from cliargs import cli_parser
from fuzzer import run_test
from results import Results
from report_generator import generate_reports
VERSION_MAJOR = 1
VERSION_MINOR = 0
def run_all_loaded_tests(cfg, fuzzer_settings, tests, results):
"""Run all tests read from CSV file."""
i = 1
for test in tests:
log.info("Starting test #{n} with name '{desc}'".format(n=i, desc=test["Name"]))
with log.indent():
run_test(cfg, fuzzer_settings, test, results)
i += 1
def start_tests(cfg, fuzzer_settings, tests, results):
"""Start all tests using the already loaded configuration and fuzzer settings."""
log.info("Run tests")
with log.indent():
if not tests or len(tests) == 0:
log.error("No tests loaded!")
sys.exit(-1)
if len(tests) == 1:
log.success("Loaded 1 test")
else:
log.success("Loaded {n} tests".format(n=len(tests)))
run_all_loaded_tests(cfg, fuzzer_settings, tests, results)
def read_fuzzer_settings(filename):
"""Read fuzzer settings from the CSV file."""
log.info("Read fuzzer settings")
with log.indent():
fuzzer_settings = read_csv_as_dicts(filename)
if len(fuzzer_settings) == 1:
log.success("Loaded 1 setting")
else:
log.success("Loaded {n} settings".format(n=len(fuzzer_settings)))
return fuzzer_settings
def show_version():
"""Show BAF version."""
print("BAF version {major}.{minor}".format(major=VERSION_MAJOR, minor=VERSION_MINOR))
def main():
"""Entry point to the Bayesian API Fuzzer."""
log.setLevel(log.INFO)
cli_arguments = cli_parser.parse_args()
if cli_arguments.version:
show_version()
sys.exit(0)
else:
cfg = setup(cli_arguments)
fuzzer_settings = read_fuzzer_settings("fuzzer_settings.csv")
results = Results()
tests = read_csv_as_dicts(cfg["input_file"])
t1 = time()
start_tests(cfg, fuzzer_settings, tests, results)
t2 = time()
generate_reports(tests, results, cfg, t2 - t1)
if __name__ == "__main__":
# execute only if run as a script
main()
| Python | 0 |
e5e523890dd1129402d7a0477468ee47dee3fd91 | Fix missing part/block conversion. | inbox/sendmail/smtp/common.py | inbox/sendmail/smtp/common.py | from inbox.sendmail.base import generate_attachments, SendError
from inbox.sendmail.smtp.postel import BaseSMTPClient
from inbox.sendmail.smtp.message import create_email, create_reply
class SMTPClient(BaseSMTPClient):
""" SMTPClient for Gmail and other providers. """
def _send_mail(self, db_session, message, smtpmsg):
"""Send the email message."""
# Send it using SMTP:
try:
return self._send(smtpmsg.recipients, smtpmsg.msg)
except SendError as e:
self.log.error(str(e))
raise
def send_new(self, db_session, draft, recipients):
"""
Send a previously created + saved draft email from this user account.
"""
inbox_uid = draft.inbox_uid
subject = draft.subject
body = draft.sanitized_body
blocks = [p.block for p in draft.attachments]
attachments = generate_attachments(blocks)
smtpmsg = create_email(self.sender_name, self.email_address,
inbox_uid, recipients, subject, body,
attachments)
return self._send_mail(db_session, draft, smtpmsg)
def send_reply(self, db_session, draft, recipients):
"""
Send a previously created + saved draft email reply from this user
account.
"""
inbox_uid = draft.inbox_uid
subject = draft.subject
body = draft.sanitized_body
blocks = [p.block for p in draft.attachments]
attachments = generate_attachments(blocks)
smtpmsg = create_reply(self.sender_name, self.email_address,
draft.in_reply_to, draft.references,
inbox_uid, recipients, subject, body,
attachments)
return self._send_mail(db_session, draft, smtpmsg)
| from inbox.sendmail.base import generate_attachments, SendError
from inbox.sendmail.smtp.postel import BaseSMTPClient
from inbox.sendmail.smtp.message import create_email, create_reply
class SMTPClient(BaseSMTPClient):
""" SMTPClient for Gmail and other providers. """
def _send_mail(self, db_session, message, smtpmsg):
"""Send the email message."""
# Send it using SMTP:
try:
return self._send(smtpmsg.recipients, smtpmsg.msg)
except SendError as e:
self.log.error(str(e))
raise
def send_new(self, db_session, draft, recipients):
"""
Send a previously created + saved draft email from this user account.
"""
inbox_uid = draft.inbox_uid
subject = draft.subject
body = draft.sanitized_body
attachments = generate_attachments(draft.attachments)
smtpmsg = create_email(self.sender_name, self.email_address,
inbox_uid, recipients, subject, body,
attachments)
return self._send_mail(db_session, draft, smtpmsg)
def send_reply(self, db_session, draft, recipients):
"""
Send a previously created + saved draft email reply from this user
account.
"""
inbox_uid = draft.inbox_uid
subject = draft.subject
body = draft.sanitized_body
attachments = generate_attachments(draft.attachments)
smtpmsg = create_reply(self.sender_name, self.email_address,
draft.in_reply_to, draft.references,
inbox_uid, recipients, subject, body,
attachments)
return self._send_mail(db_session, draft, smtpmsg)
| Python | 0 |
2808b0dfba0597e09d80eafedfead246779111d9 | Clean up verbose looking code | MOAL/data_structures/trees/binary_trees.py | MOAL/data_structures/trees/binary_trees.py | # -*- coding: utf-8 -*-
__author__ = """Chris Tabor (dxdstudio@gmail.com)"""
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from MOAL.helpers.display import Section
from MOAL.helpers.display import print_h4
from MOAL.helpers.display import cmd_title
from MOAL.data_structures.abstract.tree import Tree
DEBUG = True if __name__ == '__main__' else False
class InvalidChildNodeCount(Exception):
pass
class BinaryTree(Tree):
"""A binary tree is the same as a tree ADT, except each node must have a max
of two child nodes, unless it's a leaf node, in which case it has zero."""
def __setitem__(self, key, val):
if len(val['edges']) > 2:
raise InvalidChildNodeCount(
'Binary Tree cannot have more than two children!')
super(BinaryTree, self).__setitem__(key, val)
def get_left_child(self, node):
return node['edges'][0]
def get_right_child(self, node):
if len(node['edges'] < 2):
return None
return node['edges'][1]
def is_degenerate(self):
# TODO
pass
def is_pathological(self):
# TODO
return self.is_degenerate()
class BinarySearchTree(BinaryTree):
def __init__(self, *args, **kwargs):
super(BinarySearchTree, self).__init__(*args, **kwargs)
self.rebalance(self.get_root())
def _lt(self, node_a, node_b):
"""Comparator function, which can be used to implement a BST.
This should be sub-classed and overridden for customer comparisons,
beyond typical integer comparison."""
node_a = self.__getitem__(node_a)
node_b = self.__getitem__(node_b)
if 'val' in node_a and 'val' in node_b:
return node_a['val'] > node_b['val']
else:
return False
def rebalance(self, node):
edges = node['edges']
if len(edges) < 2:
return
if self._lt(edges[0], edges[1]):
list(reversed(edges))
class BifurcatingArborescence(BinaryTree):
"""A hilariously verbose alternative name for a Binary Tree!"""
if DEBUG:
with Section('Binary Tree'):
"""
0 root
/ \
/ \
1 2 interior
/ / \
/ / \
3 4 5 leaves
The tree above is represented in python code below.
"""
data = {
0: {'edges': [1, 2], 'is_root': True},
1: {'edges': [3], 'parent': 0},
2: {'edges': [4, 5], 'parent': 0},
3: {'edges': [], 'parent': 1},
4: {'edges': [], 'parent': 2},
5: {'edges': [], 'parent': 2},
}
btree = BinaryTree(data)
print(btree)
print_h4(
'Binary trees',
desc=('They can have no more than two nodes, '
'so adding new edges that do not conform'
' should throw an error.'))
try:
btree[6] = {'edges': [7, 8, 9], 'parent': 3}
except InvalidChildNodeCount:
cmd_title('Error called successfully', newlines=False)
bst = BinarySearchTree(data)
print(bst)
bst.add_child(5, 6)
bst.add_siblings(5, [10, 11])
print(bst)
| # -*- coding: utf-8 -*-
__author__ = """Chris Tabor (dxdstudio@gmail.com)"""
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from MOAL.helpers.display import Section
from MOAL.helpers.display import print_h4
from MOAL.helpers.display import cmd_title
from MOAL.data_structures.abstract.tree import Tree
DEBUG = True if __name__ == '__main__' else False
class InvalidChildNodeCount(Exception):
pass
class BinaryTree(Tree):
"""A binary tree is the same as a tree ADT, except each node must have a max
of two child nodes, unless it's a leaf node, in which case it has zero."""
def __setitem__(self, key, val):
if len(val['edges']) > 2:
raise InvalidChildNodeCount(
'Binary Tree cannot have more than two children!')
super(BinaryTree, self).__setitem__(key, val)
def get_left_child(self, node):
return node['edges'][0]
def get_right_child(self, node):
if len(node['edges'] < 2):
return None
return node['edges'][1]
def is_degenerate(self):
# TODO
pass
def is_pathological(self):
# TODO
return self.is_degenerate()
class BinarySearchTree(BinaryTree):
def __init__(self, *args, **kwargs):
super(BinarySearchTree, self).__init__(*args, **kwargs)
self.rebalance(self.get_root())
def _lt(self, node_a, node_b):
"""Comparator function, which can be used to implement a BST.
This should be sub-classed and overridden for customer comparisons,
beyond typical integer comparison."""
node_a = self.__getitem__(node_a)
node_b = self.__getitem__(node_b)
if 'val' in node_a and 'val' in node_b:
return node_a['val'] > node_b['val']
else:
return False
def rebalance(self, node):
if len(node['edges']) < 2:
return
if self._lt(node['edges'][0], node['edges'][1]):
list(reversed(node['edges']))
class BifurcatingArborescence(BinaryTree):
"""A hilariously verbose alternative name for a Binary Tree!"""
if DEBUG:
with Section('Binary Tree'):
"""
0 root
/ \
/ \
1 2 interior
/ / \
/ / \
3 4 5 leaves
The tree above is represented in python code below.
"""
data = {
0: {'edges': [1, 2], 'is_root': True},
1: {'edges': [3], 'parent': 0},
2: {'edges': [4, 5], 'parent': 0},
3: {'edges': [], 'parent': 1},
4: {'edges': [], 'parent': 2},
5: {'edges': [], 'parent': 2},
}
btree = BinaryTree(data)
print(btree)
print_h4(
'Binary trees',
desc=('They can have no more than two nodes, '
'so adding new edges that do not conform'
' should throw an error.'))
try:
btree[6] = {'edges': [7, 8, 9], 'parent': 3}
except InvalidChildNodeCount:
cmd_title('Error called successfully', newlines=False)
bst = BinarySearchTree(data)
print(bst)
bst.add_child(5, 6)
bst.add_siblings(5, [10, 11])
print(bst)
| Python | 0.998916 |
872a96b52061bd9ab3a3178aacf3e3d0be2cc498 | Make field filter errors ValidationErrors | nap/dataviews/fields.py | nap/dataviews/fields.py |
from django.db.models.fields import NOT_PROVIDED
from django.forms import ValidationError
from nap.utils import digattr
class field(property):
'''A base class to compare against.'''
def __get__(self, instance, cls=None):
if instance is None:
return self
return self.fget(instance._obj)
def __set__(self, instance, value):
self.fset(instance._obj, value)
class Field(field):
'''
class V(DataView):
foo = Field('bar', default=1)
'''
def __init__(self, name, default=NOT_PROVIDED, filters=None):
self.name = name
self.default = default
self.filters = filters or []
def __get__(self, instance, cls=None):
if instance is None:
return self
value = getattr(instance._obj, self.name, self.default)
for filt in self.filters:
try:
value = filt.from_python(value)
except (TypeError, ValueError):
raise ValidationError('Invalid value')
return value
def __set__(self, instance, value):
for filt in self.filters[::-1]:
value = filt.to_python(value)
setattr(instance._obj, self.name, value)
class DigField(Field):
def __get__(self, instance, cls=None):
if instance is None:
return self
return digattr(instance._obj, self.name, self.default)
def __set__(self, instance):
raise NotImplementedError
|
from django.db.models.fields import NOT_PROVIDED
from nap.utils import digattr
class field(property):
'''A base class to compare against.'''
def __get__(self, instance, cls=None):
if instance is None:
return self
return self.fget(instance._obj)
def __set__(self, instance, value):
self.fset(instance._obj, value)
class Field(field):
'''
class V(DataView):
foo = Field('bar', default=1)
'''
def __init__(self, name, default=NOT_PROVIDED, filters=None):
self.name = name
self.default = default
self.filters = filters or []
def __get__(self, instance, cls=None):
if instance is None:
return self
value = getattr(instance._obj, self.name, self.default)
for filt in self.filters:
value = filt.from_python(value)
return value
def __set__(self, instance, value):
for filt in self.filters[::-1]:
value = filt.to_python(value)
setattr(instance._obj, self.name, value)
class DigField(Field):
def __get__(self, instance, cls=None):
if instance is None:
return self
return digattr(instance._obj, self.name, self.default)
def __set__(self, instance):
raise NotImplementedError
| Python | 0.000008 |
f41bb86dd5263d63172b303a5a3993fc28e612dc | fix spelling of "received" | django-hq/apps/receiver/submitprocessor.py | django-hq/apps/receiver/submitprocessor.py | from models import *
import logging
import hashlib
import settings
import traceback
import sys
import os
import string
import uuid
from django.db import transaction
def get_submission_path():
return settings.rapidsms_apps_conf['receiver']['xform_submission_path']
@transaction.commit_on_success
def do_raw_submission(metadata, payload, domain=None, is_resubmission=False):
logging.debug("Begin do_raw_submission()")
transaction = str(uuid.uuid1())
new_submit = Submission()
new_submit.transaction_uuid = transaction
if is_resubmission:
new_submit.submit_ip = metadata['HTTP_ORIGINAL_IP']
new_submit.submit_time = datetime.strptime(metadata['HTTP_TIME_RECEIVED'], "%Y-%m-%d %H:%M:%S")
else:
if metadata.has_key('HTTP_X_FORWARDED_FOR'):
new_submit.submit_ip = metadata['HTTP_X_FORWARDED_FOR']
elif metadata.has_key('REMOTE_HOST'):
new_submit.submit_ip = metadata['REMOTE_HOST']
else:
new_submit.submit_ip = '127.0.0.1'
if metadata.has_key('HTTP_CONTENT_TYPE'):
content_type = metadata['HTTP_CONTENT_TYPE']
else:
content_type = metadata['CONTENT_TYPE']#"text/xml"
new_submit.raw_header = repr(metadata)
logging.debug("compute checksum")
new_submit.checksum = hashlib.md5(payload).hexdigest()
logging.debug("Get bytes")
#new_submit.bytes_received = int(request.META['HTTP_CONTENT_LENGTH'])
if metadata.has_key('HTTP_CONTENT_LENGTH'):
new_submit.bytes_received = int(metadata['HTTP_CONTENT_LENGTH'])
else:
new_submit.bytes_received = int(metadata['CONTENT_LENGTH'])
try:
newfilename = os.path.join(get_submission_path(),transaction + '.postdata')
logging.debug("try to write file")
fout = open(newfilename, 'w')
fout.write('Content-type: %s\n' % content_type.replace("'newdivider'","newdivider"))
fout.write('Content-length: %s\n\n' % new_submit.bytes_received)
fout.write(payload)
fout.close()
logging.debug("write successful")
new_submit.raw_post = newfilename
except:
logging.error("Unable to write raw post data")
logging.error("Unable to write raw post data: Exception: " + str(sys.exc_info()[0]))
logging.error("Unable to write raw post data: Traceback: " + str(sys.exc_info()[1]))
return '[error]'
#return render_to_response(template_name, context, context_instance=RequestContext(request))
logging.debug("try to write model")
new_submit.domain = domain
new_submit.save()
logging.debug("save to db successful")
return new_submit | from models import *
import logging
import hashlib
import settings
import traceback
import sys
import os
import string
import uuid
from django.db import transaction
def get_submission_path():
return settings.rapidsms_apps_conf['receiver']['xform_submission_path']
@transaction.commit_on_success
def do_raw_submission(metadata, payload, domain=None, is_resubmission=False):
logging.debug("Begin do_raw_submission()")
transaction = str(uuid.uuid1())
new_submit = Submission()
new_submit.transaction_uuid = transaction
if is_resubmission:
new_submit.submit_ip = metadata['HTTP_ORIGINAL_IP']
new_submit.submit_time = datetime.strptime(metadata['HTTP_TIME_RECEIEVED'], "%Y-%m-%d %H:%M:%S")
else:
if metadata.has_key('HTTP_X_FORWARDED_FOR'):
new_submit.submit_ip = metadata['HTTP_X_FORWARDED_FOR']
elif metadata.has_key('REMOTE_HOST'):
new_submit.submit_ip = metadata['REMOTE_HOST']
else:
new_submit.submit_ip = '127.0.0.1'
if metadata.has_key('HTTP_CONTENT_TYPE'):
content_type = metadata['HTTP_CONTENT_TYPE']
else:
content_type = metadata['CONTENT_TYPE']#"text/xml"
new_submit.raw_header = repr(metadata)
logging.debug("compute checksum")
new_submit.checksum = hashlib.md5(payload).hexdigest()
logging.debug("Get bytes")
#new_submit.bytes_received = int(request.META['HTTP_CONTENT_LENGTH'])
if metadata.has_key('HTTP_CONTENT_LENGTH'):
new_submit.bytes_received = int(metadata['HTTP_CONTENT_LENGTH'])
else:
new_submit.bytes_received = int(metadata['CONTENT_LENGTH'])
try:
newfilename = os.path.join(get_submission_path(),transaction + '.postdata')
logging.debug("try to write file")
fout = open(newfilename, 'w')
fout.write('Content-type: %s\n' % content_type.replace("'newdivider'","newdivider"))
fout.write('Content-length: %s\n\n' % new_submit.bytes_received)
fout.write(payload)
fout.close()
logging.debug("write successful")
new_submit.raw_post = newfilename
except:
logging.error("Unable to write raw post data")
logging.error("Unable to write raw post data: Exception: " + str(sys.exc_info()[0]))
logging.error("Unable to write raw post data: Traceback: " + str(sys.exc_info()[1]))
return '[error]'
#return render_to_response(template_name, context, context_instance=RequestContext(request))
logging.debug("try to write model")
new_submit.domain = domain
new_submit.save()
logging.debug("save to db successful")
return new_submit | Python | 0.999883 |
bbe263e8bd9bb12ccef681d4f21f6b90c89f059d | Remove some debug logging | flask/test/test_signup.py | flask/test/test_signup.py | from __future__ import unicode_literals
from test import TestCase
from web import app
from db import session, User
from nose.tools import eq_
class TestSignup(TestCase):
def test_sign_up(self):
app.test_client().post('/', data={'email': 'andrew@lorente.name'})
users = session().query(User.email).all()
eq_(users, [('andrew@lorente.name',)])
self.visit('/')
self.browser.fill('email', 'joe@lewis.name')
self.browser.find_by_name('go').click()
assert self.browser.is_text_present('Thanks'), 'rude!'
users = session().query(User.email).all()
eq_(users, [('andrew@lorente.name',), ('joe@lewis.name',)])
def test_valid_emails_get_validated(self):
self.visit('/')
self.browser.fill('email', 'eric@holscher.name')
assert self.browser.is_text_present('valid'), "didn't get validated"
def test_invalid_emails_get_yelled_about(self):
self.visit('/')
self.browser.fill('email', 'aghlaghlaghl')
assert self.browser.is_text_present('invalid'), "didn't get yelled at"
| from __future__ import unicode_literals
from test import TestCase
from web import app
from db import session, User
from nose.tools import eq_
class TestSignup(TestCase):
def test_sign_up(self):
app.test_client().post('/', data={'email': 'andrew@lorente.name'})
users = session().query(User.email).all()
eq_(users, [('andrew@lorente.name',)])
self.visit('/')
self.browser.fill('email', 'joe@lewis.name')
self.browser.find_by_name('go').click()
assert self.browser.is_text_present('Thanks'), 'rude!'
users = session().query(User.email).all()
eq_(users, [('andrew@lorente.name',), ('joe@lewis.name',)])
def test_valid_emails_get_validated(self):
print 'here before visit'
self.visit('/')
print 'here after visit'
self.browser.fill('email', 'eric@holscher.name')
assert self.browser.is_text_present('valid'), "didn't get validated"
def test_invalid_emails_get_yelled_about(self):
self.visit('/')
self.browser.fill('email', 'aghlaghlaghl')
assert self.browser.is_text_present('invalid'), "didn't get yelled at"
| Python | 0.000003 |
6d9e8e8831cd08fa358f33f155a760de3ec59f3b | document that this file is generated | Lib/fontTools/ttLib/tables/__init__.py | Lib/fontTools/ttLib/tables/__init__.py | # DON'T EDIT! This file is generated by MetaTools/buildTableList.py.
def _moduleFinderHint():
"""Dummy function to let modulefinder know what tables may be
dynamically imported. Generated by MetaTools/buildTableList.py.
"""
import B_A_S_E_
import C_F_F_
import D_S_I_G_
import G_D_E_F_
import G_P_O_S_
import G_S_U_B_
import J_S_T_F_
import L_T_S_H_
import O_S_2f_2
import T_S_I_B_
import T_S_I_D_
import T_S_I_J_
import T_S_I_P_
import T_S_I_S_
import T_S_I_V_
import T_S_I__0
import T_S_I__1
import T_S_I__2
import T_S_I__3
import T_S_I__5
import V_O_R_G_
import _c_m_a_p
import _c_v_t
import _f_p_g_m
import _g_a_s_p
import _g_l_y_f
import _h_d_m_x
import _h_e_a_d
import _h_h_e_a
import _h_m_t_x
import _k_e_r_n
import _l_o_c_a
import _m_a_x_p
import _n_a_m_e
import _p_o_s_t
import _p_r_e_p
import _v_h_e_a
import _v_m_t_x
| def _moduleFinderHint():
"""Dummy function to let modulefinder know what tables may be
dynamically imported. Generated by MetaTools/buildTableList.py.
"""
import B_A_S_E_
import C_F_F_
import D_S_I_G_
import G_D_E_F_
import G_P_O_S_
import G_S_U_B_
import J_S_T_F_
import L_T_S_H_
import O_S_2f_2
import T_S_I_B_
import T_S_I_D_
import T_S_I_J_
import T_S_I_P_
import T_S_I_S_
import T_S_I_V_
import T_S_I__0
import T_S_I__1
import T_S_I__2
import T_S_I__3
import T_S_I__5
import _c_m_a_p
import _c_v_t
import _f_p_g_m
import _g_a_s_p
import _g_l_y_f
import _h_d_m_x
import _h_e_a_d
import _h_h_e_a
import _h_m_t_x
import _k_e_r_n
import _l_o_c_a
import _m_a_x_p
import _n_a_m_e
import _p_o_s_t
import _p_r_e_p
import _v_h_e_a
import _v_m_t_x
| Python | 0.000003 |
1f977aa5fa28ed1e351f337191291198384abe02 | Set auth_encryption_key option to be secret | heat/common/crypt.py | heat/common/crypt.py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from Crypto.Cipher import AES
from oslo_config import cfg
from heat.openstack.common.crypto import utils
auth_opts = [
cfg.StrOpt('auth_encryption_key',
secret=True,
default='notgood but just long enough i t',
help="Key used to encrypt authentication info in the database. "
"Length of this key must be 16, 24 or 32 characters.")
]
cfg.CONF.register_opts(auth_opts)
def encrypt(auth_info):
if auth_info is None:
return None, None
sym = utils.SymmetricCrypto()
res = sym.encrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64encode=True)
return 'oslo_decrypt_v1', res
def oslo_decrypt_v1(auth_info):
if auth_info is None:
return None
sym = utils.SymmetricCrypto()
return sym.decrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64decode=True)
def heat_decrypt(auth_info):
"""Decrypt function for data that has been encrypted using an older
version of Heat.
Note: the encrypt function returns the function that is needed to
decrypt the data. The database then stores this. When the data is
then retrieved (potentially by a later version of Heat) the decrypt
function must still exist. So whilst it may seem that this function
is not referenced, it will be referenced from the database.
"""
if auth_info is None:
return None
auth = base64.b64decode(auth_info)
iv = auth[:AES.block_size]
cipher = AES.new(cfg.CONF.auth_encryption_key[:32], AES.MODE_CFB, iv)
res = cipher.decrypt(auth[AES.block_size:])
return res
def list_opts():
yield None, auth_opts
| #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from Crypto.Cipher import AES
from oslo_config import cfg
from heat.openstack.common.crypto import utils
auth_opts = [
cfg.StrOpt('auth_encryption_key',
default='notgood but just long enough i think',
help="Encryption key used for authentication info in database.")
]
cfg.CONF.register_opts(auth_opts)
def encrypt(auth_info):
if auth_info is None:
return None, None
sym = utils.SymmetricCrypto()
res = sym.encrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64encode=True)
return 'oslo_decrypt_v1', res
def oslo_decrypt_v1(auth_info):
if auth_info is None:
return None
sym = utils.SymmetricCrypto()
return sym.decrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64decode=True)
def heat_decrypt(auth_info):
"""Decrypt function for data that has been encrypted using an older
version of Heat.
Note: the encrypt function returns the function that is needed to
decrypt the data. The database then stores this. When the data is
then retrieved (potentially by a later version of Heat) the decrypt
function must still exist. So whilst it may seem that this function
is not referenced, it will be referenced from the database.
"""
if auth_info is None:
return None
auth = base64.b64decode(auth_info)
iv = auth[:AES.block_size]
cipher = AES.new(cfg.CONF.auth_encryption_key[:32], AES.MODE_CFB, iv)
res = cipher.decrypt(auth[AES.block_size:])
return res
def list_opts():
yield None, auth_opts
| Python | 0.000001 |
57100d99b58263a76f9bf405f7bdc62e8839552e | Fix bug with JSON serialization | model.py | model.py | import datetime
import random
from google.appengine.api import memcache
from google.appengine.ext import db
def from_milliseconds(millis):
return datetime.datetime.utcfromtimestamp(millis / 1000)
def to_milliseconds(date_time):
delta = date_time - from_milliseconds(0)
return int(round(delta.total_seconds() * 1000))
class ShardedCounterConfig(db.Expando):
"""
Represents the sharded counter config, that helps us figure out how many shards to use for a sharded counter
__key__ == name property in ShardedCounter
"""
name = db.StringProperty(required=True)
shards = db.IntegerProperty(default=1)
@classmethod
def cache_key(cls, name):
return 'shard_config_' + name
@classmethod
def get_sharded_config(cls, name):
cache_key = ShardedCounterConfig.cache_key(name)
config = memcache.get(cache_key)
if not config:
''' Try fetching from datastore '''
config = ShardedCounterConfig.get_or_insert(name, name=name, shards=20)
memcache.set(cache_key, config, time=86400)
return config
class CrashReport(db.Expando):
"""
Represents an Crash Report item
"""
name = db.StringProperty(required=True) # key_name and not the sharded key name
labels = db.StringListProperty(default=[])
crash = db.TextProperty(required=True)
fingerprint = db.StringProperty(required=True)
date_time = db.DateTimeProperty(required=True, default=datetime.datetime.utcnow())
count = db.IntegerProperty(default=0)
@classmethod
def get_count(cls, name):
total = memcache.get(name)
if total is None:
total = 0
q = CrashReport.all()
q.filter('name = ', name)
for counter in q.run():
total += counter.count
memcache.set(name, str(total))
''' total can be a string (when cached) '''
return int(total)
@classmethod
def add_or_remove(cls, fingerprint, crash, labels=None, is_add=True, delta=1):
key_name = CrashReport.key_name(fingerprint)
config = ShardedCounterConfig.get_sharded_config(key_name)
shards = config.shards
shard_to_use = random.randint(0, shards-1)
shard_key_name = key_name + '_' + str(shard_to_use)
crash_report = CrashReport.get_or_insert(shard_key_name,
name=key_name, crash=crash, fingerprint=fingerprint, labels=labels)
if is_add:
crash_report.count += delta
crash_report.put()
memcache.incr(key_name, delta, initial_value=0)
else:
crash_report.count -= delta
crash_report.put()
memcache.decr(key_name, delta)
return crash_report
@classmethod
def get_crash(cls, fingerprint):
q = CrashReport.all()
q.filter('name =', CrashReport.key_name(fingerprint))
crash_report = q.get()
if not crash_report:
return None
else:
return crash_report
@classmethod
def key_name(cls, name):
return cls.kind() + '_' + name
@classmethod
def to_json(cls, entity):
return {
'key': unicode(entity.key()),
'crash': entity.crash,
'labels': entity.labels or list(),
'fingerprint': entity.fingerprint,
'time': to_milliseconds(entity.date_time), # in millis
'count': cls.get_count(entity.name)
}
class Link(object):
"""
Represents a link (essentially contains the url, title and active properties).
"""
def __init__(self, title, url, active=False):
self.title = title
self.url = url
self.active = active
| import datetime
import random
from google.appengine.api import memcache
from google.appengine.ext import db
def from_milliseconds(millis):
return datetime.datetime.utcfromtimestamp(millis / 1000)
def to_milliseconds(date_time):
delta = date_time - from_milliseconds(0)
return int(round(delta.total_seconds() * 1000))
class ShardedCounterConfig(db.Expando):
"""
Represents the sharded counter config, that helps us figure out how many shards to use for a sharded counter
__key__ == name property in ShardedCounter
"""
name = db.StringProperty(required=True)
shards = db.IntegerProperty(default=1)
@classmethod
def cache_key(cls, name):
return 'shard_config_' + name
@classmethod
def get_sharded_config(cls, name):
cache_key = ShardedCounterConfig.cache_key(name)
config = memcache.get(cache_key)
if not config:
''' Try fetching from datastore '''
config = ShardedCounterConfig.get_or_insert(name, name=name, shards=20)
memcache.set(cache_key, config, time=86400)
return config
class CrashReport(db.Expando):
"""
Represents an Crash Report item
"""
name = db.StringProperty(required=True) # key_name and not the sharded key name
labels = db.StringListProperty(default=[])
crash = db.TextProperty(required=True)
fingerprint = db.StringProperty(required=True)
date_time = db.DateTimeProperty(required=True, default=datetime.datetime.utcnow())
count = db.IntegerProperty(default=0)
@classmethod
def get_count(cls, name):
total = memcache.get(name)
if total is None:
total = 0
q = CrashReport.all()
q.filter('name = ', name)
for counter in q.run():
total += counter.count
memcache.set(name, str(total))
''' total can be a string (when cached) '''
return int(total)
@classmethod
def add_or_remove(cls, fingerprint, crash, labels=None, is_add=True, delta=1):
key_name = CrashReport.key_name(fingerprint)
config = ShardedCounterConfig.get_sharded_config(key_name)
shards = config.shards
shard_to_use = random.randint(0, shards-1)
shard_key_name = key_name + '_' + str(shard_to_use)
crash_report = CrashReport.get_or_insert(shard_key_name,
name=key_name, crash=crash, fingerprint=fingerprint, labels=labels)
if is_add:
crash_report.count += delta
crash_report.put()
memcache.incr(key_name, delta, initial_value=0)
else:
crash_report.count -= delta
crash_report.put()
memcache.decr(key_name, delta)
return crash_report
@classmethod
def get_crash(cls, fingerprint):
q = CrashReport.all()
q.filter('name =', CrashReport.key_name(fingerprint))
crash_report = q.get()
if not crash_report:
return None
else:
return crash_report
@classmethod
def key_name(cls, name):
return cls.kind() + '_' + name
@classmethod
def to_json(cls, entity):
return {
'key': entity.key(),
'crash': entity.crash,
'labels': entity.labels or list(),
'fingerprint': entity.fingerprint,
'time': to_milliseconds(entity.date_time), # in millis
'count': cls.get_count(entity.name)
}
class Link(object):
"""
Represents a link (essentially contains the url, title and active properties).
"""
def __init__(self, title, url, active=False):
self.title = title
self.url = url
self.active = active
| Python | 0.000002 |
197a0440ddbf31aa87a6a6998b41344be4924076 | fix on the rows update | gui/placeslist.py | gui/placeslist.py | # -*- coding: utf8 -*-
from PyQt4 import QtGui
from PyQt4 import QtCore
class placesList(QtGui.QTableWidget):
_columns = ('Name', 'Type', 'X', 'Y', 'Locate')
_app = None
_parent = None
def __init__(self, parent, app):
"""
Initialisation of the window, creates the GUI and displays the window.
"""
self._app = app
QtGui.QTableView.__init__(self, parent)
self._parent = parent
self.setColumnCount(len(self._columns))
self.setHorizontalHeaderLabels(self._columns)
self.verticalHeader().setVisible(False)
self.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.setData()
def setData(self):
self.clearContents()
nbRowsToInsert = len(self._app.map.places)
for index, row in enumerate(self._app.map.places):
if self.rowCount() < nbRowsToInsert:
self.insertRow(index)
self.setItem(index, 0, QtGui.QTableWidgetItem(row['name']))
self.setItem(index, 1, QtGui.QTableWidgetItem(self._app.map.getPlaceTypesLabels()[row['type']]))
self.setItem(index, 2, QtGui.QTableWidgetItem(str(row['coordinates'][0])))
self.setItem(index, 3, QtGui.QTableWidgetItem(str(row['coordinates'][1])))
self.setCellWidget(index, 4, QtGui.QPushButton("Locate"))
self.resizeColumnsToContents()
| # -*- coding: utf8 -*-
from PyQt4 import QtGui
from PyQt4 import QtCore
class placesList(QtGui.QTableWidget):
_columns = ('Name', 'Type', 'X', 'Y', 'Locate')
_app = None
_parent = None
def __init__(self, parent, app):
"""
Initialisation of the window, creates the GUI and displays the window.
"""
self._app = app
QtGui.QTableView.__init__(self, parent)
self._parent = parent
self.setColumnCount(len(self._columns))
self.setHorizontalHeaderLabels(self._columns)
self.verticalHeader().setVisible(False)
self.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.setData()
def setData(self):
for index, row in enumerate(self._app.map.places):
self.insertRow(index)
self.setItem(index, 0, QtGui.QTableWidgetItem(row['name']))
self.setItem(index, 1, QtGui.QTableWidgetItem(self._app.map.getPlaceTypesLabels()[row['type']]))
self.setItem(index, 2, QtGui.QTableWidgetItem(str(row['coordinates'][0])))
self.setItem(index, 3, QtGui.QTableWidgetItem(str(row['coordinates'][1])))
self.setCellWidget(index, 4, QtGui.QPushButton("Locate"))
self.resizeColumnsToContents()
| Python | 0 |
29e18edf18c14cd11c8cebd93548eeadbb61b1da | Fix biases by using correct shape | model.py | model.py | import tensorflow as tf
def make_weight_variable(name, num_inputs, num_outputs):
return tf.get_variable(
name,
[num_inputs, num_outputs],
initializer=tf.contrib.layers.variance_scaling_initializer()
)
class Model:
def __init__(self, chars, max_steps, lstm_units=250, l1_units=200, l2_units=150,
learning_rate=0.001, l2=0.001):
self.chars = chars
self.max_steps = max_steps
# Define placeholders for training data
self.features = tf.placeholder(dtype=tf.int32, shape=[None, max_steps])
self.labels = tf.placeholder(dtype=tf.int32, shape=[None, max_steps])
self.mask = tf.placeholder(dtype=tf.float32, shape=[None, max_steps])
# Define LSTM layer
features_one_hot = tf.one_hot(self.features, len(chars) + 1, dtype=tf.float32)
lstm_3d, _ = tf.nn.dynamic_rnn(
cell=tf.contrib.rnn.LSTMCell(num_units=lstm_units),
dtype=tf.float32,
inputs=features_one_hot
)
lstm_flat = tf.reshape(lstm_3d, [-1, lstm_units])
# Define first ReLU layer
l1_weights = make_weight_variable("l1-weights", lstm_units, l1_units)
l1_biases = tf.Variable(tf.constant(0.1, shape=[l1_units]), name='l1-biases')
layer1 = tf.nn.relu(tf.matmul(lstm_flat, l1_weights) + l1_biases)
# Define second ReLU layer
l2_weights = make_weight_variable("l2-weights", l1_units, l2_units)
l2_biases = tf.Variable(tf.constant(0.1, shape=[l2_units]), name='l2-biases')
layer2 = tf.nn.relu(tf.matmul(layer1, l2_weights) + l2_biases)
# Define output layer
out_len = len(chars) + 1
out_weights = make_weight_variable("out-weights", l2_units, out_len)
out_biases = tf.Variable(tf.constant(0.1, shape=[out_len]), name='out-biases')
self.out_logits = tf.matmul(layer2, out_weights) + out_biases
# Define training objective
loss_flat = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.reshape(self.labels, [-1]),
logits=self.out_logits
)
loss_flat_masked = loss_flat * tf.reshape(self.mask, [-1])
self.loss = tf.reduce_mean(loss_flat_masked)
weight_vars = [v for v in tf.trainable_variables() if 'bias' not in v.name]
self.l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in weight_vars]) * l2
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = optimizer.minimize(self.loss + self.l2_loss)
| import tensorflow as tf
def make_weight_variable(name, num_inputs, num_outputs):
return tf.get_variable(
name,
[num_inputs, num_outputs],
initializer=tf.contrib.layers.variance_scaling_initializer()
)
class Model:
def __init__(self, chars, max_steps, lstm_units=250, l1_units=200, l2_units=150,
learning_rate=0.001, l2=0.001):
self.chars = chars
self.max_steps = max_steps
# Define placeholders for training data
self.features = tf.placeholder(dtype=tf.int32, shape=[None, max_steps])
self.labels = tf.placeholder(dtype=tf.int32, shape=[None, max_steps])
self.mask = tf.placeholder(dtype=tf.float32, shape=[None, max_steps])
# Define LSTM layer
features_one_hot = tf.one_hot(self.features, len(chars) + 1, dtype=tf.float32)
lstm_3d, _ = tf.nn.dynamic_rnn(
cell=tf.contrib.rnn.LSTMCell(num_units=lstm_units),
dtype=tf.float32,
inputs=features_one_hot
)
lstm_flat = tf.reshape(lstm_3d, [-1, lstm_units])
# Define first ReLU layer
l1_weights = make_weight_variable("l1-weights", lstm_units, l1_units)
l1_biases = tf.Variable(0.1, name='l1-biases')
layer1 = tf.nn.relu(tf.matmul(lstm_flat, l1_weights) + l1_biases)
# Define second ReLU layer
l2_weights = make_weight_variable("l2-weights", l1_units, l2_units)
l2_biases = tf.Variable(0.1, name='l2-biases')
layer2 = tf.nn.relu(tf.matmul(layer1, l2_weights) + l2_biases)
# Define output layer
out_weights = make_weight_variable("out-weights", l2_units, len(chars) + 1)
out_biases = tf.Variable(0.1, name='out-biases')
self.out_logits = tf.matmul(layer2, out_weights) + out_biases
# Define training objective
loss_flat = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.reshape(self.labels, [-1]),
logits=self.out_logits
)
loss_flat_masked = loss_flat * tf.reshape(self.mask, [-1])
self.loss = tf.reduce_mean(loss_flat_masked)
weight_vars = [v for v in tf.trainable_variables() if 'bias' not in v.name]
self.l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in weight_vars]) * l2
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = optimizer.minimize(self.loss + self.l2_loss)
| Python | 0.0001 |
5809fc832340e3ee5d798fa347e4933e874bdb8b | Allow older django to find the tests | voting/tests/__init__.py | voting/tests/__init__.py | import django
if django.VERSION[0] == 1 and django.VERSION[1] < 6:
from .tests import *
| Python | 0.000003 | |
b33b6c7a5ae8835514389340ff3152f03f619984 | prefix number underscore sep | holodeck/settings.py | holodeck/settings.py | LOGICAL_SHARDS = 8
PHYSICAL_SHARDS = [
{
'ENGINE': 'django.db.backends.sqlite3',
'NAME_PREFIX': 'holodeck_1',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
},
{
'ENGINE': 'django.db.backends.sqlite3',
'NAME_PREFIX': 'holodeck_2',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
},
]
| LOGICAL_SHARDS = 8
PHYSICAL_SHARDS = [
{
'ENGINE': 'django.db.backends.sqlite3',
'NAME_PREFIX': 'holodeck1',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
},
{
'ENGINE': 'django.db.backends.sqlite3',
'NAME_PREFIX': 'holodeck2',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
},
]
| Python | 0.998789 |
9246d33429e1940d5a98c3c16e708159437b88fa | enable header modification | lib/neuroimaging/tools/AnalyzeHeaderTool.py | lib/neuroimaging/tools/AnalyzeHeaderTool.py | import os, sys
from optparse import OptionParser, Option
from neuroimaging.data import DataSource
from neuroimaging.refactoring.analyze import struct_fields, AnalyzeHeader
##############################################################################
class AnalyzeHeaderTool (OptionParser):
"Command-line tool for getting and setting Analyze header values."
_usage= "%prog [options] <hdrfile>\n"+__doc__
options = (
Option('-a', '--attribute', dest="attname",
help="Get or set this attribute"),
Option('-v', '--value', dest="value",
help="Set attribute to this value"))
#-------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
OptionParser.__init__(self, *args, **kwargs)
self.set_usage(self._usage)
self.add_options(self.options)
#-------------------------------------------------------------------------
def _error(self, message):
print message
self.print_help()
sys.exit(0)
#-------------------------------------------------------------------------
def run(self):
options, args = self.parse_args()
if len(args) != 1: self._error("Please provide a header file name")
filename = args[0]
if not DataSource().exists(filename):
self._error("File not found: %s"%filename)
header = AnalyzeHeader(filename)
attname, value = options.attname, options.value
if attname is not None:
if value is not None:
print "before: %s\t%s"%(attname, getattr(header, attname))
setattr(header, attname, value)
print "after: %s\t%s"%(attname, getattr(header, attname))
header.write(filename+".new")
else: print "%s\t%s"%(attname, getattr(header, attname))
elif value is not None:
self._error("Only provide a value when an attribute is provided")
else: print header
if __name__ == "__main__": AnalyzeHeaderTool().run()
| import os, sys
from optparse import OptionParser, Option
from neuroimaging.data import DataSource
from neuroimaging.refactoring.analyze import struct_fields, AnalyzeHeader
##############################################################################
class AnalyzeHeaderTool (OptionParser):
"Command-line tool for getting and setting Analyze header values."
_usage= "%prog [options] <hdrfile>\n"+__doc__
options = (
Option('-a', '--attribute', dest="attname",
help="Get or set this attribute"),
Option('-v', '--value', dest="value",
help="Set attribute to this value"))
#-------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
OptionParser.__init__(self, *args, **kwargs)
self.set_usage(self._usage)
self.add_options(self.options)
#-------------------------------------------------------------------------
def _error(self, message):
print message
self.print_help()
sys.exit(0)
#-------------------------------------------------------------------------
def run(self):
options, args = self.parse_args()
if len(args) != 1: self._error("Please provide a header file name")
filename = args[0]
if not DataSource().exists(filename):
self._error("File not found: %s"%filename)
header = AnalyzeHeader(filename)
attname, value = options.attname, options.value
if attname is not None:
if value is not None:
print "before: %s = %s"%(attname, getattr(header, attname))
setattr(header, attname, value)
print "after: %s = %s"%(attname, getattr(header, attname))
#write back out
else: print "%s = %s"%(attname, getattr(header, attname))
elif value is not None:
self._error("Only provide a value when an attribute is provided")
else: print header
if __name__ == "__main__": AnalyzeHeaderTool().run()
| Python | 0 |
1117f6e2d51ed6faf37aa2a6deab8a6ff8fa0e5b | test for compiling simple command | linemode/tests/test_command_list_printer.py | linemode/tests/test_command_list_printer.py | import unittest
from linemode.drivers.command_list import compile
class TestCommandListPrinter(unittest.TestCase):
def test_simple_command(self):
program = compile([
"reset"
])
self.assertEqual(program, b'reset')
| import unittest
from linemode.drivers.command_list import CommandListPrinter
class TestCommandListPrinter(unittest.TestCase):
pass
| Python | 0.000001 |
5662a6cb9cd567b5e08398e4e5394f8049f02741 | Update tests to allow for id in content type json | feincms_extensions/tests/test_content_types.py | feincms_extensions/tests/test_content_types.py | import datetime
from django.test import TestCase
from . import factories
from .models import Dummy
from .. import content_types
class TestJsonRichTextContent(TestCase):
model = Dummy.content_type_for(content_types.JsonRichTextContent)
def test_json(self):
"""A JsonRichTextContent can be rendered to json."""
text = 'Rich Text'
pk = 42
content = self.model(region='body', text=text, pk=pk)
self.assertEqual(content.json(), {
'content_type': 'rich-text',
'html': text,
'id': pk,
})
class TestJsonSectionContent(TestCase):
model = Dummy.content_type_for(content_types.JsonSectionContent)
def test_json(self):
"""A JsonSectionContent can be rendered to json."""
title = 'Section 1'
richtext = 'Rich Text'
image_type = 'image'
copyright = 'Incuna'
pk = 42
created = datetime.datetime(year=2015, month=3, day=1)
image = factories.MediaFileFactory.build(
type=image_type,
copyright=copyright,
created=created,
)
content = self.model(
region='body',
title=title,
richtext=richtext,
mediafile=image,
pk=pk,
)
expected = {
'content_type': 'section',
'id': pk,
'title': title,
'html': richtext,
'mediafile': {
'url': image.file.url,
'type': image_type,
'created': created,
'copyright': copyright,
'file_size': image.file.size,
},
}
self.assertEqual(content.json(), expected)
def test_json_no_mediafile(self):
"""A JsonSectionContent can be rendered to json."""
title = 'Section 1'
richtext = 'Rich Text'
pk = 42
content = self.model(
pk=pk,
region='body',
title=title,
richtext=richtext,
mediafile=None,
)
expected = {
'content_type': 'section',
'title': title,
'html': richtext,
'mediafile': None,
}
self.assertEqual(content.json(), expected)
class TestJsonMediaFileContent(TestCase):
model = Dummy.content_type_for(content_types.JsonMediaFileContent)
def test_json(self):
"""A JsonMediaFileContent can be rendered to json."""
image_type = 'image'
copyright = 'Incuna'
created = datetime.datetime(year=2015, month=3, day=1)
pk = 42
image = factories.MediaFileFactory.build(
type=image_type,
copyright=copyright,
created=created,
pk=pk,
)
content = self.model(region='body', mediafile=image)
expected = {
'content_type': 'media-file',
'id': pk,
'url': image.file.url,
'type': image_type,
'created': created,
'copyright': copyright,
'file_size': image.file.size,
}
self.assertEqual(content.json(), expected)
| import datetime
from django.test import TestCase
from . import factories
from .models import Dummy
from .. import content_types
class TestJsonRichTextContent(TestCase):
model = Dummy.content_type_for(content_types.JsonRichTextContent)
def test_json(self):
"""A JsonRichTextContent can be rendered to json."""
text = 'Rich Text'
content = self.model(region='body', text=text)
self.assertEqual(content.json(), {
'content_type': 'rich-text',
'html': text,
})
class TestJsonSectionContent(TestCase):
model = Dummy.content_type_for(content_types.JsonSectionContent)
def test_json(self):
"""A JsonSectionContent can be rendered to json."""
title = 'Section 1'
richtext = 'Rich Text'
image_type = 'image'
copyright = 'Incuna'
created = datetime.datetime(year=2015, month=3, day=1)
image = factories.MediaFileFactory.build(
type=image_type,
copyright=copyright,
created=created,
)
content = self.model(
region='body',
title=title,
richtext=richtext,
mediafile=image,
)
expected = {
'content_type': 'section',
'title': title,
'html': richtext,
'mediafile': {
'url': image.file.url,
'type': image_type,
'created': created,
'copyright': copyright,
'file_size': image.file.size,
},
}
self.assertEqual(content.json(), expected)
def test_json_no_mediafile(self):
"""A JsonSectionContent can be rendered to json."""
title = 'Section 1'
richtext = 'Rich Text'
content = self.model(
region='body',
title=title,
richtext=richtext,
mediafile=None,
)
expected = {
'content_type': 'section',
'title': title,
'html': richtext,
'mediafile': None,
}
self.assertEqual(content.json(), expected)
class TestJsonMediaFileContent(TestCase):
model = Dummy.content_type_for(content_types.JsonMediaFileContent)
def test_json(self):
"""A JsonMediaFileContent can be rendered to json."""
image_type = 'image'
copyright = 'Incuna'
created = datetime.datetime(year=2015, month=3, day=1)
image = factories.MediaFileFactory.build(
type=image_type,
copyright=copyright,
created=created,
)
content = self.model(region='body', mediafile=image)
expected = {
'content_type': 'media-file',
'url': image.file.url,
'type': image_type,
'created': created,
'copyright': copyright,
'file_size': image.file.size,
}
self.assertEqual(content.json(), expected)
| Python | 0 |
3f0b19d153360ee5cf1fda1acfa0e4ad846b6c86 | fix admin.py, remove site on AccountAccess and add it on Provider | allaccess/admin.py | allaccess/admin.py | from django.contrib import admin
from .models import Provider, AccountAccess
class ProviderAdmin(admin.ModelAdmin):
"Admin customization for OAuth providers."
list_display = ('name', 'enabled', 'site',)
list_filter = ('name', 'enabled', 'site', )
class AccountAccessAdmin(admin.ModelAdmin):
"Admin customization for accounts."
list_display = (
'__str__', 'provider', 'user', 'created', 'modified',)
list_filter = ('provider', 'created', 'modified', )
admin.site.register(Provider, ProviderAdmin)
admin.site.register(AccountAccess, AccountAccessAdmin)
| from django.contrib import admin
from .models import Provider, AccountAccess
class ProviderAdmin(admin.ModelAdmin):
"Admin customization for OAuth providers."
list_display = ('name', 'enabled', )
class AccountAccessAdmin(admin.ModelAdmin):
"Admin customization for accounts."
list_display = (
'__str__', 'provider', 'user', 'created', 'modified', 'site',)
list_filter = ('provider', 'created', 'modified', 'site', )
admin.site.register(Provider, ProviderAdmin)
admin.site.register(AccountAccess, AccountAccessAdmin)
| Python | 0 |
ee6d4f50b4a27e9cc8c3b5f8a821a6d9c0cf4f21 | remove unwanted changes | frappe/website/page_renderers/document_page.py | frappe/website/page_renderers/document_page.py | import frappe
from frappe.model.document import get_controller
from frappe.website.page_renderers.base_template_page import BaseTemplatePage
from frappe.website.utils import cache_html
from frappe.website.router import (get_doctypes_with_web_view,
get_page_info_from_web_page_with_dynamic_routes)
class DocumentPage(BaseTemplatePage):
def can_render(self):
'''
Find a document with matching `route` from all doctypes with `has_web_view`=1
'''
if self.search_in_doctypes_with_web_view():
return True
if self.search_web_page_dynamic_routes():
return True
return False
def search_in_doctypes_with_web_view(self):
for doctype in get_doctypes_with_web_view():
filters = dict(route=self.path)
meta = frappe.get_meta(doctype)
condition_field = self.get_condition_field(meta)
if condition_field:
filters[condition_field] = 1
try:
self.docname = frappe.db.get_value(doctype, filters, 'name')
if self.docname:
self.doctype = doctype
return True
except Exception as e:
if not frappe.db.is_missing_column(e):
raise e
def search_web_page_dynamic_routes(self):
d = get_page_info_from_web_page_with_dynamic_routes(self.path)
if d:
self.doctype = 'Web Page'
self.docname = d.name
return True
else:
return False
def render(self):
html = self.get_html()
html = self.add_csrf_token(html)
return self.build_response(html)
@cache_html
def get_html(self):
self.doc = frappe.get_doc(self.doctype, self.docname)
self.init_context()
self.update_context()
self.post_process_context()
html = frappe.get_template(self.template_path).render(self.context)
return html
def update_context(self):
self.context.doc = self.doc
self.context.update(self.context.doc.as_dict())
self.context.update(self.context.doc.get_page_info())
self.template_path = self.context.template or self.template_path
if not self.template_path:
self.template_path = self.context.doc.meta.get_web_template()
if hasattr(self.doc, "get_context"):
ret = self.doc.get_context(self.context)
if ret:
self.context.update(ret)
for prop in ("no_cache", "sitemap"):
if prop not in self.context:
self.context[prop] = getattr(self.doc, prop, False)
def get_condition_field(self, meta):
condition_field = None
if meta.is_published_field:
condition_field = meta.is_published_field
elif not meta.custom:
controller = get_controller(meta.name)
condition_field = controller.website.condition_field
return condition_field
| import frappe
from frappe.model.document import get_controller
from frappe.website.page_renderers.base_template_page import BaseTemplatePage
from frappe.website.utils import build_response
from frappe.website.router import (get_doctypes_with_web_view,
get_page_info_from_web_page_with_dynamic_routes)
class DocumentPage(BaseTemplatePage):
def can_render(self):
'''
Find a document with matching `route` from all doctypes with `has_web_view`=1
'''
if self.search_in_doctypes_with_web_view():
return True
if self.search_web_page_dynamic_routes():
return True
return False
def search_in_doctypes_with_web_view(self):
for doctype in get_doctypes_with_web_view():
filters = dict(route=self.path)
meta = frappe.get_meta(doctype)
condition_field = self.get_condition_field(meta)
if condition_field:
filters[condition_field] = 1
try:
self.docname = frappe.db.get_value(doctype, filters, 'name')
if self.docname:
self.doctype = doctype
return True
except Exception as e:
if not frappe.db.is_missing_column(e):
raise e
def search_web_page_dynamic_routes(self):
d = get_page_info_from_web_page_with_dynamic_routes(self.path)
if d:
self.doctype = 'Web Page'
self.docname = d.name
return True
else:
return False
def render(self):
self.doc = frappe.get_doc(self.doctype, self.docname)
self.init_context()
self.update_context()
self.post_process_context()
html = frappe.get_template(self.template_path).render(self.context)
html = self.add_csrf_token(html)
return build_response(self.path, html, self.http_status_code or 200, self.headers)
def update_context(self):
self.context.doc = self.doc
self.context.update(self.context.doc.as_dict())
self.context.update(self.context.doc.get_page_info())
self.template_path = self.context.template or self.template_path
if not self.template_path:
self.template_path = self.context.doc.meta.get_web_template()
if hasattr(self.doc, "get_context"):
ret = self.doc.get_context(self.context)
if ret:
self.context.update(ret)
for prop in ("no_cache", "sitemap"):
if prop not in self.context:
self.context[prop] = getattr(self.doc, prop, False)
def get_condition_field(self, meta):
condition_field = None
if meta.is_published_field:
condition_field = meta.is_published_field
elif not meta.custom:
controller = get_controller(meta.name)
condition_field = controller.website.condition_field
return condition_field
| Python | 0.005417 |
a5f1ad3e47daf3f8db04b605fb13ff3f9f871e3a | Divide entire loss by n, not just mll component. | gpytorch/mlls/exact_marginal_log_likelihood.py | gpytorch/mlls/exact_marginal_log_likelihood.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import torch
from .marginal_log_likelihood import MarginalLogLikelihood
from ..lazy import LazyVariable, NonLazyVariable
from ..likelihoods import GaussianLikelihood
from ..random_variables import GaussianRandomVariable, MultitaskGaussianRandomVariable
from ..variational import MVNVariationalStrategy
class ExactMarginalLogLikelihood(MarginalLogLikelihood):
def __init__(self, likelihood, model):
"""
A special MLL designed for exact inference
Args:
- likelihood: (Likelihood) - the likelihood for the model
- model: (Module) - the exact GP model
"""
if not isinstance(likelihood, GaussianLikelihood):
raise RuntimeError("Likelihood must be Gaussian for exact inference")
super(ExactMarginalLogLikelihood, self).__init__(likelihood, model)
def forward(self, output, target):
if not isinstance(output, GaussianRandomVariable) and not isinstance(output, MultitaskGaussianRandomVariable):
raise RuntimeError("ExactMarginalLogLikelihood can only operate on Gaussian random variables")
if not isinstance(output.covar(), LazyVariable):
output = output.__class__(output.mean(), NonLazyVariable(output.covar()))
mean, covar = self.likelihood(output).representation()
n_data = target.size(-1)
if target.size() != mean.size():
raise RuntimeError(
"Expected target size to equal mean size, but got {} and {}".format(target.size(), mean.size())
)
if isinstance(output, MultitaskGaussianRandomVariable):
if target.ndimension() == 2:
mean = mean.view(-1)
target = target.view(-1)
elif target.ndimension() == 3:
mean = mean.view(mean.size(0), -1)
target = target.view(target.size(0), -1)
# Get log determininat and first part of quadratic form
inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
# Add terms for SGPR / when inducing points are learned
trace_diff = torch.zeros_like(inv_quad)
for variational_strategy in self.model.variational_strategies():
if isinstance(variational_strategy, MVNVariationalStrategy):
trace_diff = trace_diff.add(variational_strategy.trace_diff())
trace_diff = trace_diff / self.likelihood.log_noise.exp()
res = -0.5 * sum([inv_quad, log_det, n_data * math.log(2 * math.pi), -trace_diff])
# Add log probs of priors on the parameters
for _, param, prior in self.named_parameter_priors():
res.add_(prior.log_prob(param).sum())
for _, prior, params, transform in self.named_derived_priors():
res.add_(prior.log_prob(transform(*params)).sum())
return res.div_(n_data)
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import torch
from .marginal_log_likelihood import MarginalLogLikelihood
from ..lazy import LazyVariable, NonLazyVariable
from ..likelihoods import GaussianLikelihood
from ..random_variables import GaussianRandomVariable, MultitaskGaussianRandomVariable
from ..variational import MVNVariationalStrategy
class ExactMarginalLogLikelihood(MarginalLogLikelihood):
def __init__(self, likelihood, model):
"""
A special MLL designed for exact inference
Args:
- likelihood: (Likelihood) - the likelihood for the model
- model: (Module) - the exact GP model
"""
if not isinstance(likelihood, GaussianLikelihood):
raise RuntimeError("Likelihood must be Gaussian for exact inference")
super(ExactMarginalLogLikelihood, self).__init__(likelihood, model)
def forward(self, output, target):
if not isinstance(output, GaussianRandomVariable) and not isinstance(output, MultitaskGaussianRandomVariable):
raise RuntimeError("ExactMarginalLogLikelihood can only operate on Gaussian random variables")
if not isinstance(output.covar(), LazyVariable):
output = output.__class__(output.mean(), NonLazyVariable(output.covar()))
mean, covar = self.likelihood(output).representation()
n_data = target.size(-1)
if target.size() != mean.size():
raise RuntimeError(
"Expected target size to equal mean size, but got {} and {}".format(target.size(), mean.size())
)
if isinstance(output, MultitaskGaussianRandomVariable):
if target.ndimension() == 2:
mean = mean.view(-1)
target = target.view(-1)
elif target.ndimension() == 3:
mean = mean.view(mean.size(0), -1)
target = target.view(target.size(0), -1)
# Get log determininat and first part of quadratic form
inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
# Add terms for SGPR / when inducing points are learned
trace_diff = torch.zeros_like(inv_quad)
for variational_strategy in self.model.variational_strategies():
if isinstance(variational_strategy, MVNVariationalStrategy):
trace_diff = trace_diff.add(variational_strategy.trace_diff())
trace_diff = trace_diff / self.likelihood.log_noise.exp()
res = -0.5 * sum([inv_quad, log_det, n_data * math.log(2 * math.pi), -trace_diff])
res.div_(n_data)
# Add log probs of priors on the parameters
for _, param, prior in self.named_parameter_priors():
res.add_(prior.log_prob(param).sum())
for _, prior, params, transform in self.named_derived_priors():
res.add_(prior.log_prob(transform(*params)).sum())
return res
| Python | 0.000004 |
16110c627100f5fd6bdaaf859ed71559ea17780a | Fix push/pull tests | jupyterlab_git/tests/test_pushpull.py | jupyterlab_git/tests/test_pushpull.py | from subprocess import PIPE
from mock import patch, call, Mock
from jupyterlab_git.git import Git
@patch('subprocess.Popen')
@patch('os.environ', {'TEST': 'test'})
def test_git_pull_fail(mock_subproc_popen):
# Given
process_mock = Mock()
attrs = {
'communicate.return_value': ('output', 'Authentication failed'.encode('utf-8')),
'returncode': 1
}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
# When
actual_response = Git(root_dir='/bin').pull('test_curr_path')
# Then
mock_subproc_popen.assert_has_calls([
call(
['git pull --no-commit'],
stdout=PIPE,
stderr=PIPE,
cwd='/bin/test_curr_path',
env={'TEST': 'test', 'GIT_TERMINAL_PROMPT': '0'},
),
call().communicate()
])
assert {'code': 1, 'message': 'Authentication failed'} == actual_response
@patch('subprocess.Popen')
@patch('os.environ', {'TEST': 'test'})
def test_git_pull_success(mock_subproc_popen):
# Given
process_mock = Mock()
attrs = {
'communicate.return_value': ('output', ''.encode('utf-8')),
'returncode': 0
}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
# When
actual_response = Git(root_dir='/bin').pull('test_curr_path')
# Then
mock_subproc_popen.assert_has_calls([
call(
['git pull --no-commit'],
stdout=PIPE,
stderr=PIPE,
cwd='/bin/test_curr_path',
env={'TEST': 'test', 'GIT_TERMINAL_PROMPT': '0'},
),
call().communicate()
])
assert {'code': 0} == actual_response
@patch('subprocess.Popen')
@patch('os.environ', {'TEST': 'test'})
def test_git_push_fail(mock_subproc_popen):
# Given
process_mock = Mock()
attrs = {
'communicate.return_value': ('output', 'Authentication failed'.encode('utf-8')),
'returncode': 1
}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
# When
actual_response = Git(root_dir='/bin').push('test_origin', 'HEAD:test_master', 'test_curr_path')
# Then
mock_subproc_popen.assert_has_calls([
call(
['git push test_origin HEAD:test_master'],
stdout=PIPE,
stderr=PIPE,
cwd='/bin/test_curr_path',
env={'TEST': 'test', 'GIT_TERMINAL_PROMPT': '0'},
),
call().communicate()
])
assert {'code': 1, 'message': 'Authentication failed'} == actual_response
@patch('subprocess.Popen')
@patch('os.environ', {'TEST': 'test'})
def test_git_push_success(mock_subproc_popen):
# Given
process_mock = Mock()
attrs = {
'communicate.return_value': ('output', 'does not matter'.encode('utf-8')),
'returncode': 0
}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
# When
actual_response = Git(root_dir='/bin').push('.', 'HEAD:test_master', 'test_curr_path')
# Then
mock_subproc_popen.assert_has_calls([
call(
['git push . HEAD:test_master'],
stdout=PIPE,
stderr=PIPE,
cwd='/bin/test_curr_path',
env={'TEST': 'test', 'GIT_TERMINAL_PROMPT': '0'},
),
call().communicate()
])
assert {'code': 0} == actual_response
| from subprocess import PIPE
from mock import patch, call, Mock
from jupyterlab_git.git import Git
@patch('subprocess.Popen')
@patch('os.environ', {'TEST': 'test'})
def test_git_pull_fail(mock_subproc_popen):
# Given
process_mock = Mock()
attrs = {
'communicate.return_value': ('output', 'Authentication failed'.encode('utf-8')),
'returncode': 1
}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
# When
actual_response = Git(root_dir='/bin').pull('test_curr_path')
# Then
mock_subproc_popen.assert_has_calls([
call(
['git', 'pull', '--no-commit'],
stdout=PIPE,
stderr=PIPE,
cwd='/bin/test_curr_path',
env={'TEST': 'test', 'GIT_TERMINAL_PROMPT': '0'},
),
call().communicate()
])
assert {'code': 1, 'message': 'Authentication failed'} == actual_response
@patch('subprocess.Popen')
@patch('os.environ', {'TEST': 'test'})
def test_git_pull_success(mock_subproc_popen):
# Given
process_mock = Mock()
attrs = {
'communicate.return_value': ('output', ''.encode('utf-8')),
'returncode': 0
}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
# When
actual_response = Git(root_dir='/bin').pull('test_curr_path')
# Then
mock_subproc_popen.assert_has_calls([
call(
['git', 'pull', '--no-commit'],
stdout=PIPE,
stderr=PIPE,
cwd='/bin/test_curr_path',
env={'TEST': 'test', 'GIT_TERMINAL_PROMPT': '0'},
),
call().communicate()
])
assert {'code': 0} == actual_response
@patch('subprocess.Popen')
@patch('os.environ', {'TEST': 'test'})
def test_git_push_fail(mock_subproc_popen):
# Given
process_mock = Mock()
attrs = {
'communicate.return_value': ('output', 'Authentication failed'.encode('utf-8')),
'returncode': 1
}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
# When
actual_response = Git(root_dir='/bin').push('test_origin', 'HEAD:test_master', 'test_curr_path')
# Then
mock_subproc_popen.assert_has_calls([
call(
['git', 'push', 'test_origin', 'HEAD:test_master'],
stdout=PIPE,
stderr=PIPE,
cwd='/bin/test_curr_path',
env={'TEST': 'test', 'GIT_TERMINAL_PROMPT': '0'},
),
call().communicate()
])
assert {'code': 1, 'message': 'Authentication failed'} == actual_response
@patch('subprocess.Popen')
@patch('os.environ', {'TEST': 'test'})
def test_git_push_success(mock_subproc_popen):
# Given
process_mock = Mock()
attrs = {
'communicate.return_value': ('output', 'does not matter'.encode('utf-8')),
'returncode': 0
}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
# When
actual_response = Git(root_dir='/bin').push('.', 'HEAD:test_master', 'test_curr_path')
# Then
mock_subproc_popen.assert_has_calls([
call(
['git', 'push', '.', 'HEAD:test_master'],
stdout=PIPE,
stderr=PIPE,
cwd='/bin/test_curr_path',
env={'TEST': 'test', 'GIT_TERMINAL_PROMPT': '0'},
),
call().communicate()
])
assert {'code': 0} == actual_response
| Python | 0.000686 |
d80878788ddcc1443c54c11b923da23bc295b496 | Fix wget -q flag | charmtest/network.py | charmtest/network.py | import io
import argparse
class Wget(object):
name = "wget"
def __init__(self, network):
self._network = network
def __call__(self, proc_args):
parser = argparse.ArgumentParser()
parser.add_argument("url")
parser.add_argument("-O", dest="output")
parser.add_argument("-q", dest="quiet", action="store_true")
args = parser.parse_args(proc_args["args"][1:])
content = self._network[args.url]
result = {}
if args.output == "-":
result["stdout"] = io.BytesIO(content)
else:
with open(args.output, "wb") as fd:
fd.write(content)
return result
| import io
import argparse
class Wget(object):
name = "wget"
def __init__(self, network):
self._network = network
def __call__(self, proc_args):
parser = argparse.ArgumentParser()
parser.add_argument("url")
parser.add_argument("-O", dest="output")
parser.add_argument("-q", dest="quite")
args = parser.parse_args(proc_args["args"][1:])
content = self._network[args.url]
result = {}
if args.output == "-":
result["stdout"] = io.BytesIO(content)
else:
with open(args.output, "wb") as fd:
fd.write(content)
return result
| Python | 0.000002 |
833395650dc585d1a35e15d8751801988f251388 | Use closure to remove globals. | avenue/web.py | avenue/web.py | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Michael Babich
# See LICENSE.txt or http://opensource.org/licenses/MIT
'''Acts as an interface between what Flask serves and what goes on in
the rest of the application.
'''
from avenue import app, api
from flask import render_template, make_response
from copy import copy
import yaml
from os import path
browser_upgrade = '<p><img src="static/dl/firefox-g.png"></img><img src="static/dl/chrome-g.png"></img><img src="static/dl/safari-g.png"></img><img src="static/dl/opera-g.png"></img></p>'
def forum_generator(site_name, forum_name):
navbar = []
navbar.append({'title' : 'Zombie Raptor Blog',
'content' : 'Read new updates from the Zombie Raptor team!',
'link' : '/'})
navbar.append({'title' : 'Main Forum',
'content' : 'Visit the main forum!',
'link' : '/f/main'})
tags = { 'post' : ['post', '#aabbcc', '/'],
'test' : ['test', '#ffbb99', '/'],
'micro' : ['micro', '#aabbcc', '/'],
'link' : ['link', '#aabbcc', '/'],
'news' : ['news', '#bbeebb', '/']}
def render_forum(thread_title='', main_title='', html_title='', posts=[], threaded=False, content=''):
return render_template('forum.html',
style='night',
sidebar=navbar,
thread_title=thread_title,
main_title=main_title,
html_title=html_title,
posts=posts,
threaded=threaded,
content=content)
def forum_page(filename, content):
data = open(path.join(path.dirname(__file__), 'data', filename))
thread = yaml.load(data)
data.close()
html_title = '%s :: %s :: %s' % (thread['title'], forum_name, site_name)
main_title = '%s -- %s' % (site_name, forum_name)
for post in thread['posts']:
if 'tags' in post:
for i in range(len(post['tags'])):
post['tags'][i] = tags[post['tags'][i]]
return render_forum(main_title=main_title,
thread_title=thread['title'],
html_title=html_title,
posts=thread['posts'],
threaded=thread['threaded'],
content=content)
return forum_page
make_page = forum_generator('Zombie Raptor', 'Main Forum')
@app.route('/')
def index():
return make_page('front_page.yml', 'blog')
@app.route('/f/')
def f():
return ''
@app.route('/f/main/')
def main_forum():
return make_page('main_forum.yml', 'index')
@app.route('/f/main/post/')
def post():
return ''
@app.route('/f/main/post/1')
def sample_post():
return make_page('sample.yml', 'post')
@app.route('/night.css')
def night():
conf = open(path.join(path.dirname(__file__), 'data', 'style.yml'))
style = yaml.load(conf)
conf.close()
response = make_response(render_template('main.css',
text=style['text'],
background=style['background'],
post=style['post']))
response.mimetype = 'text/css'
return response
| # -*- coding: utf-8 -*-
# Copyright (c) 2012 Michael Babich
# See LICENSE.txt or http://opensource.org/licenses/MIT
'''Acts as an interface between what Flask serves and what goes on in
the rest of the application.
'''
from avenue import app, api
from flask import render_template, make_response
from copy import copy
import yaml
from os import path
site_name = 'Zombie Raptor'
forum_name = 'Main Forum'
navbar = []
navbar.append({'title' : 'Zombie Raptor Blog',
'content' : 'Read new updates from the Zombie Raptor team!',
'link' : '/'})
navbar.append({'title' : 'Main Forum',
'content' : 'Visit the main forum!',
'link' : '/f/main'})
browser_upgrade = '<p><img src="static/dl/firefox-g.png"></img><img src="static/dl/chrome-g.png"></img><img src="static/dl/safari-g.png"></img><img src="static/dl/opera-g.png"></img></p>'
tags = { 'post' : ['post', '#aabbcc', '/'],
'test' : ['test', '#ffbb99', '/'],
'micro' : ['micro', '#aabbcc', '/'],
'link' : ['link', '#aabbcc', '/'],
'news' : ['news', '#bbeebb', '/']}
def render_forum(thread_title='', main_title='', html_title='', posts=[], threaded=False, content=''):
return render_template('forum.html',
style='night',
sidebar=navbar,
thread_title=thread_title,
main_title=main_title,
html_title=html_title,
posts=posts,
threaded=threaded,
content=content)
def forum_page(filename, content):
data = open(path.join(path.dirname(__file__), 'data', filename))
thread = yaml.load(data)
data.close()
html_title = '%s :: %s :: %s' % (thread['title'], forum_name, site_name)
main_title = '%s -- %s' % (site_name, forum_name)
for post in thread['posts']:
if 'tags' in post:
for i in range(len(post['tags'])):
post['tags'][i] = tags[post['tags'][i]]
return render_forum(main_title=main_title,
thread_title=thread['title'],
html_title=html_title,
posts=thread['posts'],
threaded=thread['threaded'],
content=content)
@app.route('/')
def index():
return forum_page('front_page.yml', 'blog')
@app.route('/f/')
def f():
return ''
@app.route('/f/main/')
def main_forum():
return forum_page('main_forum.yml', 'index')
@app.route('/f/main/post/')
def post():
return ''
@app.route('/f/main/post/1')
def sample_post():
return forum_page('sample.yml', 'post')
@app.route('/night.css')
def night():
conf = open(path.join(path.dirname(__file__), 'data', 'style.yml'))
style = yaml.load(conf)
conf.close()
response = make_response(render_template('main.css',
text=style['text'],
background=style['background'],
post=style['post']))
response.mimetype = 'text/css'
return response
| Python | 0 |
730e765822932b5b0b00832c41140f39a9ae8d11 | Bump version | datetimerange/__version__.py | datetimerange/__version__.py | # encoding: utf-8
from datetime import datetime
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.3.6"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
| # encoding: utf-8
from datetime import datetime
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.3.5"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
| Python | 0 |
0b9a97a4d6d47bd8f442c3fe3783b1d2cd85ac74 | Add tests for widgets | jarbas/dashboard/tests/test_dashboard_admin.py | jarbas/dashboard/tests/test_dashboard_admin.py | from collections import namedtuple
from unittest.mock import MagicMock
from django.test import TestCase
from jarbas.core.models import Reimbursement
from jarbas.dashboard.admin import (
ReceiptUrlWidget,
ReimbursementModelAdmin,
SubquotaWidget,
SubuotaListfilter,
SuspiciousWidget,
)
Request = namedtuple('Request', ('method',))
ReimbursementMock = namedtuple('Reimbursement', ('cnpj_cpf'))
class TestDashboardSite(TestCase):
def setUp(self):
self.requests = map(Request, ('GET', 'POST', 'PUT', 'PATCH', 'DELETE'))
self.ma = ReimbursementModelAdmin(Reimbursement, 'dashboard')
def test_has_add_permission(self):
permissions = map(self.ma.has_add_permission, self.requests)
self.assertNotIn(True, tuple(permissions))
def test_has_change_permission(self):
permissions = map(self.ma.has_change_permission, self.requests)
expected = (True, False, False, False, False)
self.assertEqual(expected, tuple(permissions))
def test_has_delete_permission(self):
permissions = map(self.ma.has_delete_permission, self.requests)
self.assertNotIn(True, tuple(permissions))
def test_format_document(self):
obj1 = ReimbursementMock('12345678901234')
obj2 = ReimbursementMock('12345678901')
obj3 = ReimbursementMock('2345678')
self.assertEqual('12.345.678/9012-34', self.ma._format_document(obj1))
self.assertEqual('123.456.789-01', self.ma._format_document(obj2))
self.assertEqual('2345678', self.ma._format_document(obj3))
class TestSubuotaListfilter(TestCase):
def setUp(self):
self.qs = MagicMock()
self.list_filter = MagicMock()
def test_queryset_without_subquota(self):
self.list_filter.value.return_value = None
SubuotaListfilter.queryset(self.list_filter, MagicMock(), self.qs)
self.qs.filter.assert_not_called()
def test_queryset_with_subquota(self):
self.list_filter.value.return_value = 42
SubuotaListfilter.queryset(self.list_filter, MagicMock(), self.qs)
self.qs.filter.assert_called_once_with(subquota_id=42)
class TestCustomWidgets(TestCase):
def test_subquota_widget(self):
widget = SubquotaWidget()
rendered = widget.render('Name', 'Flight ticket issue')
self.assertIn('Emissão bilhete aéreo', rendered)
def test_suspicious_widget_with_one_suspicion(self):
widget = SuspiciousWidget()
json_value = '{"invalid_cnpj_cpf": true}'
rendered = widget.render('Name', json_value)
self.assertIn('CPF ou CNPJ inválidos', rendered)
self.assertNotIn('<br>', rendered)
def test_suspicious_widget_with_two_suspicions(self):
widget = SuspiciousWidget()
json_value = '{"invalid_cnpj_cpf": true, "election_expenses": true}'
rendered = widget.render('Name', json_value)
self.assertIn('CPF ou CNPJ inválidos', rendered)
self.assertIn('<br>', rendered)
self.assertIn('Gasto com campanha eleitoral', rendered)
def test_suspicious_widget_with_new_suspicion(self):
widget = SuspiciousWidget()
json_value = '{"whatever": true, "invalid_cnpj_cpf": true}'
rendered = widget.render('Name', json_value)
self.assertIn('CPF ou CNPJ inválidos', rendered)
self.assertIn('<br>', rendered)
self.assertIn('whatever', rendered)
def test_suspicious_widget_without_suspicion(self):
widget = SuspiciousWidget()
json_value = 'null'
rendered = widget.render('Name', json_value)
self.assertEqual('', rendered)
def test_receipt_url_widget(self):
widget = ReceiptUrlWidget()
url = 'https://jarbas.serenatadeamor.org'
rendered = widget.render('Name', url)
self.assertIn('href="{}"'.format(url), rendered)
self.assertIn('>{}</a>'.format(url), rendered)
def test_receipt_url_widget_without_url(self):
widget = ReceiptUrlWidget()
rendered = widget.render('Name', '')
self.assertEqual('', rendered)
| from collections import namedtuple
from unittest.mock import MagicMock
from django.test import TestCase
from jarbas.core.models import Reimbursement
from jarbas.dashboard.admin import ReimbursementModelAdmin, SubuotaListfilter
Request = namedtuple('Request', ('method',))
ReimbursementMock = namedtuple('Reimbursement', ('cnpj_cpf'))
class TestDashboardSite(TestCase):
def setUp(self):
self.requests = map(Request, ('GET', 'POST', 'PUT', 'PATCH', 'DELETE'))
self.ma = ReimbursementModelAdmin(Reimbursement, 'dashboard')
def test_has_add_permission(self):
permissions = map(self.ma.has_add_permission, self.requests)
self.assertNotIn(True, tuple(permissions))
def test_has_change_permission(self):
permissions = map(self.ma.has_change_permission, self.requests)
expected = (True, False, False, False, False)
self.assertEqual(expected, tuple(permissions))
def test_has_delete_permission(self):
permissions = map(self.ma.has_delete_permission, self.requests)
self.assertNotIn(True, tuple(permissions))
def test_format_document(self):
obj1 = ReimbursementMock('12345678901234')
obj2 = ReimbursementMock('12345678901')
obj3 = ReimbursementMock('2345678')
self.assertEqual('12.345.678/9012-34', self.ma._format_document(obj1))
self.assertEqual('123.456.789-01', self.ma._format_document(obj2))
self.assertEqual('2345678', self.ma._format_document(obj3))
class TestSubuotaListfilter(TestCase):
def setUp(self):
self.qs = MagicMock()
self.list_filter = MagicMock()
def test_queryset_without_subquota(self):
self.list_filter.value.return_value = None
SubuotaListfilter.queryset(self.list_filter, MagicMock(), self.qs)
self.qs.filter.assert_not_called()
def test_queryset_with_subquota(self):
self.list_filter.value.return_value = 42
SubuotaListfilter.queryset(self.list_filter, MagicMock(), self.qs)
self.qs.filter.assert_called_once_with(subquota_id=42)
| Python | 0.000001 |
35ff017b483bb46b1f942045bd2c9e20ace39483 | fix line splitting | Commands/Urban.py | Commands/Urban.py | # -*- coding: utf-8 -*-
"""
Created on Jan 24, 2014
@author: Tyranic-Moron
"""
import urllib
import json
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from CommandInterface import CommandInterface
from Utils import WebUtils
from twisted.words.protocols.irc import assembleFormattedText, attributes as A
class Urban(CommandInterface):
triggers = ['urban', 'ud']
help = "urban <search term> - returns the definition of the given search term from UrbanDictionary.com"
def execute(self, message):
"""
@type message: IRCMessage
"""
if len(message.ParameterList) == 0:
return IRCResponse(ResponseType.Say,
"You didn't give a word! Usage: {0}".format(self.help),
message.ReplyTo)
search = urllib.quote(message.Parameters)
url = 'http://api.urbandictionary.com/v0/define?term={0}'.format(search)
webPage = WebUtils.fetchURL(url)
response = json.loads(webPage.body)
if len(response['list']) == 0:
return IRCResponse(ResponseType.Say,
"No entry found for '{0}'".format(message.Parameters),
message.ReplyTo)
graySplitter = assembleFormattedText(A.normal[' ', A.fg.gray['|'], ' '])
defn = response['list'][0]
word = defn['word']
definition = defn['definition']
definition = graySplitter.join([s.strip() for s in definition.strip().splitlines()])
example = defn['example']
example = graySplitter.join([s.strip() for s in example.strip().splitlines()])
author = defn['author']
up = defn['thumbs_up']
down = defn['thumbs_down']
more = 'http://{}.urbanup.com/'.format(word.replace(' ', '-'))
if word.lower() != message.Parameters.lower():
word = "{0} (Contains '{1}')".format(word, message.Parameters)
defFormatString = unicode(assembleFormattedText(A.normal[A.bold["{0}:"], " {1}"]))
exampleFormatString = unicode(assembleFormattedText(A.normal[A.bold["Example(s):"], " {0}"]))
byFormatString = unicode(assembleFormattedText(A.normal["{0}",
graySplitter,
A.fg.lightGreen["+{1}"],
A.fg.gray["/"],
A.fg.lightRed["-{2}"],
graySplitter,
"More defs: {3}"]))
responses = [IRCResponse(ResponseType.Say,
defFormatString.format(word, definition),
message.ReplyTo),
IRCResponse(ResponseType.Say,
exampleFormatString.format(example),
message.ReplyTo),
IRCResponse(ResponseType.Say,
byFormatString.format(author, up, down, more),
message.ReplyTo)]
return responses
| # -*- coding: utf-8 -*-
"""
Created on Jan 24, 2014
@author: Tyranic-Moron
"""
import urllib
import json
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from CommandInterface import CommandInterface
from Utils import WebUtils
from twisted.words.protocols.irc import assembleFormattedText, attributes as A
class Urban(CommandInterface):
triggers = ['urban', 'ud']
help = "urban <search term> - returns the definition of the given search term from UrbanDictionary.com"
def execute(self, message):
"""
@type message: IRCMessage
"""
if len(message.ParameterList) == 0:
return IRCResponse(ResponseType.Say,
"You didn't give a word! Usage: {0}".format(self.help),
message.ReplyTo)
search = urllib.quote(message.Parameters)
url = 'http://api.urbandictionary.com/v0/define?term={0}'.format(search)
webPage = WebUtils.fetchURL(url)
response = json.loads(webPage.body)
if len(response['list']) == 0:
return IRCResponse(ResponseType.Say,
"No entry found for '{0}'".format(message.Parameters),
message.ReplyTo)
graySplitter = assembleFormattedText(A.normal[' ', A.fg.gray['|'], ' '])
defn = response['list'][0]
word = defn['word']
definition = defn['definition']
definition = graySplitter.join([s.strip() for s in definition.strip().split('\r\n')])
example = defn['example']
example = graySplitter.join([s.strip() for s in example.strip().split('\r\n')])
author = defn['author']
up = defn['thumbs_up']
down = defn['thumbs_down']
more = 'http://{}.urbanup.com/'.format(word.replace(' ', '-'))
if word.lower() != message.Parameters.lower():
word = "{0} (Contains '{1}')".format(word, message.Parameters)
defFormatString = unicode(assembleFormattedText(A.normal[A.bold["{0}:"], " {1}"]))
exampleFormatString = unicode(assembleFormattedText(A.normal[A.bold["Example(s):"], " {0}"]))
byFormatString = unicode(assembleFormattedText(A.normal["{0}",
graySplitter,
A.fg.lightGreen["+{1}"],
A.fg.gray["/"],
A.fg.lightRed["-{2}"],
graySplitter,
"More defs: {3}"]))
responses = [IRCResponse(ResponseType.Say,
defFormatString.format(word, definition),
message.ReplyTo),
IRCResponse(ResponseType.Say,
exampleFormatString.format(example),
message.ReplyTo),
IRCResponse(ResponseType.Say,
byFormatString.format(author, up, down, more),
message.ReplyTo)]
return responses
| Python | 0.000001 |
55f3e0e222246bfbc9c1a19f68b06941bac6cd70 | Add an option to include spaces on random string generator | base/utils.py | base/utils.py | """ Small methods for generic use """
# standard library
import itertools
import random
import re
import string
import unicodedata
# django
from django.utils import timezone
def today():
"""
This method obtains today's date in local time
"""
return timezone.localtime(timezone.now()).date()
# BROKEN
def grouper(iterable, n):
args = [iter(iterable)] * n
return ([e for e in t if e is not None] for t in itertools.izip_longest(
*args
))
def format_rut(rut):
if not rut:
return ''
rut = rut.replace(' ', '').replace('.', '').replace('-', '')
rut = rut[:9]
if not rut:
return ''
verifier = rut[-1]
code = rut[0:-1][::-1]
code = re.sub("(.{3})", "\\1.", code, 0, re.DOTALL)
code = code[::-1]
return '%s-%s' % (code, verifier)
def camel_to_underscore(string):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def underscore_to_camel(word):
return ''.join(x.capitalize() or '_' for x in word.split('_'))
def strip_accents(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# BROKEN
def tz_datetime(s, *args, **kwargs):
"""
Creates a datetime.datetime object but with the current timezone
"""
tz = timezone.get_current_timezone()
naive_dt = timezone.datetime(*args, **kwargs)
return timezone.make_aware(naive_dt, tz)
def random_string(length=6, chars=None, include_spaces=True):
if chars is None:
chars = string.ascii_uppercase + string.digits
if include_spaces:
chars += ' '
return ''.join(random.choice(chars) for x in range(length))
| """ Small methods for generic use """
# standard library
import itertools
import random
import re
import string
import unicodedata
# django
from django.utils import timezone
def today():
"""
This method obtains today's date in local time
"""
return timezone.localtime(timezone.now()).date()
# BROKEN
def grouper(iterable, n):
args = [iter(iterable)] * n
return ([e for e in t if e is not None] for t in itertools.izip_longest(
*args
))
def format_rut(rut):
if not rut:
return ''
rut = rut.replace(' ', '').replace('.', '').replace('-', '')
rut = rut[:9]
if not rut:
return ''
verifier = rut[-1]
code = rut[0:-1][::-1]
code = re.sub("(.{3})", "\\1.", code, 0, re.DOTALL)
code = code[::-1]
return '%s-%s' % (code, verifier)
def camel_to_underscore(string):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def underscore_to_camel(word):
return ''.join(x.capitalize() or '_' for x in word.split('_'))
def strip_accents(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# BROKEN
def tz_datetime(s, *args, **kwargs):
"""
Creates a datetime.datetime object but with the current timezone
"""
tz = timezone.get_current_timezone()
naive_dt = timezone.datetime(*args, **kwargs)
return timezone.make_aware(naive_dt, tz)
def random_string(length=6, chars=None):
if chars is None:
chars = string.ascii_uppercase + string.digits
return ''.join(random.choice(chars) for x in range(length))
| Python | 0.000004 |
5496bd29c4262c252367d7b305d2a78fd1ad2fa7 | move debug call | bcdata/wcs.py | bcdata/wcs.py | import logging
import requests
import bcdata
log = logging.getLogger(__name__)
def get_dem(
bounds, out_file="dem.tif", src_crs="EPSG:3005", dst_crs="EPSG:3005", resolution=25, interpolation=None
):
"""Get TRIM DEM for provided bounds, write to GeoTIFF.
"""
bbox = ",".join([str(b) for b in bounds])
# do not upsample
if resolution < 25:
raise ValueError("Resolution requested must be 25m or greater")
# if specifying interpolation method, there has to actually be a
# resampling requested - resolution can't be the native 25m
if interpolation and resolution == 25:
raise ValueError("Requested coverage at native resolution, no resampling required, interpolation {} invalid")
# if downsampling, default to bilinear (the server defaults to nearest)
if resolution > 25 and not interpolation:
log.info("Interpolation not specified, defaulting to bilinear")
interpolation = "bilinear"
# make sure interpolation is valid
if interpolation:
valid_interpolations = ["nearest", "bilinear", "bicubic"]
if interpolation not in valid_interpolations:
raise ValueError("Interpolation {} invalid. Valid keys are: {}".format(interpolation, ",".join(valid_interpolations)))
# build request
payload = {
"service": "WCS",
"version": "1.0.0",
"request": "GetCoverage",
"coverage": "pub:bc_elevation_25m_bcalb",
"Format": "GeoTIFF",
"bbox": bbox,
"CRS": src_crs,
"RESPONSE_CRS": dst_crs,
"resx": str(resolution),
"resy": str(resolution),
}
if interpolation:
payload["INTERPOLATION"] = interpolation
# request data from WCS
r = requests.get(bcdata.WCS_URL, params=payload)
# save to tiff
if r.status_code == 200:
with open(out_file, "wb") as file:
file.write(r.content)
return out_file
else:
raise RuntimeError(
"WCS request {} failed with status code {}".format(r.url, str(r.status_code))
)
| import logging
import requests
import bcdata
log = logging.getLogger(__name__)
def get_dem(
bounds, out_file="dem.tif", src_crs="EPSG:3005", dst_crs="EPSG:3005", resolution=25, interpolation=None
):
"""Get TRIM DEM for provided bounds, write to GeoTIFF.
"""
bbox = ",".join([str(b) for b in bounds])
# do not upsample
if resolution < 25:
raise ValueError("Resolution requested must be 25m or greater")
# if specifying interpolation method, there has to actually be a
# resampling requested - resolution can't be the native 25m
if interpolation and resolution == 25:
raise ValueError("Requested coverage at native resolution, no resampling required, interpolation {} invalid")
# if downsampling, default to bilinear (the server defaults to nearest)
if resolution > 25 and not interpolation:
log.info("Interpolation not specified, defaulting to bilinear")
interpolation = "bilinear"
# make sure interpolation is valid
if interpolation:
valid_interpolations = ["nearest", "bilinear", "bicubic"]
if interpolation not in valid_interpolations:
raise ValueError("Interpolation {} invalid. Valid keys are: {}".format(interpolation, ",".join(valid_interpolations)))
# build request
payload = {
"service": "WCS",
"version": "1.0.0",
"request": "GetCoverage",
"coverage": "pub:bc_elevation_25m_bcalb",
"Format": "GeoTIFF",
"bbox": bbox,
"CRS": src_crs,
"RESPONSE_CRS": dst_crs,
"resx": str(resolution),
"resy": str(resolution),
}
if interpolation:
payload["INTERPOLATION"] = interpolation
# request data from WCS
r = requests.get(bcdata.WCS_URL, params=payload)
log.debug(r.url)
# save to tiff
if r.status_code == 200:
with open(out_file, "wb") as file:
file.write(r.content)
return out_file
else:
raise RuntimeError(
"WCS request failed with status code {}".format(str(r.status_code))
)
| Python | 0.000002 |
c4ad9519c117edfdc59f229380fa0797bc6bfffa | Update BitshareComFolder.py | module/plugins/crypter/BitshareComFolder.py | module/plugins/crypter/BitshareComFolder.py | # -*- coding: utf-8 -*-
from module.plugins.internal.SimpleCrypter import SimpleCrypter, create_getInfo
class BitshareComFolder(SimpleCrypter):
__name__ = "BitshareComFolder"
__type__ = "crypter"
__version__ = "0.04"
__pattern__ = r'http://(?:www\.)?bitshare\.com/\?d=\w+'
__config__ = [("use_premium" , "bool", "Use premium account if available" , True),
("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
__description__ = """Bitshare.com folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "l.stickell@yahoo.it")]
LINK_PATTERN = r'<a href="(http://bitshare\.com/files/.+)">.+</a></td>'
NAME_PATTERN = r'View public folder "(?P<N>.+?)"</h1>'
getInfo = create_getInfo(BitshareComFolder)
| # -*- coding: utf-8 -*-
from module.plugins.internal.SimpleCrypter import SimpleCrypter, create_getInfo
class BitshareComFolder(SimpleCrypter):
__name__ = "BitshareComFolder"
__type__ = "crypter"
__version__ = "0.03"
__pattern__ = r'http://(?:www\.)?bitshare\.com/\?d=\w+'
__config__ = [("use_premium" , "bool", "Use premium account if available" , True),
("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
__description__ = """Bitshare.com folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "l.stickell@yahoo.it")]
LINK_PATTERN = r'<a href="(http://bitshare\.com/files/.+)">.+</a></td>'
NAME_PATTERN = r'View public folder "(?P<N>.+)"</h1>'
getInfo = create_getInfo(BitshareComFolder)
| Python | 0 |
326f0b881d36ed19d0a37495ae34fc24fc1eb707 | Load the spotify header file from an absolute path | connect_ffi.py | connect_ffi.py | from cffi import FFI
ffi = FFI()
print "Loading Spotify library..."
#TODO: Use absolute paths for open() and stuff
#Header generated with cpp spotify.h > spotify.processed.h && sed -i 's/__extension__//g' spotify.processed.h
with open(os.path.join(sys.path[0], "spotify.processed.h")) as file:
header = file.read()
ffi.cdef(header)
ffi.cdef("""
void *malloc(size_t size);
void exit(int status);
""")
C = ffi.dlopen(None)
lib = ffi.verify("""
#include "spotify.h"
""", include_dirs=['./'],
library_dirs=['./'],
libraries=[str('spotify_embedded_shared')]) | from cffi import FFI
ffi = FFI()
print "Loading Spotify library..."
#TODO: Use absolute paths for open() and stuff
#Header generated with cpp spotify.h > spotify.processed.h && sed -i 's/__extension__//g' spotify.processed.h
with open("spotify.processed.h") as file:
header = file.read()
ffi.cdef(header)
ffi.cdef("""
void *malloc(size_t size);
void exit(int status);
""")
C = ffi.dlopen(None)
lib = ffi.verify("""
#include "spotify.h"
""", include_dirs=['./'],
library_dirs=['./'],
libraries=[str('spotify_embedded_shared')]) | Python | 0 |
278dcb8b2fb3e1f69434ec9c41e566501cdc50bd | Remove unused functionality | organizations/backends/forms.py | organizations/backends/forms.py | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2019, Ben Lopatin and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.forms import UserCreationForm
class UserRegistrationForm(UserCreationForm):
"""
Form class for completing a user's registration and activating the
User.
The class operates on a user model which is assumed to have the required
fields of a BaseUserModel
"""
# TODO(bennylope): Remove this entirely and replace with base class
def org_registration_form(org_model):
"""
Generates a registration ModelForm for the given organization model class
"""
class OrganizationRegistrationForm(forms.ModelForm):
"""Form class for creating new organizations owned by new users."""
email = forms.EmailField()
class Meta:
model = org_model
exclude = ("is_active", "users")
# def save(self, *args, **kwargs):
# self.instance.is_active = False
# super().save(*args, **kwargs)
return OrganizationRegistrationForm
| # -*- coding: utf-8 -*-
# Copyright (c) 2012-2019, Ben Lopatin and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
class UserRegistrationForm(forms.ModelForm):
"""
Form class for completing a user's registration and activating the
User.
The class operates on a user model which is assumed to have the required
fields of a BaseUserModel
"""
# TODO decouple first/last names from this form
first_name = forms.CharField(max_length=30)
last_name = forms.CharField(max_length=30)
password = forms.CharField(max_length=30, widget=forms.PasswordInput)
password_confirm = forms.CharField(max_length=30, widget=forms.PasswordInput)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.initial["username"] = ""
def clean(self):
password = self.cleaned_data.get("password")
password_confirm = self.cleaned_data.get("password_confirm")
if password != password_confirm or not password:
raise forms.ValidationError(_("Your password entries must match"))
return super().clean()
class Meta:
model = get_user_model()
exclude = (
"is_staff",
"is_superuser",
"is_active",
"last_login",
"date_joined",
"groups",
"user_permissions",
)
def org_registration_form(org_model):
"""
Generates a registration ModelForm for the given organization model class
"""
class OrganizationRegistrationForm(forms.ModelForm):
"""Form class for creating new organizations owned by new users."""
email = forms.EmailField()
class Meta:
model = org_model
exclude = ("is_active", "users")
def save(self, *args, **kwargs):
self.instance.is_active = False
super().save(*args, **kwargs)
return OrganizationRegistrationForm
| Python | 0 |
331fb50e6a4dcef99c8a6806d3efd7531859542f | add comments | achievements/templatetags/achievement_tags.py | achievements/templatetags/achievement_tags.py | from django import template
from achievements.models import Category, Trophy
from achievements import settings
register = template.Library()
# call single_category.html with the given parameters
@register.inclusion_tag('achievements/single_category.html')
def render_category(category, user):
return {
'category': category,
'percentage': category.get_complete_percentage(user),
'completed_achievements': category.count_all_complete_achievements(user)
}
# call navigation.html with the given parameters
@register.inclusion_tag('achievements/navigation.html')
def render_navigation(current_category=None):
return {
'categories': Category.objects.filter(parent_category__isnull=True),
'current_category': current_category,
}
# call trophies.html with the given parameters
@register.inclusion_tag('achievements/trophies.html')
def render_trophies(user, takes_context=True):
trophies = [None] * settings.TROPHY_COUNT
# put trophy on the given position in an array
for trophy in Trophy.objects.filter(user=user):
trophies[trophy.position] = trophy
return {'trophies': trophies}
# check type of achievement and return the accordingly render function
@register.simple_tag
def render_subachievement(user, achievement):
if hasattr(achievement, 'progressachievement'):
return achievement.progressachievement.render(user)
if hasattr(achievement, 'taskachievement'):
return achievement.taskachievement.render(user)
if hasattr(achievement, 'collectionachievement'):
return achievement.collectionachievement.render(user)
else:
return "" | from django import template
from achievements.models import Category, Trophy
from achievements import settings
register = template.Library()
@register.inclusion_tag('achievements/single_category.html')
def render_category(category, user):
return {
'category': category,
'percentage': category.get_complete_percentage(user),
'completed_achievements': category.count_all_complete_achievements(user)
}
@register.inclusion_tag('achievements/navigation.html')
def render_navigation(current_category=None):
return {
'categories': Category.objects.filter(parent_category__isnull=True),
'current_category': current_category,
}
@register.inclusion_tag('achievements/trophies.html')
def render_trophies(user, takes_context=True):
trophies = [None] * settings.TROPHY_COUNT
for trophy in Trophy.objects.filter(user=user):
trophies[trophy.position] = trophy
return {'trophies': trophies}
@register.simple_tag
def render_subachievement(user, achievement):
if hasattr(achievement, 'progressachievement'):
return achievement.progressachievement.render(user)
if hasattr(achievement, 'taskachievement'):
return achievement.taskachievement.render(user)
if hasattr(achievement, 'collectionachievement'):
return achievement.collectionachievement.render(user) | Python | 0 |
66fc121dbe0dbb7a69a62bfdaf98838a4f7a0bf3 | Update yeti.py | misp_modules/modules/expansion/yeti.py | misp_modules/modules/expansion/yeti.py | import json
import json
try:
import pyeti
except ImportError:
print("pyeti module not installed.")
misperrors = {'error': 'Error'}
mispattributes = {'input': ['ip-src', 'ip-dst', 'hostname', 'domain'],
'output': ['hostname', 'domain', 'ip-src', 'ip-dst', 'url']}
# possible module-types: 'expansion', 'hover' or both
moduleinfo = {'version': '1', 'author': 'Sebastien Larinier @sebdraven',
'description': 'Query on yeti',
'module-type': ['expansion', 'hover']}
moduleconfig = ['apikey', 'url']
class Yeti:
def __init__(self, url, key):
self.api = pyeti.YetiApi(url, api_key=key)
self.dict = {'Ip': 'ip-src', 'Domain': 'domain', 'Hostname': 'hostname'}
def search(self, value):
obs = self.api.observable_search(value=value)
if obs:
return obs
def
def handler(q=False):
if q is False:
return False
request = json.loads(q)
attribute = request['attribute']
| import json
import json
try:
import pyeti
except ImportError:
print("pyeti module not installed.")
misperrors = {'error': 'Error'}
mispattributes = {'input': ['ip-src', 'ip-dst', 'hostname', 'domain'],
'output': ['hostname', 'domain', 'ip-src', 'ip-dst', 'url']}
# possible module-types: 'expansion', 'hover' or both
moduleinfo = {'version': '1', 'author': 'Sebastien Larinier @sebdraven',
'description': 'Query on yeti',
'module-type': ['expansion', 'hover']}
| Python | 0 |
24124edccd9a822bb300815907c37d6453defed5 | Add recursive handling of nested states. | py/statemachines/simple_state_machine_script_test.py | py/statemachines/simple_state_machine_script_test.py | #----------------------------------------------------------------------------------------
# BEGIN: READ_HEXAPOD_CURRENT_POSE
# TEMPLATE: ReadTransformState
#
smach.StateMachine.add('READ_HEXAPOD_CURRENT_POSE', TFListenerState('ur10_1/base', 'hexapod_1/top', 'hexapod_current_pose'),
transitions={'succeeded':'MOVE_ABOVE_HEXAPOD_1'},
remapping={'hexapod_current_pose':'hexapod_current_pose'})
# END: READ_HEXAPOD_CURRENT_POSE
#----------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------
# BEGIN: MOVE_ABOVE_HEXAPOD_1
# TEMPLATE: CartTrapVelActionState
#
sm_sub.userdata.MOVE_ABOVE_HEXAPOD_1_position_offset = np.asarray([0.0, 0.0, -0.2])
sm_sub.userdata.MOVE_ABOVE_HEXAPOD_1_rotation_offset = np.asarray([0.0, 0.0, 0.0])
sm_sub.userdata.MOVE_ABOVE_HEXAPOD_1_desired_velocity = 0.1
smach.StateMachine.add('MOVE_ABOVE_HEXAPOD_1',
smach_ros.SimpleActionState('/ur10_1/cart_trap_vel_action_server', robot_module.msg.CartTrapVelAction,
goal_cb = cart_trap_vel_goal_cb,
input_keys=['cart_trap_vel_pose_input',
'cart_trap_vel_position_offset_input',
'cart_trap_vel_rotation_offset_input',
'cart_trap_vel_desired_velocity_input']),
transitions={'succeeded':'OPEN_TOOL_EXCHANGE_1'},
remapping={'cart_trap_vel_pose_input':'hexapod_current_pose',
'cart_trap_vel_position_offset_input':'MOVE_ABOVE_HEXAPOD_1_position_offset',
'cart_trap_vel_rotation_offset_input':'MOVE_ABOVE_HEXAPOD_1_rotation_offset',
'cart_trap_vel_desired_velocity_input':'MOVE_ABOVE_HEXAPOD_1_desired_velocity'})
# END: MOVE_ABOVE_HEXAPOD_1
#----------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------
# BEGIN: OPEN_TOOL_EXCHANGE_1
# TEMPLATE: SetOutput
#
OPEN_TOOL_EXCHANGE_1_request = DigitalOutputRequest(TOOL_EXCHANGE_GPIO, TOOL_EXCHANGE_OPEN)
smach.StateMachine.add('OPEN_TOOL_EXCHANGE_1',
smach_ros.ServiceState('/ur10_1/set_output',
DigitalOutput,
request = OPEN_TOOL_EXCHANGE_1_request),
transitions={'succeeded':'COUPLE_WITH_HEXAPOD'})
# END: OPEN_TOOL_EXCHANGE_1
#----------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------
# BEGIN: SUB_STATE_1
# TEMPLATE: ReadTransformState
#
smach.StateMachine.add('SUB_STATE_1', TFListenerState('ur10_2/base', 'hexapod_1/top', 'hexapod_current_pose'),
transitions={'succeeded':'SUB_STATE_2'},
remapping={'hexapod_current_pose':'hexapod_current_pose'})
# END: SUB_STATE_1
#----------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------
# BEGIN: SUB_STATE_2
# TEMPLATE: ReadTransformState
#
smach.StateMachine.add('SUB_STATE_2', TFListenerState('ur10_2/base', 'hexapod_1/top', 'hexapod_current_pose'),
transitions={'succeeded':'MOVE_ABOVE_HEXAPOD_1'},
remapping={'hexapod_current_pose':'hexapod_current_pose'})
# END: SUB_STATE_2
#----------------------------------------------------------------------------------------
| #----------------------------------------------------------------------------------------
# BEGIN: READ_HEXAPOD_CURRENT_POSE
# TEMPLATE: ReadTransformState
#
smach.StateMachine.add('READ_HEXAPOD_CURRENT_POSE', TFListenerState('ur10_1/base', 'hexapod_1/top', 'hexapod_current_pose'),
transitions={'succeeded':'MOVE_ABOVE_HEXAPOD_1'},
remapping={'hexapod_current_pose':'hexapod_current_pose'})
# END: READ_HEXAPOD_CURRENT_POSE
#----------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------
# BEGIN: MOVE_ABOVE_HEXAPOD_1
# TEMPLATE: CartTrapVelActionState
#
sm_sub.userdata.MOVE_ABOVE_HEXAPOD_1_position_offset = np.asarray([0.0, 0.0, -0.2])
sm_sub.userdata.MOVE_ABOVE_HEXAPOD_1_rotation_offset = np.asarray([0.0, 0.0, 0.0])
sm_sub.userdata.MOVE_ABOVE_HEXAPOD_1_desired_velocity = 0.1
smach.StateMachine.add('MOVE_ABOVE_HEXAPOD_1',
smach_ros.SimpleActionState('/ur10_1/cart_trap_vel_action_server', robot_module.msg.CartTrapVelAction,
goal_cb = cart_trap_vel_goal_cb,
input_keys=['cart_trap_vel_pose_input',
'cart_trap_vel_position_offset_input',
'cart_trap_vel_rotation_offset_input',
'cart_trap_vel_desired_velocity_input']),
transitions={'succeeded':'OPEN_TOOL_EXCHANGE_1'},
remapping={'cart_trap_vel_pose_input':'hexapod_current_pose',
'cart_trap_vel_position_offset_input':'MOVE_ABOVE_HEXAPOD_1_position_offset',
'cart_trap_vel_rotation_offset_input':'MOVE_ABOVE_HEXAPOD_1_rotation_offset',
'cart_trap_vel_desired_velocity_input':'MOVE_ABOVE_HEXAPOD_1_desired_velocity'})
# END: MOVE_ABOVE_HEXAPOD_1
#----------------------------------------------------------------------------------------
| Python | 0 |
c3dffef7869c0ce19801d78393a336b6b6ecbce7 | stop littering /tmp with temporary resource files | pynodegl-utils/pynodegl_utils/tests/cmp_resources.py | pynodegl-utils/pynodegl_utils/tests/cmp_resources.py | #!/usr/bin/env python
#
# Copyright 2020 GoPro Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import csv
import tempfile
from .cmp import CompareSceneBase, get_test_decorator
_COLS = (
'Textures memory',
'Buffers count',
'Buffers total',
'Blocks count',
'Blocks total',
'Medias count',
'Medias total',
'Textures count',
'Textures total',
'Computes',
'GraphicCfgs',
'Renders',
'RTTs',
)
class _CompareResources(CompareSceneBase):
def __init__(self, scene_func, columns=_COLS, **kwargs):
super().__init__(scene_func, width=320, height=240, **kwargs)
# We can't use NamedTemporaryFile because we may not be able to open it
# twice on some systems
fd, self._csvfile = tempfile.mkstemp(suffix='.csv', prefix='ngl-test-resources-')
os.close(fd)
self._columns = columns
self._hud = 1
self._hud_export_filename = self._csvfile
def get_out_data(self, debug=False, debug_func=None):
for frame in self.render_frames():
pass
# filter columns
with open(self._csvfile) as csvfile:
reader = csv.DictReader(csvfile)
data = [self._columns]
for row in reader:
data.append([row[k] for k in self._columns])
# rely on base string diff
ret = ''
for row in data:
ret += ','.join(row) + '\n'
return ret
def __del__(self):
os.remove(self._csvfile)
test_resources = get_test_decorator(_CompareResources)
| #!/usr/bin/env python
#
# Copyright 2020 GoPro Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import csv
import tempfile
from .cmp import CompareSceneBase, get_test_decorator
_COLS = (
'Textures memory',
'Buffers count',
'Buffers total',
'Blocks count',
'Blocks total',
'Medias count',
'Medias total',
'Textures count',
'Textures total',
'Computes',
'GraphicCfgs',
'Renders',
'RTTs',
)
class _CompareResources(CompareSceneBase):
def __init__(self, scene_func, columns=_COLS, **kwargs):
super().__init__(scene_func, width=320, height=240, **kwargs)
# We can't use NamedTemporaryFile because we may not be able to open it
# twice on some systems
fd, self._csvfile = tempfile.mkstemp(suffix='.csv', prefix='ngl-test-resources-')
os.close(fd)
self._columns = columns
self._hud = 1
self._hud_export_filename = self._csvfile
def get_out_data(self, debug=False, debug_func=None):
for frame in self.render_frames():
pass
# filter columns
with open(self._csvfile) as csvfile:
reader = csv.DictReader(csvfile)
data = [self._columns]
for row in reader:
data.append([row[k] for k in self._columns])
# rely on base string diff
ret = ''
for row in data:
ret += ','.join(row) + '\n'
os.remove(self._csvfile)
return ret
test_resources = get_test_decorator(_CompareResources)
| Python | 0.000002 |
deccf656db39ac949f93e562e4f41a32589feb9b | Use a more complex and extendable check for shortcuts in StructuredText | cybox/common/structured_text.py | cybox/common/structured_text.py | # Copyright (c) 2013, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import cybox
import cybox.bindings.cybox_common as common_binding
class StructuredText(cybox.Entity):
_binding = common_binding
_namespace = 'http://cybox.mitre.org/common-2'
def __init__(self, value=None):
self.value = value
self.structuring_format = None
def to_obj(self, structured_text_obj=None):
if not structured_text_obj:
text_obj = common_binding.StructuredTextType()
else:
text_obj = structured_text_obj
text_obj.set_valueOf_(self.value)
if self.structuring_format is not None:
text_obj.set_structuring_format(self.structuring_format)
return text_obj
def to_dict(self):
# Shortcut if structuring_format is not defined.
if self.is_plain():
return self.value
text_dict = {}
text_dict['value'] = self.value
text_dict['structuring_format'] = self.structuring_format
return text_dict
def is_plain(self):
"""Whether this can be represented as a string rather than a dictionary
Subclasses can override this to include their custom fields in this
check:
return (super(..., self).is_plain() and self.other_field is None)
"""
return (self.structuring_format is None)
@classmethod
def from_obj(cls, text_obj, text_class=None):
if not text_obj:
return None
if not text_class:
text = StructuredText()
else:
text = text_class
text.value = text_obj.get_valueOf_()
text.structuring_format = text_obj.get_structuring_format()
return text
@classmethod
def from_dict(cls, text_dict, text_class=None):
if text_dict is None:
return None
if not text_class:
text = StructuredText()
else:
text = text_class
if not isinstance(text_dict, dict):
text.value = text_dict
else:
text.value = text_dict.get('value')
text.structuring_format = text_dict.get('structuring_format')
return text
| # Copyright (c) 2013, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import cybox
import cybox.bindings.cybox_common as common_binding
class StructuredText(cybox.Entity):
_binding = common_binding
_namespace = 'http://cybox.mitre.org/common-2'
def __init__(self, value=None):
self.value = value
self.structuring_format = None
def to_obj(self, structured_text_obj=None):
if not structured_text_obj:
text_obj = common_binding.StructuredTextType()
else:
text_obj = structured_text_obj
text_obj.set_valueOf_(self.value)
if self.structuring_format is not None:
text_obj.set_structuring_format(self.structuring_format)
return text_obj
def to_dict(self):
text_dict = {}
text_dict['value'] = self.value
text_dict['structuring_format'] = self.structuring_format
return text_dict
@classmethod
def from_obj(cls, text_obj, text_class=None):
if not text_obj:
return None
if not text_class:
text = StructuredText()
else:
text = text_class
text.value = text_obj.get_valueOf_()
text.structuring_format = text_obj.get_structuring_format()
return text
@classmethod
def from_dict(cls, text_dict, text_class=None):
if text_dict is None:
return None
if not text_class:
text = StructuredText()
else:
text = text_class
if not isinstance(text_dict, dict):
text.value = text_dict
else:
text.value = text_dict.get('value')
text.structuring_format = text_dict.get('structuring_format')
return text
| Python | 0 |
1eae87ee4435b4dda35d64295de13756394dbce9 | Add GET to 'Allow-Methods' by default. Fixes #12 | crossdomain.py | crossdomain.py | #!/usr/bin/env python
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
def crossdomain(origin=None, methods=['GET'], headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
| #!/usr/bin/env python
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
| Python | 0 |
9ec25b6a5f8400b68c51ce9c5667c8c0c1648521 | Remove unneeded catch | cucco/regex.py | cucco/regex.py | #-*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import re
"""
Regular expression to match URLs as seen on http://daringfireball.net/2010/07/improved_regex_for_matching_urls
"""
URL_REGEX = re.compile(
r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
re.IGNORECASE)
"""
Regular expression to match email addresses as seen on http://www.wellho.net/resources/ex.php4?item=y115/relib.py
"""
EMAIL_REGEX = re.compile(r"[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}", re.IGNORECASE)
EMOJI_REGEX = re.compile(u'([\U00002600-\U000027BF])|([\U0001F300-\U0001F64F])|([\U0001F680-\U0001F6FF])')
| #-*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import re
"""
Regular expression to match URLs as seen on http://daringfireball.net/2010/07/improved_regex_for_matching_urls
"""
URL_REGEX = re.compile(
r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
re.IGNORECASE)
"""
Regular expression to match email addresses as seen on http://www.wellho.net/resources/ex.php4?item=y115/relib.py
"""
EMAIL_REGEX = re.compile(r"[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}", re.IGNORECASE)
try:
EMOJI_REGEX = re.compile(u'([\U00002600-\U000027BF])|([\U0001f300-\U0001f64F])|([\U0001f680-\U0001f6FF])')
except re.error:
EMOJI_REGEX = re.compile(u'([\u2600-\u27BF])|([\uD83C][\uDF00-\uDFFF])|([\uD83D][\uDC00-\uDE4F])|([\uD83D][\uDE80-\uDEFF])')
| Python | 0.000019 |
1c6f53492fc4cdc132769e4ffcfb076557a45c34 | Remove English words from Non-English corpus data | modules/preprocessor/emille_preprocessor.py | modules/preprocessor/emille_preprocessor.py | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""EMILLE Corpus Preprocessor which inherits from BasePreprocessor."""
import regex as re
from base_preprocessor import BasePreprocessor
from nltk.tokenize import sent_tokenize
from bs4 import BeautifulSoup
import sys
import unicodedata
from collections import defaultdict
class EmilleCorpusPreprocessor(BasePreprocessor):
"""Emille Corpus Preprocessor which preprocesses the EMILLE Corpus."""
def __init__(
self,
corpus_fname,
corpus_dir_path='.',
encoding='utf-8',
language='english',
need_preprocessing=False,
limit=None
):
"""Constructor which initializes the BasePreprocessor constructor."""
self.language = language
# If language is not specified, regex pattern for split is default ''
self.lang_split_sent = defaultdict(lambda : u'')
# Specify language specific split regex pattern
lang_split_sent = [
('hindi', u'[।]'),
]
# Store language specific regex pattern in the defaultdict
for k,v in lang_split_sent:
self.lang_split_sent[k] = v
super(EmilleCorpusPreprocessor, self).__init__(
corpus_fname,
corpus_dir_path=corpus_dir_path,
encoding=encoding,
need_preprocessing=need_preprocessing,
limit=limit
)
def _extract_corpus_data(self, data):
"""Extract contents of the 'p' tags which contain the body."""
soup = BeautifulSoup(data)
ptags = soup.find_all('p')
content =[]
for index in range(len(ptags)):
content.append( ". ".join(list(ptags[index].strings)))
return ". ".join(content)
def _clean_word(self, word):
"""
Preprocess words after tokenizing words from sentences.
* Remove punctuations.
* Remove English words from Non-English corpus data.
"""
if self.language is "english":
regex = ur"((\p{P}+)|(\p{S}+)|([0-9]+))"
else:
regex = ur"((\p{P}+)|(\p{S}+)|([0-9]+)|([A-Za-z]))"
return re.sub(
pattern=regex,
repl='',
string=word.lower()
).strip()
def _tokenize_sentences(self, data):
"""
Sentence tokenize corpus.
* Sentence Tokenize the corpus using NLTK.
* Remove punctuations [ except space ] from each individual sentences.
"""
lang_specific_split_pattern = self.lang_split_sent[self.language]
for generic_sentence_split in sent_tokenize(data):
for sentence in re.split(
lang_specific_split_pattern, generic_sentence_split
):
clean_sentence = sentence.expandtabs().strip()
if len(clean_sentence) > 0:
yield clean_sentence
def _tokenize_words(self, sentence):
"""Tokenize Words from sentences."""
return sentence.split()
BasePreprocessor.register(EmilleCorpusPreprocessor)
| #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""EMILLE Corpus Preprocessor which inherits from BasePreprocessor."""
import regex as re
from base_preprocessor import BasePreprocessor
from nltk.tokenize import sent_tokenize
from bs4 import BeautifulSoup
import sys
import unicodedata
from collections import defaultdict
class EmilleCorpusPreprocessor(BasePreprocessor):
"""Emille Corpus Preprocessor which preprocesses the EMILLE Corpus."""
def __init__(
self,
corpus_fname,
corpus_dir_path='.',
encoding='utf-8',
language=None,
need_preprocessing=False,
limit=None
):
"""Constructor which initializes the BasePreprocessor constructor."""
self.language = language
# If language is not specified, regex pattern for split is default ''
self.lang_split_sent = defaultdict(lambda : u'')
# Specify language specific split regex pattern
lang_split_sent = [
('hindi', u'[।]'),
]
# Store language specific regex pattern in the defaultdict
for k,v in lang_split_sent:
self.lang_split_sent[k] = v
super(EmilleCorpusPreprocessor, self).__init__(
corpus_fname,
corpus_dir_path=corpus_dir_path,
encoding=encoding,
need_preprocessing=need_preprocessing,
limit=limit
)
def _extract_corpus_data(self, data):
"""Extract contents of the 'p' tags which contain the body."""
soup = BeautifulSoup(data)
ptags = soup.find_all('p')
content =[]
for index in range(len(ptags)):
content.append( ". ".join(list(ptags[index].strings)))
return ". ".join(content)
def _clean_word(self, word):
"""
Preprocess words after tokenizing words from sentences.
* Remove punctuations.
"""
return re.sub(
pattern=ur"((\p{P}+)|(\p{S}+)|([0-9]+))",
repl='',
string=word.lower()
).strip()
def _tokenize_sentences(self, data):
"""
Sentence tokenize corpus.
* Sentence Tokenize the corpus using NLTK.
* Remove punctuations [ except space ] from each individual sentences.
"""
lang_specific_split_pattern = self.lang_split_sent[self.language]
for generic_sentence_split in sent_tokenize(data):
for sentence in re.split(
lang_specific_split_pattern, generic_sentence_split
):
clean_sentence = sentence.expandtabs().strip()
if len(clean_sentence) > 0:
yield clean_sentence
def _tokenize_words(self, sentence):
"""Tokenize Words from sentences."""
return sentence.split()
BasePreprocessor.register(EmilleCorpusPreprocessor)
| Python | 0.000083 |
efd96f03d51c1fce3ef370cae88928e16f0b9f17 | Parse the response with json | buffer/api.py | buffer/api.py | import json
from rauth import OAuth2Session
BASE_URL = 'https://api.bufferapp.com/1/%s'
class API(OAuth2Session):
'''
Small and clean class that embrace all basic
operations with the buffer app
'''
def get(self, url):
if not self.access_token:
raise ValueError('Please set an access token first!')
response = super(OAuth2Session, self).get(url=BASE_URL % url)
return json.loads(response.content)
| from rauth import OAuth2Session
BASE_URL = 'https://api.bufferapp.com/1/%s'
class API(OAuth2Session):
'''
Small and clean class that embrace all basic
operations with the buffer app
'''
def get(self, url):
if not self.access_token:
raise ValueError('Please set an access token first!')
return super(OAuth2Session, self).get(url=BASE_URL % url)
| Python | 0.999999 |
0629183a91046b746d04c1a68e190721a156560b | rename id->fileid (id is a builtin) | build/cook.py | build/cook.py | #
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
import recipe
import time
import files
import commit
import os
import util
import sha1helper
def cook(repos, cfg, recipeFile):
classList = recipe.RecipeLoader(recipeFile)
built = []
if recipeFile[0] != "/":
raise IOError, "recipe file names must be absolute paths"
for (name, recipeClass) in classList.items():
print "Building", name
# find the files and ids which were owned by the last version of
# this package on the branch
fileIdMap = {}
fullName = cfg.packagenamespace + "/" + name
if repos.hasPackage(fullName):
for pkgName in repos.getPackageList(fullName):
pkgSet = repos.getPackageSet(pkgName)
pkg = pkgSet.getLatestPackage(cfg.defaultbranch)
for (fileid, path, version) in pkg.fileList():
fileIdMap[path] = fileid
ident = IdGen(fileIdMap)
srcdirs = [ os.path.dirname(recipeFile), cfg.sourcepath % {'pkgname': name} ]
recipeObj = recipeClass(cfg, srcdirs)
ourBuildDir = cfg.buildpath + "/" + recipeObj.name
recipeObj.setup()
recipeObj.unpackSources(ourBuildDir)
recipeObj.doBuild(ourBuildDir)
rootDir = "/var/tmp/srs/%s-%d" % (recipeObj.name, int(time.time()))
util.mkdirChain(rootDir)
recipeObj.doInstall(ourBuildDir, rootDir)
recipeObj.packages(rootDir)
pkgSet = recipeObj.getPackageSet()
pkgname = cfg.packagenamespace + "/" + recipeObj.name
for (name, buildPkg) in pkgSet.packageSet():
built.append(pkgname + "/" + name)
fileList = []
for filePath in buildPkg.keys():
realPath = rootDir + filePath
f = files.FileFromFilesystem(realPath, ident(filePath))
fileList.append((f, realPath, filePath))
commit.finalCommit(repos, cfg, pkgname + "/" + name, recipeObj.version,
fileList)
recipeName = os.path.basename(recipeFile)
f = files.FileFromFilesystem(recipeFile, ident(recipeName), type = "src")
fileList = [ (f, recipeFile, recipeName) ]
for file in recipeObj.allSources():
src = util.findFile(file, srcdirs)
srcName = os.path.basename(src)
f = files.FileFromFilesystem(src, ident(srcName), type = "src")
fileList.append((f, src, srcName))
commit.finalCommit(repos, cfg, pkgname + "/sources",
recipeObj.version, fileList)
recipeObj.cleanup(ourBuildDir, rootDir)
return built
class IdGen:
def __call__(self, path):
if self.map.has_key(path):
return self.map[path]
return sha1helper.hashString("%s %f %s" % (path, time.time(),
self.noise))
def __init__(self, map):
# file ids need to be unique. we include the time and path when
# we generate them; any data put here is also used
uname = os.uname()
self.noise = "%s %s" % (uname[1], uname[2])
self.map = map
| #
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
import recipe
import time
import files
import commit
import os
import util
import sha1helper
def cook(repos, cfg, recipeFile):
classList = recipe.RecipeLoader(recipeFile)
built = []
if recipeFile[0] != "/":
raise IOError, "recipe file names must be absolute paths"
for (name, recipeClass) in classList.items():
print "Building", name
# find the files and ids which were owned by the last version of
# this package on the branch
fileIdMap = {}
fullName = cfg.packagenamespace + "/" + name
if repos.hasPackage(fullName):
for pkgName in repos.getPackageList(fullName):
pkgSet = repos.getPackageSet(pkgName)
pkg = pkgSet.getLatestPackage(cfg.defaultbranch)
for (id, path, version) in pkg.fileList():
fileIdMap[path] = id
ident = IdGen(fileIdMap)
srcdirs = [ os.path.dirname(recipeFile), cfg.sourcepath % {'pkgname': name} ]
recipeObj = recipeClass(cfg, srcdirs)
ourBuildDir = cfg.buildpath + "/" + recipeObj.name
recipeObj.setup()
recipeObj.unpackSources(ourBuildDir)
recipeObj.doBuild(ourBuildDir)
rootDir = "/var/tmp/srs/%s-%d" % (recipeObj.name, int(time.time()))
util.mkdirChain(rootDir)
recipeObj.doInstall(ourBuildDir, rootDir)
recipeObj.packages(rootDir)
pkgSet = recipeObj.getPackageSet()
pkgname = cfg.packagenamespace + "/" + recipeObj.name
for (name, buildPkg) in pkgSet.packageSet():
built.append(pkgname + "/" + name)
fileList = []
for filePath in buildPkg.keys():
realPath = rootDir + filePath
f = files.FileFromFilesystem(realPath, ident(filePath))
fileList.append((f, realPath, filePath))
commit.finalCommit(repos, cfg, pkgname + "/" + name, recipeObj.version,
fileList)
recipeName = os.path.basename(recipeFile)
f = files.FileFromFilesystem(recipeFile, ident(recipeName), type = "src")
fileList = [ (f, recipeFile, recipeName) ]
for file in recipeObj.allSources():
src = util.findFile(file, srcdirs)
srcName = os.path.basename(src)
f = files.FileFromFilesystem(src, ident(srcName), type = "src")
fileList.append((f, src, srcName))
commit.finalCommit(repos, cfg, pkgname + "/sources",
recipeObj.version, fileList)
recipeObj.cleanup(ourBuildDir, rootDir)
return built
class IdGen:
def __call__(self, path):
if self.map.has_key(path):
return self.map[path]
return sha1helper.hashString("%s %f %s" % (path, time.time(),
self.noise))
def __init__(self, map):
# file ids need to be unique. we include the time and path when
# we generate them; any data put here is also used
uname = os.uname()
self.noise = "%s %s" % (uname[1], uname[2])
self.map = map
| Python | 0 |
29273b0d7473a1efa955cd35686838780d390106 | add more counters | monasca_persister/repositories/persister.py | monasca_persister/repositories/persister.py | # (C) Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import monascastatsd
from oslo_log import log
from monasca_common.kafka.consumer import KafkaConsumer
LOG = log.getLogger(__name__)
statsd_client = monascastatsd.Client('monasca.persister',
dimensions={'service': 'monitoring', 'component': 'monasca-persister'})
statsd_timer = statsd_client.get_timer()
statsd_flush_error_count = statsd_client.get_counter('flush.errors')
class Persister(object):
def __init__(self, kafka_conf, zookeeper_conf, repository):
self._data_points = []
self._kafka_topic = kafka_conf.topic
self._database_batch_size = kafka_conf.database_batch_size
self._consumer = KafkaConsumer(
kafka_conf.uri,
zookeeper_conf.uri,
kafka_conf.zookeeper_path,
kafka_conf.group_id,
kafka_conf.topic,
repartition_callback=self._flush,
commit_callback=self._flush,
commit_timeout=kafka_conf.max_wait_time_seconds)
self.repository = repository()
self.statsd_msg_count = statsd_client.get_counter('messages.consumed', dimensions={'type': self._kafka_topic})
self.statsd_msg_dropped_count = statsd_client.get_counter('messages.dropped',
dimensions={'type': self._kafka_topic})
@statsd_timer.timed("flush.time", sample_rate=0.01)
def _flush(self):
if not self._data_points:
return
try:
self.repository.write_batch(self._data_points)
LOG.info("Processed %d messages from topic %s", len(self._data_points), self._kafka_topic)
self._data_points = []
self._consumer.commit()
except Exception:
LOG.exception("Error writing to database: %s", self._data_points)
global statsd_flush_error_count
statsd_flush_error_count += 1
raise
def run(self):
try:
for raw_message in self._consumer:
message = None
try:
message = raw_message[1]
data_point = self.repository.process_message(message)
self._data_points.append(data_point)
self.statsd_msg_count += 1
except Exception:
LOG.exception('Error processing message. Message is '
'being dropped. %s', message)
self.statsd_msg_dropped_count += 1
if len(self._data_points) >= self._database_batch_size:
self._flush()
except:
LOG.exception(
'Persister encountered fatal exception processing '
'messages. '
'Shutting down all threads and exiting')
os._exit(1)
| # (C) Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import monascastatsd
from oslo_log import log
from monasca_common.kafka.consumer import KafkaConsumer
LOG = log.getLogger(__name__)
statsd_client = monascastatsd.Client('monasca.persister',
dimensions={'service': 'monitoring', 'component': 'monasca-persister'})
statsd_timer = statsd_client.get_timer()
class Persister(object):
def __init__(self, kafka_conf, zookeeper_conf, repository):
self._data_points = []
self._kafka_topic = kafka_conf.topic
self._database_batch_size = kafka_conf.database_batch_size
self._consumer = KafkaConsumer(
kafka_conf.uri,
zookeeper_conf.uri,
kafka_conf.zookeeper_path,
kafka_conf.group_id,
kafka_conf.topic,
repartition_callback=self._flush,
commit_callback=self._flush,
commit_timeout=kafka_conf.max_wait_time_seconds)
self.repository = repository()
self.statsd_flush_error_count = statsd_client.get_counter('flush.errors')
self.statsd_msg_count = statsd_client.get_counter('messages.processed')
self.statsd_msg_dropped_count = statsd_client.get_counter('messages.dropped')
@statsd_timer.timed("flush.time", sample_rate=0.01)
def _flush(self):
if not self._data_points:
return
try:
self.repository.write_batch(self._data_points)
LOG.info("Processed %d messages from topic %s", len(self._data_points), self._kafka_topic)
self._data_points = []
self._consumer.commit()
except Exception:
LOG.exception("Error writing to database: %s", self._data_points)
self.statsd_flush_error_count += 1
raise
def run(self):
try:
for raw_message in self._consumer:
message = None
try:
message = raw_message[1]
data_point = self.repository.process_message(message)
self._data_points.append(data_point)
self.statsd_msg_count += 1
except Exception:
LOG.exception('Error processing message. Message is '
'being dropped. %s', message)
self.statsd_msg_dropped_count += 1
if len(self._data_points) >= self._database_batch_size:
self._flush()
except:
LOG.exception(
'Persister encountered fatal exception processing '
'messages. '
'Shutting down all threads and exiting')
os._exit(1)
| Python | 0 |
fa1f4a1420f2ea6d66234dae2189a7fb8fdf1f6f | remove debug print | common/lib/xmodule/xmodule/mongo_utils.py | common/lib/xmodule/xmodule/mongo_utils.py | """
Common MongoDB connection functions.
"""
import logging
import pymongo
from pymongo import ReadPreference
from mongodb_proxy import MongoProxy
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# pylint: disable=bad-continuation
def connect_to_mongodb(
db, host,
port=27017, tz_aware=True, user=None, password=None,
retry_wait_time=0.1, proxy=True, **kwargs
):
"""
Returns a MongoDB Database connection, optionally wrapped in a proxy. The proxy
handles AutoReconnect errors by retrying read operations, since these exceptions
typically indicate a temporary step-down condition for MongoDB.
"""
# The MongoReplicaSetClient class is deprecated in Mongo 3.x, in favor of using
# the MongoClient class for all connections. Update/simplify this code when using
# PyMongo 3.x.
if kwargs.get('replicaSet'):
# Enable reading from secondary nodes in the MongoDB replicaset by using the
# MongoReplicaSetClient class.
# The 'replicaSet' parameter in kwargs is required for secondary reads.
# The read_preference should be set to a proper value, like SECONDARY_PREFERRED.
mongo_client_class = pymongo.MongoReplicaSetClient
else:
# No 'replicaSet' in kwargs - so no secondary reads.
mongo_client_class = pymongo.MongoClient
# If read_preference is given as a name of a valid ReadPreference.<NAME> constant
# such as "SECONDARY_PREFERRED", convert it. Otherwise pass it through unchanged.
if 'read_preference' in kwargs:
read_preference = getattr(ReadPreference, kwargs['read_preference'], None)
if read_preference is not None:
kwargs['read_preference'] = read_preference
mongo_conn = pymongo.database.Database(
mongo_client_class(
host=host,
port=port,
tz_aware=tz_aware,
document_class=dict,
**kwargs
),
db
)
if proxy:
mongo_conn = MongoProxy(
mongo_conn,
wait_time=retry_wait_time
)
# default the authSource to be whatever db we are connecting to (for backwards compatiblity)
authSource=db
if kwargs.get('authSource'):
# override if configured to use a different db for auth (e.g. Mongodb Atlas)
authSource=kwargs.get('authSource')
# If credentials were provided, authenticate the user.
if user is not None and password is not None:
mongo_conn.authenticate(user, password, authSource)
return mongo_conn
def create_collection_index(
collection, keys,
ignore_created=True, ignore_created_opts=True, **kwargs
):
"""
Create a MongoDB index in a collection. Optionally,
ignore errors related to the index already existing.
"""
# For an explanation of the error codes:
# https://github.com/mongodb/mongo/blob/v3.0/src/mongo/db/catalog/index_catalog.cpp#L542-L583
# https://github.com/mongodb/mongo/blob/v3.0/src/mongo/base/error_codes.err#L70-L87
# pylint: disable=invalid-name
INDEX_ALREADY_EXISTS = 68
INDEX_OPTIONS_CONFLICT = 85
try:
collection.create_index(keys, **kwargs)
except pymongo.errors.OperationFailure as exc:
errors_to_ignore = []
if ignore_created:
errors_to_ignore.append(INDEX_ALREADY_EXISTS)
if ignore_created_opts:
errors_to_ignore.append(INDEX_OPTIONS_CONFLICT)
if exc.code in errors_to_ignore:
logger.warning("Existing index in collection '{}' remained unchanged!: {}".format(
collection.full_name, exc.details['errmsg'])
)
else:
raise exc
| """
Common MongoDB connection functions.
"""
import logging
import pymongo
from pymongo import ReadPreference
from mongodb_proxy import MongoProxy
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# pylint: disable=bad-continuation
def connect_to_mongodb(
db, host,
port=27017, tz_aware=True, user=None, password=None,
retry_wait_time=0.1, proxy=True, **kwargs
):
"""
Returns a MongoDB Database connection, optionally wrapped in a proxy. The proxy
handles AutoReconnect errors by retrying read operations, since these exceptions
typically indicate a temporary step-down condition for MongoDB.
"""
# The MongoReplicaSetClient class is deprecated in Mongo 3.x, in favor of using
# the MongoClient class for all connections. Update/simplify this code when using
# PyMongo 3.x.
if kwargs.get('replicaSet'):
# Enable reading from secondary nodes in the MongoDB replicaset by using the
# MongoReplicaSetClient class.
# The 'replicaSet' parameter in kwargs is required for secondary reads.
# The read_preference should be set to a proper value, like SECONDARY_PREFERRED.
mongo_client_class = pymongo.MongoReplicaSetClient
else:
# No 'replicaSet' in kwargs - so no secondary reads.
mongo_client_class = pymongo.MongoClient
# If read_preference is given as a name of a valid ReadPreference.<NAME> constant
# such as "SECONDARY_PREFERRED", convert it. Otherwise pass it through unchanged.
if 'read_preference' in kwargs:
read_preference = getattr(ReadPreference, kwargs['read_preference'], None)
if read_preference is not None:
kwargs['read_preference'] = read_preference
print "host"
print host
print "port"
print port
print "tz_aware"
print tz_aware
print "dict"
print dict
print "db"
print db
mongo_conn = pymongo.database.Database(
mongo_client_class(
host=host,
port=port,
tz_aware=tz_aware,
document_class=dict,
**kwargs
),
db
)
if proxy:
mongo_conn = MongoProxy(
mongo_conn,
wait_time=retry_wait_time
)
# default the authSource to be whatever db we are connecting to (for backwards compatiblity)
authSource=db
if kwargs.get('authSource'):
# override if configured to use a different db for auth (e.g. Mongodb Atlas)
authSource=kwargs.get('authSource')
# If credentials were provided, authenticate the user.
if user is not None and password is not None:
mongo_conn.authenticate(user, password, authSource)
return mongo_conn
def create_collection_index(
collection, keys,
ignore_created=True, ignore_created_opts=True, **kwargs
):
"""
Create a MongoDB index in a collection. Optionally,
ignore errors related to the index already existing.
"""
# For an explanation of the error codes:
# https://github.com/mongodb/mongo/blob/v3.0/src/mongo/db/catalog/index_catalog.cpp#L542-L583
# https://github.com/mongodb/mongo/blob/v3.0/src/mongo/base/error_codes.err#L70-L87
# pylint: disable=invalid-name
INDEX_ALREADY_EXISTS = 68
INDEX_OPTIONS_CONFLICT = 85
try:
collection.create_index(keys, **kwargs)
except pymongo.errors.OperationFailure as exc:
errors_to_ignore = []
if ignore_created:
errors_to_ignore.append(INDEX_ALREADY_EXISTS)
if ignore_created_opts:
errors_to_ignore.append(INDEX_OPTIONS_CONFLICT)
if exc.code in errors_to_ignore:
logger.warning("Existing index in collection '{}' remained unchanged!: {}".format(
collection.full_name, exc.details['errmsg'])
)
else:
raise exc
| Python | 0.000008 |
a9ff2da085738770e1b3c03162f79454851df3b8 | Fix issue #144, getting wrong field name for harakiri_count | newrelic_plugin_agent/plugins/uwsgi.py | newrelic_plugin_agent/plugins/uwsgi.py | """
uWSGI
"""
import json
import logging
from newrelic_plugin_agent.plugins import base
LOGGER = logging.getLogger(__name__)
class uWSGI(base.SocketStatsPlugin):
GUID = 'com.meetme.newrelic_uwsgi_agent'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 1717
def add_datapoints(self, stats):
"""Add all of the data points for a node
:param dict stats: all of the nodes
"""
self.add_gauge_value('Listen Queue Size', '',
stats.get('listen_queue', 0))
self.add_gauge_value('Listen Queue Errors', '',
stats.get('listen_queue_errors', 0))
for lock in stats.get('locks', list()):
lock_name = lock.keys()[0]
self.add_gauge_value('Locks/%s' % lock_name, '', lock[lock_name])
exceptions = 0
harakiris = 0
requests = 0
respawns = 0
signals = 0
apps = dict()
for worker in stats.get('workers', list()):
id = worker['id']
# totals
exceptions += worker.get('exceptions', 0)
harakiris += worker.get('harakiri_count', 0)
requests += worker.get('requests', 0)
respawns += worker.get('respawns', 0)
signals += worker.get('signals', 0)
# Add the per worker
self.add_derive_value('Worker/%s/Exceptions' % id, '',
worker.get('exceptions', 0))
self.add_derive_value('Worker/%s/Harakiri' % id, '',
worker.get('harakiri_count', 0))
self.add_derive_value('Worker/%s/Requests' % id, '',
worker.get('requests', 0))
self.add_derive_value('Worker/%s/Respawns' % id, '',
worker.get('respawn_count', 0))
self.add_derive_value('Worker/%s/Signals' % id, '',
worker.get('signals', 0))
for app in worker['apps']:
if app['id'] not in apps:
apps[app['id']] = {'exceptions': 0,
'requests': 0}
apps[app['id']]['exceptions'] += app['exceptions']
apps[app['id']]['requests'] += app['requests']
for app in apps:
self.add_derive_value('Application/%s/Exceptions' % app, '',
apps[app].get('exceptions', 0))
self.add_derive_value('Application/%s/Requests' % app, '',
apps[app].get('requests', 0))
self.add_derive_value('Summary/Applications', '', len(apps))
self.add_derive_value('Summary/Exceptions', '', exceptions)
self.add_derive_value('Summary/Harakiris', '', harakiris)
self.add_derive_value('Summary/Requests', '', requests)
self.add_derive_value('Summary/Respawns', '', respawns)
self.add_derive_value('Summary/Signals', '', signals)
self.add_derive_value('Summary/Workers', '',
len(stats.get('workers', ())))
def fetch_data(self, connection):
"""Read the data from the socket
:param socket connection: The connection
:return: dict
"""
data = super(uWSGI, self).fetch_data(connection, read_till_empty=True)
if data:
return json.loads(data)
return {}
| """
uWSGI
"""
import json
import logging
from newrelic_plugin_agent.plugins import base
LOGGER = logging.getLogger(__name__)
class uWSGI(base.SocketStatsPlugin):
GUID = 'com.meetme.newrelic_uwsgi_agent'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 1717
def add_datapoints(self, stats):
"""Add all of the data points for a node
:param dict stats: all of the nodes
"""
self.add_gauge_value('Listen Queue Size', '',
stats.get('listen_queue', 0))
self.add_gauge_value('Listen Queue Errors', '',
stats.get('listen_queue_errors', 0))
for lock in stats.get('locks', list()):
lock_name = lock.keys()[0]
self.add_gauge_value('Locks/%s' % lock_name, '', lock[lock_name])
exceptions = 0
harakiris = 0
requests = 0
respawns = 0
signals = 0
apps = dict()
for worker in stats.get('workers', list()):
id = worker['id']
# totals
exceptions += worker.get('exceptions', 0)
harakiris += worker.get('harakiris', 0)
requests += worker.get('requests', 0)
respawns += worker.get('respawns', 0)
signals += worker.get('signals', 0)
# Add the per worker
self.add_derive_value('Worker/%s/Exceptions' % id, '',
worker.get('exceptions', 0))
self.add_derive_value('Worker/%s/Harakiri' % id, '',
worker.get('harakiri_count', 0))
self.add_derive_value('Worker/%s/Requests' % id, '',
worker.get('requests', 0))
self.add_derive_value('Worker/%s/Respawns' % id, '',
worker.get('respawn_count', 0))
self.add_derive_value('Worker/%s/Signals' % id, '',
worker.get('signals', 0))
for app in worker['apps']:
if app['id'] not in apps:
apps[app['id']] = {'exceptions': 0,
'requests': 0}
apps[app['id']]['exceptions'] += app['exceptions']
apps[app['id']]['requests'] += app['requests']
for app in apps:
self.add_derive_value('Application/%s/Exceptions' % app, '',
apps[app].get('exceptions', 0))
self.add_derive_value('Application/%s/Requests' % app, '',
apps[app].get('requests', 0))
self.add_derive_value('Summary/Applications', '', len(apps))
self.add_derive_value('Summary/Exceptions', '', exceptions)
self.add_derive_value('Summary/Harakiris', '', harakiris)
self.add_derive_value('Summary/Requests', '', requests)
self.add_derive_value('Summary/Respawns', '', respawns)
self.add_derive_value('Summary/Signals', '', signals)
self.add_derive_value('Summary/Workers', '',
len(stats.get('workers', ())))
def fetch_data(self, connection):
"""Read the data from the socket
:param socket connection: The connection
:return: dict
"""
data = super(uWSGI, self).fetch_data(connection, read_till_empty=True)
if data:
return json.loads(data)
return {}
| Python | 0 |
2d40d6a9623adb9e91bd1e4d99c5c111d0ff4f8f | Update the New Season service to use MySportsFeeds API v2 | nflpool/services/new_season_service.py | nflpool/services/new_season_service.py | from nflpool.data.seasoninfo import SeasonInfo
from nflpool.data.dbsession import DbSessionFactory
import requests
import pendulum
import nflpool.data.secret as secret
from requests.auth import HTTPBasicAuth
class NewSeasonService:
@staticmethod
def get_install():
return []
'''After first time installation or before a new season starts, this will update the season year
in the database. This is used to for the MySportsFeeds API to get the correct year of stats needed.'''
# TODO Add logging
@classmethod
def create_season(cls, season):
session = DbSessionFactory.create_session()
season_row = session.query(SeasonInfo)
if season_row.count() == 0:
print("New install, adding a season")
response = requests.get('https://api.mysportsfeeds.com/v2/pull/nfl/' + str(season) +
'-regular/games.json',
auth=HTTPBasicAuth(secret.msf_api, secret.msf_v2pw))
gameday_json = response.json()
gameday_data = gameday_json["games"][0]
# first_game_date = gameday_data["date"]
# first_game_time = gameday_data["time"]
# TODO Refactor the first game date and time in the database and change ID in the template
first_game_date = gameday_data["startTime"]
home_team = gameday_data["homeTeam"]["id"]
away_team = gameday_data["awayTeam"]["id"]
# first_game = first_game_date + "T" + "0" + first_game_time[:-2]
first_game_calc = pendulum.parse(first_game_date)
# if first_game_calc.hour <= 11:
# first_game = first_game_calc.add(hours=12)
# first_game_instance = pendulum.instance(first_game)
new_season = SeasonInfo(season_start_date=first_game_instance,
home_team=home_team, away_team=away_team, current_season=season)
session.add(new_season)
session.commit()
else:
print("Existing season found, updating to new year")
response = requests.get('https://api.mysportsfeeds.com/v2/pull/nfl/' + str(season) +
'-regular/games.json',
auth=HTTPBasicAuth(secret.msf_api, secret.msf_v2pw))
gameday_json = response.json()
gameday_data = gameday_json["games"][0]
first_game_date = gameday_data["startTime"]
home_team = gameday_data["homeTeam"]["id"]
away_team = gameday_data["awayTeam"]["id"]
# first_game = first_game_date + "T" + "0" + first_game_time[:-2]
first_game_calc = pendulum.parse(first_game_date)
# if first_game_calc.hour <= 11:
# first_game = first_game_calc.add(hours=12)
update_row = session.query(SeasonInfo).filter(SeasonInfo.id == '1').first()
update_row.current_season = season
update_row.season_start_date = pendulum.instance(first_game_calc)
update_row.away_team = away_team
update_row.home_team = home_team
session.commit()
| from nflpool.data.seasoninfo import SeasonInfo
from nflpool.data.dbsession import DbSessionFactory
import requests
import pendulum
import nflpool.data.secret as secret
from requests.auth import HTTPBasicAuth
class NewSeasonService:
@staticmethod
def get_install():
return []
'''After first time installation or before a new season starts, this will update the season year
in the database. This is used to for the MySportsFeeds API to get the correct year of stats needed.'''
# TODO Add logging
@classmethod
def create_season(cls, season):
session = DbSessionFactory.create_session()
season_row = session.query(SeasonInfo)
if season_row.count() == 0:
print("New install, adding a season")
response = requests.get('https://api.mysportsfeeds.com/v1.2/pull/nfl/' + str(season) +
'-regular/full_game_schedule.json',
auth=HTTPBasicAuth(secret.msf_username, secret.msf_pw))
gameday_json = response.json()
gameday_data = gameday_json["fullgameschedule"]["gameentry"][0]
first_game_date = gameday_data["date"]
first_game_time = gameday_data["time"]
away_team = gameday_data["awayTeam"]["Name"]
home_team = gameday_data["homeTeam"]["Name"]
first_game = first_game_date + "T" + "0" + first_game_time[:-2]
first_game_calc = pendulum.parse(first_game)
if first_game_calc.hour <= 11:
first_game = first_game_calc.add(hours=12)
first_game_instance = pendulum.instance(first_game)
new_season = SeasonInfo(season_start_date=first_game_instance, season_start_time=first_game_time,
home_team=home_team, away_team=away_team, current_season=season)
session.add(new_season)
session.commit()
else:
print("Existing season found, updating to new year")
response = requests.get('https://api.mysportsfeeds.com/v1.2/pull/nfl/' + str(season) +
'-regular/full_game_schedule.json',
auth=HTTPBasicAuth(secret.msf_username, secret.msf_pw))
gameday_json = response.json()
gameday_data = gameday_json["fullgameschedule"]["gameentry"][0]
first_game_date = gameday_data["date"]
first_game_time = gameday_data["time"]
away_team = gameday_data["awayTeam"]["Name"]
home_team = gameday_data["homeTeam"]["Name"]
first_game = first_game_date + "T" + "0" + first_game_time[:-2]
first_game_calc = pendulum.parse(first_game)
if first_game_calc.hour <= 11:
first_game = first_game_calc.add(hours=12)
update_row = session.query(SeasonInfo).filter(SeasonInfo.id == '1').first()
update_row.current_season = season
update_row.season_start_date = pendulum.instance(first_game)
update_row.season_start_time = first_game_time
update_row.away_team = away_team
update_row.home_team = home_team
session.commit()
| Python | 0 |
4a83439926181f26e4656d2a2b78021209d3b629 | fix the dropout to 0.2 because that is what they use | code/nolearntrail.py | code/nolearntrail.py | from nolearn.dbn import DBN
from readfacedatabases import *
from sklearn import cross_validation
from sklearn.metrics import zero_one_score
from sklearn.metrics import classification_report
import argparse
import numpy as np
from common import *
parser = argparse.ArgumentParser(description='nolearn test')
parser.add_argument('--equalize',dest='equalize',action='store_true', default=False,
help="if true, the input images are equalized before being fed into the net")
parser.add_argument('--maxEpochs', type=int, default=1000,
help='the maximum number of supervised epochs')
args = parser.parse_args()
def KanadeClassifier():
clf = DBN(
[1200, 1500, 1500, 1500, 7],
learn_rates=0.01,
learn_rates_pretrain=0.05,
learn_rate_decays=0.9,
use_re_lu=True,
nesterov=True,
momentum=0.95,
dropouts=[0.2, 0.5, 0.5, 0.5],
real_valued_vis=True,
minibatch_size=20,
epochs=args.maxEpochs,
verbose=False)
data, labels = readKanade(False, None, equalize=args.equalize)
data = scale(data)
data, labels = shuffle(data, labels)
labels = np.argmax(labels, axis=1)
# Split data for training and testing
kf = cross_validation.KFold(n=len(data), n_folds=5)
for train, test in kf:
break
trainData = data[train]
trainLabels = labels[train]
testData = data[test]
testLabels = labels[test]
clf.fit(trainData, trainLabels)
predictedLabels = clf.predict(testData)
print "testLabels"
print testLabels
print predictedLabels
print "Accuracy:", zero_one_score(testLabels, predictedLabels)
print "Classification report:"
print classification_report(testLabels, predictedLabels)
if __name__ == '__main__':
KanadeClassifier() | from nolearn.dbn import DBN
from readfacedatabases import *
from sklearn import cross_validation
from sklearn.metrics import zero_one_score
from sklearn.metrics import classification_report
import argparse
import numpy as np
from common import *
parser = argparse.ArgumentParser(description='nolearn test')
parser.add_argument('--equalize',dest='equalize',action='store_true', default=False,
help="if true, the input images are equalized before being fed into the net")
parser.add_argument('--maxEpochs', type=int, default=1000,
help='the maximum number of supervised epochs')
args = parser.parse_args()
def KanadeClassifier():
clf = DBN(
[1200, 1500, 1500, 1500, 7],
learn_rates=0.01,
learn_rates_pretrain=0.05,
learn_rate_decays=0.9,
use_re_lu=True,
nesterov=True,
momentum=0.95,
dropouts=[0.8, 0.5, 0.5, 0.5],
real_valued_vis=True,
minibatch_size=20,
epochs=args.maxEpochs,
verbose=False)
data, labels = readKanade(False, None, equalize=args.equalize)
data = scale(data)
data, labels = shuffle(data, labels)
labels = np.argmax(labels, axis=1)
# Split data for training and testing
kf = cross_validation.KFold(n=len(data), n_folds=5)
for train, test in kf:
break
trainData = data[train]
trainLabels = labels[train]
testData = data[test]
testLabels = labels[test]
clf.fit(trainData, trainLabels)
predictedLabels = clf.predict(testData)
print "testLabels"
print testLabels
print predictedLabels
print "Accuracy:", zero_one_score(testLabels, predictedLabels)
print "Classification report:"
print classification_report(testLabels, predictedLabels)
if __name__ == '__main__':
KanadeClassifier() | Python | 0.002569 |
ddc571f32212a57f725101314878d17df9124bb8 | fix loop range | commands/cmd_roll.py | commands/cmd_roll.py | import random
from lib.command import Command
class RollCommand(Command):
name = 'roll'
description = 'Roll some dice.'
def run(self, message, args):
if not args:
self.reply(message, 'No roll specification supplied. Try */roll 3d6*.', parse_mode='Markdown')
return
spec = ''.join(char for char in ''.join(args) if char.isdigit() or char == 'd')
dice_count, __, dice_size = spec.partition('d')
if not dice_count or not dice_size:
self.reply(message, 'Invalid roll specification. Example: */roll 3d6*', parse_mode='Markdown')
return
dice_count = int(''.join(char for char in dice_count if char.isdigit()))
dice_size = int(''.join(char for char in dice_size if char.isdigit()))
if dice_count < 1 or dice_count > 64 or dice_size < 4 or dice_size > 128:
self.reply(message, 'Invalid roll specification. Must be a minimum of *1d4* and a maximum of *64d128*',
parse_mode='Markdown')
return
rolls = [random.SystemRandom().randint(1, dice_size) for _ in range(dice_count)]
self.reply(message, '[{0}] = {1}'.format(', '.join(map(str, rolls)), sum(rolls)))
| import random
from lib.command import Command
class RollCommand(Command):
name = 'roll'
description = 'Roll some dice.'
def run(self, message, args):
if not args:
self.reply(message, 'No roll specification supplied. Try */roll 3d6*.', parse_mode='Markdown')
return
spec = ''.join(char for char in ''.join(args) if char.isdigit() or char == 'd')
dice_count, __, dice_size = spec.partition('d')
if not dice_count or not dice_size:
self.reply(message, 'Invalid roll specification. Example: */roll 3d6*', parse_mode='Markdown')
return
dice_count = int(''.join(char for char in dice_count if char.isdigit()))
dice_size = int(''.join(char for char in dice_size if char.isdigit()))
if dice_count < 1 or dice_count > 64 or dice_size < 4 or dice_size > 128:
self.reply(message, 'Invalid roll specification. Must be a minimum of *1d4* and a maximum of *64d128*',
parse_mode='Markdown')
return
rolls = [random.SystemRandom().randint(1, dice_size) for _ in dice_count]
self.reply(message, '[{0}] = {1}'.format(', '.join(map(str, rolls)), sum(rolls)))
| Python | 0.000001 |
f92c8c9620524d0414af6f039885c2875a247cd0 | add msrest dependency (#7062) | sdk/appconfiguration/azure-appconfiguration/setup.py | sdk/appconfiguration/azure-appconfiguration/setup.py | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import sys
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-appconfiguration"
PACKAGE_PPRINT_NAME = "App Configuration Data"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.md', encoding='utf-8') as f:
history = f.read()
exclude_packages = [
'tests',
'examples',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
]
if sys.version_info < (3, 5, 3):
exclude_packages.extend([
'*.aio',
'*.aio.*'
])
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft {} Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + history,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/appconfiguration/azure-appconfiguration',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=exclude_packages),
install_requires=[
"msrest>=0.5.0",
"azure-core<2.0.0,>=1.0.0b2",
],
extras_require={
":python_version<'3.0'": ['azure-nspkg'],
":python_version<'3.4'": ['enum34>=1.0.4'],
":python_version<'3.5'": ['typing'],
"async:python_version>='3.5'": [
'aiohttp>=3.0',
'aiodns>=2.0'
],
}
) | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import sys
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-appconfiguration"
PACKAGE_PPRINT_NAME = "App Configuration Data"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.md', encoding='utf-8') as f:
history = f.read()
exclude_packages = [
'tests',
'examples',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
]
if sys.version_info < (3, 5, 3):
exclude_packages.extend([
'*.aio',
'*.aio.*'
])
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft {} Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + history,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/appconfiguration/azure-appconfiguration',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=exclude_packages),
install_requires=[
"azure-core<2.0.0,>=1.0.0b2",
],
extras_require={
":python_version<'3.0'": ['azure-nspkg'],
":python_version<'3.4'": ['enum34>=1.0.4'],
":python_version<'3.5'": ['typing'],
"async:python_version>='3.5'": [
'aiohttp>=3.0',
'aiodns>=2.0'
],
}
) | Python | 0 |
fe06c3a839bdc13384250924a4a30d9dd3455fc7 | fix archive resource unit test | service/test/unit/resources/test_archive_resource.py | service/test/unit/resources/test_archive_resource.py | from twisted.trial import unittest
import json
from mockito import mock, when, verify
from test.unit.resources import DummySite
from twisted.web.test.requesthelper import DummyRequest
from pixelated.resources.mails_resource import MailsArchiveResource
from twisted.internet import defer
class TestArchiveResource(unittest.TestCase):
def setUp(self):
self.mail_service = mock()
self.web = DummySite(MailsArchiveResource(self.mail_service))
def test_render_POST_should_archive_mails(self):
request = DummyRequest(['/mails/archive'])
request.method = 'POST'
idents = ['1', '2']
content = mock()
when(content).read().thenReturn(json.dumps({'idents': ['1', '2']}))
d1 = defer.Deferred()
d1.callback(None)
when(self.mail_service).archive_mail('1').thenReturn(d1)
d2 = defer.Deferred()
d2.callback(None)
when(self.mail_service).archive_mail('2').thenReturn(d2)
request.content = content
d = self.web.get(request)
def assert_response(_):
verify(self.mail_service).archive_mail('1')
verify(self.mail_service).archive_mail('2')
d.addCallback(assert_response)
return d
| import unittest
import json
from mockito import mock, when, verify
from test.unit.resources import DummySite
from twisted.web.test.requesthelper import DummyRequest
from pixelated.resources.mails_resource import MailsArchiveResource
from twisted.internet import defer
class TestArchiveResource(unittest.TestCase):
def setUp(self):
self.mail_service = mock()
self.web = DummySite(MailsArchiveResource(self.mail_service))
def test_render_POST_should_archive_mails(self):
request = DummyRequest(['/mails/archive'])
request.method = 'POST'
content = mock()
when(content).read().thenReturn(json.dumps({'idents': ['1', '2']}))
when(self.mail_service).archive_mail('1').thenReturn(defer.Deferred())
when(self.mail_service).archive_mail('2').thenReturn(defer.Deferred())
request.content = content
d = self.web.get(request)
def assert_response(_):
verify(self.mail_service).archive_mail('1')
verify(self.mail_service).archive_mail('2')
d.addCallback(assert_response)
return d
| Python | 0 |
6ecae8f97723b90193bc64e53f8dcee22c3cbf55 | add tag to return settings values to django template. | odm2admin/templatetags/admin_extras.py | odm2admin/templatetags/admin_extras.py | # this came from https://djangosnippets.org/snippets/2196/
# adds a collect tag for templates so you can build lists
from django import template
from django.contrib import admin
from django.contrib.gis.geos import GEOSGeometry
from django.core.management import settings
register = template.Library()
@register.tag
def collect(token):
bits = list(token.split_contents())
if len(bits) > 3 and bits[-2] == 'as':
varname = bits[-1]
items = bits[1:-2]
return CollectNode(items, varname)
else:
raise template.TemplateSyntaxError('%r expected format is "item [item ...] as varname"'
% bits[0])
class CollectNode(template.Node):
def __init__(self, items, varname):
self.items = map(template.Variable, items)
self.varname = varname
def render(self, context):
context[self.varname] = [i.resolve(context) for i in self.items]
return ''
class AssignNode(template.Node):
def __init__(self, name, value):
self.name = name
self.value = value
def render(self, context):
context[self.name] = self.value.resolve(context, True)
return ''
def do_assign(parser, token):
"""
Assign an expression to a variable in the current context.
Syntax::
{% assign [name] [value] %}
Example::
{% assign list entry.get_related %}
"""
bits = token.contents.split()
if len(bits) != 3:
raise template.TemplateSyntaxError("'%s' tag takes two arguments" % bits[0])
value = parser.compile_filter(bits[2])
return AssignNode(bits[1], value)
register = template.Library()
register.tag('assign', do_assign)
# Extra template tags for map
@register.filter()
def get_lat_lng(value, gc):
lat = GEOSGeometry(value).coords[1]
lon = GEOSGeometry(value).coords[0]
if gc == 'lat':
return "{}".format(lat)
elif gc == 'lon':
return "{}".format(lon)
@register.filter()
def filter_coords(value):
sites = list()
for site in value:
lat = GEOSGeometry(site.featuregeometry).coords[1]
lon = GEOSGeometry(site.featuregeometry).coords[0]
if lat != 0 and lon != 0:
sites.append(site)
return sites
@register.filter()
def get_title(value, short):
if value == 'site_title':
return admin.site.site_title
elif value == 'site_header':
return admin.site.site_header
elif value == 'shortcut_title':
return settings.ADMIN_SHORTCUTS[0]['shortcuts'][short]['title']
@register.filter()
def in_field(value):
val = value.split(" ")
return val[0]
# settings value
@register.simple_tag
def settings_value(name):
return getattr(settings, name, "")
# https://stackoverflow.com/questions/771890/how-do-i-get-the-class-of-a-object-within-a-django-template
@register.filter(name='get_class')
def get_class(value):
return value.__class__.__name__ | # this came from https://djangosnippets.org/snippets/2196/
# adds a collect tag for templates so you can build lists
from django import template
from django.contrib import admin
from django.contrib.gis.geos import GEOSGeometry
from django.core.management import settings
register = template.Library()
@register.tag
def collect(token):
bits = list(token.split_contents())
if len(bits) > 3 and bits[-2] == 'as':
varname = bits[-1]
items = bits[1:-2]
return CollectNode(items, varname)
else:
raise template.TemplateSyntaxError('%r expected format is "item [item ...] as varname"'
% bits[0])
class CollectNode(template.Node):
def __init__(self, items, varname):
self.items = map(template.Variable, items)
self.varname = varname
def render(self, context):
context[self.varname] = [i.resolve(context) for i in self.items]
return ''
class AssignNode(template.Node):
def __init__(self, name, value):
self.name = name
self.value = value
def render(self, context):
context[self.name] = self.value.resolve(context, True)
return ''
def do_assign(parser, token):
"""
Assign an expression to a variable in the current context.
Syntax::
{% assign [name] [value] %}
Example::
{% assign list entry.get_related %}
"""
bits = token.contents.split()
if len(bits) != 3:
raise template.TemplateSyntaxError("'%s' tag takes two arguments" % bits[0])
value = parser.compile_filter(bits[2])
return AssignNode(bits[1], value)
register = template.Library()
register.tag('assign', do_assign)
# Extra template tags for map
@register.filter()
def get_lat_lng(value, gc):
lat = GEOSGeometry(value).coords[1]
lon = GEOSGeometry(value).coords[0]
if gc == 'lat':
return "{}".format(lat)
elif gc == 'lon':
return "{}".format(lon)
@register.filter()
def filter_coords(value):
sites = list()
for site in value:
lat = GEOSGeometry(site.featuregeometry).coords[1]
lon = GEOSGeometry(site.featuregeometry).coords[0]
if lat != 0 and lon != 0:
sites.append(site)
return sites
@register.filter()
def get_title(value, short):
if value == 'site_title':
return admin.site.site_title
elif value == 'site_header':
return admin.site.site_header
elif value == 'shortcut_title':
return settings.ADMIN_SHORTCUTS[0]['shortcuts'][short]['title']
@register.filter()
def in_field(value):
val = value.split(" ")
return val[0]
# https://stackoverflow.com/questions/771890/how-do-i-get-the-class-of-a-object-within-a-django-template
@register.filter(name='get_class')
def get_class(value):
return value.__class__.__name__ | Python | 0 |
b9b7ed8f4ddf139bd031ce7650558f7a0e753718 | Fix "compilation" error. | solidity/python/constants/PrintMaxExpPerPrecision.py | solidity/python/constants/PrintMaxExpPerPrecision.py | from math import factorial
MIN_PRECISION = 32
MAX_PRECISION = 63
NUM_OF_VALUES_PER_ROW = 4
assert((MAX_PRECISION+1) % NUM_OF_VALUES_PER_ROW == 0)
NUM_OF_COEFS = 34
maxFactorial = factorial(NUM_OF_COEFS)
coefficients = [maxFactorial/factorial(i) for i in range(NUM_OF_COEFS)]
def fixedExpUnsafe(x,precision):
xi = x
res = safeMul(coefficients[0],1 << precision)
for i in range(1,NUM_OF_COEFS-1):
res = safeAdd(res,safeMul(xi,coefficients[i]))
xi = safeMul(xi,x) >> precision
res = safeAdd(res,safeMul(xi,coefficients[-1]))
return res / coefficients[0]
def safeMul(x,y):
assert(x * y < (1 << 256))
return x * y
def safeAdd(x,y):
assert(x + y < (1 << 256))
return x + y
def binarySearch(func,args):
lo = 1
hi = 1 << 256
while lo+1 < hi:
mid = (lo+hi)/2
try:
func(mid,args)
lo = mid
except Exception,error:
hi = mid
try:
func(hi,args)
return hi
except Exception,error:
func(lo,args)
return lo
def getMaxExp(precision,factor):
maxExp = maxExpArray[MIN_PRECISION]
for p in range (MIN_PRECISION,precision):
maxExp = safeMul(maxExp,factor) >> MAX_PRECISION
fixedExpUnsafe(maxExp,precision)
return maxExp
def assertFactor(factor,args):
for precision in range(MIN_PRECISION,MAX_PRECISION+1):
getMaxExp(precision,factor)
maxExpArray = [0]*(MAX_PRECISION+1)
for precision in range(MAX_PRECISION+1):
maxExpArray[precision] = binarySearch(fixedExpUnsafe,precision)
growthFactor = binarySearch(assertFactor,None)
maxMaxExpLen = len('0x{:x}'.format(maxExpArray[-1]))
print 'Max Exp Per Precision:'
formatString = '{:s}{:d}{:s}'.format('Precision = {:2d} | Max Exp = {:',maxMaxExpLen,'s} | Ratio = {:9.7f}')
for precision in range(MAX_PRECISION+1):
maxExp = '0x{:x}'.format(maxExpArray[precision])
ratio = float(maxExpArray[precision])/float(maxExpArray[precision-1]) if precision > 0 else 0.0
print formatString.format(precision,maxExp,ratio)
print ''
print 'maxExpArray = ['
formatString = '{:s}{:d}{:s}'.format('{:',maxMaxExpLen,'s},')
for i in range(len(maxExpArray)/NUM_OF_VALUES_PER_ROW):
items = []
for j in range(NUM_OF_VALUES_PER_ROW):
items.append('0x{:x}'.format(maxExpArray[i*NUM_OF_VALUES_PER_ROW+j]))
print ' '+''.join([formatString.format(item) for item in items])
print ']\n'
print 'Compute the values dynamically, using a growth-factor of 0x{:x} >> {:d}:'.format(growthFactor,MAX_PRECISION)
formatString = '{:s}{:d}{:s}{:d}{:s}'.format('Precision = {:2d} | Theoretical Max Exp = {:',maxMaxExpLen,'s} | Practical Max Exp = {:',maxMaxExpLen,'s} | Difference = {:d}')
for precision in range(MIN_PRECISION,MAX_PRECISION+1):
theoreticalMaxExp = maxExpArray[precision]
practicalMaxExp = getMaxExp(precision,growthFactor)
print formatString.format(precision,'0x{:x}'.format(theoreticalMaxExp),'0x{:x}'.format(practicalMaxExp),theoreticalMaxExp-practicalMaxExp)
| from math import factorial
MIN_PRECISION = 32
MAX_PRECISION = 63
NUM_OF_VALUES_PER_ROW = 4
assert((MAX_PRECISION+1) % NUM_OF_VALUES_PER_ROW == 0)
NUM_OF_COEFS = 34
maxFactorial = factorial(NUM_OF_COEFS)
coefficients = [maxFactorial/factorial(i) for i in range(NUM_OF_COEFS)]
def fixedExpUnsafe(x,precision):
xi = x
res = safeMul(coefficients[0],1 << precision)
for i in range(1,NUM_OF_COEFS-1):
res = safeAdd(res,safeMul(xi,coefficients[i]))
xi = safeMul(xi,x) >> precision
res = safeAdd(res,safeMul(xi,coefficients[-1]))
return res / coefficients[0]
def safeMul(x,y):
assert(x * y < (1 << 256))
return x * y
def safeAdd(x,y):
assert(x + y < (1 << 256))
return x + y
def binarySearch(func,args):
lo = 1
hi = 1 << 256
while lo+1 < hi:
mid = (lo+hi)/2
try:
func(mid,args)
lo = mid
except Exception,error:
hi = mid
try:
func(hi,args)
return hi
except Exception,error:
func(lo,args)
return lo
def getMaxExp(precision,factor):
maxExp = maxExpArray[MIN_PRECISION]
for p in range (MIN_PRECISION,precision):
maxExp = safeMul(maxExp,factor) >> MAX_PRECISION
fixedExpUnsafe(maxExp,precision)
return maxExp
def assertFactor(factor,args):
for precision in range(MIN_PRECISION,MAX_PRECISION+1):
getMaxExp(precision,factor)
maxExpArray = [0]*(MAX_PRECISION+1)
for precision in range(MAX_PRECISION+1):
maxExpArray[precision] = binarySearch(fixedExpUnsafe,precision)
growthFactor = binarySearch(assertFactor,None)
maxMaxExpLen = len('0x{:x}'.format(maxExpArray[-1]))
print 'Max Exp Per Precision:'
formatString = '{:s}{:d}{:s}'.format('Precision = {:2d} | Max Exp = {:',maxMaxExpLen,'s} | Ratio = {:9.7f}')
for precision in range(MAX_PRECISION+1):
maxExp = '0x{:x}'.format(maxExpArray[precision])
ratio = float(maxExpArray[precision])/float(maxExpArray[precision-1]) if precision > 0 else 0.0
print formatString.format(precision,maxExp,ratio)
print ''
print 'maxExpArray = ['
formatString = '{:s}{:d}{:s}'.format('{:',maxMaxExpLen,'s},')
for i in range(len(maxExpArray)/NUM_OF_VALUES_PER_ROW):
items = []
for j in range(NUM_OF_VALUES_PER_ROW):
items.append('0x{:x}'.format(maxExpArray[i*NUM_OF_VALUES_PER_ROW+j]))
print ' '+''.join([formatString.format(item) for item in items])
print ']\n'
print 'Compute the values dynamically, using a growth-factor of 0x{:x} >> {:d}:'.format(growthFactor,MAX_PRECISION)
formatString = '{:s}{:d}{:s}{:d}{:s}'.format('Precision = {:2d} | Theoretical Max Exp = {:',maxMaxExpLen,'s} | Practical Max Exp = {:',maxMaxExpLen,'s} | Difference = {:d}')
for precision in range(MIN_PRECISION,MAX_PRECISION+1):
theoreticalMaxExp = maxExpArray[precision]
practicalMaxExp = getMaxExp(precision,maxFactor)
print formatString.format(precision,'0x{:x}'.format(theoreticalMaxExp),'0x{:x}'.format(practicalMaxExp),theoreticalMaxExp-practicalMaxExp)
| Python | 0.000013 |
283dd9918bd16202bf799c470e8e5b50d2ef1cd6 | Increment version number to 0.7.0 | datajoint/version.py | datajoint/version.py | __version__ = "0.7.0"
| __version__ = "0.6.1"
| Python | 0.99997 |
0f5fe279d6b4641b2a2741271da4f021238f00a1 | fix import in generator | dataset_generator.py | dataset_generator.py | import csv
import os
# execfile("C:\\Users\\YONI\\Documents\\Projects\\degree\\attack detection methods\\anomaly_generator\\dataset_generator.py")
ROW_NUM = 10
path = "C:\\Users\\YONI\\Documents\\anomally_detector\\data_sets\\example\\"
users_num = 100
features_num = 20
directory = "data_sets\\"
if not os.path.exists(directory):
os.makedirs(directory)
users = []
features = []
for i in range(0,users_num):
users.append('user'+str(i))
for i in range(0,features_num):
features.append('feature'+str(i))
for user in users:
with open("data_sets\\"+user+'.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\n', fieldnames=features)
writer.writeheader()
for i in range(1,ROW_NUM):
featDic = {}
for feature in features:
featDic[feature] = user + '_' + feature + '_' + str(i)
writer.writerow(featDic) | import csv
# execfile("C:\\Users\\YONI\\Documents\\Projects\\degree\\attack detection methods\\anomaly_generator\\dataset_generator.py")
ROW_NUM = 10
path = "C:\\Users\\YONI\\Documents\\anomally_detector\\data_sets\\example\\"
users_num = 100
features_num = 20
directory = "data_sets\\"
if not os.path.exists(directory):
os.makedirs(directory)
users = []
features = []
for i in range(0,users_num):
users.append('user'+str(i))
for i in range(0,features_num):
features.append('feature'+str(i))
for user in users:
with open("data_sets\\"+user+'.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\n', fieldnames=features)
writer.writeheader()
for i in range(1,ROW_NUM):
featDic = {}
for feature in features:
featDic[feature] = user + '_' + feature + '_' + str(i)
writer.writerow(featDic) | Python | 0 |
7ccb9cb0d6e3ce6e3c6c09604af5e2bbdfae63ae | update urls.py | openstax/urls.py | openstax/urls.py | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from wagtail.contrib.wagtailapi import urls as wagtailapi_urls
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailimages import urls as wagtailimages_urls
from .api import api_router
from news.search import search
from api import urls as api_urls
urlpatterns = [
url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^accounts/', include('accounts.urls')),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^images/', include(wagtailimages_urls)),
url(r'^api/mail/', include('mail.urls')),
url(r'^api/', include(wagtailapi_urls)),
url(r'^api/', include(api_urls)),
url(r'^api/search/$', search, name='search'),
url(r'^api/v2/', api_router.urls),
url(r'^api/pages/', include('pages.urls')),
url(r'^api/books/', include('books.urls')),
url(r'^api/news/', include('news.urls')),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's serving mechanism
url(r'', include(wagtail_urls)),
]
if settings.DEBUG:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic.base import RedirectView
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [
url(r'^favicon\.ico$', RedirectView.as_view(
url=settings.STATIC_URL + 'pages/images/favicon.ico'))
]
| from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from wagtail.contrib.wagtailapi import urls as wagtailapi_urls
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailimages import urls as wagtailimages_urls
from .api import api_router
from .functions import S3DocumentServe
from news.search import search
from api import urls as api_urls
urlpatterns = [
url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^accounts/', include('accounts.urls')),
url(r'^documents/(?P<document_id>\d+)/(.*)$', S3DocumentServe.as_view(), name='wagtaildocs_serve'),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^images/', include(wagtailimages_urls)),
url(r'^api/mail/', include('mail.urls')),
url(r'^api/v2/', api_router.urls),
url(r'^api/', include(wagtailapi_urls)),
url(r'^api/', include(api_urls)),
url(r'^api/search/$', search, name='search'),
url(r'^api/pages/', include('pages.urls')),
url(r'^api/books/', include('books.urls')),
url(r'^api/news/', include('news.urls')),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's serving mechanism
url(r'', include(wagtail_urls)),
]
if settings.DEBUG:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic.base import RedirectView
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [
url(r'^favicon\.ico$', RedirectView.as_view(
url=settings.STATIC_URL + 'pages/images/favicon.ico'))
]
| Python | 0.000001 |
853135b61f34ece1363da9b53244e775a2ba16a8 | Add docstring for convert_timezone() | datetime/datetime.py | datetime/datetime.py | import datetime
# ==============================================================================
# TIMESTAMP 2 STR
# ==============================================================================
def timestamp2str(t, pattern="%Y-%m-%d %H:%M:%S"):
""" Given a float timestamp it returns the date as a formatted string,
based on the date `pattern` specified """
return datetime.datetime.fromtimestamp(t).strftime(pattern)
# ==============================================================================
# CONVERT_TIMEZONE
# ==============================================================================
# import datetime
from dateutil import tz
def convert_timezone(time, a="UTC", b="local"):
""" Given a datetime object, in timezone a, it changes it to timezone b.
Args:
time: (datetime object)
a: (str) timezone code to set the from time as.
eg:
"UTC"
"Australia/Melbourne"
or..
"local"
b: (str) timezone to set the to time as.
"""
# TIMEZONE OBJECTS
tza = tz.tzlocal(a) if (a=="local") else tz.gettz(a)
tzb = tz.tzlocal(b) if (b=="local") else tz.gettz(b)
# FORMAT TIME WITH FROM TIMEZONE
time = time.replace(tzinfo=tza)
# CHANGE TIME ZONE
newtime = time.astimezone(tzb)
return newtime
| import datetime
# ==============================================================================
# TIMESTAMP 2 STR
# ==============================================================================
def timestamp2str(t, pattern="%Y-%m-%d %H:%M:%S"):
""" Given a float timestamp it returns the date as a formatted string,
based on the date `pattern` specified """
return datetime.datetime.fromtimestamp(t).strftime(pattern)
# import datetime
from dateutil import tz
def convert_timezone(time, a="UTC", b="local"):
# TIMEZONE OBJECTS
tza = tz.tzlocal(a) if (a=="local") else tz.gettz(a)
tzb = tz.tzlocal(b) if (b=="local") else tz.gettz(b)
# FORMAT TIME WITH FROM TIMEZONE
time = time.replace(tzinfo=tza)
# CHANGE TIME ZONE
newtime = time.astimezone(tzb)
return newtime
| Python | 0 |
21b668f8f6d75ff56c85f6f21a2565e39a220679 | Add more datetime functions | datetime/datetime.py | datetime/datetime.py | import datetime
import dateutil
import dateutil.tz
def gettz(tz):
""" Return a timezone object to be used by dateutul given a timezone as a
string such as "UTC" or "Australia/Melbourne" """
return dateutil.tz.gettz(tz)
def datetime2str(dt, format="%Y-%m-%d %H:%M:%S", tz="Australia/Melbourne"):
""" """
# Set timezone information
tzinfo = dateutil.tz.gettz(tz)
if dt.tzinfo is None:
dt = dt.replace(tzinfo=tzinfo)
else:
dt = dt.astimezone(tzinfo)
# format as string
return dt.strftime(format)
def timestamp2str(t, tz="Australia/Melbourne", format="%Y-%m-%d %H:%M:%S.f %Z"):
tzinfo = dateutil.tz.gettz(tz)
assert tzinfo is not None, "Could not get timezone data"
return datetime.datetime.fromtimestamp(t, tz=tzinfo).strftime(format)
def str2datetime(t, format="%Y-%m-%d %H:%M:%S", tz="Australia/Melbourne"):
tzinfo = dateutil.tz.gettz(tz)
assert tzinfo is not None, "Could not get timezone data"
dt = datetime.datetime.strptime(t, format).replace(tzinfo=tzinfo)
return dt
def str2timestamp(t, format="%Y-%m-%d %H:%M:%S", tz="Australia/Melbourne"):
tzinfo = dateutil.tz.gettz(tz)
assert tzinfo is not None, "Could not get timezone data"
dt = datetime.datetime.strptime(t, format).replace(tzinfo=tzinfo)
return dt.timestamp()
def now_datetime(tz="Australia/Melbourne"):
tzinfo = dateutil.tz.gettz(tz)
assert tzinfo is not None, "Could not get timezone data"
return datetime.datetime.now(tz=tzinfo)
def now_timestamp():
tzinfo = dateutil.tz.gettz("UTC")
assert tzinfo is not None, "Could not get timezone data"
return datetime.datetime.now(tz=tzinfo).timestamp()
def now_string(format="%Y-%m-%d %H:%M:%S", tz="Australia/Melbourne"):
tzinfo = dateutil.tz.gettz(tz)
assert tzinfo is not None, "Could not get timezone data"
return datetime.datetime.now(tz=tzinfo).strftime(format)
def set_timezone(dt, tz="UTC"):
""" overwrites the timezone information of a datetime and returns a copy """
tzinfo = dateutil.tz.gettz(tz)
return dt.replace(tzinfo=tzinfo)
def convert_timezone(dt, tz, tzin=None):
""" Returns a copy of a datetime objet with time converted to new timezoneself.
WARNING: it might be problematic to use tz="local" for output timezone.
It is better to explicitly specify an actual output timezone.
"""
# Ensure datetime object is timesone aware
if dt.tzinfo is None:
assert isinstance(tzin, str), \
"\n datetime object must either be timezone aware, OR, you should"\
"\n provide original timezone as a string in the `tzin` argument"
tzinfo_in = dateutil.tz.tzlocal() if (tzin=="local") else dateutil.tz.gettz(tzin)
dt = dt.replace(tzinfo=tzinfo_in)
# Convert to new timesone
tzinfo_out = dateutil.tz.tzlocal() if (tz=="local") else dateutil.tz.gettz(tz)
return dt.astimezone(tzinfo_out)
| import datetime
import dateutil
import dateutil.tz
def timestamp2str(t, tz="Australia/Melbourne", format="%Y-%m-%d %H:%M:%S.f %Z"):
tzinfo = dateutil.tz.gettz(tz)
assert tzinfo is not None, "Could not get timezone data"
return datetime.datetime.fromtimestamp(t, tz=tzinfo).strftime(format)
def str2datetime(t, format="%Y-%m-%d %H:%M:%S", tz="Australia/Melbourne"):
tzinfo = dateutil.tz.gettz(tz)
assert tzinfo is not None, "Could not get timezone data"
dt = datetime.datetime.strptime(t, format).replace(tzinfo=tzinfo)
return dt
def str2timestamp(t, format="%Y-%m-%d %H:%M:%S", tz="Australia/Melbourne"):
tzinfo = dateutil.tz.gettz(tz)
assert tzinfo is not None, "Could not get timezone data"
dt = datetime.datetime.strptime(t, format).replace(tzinfo=tzinfo)
return dt.timestamp()
def now_datetime(tz="Australia/Melbourne"):
tzinfo = dateutil.tz.gettz(tz)
assert tzinfo is not None, "Could not get timezone data"
return datetime.datetime.now(tz=tzinfo)
def now_timestamp():
tzinfo = dateutil.tz.gettz("UTC")
assert tzinfo is not None, "Could not get timezone data"
return datetime.datetime.now(tz=tzinfo).timestamp()
def now_string(format="%Y-%m-%d %H:%M:%S", tz="Australia/Melbourne"):
tzinfo = dateutil.tz.gettz(tz)
assert tzinfo is not None, "Could not get timezone data"
return datetime.datetime.now(tz=tzinfo).strftime(format)
# ==============================================================================
# CONVERT_TIMEZONE
# ==============================================================================
# import datetime
from dateutil import tz
def convert_timezone(time, a="UTC", b="local"):
""" Given a datetime object, in timezone a, it changes it to timezone b.
Args:
time: (datetime object)
a: (str) timezone code to set the from time as.
eg:
"UTC"
"Australia/Melbourne"
or..
"local"
b: (str) timezone to set the to time as.
"""
# TIMEZONE OBJECTS
tza = tz.tzlocal(a) if (a=="local") else tz.gettz(a)
tzb = tz.tzlocal(b) if (b=="local") else tz.gettz(b)
# FORMAT TIME WITH FROM TIMEZONE
time = time.replace(tzinfo=tza)
# CHANGE TIME ZONE
newtime = time.astimezone(tzb)
return newtime
| Python | 0.000001 |
9d2ef02367380c76f39c4bd84ea2f35897d0bebf | Edit school enrollment management command | education/management/commands/create_school_enrollment_script.py | education/management/commands/create_school_enrollment_script.py | '''
Created on May 28, 2013
@author: raybesiga
'''
import datetime
import logging
import itertools
from logging import handlers
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.conf import settings
from django.template import Context, Template
import traceback
from rapidsms.models import Contact, Connection, Backend
from rapidsms_httprouter.models import Message
from django.db import transaction
from rapidsms.messages.outgoing import OutgoingMessage
from script.utils.outgoing import check_progress
from script.models import ScriptProgress, Email, Script, ScriptStep
from poll.models import Poll
from optparse import OptionParser, make_option
class Command(BaseCommand):
help = "Create school enrollment termly script"
def handle(self, **options):
poll0 = Poll.objects.get(name="total_enrollment_girls")
poll1 = Poll.objects.get(name="total_enrollment_boys")
script_school_enrollment_termly = Script.objects.create(
slug="edtrac_school_enrollment_termly",
name="School Enrollment Termly Script",
)
script_school_enrollment_termly.sites.add(Site.objects.get_current())
script_school_enrollment_termly.steps.add(ScriptStep.objects.create(
script=script_school_enrollment_termly,
poll=poll0,
order=0,
rule = ScriptStep.WAIT_MOVEON,
start_offset=0,
giveup_offset=14400, # we'll give them four hours to respond
))
script_school_enrollment_termly.steps.add(ScriptStep.objects.create(
script=script_school_enrollment_termly,
poll=poll1,
order=1,
rule=ScriptStep.WAIT_MOVEON, # for polls, this likely means a poll whose answer we aren't particularly concerned with
start_offset=0, #start immediately after the giveup time has elapsed from the previous step
giveup_offset=14400, # we'll give them four hours to respond
))
| '''
Created on May 28, 2013
@author: raybesiga
'''
import datetime
import logging
import itertools
from logging import handlers
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.conf import settings
from django.template import Context, Template
import traceback
from rapidsms.models import Contact, Connection, Backend
from rapidsms_httprouter.models import Message
from django.db import transaction
from rapidsms.messages.outgoing import OutgoingMessage
from script.utils.outgoing import check_progress
from script.models import ScriptProgress, Email, Script, ScriptStep
from poll.models import Poll
from optparse import OptionParser, make_option
class Command(BaseCommand):
help = "Create school enrollment termly polls"
def handle(self, **options):
poll0 = Poll.objects.get(name="total_enrollment_girls")
poll1 = Poll.objects.get(name="total_enrollment_boys")
script_school_enrollment_termly = Script.objects.create(
slug="edtrac_school_enrollment_termly",
name="School Enrollment Termly Script",
)
script_school_enrollment_termly.sites.add(Site.objects.get_current())
script_school_enrollment_termly.steps.add(ScriptStep.objects.create(
script=script_headteacher_violence_monthly,
poll=poll0,
order=0,
rule = ScriptStep.WAIT_MOVEON,
start_offset=0,
giveup_offset=14400, # we'll give them four hours to respond
))
script_school_enrollment_termly.steps.add(ScriptStep.objects.create(
script=script_headteacher_violence_monthly,
poll=poll1,
order=1,
rule=ScriptStep.WAIT_MOVEON, # for polls, this likely means a poll whose answer we aren't particularly concerned with
start_offset=0, #start immediately after the giveup time has elapsed from the previous step
giveup_offset=14400, # we'll give them four hours to respond
))
| Python | 0 |
900ddf92a1cf65270a7b420a848c0f2611647899 | handle & | freelancefinder/remotes/sources/workinstartups/workinstartups.py | freelancefinder/remotes/sources/workinstartups/workinstartups.py | """Wrapper for the WorkInStartups source."""
import json
import bleach
import maya
import requests
from jobs.models import Post
ADDITIONAL_TAGS = ['p', 'br']
class WorkInStartups(object):
"""Wrapper for the WorkInStartups source."""
json_api_address = 'http://workinstartups.com/job-board/api/api.php?action=getJobs&type=0&category=0&count=100&random=0&days_behind=0&response=json'
def __init__(self, source):
"""Parse the API."""
self.api_response = requests.get(self.json_api_address)
self.source = source
def jobs(self):
"""Iterate through all available jobs."""
# Remove the 'var jobs = ' at the beginning and the ';' at the end
response_json = json.loads(self.api_response.text[len("var jobs = "):-1])
for job_info in response_json:
post = self.parse_job_to_post(job_info)
yield post
def parse_job_to_post(self, job_info):
"""Convert from the rss feed format to a Post."""
created = maya.parse(job_info['mysql_date']).datetime()
job_url = 'http://workinstartups.com/job-board/job/{}/{}/'.format(job_info['id'], job_info['url_title'])
post = Post(
url=job_url,
source=self.source,
title=job_info['type_name'] + " - " + bleach.clean(job_info['title'].replace("&", "&"), strip=True),
description=bleach.clean(job_info['description'], tags=bleach.ALLOWED_TAGS + ADDITIONAL_TAGS, strip=True),
unique=job_info['id'],
created=created,
subarea='all',
)
return post
| """Wrapper for the WorkInStartups source."""
import json
import bleach
import maya
import requests
from jobs.models import Post
ADDITIONAL_TAGS = ['p', 'br']
class WorkInStartups(object):
"""Wrapper for the WorkInStartups source."""
json_api_address = 'http://workinstartups.com/job-board/api/api.php?action=getJobs&type=0&category=0&count=100&random=0&days_behind=0&response=json'
def __init__(self, source):
"""Parse the API."""
self.api_response = requests.get(self.json_api_address)
self.source = source
def jobs(self):
"""Iterate through all available jobs."""
# Remove the 'var jobs = ' at the beginning and the ';' at the end
response_json = json.loads(self.api_response.text[len("var jobs = "):-1])
for job_info in response_json:
post = self.parse_job_to_post(job_info)
yield post
def parse_job_to_post(self, job_info):
"""Convert from the rss feed format to a Post."""
created = maya.parse(job_info['mysql_date']).datetime()
job_url = 'http://workinstartups.com/job-board/job/{}/{}/'.format(job_info['id'], job_info['url_title'])
post = Post(
url=job_url,
source=self.source,
title=job_info['type_name'] + " - " + bleach.clean(job_info['title'], strip=True),
description=bleach.clean(job_info['description'], tags=bleach.ALLOWED_TAGS + ADDITIONAL_TAGS, strip=True),
unique=job_info['id'],
created=created,
subarea='all',
)
return post
| Python | 0.000001 |
2c53bc17f98a3e9fdc71ba77f1ab9c1c06f82509 | remove test param on srvy | collection/srvy.py | collection/srvy.py | #!/usr/bin/python
import sys
import time
from time import sleep
from datetime import datetime
import random
import sqlite3
import csv
from configparser import ConfigParser
from gpiozero import Button
import pygame
# VARIABLES
question_csv_location = '../archive/questions.csv'
sqlite_file = '../archive/srvy.db'
yes_button = Button(26)
no_button = Button(19)
# FUNCTIONS
def module_installed(module):
if module in sys.modules:
return True
else:
return False
def get_current_questions(file_location):
"""Add each question from a text file to a list. Questions should be separated by newlines."""
with open(file_location, 'r') as csv_file:
readCSV = csv.reader(csv_file, delimiter=',', quotechar='"')
questions = []
for row in readCSV:
if row:
question = row[0]
questions.append(question)
return questions
def random_questions():
"""pulls returns a random question into main loop."""
question = get_current_questions(question_csv_location)
return random.choice(question)
def add_response_to_database(question, opinion):
"""Add response to SQLite 3 database"""
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
current_date = datetime.now()
current_unix_time = time.time()
try:
c.execute('''INSERT INTO responses (pythonDateTime, unixTime, question, opinion) VALUES (?,?,?,?)''',
(current_date, current_unix_time, question, opinion))
print("Successfully added response to database.")
print("Thank you!")
except Exception as e:
print(e)
conn.commit()
conn.close()
main()
def main():
qs = random_questions() # calls questions function that returns random question.
print(qs)
while True:
opinion = input("Opinion [y/n]: ")
if opinion == "y":
sleep(.5)
opinion = 1
add_response_to_database(qs, opinion)
elif opinion == "n":
sleep(.5)
opinion = -1
add_response_to_database(qs, opinion)
main()
| #!/usr/bin/python
import sys
import time
from time import sleep
from datetime import datetime
import random
import sqlite3
import csv
from configparser import ConfigParser
if __name__ == '__main__':
# Check if running on a Raspberry Pi
try:
from gpiozero import Button
except ImportError:
print("gpiozero is not installed.")
pass
try:
import pygame
except ImportError:
print("pygame is not installed.")
pass
# VARIABLES
question_csv_location = '../archive/questions.csv'
sqlite_file = '../archive/srvy.db'
yes_button = Button(26)
no_button = Button(19)
# FUNCTIONS
def module_installed(module):
if module in sys.modules:
return True
else:
return False
def get_current_questions(file_location):
"""Add each question from a text file to a list. Questions should be separated by newlines."""
with open(file_location, 'r') as csv_file:
readCSV = csv.reader(csv_file, delimiter=',', quotechar='"')
questions = []
for row in readCSV:
if row:
question = row[0]
questions.append(question)
return questions
def random_questions():
"""pulls returns a random question into main loop."""
question = get_current_questions(question_csv_location)
return random.choice(question)
def add_response_to_database(question, opinion):
"""Add response to SQLite 3 database"""
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
current_date = datetime.now()
current_unix_time = time.time()
try:
c.execute('''INSERT INTO responses (pythonDateTime, unixTime, question, opinion) VALUES (?,?,?,?)''',
(current_date, current_unix_time, question, opinion))
print("Successfully added response to database.")
print("Thank you!")
except Exception as e:
print(e)
conn.commit()
conn.close()
main()
def main():
qs = random_questions() # calls questions function that returns random question.
print(qs)
while True:
opinion = input("Opinion [y/n]: ")
if opinion == "y":
sleep(.5)
opinion = 1
add_response_to_database(qs, opinion)
elif opinion == "n":
sleep(.5)
opinion = -1
add_response_to_database(qs, opinion)
main()
| Python | 0.000001 |
a2d9edbe8b154858fe89be12ca281a926ad46ac7 | Remove double negative | api/init/health/routes.py | api/init/health/routes.py | import os
from flask import jsonify
from flask_restplus import Resource, Namespace
# pylint: disable=unused-variable
def register_health(namespace: Namespace):
"""Method used to register the health check namespace and endpoint."""
@namespace.route('/health')
@namespace.doc()
class Health(Resource):
def get(self):
"""
Get API health status
Use this endpoint to get the health status of this API.
"""
is_debug = os.environ.get('FLASK_DEBUG')
mode = 'debug' if is_debug else 'production'
message = {'message': f'MobyDQ API running in {mode} mode'}
return jsonify(message)
| import os
from flask import jsonify
from flask_restplus import Resource, Namespace
# pylint: disable=unused-variable
def register_health(namespace: Namespace):
"""Method used to register the health check namespace and endpoint."""
@namespace.route('/health')
@namespace.doc()
class Health(Resource):
def get(self):
"""
Get API health status
Use this endpoint to get the health status of this API.
"""
is_debug = os.environ.get('FLASK_DEBUG')
mode = 'production' if not is_debug else 'debug'
message = {'message': f'MobyDQ API running in {mode} mode'}
return jsonify(message)
| Python | 0.999999 |
9a2b3477dcfd3e8ba6fac43678713f5213fe87b2 | Caugh edge cause for initials of n=0 v. n=None | dedupe/predicates.py | dedupe/predicates.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
def tokenFieldPredicate(field):
"""returns the tokens"""
return tuple(field.split())
def commonIntegerPredicate(field):
""""return any integers"""
return tuple(re.findall("\d+", field))
def nearIntegersPredicate(field):
"""return any integers N, N+1, and N-1"""
ints = sorted([int(i) for i in re.findall("\d+", field)])
near_ints = set([])
[near_ints.update((i - 1, i, i + 1)) for i in ints]
return tuple(near_ints)
def ngrams(field, n):
"""ngrams returns all unique, contiguous sequences of n characters
of a given field.
:param field: the string to be
:param n: the number of characters to be included in each gram
usage:
>>> from dedupe.dedupe.predicated import ngrams
>>> ngrams("deduplicate", 3)
('ded', 'edu', 'dup', 'upl', 'pli', 'lic', 'ica', 'cat', 'ate')
"""
return tuple([field[pos:pos + n] for pos in xrange(len(field) - n + 1)])
def commonFourGram(field):
"""return 4-grams"""
return ngrams(field, 4)
def commonSixGram(field):
""""return 6-grams"""
return ngrams(field, 6)
def initials(field, n=None):
"""predicate which returns first a tuple containing
the first n chars of a field if and only if the
field contains at least n characters, or an empty
tuple otherwise.
:param field: the string
:type n: int, default None
usage:
>>> initials("dedupe", 7)
()
>>> initials("deduplication", 7)
('dedupli', )
>>> initials("noslice")
('noslice', )
"""
return (field[:n], ) if n is not None or len(field) > n-1 else ()
def wholeFieldPredicate(field):
"""return the whole field
consider replacing with initials(field)
"""
return (field, ) if field else ()
def sameThreeCharStartPredicate(field):
"""return first three characters"""
return initials(field, 3)
def sameFiveCharStartPredicate(field):
"""return first five characters"""
return initials(field, 5)
def sameSevenCharStartPredicate(field):
"""return first seven characters"""
return initials(field, 7)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
def tokenFieldPredicate(field):
"""returns the tokens"""
return tuple(field.split())
def commonIntegerPredicate(field):
""""return any integers"""
return tuple(re.findall("\d+", field))
def nearIntegersPredicate(field):
"""return any integers N, N+1, and N-1"""
ints = sorted([int(i) for i in re.findall("\d+", field)])
near_ints = set([])
[near_ints.update((i - 1, i, i + 1)) for i in ints]
return tuple(near_ints)
def ngrams(field, n):
"""ngrams returns all unique, contiguous sequences of n characters
of a given field.
:param field: the string to be
:param n: the number of characters to be included in each gram
usage:
>>> from dedupe.dedupe.predicated import ngrams
>>> ngrams("deduplicate", 3)
('ded', 'edu', 'dup', 'upl', 'pli', 'lic', 'ica', 'cat', 'ate')
"""
return tuple([field[pos:pos + n] for pos in xrange(len(field) - n + 1)])
def commonFourGram(field):
"""return 4-grams"""
return ngrams(field, 4)
def commonSixGram(field):
""""return 6-grams"""
return ngrams(field, 6)
def initials(field, n=None):
"""predicate which returns first a tuple containing
the first n chars of a field if and only if the
field contains at least n characters, or an empty
tuple otherwise.
:param field: the string
:type n: int, default None
usage:
>>> initials("dedupe", 7)
()
>>> initials("deduplication", 7)
('dedupli', )
>>> initials("noslice")
('noslice', )
"""
return (field[:n], ) if not n or len(field) > n-1 else ()
def wholeFieldPredicate(field):
"""return the whole field
consider replacing with initials(field)
"""
return (field, ) if field else ()
def sameThreeCharStartPredicate(field):
"""return first three characters"""
return initials(field, 3)
def sameFiveCharStartPredicate(field):
"""return first five characters"""
return initials(field, 5)
def sameSevenCharStartPredicate(field):
"""return first seven characters"""
return initials(field, 7)
| Python | 0.999999 |
2b0f4345ff1d4f97f8c00bdad3be035bd5478073 | Use a temporary file which exists. | libcloud/test/test_init.py | libcloud/test/test_init.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more§
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import tempfile
import logging
try:
import paramiko # NOQA
have_paramiko = True
except ImportError:
have_paramiko = False
from mock import patch
import libcloud
from libcloud import _init_once
from libcloud.utils.loggingconnection import LoggingConnection
from libcloud.base import DriverTypeNotFoundError
from libcloud.test import unittest
class TestUtils(unittest.TestCase):
def tearDown(self):
if 'LIBCLOUD_DEBUG' in os.environ:
del os.environ['LIBCLOUD_DEBUG']
def test_init_once_and_debug_mode(self):
if have_paramiko:
paramiko_logger = logging.getLogger('paramiko')
paramiko_logger.setLevel(logging.INFO)
# Debug mode is disabled
_init_once()
self.assertIsNone(LoggingConnection.log)
if have_paramiko:
paramiko_log_level = paramiko_logger.getEffectiveLevel()
self.assertEqual(paramiko_log_level, logging.INFO)
# Enable debug mode
_, tmp_path = tempfile.mkstemp()
os.environ['LIBCLOUD_DEBUG'] = tmp_path
_init_once()
self.assertTrue(LoggingConnection.log is not None)
if have_paramiko:
paramiko_log_level = paramiko_logger.getEffectiveLevel()
self.assertEqual(paramiko_log_level, logging.DEBUG)
def test_factory(self):
driver = libcloud.get_driver(libcloud.DriverType.COMPUTE, libcloud.DriverType.COMPUTE.EC2)
self.assertEqual(driver.__name__, 'EC2NodeDriver')
def test_raises_error(self):
with self.assertRaises(DriverTypeNotFoundError):
libcloud.get_driver('potato', 'potato')
@patch.object(libcloud.requests, '__version__', '2.6.0')
@patch.object(libcloud.requests.packages.chardet, '__version__', '2.2.1')
def test_init_once_detects_bad_yum_install_requests(self, *args):
expected_msg = 'Known bad version of requests detected'
with self.assertRaisesRegexp(AssertionError, expected_msg):
_init_once()
@patch.object(libcloud.requests, '__version__', '2.6.0')
@patch.object(libcloud.requests.packages.chardet, '__version__', '2.3.0')
def test_init_once_correct_chardet_version(self, *args):
_init_once()
if __name__ == '__main__':
sys.exit(unittest.main())
| # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more§
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import logging
try:
import paramiko
have_paramiko = True
except ImportError:
have_paramiko = False
from mock import patch
import libcloud
from libcloud import _init_once
from libcloud.utils.loggingconnection import LoggingConnection
from libcloud.base import DriverTypeNotFoundError
from libcloud.test import unittest
class TestUtils(unittest.TestCase):
def tearDown(self):
if 'LIBCLOUD_DEBUG' in os.environ:
del os.environ['LIBCLOUD_DEBUG']
def test_init_once_and_debug_mode(self):
if have_paramiko:
paramiko_logger = logging.getLogger('paramiko')
paramiko_logger.setLevel(logging.INFO)
# Debug mode is disabled
_init_once()
self.assertIsNone(LoggingConnection.log)
if have_paramiko:
paramiko_log_level = paramiko_logger.getEffectiveLevel()
self.assertEqual(paramiko_log_level, logging.INFO)
# Enable debug mode
os.environ['LIBCLOUD_DEBUG'] = '/tmp/foobartest'
_init_once()
self.assertTrue(LoggingConnection.log is not None)
if have_paramiko:
paramiko_log_level = paramiko_logger.getEffectiveLevel()
self.assertEqual(paramiko_log_level, logging.DEBUG)
def test_factory(self):
driver = libcloud.get_driver(libcloud.DriverType.COMPUTE, libcloud.DriverType.COMPUTE.EC2)
self.assertEqual(driver.__name__, 'EC2NodeDriver')
def test_raises_error(self):
with self.assertRaises(DriverTypeNotFoundError):
libcloud.get_driver('potato', 'potato')
@patch.object(libcloud.requests, '__version__', '2.6.0')
@patch.object(libcloud.requests.packages.chardet, '__version__', '2.2.1')
def test_init_once_detects_bad_yum_install_requests(self, *args):
expected_msg = 'Known bad version of requests detected'
with self.assertRaisesRegexp(AssertionError, expected_msg):
_init_once()
@patch.object(libcloud.requests, '__version__', '2.6.0')
@patch.object(libcloud.requests.packages.chardet, '__version__', '2.3.0')
def test_init_once_correct_chardet_version(self, *args):
_init_once()
if __name__ == '__main__':
sys.exit(unittest.main())
| Python | 0 |
a51a089e90719dfda2e6164b0f4c1aec50c26534 | Add ordering | entity/migrations/0006_entity_relationship_unique.py | entity/migrations/0006_entity_relationship_unique.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-12-12 18:20
from __future__ import unicode_literals
from django.db import migrations, connection
from django.db.models import Count, Max
def disable_triggers(apps, schema_editor):
"""
Temporarily disable user triggers on the relationship table. We do not want things
like entity history to attach onto these migrations as this is a core bug where duplicates
should not exist
:param apps:
:param schema_editor:
:return:
"""
with connection.cursor() as cursor:
cursor.execute(
"""
ALTER TABLE entity_entityrelationship DISABLE TRIGGER USER;
"""
)
def enable_triggers(apps, schema_editor):
"""
Re-enable the triggers (if any)
:param apps:
:param schema_editor:
:return:
"""
with connection.cursor() as cursor:
cursor.execute(
"""
ALTER TABLE entity_entityrelationship ENABLE TRIGGER USER;
"""
)
def remove_duplicates(apps, schema_editor):
"""
Remove any duplicates from the entity relationship table
:param apps:
:param schema_editor:
:return:
"""
# Get the model
EntityRelationship = apps.get_model('entity', 'EntityRelationship')
# Find the duplicates
duplicates = EntityRelationship.objects.all().order_by(
'sub_entity_id',
'super_entity_id'
).values(
'sub_entity_id',
'super_entity_id'
).annotate(
Count('sub_entity_id'),
Count('super_entity_id'),
max_id=Max('id')
).filter(
super_entity_id__count__gt=1
)
# Loop over the duplicates and delete
for duplicate in duplicates:
EntityRelationship.objects.filter(
sub_entity_id=duplicate['sub_entity_id'],
super_entity_id=duplicate['super_entity_id']
).exclude(
id=duplicate['max_id']
).delete()
class Migration(migrations.Migration):
dependencies = [
('entity', '0005_remove_entitygroup_entities'),
]
operations = [
migrations.RunPython(disable_triggers),
migrations.RunPython(remove_duplicates),
migrations.RunPython(enable_triggers),
migrations.AlterUniqueTogether(
name='entityrelationship',
unique_together=set([('sub_entity', 'super_entity')]),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-12-12 18:20
from __future__ import unicode_literals
from django.db import migrations, connection
from django.db.models import Count, Max
def disable_triggers(apps, schema_editor):
"""
Temporarily disable user triggers on the relationship table. We do not want things
like entity history to attach onto these migrations as this is a core bug where duplicates
should not exist
:param apps:
:param schema_editor:
:return:
"""
with connection.cursor() as cursor:
cursor.execute(
"""
ALTER TABLE entity_entityrelationship DISABLE TRIGGER USER;
"""
)
def enable_triggers(apps, schema_editor):
"""
Re-enable the triggers (if any)
:param apps:
:param schema_editor:
:return:
"""
with connection.cursor() as cursor:
cursor.execute(
"""
ALTER TABLE entity_entityrelationship ENABLE TRIGGER USER;
"""
)
def remove_duplicates(apps, schema_editor):
"""
Remove any duplicates from the entity relationship table
:param apps:
:param schema_editor:
:return:
"""
# Get the model
EntityRelationship = apps.get_model('entity', 'EntityRelationship')
# Find the duplicates
duplicates = EntityRelationship.objects.all().order_by().values(
'sub_entity_id', 'super_entity_id'
).annotate(
Count('sub_entity_id'),
Count('super_entity_id'),
max_id=Max('id')
).filter(
super_entity_id__count__gt=1
)
# Loop over the duplicates and delete
for duplicate in duplicates:
EntityRelationship.objects.filter(
sub_entity_id=duplicate['sub_entity_id'],
super_entity_id=duplicate['super_entity_id']
).exclude(
id=duplicate['max_id']
).delete()
class Migration(migrations.Migration):
dependencies = [
('entity', '0005_remove_entitygroup_entities'),
]
operations = [
migrations.RunPython(disable_triggers),
migrations.RunPython(remove_duplicates),
migrations.RunPython(enable_triggers),
migrations.AlterUniqueTogether(
name='entityrelationship',
unique_together=set([('sub_entity', 'super_entity')]),
),
]
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.