| commit (stringlengths 40–40) | subject (stringlengths 1–3.25k) | old_file (stringlengths 4–311) | new_file (stringlengths 4–311) | old_contents (stringlengths 0–26.3k) | lang (stringclasses, 3 values) | proba (float64, 0–1) | diff (stringlengths 0–7.82k) |
|---|---|---|---|---|---|---|---|
f834e728e2635c91f95bd234c9dd2ffca7699ee0
|
fix flake8
|
dvc/progress.py
|
dvc/progress.py
|
"""Manages progress bars for dvc repo."""
from __future__ import print_function
import logging
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor
class TqdmThreadPoolExecutor(ThreadPoolExecutor):
"""
Ensure worker progressbars are cleared away properly.
"""
def __enter__(self):
"""
Creates a blank initial dummy progress bar so that workers are forced to
create "nested" bars.
"""
self.blank_bar = Tqdm(bar_format="Multi-Threaded:", leave=False)
return super(TqdmThreadPoolExecutor, self).__enter__()
def __exit__(self, *a, **k):
super(TqdmThreadPoolExecutor, self).__exit__(*a, **k)
self.blank_bar.close()
class Tqdm(tqdm):
"""
maximum-compatibility tqdm-based progressbars
"""
def __init__(
self,
iterable=None,
disable=None,
bytes=False, # pylint: disable=W0622
desc_truncate=None,
**kwargs
):
"""
bytes : shortcut for
`unit='B', unit_scale=True, unit_divisor=1024, miniters=1`
desc_truncate : like `desc` but will truncate to 10 chars
kwargs : anything accepted by `tqdm.tqdm()`
"""
# kwargs = deepcopy(kwargs)
if bytes:
for k, v in dict(
unit="B", unit_scale=True, unit_divisor=1024, miniters=1
).items():
kwargs.setdefault(k, v)
if desc_truncate is not None:
kwargs.setdefault("desc", self.truncate(desc_truncate))
if disable is None:
disable = (
logging.getLogger(__name__).getEffectiveLevel()
>= logging.CRITICAL
)
super(Tqdm, self).__init__(
iterable=iterable, disable=disable, **kwargs
)
# self.set_lock(Lock())
def update_desc(self, desc, n=1, truncate=True):
"""
Calls `set_description(truncate(desc))` and `update(n)`
"""
self.set_description(
self.truncate(desc) if truncate else desc, refresh=False
)
self.update(n)
def update_to(self, current, total=None):
if total:
self.total = total # pylint: disable=W0613,W0201
self.update(current - self.n)
@classmethod
def truncate(cls, s, max_len=10, end=True, fill="..."):
"""
Guarantee len(output) <= max_len.
>>> truncate("hello", 4)
'...o'
"""
if len(s) <= max_len:
return s
if len(fill) > max_len:
return fill[-max_len:] if end else fill[:max_len]
i = max_len - len(fill)
return (fill + s[-i:]) if end else (s[:i] + fill)
|
Python
| 0
|
@@ -401,19 +401,16 @@
e forced
- to
%0A
@@ -410,16 +410,19 @@
+to
create %22
|
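The `update_to` method in this row is the standard tqdm idiom for adapting callbacks that report an absolute position rather than an increment. Below is a minimal self-contained sketch of the same pattern; the `TqdmUpTo` class and the simulated transfer loop are illustrative, not part of dvc:

```python
from tqdm import tqdm

class TqdmUpTo(tqdm):
    """Accepts an absolute position, mirroring Tqdm.update_to() above."""
    def update_to(self, current, total=None):
        if total:
            self.total = total
        self.update(current - self.n)  # convert absolute position -> increment

# Simulate a downloader that reports absolute bytes transferred.
with TqdmUpTo(unit="B", unit_scale=True, unit_divisor=1024, miniters=1) as bar:
    for transferred in (0, 256, 1024, 4096, 8192):
        bar.update_to(transferred, total=8192)
```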
426972e55f155d817e1db975afa2f25dbf860445
|
disable super progress bar for single-files
|
dvc/repo/add.py
|
dvc/repo/add.py
|
import logging
import os
import colorama
from . import locked
from dvc.exceptions import RecursiveAddingWhileUsingFilename
from dvc.progress import Tqdm
from dvc.repo.scm_context import scm_context
from dvc.stage import Stage
from dvc.utils import LARGE_DIR_SIZE
logger = logging.getLogger(__name__)
@locked
@scm_context
def add(repo, targets, recursive=False, no_commit=False, fname=None):
if recursive and fname:
raise RecursiveAddingWhileUsingFilename()
if isinstance(targets, str):
targets = [targets]
stages_list = []
with Tqdm(total=len(targets), desc="Add", unit="file", leave=True) as pbar:
for target in targets:
sub_targets = _find_all_targets(repo, target, recursive)
pbar.total += len(sub_targets) - 1
if os.path.isdir(target) and len(sub_targets) > LARGE_DIR_SIZE:
logger.warning(
"You are adding a large directory '{target}' recursively,"
" consider tracking it as a whole instead.\n"
"{purple}HINT:{nc} Remove the generated DVC-file and then"
" run `{cyan}dvc add {target}{nc}`".format(
purple=colorama.Fore.MAGENTA,
cyan=colorama.Fore.CYAN,
nc=colorama.Style.RESET_ALL,
target=target,
)
)
stages = _create_stages(repo, sub_targets, fname, pbar=pbar)
repo.check_modified_graph(stages)
for stage in stages:
stage.save()
if not no_commit:
stage.commit()
stage.dump()
stages_list += stages
return stages_list
def _find_all_targets(repo, target, recursive):
if os.path.isdir(target) and recursive:
return [
fname
for fname in repo.tree.walk_files(target)
if not repo.is_dvc_internal(fname)
if not Stage.is_stage_file(fname)
if not repo.scm.belongs_to_scm(fname)
if not repo.scm.is_tracked(fname)
]
return [target]
def _create_stages(repo, targets, fname, pbar=None):
stages = []
for out in targets:
stage = Stage.create(repo, outs=[out], add=True, fname=fname)
if not stage:
if pbar is not None:
pbar.total -= 1
continue
stages.append(stage)
if pbar is not None:
pbar.update_desc(out)
return stages
|
Python
| 0
|
@@ -551,16 +551,47 @@
st = %5B%5D%0A
+ num_targets = len(targets)%0A
with
@@ -600,27 +600,43 @@
qdm(
+%0A
total=
-len(
+num_
targets
-)
,
+%0A
des
@@ -643,16 +643,24 @@
c=%22Add%22,
+%0A
unit=%22f
@@ -664,16 +664,24 @@
=%22file%22,
+%0A
leave=T
@@ -683,16 +683,73 @@
ave=True
+,%0A disable=True if num_targets %3C 2 else None,%0A
) as pba
@@ -1850,16 +1850,77 @@
stages%0A%0A
+ if pbar.disable:%0A pbar.write(%22100%25 Add%22)%0A%0A
retu
|
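The decoded diff for this row reflows the `Tqdm(...)` call onto one argument per line and adds `disable=True if num_targets < 2 else None`. tqdm's `disable` is tri-state: `True` silences the bar, `False` forces it on, and `None` auto-disables only when the stream is not a TTY. A small sketch of the resulting behaviour (the `targets` list is illustrative):

```python
from tqdm import tqdm

targets = ["data.csv"]  # hypothetical single-file `dvc add`
num_targets = len(targets)

with tqdm(
    total=num_targets,
    desc="Add",
    unit="file",
    leave=True,
    disable=True if num_targets < 2 else None,  # hide the bar for one file
) as pbar:
    for target in targets:
        pbar.update(1)
    if pbar.disable:
        pbar.write("100% Add")  # tqdm.write() still prints when the bar is disabled
```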
357516f14b2d04b23fe54d30fb00b09acfe25458
|
Add test case: symlink source exists but is regular file.
|
scripts/test_dotfiles.py
|
scripts/test_dotfiles.py
|
#!/usr/bin/python
import unittest
import mock
import dotfiles
import platform
import sys
import os
import io
import time
class MockFile(io.StringIO):
name = None
def __init__(self, name, buffer_=None):
super(MockFile, self).__init__(buffer_)
self.name = name
class DotfilesTest(unittest.TestCase):
def setUp(self):
dotfiles.init()
dotfiles.identifySystem()
dotfiles.cleanUp()
self.symlinkTarget = 'bar'
self.macBashOutputFile = dotfiles.macBashOutputFile
self.macBashOutputDotFile = '.' + self.macBashOutputFile
self.linuxBashOutputFile = dotfiles.linuxBashOutputFile
self.linuxBashOutputDotFile = '.' + self.linuxBashOutputFile
self.inputFilesDir = '../inputfiles/'
self.bashLinux = self.inputFilesDir + 'bash_linux'
self.bashPrivate = self.inputFilesDir + 'bash_private'
def tearDown(self):
self.createdSymlink = dotfiles.homeDir + 'foo'
if os.path.islink(self.createdSymlink):
os.remove(self.createdSymlink)
if os.path.isfile(self.symlinkTarget):
os.remove(self.symlinkTarget)
dotfiles.cleanUp()
@mock.patch('platform.system', mock.MagicMock(return_value='Darwin'))
def testWhenSystemIsDarwinInstallerIdentifiesSystemAsDarwin(self):
dotfiles.identifySystem()
assert(sys.stdout.getvalue().strip().endswith('Darwin'))
@mock.patch('platform.system', mock.MagicMock(return_value='Linux'))
def testWhenSystemIsLinuxInstallerIdentifiesSystemAsLinux(self):
dotfiles.identifySystem()
assert(sys.stdout.getvalue().strip().endswith('Linux'))
@mock.patch('platform.system', mock.MagicMock(return_value='Windows'))
def testWhenSystemIsWindowsInstallerIdentifiesSystemAsWindowsAndExitsWithCode1(self):
with self.assertRaises(SystemExit) as cm:
dotfiles.identifySystem()
assert(sys.stdout.getvalue().strip().endswith('not supported!'))
self.assertEqual(cm.exception.code, 1)
def testInstallerWillDeleteExistingOutputFiles(self):
dotfiles.init()
dotfiles.identifySystem()
self.macBashOutputFile = dotfiles.macBashOutputFile
for file in [self.macBashOutputFile, self.macBashOutputDotFile, self.linuxBashOutputFile, self.linuxBashOutputDotFile]:
with open(file,'a') as bash:
bash.write('Test file...')
dotfiles.cleanUp()
for file in [self.macBashOutputFile, self.macBashOutputDotFile, self.linuxBashOutputFile, self.linuxBashOutputDotFile]:
assert("Removing " + file in sys.stdout.getvalue().strip())
self.assertFalse(os.path.isfile(file))
def testWhenOutputFilesDoNotExistInstallerWillNotAttemptDeletion(self):
if os.path.isfile(self.macBashOutputFile):
os.remove(self.macBashOutputFile)
try:
dotfiles.cleanUp()
except OSError as e:
if e.errno == 2:
self.fail("Tried to delete nonexistent file!")
def testBashOutputFileStartsWithShebang(self):
dotfiles.addBashOutputFileHeader()
with open(self.macBashOutputFile,'r') as bashrc:
self.assertEquals(bashrc.readline(), "#!/bin/bash\n")
def testBashLinuxFileContentsAreWrittenToOutputFile(self):
self.bashLinuxFileMock = MockFile(self.bashLinux, u'some_token=some_value\n')
dotfiles.addInputFileContents(self.bashLinuxFileMock, False)
with open(self.linuxBashOutputFile,'r') as bashrc:
self.assertTrue(bashrc.read() in self.bashLinuxFileMock.getvalue())
def testBashOutputFileDoesNotContainBashPrivateTokens(self):
self.bashLinuxFileMock = MockFile(self.bashLinux, u'some_token=some_value\n')
self.bashPrivateFileMock = MockFile(self.bashPrivate, u'private_token=private_value\n')
dotfiles.addInputFileContents(self.bashLinuxFileMock, False)
dotfiles.addInputFileContents(self.bashPrivateFileMock, False)
with open(self.linuxBashOutputFile,'r') as bashrc:
self.assertTrue(bashrc.read() not in self.bashPrivateFileMock.getvalue())
def setUpSymlink(self):
with open(self.symlinkTarget,'a') as bar:
dotfiles.createSymlink(self.symlinkTarget, 'foo')
self.createdSymlink = dotfiles.homeDir + 'foo'
def testWhenSymlinkDoesNotExistItGetsCreated(self):
self.setUpSymlink()
try:
os.stat(self.createdSymlink)
except OSError:
self.fail("Symlink " + self.createdSymlink + " not created!")
def testWhenSymlinkExistsButIsBrokenItGetsDeletedAndReCreated(self):
dotfiles.createSymlink(self.symlinkTarget, 'foo')
dotfiles.createSymlink(self.macBashOutputFile, 'foo')
assert("Link is broken." in sys.stdout.getvalue().strip())
assert("Link created." in sys.stdout.getvalue().strip())
def testWhenSymlinkExistsAndIsValidItDoesNotGetDeleted(self):
self.setUpSymlink()
dotfiles.createSymlink('bar', 'foo')
assert("Link is valid." in sys.stdout.getvalue().strip())
def testLinuxTokensNotInMacBashOutputFile(self):
dotfiles.main()
with open(self.macBashOutputFile,'r') as macBashOutput:
with open(self.bashLinux,'r') as bashLinux:
linuxContents = bashLinux.read()
macContents = macBashOutput.read()
assert(linuxContents not in macContents)
suite = unittest.TestLoader().loadTestsFromTestCase(DotfilesTest)
unittest.main(module=__name__, buffer=True, exit=False)
|
Python
| 0
|
@@ -432,16 +432,64 @@
= 'bar'%0A
+ self.regularFile = dotfiles.homeDir + 'foo'%0A
self
@@ -1029,24 +1029,192 @@
tedSymlink)%0A
+ if os.path.isfile(self.regularFile):%0A os.remove(self.regularFile)%0A if os.path.isfile(self.regularFile + '.bak'):%0A os.remove(self.regularFile + '.bak')%0A
if os.pa
@@ -4958,32 +4958,432 @@
lue().strip())%0A%0A
+ def testWhenSymlinkSourceExistsAndIsRegularFileItGetsRenamed(self):%0A with open(self.regularFile,'w') as source:%0A source.write('some_token=some_value')%0A self.setUpSymlink()%0A dotfiles.createSymlink('bar','foo')%0A assert(%22Renaming%22 in sys.stdout.getvalue().strip())%0A self.assertFalse(os.path.isfile(self.regularFile))%0A self.assertTrue(os.path.isfile(self.regularFile + '.bak'))%0A%0A
def testLinuxT
|
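The new test asserts that `createSymlink` renames a pre-existing regular file to `<name>.bak` before linking. The `dotfiles` implementation itself is not shown in this row; the sketch below is an assumption of how such a function could satisfy the three symlink tests (valid link, broken link, regular file in the way):

```python
import os

def create_symlink(source, link_path):
    """Hypothetical createSymlink matching the behaviour the tests pin down."""
    if os.path.islink(link_path):
        if os.path.exists(link_path):   # os.path.exists follows the link
            print("Link is valid.")
            return
        print("Link is broken.")        # dangling link: delete and recreate
        os.remove(link_path)
    elif os.path.isfile(link_path):     # regular file in the way: back it up
        print("Renaming %s to %s.bak" % (link_path, link_path))
        os.rename(link_path, link_path + ".bak")
    os.symlink(source, link_path)
    print("Link created.")
```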
7921c5ceb9cd9fe698254288d5d1a49480917956
|
Use numpy arrays for list return values
|
engine/run_c.py
|
engine/run_c.py
|
import os
import ctypes
import pickle
import subprocess
from numpy import ndarray, zeros
from numpy.ctypeslib import ndpointer
def infer_simple_ctype(var):
if isinstance(var, int):
return ctypes.c_int
elif isinstance(var, float):
return ctypes.c_double
elif isinstance(var, bool):
return ctypes.c_bool
elif isinstance(var, str):
return ctypes.c_char_p
else:
raise NotImplementedError("Cannot infer ctype of type(var)={:}, var={:}".format(type(var), var))
def preprocess_types(input_tuple, output_tuple):
if len(output_tuple) > 1:
raise NotImplementedError("C does not support multiple return values but len(output_tuple)={:d}"
.format(len(output_tuple)))
input_list = []
arg_ctypes = []
output_list = []
for var in input_tuple:
if isinstance(var, str):
arg_ctypes.append(ctypes.c_char_p)
# C wants bytes, not strings.
c_str = bytes(var, "utf-8")
input_list.append(ctypes.c_char_p(c_str))
elif isinstance(var, list):
if isinstance(var[0], (list, tuple)):
raise NotImplementedError(f"Cannot infer ctype of a list containing lists or tuples: var={var}")
arr_ctype = infer_simple_ctype(var[0]) * len(var)
arg_ctypes.append(arr_ctype)
arr = arr_ctype(*var)
input_list.append(arr)
# For a Python list, we add an extra argument for the size of the C array.
arg_ctypes.append(ctypes.c_int)
input_list.append(len(var))
elif isinstance(var, ndarray):
arr_ctype = ndpointer(dtype=var.dtype, flags="C_CONTIGUOUS")
arg_ctypes.append(arr_ctype)
input_list.append(var)
# For a numpy ndarray, we add extra arguments for each dimension size of the input C array.
for s in var.shape:
arg_ctypes.append(ctypes.c_int)
input_list.append(s)
else:
arg_ctypes.append(infer_simple_ctype(var))
input_list.append(var)
rvar = output_tuple[0] # Return variable
if isinstance(rvar, list):
# If the C function needs to return an array, Python must allocate memory for the array and pass it to the
# C function. So we add an extra argument for a pointer to the pre-allocated C array and set the return type
# to void.
if isinstance(rvar[0], (list, tuple)):
raise NotImplementedError(f"Cannot infer ctype of a list containing lists or tuples: var={var}")
arr_ctype = infer_simple_ctype(rvar[0]) * len(rvar)
arg_ctypes.append(arr_ctype)
arr = arr_ctype()
input_list.append(arr)
res_ctype = ctypes.c_void_p
output_list.append(arr)
elif isinstance(rvar, ndarray):
arr_ctype = ndpointer(dtype=rvar.dtype, flags="C_CONTIGUOUS")
arg_ctypes.append(arr_ctype)
arr = zeros(rvar.shape, dtype=rvar.dtype)
input_list.append(arr)
res_ctype = ctypes.c_void_p
output_list.append(arr)
else:
res_ctype = infer_simple_ctype(rvar)
return arg_ctypes, res_ctype, input_list, output_list
def ctype_output(var):
if isinstance(var, bytes):
return var.decode("utf-8")
else:
return var
run_id = os.path.basename(__file__).split('.')[0]
input_pickle = "{:s}.input.pickle".format(run_id)
correct_pickle = "{:s}.correct.pickle".format(run_id)
code_file = "{:s}.c".format(run_id)
lib_file = "{:s}.so".format(run_id)
with open(input_pickle, mode='rb') as f:
input_tuples = pickle.load(f)
with open(correct_pickle, mode='rb') as f:
correct_output_tuples = pickle.load(f)
print("PRECOMPILE")
# Compile the user's C code.
# -fPIC for position-independent code, needed for shared libraries to work no matter where in memory they are loaded.
# check=True will raise a CalledProcessError for non-zero return codes (user code failed to compile.)
subprocess.run(["gcc", "-fPIC", "-shared", "-o", lib_file, code_file], check=True)
print("COMPILE")
# Load the compiled shared library. We use the absolute path as the cwd is not in LD_LIBRARY_PATH so cdll won't find
# the .so file if we use a relative path or just a filename.
cwd = os.path.dirname(os.path.realpath(__file__))
_lib = ctypes.cdll.LoadLibrary(os.path.join(cwd, lib_file))
for i, (input_tuple, correct_output_tuple) in enumerate(zip(input_tuples, correct_output_tuples)):
# Use the input and output tuple to infer the type of input arguments and return value. We do this again for each
# test case in case outputs change type or arrays change size.
print(f"input_tuple={input_tuple}, output_tuple={correct_output_tuple}")
arg_ctypes, res_ctype, ctyped_input_list, output_list = preprocess_types(input_tuple, correct_output_tuple)
print(f"arg_ctypes={arg_ctypes}, res_ctype={res_ctype}")
_lib.$FUNCTION_NAME.argtypes = arg_ctypes
_lib.$FUNCTION_NAME.restype = res_ctype
# $FUNCTION_NAME will be replaced by the name of the user's function by the CodeRunner before this script is run.
user_output = _lib.$FUNCTION_NAME(*ctyped_input_list)
print(f"user_output={user_output}")
# If the C function returns nothing, then it must have mutated some of its input arguments.
# We'll pull them out here.
if res_ctype == ctypes.c_void_p:
user_output = []
for var in output_list:
user_output.append(ctype_output(var))
user_output = tuple(user_output)
else:
user_output = ctype_output(user_output)
output_dict = {
'user_output': user_output if isinstance(user_output, tuple) else (user_output,),
'runtime': 0,
'max_mem_usage': 0
}
output_pickle = '{:s}.output{:d}.pickle'.format(run_id, i)
with open(output_pickle, mode='wb') as f:
pickle.dump(output_dict, file=f, protocol=pickle.HIGHEST_PROTOCOL)
|
Python
| 0.000004
|
@@ -67,16 +67,23 @@
y import
+ array,
ndarray
@@ -2617,32 +2617,59 @@
s: var=%7Bvar%7D%22)%0A%0A
+ arr = array(rvar)%0A%0A
arr_ctyp
@@ -2676,46 +2676,55 @@
e =
+ndpo
in
-fer_simple_ctype(rvar%5B0%5D) * len(rvar
+ter(dtype=arr.dtype, flags=%22C_CONTIGUOUS%22
)%0A
@@ -2763,34 +2763,8 @@
e)%0A%0A
- arr = arr_ctype()%0A
@@ -2820,32 +2820,32 @@
types.c_void_p%0A%0A
-
output_l
@@ -2856,24 +2856,58 @@
append(arr)%0A
+%0A output_list.append(arr)%0A%0A
elif isi
|
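The change in this row swaps the hand-built ctypes array for a numpy buffer plus `ndpointer` when the C function "returns" an array. The sketch below shows the type plumbing only; `lib.scale_into` is a hypothetical exported symbol and no shared library is actually loaded here:

```python
import ctypes
import numpy as np
from numpy.ctypeslib import ndpointer

# Input list: build a concrete ctypes array and pass its length alongside,
# as preprocess_types() does above.
values = [1.0, 2.0, 3.0]
arr_ctype = ctypes.c_double * len(values)
c_arr = arr_ctype(*values)

# Array output: Python pre-allocates the buffer, declares it as an
# ndpointer argument, and sets restype to void; the C side fills it in place.
out = np.zeros(len(values), dtype=np.float64)
argtypes = [arr_ctype, ctypes.c_int,
            ndpointer(dtype=out.dtype, flags="C_CONTIGUOUS")]
restype = ctypes.c_void_p

# A real call would then look like (hypothetical symbol, no .so built here):
#   lib.scale_into.argtypes = argtypes
#   lib.scale_into.restype = restype
#   lib.scale_into(c_arr, len(values), out)
print(argtypes, list(out))
```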
48f5d906bb72543a844d9ccf33e5c1488ba3e154
|
make newebe install line cleaner
|
deploy/fabfile.py
|
deploy/fabfile.py
|
from fabric.api import sudo, cd, task, prompt, run
from fabric.contrib import files
from fabtools import require, python, supervisor
# Variables
newebe_dir = "/home/newebe/newebe"
newebe_process = newebe_user = "newebe"
newebe_user_dir = "/home/newebe/"
python_exe = newebe_dir + "/virtualenv/bin/python"
newebe_exe = "newebe_server.py"
# Helpers
import random
import string
def random_string(n):
"""Create n length random string"""
chars = string.letters + string.digits
code = ''.join([random.choice(chars) for i in range(n)])
return code
def newebedo(cmd):
"""Run a commande as a newebe user"""
sudo(cmd, user=newebe_user)
def delete_if_exists(filename):
"""Delete given file if it already exists"""
if files.exists(filename):
newebedo("rm -rf %s" % filename)
# Install tasks
@task()
def setup():
"""Deploy the whole newebe stack"""
install_deb_packages()
create_user()
install_newebe()
make_dirs()
build_configuration_file()
build_certificates()
setup_supervisord()
set_supervisord_config()
@task()
def install_deb_packages():
"""Install required deb packages"""
require.deb.packages([
'python',
'python-dev',
'build-essential',
'python-setuptools',
'python-pip',
'python-pycurl',
'python-imaging',
'couchdb',
'git',
'libxml2-dev',
'libxslt-dev',
'openssl'
])
#sudo("apt-get build-dep python-imaging")
@task()
def create_user():
"""Create newebe user"""
require.user(newebe_user, newebe_user_dir)
@task()
def install_newebe():
"""Install Newebe as a main software"""
update_source()
@task()
def make_dirs():
"""Make dir required for a proper install"""
with cd(newebe_user_dir):
delete_if_exists('newebe')
newebedo("mkdir newebe")
newebedo("mkdir newebe/certs")
@task()
def build_configuration_file():
"""Build default configuration file """
timezone = prompt("""
Which time zone do you want for your database (default is Europe/Paris,
check the Newebe wiki for the timezone list)?
\n \n
""")
if not timezone:
timezone = "Europe/Paris"
with cd(newebe_dir):
delete_if_exists('local_settings.py')
newebedo('echo "main:" >> config.yaml')
newebedo('echo " port: 8000" >> config.yaml')
newebedo('echo " debug: False" >> config.yaml')
newebedo("echo ' timezone: \"%s\"' >> config.yaml" % timezone)
newebedo('echo "db:" >> config.yaml')
newebedo("echo ' name: \"newebe\"' >> config.yaml")
newebedo('echo "security:" >> config.yaml')
newebedo("echo ' cookie_key: \"%s\"' >> config.yaml" % \
random_string(42))
@task()
def build_certificates():
"""Build HTTPS certificates"""
with cd(newebe_dir + "/certs"):
delete_if_exists('server.key')
delete_if_exists('server.crt')
newebedo("openssl genrsa -out ./server.key 1024")
newebedo("openssl req -new -x509 -days 3650 -key ./server.key -out\
./server.crt")
@task()
def setup_supervisord():
"""Install python daemon manager, supervisord"""
python.install("meld3==0.6.9", use_sudo=True)
require.deb.package("supervisor")
@task()
def set_supervisord_config():
"""Configure Newebe runner for supervisord"""
require.supervisor.process(newebe_process,
command='%s --configfile=%s' %
(newebe_exe, newebe_dir + "/config.yaml"),
user=newebe_user
)
supervisor.start_process(newebe_process)
# Update tasks
@task()
def update():
"""Update source code, build require couchdb views then restart newebe"""
update_source()
restart_newebe()
@task()
def update_source():
"""Simple git pull inside newebe directory"""
sudo("pip install git+git://github.com/gelnior/newebe.git")
@task()
def restart_newebe():
"""Restart newebe surpervisord process"""
supervisor.restart_process(newebe_process)
|
Python
| 0
|
@@ -42,13 +42,8 @@
ompt
-, run
%0Afro
@@ -3871,26 +3871,24 @@
-sudo(%22pip
+python.
install
-
+(%22
git+
|
0aae504d88bc0948dcf8eb4256c39e38301b089d
|
Version bump to 0.4.1
|
overextends/__init__.py
|
overextends/__init__.py
|
__version__ = "0.4.0"
|
Python
| 0
|
@@ -13,11 +13,11 @@
= %220.4.
-0
+1
%22%0A
|
e6c072aedfebfeacfe98ccf03385b90335e74f00
|
improve tests
|
testing/testSamplingAndPlotting.py
|
testing/testSamplingAndPlotting.py
|
import pandas as pd
import numpy as np
import pickle
from matplotlib import pyplot as plt
import GPflow
from BranchedGP import VBHelperFunctions as bplot
from BranchedGP import BranchingTree as bt
from BranchedGP import branch_kernParamGPflow as bk
import unittest
from BranchedGP import FitBranchingModel
class TestSamplingAndPlotting(unittest.TestCase):
def test(self):
branchingPoint = 0.5
tree = bt.BinaryBranchingTree(0, 10, fDebug=False) # set to true to print debug messages
tree.add(None, 1, branchingPoint) # single branching point
(fm, fmb) = tree.GetFunctionBranchTensor()
# Specify where to evaluate the kernel
t = np.linspace(0.01, 1, 40)
(XForKernel, indicesBranch, Xtrue) = tree.GetFunctionIndexList(t, fReturnXtrue=True)
# Specify the kernel and its hyperparameters
# These determine how smooth and variable the branching functions are
Bvalues = np.expand_dims(np.asarray(tree.GetBranchValues()), 1)
KbranchParam = bk.BranchKernelParam(GPflow.kernels.RBF(1), fm, b=Bvalues)
KbranchParam.kern.lengthscales = 2
KbranchParam.kern.variance = 1
# Sample the kernel
samples = bk.SampleKernel(KbranchParam, XForKernel)
# Plot the sample
bk.PlotSample(XForKernel, samples)
# Fit model
BgridSearch = [0.1, branchingPoint, 1.1]
globalBranchingLabels = XForKernel[:, 1] # use correct labels for tests
# could add a mistake
print('Sparse model')
d = FitBranchingModel.FitModel(BgridSearch, XForKernel[:, 0], samples, globalBranchingLabels,
maxiter=20, priorConfidence=0.80, M=10)
bmode = BgridSearch[np.argmax(d['loglik'])]
assert bmode == branchingPoint, bmode
# Plot model
pred = d['prediction'] # prediction object from GP
_=bplot.plotBranchModel(bmode, XForKernel[:, 0], samples, pred['xtest'], pred['mu'], pred['var'],
d['Phi'], fPlotPhi=True, fColorBar=True, fPlotVar = True)
_=bplot.PlotBGPFit(samples, XForKernel[:, 0], BgridSearch, d)
print('Try dense model')
d = FitBranchingModel.FitModel(BgridSearch, XForKernel[:, 0], samples, globalBranchingLabels,
maxiter=20, priorConfidence=0.80, M=0)
bmode = BgridSearch[np.argmax(d['loglik'])]
assert bmode == branchingPoint, bmode
print('Try sparse model with fixed inducing points')
d = FitBranchingModel.FitModel(BgridSearch, XForKernel[:, 0], samples, globalBranchingLabels,
maxiter=20, priorConfidence=0.80, M=15, fixInducingPoints=True)
bmode = BgridSearch[np.argmax(d['loglik'])]
assert bmode == branchingPoint, bmode
print('Try sparse model with fixed hyperparameters')
d = FitBranchingModel.FitModel(BgridSearch, XForKernel[:, 0], samples, globalBranchingLabels,
maxiter=20, priorConfidence=0.80, M=15,
likvar=1e-3, kerlen=2., kervar=1., fixHyperparameters=True)
# You can rerun the same code as many times as you want and get different sample paths
# We can also sample independent functions. This is the assumption in the overlapping mixtures of GPs model (OMGP) discussed in the paper.
indKernel = bk.IndKern(GPflow.kernels.RBF(1))
samplesInd = bk.SampleKernel(indKernel, XForKernel)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000016
|
@@ -700,9 +700,9 @@
1,
-4
+6
0)%0A
@@ -2705,18 +2705,18 @@
0.80, M=
-15
+20
, fixInd
|
cf92a9fc30220a8b877b8d333dbbf81ef93fdb9d
|
Add new keys.
|
app/models/__init__.py
|
app/models/__init__.py
|
# Copyright (C) 2014 Linaro Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The default mongodb database name.
DB_NAME = 'kernel-ci'
DEFAULT_SCHEMA_VERSION = "1.0"
# The default ID key, and other keys, for mongodb documents and queries.
ACCEPTED_KEYS = 'accepted'
ADDRESS_KEY = "address"
AGGREGATE_KEY = 'aggregate'
ARCHITECTURE_KEY = 'arch'
BOARD_KEY = 'board'
BOOT_ID_KEY = 'boot_id'
BOOT_LOAD_ADDR_KEY = 'loadaddr'
BOOT_LOG_HTML_KEY = 'boot_log_html'
BOOT_LOG_KEY = 'boot_log'
BOOT_RESULT_DESC_KEY = "boot_result_description"
BOOT_RESULT_KEY = 'boot_result'
BOOT_RETRIES_KEY = 'boot_retries'
BOOT_TIME_KEY = 'boot_time'
BOOT_WARNINGS_KEY = 'boot_warnings'
BUILD_ERRORS_KEY = 'build_errors'
BUILD_PLATFORM_KEY = 'build_platform'
BUILD_RESULT_KEY = 'build_result'
BUILD_TIME_KEY = 'build_time'
BUILD_WARNINGS_KEY = 'build_warnings'
COMPILER_VERSION_KEY = 'compiler_version'
CONTACT_KEY = "contact"
COUNT_KEY = "count"
CREATED_KEY = 'created_on'
CROSS_COMPILE_KEY = 'cross_compile'
DATE_RANGE_KEY = 'date_range'
DEFCONFIG_ID_KEY = 'defconfig_id'
DEFCONFIG_KEY = 'defconfig'
DIRNAME_KEY = 'dirname'
DOC_ID_KEY = 'doc_id'
DTB_ADDR_KEY = 'dtb_addr'
DTB_APPEND_KEY = 'dtb_append'
DTB_DIR_KEY = 'dtb_dir'
DTB_KEY = 'dtb'
EMAIL_KEY = 'email'
EMAIL_LIST_KEY = 'emails'
ENDIANNESS_KEY = 'endian'
ERRORS_KEY = 'errors'
EXPIRED_KEY = 'expired'
EXPIRES_KEY = 'expires_on'
FASTBOOT_KEY = 'fastboot'
FIELD_KEY = 'field'
GIT_BRANCH_KEY = 'git_branch'
GIT_COMMIT_KEY = 'git_commit'
GIT_DESCRIBE_KEY = 'git_describe'
GIT_URL_KEY = 'git_url'
ID_KEY = '_id'
INITRD_ADDR_KEY = 'initrd_addr'
IP_ADDRESS_KEY = 'ip_address'
JOB_ID_KEY = 'job_id'
JOB_KEY = 'job'
KERNEL_CONFIG_KEY = 'kernel_config'
KERNEL_IMAGE_KEY = 'kernel_image'
KERNEL_KEY = 'kernel'
LAB_ID_KEY = "lab_id"
LAB_NAME_KEY = 'lab_name'
LIMIT_KEY = 'limit'
LOAD_ADDR_KEY = 'load_addr'
MANDATORY_KEYS = 'mandatory'
METADATA_KEY = 'metadata'
MODULES_KEY = 'modules'
NAME_KEY = "name"
NOT_FIELD_KEY = 'nfield'
PRIVATE_KEY = 'private'
PROPERTIES_KEY = 'properties'
RESULT_KEY = "result"
RETRIES_KEY = 'retries'
SKIP_KEY = 'skip'
SORT_KEY = 'sort'
SORT_ORDER_KEY = 'sort_order'
STATUS_KEY = 'status'
SURNAME_KEY = 'surname'
SYSTEM_MAP_KEY = 'system_map'
TEXT_OFFSET_KEY = 'text_offset'
TIME_KEY = 'time'
TOKEN_KEY = 'token'
UPDATED_KEY = 'updated_on'
USERNAME_KEY = 'username'
VERSION_KEY = 'version'
WARNINGS_KEY = 'warnings'
# Token special fields.
ADMIN_KEY = 'admin'
DELETE_KEY = 'delete'
GET_KEY = 'get'
IP_RESTRICTED = 'ip_restricted'
POST_KEY = 'post'
SUPERUSER_KEY = 'superuser'
# Job and/or build status.
BUILD_STATUS = 'BUILD'
FAIL_STATUS = 'FAIL'
PASS_STATUS = 'PASS'
UNKNOWN_STATUS = 'UNKNOWN'
OFFLINE_STATUS = 'OFFLINE'
UNTRIED_STATUS = 'UNTRIED'
# Build file names.
DONE_FILE = '.done'
DONE_FILE_PATTERN = '*.done'
BUILD_META_FILE = 'build.meta'
BUILD_META_JSON_FILE = 'build.json'
BUILD_FAIL_FILE = 'build.FAIL'
BUILD_PASS_FILE = 'build.PASS'
# Batch operation related keys.
BATCH_KEY = "batch"
METHOD_KEY = "method"
COLLECTION_KEY = "collection"
DOCUMENT_ID_KEY = "document_id"
QUERY_KEY = "query"
OP_ID_KEY = "operation_id"
# Collection names.
BOOT_COLLECTION = 'boot'
COUNT_COLLECTION = "count"
DEFCONFIG_COLLECTION = 'defconfig'
JOB_COLLECTION = 'job'
SUBSCRIPTION_COLLECTION = 'subscription'
TOKEN_COLLECTION = 'api-token'
BISECT_COLLECTION = 'bisect'
LAB_COLLECTION = 'lab'
# Bisect values.
BISECT_BOOT_STATUS_KEY = 'boot_status'
BISECT_BOOT_CREATED_KEY = 'boot_created_on'
BISECT_BOOT_METADATA_KEY = 'boot_metadata'
BISECT_DEFCONFIG_STATUS_KEY = 'defconfig_status'
BISECT_DEFCONFIG_CREATED_KEY = 'defconfig_created'
BISECT_DEFCONFIG_METADATA_KEY = 'defconfig_metadata'
BISECT_DEFCONFIG_ARCHITECTURE_KEY = 'defconfig_arch'
BISECT_DATA_KEY = 'bisect_data'
BISECT_GOOD_COMMIT_KEY = 'good_commit'
BISECT_BAD_COMMIT_KEY = 'bad_commit'
BISECT_GOOD_COMMIT_DATE = 'good_commit_date'
BISECT_BAD_COMMIT_DATE = 'bad_commit_date'
BISECT_GOOD_COMMIT_URL = 'good_commit_url'
BISECT_BAD_COMMIT_URL = 'bad_commit_url'
# Name formats.
JOB_DOCUMENT_NAME = '%(job)s-%(kernel)s'
BOOT_DOCUMENT_NAME = '%(board)s-%(job)s-%(kernel)s-%(defconfig)s'
DEFCONFIG_DOCUMENT_NAME = '%(job)s-%(kernel)s-%(defconfig)s'
SUBSCRIPTION_DOCUMENT_NAME = 'sub-%(job)s-%(kernel)s'
# Valid build status.
VALID_BUILD_STATUS = [
BUILD_STATUS,
FAIL_STATUS,
PASS_STATUS,
UNKNOWN_STATUS
]
# Valid boot status.
VALID_BOOT_STATUS = [
FAIL_STATUS,
OFFLINE_STATUS,
PASS_STATUS,
UNTRIED_STATUS,
]
# Valid job status.
VALID_JOB_STATUS = [
BUILD_STATUS,
FAIL_STATUS,
PASS_STATUS,
UNKNOWN_STATUS,
]
# The valid collections for the bisect handler.
BISECT_VALID_COLLECTIONS = [
BOOT_COLLECTION
]
|
Python
| 0.000038
|
@@ -1305,16 +1305,44 @@
errors'%0A
+BUILD_LOG_KEY = 'build_log'%0A
BUILD_PL
@@ -2523,16 +2523,48 @@
tadata'%0A
+MODULES_DIR_KEY = 'modules_dir'%0A
MODULES_
|
5f3e659d2346e10138fb75b01239396b04ceec3f
|
Allow runserver to be executed from anywhere
|
contentdensity/contentdensity/settings.py
|
contentdensity/contentdensity/settings.py
|
"""
Django settings for contentdensity project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k%*36fgmtlqp!38^@a(72i0r$u7a-xgm2!^dlj*7785xz=+0+7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'textifai.apps.TextifaiConfig',
'nltk',
'indicoio',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'contentdensity.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['./templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'contentdensity.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/textifai/account'
|
Python
| 0
|
@@ -1703,11 +1703,32 @@
': %5B
-'./
+os.path.join(BASE_DIR, '
temp
@@ -1733,16 +1733,17 @@
mplates'
+)
,%5D,%0A
|
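The decoded diff replaces the relative `'./templates'` entry with `os.path.join(BASE_DIR, 'templates')`. Relative paths in `DIRS` resolve against the current working directory, so `runserver` only found the templates when launched from the project root. A quick illustration in plain Python, outside Django:

```python
import os

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Resolves against os.getcwd(), so the answer changes with the launch directory:
print(os.path.abspath('./templates'))

# Anchored on this module's location, so it is the same from any cwd:
print(os.path.join(BASE_DIR, 'templates'))
```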
50b327fe1ab9ae4476d1fc203739cc38d5f578a3
|
fix unittest unneeded sleeps
|
tests/PyroTests/test_threadpool.py
|
tests/PyroTests/test_threadpool.py
|
"""
Tests for the thread pool.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function
import time
import random
import unittest
from Pyro4.socketserver.threadpool import Pool, PoolError, NoFreeWorkersError
from Pyro4.socketserver.threadpoolserver import SocketServer_Threadpool
import Pyro4.socketutil
import Pyro4.threadutil
JOB_TIME = 0.2
class Job(object):
def __init__(self, name="unnamed"):
self.name = name
def __call__(self):
time.sleep(JOB_TIME - random.random() / 10.0)
class SlowJob(object):
def __init__(self, name="unnamed"):
self.name = name
def __call__(self):
time.sleep(5*JOB_TIME - random.random() / 10.0)
class PoolTests(unittest.TestCase):
def setUp(self):
Pyro4.config.THREADPOOL_SIZE_MIN = 2
Pyro4.config.THREADPOOL_SIZE = 4
def tearDown(self):
Pyro4.config.reset()
def testCreate(self):
with Pool() as jq:
_ = repr(jq)
self.assertTrue(jq.closed)
def testSingle(self):
with Pool() as p:
job = Job()
p.process(job)
time.sleep(0.02) # let it pick up the job
self.assertEqual(1, len(p.busy))
def testAllBusy(self):
try:
Pyro4.config.COMMTIMEOUT = 0.2
with Pool() as p:
for i in range(Pyro4.config.THREADPOOL_SIZE):
p.process(SlowJob(str(i+1)))
# putting one more than the number of workers should raise an error:
with self.assertRaises(NoFreeWorkersError):
p.process(SlowJob("toomuch"))
finally:
Pyro4.config.COMMTIMEOUT = 0.0
def testClose(self):
with Pool() as p:
for i in range(Pyro4.config.THREADPOOL_SIZE):
p.process(Job(str(i + 1)))
with self.assertRaises(PoolError):
p.process(Job(1)) # must not allow new jobs after closing
self.assertEqual(0, len(p.busy))
self.assertEqual(0, len(p.idle))
def testScaling(self):
with Pool() as p:
for i in range(Pyro4.config.THREADPOOL_SIZE_MIN-1):
p.process(Job("x"))
self.assertEqual(1, len(p.idle))
self.assertEqual(Pyro4.config.THREADPOOL_SIZE_MIN-1, len(p.busy))
p.process(Job("x"))
self.assertEqual(0, len(p.idle))
self.assertEqual(Pyro4.config.THREADPOOL_SIZE_MIN, len(p.busy))
# grow until no more free workers
while True:
try:
p.process(Job("x"))
except NoFreeWorkersError:
break
self.assertEqual(0, len(p.idle))
self.assertEqual(Pyro4.config.THREADPOOL_SIZE, len(p.busy))
# wait till jobs are done and check ending situation
time.sleep(JOB_TIME*1.5)
self.assertEqual(0, len(p.busy))
self.assertEqual(Pyro4.config.THREADPOOL_SIZE_MIN, len(p.idle))
class ServerCallback(Pyro4.core.Daemon):
def __init__(self):
self.received_denied_reasons = []
def _handshake(self, connection, denied_reason=None):
self.received_denied_reasons.append(denied_reason) # store the denied reason
return True
def handleRequest(self, connection):
time.sleep(0.05)
def _housekeeping(self):
pass
class ThreadPoolServerTests(unittest.TestCase):
def setUp(self):
Pyro4.config.THREADPOOL_SIZE_MIN = 1
Pyro4.config.THREADPOOL_SIZE = 1
Pyro4.config.POLLTIMEOUT = 0.5
Pyro4.config.COMMTIMEOUT = 0.5
def tearDown(self):
Pyro4.config.reset()
def testServerPoolFull(self):
port = Pyro4.socketutil.findProbablyUnusedPort()
serv = SocketServer_Threadpool()
daemon = ServerCallback()
serv.init(daemon, "localhost", port)
serversock = serv.sock.getsockname()
csock1 = Pyro4.socketutil.createSocket(connect=serversock)
time.sleep(0.1) # XXX
csock2 = Pyro4.socketutil.createSocket(connect=serversock)
time.sleep(0.1) # XXX
try:
serv.events([serv.sock])
time.sleep(0.2)
self.assertEqual([None], daemon.received_denied_reasons)
serv.events([serv.sock])
time.sleep(0.2)
self.assertEqual(2, len(daemon.received_denied_reasons))
self.assertIn("no free workers, increase server threadpool size", daemon.received_denied_reasons)
finally:
csock1.close()
csock2.close()
serv.shutdown()
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Python
| 0.000002
|
@@ -4190,39 +4190,8 @@
k)%0D%0A
- time.sleep(0.1) # XXX%0D%0A
@@ -4258,39 +4258,8 @@
k)%0D%0A
- time.sleep(0.1) # XXX%0D%0A
|
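The diff simply deletes the two `time.sleep(0.1)  # XXX` guesses after the client sockets connect. When a test really must wait for asynchronous state, polling a condition with a deadline is sturdier than a fixed sleep; the helper below is a generic sketch, not a Pyro4 API:

```python
import time

def wait_until(predicate, timeout=2.0, interval=0.01):
    """Poll until predicate() is true or the deadline passes."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# Hypothetical use against the pool tests above:
#   self.assertTrue(wait_until(lambda: len(p.busy) == 1))
events = ["ready"]
assert wait_until(lambda: "ready" in events)
```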
5389bd7b53e1ca2186c7bde06ffdf1c84ef6fd54
|
Add put_device method
|
devicehive/api.py
|
devicehive/api.py
|
from devicehive.api_unit import Info
from devicehive.api_unit import Token
from devicehive.api_unit import Device
class Api(object):
"""Api class."""
def __init__(self, transport, authentication):
self._transport = transport
self._token = Token(transport, authentication)
def authenticate(self):
self._token.authenticate()
def get_info(self):
info = Info(self._transport)
return info.get()
def get_cluster_info(self):
info = Info(self._transport)
return info.get_cluster_info()
def create_token(self, user_id, expiration=None, actions=None,
network_ids=None, device_ids=None):
return self._token.create(user_id, expiration, actions, network_ids,
device_ids)
def refresh_token(self):
self._token.refresh()
return self._token.access_token()
def list_devices(self, name=None, name_pattern=None, network_id=None,
network_name=None, sort_field=None, sort_order=None,
take=None, skip=None):
# TODO: implement filters for websocket when API will be extended.
url = 'device'
action = 'device/list'
request = {}
params = {'data_key': 'devices', 'params': {}}
if name:
params['params']['name'] = name
if name_pattern:
params['params']['namePattern'] = name_pattern
if network_id:
params['params']['networkId'] = network_id
if network_name:
params['params']['networkName'] = network_name
if sort_field:
params['params']['sortField'] = sort_field
if sort_order:
params['params']['sortOrder'] = sort_order
if take:
params['params']['take'] = take
if skip:
params['params']['skip'] = skip
response = self._token.authorized_request(url, action, request,
**params)
assert response.is_success, 'List devices failure'
devices = []
for device in response.data['devices']:
devices.append(Device(self._transport, self._token, device['id'],
device['name'], device['data'],
device['networkId'], device['isBlocked']))
return devices
def get_device(self, device_id):
device = Device(self._transport, self._token)
device.get(device_id)
if device.id:
return device
|
Python
| 0.000006
|
@@ -2532,16 +2532,18 @@
evice.id
+()
:%0A
@@ -2558,12 +2558,345 @@
turn device%0A
+%0A def put_device(self, device_id, name=None, data=None, network_id=None,%0A is_blocked=False):%0A if not name:%0A name = device_id%0A device = Device(self._transport, self._token, device_id, name, data,%0A network_id, is_blocked)%0A device.save()%0A return device%0A
|
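The second hunk is hard to read in its percent-encoded form (`%0A` is a newline, `%22` a double quote); decoded, the added method reads roughly as below. The first hunk also turns the attribute access `device.id` into the call `device.id()`.

```python
def put_device(self, device_id, name=None, data=None, network_id=None,
               is_blocked=False):
    if not name:
        name = device_id
    device = Device(self._transport, self._token, device_id, name, data,
                    network_id, is_blocked)
    device.save()
    return device
```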
18e3187c2f65486fcb5456504cbfe48242c72b20
|
Replace == None with is None
|
send2trash/plat_other.py
|
send2trash/plat_other.py
|
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
# This is a reimplementation of plat_other.py with reference to the
# freedesktop.org trash specification:
# [1] http://www.freedesktop.org/wiki/Specifications/trash-spec
# [2] http://www.ramendik.ru/docs/trashspec.html
# See also:
# [3] http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
#
# For external volumes this implementation will raise an exception if it can't
# find or create the user's trash directory.
import sys
import os
import os.path as op
import logging
from datetime import datetime
import stat
FILES_DIR = 'files'
INFO_DIR = 'info'
INFO_SUFFIX = '.trashinfo'
# Default of ~/.local/share [3]
XDG_DATA_HOME = os.environ.get('XDG_DATA_HOME') or '~/.local/share'
HOMETRASH = op.expanduser(op.join(XDG_DATA_HOME,'Trash'))
uid = os.getuid()
TOPDIR_TRASH = '.Trash'
TOPDIR_FALLBACK = '.Trash-' + str(uid)
def is_parent(parent, path):
path = op.abspath(path)
parent = op.abspath(parent)
while path != '/':
path = op.abspath(op.join(path, '..'))
if path == parent:
return True
return False
def format_date(date):
return date.strftime("%Y-%m-%dT%H:%M:%S")
def info_for(src, topdir):
# ...it MUST not include a ".." directory, and for files not "under" that
# directory, absolute pathnames must be used. [2]
if topdir == None or not is_parent(topdir, src):
src = op.abspath(src)
else:
src = op.relpath(src, topdir)
info = "[Trash Info]\n"
info += "Path=" + src + "\n"
info += "DeletionDate=" + format_date(datetime.now()) + "\n"
return info
def check_create(dir):
# use 0700 for paths [3]
if not op.exists(dir):
os.makedirs(dir, 0o700)
def trash_move(src, dst, topdir=None):
filename = op.basename(src)
filespath = op.join(dst, FILES_DIR)
infopath = op.join(dst, INFO_DIR)
base_name, ext = op.splitext(filename)
counter = 0
destname = filename
while op.exists(op.join(filespath, destname)) or op.exists(op.join(infopath, destname + INFO_SUFFIX)):
counter += 1
destname = '%s %s%s' % (base_name, counter, ext)
check_create(filespath)
check_create(infopath)
os.rename(src, op.join(filespath, destname))
f = open(op.join(infopath, destname + INFO_SUFFIX), 'w')
f.write(info_for(src, topdir))
f.close()
def find_mount_point(path):
# Even if something's wrong, "/" is a mount point, so the loop will exit.
path = op.abspath(path) # Required to avoid infinite loop
while not op.ismount(path):
path = op.split(path)[0]
return path
def find_ext_volume_global_trash(volume_root):
# from [2] Trash directories (1) check for a .Trash dir with the right
# permissions set.
trash_dir = op.join(volume_root, TOPDIR_TRASH)
if not op.exists(trash_dir):
return None
mode = os.lstat(trash_dir).st_mode
# vol/.Trash must be a directory, cannot be a symlink, and must have the
# sticky bit set.
if not op.isdir(trash_dir) or op.islink(trash_dir) or not (mode & stat.S_ISVTX):
return None
trash_dir = op.join(trash_dir, str(uid))
try:
check_create(trash_dir)
except OSError:
return None
return trash_dir
def find_ext_volume_fallback_trash(volume_root):
# from [2] Trash directories (1) create a .Trash-$uid dir.
trash_dir = op.join(volume_root, TOPDIR_FALLBACK)
# Try to make the directory; if we can't, the OSError exception will
# escape and be raised out of send2trash.
check_create(trash_dir)
return trash_dir
def find_ext_volume_trash(volume_root):
trash_dir = find_ext_volume_global_trash(volume_root)
if trash_dir == None:
trash_dir = find_ext_volume_fallback_trash(volume_root)
return trash_dir
def send2trash(path):
if not isinstance(path, str):
path = str(path, sys.getfilesystemencoding())
try:
trash_move(path, HOMETRASH, XDG_DATA_HOME)
except OSError:
# Check if we're on an external volume
mount_point = find_mount_point(path)
dest_trash = find_ext_volume_trash(mount_point)
trash_move(path, dest_trash, mount_point)
|
Python
| 1
|
@@ -1591,26 +1591,26 @@
if topdir
-==
+is
None or not
@@ -3953,18 +3953,18 @@
ash_dir
-==
+is
None:%0A
|
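`is None` is preferred over `== None` because `==` dispatches to a type's `__eq__`, which can claim equality with anything, while `is` tests identity and cannot be overridden. A short demonstration with an illustrative class:

```python
class AlwaysEqual(object):
    def __eq__(self, other):  # pathological but legal __eq__
        return True

x = AlwaysEqual()
print(x == None)  # True  -- the overridden __eq__ answers the comparison
print(x is None)  # False -- identity check, immune to operator overloading
```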
0bc2a8ddae824a74ce443ffc120e3152641842d6
|
Added a filter method to dicts
|
app/soc/logic/dicts.py
|
app/soc/logic/dicts.py
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to handling dictionaries.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
def filter(target, keys):
"""Filters a dictonary to only allow items with the given keys.
Args:
target: The dictionary that is to be filtered
keys: The list with keys to filter the dictionary on
Returns:
A dictionary that only contains the (key,value) from target that
have their key in keys.
"""
result = {}
for key, value in target.iteritems():
if key in keys:
result[key] = value
return result
def merge(target, updates):
"""Like the builtin 'update' method but does not overwrite existing values.
Args:
target: The dictionary that is to be updated, may be None
updates: A dictionary containing new values for the original dict
Returns:
the target dict, with any missing values from updates merged in, in-place.
"""
if not target:
target = {}
for key, value in updates.iteritems():
if key not in target:
target[key] = value
return target
def zip(keys, values):
"""Returns a dict containing keys with values.
If there are more items in keys than in values, None will be used.
If there are more items in values than in keys, they will be ignored.
Args:
keys: the keys for the dictionary
values: the values for the dictionary
"""
result = {}
size = len(keys)
for i in range(size):
if i < len(values):
value = values[i]
else:
value = None
key = keys[i]
result[key] = value
return result
def unzip(target, order):
"""Constructs a list from target in the order specified by order
Args:
target: the dictionary to pull the values from
order: the order of the keys
"""
return (target[key] for key in order)
def rename(target, keys):
"""Returns a dict containing only the key/value pairs from keys.
The keys from target will be looked up in keys, and the corresponding
value from keys will be used instead. If a key is not found, it is skipped.
Args:
target: the dictionary to filter
keys: the fields to filter
"""
result = {}
for key, value in target.iteritems():
if key in keys:
new_key = keys[key]
result[new_key] = value
return result
|
Python
| 0.999999
|
@@ -2901,28 +2901,1324 @@
y%5D = value%0A%0A return result%0A
+%0A%0Adef split(target):%0A %22%22%22Takes a dictionary and splits it into single-valued dicts%0A%0A If there are any values in target that are a list it is split up%0A into a new dictionary instead.%0A%0A %3E%3E%3E split(%7B%7D)%0A %5B%7B%7D%5D%0A %3E%3E%3E split(%7B'foo':'bar'%7D)%0A %5B%7B'foo': 'bar'%7D%5D%0A %3E%3E%3E split(%7B'foo':'bar', 'bar':'baz'%7D)%0A %5B%7B'foo': 'bar', 'bar': 'baz'%7D%5D%0A %3E%3E%3E split(%7B'foo':'bar', 'bar':%5B'one', 'two'%5D%7D)%0A %5B%7B'foo': 'bar', 'bar': 'one'%7D, %7B'foo': 'bar', 'bar': 'two'%7D%5D%0A %3E%3E%3E split(%7B'foo':'bar', 'bar':%5B'one', 'two'%5D, 'baz': %5B'three', 'four'%5D%7D)%0A %5B%7B'bar': 'one', 'foo': 'bar', 'baz': 'three'%7D,%0A %7B'bar': 'two', 'foo': 'bar', 'baz': 'three'%7D,%0A %7B'bar': 'one', 'foo': 'bar', 'baz': 'four'%7D,%0A %7B'bar': 'two', 'foo': 'bar', 'baz': 'four'%7D%5D%0A %22%22%22%0A%0A result = %5B%7B%7D%5D%0A%0A for key, values in target.iteritems():%0A # Make the value a list if it's not%0A if not isinstance(values, list):%0A values = %5Bvalues%5D%0A%0A tmpresult = %5B%5D%0A%0A # Iterate over all we gathered so far%0A for filter in result:%0A for value in values:%0A # Create a new dict from the current filter%0A newdict = dict(filter)%0A%0A # And create a new dict that also has the current key/value pair%0A newdict%5Bkey%5D = value%0A tmpresult.append(newdict)%0A%0A # Update the result for the next iteration%0A result = tmpresult%0A%0A return result%0A
|
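The added `split` (its doctests are visible in the decoded diff) expands every list-valued entry into a Cartesian product of single-valued dicts. Below is an equivalent, self-contained sketch using `itertools.product` — a reimplementation, not the Melange code, and the ordering of the combinations may differ from the original's doctests:

```python
from itertools import product

def split(target):
    """Split a dict with list values into a list of single-valued dicts."""
    keys = list(target)
    value_lists = [v if isinstance(v, list) else [v] for v in target.values()]
    return [dict(zip(keys, combo)) for combo in product(*value_lists)]

print(split({}))                                     # [{}]
print(split({'foo': 'bar', 'bar': ['one', 'two']}))
# [{'foo': 'bar', 'bar': 'one'}, {'foo': 'bar', 'bar': 'two'}]
```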
6d7dcf2d44c1de9c51f94c96305afbb2bde2bce2
|
Revert "Patch eventlet in the runner to try and avoid blocking scenarios"
|
sentry/scripts/runner.py
|
sentry/scripts/runner.py
|
#!/usr/bin/env python
"""
sentry.scripts.runner
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from eventlet import patcher
patcher.monkey_patch()
import base64
import datetime
import errno
import imp
import os
import os.path
import sys
from django.conf import settings as django_settings
from optparse import OptionParser
from sentry import VERSION, environment, commands
ALL_COMMANDS = (
# General use commands
'init',
'upgrade',
'start',
'stop',
'restart',
'cleanup',
# These should probably be hidden by default
'manage',
)
KEY_LENGTH = 40
DEFAULT_CONFIG_PATH = os.environ.get('SENTRY_CONFIG',
os.path.expanduser(os.path.join('~', '.sentry', 'sentry.conf.py')))
CONFIG_TEMPLATE = """
import os.path
from sentry.conf.server import *
ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
# You can swap out the engine for MySQL easily by changing this value
# to ``django.db.backends.mysql`` or to PostgreSQL with
# ``django.db.backends.postgresql_psycopg2``
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(ROOT, 'sentry.db'),
'USER': 'postgres',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
SENTRY_KEY = %(default_key)r
# Set this to false to require authentication
SENTRY_PUBLIC = True
SENTRY_WEB_HOST = '0.0.0.0'
SENTRY_WEB_PORT = 9000
SENTRY_LOG_DIR = os.path.join(ROOT, 'log')
SENTRY_RUN_DIR = os.path.join(ROOT, 'run')
"""
def copy_default_settings(filepath):
"""
Creates a default settings file at ``filepath``.
"""
dirname = os.path.dirname(filepath)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(filepath, 'w') as fp:
key = base64.b64encode(os.urandom(KEY_LENGTH))
output = CONFIG_TEMPLATE % dict(default_key=key)
fp.write(output)
def settings_from_file(filename, silent=False):
"""
Configures django settings from an arbitrary (non sys.path) filename.
"""
mod = imp.new_module('config')
mod.__file__ = filename
try:
execfile(filename, mod.__dict__)
except IOError, e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
if not django_settings.configured:
django_settings.configure()
for setting in dir(mod):
if setting == setting.upper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and type(setting_value) == str:
setting_value = (setting_value,) # In case the user forgot the comma.
setattr(django_settings, setting, setting_value)
def main():
args = sys.argv
if len(args) < 2 or args[1] not in ALL_COMMANDS:
print "usage: sentry [command] [options]"
print
print "Available subcommands:"
for cmd in ALL_COMMANDS:
print " ", cmd
sys.exit(1)
parser = OptionParser(version="%%prog %s" % VERSION)
if args[1] == 'init':
(options, args) = parser.parse_args()
config_path = ' '.join(args[1:]) or DEFAULT_CONFIG_PATH
if os.path.exists(config_path):
resp = None
while resp not in ('Y', 'n'):
resp = raw_input('File already exists at %r, overwrite? [nY] ' % config_path)
if resp == 'n':
print "Aborted!"
return
try:
copy_default_settings(config_path)
except OSError, e:
raise e.__class__, 'Unable to write default settings file to %r' % config_path
print "Configuration file created at %r" % config_path
return
parser.add_option('--config', metavar='CONFIG', default=DEFAULT_CONFIG_PATH)
command = getattr(commands, args[1])
for option in getattr(command, 'options', []):
parser.add_option(option)
(options, args) = parser.parse_args()
config_path = options.config
# We hardcode skipping this check via init
if not os.path.exists(config_path):
raise ValueError("Configuration file does not exist. Use 'init' to initialize the file.")
environment['config'] = config_path
environment['start_date'] = datetime.datetime.utcnow()
settings_from_file(config_path)
# set debug
if getattr(options, 'debug', False):
django_settings.DEBUG = True
# filter out reserved options
kwargs = dict((k, v) for k, v in options.__dict__.iteritems() if k != 'config')
# execute command
if getattr(command, 'consume_args', False):
command(args, **kwargs)
else:
command(**kwargs)
sys.exit(0)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -188,63 +188,8 @@
%22%22%22%0A
-from eventlet import patcher%0A%0Apatcher.monkey_patch()%0A%0A%0A
impo
|
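The revert removes `patcher.monkey_patch()` from the top of the runner. Eventlet's patching rewrites stdlib modules such as `socket` and `ssl` in place, so it only behaves predictably when it runs before anything else imports those modules; one plausible reason runner-level patching caused trouble here is that Django and its backends are imported around the same time. A minimal sketch of the ordering rule, assuming eventlet is installed:

```python
import eventlet
eventlet.monkey_patch()  # must run before other modules import socket/ssl

import socket  # noqa: E402 -- deliberately imported after patching
print(socket.socket)     # now resolves to eventlet's green socket class
```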
5906946b0287536976f816884169e3a3c91df043
|
Add a verbose_name and help_text to the User.id Property.
|
app/soc/models/user.py
|
app/soc/models/user.py
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the User Model."""
__authors__ = [
'"Todd Larsen" <tlarsen@google.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Pawel Solyga" <pawel.solyga@gmail.com>',
]
import logging
from google.appengine.api import users
from google.appengine.ext import db
from django.utils.translation import ugettext_lazy
from soc.models import base
from soc.views.helpers import forms_helpers
class User(base.ModelWithFieldAttributes):
"""A user and associated login credentials, the fundamental identity entity.
User is a separate Model class from Person because the same login
ID may be used to, for example, serve as Contributor in one Program
and a Reviewer in another.
Also, this allows a Person to, in the future, re-associate that
Person entity with a different Google Account if necessary.
A User entity participates in the following relationships implemented
as a db.ReferenceProperty elsewhere in another db.Model:
persons) a 1:many relationship of Person entities identified by the
User. This relation is implemented as the 'persons' back-reference
Query of the Person model 'user' reference.
"""
#: A Google Account, which also provides a "private" email address.
#: This email address is only used in an automated fashion by
#: Melange web applications and is not made visible to other users
#: of any Melange application.
id = db.UserProperty(required=True)
#: A list (possibly empty) of former Google Accounts associated with
#: this User.
former_ids = db.ListProperty(users.User)
#: Required field storing a nickname; displayed publicly.
#: Nicknames can be any valid UTF-8 text.
nick_name = db.StringProperty(required=True,
verbose_name=ugettext_lazy('Nick name'))
#: Required field storing linkname used in URLs to identify user.
#: Lower ASCII characters only.
link_name = db.StringProperty(required=True,
verbose_name=ugettext_lazy('Link name'))
link_name.help_text = ugettext_lazy(
'Field used in URLs to identify user. '
'Lower ASCII characters only.')
#: field storing whether User is a Developer with site-wide access.
is_developer = db.BooleanProperty(
verbose_name=ugettext_lazy('Is Developer'))
is_developer.help_text = ugettext_lazy(
'Field used to indicate user with site-wide "Developer" access.')
|
Python
| 0.001817
|
@@ -2046,16 +2046,155 @@
red=True
+,%0A verbose_name=ugettext_lazy('User account'))%0A id.help_text = ugettext_lazy(%0A 'Email address of a valid user (Google Account).'
)%0A%0A #:
|
2bd4a4509a1a89ef1c2648a27c0aa74017aeca41
|
Read data from stdin if no path or query given
|
wikigenre.py
|
wikigenre.py
|
import codecs
import logging
import re
from glob import iglob
from os.path import join, dirname, normpath
from gevent import monkey
from gevent import spawn, joinall
from gevent.event import AsyncResult
monkey.patch_socket()
monkey.patch_ssl()
import requests
from lxml import html
from mutagen import easyid3, flac, easymp4, oggvorbis, musepack
from wikiapi import WikiApi
logger = logging.getLogger(__name__)
URI_SCHEME = 'http'
ARTICLE_URI = 'wikipedia.org/wiki/'
GENRE_CACHE = {} # {(album, artist): AsyncResult([genre1, genre2, ...])}
def titlecase(string):
return u' '.join(part.capitalize() for part in string.split())
def get_genres(query):
wiki = WikiApi()
results = wiki.find(query.encode('utf-8'))
if results:
try:
url = '{0}://{1}.{2}{3}'.format(
URI_SCHEME, wiki.options['locale'], ARTICLE_URI,
results[0].encode('utf-8'))
resp = requests.get(url)
dom = html.fromstring(resp.content)
return (dom.xpath('.'
'//table[contains(@class, "haudio")]'
'//td[@class="category"]'
'/a'
'/text()') or
dom.xpath('.'
'//table[contains(@class, "infobox")]'
'//th'
'/a[text()="Genre"]'
'/..'
'/..'
'/td'
'/a'
'/text()'))
except Exception as e:
logger.error('Error getting genres for %s: %s', query, repr(e))
return []
def search_variants(artist, album):
if artist and album:
yield get_genres(u'%s (%s album)' % (album, artist))
if album:
yield get_genres(u'%s (album)' % album)
yield get_genres(album)
if artist:
yield get_genres(artist)
def albumgenres(artist='', album=''):
result = GENRE_CACHE.get((artist, album))
if result is None:
GENRE_CACHE[(artist, album)] = result = AsyncResult()
result.set(reduce(lambda a, b: a or b, search_variants(artist, album)))
return result.get()
def load_track(track):
track_lower = track.lower()
if track_lower.endswith('.mp3'):
return easyid3.EasyID3(track)
elif track_lower.endswith('.flac'):
return flac.FLAC(track)
elif track_lower.endswith('.mp4') or track_lower.endswith('.m4a'):
return easymp4.EasyMP4(track)
elif track_lower.endswith('.ogg'):
return oggvorbis.OggVorbis(track)
elif track_lower.endswith('.mpc'):
return musepack.Musepack(track)
else:
raise ValueError("unhandled format '%s'" % track)
def wikigenre(track, force=False):
track = normpath(track)
try:
audio = load_track(track)
audio_genre = audio.get('genre')
if audio_genre is not None and not force:
logger.info('Skipping %s', track)
else:
artist = audio.get('artist', [None])[0]
album = audio.get('album', [None])[0]
genres = map(titlecase, albumgenres(artist, album))
if genres:
audio['genre'] = genres
audio.save()
logger.info('Tagged %s', track)
else:
logger.warn('No genres found for %s', track)
except Exception as e:
logger.error('Error tagging %s: %s', track, repr(e))
raise
def main(query='', path=None, force=False):
with open(join(dirname(__file__), 'wikigenre.log'), 'a') as log:
handler = logging.StreamHandler()
filehandler = logging.StreamHandler(log)
formatter = logging.Formatter('%(asctime)s;%(levelname)s;%(message)s')
handler.setFormatter(formatter)
filehandler.setFormatter(formatter)
logger.addHandler(handler)
logger.addHandler(filehandler)
logger.setLevel('DEBUG')
if query:
for artistalbum in query.split('; '):
parts = artistalbum.split(' - ', 1)
try:
artist, album = parts
except ValueError:
artist, album = '', artistalbum
print (artistalbum + ': ' +
'; '.join(map(titlecase, albumgenres(artist, album))))
elif path is not None:
logger.info('Starting')
# Escape square brackets
path = re.sub(r'([\[\]])', r'[\1]', path)
joinall([spawn(wikigenre, track, force=force)
for track in iglob(path)])
logger.info('Finished')
else:
print 'either query or path required'
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('path', metavar='PATH', nargs='?')
parser.add_argument('-q', '--query', metavar='QUERY', nargs='?', default='',
help='[artist - ]album(; [artist - ]album)*')
parser.add_argument('-f', '--force', action='store_true')
namespace = parser.parse_args()
kwargs = dict(namespace._get_kwargs())
main(**kwargs)
|
Python
| 0.000065
|
@@ -1,17 +1,42 @@
-import codecs
+from __future__ import print_function%0A
%0Aimp
@@ -57,16 +57,27 @@
port re%0A
+import sys%0A
from glo
@@ -4362,17 +4362,16 @@
print
-
(artista
@@ -4384,17 +4384,16 @@
': ' +%0A
-
@@ -4791,45 +4791,662 @@
-print 'either query or path required'
+# Read data from stdin%0A # Sample input: %22The Beatles - %5BAbbey Road #07%5D Here Comes the Sun%22%0A trackinfo = re.compile(%0A r'(.+) - %5C%5B(.+?)(?: CD%5Cd+)?(?: #%5Cd+)?%5C%5D')%0A lines = sys.stdin.read()%0A greenlets = %5B%5D%0A for line in lines.splitlines():%0A mo = trackinfo.match(line)%0A if mo is None:%0A continue%0A artist, album = mo.groups()%0A greenlets.append(spawn(albumgenres, artist, album))%0A joinall(greenlets)%0A for greenlet in greenlets:%0A print('; '.join(map(titlecase, greenlet.get())))
%0A%0A%0Ai
@@ -5590,16 +5590,16 @@
gs='?')%0A
-
pars
@@ -5630,16 +5630,40 @@
-query',
+%0A
metavar
|
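A quick standalone check of the stdin-parsing change above: everything hinges on the trackinfo regex added in the diff. The pattern below is copied from that diff; the sample line is the one quoted in its comment, and the CD/track-number groups are optional and non-capturing.

import re

# Pattern from the diff above; the optional, non-capturing CD and
# track-number groups are stripped from the album name.
trackinfo = re.compile(r'(.+) - \[(.+?)(?: CD\d+)?(?: #\d+)?\]')
mo = trackinfo.match("The Beatles - [Abbey Road #07] Here Comes the Sun")
print(mo.groups())  # ('The Beatles', 'Abbey Road')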
6910bfbebcc3cde2d67b4ed131118f77776ff967
|
move callbacks above disconnection
|
nyuki/nyuki.py
|
nyuki/nyuki.py
|
import asyncio
from jsonschema import validate, ValidationError
import logging
import logging.config
import signal
from nyuki.bus import Bus
from nyuki.capabilities import Exposer, Response, resource
from nyuki.commands import get_command_kwargs
from nyuki.config import (
get_full_config, write_conf_json, merge_configs, DEFAULT_CONF_FILE
)
from nyuki.events import Event, EventManager
from nyuki.handlers import MetaHandler
from nyuki.loop import EventLoop
log = logging.getLogger(__name__)
class Nyuki(metaclass=MetaHandler):
"""
A lightweight base class to build nyukis. A nyuki provides tools that shall
help the developer with managing the following topics:
- Bus of communication between nyukis.
- Asynchronous events.
- Capabilities exposure through a REST API.
This class has been written to perform the features above in a reliable,
single-threaded, asynchronous and concurrent-safe environment.
The core engine of a nyuki implementation is the asyncio event loop
(a single loop is used for all features).
A wrapper is also provided to ease the use of asynchronous calls
over the actions nyukis are intended to do.
"""
# Configuration schema must follow jsonschema rules.
BASE_CONF_SCHEMA = {
"type": "object",
"required": ["bus", "api", "log"],
"properties": {
"bus": {
"type": "object",
"required": ["jid", "password"],
"properties": {
"jid": {"type": "string"},
"password": {"type": "string"}
}
}
}
}
def __init__(self, **kwargs):
# List of configuration schemas
self._schemas = []
# Get configuration from multiple sources and register base schema
kwargs = kwargs or get_command_kwargs()
self.config_filename = kwargs.get('config', DEFAULT_CONF_FILE)
self._config = get_full_config(**kwargs)
self.register_schema(self.BASE_CONF_SCHEMA)
# Initialize logging
logging.config.dictConfig(self._config['log'])
self.event_loop = EventLoop(loop=asyncio.get_event_loop())
self.event_manager = EventManager(self.event_loop)
self._bus = self._make_bus()
self._exposer = Exposer(self.event_loop.loop)
@property
def config(self):
return self._config
@property
def capabilities(self):
return self._exposer.capabilities
@property
def capability_exposer(self):
return self._exposer
@property
def request(self):
return self._bus.request
@property
def publish(self):
return self._bus.publish
@property
def subscribe(self):
return self._bus.subscribe
def _make_bus(self):
"""
Returns a new Bus object attribute.
"""
return Bus(
loop=self.event_loop,
event_manager=self.event_manager,
**self._config['bus'])
def start(self):
"""
Start the nyuki: launch the bus client and expose capabilities.
Basically, it starts the event loop.
"""
signal.signal(signal.SIGTERM, self.abort)
signal.signal(signal.SIGINT, self.abort)
self._bus.connect()
self._exposer.expose(**self._config['api'])
self.event_loop.start(block=True)
def abort(self, signum, frame):
"""
Signal handler: gracefully stop the nyuki.
"""
log.warning("Caught signal {}".format(signum))
self.stop()
def stop(self, wait=0):
"""
Stop the nyuki. Basically, disconnect from the bus. That will eventually
trigger a `Disconnected` event.
"""
self._exposer.shutdown()
self._bus.disconnect(wait=wait)
self._bus.client.disconnected.add_done_callback(self.event_loop.stop)
def register_schema(self, schema, format_checker=None):
"""
Add a jsonschema to validate on configuration update.
"""
self._schemas.append((schema, format_checker))
self._validate_config()
def _validate_config(self, config=None):
"""
Validate on all registered configuration schemas.
"""
log.debug('Validating configuration')
config = config or self._config
for schema, checker in self._schemas:
validate(config, schema, format_checker=checker)
def update_config(self, *new_confs):
"""
Update the current configuration with the given list of dicts.
"""
config = merge_configs(self._config, *new_confs)
self._validate_config(config)
self._config = config
def save_config(self):
"""
Save the current configuration dict to its JSON file.
"""
write_conf_json(self.config, self.config_filename)
def reload(self, services=False):
"""
Override this to implement a reloading to your Nyuki.
(called on PATCH /config)
"""
def reconnect(future):
self._bus = self._make_bus()
self._bus.connect()
self.save_config()
logging.config.dictConfig(self._config['log'])
if services:
self._bus.disconnect()
self._bus.client.disconnected.add_done_callback(reconnect)
self._exposer.restart(**self._config['api'])
@resource(endpoint='/config', version='v1')
class Configuration:
def get(self, request):
return Response(self._config)
def patch(self, request):
try:
self.update_config(request)
except ValidationError as error:
error = {'error': error.message}
log.error('Bad configuration received : {}'.format(request))
log.debug(error)
return Response(body=error, status=400)
else:
self.reload('api' in request or 'bus' in request)
return Response(self._config)
|
Python
| 0
|
@@ -3615,17 +3615,17 @@
f, wait=
-0
+2
):%0A
@@ -3799,48 +3799,8 @@
n()%0A
- self._bus.disconnect(wait=wait)%0A
@@ -3872,16 +3872,56 @@
op.stop)
+%0A self._bus.disconnect(wait=wait)
%0A%0A de
@@ -5274,43 +5274,8 @@
es:%0A
- self._bus.disconnect()%0A
@@ -5341,16 +5341,51 @@
onnect)%0A
+ self._bus.disconnect()%0A
|
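One plausible reading of the reordering above, sketched with a plain asyncio future: register the done-callback first, then trigger disconnection, so the cleanup handler is guaranteed to be in place when the "Disconnected" event fires. The names here (disconnect, on_disconnected) are illustrative, not part of the nyuki API.

import asyncio

async def main():
    loop = asyncio.get_running_loop()
    disconnected = loop.create_future()

    def on_disconnected(future):
        # Cleanup that must observe the disconnection result.
        print("cleanup after:", future.result())

    def disconnect():
        # Stands in for bus.disconnect(); resolving the future plays
        # the role of the Disconnected event.
        disconnected.set_result("bus closed")

    disconnected.add_done_callback(on_disconnected)  # callback first
    disconnect()                                     # then disconnect
    await asyncio.sleep(0)  # one loop tick so the callback runs

asyncio.run(main())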
b717696b5cff69e3586e06c399be7d06c057e503
|
Make spawn_n() stub properly ignore errors in the child thread work
|
nova/tests/fake_utils.py
|
nova/tests/fake_utils.py
|
# Copyright (c) 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This modules stubs out functions in nova.utils."""
from nova import utils
def stub_out_utils_spawn_n(stubs):
"""Stubs out spawn_n with a blocking version.
This aids testing async processes by blocking until they're done.
"""
def no_spawn(func, *args, **kwargs):
return func(*args, **kwargs)
stubs.Set(utils, 'spawn_n', no_spawn)
|
Python
| 0.000006
|
@@ -888,24 +888,41 @@
**kwargs):%0A
+ try:%0A
retu
@@ -945,16 +945,352 @@
*kwargs)
+%0A except Exception:%0A # NOTE(danms): This is supposed to simulate spawning%0A # of a thread, which would run separate from the parent,%0A # and die silently on error. If we don't catch and discard%0A # any exceptions here, we're not honoring the usual%0A # behavior.%0A pass
%0A%0A st
|
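A self-contained sketch of the behavior the diff above adds: the synchronous stand-in for spawn_n has to swallow exceptions, because work running in a real detached thread would die silently instead of propagating into the caller. This is illustrative, not the exact nova test harness.

def no_spawn(func, *args, **kwargs):
    # Run the "spawned" work inline, but discard errors just like a
    # detached thread would.
    try:
        return func(*args, **kwargs)
    except Exception:
        pass

def boom():
    raise RuntimeError("worker failed")

no_spawn(boom)                         # no exception reaches the caller
print(no_spawn(lambda x: x * 2, 21))   # 42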
1f65be1f67867fc445b692df0f9390d6aa576e02
|
Fix import in common/utils
|
manyfaced/common/utils.py
|
manyfaced/common/utils.py
|
import time
import pickle
from socket import error as socket_error
from manyfaced.common.status import CLIENT_TIMEOUT
def dump_file(data):
try:
with file('temp.db') as f:
string_file = f.read()
db = pickle.loads(string_file)
except:
db = list()
db.append(data)
with open('temp.db', "w") as f:
f.write(str(pickle.dumps(db)))
def receive_timeout(the_socket, timeout=CLIENT_TIMEOUT):
# make socket non blocking
the_socket.setblocking(0)
# total data partwise in an array
total_data = []
# beginning time
begin = time.time()
while True:
# if you got some data, then break after timeout
if total_data and time.time() - begin > timeout:
break
# if you got no data at all, wait a little longer, twice the timeout
elif time.time() - begin > timeout * 2:
break
# recv something
try:
data = the_socket.recv(8192)
if data:
total_data.append(data)
# change the beginning time for measurement
begin = time.time()
else:
# sleep for sometime to indicate a gap
time.sleep(0.1)
except socket_error:
pass
# join all parts to make final string
return ''.join(total_data)
|
Python
| 0.000022
|
@@ -70,25 +70,8 @@
rom
-manyfaced.common.
stat
|
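A hedged usage sketch for receive_timeout above (Python 2, matching the source, since the function joins recv() results with a str). The one-shot server, port, and sleep values are assumptions for the demo; only the keep-reading-until-the-gap-exceeds-the-timeout behavior comes from the code.

import socket
import threading
import time

def tiny_server(port):
    # Hypothetical one-shot server that sends two chunks with a gap
    # shorter than the client's timeout.
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(("127.0.0.1", port))
    srv.listen(1)
    conn, _ = srv.accept()
    conn.sendall("hello")
    time.sleep(0.2)
    conn.sendall(" world")
    conn.close()
    srv.close()

t = threading.Thread(target=tiny_server, args=(50007,))
t.daemon = True
t.start()
time.sleep(0.1)

cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cli.connect(("127.0.0.1", 50007))
# Reads until no data has arrived for ~0.5s after the last chunk:
print(receive_timeout(cli, timeout=0.5))  # hello world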
7ea980cf44a572d85f31b6cd63d2ba8f893592cc
|
Update main.py
|
scripts/eurotram/main.py
|
scripts/eurotram/main.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project: Update and crop osm dump file for Europe
# Author: Artem Svetlov <artem.svetlov@nextgis.com>
import os
import config
import argparse
def argparser_prepare():
class PrettyFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
max_help_position = 35
parser = argparse.ArgumentParser(description='',
formatter_class=PrettyFormatter)
parser.add_argument('-u', '--update', type=str, choices=['day', 'hour', 'minute'], default='day',
help='command for osmupdate')
parser.epilog = \
'''Samples:
%(prog)s
''' \
% {'prog': parser.prog}
return parser
#if prevdump does not exist - download CFO from geofabrik and crop to Europe
def updateDump(update='day',work_dump='dump.osm.pbf',poly_file='bounds.poly'):
dump_url='http://download.geofabrik.de/europe/estonia-latest.osm.pbf'
downloaded_dump='estonia-latest.osm.pbf'
updated_dump='osm/just_updated_dump.osm.pbf'
poly_file='europe.poly'
directory='osm'
if not os.path.exists(directory):
os.makedirs(directory)
#first run of the program
if os.path.exists(work_dump) == False:
os.system('wget ' + dump_url)
os.rename(downloaded_dump, work_dump)
#if prevdump exists - run osmupdate; it updates the dump to the last-hour state with clipping and saves it as currentdump
cmd = 'osmupdate {work_dump} {updated_dump} --{update} -v -B={poly_file}'.format(
work_dump=work_dump,
updated_dump=updated_dump,
update=update,
poly_file=poly_file)
print cmd
os.system(cmd)
#if osmupdate worked correctly, it creates the updated_dump file; the work dump will be replaced by it
if os.path.exists(updated_dump) == True:
os.remove(work_dump)
os.rename(updated_dump, work_dump)
else:
#osmupdate found your file is already up-to-date
pass
return 0
def filter_osm_dump(work_dump='dump.osm.pbf',file_result='routesFinal.osm.pbf'):
import json
import pprint
pp=pprint.PrettyPrinter(indent=2)
refs=[]
file_src = work_dump
file_temp_1 = 'routes.osm.pbf'
file_result = 'routesFinal.osm.pbf'
print 'Filter step 1'
cmd='''
~/osmosis/bin/osmosis \
-q \
--read-pbf {file_src} \
--tf accept-relations route=tram \
--used-way --used-node \
--write-pbf {file_temp_1}
'''.format(file_src=file_src,file_temp_1=file_temp_1)
os.system(cmd)
print 'Filter step 3'
cmd='''
~/osmosis/bin/osmosis \
-q \
--read-pbf {file_temp_1} \
--tf accept-relations "type=route" \
--used-way --used-node \
--write-pbf {file_result}
'''
cmd = cmd.format(file_temp_1=file_temp_1,file_result=file_result)
os.system(cmd)
def importdb(host,database,username,password,filename='routesFinal.osm.pbf'):
cmd = 'osm2pgsql --create --slim -E 3857 --cache-strategy sparse --cache 100 --host {host} --database {database} --username {username} {filename}'.format(host=host,
database=database,username=username,password=password,filename=filename)
print cmd
os.system(cmd)
def process(host,dbname,user,password):
cmd='''python ../../osmot.py -hs {host} -d {dbname} -u {user} -p {password}
'''.format(
host=host,
dbname=dbname,
user=user,
password=password)
os.system(cmd)
if __name__ == '__main__':
host=config.host
dbname=config.dbname
user=config.user
password=config.password
parser = argparser_prepare()
args = parser.parse_args()
update = args.update
updateDump(update,poly_file='europe.poly')
filter_osm_dump(work_dump='dump.osm.pbf',file_result='routesFinal.osm.pbf')
importdb(host,dbname,user,password)
process(host,dbname,user,password)
|
Python
| 0.000001
|
@@ -2253,16 +2253,21 @@
'routes
+_temp
.osm.pbf
@@ -2899,16 +2899,47 @@
em(cmd)%0A
+ os.remove(file_temp_1)%0A
%0A%0Adef im
|
8355340347f57db0796385c0700a91d61cb9b82a
|
Fix typos
|
scripts/guides_master.py
|
scripts/guides_master.py
|
KT_GUIDES_MASTER = {
'path': 'keras_tuner/',
'title': 'Hyperparameter Tuning',
'toc': True,
'children': [
{
'path': 'getting_started',
'title': 'Getting started with KerasTuner',
},
{
'path': 'distributed_tuning',
'title': 'Distributed hyperparameter tuning with KerasTuner',
},
{
'path': 'custom_tuner',
'title': 'Tune hyperparameters in your custom training loop',
},
{
'path': 'visualize_tuning',
'title': 'Visualize the hyperparameter tuning process',
},
{
'path': 'tailor_the_search_space',
'title': 'Tailor the search space',
},
]
}
GUIDES_MASTER = {
'path': 'guides/',
'title': 'Developer guides',
'toc': True,
'children': [
{
'path': 'functional_api',
'title': 'The Functional API',
},
{
'path': 'sequential_model',
'title': 'The Sequential model',
},
{
'path': 'making_new_layers_and_models_via_subclassing',
'title': 'Making new Layers & Models via subclassing',
},
{
'path': 'training_with_built_in_methods',
'title': 'Training & evaluation with the built-in methods',
},
{
'path': 'customizing_what_happens_in_fit',
'title': 'Customizing what happens in `fit()`',
},
{
'path': 'writing_a_training_loop_from_scratch',
'title': 'Writing a training loop from scratch',
},
{
'path': 'serialization_and_saving',
'title': 'Serialization & saving',
},
{
'path': 'writing_your_own_callbacks',
'title': 'Writing your own Callbacks',
},
# {
# 'path': 'writing_your_own_metrics',
# 'title': 'Writing your own Metrics',
# },
# {
# 'path': 'writing_your_own_losses',
# 'title': 'Writing your own Losses',
# },
{
'path': 'preprocessing_layers',
'title': 'Working with preprocessing Layers',
},
{
'path': 'working_with_rnns',
'title': 'Working with recurrent neural networks',
},
{
'path': 'understanding_masking_and_padding',
'title': 'Understanding masking & padding',
},
{
'path': 'distributed_training',
'title': 'Multi-GPU & distributed training',
},
# {
# 'path': 'tpu_training',
# 'title': 'Training Keras models on TPU',
# },
{
'path': 'transfer_learning',
'title': 'Transfer learning & fine-tuning',
},
# {
# 'path': 'hyperparameter_optimization',
# 'title': 'Hyperparameter optimization',
# },
{
'path': 'training_keras_models_on_cloud',
'title': 'Training Keras models with TensorFlow Cloud',
},
KT_GUIDES_MASTER,
# TODO: mixed precision
]
}
|
Python
| 0.999999
|
@@ -1185,17 +1185,17 @@
new
-L
+l
ayers &
Mode
@@ -1190,17 +1190,17 @@
ayers &
-M
+m
odels vi
@@ -1866,17 +1866,17 @@
our own
-C
+c
allbacks
@@ -2242,17 +2242,17 @@
cessing
-L
+l
ayers',%0A
|
03f99a79941ade157689534e7ed0d0d196dd4d56
|
fix grep command
|
scripts/logfetch/grep.py
|
scripts/logfetch/grep.py
|
import os
import sys
from termcolor import colored
GREP_COMMAND_FORMAT = 'xargs -n {0} {1} < {2}'
DEFAULT_GREP_COMMAND = 'grep --color=always \'{1}\''
def grep_files(args, all_logs):
if args.grep:
greplist_filename = '{0}/.greplist'.format(args.dest)
create_greplist(args, all_logs, greplist_filename)
command = grep_command(args, all_logs, greplist_filename)
sys.stderr.write(colored('Running "{0}" this might take a minute'.format(command), 'blue') + '\n')
sys.stdout.write(os.popen(command).read() + '\n')
remove_greplist(greplist_filename)
sys.stderr.write(colored('Finished grep, exiting', 'green') + '\n')
def create_greplist(args, all_logs, greplist_filename):
greplist_file = open(greplist_filename, 'wb')
for log in all_logs:
greplist_file.write('{0}\n'.format(log))
greplist_file.close()
def remove_greplist(greplist_filename):
if os.path.isfile(greplist_filename):
os.remove(greplist_filename)
def grep_command(args, all_logs, greplist_filename):
if 'grep' in args.grep:
return GREP_COMMAND_FORMAT.format(len(all_logs), args.grep, greplist_filename)
else:
return GREP_COMMAND_FORMAT.format(len(all_logs), DEFAULT_GREP_COMMAND.format(args.grep), greplist_filename)
|
Python
| 0.000609
|
@@ -139,17 +139,17 @@
ways %5C'%7B
-1
+0
%7D%5C''%0A%0Ade
|
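The one-character diff above matters because str.format indexes positional arguments: DEFAULT_GREP_COMMAND is formatted with a single argument, so its placeholder must be {0}. A minimal reproduction of the bug and the fix:

DEFAULT_GREP_COMMAND = "grep --color=always '{0}'"
print(DEFAULT_GREP_COMMAND.format("ERROR"))
# grep --color=always 'ERROR'

# With '{1}', the same call fails: only one positional argument
# (index 0) is supplied.
try:
    "grep --color=always '{1}'".format("ERROR")
except IndexError as exc:
    print("IndexError:", exc)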
c4bbe848f2e8f972423e766a42d1959df782f623
|
fix publisher for sending messages
|
oct/core/hq.py
|
oct/core/hq.py
|
from __future__ import print_function
import zmq
import time
import json
class HightQuarter(object):
"""The main hight quarter that will receive informations from the turrets
and send the start message
:param publish_port int: the port for publishing information to turrets
:param rc_port int: the result collector port for collecting results from the turrets
:param results_writer ResultsWriter: the results writer
:param config dict: the configuration of the test
"""
def __init__(self, publish_port, rc_port, results_writer, config):
context = zmq.Context()
self.poller = zmq.Poller()
self.result_collector = context.socket(zmq.PULL)
self.result_collector.bind("tcp://*:{}".format(rc_port))
self.publisher = context.socket(zmq.PUB)
self.publisher.bind("tcp://*:{}".format(publish_port))
self.poller.register(self.result_collector, zmq.POLLIN)
self.results_writer = results_writer
self.config = config
self.turrets = []
self.publisher.send_multipart(['hq', json.dumps({'command': 'status_request', 'msg': None})])
def _turret_already_exists(self, turret_data):
for t in self.turrets:
if turret_data['uuid'] == t['uuid']:
return False
return True
def _update_turret(self, turret_data):
for t in self.turrets:
if turret_data['uuid'] == t['uuid']:
t['status'] = turret_data['status']
break
def wait_turrets(self, wait_for):
"""Wait until wait_for turrets are connected and ready
"""
print("waiting for {} turrets to connect".format(wait_for - len(self.turrets)))
while len(self.turrets) < wait_for:
socks = dict(self.poller.poll(1000))
if self.result_collector in socks:
data = self.result_collector.recv_json()
if 'turret' in data and 'status' in data and not self._turret_already_exists(data):
self.turrets.append({'turret': data['turret'], 'status': data['status'], 'uuid': data['uuid']})
print("{} turrets are now connected".format(len(self.turrets)))
print("waiting for {} turrets to connect".format(wait_for - len(self.turrets)))
elif 'turret' in data and 'status' in data and self._turret_already_exists(data):
self._update_turret(data)
def run(self):
"""Run the hight quarter, lunch the turrets and wait for results
"""
elapsed = 0
start_time = time.time()
self.publisher.send_multipart(['hq', json.dumps({'command': 'start', 'msg': 'open fire'})])
display = 'turrets: {}, elapsed: {} transactions: {} timers: {} errors: {}\r'
while elapsed < (self.config['run_time'] + 1):
try:
socks = dict(self.poller.poll(1000))
if self.result_collector in socks:
data = self.result_collector.recv_json()
if 'status' in data:
self.turrets.append((data['turret'], data['status']))
else:
self.results_writer.write_result(data)
print(display.format(self.turrets, round(elapsed), self.results_writer.trans_count,
self.results_writer.timer_count,
self.results_writer.error_count), end='')
elapsed = time.time() - start_time
except (Exception, KeyboardInterrupt) as e:
print("\nStopping test, sending stop command to turrets")
self.publisher.send_multipart(['hq', json.dumps({'command': 'stop', 'msg': 'premature stop'})])
print(e)
break
self.publisher.send_multipart(['hq', json.dumps({'command': 'stop', 'msg': 'stopping fire'})])
|
Python
| 0.000001
|
@@ -1053,50 +1053,16 @@
elf.
+_
publish
-er.send_multipart(%5B'hq', json.dumps
(%7B'c
@@ -1101,18 +1101,16 @@
: None%7D)
-%5D)
%0A%0A de
@@ -1482,16 +1482,169 @@
break%0A%0A
+ def _publish(self, message):%0A data = json.dumps(message)%0A self.publisher.send_multipart(%5Bbytes('hq', 'UTF-8'), bytes(data, 'UTF-8')%5D)%0A%0A
def
@@ -2752,50 +2752,16 @@
elf.
+_
publish
-er.send_multipart(%5B'hq', json.dumps
(%7B'c
@@ -2798,18 +2798,16 @@
fire'%7D)
-%5D)
%0A
@@ -3789,50 +3789,16 @@
elf.
+_
publish
-er.send_multipart(%5B'hq', json.dumps
(%7B'c
@@ -3839,18 +3839,16 @@
stop'%7D)
-%5D)
%0A
@@ -3904,50 +3904,16 @@
elf.
+_
publish
-er.send_multipart(%5B'hq', json.dumps
(%7B'c
@@ -3953,11 +3953,9 @@
fire'%7D)
-%5D)
%0A
|
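A sketch of the pattern the diff above converges on: on Python 3, pyzmq's send_multipart requires bytes frames, so both the topic and the JSON payload are encoded before sending. The endpoint and port are arbitrary; only the encoding discipline mirrors the fix.

import json
import zmq

context = zmq.Context()
publisher = context.socket(zmq.PUB)
publisher.bind("tcp://*:5556")

def publish(message):
    # Encode both frames explicitly; str frames raise TypeError on
    # Python 3 builds of pyzmq.
    data = json.dumps(message)
    publisher.send_multipart([bytes('hq', 'UTF-8'), bytes(data, 'UTF-8')])

publish({'command': 'start', 'msg': 'open fire'})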
3eea445a445a9154758cd82c11c52751f2804eca
|
add axis to 3d example
|
examples/tomo/xray_trafo_parallel_3d.py
|
examples/tomo/xray_trafo_parallel_3d.py
|
# Copyright 2014, 2015 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Example using the X-ray transform with 3d parallel beam geometry."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
import numpy as np
import odl
# Discrete reconstruction space: discretized functions on the cube
# [-20, 20]^3 with 300 samples per dimension.
reco_space = odl.uniform_discr(
min_corner=[-20, -20, -20], max_corner=[20, 20, 20],
nsamples=[300, 300, 300], dtype='float32')
# Make a parallel beam geometry with flat detector
# Angles: uniformly spaced, n = 360, min = 0, max = 2 * pi
angle_partition = odl.uniform_partition(0, 2 * np.pi, 360)
# Detector: uniformly sampled, n = (558, 558), min = (-30, -30), max = (30, 30)
detector_partition = odl.uniform_partition([-30, -30], [30, 30], [558, 558])
# Discrete reconstruction space
# Astra cannot handle axis aligned origin_to_det unless it is aligned
# with the third coordinate axis. See issue #18 at ASTRA's github.
# This is fixed in new versions of astra; with older versions, this could
# give a zero result.
geometry = odl.tomo.Parallel3dSingleAxisGeometry(angle_partition,
detector_partition)
# X-ray transform aka forward projection. We use ASTRA CUDA backend.
xray_trafo = odl.tomo.XrayTransform(reco_space, geometry, impl='astra_cuda')
# Create a discrete Shepp-Logan phantom (modified version)
phantom = odl.util.phantom.shepp_logan(reco_space, True)
# Create projection data by calling the ray transform on the phantom
proj_data = xray_trafo(phantom)
# Back-projection can be done by simply calling the adjoint operator on the
# projection data (or any element in the projection space).
backproj = xray_trafo.adjoint(proj_data)
# Shows a slice of the phantom, projections, and reconstruction
phantom.show(indices=np.s_[:, :, 150], title='Phantom, middle z slice')
proj_data.show(indices=np.s_[0, :, :], title='Projection 0')
proj_data.show(indices=np.s_[90, :, :], title='Projection 90')
backproj.show(indices=np.s_[:, :, 150],
title='back-projection, middle z slice')
|
Python
| 0.00002
|
@@ -1841,14 +1841,8 @@
el3d
-Single
Axis
@@ -1866,16 +1866,36 @@
rtition,
+ detector_partition,
%0A
@@ -1934,32 +1934,22 @@
- detector_partition
+axis=%5B0, 1, 0%5D
)%0A%0A#
@@ -2846,9 +2846,8 @@
slice')%0A
-%0A
|
5a2b175be78e5e97fd381a84d08cdeb5796513c2
|
Fix detecting cudatoolkit on win32
|
numba/cuda/cuda_paths.py
|
numba/cuda/cuda_paths.py
|
import sys
import re
import os
from collections import defaultdict, namedtuple
from numba.config import IS_WIN32
from numba.findlib import find_lib, find_file
from numba.cuda.envvars import get_numbapro_envvar
_env_path_tuple = namedtuple('_env_path_tuple', ['by', 'info'])
def _find_valid_path(options):
"""Find valid path from *options*, which is a list of 2-tuple of
(name, path). Return first pair where *path* is not None.
If no valid path is found, return ('<unavailable>', None)
"""
for by, data in options:
if data is not None:
return by, data
else:
return '<unavailable>', None
def _get_libdevice_path_decision():
options = [
('NUMBAPRO_LIBDEVICE', get_numbapro_envvar('NUMBAPRO_LIBDEVICE')),
('NUMBAPRO_CUDALIB', get_numbapro_envvar('NUMBAPRO_CUDALIB')),
('Conda environment', get_conda_ctk()),
('CUDA_HOME', get_cuda_home('nvvm', 'libdevice')),
('System', get_system_ctk('nvvm', 'libdevice')),
]
by, libdir = _find_valid_path(options)
return by, libdir
def _nvvm_lib_dir():
if IS_WIN32:
return 'nvvm', 'bin'
else:
return 'nvvm', 'lib'
def _get_nvvm_path_decision():
options = [
('NUMBAPRO_NVVM', get_numbapro_envvar('NUMBAPRO_NVVM')),
('NUMBAPRO_CUDALIB', get_numbapro_envvar('NUMBAPRO_CUDALIB')),
('Conda environment', get_conda_ctk()),
('CUDA_HOME', get_cuda_home(*_nvvm_lib_dir())),
('System', get_system_ctk(*_nvvm_lib_dir())),
]
by, libdir = _find_valid_path(options)
return by, libdir
def _get_libdevice_paths():
by, libdir = _get_libdevice_path_decision()
# Search for pattern
pat = r'libdevice(\.(?P<arch>compute_\d+))?(\.\d+)*\.bc$'
candidates = find_file(re.compile(pat), libdir)
# Grouping
out = defaultdict(list)
for path in candidates:
m = re.search(pat, path)
arch = m.group('arch')
out[arch].append(path)
# Keep only the max (most recent version) of the bitcode files.
out = {k: max(v) for k, v in out.items()}
return _env_path_tuple(by, out)
def _cudalib_path():
return 'bin' if IS_WIN32 else 'lib'
def _get_cudalib_dir_path_decision():
options = [
('NUMBAPRO_CUDALIB', get_numbapro_envvar('NUMBAPRO_CUDALIB')),
('Conda environment', get_conda_ctk()),
('CUDA_HOME', get_cuda_home(_cudalib_path())),
('System', get_system_ctk(_cudalib_path())),
]
by, libdir = _find_valid_path(options)
return by, libdir
def _get_cudalib_dir():
by, libdir = _get_cudalib_dir_path_decision()
return _env_path_tuple(by, libdir)
def get_system_ctk(*subdirs):
"""Return path to system-wide cudatoolkit; or, None if it doesn't exist.
"""
# Linux?
if sys.platform.startswith('linux'):
# Is cuda aliased to /usr/local/cuda?
# We are intentionally not getting a versioned cuda installation.
base = '/usr/local/cuda'
if os.path.exists(base):
return os.path.join(base, *subdirs)
def get_conda_ctk():
"""Return path to directory containing the shared libraries of cudatoolkit.
"""
is_conda_env = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))
if not is_conda_env:
return
# Assume the existence of NVVM implies cudatoolkit is installed
paths = find_lib('nvvm')
if not paths:
return
return os.path.join(sys.prefix, 'lib')
def get_cuda_home(*subdirs):
"""Get paths of CUDA_HOME.
*subdirs* are the subdirectory names to be appended to the resulting
path.
"""
cuda_home = os.environ.get('CUDA_HOME')
if cuda_home is not None:
return os.path.join(cuda_home, *subdirs)
def _get_nvvm_path():
by, libdir = _get_nvvm_path_decision()
candidates = find_lib('nvvm', libdir)
path = max(candidates) if candidates else None
return _env_path_tuple(by, path)
def get_cuda_paths():
"""Returns a dictionary mapping component names to a 2-tuple
of (source_variable, info).
The returned dictionary will have the following keys and infos:
- "nvvm": file_path
- "libdevice": List[Tuple[arch, file_path]]
- "cudalib_dir": directory_path
Note: The result of the function is cached.
"""
# Check cache
if hasattr(get_cuda_paths, '_cached_result'):
return get_cuda_paths._cached_result
else:
# Not in cache
d = {
'nvvm': _get_nvvm_path(),
'libdevice': _get_libdevice_paths(),
'cudalib_dir': _get_cudalib_dir(),
}
# Cache result
get_cuda_paths._cached_result = d
return d
|
Python
| 0.000001
|
@@ -3423,45 +3423,86 @@
-return os.path.join(sys.prefix, 'lib'
+# Use the directory name of the max path%0A return os.path.dirname(max(paths)
)%0A%0A%0A
|
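Why os.path.dirname(max(paths)) fixes win32 detection: find_lib returns full DLL paths, and on Windows the CUDA libraries sit next to the binaries rather than under sys.prefix/lib, so deriving the directory from a found library is the robust move. The paths below are made up for illustration:

import os

# Hypothetical results from find_lib('nvvm') on a Windows conda env:
paths = [
    r"C:\Miniconda3\Library\bin\nvvm64_33_0.dll",
    r"C:\Miniconda3\Library\bin\nvvm64_40_0.dll",
]
# max() keeps the lexicographically largest (newest-looking) path;
# dirname() yields the directory containing the libraries.
print(os.path.dirname(max(paths)))  # C:\Miniconda3\Library\bin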
e76777897bed5b9396d126e384555ea230b35784
|
Use StaticFileStorage to determine source directories
|
sass_processor/apps.py
|
sass_processor/apps.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.apps import apps, AppConfig
APPS_INCLUDE_DIRS = []
class SassProcessorConfig(AppConfig):
name = 'sass_processor'
verbose_name = "Sass Processor"
_static_dir = 'static'
_sass_exts = ('.scss', '.sass')
def ready(self):
app_configs = apps.get_app_configs()
for app_config in app_configs:
static_dir = os.path.join(app_config.path, self._static_dir)
if os.path.isdir(static_dir):
self.traverse_tree(static_dir)
print(APPS_INCLUDE_DIRS)
@classmethod
def traverse_tree(cls, static_dir):
"""traverse the static folders an look for at least one file ending in .scss/.sass"""
for root, dirs, files in os.walk(static_dir):
for filename in files:
basename, ext = os.path.splitext(filename)
if basename.startswith('_') and ext in cls._sass_exts:
APPS_INCLUDE_DIRS.append(static_dir)
return
|
Python
| 0.000001
|
@@ -108,16 +108,105 @@
pConfig%0A
+from django.conf import settings%0Afrom django.core.files.storage import get_storage_class%0A
%0A%0AAPPS_I
@@ -329,35 +329,8 @@
or%22%0A
- _static_dir = 'static'%0A
@@ -360,16 +360,93 @@
'.sass')
+%0A _storage = get_storage_class(import_path=settings.STATICFILES_STORAGE)()
%0A%0A de
@@ -607,24 +607,49 @@
self._st
-atic_dir
+orage.base_url.strip(os.path.sep)
)%0A
@@ -736,42 +736,8 @@
r)%0A%0A
- print(APPS_INCLUDE_DIRS)%0A%0A
|
25f12dacbd2d447ee2340aa0e18da4569bcc319e
|
disable libunwind on windows
|
scripts/pipeline_main.py
|
scripts/pipeline_main.py
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script runs in checked out llvm-project directory.
import os
from steps import generic_linux, generic_windows, from_shell_output, extend_steps_env, bazel
from sync_fork import sync_fork
import git
import yaml
steps_generators = [
'${BUILDKITE_BUILD_CHECKOUT_PATH}/libcxx/utils/ci/buildkite-pipeline-snapshot.sh',
]
if __name__ == '__main__':
scripts_refspec = os.getenv("ph_scripts_refspec", "main")
no_cache = os.getenv('ph_no_cache') is not None
log_level = os.getenv('ph_log_level', 'WARNING')
notify_emails = list(filter(None, os.getenv('ph_notify_emails', '').split(',')))
# Syncing LLVM fork so any pipelines started from upstream llvm-project#
# but then triggered a build on fork will observe the commit.
sync_fork(os.path.join(os.getenv('BUILDKITE_BUILD_PATH'), 'llvm-project-fork'), [os.getenv('BUILDKITE_BRANCH'), 'main'])
steps = []
env = {}
for e in os.environ:
if e.startswith('ph_'):
env[e] = os.getenv(e)
repo = git.Repo('.')
steps.extend(generic_linux(
os.getenv('ph_projects', 'llvm;clang;clang-tools-extra;libc;libcxx;libcxxabi;lld;libunwind;mlir;openmp;polly;flang'),
False))
# FIXME: openmp is removed as it constantly fails.
# TODO: Make this project list be evaluated through "choose_projects"(? as now we define "all" and exclusions in
# two places).
steps.extend(generic_windows(
os.getenv('ph_projects', 'llvm;clang;clang-tools-extra;libc;libcxx;libcxxabi;lld;libunwind;mlir;polly;flang')))
steps.extend(bazel([], force=True))
if os.getenv('ph_skip_generated') is None:
e = os.environ.copy()
# BUILDKITE_COMMIT might be an alias, e.g. "HEAD". Resolve it to make the build hermetic.
if ('BUILDKITE_COMMIT' not in env) or (env['BUILDKITE_COMMIT'] == "HEAD"):
env['BUILDKITE_COMMIT'] = repo.head.commit.hexsha
for gen in steps_generators:
steps.extend(from_shell_output(gen, env=e))
notify = []
for e in notify_emails:
notify.append({'email': e})
extend_steps_env(steps, env)
print(yaml.dump({'steps': steps, 'notify': notify}))
|
Python
| 0
|
@@ -2098,34 +2098,24 @@
bcxxabi;lld;
-libunwind;
mlir;polly;f
|
49b102159e47f0865f5a8790d341987b664cadf0
|
Add CLI test
|
numba/tests/test_help.py
|
numba/tests/test_help.py
|
from __future__ import print_function
import types as pytypes
import numpy as np
from numba.six.moves import builtins
from numba import types
from .support import TestCase
from numba.help.inspector import inspect_function, inspect_module
class TestInspector(TestCase):
def check_function_descriptor(self, info, must_be_defined=False):
self.assertIsInstance(info, dict)
self.assertIn('numba_type', info)
numba_type = info['numba_type']
if numba_type is None:
self.assertFalse(must_be_defined)
else:
self.assertIsInstance(numba_type, types.Type)
self.assertIn('explained', info)
self.assertIsInstance(info['explained'], str)
self.assertIn('source_infos', info)
self.assertIsInstance(info['source_infos'], dict)
def test_inspect_function_on_range(self):
info = inspect_function(range)
self.check_function_descriptor(info, must_be_defined=True)
def test_inspect_function_on_np_all(self):
info = inspect_function(np.all)
self.check_function_descriptor(info, must_be_defined=True)
source_infos = info['source_infos']
self.assertGreater(len(source_infos), 0)
c = 0
for srcinfo in source_infos.values():
self.assertIsInstance(srcinfo['kind'], str)
self.assertIsInstance(srcinfo['name'], str)
self.assertIsInstance(srcinfo['sig'], str)
self.assertIsInstance(srcinfo['filename'], str)
self.assertIsInstance(srcinfo['lines'], tuple)
self.assertIn('docstring', srcinfo)
c += 1
self.assertEqual(c, len(source_infos))
def test_inspect_module(self):
c = 0
for it in inspect_module(builtins):
self.assertIsInstance(it['module'], pytypes.ModuleType)
self.assertIsInstance(it['name'], str)
self.assertTrue(callable(it['obj']))
self.check_function_descriptor(it)
c += 1
self.assertGreater(c, 0)
|
Python
| 0
|
@@ -32,16 +32,45 @@
nction%0A%0A
+import sys%0Aimport subprocess%0A
import t
@@ -84,16 +84,31 @@
pytypes
+%0Aimport os.path
%0A%0Aimport
@@ -211,16 +211,32 @@
TestCase
+, temp_directory
%0Afrom nu
@@ -2101,12 +2101,1084 @@
eater(c, 0)%0A
+%0A def test_inspect_cli(self):%0A # Try CLI on math module%0A cmdbase = %5Bsys.executable, '-m', 'numba.help.inspector'%5D%0A%0A # Try default format %22html%22%0A dirpath = temp_directory('%7B%7D.%7B%7D'.format(__name__,%0A self.__class__.__name__))%0A filename = os.path.join(dirpath, 'out')%0A expected_file = filename + '.html'%0A cmds = cmdbase + %5B'--file', filename, 'math'%5D%0A # File shouldn't exist yet%0A self.assertFalse(os.path.isfile(expected_file))%0A # Run CLI%0A subprocess.check_output(cmds)%0A # File should exist now%0A self.assertTrue(os.path.isfile(expected_file))%0A%0A # Try changing the format to %22rst%22%0A cmds = cmdbase + %5B'--file', filename, '--format', 'rst', 'math'%5D%0A expected_file = filename + '.rst'%0A # File shouldn't exist yet%0A self.assertFalse(os.path.isfile(expected_file))%0A # Run CLI%0A subprocess.check_output(cmds)%0A # File should exist now%0A self.assertTrue(os.path.isfile(expected_file))%0A
|
81622074d2d7544b897cec196257b130904f06b7
|
Comment about JSON
|
firefox/src/py/extensionconnection.py
|
firefox/src/py/extensionconnection.py
|
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Communication with the firefox extension."""
import logging
import socket
import time
try:
import json
except ImportError: # Python < 2.6
import simplejson as json
# FIXME: What is this?
if not hasattr(json, 'dumps'):
import simplejson as json
from selenium.remote.command import Command
from selenium.remote.remote_connection import RemoteConnection
_DEFAULT_TIMEOUT = 20
_DEFAULT_PORT = 7055
LOGGER = logging.getLogger("webdriver.ExtensionConnection")
class ExtensionConnection(RemoteConnection):
"""This class maintains a connection to the firefox extension.
"""
def __init__(self, timeout=_DEFAULT_TIMEOUT):
RemoteConnection.__init__(
self, "http://localhost:%d/hub" % _DEFAULT_PORT)
LOGGER.debug("extension connection initiated")
self.timeout = timeout
def quit(self, sessionId=None):
self.execute(Command.QUIT, {'sessionId':sessionId})
while self.is_connectable():
logging.info("waiting to quit")
time.sleep(1)
def connect(self):
"""Connects to the extension and retrieves the session id."""
return self.execute(Command.NEW_SESSION, {'desiredCapabilities':{
'browserName': 'firefox',
'platform': 'ANY',
'version': '',
'javascriptEnabled': True}})
def connect_and_quit(self):
"""Connects to an running browser and quit immediately."""
self._request('%s/extensions/firefox/quit' % self._url)
def is_connectable(self):
"""Trys to connect to the extension but do not retrieve context."""
try:
socket_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_.settimeout(1)
socket_.connect(("localhost", _DEFAULT_PORT))
socket_.close()
return True
except socket.error:
return False
class ExtensionConnectionError(Exception):
"""An internal error occurred int the extension.
Might be caused by bad input or bugs in webdriver
"""
pass
|
Python
| 0.000001
|
@@ -800,28 +800,80 @@
%0A%0A#
-FIXME: What is this?
+Some old JSON libraries don't have %22dumps%22, make sure we have a good one
%0Aif
|
b667bb6b463c8049fcc67d54d02ffbba2094823f
|
Fix test
|
numba/tests/test_help.py
|
numba/tests/test_help.py
|
from __future__ import print_function
import sys
import subprocess
import types as pytypes
import os.path
import numpy as np
from numba.six.moves import builtins
from numba import types, utils
from .support import TestCase, temp_directory
from numba.help.inspector import inspect_function, inspect_module
class TestInspector(TestCase):
def check_function_descriptor(self, info, must_be_defined=False):
self.assertIsInstance(info, dict)
self.assertIn('numba_type', info)
numba_type = info['numba_type']
if numba_type is None:
self.assertFalse(must_be_defined)
else:
self.assertIsInstance(numba_type, types.Type)
self.assertIn('explained', info)
self.assertIsInstance(info['explained'], str)
self.assertIn('source_infos', info)
self.assertIsInstance(info['source_infos'], dict)
def test_inspect_function_on_range(self):
info = inspect_function(range)
self.check_function_descriptor(info, must_be_defined=True)
def test_inspect_function_on_np_all(self):
info = inspect_function(np.all)
self.check_function_descriptor(info, must_be_defined=True)
source_infos = info['source_infos']
self.assertGreater(len(source_infos), 0)
c = 0
for srcinfo in source_infos.values():
self.assertIsInstance(srcinfo['kind'], str)
self.assertIsInstance(srcinfo['name'], str)
self.assertIsInstance(srcinfo['sig'], str)
self.assertIsInstance(srcinfo['filename'], str)
self.assertIsInstance(srcinfo['lines'], tuple)
self.assertIn('docstring', srcinfo)
c += 1
self.assertEqual(c, len(source_infos))
def test_inspect_module(self):
c = 0
for it in inspect_module(builtins):
self.assertIsInstance(it['module'], pytypes.ModuleType)
self.assertIsInstance(it['name'], str)
self.assertTrue(callable(it['obj']))
self.check_function_descriptor(it)
c += 1
self.assertGreater(c, 0)
def test_inspect_cli(self):
# Try CLI on math module
cmdbase = [sys.executable, '-m', 'numba.help.inspector']
dirpath = temp_directory('{}.{}'.format(__name__,
self.__class__.__name__))
filename = os.path.join(dirpath, 'out')
# Try default format "html"
if utils.IS_PY3:
expected_file = filename + '.html'
cmds = cmdbase + ['--file', filename, 'math']
# File shouldn't exist yet
self.assertFalse(os.path.isfile(expected_file))
# Run CLI
subprocess.check_output(cmds)
# File should exist now
self.assertTrue(os.path.isfile(expected_file))
# Try changing the format to "rst"
cmds = cmdbase + ['--file', filename, '--format', 'rst', 'math']
expected_file = filename + '.rst'
# File shouldn't exist yet
self.assertFalse(os.path.isfile(expected_file))
# Run CLI
subprocess.check_output(cmds)
# File should exist now
self.assertTrue(os.path.isfile(expected_file))
# Try unsupported format
cmds = cmdbase + ['--file', filename, '--format', 'foo', 'math']
# Run CLI
with self.assertRaises(subprocess.CalledProcessError) as raises:
subprocess.check_output(cmds, stderr=subprocess.STDOUT)
if utils.IS_PY3:
# No .stdout in CalledProcessError in python<3
self.assertIn("foo is not supported", str(raises.exception.stdout))
|
Python
| 0.000004
|
@@ -3619,19 +3619,23 @@
sertIn(%22
+%5C'
foo
+%5C'
is not
@@ -3649,13 +3649,35 @@
ed%22,
- str(
+%0A
rais
@@ -3695,11 +3695,26 @@
n.stdout
+.decode('ascii'
))%0A
|
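The fix above hinges on subprocess output being bytes: on Python 3.5+, CalledProcessError.stdout is a bytes object, so the substring check only works after .decode(). A minimal standalone reproduction (the child script is a stand-in for the inspector CLI):

import subprocess
import sys

# Child that prints a message and fails, like the CLI does for an
# unsupported format:
code = "import sys; print(\"'foo' is not supported\"); sys.exit(1)"
try:
    subprocess.check_output([sys.executable, '-c', code],
                            stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
    assert isinstance(exc.stdout, bytes)
    assert "'foo' is not supported" in exc.stdout.decode('ascii')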
b6b514d385e8e18d03b939cf5fae9873c9f02a21
|
add constraint for price_list_ite
|
netforce_product/netforce_product/models/price_list_item.py
|
netforce_product/netforce_product/models/price_list_item.py
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields
class PriceListItem(Model):
_name = "price.list.item"
_string = "Price List Item"
_key = ["list_id","product_id","price"]
_fields = {
"list_id": fields.Many2One("price.list", "Price List", required=True, on_delete="cascade", search=True),
"type": fields.Selection([["sale", "Sales"], ["purchase", "Purchasing"]], "Type", function="_get_related", function_context={"path": "list_id.type"}, search=True),
"currency_id": fields.Many2One("currency", "Currency", function="_get_related", function_context={"path": "list_id.currency_id"}, search=True),
"product_id": fields.Many2One("product", "Product", required=True, search=True, on_delete="cascade"),
"price": fields.Decimal("Price", required=True, scale=6),
"discount_percent": fields.Decimal("Discount %"),
"min_qty": fields.Decimal("Min Qty"),
"max_qty": fields.Decimal("Max Qty"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"discount_text": fields.Char("Discount Text"),
}
PriceListItem.register()
|
Python
| 0.000005
|
@@ -2190,16 +2190,158 @@
%0A %7D%0A%0A
+ _sql_constraints = %5B%0A (%22key_uniq%22, %22unique (list_id,product_id,price)%22, %22The price list, product and type must be unique!%22)%0A %5D%0A%0A
PriceLis
|
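The _sql_constraints entry added above boils down to a database-level UNIQUE constraint. A sketch of the same guarantee in plain SQL, using sqlite3 for portability (an assumption; Netforce itself runs on PostgreSQL, where the behavior is the same):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("""
    CREATE TABLE price_list_item (
        list_id INTEGER, product_id INTEGER, price NUMERIC,
        UNIQUE (list_id, product_id, price)
    )
""")
conn.execute("INSERT INTO price_list_item VALUES (1, 10, 9.99)")
try:
    conn.execute("INSERT INTO price_list_item VALUES (1, 10, 9.99)")
except sqlite3.IntegrityError as exc:
    print("duplicate rejected:", exc)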
699524032f9bbcae410637f66b762fb21b92d796
|
Use Crypto's pad function
|
CanvasSync/settings/cryptography.py
|
CanvasSync/settings/cryptography.py
|
"""
CanvasSync by Mathias Perslev
February 2017
--------------------------------------------
cryptography.py, module
Functions used to encrypt and decrypt the settings stored in the .CanvasSync.settings file. When the user has specified
settings, the string of information is encrypted using the AES 256 module of the PyCrypto library. A password is
specified by the user upon creation of the settings file. A hashed (thus unreadable) version of the password is stored
locally in the .ps.sync file in the home folder of the user. Upon launch of CanvasSync, the user must specify
a password that matches the one stored in the hashed version. If the password is correct, the settings file is
decrypted and parsed for settings.
"""
# Future imports
from __future__ import print_function
# Inbuilt modules
import getpass
import os.path
import sys
# Third party modules
import bcrypt
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
def get_key_hash(password):
""" Get a 256 byte SHA hash from any length password """
hasher = SHA256.new(password.encode(u"utf-8"))
return hasher.digest()
def encrypt(message):
"""
Encrypts a string using AES-256 (CBC) encryption
A random initialization vector (IV) is padded as the initial 16 bytes of the string
The encrypted message will be padded to length%16 = 0 bytes (AES needs 16 bytes block sizes)
"""
print(u"\nPlease enter a password to encrypt the settings file:")
hashed_password = bcrypt.hashpw(getpass.getpass(), bcrypt.gensalt())
with open(os.path.expanduser(u"~") + u"/.CanvasSync.pw", "w") as pass_file:
pass_file.write(hashed_password)
# Generate random 16 bytes IV
IV = os.urandom(16)
# AES object
encrypter = AES.new(get_key_hash(hashed_password), AES.MODE_CBC, IV)
# Padding to 16 bytes
if len(message) % 16 != 0:
message += " " * (16 - (len(message) % 16))
# Add the unencrypted IV to the beginning of the encrypted_message
encrypted_message = IV + encrypter.encrypt(message.encode("utf-8"))
return encrypted_message
def decrypt(message, password):
"""
Decrypts an AES encrypted string
"""
# Load the locally stored bcrypt hashed password (answer)
path = os.path.expanduser(u"~") + u"/.CanvasSync.pw"
if not os.path.exists(path):
return False
with open(path, "r") as pw_file:
hashed_password = pw_file.read()
# Get password from user and compare to answer
valid_password = False
# If the password isn't null then it was specified as a command-line argument
if password:
if bcrypt.hashpw(password, hashed_password) != hashed_password:
print(u"\n[ERROR] Invalid password. Please try again or invoke CanvasSync with the -s flag to reset settings.")
sys.exit()
else:
# Otherwise, get the password from the user
while not valid_password:
print(u"\nPlease enter password to decrypt the settings file:")
password = getpass.getpass()
if bcrypt.hashpw(password, hashed_password) == hashed_password:
valid_password = True
else:
print(u"\n[ERROR] Invalid password. Please try again or invoke CanvasSync with the -s flag to reset settings.")
# Read the remote IV
remoteIV = message[:16]
# Decrypt message using the correct password
decrypter = AES.new(get_key_hash(hashed_password), AES.MODE_CBC, remoteIV)
decrypted_message = decrypter.decrypt(message[16:])
return decrypted_message.rstrip()
|
Python
| 0.000015
|
@@ -941,16 +941,52 @@
SHA256%0A
+from Crypto.Util.Padding import pad%0A
%0A%0Adef ge
@@ -1844,118 +1844,8 @@
V)%0A%0A
- # Padding to 16 bytes%0A if len(message) %25 16 != 0:%0A message += %22 %22 * (16 - (len(message) %25 16))%0A%0A
@@ -1954,24 +1954,28 @@
ter.encrypt(
+pad(
message.enco
@@ -1985,16 +1985,21 @@
%22utf-8%22)
+, 16)
)%0A%0A r
|
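What the switch to Crypto.Util.Padding.pad buys: the old manual space-padding is lossy, since trailing spaces in the plaintext are indistinguishable from padding (note the rstrip() in decrypt above), whereas PKCS#7 padding round-trips exactly. A minimal sketch with PyCryptodome, which provides the module the diff imports:

from Crypto.Util.Padding import pad, unpad

message = b"settings ending in spaces   "
padded = pad(message, 16)              # PKCS#7, multiple of 16 bytes
assert len(padded) % 16 == 0
assert unpad(padded, 16) == message    # trailing spaces survive

# Space-padding, by contrast, destroys them:
space_padded = message + b" " * (16 - len(message) % 16)
assert space_padded.rstrip() != message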
47a83c283147a43b6b543e2305d31fae5fd15ac3
|
Add docker environment variable consumption
|
docker/settings_import.py
|
docker/settings_import.py
|
#!/usr/bin/python
from os import getenv
import locale
from system_settings import *
# Read the DEBUG setting from env var
try:
if getenv('DOCKER_SAL_DEBUG').lower() == 'true':
DEBUG = True
else:
DEBUG = False
except:
DEBUG = False
# Read the Brute force protection setting from env var
try:
if getenv('DOCKER_SAL_BRUTE_PROTECT').lower() == 'true':
BRUTE_PROTECT = True
else:
BRUTE_PROTECT = False
except:
BRUTE_PROTECT = False
# Read the Brute force protection timeout setting from env var
try:
BRUTE_COOLOFF = int(getenv('DOCKER_SAL_BRUTE_COOLOFF'))
except:
BRUTE_COOLOFF = 3
# Read the BASIC_AUTH setting from env var
try:
if getenv('DOCKER_SAL_BASIC_AUTH').lower() == 'true':
BASIC_AUTH = True
else:
BASIC_AUTH = False
except:
BASIC_AUTH = True
# Read the Brute force protection limit setting from env var
try:
BRUTE_LIMIT = int(getenv('DOCKER_SAL_BRUTE_LIMIT'))
except:
BRUTE_LIMIT = 3
# Read list of admins from $DOCKER_SAL_ADMINS env var
admin_list = []
if getenv('DOCKER_SAL_ADMINS'):
admins_var = getenv('DOCKER_SAL_ADMINS')
if ',' in admins_var and ':' in admins_var:
for admin in admins_var.split(':'):
admin_list.append(tuple(admin.split(',')))
ADMINS = tuple(admin_list)
elif ',' in admins_var:
admin_list.append(tuple(admins_var.split(',')))
ADMINS = tuple(admin_list)
else:
ADMINS = []
# Read the preferred time zone from $DOCKER_SAL_TZ, use system locale or
# set to 'America/New_York' if neither are set
if getenv('DOCKER_SAL_TZ'):
if '/' in getenv('DOCKER_SAL_TZ'):
TIME_ZONE = getenv('DOCKER_SAL_TZ')
else: TIME_ZONE = 'Europe/London'
# elif getenv('TZ'):
# TIME_ZONE = getenv('TZ')
# else:
# TIME_ZONE = 'America/New_York'
# Read the preferred language code from $DOCKER_SAL_LANG, use system locale or
# set to 'en_US' if neither are set
if getenv('DOCKER_SAL_LANG'):
if '_' in getenv('DOCKER_SAL_LANG'):
LANGUAGE_CODE = getenv('DOCKER_SAL_LANG')
else:
LANGUAGE_CODE = 'en_US'
# elif locale.getdefaultlocale():
# LANGUAGE_CODE = locale.getdefaultlocale()[0]
else:
LANGUAGE_CODE = 'en_US'
# Read the list of allowed hosts from the $DOCKER_SAL_ALLOWED env var, or
# allow all hosts if none was set.
if getenv('DOCKER_SAL_ALLOWED'):
ALLOWED_HOSTS = getenv('DOCKER_SAL_ALLOWED').split(',')
else:
ALLOWED_HOSTS = ['*']
# Set the display name from the $DOCKER_SAL_DISPLAY_NAME env var, or
# use the default
if getenv('DOCKER_SAL_DISPLAY_NAME'):
DISPLAY_NAME = getenv('DOCKER_SAL_DISPLAY_NAME')
else:
DISPLAY_NAME = 'Sal'
# Set the default machine group key from the $DOCKER_SAL_DEFAULT_MACHINE_GROUP_KEY env var, or
# use the default (unassigned)
if getenv('DOCKER_SAL_DEFAULT_MACHINE_GROUP_KEY'):
DEFAULT_MACHINE_GROUP_KEY = getenv('DOCKER_SAL_DEFAULT_MACHINE_GROUP_KEY')
else:
DEFAULT_MACHINE_GROUP_KEY = None
try:
if getenv('DOCKER_SAL_ADD_TO_ALL_BUSINESS_UNITS').lower() == 'true':
ADD_TO_ALL_BUSINESS_UNITS = True
else:
ADD_TO_ALL_BUSINESS_UNITS = False
except:
ADD_TO_ALL_BUSINESS_UNITS = False
if BRUTE_PROTECT == True:
###############
# Max number of login attemts within the ``AXES_COOLOFF_TIME``
AXES_LOGIN_FAILURE_LIMIT = BRUTE_LIMIT
AXES_COOLOFF_TIME=BRUTE_COOLOFF
|
Python
| 0.000001
|
@@ -3365,8 +3365,223 @@
COOLOFF%0A
+%0A%0A# Read the SSH_ACCOUNT setting from env var%0Atry:%0A if getenv('DOCKER_SAL_SSH_ACCOUNT'):%0A SSH_ACCOUNT = getenv('DOCKER_SAL_SSH_ACCOUNT')%0A else:%0A SSH_ACCOUNT = None%0Aexcept:%0A SSH_ACCOUNT = None%0A
|
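Most blocks in the file above repeat the same try/except dance around getenv(...).lower() == 'true'. A hedged refactoring sketch, not what the Sal image actually ships, that collapses the pattern into one helper:

from os import getenv

def env_bool(name, default=False):
    # getenv() returns None when the variable is unset; calling
    # .lower() on None is the AttributeError the original code
    # catches with its bare except clauses.
    value = getenv(name)
    if value is None:
        return default
    return value.lower() == 'true'

DEBUG = env_bool('DOCKER_SAL_DEBUG')
BRUTE_PROTECT = env_bool('DOCKER_SAL_BRUTE_PROTECT')
BASIC_AUTH = env_bool('DOCKER_SAL_BASIC_AUTH', default=True)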
577c0bff1e7333fe0f0fd5e45ce7c7cf19710605
|
Fix migration [WAL-904]
|
nodeconductor/structure/migrations/0052_customer_subnets.py
|
nodeconductor/structure/migrations/0052_customer_subnets.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-07 08:52
from __future__ import unicode_literals
from django.db import migrations, models
import nodeconductor.core.validators
class Migration(migrations.Migration):
dependencies = [
('structure', '0051_add_customer_email_phone_agreement_number'),
]
operations = [
migrations.AddField(
model_name='customer',
name='access_subnets',
field=models.TextField(blank=True, default='', validators=[nodeconductor.core.validators.validate_cidr_list], help_text='Enter a comma separated list of IPv4 or IPv6 subnets from which connection to self-service is allowed.'),
),
]
|
Python
| 0
|
@@ -624,22 +624,29 @@
or IPv6
-subnet
+CIDR addresse
s from w
|
324c8572ae0207774a2e959b435525979ead2ebe
|
allow partial-dollar donations
|
batch.py
|
batch.py
|
from datetime import datetime, timedelta
import json
import redis
import celery
from emails import send_email
from pytz import timezone
import requests
import stripe
from salesforce import SalesforceConnection
from config import STRIPE_KEYS
from config import ACCOUNTING_MAIL_RECIPIENT
from config import TIMEZONE
from config import REDIS_URL
zone = timezone(TIMEZONE)
stripe.api_key = STRIPE_KEYS['secret_key']
class Log(object):
"""
This encapsulates sending to the console/stdout and email all in one.
"""
def __init__(self):
self.log = list()
def it(self, string):
"""
Add something to the log.
"""
print(string)
self.log.append(string)
def send(self):
"""
Send the assembled log out as an email.
"""
body = '\n'.join(self.log)
recipient = ACCOUNTING_MAIL_RECIPIENT
subject = 'Batch run'
send_email(body=body, recipient=recipient, subject=subject)
def amount_to_charge(entry):
"""
Determine the amount to charge. This depends on whether the payer agreed
to pay fees or not. If they did, we add the fees to the amount charged.
Stripe charges 2.9% + $0.30.
Stripe wants the amount to charge in cents. So we multiply by 100 and
return that.
"""
amount = int(entry['Amount'])
if entry['Stripe_Agreed_to_pay_fees__c']:
fees = amount * .029 + .30
else:
fees = 0
total = amount + fees
total_in_cents = total * 100
return int(total_in_cents)
def process_charges(query, log):
sf = SalesforceConnection()
response = sf.query(query)
# TODO: check response code
log.it('Found {} opportunities available to process.'.format(
len(response)))
for item in response:
amount = amount_to_charge(item)
try:
log.it('---- Charging ${} to {} ({})'.format(amount / 100,
item['Stripe_Customer_ID__c'],
item['Name']))
charge = stripe.Charge.create(
customer=item['Stripe_Customer_ID__c'],
amount=amount,
currency='usd',
description=item['Description'],
)
except stripe.error.CardError as e:
# look for decline code:
error = e.json_body['error']
log.it('The card has been declined:')
log.it('\tStatus: {}'.format(e.http_status))
log.it('\tType: {}'.format(error.get('type', '')))
log.it('\tCode: {}'.format(error.get('code', '')))
log.it('\tParam: {}'.format(error.get('param', '')))
log.it('\tMessage: {}'.format(error.get('message', '')))
log.it('\tDecline code: {}'.format(error.get('decline_code', '')))
continue
except stripe.error.InvalidRequestError as e:
log.it('Problem: {}'.format(e))
continue
except Exception as e:
log.it('Problem: {}'.format(e))
continue
if charge.status != 'succeeded':
log.it('Charge failed. Check Stripe logs.')
continue
update = {
'Stripe_Transaction_Id__c': charge.id,
'Stripe_Card__c': charge.source.id,
'StageName': 'Closed Won',
}
path = item['attributes']['url']
sf.patch(path=path, data=update)
log.it('ok')
class AlreadyExecuting(Exception):
"""
Here to show when more than one job of the same type is running.
"""
pass
class Lock(object):
"""
Claim an exclusive lock. Using Redis.
"""
def __init__(self, key):
self.key = key
self.connection = redis.from_url(REDIS_URL)
def acquire(self):
if self.connection.get(self.key):
raise AlreadyExecuting
self.connection.setex(name=self.key, value='bar', time=1200)
def release(self):
self.connection.delete(self.key)
@celery.task()
def charge_cards():
lock = Lock(key='charge-cards-lock')
lock.acquire()
log = Log()
log.it('---Starting batch job...')
three_days_ago = (datetime.now(tz=zone) - timedelta(
days=3)).strftime('%Y-%m-%d')
today = datetime.now(tz=zone).strftime('%Y-%m-%d')
# regular (non Circle) pledges:
log.it('---Processing regular charges...')
query = """
SELECT Amount, Name, Stripe_Customer_Id__c, Description,
Stripe_Agreed_to_pay_fees__c
FROM Opportunity
WHERE CloseDate <= {}
AND CloseDate >= {}
AND StageName = 'Pledged'
AND Stripe_Customer_Id__c != ''
AND Type != 'Giving Circle'
""".format(today, three_days_ago)
process_charges(query, log)
#
# Circle transactions are different from the others. The Close Dates for a
# given Circle donation are all identical. That's so that the gift can be
# recognized all at once on the donor wall. So we use another field to
# determine when the card is actually charged: Expected_Giving_Date__c.
# So we process charges separately for Circles.
#
log.it('---Processing Circle charges...')
query = """
SELECT Amount, Name, Stripe_Customer_Id__c, Description,
Stripe_Agreed_to_pay_fees__c
FROM Opportunity
WHERE Expected_Giving_Date__c <= {}
AND Expected_Giving_Date__c >= {}
AND StageName = 'Pledged'
AND Stripe_Customer_Id__c != ''
AND Type = 'Giving Circle'
""".format(today, three_days_ago)
process_charges(query, log)
log.send()
lock.release()
if __name__ == '__main__':
charge_cards()
|
Python
| 0.000001
|
@@ -1319,18 +1319,20 @@
mount =
-in
+floa
t(entry%5B
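The one-line change above switches the donation amount from int to float so partial-dollar amounts survive; a sketch of the patched helper:

def amount_to_charge(entry):
    # float() instead of int() keeps fractional dollars in the amount
    amount = float(entry['Amount'])
    if entry['Stripe_Agreed_to_pay_fees__c']:
        fees = amount * .029 + .30
    else:
        fees = 0
    total = amount + fees
    # Stripe expects an integer number of cents
    return int(total * 100)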
|
f9a02492ca8f902ca349e60ce42dee4cadbd35c0
|
Make run under Python 2.4.
|
include/HFacer.py
|
include/HFacer.py
|
# HFacer.py - regenerate the Scintilla.h and SciLexer.h files from the Scintilla.iface interface
# definition file.
# The header files are copied to a temporary file apart from the section between a //++Autogenerated
# comment and a //--Autogenerated comment which is generated by the printHFile and printLexHFile
# functions. After the temporary file is created, it is copied back to the original file name.
import string
import sys
import os
import Face
def Contains(s,sub):
return string.find(s, sub) != -1
def printLexHFile(f,out):
for name in f.order:
v = f.features[name]
if v["FeatureType"] in ["val"]:
if Contains(name, "SCE_") or Contains(name, "SCLEX_"):
out.write("#define " + name + " " + v["Value"] + "\n")
def printHFile(f,out):
for name in f.order:
v = f.features[name]
if v["Category"] != "Deprecated":
if v["FeatureType"] in ["fun", "get", "set"]:
featureDefineName = "SCI_" + string.upper(name)
out.write("#define " + featureDefineName + " " + v["Value"] + "\n")
elif v["FeatureType"] in ["evt"]:
featureDefineName = "SCN_" + string.upper(name)
out.write("#define " + featureDefineName + " " + v["Value"] + "\n")
elif v["FeatureType"] in ["val"]:
if not (Contains(name, "SCE_") or Contains(name, "SCLEX_")):
out.write("#define " + name + " " + v["Value"] + "\n")
def CopyWithInsertion(input, output, genfn, definition):
copying = 1
for line in input.readlines():
if copying:
output.write(line)
if Contains(line, "//++Autogenerated"):
copying = 0
genfn(definition, output)
if Contains(line, "//--Autogenerated"):
copying = 1
output.write(line)
def contents(filename):
f = file(filename)
t = f.read()
f.close()
return t
def Regenerate(filename, genfn, definition):
inText = contents(filename)
tempname = "HFacer.tmp"
out = open(tempname,"w")
hfile = open(filename)
CopyWithInsertion(hfile, out, genfn, definition)
out.close()
hfile.close()
outText = contents(tempname)
if inText == outText:
os.unlink(tempname)
else:
os.unlink(filename)
os.rename(tempname, filename)
f = Face.Face()
try:
f.ReadFromFile("Scintilla.iface")
Regenerate("Scintilla.h", printHFile, f)
Regenerate("SciLexer.h", printLexHFile, f)
print "Maximum ID is", max(x for x in f.values if int(x) < 3000)
except:
raise
|
Python
| 0.000001
|
@@ -2249,16 +2249,17 @@
s%22, max(
+%5B
x for x
@@ -2286,16 +2286,17 @@
) %3C 3000
+%5D
)%0Aexcept
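Per the commit message, the patch wraps the generator expression in brackets so it becomes a plain list comprehension that the older interpreter accepts; the patched line is roughly:

# list comprehension instead of a bare generator expression
print "Maximum ID is", max([x for x in f.values if int(x) < 3000])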
|
dea7ffe79e674315ff9b1f69f44b3c8b725697a0
|
use str instead of basestring
|
corehq/apps/hqadmin/management/commands/make_supervisor_conf.py
|
corehq/apps/hqadmin/management/commands/make_supervisor_conf.py
|
import json
import os
import sys
from django.core.management.base import BaseCommand
from django.conf import settings
from django.template import Context, Template
def parse_params(option, opt, value, parser):
try:
args_dict = json.loads(value)
except ValueError:
print "argument error, %s should be valid JSON" % value
setattr(parser.values, option.dest, args_dict)
class SupervisorConfCommand(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--conf_file',
help='Config template file to use',
default=False,
)
parser.add_argument(
'--conf_destination',
help='Rendered supervisor configuration file path destination',
default=None,
)
parser.add_argument(
'--params',
type=basestring,
# action='callback',
# callback=parse_params,
dest='params',
default=None,
help='template parameters as JSON data',
)
def render_configuration_file(self, conf_template_string, params):
return Template(conf_template_string).render(Context(params))
def handle(self, **options):
self.conf_file_template = options['conf_file']
self.conf_dest = options['conf_destination']
self.params = options['params'] or {}
if self.params:
self.params = json.loads(self.params)
service_dir = settings.SERVICE_DIR
conf_template_fullpath = os.path.join(service_dir, self.conf_file_template)
if not os.path.isfile(conf_template_fullpath):
sys.exit("[make_supervisor_conf] Error: file %s does not exist as a template to use - you're doing something wrong" % conf_template_fullpath) #needs to be in source control moron!
if not os.path.exists(self.conf_dest):
sys.exit("[make_supervisor_confs] Error: the destination path %s is not reachable by this process" % self.conf_dest)
conf_template_string = None
with open(conf_template_fullpath, 'r') as fin:
conf_template_string = fin.read()
dest_filepath = os.path.join(self.conf_dest, '%s_%s' % (settings.SERVER_ENVIRONMENT, self.conf_file_template))
rendered_conf = self.render_configuration_file(conf_template_string, self.params)
self.write_configuration_file(dest_filepath, rendered_conf)
def write_configuration_file(self, destination_fullpath, rendered_configuration):
with open(destination_fullpath, 'w') as fout:
fout.write(rendered_configuration)
print "\t[make_supervisor_conf] Wrote supervisor configuration: %s" % destination_fullpath
class Command(SupervisorConfCommand):
help = "Make a supervisord conf file to deposit into a services path that supervisord knows about"
|
Python
| 0.000035
|
@@ -864,18 +864,11 @@
ype=
-basestring
+str
,%0A
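After the patch the --params option is declared with the built-in str, which exists on both Python 2 and 3 (basestring was removed in Python 3); a sketch of the patched declaration:

parser.add_argument(
    '--params',
    type=str,  # was basestring, which Python 3 no longer defines
    dest='params',
    default=None,
    help='template parameters as JSON data',
)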
|
4eb7bce888aef1a7eb5501c03f63e0818c0a79ee
|
Disable trailing slash
|
kytos/core/api_server.py
|
kytos/core/api_server.py
|
"""Module used to handle a API Server."""
import logging
import os
import sys
import warnings
from urllib.error import URLError
from urllib.request import urlopen
from flask import Flask, request, send_from_directory
from flask_socketio import SocketIO, join_room, leave_room
class APIServer:
"""Api server used to provide Kytos Controller routes."""
#: tuple: Default Flask HTTP methods.
DEFAULT_METHODS = ('GET',)
_NAPP_PREFIX = "/api/{napp.username}/{napp.name}/"
_CORE_PREFIX = "/api/kytos/core/"
def __init__(self, app_name, listen='0.0.0.0', port=8181):
"""Start a Flask+SocketIO server.
Args:
app_name(string): String representing a App Name
listen (string): host name used by api server instance
port (int): Port number used by api server instance
"""
dirname = os.path.dirname(os.path.abspath(__file__))
self.flask_dir = os.path.join(dirname, '../web-ui')
self.log = logging.getLogger('api_server')
self.listen = listen
self.port = port
self.app = Flask(app_name, root_path=self.flask_dir)
self.server = SocketIO(self.app, async_mode='threading')
self._enable_websocket_rooms()
def _enable_websocket_rooms(self):
socket = self.server
socket.on_event('join', join_room)
socket.on_event('leave', leave_room)
def run(self):
"""Run the Flask API Server."""
try:
self.server.run(self.app, self.listen, self.port)
except OSError as exception:
msg = "Couldn't start API Server: {}".format(exception)
self.log.critical(msg)
sys.exit(msg)
def register_rest_endpoint(self, url, function, methods):
"""Deprecate in favor of @rest decorator."""
warnings.warn("From now on, use @rest decorator.", DeprecationWarning,
stacklevel=2)
if url.startswith('/'):
url = url[1:]
self._start_endpoint(f'/kytos/{url}', function, methods=methods)
def start_api(self):
"""Start this APIServer instance API.
Start /api/kytos/core/shutdown/ and status/ endpoints, web UI.
"""
self.register_core_endpoint('shutdown/', self.shutdown_api)
self.register_core_endpoint('status/', self.status_api)
self._register_web_ui()
def register_core_endpoint(self, rule, function):
"""Register an endpoint with the URL /api/kytos/core/<rule>.
Not used by NApps, but controller.
"""
self._start_endpoint(self._CORE_PREFIX + rule, function)
def _register_web_ui(self):
"""Register routes to the admin-ui homepage."""
self.app.add_url_rule('/', self.web_ui.__name__, self.web_ui)
self.app.add_url_rule('/index.html', self.web_ui.__name__, self.web_ui)
@staticmethod
def status_api():
"""Display kytos status using the route ``/kytos/status/``."""
return '{"response": "running"}', 201
def stop_api_server(self):
"""Send a shutdown request to stop Api Server."""
try:
url = f'http://127.0.0.1:{self.port}/api/kytos/core/shutdown'
urlopen(url)
except URLError:
pass
def shutdown_api(self):
"""Handle shutdown requests received by Api Server.
This method must be called by kytos using the method
stop_api_server, otherwise this request will be ignored.
"""
allowed_host = ['127.0.0.1:'+str(self.port),
'localhost:'+str(self.port)]
if request.host not in allowed_host:
return "", 403
self.server.stop()
return 'Server shutting down...', 200
def web_ui(self):
"""Serve the index.html page for the admin-ui."""
return send_from_directory(self.flask_dir, 'index.html')
# BEGIN decorator methods
@staticmethod
def decorate_as_endpoint(rule, **options):
"""Decorate methods as REST endpoints.
Example for URL ``/api/myusername/mynapp/sayhello/World``:
.. code-block:: python3
from flask.json import jsonify
from kytos.core.napps import rest
@rest('sayhello/<string:name>')
def say_hello(name):
return jsonify({"data": f"Hello, {name}!"})
``@rest`` parameters are the same as Flask's ``@app.route``. You can
also add ``methods=['POST']``, for example.
As we don't have the NApp instance now, we store the parameters in a
method attribute in order to add the route later, after we have both
APIServer and NApp instances.
"""
def store_route_params(function):
"""Store ``Flask`` ``@route`` parameters in a method attribute.
There can be many @route decorators in a single function.
"""
# To support any order: @classmethod, @rest or @rest, @classmethod
# class and static decorators return a descriptor with the function
# in __func__.
if isinstance(function, (classmethod, staticmethod)):
inner = function.__func__
else:
inner = function
# Add route parameters
if not hasattr(inner, 'route_params'):
inner.route_params = []
inner.route_params.append((rule, options))
# Return the same function, now with "route_params" attribute
return function
return store_route_params
def register_napp_endpoints(self, napp):
"""Add all NApp REST endpoints with @rest decorator.
URLs will be prefixed with ``/api/{username}/{napp_name}/``.
"""
for function in self._get_decorated_functions(napp):
for rule, options in function.route_params:
absolute_rule = self.get_absolute_rule(rule, napp)
self._start_endpoint(absolute_rule, function, **options)
@staticmethod
def _get_decorated_functions(napp):
"""Return ``napp``'s methods having the @rest decorator."""
for name in dir(napp):
if not name.startswith('_'): # discarding private names
pub_attr = getattr(napp, name)
if callable(pub_attr) and hasattr(pub_attr, 'route_params'):
yield pub_attr
@classmethod
def get_absolute_rule(cls, rule, napp):
"""Prefix the rule, e.g. "flow" to "/api/user/napp/flow".
This code is used by kytos-utils when generating an OpenAPI skel.
"""
# Flask does require 2 slashes if specified, so we remove a starting
# slash if applicable.
relative_rule = rule[1:] if rule.startswith('/') else rule
return cls._NAPP_PREFIX.format(napp=napp) + relative_rule
# END decorator methods
def _start_endpoint(self, rule, function, **options):
"""Start ``function``'s endpoint.
Forward parameters to ``Flask.add_url_rule`` mimicking Flask
``@route`` decorator.
"""
endpoint = options.pop('endpoint', None)
self.app.add_url_rule(rule, endpoint, function, **options)
self.log.info('Started %s - %s', rule,
', '.join(options.get('methods', self.DEFAULT_METHODS)))
|
Python
| 0
|
@@ -1233,24 +1233,106 @@
et_rooms()%0A%0A
+ # Disable trailing slash%0A self.app.url_map.strict_slashes = False%0A%0A
def _ena
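The diff drops two lines into __init__ right after _enable_websocket_rooms(), so registered routes match regardless of a trailing slash; the patched constructor tail, as a sketch:

self.server = SocketIO(self.app, async_mode='threading')
self._enable_websocket_rooms()

# Disable trailing slash
self.app.url_map.strict_slashes = False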
|
4ed76f0edd63c539d19a193ff42c40bb0df521ad
|
delete debug log with collect data in validation_strategy
|
federatedml/util/validation_strategy.py
|
federatedml/util/validation_strategy.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
from arch.api.utils import log_utils
from federatedml.util import consts
from federatedml.evaluation.evaluation import Evaluation
LOGGER = log_utils.getLogger()
class ValidationStrategy(object):
"""
    This module is used for evaluating the performance of the model during the training process.
    It will be called only in the fit process of models.
Attributes
----------
    validation_freqs: None or positive integer or container object in python. Do validation in training process or not.
        if equals None, will not do validation in train process;
        if equals positive integer, will validate data every validation_freqs epochs;
        if container object in python, will validate data if epoch belongs to this container.
e.g. validation_freqs = [10, 15], will validate data when epoch equals to 10 and 15.
Default: None
train_data: None or DTable,
        if train_data is not None, and validation is needed according to validation_freqs,
        training data will be used for evaluating
validate_data: None or DTable,
        if validate_data is not None, and validation is needed according to validation_freqs,
        validate data will be used for evaluating
"""
def __init__(self, role=None, mode=None, validation_freqs=None):
self.validation_freqs = validation_freqs
self.role = role
self.mode = mode
self.flowid = ''
self.train_data = None
self.validate_data = None
LOGGER.debug("end to init validation_strategy, freqs is {}".format(self.validation_freqs))
def set_train_data(self, train_data):
self.train_data = train_data
def set_validate_data(self, validate_data):
self.validate_data = validate_data
def set_flowid(self, flowid):
self.flowid = flowid
def need_run_validation(self, epoch):
LOGGER.debug("validation_freqs is {}".format(self.validation_freqs))
if not self.validation_freqs:
return False
if isinstance(self.validation_freqs, int):
return (epoch + 1) % self.validation_freqs == 0
return epoch in self.validation_freqs
def generate_flowid(self, prefix, epoch, keywords="iteration", data_type="train"):
return "_".join([prefix, keywords, str(epoch), data_type])
def make_data_set_name(self, need_cv, model_flowid, epoch):
data_iteration_name = "_".join(["iteration", str(epoch)])
if not need_cv:
return data_iteration_name
cv_fold = "_".join(["fold", model_flowid.split(".", -1)[-1]])
return ".".join([cv_fold, data_iteration_name])
def evaluate(self, predicts, model, epoch):
evaluate_param = model.get_metrics_param()
eval_obj = Evaluation()
LOGGER.debug("evaluate type is {}".format(evaluate_param.eval_type))
eval_obj._init_model(evaluate_param)
eval_obj.set_tracker(model.tracker)
data_set_name = self.make_data_set_name(model.need_cv, model.flowid, epoch);
eval_data = {data_set_name : predicts}
eval_obj.fit(eval_data)
eval_obj.save_data()
LOGGER.debug("end to eval")
def evaluate_data(self, model, epoch, data, data_type):
if not data:
return
LOGGER.debug("start to evaluate data {}".format(data_type))
model_flowid = model.flowid
# model_flowid = ".".join(model.flowid.split(".", -1)[1:])
flowid = self.generate_flowid(model_flowid, epoch, "iteration", data_type)
model.set_flowid(flowid)
predicts = model.predict(data)
model.set_flowid(model_flowid)
if self.mode == consts.HOMO and self.role == consts.ARBITER:
pass
elif self.mode == consts.HETERO and self.role == consts.HOST:
pass
else:
predicts = predicts.mapValues(lambda value: value + [data_type])
return predicts
def validate(self, model, epoch):
LOGGER.debug("begin to check validate status, need_run_validation is {}".format(self.need_run_validation(epoch)))
if not self.need_run_validation(epoch):
return
if self.mode == consts.HOMO and self.role == consts.ARBITER:
return
train_predicts = self.evaluate_data(model, epoch, self.train_data, "train")
validate_predicts = self.evaluate_data(model, epoch, self.validate_data, "validate")
if train_predicts is None and validate_predicts is None:
return
else:
LOGGER.debug("train_predicts data is {}".format(list(train_predicts.collect())))
predicts = train_predicts
if validate_predicts:
LOGGER.debug("validate_predicts data is {}".format(list(validate_predicts.collect())))
predicts = predicts.union(validate_predicts)
LOGGER.debug("predicts data is {}".format(list(predicts.collect())))
self.evaluate(predicts, model, epoch)
|
Python
| 0
|
@@ -5618,32 +5618,34 @@
se:%0D%0A
+ #
LOGGER.debug(%22t
@@ -5792,32 +5792,34 @@
%0A
+ #
LOGGER.debug(%22v
@@ -5962,24 +5962,26 @@
%0A
+ #
LOGGER.debu
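The patch comments out the three LOGGER.debug calls that eagerly collect() the distributed prediction tables; collect() materializes every partition on the driver, so these logs were an expensive side effect of each validation round. Patched tail of validate(), as a sketch:

else:
    # LOGGER.debug("train_predicts data is {}".format(list(train_predicts.collect())))
    predicts = train_predicts
    if validate_predicts:
        # LOGGER.debug("validate_predicts data is {}".format(list(validate_predicts.collect())))
        predicts = predicts.union(validate_predicts)
    # LOGGER.debug("predicts data is {}".format(list(predicts.collect())))
    self.evaluate(predicts, model, epoch)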
|
de06d0530023b25eeeadba3fbbc7d2264dfc0f2c
|
clarify error message for kik creation
|
microbot/views/api/bot.py
|
microbot/views/api/bot.py
|
from microbot.serializers import BotSerializer, BotUpdateSerializer, TelegramBotSerializer, TelegramBotUpdateSerializer, \
KikBotSerializer, KikBotUpdateSerializer
from microbot.views.api.base import MicrobotAPIView
from microbot.models import Bot, TelegramBot, KikBot
from rest_framework.response import Response
from rest_framework import status
from microbot.views.api.base import ListBotAPIView, DetailBotAPIView
import logging
logger = logging.getLogger(__name__)
class BotList(MicrobotAPIView):
def get(self, request, format=None):
"""
Get list of bots
---
serializer: BotSerializer
responseMessages:
- code: 401
message: Not authenticated
"""
bots = Bot.objects.filter(owner=request.user)
serializer = BotSerializer(bots, many=True)
return Response(serializer.data)
def post(self, request, format=None):
"""
Add a new bot
---
serializer: BotSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
serializer = BotSerializer(data=request.data)
if serializer.is_valid():
bot = Bot.objects.create(owner=request.user,
name=serializer.data['name'])
return Response(BotSerializer(bot).data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class BotDetail(MicrobotAPIView):
def get(self, request, id, format=None):
"""
Get bot by id
---
serializer: BotSerializer
responseMessages:
- code: 401
message: Not authenticated
"""
bot = self.get_bot(id, request.user)
serializer = BotSerializer(bot)
return Response(serializer.data)
def put(self, request, id, format=None):
"""
Update an existing bot
---
serializer: BotUpdateSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
bot = self.get_bot(id, request.user)
serializer = BotUpdateSerializer(bot, data=request.data)
if serializer.is_valid():
try:
bot = serializer.save()
except:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
return Response(BotSerializer(bot).data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, id, format=None):
"""
Delete an existing bot
---
responseMessages:
- code: 401
message: Not authenticated
"""
bot = self.get_bot(id, request.user)
bot.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class TelegramBotList(ListBotAPIView):
serializer = TelegramBotSerializer
many = False
def _query(self, bot):
return bot.telegram_bot
def _creator(self, bot, serializer):
try:
telegram_bot = TelegramBot.objects.create(token=serializer.data['token'],
enabled=serializer.data['enabled'])
except:
logger.error("Error trying to create Bot %s" % serializer.data['token'])
raise
else:
bot.telegram_bot = telegram_bot
bot.save()
return telegram_bot
def get(self, request, bot_id, format=None):
"""
Get list of Telegram bots
---
serializer: TelegramBotSerializer
responseMessages:
- code: 401
message: Not authenticated
"""
return super(TelegramBotList, self).get(request, bot_id, format)
def post(self, request, bot_id, format=None):
"""
Add TelegramBot
---
serializer: TelegramBotSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
try:
return super(TelegramBotList, self).post(request, bot_id, format)
except:
return Response({"error": 'Telegram Error. Check Token or try later.'}, status=status.HTTP_400_BAD_REQUEST)
class TelegramBotDetail(DetailBotAPIView):
model = TelegramBot
serializer = TelegramBotSerializer
serializer_update = TelegramBotUpdateSerializer
def get(self, request, bot_id, id, format=None):
"""
Get TelegramBot by id
---
serializer: TelegramBotSerializer
responseMessages:
- code: 401
message: Not authenticated
"""
return super(TelegramBotDetail, self).get(request, bot_id, id, format)
def put(self, request, bot_id, id, format=None):
"""
Update existing TelegramBot
---
serializer: TelegramBotUpdateSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
return super(TelegramBotDetail, self).put(request, bot_id, id, format)
def delete(self, request, bot_id, id, format=None):
"""
Delete existing Telegram Bot
---
responseMessages:
- code: 401
message: Not authenticated
"""
return super(TelegramBotDetail, self).delete(request, bot_id, id, format)
class KikBotList(ListBotAPIView):
serializer = KikBotSerializer
many = False
def _query(self, bot):
return bot.kik_bot
def _creator(self, bot, serializer):
try:
kik_bot = KikBot.objects.create(api_key=serializer.data['api_key'],
username=serializer.data['username'],
enabled=serializer.data['enabled'])
except:
logger.error("Error trying to create Kik Bot %s" % serializer.data['api_key'])
raise
else:
bot.kik_bot = kik_bot
bot.save()
return kik_bot
def get(self, request, bot_id, format=None):
"""
Get list of Kik bots
---
serializer: KikBotSerializer
responseMessages:
- code: 401
message: Not authenticated
"""
return super(KikBotList, self).get(request, bot_id, format)
def post(self, request, bot_id, format=None):
"""
        Add KikBot
---
serializer: KikBotSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
try:
return super(KikBotList, self).post(request, bot_id, format)
except:
return Response({"error": 'Kik Error. Check Api key or try later.'}, status=status.HTTP_400_BAD_REQUEST)
class KikBotDetail(DetailBotAPIView):
model = KikBot
serializer = KikBotSerializer
serializer_update = KikBotUpdateSerializer
def get(self, request, bot_id, id, format=None):
"""
Get KikBot by id
---
serializer: KikBotSerializer
responseMessages:
- code: 401
message: Not authenticated
"""
return super(KikBotDetail, self).get(request, bot_id, id, format)
def put(self, request, bot_id, id, format=None):
"""
Update existing KikBot
---
serializer: KikBotUpdateSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
"""
return super(KikBotDetail, self).put(request, bot_id, id, format)
def delete(self, request, bot_id, id, format=None):
"""
Delete existing Kik Bot
---
responseMessages:
- code: 401
message: Not authenticated
"""
return super(KikBotDetail, self).delete(request, bot_id, id, format)
|
Python
| 0.000001
|
@@ -4491,13 +4491,28 @@
eck
-Token
+Kik username/api_key
or
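A sketch of the patched error response in KikBotList.post; the diff context shows the word 'Token' being replaced, so the exact pre-patch wording may differ slightly from the old_contents shown above:

except:
    return Response(
        {"error": 'Kik Error. Check Kik username/api_key or try later.'},
        status=status.HTTP_400_BAD_REQUEST,
    )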
|
bcee5519b650ffb8ecb8494a38f0a54bdeba3e5c
|
Update rules_swift to 0.8.0
|
apple/repositories.bzl
|
apple/repositories.bzl
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions for handling Bazel repositories used by the Apple rules."""
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
def _colorize(text, color):
"""Applies ANSI color codes around the given text."""
return "\033[1;{color}m{text}{reset}".format(
color = color,
reset = "\033[0m",
text = text,
)
def _green(text):
return _colorize(text, "32")
def _yellow(text):
return _colorize(text, "33")
def _warn(msg):
"""Outputs a warning message."""
print("\n{prefix} {msg}\n".format(
msg = msg,
prefix = _yellow("WARNING:"),
))
def _maybe(repo_rule, name, ignore_version_differences, **kwargs):
"""Executes the given repository rule if it hasn't been executed already.
Args:
repo_rule: The repository rule to be executed (e.g.,
`native.git_repository`.)
name: The name of the repository to be defined by the rule.
ignore_version_differences: If `True`, warnings about potentially
incompatible versions of depended-upon repositories will be silenced.
**kwargs: Additional arguments passed directly to the repository rule.
"""
if name in native.existing_rules():
if not ignore_version_differences:
# Verify that the repository is being loaded from the same URL and tag
# that we asked for, and warn if they differ.
# TODO(allevato): This isn't perfect, because the user could load from the
# same commit SHA as the tag, or load from an HTTP archive instead of a
# Git repository, but this is a good first step toward validating.
# Long-term, we should extend this function to support dependencies other
# than Git.
existing_repo = native.existing_rule(name)
if (existing_repo.get("remote") != kwargs.get("remote") or
existing_repo.get("tag") != kwargs.get("tag")):
expected = "{url} (tag {tag})".format(
tag = kwargs.get("tag"),
url = kwargs.get("remote"),
)
existing = "{url} (tag {tag})".format(
tag = existing_repo.get("tag"),
url = existing_repo.get("remote"),
)
_warn("""\
`build_bazel_rules_apple` depends on `{repo}` loaded from {expected}, but we \
have detected it already loaded into your workspace from {existing}. You may \
run into compatibility issues. To silence this warning, pass \
`ignore_version_differences = True` to `apple_rules_dependencies()`.
""".format(
existing = _yellow(existing),
expected = _green(expected),
repo = name,
))
return
repo_rule(name = name, **kwargs)
def apple_rules_dependencies(ignore_version_differences = False):
"""Fetches repositories that are dependencies of the `rules_apple` workspace.
Users should call this macro in their `WORKSPACE` to ensure that all of the
dependencies of the Swift rules are downloaded and that they are isolated from
changes to those dependencies.
Args:
ignore_version_differences: If `True`, warnings about potentially
incompatible versions of depended-upon repositories will be silenced.
"""
_maybe(
http_archive,
name = "bazel_skylib",
urls = [
"https://github.com/bazelbuild/bazel-skylib/releases/download/0.8.0/bazel-skylib.0.8.0.tar.gz",
],
sha256 = "2ef429f5d7ce7111263289644d233707dba35e39696377ebab8b0bc701f7818e",
ignore_version_differences = ignore_version_differences,
)
_maybe(
http_archive,
name = "build_bazel_apple_support",
urls = [
"https://github.com/bazelbuild/apple_support/releases/download/0.6.0/apple_support.0.6.0.tar.gz",
],
sha256 = "7356dbd44dea71570a929d1d4731e870622151a5f27164d966dda97305f33471",
ignore_version_differences = ignore_version_differences,
)
_maybe(
git_repository,
name = "build_bazel_rules_swift",
remote = "https://github.com/bazelbuild/rules_swift.git",
tag = "0.7.0",
ignore_version_differences = ignore_version_differences,
)
_maybe(
http_file,
name = "xctestrunner",
executable = 1,
sha256 = "15fc7d09315a230f3d8ee2913eef8699456366e44b37a9266e36b28517003628",
urls = ["https://github.com/google/xctestrunner/releases/download/0.2.6/ios_test_runner.par"],
ignore_version_differences = ignore_version_differences,
)
|
Python
| 0.000003
|
@@ -676,78 +676,8 @@
%22%22%0A%0A
-load(%22@bazel_tools//tools/build_defs/repo:git.bzl%22, %22git_repository%22)%0A
load
@@ -1461,29 +1461,20 @@
%60
-native.git_repository
+http_archive
%60.)%0A
@@ -4704,22 +4704,20 @@
-git_repository
+http_archive
,%0A
@@ -4768,16 +4768,28 @@
-remote =
+urls = %5B%0A
%22ht
@@ -4831,35 +4831,153 @@
wift
-.git%22,%0A tag = %220.7.0
+/releases/download/0.8.0/rules_swift.0.8.0.tar.gz%22,%0A %5D,%0A sha256 = %2231aad005a9c4e56b256125844ad05eb27c88303502d74138186f9083479f93a6
%22,%0A
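A pinned release archive with a sha256 replaces the mutable git tag; the resulting rules_swift stanza (Starlark, reconstructed from the patch):

_maybe(
    http_archive,
    name = "build_bazel_rules_swift",
    urls = [
        "https://github.com/bazelbuild/rules_swift/releases/download/0.8.0/rules_swift.0.8.0.tar.gz",
    ],
    sha256 = "31aad005a9c4e56b256125844ad05eb27c88303502d74138186f9083479f93a6",
    ignore_version_differences = ignore_version_differences,
)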
|
cbe38648644c63dc01a02f3ba6cbcac8eec45274
|
fix celerybeat
|
scrapy_joy/__init__.py
|
scrapy_joy/__init__.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os, sys, django
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "scrapy_joy.settings")
django.setup()
##################################################
# add django-dynamic-scraper function: lambda processor
##################################################
from dynamic_scraper.utils import processors
def lambda_str(text, loader_context):
lam_str = loader_context.get('lambda_str', '')
return eval(lam_str)(text)
if not hasattr(processors, 'lambda_str'):
setattr(processors, 'lambda_str', lambda_str)
def url_hour_arg(text, loader_context):
""" 加上小时的时间戳 """
from datetime import datetime
url = loader_context.get('url_hour_arg', '')
return url + '#' + datetime.now().strftime('%Y%m%d%H')
if not hasattr(processors, 'url_hour_arg'):
setattr(processors, 'url_hour_arg', url_hour_arg)
try:
from dynamic_scraper import migrations
import shutil
shutil.rmtree(migrations.__dict__['__path__'][0])
    print '######', u'removed migrations'
except:
pass
###############################################
# fix xadmin bug: AppRegistryNotReady error raised at startup
###############################################
from django.contrib import auth
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, AppRegistryNotReady
def get_user_model():
"""
Returns the User model that is active in this project.
"""
try:
return django_apps.get_model(settings.AUTH_USER_MODEL)
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"AUTH_USER_MODEL refers to model '%s' that has not been installed" % settings.AUTH_USER_MODEL
)
except AppRegistryNotReady:
return getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
setattr(auth, 'get_user_model', get_user_model)
################################################################
# fix xadmin bug: AuthenticationForm has no check_for_test_cookie method
################################################################
from django.contrib.auth.forms import AuthenticationForm
def check_for_test_cookie(self):
pass
setattr(AuthenticationForm, 'check_for_test_cookie', check_for_test_cookie)
from django.http import HttpResponse
def __init__(self, content=b'', *args, **kwargs):
if 'mimetype' in kwargs:
mimetype = kwargs.pop('mimetype')
kwargs['content_type'] = mimetype
super(HttpResponse, self).__init__(*args, **kwargs)
self.content = content
setattr(HttpResponse, '__init__', __init__)
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
|
Python
| 0.000128
|
@@ -234,23 +234,8 @@
s%22)%0A
-django.setup()%0A
%0A%0A##
@@ -2912,12 +2912,27 @@
s celery_app
+%0Adjango.setup()
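The fix moves django.setup() from the top of the module to after the Celery app import, so celerybeat can import the app before Django initializes; the patched module now ends with:

# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
django.setup()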
|
9c6ae84ed18cf0f289519342a26543017f4e0160
|
W293 blank line contains whitespace
|
mido/messages/messages.py
|
mido/messages/messages.py
|
import re
from .specs import make_msgdict, SPEC_BY_TYPE, REALTIME_TYPES
from .checks import check_msgdict, check_value, check_data
from .decode import decode_message
from .encode import encode_message
from .strings import msg2str, str2msg
from ..py2 import convert_py2_bytes
class BaseMessage(object):
"""Abstract base class for messages."""
is_meta = False
def copy(self):
        raise NotImplementedError
def bytes(self):
        raise NotImplementedError
def bin(self):
"""Encode message and return as a bytearray.
This can be used to write the message to a file.
"""
return bytearray(self.bytes())
def hex(self, sep=' '):
"""Encode message and return as a string of hex numbers,
Each number is separated by the string sep.
"""
return sep.join('{:02X}'.format(byte) for byte in self.bytes())
def dict(self):
"""Returns a dictionary containing the attributes of the message.
Example: {'type': 'sysex', 'data': [1, 2], 'time': 0}
Sysex data will be returned as a list.
"""
data = vars(self).copy()
if data['type'] == 'sysex':
# Make sure we return a list instead of a SysexData object.
data['data'] = list(data['data'])
return data
@classmethod
def from_dict(cl, data):
"""Create a message from a dictionary.
Only "type" is required. The other will be set to default
values.
"""
return cl(**data)
def _get_value_names(self):
        # This is overridden by MetaMessage.
return list(SPEC_BY_TYPE[self.type]['value_names']) + ['time']
def __repr__(self):
items = [repr(self.type)]
for name in self._get_value_names():
items.append('{}={!r}'.format(name, getattr(self, name)))
return '{}({})'.format(type(self).__name__, ', '.join(items))
@property
def is_realtime(self):
"""True if the message is a system realtime message."""
return self.type in REALTIME_TYPES
def __delattr__(self, name):
raise AttributeError('attribute cannot be deleted')
def __setattr__(self, name, value):
raise AttributeError('message is immutable')
def __eq__(self, other):
if not isinstance(other, BaseMessage):
raise TypeError('can\'t compare message to {}'.format(type(other)))
# This includes time in comparison.
return vars(self) == vars(other)
class SysexData(tuple):
"""Special kind of tuple accepts and converts any sequence in +=."""
def __iadd__(self, other):
check_data(other)
return self + SysexData(convert_py2_bytes(other))
class Message(BaseMessage):
def __init__(self, type, **args):
msgdict = make_msgdict(type, args)
if type == 'sysex':
msgdict['data'] = SysexData(convert_py2_bytes(msgdict['data']))
check_msgdict(msgdict)
vars(self).update(msgdict)
def copy(self, **overrides):
"""Return a copy of the message.
Attributes will be overridden by the passed keyword arguments.
Only message specific attributes can be overridden. The message
type can not be changed.
"""
if not overrides:
# Bypass all checks.
msg = self.__class__.__new__(self.__class__)
vars(msg).update(vars(self))
return msg
if 'type' in overrides and overrides['type'] != self.type:
raise ValueError('copy must be same message type')
if 'data' in overrides:
overrides['data'] = bytearray(overrides['data'])
msgdict = vars(self).copy()
msgdict.update(overrides)
check_msgdict(msgdict)
return self.__class__(**msgdict)
@classmethod
def from_bytes(cl, data, time=0):
"""Parse a byte encoded message.
Accepts a byte string or any iterable of integers.
This is the reverse of msg.bytes() or msg.bin().
"""
msg = cl.__new__(cl)
msgdict = decode_message(data, time=time)
if 'data' in msgdict:
msgdict['data'] = SysexData(msgdict['data'])
vars(msg).update(msgdict)
return msg
@classmethod
def from_hex(cl, text, time=0, sep=None):
"""Parse a hex encoded message.
This is the reverse of msg.hex().
"""
# bytearray.fromhex() is a bit picky about its input
# so we need to replace all whitespace characters with spaces.
text = re.sub(r'\s', ' ', text)
if sep is not None:
# We also replace the separator with spaces making sure
# the string length remains the same so char positions will
# be correct in bytearray.fromhex() error messages.
text = text.replace(sep, ' ' * len(sep))
return cl.from_bytes(bytearray.fromhex(text), time=time)
@classmethod
def from_str(cl, text):
"""Parse a string encoded message.
This is the reverse of str(msg).
"""
return cl(**str2msg(text))
def __len__(self):
if self.type == 'sysex':
return 2 + len(self.data)
else:
return SPEC_BY_TYPE[self.type]['length']
def __str__(self):
return msg2str(vars(self))
def _setattr(self, name, value):
if name == 'type':
raise AttributeError('type attribute is read only')
elif name not in vars(self):
raise AttributeError('{} message has no '
'attribute {}'.format(self.type,
name))
else:
check_value(name, value)
if name == 'data':
vars(self)['data'] = SysexData(value)
else:
vars(self)[name] = value
__setattr__ = _setattr
def bytes(self):
"""Encode message and return as a list of integers."""
return encode_message(vars(self))
def parse_string(text):
"""Parse a string of text and return a message.
The string can span multiple lines, but must contain
one full message.
Raises ValueError if the string could not be parsed.
"""
return Message.from_str(text)
def parse_string_stream(stream):
"""Parse a stram of messages and yield (message, error_message)
stream can be any iterable that generates text strings, where each
string is a string encoded message.
If a string can be parsed, (message, None) is returned. If it
can't be parsed (None, error_message) is returned. The error
    message contains the line number where the error occurred.
"""
line_number = 1
for line in stream:
try:
line = line.split('#')[0].strip()
if line:
yield parse_string(line), None
except ValueError as exception:
error_message = 'line {line_number}: {msg}'.format(
line_number=line_number,
msg=exception.args[0])
yield None, error_message
line_number += 1
def format_as_string(msg, include_time=True):
"""Format a message and return as a string.
This is equivalent to str(message).
To leave out the time attribute, pass include_time=False.
"""
return msg2str(vars(msg), include_time=include_time)
|
Python
| 0.998957
|
@@ -1838,24 +1838,16 @@
name)))%0A
-
%0A
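W293 flags blank lines that still contain spaces or tabs; here the patch just empties one such line inside _setattr. A hypothetical helper for applying the same cleanup to a whole file (not part of the commit):

import re

def strip_blank_line_whitespace(text):
    # Replace whitespace-only lines with truly empty lines (fixes W293)
    return re.sub(r'(?m)^[ \t]+$', '', text)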
|
41b4cb48de1e6db7b2cb95e893ea5ed981d49425
|
handle date object passed as argument
|
custom/icds_reports/management/commands/run_custom_data_pull.py
|
custom/icds_reports/management/commands/run_custom_data_pull.py
|
import os
import zipfile
from django.conf import settings
from django.core.management.base import (
BaseCommand,
CommandError,
)
from django.db import connections
from custom.icds_reports.const import CUSTOM_DATA_PULLS
class Command(BaseCommand):
help = "Dump data from a pre-defined query for ICDS data pull requests"
def add_arguments(self, parser):
parser.add_argument('name')
parser.add_argument('db_alias', choices=settings.DATABASES)
parser.add_argument('--month', help="format YYYY-MM-DD")
parser.add_argument('--location_id')
parser.add_argument('-s', '--skip_confirmation', action='store_true')
parser.add_argument('-l', '--log_progress', action='store_true')
def handle(self, name, db_alias, *arg, **options):
generated_files = []
month = options.get('month')
location_id = options.get('location_id')
skip_confirmation = options.get('skip_confirmation')
log_progress = options.get('log_progress')
if db_alias not in settings.DATABASES:
raise CommandError("Unexpected db alias")
if name in CUSTOM_DATA_PULLS:
generated_files = self.run_via_class(name, db_alias, month, location_id, skip_confirmation,
log_progress)
else:
generated_files = self.run_via_sql_file(name, db_alias, month, location_id, skip_confirmation,
log_progress)
if generated_files:
if name in CUSTOM_DATA_PULLS:
zip_file_name = "%s-DataPull.zip" % CUSTOM_DATA_PULLS[name].name
else:
zip_file_name = "%s-DataPull.zip" % name
with zipfile.ZipFile(zip_file_name, mode='a') as z:
for generated_file in generated_files:
z.write(generated_file)
os.remove(generated_file)
return zip_file_name
def run_via_class(self, slug, db_alias, month, location_id, skip_confirmation, log_progress):
data_pull_class = CUSTOM_DATA_PULLS[slug]
data_pull = data_pull_class(db_alias, month=month, location_id=location_id)
if log_progress:
self._log(data_pull.get_queries())
if skip_confirmation or self._get_confirmation():
return data_pull.run()
return []
def run_via_sql_file(self, sql_query_file_path, db_alias, month, location_id, skip_confirmation, log_progress):
filepath = 'custom/icds_reports/data_pull/sql_queries/%s.sql' % sql_query_file_path
with open(filepath) as _sql:
sql = _sql.read()
sql = sql.format(month=month, location_id=location_id)
if log_progress:
self._log([sql])
if skip_confirmation or self._get_confirmation():
sql = sql.replace('\n', ' ')
result_file = "%s-%s-%s.csv" % (sql_query_file_path.split('/')[-1], month, location_id)
with open(result_file, "w") as output:
db_conn = connections[db_alias]
c = db_conn.cursor()
c.copy_expert("COPY ({query}) TO STDOUT DELIMITER ',' CSV HEADER;".format(query=sql), output)
return [result_file]
def _log(self, queries):
print("Running queries")
for sql in queries:
print(sql)
def _get_confirmation(self):
proceed = input("Continue?(YES)")
return proceed == "YES"
|
Python
| 0.000007
|
@@ -853,16 +853,121 @@
month')%0A
+ if month:%0A # convert to string if date object received%0A month = str(month)%0A
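The patch normalizes the month option right after reading it, so a datetime.date passed programmatically formats the same as the YYYY-MM-DD string from the CLI:

month = options.get('month')
if month:
    # convert to string if date object received
    month = str(month)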
|
b958af7f43a6aed903600336908f0898c00ab594
|
fix directionality to fit with wsdl protocol
|
opennsa/nsa.py
|
opennsa/nsa.py
|
"""
Core abstractions used in OpenNSA.
Author: Henrik Thostrup Jensen <htj@nordu.net>
Copyright: NORDUnet (2011)
"""
import urlparse
from opennsa import error
STP_PREFIX = 'urn:ogf:network:stp:'
NSA_PREFIX = 'urn:ogf:network:nsa:'
class STP: # Service Termination Point
def __init__(self, network, endpoint):
self.network = network
self.endpoint = endpoint
def urn(self):
return STP_PREFIX + self.network + ':' + self.endpoint
def __eq__(self, other):
if not isinstance(other, STP):
return False
return self.network == other.network and self.endpoint == other.endpoint
def __str__(self):
return '<STP %s:%s>' % (self.network, self.endpoint)
class SDP: # service demarcation point
def __init__(self, stp1, stp2):
self.stp1 = stp1
self.stp2 = stp2
def __eq__(self, other):
if not isinstance(other, SDP):
return False
return self.stp1 == other.stp1 and self.stp2 == other.stp2
def __str__(self):
return '<SDP %s:%s-%s:%s>' % (self.stp1.network, self.stp1.endpoint, self.stp2.network, self.stp2.endpoint)
class Path:
"""
    Represent a path from a source and destination STP, with the endpoint pairs between them.
"""
def __init__(self, source_stp, dest_stp, endpoint_pairs):
self.source_stp = source_stp
self.dest_stp = dest_stp
self.endpoint_pairs = endpoint_pairs
def __str__(self):
return '%s - %s - %s' % (self.source_stp, ' - '.join( [ str(e) for e in self.endpoint_pairs ] ), self.dest_stp)
class NetworkEndpoint(STP):
def __init__(self, network, endpoint, config, dest_stp=None, max_capacity=None, available_capacity=None):
STP.__init__(self, network, endpoint)
self.config = config
self.dest_stp = dest_stp
self.max_capacity = max_capacity
self.available_capacity = available_capacity
def __str__(self):
return '<NetworkEndpoint %s:%s-%s#%s>' % (self.network, self.endpoint, self.dest_stp, self.config)
class NetworkServiceAgent:
def __init__(self, identity, endpoint): #, service_attributes=None):
assert type(identity) is str, 'NSA identity type must be string'
assert type(endpoint) is str, 'NSA endpoint type must be string'
self.identity = identity
self.endpoint = endpoint
def getHostPort(self):
url = urlparse.urlparse(self.address)
host, port = url.netloc.split(':',2)
port = int(port)
return host, port
def url(self):
return self.endpoint
def urn(self):
return NSA_PREFIX + self.identity
def __str__(self):
return '<NetworkServiceAgent %s>' % self.identity
class Network:
def __init__(self, name, nsa):
self.name = name
self.nsa = nsa
self.endpoints = []
def addEndpoint(self, endpoint):
self.endpoints.append(endpoint)
def getEndpoint(self, endpoint_name):
for ep in self.endpoints:
if ep.endpoint == endpoint_name:
return ep
raise error.TopologyError('No such endpoint (%s)' % (endpoint_name))
def __str__(self):
return '<Network %s,%i>' % (self.name, len(self.endpoints))
class BandwidthParameters:
def __init__(self, desired=None, minimum=None, maximum=None):
self.desired = desired
self.minimum = minimum
self.maximum = maximum
class ServiceParameters:
def __init__(self, start_time, end_time, source_stp, dest_stp, stps=None, directionality='unidirectional', bandwidth_params=None):
# should probably make path object sometime..
# schedule
self.start_time = start_time
self.end_time = end_time
# path
self.source_stp = source_stp
self.dest_stp = dest_stp
self.stps = stps
self.directionality = directionality
self.bandwidth_params = bandwidth_params or BandwidthParameters()
def protoSP(self):
return { 'start_time' : self.start_time,
'end_time' : self.end_time,
'source_stp' : self.source_stp.urn(),
'dest_stp' : self.dest_stp.urn(),
'stps' : self.stps }
def __str__(self):
return '<ServiceParameters %s>' % str(self.protoSP())
|
Python
| 0
|
@@ -3624,10 +3624,9 @@
ty='
-un
+B
idir
@@ -3925,16 +3925,85 @@
= stps%0A
+ assert directionality in ('Unidirectional', 'Bidirectional')%0A
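The default directionality now matches the WSDL vocabulary, and an assert rejects anything outside the two allowed values; patched signature and check, as a sketch:

def __init__(self, start_time, end_time, source_stp, dest_stp,
             stps=None, directionality='Bidirectional', bandwidth_params=None):
    ...
    self.stps = stps
    assert directionality in ('Unidirectional', 'Bidirectional')
    self.directionality = directionality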
|
eece11275f3251d1db3eb7f05eb72935a1ecb073
|
add util import. (required for send_message to work)
|
FlotypeBridge/flotype/connection.py
|
FlotypeBridge/flotype/connection.py
|
import sys
import struct
import socket
import logging
from collections import deque
from datetime import timedelta
from tornado import ioloop, iostream
from tornado.escape import json_encode, json_decode, utf8, to_unicode
from tornado.httpclient import HTTPClient, HTTPError
class Connection(object):
def __init__(self, bridge, interval=400):
# Set associated bridge object
self.bridge = bridge
self.options = bridge._options
# Connection configurations
self.interval = interval
self.loop = ioloop.IOLoop.instance()
self.msg_queue = deque()
self.on_message = self._connect_on_message
self.connected = False
self.client_id = None
self.secret = None
if self.options.get('host') is None or self.options.get('port') is None:
self.redirector()
else:
self.establish_connection()
def redirector(self):
client = HTTPClient()
try:
res = client.fetch('%s/redirect/%s' % (
self.options['redirector'], self.options['api_key']
))
except:
logging.error('Unable to contact redirector')
client.close()
return
try:
body = json_decode(res.body).get('data')
except:
logging.error('Unable to parse redirector response %s', res.body)
client.close()
return
if('bridge_port' not in body or 'bridge_host' not in body):
logging.error('Could not find host and port in JSON')
else:
self.options['host'] = body.get('bridge_host')
self.options['port'] = int(body.get('bridge_port'))
self.establish_connection()
client.close()
def reconnect(self):
self.on_message = self._connect_on_message
if not self.connected and self.interval < 32678:
delta = timedelta(milliseconds=self.interval)
self.loop.add_timeout(delta, self.establish_connection)
def establish_connection(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.stream = iostream.IOStream(self.sock)
logging.info('Starting TCP connection')
server = (self.options['host'], self.options['port'])
self.stream.connect(server, self.on_connect)
def on_connect(self):
self.connected = True
logging.info('Beginning handshake')
msg = {
'command': 'CONNECT',
'data': {
'session': [self.client_id, self.secret],
'api_key': self.options['api_key'],
},
}
self.send(msg)
self.stream.set_close_callback(self.on_close)
if len(self.msg_queue):
self.bridge.emit('reconnect')
self.loop.add_callback(self.process_queue)
self.wait()
def wait(self):
self.stream.read_bytes(4, self.header_handler)
def header_handler(self, data):
size = struct.unpack('>I', data)[0]
self.stream.read_bytes(size, self.body_handler)
def body_handler(self, data):
self.on_message(to_unicode(data))
self.wait()
def _connect_on_message(self, msg):
logging.info('clientId and secret received')
ids = msg.split('|')
if len(ids) != 2:
self._process_message(msg)
else:
self.client_id, self.secret = ids
self.on_message = self._process_message
self.bridge._on_ready()
def _process_message(self, msg):
try:
obj = json_decode(msg)
except:
logging.warn("Message parsing failed")
return
logging.info('Received %s', msg)
util.deserialize(self.bridge, obj)
destination = obj.get('destination', None)
if not destination:
logging.warn('No destination in message')
return
self.bridge._execute(destination._address, obj['args'])
def on_close(self):
self.connected = False
logging.error('Connection closed')
self.bridge.emit('disconnect')
if self.options['reconnect']:
self.reconnect()
def process_queue(self):
while self.connected and len(self.msg_queue):
buf = self.msg_queue.popleft()
self.stream.write(buf)
def send_command(self, command, data):
msg = {'command': command, 'data': data}
msg = util.serialize(self.bridge, msg)
self.send(msg)
def send(self, msg):
data = utf8(json_encode(msg))
size = struct.pack('>I', len(data))
buf = size + data
if self.connected:
self.stream.write(buf)
else:
self.msg_queue.append(buf)
def start(self):
self.loop.start()
|
Python
| 0
|
@@ -270,16 +270,42 @@
PError%0A%0A
+from flotype import util%0A%0A
%0Aclass C
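Without the import, the util.serialize and util.deserialize calls in send_command and _process_message raise NameError at runtime; the patch adds the import between the tornado imports and the Connection class:

from tornado.httpclient import HTTPClient, HTTPError

from flotype import util  # used by send_command and _process_message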
|
cd02c0a262d79cfc60e15f3e8ac71121a3201aa1
|
Fix permissions on bde_make_vscode.py
|
bin/bde_make_vscode.py
|
bin/bde_make_vscode.py
|
#!/usr/bin/env python
import os
import subprocess
import sys
# If BDE_TOOLS_DIR is not specified, try finding it via 'which' and default
# to '/bb/bde/bbshr/bde-tools'
bdeToolsDir = os.getenv("BDE_TOOLS_DIR")
if not bdeToolsDir:
try:
whichBuildEnv = subprocess.run(
["which", "bde_build_env.py"], stdout=subprocess.PIPE, text=True
).stdout
bdeToolsDir = os.path.dirname(os.path.dirname(whichBuildEnv))
except:
bdeToolsDir = "/bb/bde/bbshr/bde-tools"
bdeCmakeBuildDir = os.getenv("BDE_CMAKE_BUILD_DIR")
bdeCmakeToolchain = os.getenv("BDE_CMAKE_TOOLCHAIN")
bdeCmakeUfid = os.getenv("BDE_CMAKE_UFID")
if not bdeCmakeBuildDir or not bdeCmakeToolchain or not bdeCmakeUfid:
print("Please set the BDE build environment using 'bde_build_env.py'.")
sys.exit(1)
print(f"Generating .vscode folder...")
print(f" BDE tools directory: {bdeToolsDir}")
print(f" Build directory: {bdeCmakeBuildDir}")
print(f" Toolchain: {bdeCmakeToolchain}")
print(f" UFID: {bdeCmakeUfid}")
os.makedirs(".vscode", exist_ok=True)
with open(".vscode/settings.json", "wt") as settings:
settings.write(
f"""
{{
"cmake.configureOnOpen": false,
"cmake.buildDirectory": "${{workspaceFolder}}/{bdeCmakeBuildDir}",
"cmake.generator": "Ninja",
"cmake.parallelJobs": 0,
"cmake.configureSettings": {{
"CMAKE_MODULE_PATH": "{bdeToolsDir}/cmake",
"CMAKE_EXPORT_COMPILE_COMMANDS": true,
"CMAKE_TOOLCHAIN_FILE": "{bdeToolsDir}/cmake/{bdeCmakeToolchain}.cmake",
"BUILD_BITNESS": "64",
"UFID": "{bdeCmakeUfid}"
}},
"cmake.ctestArgs": [],
"C_Cpp.default.configurationProvider": "ms-vscode.cmake-tools",
"files.associations": {{
"*.ipp": "cpp"
}}
}}
"""
)
with open(".vscode/c_cpp_properties.json", "wt") as settings:
settings.write(
f"""
{{
"configurations": [
{{
"name": "CMake",
"compileCommands": "${{workspaceFolder}}/{bdeCmakeBuildDir}/compile_commands.json",
"configurationProvider": "ms-vscode.cmake-tools"
}}
],
"version": 4
}}
"""
)
with open(".vscode/launch.json", "wt") as settings:
settings.write(
f"""
{{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{{
"name": "(gdb) Launch",
"type": "cppdbg",
"request": "launch",
"program": "${{command:cmake.launchTargetPath}}",
"args": ["${{input:args}}"],
"stopAtEntry": true,
"cwd": "${{command:cmake.getLaunchTargetDirectory}}",
"environment": [],
"externalConsole": false,
"MIMode": "gdb",
"setupCommands": [
{{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
}}
]
}},
],
"inputs": [
{{
"id": "args",
"type":"promptString",
"description": "Program Args",
"default": "0"
}}
]
}}
"""
)
# -----------------------------------------------------------------------------
# Copyright 2022 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------- END-OF-FILE -----------------------------------
|
Python
| 0.000001
| |
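The empty diff means only file metadata changed here, presumably adding the executable bit the script's shebang implies. A hypothetical Python equivalent of that chmod (not part of the commit):

import os
import stat

path = "bin/bde_make_vscode.py"
# equivalent of chmod +x: add the execute bits for user, group, and other
os.chmod(path, os.stat(path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)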
4918392cbdee161f2263900c28fd374bc1556f2b
|
Fix functions in oldnumeric.mlab
|
numpy/oldnumeric/mlab.py
|
numpy/oldnumeric/mlab.py
|
# This module is for compatibility only. All functions are defined elsewhere.
__all__ = ['rand', 'tril', 'trapz', 'hanning', 'rot90', 'triu', 'diff', 'angle', 'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort', 'LinearAlgebra', 'RandomArray', 'prod', 'std', 'hamming', 'flipud', 'max', 'blackman', 'corrcoef', 'bartlett', 'eye', 'squeeze', 'sinc', 'tri', 'cov', 'svd', 'min', 'median', 'fliplr', 'eig', 'mean']
import linear_algebra as LinearAlgebra
import random_array as RandomArray
from numpy import tril, trapz as _Ntrapz, hanning, rot90, triu, diff, \
angle, roots, ptp as _Nptp, kaiser, cumprod as _Ncumprod, \
diag, msort, prod as _Nprod, std as _Nstd, hamming, flipud, \
amax as _Nmax, amin as _Nmin, blackman, bartlett, \
squeeze, sinc, median, fliplr, mean as _Nmean, transpose
from numpy.linalg import eig, svd
from numpy.random import rand, randn
from typeconv import convtypecode
def eye(N, M=None, k=0, typecode=None, dtype=None):
""" eye returns a N-by-M 2-d array where the k-th diagonal is all ones,
and everything else is zeros.
"""
dtype = convtypecode(typecode, dtype)
if M is None: M = N
m = nn.equal(nn.subtract.outer(nn.arange(N), nn.arange(M)),-k)
if m.dtype != dtype:
return m.astype(dtype)
def tri(N, M=None, k=0, typecode=None, dtype=None):
""" returns a N-by-M array where all the diagonals starting from
lower left corner up to the k-th are all ones.
"""
dtype = convtypecode(typecode, dtype)
if M is None: M = N
m = nn.greater_equal(nn.subtract.outer(nn.arange(N), nn.arange(M)),-k)
if m.dtype != dtype:
return m.astype(dtype)
def trapz(y, x=None, axis=-1):
return _Ntrapz(y, x, axis=axis)
def ptp(x, axis=0):
return _Nptp(x, axis)
def cumprod(x, axis=0):
return _Ncumprod(x, axis)
def max(x, axis=0):
return _Nmax(x, axis)
def min(x, axis=0):
return _Nmin(x, axis)
def prod(x, axis=0):
return _Nprod(x, axis)
def std(x, axis=0):
return _Nstd(x, axis)
def mean(x, axis=0):
return _Nmean(x, axis)
# This is exactly the same cov function as in MLab
def cov(m, y=None, rowvar=0, bias=0):
if y is None:
y = m
else:
y = y
if rowvar:
m = transpose(m)
y = transpose(y)
if (m.shape[0] == 1):
m = transpose(m)
if (y.shape[0] == 1):
y = transpose(y)
N = m.shape[0]
if (y.shape[0] != N):
raise ValueError, "x and y must have the same number "\
"of observations"
m = m - _Nmean(m,axis=0)
y = y - _Nmean(y,axis=0)
if bias:
fact = N*1.0
else:
fact = N-1.0
return squeeze(dot(transpose(m), conjugate(y)) / fact)
from numpy import sqrt, multiply
def corrcoef(x, y=None):
c = cov(x,y)
d = diag(c)
return c/sqrt(multiply.outer(d,d))
from compat import *
from functions import *
from precision import *
from ufuncs import *
from misc import *
import compat
import precision
import functions
import misc
import ufuncs
import numpy
__version__ = numpy.__version__
del numpy
__all__ += ['__version__']
__all__ += compat.__all__
__all__ += precision.__all__
__all__ += functions.__all__
__all__ += ufuncs.__all__
__all__ += misc.__all__
del compat
del functions
del precision
del ufuncs
del misc
|
Python
| 0.999997
|
@@ -887,16 +887,35 @@
, randn%0A
+import numpy as nn%0A
%0Afr
|
850c9b65b1f1d21da9ae680a94f70433f1225e55
|
Fix extract method
|
mimesis/providers/base.py
|
mimesis/providers/base.py
|
"""Base data provider."""
import contextlib
import functools
import json
import operator
import typing as t
from functools import reduce
from pathlib import Path
from mimesis.exceptions import NonEnumerableError
from mimesis.locales import Locale, validate_locale
from mimesis.random import Random, get_random_item
from mimesis.types import JSON, Seed
__all__ = ["BaseDataProvider", "BaseProvider"]
class BaseProvider:
"""This is a base class for all providers."""
def __init__(self, *, seed: Seed = None, **kwargs: t.Any) -> None:
"""Initialize attributes.
        Keep in mind that locale-independent data providers will work
only with keyword-only arguments since version 5.0.0.
:param seed: Seed for random.
When set to `None` the current system time is used.
"""
self.seed = seed
self.random = Random()
self.reseed(seed)
def reseed(self, seed: Seed = None) -> None:
"""Reseed the internal random generator.
In case we use the default seed, we need to create a per instance
random generator, in this case two providers with the same seed
will always return the same values.
:param seed: Seed for random.
When set to `None` the current system time is used.
"""
self.seed = seed
self.random.seed(seed)
def validate_enum(self, item: t.Any, enum: t.Any) -> t.Any:
"""Validate enum parameter of method in subclasses of BaseProvider.
:param item: Item of enum object.
:param enum: Enum object.
:return: Value of item.
:raises NonEnumerableError: if ``item`` not in ``enum``.
"""
if item is None:
result = get_random_item(enum, self.random)
elif item and isinstance(item, enum):
result = item
else:
raise NonEnumerableError(enum)
return result.value
def __str__(self) -> str:
"""Human-readable representation of locale."""
return self.__class__.__name__
class BaseDataProvider(BaseProvider):
"""This is a base class for all data providers."""
def __init__(self, locale: Locale = Locale.DEFAULT, seed: Seed = None) -> None:
"""Initialize attributes for data providers.
:param locale: Current locale.
:param seed: Seed to all the random functions.
"""
super().__init__(seed=seed)
self._data: JSON = {}
self._datafile: str = ""
self._setup_locale(locale)
self._data_dir = Path(__file__).parent.parent.joinpath("data")
def _setup_locale(self, locale: Locale = Locale.DEFAULT) -> None:
"""Set up locale after pre-check.
:param str locale: Locale
:raises UnsupportedLocale: When locale not supported.
:return: Nothing.
"""
locale_obj = validate_locale(locale)
self.locale = locale_obj.value
def extract(self, keys: t.List[str], default: t.Optional[t.Any] = None) -> t.Any:
"""Extracts nested values from JSON file by list of keys.
:param keys: List of keys (order extremely matters).
:param default: Default value.
:return: Data.
"""
if not keys:
raise ValueError("List of keys cannot be empty.")
try:
if len(keys) == 1:
return self._data[keys[0]]
return reduce(operator.getitem, keys, self._data)
except (TypeError, KeyError):
return default
def _update_dict(self, initial: JSON, other: JSON) -> JSON:
"""Recursively update a dictionary.
:param initial: Dict to update.
:param other: Dict to update from.
:return: Updated dict.
"""
for key, value in other.items():
if isinstance(value, dict):
r = self._update_dict(initial.get(key, {}), value)
initial[key] = r
else:
initial[key] = other[key]
return initial
@functools.lru_cache(maxsize=None)
def _load_datafile(self, datafile: str = "") -> None:
"""Pull the content from the JSON and memorize one.
Opens JSON file ``file`` in the folder ``data/locale``
and get content from the file and memorize ones using lru_cache.
:param datafile: The name of file.
:return: The content of the file.
:raises UnsupportedLocale: Raises if locale is unsupported.
"""
locale = self.locale
data_dir = self._data_dir
if not datafile:
datafile = self._datafile
def get_data(locale_name: str) -> t.Any:
"""Pull JSON data from file.
:param locale_name: Locale name.
:return: Content of JSON file as dict.
"""
file_path = Path(data_dir).joinpath(locale_name, datafile)
with open(file_path, encoding="utf8") as f:
return json.load(f)
locale_separator = "-"
master_locale = locale.split(locale_separator).pop(0)
data = get_data(master_locale)
if locale_separator in locale:
data = self._update_dict(data, get_data(locale))
self._data = data
def get_current_locale(self) -> str:
"""Get current locale.
If locale is not defined then this method will always return ``en``,
because ``en`` is default locale for all providers, excluding builtins.
:return: Current locale.
"""
return self.locale
def _override_locale(self, locale: Locale = Locale.DEFAULT) -> None:
"""Overrides current locale with passed and pull data for new locale.
:param locale: Locale
:return: Nothing.
"""
self._setup_locale(locale)
self._load_datafile.cache_clear()
self._load_datafile()
@contextlib.contextmanager
def override_locale(
self,
locale: Locale,
) -> t.Generator["BaseDataProvider", None, None]:
"""Context manager which allows overriding current locale.
Temporarily overrides current locale for
locale-dependent providers.
:param locale: Locale.
:return: Provider with overridden locale.
"""
try:
origin_locale = Locale(self.locale)
self._override_locale(locale)
try:
yield self
finally:
self._override_locale(origin_locale)
except AttributeError:
raise ValueError(f"«{self.__class__.__name__}» has not locale dependent")
def __str__(self) -> str:
"""Human-readable representation of locale."""
locale = Locale(getattr(self, "locale", Locale.DEFAULT))
return f"{self.__class__.__name__} <{locale}>"
|
Python
| 0.000021
|
@@ -3275,17 +3275,21 @@
eError(%22
-L
+The l
ist of k
@@ -3311,17 +3311,16 @@
mpty.%22)%0A
-%0A
@@ -3328,82 +3328,8 @@
ry:%0A
- if len(keys) == 1:%0A return self._data%5Bkeys%5B0%5D%5D%0A
|
ba86e7c9025bbf601db88a23d208b8bfaade34d6
|
Version bump to beta 1
|
parler_rest/__init__.py
|
parler_rest/__init__.py
|
# -*- coding: utf-8 -*-
"""
This package provides support for integrating translatable fields into *django-rest-framework*.
"""
# following PEP 440
__version__ = "1.3a1"
|
Python
| 0
|
@@ -160,12 +160,12 @@
_ = %221.3
-a
+b
1%22%0A
|
f399f8e4ae3fde706a404a7e18d182cd605ea97a
|
revert the 2 hdmi inputs (only hdmi_in1 working???)
|
opsis_video.py
|
opsis_video.py
|
#!/usr/bin/env python3
from opsis_base import *
from litevideo.output import VideoOut
base_cls = MiniSoC
class VideoMixerSoC(base_cls):
csr_peripherals = (
"hdmi_out0",
"hdmi_out1"
)
csr_map_update(base_cls.csr_map, csr_peripherals)
def __init__(self, platform, **kwargs):
base_cls.__init__(self, platform, **kwargs)
# hdmi out 0
hdmi_out0_dram_port = self.sdram.crossbar.get_port(mode="read", dw=16, cd="hdmi_out0_pix", reverse=True)
self.submodules.hdmi_out0 = VideoOut(platform.device,
platform.request("hdmi_out", 0),
hdmi_out0_dram_port,
"ycbcr422")
# hdmi out 1 : Share clocking with hdmi_out0 since no PLL_ADV left.
hdmi_out1_dram_port = self.sdram.crossbar.get_port(mode="read", dw=16, cd="hdmi_out1_pix", reverse=True)
self.submodules.hdmi_out1 = VideoOut(platform.device,
platform.request("hdmi_out", 1),
hdmi_out1_dram_port,
"ycbcr422",
self.hdmi_out0.driver.clocking)
# all PLL_ADV are used: router needs help...
platform.add_platform_command("""INST PLL_ADV LOC=PLL_ADV_X0Y0;""")
# FIXME: Fix the HDMI out so this can be removed.
platform.add_platform_command(
"""PIN "hdmi_out_pix_bufg.O" CLOCK_DEDICATED_ROUTE = FALSE;""")
platform.add_platform_command(
"""PIN "hdmi_out_pix_bufg_1.O" CLOCK_DEDICATED_ROUTE = FALSE;""")
platform.add_platform_command(
"""
NET "{pix0_clk}" TNM_NET = "GRPpix0_clk";
NET "{pix1_clk}" TNM_NET = "GRPpix1_clk";
""",
pix0_clk=self.hdmi_out0.driver.clocking.cd_pix.clk,
pix1_clk=self.hdmi_out1.driver.clocking.cd_pix.clk,
)
self.platform.add_false_path_constraints(
self.crg.cd_sys.clk,
self.hdmi_out0.driver.clocking.cd_pix.clk,
self.hdmi_out1.driver.clocking.cd_pix.clk)
def main():
parser = argparse.ArgumentParser(description="Opsis LiteX SoC")
builder_args(parser)
soc_sdram_args(parser)
parser.add_argument("--nocompile-gateware", action="store_true")
args = parser.parse_args()
platform = opsis_platform.Platform()
soc = VideoMixerSoC(platform, **soc_sdram_argdict(args))
builder = Builder(soc, output_dir="build",
compile_gateware=not args.nocompile_gateware,
csr_csv="test/csr.csv")
vns = builder.build()
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -42,16 +42,51 @@
port *%0A%0A
+from litevideo.input import HDMIIn%0A
from lit
@@ -236,67 +236,290 @@
ut1%22
-%0A )%0A csr_map_update(base_cls.csr_map, csr_peripherals
+,%0A %22hdmi_in0%22,%0A %22hdmi_in0_edid_mem%22,%0A %22hdmi_in1%22,%0A %22hdmi_in1_edid_mem%22,%0A )%0A csr_map_update(base_cls.csr_map, csr_peripherals)%0A%0A interrupt_map = %7B%0A %22hdmi_in0%22: 3,%0A %22hdmi_in1%22: 4,%0A %7D%0A interrupt_map.update(base_cls.interrupt_map
)%0A%0A
@@ -628,19 +628,18 @@
# hdmi
-out
+in
0%0A
@@ -645,112 +645,476 @@
-hdmi_out0_dram_port = self.sdram.crossbar.get_port(mode=%22read%22, dw=16, cd=%22hdmi_out0_pix%22, reverse=True)
+self.submodules.hdmi_in0 = HDMIIn(platform.request(%22hdmi_in%22, 0),%0A self.sdram.crossbar.get_port(mode=%22write%22),%0A fifo_depth=512)%0A # hdmi in 1%0A self.submodules.hdmi_in1 = HDMIIn(platform.request(%22hdmi_in%22, 1),%0A self.sdram.crossbar.get_port(mode=%22write%22),%0A fifo_depth=512)%0A # hdmi out 0
%0A
@@ -1297,27 +1297,90 @@
-hdmi_out0_dram_port
+self.sdram.crossbar.get_port(mode=%22read%22, dw=16, cd=%22hdmi_out0_pix%22, reverse=True)
,%0A
@@ -1513,121 +1513,8 @@
ft.%0A
- hdmi_out1_dram_port = self.sdram.crossbar.get_port(mode=%22read%22, dw=16, cd=%22hdmi_out1_pix%22, reverse=True)%0A
@@ -1696,27 +1696,90 @@
-hdmi_out1_dram_port
+self.sdram.crossbar.get_port(mode=%22read%22, dw=16, cd=%22hdmi_out1_pix%22, reverse=True)
,%0A
|
5b270e2f541c763012e0acd4fa5e23933029b9db
|
Fix compatibility layer definition of std
|
numpy/oldnumeric/mlab.py
|
numpy/oldnumeric/mlab.py
|
# This module is for compatibility only. All functions are defined elsewhere.
__all__ = ['rand', 'tril', 'trapz', 'hanning', 'rot90', 'triu', 'diff', 'angle', 'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort', 'LinearAlgebra', 'RandomArray', 'prod', 'std', 'hamming', 'flipud', 'max', 'blackman', 'corrcoef', 'bartlett', 'eye', 'squeeze', 'sinc', 'tri', 'cov', 'svd', 'min', 'median', 'fliplr', 'eig', 'mean']
import numpy.oldnumeric.linear_algebra as LinearAlgebra
import numpy.oldnumeric.random_array as RandomArray
from numpy import tril, trapz as _Ntrapz, hanning, rot90, triu, diff, \
angle, roots, ptp as _Nptp, kaiser, cumprod as _Ncumprod, \
diag, msort, prod as _Nprod, std as _Nstd, hamming, flipud, \
amax as _Nmax, amin as _Nmin, blackman, bartlett, \
squeeze, sinc, median, fliplr, mean as _Nmean, transpose
from numpy.linalg import eig, svd
from numpy.random import rand, randn
import numpy as nn
from typeconv import convtypecode
def eye(N, M=None, k=0, typecode=None, dtype=None):
""" eye returns a N-by-M 2-d array where the k-th diagonal is all ones,
and everything else is zeros.
"""
dtype = convtypecode(typecode, dtype)
if M is None: M = N
m = nn.equal(nn.subtract.outer(nn.arange(N), nn.arange(M)),-k)
if m.dtype != dtype:
return m.astype(dtype)
def tri(N, M=None, k=0, typecode=None, dtype=None):
""" returns a N-by-M array where all the diagonals starting from
lower left corner up to the k-th are all ones.
"""
dtype = convtypecode(typecode, dtype)
if M is None: M = N
m = nn.greater_equal(nn.subtract.outer(nn.arange(N), nn.arange(M)),-k)
if m.dtype != dtype:
return m.astype(dtype)
def trapz(y, x=None, axis=-1):
return _Ntrapz(y, x, axis=axis)
def ptp(x, axis=0):
return _Nptp(x, axis)
def cumprod(x, axis=0):
return _Ncumprod(x, axis)
def max(x, axis=0):
return _Nmax(x, axis)
def min(x, axis=0):
return _Nmin(x, axis)
def prod(x, axis=0):
return _Nprod(x, axis)
def std(x, axis=0):
return _Nstd(x, axis)
def mean(x, axis=0):
return _Nmean(x, axis)
# This is exactly the same cov function as in MLab
def cov(m, y=None, rowvar=0, bias=0):
if y is None:
y = m
else:
y = y
if rowvar:
m = transpose(m)
y = transpose(y)
if (m.shape[0] == 1):
m = transpose(m)
if (y.shape[0] == 1):
y = transpose(y)
N = m.shape[0]
if (y.shape[0] != N):
raise ValueError, "x and y must have the same number "\
"of observations"
m = m - _Nmean(m,axis=0)
y = y - _Nmean(y,axis=0)
if bias:
fact = N*1.0
else:
fact = N-1.0
return squeeze(dot(transpose(m), conjugate(y)) / fact)
from numpy import sqrt, multiply
def corrcoef(x, y=None):
c = cov(x,y)
d = diag(c)
return c/sqrt(multiply.outer(d,d))
from compat import *
from functions import *
from precision import *
from ufuncs import *
from misc import *
import compat
import precision
import functions
import misc
import ufuncs
import numpy
__version__ = numpy.__version__
del numpy
__all__ += ['__version__']
__all__ += compat.__all__
__all__ += precision.__all__
__all__ += functions.__all__
__all__ += ufuncs.__all__
__all__ += misc.__all__
del compat
del functions
del precision
del ufuncs
del misc
|
Python
| 0.000011
|
@@ -2048,32 +2048,63 @@
std(x, axis=0):%0A
+ N = asarray(x).shape%5Baxis%5D%0A
return _Nstd
@@ -2108,24 +2108,39 @@
std(x, axis)
+*sqrt(N/(N-1.))
%0A%0Adef mean(x
|
8683400f5c76a5afd71655c67eef89e98b39c19c
|
make test runnable standalone
|
tests/test_issues/test_issue_26.py
|
tests/test_issues/test_issue_26.py
|
from radical.entk import Pipeline, Stage, Task, AppManager
from radical.entk import states
from radical.entk.exceptions import *
import pytest
import os
hostname = os.environ.get('RMQ_HOSTNAME','localhost')
port = int(os.environ.get('RMQ_PORT',5672))
# MLAB = 'mongodb://entk:entk123@ds143511.mlab.com:43511/entk_0_7_4_release'
MLAB = os.environ.get('RADICAL_PILOT_DBURL')
def test_issue_26():
def create_pipeline():
p = Pipeline()
s = Stage()
t1 = Task()
t1.name = 'simulation'
t1.executable = ['/bin/echo']
t1.arguments = ['hello']
t1.copy_input_data = []
t1.copy_output_data = []
s.add_tasks(t1)
p.add_stages(s)
return p
res_dict = {
'resource': 'local.localhost',
'walltime': 10,
'cpus': 1,
'project': ''
}
os.environ['RADICAL_PILOT_DBURL'] = MLAB
appman = AppManager(hostname=hostname, port=port, autoterminate=False)
appman.resource_desc = res_dict
p1 = create_pipeline()
appman.workflow = [p1]
appman.run()
print p1.uid, p1.stages[0].uid
p2 = create_pipeline()
appman.workflow = [p2]
appman.run()
print p2.uid, p2.stages[0].uid
appman.resource_terminate()
lhs = int(p1.stages[0].uid.split('.')[-1]) + 1
rhs = int(p2.stages[0].uid.split('.')[-1])
assert lhs == rhs
for t in p1.stages[0].tasks:
for tt in p2.stages[0].tasks:
lhs = int(t.uid.split('.')[-1]) + 1
rhs = int(tt.uid.split('.')[-1])
assert lhs == rhs
|
Python
| 0.000002
|
@@ -1,28 +1,51 @@
+#!/usr/bin/env python%0A%0A
from radical.entk import Pip
@@ -395,29 +395,200 @@
')%0A%0A
-def test_issue_26():%0A
+%0A# ------------------------------------------------------------------------------%0A#%0Adef test_issue_26():%0A%0A # --------------------------------------------------------------------------%0A #
%0A
@@ -622,16 +622,17 @@
p
+
= Pipeli
@@ -632,25 +632,24 @@
Pipeline()%0A
-%0A
s =
@@ -646,16 +646,17 @@
s
+
= Stage(
@@ -653,25 +653,24 @@
= Stage()%0A
-%0A
t1 =
@@ -693,16 +693,28 @@
t1.name
+
= 'simul
@@ -742,16 +742,22 @@
cutable
+
= %5B'/bin
@@ -785,16 +785,23 @@
guments
+
= %5B'hell
@@ -827,24 +827,25 @@
_input_data
+
= %5B%5D%0A
@@ -895,17 +895,16 @@
sks(t1)%0A
-%0A
@@ -956,17 +956,16 @@
ict = %7B%0A
-%0A
@@ -1045,16 +1045,20 @@
'cpus'
+
: 1,%0A
@@ -1075,16 +1075,17 @@
project'
+
: ''%0A%0A
@@ -1790,28 +1790,249 @@
assert lhs == rhs%0A
+%0A%0A# ------------------------------------------------------------------------------%0A#%0Aif __name__ == '__main__':%0A %0A test_issue_26()%0A%0A%0A# ------------------------------------------------------------------------------%0A%0A
|
b7fc3d9bd6b59b1278fb09f4b1e61ef67937a494
|
fix the old hdf5 files
|
scripts/FIX_OLD_RUN.py
|
scripts/FIX_OLD_RUN.py
|
""" Turn a catalog of photometry from PS1 into an HDF5 file """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
# Third-party
import astropy.coordinates as coord
from astropy import log as logger
import astropy.units as u
import numpy as np
import h5py
# Global configuration stuff
# HACK: should be able to set this at command line?
cluster_c = coord.SkyCoord(ra=229.352*u.degree,
dec=-21.01*u.degree)
cluster_pad = {
'inner': 0.08*u.degree,
'outer': 0.2*u.degree
}
def data_to_X_cov(data):
X = np.vstack([data['dered_{}'.format(band)] for band in 'griz']).T
Xerr = np.vstack([data['{}Err'.format(band)] for band in 'griz']).T
# mixing matrix W
W = np.array([[1, 0, 0, 0], # g magnitude
[1, -1, 0, 0], # g-r color
[1, 0, -1, 0], # g-i color
[1, 0, 0, -1]]) # g-z color
X = np.dot(X, W.T)
# compute error covariance with mixing matrix
Xcov = np.zeros(Xerr.shape + Xerr.shape[-1:])
Xcov[:, range(Xerr.shape[1]), range(Xerr.shape[1])] = Xerr ** 2
# each covariance C = WCW^T
Xcov = np.tensordot(np.dot(Xcov, W.T), W, (-2, -1))
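    # Illustrative check (hypothetical values): for a single star with
    # independent errors Cov = diag(sg**2, sr**2, si**2, sz**2), the product
    # W @ Cov @ W.T gives e.g. var(g - r) = sg**2 + sr**2 and
    # cov(g, g - r) = sg**2; the tensordot above computes exactly W C W^T
    # for every star at once.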
return X, Xcov
def main(XCov_filename, results_filename, ps1_filename):
# Load PS1 photometry
ps1 = np.load(ps1_filename)
ps1 = ps1[np.isfinite(ps1['dered_g']) & np.isfinite(ps1['dered_r']) &
np.isfinite(ps1['dered_i']) & np.isfinite(ps1['dered_z'])]
with h5py.File(results_filename, mode='r') as f:
ll = f['cluster_log_likelihood']
with h5py.File(XCov_filename, mode='r+') as f:
# feature and covariance matrices for all stars
g = f['all']
g.create_dataset('ra', ps1['ra'].shape, dtype='f', data=ps1['ra'])
g.create_dataset('dec', ps1['dec'].shape, dtype='f', data=ps1['dec'])
g.create_dataset('cluster_log_likelihood', ll.shape, dtype='f', data=ll)
# define coordinates object for all stars
ps1_c = coord.ICRS(ra=ps1['ra']*u.degree, dec=ps1['dec']*u.degree)
# feature and covariance matrices for cluster stars
cluster_idx = ps1_c.separation(cluster_c) < cluster_pad['inner']
# feature and covariance matrices for NON-cluster stars
g = f.create_group('noncluster')
ncX, ncCov = data_to_X_cov(ps1[~cluster_idx])
g.create_dataset('X', ncX.shape, dtype='f', data=ncX)
g.create_dataset('Cov', ncCov.shape, dtype='f', data=ncCov)
g.create_dataset('ra', ncX.shape[0:1], dtype='f', data=ps1[~cluster_idx]['ra'])
g.create_dataset('dec', ncX.shape[0:1], dtype='f', data=ps1[~cluster_idx]['dec'])
if __name__ == "__main__":
import sys
XCov_filename, results_filename, ps1_filename = sys.argv[1:]
main(XCov_filename, results_filename, ps1_filename)
|
Python
| 0.003354
|
@@ -1617,16 +1617,19 @@
lihood'%5D
+%5B:%5D
%0A%0A wi
@@ -1751,16 +1751,46 @@
%5B'all'%5D%0A
+ if 'ra' not in g:%0A
@@ -1848,32 +1848,63 @@
data=ps1%5B'ra'%5D)%0A
+ if 'dec' not in g:%0A
g.create
@@ -1961,24 +1961,74 @@
ps1%5B'dec'%5D)%0A
+ if 'cluster_log_likelihood' not in g:%0A
g.cr
@@ -2095,16 +2095,53 @@
data=ll)
+%0A print(%22added log likes%22)
%0A%0A
@@ -2461,24 +2461,26 @@
tars%0A
+ #
g = f.creat
@@ -2501,16 +2501,44 @@
uster')%0A
+ g = f%5B'noncluster'%5D%0A
@@ -2583,16 +2583,45 @@
r_idx%5D)%0A
+ if 'X' not in g:%0A
@@ -2674,32 +2674,36 @@
ta=ncX)%0A
+
+
g.create_dataset
@@ -2746,16 +2746,20 @@
=ncCov)%0A
+
@@ -2838,32 +2838,36 @@
%5B'ra'%5D)%0A
+
+
g.create_dataset
@@ -2931,16 +2931,270 @@
%5B'dec'%5D)
+%0A else:%0A g%5B'X'%5D%5B...%5D = ncX%0A print('x')%0A g%5B'Cov'%5D%5B...%5D = ncCov%0A print('cov')%0A g%5B'ra'%5D%5B:%5D = ps1%5B~cluster_idx%5D%5B'ra'%5D%0A g%5B'dec'%5D%5B:%5D = ps1%5B~cluster_idx%5D%5B'dec'%5D%0A print('ra dec')
%0A%0Aif __n
|
e5d13f315624be780fd60a04ff255f6682bdd84b
|
Update set_student_guardian.py
|
erpnext/patches/v7_1/set_student_guardian.py
|
erpnext/patches/v7_1/set_student_guardian.py
|
import frappe
def execute():
if frappe.db.exists("DocType", "Guardian"):
frappe.reload_doc("schools", "doctype", "student")
frappe.reload_doc("schools", "doctype", "student_guardian")
frappe.reload_doc("schools", "doctype", "student_sibling")
guardian = frappe.get_all("Guardian", fields=["name", "student"])
for d in guardian:
if d.student:
student = frappe.get_doc("Student", d.student)
if student:
student.append("guardians", {"guardian": d.name})
student.save()
|
Python
| 0.000003
|
@@ -244,16 +244,89 @@
bling%22)%0A
+%09%09if %22student%22 not in frappe.db.get_table_columns(%22Guardian%22):%0A%09%09%09return%0A
%09%09guardi
@@ -564,8 +564,9 @@
t.save()
+%0A
|
1b13a1da1b483d8f31fb1f694a7c0236ce01befa
|
update stats widget
|
lase/gui/stats_widget.py
|
lase/gui/stats_widget.py
|
# -*- coding: utf-8 -*-
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
class StatsWidget(QtGui.QWidget):
def __init__(self, driver):
super(StatsWidget, self).__init__()
self.driver = driver
self.layout = QtGui.QHBoxLayout()
self.mean_labels = []
self.ampl_labels = []
self.n_channels = 2
for i in range(self.n_channels+1):
self.mean_labels.append(QtGui.QLabel(''))
self.ampl_labels.append(QtGui.QLabel(''))
self.mean_labels[i].setAlignment(QtCore.Qt.AlignCenter)
self.ampl_labels[i].setAlignment(QtCore.Qt.AlignCenter)
self.mean_labels[0].setText('Average')
self.ampl_labels[0].setText('Amplitude')
self.boxes = []
self.box_names = ['Measures', 'ADC 1', 'ADC 2']
for i in range(self.n_channels+1):
self.boxes.append(QtGui.QGroupBox(self.box_names[i]))
self.boxes[i].setAlignment(5)
self.name_layout = QtGui.QVBoxLayout()
self.name_layout.addWidget(self.mean_labels[0])
self.name_layout.addWidget(self.ampl_labels[0])
self.boxes[0].setLayout(self.name_layout)
self.adc_1_layout = QtGui.QVBoxLayout()
self.adc_1_layout.addWidget(self.mean_labels[1])
self.adc_1_layout.addWidget(self.ampl_labels[1])
self.boxes[1].setLayout(self.adc_1_layout)
self.adc_2_layout = QtGui.QVBoxLayout()
self.adc_2_layout.addWidget(self.mean_labels[2])
self.adc_2_layout.addWidget(self.ampl_labels[2])
self.boxes[2].setLayout(self.adc_2_layout)
for i in range(self.n_channels+1):
self.layout.addWidget(self.boxes[i])
def update(self):
for i in range(self.n_channels):
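            # Presumably chosen to keep label width stable: fixed-point
            # formatting for mid-range magnitudes, scientific notation
            # otherwise.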
if 1e-2 < np.abs(np.mean(self.driver.adc[i,:])) < 1e3:
mean_text = '{:.2f}'.format(np.mean(self.driver.adc[i,:]))
else:
mean_text = '%.2e'%(np.mean(self.driver.adc[i,:]))
self.mean_labels[i+1].setText(mean_text)
if 1e-2 < np.abs(np.max(self.driver.adc[i,:])-np.min(self.driver.adc[i,:])) < 1e3:
ampl_text = '{:.2f}'.format(np.max(self.driver.adc[i,:])-np.min(self.driver.adc[i,:]))
else:
ampl_text = '%.2e'%(np.max(self.driver.adc[i,:])-np.min(self.driver.adc[i,:]))
self.ampl_labels[i+1].setText(ampl_text)
|
Python
| 0.000001
|
@@ -1945,33 +1945,33 @@
mean_text = '%25.
-2
+4
e'%25(np.mean(self
@@ -2289,17 +2289,17 @@
xt = '%25.
-2
+4
e'%25(np.m
|
28f5ae9d527dc043290cbd03fd68a2c2ee6ff519
|
use -j 6 with mingw too
|
bin/scripts/updateoffi.py
|
bin/scripts/updateoffi.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Romain Boman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import input
import os, shutil, platform, subprocess, multiprocessing
import pytools.utils as pu
import pytools.versioning as vrs
def guessSystem():
guesses = []
import platform
system, node, release, version, machine, processor = platform.uname()
# machine name
machine_name = node.split('.')[0].split('-')[0].lower()
print('machine_name =', machine_name)
guesses.append(machine_name)
# robo's libs
#print os.environ['MYLOCAL']
if 'MYLOCAL' in os.environ:
guesses.append('garfield')
# system name
print('system =', system)
if system=='Darwin':
mac_release, mac_versioninfo, mac_machine = platform.mac_ver()
print('\tmac_release =', mac_release)
print('\tmac_versioninfo =', mac_versioninfo)
print('\tmac_machine =', mac_machine)
guesses.append('macos')
if system=='Linux':
lin_distname, lin_version, lin_id = platform.linux_distribution()
print('\tlin_distname =', lin_distname)
print('\tlin_version =', lin_version)
print('\tlin_id =', lin_id)
guesses.append(lin_distname.lower())
if system=='Windows':
win_release, win_version, win_csd, win_ptype = platform.win32_ver()
print('\twin_release =', win_release)
print('\twin_version =', win_version)
print('\twin_csd =', win_csd)
print('\twin_ptype =', win_ptype)
guesses.append(system.lower())
guesses = list(set(guesses)) # remove duplicates
return guesses
def chooseCfg():
guesses = guessSystem()
#print guesses
print('current working dir=', os.getcwd())
avfiles = os.listdir(os.path.join('oo_meta','CMake'))
#print avfiles
cfiles=[]
for g in guesses:
for avf in avfiles:
#print "testing", avf, g
if avf.find(g)!=-1:
cfiles.append(avf)
#print cfiles
# ask
print('Choose a config file:')
for i, cf in enumerate(cfiles):
print('\t%d: %s' % (i+1, cf))
ii = input('? ')
return cfiles[int(ii)-1]
def main(repos, opts):
# checkout/update everything
build_required = False
for rep in repos:
outdated = rep.outdated()
print(rep.name, ": outdated =", outdated)
if outdated:
build_required = True
if not os.path.isdir('oo_metaB'):
print('oo_metaB folder is missing!')
build_required = True
if not build_required:
print('=> build is NOT required')
print('do you want to force the build (y/[n])?')
c = pu.getch()
if c=='y' or c=='Y':
build_required=True
else:
print('=> build is required')
if build_required:
# update
for rep in repos:
rep.update()
cfg = chooseCfg() # requires oo_meta to be checked out!
# clean build dir
if os.path.isdir('oo_metaB'):
print('removing build dir')
# http://stackoverflow.com/questions/16373747/permission-denied-doing-os-mkdird-after-running-shutil-rmtreed-in-python
            os.rename('oo_metaB','oo_metaB_trash') # avoid the failure of os.mkdir() if the same name is used
shutil.rmtree('oo_metaB_trash')
# create folder
os.mkdir('oo_metaB') # could fail (access denied) on Windows:
pu.chDir('oo_metaB')
# cmake
cmd = ['cmake', '-C', os.path.join('..','oo_meta','CMake',cfg), os.path.join('..','oo_meta') ]
subprocess.call(cmd)
"""
if pu.isUnix():
cmd='cmake -C ../oo_meta/CMake/%s ../oo_meta' %cfg
else:
cmd=r'cmake -C ..\oo_meta\CMake\%s ..\oo_meta' %cfg
os.system(cmd)
"""
# build
if pu.isInstalled("BuildConsole") and os.path.isfile('Metafor.sln'):
print("[using incredibuild]")
cmd = ['BuildConsole', 'Metafor.sln', '/rebuild', '/cfg=Release|x64']
subprocess.call(cmd)
#os.system('BuildConsole Metafor.sln /rebuild /cfg="Release|x64"')
else:
ncores = multiprocessing.cpu_count()
print("[using cmake --build] with %d core(s)" % ncores)
cmd = ['cmake', '--build', '.', '--config', 'Release']
if pu.isUnix():
cmd.extend([ '--', '-j%d' % ncores])
subprocess.call(cmd)
if __name__ == "__main__":
opts = {
'build_type' : {
'type': 'combo',
'value': 'full',
'values': ['full', 'student']
},
}
repos = []
repos.append(vrs.GITRepo('MetaforSetup', 'git@gitlab.uliege.be:am-dept/MN2L/MetaforSetup.git'))
repos.append(vrs.GITRepo('linuxbin', 'git@github.com:ulgltas/linuxbin.git'))
repos.append(vrs.SVNRepo('oo_meta', 'svn+ssh://boman@blueberry.ltas.ulg.ac.be/home/metafor/SVN/oo_meta/trunk'))
if opts['build_type']['value']=='full':
repos.append(vrs.SVNRepo('oo_nda', 'svn+ssh://boman@blueberry.ltas.ulg.ac.be/home/metafor/SVN/oo_nda/trunk'))
repos.append(vrs.GITRepo('parasolid', 'git@gitlab.uliege.be:am-dept/MN2L/parasolid.git'))
repos.append(vrs.GITRepo('keygen', 'git@gitlab.uliege.be:am-dept/MN2L/keygen.git'))
main(repos, opts)
|
Python
| 0
|
@@ -4974,34 +4974,49 @@
if
-pu.isUnix(
+os.path.isfile('Makefile'
):%0A
|
9986da1599e5beaaad49d389b2a2ee3d4f308991
|
fix https://github.com/BackofenLab/AlgoDat/issues/61
|
Lecture-4/Code/AssociativeArray.py
|
Lecture-4/Code/AssociativeArray.py
|
# creates a new map (called dictionary)
countries = {"DE" : "Deutschland", \
"EN" : "England"}
# check if element exists
if "EN" in countries:
print("Found %s!" % countries["EN"])
# map key "DE" to value 0
countries["DE"] = "Germany"
# delete key "DE"
del countries["DE"]
|
Python
| 0
|
@@ -213,9 +213,17 @@
lue
-0
+%22Germany%22
%0D%0Aco
|
f98ef68949b8875daeb5b8346a65a842a682a5df
|
replace linebreaks in version strings
|
nvchecker/get_version.py
|
nvchecker/get_version.py
|
# MIT licensed
# Copyright (c) 2013-2017 lilydjwg <lilydjwg@gmail.com>, et al.
import logging
from importlib import import_module
logger = logging.getLogger(__name__)
handler_precedence = (
'github', 'aur', 'pypi', 'archpkg', 'debianpkg', 'ubuntupkg',
'gems', 'pacman',
'cmd', 'bitbucket', 'regex', 'manual', 'vcs',
'cratesio', 'npm', 'hackage', 'cpan', 'gitlab', 'packagist',
'anitya',
)
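# Illustrative dispatch (hypothetical config entry): for a section like
# conf = {'github': 'owner/repo'}, 'github' is the first key of
# handler_precedence present in conf, so get_version() below resolves to
# nvchecker.source.github.get_version(name, conf).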
async def get_version(name, conf):
for key in handler_precedence:
if key in conf:
func = import_module('.source.' + key, __package__).get_version
return await func(name, conf)
else:
logger.error('%s: no idea to get version info.', name)
|
Python
| 0.000002
|
@@ -563,14 +563,17 @@
-return
+version =
awa
@@ -592,16 +592,56 @@
, conf)%0A
+ return version.replace('%5Cn', ' ')%0A
else:%0A
|
11c81ff161cad04a24a8746b93d3afb64e6a5b49
|
fix typo
|
python/example_code/ec2/describe_subnets.py
|
python/example_code/ec2/describe_subnets.py
|
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
ec2 = boto3.client('ec2')
response = ec2.describe_subnets()
print(response)
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[describe_subnets.py demonstrates how to describe describe one or more of your Subnets.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon EC2]
# snippet-service:[ec2]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-11-13]
# snippet-sourceauthor:[nprajilesh]
|
Python
| 0.999991
|
@@ -782,25 +782,16 @@
escribe
-describe
one or m
|
50a0d32b880061fd6f85ec3f3ffe540bcead19e9
|
rename test pillow member
|
corehq/form_processor/tests/test_kafka.py
|
corehq/form_processor/tests/test_kafka.py
|
import uuid
from django.test import TestCase, override_settings
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.consumer.feed import change_meta_from_kafka_message, KafkaChangeFeed
from corehq.apps.change_feed.tests.utils import get_test_kafka_consumer
from corehq.apps.commtrack.helpers import make_product
from corehq.apps.commtrack.tests.util import get_single_balance_block
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.apps.receiverwrapper import submit_form_locally
from corehq.form_processor.interfaces.dbaccessors import FormAccessors, CaseAccessors
from corehq.form_processor.tests.utils import FormProcessorTestUtils, run_with_all_backends
from corehq.form_processor.utils import get_simple_form_xml
from corehq.util.test_utils import OverridableSettingsTestMixin, create_and_save_a_case, create_and_save_a_form
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processors.sample import TestProcessor
from testapps.test_pillowtop.utils import process_kafka_changes, process_couch_changes
@override_settings(TESTS_SHOULD_USE_SQL_BACKEND=True)
class KafkaPublishingTest(OverridableSettingsTestMixin, TestCase):
domain = 'kafka-publishing-test'
def setUp(self):
super(KafkaPublishingTest, self).setUp()
FormProcessorTestUtils.delete_all_xforms()
FormProcessorTestUtils.delete_all_cases()
self.form_accessors = FormAccessors(domain=self.domain)
self.processor = TestProcessor()
self.pillow = ConstructedPillow(
name='test-kafka-form-feed',
checkpoint=None,
change_feed=KafkaChangeFeed(topics=[topics.FORM, topics.FORM_SQL], group_id='test-kafka-form-feed'),
processor=self.processor
)
@run_with_all_backends
def test_form_is_published(self):
with process_kafka_changes(self.pillow):
with process_couch_changes('DefaultChangeFeedPillow'):
form = create_and_save_a_form(self.domain)
self.assertEqual(1, len(self.processor.changes_seen))
change_meta = self.processor.changes_seen[0].metadata
self.assertEqual(form.form_id, change_meta.document_id)
self.assertEqual(self.domain, change_meta.domain)
@run_with_all_backends
def test_duplicate_form_published(self):
form_id = uuid.uuid4().hex
form_xml = get_simple_form_xml(form_id)
orig_form = submit_form_locally(form_xml, domain=self.domain)[1]
self.assertEqual(form_id, orig_form.form_id)
self.assertEqual(1, len(self.form_accessors.get_all_form_ids_in_domain()))
with process_kafka_changes(self.pillow):
with process_couch_changes('DefaultChangeFeedPillow'):
# post an exact duplicate
dupe_form = submit_form_locally(form_xml, domain=self.domain)[1]
self.assertTrue(dupe_form.is_duplicate)
self.assertNotEqual(form_id, dupe_form.form_id)
self.assertEqual(form_id, dupe_form.orig_id)
# make sure changes made it to kafka
self.assertEqual(2, len(self.processor.changes_seen))
dupe_form_meta = self.processor.changes_seen[0].metadata
self.assertEqual(dupe_form.form_id, dupe_form_meta.document_id)
# then the original form
orig_form_meta = self.processor.changes_seen[1].metadata
self.assertEqual(orig_form.form_id, orig_form_meta.document_id)
self.assertEqual(self.domain, orig_form_meta.domain)
def test_case_is_published(self):
kafka_consumer = get_test_kafka_consumer(topics.CASE_SQL)
case = create_and_save_a_case(self.domain, case_id=uuid.uuid4().hex, case_name='test case')
change_meta = change_meta_from_kafka_message(kafka_consumer.next().value)
self.assertEqual(case.case_id, change_meta.document_id)
self.assertEqual(self.domain, change_meta.domain)
def test_duplicate_case_published(self):
case_id = uuid.uuid4().hex
form_xml = get_simple_form_xml(uuid.uuid4().hex, case_id)
submit_form_locally(form_xml, domain=self.domain)[1]
self.assertEqual(1, len(CaseAccessors(self.domain).get_case_ids_in_domain()))
case_consumer = get_test_kafka_consumer(topics.CASE_SQL)
dupe_form = submit_form_locally(form_xml, domain=self.domain)[1]
self.assertTrue(dupe_form.is_duplicate)
# check the case was republished
case_meta = change_meta_from_kafka_message(case_consumer.next().value)
self.assertEqual(case_id, case_meta.document_id)
self.assertEqual(self.domain, case_meta.domain)
def test_duplicate_ledger_published(self):
# setup products and case
product_a = make_product(self.domain, 'A Product', 'prodcode_a')
product_b = make_product(self.domain, 'B Product', 'prodcode_b')
case_id = uuid.uuid4().hex
form_xml = get_simple_form_xml(uuid.uuid4().hex, case_id)
submit_form_locally(form_xml, domain=self.domain)[1]
# submit ledger data
balances = (
(product_a._id, 100),
(product_b._id, 50),
)
ledger_blocks = [
get_single_balance_block(case_id, prod_id, balance)
for prod_id, balance in balances
]
form = submit_case_blocks(ledger_blocks, self.domain)
# submit duplicate
ledger_consumer = get_test_kafka_consumer(topics.LEDGER)
dupe_form = submit_form_locally(form.get_xml(), domain=self.domain)[1]
self.assertTrue(dupe_form.is_duplicate)
# confirm republished
ledger_meta_a = change_meta_from_kafka_message(ledger_consumer.next().value)
ledger_meta_b = change_meta_from_kafka_message(ledger_consumer.next().value)
format_id = lambda product_id: '{}/{}/{}'.format(case_id, 'stock', product_id)
expected_ids = {format_id(product_a._id), format_id(product_b._id)}
for meta in [ledger_meta_a, ledger_meta_b]:
self.assertTrue(meta.document_id in expected_ids)
expected_ids.remove(meta.document_id)
self.assertEqual(self.domain, meta.domain)
# cleanup
product_a.delete()
product_b.delete()
|
Python
| 0.000001
|
@@ -1506,24 +1506,29 @@
self.
+form_
pillow = Con
@@ -1871,32 +1871,37 @@
ka_changes(self.
+form_
pillow):%0A
@@ -2672,16 +2672,21 @@
es(self.
+form_
pillow):
|
178de6396dade359a3f8e137d4a3449155b88b89
|
Add schema for workflow input with default value support
|
mistral/workbook/types.py
|
mistral/workbook/types.py
|
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NONEMPTY_STRING = {
"type": "string",
"minLength": 1
}
UNIQUE_STRING_LIST = {
"type": "array",
"items": NONEMPTY_STRING,
"uniqueItems": True,
"minItems": 1
}
POSITIVE_INTEGER = {
"type": "integer",
"minimum": 0
}
POSITIVE_NUMBER = {
"type": "number",
"minimum": 0.0
}
YAQL = {
"type": "string",
"pattern": "^<%.*?%>\\s*$"
}
YAQL_CONDITION = {
"type": "object",
"minProperties": 1,
"patternProperties": {
"^\w+$": YAQL
}
}
ANY = {
"anyOf": [
{"type": "array"},
{"type": "boolean"},
{"type": "integer"},
{"type": "number"},
{"type": "object"},
{"type": "string"},
YAQL
]
}
ANY_NULLABLE = {
"anyOf": [
{"type": "null"},
{"type": "array"},
{"type": "boolean"},
{"type": "integer"},
{"type": "number"},
{"type": "object"},
{"type": "string"},
YAQL
]
}
NONEMPTY_DICT = {
"type": "object",
"minProperties": 1,
"patternProperties": {
"^\w+$": ANY_NULLABLE
}
}
STRING_OR_YAQL_CONDITION = {
"oneOf": [
NONEMPTY_STRING,
YAQL_CONDITION
]
}
UNIQUE_STRING_OR_YAQL_CONDITION_LIST = {
"type": "array",
"items": STRING_OR_YAQL_CONDITION,
"uniqueItems": True,
"minItems": 1
}
VERSION = {
"anyOf": [
NONEMPTY_STRING,
POSITIVE_INTEGER,
POSITIVE_NUMBER
]
}
WORKFLOW_TYPE = {
"enum": ["reverse", "direct"]
}
|
Python
| 0.999965
|
@@ -1696,16 +1696,169 @@
%7D%0A%7D%0A%0A
+ONE_KEY_DICT = %7B%0A %22type%22: %22object%22,%0A %22minProperties%22: 1,%0A %22maxProperties%22: 1,%0A %22patternProperties%22: %7B%0A %22%5E%5Cw+$%22: ANY_NULLABLE%0A %7D%0A%7D%0A%0A
STRING_O
@@ -2258,12 +2258,252 @@
%22direct%22%5D%0A%7D%0A
+%0ASTRING_OR_ONE_KEY_DICT = %7B%0A %22oneOf%22: %5B%0A NONEMPTY_STRING,%0A ONE_KEY_DICT%0A %5D%0A%7D%0A%0AUNIQUE_STRING_OR_ONE_KEY_DICT_LIST = %7B%0A %22type%22: %22array%22,%0A %22items%22: STRING_OR_ONE_KEY_DICT,%0A %22uniqueItems%22: True,%0A %22minItems%22: 1%0A%7D%0A
|
c9d86a07ca35f259d819b6d0940294c66b00fa4c
|
increase cache size
|
originality.py
|
originality.py
|
# System
"""Originality Checking."""
import logging
import functools
from threading import Lock
# Third Party
from scipy.stats import ks_2samp
from scipy.stats.stats import pearsonr
import numpy as np
import pandas as pd
from bson.objectid import ObjectId
lock = Lock()
@functools.lru_cache(maxsize=512)
def get_submission(db_manager, filemanager, submission_id):
"""Gets the submission file from S3
Parameters:
-----------
db_manager: DatabaseManager
MongoDB data access object that has read and write functions to NoSQL DB
filemanager: FileManager
S3 Bucket data access object for querying competition datasets
submission_id : string
The ID of the submission
Returns:
--------
submission : ndarray
Array of the submission probabilities sorted by ID
"""
if not submission_id:
return None
s3_filename = db_manager.get_filename(submission_id)
try:
local_files = filemanager.download([s3_filename])
if len(local_files) != 1:
logging.getLogger().info("Error looking for submission {}, found files".format(submission_id, local_files))
return None
local_file = local_files[0]
except Exception as e:
logging.getLogger().info("Could not get submission {}".format(submission_id))
return None
df = pd.read_csv(local_file)
df.sort_values("id", inplace=True)
df = df["probability"]
return df.as_matrix()
def original(submission1, submission2, threshold=0.05):
"""Determines if two submissions are original
    Parameters:
----------
submission1, submission2 : 1-D ndarrays
Submission arrays that will be used in the Kolmogorov-Smirnov statistic
threshold : float, optional, default: 0.05
        threshold that the originality_score must exceed for the submission to be considered "original"
Returns:
--------
original : bool
boolean value that indicates if a submission is original
"""
score = originality_score(submission1, submission2)
return score > threshold
# this function is taken from scipy (ks_2samp) and modified and so falls
# under their BSD license
def originality_score(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Warning: data1 is assumed sorted in ascending order.
Parameters
----------
data1, data2 : ndarray
Two arrays of sample observations assumed to be drawn from a
continuous distribution. Arrays must be of the same size. data1 is
assumed sorted in ascending order.
Returns
-------
statistic : float
KS statistic
"""
# data1 is assumed sorted in ascending order
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
if n1 != n2:
raise ValueError("`data1` and `data2` must have the same length")
# the following commented out line is slower than the two after it
# cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
cdf1 = np.searchsorted(data1, data2, side='right')
cdf1 = np.concatenate((np.arange(n1) + 1, cdf1)) / (1.0*n1)
# the following commented out line is slower than the two after it
# cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
cdf2 = np.searchsorted(data2, data1, side='right')
cdf2 = np.concatenate((cdf2, np.arange(n1) + 1)) / (1.0*n2)
d = np.max(np.absolute(cdf1 - cdf2))
return d
def is_almost_unique(submission_data, submission, db_manager, filemanager, is_exact_dupe_thresh, is_similar_thresh, max_similar_models):
"""Determines how similar/exact a submission is to all other submission for the competition round
Paramters:
----------
submission_data : dictionary
Submission metadata containing the submission_id and the user associated to the submission
submission : ndarray
Submission data that contains the probabilities for the competition data
db_manager : DatabaseManager
MongoDB data access object that has read and write functions to NoSQL DB
filemanager : FileManager
S3 Bucket data access object for querying competition datasets
is_exact_dupe_thresh :
        Threshold for determining if a submission is an exact duplicate of another submission
is_similar_thresh :
Similarity threshold that determines if a submission is too similar and counts against the submissions originality
max_similar_models :
        The max number of models that a submission is allowed to be similar to
Returns:
--------
bool
Whether the submission data is considered to be original or not
"""
num_similar_models = 0
is_original = True
similar_models = []
is_not_a_constant = np.std(submission) > 0
date_created = db_manager.get_date_created(submission_data['submission_id'])
sorted_submission = np.sort(submission)
for user_sub in db_manager.get_everyone_elses_recent_submssions(submission_data['competition_id'], submission_data['user'], date_created):
with lock:
other_submission = get_submission(db_manager, filemanager, user_sub["submission_id"])
if other_submission is None:
continue
score = originality_score(sorted_submission, other_submission)
        if is_not_a_constant and np.std(other_submission) > 0:
correlation = pearsonr(submission, other_submission)[0]
if np.abs(correlation) > 0.95:
logging.getLogger().info("Found a highly correlated submission {} with score {}".format(user_sub["submission_id"], correlation))
is_original = False
break
if score < is_exact_dupe_thresh:
logging.getLogger().info("Found a duplicate submission {} with score {}".format(user_sub["submission_id"], score))
is_original = False
break
if score <= is_similar_thresh:
num_similar_models += 1
similar_models.append(user_sub["submission_id"])
if num_similar_models >= max_similar_models:
logging.getLogger().info("Found too many similar models. Similar models were {}".format(similar_models))
is_original = False
break
return is_original
def submission_originality(submission_data, db_manager, filemanager):
"""Pulls submission data from MongoDB and determines the originality score and will update the submissions originality score
This checks a few things
1. If the current submission is similar to the previous submission, we give it the same originality score
    2. Otherwise, we check that it is sufficiently unique. To check this we see if it is A. Almost identical to
any other submission or B. Very similar to a handful of other models.
Parameters:
-----------
submission_data : dictionary
Metadata about the submission pulled from the queue
db_manager : DatabaseManager
MongoDB data access object that has read and write functions to NoSQL DB
filemanager : FileManager
S3 Bucket data access object for querying competition datasets
"""
s = db_manager.db.submissions.find_one({'_id':ObjectId(submission_data['submission_id'])})
submission_data['user'] = s['username']
submission_data['competition_id'] = s['competition_id']
logging.getLogger().info("Scoring {} {}".format(submission_data['user'], submission_data['submission_id']))
with lock:
submission = get_submission(db_manager, filemanager, submission_data['submission_id'])
if submission is None:
logging.getLogger().info("Couldn't find {} {}".format(submission_data['user'], submission_data['submission_id']))
return
is_exact_dupe_thresh = 0.005
is_similar_thresh = 0.03
max_similar_models = 1
is_original = is_almost_unique(submission_data, submission, db_manager, filemanager, is_exact_dupe_thresh, is_similar_thresh, max_similar_models)
db_manager.write_originality(submission_data['submission_id'], submission_data['competition_id'], is_original)
|
Python
| 0.000001
|
@@ -299,11 +299,12 @@
ize=
-512
+2048
)%0Ade
|
0b93f9e8557409ff8c3abadcaad674e6eac48e95
|
test uses unicode rather than ascii for py3
|
blaze/tests/test_bcolz.py
|
blaze/tests/test_bcolz.py
|
from __future__ import absolute_import, division, print_function
import pytest
bcolz = pytest.importorskip('bcolz')
import numpy as np
from pandas import DataFrame
from toolz import count
import os
from datashape import discover, dshape
from collections import Iterator
from blaze.bcolz import into, chunks, resource
from blaze.utils import tmpfile
b = bcolz.ctable([[1, 2, 3],
[1., 2., 3.]],
names=['a', 'b'])
def test_into_ndarray_ctable():
assert str(into(np.ndarray, b)) == \
str(np.array([(1, 1.), (2, 2.), (3, 3.)],
dtype=[('a', int), ('b', float)]))
def test_into_ctable_numpy():
assert str(into(bcolz.ctable, np.array([(1, 1.), (2, 2.), (3, 3.)],
dtype=[('a', np.int32), ('b', np.float32)]))) == \
str(bcolz.ctable([np.array([1, 2, 3], dtype=np.int32),
np.array([1., 2., 3.], dtype=np.float32)],
names=['a', 'b']))
def test_into_ctable_DataFrame():
df = DataFrame([[1, 'Alice'],
[2, 'Bob'],
[3, 'Charlie']], columns=['id', 'name'])
ds = dshape('var * {id: int32, name: string[7, "ascii"]}')
b = into(bcolz.ctable, df, dshape=ds)
assert list(b.names) == list(df.columns)
assert list(b['id']) == [1, 2, 3]
assert list(b['name']) == ['Alice', 'Bob', 'Charlie']
assert discover(b).measure == ds.measure
def test_into_ctable_list():
b = into(bcolz.ctable, [(1, 1.), (2, 2.), (3, 3.)], names=['a', 'b'])
assert list(b['a']) == [1, 2, 3]
assert b.names == ['a', 'b']
def test_into_ctable_list_datetimes():
from datetime import datetime
L = [datetime(2012, 1, 1), datetime(2013, 2, 2)]
b = into(bcolz.carray, L)
assert np.issubdtype(b.dtype, np.datetime64)
assert list(into(Iterator, b)) == L
def test_into_ctable_iterator():
b = into(bcolz.ctable, iter([(1, 1.), (2, 2.), (3, 3.)]), names=['a', 'b'])
assert list(b['a']) == [1, 2, 3]
assert b.names == ['a', 'b']
def test_into_ndarray_carray():
assert str(into(np.ndarray, b['a'])) == \
str(np.array([1, 2, 3]))
def test_into_list_ctable():
assert into([], b) == [(1, 1.), (2, 2.), (3, 3.)]
def test_into_DataFrame_ctable():
result = into(DataFrame(), b)
expected = DataFrame([[1, 1.], [2, 2.], [3, 3.]], columns=['a', 'b'])
assert str(result) == str(expected)
def test_into_list_carray():
assert into([], b['a']) == [1, 2, 3]
def test_chunks():
x = np.array([(int(i), float(i)) for i in range(100)],
dtype=[('a', np.int32), ('b', np.float32)])
b = bcolz.ctable(x)
assert count(chunks(b, chunksize=10)) == 10
assert (next(chunks(b, chunksize=10)) == x[:10]).all()
def test_into_chunks():
from blaze.compute.numpy import chunks, compute_up
from blaze.compute.chunks import chunks, compute_up, ChunkIterator
from blaze import into
x = np.array([(int(i), float(i)) for i in range(100)],
dtype=[('a', np.int32), ('b', np.float32)])
cs = chunks(x, chunksize=10)
b1 = into(bcolz.ctable, ChunkIterator(cs))
b2 = into(bcolz.ctable, x)
assert str(b1) == str(b2)
def test_resource():
f = None
with tmpfile('.bcolz') as filename:
f = filename
bcolz.ctable(rootdir=f, columns=[[1, 2, 3], [1., 2., 3.]], names=['a', 'b'])
bc2 = resource(f)
assert isinstance(bc2, bcolz.ctable)
try:
os.remove(f)
except OSError:
pass
def test_resource_works_with_empty_file():
f = None
with tmpfile('.bcolz') as filename:
f = filename
bc = resource(f, dshape=dshape('{a: int32, b: float64}'))
assert len(bc) == 0
assert discover(bc).measure == dshape('{a: int32, b: float64}').measure
try:
os.remove(f)
except OSError:
pass
def test_into_bcolz_from_many_numpy_arrays():
x = np.array([(1, 'Hello'), (2, 'abc')],
dtype=[('num', 'i4'), ('name', 'S5')])
b = into(bcolz.ctable, x)
assert len(b) == len(x)
b = into(b, x)
assert len(b) == 2 * len(x)
|
Python
| 0.000315
|
@@ -1209,13 +1209,11 @@
7, %22
-ascii
+U32
%22%5D%7D'
|
27748acb4eb9b3b9c4e996693f81ba7c5d16be12
|
add works
|
tests/trashtest/trashtest_utils.py
|
tests/trashtest/trashtest_utils.py
|
'''
Scratchpad for test-based development.
LICENSING
-------------------------------------------------
hypergolix: A python Golix client.
Copyright (C) 2016 Muterra, Inc.
Contributors
------------
Nick Badger
badg@muterra.io | badg@nickbadger.com | nickbadger.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the
Free Software Foundation, Inc.,
51 Franklin Street,
Fifth Floor,
Boston, MA 02110-1301 USA
------------------------------------------------------
'''
import unittest
import weakref
import gc
# These imports fall within the scope of testing.
from hypergolix.utils import _WeakSet
from hypergolix.utils import SetMap
from hypergolix.utils import WeakSetMap
# ###############################################
# Fixtures
# ###############################################
class Refferee:
''' Trivial class that supports both hashing and weak references.
'''
# ###############################################
# Testing
# ###############################################
class _WeakSetTest(unittest.TestCase):
''' Test everything about a _WeakSet.
'''
def test_make(self):
obj1 = Refferee()
obj2 = Refferee()
obj3 = Refferee()
obj4 = Refferee()
obj5 = Refferee()
west0 = weakref.WeakSet((obj1, obj2, obj3, obj4, obj5))
west1 = _WeakSet()
west2 = _WeakSet((obj1, obj2, obj3, obj4, obj5))
for obj in west0:
self.assertIn(obj, west2)
self.assertNotIn(obj, gc.get_referents(west1))
self.assertNotIn(obj, gc.get_referents(west2))
def test_iter(self):
''' Here we want to test three things:
1. that iteration works
2. that iteration correctly defers removal until after iteration
3. that removal occurs immediately after iteration
'''
objs = [Refferee() for __ in range(10)]
objrefs = [weakref.ref(obj) for obj in objs]
west1 = _WeakSet(objs)
# Does iteration work?
for obj1 in west1:
self.assertIn(obj1, objs)
# Does iteration defer removal until after iteration?
referents_before = len(gc.get_referents(west1))
for ii, obj1 in enumerate(west1):
# Delete the first member.
if ii == 0:
# Remove the only strong reference to objs[0] (which is not
# necessarily the same as the obj1 from enumerate, because
                # set iteration order is undefined)
del objs[0]
# Make sure the reference persists
self.assertEqual(
referents_before,
len(gc.get_referents(west1))
)
# Wait until we get to the next object to make sure the reference
# is actually dead, just in case we were enumerating over the same
# object -- in which case, obj1 would hold a strong reference. For
# good measure, do an explicit GC call first.
elif ii == 1:
gc.collect()
self.assertIsNone(objrefs[0]())
# Does removal occur immediately after iteration?
self.assertEqual(
referents_before - 1,
len(gc.get_referents(west1))
)
def test_contains(self):
''' Here we want to test one quick thing: that contains works
with objects.
'''
objs = [Refferee() for __ in range(10)]
west1 = _WeakSet(objs)
self.assertIn(objs[0], west1)
if __name__ == "__main__":
from hypergolix import logutils
logutils.autoconfig(loglevel='debug')
# from hypergolix.utils import TraceLogger
# with TraceLogger(interval=10):
# unittest.main()
unittest.main()
|
Python
| 0
|
@@ -4243,16 +4243,1065 @@
west1)%0A
+ %0A def test_add(self):%0A objs = %5BRefferee() for __ in range(10)%5D%0A other = Refferee()%0A west1 = _WeakSet(objs)%0A %0A self.assertNotIn(other, west1)%0A %0A # Make sure we don't add extra references to the same thing%0A referents_before = len(gc.get_referents(west1))%0A west1.add(objs%5B0%5D)%0A self.assertEqual(%0A referents_before,%0A len(gc.get_referents(west1))%0A )%0A # And that it's still there%0A self.assertIn(objs%5B0%5D, west1)%0A %0A # Now make sure add works for something new%0A west1.add(other)%0A self.assertIn(other, west1)%0A %0A # def test_iter(self):%0A # ''' Here we want to test three things:%0A # 1. that iteration works%0A # 2. that iteration correctly defers removal until after iteration%0A # 3. that removal occurs immediately after iteration%0A # '''%0A # objs = %5BRefferee() for __ in range(10)%5D%0A # objrefs = %5Bweakref.ref(obj) for obj in objs%5D%0A # west1 = _WeakSet(objs)%0A
%0A%0Aif __n
|
4c4e997767681e91f5d115e998cda22433eae7f6
|
allow setting a mode (list|map)
|
apps/mapideas/views.py
|
apps/mapideas/views.py
|
import django_filters
from django.contrib import messages
from django.utils.translation import ugettext as _
from adhocracy4.maps import mixins as map_mixins
from adhocracy4.modules import views as module_views
from apps.contrib import filters
from . import forms
from . import models
def get_ordering_choices(request):
choices = (('-created', _('Most recent')),)
if request.module.has_feature('rate', models.MapIdea):
choices += ('-positive_rating_count', _('Most popular')),
choices += ('-comment_count', _('Most commented')),
return choices
class MapIdeaFilterSet(django_filters.FilterSet):
category = filters.CategoryFilter()
ordering = filters.OrderingFilter(
choices=get_ordering_choices
)
@property
def qs(self):
return super().qs.filter(module=self.request.module) \
.annotate_positive_rating_count() \
.annotate_negative_rating_count() \
.annotate_comment_count()
class Meta:
model = models.MapIdea
fields = ['category']
class MapIdeaListView(map_mixins.MapItemListMixin, module_views.ItemListView):
model = models.MapIdea
filter_set = MapIdeaFilterSet
class MapIdeaDetailView(map_mixins.MapItemDetailMixin,
module_views.ItemDetailView):
model = models.MapIdea
queryset = models.MapIdea.objects.annotate_positive_rating_count()\
.annotate_negative_rating_count()
permission_required = 'meinberlin_mapideas.view_idea'
class MapIdeaCreateView(module_views.ItemCreateView):
model = models.MapIdea
form_class = forms.MapIdeaForm
permission_required = 'meinberlin_mapideas.propose_idea'
template_name = 'meinberlin_mapideas/mapidea_create_form.html'
class MapIdeaUpdateView(module_views.ItemUpdateView):
model = models.MapIdea
form_class = forms.MapIdeaForm
permission_required = 'meinberlin_mapideas.modify_idea'
template_name = 'meinberlin_mapideas/mapidea_update_form.html'
class MapIdeaDeleteView(module_views.ItemDeleteView):
model = models.MapIdea
success_message = _("Your Idea has been deleted")
permission_required = 'meinberlin_mapideas.modify_idea'
template_name = 'meinberlin_mapideas/mapidea_confirm_delete.html'
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super().delete(request, *args, **kwargs)
|
Python
| 0
|
@@ -1193,16 +1193,163 @@
terSet%0A%0A
+ def dispatch(self, request, **kwargs):%0A self.mode = request.GET.get('mode', 'list')%0A return super().dispatch(request, **kwargs)%0A%0A
%0Aclass M
|
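
For illustration, the added dispatch override stores a mode query parameter before the usual view flow runs. The same pattern in isolation (class and context names are illustrative, not from the repo):

from django.views.generic import ListView

class ModeAwareListView(ListView):
    """Remembers ?mode=list|map from the query string for later use."""

    def dispatch(self, request, *args, **kwargs):
        self.mode = request.GET.get('mode', 'list')
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['mode'] = self.mode  # templates can branch on list vs. map
        return context
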
dac0afb3db74b1e8cd144993e662dc8ac0622cb9
|
Add missing import to FTP module
|
osbrain/ftp.py
|
osbrain/ftp.py
|
"""
Implementation of FTP-related features.
"""
from .core import BaseAgent
from .common import address_to_host_port
class FTPAgent(BaseAgent):
"""
An agent that provides basic FTP functionality.
"""
def ftp_configure(self, addr, user, passwd, path, perm='elr'):
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
# Create authorizer
authorizer = DummyAuthorizer()
authorizer.add_user(user, passwd, path, perm=perm)
# Create handler
handler = FTPHandler
handler.authorizer = authorizer
# Create server
host, port = address_to_host_port(addr)
# TODO: is this necessary? Or would `None` be sufficient?
if port is None:
port = 0
self.ftp_server = FTPServer((host, port), handler)
return self.ftp_server.socket.getsockname()
@Pyro4.oneway
def ftp_run(self):
# Serve forever
self.ftp_server.serve_forever()
def ftp_addr(self):
return self.ftp_server.socket.getsockname()
def ftp_retrieve(self, addr, origin, destiny, user, passwd):
import ftplib
host, port = addr
ftp = ftplib.FTP()
ftp.connect(host, port)
ftp.login(user, passwd)
ftp.retrbinary('RETR %s' % origin, open(destiny, 'wb').write)
ftp.close()
return destiny
|
Python
| 0.000001
|
@@ -41,16 +41,29 @@
es.%0A%22%22%22%0A
+import Pyro4%0A
from .co
|
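
For illustration: decorators are evaluated when the class body executes, so the missing import makes the whole module fail at import time with a NameError, not at first call. In miniature:

try:
    class Broken:
        @Pyro4.oneway                 # Pyro4 was never imported
        def run(self):
            pass
except NameError as exc:
    print(exc)                        # name 'Pyro4' is not defined
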
d4965867cae83de876910c16ce587c076ba3ab37
|
fix long tweet continuation to show on profile
|
everyailment.py
|
everyailment.py
|
"""Tweet every ICD-10-CM code."""
# test index 4744
import json
import os
import textwrap
import click
import twython
DEFAULT_CODES_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data/ailments.json')
def make_tweets(index, codes):
"""Generate tweets from the next code."""
next_code = codes['codes'][index + 1]
tweet = '{code} {desc}'.format(**next_code)
if len(tweet) < 141:
return [tweet]
else:
wrapped = textwrap.wrap(tweet, 100)
return ['{}… (1/2)'.format(wrapped[0]),
'@everyailment …{} (2/2)'.format(wrapped[1])]
def increment_index(indexfile):
"""Increment the index file."""
indexfile.seek(0)
index = json.load(indexfile)['index']
indexfile.seek(0)
json.dump({'index': index + 1}, indexfile)
indexfile.truncate()
indexfile.seek(0)
def oauth_dance(ctx, param, value):
"""Set up OAuth."""
if not value or ctx.resilient_parsing:
return
# set up
try:
auth_info = ctx.params['auth_info']
except KeyError:
click.echo("Error: --keyfile option is required to request access")
ctx.exit(1)
pre_auth_twitter = twython.Twython(auth_info['consumer_key'],
auth_info['consumer_secret'])
twitter_auth = pre_auth_twitter.get_authentication_tokens()
# prompt user to go to web and get verifier code
click.echo("Open: {}".format(twitter_auth['auth_url']))
verifier = click.prompt("Please enter the code provided by Twitter")
post_auth_twitter = twython.Twython(auth_info['consumer_key'],
auth_info['consumer_secret'],
twitter_auth['oauth_token'],
twitter_auth['oauth_token_secret'])
access_info = post_auth_twitter.get_authorized_tokens(verifier)
click.echo("")
click.echo("Access key: {}".format(access_info['oauth_token']))
click.echo("Access secret: {}".format(access_info['oauth_token_secret']))
new_keyfile_data = dict(auth_info)
new_keyfile_data['access_key'] = access_info['oauth_token']
new_keyfile_data['access_secret'] = access_info['oauth_token_secret']
click.echo("Save this JSON object to your keyfile:")
click.echo("")
click.echo(json.dumps(new_keyfile_data))
ctx.exit()
def load_json(ctx, param, value):
if value is not None:
try:
obj = json.load(value)
except ValueError:
click.echo('{value} is not a valid JSON file!'.format(value=value))
raise
return obj
else:
return value
@click.command(help=__doc__)
@click.argument('indexfile',
'index_info',
type=click.File('r+'),
envvar='EVERYAILMENT_INDEX_FILE',
required=True)
@click.option('--keyfile',
'auth_info',
type=click.File('r'),
envvar='EVERYAILMENT_KEYFILE',
required=True,
callback=load_json,
help='JSON file with Twitter keys and secrets.')
@click.option('--request-access',
default=False,
is_flag=True,
callback=oauth_dance,
expose_value=False,
help='Request access key and secret.')
@click.option('--post/--no-post',
default=False)
def cli(indexfile, auth_info, post):
twitter = twython.Twython(auth_info['consumer_key'],
auth_info['consumer_secret'],
auth_info['access_key'],
auth_info['access_secret'])
twitter
index = json.load(indexfile)['index']
with open(DEFAULT_CODES_FILE) as fp:
codes = json.load(fp)
tweets = make_tweets(index, codes)
prev_status = None
for tweet in tweets:
if post:
result = twitter.update_status(status=tweet,
in_reply_to_status_id=prev_status)
prev_status = result['id_str']
else:
click.echo(tweet)
if post:
increment_index(indexfile)
if __name__ == '__main__':
cli()
|
Python
| 0.000245
|
@@ -84,16 +84,28 @@
textwrap
+%0Aimport time
%0A%0Aimport
@@ -603,22 +603,8 @@
'
-@everyailment
%E2%80%A6%7B%7D
@@ -3692,21 +3692,8 @@
%5D)%0A%0A
- twitter%0A%0A
@@ -4128,16 +4128,37 @@
(tweet)%0A
+ time.sleep(2)
%0A if
|
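
For illustration, the fix threads the continuation by replying to the previous status id and pausing between posts. The pattern on its own (post_status stands in for the real Twython update_status call):

import time

def post_thread(tweets, post_status, delay=2):
    """Post tweets as a reply chain, pacing the API calls."""
    prev_id = None
    for text in tweets:
        result = post_status(status=text, in_reply_to_status_id=prev_id)
        prev_id = result['id_str']    # thread the next tweet onto this one
        time.sleep(delay)
    return prev_id
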
43d12bfcae483c3632f3e55db1b4c58d38131dab
|
Update about_multiple_inheritance.py
|
python2/koans/about_multiple_inheritance.py
|
python2/koans/about_multiple_inheritance.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Slightly based on AboutModules in the Ruby Koans
#
from runner.koan import *
class AboutMultipleInheritance(Koan):
class Nameable(object):
def __init__(self):
self._name = None
def set_name(self, new_name):
self._name = new_name
def here(self):
return "In Nameable class"
class Animal(object):
def legs(self):
return 4
def can_climb_walls(self):
return False
def here(self):
return "In Animal class"
class Pig(Animal):
def __init__(self):
super(AboutMultipleInheritance.Animal, self).__init__()
self._name = "Jasper"
@property
def name(self):
return self._name
def speak(self):
return "OINK"
def color(self):
return 'pink'
def here(self):
return "In Pig class"
class Spider(Animal):
def __init__(self):
super(AboutMultipleInheritance.Animal, self).__init__()
self._name = "Boris"
def can_climb_walls(self):
return True
def legs(self):
return 8
def color(self):
return 'black'
def here(self):
return "In Spider class"
class Spiderpig(Pig, Spider, Nameable):
def __init__(self):
super(AboutMultipleInheritance.Pig, self).__init__()
super(AboutMultipleInheritance.Nameable, self).__init__()
self._name = "Jeff"
def speak(self):
return "This looks like a job for Spiderpig!"
def here(self):
return "In Spiderpig class"
#
# Hierarchy:
# Animal
# / \
# Pig Spider Nameable
# \ | /
# Spiderpig
#
# ------------------------------------------------------------------
def test_normal_methods_are_available_in_the_object(self):
jeff = self.Spiderpig()
self.assertMatch(__, jeff.speak())
def test_base_class_methods_are_also_available_in_the_object(self):
jeff = self.Spiderpig()
try:
jeff.set_name("Rover")
except:
self.fail("This should not happen")
self.assertEqual(True, jeff.can_climb_walls())
def test_base_class_methods_can_affect_instance_variables_in_the_object(self):
jeff = self.Spiderpig()
self.assertEqual('Jeff', jeff.name)
jeff.set_name("Rover")
self.assertEqual("Rover", jeff.name)
def test_left_hand_side_inheritance_tends_to_be_higher_priority(self):
jeff = self.Spiderpig()
self.assertEqual("pink", jeff.color())
def test_super_class_methods_are_higher_priority_than_super_super_classes(self):
jeff = self.Spiderpig()
self.assertEqual(8, jeff.legs())
def test_we_can_inspect_the_method_resolution_order(self):
#
# MRO = Method Resolution Order
#
mro = type(self.Spiderpig()).__mro__
self.assertEqual('Spiderpig', mro[0].__name__)
self.assertEqual('Pig', mro[1].__name__)
self.assertEqual('Spider', mro[2].__name__)
self.assertEqual('Animal', mro[3].__name__)
self.assertEqual('Nameable', mro[4].__name__)
self.assertEqual('object', mro[5].__name__)
def test_confirm_the_mro_controls_the_calling_order(self):
jeff = self.Spiderpig()
self.assertMatch('Spiderpig', jeff.here())
next = super(AboutMultipleInheritance.Spiderpig, jeff)
self.assertMatch('Pig', next.here())
next = super(AboutMultipleInheritance.Pig, jeff)
self.assertMatch("In Spider class", next.here())
# Hang on a minute?!? That last class name might be a super class of
# the 'jeff' object, but it's hardly a superclass of Pig, is it?
#
# To avoid confusion it may help to think of super() as next_mro().
|
Python
| 0
|
@@ -2121,18 +2121,54 @@
rtMatch(
-__
+%22This looks like a job for Spiderpig!%22
, jeff.s
|
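
For illustration, the koan's central fact, that super() walks the method resolution order of the instance's class rather than the literal base-class graph, checks out in a few lines:

class Animal:
    def here(self):
        return "Animal"

class Pig(Animal):
    def here(self):
        return "Pig"

class Spider(Animal):
    def here(self):
        return "Spider"

class Spiderpig(Pig, Spider):
    def here(self):
        return "Spiderpig"

jeff = Spiderpig()
print([c.__name__ for c in type(jeff).__mro__])
# ['Spiderpig', 'Pig', 'Spider', 'Animal', 'object']
print(super(Pig, jeff).here())  # "Spider": the next class in the MRO,
                                # not Pig's own base class Animal
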
c450ee554daf1b5c4143e33d5688df2fed776f99
|
fix bug: hide the owner field in the admin sidebar config, fixing the issue where sidebar content could not be saved when no author was filled in
|
blogsys/blogsys/adminx.py
|
blogsys/blogsys/adminx.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:zq time:2018/3/15
from __future__ import unicode_literals
import xadmin
from xadmin.views import CommAdminView
class BaseOwnerAdmin(object):
'''
1. Auto-fills the owner field for the article, category, tag, sidebar and friend-link models
2. Filters the queryset down to the current user's data
'''
exclude = ('owner')
def get_list_queryset(self):
request = self.request
queryset = super(BaseOwnerAdmin, self).get_list_queryset()
if request.user.is_superuser:
return queryset.all()
return queryset.filter(owner=request.user)
def save_models(self):
if not self.org_obj:
self.new_obj.owner = self.request.user
return super(BaseOwnerAdmin, self).save_models()
class XAdminGlobalSetting(object):
site_title = "車乞's blog admin"
site_footer = 'powered by zhangqianlinux@qq.com'
xadmin.site.register(CommAdminView, XAdminGlobalSetting)
|
Python
| 0
|
@@ -303,16 +303,17 @@
('owner'
+,
)%0A de
|
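
For illustration, the one-character fix addresses a classic Python pitfall: ('owner') is just a parenthesized string, so code that iterates exclude sees single characters instead of field names. Demonstrated directly:

exclude_wrong = ('owner')    # parentheses alone do not make a tuple
exclude_right = ('owner',)   # the trailing comma does

assert isinstance(exclude_wrong, str)
assert list(exclude_wrong) == ['o', 'w', 'n', 'e', 'r']
assert isinstance(exclude_right, tuple)
assert list(exclude_right) == ['owner']
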
b5e542ad361d39e9f670198fabbfe30a48faef6e
|
make sure the airtime-liquidsoap symlink does not exist before attempting to create it
|
python_apps/pypo/install/pypo-initialize.py
|
python_apps/pypo/install/pypo-initialize.py
|
import platform
import shutil
from subprocess import Popen, PIPE
import subprocess
import sys
import os
sys.path.append('/usr/lib/airtime/')
from api_clients import api_client
from configobj import ConfigObj
import logging
if os.geteuid() != 0:
print "Please run this as root."
sys.exit(1)
"""
This function returns the codename of the host OS by querying lsb_release.
If lsb_release does not exist, or an exception occurs the codename returned
is 'unknown'
"""
def get_os_codename():
try:
p = Popen("which lsb_release > /dev/null", shell=True)
sts = os.waitpid(p.pid, 0)[1]
if (sts == 0):
#lsb_release is available on this system. Let's get the os codename
p = Popen("lsb_release -sc", shell=True, stdout=PIPE)
codename = p.communicate()[0].strip('\r\n')
p = Popen("lsb_release -sd", shell=True, stdout=PIPE)
fullname = p.communicate()[0].strip('\r\n')
return (codename, fullname)
except Exception, e:
pass
return ("unknown", "unknown")
def generate_liquidsoap_config(ss):
data = ss['msg']
fh = open('/etc/airtime/liquidsoap.cfg', 'w')
fh.write("################################################\n")
fh.write("# THIS FILE IS AUTO GENERATED. DO NOT CHANGE!! #\n")
fh.write("################################################\n")
for d in data:
buffer = d[u'keyname'] + " = "
if(d[u'type'] == 'string'):
temp = d[u'value']
buffer += '"%s"' % temp
else:
temp = d[u'value']
if(temp == ""):
temp = "0"
buffer += temp
buffer += "\n"
fh.write(api_client.encode_to(buffer))
fh.write('log_file = "/var/log/airtime/pypo-liquidsoap/<script>.log"\n')
fh.close()
PATH_INI_FILE = '/etc/airtime/pypo.cfg'
PATH_LIQUIDSOAP_BIN = '/usr/lib/airtime/pypo/bin/liquidsoap_bin'
#any debian/ubuntu codename in this set will automatically use the natty liquidsoap binary
arch_map = dict({"32bit":"i386", "64bit":"amd64"})
# load config file
try:
config = ConfigObj(PATH_INI_FILE)
except Exception, e:
print 'Error loading config file: ', e
sys.exit(1)
try:
#select appropriate liquidsoap file for given system os/architecture
architecture = platform.architecture()[0]
arch = arch_map[architecture]
print "* Detecting OS: ...",
(codename, fullname) = get_os_codename()
print " Found %s (%s) on %s architecture" % (fullname, codename, arch)
print " * Installing Liquidsoap binary"
p = Popen("which liquidsoap", shell=True, stdout=PIPE)
liq_path = p.communicate()[0].strip()
if p.returncode == 0:
os.symlink(liq_path, "/usr/bin/airtime-liquidsoap")
else:
print " * Liquidsoap binary not found!"
sys.exit(1)
#initialize init.d scripts
subprocess.call("update-rc.d airtime-playout defaults >/dev/null 2>&1", shell=True)
#clear out any previous pypo cache
print "* Clearing previous pypo cache"
subprocess.call("rm -rf /var/tmp/airtime/pypo/cache/scheduler/* >/dev/null 2>&1", shell=True)
if "airtime_service_start" in os.environ and os.environ["airtime_service_start"] == "t":
print "* Waiting for pypo processes to start..."
subprocess.call("invoke-rc.d airtime-playout start-no-monit > /dev/null 2>&1", shell=True)
except Exception, e:
print e
|
Python
| 0
|
@@ -2761,24 +2761,174 @@
ncode == 0:%0A
+ try:%0A os.unlink(liq_path)%0A except Exception:%0A #liq_path DNE, which is OK.%0A pass%0A %0A %0A
os.s
|
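
For illustration, os.symlink raises if the link path already exists, so the fix removes any stale link first. The idempotent pattern on its own (a sketch):

import os

def force_symlink(source, link_name):
    """Create link_name -> source, replacing any existing link."""
    try:
        os.unlink(link_name)
    except OSError:
        pass  # link_name does not exist yet, which is fine
    os.symlink(source, link_name)
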
0627033d55ffc21e820f03ab2609462a19ea32dd
|
Use content_type instead of mime type
|
bluebottle/utils/admin.py
|
bluebottle/utils/admin.py
|
from django.contrib import admin
from django.utils.translation import ugettext as _
from django.contrib.admin.views.main import ChangeList
from django.db.models.aggregates import Sum
from .models import Language
import csv
from django.db.models.fields.files import FieldFile
from django.db.models.query import QuerySet
from django.http import HttpResponse
from bluebottle.bb_projects.models import ProjectPhase
class LanguageAdmin(admin.ModelAdmin):
model = Language
list_display = ('code', 'language_name', 'native_name')
admin.site.register(Language, LanguageAdmin)
def prep_field(request, obj, field, manyToManySep=';'):
""" Returns the field as a unicode string. If the field is a callable, it
attempts to call it first, without arguments.
"""
if '__' in field:
bits = field.split('__')
field = bits.pop()
for bit in bits:
obj = getattr(obj, bit, None)
if obj is None:
return ""
attr = getattr(obj, field)
if isinstance(attr, (FieldFile,) ):
attr = request.build_absolute_uri(attr.url)
output = attr() if callable(attr) else attr
if isinstance(output, (list, tuple, QuerySet)):
output = manyToManySep.join([str(item) for item in output])
return unicode(output).encode('utf-8') if output else ""
def mark_as_plan_new(modeladmin, request, queryset):
try:
status = ProjectPhase.objects.get(slug='plan-new')
except ProjectPhase.DoesNotExist:
return
queryset.update(status=status)
mark_as_plan_new.short_description = _("Mark selected projects as status Plan New")
def export_as_csv_action(description="Export as CSV", fields=None, exclude=None, header=True,
manyToManySep=';'):
""" This function returns an export csv action. """
def export_as_csv(modeladmin, request, queryset):
""" Generic csv export admin action.
Based on http://djangosnippets.org/snippets/2712/
"""
opts = modeladmin.model._meta
field_names = [field.name for field in opts.fields]
labels = []
if exclude:
field_names = [f for f in field_names if f not in exclude]
elif fields:
try:
field_names = [field for field, _ in fields]
labels = [label for _, label in fields]
except ValueError:
field_names = [field for field in fields]
labels = field_names
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s.csv' % (
unicode(opts).replace('.', '_')
)
writer = csv.writer(response)
if header:
writer.writerow(labels if labels else field_names)
for obj in queryset:
writer.writerow([prep_field(request, obj, field, manyToManySep) for field in field_names])
return response
export_as_csv.short_description = description
export_as_csv.acts_on_all = True
return export_as_csv
class TotalAmountAdminChangeList(ChangeList):
def get_results(self, *args, **kwargs):
total_column = self.model_admin.total_column or 'amount'
self.model_admin.change_list_template = 'utils/admin/change_list.html'
super(TotalAmountAdminChangeList, self).get_results(*args, **kwargs)
q = self.result_list.aggregate(total=Sum(total_column))
self.total = q['total']
|
Python
| 0.000006
|
@@ -2518,12 +2518,16 @@
nse(
-mime
+content_
type
|
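
For illustration, the change tracks Django's rename of HttpResponse's mimetype argument to content_type (the old keyword was deprecated and later removed). Minimal usage of the new spelling:

from django.http import HttpResponse

def export_view(request):
    response = HttpResponse(content_type='text/csv')  # formerly mimetype=
    response['Content-Disposition'] = 'attachment; filename=export.csv'
    response.write('col_a,col_b\n1,2\n')
    return response
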
fa6927ec47253b8b7f642c67e1470680b0f8253f
|
fix logging
|
crate_project/settings/production/base.py
|
crate_project/settings/production/base.py
|
from ..base import *
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse",
},
},
"formatters": {
"simple": {
"format": "%(levelname)s %(message)s"
},
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "simple"
},
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"sentry": {
"level": "ERROR",
"class": "raven.contrib.django.handlers.SentryHandler",
},
},
"loggers": {
"": {
"handlers": ["console", "sentry"],
"propagate": True,
"level": "DEBUG",
},
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
"sentry.errors": {
"level": "DEBUG",
"handlers": ["console"],
"propagate": False,
},
}
}
SITE_ID = 3
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
SERVER_EMAIL = "server@crate.io"
DEFAULT_FROM_EMAIL = "support@crate.io"
CONTACT_EMAIL = "support@crate.io"
# MIDDLEWARE_CLASSES += ["privatebeta.middleware.PrivateBetaMiddleware"]
PACKAGE_FILE_STORAGE = "storages.backends.s3boto.S3BotoStorage"
PACKAGE_FILE_STORAGE_OPTIONS = {
"bucket": "crate-production",
"custom_domain": "packages.crate-cdn.com",
}
DEFAULT_FILE_STORAGE = "storages.backends.s3boto.S3BotoStorage"
# STATICFILES_STORAGE = "storages.backends.s3boto.S3BotoStorage"
AWS_STORAGE_BUCKET_NAME = "crate-media-production"
AWS_S3_CUSTOM_DOMAIN = "media.crate-cdn.com"
# PRIVATE_BETA_ALLOWED_URLS = [
# "/account/login/",
# "/account/signup/",
# "/account/confirm_email/",
# ]
# PRIVATE_BETA_ALLOWED_HOSTS = [
# "simple.crate.io",
# ]
INTERCOM_APP_ID = "79qt2qu3"
SIMPLE_API_URL = "http://simple.crate.io/"
# Security
SECURE_SSL_REDIRECT = True
SECURE_HSTS_SECONDS = 600 # @@@ Increase This
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
|
Python
| 0.000002
|
@@ -793,38 +793,17 @@
%22
-loggers%22: %7B%0A %22%22: %7B%0A
+root%22: %7B%0A
@@ -845,43 +845,8 @@
%22%5D,%0A
- %22propagate%22: True,%0A
@@ -863,26 +863,38 @@
%22: %22
-DEBUG
+INFO
%22,%0A
+%7D,%0A
-%7D,
+%22loggers%22: %7B
%0A
|
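
For illustration, dictConfig reserves a top-level root key for the root logger; the empty-name entry under loggers worked, but the dedicated key is the documented shape the diff moves to. A minimal config:

import logging.config

LOGGING = {
    "version": 1,
    "disable_existing_loggers": True,
    "handlers": {
        "console": {"class": "logging.StreamHandler", "level": "DEBUG"},
    },
    "root": {                    # the root logger gets its own top-level key
        "handlers": ["console"],
        "level": "INFO",
    },
    "loggers": {                 # named loggers stay under "loggers"
        "django.request": {"handlers": ["console"], "level": "ERROR"},
    },
}

logging.config.dictConfig(LOGGING)
logging.getLogger(__name__).info("configured")
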
06f2438bd3baad9d234f394697f8ebaa69002230
|
clear code
|
apps/pages/managers.py
|
apps/pages/managers.py
|
from django.db import models
from django.core.cache import cache
from mysmile.settings.main import LANGUAGES
from apps.pages.models import Page, Page_translation
from apps.settings.managers import SettingsManager
class PagesManager(models.Manager):
def get_content(self, request, lang=None, slug=None):
c = self.get_page(lang, slug)
c.update({'lang': lang, 'slug': slug})
c['languages'] = LANGUAGES if len(LANGUAGES) > 1 else ''
sm = SettingsManager()
c.update(cache.get('app_settings'))
c['main_menu'] = self.get_main_menu(lang)
c['logo_slug'] = c['main_menu'][0]['page__slug']
c['inav'] = self.get_additional_dynamic_menu(request, slug, c['menu'], c['page__ptype'], int(c['MAX_INNERLINK_HISTORY']))
return c
def get_main_menu(self, lang):
main_menu = Page_translation.objects.filter(lang=lang, page__status=Page.STATUS_PUBLISHED, page__ptype__in=[Page.PTYPE_MENU,Page.PTYPE_MENU_API]).values('page__slug', 'menu').order_by('page__sortorder')
return main_menu
def get_additional_dynamic_menu(self, request, slug, menu, ptype, max_innerlink_history):
inner_nav = request.session.get('inner_nav', [])
if ptype == Page.PTYPE_INNER:
if not [slug, menu] in inner_nav: # work with sessions
inner_nav.append([slug, menu]) # add to dynamic menu
request.session['inner_nav'] = inner_nav # save data to the session
while len(inner_nav) > max_innerlink_history:
inner_nav.pop(0)
return inner_nav
def get_page(self, lang, slug):
page = Page_translation.objects.filter(lang=lang, page__ptype__in = [Page.PTYPE_INNER,Page.PTYPE_MENU,Page.PTYPE_MENU_API], page__status=Page.STATUS_PUBLISHED, page__slug=slug).values('page__color', 'page__photo', 'menu', 'name', 'col_central', 'col_right', 'youtube', 'col_bottom_1', 'col_bottom_2', 'col_bottom_3', 'photo_alt', 'photo_description', 'meta_title', 'meta_description', 'meta_keywords', 'page__ptype')[0]
cols = ['col_bottom_1', 'col_bottom_2', 'col_bottom_3'] # some processing of the columns...
page['bottom_cols'] = [page.pop(item) for item in cols if page[item]]
page['youtube'] = self.get_youtube_embedded_url(page['youtube']) if page['youtube'] else ''
return page
def get_youtube_embedded_url(self, url):
try:
code = url.split('=')[-1]
embedded_url = 'https://www.youtube.com/embed/' + code + '?feature=player_detailpage'
except Exception:
embedded_url = False
return embedded_url
|
Python
| 0.000081
|
@@ -232,15 +232,16 @@
ger(
-models.
+Settings
Mana
@@ -246,16 +246,28 @@
nager):%0A
+
%0A def
@@ -471,39 +471,8 @@
''%0A
- sm = SettingsManager()%0A
|
651fd4ca1376064b3444cf7dbfe873ca149defdb
|
version bump
|
dlcli/_version.py
|
dlcli/_version.py
|
__version__ = '0.0.23'
|
Python
| 0.000001
|
@@ -17,7 +17,7 @@
.0.2
-3
+4
'%0A
|
fe983892386c7ca8a96817ce26158e667d920b97
|
Add links from programs to pipelines
|
apps/pipeline/admin.py
|
apps/pipeline/admin.py
|
#
# Copyright (C) 2017 Maha Farhat
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
try:
from adminsortable2.admin import SortableAdminMixin, SortableInlineAdminMixin
except ImportError:
class SortableAdminMixin(object):
pass
class SortableInlineAdminMixin(object):
pass
from .models import *
class PipelineProgramInline(SortableInlineAdminMixin, admin.TabularInline):
model = PipelineProgram
class PipelineAdmin(admin.ModelAdmin):
filter_horizontal = ('test_files',)
list_display = ('name', 'description', 'errors')
inlines = (PipelineProgramInline,)
@staticmethod
def errors(obj):
"""Return true if the pipeline has errors (based on tests)"""
count = 0
for count, test in enumerate(obj.runs.filter(run_as_test__isnull=False)):
err = test.get_errors()
if err:
return err
if count == 0:
return 'No Tests!'
return ''
admin.site.register(Pipeline, PipelineAdmin)
class ProgramAdmin(admin.ModelAdmin):
list_display = ('name', 'description', 'keep',)
filter_horizontal = ('files', 'test_files')
admin.site.register(Program, ProgramAdmin)
admin.site.register(ProgramFile)
class ProgramRunInline(admin.TabularInline):
fields = ('program', 'job_id', 'is_submitted', 'is_started', 'is_complete', 'is_error', 'debug_text', 'error_text', 'input_files', 'output_files')
model = ProgramRun
extra = 0
class PipelineRunAdmin(admin.ModelAdmin):
actions = ['all_stop']
list_display = ('name', 'created', 'pipeline', 'status', 'age')
search_fields = ['name', 'pipeline__name']
list_filter = ['pipeline']
inlines = (ProgramRunInline,)
def status(self, obj):
for prog in obj.programs.all():
if prog.is_complete:
continue
if prog.is_error:
return 'Error: %s' % str(prog.program)
if prog.is_started:
return 'Running: %s' % str(prog.program)
if prog.is_submitted:
return 'Waiting: %s' % str(prog.program)
return "Complete"
def age(self, obj):
if obj.modified and obj.created:
return obj.modified - obj.created
return '-'
def all_stop(modeladmin, request, queryset):
for run in queryset.all():
run.stop_all(msg='Admin Stopped this Program')
all_stop.short_description = "Emergency All Stop"
admin.site.register(PipelineRun, PipelineRunAdmin)
|
Python
| 0
|
@@ -713,16 +713,62 @@
rt admin
+%0Afrom django.utils.safestring import mark_safe
%0A%0Atry:%0A
@@ -1770,16 +1770,28 @@
iption',
+ 'pipeline',
'keep',
@@ -1841,16 +1841,371 @@
iles')%0A%0A
+ @staticmethod%0A def pipeline(obj):%0A urk = 'admin:pipeline_pipeline_change'%0A res = %5B%5D%0A for pipe in obj.pipelines.all():%0A url = reverse(urk, args=%5Bpipe.pipeline.pk%5D)%0A res.append(%0A %22%3Ca href='%7B%7D'%3E%7B%7D%3C/a%3E%22.format(url, str(pipe.pipeline))%0A )%0A return mark_safe('%3Cbr/%3E'.join(res))%0A%0A
admin.si
|
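
For illustration, the added column builds change-form links with reverse and marks the HTML safe. A self-contained version of the pattern (the diff itself imports only mark_safe and assumes reverse is in scope; model and related names here are illustrative):

from django.contrib import admin
from django.urls import reverse
from django.utils.safestring import mark_safe

class ProgramAdmin(admin.ModelAdmin):
    list_display = ('name', 'pipelines_column')

    @staticmethod
    def pipelines_column(obj):
        """One admin change link per related pipeline."""
        links = [
            "<a href='{}'>{}</a>".format(
                reverse('admin:pipeline_pipeline_change', args=[pipe.pk]),
                pipe,
            )
            for pipe in obj.pipelines.all()
        ]
        return mark_safe('<br/>'.join(links))
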
5488a3d7247633995e19dcbaa16ad554104f221f
|
Update cumulusci/tasks/metadata_etl/help_text.py
|
cumulusci/tasks/metadata_etl/help_text.py
|
cumulusci/tasks/metadata_etl/help_text.py
|
from collections import defaultdict
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.metadata_etl import MetadataSingleEntityTransformTask
from cumulusci.utils.xml.metadata_tree import MetadataElement
from cumulusci.core.utils import process_list_arg, process_bool_arg
class SetFieldHelpText(MetadataSingleEntityTransformTask):
entity = "CustomObject"
task_options = {
"fields": {
"description": "List of object fields to affect, in Object__c.Field__c form.",
"required": True,
},
"overwrite": {
"description": "List of object fields to affect, in Object__c.Field__c form."
},
**MetadataSingleEntityTransformTask.task_options,
}
def _init_options(self, kwargs):
self.task_config.options["api_names"] = "dummy"
super()._init_options(kwargs)
self.options["overwrite"] = process_bool_arg(
self.options.get("overwrite", False)
)
try:
float(self.api_version)
except ValueError:
raise TaskOptionsError(f"Invalid API version {self.api_version}")
if type(self.options["fields"]) != list or len(self.options["fields"]) == 0:
raise TaskOptionsError(
"Please populate the fields field with a list of dictionaries containing at minimum one entry with an 'api_name' and 'help_text' keys"
)
if not all(["api_name" in entry for entry in self.options["fields"]]):
raise TaskOptionsError(
"The 'api_name' key is required on all entry values."
)
if not all(["help_text" in entry for entry in self.options["fields"]]):
raise TaskOptionsError(
"The 'help_text' key is required on all entry values to declare what help text value to insert."
)
self.api_name_list = defaultdict(list)
for entry in process_list_arg(self.options["fields"]):
try:
obj, field = entry["api_name"].split(".")
self.api_name_list[self._inject_namespace(obj)].append(
(self._inject_namespace(field), entry["help_text"])
)
except ValueError:
raise TaskOptionsError(
f"api_name {entry} is not a valid Object.Field reference"
)
self.api_names = set(self.api_name_list.keys())
def _transform_entity(self, metadata: MetadataElement, api_name: str):
for field, help_text in self.api_name_list[api_name]:
self._modify_help_text(metadata, api_name, field, help_text)
return metadata
def _modify_help_text(
self,
metadata: MetadataElement,
api_name: str,
custom_field: str,
help_text: str,
):
# Locate the <fields> entry for this field entry.
field = metadata.find("fields", fullName=custom_field)
if not field:
raise TaskOptionsError(
f"The field {api_name}.{custom_field} was not found."
)
try:
if self.options["overwrite"]:
field.inlineHelpText.text = help_text
else:
self.logger.warning(
f"Skipping over help text field: {field.inlineHelpText.text}. Please set the overwrite option to True to overwrite this help text field."
)
except AttributeError:
field.append("inlineHelpText", text=help_text)
|
Python
| 0
|
@@ -1156,11 +1156,21 @@
if
-typ
+not isinstanc
e(se
@@ -1179,25 +1179,29 @@
.options
-%5B
+.get(
%22fields%22
%5D) != li
@@ -1196,18 +1196,16 @@
lds%22
-%5D) !=
+),
list
+)
or
|
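
For illustration, the review swaps an exact-type comparison for isinstance, which also accepts subclasses, and indexing for .get, which avoids a KeyError on a missing option. The difference in isolation:

class MyList(list):
    pass

values = MyList([1, 2, 3])
print(type(values) == list)        # False: exact-type check rejects subclasses
print(isinstance(values, list))    # True

options = {}
print(options.get("fields"))       # None; options["fields"] would raise KeyError
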
b37057c35452f26d567677eaf15216fcf0282b1c
|
fix max_history in augmented memoization; it was my mistake, there shouldn't be a -1
|
rasa_core/policies/augmented_memoization.py
|
rasa_core/policies/augmented_memoization.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import typing
from typing import Dict, List, Text, Optional
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.events import ActionExecuted
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
from rasa_core.trackers import DialogueStateTracker
from rasa_core.domain import Domain
class AugmentedMemoizationPolicy(MemoizationPolicy):
"""The policy that remembers examples from training stories
for up to `max_history` turns.
If it is needed to recall turns from training dialogues
where some slots might not be set during prediction time,
add relevant stories without such slots to training data.
E.g. reminder stories.
Since `slots` that are set some time in the past are
preserved in all future feature vectors until they are set
to None, this policy has a capability to recall the turns
up to `max_history` from training stories during prediction
even if additional slots were filled in the past
for current dialogue.
"""
def _preprocess_states(self, states):
# type: (List[Dict[Text, float]]) -> List[List[Dict[Text, float]]]
"""Overrides the helper method to preprocess tracker's states.
Creates a list of states with deleted history
to add the ability of augmented memoization
to recall partial history"""
augmented = [list(states)]
augmented_states = list(states)
for i in range(self.max_history - 1):
augmented_states[i] = None
augmented.append(list(augmented_states))
return augmented
def _back_to_the_future(self, tracker):
if self.max_history <= 1:
return []
historic_events = []
collected_events = []
idx_of_last_evt = len(tracker.applied_events()) - 1
for e_i, event in enumerate(reversed(tracker.applied_events())):
collected_events.append(event)
if isinstance(event, ActionExecuted):
if e_i == idx_of_last_evt:
# if arrived at the end of the tracker,
# the last historic_events repeat the tracker
# so `break` is called before appending them
break
historic_events.append(collected_events[:])
if len(historic_events) == self.max_history - 1:
# the length of `historic_events` should be
# one less than max_history, in order
# to not recall again with the same features
break
mcfly_trackers = []
for events in reversed(historic_events):
mcfly_tracker = tracker.init_copy()
for e in reversed(events):
mcfly_tracker.update(e)
mcfly_trackers.append(mcfly_tracker)
return mcfly_trackers
def _recall_using_delorean(self, tracker, domain):
# correctly forgetting slots
logger.debug("Launch DeLorean...")
mcfly_trackers = self._back_to_the_future(tracker)
tracker_as_states = self.featurizer.prediction_states(
mcfly_trackers, domain)
for states in tracker_as_states:
logger.debug("Current tracker state {}".format(states))
memorised = self._recall_states(states)
if memorised is not None:
return memorised
# No match found
return None
def recall(self,
states, # type: List[Dict[Text, float]]
tracker, # type: DialogueStateTracker
domain # type: Domain
):
# type: (...) -> Optional[int]
recalled = self._recall_states(states)
if recalled is None:
# let's try a different method to recall that tracker
return self._recall_using_delorean(tracker, domain)
else:
return recalled
|
Python
| 0.000135
|
@@ -2588,14 +2588,10 @@
tory
- - 1
:%0A
+
@@ -2676,108 +2676,109 @@
#
-one less than max_history, in order%0A # to not recall again with the same features
+max_history, because due to forgetting%0A # of slots the features might be different
%0A
|
23eabb23ce0423a3b11ea56fff4ff06f91ddef95
|
Create DebtLoan population method
|
finance/management/commands/populate.py
|
finance/management/commands/populate.py
|
from datetime import datetime
from datetime import timedelta
import random
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.utils import timezone
import factory
import pytz
from accounts.factories import UserFactory
from books.factories import TransactionFactory
from books.models import Transaction
class Command(BaseCommand):
help = "Popoulates databse with dummy data"
def handle(self, *args, **options):
if not User.objects.filter(username='admin'):
self.create_admin()
else:
self.admin = User.objects.get(username='admin')
print("admin user already exists")
self.create_transactions()
def create_admin(self):
# Factory creates simple user, so ``is_staff`` is set later
self.admin = UserFactory(username='admin', password='asdasd')
self.admin.is_staff = True
self.admin.is_superuser = True
self.admin.save()
print("admin user have been created successfully")
def _get_last_month(self):
"Returns random date in last month"
today = timezone.now()
first_month_day = today.replace(day=1)
last_month = first_month_day - timedelta(days=1)
return datetime(last_month.year,
last_month.month,
random.randint(1, 28),
tzinfo=pytz.utc)
def _get_this_year(self):
"Returns random date in this year"
today = timezone.now()
return datetime(today.year,
random.randint(1, today.month),
random.randint(1, today.day),
tzinfo=pytz.utc)
def _get_all_time(self):
"Returns random date"
today = timezone.now()
return datetime(random.randint(2000, today.year-1),
random.randint(1, 12),
random.randint(1, 28),
tzinfo=pytz.utc)
def create_transactions(self):
categories = [Transaction.EXPENSE, Transaction.INCOME]
# create now
TransactionFactory.create_batch(
5,
amount=factory.Sequence(lambda n: random.randint(1, 10)),
category=factory.Sequence(lambda n: random.choice(categories)),
user=self.admin,
)
# create for last month
TransactionFactory.create_batch(
5,
amount=factory.Sequence(lambda n: random.randint(1, 10)),
category=factory.Sequence(lambda n: random.choice(categories)),
user=self.admin,
created=factory.Sequence(lambda n: self._get_last_month()),
)
# create for this year
TransactionFactory.create_batch(
5,
amount=factory.Sequence(lambda n: random.randint(1, 10)),
category=factory.Sequence(lambda n: random.choice(categories)),
user=self.admin,
created=factory.Sequence(lambda n: self._get_this_year()),
)
# create for all time
TransactionFactory.create_batch(
5,
amount=factory.Sequence(lambda n: random.randint(1, 10)),
category=factory.Sequence(lambda n: random.choice(categories)),
user=self.admin,
created=factory.Sequence(lambda n: self._get_all_time()),
)
print("Transactions for admin created")
|
Python
| 0.000001
|
@@ -300,34 +300,112 @@
import
-TransactionFactory
+DebtLoanFactory%0Afrom books.factories import TransactionFactory%0Afrom books.models import DebtLoan
%0Afrom bo
@@ -795,16 +795,49 @@
ctions()
+%0A self.create_debt_loans()
%0A%0A de
@@ -3557,8 +3557,1422 @@
eated%22)%0A
+%0A def create_debt_loans(self):%0A categories = %5BDebtLoan.DEBT, DebtLoan.LOAN%5D%0A%0A # create now%0A DebtLoanFactory.create_batch(%0A 5,%0A amount=factory.Sequence(lambda n: random.randint(1, 10)),%0A category=factory.Sequence(lambda n: random.choice(categories)),%0A user=self.admin,%0A )%0A%0A # create for last month%0A DebtLoanFactory.create_batch(%0A 5,%0A amount=factory.Sequence(lambda n: random.randint(1, 10)),%0A category=factory.Sequence(lambda n: random.choice(categories)),%0A user=self.admin,%0A created=factory.Sequence(lambda n: self._get_last_month()),%0A )%0A%0A # create for this year%0A DebtLoanFactory.create_batch(%0A 5,%0A amount=factory.Sequence(lambda n: random.randint(1, 10)),%0A category=factory.Sequence(lambda n: random.choice(categories)),%0A user=self.admin,%0A created=factory.Sequence(lambda n: self._get_this_year()),%0A )%0A%0A # create for all time%0A DebtLoanFactory.create_batch(%0A 5,%0A amount=factory.Sequence(lambda n: random.randint(1, 10)),%0A category=factory.Sequence(lambda n: random.choice(categories)),%0A user=self.admin,%0A created=factory.Sequence(lambda n: self._get_all_time()),%0A )%0A print(%22DebtLoans for admin created%22)%0A
|
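
For illustration, the new method repeats the factory_boy recipe used for transactions: create_batch plus factory.Sequence to vary each row. The core reduced to a helper (a sketch; the factory class is assumed to be a DjangoModelFactory as in the diff):

import random

import factory

def populate_batch(factory_cls, user, categories, count=5):
    """Create `count` rows with randomized amount and category."""
    return factory_cls.create_batch(
        count,
        amount=factory.Sequence(lambda n: random.randint(1, 10)),
        category=factory.Sequence(lambda n: random.choice(categories)),
        user=user,
    )
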
9d334caa16f2bd3385288a8cf263c71f8a6fb827
|
Fix to previous csvbills commit
|
membership/management/commands/csvbills.py
|
membership/management/commands/csvbills.py
|
# encoding: UTF-8
from __future__ import with_statement
from django.db.models import Q, Sum
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_lazy as _
import codecs
import csv
import os
from datetime import datetime
from decimal import Decimal
import logging
logger = logging.getLogger("csvbills")
from membership.models import Bill, BillingCycle, Payment
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
<http://docs.python.org/library/csv.html#examples>
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
<http://docs.python.org/library/csv.html#examples>
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeDictReader(UnicodeReader):
"""A CSV reader which stores the headers from the first line
"""
def __init__(self, *args, **kw):
UnicodeReader.__init__(self, *args, **kw)
# Read headers from first line
self.headers = UnicodeReader.next(self)
def next(self):
row = UnicodeReader.next(self)
return dict(zip(self.headers, row))
class RequiredFieldNotFoundException(Exception): pass
class DuplicateColumnException(Exception): pass
class OpDictReader(UnicodeDictReader):
'''Reader for Osuuspankki CSV file format
The module converts Osuuspankki CSV format data into a more usable form.'''
# If these fields are not found on the first line, an exception is raised
REQUIRED_COLUMNS = ['date', 'amount', 'transaction']
# Translation table from Osuuspankki CSV format to short names
OP_CSV_TRANSLATION = {u'Kirjauspäivä' : 'date',
u'Arvopäivä' : 'value_date',
u'Tap.pv' : 'date', # old format
u'Määrä EUROA' : 'amount',
u'Tapahtumalajikoodi' : 'event_type_code',
u'Selitys' : 'event_type_description',
u'Saaja/Maksaja' : 'fromto',
u'Saajan tilinumero' : 'account',
u'Viite' : 'reference',
u'Viesti' : 'message',
u'Arkistotunnus' : 'transaction'}
def __init__(self, f, delimiter=';', encoding="iso8859-1", *args, **kw):
UnicodeDictReader.__init__(self, f, delimiter=delimiter,
encoding=encoding, *args, **kw)
# Translate headers
h = self.headers
for i in xrange(0, len(h)):
self.headers[i] = self.OP_CSV_TRANSLATION.get(h[i], h[i])
# Check that all required columns exist in the header
for name in self.REQUIRED_COLUMNS:
if name not in self.headers:
error = "CSV format is invalid: missing field '%s'." % name
raise RequiredFieldNotFoundException(error)
# Check that each field is unique
for name in self.headers:
if self.headers.count(name) != 1:
error = "The field '%s' occurs multiple times in the header"
raise DuplicateColumnException(error)
def next(self):
row = UnicodeDictReader.next(self)
if len(row) == 0:
return None
row['amount'] = Decimal(row['amount'].replace(",", "."))
row['date'] = datetime.strptime(row['date'], "%d.%m.%Y")
row['reference'] = row['reference'].replace(' ', '').lstrip('0')
if row.has_key('value_date'):
row['value_date'] = datetime.strptime(row['value_date'], "%d.%m.%Y")
return row
def row_to_payment(row):
try:
p = Payment.objects.get(transaction_id__exact=row['transaction'])
return p
except Payment.DoesNotExist:
p = Payment(payment_day=min(datetime.now(), row['date']),
amount=row['amount'],
type=row['event_type_description'],
payer_name=row['fromto'],
reference_number=row['reference'],
message=row['message'],
transaction_id=row['transaction'])
return p
def process_csv(file_handle):
"""Actual CSV file processing logic
"""
return_messages = []
num_attached = num_notattached = 0
sum_attached = sum_notattached = 0
reader = OpDictReader(file_handle)
for row in reader:
if row == None:
continue
if row['amount'] < 0: # Transaction is paid by us, ignored
continue
payment = row_to_payment(row)
# Do nothing if this payment has already been assigned
if payment.billingcycle:
continue
try:
reference = payment.reference_number
cycle = BillingCycle.objects.get(reference_number=reference)
payment.attach_to_cycle(cycle)
return_messages.append(_("Attached payment {payment} to cycle {cycle}").
replace("{payment}", payment).replace("{cycle}", cycle))
num_attached = num_attached + 1
sum_attached = sum_attached + payment.amount
except BillingCycle.DoesNotExist:
# Failed to find cycle for this reference number
if not payment.id:
payment.save() # Only save if object not in database yet
logger.warning("No billing cycle found for %s" % payment.reference_number)
return_messages.append(_("No billing cycle found for %s") % payment)
num_notattached = num_notattached + 1
sum_notattached = sum_notattached + payment.amount
log_message ="Processed %s payments total %.2f EUR. Unidentified payments: %s (%.2f EUR)" % \
(num_attached + num_notattached, sum_attached + sum_notattached, num_notattached, \
sum_notattached)
logger.info(log_message)
return_messages.append(log_message)
return return_messages
class Command(BaseCommand):
args = '<csvfile> [<csvfile> ...]'
help = 'Read a CSV list of payment transactions'
def handle(self, *args, **options):
for csvfile in args:
logger.info("Starting the processing of file %s." %
os.path.abspath(csvfile))
# Exceptions of process_csv are fatal in command line run
with open(csvfile, 'r') as file_handle:
process_csv(file_handle)
logger.info("Done processing file %s." % os.path.abspath(csvfile))
|
Python
| 0.000007
|
@@ -5697,24 +5697,32 @@
ment%7D%22,
+unicode(
payment)
.replace
@@ -5713,16 +5713,17 @@
payment)
+)
.replace
@@ -5738,15 +5738,24 @@
%7D%22,
+unicode(
cycle))
+)
%0A
|
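
For illustration, the UTF8Recoder/UnicodeReader machinery exists only because Python 2's csv module could not handle unicode; on Python 3 the same read needs none of it (file name here is illustrative):

import csv

# Python 3: open with the bank's encoding and csv handles str natively.
with open('transactions.csv', encoding='iso8859-1', newline='') as handle:
    for row in csv.DictReader(handle, delimiter=';'):
        print(row['Viite'], row['Määrä EUROA'])
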
0846347ab7931993e602463d81dc493457c2cf5f
|
Support Git URI
|
pipes/app/create_app.py
|
pipes/app/create_app.py
|
#!/usr/bin/env python
# A script for creating an application in spinnaker.
# Simply looks to see if the application already exists, if not, creates
import argparse
import configparser
import json
import logging
import os
import sys
from jinja2 import Environment, FileSystemLoader
import requests
class SpinnakerApp:
def __init__(self):
config = configparser.ConfigParser()
self.here = os.path.dirname(os.path.realpath(__file__))
configpath = "{}/../../configs/spinnaker.conf".format(self.here)
config.read(configpath)
self.gate_url = config['spinnaker']['gate_url']
self.header = {'content-type': 'application/json'}
def get_apps(self):
'''Gets all applications from spinnaker'''
url = self.gate_url + "/applications"
r = requests.get(url)
if r.status_code == 200:
self.apps = r.json()
else:
logging.error(r.text)
sys.exit(1)
def app_exists(self):
'''Checks to see if application already exists'''
self.get_apps()
for app in self.apps:
if app['name'].lower() == self.appname.lower():
logging.info('{} app already exists'.format(self.appname))
return True
logging.info('{} does not exist...creating'.format(self.appname))
return False
def setup_appdata(self):
'''Uses jinja2 to set up POST data for application creation'''
templatedir = "{}/../../templates".format(self.here)
jinjaenv = Environment(loader=FileSystemLoader(templatedir))
template = jinjaenv.get_template("app_data_template.json")
rendered_json = json.loads(template.render(appinfo=self.appinfo))
print(rendered_json)
return rendered_json
def create_app(self, appinfo=None):
'''Sends a POST to spinnaker to create a new application'''
# setup class variables for processing
self.appinfo = appinfo
if appinfo:
self.appname = appinfo['app']
url = "{}/applications/{}/tasks".format(self.gate_url, self.appname)
jsondata = self.setup_appdata()
r = requests.post(url, data=json.dumps(jsondata), headers=self.header)
if not r.ok:
logging.error("Failed to create app: {}".format(r.text))
sys.exit(1)
logging.info("Successfully created {} application".format(self.appname))
return
if __name__ == "__main__":
# Setup parser
parser = argparse.ArgumentParser()
parser.add_argument("--app", help="The application name to create",
required=True)
parser.add_argument("--email", help="Email address to associate with application",
default="PS-DevOpsTooling@example.com")
parser.add_argument("--project", help="The project to associate with application",
default="None")
parser.add_argument("--repo", help="The repo to associate with application",
default="None")
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
# Dictionary containing application info. This is passed to the class for processing
appinfo = {
"app": args.app,
"email": args.email,
"project": args.project,
"repo": args.repo
}
spinnakerapps = SpinnakerApp()
spinnakerapps.create_app(appinfo=appinfo)
|
Python
| 0
|
@@ -226,16 +226,33 @@
port sys
+%0Aimport gogoutils
%0A%0Afrom j
@@ -2247,24 +2247,16 @@
header)%0A
-
%0A
@@ -3051,24 +3051,87 @@
ult=%22None%22)%0A
+ parser.add_argument(%22--git%22, help=%22Git URI%22, default=None)%0A
args = p
@@ -3229,16 +3229,247 @@
.INFO)%0A%0A
+ if args.git:%0A generated = gogoutils.Generator(*gogoutils.Parser(args.git).parse_url())%0A project = generated.project%0A repo = generated.repo%0A else:%0A project = args.project%0A repo = args.repo%0A%0A
# Di
@@ -3638,21 +3638,16 @@
oject%22:
-args.
project,
@@ -3663,21 +3663,16 @@
%22repo%22:
-args.
repo%0A
|
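
For illustration, the change derives project and repo from a Git URI via gogoutils when --git is given. Without that dependency, the same split can be sketched with plain string handling (illustrative only; gogoutils is what the script actually uses):

def parse_git_uri(uri):
    """Return (project, repo) from URIs like git@host:project/repo.git."""
    path = uri.split(':', 1)[-1]              # drop scheme or user@host prefix
    project, repo = path.rstrip('/').rsplit('/', 2)[-2:]
    return project, repo.replace('.git', '')

assert parse_git_uri('git@github.com:org/app.git') == ('org', 'app')
assert parse_git_uri('https://github.com/org/app') == ('org', 'app')
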
a6128825eb7b4267a2a5a3116ec2625fdd8d3552
|
Add outsuffix to prottable-qvality driver
|
app/drivers/prottable/qvality.py
|
app/drivers/prottable/qvality.py
|
from app.drivers.pycolator.qvality import QvalityDriver
from app.actions.prottable import qvality as preparation
from app.readers import tsv
class ProttableQvalityDriver(QvalityDriver):
def __init__(self, **kwargs):
super(ProttableQvalityDriver, self).__init__(**kwargs)
self.score_get_fun = preparation.prepare_qvality_input
if '***reverse' not in self.qvalityoptions:
self.qvalityoptions.extend(['***reverse'])
def set_features(self):
targetheader = tsv.get_tsv_header(self.fn)
self.target = tsv.generate_tsv_proteins(self.fn, targetheader)
decoyheader = tsv.get_tsv_header(self.decoy)
self.decoy = tsv.generate_tsv_proteins(self.decoy, decoyheader)
super().set_features()
|
Python
| 0
|
@@ -181,16 +181,97 @@
river):%0A
+ %22%22%22Runs qvality on two protein tables%22%22%22%0A outsuffix = '_protqvality.txt'%0A%0A
def
|
ca30a98655d289d2367f24503d947536e6f3a5eb
|
Sort the directory listing for deterministic order.
|
scripts/slave/recipes/infra/infra_continuous.py
|
scripts/slave/recipes/infra/infra_continuous.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'bot_update',
'file',
'gclient',
'json',
'path',
'platform',
'properties',
'python',
'step',
]
# Path to a service account credentials to use to talk to CIPD backend.
# Deployed by Puppet.
CIPD_BUILDER_CREDS = '/creds/service_accounts/service-account-cipd-builder.json'
def build_cipd_packages(api, repo, rev):
tags = [
'buildbot_build:%s/%s/%s' % (
api.properties['mastername'],
api.properties['buildername'],
api.properties['buildnumber']),
'git_repository:%s' % repo,
'git_revision:%s' % rev,
]
try:
return api.python(
'build cipd packages',
api.path['checkout'].join('build', 'build.py'),
[
'--upload',
'--service-account-json', CIPD_BUILDER_CREDS,
'--json-output', api.json.output(),
] + ['--tags'] + tags)
finally:
step_result = api.step.active_result
output = step_result.json.output or {}
p = step_result.presentation
for pkg in output.get('succeeded', []):
info = pkg['info']
title = '%s %s' % (info['package'], info['instance_id'])
p.links[title] = info.get('url', 'http://example.com/not-implemented-yet')
def build_luci(api):
go_bin = api.path['checkout'].join('go', 'bin')
go_env = api.path['checkout'].join('go', 'env.py')
api.file.rmcontents('clean go bin', go_bin)
api.python(
'build luci-go', go_env,
['go', 'install', 'github.com/luci/luci-go/client/cmd/...'])
files = api.file.listdir('listing go bin', go_bin)
absfiles = [api.path.join(go_bin, i) for i in files]
api.python(
'upload go bin',
api.path['depot_tools'].join('upload_to_google_storage.py'),
['-b', 'chromium-luci'] + absfiles)
for name, abspath in zip(files, absfiles):
sha1 = api.file.read(
'%s sha1' % str(name), abspath + '.sha1',
test_data='0123456789abcdeffedcba987654321012345678')
api.step.active_result.presentation.step_text = sha1
def GenSteps(api):
builder_name = api.properties.get('buildername')
if builder_name.startswith('infra-internal-continuous'):
project_name = 'infra_internal'
repo_name = 'https://chrome-internal.googlesource.com/infra/infra_internal'
elif builder_name.startswith('infra-continuous'):
project_name = 'infra'
repo_name = 'https://chromium.googlesource.com/infra/infra'
else: # pragma: no cover
raise ValueError(
'This recipe is not intended for builder %s. ' % builder_name)
api.gclient.set_config(project_name)
bot_update_step = api.bot_update.ensure_checkout(force=True)
api.gclient.runhooks()
# Whatever is checked out by bot_update. It is usually equal to
# api.properties['revision'] except when the build was triggered manually
# ('revision' property is missing in that case).
rev = bot_update_step.presentation.properties['got_revision']
with api.step.defer_results():
# TODO(crbug.com/487485): expect_test + venv is broken on Windows.
if not api.platform.is_win:
api.python(
'infra python tests',
'test.py',
['test'],
cwd=api.path['checkout'])
# This downloads Go third parties, so that the next step doesn't have junk
# output in it.
api.python(
'go third parties',
api.path['checkout'].join('go', 'env.py'),
['go', 'version'])
# Note: env.py knows how to expand 'python' into sys.executable.
api.python(
'infra go tests',
api.path['checkout'].join('go', 'env.py'),
['python', api.path['checkout'].join('go', 'test.py')])
# TODO(crbug.com/481661): CIPD client doesn't support Windows yet.
if not api.platform.is_win:
build_cipd_packages(api, repo_name, rev)
# Only build luci-go executables on 64 bits, public CI.
if project_name == 'infra' and builder_name.endswith('-64'):
build_luci(api)
def GenTests(api):
cipd_json_output = {
'succeeded': [
{
'info': {
'instance_id': 'abcdefabcdef63ad814cd1dfffe2fcfc9f81299c',
'package': 'infra/tools/some_tool/linux-bitness',
},
'pkg_def_name': 'some_tool',
},
],
'failed': [],
}
yield (
api.test('infra') +
api.properties.git_scheduled(
buildername='infra-continuous',
buildnumber=123,
mastername='chromium.infra',
repository='https://chromium.googlesource.com/infra/infra',
) +
api.override_step_data(
'build cipd packages', api.json.output(cipd_json_output))
)
yield (
api.test('infra_win') +
api.properties.git_scheduled(
buildername='infra-continuous',
buildnumber=123,
mastername='chromium.infra',
repository='https://chromium.googlesource.com/infra/infra',
) +
api.platform.name('win')
)
yield (
api.test('infra_internal') +
api.properties.git_scheduled(
buildername='infra-internal-continuous',
buildnumber=123,
mastername='internal.infra',
repository=
'https://chrome-internal.googlesource.com/infra/infra_internal',
) +
api.override_step_data(
'build cipd packages', api.json.output(cipd_json_output))
)
yield (
api.test('infra-64') +
api.properties.git_scheduled(
buildername='infra-continuous-64',
buildnumber=123,
mastername='chromium.infra',
repository='https://chromium.googlesource.com/infra/infra',
)
)
|
Python
| 0.999994
|
@@ -1644,16 +1644,23 @@
files =
+sorted(
api.file
@@ -1693,16 +1693,17 @@
go_bin)
+)
%0A absfi
|
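
For illustration, os.listdir (and helpers built on it) return entries in arbitrary filesystem order, so wrapping the listing in sorted() makes the upload order deterministic. The point in miniature:

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    for name in ('b.txt', 'a.txt', 'c.txt'):
        open(os.path.join(tmp, name), 'w').close()
    # listdir order is filesystem-dependent; sorted() pins it down.
    assert sorted(os.listdir(tmp)) == ['a.txt', 'b.txt', 'c.txt']
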
45fac42c7a790c5ff6cf656ee8e5984869cddbf3
|
Clean up JWT args before creation
|
dockci/api/jwt.py
|
dockci/api/jwt.py
|
""" API relating to JWT authentication """
from datetime import datetime
import jwt
from flask import url_for
from flask_restful import Resource
from flask_security import current_user, login_required
from .base import BaseRequestParser
from .exceptions import OnlyMeError, WrappedTokenError, WrongAuthMethodError
from .fields import NonBlankInput
from .util import DT_FORMATTER, ensure_roles_found
from dockci.models.auth import lookup_role
from dockci.server import API, CONFIG
from dockci.util import require_admin, jwt_token
JWT_ME_DETAIL_PARSER = BaseRequestParser()
JWT_NEW_PARSER = BaseRequestParser()
JWT_NEW_PARSER.add_argument('name',
required=True, type=NonBlankInput(),
help="Service name for the token")
JWT_NEW_PARSER.add_argument('exp',
type=DT_FORMATTER,
help="Expiration time of the token")
JWT_SERVICE_NEW_PARSER = JWT_NEW_PARSER.copy()
JWT_SERVICE_NEW_PARSER.add_argument('roles',
required=True, action='append',
help="Roles the service is given")
# pylint:disable=no-self-use
class JwtNew(Resource):
""" API resource that handles creating JWT tokens for users """
@login_required
def post(self, user_id):
""" Create a JWT token for a user """
if current_user.id != user_id:
raise OnlyMeError("create JWT tokens")
args = JWT_NEW_PARSER.parse_args(strict=True)
args = {
'name': args['name'],
'exp': args['exp'],
}
return {'token': jwt_token(**args)}, 201
class JwtServiceNew(Resource):
""" API resource that handles creating new JWT tokens for services """
@login_required
@require_admin
def post(self):
""" Create a JWT token for a service user """
args = JWT_SERVICE_NEW_PARSER.parse_args(strict=True)
args = {
'name': args['name'],
'exp': args['exp'],
'roles': args['roles'],
}
found_roles = [
role for role in [
lookup_role(name)
for name in args['roles']
]
if role is not None
]
ensure_roles_found(args['roles'], found_roles)
return {'token': jwt_token(sub='service', **args)}, 201
class JwtMeDetail(Resource):
"""
API resource to handle getting current JWT token details, and creating one
for the current user
"""
@login_required
def get(self):
""" Get details about the current JWT token """
args = JWT_ME_DETAIL_PARSER.parse_args()
api_key = args['x_dockci_api_key'] or args['hx_dockci_api_key']
if api_key is None:
raise WrongAuthMethodError("a JWT token")
else:
return JwtDetail().get(api_key)
@login_required
def post(self):
""" Create a JWT token for the currently logged in user """
return JwtNew().post(current_user.id)
class JwtDetail(Resource):
""" API resource to handle getting job details """
def get(self, token):
""" Get details about a JWT token """
try:
jwt_data = jwt.decode(token, CONFIG.secret)
except jwt.exceptions.InvalidTokenError as ex:
raise WrappedTokenError(ex)
jwt_data['iat'] = DT_FORMATTER.format(
datetime.fromtimestamp(jwt_data['iat'])
)
try:
user_id = jwt_data['sub']
jwt_data['sub_detail'] = url_for('user_detail', user_id=user_id)
except KeyError:
pass
return jwt_data
API.add_resource(JwtNew,
'/users/<int:id>/jwt',
endpoint='jwt_user_new')
API.add_resource(JwtServiceNew,
'/jwt/service',
endpoint='jwt_service_new')
API.add_resource(JwtMeDetail,
'/me/jwt',
endpoint='jwt_me_detail')
API.add_resource(JwtDetail,
'/jwt/<string:token>',
endpoint='jwt_detail')
|
Python
| 0.000004
|
@@ -361,16 +361,29 @@
l import
+ clean_attrs,
DT_FORM
@@ -1536,32 +1536,44 @@
%0A args =
+clean_attrs(
%7B%0A 'n
@@ -1625,32 +1625,33 @@
exp'%5D,%0A %7D
+)
%0A return
@@ -1982,16 +1982,28 @@
args =
+clean_attrs(
%7B%0A
@@ -2107,16 +2107,17 @@
%7D
+)
%0A
|
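
For illustration, routing the parsed arguments through clean_attrs plausibly drops unset options so no explicit None claims reach the JWT. A possible shape for the helper (an assumption; the real dockci.api.util.clean_attrs may differ):

def clean_attrs(attrs):
    """Drop entries whose value is None so they are omitted downstream."""
    return {key: value for key, value in attrs.items() if value is not None}

assert clean_attrs({'name': 'ci-bot', 'exp': None}) == {'name': 'ci-bot'}
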
3199b523a67f9c241950992a07fe38d2bbee07dc
|
Update migration file for name change
|
seedlibrary/migrations/0003_extendedview_fix.py
|
seedlibrary/migrations/0003_extendedview_fix.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-02-21 02:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('seedlibrary', '0002_auto_20170219_2058'),
]
operations = [
migrations.RenameField(
model_name='extendedview',
old_name='external_field',
new_name='external_url',
),
migrations.AddField(
model_name='extendedview',
name='grain_subcategory',
field=models.CharField(blank=True, max_length=50),
),
]
|
Python
| 0
|
@@ -245,25 +245,23 @@
02_a
-uto_20170219_2058
+dd_extendedview
'),%0A
|
d4675f644278167842eb2419bd0e00f94ece6b92
|
Add self
|
serenata_toolbox/chamber_of_deputies/dataset.py
|
serenata_toolbox/chamber_of_deputies/dataset.py
|
import os.path
import csv
from datetime import date
from urllib.request import urlretrieve
from zipfile import ZipFile
import numpy as np
import pandas as pd
from .reimbursements import Reimbursements
class Dataset:
YEARS = [n for n in range(2009, date.today().year+1)]
def __init__(self, path):
self.path = path
def fetch(self):
base_url = "http://www.camara.leg.br/cotas/Ano-{}.csv.zip"
for year in self.YEARS:
zip_file_path = os.path.join(self.path, "Ano-{}.zip".format(year))
url = base_url.format(year)
urlretrieve(url, zip_file_path)
zip_file = ZipFile(zip_file_path, 'r')
zip_file.extractall(self.path)
zip_file.close()
os.remove(zip_file_path)
urlretrieve('http://www2.camara.leg.br/transparencia/cota-para-exercicio-da-atividade-parlamentar/explicacoes-sobre-o-formato-dos-arquivos-xml',
os.path.join(self.path, 'datasets-format.html'))
def convert_to_csv(self):
# deprecated but still here so we don't break poor Rosie (for now)
pass
def translate(self):
for year in self.YEARS:
csv_path = os.path.join(self.path, 'Ano-{}.csv'.format(year))
self.__translate_file(csv_path)
def clean(self):
reimbursements = Reimbursements(self.path)
dataset = reimbursements.group(reimbursements.receipts)
reimbursements.write_reimbursement_file(dataset)
def __translate_file(self, csv_path):
output_file_path = csv_path \
.replace('.csv', '.xz') \
.replace('Ano-', 'reimbursements-')
data = pd.read_csv(csv_path,
encoding='utf-8',
delimiter=";",
quoting=csv.QUOTE_NONE,
dtype={'ideDocumento': np.str,
'idecadastro': np.str,
'nuCarteiraParlamentar': np.str,
'codLegislatura': np.str,
'txtCNPJCPF': np.str,
'numRessarcimento': np.str},
converters={'vlrDocumento': self.__parse_float,
'vlrGlosa': self.__parse_float,
'vlrLiquido': self.__parse_float,
'vlrRestituicao': self.__parse_float})
data.rename(columns={
'ideDocumento': 'document_id',
'txNomeParlamentar': 'congressperson_name',
'idecadastro': 'congressperson_id',
'nuCarteiraParlamentar': 'congressperson_document',
'nuLegislatura': 'term',
'sgUF': 'state',
'sgPartido': 'party',
'codLegislatura': 'term_id',
'numSubCota': 'subquota_number',
'txtDescricao': 'subquota_description',
'numEspecificacaoSubCota': 'subquota_group_id',
'txtDescricaoEspecificacao': 'subquota_group_description',
'txtFornecedor': 'supplier',
'txtCNPJCPF': 'cnpj_cpf',
'txtNumero': 'document_number',
'indTipoDocumento': 'document_type',
'datEmissao': 'issue_date',
'vlrDocumento': 'document_value',
'vlrGlosa': 'remark_value',
'vlrLiquido': 'net_value',
'numMes': 'month',
'numAno': 'year',
'numParcela': 'installment',
'txtPassageiro': 'passenger',
'txtTrecho': 'leg_of_the_trip',
'numLote': 'batch_number',
'numRessarcimento': 'reimbursement_number',
'vlrRestituicao': 'reimbursement_value',
'nuDeputadoId': 'applicant_id',
}, inplace=True)
subquotas = (
(1, 'Maintenance of office supporting parliamentary activity'),
(2, 'Locomotion, meal and lodging'),
(3, 'Fuels and lubricants'),
(4, 'Consultancy, research and technical work'),
(5, 'Publicity of parliamentary activity'),
(6, 'Purchase of office supplies'),
(7, 'Software purchase or renting; Postal services; Subscriptions'),
(8, 'Security service provided by specialized company'),
(9, 'Flight tickets'),
(10, 'Telecommunication'),
(11, 'Postal services'),
(12, 'Publication subscriptions'),
(13, 'Congressperson meal'),
(14, 'Lodging, except for congressperson from Distrito Federal'),
(15, 'Automotive vehicle renting or watercraft charter'),
(119, 'Aircraft renting or charter of aircraft'),
(120, 'Automotive vehicle renting or charter'),
(121, 'Watercraft renting or charter'),
(122, 'Taxi, toll and parking'),
(123, 'Terrestrial, maritime and fluvial tickets'),
(137, 'Participation in course, talk or similar event'),
(999, 'Flight ticket issue')
)
for code, name in subquotas:
data.loc[data['subquota_number']==code, ['subquota_description']] = name
data.to_csv(output_file_path, compression='xz', index=False,
encoding='utf-8')
return output_file_path
def __parse_float(string):
return float(string.replace(',', '.'))
|
Python
| 0.000359
|
@@ -5408,24 +5408,30 @@
parse_float(
+self,
string):%0A
|
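The one-line diff adds the missing self parameter: __parse_float was defined inside the class without it, so calling the bound method would pass two arguments to a one-argument function and raise a TypeError. The patched method, as a small runnable sketch:

class Dataset:
    def __parse_float(self, string):
        # Brazilian-style decimals use a comma; normalise before float().
        return float(string.replace(',', '.'))

# Name-mangled access for illustration only; pandas calls the bound method.
assert Dataset()._Dataset__parse_float('1,5') == 1.5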
17b0f5d7b718bc12755f7ddefdd76ee9312adf5f
|
Add content type text/html to response
|
books.py
|
books.py
|
import falcon
import template
def get_paragraphs(pathname: str) -> list:
result = []
with open(pathname) as f:
for line in f.readlines():
if line != '\n':
result.append(line[:-1])
return result
class BooksResource:
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
paragraphs = get_paragraphs('/home/sanchopanca/Documents/thunder.txt')
resp.body = template.render_template('book.html', paragraphs=paragraphs)
app = falcon.API()
books = BooksResource()
app.add_route('/books', books)
if __name__ == '__main__':
paragraphs = get_paragraphs('/home/sanchopanca/Documents/thunder.txt')
print(paragraphs)
|
Python
| 0.000001
|
@@ -331,16 +331,56 @@
TTP_200%0A
+ resp.content_type = 'text/html'%0A
|
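The diff inserts one line into on_get so the response advertises its media type. Falcon's default media type is application/json, so without the header a browser would not render the template as a page. A minimal sketch of the patched handler, with the body trimmed to keep it self-contained:

import falcon

class BooksResource:
    def on_get(self, req, resp):
        resp.status = falcon.HTTP_200
        resp.content_type = 'text/html'  # the added line
        resp.body = '<p>example</p>'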
c024c69c9719693f3a8a47bc70f3649e8abff17a
|
Use cache db backend for session in the default state.
|
leonardo/conf/default.py
|
leonardo/conf/default.py
|
from leonardo.base import default
EMAIL = {
'HOST': 'mail.domain.com',
'PORT': '25',
'USER': 'username',
'PASSWORD': 'pwd',
'SECURITY': True,
}
RAVEN_CONFIG = {}
ALLOWED_HOSTS = ['*']
USE_TZ = True
DEBUG = True
ADMINS = (
('admin', 'mail@leonardo.cz'),
)
# month
LEONARDO_CACHE_TIMEOUT = 60 * 60 * 24 * 31
DEFAULT_CHARSET = 'utf-8'
MANAGERS = ADMINS
SITE_ID = 1
SITE_NAME = 'Leonardo'
TIME_ZONE = 'Europe/Prague'
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', 'EN'),
('cs', 'CS'),
)
USE_I18N = True
DBTEMPLATES_MEDIA_PREFIX = '/static-/'
DBTEMPLATES_AUTO_POPULATE_CONTENT = True
DBTEMPLATES_ADD_DEFAULT_SITE = True
FILER_ENABLE_PERMISSIONS = True # noqa
MIDDLEWARE_CLASSES = default.middlewares
ROOT_URLCONF = 'leonardo.urls'
LEONARDO_BOOTSTRAP_URL = 'http://github.com/django-leonardo/django-leonardo/raw/master/contrib/bootstrap/demo.yaml'
MARKITUP_FILTER = ('markitup.renderers.render_rest', {'safe_mode': True})
INSTALLED_APPS = default.apps
# For easy_thumbnails to support retina displays (recent MacBooks, iOS)
FEINCMS_USE_PAGE_ADMIN = False
LEONARDO_USE_PAGE_ADMIN = True
FEINCMS_DEFAULT_PAGE_MODEL = 'web.Page'
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
CONSTANCE_CONFIG = {}
CONSTANCE_ADDITIONAL_FIELDS = {}
# enable auto loading packages
LEONARDO_MODULE_AUTO_INCLUDE = True
# enable system module
LEONARDO_SYSTEM_MODULE = True
##########################
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
'compressor.finders.CompressorFinder',
)
LOGIN_URL = '/auth/login'
LOGIN_REDIRECT_URL = '/'
LOGOUT_URL = "/"
LOGOUT_ON_GET = True
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': 'DEBUG',
'handlers': ['console'],
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'django.request': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'leonardo': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
CRISPY_TEMPLATE_PACK = 'bootstrap3'
SECRET_KEY = None
APPS = []
PAGE_EXTENSIONS = []
MIGRATION_MODULES = {}
# use default leonardo auth urls
LEONARDO_AUTH = True
FEINCMS_TIDY_HTML = False
APPLICATION_CHOICES = []
ADD_JS_FILES = []
ADD_CSS_FILES = []
ADD_SCSS_FILES = []
ADD_JS_SPEC_FILES = []
ADD_ANGULAR_MODULES = []
ADD_PAGE_ACTIONS = []
ADD_WIDGET_ACTIONS = []
ADD_MIGRATION_MODULES = {}
ADD_JS_COMPRESS_FILES = []
CONSTANCE_CONFIG_GROUPS = {}
ABSOLUTE_URL_OVERRIDES = {}
SELECT2_CACHE_PREFIX = 'SELECT2'
MODULE_URLS = {}
WIDGETS = {}
|
Python
| 0
|
@@ -1295,16 +1295,79 @@
S = %7B%7D%0A%0A
+SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'%0A%0A
# enable
|
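Decoded, the diff adds a single setting between CONSTANCE_ADDITIONAL_FIELDS and the module-loading flags:

SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'

cached_db is Django's write-through session backend: reads are served from the cache when possible, but every write also goes to the database, so sessions survive a cache flush while staying fast.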
02f241a646ba991046208949156d00fd2d163c26
|
Remove unneeded override of field
|
partner_communication_switzerland/models/account_invoice.py
|
partner_communication_switzerland/models/account_invoice.py
|
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo import api, models, fields
from odoo.addons.queue_job.job import job, related_action
logger = logging.getLogger(__name__)
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
communication_id = fields.Many2one(
'partner.communication.job', 'Thank you letter', ondelete='set null',
readonly=True
)
@api.model
def thankyou_summary_cron(self):
"""
Sends a summary each month of the donations
:return: True
"""
comm_obj = self.env['partner.communication.job']
first = datetime.today().replace(day=1)
last_month = first - relativedelta(months=1)
partners = self.env['res.users'].search([
'|', '|',
('name', 'like', 'Maglo Rachel'),
('name', 'like', 'Willi Christian'),
('name', 'like', 'Wulliamoz David'),
]).mapped('partner_id')
invoices = self.search([
('type', '=', 'out_invoice'),
('invoice_type', '!=', 'sponsorship'),
('state', '=', 'paid'),
('last_payment', '>=', fields.Date.to_string(last_month)),
('last_payment', '<', fields.Date.to_string(first)),
])
config = self.env.ref('thankyou_letters.config_thankyou_summary')
for partner in partners:
comm_obj.create({
'config_id': config.id,
'partner_id': partner.id,
'object_ids': invoices.ids
})
return True
@api.multi
def generate_thank_you(self):
"""
Creates a thank you letter communication separating events thank you
and regular thank you.
"""
partners = self.mapped('partner_id').filtered(
lambda p: p.thankyou_letter != 'no')
gift_category = self.env.ref(
'sponsorship_compassion.product_category_gift')
for partner in partners:
invoice_lines = self.mapped('invoice_line_ids').filtered(
lambda l: l.partner_id == partner)
event_thank = invoice_lines.filtered('event_id')
other_thank = invoice_lines - event_thank
for event in event_thank.mapped('event_id'):
event_thank.filtered(
lambda l: l.event_id == event).generate_thank_you()
if other_thank:
other_thank.generate_thank_you()
# Send confirmation to ambassadors
ambassador_config = self.env.ref(
'partner_communication_switzerland.'
'ambassador_donation_confirmation_config'
)
ambassadors = self.mapped('invoice_line_ids.user_id').filtered(
'advocate_details_id.mail_copy_when_donation')
for ambassador in ambassadors:
# Filter only donations not for made for himself and filter
# gifts that are thanked but not directly for ambassador.
ambassador_lines = self.mapped('invoice_line_ids').filtered(
lambda l: l.user_id == ambassador and
l.partner_id != ambassador and
l.product_id.categ_id != gift_category)
if ambassador_lines:
self.env['partner.communication.job'].create({
'partner_id': ambassador.id,
'object_ids': ambassador_lines.ids,
'config_id': ambassador_config.id
})
@api.multi
def _filter_invoice_to_thank(self):
"""
Given a recordset of paid invoices, return only those that have
to be thanked.
:return: account.invoice recordset
"""
return self.filtered(
lambda i: i.type == 'out_invoice' and (
not i.communication_id or i.communication_id.state in (
'call', 'pending')) and i.invoice_type != 'sponsorship' and
(not i.mapped('invoice_line_ids.contract_id') or (
i.invoice_type == 'gift' and i.origin !=
'Automatic birthday gift'))
)
@job(default_channel='root.group_reconcile')
@related_action(action='related_action_invoices')
def group_or_split_reconcile(self):
"""Reconcile given invoices with partner open payments.
"""
super().group_or_split_reconcile()
# Find if a communication with payment slips is pending and
# regenerate it.
jobs = self.env['partner.communication.job'].search([
('model', 'in', ['recurring.contract', 'account.invoice']),
('state', '!=', 'done'),
('partner_id', 'in', self.mapped('partner_id').ids)
])
jobs.refresh_text()
@api.model
def cron_send_ambassador_donation_receipt(self):
"""
Cron for sending the donation receipts to ambassadors
:return: True
"""
ambassador_config = self.env.ref(
'partner_communication_switzerland.'
'ambassador_donation_confirmation_config'
)
jobs = self.env['partner.communication.job'].search([
('config_id', '=', ambassador_config.id),
('state', '=', 'pending')
])
return jobs.send()
|
Python
| 0.000001
|
@@ -677,155 +677,8 @@
e'%0A%0A
- communication_id = fields.Many2one(%0A 'partner.communication.job', 'Thank you letter', ondelete='set null',%0A readonly=True%0A )%0A%0A
|
4f10a72f6d7f351ee45c53f757b2889a372ba3e6
|
Handle also osv, orm exception in wizard execution
|
bin/wizard/__init__.py
|
bin/wizard/__init__.py
|
##############################################################################
#
# Copyright (c) 2004-2006 TINY SPRL. (http://tiny.be) All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import netsvc
import copy
from tools.misc import UpdateableStr
from tools.translate import translate
from xml import dom
import ir
import pooler
class except_wizard(Exception):
def __init__(self, name, value):
self.name = name
self.value = value
class interface(netsvc.Service):
states = {}
def __init__(self, name):
assert not netsvc.service_exist('wizard.'+name), 'The wizard "%s" already exist!'%name
super(interface, self).__init__('wizard.'+name)
self.exportMethod(self.execute)
self.wiz_name = name
def translate_view(self, cr, uid, node, state, lang):
if node.nodeType == node.ELEMENT_NODE:
if node.hasAttribute('string') and node.getAttribute('string'):
trans = translate(cr, uid, self.wiz_name+','+state, 'wizard_view', lang, node.getAttribute('string').encode('utf8'))
if trans:
node.setAttribute('string', trans.decode('utf8'))
for n in node.childNodes:
self.translate_view(cr, uid, n, state, lang)
def execute_cr(self, cr, uid, data, state='init', context=None):
if not context:
context={}
res = {}
try:
state_def = self.states[state]
result_def = state_def.get('result', {})
actions_res = {}
# iterate through the list of actions defined for this state
for action in state_def.get('actions', []):
# execute them
action_res = action(self, cr, uid, data, context)
assert isinstance(action_res, dict), 'The return value of wizard actions should be a dictionary'
actions_res.update(action_res)
res = copy.copy(result_def)
res['datas'] = actions_res
lang = context.get('lang', False)
if result_def['type'] == 'action':
res['action'] = result_def['action'](self, cr, uid, data, context)
elif result_def['type'] == 'choice':
next_state = result_def['next_state'](self, cr, uid, data, context)
return self.execute_cr(cr, uid, data, next_state, context)
elif result_def['type'] == 'form':
fields = copy.copy(result_def['fields'])
arch = copy.copy(result_def['arch'])
button_list = copy.copy(result_def['state'])
# fetch user-set defaut values for the field... shouldn't we pass it the uid?
defaults = ir.ir_get(cr, uid, 'default', False, [('wizard.'+self.wiz_name, False)])
default_values = dict([(x[1], x[2]) for x in defaults])
for val in fields.keys():
if 'default' in fields[val]:
# execute default method for this field
if callable(fields[val]['default']):
fields[val]['value'] = fields[val]['default'](uid, data, state)
else:
fields[val]['value'] = fields[val]['default']
del fields[val]['default']
else:
# if user has set a default value for the field, use it
if val in default_values:
fields[val]['value'] = default_values[val]
if 'selection' in fields[val]:
if not isinstance(fields[val]['selection'], (tuple, list)):
fields[val] = copy.copy(fields[val])
fields[val]['selection'] = fields[val]['selection'](self, cr, uid, context)
if isinstance(arch, UpdateableStr):
arch = arch.string
if lang:
# translate fields
for field in fields:
trans = translate(cr, uid, self.wiz_name+','+state+','+field, 'wizard_field', lang)
if trans:
fields[field]['string'] = trans
# translate arch
if not isinstance(arch, UpdateableStr):
doc = dom.minidom.parseString(arch)
self.translate_view(cr, uid, doc, state, lang)
arch = doc.toxml()
# translate buttons
button_list = list(button_list)
for i, aa in enumerate(button_list):
button_name = aa[0]
trans = translate(cr, uid, self.wiz_name+','+state+','+button_name, 'wizard_button', lang)
if trans:
aa = list(aa)
aa[1] = trans
button_list[i] = aa
res['fields'] = fields
res['arch'] = arch
res['state'] = button_list
except except_wizard, e:
self.abortResponse(2, e.name, 'warning', e.value)
return res
def execute(self, db, uid, data, state='init', context=None):
if not context:
context={}
cr = pooler.get_db(db).cursor()
try:
try:
res = self.execute_cr(cr, uid, data, state, context)
cr.commit()
except Exception:
cr.rollback()
raise
finally:
cr.close()
return res
|
Python
| 0
|
@@ -1458,16 +1458,79 @@
pooler%0A%0A
+from osv.osv import except_osv%0Afrom osv.orm import except_orm%0A%0A
class ex
@@ -5253,26 +5253,129 @@
ept
-except_wizard, e
+Exception, e:%0A%09%09%09if isinstance(e, except_wizard) %5C%0A%09%09%09%09or isinstance(e, except_osv) %5C%0A%09%09%09%09or isinstance(e, except_orm)
:%0A
+%09
%09%09%09s
@@ -5426,16 +5426,31 @@
lue)%0A%09%09%09
+else:%0A%09%09%09%09raise
%0A%09%09retur
|
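Decoded from the escaped diff, the patch imports except_osv and except_orm and widens the except clause: instead of catching only except_wizard, it catches Exception and re-raises anything that is not one of the three known types. A sketch of the resulting control flow in modern syntax — the record itself is Python 2, and execute_state/abort_response are stand-ins for the real calls:

try:
    res = execute_state()
except Exception as e:
    if isinstance(e, (except_wizard, except_osv, except_orm)):
        abort_response(2, e.name, 'warning', e.value)
    else:
        raise  # unexpected errors still surface instead of being swallowed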
ced7ff090c44f706fa161b5e5bed8f36fb6570c9
|
Set default pool to 1 worker.
|
blackgate/component.py
|
blackgate/component.py
|
# -*- coding: utf-8 -*-
from functools import partial
from blackgate.executor_pools import ExecutorPools
from blackgate.circuit_beaker import NoCircuitBeaker, InProcessCircuitBeaker, get_circuit_beaker
class Component(object):
def __init__(self):
self.pools = ExecutorPools()
self.circuit_beakers = {}
self.circuit_beaker_impl = NoCircuitBeaker
self.circuit_beaker_options = {}
self.get_circuit_beaker = partial(
get_circuit_beaker,
table=self.circuit_beakers,
)
self.configurations = {}
def set(self, key, value):
self.configurations[key] = value
def add(self, key, value):
self.configurations.setdefault(key, [])
if key in self.configurations:
assert isinstance(self.configurations[key], list)
self.configurations[key].append(value)
def delete(self, key):
del self.configurations[key]
def install(self):
self.install_executor_pool()
self.install_circuit_beaker()
def install_executor_pool(self):
if 'executor_pool' in self.configurations:
for executor_pool in self.configurations['executor_pool']:
self.pools.register_pool(executor_pool['group_key'], executor_pool['max_workers'])
def install_circuit_beaker(self):
if 'circuit_beaker_enabled' in self.configurations:
self.circuit_beaker_impl = NoCircuitBeaker
self.circuit_beaker_options = {}
elif 'circuit_beaker_impl' not in self.configurations:
self.circuit_beaker_impl = InProcessCircuitBeaker
self.circuit_beaker_options = {'metrics': None} # FIXME
else:
# FIXME: add definition of import_string
self.circuit_beaker_impl = import_string(self.configurations['circuit_beaker_impl'])
self.circuit_beaker_options = self.configurations.get('circuit_beaker_options') or {}
|
Python
| 0
|
@@ -503,14 +503,8 @@
-table=
self
@@ -1059,32 +1059,80 @@
tor_pool(self):%0A
+ self.pools.register_pool('default', 1)%0A%0A
if 'exec
|
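Decoded, the diff drops the redundant table= keyword from the partial() call and makes install_executor_pool always register a one-worker 'default' pool before reading any configured pools. The patched method:

def install_executor_pool(self):
    # Guarantee a fallback pool so requests without an explicit group
    # still have somewhere to run.
    self.pools.register_pool('default', 1)

    if 'executor_pool' in self.configurations:
        for executor_pool in self.configurations['executor_pool']:
            self.pools.register_pool(executor_pool['group_key'],
                                     executor_pool['max_workers'])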
500baa732d08712eef802f57f4c3473f667802d8
|
make xlim and ylim start at 0 (less confusing to user)
|
bonvoyage/visualize.py
|
bonvoyage/visualize.py
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from modish import MODALITY_TO_COLOR, MODALITY_TO_CMAP
def switchy_score(array):
"""Transform a 1D array of data scores to a vector of "switchy scores"
Calculates std deviation and mean of sine- and cosine-transformed
versions of the array. Better than sorting by just the mean which doesn't
push the really lowly variant events to the ends.
Parameters
----------
array : numpy.array
A 1-D numpy array or something that could be cast as such (like a list)
Returns
-------
switchy_score : float
The "switchy score" of the study_data which can then be compared to
other splicing event study_data
"""
array = np.array(array)
variance = 1 - np.std(np.sin(array[~np.isnan(array)] * np.pi))
mean_value = -np.mean(np.cos(array[~np.isnan(array)] * np.pi))
return variance * mean_value
def get_switchy_score_order(x):
"""Apply switchy scores to a 2D array of data scores
Parameters
----------
x : numpy.array
A 2-D numpy array in the shape [n_events, n_samples]
Returns
-------
score_order : numpy.array
A 1-D array of the ordered indices, in switchy score order
"""
switchy_scores = np.apply_along_axis(switchy_score, axis=0, arr=x)
return np.argsort(switchy_scores)
def arrowplot(*args, **kwargs):
data = kwargs.pop('data')
voyage_space_positions = kwargs.pop('voyage_space_positions')
ax = plt.gca()
phenotype1, phenotype2 = data.transition.values[0].split('-')
print phenotype1, phenotype2
# PLot a phantom line for the legend to work
ax.plot(0, 0, **kwargs)
for event in data.event_name:
df = voyage_space_positions.ix[event].ix[[phenotype1, phenotype2]].dropna()
if df.shape[0] != 2:
continue
x1, x2 = df.pc_1.values
y1, y2 = df.pc_2.values
dx = x2 - x1
dy = y2 - y1
ax.arrow(x1, y1, dx, dy, head_width=0.005, head_length=0.005, #fc='k', ec='k',
alpha=0.25, **kwargs)
def hexbin(x, y, *args, **kwargs):
"""Wrapper around hexbin to create a colormap for that modality
Created for compatibility with seaborn FacetGrid
"""
ax = kwargs['ax'] if 'ax' in kwargs else plt.gca()
modality = kwargs.pop('modality', 'multimodal')
cmap = MODALITY_TO_CMAP[modality]
ax.hexbin(x, y, cmap=cmap, *args, **kwargs)
def _waypoint_scatter(waypoints, modality=None, ax=None, alpha=0.5,
color='#262626', markeredgewidth=0.5,
markeredgecolor='darkgrey', **kwargs):
x = waypoints.iloc[:, 0]
y = waypoints.iloc[:, 1]
if ax is None:
ax = plt.gca()
if modality is not None:
color = MODALITY_TO_COLOR[modality]
return ax.plot(x, y, 'o', color=color,
alpha=alpha, markeredgewidth=markeredgewidth,
markeredgecolor=markeredgecolor, **kwargs)
def _waypoint_hexbin(waypoints, modality=None, ax=None, edgecolor='darkgrey',
gridsize=20, mincnt=1, bins='log', cmap='Greys',
extent=(0, 1, 0, 1), **kwargs):
x = waypoints.iloc[:, 0]
y = waypoints.iloc[:, 1]
if ax is None:
ax = plt.gca()
if modality is not None:
cmap = MODALITY_TO_CMAP[modality]
return ax.hexbin(x, y, cmap=cmap, edgecolor=edgecolor, gridsize=gridsize,
mincnt=mincnt, bins=bins, extent=extent, **kwargs)
def _waypoint_kde(waypoints, modality=None, ax=None, cmap='Greys',
shade_lowest=False, n_levels=5, **kwargs):
x = waypoints.iloc[:, 0]
y = waypoints.iloc[:, 1]
if ax is None:
ax = plt.gca()
if modality is not None:
cmap = MODALITY_TO_CMAP[modality]
return sns.kdeplot(x, y, cmap=cmap, shade_lowest=shade_lowest,
n_levels=n_levels, ax=ax, **kwargs)
def waypointplot(waypoints, kind='hexbin', features_groupby=None, ax=None,
**kwargs):
if ax is None:
ax = plt.gca()
if kind.startswith('scatter'):
plotter = _waypoint_scatter
if kind.startswith('hex'):
plotter = _waypoint_hexbin
if kind.startswith('kde'):
plotter = _waypoint_kde
if features_groupby is None:
plotter(waypoints, ax=ax, **kwargs)
else:
for modality, modality_waypoints in waypoints.groupby(features_groupby):
plotter(modality_waypoints, modality, ax=ax, **kwargs)
vlim = -0.05, 1.05
sns.despine()
ax.set(xlabel='~0', ylabel='~1',
xticks=[], yticks=[], ylim=vlim,
xlim=vlim)
return ax
def voyageplot(nmf_space_positions, feature_id, phenotype_to_color,
phenotype_to_marker, order, ax=None, xlabel=None, ylabel=None):
"""Plot 2d space traveled by individual splicing events
Parameters
----------
nmf_space_positions : pandas.DataFrame
A dataframe with a multiindex of (event, phenotype) and columns of
x- and y- position, respectively
feature_id : str
Unique identifier of the feature to plot
phenotype_to_color : dict
Mapping of the phenotype name to a color
phenotype_to_marker : dict
Mapping of the phenotype name to a plotting symbol
order : tuple
Order in which to plot the phenotypes (e.g. if there is a biological
ordering)
ax : matplotlib.Axes object, optional
An axes to plot these onto. If not provided, grabs current axes
xlabel : str, optional
How to label the x-axis
ylabel : str, optional
How to label the y-axis
"""
df = nmf_space_positions.ix[feature_id]
if ax is None:
ax = plt.gca()
for color, s in df.groupby(phenotype_to_color, axis=0):
phenotype = s.index[0]
marker = phenotype_to_marker[phenotype]
ax.plot(s.pc_1, s.pc_2, color=color, marker=marker, markersize=14,
alpha=0.75, label=phenotype, linestyle='none')
# ax.scatter(df.ix[:, 0], df.ix[:, 1], color=color, s=100, alpha=0.75)
# ax.legend(points, df.index.tolist())
ax.set_xlim(0, nmf_space_positions.ix[:, 0].max() * 1.05)
ax.set_ylim(0, nmf_space_positions.ix[:, 1].max() * 1.05)
x = [df.ix[pheno, 0] for pheno in order if pheno in df.index]
y = [df.ix[pheno, 1] for pheno in order if pheno in df.index]
ax.plot(x, y, zorder=-1, color='#262626', alpha=0.5, linewidth=1)
ax.legend()
if xlabel is not None:
ax.set_xlabel(xlabel)
ax.set_xticks([])
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.set_yticks([])
|
Python
| 0
|
@@ -4554,13 +4554,9 @@
m =
--0.05
+0
, 1.
|
89237e9af27fa46c08ec90cab4029f41b335708f
|
fix pep8 violations
|
examples/plugin/simphony_example/__init__.py
|
examples/plugin/simphony_example/__init__.py
|
# Functions, classes and constants exported here will be available
# when the `example` module is imported.
__all__ = ['A', 'B']
from .code import A, B
|
Python
| 0
|
@@ -101,17 +101,16 @@
ported.%0A
-%0A
__all__
|
4a498d83c15f89e00c095659df1fc38377acc0a3
|
fix permissions for rewards
|
bluebottle/rewards/models.py
|
bluebottle/rewards/models.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import (CreationDateTimeField,
ModificationDateTimeField)
from bluebottle.utils.fields import MoneyField
from bluebottle.utils.utils import StatusDefinition
GROUP_PERMS = {
'Staff': {
'perms': (
'add_reward', 'change_reward', 'delete_reward',
)
}
}
class Reward(models.Model):
"""
Rewards for donations
"""
amount = MoneyField(_('Amount'))
title = models.CharField(_('Title'), max_length=30)
description = models.CharField(_('Description'), max_length=200)
project = models.ForeignKey('projects.Project', verbose_name=_('Project'))
limit = models.IntegerField(_('Limit'), null=True, blank=True,
help_text=_('How many of this rewards are available'))
created = CreationDateTimeField(_('creation date'))
updated = ModificationDateTimeField(_('last modification'))
@property
def owner(self):
return self.project.owner
@property
def parent(self):
return self.project
@property
def count(self):
from bluebottle.donations.models import Donation
return Donation.objects \
.filter(project=self.project) \
.filter(reward=self) \
.filter(order__status__in=[StatusDefinition.PENDING, StatusDefinition.SUCCESS]) \
.count()
def __unicode__(self):
return self.title
class Meta:
ordering = ['-project__created', 'amount']
verbose_name = _("Gift")
verbose_name_plural = _("Gifts")
permissions = (
('api_read_reward', 'Can view reward through the API'),
('api_add_reward', 'Can add reward through the API'),
('api_change_reward', 'Can change reward through the API'),
('api_delete_reward', 'Can delete reward through the API'),
)
|
Python
| 0
|
@@ -433,16 +433,230 @@
)%0A
+ %7D,%0A 'Anonymous': %7B%0A 'perms': ('api_read_reward',)%0A %7D,%0A 'Authenticated': %7B%0A 'perms': (%0A 'api_read_reward', 'api_add_reward', 'api_change_reward', 'api_delete_reward',%0A )%0A
%7D%0A%7D%0A
|
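The escaped diff expands GROUP_PERMS from staff-only model permissions to per-group API permissions. Decoded, the resulting dict is:

GROUP_PERMS = {
    'Staff': {
        'perms': (
            'add_reward', 'change_reward', 'delete_reward',
        )
    },
    'Anonymous': {
        'perms': ('api_read_reward',)
    },
    'Authenticated': {
        'perms': (
            'api_read_reward', 'api_add_reward', 'api_change_reward',
            'api_delete_reward',
        )
    },
}

Anonymous visitors can read rewards through the API; authenticated users get full CRUD, matching the api_* permissions declared in the model's Meta.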
7851e867aec82f771683cc267ecb5989d2005aa1
|
add same features as program in 03-janus
|
experiments/01-single-dimer/plot_velocity.py
|
experiments/01-single-dimer/plot_velocity.py
|
#!/usr/bin/env python3
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str, help='H5MD datafile')
parser.add_argument('--directed', action='store_true')
args = parser.parse_args()
import numpy as np
import h5py
import matplotlib.pyplot as plt
with h5py.File(args.file, 'r') as f:
r = f['particles/dimer/position/value'][...]
r_dt = f['particles/dimer/position/time'][()]
im = f['particles/dimer/image/value'][...]
v = f['particles/dimer/velocity/value'][...]
v_dt = f['particles/dimer/velocity/time'][()]
edges = f['particles/dimer/box/edges'][:].reshape((1,-1))
r += edges*im
assert abs(r_dt-v_dt) < 1e-12
assert r.shape[1]==2
assert r.shape[2]==3
assert v.shape[1]==2
assert v.shape[2]==3
time = np.arange(r.shape[0])*r_dt
v_com = v.mean(axis=1)
if args.directed:
unit_z = r[:,1,:]-r[:,0,:]
unit_z /= np.sqrt(np.sum(unit_z**2, axis=1)).reshape((-1,1))
vz = np.sum(v_com*unit_z, axis=1)
plt.plot(time, vz)
else:
plt.plot(time, v_com)
plt.show()
|
Python
| 0
|
@@ -184,16 +184,72 @@
_true')%0A
+parser.add_argument('--histogram', action='store_true')%0A
args = p
@@ -1026,57 +1026,343 @@
-plt.plot(time, vz)%0Aelse:%0A plt.plot(time, v_com
+if args.histogram:%0A plt.hist(vz, bins=20)%0A else:%0A plt.plot(time, vz)%0Aelse:%0A for i in range(3):%0A plt.subplot(3,1,i+1)%0A if args.histogram:%0A plt.hist(v_com%5B:,i%5D)%0A plt.ylabel(r'$P(v_'+'xyz'%5Bi%5D+')$')%0A else:%0A plt.plot(time, v_com%5B:,i%5D)%0A plt.ylabel('xyz'%5Bi%5D
)%0A%0Ap
|
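Decoded from the escaped diff, the script gains a --histogram flag and, in the undirected case, one subplot per velocity component — the same features as the 03-janus program the subject refers to. The patched plotting section:

if args.directed:
    if args.histogram:
        plt.hist(vz, bins=20)
    else:
        plt.plot(time, vz)
else:
    for i in range(3):
        plt.subplot(3, 1, i+1)
        if args.histogram:
            plt.hist(v_com[:, i])
            plt.ylabel(r'$P(v_' + 'xyz'[i] + ')$')
        else:
            plt.plot(time, v_com[:, i])
            plt.ylabel('xyz'[i])

plt.show()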
aabe64773baf0516ecce2e96793221d5bfa91040
|
change to use environment variables
|
scripts/sillyserver.py
|
scripts/sillyserver.py
|
# Example of using the MQTT client class to subscribe to a feed and print out
# any changes made to the feed. Edit the variables below to configure the key,
# username, and feed to subscribe to for changes.
# Import standard python modules.
import sys
import serial
# Import Adafruit IO MQTT client.
from Adafruit_IO import MQTTClient
# Set to your Adafruit IO key & username below.
ADAFRUIT_IO_KEY = "yourkey"
ADAFRUIT_IO_USERNAME = "yourusername"
FEED_ID = 'SillyFire'
ser = serial.Serial('/dev/cu.usbmodem1411',115200)
# Define callback functions which will be called when certain events happen.
def connected(client):
# Connected function will be called when the client is connected to Adafruit IO.
# This is a good place to subscribe to feed changes. The client parameter
# passed to this function is the Adafruit IO MQTT client so you can make
# calls against it easily.
print 'Connected to Adafruit IO! Listening for {0} changes...'.format(FEED_ID)
# Subscribe to changes on a feed named DemoFeed.
client.subscribe(FEED_ID)
def disconnected(client):
# Disconnected function will be called when the client disconnects.
print 'Disconnected from Adafruit IO!'
client.connect()
def message(client, feed_id, payload):
# Message function will be called when a subscribed feed has a new value.
# The feed_id parameter identifies the feed, and the payload parameter has
# the new value.
print 'Feed {0} received new value: {1}'.format(feed_id, payload)
if int(payload) == 1:
print "Fire"
ser.write(b'F')
# Create an MQTT client instance.
client = MQTTClient(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)
# Setup the callback functions defined above.
client.on_connect = connected
client.on_disconnect = disconnected
client.on_message = message
# Connect to the Adafruit IO server.
client.connect()
# Start a message loop that blocks forever waiting for MQTT messages to be
# received. Note there are other options for running the event loop like doing
# so in a background thread--see the mqtt_client.py example to learn more.
client.loop_blocking()
|
Python
| 0.000002
|
@@ -246,23 +246,21 @@
port sys
-%0Aimport
+, os,
serial%0A
@@ -401,56 +401,227 @@
Y =
-%22yourkey%22%0AADAFRUIT_IO_USERNAME = %22yourusername%22%0A
+os.getenv('AIOKEY','nokey')%0AADAFRUIT_IO_USERNAME = os.getenv('AIOUSER','nouser')%0A%0Aif (ADAFRUIT_IO_KEY == 'nokey' or ADAFRUIT_IO_USERNAME == 'nouser'):%0A print('no user or key environment variable')%0A sys.exit()%0A
%0AFEE
@@ -2033,16 +2033,39 @@
server.%0A
+print %22try to connect%22%0A
client.c
|
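Decoded, the diff replaces the hard-coded credentials with environment lookups and aborts early when either is missing; it also prints a line before connecting. The patched configuration block:

import sys, os, serial

ADAFRUIT_IO_KEY = os.getenv('AIOKEY', 'nokey')
ADAFRUIT_IO_USERNAME = os.getenv('AIOUSER', 'nouser')

if (ADAFRUIT_IO_KEY == 'nokey' or ADAFRUIT_IO_USERNAME == 'nouser'):
    print('no user or key environment variable')
    sys.exit()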
d4879a7640869b16e1ea50cdbff84f173a81b521
|
simplify variable
|
examples/apt.py
|
examples/apt.py
|
from pyinfra import host
from pyinfra.modules import apt
SUDO = True
# Note: Using linux_distribution fact so running from docker
# will show valid name since the lsb-release tool is not installed,
# otherwise could just use host.fact.linux_name
linux_name = host.fact.linux_distribution.get('name', '')
code_name = host.fact.linux_distribution['release_meta'].get('DISTRIB_CODENAME')
print(linux_name, code_name)
if linux_name in ['Debian', 'Ubuntu']:
apt.packages(
{'Install some packages'},
['vim-addon-manager', 'vim', 'software-properties-common', 'wget'],
update=True,
)
apt.ppa(
{'Add the Bitcoin ppa'},
'ppa:bitcoin/bitcoin',
)
# typically after adding a ppk, you want to update
apt.update()
# but you could just include the update in the apt install step
# like this:
apt.packages(
{'Install Bitcoin'},
'bitcoin-qt',
update=True,
)
apt.deb(
{'Install Chrome via deb'},
'https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb',
)
apt.key(
{'Install VirtualBox key'},
'https://www.virtualbox.org/download/oracle_vbox_2016.asc',
)
apt.repo(
{'Install VirtualBox repo'},
'deb https://download.virtualbox.org/virtualbox/debian {} contrib'.format(code_name),
)
|
Python
| 0.999861
|
@@ -68,243 +68,8 @@
ue%0A%0A
-# Note: Using linux_distribution fact so running from docker%0A# will show valid name since the lsb-release tool is not installed,%0A# otherwise could just use host.fact.linux_name%0Alinux_name = host.fact.linux_distribution.get('name', '')%0A
code
@@ -151,16 +151,26 @@
)%0Aprint(
+host.fact.
linux_na
@@ -188,16 +188,26 @@
me)%0A%0Aif
+host.fact.
linux_na
|
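Decoded, the diff deletes the linux_name variable (and its explanatory comment) and reads host.fact.linux_name directly — trading the docker-friendly linux_distribution lookup the old comment described for the simpler fact. The resulting head of the example, with the body elided:

from pyinfra import host
from pyinfra.modules import apt

SUDO = True

code_name = host.fact.linux_distribution['release_meta'].get('DISTRIB_CODENAME')
print(host.fact.linux_name, code_name)

if host.fact.linux_name in ['Debian', 'Ubuntu']:
    ...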
4c81f9375529604999c35e36c4586856f9c4d458
|
Handle bad error codes.
|
bot/app/sync/closure_sync.py
|
bot/app/sync/closure_sync.py
|
import logging
import urllib
import pytz
from datetime import datetime, timedelta
from api.models import RoadClosure, RoadClosureStatus
from bs4 import BeautifulSoup
from dateutil import tz
logger = logging.getLogger('bot.digest')
def get_road_closure():
tzTex = pytz.timezone('US/Central')
url = "http://www.cameroncounty.us/spacex/"
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
request = urllib.request.Request(url, headers={'User-Agent': user_agent})
try:
response = urllib.request.urlopen(request)
except Exception as e:
logger.error(e)
return
html_content = response.read()
# Parse the html content
soup = BeautifulSoup(html_content, "lxml")
tableRows = []
gdp_table = soup.find("table")
gdp_table_data = gdp_table.tbody.find_all("tr") # contains 2 rows
for row in gdp_table_data:
aux = ''
for cell in row.find_all("td"):
aux += str(cell.text.strip()) + '|'
tableRows.append(aux[:-1])
closures = []
for row in tableRows:
row = row.replace('a.m.', 'AM')
row = row.replace('p.m.', 'PM')
dtP1 = row.split('|')[1]
dtP2 = row.split('|')[2]
dtP2B = dtP2.split('–')[0].strip()
dtP2E = dtP2.split('–')[1].strip()
dtSta = dtP1 + ' ' + dtP2B
staTex = tzTex.localize(datetime.strptime(dtSta, "%B %d, %Y %I:%M %p"))
dtEnd = dtP1 + ' ' + dtP2E
endTex = tzTex.localize(datetime.strptime(dtEnd, "%B %d, %Y %I:%M %p"))
if 'PM' in dtP2B and 'AM' in dtP2E:
endTex += timedelta(days=1)
nowTex = datetime.now(tz=pytz.utc)
if endTex < nowTex:
continue
name = row.split('|')[0]
status = row.split('|')[3].replace('Closure ', '')
closures.append([staTex, endTex, name, status])
logger.info("Found %s closures" % len(closures))
for closure in closures:
window_start = closure[0].astimezone(tz.tzutc())
window_end = closure[1].astimezone(tz.tzutc())
status_text = closure[3]
status, created = RoadClosureStatus.objects.get_or_create(name=status_text)
try:
obj = RoadClosure.objects.get(window_start__exact=window_start,
window_end__exact=window_end)
obj.status = status
except RoadClosure.DoesNotExist:
obj = RoadClosure.objects.create(title=closure[2],
window_start=window_start,
window_end=window_end,
status=status)
logger.info("Creating new Road Closure %s" % obj)
obj.save()
|
Python
| 0.000002
|
@@ -684,16 +684,130 @@
return%0A
+ if response.code != 200:%0A logger.error(%22Received bad response code %25s%22 %25 response.code)%0A return%0A
html
|
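Decoded, the diff inserts a status check between the urlopen call and response.read(), so non-200 replies are logged and skipped instead of being parsed as HTML. Excerpt of the patched function:

try:
    response = urllib.request.urlopen(request)
except Exception as e:
    logger.error(e)
    return
if response.code != 200:
    logger.error("Received bad response code %s" % response.code)
    return
html_content = response.read()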
630e362e727d4c7274b987008488d203e21f8ec6
|
Use default quality
|
electionleaflets/apps/api/serializers.py
|
electionleaflets/apps/api/serializers.py
|
from rest_framework import serializers
from sorl.thumbnail import get_thumbnail
from leaflets.models import Leaflet, LeafletImage
from constituencies.models import Constituency
from uk_political_parties.models import Party
from people.models import Person
class ConstituencySerializer(serializers.ModelSerializer):
class Meta:
model = Constituency
fields = (
'pk',
'name',
'country_name',
'slug',
)
class PartySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Party
fields = (
'pk',
'party_name',
'party_type',
'status',
)
class PersonSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Person
fields = (
'pk',
'name',
'remote_id',
'source_name',
'source_url',
)
class LeafletImageSerializer(serializers.ModelSerializer):
class Meta:
model = LeafletImage
fields = (
'image',
'image_text',
)
image = serializers.ImageField()
class LeafletSerializer(serializers.HyperlinkedModelSerializer):
images = LeafletImageSerializer(many=True, required=False)
constituency = ConstituencySerializer(required=False)
publisher_party = PartySerializer(required=False)
publisher_person = PersonSerializer(required=False)
first_page_thumb = serializers.SerializerMethodField()
def get_first_page_thumb(self, obj):
image = obj.get_first_image()
if image:
return get_thumbnail(obj.get_first_image, '350', quality=80).url
def validate(self, data):
if not data.get('status') or not data.get('images'):
data['status'] = 'draft'
return data
class Meta:
model = Leaflet
depth = 1
fields = (
'pk',
'title',
'description',
'publisher_party',
'publisher_person',
'constituency',
'images',
'first_page_thumb',
'date_uploaded',
'date_delivered',
'status',
)
|
Python
| 0.000001
|
@@ -1681,20 +1681,8 @@
350'
-, quality=80
).ur
|
4626a20b2d46a3a8ea17d265dff220c5a02700d8
|
Fix bug in clang-format's vim integration cause by r186789.
|
tools/clang-format/clang-format.py
|
tools/clang-format/clang-format.py
|
# This file is a minimal clang-format vim-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Add to your .vimrc:
#
# map <C-I> :pyf <path-to-this-file>/clang-format.py<CR>
# imap <C-I> <ESC>:pyf <path-to-this-file>/clang-format.py<CR>i
#
# The first line enables clang-format for NORMAL and VISUAL mode, the second
# line adds support for INSERT mode. Change "C-I" to another binding if you
# need clang-format on a different key (C-I stands for Ctrl+i).
#
# With this integration you can press the bound key and clang-format will
# format the current line in NORMAL and INSERT mode or the selected region in
# VISUAL mode. The line or region is extended to the next bigger syntactic
# entity.
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
import difflib
import json
import subprocess
import sys
import vim
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
# Change this to format according to other formatting styles (see
# clang-format -help)
style = 'LLVM'
# Get the current text.
buf = vim.current.buffer
text = '\n'.join(buf)
# Determine range to format.
cursor = int(vim.eval('line2byte(line("."))+col(".")')) - 2
lines = '%s:%s' % (vim.current.range.start + 1, vim.current.range.end + 1)
# Avoid flashing an ugly, ugly cmd prompt on Windows when invoking clang-format.
startupinfo = None
if sys.platform.startswith('win32'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call formatter.
p = subprocess.Popen([binary, '-lines', lines, '-style', style,
'-cursor', str(cursor)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, startupinfo=startupinfo)
stdout, stderr = p.communicate(input=text)
# If successful, replace buffer contents.
if stderr:
message = stderr.splitlines()[0]
parts = message.split(' ', 2)
if len(parts) > 2:
message = parts[2]
print 'Formatting failed: %s (total %d warnings, %d errors)' % (
message, stderr.count('warning:'), stderr.count('error:'))
if not stdout:
print ('No output from clang-format (crashed?).\n' +
'Please report to bugs.llvm.org.')
else:
lines = stdout.split('\n')
output = json.loads(lines[0])
lines = lines[1:]
sequence = difflib.SequenceMatcher(None, vim.current.buffer, lines)
for op in sequence.get_opcodes():
if op[0] is not 'equal':
vim.current.buffer[op[1]:op[2]] = lines[op[3]:op[4]]
vim.command('goto %d' % (output['Cursor'] + 1))
|
Python
| 0.000141
|
@@ -2546,16 +2546,25 @@
r op in
+reversed(
sequence
@@ -2577,16 +2577,17 @@
pcodes()
+)
:%0A if
|
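The diff wraps the opcode loop in reversed(), fixing the bug introduced by r186789: opcodes carry indices into the original buffer, so applying a replacement that changes the line count invalidates the indices of every later opcode. Iterating back-to-front leaves the not-yet-applied, earlier indices intact. The patched loop:

for op in reversed(sequence.get_opcodes()):
    if op[0] is not 'equal':
        vim.current.buffer[op[1]:op[2]] = lines[op[3]:op[4]]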