commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
7284de859a6f0cc500e079bd5daf60748006cabf
|
Make --transformed a required argument, otherwise bad things happen.
|
msmbuilder/commands/featurizer.py
|
msmbuilder/commands/featurizer.py
|
from __future__ import print_function, absolute_import
import os
import warnings
import numpy as np
import mdtraj as md
from ..utils.progressbar import ProgressBar, Percentage, Bar, ETA
from ..utils import verbosedump
from ..cmdline import NumpydocClassCommand, argument, exttype, stripquotestype
from ..dataset import dataset, MDTrajDataset
from ..featurizer import (AtomPairsFeaturizer, SuperposeFeaturizer,
DRIDFeaturizer, DihedralFeaturizer,
ContactFeaturizer, GaussianSolventFeaturizer)
class FeaturizerCommand(NumpydocClassCommand):
_group = '1-Featurizer'
trjs = argument(
'--trjs', help='Glob pattern for trajectories',
default='', required=True, type=stripquotestype)
top = argument(
'--top', help='Path to topology file matching the trajectories', default='')
chunk = argument(
'--chunk',
help='''Chunk size for loading trajectories using mdtraj.iterload''',
default=10000, type=int)
out = argument(
'-o', '--out', help='''Path to save featurizer instance using
the pickle protocol''',
default='', type=exttype('.pkl'))
transformed = argument(
'--transformed',
help="Output path for transformed data",
type=exttype('/'))
stride = argument(
'--stride', default=1, type=int,
help='Load only every stride-th frame')
def start(self):
if os.path.exists(self.transformed):
self.error('File exists: %s' % self.transformed)
if os.path.exists(self.out):
self.error('File exists: %s' % self.out)
print(self.instance)
if os.path.exists(os.path.expanduser(self.top)):
top = os.path.expanduser(self.top)
else:
top = None
input_dataset = MDTrajDataset(self.trjs, topology=top, stride=self.stride, verbose=False)
out_dataset = input_dataset.create_derived(self.transformed, fmt='dir-npy')
pbar = ProgressBar(widgets=[Percentage(), Bar(), ETA()],
maxval=len(input_dataset)).start()
for key in pbar(input_dataset.keys()):
trajectory = []
for i, chunk in enumerate(input_dataset.iterload(key, chunk=self.chunk)):
trajectory.append(self.instance.partial_transform(chunk))
out_dataset[key] = np.concatenate(trajectory)
out_dataset.close()
print("\nSaving transformed dataset to '%s'" % self.transformed)
print("To load this dataset interactive inside an IPython")
print("shell or notebook, run\n")
print(" $ ipython")
print(" >>> from msmbuilder.dataset import dataset")
print(" >>> ds = dataset('%s')\n" % self.transformed)
if self.out is not '':
verbosedump(self.instance, self.out)
print("To load this %s object interactively inside an IPython\n"
"shell or notebook, run: \n" % self.klass.__name__)
print(" $ ipython")
print(" >>> from msmbuilder.utils import load")
print(" >>> model = load('%s')\n" % self.out)
class DihedralFeaturizerCommand(FeaturizerCommand):
_concrete = True
klass = DihedralFeaturizer
example = '''
$ msmb DihedralFeaturizer --trjs './trajectories/*.h5' \\
--transformed dihedrals-withchi --types phi psi chi1
'''
class AtomPairsFeaturizerCommand(FeaturizerCommand):
klass = AtomPairsFeaturizer
_concrete = True
def _pair_indices_type(self, fn):
if fn is None:
return None
return np.loadtxt(fn, dtype=int, ndmin=2)
class SuperposeFeaturizerCommand(FeaturizerCommand):
klass = SuperposeFeaturizer
_concrete = True
def _reference_traj_type(self, fn):
return md.load(fn)
def _atom_indices_type(self, fn):
if fn is None:
return None
return np.loadtxt(fn, dtype=int, ndmin=1)
class DRIDFeaturizerCommand(FeaturizerCommand):
klass = DRIDFeaturizer
_concrete = True
def _atom_indices_type(self, fn):
if fn is None:
return None
return np.loadtxt(fn, dtype=int, ndmin=1)
class ContactFeaturizerCommand(FeaturizerCommand):
_concrete = True
klass = ContactFeaturizer
def _contacts_type(self, val):
if val is 'all':
return val
else:
return np.loadtxt(val, dtype=int, ndmin=2)
class GaussianSolventFeaturizerCommand(FeaturizerCommand):
_concrete = True
klass = GaussianSolventFeaturizer
def _solvent_indices_type(self, fn):
return np.loadtxt(fn, dtype=int, ndmin=1)
def _solute_indices_type(self, fn):
return np.loadtxt(fn, dtype=int, ndmin=1)
|
Python
| 0.000907
|
@@ -1298,16 +1298,31 @@
ype('/')
+, required=True
)%0A st
|
ffa67682628e0140e43ae3e886cd022aedfb9750
|
Fix lint warnings in api_helper.py
|
src/tests/ggrc/api_helper.py
|
src/tests/ggrc/api_helper.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from ggrc.app import app
from ggrc.services.common import Resource
from ggrc import services
import inspect
import flask
import logging
from sqlalchemy.orm.collections import InstrumentedList
# style: should the class name be all capitals?
class Api():
def __init__(self):
self.tc = app.test_client()
self.tc.get("/login")
self.resource = Resource()
self.service_dict = {s.model_class.__name__: s.name
for s in services.all_services()}
self.headers = {'Content-Type': 'application/json',
"X-Requested-By": "gGRC"
}
self.user_headers = {}
def set_user(self, person=None):
# Refresh the person instance from the db:
person = person.__class__.query.get(person.id)
if person:
self.user_headers = {
"X-ggrc-user": self.resource.as_json({
"name": person.name,
"email": person.email,
})
}
else:
self.user_headers = {}
self.tc.get("/logout")
self.tc.get("/login", headers=self.user_headers)
def get_service(self, obj):
if inspect.isclass(obj):
return self.service_dict[obj.__name__]
else:
return self.service_dict[obj.__class__.__name__]
def api_link(self, obj, obj_id=None):
obj_id = "" if obj_id is None else "/" + str(obj_id)
return "/api/%s%s" % (self.get_service(obj), obj_id)
def data_to_json(self, response):
""" add docoded json to response object """
try:
response.json = flask.json.loads(response.data)
except:
response.json = None
return response
def send_request(self, request, obj, data, headers={}, api_link=None):
if api_link is None:
api_link = self.api_link(obj)
headers.update(self.headers)
headers.update(self.user_headers)
json_data = self.resource.as_json(data)
logging.info("request json" + json_data)
response = request(api_link, data=json_data, headers=headers.items())
return self.data_to_json(response)
def put(self, obj, data):
response = self.get(obj, obj.id)
headers = {
"If-Match": response.headers.get("Etag"),
"If-Unmodified-Since": response.headers.get("Last-Modified")
}
api_link = self.api_link(obj, obj.id)
return self.send_request(self.tc.put , obj, data, headers=headers, api_link=api_link)
def post(self, obj, data):
return self.send_request(self.tc.post, obj, data)
def get(self, obj, id):
return self.data_to_json(self.tc.get(self.api_link(obj, id)))
def delete(self, obj, id):
response = self.get(obj, obj.id)
headers = {
"If-Match": response.headers.get("Etag"),
"If-Unmodified-Since": response.headers.get("Last-Modified")
}
headers.update(self.headers)
api_link = self.api_link(obj, obj.id)
return self.tc.delete(api_link, headers=headers)
def search(self, types, q="", counts=False):
return (self.tc.get('/search?q={}&types={}&counts_only={}'.format(
q, types, counts)), self.headers)
|
Python
| 0.00013
|
@@ -373,64 +373,8 @@
ing%0A
-from sqlalchemy.orm.collections import InstrumentedList%0A
%0A%0A#
@@ -1128,17 +1128,16 @@
%7D%0A
-%0A
else
@@ -1781,17 +1781,16 @@
sponse%0A%0A
-%0A
def se
@@ -2272,32 +2272,34 @@
aders = %7B%0A
+
%22If-Match%22: resp
@@ -2316,32 +2316,34 @@
rs.get(%22Etag%22),%0A
+
%22If-Unmodi
@@ -2462,32 +2462,41 @@
lf.send_request(
+%0A
self.tc.put , ob
@@ -2490,17 +2490,16 @@
f.tc.put
-
, obj, d
@@ -2801,24 +2801,26 @@
s = %7B%0A
+
%22If-Match%22:
@@ -2845,24 +2845,26 @@
et(%22Etag%22),%0A
+
%22If-Un
|
1696d6b1f240f8403819e3d817ae8e387ab5d08c
|
Add FFT checkers.
|
numscons/checkers/fft_checkers.py
|
numscons/checkers/fft_checkers.py
|
Python
| 0
|
@@ -0,0 +1,1525 @@
+#! /usr/bin/env python%0A# Last Change: Tue Dec 04 03:00 PM 2007 J%0A%0A# Module for custom, common checkers for numpy (and scipy)%0Aimport sys%0Aimport os.path%0Afrom copy import deepcopy%0Afrom distutils.util import get_platform%0A%0A# from numpy.distutils.scons.core.libinfo import get_config_from_section, get_config%0A# from numpy.distutils.scons.testcode_snippets import cblas_sgemm as cblas_src, %5C%0A# c_sgemm as sunperf_src, lapack_sgesv, blas_sgemm, c_sgemm2, %5C%0A# clapack_sgesv as clapack_src%0A# from numpy.distutils.scons.fortran_scons import CheckF77Mangling, CheckF77Clib%0Afrom numscons.configuration import add_info%0Afrom perflib import CheckMKL, CheckFFTW3, CheckFFTW2%0Afrom support import check_include_and_run, ConfigOpts, ConfigRes%0A%0A__all__ = %5B'CheckFFT'%5D%0A%0Adef CheckFFT(context, autoadd = 1, check_version = 0):%0A %22%22%22This checker tries to find optimized library for fft%22%22%22%0A libname = 'fft'%0A env = context.env%0A%0A def check(func, name, suplibs):%0A st, res = func(context, autoadd, check_version)%0A # XXX: check for fft code ?%0A if st:%0A for lib in suplibs:%0A res.cfgopts%5B'libs'%5D.append(lib)%0A add_info(env, libname, res)%0A%0A return st%0A%0A # Check MKL%0A st = check(CheckMKL, 'MKL', %5B%5D)%0A if st:%0A return st%0A%0A # Check fftw3%0A st = check(CheckFFTW3, 'fftw3', %5B'fftw3'%5D)%0A if st:%0A return st%0A%0A # Check fftw2%0A st = check(CheckFFTW2, 'fftw2', %5B'fftw'%5D)%0A if st:%0A return st%0A%0A add_info(env, libname, None)%0A return 0%0A
|
|
88e87392204884102b17a92581c5d5b29a258bb7
|
add ftpsync
|
openprocurement/search/ftpsync.py
|
openprocurement/search/ftpsync.py
|
Python
| 0
|
@@ -0,0 +1,2140 @@
+# -*- coding: utf-8 -*-%0Aimport os%0Aimport sys%0Aimport signal%0Aimport os.path%0Aimport logging%0Aimport logging.config%0A%0Afrom ftplib import FTP%0Afrom ConfigParser import ConfigParser%0A%0Alogger = logging.getLogger(__name__)%0A%0A%0Aclass FTPSyncApp(object):%0A config = %7B%0A 'host': '127.0.0.1',%0A 'port': 21,%0A 'timeout': 120,%0A 'user': 'anonymous',%0A 'passwd': 'anonymous@user.tld',%0A 'ftp_dir': '',%0A 'local_dir': '',%0A 'filematch': 'ocds-tender-*.json',%0A %7D%0A%0A def __init__(self, config=%7B%7D):%0A self.config.update(config)%0A self.config%5B'timeout'%5D = float(self.config%5B'timeout'%5D)%0A self.ftp = FTP()%0A%0A def run(self):%0A self.ftp.connect(%0A self.config%5B'host'%5D,%0A self.config%5B'port'%5D,%0A self.config%5B'timeout'%5D)%0A%0A self.ftp.login(%0A self.config%5B'user'%5D,%0A self.config%5B'passwd'%5D)%0A%0A if self.config%5B'ftp_dir'%5D:%0A self.ftp.cwd(self.config%5B'ftp_dir'%5D)%0A%0A if self.config%5B'local_dir'%5D:%0A logger.info(%22CD %25s%22, self.config%5B'local_dir'%5D)%0A os.chdir(self.config%5B'local_dir'%5D)%0A%0A filematch = self.config%5B'filematch'%5D%0A%0A for filename in self.ftp.nlst(filematch):%0A if os.path.exists(filename):%0A logger.info(%22EXISTS %25s%22, filename)%0A continue%0A try:%0A fp = open(filename, 'wb')%0A logger.info(%22RETR %25s%22, filename)%0A self.ftp.retrbinary('RETR ' + filename, fp.write)%0A fp.close()%0A except Exception as e:%0A logger.error(%22Exception %7B%7D%22.format(e))%0A os.unlink(filename)%0A%0A%0Adef signal_handler(signo, frame):%0A sys.exit(0)%0A%0A%0Adef main():%0A if len(sys.argv) %3C 2:%0A print(%22Usage: ftpsync config.ini%22)%0A sys.exit(1)%0A%0A #logging.config.fileConfig(sys.argv%5B1%5D)%0A logging.basicConfig(level=logging.DEBUG)%0A%0A parser = ConfigParser()%0A parser.read(sys.argv%5B1%5D)%0A%0A signal.signal(signal.SIGTERM, signal_handler)%0A%0A config = parser.items('ftpsync')%0A%0A app = FTPSyncApp(config)%0A app.run()%0A%0Aif __name__ == %22__main__%22:%0A 
main()%0A%0A%0A
|
|
eb4294f95cb05337ef432840d9538de1275b22b4
|
Add routes.
|
web2py/routes.py
|
web2py/routes.py
|
Python
| 0
|
@@ -0,0 +1,50 @@
+routes_in = %5B%0A%09('/', '/addrest/default/index'),%0A%5D%0A
|
|
e3757b20ca74e070e57dd251bf60f691922999fe
|
add new test file
|
test/test_collection.py
|
test/test_collection.py
|
Python
| 0.000001
|
@@ -0,0 +1,532 @@
+import unittest%0Afrom solr_instance import SolrInstance%0Afrom solrcloudpy import Connection%0A%0Aclass TestCollection(unittest.TestCase):%0A def setUp(self):%0A self.solrprocess = SolrInstance(%22solr2%22)%0A self.solrprocess.start()%0A self.solrprocess.wait_ready()%0A self.conn = Connection()%0A %0A def tearDown(self):%0A self.solrprocess.terminate()%0A%0A def test_create_collection(self):%0A coll2 = self.conn.create_collection('coll2')%0A %0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
40dd078b5e176ae5039bf20dcb50350e8f065808
|
Create python script to scroll error messages
|
recognition/scrollError.py
|
recognition/scrollError.py
|
Python
| 0.000001
|
@@ -0,0 +1,142 @@
+from sense_hat import SenseHat%0Aimport sys%0A%0Asense = SenseHat()%0Asense.show_message(sys.stdin.read(), scroll_speed=.08, text_colour=%5B255, 0, 0%5D)%0A
|
|
d6a53b1b8acbddc16006c0c8752b44f176aecb12
|
add ntuple analyser
|
PyAnalysisTools/AnalysisTools/NTupleAnalyser.py
|
PyAnalysisTools/AnalysisTools/NTupleAnalyser.py
|
Python
| 0
|
@@ -0,0 +1,2741 @@
+import os%0Afrom PyAnalysisTools.base import InvalidInputError%0Afrom PyAnalysisTools.base.YAMLHandle import YAMLLoader%0Afrom PyAnalysisTools.ROOTUtils.FileHandle import FileHandle%0Aimport pathos.multiprocessing as mp%0Atry:%0A import pyAMI.client%0Aexcept Exception as e:%0A _logger.error(%22pyAMI not loaded%22)%0A sys.exit(1)%0A%0Aclass NTupleAnalyser(object):%0A def __init__(self, **kwargs):%0A if not %22dataset_list%22 in kwargs:%0A raise InvalidInputError(%22No dataset list provided%22)%0A self.datasets = YAMLLoader.read_yaml(kwargs%5B%22dataset_list%22%5D)%0A self.input_path = kwargs%5B%22input_path%22%5D%0A%0A def transform_dataset_list(self):%0A self.datasets = %5Bds for campaign in self.datasets.values() for ds in campaign%5D%0A self.datasets = map(lambda ds: %5Bds, %22.%22.join(%5Bds.split(%22.%22)%5B1%5D, ds.split(%22.%22)%5B5%5D%5D)%5D, self.datasets)%0A%0A def add_path(self):%0A processed_datasets = os.listdir(self.input_path)%0A for ds in self.datasets:%0A match = %5Bds%5B1%5D in pds for pds in processed_datasets%5D%0A try:%0A index = match.index(True)%0A ds.append(processed_datasets%5Bindex%5D)%0A except ValueError:%0A ds.append(None)%0A%0A def get_events(self, ds):%0A n_processed_events = 0%0A for rf in os.listdir(os.path.join(self.input_path, ds%5B2%5D)):%0A%0A n_processed_events += int(FileHandle(file_name=os.path.join(self.input_path, ds%5B2%5D, rf),%0A switch_off_process_name_analysis=True).get_number_of_total_events(True))%0A ds.append(n_processed_events)%0A client = pyAMI.client.Client('atlas')%0A n_expected_events = int(client.execute(%22GetDatasetInfo -logicalDatasetName=%25s%22 %25 ds%5B0%5D,%0A format=%22dict_object%22).get_rows()%5B0%5D%5B%22totalEvents%22%5D)%0A ds.append(n_expected_events)%0A%0A @staticmethod%0A def print_summary(missing, incomplete):%0A print %22--------------- Missing datasets ---------------%22%0A for ds in missing:%0A print ds%5B0%5D%0A print %22------------------------------------------------%22%0A print%0A 
print%0A print%0A print %22--------------- Incomplete datasets ---------------%22%0A for ds in incomplete:%0A print ds%5B2%5D, ds%5B-2%5D, ds%5B-1%5D%0A%0A def run(self):%0A self.transform_dataset_list()%0A self.add_path()%0A missing_datasets = filter(lambda ds: ds%5B2%5D is None, self.datasets)%0A self.datasets = filter(lambda ds: ds not in missing_datasets, self.datasets)%0A mp.ThreadPool(10).map(self.get_events, self.datasets)%0A incomplete_datasets = filter(lambda ds: not ds%5B-2%5D ==ds%5B-1%5D, self.datasets)%0A self.print_summary(missing_datasets, incomplete_datasets)
|
|
67d1382c5c36e4476c56a9cd5c2e841131b07e6c
|
add classMulInherit.py
|
classMulInherit.py
|
classMulInherit.py
|
Python
| 0.000001
|
@@ -0,0 +1,1070 @@
+class A(object):%0A def __init__(self):%0A self.a = 1%0A def x(self):%0A print %22A.x%22%0A def y(self):%0A print %22A.y%22%0A def z(self):%0A print %22A.z%22%0A%0Aclass B(A):%0A def __init__(self):%0A A.__init__(self)%0A self.a = 2%0A self.b = 3%0A def y(self):%0A print %22B.y%22%0A def z(self):%0A print %22B.z%22%0A%0Aclass C(object):%0A def __init__(self):%0A self.a = 4%0A self.c = 5%0A def y(self):%0A print %22C.y%22%0A def z(self):%0A print %22C.z%22%0A%0Aclass D(C, B):%0A def __init__(self):%0A C.__init__(self)%0A B.__init__(self)%0A self.d = 6%0A def z(self):%0A print %22D.z%22%0A %0A# When resolving a reference to an attribute of an object%0A# that's an instance of class D,%0A# Python first searches the object's instance variables%0A# then uses a simple left-to-right, depth first search through the class hierarchy.%0A# In this case that would mean searching D's attributes, then the class C,%0A# followed the class B and its superclasses (ie, class A,%0A# and then any superclasses it may have, et cetera).
|
|
555dc74ad29b99fd4cf4c3ba97b7edfdaf8e485f
|
Create next-greater-element-i.py
|
Python/next-greater-element-i.py
|
Python/next-greater-element-i.py
|
Python
| 0.999265
|
@@ -0,0 +1,1686 @@
+# Time: O(m + n)%0A# Space: O(m + n)%0A%0A# You are given two arrays (without duplicates) nums1 and nums2 where nums1%E2%80%99s elements are subset of nums2.%0A# Find all the next greater numbers for nums1's elements in the corresponding places of nums2.%0A#%0A# The Next Greater Number of a number x in nums1 is the first greater number to its right in nums2.%0A# If it does not exist, output -1 for this number.%0A#%0A# Example 1:%0A# Input: nums1 = %5B4,1,2%5D, nums2 = %5B1,3,4,2%5D.%0A# Output: %5B-1,3,-1%5D%0A# Explanation:%0A# For number 4 in the first array, you cannot find the next greater number for it in the second array, so output -1.%0A# For number 1 in the first array, the next greater number for it in the second array is 3.%0A# For number 2 in the first array, there is no next greater number for it in the second array, so output -1.%0A# Example 2:%0A# Input: nums1 = %5B2,4%5D, nums2 = %5B1,2,3,4%5D.%0A# Output: %5B3,-1%5D%0A# Explanation:%0A# For number 2 in the first array, the next greater number for it in the second array is 3.%0A# For number 4 in the first array, there is no next greater number for it in the second array, so output -1.%0A# Note:%0A# All elements in nums1 and nums2 are unique.%0A# The length of both nums1 and nums2 would not exceed 1000.%0A%0Aclass Solution(object):%0A def nextGreaterElement(self, findNums, nums):%0A %22%22%22%0A :type findNums: List%5Bint%5D%0A :type nums: List%5Bint%5D%0A :rtype: List%5Bint%5D%0A %22%22%22%0A stk, lookup = %5B%5D, %7B%7D%0A for num in nums:%0A while stk and num %3E stk%5B-1%5D:%0A lookup%5Bstk.pop()%5D = num%0A stk.append(num)%0A while stk:%0A lookup%5Bstk.pop()%5D = -1%0A return map(lambda x : lookup%5Bx%5D, findNums)%0A
|
|
b0c03b86d606c85dd1cab1ad9e9678e1057d0ae1
|
Add pen which draws to TrueType glyphs.
|
Lib/fontTools/pens/ttGlyphPen.py
|
Lib/fontTools/pens/ttGlyphPen.py
|
Python
| 0
|
@@ -0,0 +1,2358 @@
+from __future__ import print_function, division, absolute_import%0Afrom array import array%0A%0Afrom fontTools.misc.py23 import *%0Afrom fontTools.pens.basePen import AbstractPen%0Afrom fontTools.ttLib.tables import ttProgram%0Afrom fontTools.ttLib.tables._g_l_y_f import Glyph%0Afrom fontTools.ttLib.tables._g_l_y_f import GlyphComponent%0Afrom fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates%0A%0A%0A__all__ = %5B%22TTGlyphPen%22%5D%0A%0A%0Aclass TTGlyphPen(AbstractPen):%0A %22%22%22Pen used for drawing to a TrueType glyph.%22%22%22%0A%0A def __init__(self):%0A self.points = %5B%5D%0A self.endPts = %5B%5D%0A self.types = %5B%5D%0A self.components = %5B%5D%0A%0A def _addPoint(self, pt, onCurve):%0A self.points.append(%5Bint(coord) for coord in pt%5D)%0A self.types.append(onCurve)%0A%0A def lineTo(self, pt):%0A self._addPoint(pt, 1)%0A%0A def moveTo(self, pt):%0A assert (not self.points) or (self.endPts%5B-1%5D == len(self.points) - 1)%0A self.lineTo(pt)%0A%0A def qCurveTo(self, *points):%0A for pt in points%5B:-1%5D:%0A self._addPoint(pt, 0)%0A self._addPoint(points%5B-1%5D, 1)%0A%0A def closePath(self):%0A endPt = len(self.points) - 1%0A%0A # ignore anchors%0A if endPt == 0 or (self.endPts and endPt == self.endPts%5B-1%5D + 1):%0A self.points.pop()%0A self.types.pop()%0A return%0A%0A self.endPts.append(endPt)%0A%0A def endPath(self):%0A # TrueType contours are always %22closed%22%0A self.closePath()%0A%0A def addComponent(self, glyphName, transformation):%0A component = GlyphComponent()%0A component.glyphName = glyphName%0A component.transform = (transformation%5B:2%5D, transformation%5B2:4%5D)%0A component.x, component.y = %5Bint(n) for n in transformation%5B4:%5D%5D%0A component.flags = 0%0A self.components.append(component)%0A%0A def glyph(self):%0A glyph = Glyph()%0A%0A glyph.coordinates = GlyphCoordinates(self.points)%0A glyph.endPtsOfContours = self.endPts%0A glyph.flags = array(%22B%22, self.types)%0A glyph.components = self.components%0A%0A # 
TrueType glyphs can't have both contours and components%0A if glyph.components:%0A glyph.numberOfContours = -1%0A else:%0A glyph.numberOfContours = len(glyph.endPtsOfContours)%0A%0A glyph.program = ttProgram.Program()%0A glyph.program.fromBytecode(%22%22)%0A%0A return glyph%0A
|
|
95e2e9af124595aae4801fc9813ee1c294d404cd
|
Change invalidtxrequest to use BitcoinTestFramework
|
test/functional/p2p_invalid_tx.py
|
test/functional/p2p_invalid_tx.py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid transactions.
In this test we connect to one node over p2p, and test tx requests."""
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
from test_framework.comptool import RejectResult, TestInstance, TestManager
from test_framework.messages import COIN
from test_framework.mininode import network_thread_start
from test_framework.test_framework import ComparisonTestFramework
class InvalidTxRequestTest(ComparisonTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
self.tip = None
self.block_time = None
network_thread_start()
test.run()
def get_tests(self):
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.block_time = int(time.time()) + 1
self.log.info("Create a new block with an anyone-can-spend coinbase.")
height = 1
block = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
self.log.info("Mature the block.")
test = TestInstance(sync_every_block=False)
for i in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.tip = block.sha256
self.block_time += 1
test.blocks_and_transactions.append([block, True])
height += 1
yield test
# b'\x64' is OP_NOTIF
# Transaction will be rejected with code 16 (REJECT_INVALID)
tx1 = create_transaction(self.block1.vtx[0], 0, b'\x64', 50 * COIN - 12000)
yield TestInstance([[tx1, RejectResult(16, b'mandatory-script-verify-flag-failed')]])
# TODO: test further transactions...
if __name__ == '__main__':
InvalidTxRequestTest().main()
|
Python
| 0
|
@@ -331,21 +331,8 @@
%22%22%22%0A
-import time%0A%0A
from
@@ -419,84 +419,8 @@
ion%0A
-from test_framework.comptool import RejectResult, TestInstance, TestManager%0A
from
@@ -512,16 +512,30 @@
ad_start
+, P2PDataStore
%0Afrom te
@@ -569,25 +569,22 @@
import
-Compariso
+Bitcoi
nTestFra
@@ -622,17 +622,14 @@
est(
-Compariso
+Bitcoi
nTes
@@ -742,186 +742,238 @@
rue%0A
-%0A def run_test(self):%0A test = TestManager(self, self.options.tmpdir)%0A test.add_all_connections(self.nodes)%0A self.tip = None%0A self.block_time = None
+ self.extra_args = %5B%5B%22-whitelist=127.0.0.1%22%5D%5D%0A%0A def run_test(self):%0A # Add p2p connection to node0%0A node = self.nodes%5B0%5D # convenience reference to the node%0A node.add_p2p_connection(P2PDataStore())%0A
%0A
@@ -1012,24 +1012,40 @@
-test.run
+node.p2p.wait_for_verack
()%0A%0A
def
@@ -1044,28 +1044,57 @@
-def get_tests(self):
+ best_block = self.nodes%5B0%5D.getbestblockhash()
%0A
@@ -1094,29 +1094,24 @@
h()%0A
-self.
tip = int(%220
@@ -1112,14 +1112,49 @@
int(
-%220x%22 +
+best_block, 16)%0A best_block_time =
sel
@@ -1171,27 +1171,33 @@
.get
+block(
best
+_
block
-hash(), 0)
+)%5B'time'%5D
%0A
@@ -1193,37 +1193,32 @@
'time'%5D%0A
-self.
block_time = int
@@ -1218,24 +1218,23 @@
e =
-int(time.
+best_block_
time
-())
+ 1
@@ -1354,37 +1354,32 @@
= create_block(
-self.
tip, create_coin
@@ -1384,37 +1384,32 @@
inbase(height),
-self.
block_time)%0A
@@ -1404,37 +1404,32 @@
k_time)%0A
-self.
block_time += 1%0A
@@ -1492,29 +1492,24 @@
ter%0A
-self.
block1 = blo
@@ -1511,37 +1511,32 @@
= block%0A
-self.
tip = block.sha2
@@ -1570,42 +1570,65 @@
-yield TestInstance
+node.p2p.send_blocks_and_test
(%5B
-%5B
block
+%5D
,
+node, success=
True
-%5D%5D
)%0A%0A
@@ -1681,366 +1681,35 @@
-test = TestInstance(sync_every_block=False)%0A for i in range(100):%0A block = create_block(self.tip, create_coinbase(height), self.block_time)%0A block.solve()%0A self.tip = block.sha256%0A self.block_time += 1%0A test.blocks_and_transactions.append(%5Bblock, True%5D)%0A height += 1%0A yield test
+self.nodes%5B0%5D.generate(100)
%0A%0A
@@ -1838,21 +1838,16 @@
saction(
-self.
block1.v
@@ -1896,51 +1896,93 @@
-yield TestInstance(%5B
+node.p2p.send_txs_and_test(
%5Btx1
+%5D
,
-RejectResult(16,
+node, success=False, reject_code=16, reject_reason=
b'ma
@@ -2018,12 +2018,38 @@
iled
-')%5D%5D
+ (Invalid OP_IF construction)'
)%0A%0A
|
08447fa344e21d6d704c6f195ad2b7405fa8f916
|
Add test for total property
|
saleor/order/test_order.py
|
saleor/order/test_order.py
|
Python
| 0
|
@@ -0,0 +1,200 @@
+from .models import Order%0A%0A%0Adef test_total_property():%0A order = Order(total_net=20, total_tax=5)%0A assert order.total.gross == 25%0A assert order.total.tax == 5%0A assert order.total.net == 20%0A
|
|
2b09a8d75e0d59bba41467210b7d0588eb4a09d5
|
add migration for junebug channel type
|
temba/channels/migrations/0050_add_junebug_channel_type.py
|
temba/channels/migrations/0050_add_junebug_channel_type.py
|
Python
| 0
|
@@ -0,0 +1,1297 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.12 on 2017-01-26 15:56%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('channels', '0049_auto_20170106_0910'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='channel',%0A name='channel_type',%0A field=models.CharField(choices=%5B('AT', %22Africa's Talking%22), ('A', 'Android'), ('BM', 'Blackmyna'), ('CT', 'Clickatell'), ('DA', 'Dart Media'), ('DM', 'Dummy'), ('EX', 'External'), ('FB', 'Facebook'), ('GL', 'Globe Labs'), ('HX', 'High Connection'), ('H9', 'Hub9'), ('IB', 'Infobip'), ('JS', 'Jasmin'), ('JN', 'Junebug'), ('KN', 'Kannel'), ('LN', 'Line'), ('M3', 'M3 Tech'), ('MB', 'Mblox'), ('NX', 'Nexmo'), ('PL', 'Plivo'), ('SQ', 'Shaqodoon'), ('SC', 'SMSCentral'), ('ST', 'Start Mobile'), ('TG', 'Telegram'), ('T', 'Twilio'), ('TW', 'TwiML Rest API'), ('TMS', 'Twilio Messaging Service'), ('TT', 'Twitter'), ('VB', 'Verboice'), ('VI', 'Viber'), ('VP', 'Viber Public Channels'), ('VM', 'Vumi'), ('VMU', 'Vumi USSD'), ('YO', 'Yo!'), ('ZV', 'Zenvia')%5D, default='A', help_text='Type of this channel, whether Android, Twilio or SMSC', max_length=3, verbose_name='Channel Type'),%0A ),%0A %5D%0A
|
|
ace26ab5e713fabd02f4f481956c47640f50b166
|
Add unit test for volume limits client
|
tempest/tests/lib/services/volume/v2/test_limits_client.py
|
tempest/tests/lib/services/volume/v2/test_limits_client.py
|
Python
| 0
|
@@ -0,0 +1,2146 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD%0A# All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Afrom tempest.lib.services.volume.v2 import limits_client%0Afrom tempest.tests.lib import fake_auth_provider%0Afrom tempest.tests.lib.services import base%0A%0A%0Aclass TestLimitsClient(base.BaseServiceTest):%0A%0A FAKE_LIMIT_INFO = %7B%0A %22limits%22: %7B%0A %22rate%22: %5B%5D,%0A %22absolute%22: %7B%0A %22totalSnapshotsUsed%22: 0,%0A %22maxTotalBackups%22: 10,%0A %22maxTotalVolumeGigabytes%22: 1000,%0A %22maxTotalSnapshots%22: 10,%0A %22maxTotalBackupGigabytes%22: 1000,%0A %22totalBackupGigabytesUsed%22: 0,%0A %22maxTotalVolumes%22: 10,%0A %22totalVolumesUsed%22: 0,%0A %22totalBackupsUsed%22: 0,%0A %22totalGigabytesUsed%22: 0%0A %7D%0A %7D%0A %7D%0A%0A def setUp(self):%0A super(TestLimitsClient, self).setUp()%0A fake_auth = fake_auth_provider.FakeAuthProvider()%0A self.client = limits_client.LimitsClient(fake_auth,%0A 'volume',%0A 'regionOne')%0A%0A def _test_show_limits(self, bytes_body=False):%0A self.check_service_client_function(%0A self.client.show_limits,%0A 'tempest.lib.common.rest_client.RestClient.get',%0A self.FAKE_LIMIT_INFO,%0A bytes_body)%0A%0A def test_show_limits_with_str_body(self):%0A self._test_show_limits()%0A%0A def test_show_limits_with_bytes_body(self):%0A self._test_show_limits(bytes_body=True)%0A
|
|
29c268db2cbb3b4787d3e925f925a49f0df68c46
|
add cache UT
|
test/test_cache.py
|
test/test_cache.py
|
Python
| 0.000001
|
@@ -0,0 +1,1925 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%22%22%22Logger Class%0A Simple encapsulation on logging functions.%0A - Console printing%0A - File handler and the mapping for multithreading file handlers are under design yet.%0A%0A .. moduleauthor:: Max Wu %3Chttp://maxwu.me%3E%0A .. References::%0A **ReadtheDocs**: https://pythonguidecn.readthedocs.io/zh/latest/writing/logging.html%0A%22%22%22%0Aimport unittest%0Afrom me.maxwu.cistat.cache import CacheIt%0Afrom me.maxwu.cistat import config%0Afrom me.maxwu.cistat.reqs.circleci_request import CircleCiReq%0A%0A%0Aclass MyTestCase(unittest.TestCase):%0A @staticmethod%0A def get_cache_stat():%0A dc = CacheIt(enable=config.get_cache_enable())%0A hit, miss = dc.cache.stats()%0A dc.close()%0A print(%22%3E%3E%3ETest%3E%3E%3E Cache Stat: hit=%7B:d%7D miss=%7B:d%7D%22.format(hit, miss))%0A return hit, miss%0A%0A def setUp(self):%0A self.hit_orig, self.miss_orig = self.get_cache_stat()%0A self.url = 'https://80-77958022-gh.circle-artifacts.com/0/tmp/circle-junit.BxjS188/junit/TEST-org.maxwu.jrefresh.HttpApi.SourceIpApiTest.xml'%0A%0A def test_cache_stat(self):%0A hit_0, miss_0 = self.get_cache_stat()%0A xunit1 = CircleCiReq.get_artifact_report(url=self.url)%0A hit_1, miss_1 = self.get_cache_stat()%0A self.assertEqual(1, hit_1 + miss_1 - hit_0 - miss_0)%0A%0A xunit2 = CircleCiReq.get_artifact_report(url=self.url)%0A hit_2, miss_2 = self.get_cache_stat()%0A self.assertEqual(miss_2, miss_1) # For the 2nd fetch, it won't be missed.%0A self.assertEqual(1, hit_2 - hit_1) # For the 2nd fetch, it shall hit at least once%0A%0A def test_cache_stat(self):%0A hit_0, miss_0 = self.get_cache_stat()%0A xunit1 = CircleCiReq.get_artifact_report() # No url provided%0A hit_1, miss_1 = self.get_cache_stat()%0A self.assertEqual((hit_0, miss_0), (hit_1, miss_1))%0A%0A def tearDown(self):%0A pass%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
e4b9c43d53121d2b21c4b864fcc74674b0b6dfc1
|
Create class to interpolate values between indexes
|
scratchpad/Interpolator.py
|
scratchpad/Interpolator.py
|
Python
| 0
|
@@ -0,0 +1,956 @@
+class Interpolator:%0A def __init__(self):%0A self.data = %5B%5D%0A%0A def addIndexValue(self, index, value):%0A self.data.append((index, value))%0A%0A def valueAtIndex(self, target_index):%0A if target_index %3C self.data%5B0%5D%5B0%5D:%0A return None%0A elif self.data%5B-1%5D%5B0%5D %3C target_index:%0A return None%0A else:%0A start = None%0A end = None%0A%0A for (index, value) in self.data:%0A if index == target_index:%0A return value%0A else:%0A if index %3C= target_index:%0A start = (index, value)%0A elif target_index %3C index:%0A end = (index, value)%0A break%0A%0A index_delta = end%5B0%5D - start%5B0%5D%0A percent = (target_index - start%5B0%5D) / index_delta%0A value_delta = end%5B1%5D - start%5B1%5D%0A%0A return start%5B1%5D + value_delta * percent%0A
|
|
c3de9ebfa84fd93572d0a4ac991272609a593328
|
Create af_renameSG.py
|
scripts/af_renameSG.py
|
scripts/af_renameSG.py
|
Python
| 0.000002
|
@@ -0,0 +1,365 @@
+# rename shading group name to material name but with SG ended%0Aimport pymel.core as pm%0Aimport re%0AselSG = pm.ls(sl=True,fl=True)%0Afor SG in selSG:%0A curMat = pm.listConnections(SG,d=1)%0A for mat in curMat:%0A if pm.nodeType(mat) == 'blinn' or pm.nodeType(mat) == 'lambert':%0A sgNM = re.split(%22_mat%22,str(mat))%5B0%5D+%22SG%22%0A pm.rename(SG,sgNM)%0A
|
|
b8777453cf03b212f2b06ca0afeef6c780e39f51
|
add face_classifier.py
|
scripts/face_classifier.py
|
scripts/face_classifier.py
|
Python
| 0.000009
|
@@ -0,0 +1,1307 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A# face_classifier.py%0A# author: Kentaro Wada %3Cwww.kentaro.wada@gmail.com%3E%0A%0Aimport os%0Aimport sys%0Aimport collections%0A%0Afrom sklearn import svm%0Aimport cv2%0A%0A%0Aclass FaceClassifier(object):%0A def __init__(self, data_dir):%0A self.data_dir = data_dir%0A self.img_dict = collections.defaultdict(list)%0A self._lookup_imgs()%0A%0A def _lookup_imgs(self):%0A face_dirs = os.listdir(self.data_dir)%0A for face_dir in face_dirs:%0A face_dir_abs = os.path.join(self.data_dir, face_dir)%0A if not os.path.isdir(face_dir_abs):%0A continue%0A for img in os.listdir(face_dir_abs):%0A base, ext = os.path.splitext(img)%0A if ext not in %5B'.png', '.pgm'%5D:%0A continue%0A self.img_dict%5Bface_dir%5D.append(%0A os.path.join(self.data_dir, face_dir, img))%0A%0A%0Adef main():%0A data_dir = '../data/cropped'%0A face_clf = FaceClassifier(data_dir=data_dir)%0A for person, img_path in face_clf.img_dict.items():%0A img = cv2.imread(img_path%5B0%5D)%0A print img_path%5B0%5D%0A cv2.imshow('cropped img', img)%0A k = cv2.waitKey(0)%0A if k == 27:%0A continue%0A cv2.destroyAllWindows()%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
ef7abdab7681e496cebd1e4655a63cafcb9163db
|
add gafton's migration script to scripts/
|
scripts/migrate-dbstore.py
|
scripts/migrate-dbstore.py
|
Python
| 0
|
@@ -0,0 +1,2488 @@
+#!/usr/bin/python%0A%0Aimport sys%0Aimport os%0Aif 'CONARY_PATH' in os.environ:%0A sys.path.insert(0, os.environ%5B'CONARY_PATH'%5D)%0A%0Afrom conary import dbstore%0Afrom conary.dbstore import sqlerrors%0Afrom conary.repository.netrepos import schema%0A%0Aif len(sys.argv) != 3:%0A print %22Usage: migrate %3Csqlite_path%3E %3Cmysql_spec%3E%22%0A%0Asqlite = dbstore.connect(sys.argv%5B1%5D, driver = %22sqlite%22)%0Acs = sqlite.cursor()%0Amysql = dbstore.connect(sys.argv%5B2%5D, driver = %22mysql%22)%0Acm = mysql.cursor()%0A%0Aschema.createSchema(mysql)%0A%0Afor t in sqlite.tables.keys():%0A if t in mysql.tables:%0A continue%0A print %22Only in sqlite:%22, t%0Afor t in mysql.tables.keys():%0A if t in sqlite.tables:%0A continue%0A print %22Only in mysql:%22, t%0A%0AtList = %5B%0A 'Branches',%0A 'Versions',%0A 'Items',%0A 'Labels',%0A 'LabelMap',%0A 'Flavors',%0A 'FlavorMap',%0A 'FlavorScores',%0A 'UserGroups',%0A 'Users',%0A 'UserGroupMembers',%0A 'Permissions',%0A 'Instances',%0A 'Dependencies',%0A 'Latest',%0A 'Metadata',%0A 'MetadataItems',%0A 'Nodes',%0A 'ChangeLogs',%0A 'PGPKeys',%0A 'PGPFingerprints',%0A 'Provides',%0A 'Requires',%0A 'FileStreams',%0A 'TroveFiles',%0A 'TroveInfo',%0A 'TroveTroves',%0A 'EntitlementGroups',%0A 'Entitlements',%0A %5D%0A%0Afor t in tList:%0A print%0A print %22Converting%22, t%0A count = cs.execute(%22select count(*) from %25s%22 %25 t).fetchone()%5B0%5D%0A i = 0%0A cs.execute(%22select * from %25s%22 %25 t)%0A cm.execute('alter table %25s disable keys' %25 t)%0A while True:%0A row = cs.fetchone_dict()%0A if row is None:%0A break%0A if t == %22Permissions%22:%0A row%5B%22canWrite%22%5D = row%5B%22write%22%5D%0A del row%5B%22write%22%5D%0A if 'entGroupEdmin' in row:%0A del row%5B%22entGroupAdmin%22%5D%0A row = row.items()%0A sql = %22insert into %25s (%25s) values (%25s)%22 %25 (%0A t, %22, %22.join(x%5B0%5D for x in row),%0A %22, %22.join(%5B%22?%22%5D * len(row)))%0A i += 1%0A try:%0A cm.execute(sql, %5Bx%5B1%5D for x in row%5D)%0A 
except sqlerrors.ColumnNotUnique:%0A print %22%5Cr%25s: SKIPPING%22 %25 t, row%0A except:%0A print %22ERROR - SQL%22, sql, %22ARGS:%22, %5Bx%5B1%5D for x in row%5D%0A raise%0A else:%0A if i %25 1000 == 0:%0A sys.stdout.write(%22%5Cr%25s: %25d/%25d %25d%25%25%22 %25 (t, i, count, i*100/count))%0A sys.stdout.flush()%0A if i %25 50000 == 0:%0A mysql.commit()%0A cm.execute('alter table %25s enable keys' %25 t)%0A print %22%5Cr%25s: %25d/%25d 100%25%25%22 %25 (t, i, count)%0A mysql.commit()%0A%0A
|
|
36781fb1b04a3d2fd3162ea88969244faab22a60
|
Convert GML to EWKT, via PostGIS
|
open511/utils/postgis.py
|
open511/utils/postgis.py
|
Python
| 0.002223
|
@@ -0,0 +1,327 @@
+from django.db import connection%0A%0Adef gml_to_ewkt(gml_string, force_2D=False):%0A cursor = connection.cursor()%0A if force_2D:%0A sql = 'SELECT ST_AsEWKT(ST_Force_2D(ST_GeomFromGML(%25s)))'%0A else:%0A sql = 'SELECT ST_AsEWKT(ST_GeomFromGML(%25s))'%0A cursor.execute(sql, %5Bgml_string%5D)%0A return cursor.fetchone()%5B0%5D
|
|
aa320244cc03fe299aa33057c8b92a6c2352a5fd
|
Add tracer for sqlalchemy
|
osprofiler/sqlalchemy.py
|
osprofiler/sqlalchemy.py
|
Python
| 0.000063
|
@@ -0,0 +1,1611 @@
+# Copyright 2013 OpenStack Foundation.%0A# All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Afrom osprofiler import profiler%0A%0A%0Adef before_execute(name):%0A %22%22%22Add listener that will send trace info before sql executed.%22%22%22%0A def handler(conn, clauseelement, multiparams, params):%0A p = profiler.get_profiler()%0A if p:%0A info = %7B%22db.statement%22: str(clauseelement),%0A %22db.multiparams%22: str(multiparams),%0A %22db.params%22: str(params)%7D%0A p.start(name, info=info)%0A%0A return handler%0A%0A%0Adef after_execute():%0A %22%22%22Add listener that will send trace info after sql executed.%22%22%22%0A def handler(conn, clauseelement, multiparams, params, result):%0A p = profiler.get_profiler()%0A if p:%0A p.stop(info=%7B%22db.result%22: str(result)%7D)%0A%0A return handler%0A%0A%0Adef add_tracing(sqlalchemy, engine, name):%0A %22%22%22Add tracing to all sqlalchemy calls.%22%22%22%0A sqlalchemy.event.listen(engine, 'before_execute', before_execute(name))%0A sqlalchemy.event.listen(engine, 'after_execute', after_execute())%0A
|
|
eefef8a5917243b75065441d46db19cbd65a7f1d
|
Create debounce decorator
|
mopidy_headless/decorator.py
|
mopidy_headless/decorator.py
|
Python
| 0
|
@@ -0,0 +1,474 @@
+import time%0A%0Adef debounce(wait):%0A %22%22%22%0A Wait before calling a function again, discarding any calls in between%0A %22%22%22%0A def decorator(fn):%0A def wrapped(*args, **kwargs):%0A now = time.time()%0A if wrapped.last is not None:%0A delta = now - wrapped.last%0A if delta %3C wait: return%0A%0A wrapped.last = now%0A fn(*args, **kwargs)%0A wrapped.last = None%0A return wrapped%0A return decorator%0A
|
|
b2e059ce247de4b083c059d1ffe925983c262183
|
add test cases
|
tests/test_fast.py
|
tests/test_fast.py
|
Python
| 0.003542
|
@@ -0,0 +1,651 @@
+from unittest import TestCase%0A%0Aimport numpy as np%0A%0A%0Aclass TestFast(TestCase):%0A def test_clip_grad(self):%0A from vlgp import fast%0A np.random.seed(0)%0A n = 100%0A x = np.random.randn(n)%0A x_clipped = fast.clip_grad(x, bound=1.0)%0A%0A self.assertTrue(np.all(np.logical_and(x_clipped %3E= -1.0, x_clipped %3C= 1.0)))%0A%0A def test_cut_trial(self):%0A from vlgp import fast%0A y = np.random.randn(100, 10)%0A x = np.random.randn(100, 5)%0A trial = %7B'y': y, 'x': x%7D%0A fast_trials = fast.cut_trial(trial, 10)%0A for each in fast_trials:%0A self.assertTrue(each%5B'y'%5D.shape == (10, 10))%0A
|
|
3fc118da6cdc29f4867dc33319ca56f4f3731346
|
add leetcode 121
|
leetcode/121.py
|
leetcode/121.py
|
Python
| 0.000137
|
@@ -0,0 +1,1115 @@
+#!/usr/bin/env python%0A%22%22%22%0ASay you have an array for which the ith element is the price of a given stock on day i.%0A%0AIf you were only permitted to complete at most one transaction %0A(ie, buy one and sell one share of the stock), design an algorithm to find the maximum profit.%0A%22%22%22%0A%0Aclass Solution(object):%0A def maxProfit(self, prices):%0A %22%22%22%0A :type prices: List%5Bint%5D%0A :rtype: int%0A%0A Idea: dynamic programming%0A dp(i): the profit of day i,%0A dp(i) = prices%5Bi%5D - min_price or max_profit in day i - 1%0A%0A min_price: current min price till day i%0A%0A Dynamic programming equation:%0A%0A dp(i) = max %7C prices%5Bi%5D - min_price%0A %7C %0A %7C dp%5Bi - 1%5D%0A%0A min_price = min %7C min_price%0A %7C prices%5Bi%5D%0A%0A Time: O(n)%0A Space: O(1)%0A %22%22%22%0A %0A if prices == None or len(prices) == 0:%0A return 0%0A%0A min_price = prices%5B0%5D%0A profit = 0%0A%0A for p in prices:%0A profit = max(p - min_price, profit)%0A min_price = min(min_price, p)%0A%0A return profit%0A
|
|
16ad7991c22b4d9834a5db57912789d825a0cefb
|
Add unit tests
|
tests/test_util.py
|
tests/test_util.py
|
Python
| 0.000001
|
@@ -0,0 +1,471 @@
+import util%0A%0Afrom nose.tools import assert_equal%0A%0A%0Aclass TestPick():%0A def check(self, filenames, expected, k, randomized):%0A result = util.pick(filenames, k, randomized)%0A assert_equal(result, expected)%0A%0A def test_all_sequential(self):%0A filenames = %5B'a-4.txt', 'b-2.txt', 'c-3.txt', 'd-1.txt', 'e-0.txt'%5D%0A expected = %5B'e-0.txt', 'd-1.txt', 'b-2.txt', 'c-3.txt', 'a-4.txt'%5D%0A self.check(filenames, expected, k=None, randomized=False)%0A
|
|
fcc92760db0d1dc56aca70aff69b34a29c9e8e6c
|
Add unit tests for the methods in util
|
tests/test_util.py
|
tests/test_util.py
|
Python
| 0
|
@@ -0,0 +1,985 @@
+from lib import util%0A%0A%0Adef test_cachedproperty():%0A class Target:%0A def __init__(self):%0A self.call_count = 0%0A%0A @util.cachedproperty%0A def prop(self):%0A self.call_count += 1%0A return self.call_count%0A%0A t = Target()%0A assert t.prop == t.prop == 1%0A%0A%0Adef test_deep_getsizeof():%0A int_t = util.deep_getsizeof(1)%0A assert util.deep_getsizeof(%5B1, 1%5D) %3E 2 * int_t%0A assert util.deep_getsizeof(%7B1: 1%7D) %3E 2 * int_t%0A assert util.deep_getsizeof(%7B1: %7B1: 1%7D%7D) %3E 3 * int_t%0A%0A%0Aclass Base:%0A pass%0A%0A%0Aclass A(Base):%0A pass%0A%0A%0Aclass B(Base):%0A pass%0A%0A%0Adef test_subclasses():%0A assert util.subclasses(Base) == %5BA, B%5D%0A%0A%0Adef test_chunks():%0A assert list(util.chunks(%5B1, 2, 3, 4, 5%5D, 2)) == %5B%5B1, 2%5D, %5B3, 4%5D, %5B5%5D%5D%0A%0A%0Adef test_increment_byte_string():%0A assert util.increment_byte_string(b'1') == b'2'%0A assert util.increment_byte_string(b'%5Cx01%5Cx01') == b'%5Cx01%5Cx02'%0A assert util.increment_byte_string(b'%5Cxff%5Cxff') == b'%5Cx01%5Cx00%5Cx00'%0A
|
|
a1fc7311ddc50eb43f43fc51d3290f2c91fd4fa1
|
Update cheapest-flights-within-k-stops.py
|
Python/cheapest-flights-within-k-stops.py
|
Python/cheapest-flights-within-k-stops.py
|
# Time: O((|E| + |V|) * log|V|) = O(|E| * log|V|)
# Space: O(|E| + |V|)
# There are n cities connected by m flights. Each flight starts from city u and arrives at v with a price w.
#
# Now given all the cities and flights, together with starting city src and the destination dst,
# your task is to find the cheapest price from src to dst with up to k stops.
# If there is no such route, output -1.
#
# Example 1:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 1
# Output: 200
# Explanation:
# The cheapest price from city 0 to city 2 with at most 1 stop costs 200, as marked red in the picture.
#
# Example 2:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 0
# Output: 500
#
# Explanation:
# The cheapest price from city 0 to city 2 with at most 0 stop costs 500, as marked blue in the picture.
# Note:
# - The number of nodes n will be in range [1, 100], with nodes labeled from 0 to n - 1.
# - The size of flights will be in range [0, n * (n - 1) / 2].
# - The format of each flight will be (src, dst, price).
# - The price of each flight will be in the range [1, 10000].
# - k is in the range of [0, n - 1].
# - There will not be any duplicated flights or self cycles.
import collections
import heapq
class Solution(object):
    """Cheapest flight price from src to dst using at most K stops.

    Best-first (Dijkstra-style) search where every heap entry also carries
    the number of flight segments it may still take.
    """

    def findCheapestPrice(self, n, flights, src, dst, K):
        """
        :type n: int
        :type flights: List[List[int]]
        :type src: int
        :type dst: int
        :type K: int
        :rtype: int  (-1 when dst is unreachable within K stops)
        """
        # Adjacency map: city -> list of (neighbor, price) edges.
        graph = collections.defaultdict(list)
        for origin, dest, price in flights:
            graph[origin].append((dest, price))

        # K stops allow at most K + 1 flight segments.
        # Heap entries: (total_cost, city, segments_left).
        frontier = [(0, src, K + 1)]
        while frontier:
            cost, city, segments_left = heapq.heappop(frontier)
            if city == dst:
                # First pop of dst carries the minimal feasible cost.
                return cost
            if segments_left <= 0:
                # Out of allowed segments along this path; abandon it.
                continue
            for neighbor, price in graph[city]:
                heapq.heappush(frontier,
                               (cost + price, neighbor, segments_left - 1))
        return -1
|
Python
| 0
|
@@ -65,16 +65,25 @@
%7C + %7CV%7C)
+ = O(%7CE%7C)
%0A%0A# Ther
|
33bcc472fdc780154403eb1616114957ce9e2b21
|
refactor app creation/run so tests can spin up an instance
|
dataactbroker/app.py
|
dataactbroker/app.py
|
import os
import sys
import inspect
import traceback
import json
from flask.ext.cors import CORS
from flask.ext.bcrypt import Bcrypt
from flask import Flask
from dataactcore.utils.cloudLogger import CloudLogger
from dataactcore.utils.jsonResponse import JsonResponse
from dataactbroker.handlers.aws.sesEmail import sesEmail
from dataactbroker.handlers.accountHandler import AccountHandler
from dataactbroker.handlers.aws.session import DynamoInterface, SessionTable
from dataactbroker.fileRoutes import add_file_routes
from dataactbroker.loginRoutes import add_login_routes
from dataactbroker.userRoutes import add_user_routes
def runApp():
    """Create, configure, and run the broker Flask application.

    Reads ``config/web_api_configuration.json`` located next to this module,
    wires up CORS, DynamoDB-backed sessions, bcrypt, and the route modules,
    then blocks in ``app.run``.  Any startup failure is logged via
    CloudLogger and swallowed (the process exits without re-raising).
    """
    try :
        """Set up the Application"""
        # Create application
        # Instance path resolves to the "config" dir beside this source file.
        config_path = "".join([os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))),"/config"])
        app = Flask(__name__,instance_path=config_path)
        def getAppConfiguration() :
            """gets the web_api_configuration JSON"""
            configFile = "".join([app.instance_path, "/web_api_configuration.json"])
            # NOTE(review): the file handle is never closed explicitly;
            # relies on garbage collection.
            return json.loads(open(configFile,"r").read())
        config = getAppConfiguration()
        # Set parameters
        AccountHandler.FRONT_END = config["frontend_url"]
        sesEmail.SIGNING_KEY = config["security_key"]
        debugFlag = config["server_debug"] # Should be false for prod
        runLocal = config["local_dynamo"] # False for prod, when True this assumes that the Dynamo is on the same server
        JsonResponse.debugMode = config["rest_trace"]
        app.config.from_object(__name__)
        # "*" opens CORS to any origin; otherwise restrict to the configured list.
        if(config["origins"] == "*"):
            cors = CORS(app,supports_credentials=True)
        else:
            cors = CORS(app,supports_credentials=True,origins=config["origins"])
        #Enable AWS Sessions
        app.session_interface = DynamoInterface()
        # Set up bcrypt
        bcrypt = Bcrypt(app)
        # Root will point to index.html
        @app.route("/", methods=["GET"])
        def root():
            # Simple liveness-check endpoint.
            return "Broker is running"
        # Add routes for modules here
        add_login_routes(app,bcrypt)
        add_file_routes(app,config["create_credentials"])
        add_user_routes(app,config["system_email"],bcrypt)
        SessionTable.localPort = int( config["dynamo_port"])
        SessionTable.setup(app, runLocal)
        # Blocking call: serves requests until the process is stopped.
        app.run(debug=debugFlag,threaded=True,host="0.0.0.0",port= int(config["port"]))
    except Exception as e:
        # Log the failure with a traceback truncated to 10 frames.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        trace = traceback.extract_tb(exc_tb, 10)
        CloudLogger.logError('Broker App Level Error: ',e,trace)
        # Drop the traceback reference to avoid a reference cycle.
        del exc_tb
if __name__ == '__main__':
    runApp()
|
Python
| 0.000001
|
@@ -629,32 +629,227 @@
def
-runApp():%0A try :%0A
+getAppConfiguration(app) :%0A %22%22%22gets the web_api_configuration JSON%22%22%22%0A configFile = %22%22.join(%5Bapp.instance_path, %22/web_api_configuration.json%22%5D)%0A return json.loads(open(configFile,%22r%22).read())%0A%0Adef createApp():%0A
@@ -873,24 +873,34 @@
lication%22%22%22%0A
+ try :%0A
# Cr
@@ -1089,25 +1089,24 @@
g_path)%0A
-%0A
def getA
@@ -1101,243 +1101,8 @@
-def getAppConfiguration() :%0A %22%22%22gets the web_api_configuration JSON%22%22%22%0A configFile = %22%22.join(%5Bapp.instance_path, %22/web_api_configuration.json%22%5D)%0A return json.loads(open(configFile,%22r%22).read())%0A%0A
conf
@@ -1126,16 +1126,19 @@
uration(
+app
)%0A
@@ -2021,17 +2021,16 @@
nning%22%0A%0A
-%0A
@@ -2321,95 +2321,8 @@
al)%0A
- app.run(debug=debugFlag,threaded=True,host=%220.0.0.0%22,port= int(config%5B%22port%22%5D))
%0A
@@ -2529,16 +2529,263 @@
exc_tb%0A
+%0A return app%0A%0Adef runApp():%0A %22%22%22runs the application%22%22%22%0A%0A app = createApp()%0A config = getAppConfiguration(app)%0A debugFlag = config%5B%22server_debug%22%5D%0A app.run(debug=debugFlag,threaded=True,host=%220.0.0.0%22,port= int(config%5B%22port%22%5D))%0A%0A
if __nam
|
1c41bc4d06ad2209ddd6fe79621cabd210b94589
|
Add __init__
|
demcoreg/__init__.py
|
demcoreg/__init__.py
|
Python
| 0.000917
|
@@ -0,0 +1,23 @@
+#! /usr/bin/env python%0A
|
|
97671650987d74c6281e56f3f4e1950f2d996d5b
|
upgrade version...
|
setup.py
|
setup.py
|
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from setuptools.command.sdist import sdist
import os
import subprocess
class local_sdist(sdist):
    """Customized sdist hook - builds the ChangeLog file from VC first"""

    def run(self):
        # Regenerate ChangeLog only when building from a bzr working tree.
        if os.path.isdir('.bzr'):
            proc = subprocess.Popen(["bzr", "log", "--gnu"],
                                    stdout=subprocess.PIPE)
            history = proc.communicate()[0]
            with open("ChangeLog", "w") as out:
                out.write(history)
        # Defer to the stock sdist behaviour for everything else.
        sdist.run(self)
# Distribution identity; both values are reused in the setup() call below.
name = 'glance'
version = '0.1.1'

setup(
    name=name,
    version=version,
    description='Glance',
    license='Apache License (2.0)',
    author='OpenStack, LLC.',
    author_email='openstack-admins@lists.launchpad.net',
    url='https://launchpad.net/glance',
    # Ship every package except the test suite and the bin scripts dir.
    packages=find_packages(exclude=['tests', 'bin']),
    test_suite='nose.collector',
    # Use the bzr-aware sdist subclass defined above so ChangeLog is rebuilt.
    cmdclass={'sdist': local_sdist},
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.6',
        'Environment :: No Input/Output (Daemon)',
    ],
    install_requires=[], # removed for better compat
    scripts=['bin/glance-api',
             'bin/glance-registry'])
|
Python
| 0
|
@@ -1244,17 +1244,20 @@
= '0.1.
-1
+3pre
'%0A%0Asetup
|
e123f31a2a863491bb6353336038e7d324475bc9
|
Add setuptools for install
|
setup.py
|
setup.py
|
Python
| 0
|
@@ -0,0 +1,621 @@
+from setuptools import setup%0A%0Asetup(%0A name='netbyte',%0A version='0.4',%0A url='http://www.sc0tfree.com',%0A license='MIT License',%0A author='sc0tfree',%0A author_email='henry@sc0tfree.com',%0A description='Netbyte is a Netcat-style tool that facilitates probing proprietary TCP and UDP services. It is lightweight, fully interactive and provides formatted output in both hexadecimal and ASCII.',%0A keywords='utils cli netcat hexadecimal',%0A packages=%5B'netbyte'%5D,%0A install_requires=%5B%0A 'colorama',%0A %5D,%0A entry_points = %7B%0A %22console_scripts%22 : %5B'netbyte = netbyte.netbyte:main'%5D%0A %7D,%0A)%0A
|
|
6e805995a165f923c1c4f71c163c64a245f9a3d5
|
Add simple distutils script for modules
|
setup.py
|
setup.py
|
Python
| 0
|
@@ -0,0 +1,922 @@
+from distutils.core import setup%0A%0Asetup(name='dimreducer',%0A version='1.0',%0A description='Dimension reduction methods',%0A py_modules=%5B'dimreducer'%5D,%0A )%0A%0Asetup(name='multiphenotype_utils',%0A version='1.0',%0A description='Utility functions for all methods',%0A py_modules=%5B'multiphenotype_utils'%5D,%0A )%0A%0Asetup(name='general_autoencoder',%0A version='1.0',%0A description='Autoencoder base class',%0A py_modules=%5B'general_autoencoder'%5D,%0A )%0A%0Asetup(name='standard_autoencoder',%0A version='1.0',%0A description='Standard autoencoder',%0A py_modules=%5B'standard_autoencoder'%5D,%0A )%0A%0Asetup(name='variational_autoencoder',%0A version='1.0',%0A description='VAE',%0A py_modules=%5B'variational_autoencoder'%5D,%0A )%0A%0Asetup(name='variational_age_autoencoder',%0A version='1.0',%0A description='VAE with age',%0A py_modules=%5B'variational_age_autoencoder'%5D,%0A )%0A
|
|
914b7cd2c94bddd1a68eb2293364633a9325506f
|
add a unit test
|
_unittests/ut_td_1a/test_diff.py
|
_unittests/ut_td_1a/test_diff.py
|
Python
| 0.000001
|
@@ -0,0 +1,2751 @@
+%22%22%22%0A@brief test log(time=1s)%0A%0AYou should indicate a time in seconds. The program %60%60run_unittests.py%60%60%0Awill sort all test files by increasing time and run them.%0A%22%22%22%0A%0A%0Aimport sys%0Aimport os%0Aimport unittest%0Afrom difflib import SequenceMatcher%0A%0A%0Atry:%0A import src%0A import pyquickhelper as skip_%0Aexcept ImportError:%0A path = os.path.normpath(%0A os.path.abspath(%0A os.path.join(%0A os.path.split(__file__)%5B0%5D,%0A %22..%22,%0A %22..%22)))%0A if path not in sys.path:%0A sys.path.append(path)%0A path = os.path.normpath(%0A os.path.abspath(%0A os.path.join(%0A os.path.split(__file__)%5B0%5D,%0A %22..%22,%0A %22..%22,%0A %22..%22,%0A %22pyquickhelper%22,%0A %22src%22)))%0A if path not in sys.path:%0A sys.path.append(path)%0A import src%0A import pyquickhelper.loghelper as skip_%0A%0Afrom pyquickhelper.loghelper import fLOG%0A%0A%0Aclass TestDiff(unittest.TestCase):%0A%0A def test_diff(self):%0A fLOG(%0A __file__,%0A self._testMethodName,%0A OutputPrint=__name__ == %22__main__%22)%0A%0A seq1 = %22ab ab abc abcd abc%22.split()%0A seq2 = %22ab ab abc abc abc adb%22.split()%0A diff = SequenceMatcher(a=seq1, b=seq2)%0A nb = 0%0A for opcode in diff.get_opcodes():%0A fLOG(opcode)%0A nb += 1%0A self.assertEqual(nb, 4)%0A%0A if __name__ == %22__main__%22:%0A from src.ensae_teaching_cs.helpers.pygame_helper import wait_event%0A import pygame%0A pygame.init()%0A h = 20%0A font = pygame.font.Font(%22freesansbold.ttf%22, h)%0A font_small = pygame.font.Font(%22freesansbold.ttf%22, 3 * h // 4)%0A size = 500, 500%0A white = 255, 255, 255%0A screen = pygame.display.set_mode(size)%0A screen.fill(white)%0A%0A pos = 0%0A for opcode in diff.get_opcodes():%0A if opcode%5B0%5D == %22delete%22:%0A color = (200, 0, 0)%0A for i in range(opcode%5B1%5D, opcode%5B2%5D):%0A text = seq1%5Bi%5D%0A text = font_small.render(text, True, color)%0A screen.blit(text, (10, h * pos + h // 6))%0A pos += 1%0A else:%0A color = (0, 0, 0) if opcode%5B0%5D == 
%22equal%22 else (0, 120, 0)%0A for i in range(opcode%5B3%5D, opcode%5B4%5D):%0A text = seq2%5Bi%5D%0A text = font.render(text, True, color)%0A screen.blit(text, (10, h * pos))%0A pos += 1%0A pygame.display.flip()%0A wait_event(pygame)%0A%0A%0Aif __name__ == %22__main__%22:%0A unittest.main()%0A
|
|
c03411020db80b703260314236d96cc409398545
|
Create variable.py
|
introduction/variable.py
|
introduction/variable.py
|
Python
| 0.000008
|
@@ -0,0 +1,32 @@
+a = 10%0AA = 10%0Aprint(a)%0Aprint(A)%0A
|
|
3314f5d6ffb843a58e61856e726bd47e426538aa
|
Add spec_cleaner/__main__.py to allow running spec-cleaner without installing it.
|
spec_cleaner/__main__.py
|
spec_cleaner/__main__.py
|
Python
| 0
|
@@ -0,0 +1,572 @@
+from __future__ import absolute_import%0A%0Aimport os%0Aimport sys%0A%0A# If we are running from a wheel, add the wheel to sys.path.%0Aif __package__ == '':%0A # __file__ is spec-cleaner-*.whl/spec_cleaner/__main__.py.%0A # First dirname call strips of '/__main__.py', second strips off '/spec_cleaner'.%0A # Resulting path is the name of the wheel itself.%0A # Add that to sys.path so we can import spec_cleaner.%0A path = os.path.dirname(os.path.dirname(__file__))%0A sys.path.insert(0, path)%0A%0Aimport spec_cleaner%0A%0Aif __name__ == '__main__':%0A sys.exit(spec_cleaner.main())%0A
|
|
6f8db8eddb7b55a3854ac2570e7e4b2df0a89036
|
Add coveralls dep
|
setup.py
|
setup.py
|
#!/usr/bin/env python
"""
Sentry
======
Sentry is a realtime event logging and aggregation platform. It specializes
in monitoring errors and extracting all the information needed to do a proper
post-mortem without any of the hassle of the standard user feedback loop.
Sentry is a Server
------------------
The Sentry package, at its core, is just a simple server and web UI. It will
handle authenticating clients (such as `Raven <https://github.com/getsentry/raven-python>`_)
and all of the logic behind storage and aggregation.
That said, Sentry is not limited to Python. The primary implementation is in
Python, but it contains a full API for sending events from any language, in
any application.
:copyright: (c) 2011-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from setuptools import setup, find_packages
# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when running `python
# setup.py test` (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
for _atexit_module in ('multiprocessing', 'billiard'):
    try:
        __import__(_atexit_module)
    except ImportError:
        # billiard is optional; its absence here is harmless.
        continue
# Developer-only tooling (lint + coverage); installed via the "dev" extra.
dev_requires = [
    'flake8>=1.7.0,<2.0',
    'pytest-cov>=1.4',
]

# Needed only to run the test suite; installed via the "tests" extra.
tests_require = [
    'exam>=0.5.1',
    'eventlet',
    'pytest',
    'pytest-django',
    'pytest-timeout',
    'nydus',
    'mock>=0.8.0',
    'mock-django>=0.6.4',
    'redis',
    'unittest2',
]

# Core runtime dependencies, pinned to narrow version ranges.
install_requires = [
    'cssutils>=0.9.9,<0.9.10',
    'BeautifulSoup>=3.2.1,<3.3.0',
    'django-celery>=3.0.11,<3.1.0',
    'celery>=3.0.15,<3.1.0',
    'django-crispy-forms>=1.2.3,<1.3.0',
    'Django>=1.5.1,<1.6',
    'django-paging>=0.2.5,<0.3.0',
    'django-picklefield>=0.3.0,<0.4.0',
    'django-static-compiler>=0.3.0,<0.4.0',
    'django-templatetag-sugar>=0.1.0,<0.2.0',
    'gunicorn>=0.17.2,<0.18.0',
    'logan>=0.5.6,<0.6.0',
    'nydus>=0.10.0,<0.11.0',
    'Pygments>=1.6.0,<1.7.0',
    'pynliner>=0.4.0,<0.5.0',
    'python-dateutil>=1.5.0,<2.0.0',
    'raven>=3.1.17',
    'redis>2.7.0,<2.8.0',
    'simplejson>=3.1.0,<3.2.0',
    'South>=0.7.6,<0.8.0',
    'httpagentparser>=1.2.1,<1.3.0',
    'django-social-auth>=0.7.23,<0.8.0',
    'django-social-auth-trello>=1.0.3,<1.1.0',
    'setproctitle>=1.1.7,<1.2.0',
]

# Optional database drivers, exposed through extras_require in setup().
postgres_requires = [
    'psycopg2>=2.4.0,<2.5.0',
]

# PyPy-compatible PostgreSQL driver (cffi-based).
postgres_pypy_requires = [
    'psycopg2cffi',
]

mysql_requires = [
    'MySQL-python>=1.2.0,<1.3.0',
]
setup(
    name='sentry',
    version='5.5.0-DEV',
    author='David Cramer',
    author_email='dcramer@gmail.com',
    url='http://www.getsentry.com',
    description='A realtime logging and aggregation server.',
    long_description=open('README.rst').read(),
    # Sources live under src/ (the "src layout").
    package_dir={'': 'src'},
    packages=find_packages('src'),
    zip_safe=False,
    install_requires=install_requires,
    # Database drivers are opt-in extras; each bundles the core requirements
    # so a single extra gives a complete install.
    extras_require={
        'tests': tests_require,
        'dev': dev_requires,
        'postgres': install_requires + postgres_requires,
        'postgres_pypy': install_requires + postgres_pypy_requires,
        'mysql': install_requires + mysql_requires,
    },
    test_suite='runtests.runtests',
    license='BSD',
    include_package_data=True,
    # Installs the `sentry` command-line entry point.
    entry_points={
        'console_scripts': [
            'sentry = sentry.utils.runner:main',
        ],
    },
    classifiers=[
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: OS Independent',
        'Topic :: Software Development'
    ],
)
|
Python
| 0.000001
|
@@ -1383,24 +1383,48 @@
t-timeout',%0A
+ 'python-coveralls',%0A
'nydus',
|
c54623d673d03d841d330e80d414a687770cc2a1
|
Add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,864 @@
+import setuptools%0A%0Awith open(%22README%22, %22r%22, encoding=%22utf-8%22) as fh:%0A long_description = fh.read()%0A%0Asetuptools.setup(%0A name=%22zvm%22, # Replace with your own username%0A version=%221.0.0%22,%0A author=%22Ben Collins-Sussman%22,%0A author_email=%22sussman@gmail.com%22,%0A description=%22A pure-python implementation of a Z-machine for interactive fiction%22,%0A long_description=long_description,%0A long_description_content_type=%22text/x-rst%22,%0A url=%22https://github.com/sussman/zvm%22,%0A project_urls=%7B%0A %22Bug Tracker%22: %22https://github.com/sussman/zvm/issues%22,%0A %7D,%0A classifiers=%5B%0A %22Programming Language :: Python :: 3%22,%0A %22License :: OSI Approved :: BSD License%22,%0A %22Operating System :: OS Independent%22,%0A %22Topic :: Games/Entertainment%22,%0A %5D,%0A packages=setuptools.find_packages(include=%5B%22zvm%22%5D),%0A python_requires=%22%3E=3.6%22,%0A)%0A
|
|
ce5883c6a7a0c8c8f79c941f66288ce748b1b405
|
Add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,1170 @@
+from setuptools import setup%0A%0Asetup(%0A name = 'brunnhilde',%0A version = '1.4.0',%0A url = 'https://github.com/timothyryanwalsh/brunnhilde',%0A author = 'Tim Walsh',%0A author_email = 'timothyryanwalsh@gmail.com',%0A py_modules = %5B'brunnhilde'%5D,%0A scripts = %5B'brunnhilde.py'%5D,%0A description = 'A Siegfried-based digital archives reporting tool for directories and disk images',%0A keywords = 'archives reporting formats directories diskimages',%0A platforms = %5B'POSIX'%5D,%0A classifiers = %5B%0A 'Development Status :: 5 - Production/Stable',%0A 'License :: OSI Approved :: MIT License',%0A 'Intended Audience :: End Users/Desktop',%0A 'Intended Audience :: Developers',%0A 'Natural Language :: English', %0A 'Operating System :: MacOS',%0A 'Operating System :: MacOS :: MacOS X',%0A 'Operating System :: POSIX :: Linux',%0A 'Topic :: Communications :: File Sharing',%0A 'Programming Language :: Python :: 2.7',%0A 'Programming Language :: Python :: 3.5',%0A 'Topic :: Database',%0A 'Topic :: System :: Archiving',%0A 'Topic :: System :: Filesystems',%0A 'Topic :: Utilities'%0A %5D,%0A)
|
|
73e0bd62ac7a2d8b8322e21130ee7ec0659dc3cc
|
add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,630 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Afrom distutils.core import setup%0A%0A%0Asetup(%0A name = %22jsroot%22,%0A version = %220.0.0%22,%0A description = %22VISPA ROOT Browser - Inspect contents of root files.%22,%0A author = %22VISPA Project%22,%0A author_email = %22vispa@lists.rwth-aachen.de%22,%0A url = %22http://vispa.physik.rwth-aachen.de/%22,%0A license = %22GNU GPL v2%22,%0A packages = %5B%22jsroot%22%5D,%0A package_dir = %7B%22jsroot%22: %22jsroot%22%7D,%0A package_data = %7B%22jsroot%22: %5B%0A %22workspace/*%22,%0A %22static/*%22,%0A %5D%7D,%0A # install_requires = %5B%22vispa%22%5D,%0A)%0A
|
|
8e8678c2bc915e671f50bb6ea91288662053c280
|
add setup file
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,400 @@
+#!/usr/bin/env python%0A# encoding: utf-8%0A%0Afrom setuptools import setup, find_packages%0A%0Asetup(%0A name = 'yard',%0A version = '0.1.0',%0A author = %22Diogo Laginha%22,%0A url = 'https://github.com/laginha/yard',%0A description = %22Yet Another Resftul Django-app%22,%0A packages = %5B'yard'%5D,%0A install_requires = %5B%5D,%0A extras_require = %7B%7D,%0A)%0A
|
|
dd1810ddf1f85312c7a8b5ec23d4844b5ca63a13
|
add data_filtering.py
|
code/data_filtering.py
|
code/data_filtering.py
|
Python
| 0.000003
|
@@ -0,0 +1,2475 @@
+import numpy as np%0Aimport matplotlib.pyplot as plt%0Aimport os%0Aimport sys%0Aimport nitime%0A%0A# Import the time-series objects:%0Afrom nitime.timeseries import TimeSeries%0A%0A# Import the analysis objects:%0Afrom nitime.analysis import SpectralAnalyzer, FilterAnalyzer, NormalizationAnalyzer%0A%0Aos.getcwd()%0Aos.chdir('..')%0Aos.chdir('data')%0A%0A## load data%0Adata2d = np.load('masked_data_50k.npy')%0A%0A%0A## warning%0Aprint('Warning!! This scripts take at least 20 minutes to run.')%0A%0A## plot data%0Aplt.plot(data2d%5B7440,:%5D)%0A%0A## setting the TR %0ATR = 2%0A%0AT = TimeSeries(data2d, sampling_interval=TR)%0A%0A## examining the spectrum of the original data, before filtering. %0A# We do this by initializing a SpectralAnalyzer for the original data:%0AS_original = SpectralAnalyzer(T)%0A%0Afig01 = plt.figure()%0Aax01 = fig01.add_subplot(1, 1, 1)%0A# ax01.plot(S_original.psd%5B0%5D,%0A# S_original.psd%5B1%5D%5B9%5D,%0A# label='Welch PSD')%0A%0Aax01.plot(S_original.spectrum_fourier%5B0%5D,%0A np.abs(S_original.spectrum_fourier%5B1%5D%5B9%5D),%0A label='FFT')%0A%0Aax01.plot(S_original.periodogram%5B0%5D,%0A S_original.periodogram%5B1%5D%5B9%5D,%0A label='Periodogram')%0A%0A# ax01.plot(S_original.spectrum_multi_taper%5B0%5D,%0A# S_original.spectrum_multi_taper%5B1%5D%5B9%5D,%0A# label='Multi-taper')%0A%0Aax01.set_xlabel('Frequency (Hz)')%0Aax01.set_ylabel('Power')%0Aplt.ylim((0,8000))%0A%0Aax01.legend()%0Aplt.savefig(%22../figure/FFT.jpg%22)%0Aprint('FFT.jpg saved')%0A%0A## We start by initializing a FilterAnalyzer. 
%0A#This is initialized with the time-series containing the data %0A#and with the upper and lower bounds of the range into which we wish to filter%0A%0AF = FilterAnalyzer(T, ub=0.15, lb=0.02)%0A%0A# Initialize a figure to display the results:%0Afig02 = plt.figure()%0Aax02 = fig02.add_subplot(1, 1, 1)%0A%0A# Plot the original, unfiltered data:%0Aax02.plot(F.data%5B7440%5D, label='unfiltered')%0Aax02.plot(F.filtered_fourier.data%5B7440%5D, label='Fourier')%0Aax02.legend()%0Aax02.set_xlabel('Time (TR)')%0Aax02.set_ylabel('Signal amplitude (a.u.)')%0A%0Aplt.savefig(%22../figure/data_filtering_on_smoothed_data.jpg%22)%0Aprint('data_filtering_on_smoothed_data.jpg')%0A%0Anp.save('filtered_data.npy',F.filtered_fourier.data)%0Aprint('filtered_data.npy saved')%0A%0AF.filtered_fourier.data.shape%0A%0Afdata = F.filtered_fourier.data%0A%0Afdata.shape%0A%0Av = np.var(fdata,axis=1)%0A%0Aplt.hist(v)%0Aplt.xlabel(%22voxels%22)%0Aplt.ylabel(%22variance%22)%0Aplt.title(%22variance of the voxel activity filtering%22)%0Aplt.savefig(%22../figure/voxel_variance_on_smoothed_data.jpg%22)%0Aprint('voxel_variance_on_smoothed_data.jpg')
|
|
e0fbd1d0e5e9b845ebfa6aa1739937a9974cbc87
|
Add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,358 @@
+#!/usr/bin/env python%0A%0Afrom distutils.core import setup%0A%0Asetup(%0A name='iroha-ya-cli',%0A version='0.7',%0A description='Cli for hyperledger/iroha',%0A author='Sonoko Mizuki',%0A author_email='mizuki.sonoko@gmail.com',%0A packages=%5B'src'%5D,%0A entry_points=%7B%0A 'console_scripts':%0A 'iroha-ya-cli = src.main:main'%0A %7D,%0A)
|
|
2484c0f9415694c99e5b1ac15ee4b64f12e839b6
|
add migration to reflect schema updates to wagtailforms
|
demo/migrations/0005_auto_20160531_1736.py
|
demo/migrations/0005_auto_20160531_1736.py
|
Python
| 0
|
@@ -0,0 +1,2571 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('demo', '0004_auto_20151019_1351'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='formfield',%0A name='choices',%0A field=models.CharField(blank=True, max_length=512, verbose_name='choices', help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.'),%0A ),%0A migrations.AlterField(%0A model_name='formfield',%0A name='default_value',%0A field=models.CharField(blank=True, max_length=255, verbose_name='default value', help_text='Default value. Comma separated values supported for checkboxes.'),%0A ),%0A migrations.AlterField(%0A model_name='formfield',%0A name='field_type',%0A field=models.CharField(choices=%5B('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time')%5D, max_length=16, verbose_name='field type'),%0A ),%0A migrations.AlterField(%0A model_name='formfield',%0A name='help_text',%0A field=models.CharField(blank=True, max_length=255, verbose_name='help text'),%0A ),%0A migrations.AlterField(%0A model_name='formfield',%0A name='label',%0A field=models.CharField(help_text='The label of the form field', max_length=255, verbose_name='label'),%0A ),%0A migrations.AlterField(%0A model_name='formfield',%0A name='required',%0A field=models.BooleanField(verbose_name='required', default=True),%0A ),%0A migrations.AlterField(%0A model_name='formpage',%0A name='from_address',%0A field=models.CharField(blank=True, max_length=255, verbose_name='from address'),%0A ),%0A migrations.AlterField(%0A model_name='formpage',%0A name='subject',%0A field=models.CharField(blank=True, max_length=255, 
verbose_name='subject'),%0A ),%0A migrations.AlterField(%0A model_name='formpage',%0A name='to_address',%0A field=models.CharField(blank=True, max_length=255, verbose_name='to address', help_text='Optional - form submissions will be emailed to this address'),%0A ),%0A %5D%0A
|
|
38c16264147501d56b36f9b3259652759ae70464
|
Version 1.15.8
|
setup.py
|
setup.py
|
"""
The setup package to install SeleniumBase dependencies and plugins
(Uses selenium 3.x and is compatible with Python 2.7+ and Python 3.6+)
"""
from setuptools import setup, find_packages # noqa
from os import path
this_directory = path.abspath(path.dirname(__file__))
long_description = None
try:
with open(path.join(this_directory, 'README.md'), 'rb') as f:
long_description = f.read().decode('utf-8')
except IOError:
long_description = 'Web Automation, Testing, and User-Onboarding Framework'
setup(
name='seleniumbase',
version='1.15.7',
description='All-In-One Test Automation Framework',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/seleniumbase/SeleniumBase',
platforms=["Windows", "Linux", "Unix", "Mac OS-X"],
author='Michael Mintz',
author_email='mdmintz@gmail.com',
maintainer='Michael Mintz',
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Development Status :: 5 - Production/Stable",
"Topic :: Internet",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
"Operating System :: Microsoft :: Windows",
"Operating System :: Unix",
"Operating System :: MacOS",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
install_requires=[
'pip',
'setuptools',
'ipython==5.6.0',
'selenium==3.14.0',
'nose==1.3.7',
'pytest==3.8.0',
'pytest-html==1.19.0',
'pytest-xdist==1.23.0',
'six==1.11.0',
'flake8==3.5.0',
'requests==2.19.1',
'beautifulsoup4==4.6.0',
'unittest2==1.1.0',
'chardet==3.0.4',
'urllib3==1.23',
'boto==2.48.0',
'ipdb==0.11',
'parameterized==0.6.1',
'PyVirtualDisplay==0.2.1',
],
packages=[
'seleniumbase',
'seleniumbase.common',
'seleniumbase.config',
'seleniumbase.console_scripts',
'seleniumbase.core',
'seleniumbase.drivers',
'seleniumbase.fixtures',
'seleniumbase.masterqa',
'seleniumbase.plugins',
'seleniumbase.utilities',
'seleniumbase.utilities.selenium_grid',
'seleniumbase.utilities.selenium_ide',
],
entry_points={
'console_scripts': [
'seleniumbase = seleniumbase.console_scripts.run:main',
],
'nose.plugins': [
'base_plugin = seleniumbase.plugins.base_plugin:Base',
'selenium = seleniumbase.plugins.selenium_plugin:SeleniumBrowser',
'page_source = seleniumbase.plugins.page_source:PageSource',
'screen_shots = seleniumbase.plugins.screen_shots:ScreenShots',
'test_info = seleniumbase.plugins.basic_test_info:BasicTestInfo',
('db_reporting = '
'seleniumbase.plugins.db_reporting_plugin:DBReporting'),
's3_logging = seleniumbase.plugins.s3_logging_plugin:S3Logging',
],
'pytest11': ['seleniumbase = seleniumbase.plugins.pytest_plugin']
}
)
# print(os.system("cat seleniumbase.egg-info/PKG-INFO"))
print("\n*** SeleniumBase Installation Complete! ***\n")
|
Python
| 0
|
@@ -562,17 +562,17 @@
n='1.15.
-7
+8
',%0A d
|
97d96097122ca50e84fcadd3a5c21ae51ccc8bf7
|
Create Polarity_classifier.py
|
src/Polarity_classifier.py
|
src/Polarity_classifier.py
|
Python
| 0.000008
|
@@ -0,0 +1,1670 @@
+import pickle%0Aimport itertools%0Afrom nltk.collocations import BigramCollocationFinder%0Afrom nltk.metrics import BigramAssocMeasures%0Afrom nltk.corpus import stopwords%0A%0Aclass Polarity_classifier:%0A%0A def __init__(self):%0A pass%0A%0A def bigram_word_feats(self, words, score_fn=BigramAssocMeasures.chi_sq, n=200):%0A bigram_finder = BigramCollocationFinder.from_words(words)%0A bigrams = bigram_finder.nbest(score_fn, n)%0A return dict(%5B(ngram, True) for ngram in itertools.chain(words, bigrams)%5D)%0A%0A def bag_of_word_feats(self, words):%0A return dict(%5B(word, True) for word in words%5D)%0A%0A def stopword_filtered_word_feats(self, words):%0A stopset = set(stopwords.words('english'))%0A return dict(%5B(word, True) for word in words if word not in stopset%5D)%0A%0A def set_polarity_bigram_classifier(self, json_tweets):%0A f = open('bigram_classifier.pickle')%0A classifier = pickle.load(f)%0A f.close()%0A for tweet in json_tweets:%0A tweet%5B%22polarity%22%5D = classifier.classify(self.bigram_word_feats(tweet%5B%22text%22%5D.split()))%0A%0A def set_polarity_bag_classifier(self, json_tweets):%0A f = open('bag_classifier.pickle')%0A classifier = pickle.load(f)%0A f.close()%0A for tweet in json_tweets:%0A tweet%5B%22polarity%22%5D = classifier.classify(self.bag_of_word_feats(tweet%5B%22text%22%5D.split()))%0A%0A def set_polarity_stop_classifier(self, json_tweets):%0A f = open('stop_word_classifier.pickle')%0A classifier = pickle.load(f)%0A f.close()%0A for tweet in json_tweets:%0A tweet%5B%22polarity%22%5D = classifier.classify(self.stopword_filtered_word_feats(tweet%5B%22text%22%5D.split()))%0A
|
|
2c39bc6e1586dcacc1d23d9be643d1f27f035eac
|
Add wsgi file
|
agendadulibre/agendadulibre.wsgi
|
agendadulibre/agendadulibre.wsgi
|
Python
| 0.000001
|
@@ -0,0 +1,273 @@
+import sys%0Asys.path.insert(0, '/var/www/agendadulibre/agendadulibre')%0A%0A#sys.path.insert(0, os.curdir)%0A%0Aactivate_this = '/home/numahell/.virtualenvs/flask/local/bin/activate_this.py'%0Aexecfile(activate_this, dict(__file__=activate_this))%0A%0A%0Afrom app import app as application%0A
|
|
f9c68d3c250e3a83ab1d0ed9e0760c0631dca869
|
add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,412 @@
+#!/usr/bin/env python%0Afrom setuptools import find_packages, setup%0Afrom fabliip import __version__%0A%0Asetup(%0A name='fabliip',%0A version=__version__,%0A packages=find_packages(),%0A description='Set of Fabric functions to help deploying websites.',%0A author='Sylvain Fankhauser',%0A author_email='sylvain.fankhauser@liip.ch',%0A url='https://github.com/sephii/fabliip',%0A install_requires=%5B'fabric'%5D,%0A)%0A
|
|
054be2f9a06c0da3b7fcf5d40985ce8055f3f447
|
add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,1231 @@
+from setuptools import setup, find_packages%0A%0Asetup(%0A name='bark',%0A version='1.0',%0A url='https://github.com/battleroid/bark',%0A description='Single file static site generator.',%0A license='MIT License',%0A keywords='bark static site generator jinja blog python markdown',%0A author='Casey Weed',%0A author_email='me@caseyweed.net',%0A download_url='https://github.com/battleroid/bark/tarball/master',%0A packages = find_packages(),%0A install_requires=%5B%0A 'Jinja2%3E=2.8',%0A 'python-frontmatter%3E=0.2.1',%0A 'python-slugify%3E=1.1.3',%0A 'python-dateutil%3E=2.4.2',%0A 'misaka%3E=1.0.2'%0A %5D,%0A setup_requires=%5B%5D,%0A entry_points=%7B%0A 'console_scripts': %5B'bark = bark.bark:main'%5D%0A %7D,%0A platforms=%5B'any'%5D,%0A classifiers=%5B%0A 'Programming Language :: Python',%0A 'Topic :: Internet',%0A 'Topic :: Internet :: WWW/FTP',%0A 'Topic :: Internet :: WWW/HTTP :: Site Management',%0A 'Topic :: Text Processing',%0A 'Topic :: Text Processing :: Markup',%0A 'Topic :: Text Processing :: Markup :: HTML'%0A %5D%0A )%0A
|
|
c6c6594cda35aaa15f1efb9f336548671b0028c5
|
Add generic serializer tool for plugins to use
|
rmake/lib/twisted_extras/tools.py
|
rmake/lib/twisted_extras/tools.py
|
Python
| 0
|
@@ -0,0 +1,1281 @@
+#%0A# Copyright (c) rPath, Inc.%0A#%0A# This program is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU General Public License as published by%0A# the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A#%0A# You should have received a copy of the GNU General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A#%0A%0Afrom twisted.internet import defer%0A%0A%0Aclass Serializer(object):%0A%0A def __init__(self):%0A self._lock = defer.DeferredLock()%0A self._waiting = %7B%7D%0A%0A def call(self, func, collapsible=False):%0A d = self._lock.acquire()%0A self._waiting%5Bd%5D = collapsible%0A @d.addCallback%0A def _locked(_):%0A if collapsible and len(self._waiting) %3E 1:%0A # Superseded%0A return%0A return func()%0A @d.addBoth%0A def _unlock(result):%0A self._lock.release()%0A del self._waiting%5Bd%5D%0A return result%0A return d%0A
|
|
b4f5b5da5e7a7266e7f908b6ffc975ea3f1f0657
|
Add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,233 @@
+from distutils.core import setup%0A%0Asetup(name='MathLibPy',%0A version='0.0.0',%0A description='Math library for Python',%0A author='Jack Romo',%0A author_email='sharrackor@gmail.com',%0A packages=%5B'mathlibpy'%5D,%0A )%0A
|
|
49178742953cc63b066d2142d9e2b3f0f2e20e17
|
Tweak setup.py so that it may run even when fired from different locations, as suggested by Maarten Damen.
|
setup.py
|
setup.py
|
#!/usr/bin/python
from os.path import isfile, join
import glob
import os
import re
from setuptools import setup
if isfile("MANIFEST"):
os.unlink("MANIFEST")
VERSION = re.search('__version__ = "([^"]+)"',
open("dateutil/__init__.py").read()).group(1)
setup(name="python-dateutil",
version = VERSION,
description = "Extensions to the standard python 2.3+ datetime module",
author = "Gustavo Niemeyer",
author_email = "gustavo@niemeyer.net",
url = "http://labix.org/python-dateutil",
license = "PSF License",
long_description =
"""\
The dateutil module provides powerful extensions to the standard
datetime module, available in Python 2.3+.
""",
packages = ["dateutil", "dateutil.zoneinfo"],
package_data={"": ["*.tar.gz"]},
include_package_data=True,
zip_safe=False,
)
|
Python
| 0
|
@@ -159,16 +159,58 @@
EST%22)%0A%0A%0A
+TOPDIR = os.path.dirname(__file__) or %22.%22%0A
VERSION
@@ -273,17 +273,27 @@
open(
-%22
+TOPDIR + %22/
dateutil
|
ba9235b758fe44279e3bd55bfb785308febb8685
|
Add padding between layout and children (#1980)
|
kivy/uix/anchorlayout.py
|
kivy/uix/anchorlayout.py
|
'''
Anchor Layout
=============
.. only:: html
.. image:: images/anchorlayout.gif
:align: right
.. only:: latex
.. image:: images/anchorlayout.png
:align: right
The :class:`AnchorLayout` aligns children to a border (top, bottom,
left, right) or center.
To draw a button in the lower-right corner::
layout = AnchorLayout(
anchor_x='right', anchor_y='bottom')
btn = Button(text='Hello World')
layout.add_widget(btn)
'''
__all__ = ('AnchorLayout', )
from kivy.uix.layout import Layout
from kivy.properties import NumericProperty, OptionProperty
class AnchorLayout(Layout):
'''Anchor layout class. See the module documentation for more information.
'''
padding = NumericProperty(0)
'''Padding between the widget box and it's children, in pixels.
:attr:`padding` is a :class:`~kivy.properties.NumericProperty` and defaults
to 0.
'''
anchor_x = OptionProperty('center', options=(
'left', 'center', 'right'))
'''Horizontal anchor.
:attr:`anchor_x` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'center'. It accepts values of 'left', 'center' or
'right'.
'''
anchor_y = OptionProperty('center', options=(
'top', 'center', 'bottom'))
'''Vertical anchor.
:attr:`anchor_y` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'center'. It accepts values of 'top', 'center' or
'bottom'.
'''
def __init__(self, **kwargs):
super(AnchorLayout, self).__init__(**kwargs)
self.bind(
children=self._trigger_layout,
parent=self._trigger_layout,
padding=self._trigger_layout,
anchor_x=self._trigger_layout,
anchor_y=self._trigger_layout,
size=self._trigger_layout,
pos=self._trigger_layout)
def do_layout(self, *largs):
_x, _y = self.pos
width = self.width
height = self.height
anchor_x = self.anchor_x
anchor_y = self.anchor_y
padding = self.padding
for c in self.children:
x, y = _x, _y
w, h = c.size
if c.size_hint[0]:
w = c.size_hint[0] * width
elif not self.size_hint[0]:
width = max(width, c.width)
if c.size_hint[1]:
h = c.size_hint[1] * height
elif not self.size_hint[1]:
height = max(height, c.height)
if anchor_x == 'left':
x = x + padding
if anchor_x == 'right':
x = x + width - (w + padding)
if self.anchor_x == 'center':
x = x + (width / 2) - (w / 2)
if anchor_y == 'bottom':
y = y + padding
if anchor_y == 'top':
y = y + height - (h + padding)
if anchor_y == 'center':
y = y + (height / 2) - (h / 2)
c.x = x
c.y = y
c.width = w
c.height = h
self.size = (width, height) # might have changed inside loop
|
Python
| 0
|
@@ -2199,16 +2199,17 @@
w =
+(
c.size_h
@@ -2218,24 +2218,41 @@
t%5B0%5D * width
+) - (2 * padding)
%0A
@@ -2371,24 +2371,25 @@
h =
+(
c.size_hint%5B
@@ -2395,24 +2395,41 @@
%5B1%5D * height
+) - (2 * padding)
%0A
|
65c9335775688a15b344be4762ee7c75bd66bdb2
|
Add a setup.py file
|
setup.py
|
setup.py
|
Python
| 0.000002
|
@@ -0,0 +1,695 @@
+import os%0Aimport codecs%0Afrom setuptools import setup, find_packages%0A%0A%0Adef read(fname):%0A file_path = os.path.join(os.path.dirname(__file__), fname)%0A return codecs.open(file_path, encoding='utf-8').read()%0A%0A%0Asetup(%0A name='cities',%0A version='0.0.1',%0A description='Load data from cities and countries all over the world',%0A author='Artur Sousa',%0A author_email='arturfelipe.sousa@gmail.com',%0A url='https://github.com/arturfelipe/cities',%0A packages=find_packages(exclude=%5B'demo'%5D),%0A install_requires=%5B%0A 'requests',%0A %5D,%0A include_package_data=True,%0A zip_safe=False,%0A long_description=read('README.md'),%0A license='MIT',%0A keywords='cities countries'%0A)%0A
|
|
f57605c4f37fb29a93f06d165b9eb69fee2771b9
|
Add fake setup.py (#1620)
|
setup.py
|
setup.py
|
Python
| 0
|
@@ -0,0 +1,679 @@
+import sys%0A%0Afrom setuptools import setup%0A%0Asys.stderr.write(%0A %22%22%22%0A===============================%0AUnsupported installation method%0A===============================%0Ahttpx no longer supports installation with %60python setup.py install%60.%0APlease use %60python -m pip install .%60 instead.%0A%22%22%22%0A)%0Asys.exit(1)%0A%0A%0A# The below code will never execute, however GitHub is particularly%0A# picky about where it finds Python packaging metadata.%0A# See: https://github.com/github/feedback/discussions/6456%0A#%0A# To be removed once GitHub catches up.%0A%0Asetup(%0A name=%22uvicorn%22,%0A install_requires=%5B%0A %22click%3E=7.0%22,%0A %22h11%3E=0.8%22,%0A %22typing-extensions;python_version %3C '3.8'%22,%0A %5D,%0A)%0A
|
|
7c863017bd687a06c63a5c60c53c6efca80d6b0e
|
Add setup script
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,241 @@
+from setuptools import setup%0A%0Asetup(%0A name='discord-toastlogger',%0A version='0.1.0',%0A scripts=%5B'toastbot'%5D,%0A url='https://github.com/mdegreg/discord-toastlogger',%0A license='MIT',%0A install_requires=%5B%0A 'discord'%0A %5D%0A)
|
|
1b0b91e9445e080e790571a00e767f31f5035fd1
|
Add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,1624 @@
+#!/usr/bin/env python3%0A#%0A# The MIT License (MIT)%0A#%0A# Copyright (c) 2014 Philippe Proulx %3Ceepp.ca%3E%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining a copy%0A# of this software and associated documentation files (the %22Software%22), to deal%0A# in the Software without restriction, including without limitation the rights%0A# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A# copies of the Software, and to permit persons to whom the Software is%0A# furnished to do so, subject to the following conditions:%0A#%0A# The above copyright notice and this permission notice shall be included in%0A# all copies or substantial portions of the Software.%0A#%0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN%0A# THE SOFTWARE.%0A%0Aimport sys%0Afrom setuptools import setup%0A%0A%0A# make sure we run Python 3+ here%0Av = sys.version_info%0Aif v.major %3C 3:%0A sys.stderr.write('Sorry, pytsdl needs Python 3%5Cn')%0A sys.exit(1)%0A%0Apackages = %5B%0A 'pytsdl',%0A%5D%0A%0Asetup(name='pytsdl',%0A version=0.1,%0A description='TSDL parser implemented entirely in Python 3',%0A author='Philippe Proulx',%0A author_email='eeppeliteloop@gmail.com',%0A url='https://github.com/eepp/pytsdl',%0A packages=packages)%0A
|
|
c40a65c46b075881222f5c9ccebccfb0c627aa51
|
Create setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1 @@
+%0A
|
|
e87d736c83d89129f4a152163993cb5c173dddd4
|
Add setup
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,189 @@
+from setuptools import setup%0A%0A%0Asetup(name='Kamanian',%0A version='1.00',%0A packages=%5B'dzdy'%5D,%0A install_requires=%5B'pandas', 'numpy', 'scipy', 'pcore', 'matplotlib', 'networkx'%5D)%0A
|
|
05477b14e19d1e2d0483405bf3558f7d80fb9b60
|
Switch to setuptools.
|
setup.py
|
setup.py
|
# setup.py - distutils configuration for esm and esmre modules
# Copyright (C) 2007 Tideway Systems Limited.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
from distutils.core import setup, Extension
module1 = Extension("esm",
#define_macros=[("HEAP_CHECK", 1)],
sources = ['src/esm.c',
'src/aho_corasick.c',
'src/ac_heap.c',
'src/ac_list.c'])
setup (name = "esmre",
version = '0.2.1',
description = 'Regular expression accelerator',
long_description = " ".join("""
Modules used to accelerate execution of a large collection of regular
expressions using the Aho-Corasick algorithms.
""".strip().split()),
author = 'Will Harris',
author_email = 'w.harris@tideway.com',
url = 'http://code.google.com/p/esmre/',
license = 'GNU LGPL',
platforms = ['POSIX'],
ext_modules = [module1],
package_dir = {'': 'src'},
py_modules = ["esmre"])
|
Python
| 0.000007
|
@@ -846,22 +846,18 @@
rom
-distutils.core
+setuptools
imp
@@ -1481,16 +1481,514 @@
lit()),%0A
+ classifiers = %5B%0A 'Development Status :: 4 - Beta',%0A 'Intended Audience :: Developers',%0A 'License :: OSI Approved ::',%0A 'GNU Library or Lesser General Public License (LGPL)',%0A 'Operating System :: POSIX',%0A 'Programming Language :: C',%0A 'Programming Language :: Python',%0A 'Topic :: Software Development :: Libraries :: Python Modules',%0A 'Topic :: Text Processing :: Indexing'%0A %5D,%0A install_requires=%5B'setuptools'%5D,%0A
a
|
ed3c7942e5717d187c922c8e6169def414562886
|
Allow higher version of flask-login. Why is this capped?
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
# Kept manually in sync with airflow.__version__
version = '1.5.1'
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
celery = [
'celery>=3.1.17',
'flower>=0.7.3'
]
crypto = ['cryptography>=0.9.3']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
druid = ['pydruid>=0.2.1']
hdfs = ['snakebite>=2.4.13']
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'pyhs2>=0.6.0',
]
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.13.0']
mysql = ['mysql-python>=1.2.5']
optional = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.6']
s3 = ['boto>=2.36.0']
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=0.15']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica
devel = all_dbs + doc + samba + s3 + ['nose'] + slack + crypto + oracle
setup(
name='airflow',
description='Programmatically author, schedule and monitor data pipelines',
version=version,
packages=find_packages(),
package_data={'': ['airflow/alembic.ini']},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.0, <0.9',
'chartkick>=0.4.2, < 0.5',
'dill>=0.2.2, <0.3',
'flask>=0.10.1, <0.11',
'flask-admin==1.2.0',
'flask-cache>=0.13.1, <0.14',
'flask-login>=0.2.11, <0.3',
'future>=0.15.0, <0.16',
'gunicorn>=19.3.0, <20.0',
'jinja2>=2.7.3, <3.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.15.2, <1.0.0',
'pygments>=2.0.1, <3.0',
'python-dateutil>=2.3, <3',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=0.9.8, <0.10',
'thrift>=0.9.2, <0.10',
],
extras_require={
'all': devel + optional,
'all_dbs': all_dbs,
'celery': celery,
'crypto': crypto,
'devel': devel,
'doc': doc,
'druid': druid,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'postgres': postgres,
's3': s3,
'samba': samba,
'slack': slack,
'statsd': statsd,
'vertica': vertica,
},
author='Maxime Beauchemin',
author_email='maximebeauchemin@gmail.com',
url='https://github.com/airbnb/airflow',
download_url=(
'https://github.com/airbnb/airflow/tarball/' + version),
cmdclass={'test': Tox},
)
|
Python
| 0
|
@@ -2072,25 +2072,25 @@
=0.2.11, %3C0.
-3
+5
',%0A '
|
44bdeb2d5bf8c7877eb1e92cda65f6c844a93642
|
add models.
|
contentpacks/models.py
|
contentpacks/models.py
|
Python
| 0
|
@@ -0,0 +1,1114 @@
+from peewee import Model, SqliteDatabase, CharField, TextField, BooleanField,%5C%0A ForeignKeyField, PrimaryKeyField, Using, IntegerField, %5C%0A OperationalError%0A%0A%0Aclass Item(Model):%0A title = CharField()%0A description = TextField()%0A available = BooleanField()%0A files_complete = IntegerField(default=0)%0A total_files = IntegerField(default=0)%0A kind = CharField()%0A parent = ForeignKeyField(%22self%22, null=True, index=True, related_name=%22children%22)%0A id = CharField(index=True)%0A pk = PrimaryKeyField(primary_key=True)%0A slug = CharField()%0A path = CharField(index=True, unique=True)%0A extra_fields = CharField(null=True)%0A youtube_id = CharField(null=True)%0A size_on_disk = IntegerField(default=0)%0A remote_size = IntegerField(default=0)%0A%0A def __init__(self, *args, **kwargs):%0A # kwargs = parse_model_data(kwargs)%0A super(Item, self).__init__(*args, **kwargs)%0A%0A%0Aclass AssessmentItem(Model):%0A id = CharField(max_length=50, primary_key=True)%0A item_data = TextField() # A serialized JSON blob%0A author_names = CharField(max_length=200) # A serialized JSON list%0A
|
|
d72f9f06afcf5d1c177afa418a7c4bf60af8fb75
|
Support mm:ss.
|
since.py
|
since.py
|
Python
| 0
|
@@ -0,0 +1,602 @@
+#!/usr/bin/env python3%0A%0Aimport datetime%0Aimport re%0Aimport sys%0A%0A%0Adef main(strTime):%0A now = datetime.datetime.now()%0A pattern = r'(%5Cd%5Cd):(%5Cd%5Cd)'%0A match = re.match(pattern, strTime)%0A time = datetime.datetime(%0A now.year,%0A now.month,%0A now.day,%0A int(match.group(1)),%0A int(match.group(2)))%0A diff = now - time%0A if diff.total_seconds() %3C 0:%0A return now - time + datetime.timedelta(1)%0A return diff%0A%0Aif __name__ == '__main__':%0A if len(sys.argv) %3C 2:%0A print('Usage %7B0%7D %7B%7Btime%7D%7D'.format(sys.argv%5B0%5D))%0A else:%0A print(main(sys.argv%5B1%5D))%0A
|
|
6a364f620f73a3bff2b2cc11113778d75202f37f
|
fix bug with putting the unknown users doc to ES
|
corehq/pillows/user.py
|
corehq/pillows/user.py
|
from corehq.apps.groups.models import Group
from corehq.apps.users.models import CommCareUser, CouchUser
from corehq.apps.users.util import WEIRD_USER_IDS
from corehq.elastic import es_query, ES_URLS, stream_es_query, get_es
from corehq.pillows.mappings.user_mapping import USER_MAPPING, USER_INDEX
from couchforms.models import XFormInstance
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow, BulkPillow
from django.conf import settings
class UserPillow(AliasedElasticPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = CommCareUser # while this index includes all users,
# I assume we don't care about querying on properties specific to WebUsers
couch_filter = "users/all_users"
es_host = settings.ELASTICSEARCH_HOST
es_port = settings.ELASTICSEARCH_PORT
es_timeout = 60
es_index_prefix = "hqusers"
es_alias = "hqusers"
es_type = "user"
es_meta = {
"settings": {
"analysis": {
"analyzer": {
"default": {
"type": "custom",
"tokenizer": "whitespace",
"filter": ["lowercase"]
},
}
}
}
}
es_index = USER_INDEX
default_mapping = USER_MAPPING
@memoized
def calc_meta(self):
#todo: actually do this correctly
"""
override of the meta calculator since we're separating out all the types,
so we just do a hash of the "prototype" instead to determined md5
"""
return self.calc_mapping_hash({"es_meta": self.es_meta,
"mapping": self.default_mapping})
def get_mapping_from_type(self, doc_dict):
"""
Define mapping uniquely to the user_type document.
See below on why date_detection is False
NOTE: DO NOT MODIFY THIS UNLESS ABSOLUTELY NECESSARY. A CHANGE BELOW WILL GENERATE A NEW
HASH FOR THE INDEX NAME REQUIRING A REINDEX+RE-ALIAS. THIS IS A SERIOUSLY RESOURCE
INTENSIVE OPERATION THAT REQUIRES SOME CAREFUL LOGISTICS TO MIGRATE
"""
#the meta here is defined for when the case index + type is created for the FIRST time
#subsequent data added to it will be added automatically, but date_detection is necessary
# to be false to prevent indexes from not being created due to the way we store dates
#all are strings EXCEPT the core case properties which we need to explicitly define below.
#that way date sort and ranges will work with canonical date formats for queries.
return {
self.get_type_string(doc_dict): self.default_mapping
}
def get_type_string(self, doc_dict):
return self.es_type
class GroupToUserPillow(BulkPillow):
couch_filter = "groups/all_groups"
document_class = CommCareUser
def __init__(self, **kwargs):
super(GroupToUserPillow, self).__init__(**kwargs)
self.couch_db = Group.get_db()
def change_trigger(self, changes_dict):
es = get_es()
user_ids = changes_dict["doc"].get("users", [])
q = {"filter": {"and": [{"terms": {"_id": user_ids}}]}}
for user_source in stream_es_query(es_url=ES_URLS["users"], q=q, fields=["__group_ids", "__group_names"]):
group_ids = set(user_source.get('fields', {}).get("__group_ids", []))
group_names = set(user_source.get('fields', {}).get("__group_names", []))
if changes_dict["doc"]["name"] not in group_names or changes_dict["doc"]["_id"] not in group_ids:
group_ids.add(changes_dict["doc"]["_id"])
group_names.add(changes_dict["doc"]["name"])
doc = {"__group_ids": list(group_ids), "__group_names": list(group_names)}
es.post("%s/user/%s/_update" % (USER_INDEX, user_source["_id"]), data={"doc": doc})
def change_transport(self, doc_dict):
pass
def send_bulk(self, payload):
pass
class UnknownUsersPillow(BulkPillow):
"""
This pillow adds users from xform submissions that come in to the User Index if they don't exist in HQ
"""
document_class = XFormInstance
couch_filter = "couchforms/xforms"
include_docs = False
def __init__(self, **kwargs):
super(UnknownUsersPillow, self).__init__(**kwargs)
self.couch_db = XFormInstance.get_db()
self.user_db = CouchUser.get_db()
def get_fields(self, changes_or_emitted_dict):
if "doc" in changes_or_emitted_dict:
form_meta = changes_or_emitted_dict["doc"].get("form", {}).get("meta", {})
user_id, username = form_meta.get("userID"), form_meta.get("username")
domain = changes_or_emitted_dict["doc"].get("domain")
xform_id = changes_or_emitted_dict["doc"].get("_id")
else:
emitted = changes_or_emitted_dict["value"]
user_id, username, domain = emitted["user_id"], emitted["username"], emitted["domain"]
xform_id = changes_or_emitted_dict["id"]
user_id = None if user_id in WEIRD_USER_IDS else user_id
return user_id, username, domain, xform_id
def change_trigger(self, changes_dict):
user_id, username, domain, xform_id = self.get_fields(changes_dict)
es = get_es()
es_path = USER_INDEX + "/user/"
if user_id and not self.user_db.doc_exist(user_id) and not es.head(es_path + user_id):
print "adding unknown user: %s" % user_id
doc = {
"_id": user_id,
"domain": domain,
"username": username,
"first_form_found_in": xform_id,
"doc_type": "UnknownUser",
}
if domain:
doc["domain_membership"] = {"domain": domain}
es.put(es_path + user_id, data={"doc": doc})
def change_transport(self, doc_dict):
pass
def send_bulk(self, payload):
pass
|
Python
| 0
|
@@ -5579,62 +5579,8 @@
d):%0A
- print %22adding unknown user: %25s%22 %25 user_id%0A
@@ -5929,36 +5929,27 @@
er_id, data=
-%7B%22
doc
-%22: doc%7D
)%0A%0A def c
|
38a5b5a74ec68027b30560c5a8c1087e5b49d5e6
|
criada query tira_lote para deendereçar lote
|
src/cd/queries/lote.py
|
src/cd/queries/lote.py
|
Python
| 0.999826
|
@@ -0,0 +1,319 @@
+from pprint import pprint%0A%0Afrom utils.functions.queries import debug_cursor_execute%0A%0A%0Adef tira_lote(cursor, lote):%0A sql = f%22%22%22%0A DELETE FROM SYSTEXTIL.ENDR_014%0A WHERE ORDEM_CONFECCAO = '%7Blote%7D'%0A %22%22%22%0A try:%0A debug_cursor_execute(cursor, sql)%0A except Exception as e:%0A return repr(e)%0A
|
|
1d25676049994db266129b1a1c98cec3acbba0ca
|
Add missing file on last merge
|
goodtablesio/models/subscription.py
|
goodtablesio/models/subscription.py
|
Python
| 0.000001
|
@@ -0,0 +1,813 @@
+import logging%0Aimport datetime%0A%0Afrom sqlalchemy import (%0A Column, Unicode, DateTime, Boolean, ForeignKey)%0Afrom sqlalchemy.orm import relationship%0A%0Afrom goodtablesio.models.base import Base, BaseModelMixin, make_uuid%0A%0A%0Alog = logging.getLogger(__name__)%0A%0A%0Aclass Subscription(Base, BaseModelMixin):%0A%0A __tablename__ = 'subscriptions'%0A%0A id = Column(Unicode, primary_key=True, default=make_uuid)%0A plan_id = Column(Unicode, ForeignKey('plans.id'))%0A user_id = Column(Unicode, ForeignKey('users.id'))%0A%0A active = Column(Boolean, default=True)%0A started = Column(DateTime(timezone=True), default=datetime.datetime.utcnow)%0A expires = Column(DateTime(timezone=True))%0A finished = Column(DateTime(timezone=True))%0A%0A plan = relationship(%0A 'Plan', primaryjoin='Subscription.plan_id == Plan.id')%0A
|
|
9b0278530c2c4f32dd2a751fb4f8b93c8c34a3ea
|
add arch tool for waf backend.
|
bento/backends/waf_tools/arch.py
|
bento/backends/waf_tools/arch.py
|
Python
| 0
|
@@ -0,0 +1,2364 @@
+import re%0A%0Afrom waflib.Tools.c_config import SNIP_EMPTY_PROGRAM%0Afrom waflib.Configure import conf%0A%0AARCHS = %5B%22i386%22, %22x86_64%22, %22ppc%22, %22ppc64%22%5D%0A%0AFILE_MACHO_RE = re.compile(%22Mach-O.*object (%5Ba-zA-Z_0-9%5D+)%22)%0A%0A@conf%0Adef check_cc_arch(conf):%0A env = conf.env%0A archs = %5B%5D%0A%0A for arch in ARCHS:%0A env.stash()%0A try:%0A env.append_value('CFLAGS', %5B'-arch', arch%5D)%0A env.append_value('LINKFLAGS', %5B'-arch', arch%5D)%0A try:%0A conf.check_cc(fragment=SNIP_EMPTY_PROGRAM, msg=%22Checking for %25r suport%22 %25 arch)%0A archs.append(arch)%0A except conf.errors.ConfigurationError:%0A pass%0A finally:%0A env.revert()%0A%0A env%5B%22ARCH_CC%22%5D = archs%0A%0A#def detect_arch(filename):%0A%0A@conf%0Adef check_cc_default_arch(conf):%0A start_msg = %22Checking for default CC arch%22%0A fragment = SNIP_EMPTY_PROGRAM%0A output_var = %22DEFAULT_CC_ARCH%22%0A%0A return _check_default_arch(conf, start_msg, fragment, output_var)%0A%0A@conf%0Adef check_cxx_default_arch(conf):%0A start_msg = %22Checking for default CXX arch%22%0A fragment = SNIP_EMPTY_PROGRAM%0A output_var = %22DEFAULT_CXX_ARCH%22%0A%0A return _check_default_arch(conf, start_msg, fragment, output_var)%0A%0A@conf%0Adef check_fc_default_arch(conf):%0A start_msg = %22Checking for default FC arch%22%0A fragment = %22%22%22%5C%0A program main%0A end%0A%22%22%22%0A output_var = %22DEFAULT_FC_ARCH%22%0A compile_filename = 'test.f'%0A features = %22fc fcprogram%22%0A%0A return _check_default_arch(conf, start_msg, fragment, output_var, compile_filename, features)%0A%0A@conf%0Adef _check_default_arch(conf, start_msg, fragment, output_var, compile_filename=%22test.c%22, features=%22c cprogram%22):%0A env = conf.env%0A%0A if not %22FILE_BIN%22 in conf.env:%0A file_bin = conf.find_program(%5B%22file%22%5D, var=%22FILE_BIN%22)%0A else:%0A file_bin = conf.env.FILE_BIN%0A%0A conf.start_msg(start_msg)%0A ret = conf.check_cc(fragment=fragment, compile_filename=compile_filename, 
features=features)%0A task_gen = conf.test_bld.groups%5B0%5D%5B0%5D%0A obj_filename = task_gen.tasks%5B0%5D.outputs%5B0%5D.abspath()%0A out = conf.cmd_and_log(%5Bfile_bin, obj_filename%5D)%0A m = FILE_MACHO_RE.search(out)%0A if m is None:%0A conf.fatal(%22Could not determine arch from output %25r%22 %25 out)%0A else:%0A default_arch = m.group(1)%0A conf.env%5Boutput_var%5D = default_arch%0A conf.end_msg(default_arch)%0A
|
|
856f855e10588ddbe2ad5053cc5d7366c76459a8
|
Implement basic perception
|
percept/perceptron.py
|
percept/perceptron.py
|
Python
| 0.000149
|
@@ -0,0 +1,1306 @@
+import random%0A%0A%0Adef rand_w():%0A '''%0A Generate a random weight.%0A '''%0A return round(random.uniform(-1, 1), 3)%0A%0A%0Aclass Perceptron:%0A def __init__(%0A self, w0=rand_w(), w1=rand_w(), w2=rand_w(), learning_rate=0.1):%0A self.w0, self.w1, self.w2 = w0, w1, w2%0A self.learning_rate = learning_rate%0A%0A def train(self, y, x1, x2):%0A y_hat = self.predict(x1, x2)%0A%0A if y_hat %3C y:%0A self.w0 += self.learning_rate%0A self.w1 += self.learning_rate * x1%0A self.w2 += self.learning_rate * x2%0A if y_hat %3E y:%0A self.w0 -= self.learning_rate%0A self.w1 -= self.learning_rate * x1%0A self.w2 -= self.learning_rate * x2%0A self.round_weights()%0A%0A def evaluate(self, x1, x2):%0A return self.w0 + self.w1 * x1 + self.w2 * x2%0A%0A def activate(self, y):%0A return int(y %3E= 0)%0A%0A def predict(self, x1, x2):%0A return self.activate(self.evaluate(x1, x2))%0A%0A def round_weights(self, dp=3):%0A self.w0 = round(self.w0, dp)%0A self.w1 = round(self.w1, dp)%0A self.w2 = round(self.w2, dp)%0A%0A def get_weights(self):%0A return (self.w0, self.w1, self.w2)%0A%0A def get_plot_fn(self):%0A def fn(x):%0A return -self.w1 / self.w2 * x - self.w0 / self.w2%0A return fn%0A
|
|
e5f82b794ee2e6054deb15433c7dc7261146f181
|
Add merge migration
|
osf/migrations/0112_merge_20180614_1454.py
|
osf/migrations/0112_merge_20180614_1454.py
|
Python
| 0.000001
|
@@ -0,0 +1,332 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.13 on 2018-06-14 19:54%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('osf', '0107_merge_20180604_1232'),%0A ('osf', '0111_auto_20180605_1240'),%0A %5D%0A%0A operations = %5B%0A %5D%0A
|
|
5f051f2ae1b105d6cc58d1cac760cb5d20908c3b
|
Support rudimentary translation service from IIT Bombay via web API.
|
valai/translate.py
|
valai/translate.py
|
Python
| 0.000607
|
@@ -0,0 +1,880 @@
+# * coding: utf8 *%0A#%0A# (C) 2020 Muthiah Annamalai %3Cezhillang@gmail.com%3E%0A#%0A# Uses the IIT-Bombay service on the web.%0A#%0A%0Aimport json%0Aimport requests%0Afrom urllib.parse import quote%0Afrom functools import lru_cache%0A%0A%0A@lru_cache(1024,str)%0Adef en2ta(text):%0A %22%22%22translate from English to Tamil%22%22%22%0A return IITB_translator('en', 'ta', text)%0A%0A@lru_cache(1024,str)%0Adef ta2en(text):%0A %22%22%22translate from Tamil to English%22%22%22%0A return IITB_translator('ta','en',text)%0A%0Adef IITB_translator(src_lang,dest_lang,_text):%0A text = quote(_text)%0A URLFMT = 'http://www.cfilt.iitb.ac.in/indicnlpweb/indicnlpws/translate/%7B0%7D/%7B1%7D/%7B2%7D/'%0A url = URLFMT.format(src_lang.lower(),dest_lang.lower(),text)%0A response = requests.get(url)%0A return response.json()%5Bdest_lang.lower()%5D%0A%0Aif __name__ == %22__main__%22:%0A print(ta2en('%E0%AE%95%E0%AE%B5%E0%AE%BF%E0%AE%A4%E0%AF%88 %E0%AE%AE%E0%AE%BF%E0%AE%95 %E0%AE%85%E0%AE%B4%E0%AE%95%E0%AE%BE%E0%AE%95 %E0%AE%87%E0%AE%B0%E0%AF%81%E0%AE%95%E0%AF%8D%E0%AE%95%E0%AE%BF%E0%AE%B1%E0%AE%A4%E0%AF%81'))%0A print(en2ta('world is not flat'))%0A
|
|
25056e74093f01d68af14277da6089903b617ee6
|
Create Career.py
|
Career.py
|
Career.py
|
Python
| 0
|
@@ -0,0 +1,379 @@
+class Career:%0A def __init__(career_name, advances, skills_to_take, talents_to_take, career_trappings, race_dependent)%0A self.career_name = career_name%0A self.advances = advances%0A self.skills_to_take = skills_to_take%0A self.talents_to_take = talents_to_take%0A self.career_trappings = career_trappings%0A self.race_dependent = race_dependent%0A
|
|
64ab32daba1ddbe7e8b56850188dab3f8ca42286
|
Add TCP check
|
sauna/plugins/ext/tcp.py
|
sauna/plugins/ext/tcp.py
|
Python
| 0.000001
|
@@ -0,0 +1,821 @@
+import socket%0A%0Afrom sauna.plugins import (Plugin, PluginRegister)%0A%0Amy_plugin = PluginRegister('TCP')%0A%0A%0A@my_plugin.plugin()%0Aclass Tcp(Plugin):%0A%0A @my_plugin.check()%0A def request(self, check_config):%0A try:%0A with socket.create_connection((check_config%5B'host'%5D,%0A check_config%5B'port'%5D),%0A timeout=check_config%5B'timeout'%5D):%0A pass%0A except Exception as e:%0A return Plugin.STATUS_CRIT, %22%7B%7D%22.format(e)%0A else:%0A return Plugin.STATUS_OK, %22OK%22%0A%0A @staticmethod%0A def config_sample():%0A return '''%0A # Tcp%0A - type: TCP%0A checks:%0A - type: request%0A host: localhost%0A port: 11211%0A timeout: 5%0A '''%0A
|
|
d95ce2570989e1b18c313efb1f95f611a9a2cc80
|
add color_histogram_matcher for objects
|
jsk_2015_05_baxter_apc/node_scripts/color_histogram_matcher.py
|
jsk_2015_05_baxter_apc/node_scripts/color_histogram_matcher.py
|
Python
| 0.000009
|
@@ -0,0 +1,2986 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A#%0Afrom __future__ import division%0Aimport rospy%0Aimport cv2%0Aimport numpy as np%0A%0Afrom sensor_msgs.msg import Image%0Afrom jsk_2014_picking_challenge.srv import ObjectMatch, ObjectMatchResponse%0Afrom jsk_recognition_msgs.msg import ColorHistogram%0A%0Aquery_features = None%0Atarget_features = None%0A%0Aclass ColorHistogramMatcher(object):%0A def __init__(self):%0A self.query_histogram = %7B%7D%0A self.target_histograms = None%0A%0A rospy.Service('/semi/color_histogram_matcher', ObjectMatch,%0A self.handle_colorhist_matcher)%0A # input is color_histograms extracted by camera_image%0A rospy.Subscriber('~input/histogram/red', ColorHistogram,%0A self.cb_histogram_red)%0A rospy.Subscriber('~input/histogram/green', ColorHistogram,%0A self.cb_histogram_green)%0A rospy.Subscriber('~input/histogram/blue', ColorHistogram,%0A self.cb_histogram_blue)%0A%0A def handle_colorhist_matcher(self, req):%0A %22%22%22Handler of service request%22%22%22%0A self.load_target_histograms(req.objects)%0A return ObjectMatchResponse(probabilities=self.get_probabilities())%0A%0A def load_target_histograms(self):%0A %22%22%22Load extracted color histogram features of objects%22%22%22%0A rospy.loginfo('Loading object color histogram features')%0A # self.target_histograms = ...%0A raise NotImplementedError%0A%0A def coefficient(query_hist, target_hist, method=0):%0A %22%22%22Compute coefficient of 2 histograms with several methods%22%22%22%0A if method == 0:%0A return (1. 
+ cv2.compareHist(query_hist, target_hist,%0A cv2.cv.CV_COMP_CORREL)) / 2.;%0A%0A def get_probabilities(self):%0A %22%22%22Get probabilities of color matching%22%22%22%0A query_histogram = self.query_histogram%0A target_histograms = self.target_histograms%0A obj_coefs = %5B%5D%0A for obj_name, target_histgram in target_histograms.iteritems():%0A # loop for RGB color &%0A # compute max coefficient about each histograms%0A coefs = %5B%5D%0A for q_hist, t_hist in zip(%0A query_histogram.values(), target_histogram.values()):%0A coefs.append(coefficient(q_hist, t_hist))%0A obj_coefs.append(max(coefs))%0A obj_coefs = np.array(obj_coefs)%0A # change coefficient array to probability array%0A if obj_coefs.sum() == 0:%0A return obj_coefs%0A else:%0A return obj_coefs / obj_coefs.sum()%0A%0A def cb_histogram_red(self, msg):%0A %22%22%22Get input red histogram%22%22%22%0A self.query_histogram%5B'red'%5D = msg.histogram%0A%0A def cb_histogram_green(self, msg):%0A %22%22%22Get input green histogram%22%22%22%0A self.query_histogram%5B'green'%5D = msg.histogram%0A%0A def cb_histogram_blue(self, msg):%0A %22%22%22Get input blue histogram%22%22%22%0A self.query_histogram%5B'blue'%5D = msg.histogram%0A%0A%0Adef main():%0A m = ColorHistogramMatcher()%0A rospy.spin()%0A%0A%0Aif __name__ == '__main__':%0A main()%0A%0A
|
|
74197adab35815bc1168f661d6f5cf5c829afc99
|
Add example
|
example/serialize.py
|
example/serialize.py
|
Python
| 0.000003
|
@@ -0,0 +1,306 @@
+from pykt import KyotoTycoon, set_serializer, set_deserializer%0Afrom cPickle import dumps, loads%0A%0Aset_serializer(dumps)%0Aset_deserializer(loads)%0A%0Akey = %22A%22 * 12%0Aval = %22B%22 * 1024 %0A%0Ad = dict(name=%22John%22, no=1)%0A%0Adb = KyotoTycoon()%0Adb.open()%0Aprint db.set(key, d)%0Aret = db.get(key)%0Aassert(d == ret)%0Adb.close()%0A%0A%0A
|
|
9c0a74194e6546eac6dbaec000599a623d525909
|
Create drivers.py
|
chips/digital/pca9698/drivers.py
|
chips/digital/pca9698/drivers.py
|
Python
| 0.000001
|
@@ -0,0 +1,37 @@
+%0A%0ADRIVERS%5B%22pca9698%22 %5D = %5B%22PCA9698%22%5D%0A%0A
|
|
d6492629e3c837374082cac71034a7bad36291bc
|
Test of commit
|
Parser.py
|
Parser.py
|
Python
| 0
|
@@ -0,0 +1,34 @@
+if __name__ == '__main__':%0A main()
|
|
bef69c38103e8ef937fea41a0a58c934b34f4281
|
add yaml syntax checker script
|
bosi/rhosp_resources/yamls/yaml_syntax_check.py
|
bosi/rhosp_resources/yamls/yaml_syntax_check.py
|
Python
| 0.000003
|
@@ -0,0 +1,1411 @@
+#!/usr/bin/env python%0A%0Aimport os%0Aimport sys%0Aimport yaml%0A%0AEXIT_ERROR = -1%0AYAML_FILE_EXT = %22.yaml%22%0A%0A%0Adef help():%0A %22%22%22 Print how to use the script %22%22%22%0A print %22Usage: %25s %3Cdirectory%3E%22 %25 sys.argv%5B0%5D%0A%0A%0Adef check_yaml_syntax(f):%0A %22%22%22 Check the syntax of the given YAML file.%0A return: True if valid, False otherwise%0A %22%22%22%0A with open(f, 'r') as stream:%0A try:%0A yaml.load(stream)%0A except yaml.YAMLError as exc:%0A print %22%25s: Invalid YAML syntax.%5Cn%25s%5Cn%22 %25 (f, exc)%0A return False%0A return True%0A%0A%0Adef main():%0A %22%22%22 Find all YAML files in the input directory and validate their syntax%0A %22%22%22%0A if len(sys.argv) %3C 2:%0A help()%0A sys.exit(EXIT_ERROR)%0A%0A yaml_dir = sys.argv%5B1%5D%0A if not os.path.isdir(yaml_dir):%0A print %22ERROR: Invalid directory %25s%22 %25 yaml_dir%0A sys.exit(EXIT_ERROR)%0A%0A all_valid = True%0A for root, dirs, files in os.walk(yaml_dir):%0A for f in files:%0A if YAML_FILE_EXT in f:%0A fname = root + %22/%22 + f%0A valid = check_yaml_syntax(fname)%0A if valid:%0A print %22%25s: Valid YAML syntax%22 %25 fname%0A else:%0A all_valid = False%0A break%0A%0A if all_valid:%0A print %22All files have valid YAML syntax%22%0A else:%0A print %22Some files have invalid YAML syntax%22%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A%0A
|
|
f09c45cde66dd8da07511e1105af14ffd41799b0
|
add a command to trigger a bulk sync
|
crate_project/apps/crate/management/commands/trigger_bulk_sync.py
|
crate_project/apps/crate/management/commands/trigger_bulk_sync.py
|
Python
| 0.000001
|
@@ -0,0 +1,240 @@
+from django.core.management.base import BaseCommand%0A%0Afrom pypi.tasks import bulk_synchronize%0A%0A%0Aclass Command(BaseCommand):%0A%0A def handle(self, *args, **options):%0A bulk_synchronize.delay()%0A print %22Bulk Synchronize Triggered%22%0A
|
|
27ed68923579c5afff0c70b025deb8b73d448aa8
|
Set calculation type of all indicators to Number
|
indicators/migrations/0013_set_all_calculation_type_to_numeric.py
|
indicators/migrations/0013_set_all_calculation_type_to_numeric.py
|
Python
| 0.000003
|
@@ -0,0 +1,515 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.3 on 2018-07-04 09:56%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0Afrom ..models import Indicator%0A%0A%0Adef set_calculation_type(apps, schema_editor):%0A Indicator.objects.all().update(%0A calculation_type=Indicator.CALC_TYPE_NUMERIC)%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('indicators', '0012_auto_20180704_0256'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(set_calculation_type),%0A %5D%0A
|
|
834516acf7b5cfbbb0f728f8b725bea120b5f5b3
|
Add python version of the post-receive hook
|
post_receive.py
|
post_receive.py
|
Python
| 0.000001
|
@@ -0,0 +1,1303 @@
+import re%0Aimport os%0Aimport sys%0Aimport os%0Aimport json%0Afrom subprocess import Popen, PIPE%0Afrom httplib2 import Http%0A%0ApostURL = %22http://localhost:2069/json%22%0A%0Apwd = os.getcwd()%0Aif len(sys.argv) %3C= 3:%0A print(%22Usage: post-receive %5Bold%5D %5Bnew%5D %5Bref%5D%22)%0A exit()%0A%0Aold, new, ref = sys.argv%5B1:4%5D%0Am = re.match(r%22%5E.*/(%5B%5E/%5D+)$%22, pwd)%0Aif not m:%0A print(%22Could not figure out which project this is :(%22, project)%0A exit()%0A%0Aproject = m.group(1)%0Aprint(%22Posting commit message for project %22 + project)%0A%0Aprocess = Popen(%5B%22git%22, %22show%22, %22--name-only%22, new%5D, stdout=PIPE)%0A#process = Popen(%5B%22ls%22, %22-la%22%5D, stdout=PIPE)%0Aexit_code = os.waitpid(process.pid, 0)%0Aoutput = process.communicate()%5B0%5D%0A%0Aoutput = %22%22%22%0AAuthor: Humbedooh %3Chumbedooh@apache.org%3E%0AStuffs: Mooo%0A%0ALog message goes here%0A%22%22%22%0A%0Acommit = %7B'ref': ref, 'repository': %22git%22, 'hash': new, 'project': project%7D%0A%0Aheaders, commit%5B'log'%5D = output.split(%22%5Cn%5Cn%22, 2)%0A%0Aparsed = dict(re.findall(r%22(?P%3Cname%3E%5B%5E:%5Cn%5D+): (?P%3Cvalue%3E%5B%5E%5Cr%5Cn%5D+)%22, headers))%0A%0Aauthor = re.match(r%22%5E(.+) %3C(.+)%3E$%22, parsed.get(%22Author%22, %22?? %3C??@??%3E%22))%0Aif author:%0A commit%5B'author'%5D = author.group(1)%0A commit%5B'email'%5D = author.group(2)%0Aelse:%0A commit%5B'author'%5D = %22Unknown%22%0A commit%5B'email'%5D = %22unknown@unknown%22%0A%0A%0Adata = json.dumps(commit) + %22%5Cn%5Cn%22%0Aprint(data)%0AHttp().request(postURL, %22PUT%22, data)%0A%0A
|
|
6e28da4e1a1d8ad794f12d9782b0e2dd54119dc4
|
add mysql module
|
db_mysql_module.py
|
db_mysql_module.py
|
Python
| 0.000001
|
@@ -0,0 +1,2471 @@
+__author__ = 'root'%0Aimport pymysql;%0Aimport sqlalchemy;%0Aimport threading;%0Afrom time import clock;%0Aclass SQLiteWraper(object):%0A def __init__(self):%0A # self.lock = threading.RLock()%0A self.engine = sqlalchemy.create_engine('mysql+pymysql://developer:developer@172.28.217.66/xixiche?charset=utf8')%0A%0A def get_conn(self):%0A conn = self.engine.connect();%0A return conn%0A%0A def conn_close(self,conn=None):%0A conn.close()%0A%0A def time_counter(func):%0A def count_second(self,*args,**kwargs):%0A start=clock()%0A rs = func(self,*args,**kwargs)%0A finish=clock()%0A print(%22%25.2f%22 %25 (finish-start))%0A return rs%0A return count_second%0A%0A def conn_trans(func):%0A def connection(self,*args,**kwargs):%0A # self.lock.acquire()%0A conn = self.get_conn()%0A kwargs%5B'conn'%5D = conn%0A rs = func(self,*args,**kwargs)%0A self.conn_close(conn)%0A # self.lock.release()%0A return rs%0A return connection%0A%0A @time_counter%0A @conn_trans%0A def batch(self,sqllist,conn=None):%0A trans = conn.begin()%0A try:%0A for sql in sqllist:%0A print(%22executing ...%22+sql)%0A conn.execute(sql)%0A trans.commit()%0A except pymysql.IntegrityError as e:%0A #print e%0A return -1%0A except Exception as e:%0A print (e)%0A return -2%0A return 0%0A%0A @conn_trans%0A def execute(self,sql,conn=None):%0A trans = conn.begin()%0A try:%0A result = conn.execute(sql)%0A trans.commit()%0A except pymysql.IntegrityError as e:%0A #print e%0A return -1%0A except Exception as e:%0A print (e)%0A return -2%0A return result%0A%0A @time_counter%0A def sqrt(self,a, eps=1e-10):%0A if a == 0.0 or a == 1.0:%0A return a%0A x = 1.0%0A y = x - (x*x-a)/(2*x)%0A while not (-eps %3C y-x %3C eps):%0A x = y%0A y = x - (x*x-a)/(2*x)%0A return x%0Aif __name__=='__main__':%0A db = SQLiteWraper();%0A # data_merchant = db.execute(%22select * from data_merchant%22)%0A # for row in data_merchant:%0A # print(row.items())%0A print(db.sqrt(100))%0A testsql = %5B%5D;%0A for i in range(1,1):%0A testsql.append(%22insert 
into test (name,test_bigint) values ('hehe','%22+str(i)+%22')%22)%0A print(%22sqllist prepared%22)%0A db.batch(testsql)%0A
|
|
11d0d641adf32a7e976bf9df8c4dc9ba19bba3b4
|
Binary graph algorithms to find height of binary tree and to check whether the given binary tree is full binary or not
|
binary_tree/basic_binary_tree.py
|
binary_tree/basic_binary_tree.py
|
Python
| 0.996305
|
@@ -0,0 +1,1130 @@
+class Node:%0A def __init__(self, data):%0A self.data = data%0A self.left = None%0A self.right = None%0A%0A%0Adef depth_of_tree(tree):%0A if tree is None:%0A return 0%0A else:%0A depth_l_tree = depth_of_tree(tree.left)%0A depth_r_tree = depth_of_tree(tree.right)%0A if depth_l_tree %3E depth_r_tree:%0A return 1 + depth_l_tree%0A else:%0A return 1 + depth_r_tree%0A%0A%0Adef is_full_binary_tree(tree):%0A if tree is None:%0A return True%0A if (tree.left is None) and (tree.right is None):%0A return True%0A if (tree.left is not None) and (tree.right is not None):%0A return (is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right))%0A else:%0A return False%0A%0A%0Adef main():%0A tree = Node(1)%0A tree.left = Node(2)%0A tree.right = Node(3)%0A tree.left.left = Node(4)%0A tree.left.right = Node(5)%0A tree.left.right.left = Node(6)%0A tree.right.left = Node(7)%0A tree.right.left.left = Node(8)%0A tree.right.left.left.right = Node(9)%0A%0A print(is_full_binary_tree(tree))%0A print(depth_of_tree(tree))%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
35748678aaea24355d5207ae26d10dd455a47820
|
implement HostTestsSuite
|
src/test/hosttestssuite.py
|
src/test/hosttestssuite.py
|
Python
| 0
|
@@ -0,0 +1,1144 @@
+%0Afrom src.test.abstractovirttestssuite import AbstractOvirtTestsSuite%0Afrom ovirtsdk.xml import params%0Afrom src.infrastructure.annotations import conflicts%0Afrom src.resource.hostresourcemanager import HostResourceManager%0A%0A%0Aclass HostTestsSuite(AbstractOvirtTestsSuite):%0A%0A __hostResourceManager = HostResourceManager()%0A%0A def getHostResourceManager(self):%0A return HostTestsSuite.__hostResourceManager%0A%0A####### pre/post test run #############%0A%0A def setUp(self):%0A pass%0A%0A def tearDown(self):%0A pass%0A%0A######## pre/post class run #############%0A%0A @classmethod%0A def setUpClass(cls):%0A pass%0A%0A @classmethod%0A def tearDownClass(cls):%0A pass%0A%0A# ############### test/s ###############%0A%0A%0A @conflicts.resources(%5Bparams.Host%5D)%0A def testCreate(self):%0A%0A # verify add() response%0A new_host = self.getHostResourceManager().add()%0A self.assertNotEqual(new_host, None, 'Host create has failed!')%0A%0A # verify get of newly created cluster%0A host = self.getHostResourceManager().get(get_only=True)%0A self.assertNotEqual(host, None, 'Fetch of host post create has failed!')%0A
|
|
cef6f559f20d8aace00cbed8621b16339aa6e0c6
|
hello world, first problem in python
|
problems/1/1.py
|
problems/1/1.py
|
Python
| 0.999367
|
@@ -0,0 +1,888 @@
+# coding: utf-8%0A%0A%22%22%22%0ATo run:%0A python2.7 1.py%0A%0AProblem:%0A If we list all the natural numbers below 10 that are multiples of 3 or 5,%0A we get 3, 5, 6 and 9. The sum of these multiples is 23.%0A%0A Find the sum of all the multiples of 3 or 5 below 1000.%0A%22%22%22%0A%0Aimport time%0A%0Adef oneliner():%0A return sum(%0A i for i in range(1000)%0A if i %25 3 == 0 or i %25 5 == 0%0A )%0A%0A%0Adef impl1():%0A numbers = %5B%5D%0A for i in range(1000):%0A if i %25 3 == 0 or i %25 5 == 0:%0A numbers.append(i)%0A%0A return sum(numbers)%0A%0Adef impl2():%0A j = 0%0A for i in range(1000):%0A if i %25 3 == 0 or i %25 5 == 0:%0A j += i%0A%0A return j%0A%0Aif __name__ == %22__main__%22:%0A%0A def timeit(function):%0A t1 = time.time()%0A output = function()%0A t2 = time.time()%0A return output, t2-t1%0A%0A print timeit(impl1)%0A print timeit(impl2)%0A print timeit(oneliner)%0A
|
|
264f4a827e39d55259aaa53bde967dae6befc606
|
Complete Programming Experience: polysum
|
pset2/grader.py
|
pset2/grader.py
|
Python
| 0
|
@@ -0,0 +1,770 @@
+# Grader%0A# 10.0 points possible (ungraded)%0A# A regular polygon has n number of sides. Each side has length s.%0A%0A# The area of a regular polygon is: 0.25%E2%88%97n%E2%88%97s2tan(%CF%80/n)%0A# The perimeter of a polygon is: length of the boundary of the polygon%0A# Write a function called polysum that takes 2 arguments, n and s. This function should sum the area and square of the perimeter of the regular polygon. The function returns the sum, rounded to 4 decimal places.%0A%0Afrom math import tan, pi%0A%0Adef polysum(n, s):%0A %22%22%22 calculate the sum of the perimeter and area of the polygon %22%22%22%0A perimeter_squared = (n * s)**2%0A%0A fraction_top = (.25 * n) * (s**2)%0A fraction_bottom = tan(pi/n)%0A%0A area = fraction_top/fraction_bottom%0A%0A return round(perimeter_squared + area, 4)%0A%0Aprint(polysum(52, 78))
|
|
e8170b2f446f23771bd746747493bebbd0dc9288
|
add velocity filter
|
nodes/velocity_filter.py
|
nodes/velocity_filter.py
|
Python
| 0.000001
|
@@ -0,0 +1,2807 @@
+#! /usr/bin/env python%0A%0Aimport rospy%0Aimport roslib%0Aroslib.load_manifest(%22otl_diff_drive%22)%0A%0Afrom otl_diff_drive import twist_velocities%0A%0Afrom geometry_msgs.msg import Twist%0A%0Adef isStopVelocity(twist):%0A VERY_SMALL = 0.0001%0A return abs(twist.linear.x) %3C VERY_SMALL and abs(twist.angular.z) %3C VERY_SMALL%0A%0Aclass VelocityFilter:%0A def __init__(self):%0A self._max_linear_velocity = rospy.get_param(%22~max_translational_velocity%22, 1.0)%0A self._max_angular_velocity = rospy.get_param(%22~max_rotational_velocity%22, 3.0)%0A self._velocity_filter = twist_velocities.VelocityFilter(self._max_linear_velocity, self._max_angular_velocity)%0A%0A self._max_linear_accel = rospy.get_param(%22~max_translational_acceleration%22, 1.0)%0A self._max_angular_accel = rospy.get_param(%22~max_rotational_acceleration%22, 3.0)%0A self._accel_filter = twist_velocities.AccelFilter(self._max_linear_accel, self._max_angular_accel)%0A%0A self._output_pub = rospy.Publisher('/output_vel', Twist)%0A self._command_sub = rospy.Subscriber(%22/cmd_vel%22, Twist, self.on_twist_command)%0A self._current_velocity = None%0A self._output_velocity = Twist()%0A self._last_command_stamp = None%0A%0A def on_twist_command(self, command):%0A self._current_velocity = command%0A self.publish()%0A self._last_command_stamp = rospy.Time.now()%0A%0A def get_elapsed_sec(self):%0A return (rospy.Time.now() - self._last_command_stamp).to_sec() %0A%0A def publish(self):%0A if self._current_velocity:%0A limited_x, limited_theta = self._velocity_filter.filter(self._current_velocity.linear.x, self._current_velocity.angular.z)%0A if self._last_command_stamp:%0A duration = self.get_elapsed_sec()%0A else:%0A duration = 0.1%0A if duration %3E 0.1:%0A duration = 0.1%0A x, theta = self._accel_filter.filter(limited_x, limited_theta, duration)%0A self._output_velocity.linear.x = x%0A self._output_velocity.angular.z = theta%0A self._output_pub.publish(self._output_velocity)%0A%0A def main(self):%0A r = 
rospy.Rate(10)%0A TIME_FOR_STOP = 5.0%0A while not rospy.is_shutdown():%0A if self._last_command_stamp:%0A # recently updated%0A if self.get_elapsed_sec() %3C TIME_FOR_STOP:%0A # target is stop%0A if isStopVelocity(self._current_velocity):%0A # output is not stop velocity%0A if not isStopVelocity(self._output_velocity):%0A # then repeat publish%0A self.publish()%0A r.sleep()%0A%0A%0Aif __name__ == '__main__':%0A rospy.init_node('velocity_filter')%0A node = VelocityFilter()%0A node.main()%0A
|
|
18ed0900c22fa2ed646f08adf66e1917a6a04b43
|
add collect_impression
|
amimoto_alexa/collect_message.py
|
amimoto_alexa/collect_message.py
|
Python
| 0.000002
|
@@ -0,0 +1,715 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%22%22%22%0A for amimoto_alexa%0A%22%22%22%0A%0A%0Aimport lamvery%0Afrom helpers import *%0Afrom debugger import *%0A%0A%0Adef collect_impression(intent, session):%0A %22%22%22Collect impression and finalize session%0A %22%22%22%0A session_attributes = build_session_attributes(session)%0A card_title = %22Impression%22%0A%0A debug_logger(session)%0A speech_output = %22Thank you! You can see impressions on twitter and ,A MI MO TO Blog.%22 %5C%0A %22Have a nice day! %22%0A%0A# todo: tweet if exist id.%0A# todo: store session summary to firehose%0A should_end_session = True%0A return build_response(session_attributes, build_speechlet_response(%0A card_title, speech_output, None, should_end_session))%0A
|
|
aec48fe807e4589344a9f04e13b8f0b651110917
|
add package installer.
|
python/setup.py
|
python/setup.py
|
Python
| 0
|
@@ -0,0 +1,341 @@
+from setuptools import setup%0A%0Aimport epidb_client%0Aversion = epidb_client.__version__%0A%0Asetup(%0A name = %22epidb-client%22,%0A version = version,%0A url = 'http://www.epiwork.eu/',%0A description = 'EPIWork Database - Client Code',%0A author = 'Fajran Iman Rusadi',%0A packages = %5B'epidb_client'%5D,%0A install_requires = %5B'setuptools'%5D,%0A)%0A%0A
|
|
437431289b25418c5acd9890b86350aa62ae0668
|
add updated script with changes from @fransua
|
transposon_annotation/transposon_annotation_ecolopy_scripts/ecolopy.py
|
transposon_annotation/transposon_annotation_ecolopy_scripts/ecolopy.py
|
Python
| 0
|
@@ -0,0 +1,2373 @@
+import matplotlib %0Amatplotlib.use('Agg') %0Afrom ecolopy_dev import Community%0Afrom ecolopy_dev.utils import draw_shannon_distrib%0A%0Acom = Community('test_log_abund.txt')%0Aprint com%0A%0Acom.fit_model('ewens')%0Acom.set_current_model('ewens')%0Aewens_model = com.get_model('ewens')%0Aprint ewens_model%0A%0Acom.fit_model('lognormal')%0Acom.set_current_model('lognormal')%0Alognormal_model = com.get_model('lognormal')%0Aprint lognormal_model%0A%0Acom.fit_model('etienne')%0Acom.set_current_model('etienne')%0Aetienne_model = com.get_model('etienne')%0Aprint etienne_model%0A%0Atmp = %7B%7D%0Alikelihoods = %5B%5D%0Afor met in %5B'fmin', 'slsqp', 'l_bfgs_b', 'tnc'%5D:%0A print 'Optimizing with %25s...' %25 met%0A try:%0A com.fit_model(name='etienne', method=met, verbose=False)%0A model = com.get_model('etienne')%0A tmp%5Bmet%5D =%7B%7D%0A tmp%5Bmet%5D%5B'model'%5D = model%0A tmp%5Bmet%5D%5B'theta'%5D = model.theta%0A tmp%5Bmet%5D%5B'I'%5D = model.I%0A tmp%5Bmet%5D%5B'm'%5D = model.m%0A tmp%5Bmet%5D%5B'lnL'%5D = model.lnL%0A # in case you reach two times the same likelyhood it may not be necessary%0A # to go on with other optimization strategies... 
%0A # of course if time is not limiting it is not worth to check :)%0A if round(model.lnL,1) in likelihoods:%0A break%0A likelihoods.append(round(model.lnL, 1))%0A except Exception as e:%0A print ' optimization failed: ' + e.args%5B0%5D%0A%0A# in case optimization by fmin failed to found correct values for theta and m:%0Aif not (1 %3C= tmp%5B'fmin'%5D%5B'theta'%5D %3C com.S and %5C%0A 1e-50 %3C= tmp%5B'fmin'%5D%5B'm'%5D %3C 1-1e-50):%0A del (tmp%5B'fmin'%5D)%0A%0A# find the model with the higher likelihood:%0Amet = min(tmp, key=lambda x: tmp%5Bx%5D%5B'lnL'%5D)%0A%0A# load it as 'etienne' model%0Acom.set_model(tmp%5Bmet%5D%5B'model'%5D)%0A%0Alrt = com.lrt('ewens', 'etienne')%0Abest = 'ewens' if lrt %3E 0.05 else 'etienne'%0Aprint 'Best model by LRT was: ' + best%0A%0Acom.generate_random_neutral_distribution(model=best)%0A%0Apval, neut_h = com.test_neutrality (model=best, gens=10000, full=True)%0A#draw_shannon_distrib(neut_h, abd.shannon)%0Adraw_shannon_distrib(neut_h, com.shannon, outfile='test_log_shannon_dist.pdf', filetype='pdf')%0Aprint 'P-value for neutrality test was: ', pval%0A%0Aout = open('test_log_shannon_neutral_data.tsv', 'w')%0Aout.write('# shannon:' + str(com.shannon) + '%5Cn')%0Aout.write('%5Cn'.join(%5Bstr(s) for s in neut_h%5D) + '%5Cn')%0Aout.close()%0A%0Acom.dump_community('test_log_ecolopy.pik')
|
|
c7c3ab0a4013df99b928351040f1156b07ba6767
|
Add some tests for the tokens
|
tests/unit/utils/test_tokens.py
|
tests/unit/utils/test_tokens.py
|
Python
| 0.000001
|
@@ -0,0 +1,1627 @@
+from flask import current_app%0Afrom itsdangerous import TimedJSONWebSignatureSerializer%0Afrom flaskbb.utils.tokens import make_token, get_token_status%0A%0A%0Adef test_make_token(user):%0A token = make_token(user, %22test%22)%0A s = TimedJSONWebSignatureSerializer(current_app.config%5B'SECRET_KEY'%5D)%0A unpacked_token = s.loads(token)%0A assert user.id == unpacked_token%5B%22id%22%5D%0A assert %22test%22 == unpacked_token%5B%22op%22%5D%0A%0A%0Adef test_valid_token_status(user):%0A token = make_token(user, %22valid_test%22)%0A expired, invalid, token_user = get_token_status(token, %22valid_test%22)%0A%0A assert not expired%0A assert not invalid%0A assert token_user == user%0A%0A%0Adef test_token_status_with_data(user):%0A token = make_token(user, %22test_data%22)%0A expired, invalid, token_user, data = %5C%0A get_token_status(token, %22test_data%22, return_data=True)%0A assert user.id == data%5B%22id%22%5D%0A assert %22test_data%22 == data%5B%22op%22%5D%0A%0A%0Adef test_token_operation(user):%0A token = make_token(user, %22operation_test%22)%0A expired, invalid, token_user = get_token_status(token, %22invalid_op%22)%0A assert invalid%0A assert not expired%0A assert not token_user%0A%0A%0Adef test_invalid_token_status(user):%0A token = %22this-is-not-a-token%22%0A expired, invalid, token_user, data = %5C%0A get_token_status(token, %22invalid_test%22, return_data=True)%0A%0A assert invalid%0A assert not expired%0A assert not token_user%0A assert data is None%0A%0A%0Adef test_expired_token_status(user):%0A token = make_token(user, %22expired_test%22, -1)%0A expired, invalid, token_user = get_token_status(token, %22expired_test%22)%0A assert expired%0A assert not invalid%0A assert not token_user%0A
|
|
0848197b3c9ff8d09575b85b5e3a2ca1aac6f6c5
|
Put split and merge in own module too
|
app/drivers/pycolator/splitmerge.py
|
app/drivers/pycolator/splitmerge.py
|
Python
| 0
|
@@ -0,0 +1,2394 @@
+from app.drivers.basedrivers import PycolatorDriver%0Afrom app.preparation import pycolator as preparation%0Afrom app.readers import pycolator as readers%0A%0A%0Aclass SplitDriver(PycolatorDriver):%0A def __init__(self, **kwargs):%0A super(SplitDriver, self).__init__(**kwargs)%0A self.targetsuffix = kwargs.get('targetsuffix', '_target.xml')%0A self.decoysuffix = kwargs.get('decoysuffix', '_decoy.xml')%0A%0A def prepare(self):%0A self.ns, self.static_xml = self.prepare_percolator_output(self.fn)%0A%0A def run(self):%0A td = %7B'target': self.targetsuffix, 'decoy': self.decoysuffix%7D%0A for filter_type in %5B'target', 'decoy'%5D:%0A self.prepare()%0A self.set_features(filter_type)%0A self.outsuffix = td%5Bfilter_type%5D%0A self.write(filter_type)%0A%0A def set_features(self, filter_type):%0A %22%22%22 Calls splitter to split percolator output into target/decoy elements.%0A Writes two new xml files with features. Currently only psms and%0A peptides. Proteins not here, since one cannot do protein inference%0A before having merged and remapped multifraction data anyway.%0A %22%22%22%0A elements_to_split = %7B'psm': self.allpsms, 'peptide': self.allpeps%7D%0A self.features = preparation.split_target_decoy(elements_to_split,%0A self.ns, filter_type)%0A%0A%0Aclass MergeDriver(PycolatorDriver):%0A %22%22%22Base class for merging multiple percolator fractions under different%0A sorts of filtering. 
It writes a single percolator out xml from multiple fractions.%0A Namespace and static xml come from first percolator file.%0A Make sure fractions are from same percolator run.%22%22%22%0A outsuffix = '_merged.xml'%0A%0A def __init__(self, **kwargs):%0A super(MergeDriver, self).__init__(**kwargs)%0A self.mergefiles = %5Bself.fn%5D%0A self.mergefiles.extend(kwargs.get('multifile_input', None))%0A%0A def prepare(self):%0A self.ns, self.static_xml = self.prepare_percolator_output(self.fn)%0A%0A def set_features(self):%0A %22%22%22%22Merge all psms and peptides%22%22%22%0A allpsms_str = readers.generate_psms_multiple_fractions_strings(%0A self.mergefiles, self.ns)%0A allpeps_str = readers.generate_peptides_multiple_fractions_strings(%0A self.mergefiles, self.ns)%0A self.features = %7B'psm': allpsms_str, 'peptide': allpeps_str%7D%0A
|
|
4bf5d21402d5394f36eec006fd3ba03354bb8523
|
Add dashboard url route
|
dashboard/urls.py
|
dashboard/urls.py
|
Python
| 0.000001
|
@@ -0,0 +1,281 @@
+from django.conf.urls import patterns, url%0Afrom dashboard import views%0A%0Aurlpatterns = patterns('dashboard.views',%0A url(r'%5E$', views.dashboard, name = 'dashboard'),%0A url(r'%5Elogin/$', views.enter_gate, name = 'login'),%0A url(r'%5Elogout/$', views.exit_gate, name = 'logout'),%0A)
|
|
5dd9cc55368e9f5bd8c79f74f3c7c1fc84a6bd8b
|
Add common migration (unrelated to branch)
|
common/migrations/0010_auto_20200529_0514.py
|
common/migrations/0010_auto_20200529_0514.py
|
Python
| 0
|
@@ -0,0 +1,421 @@
+# Generated by Django 2.2.12 on 2020-05-29 05:14%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('common', '0009_upload_hosting'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='upload',%0A name='destination',%0A field=models.CharField(max_length=256, verbose_name='destination path'),%0A ),%0A %5D%0A
|
|
f53488e3c797afb4f47f005e078d53a3bea14715
|
add solution for Combination Sum III
|
algorithms/combinationSumIII/combinationSumIII.py
|
algorithms/combinationSumIII/combinationSumIII.py
|
Python
| 0.000007
|
@@ -0,0 +1,323 @@
+class Solution:%0A # @param %7Binteger%7D k%0A # @param %7Binteger%7D n%0A # @return %7Binteger%5B%5D%5B%5D%7D%0A%0A def combinationSum3(self, k, n):%0A return %5Barr for arr in (%5Bj+1 for j in xrange(10) if i & (1 %3C%3C j)%5D%0A for i in xrange(1, 512) if bin(i).count('1') == k)%0A if sum(arr) == n%5D%0A
|
|
fedb80cf8ee5859e1d8f5caccc7a67ae979e743e
|
Remove unnecessary grit_out_dir variable from component_strings.gyp.
|
components/component_strings.gyp
|
components/component_strings.gyp
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# GYP build description for the components string resources.
{
  'variables': {
    # NOTE(review): this file-level grit_out_dir is shadowed by the
    # action-local 'grit_out_dir' below and looks unused here — confirm
    # before depending on it.
    'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/components',
  },
  'targets': [
    {
      # Runs grit over component_strings.grd to generate the localized
      # string resources; the target itself produces no library.
      'target_name': 'component_strings',
      'type': 'none',
      'actions': [
        {
          'action_name': 'component_strings',
          'variables': {
            'grit_grd_file': 'component_strings.grd',
            # Generated headers/resources land under .../components/strings.
            'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/components/strings',
          },
          'includes': [ '../build/grit_action.gypi' ],
        },
      ],
      # Let dependents include the generated headers without extra setup.
      'direct_dependent_settings': {
        'include_dirs': [
          '<(SHARED_INTERMEDIATE_DIR)/components/strings',
        ],
      },
    },
  ],
}
|
Python
| 0.000166
|
@@ -166,91 +166,8 @@
%0A%0A%7B%0A
- 'variables': %7B%0A 'grit_out_dir': '%3C(SHARED_INTERMEDIATE_DIR)/components',%0A %7D,%0A
't
|
6307a8a813062b3faad6b0f393d1886d4ad9bed8
|
add initial date for committees
|
application/migrations/0019_auto_20150316_2009.py
|
application/migrations/0019_auto_20150316_2009.py
|
Python
| 0.000011
|
@@ -0,0 +1,713 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0Afrom application.models import Committee%0A%0Adef add_committees(apps, schema_editor):%0A committees = %5B%0A 'Fundraising and Social Action',%0A 'Membership',%0A 'Social Functions',%0A 'Archivist and Historian',%0A 'Treasury',%0A 'Communication and Social Media',%0A 'Executive Committee'%0A %5D%0A%0A for committee in committees:%0A c = Committee(name=committee)%0A c.save()%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('application', '0018_auto_20150316_1920'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(add_committees),%0A %5D%0A
|
|
fa0afad07f34f350233ae2a4f1654faef9bc1814
|
Add a python version for the phonebook benchmark
|
utils/benchmark/Strings/PySort.py
|
utils/benchmark/Strings/PySort.py
|
Python
| 0.000001
|
@@ -0,0 +1,1595 @@
+%0Awords=%5B%0A u%22James%22, u%22John%22, u%22Robert%22, u%22Michael%22, u%22William%22, u%22David%22, u%22Richard%22, u%22Joseph%22,%0A u%22Charles%22, u%22Thomas%22, u%22Christopher%22, u%22Daniel%22, u%22Matthew%22, u%22Donald%22, u%22Anthony%22,%0A u%22Paul%22, u%22Mark%22, u%22George%22, u%22Steven%22, u%22Kenneth%22, u%22Andrew%22, u%22Edward%22, u%22Brian%22,%0A u%22Joshua%22, u%22Kevin%22, u%22Ronald%22, u%22Timothy%22, u%22Jason%22, u%22Jeffrey%22, u%22Gary%22, u%22Ryan%22,%0A u%22Nicholas%22, u%22Eric%22, u%22Stephen%22, u%22Jacob%22, u%22Larry%22, u%22Frank%22, u%22Jonathan%22, u%22Scott%22,%0A u%22Justin%22, u%22Raymond%22, u%22Brandon%22, u%22Gregory%22, u%22Samuel%22, u%22Patrick%22, u%22Benjamin%22,%0A u%22Jack%22, u%22Dennis%22, u%22Jerry%22, u%22Alexander%22, u%22Tyler%22, u%22Douglas%22, u%22Henry%22, u%22Peter%22,%0A u%22Walter%22, u%22Aaron%22, u%22Jose%22, u%22Adam%22, u%22Harold%22, u%22Zachary%22, u%22Nathan%22, u%22Carl%22,%0A u%22Kyle%22, u%22Arthur%22, u%22Gerald%22, u%22Lawrence%22, u%22Roger%22, u%22Albert%22, u%22Keith%22, u%22Jeremy%22,%0A u%22Terry%22, u%22Joe%22, u%22Sean%22, u%22Willie%22, u%22Jesse%22, u%22Ralph%22, u%22Billy%22, u%22Austin%22, u%22Bruce%22,%0A u%22Christian%22, u%22Roy%22, u%22Bryan%22, u%22Eugene%22, u%22Louis%22, u%22Harry%22, u%22Wayne%22, u%22Ethan%22,%0A u%22Jordan%22, u%22Russell%22, u%22Alan%22, u%22Philip%22, u%22Randy%22, u%22Juan%22, u%22Howard%22, u%22Vincent%22,%0A u%22Bobby%22, u%22Dylan%22, u%22Johnny%22, u%22Phillip%22, u%22Craig%22%5D%0A%0A# This is a phone book record.%0Aclass Record:%0A def __init__(self, firstname, lastname):%0A self.first = firstname%0A self.last = lastname%0A%0A def __lt__(self, other):%0A if self.last %3C other.last:%0A return True%0A if self.last %3E other.last:%0A return False%0A return self.first %3C other.first%0A%0ARecords = %5B%5D%0A%0Afor first in words:%0A for last in words:%0A Records.append(Record(first, last))%0A%0Afor i in xrange(100):%0A y = Records%5B:%5D%0A y 
= sorted(y)%0A #for w in y:%0A # print w.first, w.last%0A%0A
|
|
5c063b06a35fef646a45b52f0d414c818b1d0993
|
Remove the dependency to dbus-glib from device.gyp.
|
device/device.gyp
|
device/device.gyp
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# GYP build description for the device/ layer: the Bluetooth library,
# its gmock-based test doubles, and the device unit-test binary.
{
  'variables': {
  },
  'targets': [
    {
      # Cross-platform Bluetooth abstraction; ChromeOS-specific backends
      # are conditionally excluded below.
      'target_name': 'device_bluetooth',
      'type': '<(library)',
      'dependencies': [
        '../chrome/chrome_resources.gyp:chrome_strings',
        '../third_party/libxml/libxml.gyp:libxml',
        '../ui/ui.gyp:ui'
      ],
      'sources': [
        'bluetooth/bluetooth_adapter.cc',
        'bluetooth/bluetooth_adapter.h',
        'bluetooth/bluetooth_adapter_chromeos.cc',
        'bluetooth/bluetooth_adapter_chromeos.h',
        'bluetooth/bluetooth_adapter_factory.cc',
        'bluetooth/bluetooth_adapter_factory.h',
        'bluetooth/bluetooth_device.cc',
        'bluetooth/bluetooth_device.h',
        'bluetooth/bluetooth_device_chromeos.cc',
        'bluetooth/bluetooth_device_chromeos.h',
        'bluetooth/bluetooth_out_of_band_pairing_data.h',
        'bluetooth/bluetooth_service_record.cc',
        'bluetooth/bluetooth_service_record.h',
        'bluetooth/bluetooth_socket.h',
        'bluetooth/bluetooth_socket_chromeos.cc',
        'bluetooth/bluetooth_socket_chromeos.h',
        'bluetooth/bluetooth_utils.cc',
        'bluetooth/bluetooth_utils.h',
      ],
      'conditions': [
        ['chromeos==0', {
          'sources!': [
            # ChromeOs-only; exclude on other platforms.
            'bluetooth/bluetooth_adapter_chromeos.cc',
            'bluetooth/bluetooth_adapter_chromeos.h',
            'bluetooth/bluetooth_device_chromeos.cc',
            'bluetooth/bluetooth_device_chromeos.h',
            'bluetooth/bluetooth_socket_chromeos.cc',
            'bluetooth/bluetooth_socket_chromeos.h',
          ]
        }, {  # chromeos==1
          # ChromeOS builds talk to the platform over D-Bus.
          'dependencies': [
            '../build/linux/system.gyp:dbus-glib',
            '../chromeos/chromeos.gyp:chromeos',
            '../dbus/dbus.gyp:dbus',
          ]
        }],
      ],
    },
    {
      # gmock doubles for the Bluetooth adapter/device interfaces.
      'target_name': 'device_bluetooth_mocks',
      'type': '<(library)',
      'dependencies': [
        'device_bluetooth',
        '../testing/gmock.gyp:gmock',
      ],
      'sources': [
        'bluetooth/test/mock_bluetooth_adapter.cc',
        'bluetooth/test/mock_bluetooth_adapter.h',
        'bluetooth/test/mock_bluetooth_device.cc',
        'bluetooth/test/mock_bluetooth_device.h',
      ],
      'include_dirs': [
        '..',
      ],
    },
    {
      # Unit-test binary for the device/ layer.
      'target_name': 'device_unittests',
      'type': '<(gtest_target_type)',
      'dependencies': [
        'device_bluetooth',
        'device_bluetooth_mocks',
        '../base/base.gyp:test_support_base',
        '../content/content.gyp:test_support_content',
        '../testing/gmock.gyp:gmock',
        '../testing/gtest.gyp:gtest',
      ],
      'sources': [
        'bluetooth/bluetooth_adapter_chromeos_unittest.cc',
        'bluetooth/bluetooth_adapter_chromeos_devices_unittest.cc',
        'bluetooth/bluetooth_service_record_unittest.cc',
        'bluetooth/bluetooth_utils_unittest.cc',
        'test/device_test_suite.cc',
        'test/device_test_suite.h',
        'test/run_all_unittests.cc',
      ],
      'conditions': [
        ['chromeos==0', {
          'sources!': [
            # ChromeOs-only; exclude on other platforms.
            'bluetooth/bluetooth_adapter_chromeos_unittest.cc',
            'bluetooth/bluetooth_adapter_chromeos_devices_unittest.cc',
          ]
        }, {  # chromeos==1
          'dependencies': [
            '../build/linux/system.gyp:dbus-glib',
            '../chromeos/chromeos.gyp:chromeos_test_support',
            '../dbus/dbus.gyp:dbus',
          ]
        }],
      ],
    },
  ],
}
|
Python
| 0.000064
|
@@ -1866,37 +1866,32 @@
/system.gyp:dbus
--glib
',%0A '
@@ -3579,13 +3579,8 @@
dbus
--glib
',%0A
|
6b76a0dc048e91137d432c697ebc8865b968a793
|
fix tests to be compatible with timestamp-based export checkpoints
|
corehq/apps/reports/tests/test_export_api.py
|
corehq/apps/reports/tests/test_export_api.py
|
from django.test.client import Client
from django.test import TestCase
from receiver.util import spoof_submission
import uuid
from corehq.apps.receiverwrapper.util import get_submit_url
from corehq.apps.domain.shortcuts import create_domain
from django.core.urlresolvers import reverse
from corehq.apps.users.models import WebUser
from couchforms.models import XFormInstance
from couchexport.export import ExportConfiguration
# Minimal XForm submission document; %(uid)s is interpolated per-form so
# each submission is distinct (see get_form below, which fills in a fresh
# uuid). Re-submitting the *same* rendered form makes it a duplicate.
FORM_TEMPLATE = """<?xml version='1.0' ?>
<foo xmlns:jrm="http://openrosa.org/jr/xforms" xmlns="http://www.commcarehq.org/export/test">
<meta>
<uid>%(uid)s</uid>
</meta>
</foo>
"""

# Domain every test submission and export request runs against.
DOMAIN = "test"
def get_form():
    """Render FORM_TEMPLATE with a freshly generated unique <uid>."""
    context = {"uid": uuid.uuid4().hex}
    return FORM_TEMPLATE % context
def submit_form(f=None, domain=DOMAIN):
    """Spoof-submit *f* (a freshly generated form when None) to *domain*.

    Returns whatever spoof_submission returns for the receiver endpoint.
    """
    payload = get_form() if f is None else f
    return spoof_submission(get_submit_url(domain), payload, hqsubmission=False)
def get_export_response(client, previous="", include_errors=False):
    """GET the export_data view for the test export tag.

    e.g. /a/wvtest/reports/export/?export_tag="http://openrosa.org/formdesigner/..."

    previous is an (optional) export token from an earlier response;
    include_errors forwards the flag of the same name to the view.
    """
    url = reverse("corehq.apps.reports.views.export_data", args=[DOMAIN])
    params = {
        "export_tag": '"http://www.commcarehq.org/export/test"',
        "previous_export": previous,
        "include_errors": include_errors,
        "format": "html",
        "use_cache": False,
    }
    return client.get(url, params)
class ExportTest(TestCase):
    """Integration tests for the couchexport-backed export_data view.

    Each test submits spoofed XForms for DOMAIN and asserts on the HTTP
    responses of get_export_response (302 = nothing new, 200 = data).
    """

    def _clear_docs(self):
        # Delete every form previously saved under the test export tag so
        # runs are isolated from each other.
        config = ExportConfiguration(XFormInstance.get_db(),
                                     [DOMAIN, "http://www.commcarehq.org/export/test"])
        for form in config.get_docs():
            XFormInstance.wrap(form).delete()

    def setUp(self):
        self._clear_docs()
        create_domain(DOMAIN)
        # Admin web user so the test client can log in and hit the view.
        self.couch_user = WebUser.create(None, "test", "foobar")
        self.couch_user.add_domain_membership(DOMAIN, is_admin=True)
        self.couch_user.save()

    def tearDown(self):
        self.couch_user.delete()
        self._clear_docs()

    def testExportTokens(self):
        """Export-token round trips: empty redirects, new data yields a
        token, a current token redirects, and an incremental export is
        smaller than a full one."""
        c = Client()
        c.login(**{'username': 'test', 'password': 'foobar'})

        # no data = redirect
        resp = get_export_response(c)
        self.assertEqual(302, resp.status_code)

        # data = data
        submit_form()
        resp = get_export_response(c)
        self.assertEqual(200, resp.status_code)
        self.assertTrue(resp.content is not None)
        self.assertTrue("X-CommCareHQ-Export-Token" in resp)
        prev_token = resp["X-CommCareHQ-Export-Token"]

        # data but no new data = redirect
        resp = get_export_response(c, prev_token)
        self.assertEqual(302, resp.status_code)

        # new data past the token makes it exportable again
        submit_form()
        resp = get_export_response(c, prev_token)
        self.assertEqual(200, resp.status_code)
        self.assertTrue(resp.content is not None)
        self.assertTrue("X-CommCareHQ-Export-Token" in resp)
        prev_token = resp["X-CommCareHQ-Export-Token"]

        # full export covers both submissions, incremental only the latest
        full_data = get_export_response(c).content
        partial_data = get_export_response(c, prev_token).content
        self.assertTrue(len(full_data) > len(partial_data))

    def testExportFilter(self):
        """Duplicate submissions are filtered from the export unless
        include_errors is set."""
        c = Client()
        c.login(**{'username': 'test', 'password': 'foobar'})

        # initially nothing
        self.assertEqual(302, get_export_response(c).status_code)

        # submit, assert something
        f = get_form()
        submit_form(f)
        resp = get_export_response(c)
        self.assertEqual(200, resp.status_code)
        initial_content = resp.content

        # resubmit, assert same since it's a dupe
        submit_form(f)
        resp = get_export_response(c)
        self.assertEqual(200, resp.status_code)
        # hack: check for the number of rows to ensure the new one
        # didn't get added. They aren't exactly the same because the
        # duplicate adds to the schema.
        self.assertEqual(initial_content.count("<tr>"),
                         resp.content.count("<tr>"))

        # unless we explicitly include errors
        resp = get_export_response(c, include_errors=True)
        self.assertEqual(200, resp.status_code)
        self.assertTrue(len(resp.content) > len(initial_content))
|
Python
| 0
|
@@ -418,16 +418,28 @@
guration
+%0Aimport time
%0A%0AFORM_T
@@ -2344,32 +2344,134 @@
submit_form()
+%0A%0A # now that this is time based we have to sleep first. this is annoying%0A time.sleep(2)
%0A resp =
@@ -2852,32 +2852,24 @@
tatus_code)%0A
-
%0A sub
@@ -2871,32 +2871,54 @@
submit_form()%0A
+ time.sleep(2)%0A
resp = g
|
22e3933f6a9ff6c424d1a1f6d225f32c234359c5
|
add leetcode Pascal's Triangle II
|
leetcode/PascalTriangleII/solution.py
|
leetcode/PascalTriangleII/solution.py
|
Python
| 0.001927
|
@@ -0,0 +1,437 @@
+# -*- coding:utf-8 -*-%0Aclass Solution:%0A # @return a list of integers%0A def getRow(self, rowIndex):%0A if rowIndex == 0:%0A return %5B1%5D%0A ret = %5B1%5D%0A begin = 1%0A while rowIndex %3E 0:%0A ret.append(ret%5B-1%5D * rowIndex / begin)%0A rowIndex -= 1%0A begin += 1%0A return ret%0A%0A%0Aif __name__ == '__main__':%0A s = Solution()%0A for x in xrange(5):%0A print x, s.getRow(x)%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.