commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
d26069ddbb35a10f4a368c855d94d1dde1872a82
|
Add better solution for etl
|
etl/etl.better.py
|
etl/etl.better.py
|
Python
| 0.000178
|
@@ -0,0 +1,287 @@
+def transform(d):%0A '''Just reverse the dictionary'''%0A return %7Bl.lower(): p for p, letters in d.items() for l in letters%7D%0A%0Adef transform(strs):%0A result = %7B%7D%0A for k,v in strs.items():%0A for i in v:%0A result.update(%7Bi.lower():k%7D)%0A return dict(result.items())%0A
|
|
54b94346d2669347cf2a9a2b24df6b657cf80c5b
|
Mask computation utilities (from nipy).
|
nisl/mask.py
|
nisl/mask.py
|
Python
| 0
|
@@ -0,0 +1,2941 @@
+import numpy as np%0Afrom scipy import ndimage%0A%0A%0A###############################################################################%0A# Operating on connect component%0A###############################################################################%0A%0A%0Adef largest_cc(mask):%0A %22%22%22 Return the largest connected component of a 3D mask array.%0A%0A Parameters%0A -----------%0A mask: 3D boolean array%0A 3D array indicating a mask.%0A%0A Returns%0A --------%0A mask: 3D boolean array%0A 3D array indicating a mask, with only one connected component.%0A %22%22%22%0A # We use asarray to be able to work with masked arrays.%0A mask = np.asarray(mask)%0A labels, label_nb = ndimage.label(mask)%0A if not label_nb:%0A raise ValueError('No non-zero values: no connected components')%0A if label_nb == 1:%0A return mask.astype(np.bool)%0A label_count = np.bincount(labels.ravel())%0A # discard 0 the 0 label%0A label_count%5B0%5D = 0%0A return labels == label_count.argmax()%0A%0A%0A###############################################################################%0A# Utilities to calculate masks%0A###############################################################################%0A%0A%0Adef compute_mask(mean_volume, m=0.2, M=0.9, cc=True,%0A exclude_zeros=False):%0A %22%22%22%0A Compute a mask file from fMRI data in 3D or 4D ndarrays.%0A%0A Compute and write the mask of an image based on the grey level%0A This is based on an heuristic proposed by T.Nichols:%0A find the least dense point of the histogram, between fractions%0A m and M of the total image histogram.%0A%0A In case of failure, it is usually advisable to increase m.%0A%0A Parameters%0A ----------%0A mean_volume : 3D ndarray%0A mean EPI image, used to compute the threshold for the mask.%0A m : float, optional%0A lower fraction of the histogram to be discarded.%0A M: float, optional%0A upper fraction of the histogram to be discarded.%0A cc: boolean, optional%0A if cc is True, only the largest connect component is 
kept.%0A exclude_zeros: boolean, optional%0A Consider zeros as missing values for the computation of the%0A threshold. This option is useful if the images have been%0A resliced with a large padding of zeros.%0A%0A Returns%0A -------%0A mask : 3D boolean ndarray%0A The brain mask%0A %22%22%22%0A sorted_input = np.sort(mean_volume.reshape(-1))%0A if exclude_zeros:%0A sorted_input = sorted_input%5Bsorted_input != 0%5D%0A limite_inf = np.floor(m * len(sorted_input))%0A limite_sup = np.floor(M * len(sorted_input))%0A%0A delta = sorted_input%5Blimite_inf + 1:limite_sup + 1%5D %5C%0A - sorted_input%5Blimite_inf:limite_sup%5D%0A ia = delta.argmax()%0A threshold = 0.5 * (sorted_input%5Bia + limite_inf%5D%0A + sorted_input%5Bia + limite_inf + 1%5D)%0A%0A mask = (mean_volume %3E= threshold)%0A%0A if cc:%0A mask = largest_cc(mask)%0A%0A return mask.astype(bool)%0A
|
|
8e6c1a296be39c5cd1e75d5ff9974f80449690e3
|
Add VVT tool class
|
benchexec/tools/vvt.py
|
benchexec/tools/vvt.py
|
Python
| 0
|
@@ -0,0 +1,1594 @@
+%22%22%22%0ABenchExec is a framework for reliable benchmarking.%0AThis file is part of BenchExec.%0A%0ACopyright (C) 2007-2015 Dirk Beyer%0AAll rights reserved.%0A%0ALicensed under the Apache License, Version 2.0 (the %22License%22);%0Ayou may not use this file except in compliance with the License.%0AYou may obtain a copy of the License at%0A%0A http://www.apache.org/licenses/LICENSE-2.0%0A%0AUnless required by applicable law or agreed to in writing, software%0Adistributed under the License is distributed on an %22AS IS%22 BASIS,%0AWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0ASee the License for the specific language governing permissions and%0Alimitations under the License.%0A%22%22%22%0Aimport benchexec.tools.template%0Aimport benchexec.util as util%0Aimport benchexec.result as result%0A%0Aclass Tool(benchexec.tools.template.BaseTool):%0A %22%22%22%0A Tool wrapper for the Vienna Verification Toolkit%0A %22%22%22%0A%0A def executable(self):%0A return util.find_executable('vvt-svcomp-bench.sh')%0A%0A def version(self,executable):%0A return 'prerelease'%0A%0A def name(self):%0A return 'VVT'%0A%0A def cmdline(self, executable, options, tasks, propertyfile, rlimits):%0A return %5Bexecutable%5D + tasks%0A%0A def determine_result(self, returncode, returnsignal, output, isTimeOut):%0A try:%0A if %22No bug found.%5Cn%22 in output:%0A return result.RESULT_TRUE_PROP%0A elif %22Bug found:%5Cn%22 in output:%0A return result.RESULT_FALSE_REACH%0A else:%0A return result.RESULT_UNKNOWN%0A except Exception:%0A return result.RESULT_UNKNOWN%0A
|
|
f333b9c5741a7ffbf49caa0a6130831a834b944f
|
Add unit tests for recent bugfix and move operation
|
test_dotfiles.py
|
test_dotfiles.py
|
Python
| 0
|
@@ -0,0 +1,2452 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Aimport os%0Aimport shutil%0Aimport tempfile%0Aimport unittest%0A%0Afrom dotfiles import core%0A%0A%0Adef touch(fname, times=None):%0A with file(fname, 'a'):%0A os.utime(fname, times)%0A%0A%0Aclass DotfilesTestCase(unittest.TestCase):%0A%0A def setUp(self):%0A %22%22%22Create a temporary home directory.%22%22%22%0A%0A self.home = tempfile.mkdtemp()%0A%0A # create a repository for the tests to use%0A self.repo = os.path.join(self.home, 'Dotfiles')%0A os.mkdir(self.repo)%0A%0A def tearDown(self):%0A %22%22%22Delete the temporary home directory and its contents.%22%22%22%0A%0A shutil.rmtree(self.home)%0A%0A def test_force_sync_directory(self):%0A %22%22%22Test forced sync when the dotfile is a directory.%0A%0A I installed the lastpass chrome extension which stores a socket in%0A ~/.lastpass. So I added that directory as an external to /tmp and%0A attempted a forced sync. An error occurred because sync() calls%0A os.remove() as it mistakenly assumes the dotfile is a file and not%0A a directory.%0A %22%22%22%0A%0A os.mkdir(os.path.join(self.home, '.lastpass'))%0A externals = %7B'.lastpass': '/tmp'%7D%0A%0A dotfiles = core.Dotfiles(home=self.home, repo=self.repo, prefix='',%0A ignore=%5B%5D, externals=externals)%0A%0A dotfiles.sync(force=True)%0A%0A self.assertEqual(%0A os.path.realpath(os.path.join(self.home, '.lastpass')), '/tmp')%0A%0A def test_move_repository(self):%0A %22%22%22Test the move() method for a Dotfiles repository.%22%22%22%0A%0A touch(os.path.join(self.repo, 'bashrc'))%0A%0A dotfiles = core.Dotfiles(%0A home=self.home, repo=self.repo, prefix='',%0A ignore=%5B%5D, force=True, externals=%7B%7D)%0A%0A dotfiles.sync()%0A%0A # make sure sync() did the right thing%0A self.assertEqual(%0A os.path.realpath(os.path.join(self.home, '.bashrc')),%0A os.path.join(self.repo, 'bashrc'))%0A%0A target = os.path.join(self.home, 'MyDotfiles')%0A%0A dotfiles.move(target)%0A%0A 
self.assertTrue(os.path.exists(os.path.join(target, 'bashrc')))%0A self.assertEqual(%0A os.path.realpath(os.path.join(self.home, '.bashrc')),%0A os.path.join(target, 'bashrc'))%0A%0A%0Adef suite():%0A suite = unittest.TestLoader().loadTestsFromTestCase(DotfilesTestCase)%0A return suite%0A%0Aif __name__ == '__main__':%0A unittest.TextTestRunner().run(suite())%0A
|
|
2b0e13039dad8d116a5719540004bed317bb6960
|
Add tests and fixtures for the Organizations API wrapper
|
tests/api/test_organizations.py
|
tests/api/test_organizations.py
|
Python
| 0
|
@@ -0,0 +1,1103 @@
+# -*- coding: utf-8 -*-%0A%0A%22%22%22pytest Licenses functions, fixtures and tests.%22%22%22%0A%0A%0Aimport pytest%0A%0Aimport ciscosparkapi%0A%0A%0A# Helper Functions%0A%0Adef list_organizations(api, max=None):%0A return list(api.organizations.list(max=max))%0A%0A%0Adef get_organization_by_id(api, orgId):%0A return api.organizations.get(orgId)%0A%0A%0Adef is_valid_organization(obj):%0A return isinstance(obj, ciscosparkapi.Organization) and obj.id is not None%0A%0Adef are_valid_organizations(iterable):%0A return all(%5Bis_valid_organization(obj) for obj in iterable%5D)%0A%0A%0A# pytest Fixtures%0A%0A@pytest.fixture(scope=%22session%22)%0Adef organizations_list(api):%0A return list_organizations(api)%0A%0A%0A# Tests%0A%0Aclass TestOrganizationsAPI(object):%0A %22%22%22Test OrganizationsAPI methods.%22%22%22%0A%0A def test_list_organizations(self, organizations_list):%0A assert are_valid_organizations(organizations_list)%0A%0A def test_get_organization_by_id(self, api, organizations_list):%0A assert len(organizations_list) %3E= 1%0A org_id = organizations_list%5B0%5D.id%0A org = get_organization_by_id(api, orgId=org_id)%0A assert is_valid_organization(org)%0A
|
|
feea11952ceab35523052a93a8ca6ff822d1357c
|
add 141
|
vol3/141.py
|
vol3/141.py
|
Python
| 0.999994
|
@@ -0,0 +1,664 @@
+import math%0A%0Adef gcd(a, b):%0A if a %25 b == 0:%0A return b%0A return gcd(b, a %25 b)%0A%0Adef is_square(n):%0A sqrt_n = int(math.sqrt(n))%0A return n == sqrt_n * sqrt_n%0A%0Aif __name__ == %22__main__%22:%0A L = 10 ** 12%0A s = set()%0A for a in xrange(2, 10000):%0A for b in xrange(1, a):%0A if a * a * a * b + b * b %3E= L:%0A break%0A if gcd(a, b) %3E 1:%0A continue%0A%0A c = 1%0A while True:%0A n = a * a * a * b * c * c + b * b * c%0A if n %3E= L:%0A break%0A if is_square(n):%0A s.add(n)%0A c += 1%0A print sum(s)%0A
|
|
284c29d257b7c6902b5973ca05278ee5b05571e9
|
test subclassing!
|
tests/delivery/test_frontend.py
|
tests/delivery/test_frontend.py
|
Python
| 0
|
@@ -0,0 +1,533 @@
+from wizard_builder.tests import test_frontend as wizard_builder_tests%0A%0A%0Aclass EncryptedFrontendTest(wizard_builder_tests.FrontendTest):%0A secret_key = 'soooooo seekrit'%0A%0A def setUp(self):%0A super().setUp()%0A self.browser.find_element_by_css_selector(%0A '%5Bname=%22key%22%5D').send_keys(self.secret_key)%0A self.browser.find_element_by_css_selector(%0A '%5Bname=%22key_confirmation%22%5D').send_keys(self.secret_key)%0A self.browser.find_element_by_css_selector(%0A '%5Btype=%22submit%22%5D').click()%0A
|
|
bd9f509bbd97f3a28eb24740dc08bc153cf82613
|
add voronoi cell class
|
order/avc.py
|
order/avc.py
|
Python
| 0.000031
|
@@ -0,0 +1,1428 @@
+###############################################################################%0A# -*- coding: utf-8 -*-%0A# Order: A tool to characterize the local structure of liquid water %0A# by geometric order parameters%0A# %0A# Authors: Pu Du%0A# %0A# Released under the MIT License%0A###############################################################################%0A%0Afrom __future__ import division, print_function%0Afrom six.moves import range%0A%0Aimport numpy as np%0A%0Aclass VoronoiCell(object):%0A %22%22%22asphericity of the Voronoi cell%22%22%22%0A def __init__(self):%0A pass%0A %0A def compute_vc(self, planes):%0A %22%22%22compute the Voronoi cell%22%22%22%0A #total area of all planes%0A S = 0.0%0A%0A #total volume of Voronoi polyhedron%0A V = 0.0%0A%0A #compute S and V%0A for plane in planes:%0A %0A outter_p = 0.0%0A for i in range(1, len(plane)-1):%0A outter_p += np.linalg.norm(np.outter((plane%5Bi%5D - plane%5B0%5D),%0A (plane%5Bi+1%5D - plane%5B0%5D)))%0A%0A vol = 0.0%0A for i in range(1, len(plane)-1):%0A vol += np.linalg.norm(np.dot(np.outter(plane%5B0%5D, plane%5Bi%5D),%0A plane%5Bi+1%5D)) %0A outter_p *= 0.5%0A vol *= 1 / 6%0A S += outter_p%0A V += vol%0A%0A #voronoi cell%0A eta = S ** 3 / (36 * np.pi * V ** 2)%0A%0A return eta
|
|
6e199bec3816a4a36d891e72f8de9819848bda65
|
Define ResourceDuplicatedDefinedError.
|
electro/errors.py
|
electro/errors.py
|
Python
| 0
|
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-%0A%0Aclass ResourceDuplicatedDefinedError(Exception):%0A pass%0A
|
|
f527eeb4792ea5630965d72ae73b0331fd465dea
|
add indicator migration
|
indicators/migrations/0002_auto_20170105_0205.py
|
indicators/migrations/0002_auto_20170105_0205.py
|
Python
| 0
|
@@ -0,0 +1,1273 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.4 on 2017-01-05 10:05%0Afrom __future__ import unicode_literals%0A%0Afrom decimal import Decimal%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('indicators', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='collecteddata',%0A name='achieved',%0A field=models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=20, verbose_name=b'Achieved'),%0A ),%0A migrations.AlterField(%0A model_name='collecteddata',%0A name='targeted',%0A field=models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=20, verbose_name=b'Targeted'),%0A ),%0A migrations.AlterField(%0A model_name='historicalcollecteddata',%0A name='achieved',%0A field=models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=20, verbose_name=b'Achieved'),%0A ),%0A migrations.AlterField(%0A model_name='historicalcollecteddata',%0A name='targeted',%0A field=models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=20, verbose_name=b'Targeted'),%0A ),%0A %5D%0A
|
|
103de382d7c9c0dde7aa4bc2f4756dc71ee45335
|
define pytest fixture for path to PR2 database
|
test/conftest.py
|
test/conftest.py
|
Python
| 0
|
@@ -0,0 +1,265 @@
+# content of conftest.py%0Aimport pytest%0A%0Adef pytest_addoption(parser):%0A parser.addoption(%22--uchime-ref-db-fp%22, action=%22store%22, help=%22path to PR2 database%22)%0A%0A@pytest.fixture%0Adef uchime_ref_db_fp(request):%0A return request.config.getoption(%22--uchime-ref-db-fp%22)%0A
|
|
d15564cf234def0f37c958915e0d7a99cad439e4
|
add a test for overflow
|
tests/test_jnitable_overflow.py
|
tests/test_jnitable_overflow.py
|
Python
| 0.000001
|
@@ -0,0 +1,219 @@
+# run it, and check with Java VisualVM if we are eating too much memory or not!%0Afrom jnius import autoclass%0A%0AStack = autoclass('java.util.Stack')%0Ai = 0%0Awhile True:%0A i += 1%0A stack = Stack()%0A stack.push('hello')%0A
|
|
5d3918c885f430e79e8283533ad5eb3a84ffecc7
|
Add migration code for updating lease status
|
blazar/db/migration/alembic_migrations/versions/75a74e4539cb_update_lease_status.py
|
blazar/db/migration/alembic_migrations/versions/75a74e4539cb_update_lease_status.py
|
Python
| 0.000006
|
@@ -0,0 +1,1248 @@
+# Copyright 2018 OpenStack Foundation.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or%0A# implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0A%22%22%22update lease status%0A%0ARevision ID: 75a74e4539cb%0ARevises: e66f199a5414%0ACreate Date: 2018-01-23 11:05:56.753579%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = '75a74e4539cb'%0Adown_revision = 'e66f199a5414'%0A%0Afrom blazar.db import api as db_api%0Afrom blazar.status import LeaseStatus as ls%0A%0A%0Adef upgrade():%0A leases = db_api.lease_get_all()%0A for lease in leases:%0A db_api.lease_update(lease%5B'id'%5D,%0A %7B'status': ls.derive_stable_status(lease%5B'id'%5D)%7D)%0A%0A%0Adef downgrade():%0A leases = db_api.lease_get_all()%0A for lease in leases:%0A db_api.lease_update(lease%5B'id'%5D,%0A %7B'status': None%7D)%0A
|
|
2248590ed1bcf33b17f46e4c61747f5a7cb5e92d
|
remove mutable argument: when a mutable value as list or dictionary is in a default value for an argument. Default argument values are evaluated only once at function definition time, which means that modifying the default value of the argument will affect all subsequent calls of the function.
|
src/collectors/tcp/test/testtcp.py
|
src/collectors/tcp/test/testtcp.py
|
#!/usr/bin/python
################################################################################
from test import *
from diamond.collector import Collector
from tcp import TCPCollector
################################################################################
class TestTCPCollector(CollectorTestCase):
def setUp(self, allowed_names = []):
config = get_collector_config('TCPCollector', {
'allowed_names' : allowed_names,
'interval': 1
})
self.collector = TCPCollector(config, None)
@patch('os.access', Mock(return_value=True))
@patch('__builtin__.open')
@patch.object(Collector, 'publish')
def test_should_open_proc_net_netstat(self, publish_mock, open_mock):
open_mock.return_value = StringIO('')
self.collector.collect()
open_mock.assert_called_once_with('/proc/net/netstat')
@patch('__builtin__.open')
@patch.object(Collector, 'publish')
def test_should_work_with_synthetic_data(self, publish_mock, open_mock):
self.setUp([ 'A', 'C' ])
open_mock.return_value = StringIO('''
TcpExt: A B C
TcpExt: 0 0 0
'''.strip())
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
open_mock.return_value = StringIO('''
TcpExt: A B C
TcpExt: 0 1 2
'''.strip())
self.collector.collect()
self.assertEqual(len(publish_mock.call_args_list), 2)
self.assertEqual(publish_mock.call_args_list, [
(('A', 0.0, 0), {}),
(('C', 2.0, 0), {})
])
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
self.setUp([ 'ListenOverflows', 'ListenDrops', 'TCPLoss', 'TCPTimeouts' ])
TCPCollector.PROC = self.getFixturePath('proc_net_netstat')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
TCPCollector.PROC = self.getFixturePath('proc_net_netstat_2')
self.collector.collect()
metrics = {
'ListenOverflows' : 0,
'ListenDrops' : 0,
'TCPLoss' : 188,
'TCPTimeouts' : 15265
}
self.setDocExample(self.collector.__class__.__name__, metrics)
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
Python
| 0
|
@@ -327,16 +327,79 @@
Up(self,
+ allowed_names=None):%0A if not allowed_names:%0A
allowed
@@ -409,18 +409,16 @@
mes = %5B%5D
-):
%0A
|
308e34b686686d3c42466012c864d7cc5d0f6799
|
Create go_fixup_fptrs.py
|
scripts/go/go_fixup_fptrs.py
|
scripts/go/go_fixup_fptrs.py
|
Python
| 0.000001
|
@@ -0,0 +1,1873 @@
+%22%22%22%0Awhen IDA's auto-discovery of functions in 64-bit Windows Go executables fails,%0Ascan for global (.rdata) pointers into the code section (.text) and assume these are function pointers.%0A%22%22%22%0Aimport idc%0Aimport ida_name%0Aimport ida_auto%0Aimport ida_bytes%0Aimport idautils%0A%0A%0Adef enum_segments():%0A for segstart in idautils.Segments():%0A segend = idc.get_segm_end(segstart)%0A segname = idc.get_segm_name(segstart)%0A yield segstart, segend, segname%0A%0A%0Adef find_pointers(start, end):%0A for va in range(start, end-0x8):%0A ptr = ida_bytes.get_qword(va)%0A if idc.get_segm_start(ptr) == idc.BADADDR:%0A continue%0A%0A yield va, ptr%0A%0A%0Adef is_head(va):%0A return ida_bytes.is_head(idc.get_full_flags(va))%0A%0A%0Adef get_head(va):%0A if is_head(va):%0A return va%0A else:%0A return idc.prev_head(va)%0A%0A%0Adef is_code(va):%0A if is_head(va):%0A flags = idc.get_full_flags(va)%0A return ida_bytes.is_code(flags)%0A else:%0A head = get_head(va)%0A return is_code(head)%0A%0A%0Adef is_unknown(va):%0A return ida_bytes.is_unknown(idc.get_full_flags(va))%0A%0A%0Adef main():%0A for segstart, segend, segname in enum_segments():%0A if segname not in ('.rdata', ):%0A continue%0A%0A for src, dst in find_pointers(segstart, segend):%0A if idc.get_segm_name(dst) != %22.text%22:%0A continue%0A%0A if is_code(dst):%0A continue%0A%0A print(%22new function pointer: 0x%25x -%3E 0x%25x%22 %25 (src, dst))%0A%0A ida_auto.auto_make_code(dst)%0A ida_auto.auto_make_proc(dst)%0A%0A ida_bytes.del_items(src, 8)%0A ida_bytes.create_data(src, idc.FF_QWORD, 8, idc.BADADDR)%0A # this doesn't seem to always work :-(%0A idc.op_plain_offset(src, -1, 0)%0A ida_name.set_name(src, %22j_%25s_%25x%22 %25 (src, dst))%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
08fcba713315b4ac29ed30f437b7c5c0b1da5a9d
|
Create make_upper_case.py
|
make_upper_case.py
|
make_upper_case.py
|
Python
| 0.000391
|
@@ -0,0 +1,230 @@
+def sillycase(string):%0A half = round(len(string)/2) // Find the half index%0A return string%5B:half%5D.lower() + string%5Bhalf:%5D.upper() // If you only want certain letters to be upper case //%0A
|
|
9e986214aaf6beef5b1778254cc348006a828c04
|
Create MaximalSquare_001.py
|
leetcode/221-Maximal-Square/MaximalSquare_001.py
|
leetcode/221-Maximal-Square/MaximalSquare_001.py
|
Python
| 0.000018
|
@@ -0,0 +1,1247 @@
+# brute force, optimized later%0Aclass Solution(object):%0A def maximalSquare(self, matrix):%0A %22%22%22%0A :type matrix: List%5BList%5Bstr%5D%5D%0A :rtype: int%0A %22%22%22%0A if len(matrix) == 0 or len(matrix%5B0%5D) == 0:%0A return 0%0A %0A maxv = 0%0A for i in range(len(matrix)):%0A for j in range(len(matrix%5B0%5D)):%0A if matrix%5Bi%5D%5Bj%5D == '1':%0A area = self.getArea(i, j, matrix)%0A if area %3E maxv:%0A maxv = area%0A return maxv%0A %0A def getArea(self, i, j, matrix):%0A m, n = len(matrix), len(matrix%5B0%5D)%0A length, flag = 1, False%0A for l in range(1, min(m - i, n - j)):%0A #print 'len: ' + str(l)%0A for k in range(j, j + l + 1):%0A #print i + 1, k%0A if matrix%5Bi + l%5D%5Bk%5D == '0':%0A flag = True%0A break%0A for k in range(i, i + l + 1):%0A #print k, j + 1%0A if matrix%5Bk%5D%5Bj + l%5D == '0':%0A flag = True%0A break%0A if flag:%0A break%0A length += 1%0A %0A #print i, j, length * length%0A return length * length%0A
|
|
6b6c5b836b282c53fc5a337942d187769d0a87ed
|
Add cli module.
|
fapistrano/cli.py
|
fapistrano/cli.py
|
Python
| 0
|
@@ -0,0 +1,1501 @@
+# -*- coding: utf-8 -*-%0A%0Aimport click%0Aimport yaml%0Afrom fabric.api import env as fabenv, local, execute%0Afrom fapistrano.app import init_cli%0Afrom fapistrano.utils import with_configs, register_role, register_env, _apply_env_role_config%0Afrom fapistrano import deploy%0A%0A@click.group()%0A@click.option('-d', '--deployfile', default='deploy.yml')%0Adef fap(deployfile):%0A with open(deployfile, 'rb') as f:%0A conf = yaml.load(f.read())%0A init_cli(conf)%0A%0A@fap.command()%0A@click.option('-r', '--role', required=True, help='deploy role, for example: production, staging')%0A@click.option('-e', '--env', required=True, help='deploy env, for example: app, worker, cron')%0Adef release(role, env):%0A fabenv.role = role%0A fabenv.env = env%0A _apply_env_role_config()%0A execute(deploy.release)%0A%0A@fap.command()%0A@click.option('-r', '--role', required=True, help='deploy role, for example: production, staging')%0A@click.option('-e', '--env', required=True, help='deploy env, for example: app, worker, cron')%0Adef rollback(role, env):%0A fabenv.role = role%0A fabenv.env = env%0A _apply_env_role_config()%0A execute(deploy.rollback)%0A%0A@fap.command()%0A@click.option('-r', '--role', required=True, help='deploy role, for example: production, staging')%0A@click.option('-e', '--env', required=True, help='deploy env, for example: app, worker, cron')%0Adef restart(role, env):%0A fabenv.role = role%0A fabenv.env = env%0A _apply_env_role_config()%0A execute(deploy.rollback)%0A%0Aif __name__ == '__main__':%0A fap()%0A
|
|
9341d2192da8cbaea734641aec9567a1035aa1ee
|
Add suffix list
|
scripts/update-suffixlist.py
|
scripts/update-suffixlist.py
|
Python
| 0.000004
|
@@ -0,0 +1,1704 @@
+#!/usr/bin/env python%0Aimport os%0Aimport urllib2 as urllib%0Aimport anyjson as json%0A%0A%0AURL_LIST = %22http://mxr.mozilla.org/mozilla-central/source/netwerk/dns/src/effective_tld_names.dat?raw=1%22%0A%0A# generate json%0Aprint 'downloading suffix list..'%0Arules = %7B%7D%0Alst = urllib.urlopen(URL_LIST).read()%0Aprint 'processing list..'%0Alines = lst.split('%5Cn')%0Afor i,line in enumerate(lines):%0A if line%5B:2%5D == '//' or len(line) == 0:%0A continue # skip comments%0A EXCEPT = line%5B0%5D == '!'%0A if EXCEPT: # exception rule%0A line = line%5B1:%5D%0A doms = line.split('.')%0A lst = rules%0A # find node to update%0A for d in reversed(doms):%0A node = lst.get(d, None)%0A if not node:%0A node = %7B%7D%0A lst%5Bd%5D = node%0A lst = node%0A if EXCEPT:%0A lst%5B'!'%5D = 1;%0Ajson = json.serialize(rules).replace(' ','')%0A%0A# functions for checking domains%0Adef get_reg_domain(rules, doms):%0A node = rules.get(doms%5B0%5D,None)%0A if node == None: node = rules.get('*',None)%0A if node == None or (len(node) == 1 and node%5B'!'%5D == 1):%0A return doms%5B0%5D%0A elif len(doms) == 1:%0A return None%0A reg = get_reg_domain(node, doms%5B1:%5D)%0A if(reg != None):%0A return '%25s.%25s' %25 (reg, doms%5B0%5D)%0A%0Adef get_host(domain):%0A doms = list(reversed(domain.split('.')))%0A return get_reg_domain(rules, doms)%0A%0A# test the list%0Aprint 'testing list..'%0Atests = %7B'qwe.parliament.co.uk': 'parliament.co.uk',%0A 'foo.bar.version2.dk': 'version2.dk',%0A 'ecs.soton.ac.uk': 'soton.ac.uk'%7D%0Afor (test,res) in tests.items():%0A assert get_host(test) == res%0A%0A# output new list as javascript%0Aprint 'writing list..'%0Afile('../data/suffix-list.js','w').write('suffix_list=%25s;' %25 json);%0A%0Aprint 'done.'%0A
|
|
a1d95beccd0f0f332005cd133bdd660fbe649467
|
Add a benchmarking script.
|
benchmarking/perf_cmp.py
|
benchmarking/perf_cmp.py
|
Python
| 0
|
@@ -0,0 +1,1189 @@
+#!/usr/bin/env python%0A%0A%22%22%22%0ATODO: Change the module doc.%0A%22%22%22%0A%0Afrom __future__ import division%0A%0A__author__ = %22shyuepingong%22%0A__version__ = %220.1%22%0A__maintainer__ = %22Shyue Ping Ong%22%0A__email__ = %22shyuep@gmail.com%22%0A__status__ = %22Beta%22%0A__date__ = %2211/19/12%22%0A%0Aimport numpy as np%0A%0Afrom scipy.spatial import Delaunay%0Afrom pyhull.qconvex import get_vertices%0Afrom pymatgen.command_line.qhull_caller import qconvex%0A%0A%0Adata = np.random.randn(100,3)%0A%0Adef scipy_test():%0A return Delaunay(data).convex_hull%0A%0Adef pyhull_test():%0A return get_vertices(data)%0A%0Adef pymatgen_ext_test():%0A return qconvex(data)%0A%0Aif __name__ == %22__main__%22:%0A import timeit%0A print %22Scipy results%22%0A print timeit.timeit(%22scipy_test()%22,%0A setup=%22from __main__ import scipy_test%22,%0A number=1)%0A print%0A print %22pymatgen_ext_test results%22%0A print timeit.timeit(%22pymatgen_ext_test()%22,%0A setup=%22from __main__ import pymatgen_ext_test%22,%0A number=1)%0A print%0A print %22pyhull results%22%0A print timeit.timeit(%22pyhull_test()%22,%0A setup=%22from __main__ import pyhull_test%22,%0A number=1)%0A print
|
|
8de30c6d4b5784af406d75e04feeb7c6431243d6
|
add fermi setup
|
astroquery/fermi/setup_package.py
|
astroquery/fermi/setup_package.py
|
Python
| 0
|
@@ -0,0 +1,220 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst%0Aimport os%0A%0A%0Adef get_package_data():%0A paths_test = %5Bos.path.join('data', '*.html')%5D%0A%0A return %7B%0A 'astroquery.fermi.tests': paths_test,%0A %7D%0A
|
|
59a05f592ffc4423023f1803efcf427896ab5d41
|
Add lc0695_max_area_of_island.py
|
lc0695_max_area_of_island.py
|
lc0695_max_area_of_island.py
|
Python
| 0.000004
|
@@ -0,0 +1,1147 @@
+%22%22%22Leetcode 695. Max Area of Island%0AMedium%0A%0AURL: https://leetcode.com/problems/max-area-of-island/%0A%0AGiven a non-empty 2D array grid of 0's and 1's, an island is a group of 1's%0A(representing land) connected 4-directionally (horizontal or vertical.)%0AYou may assume all four edges of the grid are surrounded by water.%0A%0AFind the maximum area of an island in the given 2D array.%0A(If there is no island, the maximum area is 0.)%0A%0AExample 1:%0A%5B%5B0,0,1,0,0,0,0,1,0,0,0,0,0%5D,%0A %5B0,0,0,0,0,0,0,1,1,1,0,0,0%5D,%0A %5B0,1,1,0,1,0,0,0,0,0,0,0,0%5D,%0A %5B0,1,0,0,1,1,0,0,1,0,1,0,0%5D,%0A %5B0,1,0,0,1,1,0,0,1,1,1,0,0%5D,%0A %5B0,0,0,0,0,0,0,0,0,0,1,0,0%5D,%0A %5B0,0,0,0,0,0,0,1,1,1,0,0,0%5D,%0A %5B0,0,0,0,0,0,0,1,1,0,0,0,0%5D%5D%0AGiven the above grid, return 6. Note the answer is not 11,%0Abecause the island must be connected 4-directionally.%0A%0AExample 2:%0A%5B%5B0,0,0,0,0,0,0,0%5D%5D%0AGiven the above grid, return 0.%0A%0ANote: The length of each dimension in the given grid does not exceed 50.%0A%22%22%22%0A%0Aclass Solution(object):%0A def maxAreaOfIsland(self, grid):%0A %22%22%22%0A :type grid: List%5BList%5Bint%5D%5D%0A :rtype: int%0A %22%22%22%0A pass%0A%0A%0Adef main():%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
b8b191a380ef4ab0701793c2e0ac664b05c4c505
|
Add simple word2vec to train model
|
analysis/word2vec.py
|
analysis/word2vec.py
|
Python
| 0.000011
|
@@ -0,0 +1,2326 @@
+import numpy as np%0Aimport re%0Afrom nltk.corpus import stopwords%0Aimport nltk%0Aimport logging%0Afrom gensim.models import word2vec%0A%0A%0Adef get_dataset():%0A files = %5B'./analysis/input/negative_tweets.txt', './analysis/input/neutral_tweets.txt', './analysis/input/positive_tweets.txt'%5D%0A%0A x = %5B%5D%0A for file in files:%0A s = %5B%5D%0A with open(file, 'r') as f:%0A for line in f:%0A s.append(line.strip())%0A%0A assert len(s) == 1367%0A x.extend(s)%0A%0A y = np.array(%5B-1%5D * 1367 + %5B0%5D * 1367 + %5B1%5D * 1367)%0A return x, y%0A%0A%0Adef sentence_to_wordlist(sentence, remove_stopwords=False):%0A review_text = re.sub('%5B%5Ea-zA-Z%5D', ' ', sentence)%0A words = review_text.lower().split()%0A if remove_stopwords:%0A stops = set(stopwords.words(%22english%22))%0A words = %5Bw for w in words if not w in stops%5D%0A%0A return words%0A%0A%0Adef tweet_to_sentences(review, tokenizer, remove_stopwords=False):%0A raw_sentences = tokenizer.tokenize(review.strip())%0A sentences = %5B%5D%0A for raw_sentence in raw_sentences:%0A if len(raw_sentence) %3E 0:%0A sentences.append(sentence_to_wordlist(raw_sentence, remove_stopwords))%0A%0A return sentences%0A%0A%0Apunkt_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')%0AX, Y = get_dataset()%0Asentences = %5B%5D%0A%0Aprint('Parsing sentences from training set')%0Afor tweet in X:%0A sentences += tweet_to_sentences(tweet, punkt_tokenizer)%0A%0Alogging.basicConfig(format='%25(asctime)s : %25(levelname)s : %25(message)s', level=logging.INFO)%0Anum_features = 300 # Word vector dimensionality%0Amin_word_count = 10 # Minimum word count%0Anum_workers = 4 # Number of threads to run in parallel%0Acontext = 10 # Context window size%0Adownsampling = 1e-3 # Downsample setting for frequent words%0A%0Aprint('Training model...')%0Amodel = word2vec.Word2Vec(sentences, workers=num_workers, size=num_features, min_count=min_word_count, window=context, sample=downsampling)%0A%0A# If you don't plan to train the model any 
further, calling%0A# init_sims will make the model much more memory-efficient.%0Amodel.init_sims(replace=True)%0A%0A# It can be helpful to create a meaningful model name and%0A# save the model for later use. You can load it later using Word2Vec.load()%0Amodel_name = '300features_40minwords_10context'%0Amodel.save(model_name)%0A%0Aimport code; code.interact(local=dict(globals(), **locals()))%0A
|
|
07631b96d87013a22008e4a6ad94e751f5e0165b
|
Fix get_setting view
|
ckeditor_filebrowser_filer/views.py
|
ckeditor_filebrowser_filer/views.py
|
# -*- coding: utf-8 -*-
import json
from distutils.version import LooseVersion
from django import http
from django.conf import settings
from django.core import urlresolvers
from django.http import HttpResponseRedirect
from filer.models import File
from filer.server.views import server
try:
from filer.models import ThumbnailOption
except ImportError:
from cmsplugin_filer_image.models import ThumbnailOption
def filer_version(request):
import filer
filer_legacy = LooseVersion(filer.__version__) < LooseVersion('1.1')
filer_11 = not filer_legacy and LooseVersion(filer.__version__) < LooseVersion('1.2')
filer_12 = not filer_11 and LooseVersion(filer.__version__) < LooseVersion('1.3')
if filer_11:
version = '1.1'
elif filer_12:
version = '1.2'
else:
version = '1.0'
return http.HttpResponse(version)
def get_setting(request, setting):
setting = 'CKEDITOR_FILEBROWSER_{}'.format(setting).upper()
return http.HttpResponse(int(getattr(settings, setting, False)))
def url_reverse(request):
"""
Reverse the requested URL (passed via GET / POST as `url_name` parameter)
:param request: Request object
:return: The reversed path
"""
if request.method in ('GET', 'POST'):
data = getattr(request, request.method)
url_name = data.get('url_name')
try:
path = urlresolvers.reverse(url_name, args=data.getlist('args'))
(view_func, args, kwargs) = urlresolvers.resolve(path)
return http.HttpResponse(path, content_type='text/plain')
except urlresolvers.NoReverseMatch:
return http.HttpResponse('Error', content_type='text/plain')
return http.HttpResponseNotAllowed(('GET', 'POST'))
def _return_thumbnail(image, thumb_options=None, width=None, height=None):
thumbnail_options = {}
if thumb_options is not None:
thumbnail_options = ThumbnailOption.objects.get(pk=thumb_options).as_dict
if width is not None or height is not None:
width = int(width)
height = int(height)
size = (width, height)
thumbnail_options.update({'size': size})
if thumbnail_options != {}:
thumbnailer = image.easy_thumbnails_thumbnailer
image = thumbnailer.get_thumbnail(thumbnail_options)
return image
return None
def url_image(request, image_id, thumb_options=None, width=None, height=None):
"""
Converts a filer image ID in a complete path
:param request: Request object
:param image_id: Filer image ID
:param thumb_options: ThumbnailOption ID
:param width: user-provided width
:param height: user-provided height
:return: JSON serialized URL components ('url', 'width', 'height')
"""
image = File.objects.get(pk=image_id)
if getattr(image, 'canonical_url', None):
url = image.canonical_url
else:
url = image.url
thumb = _return_thumbnail(image, thumb_options, width, height)
if thumb:
image = thumb
url = image.url
data = {
'url': url,
'width': image.width,
'height': image.height,
}
return http.HttpResponse(json.dumps(data), content_type='application/json')
def thumbnail_options(request):
"""
Returns the requested ThumbnailOption as JSON
:param request: Request object
:return: JSON serialized ThumbnailOption
"""
response_data = [{'id': opt.pk, 'name': opt.name} for opt in ThumbnailOption.objects.all()]
return http.HttpResponse(json.dumps(response_data), content_type="application/json")
def serve_image(request, image_id, thumb_options=None, width=None, height=None):
"""
returns the content of an image sized according to the parameters
:param request: Request object
:param image_id: Filer image ID
:param thumb_options: ThumbnailOption ID
:param width: user-provided width
:param height: user-provided height
:return: JSON serialized URL components ('url', 'width', 'height')
"""
image = File.objects.get(pk=image_id)
if getattr(image, 'canonical_url', None):
url = image.canonical_url
else:
url = image.url
thumb = _return_thumbnail(image, thumb_options, width, height)
if thumb:
return server.serve(request, file_obj=thumb, save_as=False)
else:
return HttpResponseRedirect(url)
|
Python
| 0.000001
|
@@ -966,16 +966,29 @@
upper()%0A
+ try:%0A
retu
@@ -1048,16 +1048,107 @@
alse)))%0A
+ except ValueError:%0A return http.HttpResponse(getattr(settings, setting, False))%0A
%0A%0Adef ur
|
a0333aa80dd6a6baeb24e32deeecd0288419328e
|
Initialize P3_seatingCards
|
books/AutomateTheBoringStuffWithPython/Chapter17/PracticeProjects/P3_seatingCards.py
|
books/AutomateTheBoringStuffWithPython/Chapter17/PracticeProjects/P3_seatingCards.py
|
Python
| 0.000002
|
@@ -0,0 +1,669 @@
+# Chapter%C2%A013 included a practice project to create custom invitations from a list of%0A# guests in a plaintext file. As an additional project, use the pillow module to%0A# create images for custom seating cards for your guests. For each of the guests listed%0A# in the guests.txt, generate an image file with the guest name and some flowery%0A# decoration.%0A#%0A# To ensure that each seating card is the same size, add a black rectangle on the edges%0A# of the invitation image so that when the image is printed out, there will be a%0A# guideline for cutting. The PNG files that Pillow produces are set to 72 pixels per%0A# inch, so a 4%C3%975-inch card would require a 288%C3%97360-pixel image.%0A
|
|
1e45df8375c4e72257defc82137fa570fbb44249
|
add StringOperation to repository
|
StringOperation.py
|
StringOperation.py
|
Python
| 0
|
@@ -0,0 +1,1318 @@
+#encoding = utf-8%0A__author__ = 'lg'%0A%0Alist1 = %5B'java','python','ruby','perl','mac'%5D%0Alist2 = %5B'linux','mac','windows','ruby'%5D%0A%0A#%E4%B8%A4%E4%B8%AAlist%E7%9A%84%E4%BA%A4%E9%9B%86(%E6%B3%95%E4%B8%80) %E6%97%B6%E9%97%B4%E5%A4%8D%E6%9D%82%E5%BA%A6%E4%B8%BAO(n%5E2)%0Adef intersect(a,b):%0A listRes = %5B%5D%0A for i in range(len(a)):%0A for j in range(len(b)):%0A if a%5Bi%5D == b%5Bj%5D:%0A if a%5Bi%5D not in listRes:%0A listRes.append(a%5Bi%5D)%0A return listRes%0A%0A#%E4%B8%A4%E4%B8%AAlist%E7%9A%84%E4%BA%A4%E9%9B%86%EF%BC%88%E6%B3%95%E4%BA%8C%EF%BC%89%E3%80%80%E3%80%80%E6%97%B6%E9%97%B4%E5%A4%8D%E6%9D%82%E5%BA%A6%E4%B8%BAO(n)%0Adef intersect_1(a,b):%0A listRes = %5B%5D%0A for i in range(len(a)):%0A if a%5Bi%5D in b:%0A if a%5Bi%5D not in listRes:%0A listRes.append(a%5Bi%5D)%0A return listRes%0A%0A#%E4%B8%A4%E4%B8%AAlist%E7%9A%84%E5%B7%AE%E9%9B%86%0Adef minus(a,b):%0A listRes = %5B%5D%0A for i in range(len(a)):%0A if a%5Bi%5D not in b:%0A listRes.append(a%5Bi%5D)%0A return listRes%0A%0A# %E6%8C%89%E5%AD%97%E6%AF%8D%E8%A1%A8%E8%BE%93%E5%87%BA%E5%AD%97%E7%AC%A6%E4%B8%B2%0Adef alphabet_output(listPram):%0A sortedList = %5B%5D%0A for i in range(len(listPram)):%0A sortedStrRes = ''%0A sortedStrList = sorted(listPram%5Bi%5D)%0A for j in range(len(sortedStrList)):%0A sortedStrRes += sortedStrList%5Bj%5D%0A sortedList.append(sortedStrRes)%0A print(sortedList)%0A%0A# list1 intersect list2%0AintersectList = intersect_1(list1,list2)%0Aalphabet_output(intersectList)%0A%0A# list1 minus list2%0AminusList = minus(list1,list2)%0Aalphabet_output(minusList)%0A%0A# list2 minus list1%0AminusList_1 = minus(list2,list1)%0Aalphabet_output(minusList_1)%0A
|
|
59d55a5911e99a0886b8c3cc48ee92f247e96e0a
|
add Voronoi
|
Voronoi/Voronoi.py
|
Voronoi/Voronoi.py
|
Python
| 0
|
@@ -0,0 +1,867 @@
+import numpy as np%0Aimport matplotlib.pyplot as plt%0Afrom scipy.spatial import Voronoi, voronoi_plot_2d%0A%0Aimport csv%0A%0ACOUNT_LIMIT = None%0ASAMPLE_LIMIT = 100%0A%0APoints = %5B%5D%0Awith open('cell_info.csv', 'r', encoding='utf_8') as obj_file:%0A csv_file = csv.reader(obj_file)%0A for cnt, line in enumerate(csv_file):%0A if COUNT_LIMIT and cnt %3E= COUNT_LIMIT: # %E5%8F%AA%E8%AF%BB%E5%8F%96%E5%89%8D k %E4%B8%AA%E7%82%B9%0A break%0A Points.append(%5Bfloat(line%5B1%5D), float(line%5B2%5D)%5D) # make up data points%0A%0A# %E9%9A%8F%E6%9C%BA%E6%8A%BD%E5%8F%96 n %E4%B8%AA%E7%82%B9%0Aif SAMPLE_LIMIT:%0A points = np.array(%5BPoints%5Bi%5D for i in np.random.choice(len(Points), size = SAMPLE_LIMIT)%5D)%0Aelse:%0A points = np.array(Points)%0A%0A# compute Voronoi tesselation%0Avor = Voronoi(points)%0A%0A# plot%0Avoronoi_plot_2d(vor)%0A%0A# colorize%0Afor region in vor.regions:%0A if not -1 in region:%0A polygon = %5Bvor.vertices%5Bi%5D for i in region%5D%0A plt.fill(*zip(*polygon))%0A%0Aplt.show()
|
|
de0265b609ab56035544018e368a108b573ae503
|
define the index of filters to prune by examining the classification activations
|
tools/prune_with_classification_guidance.py
|
tools/prune_with_classification_guidance.py
|
Python
| 0
|
@@ -0,0 +1,2476 @@
+import os.path%0Aimport numpy as np%0A%0A# define th CLASSES and indices%0ACLASSES = ('__background__',%0A 'aeroplane', 'bicycle', 'bird', 'boat',%0A 'bottle', 'bus', 'car', 'cat', 'chair',%0A 'cow', 'diningtable', 'dog', 'horse',%0A 'motorbike', 'person', 'pottedplant',%0A 'sheep', 'sofa', 'train', 'tvmonitor')%0Aclass_to_ind = dict(list(zip(CLASSES, list(range(len(CLASSES))))))%0ASUB_CLAS = ('bicycle', 'bus', 'car','motorbike', 'person', 'train')%0A%0A# define path for loading the activations_versus_classes array%0Ahm_path = './activations_res/res.npy'%0A%0Adef rankmin(x):%0A u, inv, counts = np.unique(x, return_inverse=True, return_counts=True)%0A csum = np.zeros_like(counts)%0A csum%5B1:%5D = counts%5B:-1%5D.cumsum()%0A return csum%5Binv%5D%0A%0Adef list_normalizer(ori_list):%0A max_val = ori_list.max()%0A min_val = ori_list.min()%0A if max_val == 0:%0A return ori_list%0A normalized_list = %5B(i-min_val)/(max_val-min_val) for i in ori_list%5D%0A return normalized_list%0A%0Adef detect_diff_one_layer(norm_hm_one_layer):%0A interest_average = np.zeros((norm_hm_one_layer.shape%5B1%5D,))%0A diff_ind = np.zeros((norm_hm_one_layer.shape%5B1%5D,))%0A amplifier = 10%0A for clas in SUB_CLAS:%0A ind = class_to_ind%5Bclas%5D%0A interest_average%5B:%5D += norm_hm_one_layer%5Bind%5D%0A interest_average = interest_average/len(SUB_CLAS)%0A for clas in CLASSES:%0A if clas not in SUB_CLAS:%0A ind = class_to_ind%5Bclas%5D%0A temp = amplifier*(norm_hm_one_layer%5Bind%5D-interest_average)%0A # print 'max: %25d,min: %25d'%25(temp.max(),temp.min())%0A temp%5Btemp%3C0.5%5D = 0%0A temp%5Btemp%3E0.5%5D = 1%0A # diff_ind%5Bclas%5D = np.argsort(temp)%0A diff_ind += temp%0A # diff_ind = np.argsort(diff_ind)%0A return diff_ind%0A%0Adef detect_diff_all(hm_path):%0A hm_all = np.load(hm_path).item()%0A norm_hm_all = %7B%7D%0A hm_ind = %7B%7D # dictionary to record the diff_ind for every layer%0A sub_clas_index = %5Bclass_to_ind%5Bi%5D for i in SUB_CLAS%5D%0A for key in hm_all: # for evey 
layer%0A norm_hm_all%5Bkey%5D = np.zeros(hm_all%5Bkey%5D.shape,np.float32)%0A for i,sub_list in enumerate(hm_all%5Bkey%5D): # for every row in the layer%0A norm_hm_all%5Bkey%5D%5Bi,:%5D = list_normalizer(sub_list)%0A hm_ind%5Bkey%5D = detect_diff_one_layer(norm_hm_all%5Bkey%5D) # %5B21, 64/...%5D%0A return hm_ind%0A%0Aif __name__=='__main__':%0A hm_sorted = detect_diff_all(hm_path)%0A for key in hm_sorted:%0A print key, np.count_nonzero(hm_sorted%5Bkey%5D)%0A
|
|
5068c02e50c54c08a6991e45584c6c9b9bdd5dba
|
add import script for Midlothian
|
polling_stations/apps/data_collection/management/commands/import_midlothian.py
|
polling_stations/apps/data_collection/management/commands/import_midlothian.py
|
Python
| 0
|
@@ -0,0 +1,852 @@
+from data_collection.management.commands import BaseScotlandSpatialHubImporter%0A%0Aclass Command(BaseScotlandSpatialHubImporter):%0A council_id = 'S12000019'%0A council_name = 'Midlothian'%0A elections = %5B'local.midlothian.2017-05-04'%5D%0A%0A def district_record_to_dict(self, record):%0A code = str(record%5B0%5D).strip()%0A%0A %22%22%22%0A MN4H is represented as a polygon which sits on top of MN4G%0A (as opposed to being in an InnerRing inside MN4G).%0A This means any point which is in MN4H is also in MN4G.%0A Fortunately MN4H and MN4G share the same polling%0A station, so in this case we can fix it by just not importing MN4G.%0A If they didn't use the same polling station, this would be an issue.%0A %22%22%22%0A if code == 'MN4H':%0A return None%0A%0A return super().district_record_to_dict(record)%0A
|
|
db846aaa0f35e8888b0b3423539c0a70c9ae16fa
|
Add Source Files
|
source/GoogleSpreadsheets.py
|
source/GoogleSpreadsheets.py
|
Python
| 0.000001
|
@@ -0,0 +1,2208 @@
+%EF%BB%BF# -*- coding: utf-8 -*-%0Aimport sys%0Aimport requests%0Aimport easygui%0A%0A%0A%0Adef enum(*sequential, **named):%0A enums = dict(zip(sequential, range(len(sequential))), **named)%0A return type('Enum', (), enums)%0A%0A%0AMode = enum('PREVIEW', 'EDIT', 'REFRESH')%0Amode = 0%0Asize = 0%0Aparams = ''%0Akey = ''%0Ai = 0%0A%0A%0Afor i in range(len(sys.argv)):%0A if str(sys.argv%5Bi%5D).lower() == %22-mode%22 and (i + 1) %3C len(sys.argv):%0A if str(sys.argv%5Bi + 1%5D).lower() == %22preview%22:%0A mode = Mode.PREVIEW%0A elif str(sys.argv%5Bi + 1%5D).lower() == %22edit%22:%0A mode = Mode.EDIT%0A elif str(sys.argv%5Bi + 1%5D).lower() == %22refresh%22:%0A mode = Mode.REFRESH%0A elif str(sys.argv%5Bi%5D).lower() == %22-size%22:%0A size = int(sys.argv%5Bi + 1%5D)%0A elif str(sys.argv%5Bi%5D).lower() == %22-params%22:%0A params = str(sys.argv%5Bi + 1%5D)%0A paramslist = params.split(';')%0A for i in range(len(paramslist)):%0A if paramslist%5Bi%5D.split('=')%5B0%5D.lower() == 'key':%0A key = paramslist%5Bi%5D.split('=')%5B1%5D%0A i += 1%0A i += 1%0A%0A%0Adef printData(key):%0A if not key == '':%0A proxies = %5B%5D%0A url = ''.join(%5B'https://docs.google.com/spreadsheet/ccc?key=', key, '&output=csv'%5D)%0A csv = requests.get(url, proxies=proxies, verify=False)%0A%0A if csv.headers%5B'Content-Type'%5D == 'text/csv':%0A data = csv.content%0A else:%0A data = %22%22%22Error%0AError In Header%22%22%22%0A%0A else:%0A data = %22%22%22Error%0AError In Key%22%22%22%0A print %22beginDSInfo%22%0A print %22%22%22fileName;#;true%0Acsv_first_row_has_column_names;true;true;%0Acsv_separator;,;true%0Acsv_number_grouping;,;true%0Acsv_number_decimal;.;true%0Acsv_date_format;d.M.yyyy;true%22%22%22%0A print ''.join(%5B'key;', key, ';true'%5D)%0A print %22endDSInfo%22%0A%0A print %22beginData%22%0A print data%0A print %22endData%22%0A%0A%0Aif mode == Mode.PREVIEW:%0A default = ''%0A key = easygui.enterbox(msg=%22Enter GDocs Key%22, title=%22Google Docs Key%22, default=default)%0A key = 
key or default%0A printData(key=key)%0Aelif mode == Mode.EDIT:%0A default = key%0A key = easygui.enterbox(msg=%22Edit GDocs Key%22, title=%22Google Docs Key%22, default=default)%0A key = key or default%0A printData(key)%0Aelif mode == Mode.REFRESH:%0A printData(key)%0A%0A%0A%0A%0A%0A%0A%0A%0A%0A%0A%0A%0A%0A%0A%0A%0A
|
|
107f86c8c20c4d7cc4c81db464ac20607bb31ba9
|
add DBusTube constants to constants.py
|
tests/twisted/constants.py
|
tests/twisted/constants.py
|
"""
Some handy constants for other tests to share and enjoy.
"""
HT_CONTACT = 1
CHANNEL = "org.freedesktop.Telepathy.Channel"
CHANNEL_IFACE_GROUP = CHANNEL + ".Interface.Group"
CHANNEL_TYPE_TUBES = CHANNEL + ".Type.Tubes"
CHANNEL_IFACE_TUBE = CHANNEL + ".Interface.Tube.DRAFT"
CHANNEL_TYPE_STREAM_TUBE = CHANNEL + ".Type.StreamTube.DRAFT"
CHANNEL_TYPE = CHANNEL + '.ChannelType'
TARGET_HANDLE_TYPE = CHANNEL + '.TargetHandleType'
TARGET_HANDLE = CHANNEL + '.TargetHandle'
TARGET_ID = CHANNEL + '.TargetID'
REQUESTED = CHANNEL + '.Requested'
INITIATOR_HANDLE = CHANNEL + '.InitiatorHandle'
INITIATOR_ID = CHANNEL + '.InitiatorID'
CONN = "org.freedesktop.Telepathy.Connection"
CONN_IFACE_REQUESTS = CONN + '.Interface.Requests'
ERRORS = 'org.freedesktop.Telepathy.Errors'
INVALID_ARGUMENT = ERRORS + '.InvalidArgument'
NOT_IMPLEMENTED = ERRORS + '.NotImplemented'
NOT_AVAILABLE = ERRORS + '.NotAvailable'
TUBE_PARAMETERS = CHANNEL_IFACE_TUBE + '.Parameters'
TUBE_STATUS = CHANNEL_IFACE_TUBE + '.Status'
STREAM_TUBE_SERVICE = CHANNEL_TYPE_STREAM_TUBE + '.Service'
TUBE_CHANNEL_STATE_LOCAL_PENDING = 0
TUBE_CHANNEL_STATE_REMOTE_PENDING = 1
TUBE_CHANNEL_STATE_OPEN = 2
TUBE_CHANNEL_STATE_NOT_OFFERED = 3
|
Python
| 0
|
@@ -333,16 +333,74 @@
e.DRAFT%22
+%0ACHANNEL_TYPE_DBUS_TUBE = CHANNEL + %22.Type.DBusTube.DRAFT%22
%0A%0ACHANNE
@@ -1117,16 +1117,81 @@
Service'
+%0ADBUS_TUBE_SERVICE_NAME = CHANNEL_TYPE_DBUS_TUBE + '.ServiceName'
%0A%0ATUBE_C
|
b9b246e1feb728a257b343d4a07fc42ba10bac13
|
Add a wsgi app to our test tg2 app
|
moksha/tests/quickstarts/tg2app/tg2app/wsgi.py
|
moksha/tests/quickstarts/tg2app/tg2app/wsgi.py
|
Python
| 0
|
@@ -0,0 +1,163 @@
+import os%0Afrom paste.deploy import loadapp%0Acfg_path = os.path.join(os.path.dirname(__file__), '..', 'development.ini')%0Aapplication = loadapp('config:' + cfg_path)%0A
|
|
550ce185895a7b32f6bdb0750338ea6d2416ee2a
|
Add merged migration
|
organization/projects/migrations/0006_merge.py
|
organization/projects/migrations/0006_merge.py
|
Python
| 0.000001
|
@@ -0,0 +1,365 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.7 on 2016-09-07 14:02%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('organization-projects', '0005_auto_20160907_1046'),%0A ('organization-projects', '0005_auto_20160907_1138'),%0A %5D%0A%0A operations = %5B%0A %5D%0A
|
|
f6148d7a4e2d080da93d21de2f13b601465c7528
|
Add tf.contrib.checkpoint.CheckpointableBase for isinstance checks.
|
tensorflow/contrib/checkpoint/__init__.py
|
tensorflow/contrib/checkpoint/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for working with object-based checkpoints.
Visualization and inspection:
@@dot_graph_from_checkpoint
@@list_objects
@@object_metadata
Managing dependencies:
@@capture_dependencies
@@Checkpointable
@@CheckpointableObjectGraph
@@NoDependency
@@split_dependency
Checkpointable data structures:
@@List
@@Mapping
@@UniqueNameTracker
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.checkpoint.python.containers import UniqueNameTracker
from tensorflow.contrib.checkpoint.python.split_dependency import split_dependency
from tensorflow.contrib.checkpoint.python.visualize import dot_graph_from_checkpoint
from tensorflow.core.protobuf.checkpointable_object_graph_pb2 import CheckpointableObjectGraph
from tensorflow.python.training.checkpointable.base import Checkpointable
from tensorflow.python.training.checkpointable.base import NoDependency
from tensorflow.python.training.checkpointable.data_structures import List
from tensorflow.python.training.checkpointable.data_structures import Mapping
from tensorflow.python.training.checkpointable.util import capture_dependencies
from tensorflow.python.training.checkpointable.util import list_objects
from tensorflow.python.training.checkpointable.util import object_metadata
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(module_name=__name__)
|
Python
| 0
|
@@ -890,16 +890,37 @@
intable%0A
+@@CheckpointableBase%0A
@@Checkp
@@ -1573,16 +1573,94 @@
intable%0A
+from tensorflow.python.training.checkpointable.base import CheckpointableBase%0A
from ten
@@ -2211,9 +2211,8 @@
name__)%0A
-%0A
|
7ef03c975566b92fd97b7071b39cf3d8c242e480
|
Create brick.py
|
brick.py
|
brick.py
|
Python
| 0.000004
|
@@ -0,0 +1,2385 @@
+# Class: Brick%0A# Represents a single brick as displayed on screen.%0A# Used as a target for the Ball to break%0A%0A# Requires pygame%0Aimport pygame%0A%0Aclass Brick(pygame.sprite.Sprite):%0A __borderWidth = 2%0A __hitsRemaining = 1%0A __position = %7B%22x%22: 0, %22y%22: 0%7D%0A __size = 25%0A __whRatio = %7B%22width%22: 2, %22height%22: 1%7D%0A __width = __size * __whRatio%5B%22width%22%5D%0A __height = __size * __whRatio%5B%22height%22%5D%0A __outerRect = pygame.Rect(__position%5B%22x%22%5D, __position%5B%22y%22%5D, __width, __height)%0A __innerRect = pygame.Rect(__position%5B%22x%22%5D + __borderWidth, __position%5B%22y%22%5D + __borderWidth, __width - (__borderWidth * 2), __height - (__borderWidth * 2))%0A __isInPlay = True%0A%0A def __init__(self, position, fill = None, border = None):%0A from Game import Engine%0A%0A super().__init__()%0A%0A if fill != None:%0A self.__fill = fill%0A else:%0A self.__fill = Engine.Colors.LAVENDER%0A%0A if border != None:%0A self.__border = border%0A else:%0A self.__border = Engine.Colors.BLACK%0A%0A self.__position%5B%22x%22%5D = position%5B0%5D%0A self.__position%5B%22y%22%5D = position%5B1%5D%0A self.__outerRect = pygame.Rect(self.__position%5B%22x%22%5D, self.__position%5B%22y%22%5D, self.__width, self.__height)%0A self.__innerRect = pygame.Rect(self.__position%5B%22x%22%5D + self.__borderWidth, self.__position%5B%22y%22%5D + self.__borderWidth, self.__width - (self.__borderWidth * 2), self.__height - (self.__borderWidth * 2))%0A self.__hitsRemaining = 1%0A%0A self.rect = self.__outerRect%0A self.image = pygame.Surface(%5Bself.__width, self.__height%5D)%0A self.image.fill(self.__border)%0A%0A self.brick = pygame.Surface((self.__innerRect.width, self.__innerRect.height))%0A self.brick.fill(self.__fill)%0A%0A self.image.blit(self.brick, self.__innerRect)%0A%0A def update(self):%0A pass%0A%0A def getWidth(self):%0A return self.__width%0A%0A def getHeight(self):%0A return self.__height%0A%0A def __removeFromPlay(self):%0A 
self.__isInPlay = False #set flag%0A%0A def __animate(self):%0A if self.__hitsRemaining %3C= 0:%0A self.__removeFromPlay() #no hits remaining, get rid of this one%0A%0A def collide(self):%0A self.__hitsRemaining -= 1 #decrement hits%0A self.__animate() #and animate the hit%0A%0A def stack(self, stage):%0A if self.__isInPlay:%0A stage.blit(self, self.rect) #draw border%0A
|
|
c1d3a8d15d3e50a14ff765e7abd063cc1b390063
|
add new test case TestAssociator
|
tests/unit/EventReader/test_Associator.py
|
tests/unit/EventReader/test_Associator.py
|
Python
| 0.000001
|
@@ -0,0 +1,1239 @@
+from AlphaTwirl.EventReader import Associator%0Aimport unittest%0A%0A##____________________________________________________________________________%7C%7C%0Aclass MockReader(object):%0A def __init__(self):%0A self.content = %5B %5D%0A%0A##____________________________________________________________________________%7C%7C%0Aclass MockCollector(object):%0A def __init__(self):%0A self.readers = %5B %5D%0A%0A def addReader(self, datasetName, reader):%0A self.readers.append((datasetName, reader))%0A%0A##____________________________________________________________________________%7C%7C%0Aclass TestAssociator(unittest.TestCase):%0A%0A def test_make(self):%0A reader = MockReader()%0A collector = MockCollector()%0A associator = Associator(reader, collector)%0A%0A reader1 = associator.make(%22data1%22)%0A%0A self.assertIsNot(reader, reader1)%0A self.assertIsNot(reader.content, reader1.content)%0A self.assertIsInstance(reader1, MockReader)%0A self.assertEqual(%5B(%22data1%22, reader1)%5D, collector.readers)%0A%0A def test_NullCollector(self):%0A reader = MockReader()%0A associator = Associator(reader)%0A reader1 = associator.make(%22data1%22)%0A%0A##____________________________________________________________________________%7C%7C%0A
|
|
6a4152e805be0ba061529841fb84442d8a23ff9f
|
add label transform cpn
|
python/federatedml/components/label_transform.py
|
python/federatedml/components/label_transform.py
|
Python
| 0
|
@@ -0,0 +1,1081 @@
+#%0A# Copyright 2019 The FATE Authors. All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A#%0A%0Afrom .components import ComponentMeta%0A%0Alabel_transform_cpn_meta = ComponentMeta(%22LabelTransform%22)%0A%0A%0A@label_transform_cpn_meta.bind_param%0Adef label_transform_param():%0A from federatedml.param.label_transform_param import LabelTransformParam%0A%0A return LabelTransformParam%0A%0A%0A@label_transform_cpn_meta.bind_runner.on_guest.on_host%0Adef label_transform_client_runner():%0A from federatedml.util.label_transform import LabelTransformer%0A%0A return LabelTransformer%0A
|
|
99ab22cf5fcba719dd7d9d87c18c8d93de5591a4
|
Add IO Class
|
whitepy/ws_io.py
|
whitepy/ws_io.py
|
Python
| 0
|
@@ -0,0 +1,625 @@
+import readchar%0Aimport sys%0A%0A%0Aclass IO(object):%0A def __init__(self, stack):%0A self.stack = stack%0A%0A def i_chr(self, heap):%0A self.stack.push(readchar.readchar())%0A heap.set()%0A%0A def i_int(self, heap):%0A num = None%0A while type(num) is not int:%0A try:%0A num = int(readchar.readchar())%0A except ValueError:%0A pass%0A self.stack.push(num)%0A heap.set()%0A%0A def o_chr(self):%0A sys.stdout.buffer.write(self.stack.pop().encode('utf-8'))%0A%0A def o_int(self):%0A sys.stdout.buffer.write(str(self.stack.pop()).encode('utf-8'))%0A
|
|
6279341682ae45a228302972dbd106a2e44e0b12
|
Add example usage of the JsonTestResponse.
|
examples/example_test.py
|
examples/example_test.py
|
Python
| 0
|
@@ -0,0 +1,921 @@
+import unittest%0Afrom flask import Flask%0Afrom flask_json import json_response, FlaskJSON, JsonTestResponse%0A%0A%0Adef our_app():%0A app = Flask(__name__)%0A app.test_value = 0%0A FlaskJSON(app)%0A%0A @app.route('/increment')%0A def increment():%0A app.test_value += 1%0A return json_response(value=app.test_value)%0A%0A return app%0A%0A%0Aclass OurAppTestCase(unittest.TestCase):%0A def setUp(self):%0A self.app = our_app()%0A self.app.config%5B'TESTING'%5D = True%0A%0A # We have to change response class manually since TESTING flag is%0A # set after Flask-JSON initialization.%0A self.app.response_class = JsonTestResponse%0A self.client = self.app.test_client()%0A%0A def test_app(self):%0A r = self.client.get('/increment')%0A%0A # Here is how we can access to JSON.%0A assert 'value' in r.json%0A assert r.json%5B'value'%5D == 1%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
1fd997bc11b62cb760470fb749c2a4f0261b3e00
|
Add db2es.py to sync data
|
db2es.py
|
db2es.py
|
Python
| 0.000001
|
@@ -0,0 +1,2573 @@
+#!/usr/bin/env python%0A# -*- coding: UTF-8 -*-%0A#%0A%0Afrom __future__ import unicode_literals, absolute_import%0A%0Aimport time%0A%0Afrom elasticsearch.helpers import scan%0Afrom elasticsearch.exceptions import NotFoundError%0A%0Afrom oclubs.app import app%0Afrom oclubs.access import database, elasticsearch, done%0Afrom oclubs.objs import Activity, Club%0A%0Aclses = %5BClub, Activity%5D%0A%0Awith app.app_context():%0A for cls in clses:%0A db_ids = database.fetch_onecol(%0A cls.table,%0A cls.identifier,%0A %7B%7D%0A )%0A db_ids = set(int(x) for x in db_ids)%0A db_max = max(db_ids)%0A%0A try:%0A es_ids = scan(%0A elasticsearch.es,%0A index='oclubs',%0A doc_type=cls.table,%0A size=10000000,%0A query=%7B%0A 'query': %7B'match_all': %7B%7D%7D,%0A 'size': 10000,%0A 'fields': %5B'_id'%5D%0A %7D)%0A es_ids = (d%5B'_id'%5D for d in es_ids)%0A except NotFoundError:%0A es_ids = %5B%5D%0A%0A es_ids = set(int(x) for x in es_ids)%0A%0A if es_ids:%0A es_max = max(es_ids)%0A else:%0A es_max = 0%0A%0A max_id = max(db_max, es_max)%0A%0A cls_searchprops = %5B%0A prop.name for prop in %5B%0A getattr(cls, propname) for propname in dir(cls)%0A %5D if hasattr(prop, 'search') and prop.search%0A %5D%0A%0A for i in xrange(1, max_id + 1):%0A time.sleep(0.01)%0A%0A if i in db_ids:%0A obj = cls(i)%0A%0A db_data = %7B%7D%0A for propname in cls_searchprops:%0A db_data%5Bpropname%5D = (%0A getattr(cls, propname).search(getattr(obj, propname)))%0A%0A if i in es_ids:%0A es_data = elasticsearch.get(cls.table, i)%0A if db_data == es_data:%0A print 'TYPE %25s ID %25d MATCH' %25 (cls.table, i)%0A else:%0A print 'UPDATED ES TYPE %25s ID %25d' %25 (cls.table, i)%0A elasticsearch.update(cls.table, i, db_data)%0A else:%0A print 'CREATED ES TYPE %25s ID %25d' %25 (cls.table, i)%0A elasticsearch.create(cls.table, i, db_data)%0A else:%0A if i in es_ids:%0A print 'DELETED ES TYPE %25s ID %25d' %25 (cls.table, i)%0A elasticsearch.delete(cls.table, i)%0A else:%0A print 'TYPE %25s ID %25d DOES NOT EXIST' %25 
(cls.table, i)%0A pass%0A%0A done()%0A
|
|
43e019ff26e04a6464cad3a10045ba600e98610e
|
Add __init__.py for monitorlib module.
|
monitorlib/__init__.py
|
monitorlib/__init__.py
|
Python
| 0
|
@@ -0,0 +1,161 @@
+### -*- coding: utf-8 -*-%0A###%0A### %C2%A9 2012 Krux Digital, Inc.%0A### Author: Paul Lathrop %3Cpaul@krux.com%3E%0A###%0A%0A%22%22%22Library for creating monitoring scripts/plugins.%22%22%22%0A
|
|
2427dbad4fc0cfe7685dc2767069748d37262796
|
Add initial version of identification algorithm
|
movienamer/identify.py
|
movienamer/identify.py
|
Python
| 0.000001
|
@@ -0,0 +1,2701 @@
+import os.path as path%0Aimport re%0A%0Aimport Levenshtein%0A%0Afrom .sanitize import sanitize%0Afrom .tmdb import search%0A%0A%0Adef _gather(filename, directory=None, titles=%7B%7D):%0A # Sanitize the input filename%0A name, year = sanitize(filename)%0A%0A # Start with a basic search%0A results = search(name, year)%0A%0A if year is not None and len(results) == 0:%0A # If no results are found when year is present,%0A # allow a tolerance of 1 in the year%0A results = search(name, year + 1)%0A results = results + search(name, year - 1)%0A%0A # Try to find a result with zero error and return%0A zero_distance_results = %5B%5D%0A for i, result in enumerate(results):%0A distance = Levenshtein.distance(%0A unicode(re.sub('%5B%5Ea-zA-Z0-9%5D', '', name.lower())),%0A unicode(re.sub('%5B%5Ea-zA-Z0-9%5D', '', result%5B'title'%5D.lower()))%0A )%0A%0A # Update the results with the distance%0A result%5B'distance'%5D = distance%0A results%5Bi%5D%5B'distance'%5D = distance%0A%0A # Update the results with year%0A result%5B'with_year'%5D = (year is not None)%0A results%5Bi%5D%5B'with_year'%5D = (year is not None)%0A%0A # Add count field to the result%0A result%5B'count'%5D = 1%0A results%5Bi%5D%5B'count'%5D = 1%0A%0A if distance == 0:%0A zero_distance_results.append(result)%0A%0A if len(zero_distance_results) %3E 0:%0A # Directly return results with zero error%0A return zero_distance_results%0A%0A if year is not None and len(results) %3E 0:%0A # Directly return results which were queried with year%0A return results%0A%0A # If neither zero distance results are present nor is the year,%0A # accumulate results from directory one level up%0A if directory is not None:%0A dirname = directory.split('/')%5B-1%5D%0A results_from_directory = identify(dirname)%0A%0A results_to_be_removed = %5B%5D%0A%0A # Increment count for all duplicate results%0A for i, r1 in enumerate(results):%0A for r2 in results_from_directory:%0A if r1%5B'popularity'%5D == r2%5B'popularity'%5D:%0A # Check with 
popularity since title can be duplicate%0A results%5Bi%5D%5B'count'%5D += r2%5B'count'%5D%0A results_from_directory.remove(r2)%0A break%0A%0A results = results + results_from_directory%0A%0A return results%0A%0A%0Adef identify(filename, directory=None):%0A results = _gather(filename, directory)%0A max_distance = 1 + max(%5Bresult%5B'distance'%5D for result in results%5D)%0A return sorted(%0A results,%0A lambda r: (r%5B'count'%5D ** 1.1) %5C%0A * ((max_distance - r%5B'distance'%5D)) %5C%0A * ((1 + r%5B'with_year'%5D)) %5C%0A * ((r%5B'popularity'%5D)),%0A reverse=True%0A )%0A
|
|
2ac52ea39a7a8db6cab756e3af2f65b228bb1c09
|
Add registration test
|
test/requests/test-registration.py
|
test/requests/test-registration.py
|
Python
| 0
|
@@ -0,0 +1,1926 @@
+import sys%0Aimport unittest%0Aimport requests%0Aimport logging%0Afrom elasticsearch import Elasticsearch, TransportError%0A#from utility.tools import ELASTICSEARCH_HOST, ELASTICSEARCH_PORT%0A%0AGN2_SERVER = None%0AES_SERVER = None%0A%0Aclass TestRegistration(unittest.TestCase):%0A %0A%0A def setUp(self):%0A self.url = GN2_SERVER+%22/n/register%22%0A self.es = Elasticsearch(%5BES_SERVER%5D)%0A self.es_cleanup = %5B%5D%0A%0A es_logger = logging.getLogger(%22elasticsearch%22)%0A es_logger.addHandler(%0A logging.FileHandler(%22/tmp/es_TestRegistrationInfo.log%22))%0A es_trace_logger = logging.getLogger(%22elasticsearch.trace%22)%0A es_trace_logger.addHandler(%0A logging.FileHandler(%22/tmp/es_TestRegistrationTrace.log%22))%0A%0A def tearDown(self):%0A for item in self.es_cleanup:%0A self.es.delete(index=%22users%22, doc_type=%22local%22, id=item%5B%22_id%22%5D)%0A%0A def testRegistrationPage(self):%0A if self.es.ping():%0A data = %7B%0A %22email_address%22: %22test@user.com%22,%0A %22full_name%22: %22Test User%22,%0A %22organization%22: %22Test Organisation%22,%0A %22password%22: %22test_password%22,%0A %22password_confirm%22: %22test_password%22%0A %7D%0A requests.post(self.url, data)%0A response = self.es.search(%0A index=%22users%22%0A , doc_type=%22local%22%0A , body=%7B%0A %22query%22: %7B%22match%22: %7B%22email_address%22: %22test@user.com%22%7D%7D%7D)%0A self.assertEqual(len(response%5B%22hits%22%5D%5B%22hits%22%5D), 1)%0A self.es_cleanup.append(response%5B%22hits%22%5D%5B%22hits%22%5D%5B0%5D)%0A else:%0A self.skipTest(%22The elasticsearch server is down%22)%0A%0Adef main():%0A suite = unittest.TestSuite()%0A suite.addTest(TestRegistration(%22testRegistrationPage%22))%0A runner = unittest.TextTestRunner()%0A runner.run(suite)%0A%0Aif __name__ == %22__main__%22:%0A GN2_SERVER = sys.argv%5B1%5D%0A ES_SERVER = sys.argv%5B2%5D%0A main()%0A
|
|
ee3e0d444dd706858a3a30cf52ebc2a960bcfb56
|
add a just for funsies pygame renderer
|
renderer-pygame.py
|
renderer-pygame.py
|
Python
| 0
|
@@ -0,0 +1,1755 @@
+import pygame%0A%0Aclass Palette():%0A def __init__(self, ppu):%0A self.ppu = ppu%0A self.colors = %5B(0x7C,0x7C,0x7C),(00,00,0xFC),(00,00,0xBC),(44,28,0xBC),(94,00,84),(0xA8,00,20),(0xA8,10,00),(88,14,00),(50,30,00),(00,78,00),(00,68,00),(00,58,00),(00,40,58),(00,00,00),(00,00,00),(00,00,00),(0xBC,0xBC,0xBC),(00,78,0xF8),(00,58,0xF8),(68,44,0xFC),(0xD8,00,0xCC),(0xE4,00,58),(0xF8,38,00),(0xE4,0x5C,10),(0xAC,0x7C,00),(00,0xB8,00),(00,0xA8,00),(00,0xA8,44),(00,88,88),(00,00,00),(00,00,00),(00,00,00),(0xF8,0xF8,0xF8),(0x3C,0xBC,0xFC),(68,88,0xFC),(98,78,0xF8),(0xF8,78,0xF8),(0xF8,58,98),(0xF8,78,58),(0xFC,0xA0,44),(0xF8,0xB8,00),(0xB8,0xF8,18),(58,0xD8,54),(58,0xF8,98),(00,0xE8,0xD8),(78,78,78),(00,00,00),(00,00,00),(0xFC,0xFC,0xFC),(0xA4,0xE4,0xFC),(0xB8,0xB8,0xF8),(0xD8,0xB8,0xF8),(0xF8,0xB8,0xF8),(0xF8,0xA4,0xC0),(0xF0,0xD0,0xB0),(0xFC,0xE0,0xA8),(0xF8,0xD8,78),(0xD8,0xF8,78),(0xB8,0xF8,0xB8),(0xB8,0xF8,0xD8),(00,0xFC,0xFC),(0xF8,0xD8,0xF8),(00,00,00),(00,00,00)%5D%0A%0A%0A def GetColor(self, colorNum):%0A if colorNum == 0x40:%0A return self.colors%5Bself.ppu.lastBGWrite%5D%0A return self.colors%5BcolorNum%5D%0A%0Aclass Renderer():%0A def __init__(self, ppu, scale=1):%0A self.palette = Palette(ppu)%0A pygame.init()%0A self.scale = scale%0A self.window = pygame.display.set_mode(%5B256*scale,240*scale%5D)%0A pygame.display.set_caption('refNes')%0A%0A%0A # draw a pixelly thing%0A def Update(self, screen, y):%0A for x in range(0, 256):%0A screenIndex = (y * 256) + x%0A rgb = self.palette.GetColor(screen%5BscreenIndex%5D)%0A color = (rgb%5B0%5D, rgb%5B1%5D, rgb%5B2%5D)%0A area = (x*self.scale, y*self.scale, self.scale, self.scale)%0A self.window.fill(color, area)%0A pygame.display.flip()%0A
|
|
75f666ad189c5a799582ce567f0df8b7848066d5
|
replace spy solved
|
Lesson3/replace_spy.py
|
Lesson3/replace_spy.py
|
Python
| 0.002478
|
@@ -0,0 +1,483 @@
+# Define a procedure, replace_spy,%0A# that takes as its input a list of%0A# three numbers, and modifies the%0A# value of the third element in the%0A# input list to be one more than its%0A# previous value.%0A%0Aspy = %5B0,0,7%5D%0A%0Adef replace_spy(spy):%0A spy%5B2%5D = spy%5B2%5D + 1%0A return spy%0A%0A%0A%0A# In the test below, the first line calls your %0A# procedure which will change spy, and the %0A# second checks you have changed it.%0A# Uncomment the top two lines below.%0A%0Areplace_spy(spy)%0Aprint spy%0A#%3E%3E%3E %5B0,0,8%5D%0A
|
|
375a8f451538d0b426c8a8ddad8c37b1be1e8ceb
|
Use unicode for AdvanceSelect widget
|
oscar/forms/widgets.py
|
oscar/forms/widgets.py
|
import re
from django import forms
from django.forms.util import flatatt
from django.forms.widgets import FileInput
from django.template import Context
from django.template.loader import render_to_string
from django.utils.encoding import force_text, force_unicode
from django.utils.safestring import mark_safe
try:
from django.utils.html import format_html
except ImportError:
# Django 1.4 compatibility
from oscar.core.compat import format_html
class ImageInput(FileInput):
"""
Widget providing a input element for file uploads based on the
Django ``FileInput`` element. It hides the actual browser-specific
input element and shows the available image for images that have
been previously uploaded. Selecting the image will open the file
dialog and allow for selecting a new or replacing image file.
"""
template_name = 'partials/image_input_widget.html'
attrs = {'accept': 'image/*'}
def render(self, name, value, attrs=None):
"""
Render the ``input`` field based on the defined ``template_name``. The
image URL is take from *value* and is provided to the template as
``image_url`` context variable relative to ``MEDIA_URL``. Further
attributes for the ``input`` element are provide in ``input_attrs`` and
contain parameters specified in *attrs* and *name*.
If *value* contains no valid image URL an empty string will be provided
in the context.
"""
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_unicode(self._format_value(value))
image_url = final_attrs.get('value', '')
return render_to_string(self.template_name, Context({
'input_attrs': flatatt(final_attrs),
'image_url': image_url,
'image_id': "%s-image" % final_attrs['id'],
}))
class WYSIWYGTextArea(forms.Textarea):
def __init__(self, *args, **kwargs):
kwargs.setdefault('attrs', {})
kwargs['attrs'].setdefault('class', '')
kwargs['attrs']['class'] += ' wysiwyg'
super(WYSIWYGTextArea, self).__init__(*args, **kwargs)
def datetime_format_to_js_date_format(format):
"""
Convert a Python datetime format to a date format suitable for use with JS
date pickers
"""
converted = format
replacements = {
'%Y': 'yy',
'%m': 'mm',
'%d': 'dd',
'%H:%M': '',
}
for search, replace in replacements.iteritems():
converted = converted.replace(search, replace)
return converted.strip()
def datetime_format_to_js_time_format(format):
"""
Convert a Python datetime format to a time format suitable for use with JS
date pickers
"""
converted = format
replacements = {
'%Y': '',
'%m': '',
'%d': '',
'%H': 'HH',
'%M': 'mm',
}
for search, replace in replacements.iteritems():
converted = converted.replace(search, replace)
converted = re.sub('[-/][^%]', '', converted)
return converted.strip()
def add_js_formats(widget):
"""
Set data attributes for date and time format on a widget
"""
attrs = {
'data-dateFormat': datetime_format_to_js_date_format(
widget.format),
'data-timeFormat': datetime_format_to_js_time_format(
widget.format)
}
widget.attrs.update(attrs)
class DatePickerInput(forms.DateInput):
"""
DatePicker input that uses the jQuery UI datepicker. Data attributes are
used to pass the date format to the JS
"""
def __init__(self, *args, **kwargs):
super(DatePickerInput, self).__init__(*args, **kwargs)
add_js_formats(self)
class DateTimePickerInput(forms.DateTimeInput):
# Build a widget which uses the locale datetime format but without seconds.
# We also use data attributes to pass these formats to the JS datepicker.
def __init__(self, *args, **kwargs):
include_seconds = kwargs.pop('include_seconds', False)
super(DateTimePickerInput, self).__init__(*args, **kwargs)
if not include_seconds:
self.format = re.sub(':?%S', '', self.format)
add_js_formats(self)
class AdvancedSelect(forms.Select):
"""
Customised Select widget that allows a list of disabled values to be passed
to the constructor. Django's default Select widget doesn't allow this so
we have to override the render_option method and add a section that checks
for whether the widget is disabled.
"""
def __init__(self, attrs=None, choices=(), disabled_values=()):
self.disabled_values = set(force_text(v) for v in disabled_values)
super(AdvancedSelect, self).__init__(attrs, choices)
def render_option(self, selected_choices, option_value, option_label):
option_value = force_text(option_value)
if option_value in self.disabled_values:
selected_html = mark_safe(' disabled="disabled"')
elif option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
return format_html('<option value="{0}"{1}>{2}</option>',
option_value,
selected_html,
force_text(option_label))
|
Python
| 0.000001
|
@@ -5499,16 +5499,17 @@
at_html(
+u
'%3Coption
|
83e136a0e0d93d1dde4966322a3b51f453d0a1ba
|
Add simple CSV exporter to examples.
|
tcflib/examples/csv_exporter.py
|
tcflib/examples/csv_exporter.py
|
Python
| 0
|
@@ -0,0 +1,1675 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0Aimport csv%0Afrom io import StringIO%0Afrom collections import OrderedDict%0A%0Afrom tcflib.service import ExportingWorker, run_as_cli%0A%0A%0Aclass CSVExporter(ExportingWorker):%0A%0A def export(self):%0A%0A columns = OrderedDict()%0A columns%5B'tokenID'%5D = %5Btoken.id for token in self.corpus.tokens%5D%0A columns%5B'token'%5D = %5Btoken.text for token in self.corpus.tokens%5D%0A if hasattr(self.corpus, 'postags'):%0A columns%5B'POStag'%5D = %5Btoken.tag for token in self.corpus.tokens%5D%0A if hasattr(self.corpus, 'lemmas'):%0A columns%5B'lemma'%5D = %5Btoken.lemma for token in self.corpus.tokens%5D%0A if hasattr(self.corpus, 'wsd'):%0A columns%5B'wordsenses'%5D = %5B', '.join(token.wordsenses)%0A for token in self.corpus.tokens%5D%0A if hasattr(self.corpus, 'namedentities'):%0A entities = %5B%5D%0A for token in self.corpus.tokens:%0A if not token.entity:%0A entities.append('')%0A elif token == token.entity.tokens%5B0%5D:%0A entities.append('B-%7B%7D'.format(token.entity.class_))%0A else:%0A entities.append('I-%7B%7D'.format(token.entity.class_))%0A columns%5B'NamedEntity'%5D = entities%0A # Write to CSV%0A with StringIO(newline='') as csvfile:%0A writer = csv.writer(csvfile)%0A writer.writerow(list(columns.keys()))%0A for row in zip(*columns.values()):%0A writer.writerow(row)%0A outstring = csvfile.getvalue()%0A return outstring.encode('utf-8')%0A%0Aif __name__ == '__main__':%0A run_as_cli(CSVExporter)
|
|
39809b37acc541f81dddbd69655d0c160e620f75
|
Update __init__.py
|
tendrl/commons/jobs/__init__.py
|
tendrl/commons/jobs/__init__.py
|
import json
import traceback
import etcd
import gevent.event
from tendrl.commons.event import Event
from tendrl.commons.flows.exceptions import FlowExecutionFailedError
from tendrl.commons.message import Message, ExceptionMessage
from tendrl.commons.objects.job import Job
from tendrl.commons.utils import etcd_util
class JobConsumerThread(gevent.greenlet.Greenlet):
EXCEPTION_BACKOFF = 5
def __init__(self):
super(JobConsumerThread, self).__init__()
self._complete = gevent.event.Event()
def _run(self):
Event(
Message(
priority="info",
publisher=NS.publisher_id,
payload={"message": "%s running" % self.__class__.__name__}
)
)
while not self._complete.is_set():
try:
gevent.sleep(2)
try:
jobs = NS.etcd_orm.client.read("/queue")
except etcd.EtcdKeyNotFound:
continue
for job in jobs.leaves:
try:
raw_job = {"job_id": job.key.split('/')[-1],
"status": None,
"payload": None,
"errors": None
}
result = etcd_util.read(job.key)
for item in result:
if item in raw_job:
raw_job[item] = result[item]
raw_job["payload"] = json.loads(
raw_job["payload"].decode('utf-8'))
except etcd.EtcdKeyNotFound:
continue
if raw_job['payload']["type"] == NS.type and \
raw_job['status'] == "new":
# Job routing
if raw_job.get("payload", {}).get("tags", []):
NS.node_context = NS.node_context.load()
tags = json.loads(NS.node_context.tags)
if set(tags).isdisjoint(
raw_job['payload']['tags']):
continue
if raw_job.get("payload", {}).get("node_ids", []):
if NS.node_context.node_id not in \
raw_job['payload']['node_ids']:
continue
raw_job['status'] = "processing"
Event(
Message(
priority="info",
publisher=NS.publisher_id,
payload={"message": "Processing Job %s" %
raw_job['job_id']
}
)
)
Job(job_id=raw_job['job_id'],
status=raw_job['status'],
payload=json.dumps(raw_job['payload']),
errors=raw_job['errors']).save()
if "integration_id" in raw_job['payload']:
raw_job['payload']['parameters']['integration_id'] = \
raw_job[
'payload']['integration_id']
raw_job['payload']['parameters']['node_ids'] = raw_job[
'payload']['node_ids']
current_ns, flow_name, obj_name = \
self._extract_fqdn(raw_job['payload']['run'])
if obj_name:
runnable_flow = current_ns.ns.get_obj_flow(
obj_name, flow_name)
else:
runnable_flow = current_ns.ns.get_flow(flow_name)
try:
runnable_flow(parameters=raw_job['payload'][
'parameters'], job_id=raw_job['job_id']).run()
raw_job['status'] = "finished"
# TODO(team) replace below raw write with a
# "EtcdJobQueue" class
Job(job_id=raw_job['job_id'],
status=raw_job['status'],
payload=json.dumps(raw_job['payload']),
errors=raw_job['errors']).save()
except FlowExecutionFailedError as e:
Event(
ExceptionMessage(
priority="error",
publisher=NS.publisher_id,
payload={"message": "error",
"exception": e
}
)
)
raw_job['status'] = "failed"
raw_job['errors'] = str(e)
Job(job_id=raw_job['job_id'],
status=raw_job['status'],
payload=json.dumps(raw_job['payload']),
errors=raw_job['errors']).save()
break
except Exception as ex:
Event(
ExceptionMessage(
priority="error",
publisher=NS.publisher_id,
payload={"message": "error traceback",
"exception": ex
}
)
)
self._complete.wait(self.EXCEPTION_BACKOFF)
def stop(self):
self._complete.set()
def _extract_fqdn(self, flow_fqdn):
ns, flow_name = flow_fqdn.split(".flows.")
obj_name = None
# check if the flow is bound to any object
try:
ns, obj_name = ns.split(".objects.")
except ValueError:
pass
ns_str = ns.split(".")[-1]
if "integrations" in ns:
return getattr(NS.integrations, ns_str), flow_name, obj_name
else:
return getattr(NS, ns_str), flow_name, obj_name
|
Python
| 0.000072
|
@@ -3212,391 +3212,8 @@
ve()
-%0A if %22integration_id%22 in raw_job%5B'payload'%5D:%0A raw_job%5B'payload'%5D%5B'parameters'%5D%5B'integration_id'%5D = %5C%0A raw_job%5B%0A 'payload'%5D%5B'integration_id'%5D%0A raw_job%5B'payload'%5D%5B'parameters'%5D%5B'node_ids'%5D = raw_job%5B%0A 'payload'%5D%5B'node_ids'%5D
%0A%0A
|
a509828f5d5040b1b005fe602ad0e53675b8cb52
|
add to test
|
test/solr_doc_manager_tester.py
|
test/solr_doc_manager_tester.py
|
Python
| 0
|
@@ -0,0 +1,2490 @@
+import unittest%0Aimport time%0Afrom solr_doc_manager import SolrDocManager%0Afrom pysolr import Solr%0A%0Aclass SolrDocManagerTester(unittest.TestCase):%0A%0A def __init__(self):%0A%0A super(SolrDocManagerTester, self).__init__()%0A self.solr = Solr(%22http://localhost:8080/solr/%22)%0A%0A def runTest(self):%0A%0A #Invalid URL%0A s = SolrDocManager(%22http://doesntexist.cskjdfhskdjfhdsom%22)%0A self.assertTrue(s.solr is None)%0A%0A #valid URL%0A SolrDoc = SolrDocManager(%22http://localhost:8080/solr/%22)%0A self.solr.delete(q ='*:*')%0A%0A%0A #test upsert%0A docc = %7B'_id': '1', 'name': 'John'%7D%0A SolrDoc.upsert(%5Bdocc%5D)%0A self.solr.commit()%0A res = self.solr.search('*:*')%0A for doc in res:%0A self.assertTrue(doc%5B'_id'%5D == '1' and doc%5B'name'%5D == 'John')%0A %0A docc = %7B'_id': '1', 'name': 'Paul'%7D%0A SolrDoc.upsert(%5Bdocc%5D)%0A self.solr.commit()%0A res = self.solr.search('*:*')%0A for doc in res:%0A self.assertTrue(doc%5B'_id'%5D == '1' and doc%5B'name'%5D == 'Paul')%0A%0A #test remove%0A SolrDoc.remove('1')%0A self.solr.commit()%0A res = self.solr.search('*:*')%0A self.assertTrue(len(res) == 0)%0A%0A #test search%0A docc = %7B'_id': '1', 'name': 'John'%7D%0A SolrDoc.upsert(%5Bdocc%5D)%0A docc = %7B'_id': '2', 'name': 'Paul'%7D%0A SolrDoc.upsert(%5Bdocc%5D)%0A self.solr.commit()%0A search = SolrDoc.search('*:*')%0A search2 = self.solr.search('*:*')%0A self.assertTrue(len(search) == len(search2))%0A self.assertTrue(len(search) != 0)%0A for i in range(0,len(search)):%0A self.assertTrue(list(search)%5Bi%5D == list(search2)%5Bi%5D)%0A %0A #test solr commit%0A docc = %7B'_id': '3', 'name': 'Waldo'%7D%0A SolrDoc.upsert(%5Bdocc%5D)%0A res = SolrDoc.search('Waldo')%0A assert(len(res) == 0)%0A time.sleep(1)%0A res = SolrDoc.search('Waldo')%0A assert(len(res) != 0)%0A%0A #test get last doc%0A docc = %7B'_id': '4', 'name': 'Hare', 'ts': '2'%7D%0A SolrDoc.upsert(%5Bdocc%5D)%0A docc = %7B'_id': '5', 'name': 'Tortoise', 'ts': '1'%7D%0A 
SolrDoc.upsert(%5Bdocc%5D)%0A self.solr.commit()%0A doc = SolrDoc.get_last_doc()%0A self.assertTrue(doc%5B'_id'%5D == '4')%0A%0A docc = %7B'_id': '6', 'name': 'HareTwin', 'ts':'2'%7D%0A self.solr.commit()%0A doc = SolrDoc.get_last_doc()%0A self.assertTrue(doc%5B'_id'%5D == '4' or doc%5B'_id'%5D == '6');%0A%0A %0A%0A %0A
|
|
4ca2ca05232357776e64a1e6eb76c0b26663a59e
|
add semigroup law tester
|
testers/semigroup_law_tester.py
|
testers/semigroup_law_tester.py
|
Python
| 0
|
@@ -0,0 +1,605 @@
+class SemigroupLawTester:%0A%0A def __init__(self, semigroup, value1, value2, value3):%0A self.semigroup = semigroup%0A self.value1 = value1%0A self.value2 = value2%0A self.value3 = value3%0A%0A def associativity_test(self):%0A%0A x = self.semigroup(self.value1)%5C%0A .concat(self.semigroup(self.value2))%5C%0A .concat(self.semigroup(self.value3))%0A%0A y = self.semigroup(self.value1).concat(%0A self.semigroup(self.value2).self.semigroup(self.value3)%0A )%0A%0A assert x == y%0A%0A def test(self):%0A self.associativity_test()%0A
|
|
95dea3ea1f39009303f91efd4b59648470b75e18
|
Remove repr implementation
|
modularodm/storage/base.py
|
modularodm/storage/base.py
|
import time
import random
from functools import wraps
import itertools
from ..translators import DefaultTranslator
class KeyExistsException(Exception): pass
class Logger(object):
def __init__(self):
self.listening = False
self.events = []
self.xtra = []
def listen(self, xtra=None):
self.xtra.append(xtra)
if self.listening:
return False
self.listening = True
self.events = []
return True
def record_event(self, event):
if self.listening:
self.events.append(event)
def report(self, sort_func=None):
out = {}
if sort_func is None:
sort_func = lambda e: e.func.__name__
heard = sorted(self.events, key=sort_func)
for key, group in itertools.groupby(heard, sort_func):
group = list(group)
num_events = len(group)
total_time = sum([event.elapsed_time for event in group])
out[key] = (num_events, total_time)
return out
def pop(self):
self.xtra.pop()
def clear(self):
self.listening = False
self.events = []
class LogEvent(object):
def __init__(self, func, start_time, stop_time, xtra=None):
self.func = func
self.start_time = start_time
self.stop_time = stop_time
self.elapsed_time = stop_time - start_time
self.xtra = xtra
def __repr__(self):
return 'LogEvent("{func}", {start_time}, {stop_time}, {xtra})'.format(
**self.__dict__
)
def logify(func):
@wraps(func)
def wrapped(this, *args, **kwargs):
if this.logger.listening:
start_time = time.time()
out = func(this, *args, **kwargs)
if this.logger.listening:
stop_time = time.time()
xtra = this.logger.xtra[-1]
this.logger.record_event(
LogEvent(
func,
start_time,
stop_time,
xtra
)
)
return out
return wrapped
class StorageMeta(type):
def __new__(mcs, name, bases, dct):
# Decorate methods
for key, value in dct.items():
if hasattr(value, '__call__') \
and not isinstance(value, type) \
and not key.startswith('_'):
dct[key] = logify(value)
# Run super-metaclass __new__
return super(StorageMeta, mcs).__new__(mcs, name, bases, dct)
class Storage(object):
"""Abstract base class for storage objects. Subclasses (e.g. PickleStorage,
MongoStorage, etc.) must define insert, update, get, remove, flush, and
find_all methods.
"""
__metaclass__ = StorageMeta
translator = DefaultTranslator()
logger = Logger()
def _ensure_index(self, key):
pass
# todo allow custom id generator
# todo increment n on repeated failures
def _generate_random_id(self, n=5):
"""Generated random alphanumeric key.
:param n: Number of characters in random key
"""
alphabet = '23456789abcdefghijkmnpqrstuvwxyz'
return ''.join(random.sample(alphabet, n))
def _optimistic_insert(self, primary_name, value, n=5):
"""Attempt to insert with randomly generated key until insert
is successful.
:param value:
:param label:
:param n: Number of characters in random key
"""
while True:
try:
key = self._generate_random_id(n)
value[primary_name] = key
self.insert(primary_name, key, value)
except KeyExistsException:
pass
break
return key
def insert(self, primary_name, key, value):
'''Insert a new record.
:param str primary_name: Name of primary key
:param key: The value of the primary key
:param dict value: The dictionary of attribute:value pairs
'''
raise NotImplementedError
def update(self, query, data):
"""Update multiple records with new data.
:param query: A query object.
:param dict data: Dictionary of key:value pairs.
"""
raise NotImplementedError
def get(self, primary_name, key):
"""Get a single record.
:param str primary_name: The name of the primary key.
:param key: The value of the primary key.
"""
raise NotImplementedError
def remove(self, *query):
"""Remove records.
"""
raise NotImplementedError
def flush(self):
"""Flush the database."""
raise NotImplementedError
def find_one(self, query=None, **kwargs):
"""Find a single record that matches ``query``.
"""
raise NotImplementedError
def find(self, query=None, **kwargs):
"""Query the database and return a query set.
"""
raise NotImplementedError
def __repr__(self):
return str(self.store)
|
Python
| 0
|
@@ -5034,60 +5034,4 @@
ror%0A
-%0A def __repr__(self):%0A return str(self.store)%0A
|
1c5fef3a34ed421610a4e9a38feb07e6545e5d13
|
Add tests for the `dirty_untar` rule
|
tests/rules/test_dirty_untar.py
|
tests/rules/test_dirty_untar.py
|
Python
| 0.000003
|
@@ -0,0 +1,1795 @@
+import os%0Aimport pytest%0Aimport tarfile%0Afrom thefuck.rules.dirty_untar import match, get_new_command, side_effect%0Afrom tests.utils import Command%0A%0A%0A@pytest.fixture%0Adef tar_error(tmpdir):%0A def fixture(filename):%0A path = os.path.join(str(tmpdir), filename)%0A%0A def reset(path):%0A with tarfile.TarFile(path, 'w') as archive:%0A for file in ('a', 'b', 'c'):%0A with open(file, 'w') as f:%0A f.write('*')%0A%0A archive.add(file)%0A%0A os.remove(file)%0A%0A with tarfile.TarFile(path, 'r') as archive:%0A archive.extractall()%0A%0A os.chdir(str(tmpdir))%0A reset(path)%0A%0A assert(set(os.listdir('.')) == %7Bfilename, 'a', 'b', 'c'%7D)%0A%0A return fixture%0A%0Aparametrize_filename = pytest.mark.parametrize('filename', %5B%0A 'foo.tar',%0A 'foo.tar.gz',%0A 'foo.tgz'%5D)%0A%0Aparametrize_script = pytest.mark.parametrize('script, fixed', %5B%0A ('tar xvf %7B%7D', 'mkdir -p foo && tar xvf %7B%7D -C foo'),%0A ('tar -xvf %7B%7D', 'mkdir -p foo && tar -xvf %7B%7D -C foo'),%0A ('tar --extract -f %7B%7D', 'mkdir -p foo && tar --extract -f %7B%7D -C foo')%5D)%0A%0A@parametrize_filename%0A@parametrize_script%0Adef test_match(tar_error, filename, script, fixed):%0A tar_error(filename)%0A assert match(Command(script=script.format(filename)), None)%0A%0A%0A@parametrize_filename%0A@parametrize_script%0Adef test_side_effect(tar_error, filename, script, fixed):%0A tar_error(filename)%0A side_effect(Command(script=script.format(filename)), None)%0A assert(os.listdir('.') == %5Bfilename%5D)%0A%0A%0A@parametrize_filename%0A@parametrize_script%0Adef test_get_new_command(tar_error, filename, script, fixed):%0A tar_error(filename)%0A assert get_new_command(Command(script=script.format(filename)), None) == fixed.format(filename)%0A
|
|
c5276d469b08b3262490047f2372a477814cb2fc
|
add server test for statelessCompute
|
tests/stateless_compute_test.py
|
tests/stateless_compute_test.py
|
Python
| 0
|
@@ -0,0 +1,1067 @@
+# -*- coding: utf-8 -*-%0Au%22%22%22Test statelessCompute API%0A%0A:copyright: Copyright (c) 2021 RadiaSoft LLC. All Rights Reserved.%0A:license: http://www.apache.org/licenses/LICENSE-2.0.html%0A%22%22%22%0Afrom __future__ import absolute_import, division, print_function%0Afrom pykern.pkcollections import PKDict%0Aimport pytest%0A%0Adef test_madx_calculate_bunch_parameters(fc):%0A from pykern import pkunit%0A r = _do(fc, 'calculate_bunch_parameters')%0A pkunit.pkok(r.command_beam, 'unexpected response=%7B%7D', r)%0A%0A%0Adef test_uknown_method(fc):%0A from pykern import pkunit%0A m = 'uknown'%0A r = _do(fc, m)%0A pkunit.pkre(f'method=%7Bm%7D not defined in schema', r.error)%0A%0A%0Adef _do(fc, method):%0A t = 'madx'%0A d = fc.sr_sim_data(sim_name='FODO PTC', sim_type=t)%0A return fc.sr_post(%0A 'statelessCompute',%0A PKDict(%0A bunch=d.models.bunch,%0A command_beam=d.models.command_beam,%0A method=method,%0A simulationId=d.models.simulation.simulationId,%0A simulationType=t,%0A variables=d.models.rpnVariables,%0A ),%0A )%0A
|
|
9b8069f66988ccdbfc76fdbbc7efb78285ed9900
|
Bump version to S22.1
|
src/ggrc/settings/default.py
|
src/ggrc/settings/default.py
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
DEBUG = False
TESTING = False
# Flask-SQLAlchemy fix to be less than `wait_time` in /etc/mysql/my.cnf
SQLALCHEMY_POOL_RECYCLE = 120
# Settings in app.py
AUTOBUILD_ASSETS = False
ENABLE_JASMINE = False
DEBUG_ASSETS = False
FULLTEXT_INDEXER = None
USER_PERMISSIONS_PROVIDER = None
EXTENSIONS = []
exports = []
# Deployment-specific variables
COMPANY = "Company, Inc."
COMPANY_LOGO_TEXT = "Company GRC"
VERSION = "s22"
# Initialize from environment if present
import os
SQLALCHEMY_DATABASE_URI = os.environ.get('GGRC_DATABASE_URI', '')
SECRET_KEY = os.environ.get('GGRC_SECRET_KEY', 'Replace-with-something-secret')
|
Python
| 0
|
@@ -648,16 +648,18 @@
N = %22s22
+.1
%22%0A%0A# Ini
|
6c22f7bf2fe8db39446cddbd0fa9474486101a27
|
Add __init__, as django test finder isn't very smart
|
toolkit/diary/tests/__init__.py
|
toolkit/diary/tests/__init__.py
|
Python
| 0.000013
|
@@ -0,0 +1,165 @@
+from __future__ import absolute_import%0A%0Afrom .test_edit_views import *%0Afrom .test_mailout_view import *%0Afrom .test_models import *%0Afrom .test_public_views import *%0A%0A
|
|
623ea9e3d050f347eb404094d049a402b2bb367a
|
Create config.py
|
dasem/config.py
|
dasem/config.py
|
Python
| 0.000002
|
@@ -0,0 +1,123 @@
+%22%22%22config%22%22%22%0A%0A%0Afrom os.path import expanduser, join%0A%0A%0Adef data_directory():%0A return join(expanduser('~'), 'dasem_data')%0A
|
|
e35586efcfc0af4dcfe02c005a1435767f5ab3ed
|
add merge_book_lists.py
|
douban_spider/merge_book_lists.py
|
douban_spider/merge_book_lists.py
|
Python
| 0.000005
|
@@ -0,0 +1,650 @@
+# -*- coding: UTF-8 -*-%0A%0Aimport bloom_filter%0Aimport sys%0A%0A# %E6%8A%8Astr%E7%BC%96%E7%A0%81%E7%94%B1%E9%BB%98%E8%AE%A4ascii%EF%BC%88python2%E4%B8%BAascii%EF%BC%8Cpython3%E4%B8%BAutf8%EF%BC%89%E6%94%B9%E4%B8%BAutf8%0Areload(sys)%0Asys.setdefaultencoding('utf8')%0A%0A%22%22%22%0AMerge book list files into one, using bloom filter to remove duplicate books%0A%22%22%22%0A%0Adef main():%0A%09file_name = 'book_list'%0A%09bf = bloom_filter.BloomFilter(2000,14)%0A%09with open(file_name, 'a') as fn:%0A%09%09for i in range(len(sys.argv)):%0A%09%09%09if i %3E 0:%0A%09%09%09%09with open(sys.argv%5Bi%5D) as f:%0A%09%09%09%09%09for line in f.readlines():%0A%09%09%09%09%09%09if line != '%5Cn' and bf.add(line.strip()) == False:%0A%09%09%09%09%09%09%09fn.write(line.strip() + '%5Cn')%0A%0A%0Aif __name__ == '__main__':%0A%09if len(sys.argv) %3C 2:%0A%09%09print %22%E5%BF%85%E9%A1%BB%E8%BE%93%E5%85%A5%E8%87%B3%E5%B0%91%E4%B8%A4%E4%B8%AA%E5%8F%82%E6%95%B0(%E4%B9%A6%E7%B1%8D%E5%88%97%E8%A1%A8%E6%96%87%E4%BB%B6)%22%0A%09else:%0A%09%09main()
|
|
62fb38d0860b5feeee39764b6c66f5ceed39b984
|
Fix versions of protected/unprotected documents
|
alembic_migration/versions/077ddf78a1f3_fix_protected_docs_versions.py
|
alembic_migration/versions/077ddf78a1f3_fix_protected_docs_versions.py
|
Python
| 0
|
@@ -0,0 +1,736 @@
+%22%22%22Fix protected docs versions%0A%0ARevision ID: 077ddf78a1f3%0ARevises: 9739938498a8%0ACreate Date: 2017-10-30 12:05:51.679435%0A%0A%22%22%22%0Afrom alembic import op%0Aimport sqlalchemy as sa%0A%0A%0A# revision identifiers, used by Alembic.%0Arevision = '077ddf78a1f3'%0Adown_revision = '9739938498a8'%0Abranch_labels = None%0Adepends_on = None%0A%0Adef upgrade():%0A op.execute(%22%22%22%0A with versions_from_archives as (%0A select document_id, max(version) as version%0A from guidebook.documents_archives%0A group by document_id%0A )%0A update guidebook.documents as d%0A set version = va.version%0A from versions_from_archives va%0A where d.document_id = va.document_id%22%22%22)%0A%0A%0Adef downgrade():%0A # Not reversible%0A pass%0A
|
|
ebd8d2fb86b925f3c75ddfea0bbe9d7ab60b50b7
|
add notes for subprocess module
|
abc/sub_process.py
|
abc/sub_process.py
|
Python
| 0
|
@@ -0,0 +1,334 @@
+# -*- coding: UTF-8 -*-%0A__author__ = 'mcxiaoke'%0A%0Aimport subprocess%0A%0A# %E5%88%9B%E5%BB%BA%E5%AD%90%E8%BF%9B%E7%A8%8B%E5%B9%B6%E7%AD%89%E5%BE%85%E5%AE%83%E8%BF%94%E5%9B%9E%EF%BC%8C%E5%8F%82%E6%95%B0%E6%98%AFlist%0Asubprocess.call(%5B'ls', '-a'%5D)%0A# %E5%90%8C%E4%B8%8A%EF%BC%8C%E4%BD%86%E6%98%AF%E5%AD%90%E8%BF%9B%E7%A8%8B%E8%BF%94%E5%9B%9E%E5%80%BC%E4%B8%8D%E6%98%AF0%E6%97%B6%E4%BC%9A%E6%8A%9B%E5%BC%82%E5%B8%B8%0Asubprocess.check_call(%5B'ls', '-a'%5D)%0A# subprocess.check_call(%5B'ls2', '-la'%5D)%0A# %E5%90%8C%E4%B8%8A%EF%BC%8C%E4%BD%86%E6%98%AF%E8%BF%94%E5%9B%9E%E5%80%BC%E4%BB%A5%E5%AD%97%E7%AC%A6%E4%B8%B2%E7%9A%84%E5%BD%A2%E5%BC%8F%E8%BF%94%E5%9B%9E%0A# %E5%A6%82%E6%9E%9C%E8%A6%81%E6%8D%95%E8%8E%B7%E6%A0%87%E5%87%86%E9%94%99%E8%AF%AF%E8%BE%93%E5%87%BA%EF%BC%8C%E5%8F%AF%E4%BB%A5%E7%94%A8stderr=subprocess.STDOUT%0Aret = subprocess.check_output(%5B'ls', '-a'%5D)%0Aprint ret%0A
|
|
0e6fb27d26d5f0570baa414e679b96d6c3234491
|
add correct loop file (#8)
|
looptogetdata2.py
|
looptogetdata2.py
|
Python
| 0
|
@@ -0,0 +1,2255 @@
+from urllib2 import Request, urlopen, URLError%0Aimport json%0Aimport pandas%0A%0Adef getValidTimeseriesKey(timerseries_keys, offering_id):%0A%09invalid_offering = '9999999999'%0A%09if offering_id == invalid_offering:%0A%09%09return timeseries_keys%5B1%5D%0A%09else:%0A%09%09return timeseries_keys%5B0%5D%0A%0Arequestpoll = Request ('http://dd.eionet.europa.eu/vocabulary/aq/pollutant/json')%0Atry:%0A response = urlopen(requestpoll)%0A pollutant_prop = response.read()%0Aexcept URLError, e:%0A print 'error:', e%0A%0Ajson_pollutantlist = json.loads(pollutant_prop)%0Ajsonpollutantlistdictionaries = json_pollutantlist%5Bu'concepts'%5D%0A%0Alistofpollutants = %7B%7D%0Afor pollutant in jsonpollutantlistdictionaries:%0A statID = pollutant%5B'@id'%5D%0A pollutantname = pollutant%5Bu'prefLabel'%5D%5B0%5D%5B'@value'%5D%0A listofpollutants.update (%7BstatID:pollutantname%7D)%0A%0Aallstations = pandas.read_csv(%22liststations.csv%22)%0Aallstations = allstations.drop('Unnamed: 0', 1)%0Aallstations = allstations.set_index('ID')%0A%0AID=(3907, 3903)%0A%0Alistofstationsdata = %5B%5D%0A%0Afor i in ID:%0A url = ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(i))%0A request2 = Request (url)%0A try:%0A %09response = urlopen(request2)%0A %09station_data = response.read()%0A except URLError, e:%0A print 'error:', e%0A%0A station_prop_json = json.loads (station_data)%0A station_time_series = station_prop_json%5Bu'properties'%5D%5Bu'timeseries'%5D%0A timeseries_keys = (station_time_series.keys())%0A first_timeseries = station_time_series%5Btimeseries_keys%5B0%5D%5D%0A offering_id = first_timeseries%5Bu'offering'%5D%5Bu'id'%5D%0A first_timeserieskey = getValidTimeseriesKey(timeseries_keys, offering_id)%0A station_pollutant = first_timeseries%5Bu'category'%5D%5Bu'id'%5D%0A station_ID = first_timeseries%5Bu'feature'%5D%5Bu'id'%5D%0A StationName = allstations.loc%5B(int(station_ID) , 'place')%5D%0A PollutantName = listofpollutants.get(station_pollutant)%0A url2getdata = 
('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(first_timeserieskey) +'/getData')%0A%0A request_time_series_data = Request(url2getdata)%0A try:%0A %09response = urlopen(request_time_series_data)%0A %09time_series_data = response.read()%0A except URLError, e:%0A print 'error:', e%0A%0A listofstationsdata.append((StationName, PollutantName, time_series_data))%0A%0Aprint listofstationsdata%0A
|
|
eb46f8046211eff81320faceda0c297b27bb419b
|
Add a new alert plugin for events from geomodel
|
alerts/geomodel.py
|
alerts/geomodel.py
|
Python
| 0
|
@@ -0,0 +1,995 @@
+#!/usr/bin/env python%0A%0A# This Source Code Form is subject to the terms of the Mozilla Public%0A# License, v. 2.0. If a copy of the MPL was not distributed with this%0A# file, You can obtain one at http://mozilla.org/MPL/2.0/.%0A# Copyright (c) 2015 Mozilla Corporation%0A#%0A# Contributors:%0A# Aaron Meihm %3Cameihm@mozilla.com%3E%0A%0Afrom lib.alerttask import AlertTask%0Aimport pyes%0A%0Aclass AlertGeomodel(AlertTask):%0A def main(self):%0A date_timedelta = dict(minutes=30)%0A%0A must = %5B%0A pyes.TermFilter('_type', 'geomodel'),%0A pyes.TermFilter('category', 'geomodelnotice'),%0A %5D%0A self.filtersManual(date_timedelta, must=must, must_not=%5B%5D)%0A self.searchEventsSimple()%0A self.walkEvents()%0A%0A # Set alert properties%0A def onEvent(self, event):%0A category = 'geomodel'%0A tags = %5B'geomodel'%5D%0A severity = 'NOTICE'%0A%0A summary = event%5B'_source'%5D%5B'summary'%5D%0A return self.createAlertDict(summary, category, tags, %5Bevent%5D, severity)%0A
|
|
6898b9462823449e767aa75b7ab38c3e87b61cc1
|
Check for page changes
|
macro/IsRecent.py
|
macro/IsRecent.py
|
Python
| 0
|
@@ -0,0 +1,872 @@
+# -*- coding: iso-8859-1 -*-%0D%0Au%22%22%22%0D%0A IsRecent - Check if a page was recently modified and highlight that fact%0D%0A%0D%0A @copyright: 2012 by Alan Snelson%0D%0A @license: BSD, see LICENSE for details.%0D%0A%0D%0A%22%22%22%0D%0A%0D%0Afrom datetime import datetime%0D%0Afrom MoinMoin.Page import Page%0D%0A%0D%0ADependencies = %5B'pages'%5D%0D%0A%0D%0Adef macro_IsRecent(macro, pageName=''):%0D%0A fmt = macro.formatter%0D%0A if (pageName == ''):%0D%0A return fmt.text('No page supplied')%0D%0A%0D%0A request = macro.request%0D%0A page = Page(request,pageName)%0D%0A log = page.lastEditInfo(request)%0D%0A now = datetime.now()%0D%0A delta = now - datetime.strptime(log%5B'time'%5D, %22%25Y-%25m-%25d %25H:%25M:%25S%22)%0D%0A if (delta.days %3E 7):%0D%0A return fmt.rawHTML(%22%3Ca href='http://mywiki%22 + pageName + %22'%3E%22 + pageName + %22%3C/a%3E%22)%0D%0A else:%0D%0A return fmt.rawHTML(%22%3Ca style='color:black;font-size:20px;' href='http://mywiki%22 + pageName + %22'%3E%22 + pageName + %22%3C/a%3E%22)%0D%0A
|
|
e8c64cff4daa8f563a2b19b933f89099f8a1a9b6
|
Remove socket session_id from all subscribed channels on disconnect.
|
django_socketio/views.py
|
django_socketio/views.py
|
from atexit import register
from datetime import datetime
from traceback import print_exc
from django.http import HttpResponse
from django_socketio import events
from django_socketio.channels import SocketIOChannelProxy
from django_socketio.settings import MESSAGE_LOG_FORMAT
# Maps open Socket.IO session IDs to request/socket pairs for
# guaranteeing the on_finish signal being sent when the server
# stops.
# Shape: {session_id: (request, socket, context)}; socketio() adds an
# entry when a connection opens and deletes it when the request ends.
CLIENTS = {}
@register
def cleanup():
    """
    Send the on_finish signal to every still-open client when the
    server process exits unexpectedly (registered via atexit).
    """
    for request, socket, context in CLIENTS.values():
        events.on_finish.send(request, socket, context)
def format_log(request, message):
    """
    Format a message for the socket log, mimicking gevent's pywsgi
    request-logging style (one newline-terminated line per message).
    """
    timestamp = datetime.now().replace(microsecond=0)
    fields = dict(request.META, MESSAGE=message, TIME=timestamp)
    return (MESSAGE_LOG_FORMAT % fields) + "\n"
def socketio(request):
    """
    Socket.IO handler - maintains the lifecycle of a Socket.IO
    request, sending each of the events. Also handles
    adding/removing request/socket pairs to the CLIENTS dict
    which is used for sending on_finish events when the server
    stops.
    """
    # Per-connection dict shared by all event handlers for this socket,
    # letting them stash state between events.
    context = {}
    socket = SocketIOChannelProxy(request.environ["socketio"])
    # Track the client so the atexit cleanup() handler can still send
    # on_finish if the server is stopped while this socket is open.
    CLIENTS[socket.session.session_id] = (request, socket, context)
    try:
        if socket.on_connect():
            events.on_connect.send(request, socket, context)
        while True:
            message = socket.recv()
            if len(message) > 0:
                if MESSAGE_LOG_FORMAT is not None:
                    socket.handler.server.log.write(format_log(request, message))
                # Channel-management messages arrive as two-element lists:
                # ["__subscribe__"|"__unsubscribe__", channel_name].
                if message[0] == "__subscribe__" and len(message) == 2:
                    socket.subscribe(message[1])
                    events.on_subscribe.send(request, socket, context, message[1])
                elif message[0] == "__unsubscribe__" and len(message) == 2:
                    # Handlers are signalled before the channel is removed so
                    # they can still see the subscription being torn down.
                    events.on_unsubscribe.send(request, socket, context, message[1])
                    socket.unsubscribe(message[1])
                else:
                    events.on_message.send(request, socket, context, message)
            else:
                # Empty message: if the socket is no longer connected the
                # client has gone away, so stop the receive loop.
                if not socket.connected():
                    events.on_disconnect.send(request, socket, context)
                    break
    except Exception, exception:
        # Python 2 except syntax. Errors are surfaced to handlers via the
        # on_error event instead of propagating to the WSGI layer.
        print_exc()
        events.on_error.send(request, socket, context, exception)
    # Always emitted, whether the loop ended normally or via an error.
    events.on_finish.send(request, socket, context)
    del CLIENTS[socket.session.session_id]
    return HttpResponse("")
|
Python
| 0
|
@@ -2512,24 +2512,170 @@
t, context)%0A
+ from django_socketio.channels import CHANNELS%0A for channel in socket.channels:%0A CHANNELS%5Bchannel%5D.remove(socket.session.session_id)%0A
del CLIE
|
3587666f209a9e88672e9520c033682fcd28035a
|
add l10n_br_purchase/procurement.py
|
l10n_br_purchase/procurement.py
|
l10n_br_purchase/procurement.py
|
Python
| 0
|
@@ -0,0 +1,1911 @@
+# -*- encoding: utf-8 -*-%0A###############################################################################%0A# #%0A# Copyright (C) 2014 Renato Lima - Akretion #%0A# #%0A#This program is free software: you can redistribute it and/or modify #%0A#it under the terms of the GNU Affero General Public License as published by #%0A#the Free Software Foundation, either version 3 of the License, or #%0A#(at your option) any later version. #%0A# #%0A#This program is distributed in the hope that it will be useful, #%0A#but WITHOUT ANY WARRANTY; without even the implied warranty of #%0A#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #%0A#GNU Affero General Public License for more details. #%0A# #%0A#You should have received a copy of the GNU Affero General Public License #%0A#along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E. #%0A###############################################################################%0A%0Afrom openerp import models, api%0A%0A%0Aclass ProcurementOrder(models.Model):%0A _inherit = %22procurement.order%22%0A%0A @api.model%0A def _run_move_create(self, procurement):%0A result = super(ProcurementOrder, self)._run_move_create(procurement)%0A if procurement.purchase_line_id:%0A result.update(%7B%0A 'fiscal_category_id': procurement.purchase_line_id.fiscal_category_id.id,%0A 'fiscal_position': procurement.purchase_line_id.fiscal_position.id,%0A %7D)%0A return result
|
|
b1d643afb07cef02ab607943776ce120a7d47013
|
move unit test for matrix-vector conversion to new superoperator test module
|
qutip/tests/test_superoperator.py
|
qutip/tests/test_superoperator.py
|
Python
| 0
|
@@ -0,0 +1,2219 @@
+# This file is part of QuTIP.%0A#%0A# QuTIP is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU General Public License as published by%0A# the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A#%0A# QuTIP is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A#%0A# You should have received a copy of the GNU General Public License%0A# along with QuTIP. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A#%0A# Copyright (C) 2011, Paul D. Nation & Robert J. Johansson%0A#%0A###########################################################################%0A%0Afrom numpy.testing import assert_, run_module_suite%0A%0Afrom qutip import *%0A%0Aclass TestMatrixVector:%0A %22%22%22%0A A test class for the QuTiP function for matrix/vector conversion.%0A %22%22%22%0A%0A def testMatrixVectorMatrix(self):%0A %22%22%22%0A Superoperator: Conversion matrix to vector to matrix%0A %22%22%22%0A M = rand(10, 10)%0A V = mat2vec(M)%0A M2 = vec2mat(V)%0A assert_(norm(M-M2) == 0.0)%0A%0A def testVectorMatrixVector(self):%0A %22%22%22%0A Superoperator: Conversion vector to matrix to vector%0A %22%22%22%0A V = rand(100) # a row vector%0A M = vec2mat(V)%0A V2 = mat2vec(M).T # mat2vec returns a column vector%0A assert_(norm(V-V2) == 0.0)%0A %0A def testVectorMatrixIndexConversion(self):%0A %22%22%22%0A Superoperator: Conversion between matrix and vector indices%0A %22%22%22%0A N = 10 %0A for I in range(N*N):%0A i,j = vec2mat_index(N, I)%0A I2 = mat2vec_index(N, i, j) %0A assert_(I == I2)%0A%0A def testVectorMatrixIndexCompability(self):%0A %22%22%22%0A Superoperator: Test compability between matrix/vector conversion and the corresponding index conversion.%0A %22%22%22%0A N = 10 %0A M = rand(N, N)%0A V = mat2vec(M)%0A for I in 
range(N*N):%0A i,j = vec2mat_index(N, I)%0A assert_(V%5BI%5D%5B0%5D == M%5Bi,j%5D)%0A%0Aif __name__ == %22__main__%22:%0A run_module_suite()
|
|
b39dd2afea1f4662e17a927e7e6aa41e850f7470
|
Add a script for generating jamo character table
|
lib/gen-hangul.py
|
lib/gen-hangul.py
|
Python
| 0
|
@@ -0,0 +1,1412 @@
+#!/usr/bin/python3%0A%0A# Input: https://www.unicode.org/Public/UNIDATA/Jamo.txt%0A%0Aimport io%0Aimport re%0A%0Aclass Builder(object):%0A def __init__(self):%0A pass%0A%0A def read(self, infile):%0A chars = %5B%5D%0A for line in infile:%0A if line.startswith('#'):%0A continue%0A line = line.strip()%0A if len(line) == 0:%0A continue%0A data, _comment = line.split('#', 2)%0A codepoint, short_name = data.split(';')%0A short_name = short_name.strip()%0A%0A chars.append((codepoint, short_name))%0A%0A return chars%0A%0A def write(self, data):%0A print('''%5C%0Astruct HangulCharacter%0A%7B%0A gunichar uc;%0A const char *short_name;%0A%7D;''')%0A print('static const struct HangulCharacter hangul_chars%5B%5D =%5Cn %7B')%0A s = ''%0A offset = 0%0A for codepoint, short_name in data:%0A print(' %7B%7B 0x%7B0%7D, %22%7B1%7D%22 %7D%7D,'.format(codepoint, short_name))%0A print(' %7D;')%0A%0Aif __name__ == '__main__':%0A import argparse%0A%0A parser = argparse.ArgumentParser(description='build')%0A parser.add_argument('infile', type=argparse.FileType('r'),%0A help='input file')%0A args = parser.parse_args()%0A%0A builder = Builder()%0A # FIXME: argparse.FileType(encoding=...) is available since Python 3.4%0A data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))%0A builder.write(data)%0A
|
|
415e3e1ae3a6c5689f3960d2b3f589cf2c733144
|
Create conf.py
|
conf.py
|
conf.py
|
Python
| 0.000001
|
@@ -0,0 +1,905 @@
+# -*- coding: utf-8 -*-%0A#%0Aimport os%0A%0A# on_rtd is whether we are on readthedocs.org%0Aon_rtd = os.environ.get('READTHEDOCS', None) == 'True'%0A%0Aif not on_rtd: # only import and set the theme if we're building docs locally%0A import sphinx_rtd_theme%0A html_theme = 'sphinx_rtd_theme'%0A html_theme_path = %5Bsphinx_rtd_theme.get_html_theme_path()%5D%0A%0A # Override default css to get a larger width for local build%0A def setup(app):%0A app.add_stylesheet('mystyle.css')%0Aelse:%0A # Override default css to get a larger width for ReadTheDoc build%0A html_context = %7B%0A 'css_files': %5B%0A '_static/mystyle.css',%0A %5D,%0A %7D%0A%0A# otherwise, readthedocs.org uses their theme by default, so no need to specify it%0A%0A# The suffix of source filenames.%0Asource_suffix = '.rst'%0A%0A# The master toctree document.%0Amaster_doc = 'index'%0A%0A# General information about the project.%0Aproject = 'FIWARE-SDC'%0A
|
|
72f1dab3fe50a552480df522f6c8c4a7002a0952
|
Add TimestampsMixin exmples
|
examples/timestamp.py
|
examples/timestamp.py
|
Python
| 0
|
@@ -0,0 +1,1141 @@
+from __future__ import print_function%0A%0Aimport time%0Afrom datetime import datetime%0A%0Aimport sqlalchemy as sa%0Afrom sqlalchemy.ext.declarative import declarative_base%0Afrom sqlalchemy.orm import scoped_session, sessionmaker%0A%0Afrom sqlalchemy_mixins import TimestampsMixin%0A%0ABase = declarative_base()%0Aengine = sa.create_engine(%22sqlite:///:memory:%22)%0Asession = scoped_session(sessionmaker(bind=engine))%0A%0A%0Aclass BaseModel(Base, TimestampsMixin):%0A __abstract__ = True%0A pass%0A%0A%0Aclass User(BaseModel):%0A %22%22%22User Model to example.%22%22%22%0A%0A __tablename__ = %22users%22%0A%0A id = sa.Column(sa.Integer, primary_key=True)%0A name = sa.Column(sa.String)%0A%0A%0ABase.metadata.create_all(engine)%0A%0A%0Aprint(%22Current time: %22, datetime.utcnow())%0A# Current time: 2019-03-04 03:53:53.605602%0A%0Abob = User(name=%22Bob%22)%0Asession.add(bob)%0Asession.flush()%0A%0Aprint(%22Created Bob: %22, bob.created_at)%0A# Created Bob: 2019-03-04 03:53:53.606765%0A%0Aprint(%22Pre-update Bob: %22, bob.updated_at)%0A# Pre-update Bob: 2019-03-04 03:53:53.606769%0A%0Atime.sleep(5)%0A%0Abob.name = %22Robert%22%0Asession.commit()%0A%0Aprint(%22Updated Bob: %22, bob.updated_at)%0A# Updated Bob: 2019-03-04 03:53:58.613044%0A
|
|
7799b7a3ea1b1774ce24376ee918376b422daebd
|
Create cube.py
|
cube.py
|
cube.py
|
Python
| 0.000015
|
@@ -0,0 +1,1679 @@
+import numpy as np%0Aimport pandas as pd%0Aimport keras%0Aimport pandas as pd%0Aimport keras.preprocessing.text%0Aimport somecode as some%0A%0Aclass Cube:%0A %0A '''%0A %0A INTENDED USE %3E to be called through FastText() class. %0A %0A Takes in pandas dataframe with at least two columns where one%0A is the dependent variable, and one is text. %0A %0A EXAMPLE USE: %0A %0A Cube(data,var)%0A %0A If there is more than one possible depedent variable in df then%0A there you can run the moddle for any of it. %0A %0A %0A '''%0A %0A %0A def __init__(self,data,var):%0A %0A self.data = data%0A self.var = var%0A self.x,self.y = self._data_sets()%0A self.x_train, self.y_train, self.x_test, self.y_test = self._split_data()%0A %0A def _word_index(self):%0A%0A out = %5B%5D%0A i = 0%0A n = len(self.data)%0A%0A for item in self.data.text:%0A%0A temp = keras.preprocessing.text.one_hot(item, n, lower=True, split=%22 %22)%0A out.insert(i,temp)%0A i += 1%0A%0A return out%0A %0A%0A def _data_sets(self):%0A%0A data = self.data.sample(frac=1)%0A%0A x = self._word_index()%0A y = data%5Bself.var%5D%0A%0A return x,y%0A%0A%0A def _split_data(self):%0A%0A length = len(self.x)%0A i = length - (length / 3)%0A%0A self.x_test = self.x%5B:i%5D%0A self.x_test = np.array(self.x_test)%0A%0A self.x_train = self.x%5Bi+1:%5D%0A self.x_train = np.array(self.x_train)%0A%0A self.y_test = self.y%5B:i%5D%0A self.y_test = np.array(self.y_test)%0A self.y_train = self.y%5Bi+1:%5D%0A self.y_train = np.array(self.y_train)%0A%0A return self.x_train, self.y_train, self.x_test, self.y_test%0A
|
|
67d3b321edab1fe50f666d0ada86c8392be07199
|
add wire_callback
|
pyaudio/wire_callback.py
|
pyaudio/wire_callback.py
|
Python
| 0.000001
|
@@ -0,0 +1,718 @@
+#!/usr/bin/env python%0A%0A%22%22%22%0APyAudio Example: Make a wire between input and output (i.e., record a%0Afew samples and play them back immediately).%0A%0AThis is the callback (non-blocking) version.%0A%22%22%22%0A%0Aimport pyaudio%0Aimport time%0A%0AWIDTH = 2%0ACHANNELS = 2%0ARATE = 44100%0A%0Ap = pyaudio.PyAudio()%0A%0A%0Adef callback(in_data, frame_count, time_info, status):%0A return (in_data, pyaudio.paContinue)%0A%0A%0Astream = p.open(format=p.get_format_from_width(WIDTH),%0A channels=CHANNELS,%0A rate=RATE,%0A input=True,%0A output=True,%0A stream_callback=callback)%0A%0Astream.start_stream()%0A%0Awhile stream.is_active():%0A time.sleep(0.1)%0A%0Astream.stop_stream()%0Astream.close()%0A%0Ap.terminate()%0A
|
|
34815186871e27b977082d9c35dd0adc76d3af9f
|
update stencilview doc (128 levels, not 8)
|
kivy/uix/stencilview.py
|
kivy/uix/stencilview.py
|
'''
Stencil View
============
.. versionadded:: 1.0.4
:class:`StencilView` limits the drawing of child widgets to the StencilView's
bounding box. Any drawing outside the bounding box will be clipped (trashed).
The StencilView uses the stencil graphics instructions under the hood. It
provides an efficient way to clip the drawing area of children.
.. note::
As with the stencil graphics instructions, you cannot stack more than 8
stencil-aware widgets.
.. note::
StencilView is not a layout. Consequently, you have to manage the size and
position of its children directly. You can combine (subclass both)
a StencilView and a Layout in order to achieve a layout's behavior.
For example::
class BoxStencil(BoxLayout, StencilView):
pass
'''
__all__ = ('StencilView', )
from kivy.uix.widget import Widget
class StencilView(Widget):
    '''Widget that limits the drawing of its children to its own bounding
    box, using stencil graphics instructions under the hood. See the
    module documentation for usage notes and limitations.
    '''
    pass
|
Python
| 0
|
@@ -430,16 +430,18 @@
re than
+12
8%0A st
|
e8798ac01d3baed6785ee0683ec4989b97e47003
|
Implement local.shell operation
|
pyinfra/modules/local.py
|
pyinfra/modules/local.py
|
Python
| 0.00018
|
@@ -0,0 +1,1688 @@
+# pyinfra%0A# File: pyinfra/modules/local.py%0A# Desc: run stuff locally, within the context of operations%0A%0Afrom subprocess import Popen, PIPE%0A%0Aimport gevent%0Afrom termcolor import colored%0A%0Afrom pyinfra.api import operation%0Afrom pyinfra.api.util import read_buffer%0A%0A%0Adef _run_local(code, hostname, host, print_output=False, print_prefix=None):%0A '''Subprocess based implementation of pyinfra/api/ssh.py's run_shell_command.'''%0A process = Popen(code, shell=True, stdout=PIPE, stderr=PIPE)%0A%0A # Note that gevent's subprocess module does not allow for %22live%22 reading from a process,%0A # so the readlines() calls below only return once the process is complete. Thus the whole%0A # greenlet spawning/etc below is *currently* pointless.%0A%0A # TODO: implement fake file object as a pipe to read from/to as buffer, live%0A # see: https://bitbucket.org/eriks5/gevent-subprocess/src/550405f060a5f37167c0be042baaee6075b3d28e/src/gevsubprocess/pipe.py?at=default%0A stdout_reader = gevent.spawn(%0A read_buffer, process.stdout.readlines(),%0A print_output=print_output,%0A print_func=lambda line: u'%7B0%7D%7B1%7D'.format(print_prefix, line)%0A )%0A stderr_reader = gevent.spawn(%0A read_buffer, process.stderr.readlines(),%0A print_output=print_output,%0A print_func=lambda line: u'%7B0%7D%7B1%7D'.format(print_prefix, colored(line, 'red'))%0A )%0A%0A # Wait for the process to complete & return%0A gevent.wait((stdout_reader, stderr_reader))%0A return process.wait() %3C= 0%0A%0A%0A@operation%0Adef shell(*code):%0A '''Runs shell commands locally in a subprocess.'''%0A return %5B%0A (lambda *args, **kwargs: _run_local(c, *args, **kwargs), (), %7B%7D)%0A for c in code%0A %5D%0A
|
|
a2b9a17927d851b368d3ef8e869a295c8bd2e86b
|
add test for default clustering order of SELECT
|
test/cql-pytest/test_clustering_order.py
|
test/cql-pytest/test_clustering_order.py
|
Python
| 0.000012
|
@@ -0,0 +1,2357 @@
+# Copyright 2022-present ScyllaDB%0A#%0A# SPDX-License-Identifier: AGPL-3.0-or-later%0A%0A#############################################################################%0A# Tests for clustering key ordering, namely the WITH CLUSTERING ORDER BY%0A# setting in the table schema, and ORDER BY in select.%0A#%0A# We have many other tests for this feature - in C++ tests, in translated%0A# unit tests from Cassandra (cassandra_tests), and its interaction with%0A# other features (filtering, secondary indexes, etc.) in other test files.%0A%0Aimport pytest%0A%0Afrom util import new_test_table, unique_key_int%0A%0A@pytest.fixture(scope=%22module%22)%0Adef table_int_desc(cql, test_keyspace):%0A schema=%22k INT, c INT, PRIMARY KEY (k, c)%22%0A order=%22WITH CLUSTERING ORDER BY (c DESC)%22%0A with new_test_table(cql, test_keyspace, schema, order) as table:%0A yield table%0A%0A# Verify that if a table is created with descending order for its%0A# clustering key, the default ordering of SELECT is changed to descending%0A# order. 
This was contrary to our documentation which used to suggest%0A# that SELECT always defaults to ascending order.%0Adef test_select_default_order(cql, table_int_desc):%0A k = unique_key_int()%0A stmt = cql.prepare(f'INSERT INTO %7Btable_int_desc%7D (k, c) VALUES (%7Bk%7D, ?)')%0A numbers = range(5)%0A for i in numbers:%0A cql.execute(stmt, %5Bi%5D)%0A # In a table created with descending sort order, the default select%0A # order is descending:%0A rows = %5B(i,) for i in numbers%5D%0A reverse_rows = %5B(i,) for i in reversed(numbers)%5D%0A assert reverse_rows == list(cql.execute(f'SELECT c FROM %7Btable_int_desc%7D WHERE k = %7Bk%7D'))%0A # Confirm that when specifying the order explicitly, both work:%0A assert rows == list(cql.execute(f'SELECT c FROM %7Btable_int_desc%7D WHERE k = %7Bk%7D ORDER BY c ASC'))%0A assert reverse_rows == list(cql.execute(f'SELECT c FROM %7Btable_int_desc%7D WHERE k = %7Bk%7D ORDER BY c DESC'))%0A # Repeat the same three assertions as above, adding a %22limit%22 of N=3:%0A N=3%0A rows = rows%5B0:N%5D%0A reverse_rows = reverse_rows%5B0:N%5D%0A assert reverse_rows == list(cql.execute(f'SELECT c FROM %7Btable_int_desc%7D WHERE k = %7Bk%7D LIMIT %7BN%7D'))%0A assert rows == list(cql.execute(f'SELECT c FROM %7Btable_int_desc%7D WHERE k = %7Bk%7D ORDER BY c ASC LIMIT %7BN%7D'))%0A assert reverse_rows == list(cql.execute(f'SELECT c FROM %7Btable_int_desc%7D WHERE k = %7Bk%7D ORDER BY c DESC LIMIT %7BN%7D'))%0A
|
|
b602c3467ee5969bc3292b7e494d60b9ccdbbedb
|
remove sum or c number
|
qutip/tests/test_rand.py
|
qutip/tests/test_rand.py
|
#This file is part of QuTIP.
#
# QuTIP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QuTIP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QuTIP. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011-2013, Paul D. Nation & Robert J. Johansson
#
###########################################################################
from numpy.testing import assert_, assert_equal, run_module_suite
from qutip import *
class TestRand:
    """
    A test class for the built-in random quantum object generators.
    """

    def testRandUnitary(self):
        "random Unitary"
        # A unitary must satisfy U * U.dag() == identity.
        U = array([rand_unitary(5) for k in range(5)])
        for k in range(5):
            assert_equal(U[k] * U[k].dag() == qeye(5), True)

    def testRandherm(self):
        "random hermitian"
        H = array([rand_herm(5) for k in range(5)])
        for k in range(5):
            assert_equal(H[k].isherm == True, True)

    def testRanddm(self):
        "random density matrix"
        R = array([rand_dm(5) for k in range(5)])
        for k in range(5):
            # tr() returns a scalar, so the old sum(...) wrapper was
            # redundant; use abs() so deviations in either direction are
            # caught (the old one-sided check passed for any trace < 1).
            # Tolerance slightly looser than 1e-15 to allow floating-point
            # error in the trace normalization.
            assert_(abs(R[k].tr() - 1.0) < 1e-13)
            # Verify all eigenvalues are >= 0. The old expression
            # "not any(sp_eigs(...)) < 0" compared a bool with 0 and was
            # therefore always true; compare eigenvalues elementwise.
            assert_(not any(sp_eigs(R[k], vecs=False) < 0))
            # verify hermitian
            assert_(R[k].isherm)

    def testRandket(self):
        "random ket"
        P = array([rand_ket(5) for k in range(5)])
        for k in range(5):
            assert_equal(P[k].type == 'ket', True)

if __name__ == "__main__":
    run_module_suite()
|
Python
| 0.025778
|
@@ -1558,12 +1558,8 @@
ual(
-sum(
R%5Bk%5D
@@ -1563,17 +1563,16 @@
%5Bk%5D.tr()
-)
-1.0%3C1e-
|
796561ed822d64be6fd2ef299093711a8534d0e9
|
add package py-lmodule version 0.1.0 (#18856)
|
var/spack/repos/builtin/packages/py-lmodule/package.py
|
var/spack/repos/builtin/packages/py-lmodule/package.py
|
Python
| 0
|
@@ -0,0 +1,1035 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PyLmodule(PythonPackage):%0A %22%22%22Lmodule is a Python API for Lmod module system. It's primary purpose is%0A to help automate module testing. Lmodule uses Lmod spider tool to query%0A all modules in-order to automate module testing. Lmodule can be used with%0A environment-modules to interact with module using the Module class.%22%22%22%0A%0A homepage = %22https://lmodule.readthedocs.io/en/latest/%22%0A url = %22https://pypi.io/packages/source/l/lmodule/lmodule-0.1.0.tar.gz%22%0A git = %22https://github.com/buildtesters/lmodule%22%0A%0A maintainers = %5B'shahzebsiddiqui'%5D%0A%0A version('0.1.0', sha256='cac8f3dad2df27b10e051b2c56ccbde1fcdd7044af594d13fd2e4144d3d46a29')%0A%0A depends_on('python@3.6.0:', type=('build', 'run'))%0A depends_on('py-setuptools', type='build')%0A depends_on('lmod@7.0:', type='run')%0A
|
|
f3bf91c8a9ba3a043f0ba4a11c2347e9b4a3c8be
|
Add linkins.script
|
linkins/script.py
|
linkins/script.py
|
Python
| 0
|
@@ -0,0 +1,767 @@
+import logging%0Aimport subprocess%0A%0Alog = logging.getLogger(__name__)%0Alog.propagate = False%0Ahandler = logging.StreamHandler()%0Afmt = logging.Formatter(%0A fmt='%25(script)s: %25(stream)s: %25(message)s',%0A )%0Ahandler.setFormatter(fmt)%0Alog.addHandler(handler)%0A%0Adef _logscript(fp, **kwargs):%0A for line in fp:%0A line = line.strip()%0A log.info(line, extra=kwargs)%0A%0Adef runscript(path, name):%0A proc = subprocess.Popen(%0A %5Bpath%5D,%0A stdout=subprocess.PIPE,%0A stderr=subprocess.PIPE,%0A )%0A with proc.stderr as fp:%0A _logscript(%0A fp,%0A script=name,%0A stream='STDERR',%0A )%0A with proc.stdout as fp:%0A _logscript(%0A fp,%0A script=name,%0A stream='STDOUT',%0A )%0A
|
|
2ff8505db7ee0b4dbf08a2a61d00daaf681f5492
|
Create dlpp.py
|
dlpp.py
|
dlpp.py
|
Python
| 0.000001
|
@@ -0,0 +1,2406 @@
+#!/usr/bin/env python%0A%0A# dl_poly_parse%0A# If ran as script, takes a DL_POLY OUTPUT file and returns the physical properties as a parsed%0A# file of simple columns, for easy readability by plotting software.%0A#%0A# To do:%0A# * give option to output as csv%0A# * give option to return properties as horizontally or vertically sorted%0A# * allow importing as library to get single properties as lists%0A%0Adef getLines(OUTPUT):%0A%09with open(OUTPUT, %22r%22) as f:%0A%09%09lines = f.readlines()%0A%09return lines%0A%0Adef getHeaders(OUTPUT, BREAK):%0A%09lines = getLines(OUTPUT)%0A%09firstBreak = lines.index(BREAK)%0A%09headers = lines%5BfirstBreak+2%5D.split() + lines%5BfirstBreak+3%5D.split() + lines%5BfirstBreak+4%5D.split()%0A%09headers.remove(%22(s)%22)%0A%09headers%5Bheaders.index(%22cpu%22)%5D = %22cpu (s)%22%0A%09return headers%0A%0A%0Adef getProperty(OUTPUT, property):%0A%09pass%0A%0Adef sortList(unsorted):%0A%09# returns list reading down each column of 3 in OUTPUT rather than across each row%0A%09# this puts certain values usefully adjacent to each other e.g. time, step, cpu%0A%09# but separates others e.g. 
alpha, beta, gamma%0A%09sort = %5B%5D%0A%09for i in range(0,len(unsorted)):%0A%09%09triple = unsorted%5Bi::10%5D%0A%09%09for j in triple:%0A%09%09%09sort.append(j)%0A%09return sort%5B:30%5D%0A%0Adef getAllProps(OUTPUT, BREAK):%0A%09# returns physical properties as a huge list of lists%0A%0A%09lines = getLines(OUTPUT)%0A%09headers = getHeaders(OUTPUT, BREAK)%0A%09properties = %5B%5D%0A%0A%09for i, l in enumerate(lines):%0A%09%09if l == BREAK and len(lines%5Bi+1%5D.split()) == 10: # data always found in lines of 10 after BREAK%0A%09%09%09values = lines%5Bi+1%5D.split() + lines%5Bi+2%5D.split() + lines%5Bi+3%5D.split()%0A%09%09%09%0A%09%09%09if properties == %5B%5D:%09# fill with lists of initial values if empty%0A%09%09%09%09properties = %5B%5Bv%5D for v in values%5D%0A%09%09%09else: %09%09%09%09%09# append otherwise%0A%09%09%09%09for j, p in enumerate(properties):%0A%09%09%09%09%09p.append(values%5Bj%5D)%0A%0A%09return len(properties%5B0%5D), headers, properties%0A%09%09# could optimise by initialising each list with zeroes%0A%0Adef main():%0A%09OUTPUT = %22OUTPUT%22%0A%09PARSED = %22PARSED%22%0A%09BREAK = %22 ------------------------------------------------------------------------------------------------------------------------%5Cn%22%0A%0A%09n, headers, properties = getAllProps(OUTPUT, BREAK)%0A%09sortedHeaders = sortList(headers)%0A%09sortedProps = sortList(properties)%0A%0A%09parsed = %22%22%0A%09for h in sortedHeaders:%0A%09%09parsed += %22%25-12s%22 %25 (h)%0A%09for i in range(0,n):%0A%09%09parsed += %22%5Cn%22%0A%09%09for p in sortedProps:%0A%09%09%09parsed += %22%25-11s %22 %25 (p%5Bi%5D)%0A%0A%0A%09with open(%22PARSED%22, %22w%22) as f:%0A%09%09f.write(parsed)%0A%0Aif __name__ == '__main__':%0A%09main()%0A
|
|
e13ed4cfa39b366685d058501be2e65b5bbf1230
|
Make language setup compatible with OSX Yosemite's `locale -a` output
|
dodo.py
|
dodo.py
|
import os
import fnmatch
import locale
import subprocess
# doit configuration: running bare `doit` executes the linter and the
# test suite, and only tasks that actually executed are reported.
DOIT_CONFIG = {
    'default_tasks': ['flake8', 'test'],
    'reporter': 'executed-only',
}
def recursive_glob(path, pattern):
    """Walk *path* recursively (following symlinks) and yield the full
    paths of all files whose basename matches the glob *pattern*."""
    for dirpath, _subdirs, filenames in os.walk(path, followlinks=True):
        for name in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, name)
def task_flake8():
    """flake8 - static check for python files"""
    # Single sub-task named after the absolute path of the nikola package.
    yield dict(
        name=os.path.join(os.getcwd(), 'nikola'),
        actions=['flake8 nikola/'],
    )
def task_pep257():
    """pep257 -- static check for docstring style"""
    # Skip hidden directories and the data/ tree when matching.
    command = "pep257 --count --match-dir='(?!^\.)(?!data).*' nikola/"
    yield dict(
        name=os.path.join(os.getcwd(), 'nikola'),
        actions=[command],
    )
def task_locale():
    """set environ locale vars used in nikola tests"""
    def set_nikola_test_locales():
        """Find two usable UTF-8 locales with distinct languages and export
        them as NIKOLA_LOCALE_DEFAULT / NIKOLA_LOCALE_OTHER ("lang,name"
        pairs). Returns False (task failure) when two cannot be found."""
        try:
            out = subprocess.check_output(['locale', '-a'])
            out = out.decode('utf-8')
            locales = []
            languages = set()
            for line in out.splitlines():
                # glibc spells UTF-8 locales "xx_YY.utf8" while OSX
                # (e.g. Yosemite) reports "xx_YY.UTF-8" -- accept both.
                if ((line.endswith('.utf8') or line.endswith('.UTF-8'))
                        and '_' in line):
                    lang = line.split('_')[0]
                    if lang not in languages:
                        # Skip locales the C library refuses to activate.
                        try:
                            locale.setlocale(locale.LC_ALL, str(line))
                        except:
                            continue
                        languages.add(lang)
                        locales.append((lang, line))
                        if len(locales) == 2:
                            break
            if len(locales) != 2:
                return False  # task failed
            else:
                os.environ['NIKOLA_LOCALE_DEFAULT'] = ','.join(locales[0])
                os.environ['NIKOLA_LOCALE_OTHER'] = ','.join(locales[1])
        finally:
            # restore to default locale
            locale.resetlocale()
    return {'actions': [set_nikola_test_locales], 'verbosity': 2}
def task_doctest():
    """run doctests with py.test"""
    return {'actions': ['py.test --doctest-modules nikola/'],
            'verbosity': 2}
def task_test():
    """run unit-tests using py.test"""
    # Locale env vars and doctests must be set up/run before the unit tests.
    return {
        'actions': ['py.test tests/'],
        'task_dep': ['locale', 'doctest'],
    }
def task_coverage():
    """run unit-tests using py.test, with coverage reporting"""
    # Same prerequisites as task_test, plus coverage collection flags.
    return {
        'actions': ['py.test --cov nikola --cov-report term-missing tests/'],
        'task_dep': ['locale', 'doctest'],
        'verbosity': 2,
    }
def task_gen_completion():
    """generate tab-completion scripts"""
    template = 'nikola tabcompletion --shell {0} --hardcode-tasks > _nikola_{0}'
    for shell in ('bash', 'zsh'):
        task = {
            'name': shell,
            'actions': [template.format(shell)],
            'targets': ['_nikola_{0}'.format(shell)],
        }
        yield task
|
Python
| 0.000003
|
@@ -1181,16 +1181,17 @@
if
+(
line.end
@@ -1204,16 +1204,44 @@
'.utf8')
+ or line.endswith('.UTF-8'))
and '_'
|
8967d4e0c5cd9adad7244cfc2ea78593be14b113
|
Add regression test template
|
templates/tests/regression_test.py
|
templates/tests/regression_test.py
|
Python
| 0.000001
|
@@ -0,0 +1,379 @@
+# coding: utf-8%0Afrom __future__ import unicode_literals%0A%0Aimport pytest%0A%0A%0Adef test_issueXXX():%0A %22%22%22Provide a description of what you're testing for here.%22%22%22%0A%0A # to use spaCy components, add the fixture names as arguments to the test%0A # for more info, check out the tests README:%0A # https://github.com/explosion/spaCy/blob/master/spacy/tests/README.md%0A # test stuff%0A
|
|
ca85166164cb5ac726723b83c00c6f305d0a0b45
|
Update forward compatibility horizon to 2022-05-16
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2022, 5, 15)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
# Cached integer encoding of the effective horizon date; populated by
# _update_forward_compatibility_date_number() at import time.
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
  """Update the base date to compare in forward_compatible function.

  Args:
    date_to_override: optional `datetime.date` that replaces the compiled-in
      horizon. When absent, the TF_FORWARD_COMPATIBILITY_DELTA_DAYS
      environment variable (if set) shifts the horizon forward by that many
      days.
  """
  global _FORWARD_COMPATIBILITY_DATE_NUMBER

  effective = date_to_override or _FORWARD_COMPATIBILITY_HORIZON
  if not date_to_override:
    # The env-var delta only applies to the compiled-in horizon, never to an
    # explicit override.
    delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
    if delta_days:
      effective = effective + datetime.timedelta(days=int(delta_days))

  if effective < _FORWARD_COMPATIBILITY_HORIZON:
    # Moving the horizon backwards is never honored.
    logging.warning("Trying to set the forward compatibility date to the past"
                    " date %s. This will be ignored by TensorFlow." %
                    (effective))
    return
  _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
      effective.year, effective.month, effective.day)
# Prime the cached date number once at module import.
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1335,17 +1335,17 @@
22, 5, 1
-5
+6
)%0A_FORWA
|
31da2ab1401b933e031a91aac2aa474e4099aa58
|
Update forward compatibility horizon to 2019-01-31
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# Horizon date bumped daily by an automated change; graphs gated on earlier
# dates are considered forward compatible.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 1, 30)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibiltiy, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args :
year: A year (e.g. 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Yields:
Nothing.
"""
global _FORWARD_COMPATIBILITY_HORIZON
try:
old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
yield
finally:
_FORWARD_COMPATIBILITY_HORIZON = old_compat_date
|
Python
| 0
|
@@ -1139,17 +1139,17 @@
19, 1, 3
-0
+1
)%0A%0A%0A@tf_
|
47734733a7ccbd242979b3c7ac9d792f59ac10d8
|
Test for HERMES spectra of HD22879
|
code/test_hd22879.py
|
code/test_hd22879.py
|
Python
| 0
|
@@ -0,0 +1,1601 @@
+import cPickle as pickle%0A%0Afrom stellar_parameters import Star%0Afrom channel import SpectralChannel%0A%0A%0A%0A%0Aclass spectrum(object):%0A pass%0A%0Aimport sick%0Aspec = sick.specutils.Spectrum.load(%22spectra/hermes-sun.fits%22)%0A%0A%0Ablue_channel = spectrum()%0Ablue_channel.dispersion = spec.disp%0Ablue_channel.flux = spec.flux%0Ablue_channel.variance = spec.variance%0A%0A%0Awith open(%22transitions.pkl%22, %22rb%22) as fp:%0A transitions = pickle.load(fp)%0A%0A%0A# Get just blue channel ones%0Atransition_indices = (blue_channel.dispersion%5B-1%5D %3E transitions%5B%22rest_wavelength%22%5D) * (transitions%5B%22rest_wavelength%22%5D %3E blue_channel.dispersion%5B0%5D)%0A%0Ause_regions = np.array(%5B%0A %5B4731.3, 4731.65%5D,%0A %5B4742.65, 4742.93%5D,%0A %5B4757.95, 4748.31%5D,%0A %5B4759.1, 4759.56%5D,%0A %5B4764.43, 4764.47%5D,%0A %5B4778.08, 4778.41%5D,%0A %5B4779.78, 4780.2%5D,%0A %5B4781.59, 4781.92%5D,%0A %5B4788.41, 4789%5D,%0A %5B4789.91, 4790.19%5D,%0A %5B4795.24, 4795.66%5D,%0A %5B4798.39, 4798.64%5D,%0A %5B4802.69, 4803.2%5D,%0A %5B4805.3, 4805.71%5D,%0A %5B4807.95, 4808.35%5D,%0A %5B4820.23, 4820.6%5D,%0A %5B4847.89, 4848.02%5D,%0A %5B4869.85, 4870.3%5D,%0A %5B4873.88, 4874.19%5D,%0A %5B4884.95, 4885.25%5D,%0A# %5B4889.9, 4892.67%5D,%0A %5B4894.7, 4895.0%5D%0A%5D)%0A%0A#use_regions = np.array(%5B%0A# %5B4705, 4850.%5D,%0A# %5B4880., 5000.%5D%0A#%5D)%0A%0Amask = np.empty(len(blue_channel.dispersion))%0Amask%5B:%5D = np.nan%0Afor row in use_regions:%0A indices = blue_channel.dispersion.searchsorted(row)%0A mask%5Bindices%5B0%5D:indices%5B1%5D + 1%5D = 1.%0A%0Aprint(np.sum(np.isfinite(mask)))%0A%0Ablue = SpectralChannel(blue_channel, transitions%5Btransition_indices%5D, mask=mask, redshift=False, continuum_order=3, wl_tolerance=0.1, wl_cont=2, outliers=True)%0A%0A%0A%0A%0A%0A
|
|
1a6f702b670a4cad2ec1cd4044759ecfc656c9f2
|
add thread
|
thread/thread.py
|
thread/thread.py
|
Python
| 0
|
@@ -0,0 +1,481 @@
+#!/usr/bin/env python %0A%0Aimport thread%0Afrom time import sleep, ctime%0A%0Adef thread0():%0A print '1 : start @ ', ctime()%0A sleep(4)%0A print '1 : end @ ', ctime()%0A%0Adef thread1():%0A print '2 : start @ ', ctime()%0A sleep(4)%0A print '2 : end @ ', ctime()%0A%0Adef main():%0A print 'starting at:', ctime()%0A thread.start_new_thread(thread0, ())%0A thread.start_new_thread(thread1, ())%0A sleep(6)%0A print 'all DONE at: ', ctime()%0A%0Aif __name__ == '__main__':%0A main()%0A%0A%0A%0A
|
|
2feed8b291fd4c8081bb81458bedd736c08c448e
|
Add CNN example script.
|
usr/examples/09-Feature-Detection/cnn.py
|
usr/examples/09-Feature-Detection/cnn.py
|
Python
| 0
|
@@ -0,0 +1,1259 @@
+# CMSIS CNN example.%0Aimport sensor, image, time, os%0A%0Asensor.reset() # Reset and initialize the sensor.%0Asensor.set_contrast(3)%0Asensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)%0Asensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)%0Asensor.set_windowing((200, 200)) # Set 128x128 window.%0Asensor.skip_frames(time = 100) # Wait for settings take effect.%0Asensor.set_auto_gain(False)%0Asensor.set_auto_exposure(False)%0A%0Alabels = %5B'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'%5D%0A%0Aclock = time.clock() # Create a clock object to track the FPS.%0Awhile(True):%0A clock.tick() # Update the FPS clock.%0A img = sensor.snapshot().lens_corr(1.6) # Take a picture and return the image.%0A out = img.classify_object()%0A # print label_id:confidence%0A #for i in range(0, len(out)):%0A # print(%22%25s:%25d %22%25(labels%5Bi%5D, out%5Bi%5D), end=%22%22)%0A max_idx = out.index(max(out))%0A print(%22%25s : %250.2f%25%25 %22%25(labels%5Bmax_idx%5D, (out%5Bmax_idx%5D/128)*100))%0A%0A #print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected%0A # to the IDE. The FPS should increase once disconnected.%0A
|
|
0080f3a4f93a22b9c563c20d2c93b00ce8b7c382
|
Set up game structure
|
game.py
|
game.py
|
Python
| 0.000003
|
@@ -0,0 +1,1012 @@
+%22%22%22%0D%0AA variant of Conway's Game of Life on a hexagonal grid.%0D%0A%0D%0ARules: B2/S12%0D%0A - Dead cells with two live neighbours are born.%0D%0A - Live cells with one or two live neighbours survive.%0D%0A - All other live cells die.%0D%0A%0D%0A%22%22%22%0D%0A%0D%0A# Rule Configuration%0D%0ASTATES = ('DEAD', 'ALIVE')%0D%0AB = (2,)%0D%0AS = (1, 2)%0D%0A%0D%0A%0D%0Aclass Game:%0D%0A def __init__(self, seed, max_steps=100):%0D%0A self.generation = seed%0D%0A self.max = max_steps%0D%0A self.count = 0%0D%0A%0D%0A def play(self):%0D%0A self.generation.draw()%0D%0A%0D%0A while self.count %3C self.max:%0D%0A self.generation = self.generation.tick()%0D%0A self.generation.draw()%0D%0A self.count += 1%0D%0A%0D%0A%0D%0Aclass Generation:%0D%0A def __init__(self, rows, cols):%0D%0A self.rows = rows%0D%0A self.cols = cols%0D%0A%0D%0A def draw(self):%0D%0A # TODO%0D%0A pass%0D%0A%0D%0A def tick(self):%0D%0A # TODO%0D%0A pass%0D%0A%0D%0A def _survives(self, row, col):%0D%0A # TODO%0D%0A pass%0D%0A%0D%0A def _is_born(self, row, col):%0D%0A # TODO%0D%0A pass%0D%0A
|
|
72e69f3535c7e2cd82cdda62636eabd7421ebddf
|
Add dump script for all hiddens
|
generative/tests/compare_test/concat_first/dump_hiddens.py
|
generative/tests/compare_test/concat_first/dump_hiddens.py
|
Python
| 0
|
@@ -0,0 +1,799 @@
+from __future__ import division%0Afrom __future__ import print_function%0Afrom __future__ import absolute_import%0A%0Aimport os%0Aimport subprocess%0A%0Aif __name__ == %22__main__%22:%0A for hiddens_dim in %5B512, 256, 128, 64, 32, 16%5D: %0A print('Dumping files for (%25d)' %25 hiddens_dim)%0A model_path = '/mnt/visual_communication_dataset/trained_models_5_30_18/hiddens_fc6/%25d/model_best.pth.tar' %25 hiddens_dim%0A out_dir = './dump_hiddens_outputs/%25d/' %25 hiddens_dim%0A if not os.path.isdir(out_dir):%0A os.makedirs(out_dir)%0A command = 'CUDA_VISIBLE_DEVICES=7 python dump.py %7Bmodel%7D --train-test-split-dir ./train_test_split/1 --out-dir %7Boutdir%7D --average-labels --overwrite-layer fc6 --cuda'.format(model=model_path, outdir=out_dir)%0A subprocess.call(command, shell=True)%0A%0A
|
|
6e0202bb2385821907627046aef28b042961a2be
|
Create gate.py
|
gate.py
|
gate.py
|
Python
| 0.000001
|
@@ -0,0 +1 @@
+%0A
|
|
e20e50c7cb1a22907bc83eec6c595a7bbaf8b8b9
|
Add test_github.py
|
tests/core/backends/test_github.py
|
tests/core/backends/test_github.py
|
Python
| 0.000004
|
@@ -0,0 +1,1328 @@
+# -*- coding: utf-8 -*-%0Aimport pytest%0Aimport requests%0A%0Afrom kawasemi.backends.github import GitHubChannel%0Afrom kawasemi.exceptions import HttpError, ImproperlyConfigured%0A%0A%0Aconfig = %7B%0A %22_backend%22: %22kawasemi.backends.github.GitHubChannel%22,%0A %22token%22: %22token%22,%0A %22owner%22: %22ymyzk%22,%0A %22repository%22: %22kawasemi%22%0A%7D%0A%0A%0A@pytest.fixture()%0Adef channel():%0A return GitHubChannel(**config)%0A%0A%0Aclass TestGitHubChannel(object):%0A def test_send(self, channel, mocker):%0A post = mocker.patch(%22requests.post%22)%0A response = requests.Response()%0A response.status_code = requests.codes.created%0A post.return_value = response%0A%0A channel.send(%22My Issue Title%22)%0A%0A channel.send(%22Issue Title%22, options=%7B%0A %22github%22: %7B%0A %22body%22: %22%22%22## ToDo%0A- %5B %5D Introduce A%0A- %5B %5D Refactor B%22%22%22,%0A %22labels%22: %5B%22enhancement%22%5D,%0A %22assignees%22: %5B%22ymyzk%22%5D%0A %7D%0A %7D)%0A%0A def test_send_fail_invalid_token(self, channel, mocker):%0A post = mocker.patch(%22requests.post%22)%0A response = requests.Response()%0A response.status_code = requests.codes.unauthorized%0A post.return_value = response%0A%0A with pytest.raises(HttpError):%0A channel.send(%22Test title%22, fail_silently=False)%0A%0A channel.send(%22Test title%22, fail_silently=True)%0A
|
|
aa1ca0b500af4ef89ba7ad7982b89ebe15252c1b
|
add heguilong answer for question3
|
question_3/heguilong.py
|
question_3/heguilong.py
|
Python
| 0.999995
|
@@ -0,0 +1,1068 @@
+%22%22%22%0AFile: heguilong.py%0AAuthor: heguilong%0AEmail: hgleagle@gmail.com%0AGithub: https://github.com/hgleagle%0ADescription:%0A%E7%BB%9F%E8%AE%A1%E4%B8%80%E4%B8%AA%E6%96%87%E4%BB%B6%E4%B8%AD%E6%AF%8F%E4%B8%AA%E5%8D%95%E8%AF%8D%E5%87%BA%E7%8E%B0%E7%9A%84%E6%AC%A1%E6%95%B0%EF%BC%8C%E5%88%97%E5%87%BA%E5%87%BA%E7%8E%B0%E9%A2%91%E7%8E%87%E6%9C%80%E5%A4%9A%E7%9A%845%E4%B8%AA%E5%8D%95%E8%AF%8D%E3%80%82%0A%22%22%22%0Aimport logging%0Aimport sys%0Aimport re%0Afrom collections import Counter%0A%0A%0Alogging.basicConfig(level=logging.DEBUG, format='%25(asctime)s - %25(levelname)s %5C%0A - %25(message)s')%0Aclass WordCount:%0A def __init__(self, file_name):%0A self.file_name = file_name%0A%0A def count_word(self, most_num):%0A %22%22%22print most counts words%0A%0A :most_num: print most counts words%0A%0A %22%22%22%0A with open(self.file_name, 'r') as f:%0A data = f.read().lower()%0A # characters and single quote not split%0A words = re.split(r'%5B%5E%5Cw%5C'%5D+', data)%0A logging.debug(words)%0A most_cnts_words = Counter(words).most_common(most_num)%0A print(most_cnts_words)%0A%0A%0Aif __name__ == '__main__':%0A if len(sys.argv) != 2:%0A print('Usage: python3 heguilong.py file_name')%0A sys.exit()%0A word_count = WordCount(sys.argv%5B1%5D)%0A word_count.count_word(5)%0A
|
|
69ba3715c762245e83d6b5388af4b77dfcc43dde
|
Create dataGenCore.py
|
bin/dataGenCore.py
|
bin/dataGenCore.py
|
Python
| 0.000002
|
@@ -0,0 +1,3126 @@
+#!/usr/bin python %0A%0Aimport time%0Aimport random%0Aimport base64%0Aimport os%0Aimport sys%0A%0Astart = time.time()%0A%0A# pwd = os.path.dirname(__file__)%0A# outputpath = os.path.normpath(pwd + '/../sample_data/' + sys.argv%5B1%5D)%0A%0Aoutputpath = os.path.normpath(sys.argv%5B1%5D)%0A# print outputpath%0A%0A#run for five minutes%0A# while time.time() %3C start + 300:%0A %0A#run forever%0Awhile (True):%0A t = time.strftime('%25Y-%25m-%25dT%25H:%25M:%25S')%0A timezone = time.strftime('%25z')%0A millis = %22%25.3d%22 %25 (time.time() %25 1 * 1000)%0A %0A #open file for append%0A outputfile = open(outputpath, 'a+')%0A%0A #create random values%0A level = random.sample(%5B'DEBUG', 'INFO', 'WARN', 'ERROR'%5D, 1)%5B0%5D%0A message = random.sample(%5B'Don%5C't worry, be happy.',%0A 'error, ERROR, Error!',%0A 'Nothing happened. This is worthless. %5C%0A Don%5C't log this.',%0A 'Hello world.'%5D, 1)%5B0%5D%0A%0A logger = random.sample(%5B'FooClass',%0A 'BarClass',%0A 'AuthClass',%0A 'LogoutClass',%0A 'BarClass',%0A 'BarClass',%0A 'BarClass',%0A 'BarClass'%5D, 1)%5B0%5D%0A%0A user = random.sample(%5B'jeff',%0A 'mo',%0A 'aaron',%0A 'rajesh',%0A 'sunil',%0A 'zach',%0A 'gus'%5D, 1)%5B0%5D%0A%0A ip = random.sample(%5B'1.2.3.4',%0A '4.31.2.1',%0A '1.2.3.',%0A '1.22.3.3',%0A '3.2.4.5',%0A '113.2.4.5'%5D, 1)%5B0%5D%0A%0A req_time = str(int(abs(random.normalvariate(0, 1)) * 1000))%0A session_length = str(random.randrange(1, 12240))%0A session_id = base64.b64encode(str(random.randrange(1000000, 1000000000)))%0A extra = random.sample(%5B'network=qa',%0A 'network=prod',%0A 'session_length=' + session_length,%0A 'session_id=%22' + session_id + '%22',%0A 'user=extrauser'%5D, 1)%5B0%5D%0A%0A fields = %5B%5D%0A fields.append('logger=' + logger)%0A fields.append('user=' + user)%0A fields.append('ip=' + ip)%0A fields.append('req_time=' + req_time)%0A fields.append(extra)%0A%0A fields.pop(random.randrange(0, len(fields)))%0A %0A # print to screen%0A# print %22%25s.%25s%25s %25s %25s 
%5B%25s%5D%22 %25 (t,%0A# millis,%0A# timezone,%0A# level,%0A# message,%0A# %22, %22.join(fields))%0A %0A # print to file %0A outputfile.write( %22%25s.%25s%25s %25s %25s %5B%25s%5D%5Cn%22 %25 (t,%0A millis,%0A timezone,%0A level,%0A message,%0A %22, %22.join(fields)))%0A outputfile.close()%0A #print newline%0A%0A # time.sleep(random.random())%0A
|
|
df784323d0da737755def4015840d118e3c8e595
|
Add test that detects censorship in HTTP pages based on HTTP body length
|
nettests/core/http_body_length.py
|
nettests/core/http_body_length.py
|
Python
| 0.000001
|
@@ -0,0 +1,2875 @@
+# -*- encoding: utf-8 -*-%0A#%0A# :authors: Arturo Filast%C3%B2%0A# :licence: see LICENSE%0A%0Afrom twisted.internet import defer%0Afrom twisted.python import usage%0Afrom ooni.templates import httpt%0A%0Aclass UsageOptions(usage.Options):%0A optParameters = %5B%0A %5B'url', 'u', None, 'Specify a single URL to test.'%5D,%0A %5B'factor', 'f', 0.8, 'What factor should be used for triggering censorship (0.8 == 80%25)'%5D%0A %5D%0A%0Aclass HTTPBodyLength(httpt.HTTPTest):%0A %22%22%22%0A Performs a two GET requests to the set of sites to be tested for%0A censorship, one over a known good control channel (Tor), the other over the%0A test network.%0A We then look at the response body lengths and see if the control response%0A differs from the experiment response by a certain factor.%0A %22%22%22%0A name = %22HTTP Body length test%22%0A author = %22Arturo Filast%C3%B2%22%0A version = %220.1%22%0A%0A usageOptions = UsageOptions%0A%0A inputFile = %5B'file', 'f', None, %0A 'List of URLS to perform GET and POST requests to'%5D%0A%0A # These values are used for determining censorship based on response body%0A # lengths%0A control_body_length = None%0A experiment_body_length = None%0A%0A def setUp(self):%0A %22%22%22%0A Check for inputs.%0A %22%22%22%0A if self.input:%0A self.url = self.input%0A elif self.localOptions%5B'url'%5D:%0A self.url = self.localOptions%5B'url'%5D%0A else:%0A raise Exception(%22No input specified%22)%0A%0A self.factor = self.localOptions%5B'factor'%5D%0A%0A def compare_body_lengths(self):%0A body_length_a = self.control_body_length%0A body_length_b = self.experiment_body_length%0A%0A rel = float(body_length_a)/float(body_length_b)%0A if rel %3E 1:%0A rel = 1/rel%0A%0A self.report%5B'body_proportion'%5D = rel%0A self.report%5B'factor'%5D = self.factor%0A if rel %3C self.factor:%0A self.report%5B'censorship'%5D = True%0A else:%0A self.report%5B'censorship'%5D = False%0A%0A def test_get(self):%0A def errback(failure):%0A log.err(%22There was an error while 
testing %25s%22 %25 self.url)%0A log.exception(failure)%0A%0A def control_body(result):%0A self.control_body_length = len(result)%0A if self.experiment_body_length:%0A self.compare_body_lengths()%0A%0A def experiment_body(result):%0A self.experiment_body_length = len(result)%0A if self.control_body_length:%0A self.compare_body_lengths()%0A%0A dl = %5B%5D%0A experiment_request = self.doRequest(self.url, method=%22GET%22,%0A body_processor=experiment_body)%0A control_request = self.doRequest(self.url, method=%22GET%22,%0A use_tor=True, body_processor=control_body)%0A dl.append(experiment_request)%0A dl.append(control_request)%0A d = defer.DeferredList(dl)%0A return d%0A%0A
|
|
e546e055b33c776fddaa244075d59a99978265ea
|
add reading
|
vehicles/management/commands/import_reading.py
|
vehicles/management/commands/import_reading.py
|
Python
| 0
|
@@ -0,0 +1,1729 @@
+from ciso8601 import parse_datetime%0Afrom django.utils.timezone import make_aware%0Afrom django.contrib.gis.geos import Point%0Afrom busstops.models import Service%0Afrom ...models import VehicleLocation, VehicleJourney%0Afrom ..import_live_vehicles import ImportLiveVehiclesCommand%0A%0A%0Aclass Command(ImportLiveVehiclesCommand):%0A url = 'http://rtl2.ods-live.co.uk/api/vehiclePositions'%0A source_name = 'Reading'%0A services = Service.objects.filter(operator__in=('RBUS', 'GLRB', 'KENN', 'NADS', 'THVB'), current=True)%0A%0A @staticmethod%0A def get_datetime(item):%0A return make_aware(parse_datetime(item%5B'observed'%5D))%0A%0A def get_vehicle(self, item):%0A vehicle = item%5B'vehicle'%5D%0A defaults = %7B%0A 'source': self.source%0A %7D%0A if vehicle.isdigit():%0A defaults%5B'fleet_number'%5D = vehicle%0A return self.vehicles.get_or_create(%0A defaults,%0A operator_id='RBUS',%0A code=vehicle%0A )%0A%0A def get_journey(self, item, vehicle):%0A journey = VehicleJourney()%0A journey.route_name = item%5B'service'%5D%0A%0A if vehicle.latest_location and vehicle.latest_location.journey.route_name == journey.route_name:%0A journey.service = vehicle.latest_location.journey.service%0A else:%0A try:%0A journey.service = self.services.get(line_name=item%5B'service'%5D)%0A except (Service.DoesNotExist, Service.MultipleObjectsReturned) as e:%0A print(e, item%5B'service'%5D)%0A%0A return journey%0A%0A def create_vehicle_location(self, item):%0A return VehicleLocation(%0A latlong=Point(float(item%5B'longitude'%5D), float(item%5B'latitude'%5D)),%0A heading=item%5B'bearing'%5D or None%0A )%0A
|
|
3b00930f9c6e6552bef5b5939916a1b8e737287a
|
Add a snippet.
|
python/pyaudio/read.py
|
python/pyaudio/read.py
|
Python
| 0.000002
|
@@ -0,0 +1,703 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0A# See http://people.csail.mit.edu/hubert/pyaudio/docs/#example-blocking-mode-audio-i-o%0A%0Aimport pyaudio%0Aimport wave%0A%0ACHUNK = 1024%0A%0Awf = wave.open(%22test.wav%22, 'rb')%0A%0Aprint(wf.getnchannels())%0Aprint(wf.getframerate())%0A%0Ap = pyaudio.PyAudio()%0Aprint(p.get_device_count())%0A%0Astream = p.open(format=p.get_format_from_width(wf.getsampwidth()),%0A channels=wf.getnchannels(),%0A rate=wf.getframerate(),%0A output=True,%0A output_device_index=3)%0A%0A# read data%0Adata = wf.readframes(CHUNK)%0A%0Awhile len(data) %3E 0:%0A stream.write(data)%0A data = wf.readframes(CHUNK)%0A%0Astream.stop_stream()%0Astream.close()%0A%0Ap.terminate()%0A
|
|
3601a0dc9d762e17c24e0dbf86ee1ef4a00c49cd
|
Add tests for the authorize_user function
|
yithlibraryserver/tests/test_security.py
|
yithlibraryserver/tests/test_security.py
|
Python
| 0.000001
|
@@ -0,0 +1,1704 @@
+from pyramid.httpexceptions import HTTPBadRequest, HTTPUnauthorized%0A%0Afrom yithlibraryserver import testing%0Afrom yithlibraryserver.security import authorize_user%0A%0A%0Aclass AuthorizationTests(testing.TestCase):%0A%0A clean_collections = ('access_codes', 'users')%0A%0A def test_authorize_user(self):%0A%0A request = testing.FakeRequest(%7B%7D)%0A%0A # The authorization header is required%0A self.assertRaises(HTTPUnauthorized, authorize_user, request)%0A%0A request = testing.FakeRequest(%7B'Authorization': 'Basic foobar'%7D)%0A # Only the bearer method is allowed%0A self.assertRaises(HTTPBadRequest, authorize_user, request)%0A%0A request = testing.FakeRequest(%7B%0A 'Authorization': 'Bearer 1234',%0A %7D, self.db)%0A # Invalid code%0A self.assertRaises(HTTPUnauthorized, authorize_user, request)%0A%0A access_code_id = self.db.access_codes.insert(%7B%0A 'code': '1234',%0A 'user': 'user1',%0A %7D, safe=True)%0A request = testing.FakeRequest(%7B%0A 'Authorization': 'Bearer 1234',%0A %7D, self.db)%0A # Invalid user%0A self.assertRaises(HTTPUnauthorized, authorize_user, request)%0A%0A user_id = self.db.users.insert(%7B%0A 'username': 'user1',%0A %7D, safe=True)%0A self.db.access_codes.update(%7B'_id': access_code_id%7D, %7B%0A '$set': %7B'user': user_id%7D,%0A %7D, safe=True)%0A request = testing.FakeRequest(%7B%0A 'Authorization': 'Bearer 1234',%0A %7D, self.db)%0A # Invalid user%0A authorized_user = authorize_user(request)%0A self.assertEqual(authorized_user%5B'username'%5D, 'user1')%0A
|
|
66db96dc523ab838475eb3826766bb4278c18673
|
Add tests for remove_display_attributes.
|
tests/test_assess_cloud_display.py
|
tests/test_assess_cloud_display.py
|
Python
| 0
|
@@ -0,0 +1,913 @@
+from tests import TestCase%0A%0Afrom assess_cloud_display import remove_display_attributes%0Afrom utility import JujuAssertionError%0A%0A%0Aclass TestRemoveDisplayAttributes(TestCase):%0A%0A def test_remove_display_attributes(self):%0A cloud = %7B%0A 'defined': 'local',%0A 'description': 'Openstack Cloud',%0A 'type': 'openstack',%0A %7D%0A remove_display_attributes(cloud)%0A self.assertEqual(cloud, %7B'type': 'openstack'%7D)%0A%0A def test_remove_display_attributes_bad_defined(self):%0A with self.assertRaises(JujuAssertionError):%0A remove_display_attributes(%7B'defined': 'foo'%7D)%0A%0A def test_remove_display_attributes_bad_description(self):%0A with self.assertRaises(JujuAssertionError):%0A remove_display_attributes(%7B%0A 'defined': 'local',%0A 'description': 'bar',%0A 'type': 'openstack',%0A %7D)%0A
|
|
6c12786f74c17ab8328fed9bfebbb003f2e9f282
|
Add always true entry
|
zaifbot/rules/entry/always_true_entry.py
|
zaifbot/rules/entry/always_true_entry.py
|
Python
| 0
|
@@ -0,0 +1,282 @@
+from zaifbot.rules.entry.base import Entry%0A%0A%0Aclass AlwaysTrueEntry(Entry):%0A def __init__(self, currency_pair, amount, action, name=None):%0A super().__init__(currency_pair=currency_pair, amount=amount, action=action, name=name)%0A%0A def can_entry(self):%0A return True%0A
|
|
34be21749a0e42563c2f1c6912a2ae2a7c26091c
|
525. Contiguous Array. Array, TLE
|
p525_array_tle.py
|
p525_array_tle.py
|
Python
| 0.998773
|
@@ -0,0 +1,1373 @@
+import unittest%0A%0A%0Adef max_length(sums, lo, hi):%0A sum_ = (sums%5Bhi%5D - sums%5Blo%5D) %3C%3C 1%0A length = hi - lo%0A if sum_ %3E length:%0A more = 1%0A elif sum_ %3C length:%0A more = 0%0A else:%0A return length%0A if sums%5Blo%5D == more:%0A return max_length(sums, lo + 1, hi)%0A elif sums%5Bhi%5D == more:%0A return max_length(sums, lo, hi - 1)%0A else:%0A return max(max_length(sums, lo + 1, hi), max_length(sums, lo, hi - 1))%0A%0A%0Aclass Solution(object):%0A def findMaxLength(self, nums):%0A %22%22%22%0A :type nums: List%5Bint%5D%0A :rtype: int%0A %22%22%22%0A n = len(nums)%0A sums = %5B0%5D * (n + 1)%0A sum_ = 0%0A for i in xrange(n):%0A sum_ += nums%5Bi%5D%0A sums%5Bi + 1%5D = sum_%0A return max_length(sums, 0, n)%0A%0A%0Aclass Test(unittest.TestCase):%0A def test(self):%0A self._test(%5B0, 1%5D, 2)%0A self._test(%5B0, 1, 0%5D, 2)%0A self._test(%5B1, 1, 0, 1, 0%5D, 4)%0A self._test(%5B1, 0, 1, 0, 1%5D, 4)%0A self._test(%5B0, 1, 1, 0, 1%5D, 4)%0A self._test(%5B0, 1, 1, 1, 1, 0, 0%5D, 4)%0A self._test(%5B0, 1, 1, 1, 0%5D, 2)%0A self._test(%5B0, 1, 0, 0, 1, 1, 1, 1, 0%5D, 6)%0A self._test(%5B0, 1, 1, 1, 1, 0, 0, 1, 0%5D, 6)%0A%0A def _test(self, nums, expected):%0A actual = Solution().findMaxLength(nums)%0A self.assertEqual(actual, expected)%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
93f0f573c40ed7878f744a9fee2b2a9e85157d5e
|
append elevations to GPX from SRTM dataset with gpxelevations util in SRTM.py package
|
src/gpx_elev_enhancer.py
|
src/gpx_elev_enhancer.py
|
Python
| 0
|
@@ -0,0 +1,509 @@
+# Append elevations to GPX files%0A# 2015-05-08%0A# Lu LIU%0A#%0A%0Afrom os import listdir%0Afrom os.path import isfile, join%0Aimport srtm%0Aimport gpxpy%0A%0Agpx_file_dir = %22/Users/user/Research/data/GPX/Munich%22%0Agpx_files = %5Bf for f in listdir(gpx_file_dir) if isfile(join(gpx_file_dir, f))%5D%0Afor gpx_file in gpx_files:%0A print %22add elevations for %22 + gpx_file + %22...%22,%0A gpx = gpxpy.parse(open(join(gpx_file_dir, gpx_file)))%0A elev_data = srtm.get_data()%0A elev_data.add_elevations(gpx, smooth=True)%0A print %22 done!%22%0A
|
|
7e68ec932cb43fc5a98828a367a51593b419bee0
|
Add batch normalization
|
thinc/neural/_classes/batchnorm.py
|
thinc/neural/_classes/batchnorm.py
|
Python
| 0.000001
|
@@ -0,0 +1,2206 @@
+%0A%0Afrom .model import Model%0A%0A%0Aclass BatchNormalization(Model):%0A def predict_batch(self, X):%0A N, mu, var = _get_moments(self.ops, X)%0A return _forward(self.ops, X, mu, var)%0A %0A def begin_update(self, X, dropout=0.0):%0A N, mu, var = _get_moments(self.ops, X)%0A Xhat = _forward(self.ops, X, mu, var)%0A%0A def finish_update(dy, optimizer=None, **kwargs):%0A assert len(X) == len(dy)%0A dist, sum_dy, sum_dy_dist = _get_d_moments(self.ops, dy, X, mu)%0A if hasattr(dy, 'shape'):%0A d_xhat = N * dy - sum_dy - dist * var**(-1.) * sum_dy_dist%0A d_xhat *= var ** (-1. / 2)%0A d_xhat /= N%0A return d_xhat%0A else:%0A seqs = (dy, sum_dy, dist, sum_dy_dist)%0A output = %5B%5D%0A assert len(sum_dy) == len(dy)%0A assert len(dist) == len(dy)%0A assert len(sum_dy_dist) == len(dy)%0A for dy_, sum_dy_, dist_, sum_dy_dist_ in zip(*seqs):%0A d_xhat = N * dy_ - sum_dy_ - dist_ * var**(-1.) * sum_dy_dist_%0A d_xhat *= var ** (-1. / 2)%0A d_xhat /= N%0A output.append(d_xhat)%0A assert len(output) == len(dy), (len(output), len(dy))%0A return output%0A return Xhat, finish_update%0A%0A%0Adef _get_moments(ops, X):%0A if hasattr(X, 'shape') and len(X.shape) == 2:%0A mu = X.mean(axis=0)%0A var = X.var(axis=0) + 1e-8%0A return X.shape%5B0%5D, mu, var%0A else:%0A stacked = numpy.vstack(X)%0A return stacked.shape%5B0%5D, stacked.mean(axis=0), stacked.var(axis=0)%0A%0A%0Adef _get_d_moments(ops, dy, X, mu):%0A if hasattr(dy, 'shape'):%0A dist = X-mu%0A return dist, ops.xp.sum(dy, axis=0), ops.xp.sum(dy * dist, axis=0)%0A else:%0A sum_dy = %5Bops.xp.sum(seq, axis=0) for seq in dy%5D%0A dist = %5Bx-mu for x in X%5D%0A sum_dy_dot_dist = %5Bops.xp.sum(seq * d, axis=0) for seq, d in zip(dy, dist)%5D%0A return dist, sum_dy, sum_dy_dot_dist%0A%0A%0Adef _forward(ops, X, mu, var):%0A if hasattr(X, 'shape'):%0A return (X-mu) * var ** (-1./2.)%0A else:%0A return %5B_forward(x, mu, var) for x in X%5D%0A
|
|
c99a476b396422c0a673a78eb795df1cf94b8bb5
|
Define base Frame object.
|
hyper/http20/frame.py
|
hyper/http20/frame.py
|
Python
| 0
|
@@ -0,0 +1,366 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0Ahyper/http20/frame%0A~~~~~~~~~~~~~~~~~~%0A%0ADefines framing logic for HTTP/2.0. Provides both classes to represent framed%0Adata and logic for aiding the connection when it comes to reading from the%0Asocket.%0A%22%22%22%0Aclass Frame(object):%0A def __init__(self):%0A self.stream = None%0A%0A def serialize(self):%0A raise NotImplementedError()%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.