| commit (stringlengths 40–40) | subject (stringlengths 1–3.25k) | old_file (stringlengths 4–311) | new_file (stringlengths 4–311) | old_contents (stringlengths 0–26.3k) | lang (stringclasses, 3 values) | proba (float64, 0–1) | diff (stringlengths 0–7.82k) |
|---|---|---|---|---|---|---|---|
d2d44a0bd1d144a3497e857e368deab7b7833b2f
|
Fix lint issue
|
tests/test_manifest.py
|
tests/test_manifest.py
|
# Copyright 2017 Jon Wayne Parrott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections

import mock
import pytest

import nox
from nox.manifest import _null_session_func
from nox.manifest import Manifest


def create_mock_sessions():
    sessions = collections.OrderedDict()
    sessions['foo'] = mock.Mock(spec=())
    sessions['bar'] = mock.Mock(spec=())
    return sessions


def test_init():
    sessions = create_mock_sessions()
    manifest = Manifest(sessions, mock.sentinel.CONFIG)

    # Assert that basic properties look correctly.
    assert len(manifest) == 2
    assert manifest['foo'].func is sessions['foo']
    assert manifest['bar'].func is sessions['bar']


def test_contains():
    sessions = create_mock_sessions()
    manifest = Manifest(sessions, mock.sentinel.CONFIG)

    # Establish that contains works pre-iteration.
    assert 'foo' in manifest
    assert 'bar' in manifest
    assert 'baz' not in manifest

    # Establish that __contains__ works post-iteration.
    for session in manifest:
        pass
    assert 'foo' in manifest
    assert 'bar' in manifest
    assert 'baz' not in manifest

    # Establish that sessions themselves work.
    assert manifest['foo'] in manifest


def test_getitem():
    sessions = create_mock_sessions()
    manifest = Manifest(sessions, mock.sentinel.CONFIG)

    # Establish that each session is present, and a made-up session
    # is not.
    assert manifest['foo'].func is sessions['foo']
    assert manifest['bar'].func is sessions['bar']
    with pytest.raises(KeyError):
        manifest['baz']

    # Establish that the sessions are still present even after being
    # consumed by iteration.
    for session in manifest:
        pass
    assert manifest['foo'].func is sessions['foo']
    assert manifest['bar'].func is sessions['bar']


def test_iteration():
    sessions = create_mock_sessions()
    manifest = Manifest(sessions, mock.sentinel.CONFIG)

    # There should be two sessions in the queue.
    assert len(manifest._queue) == 2
    assert len(manifest._consumed) == 0

    # The first item should be our "foo" session.
    foo = next(manifest)
    assert foo.func == sessions['foo']
    assert foo in manifest._consumed
    assert foo not in manifest._queue
    assert len(manifest._consumed) == 1
    assert len(manifest._queue) == 1

    # The .next() or .__next__() methods can be called directly according
    # to Python's data model.
    bar = manifest.next()
    assert bar.func == sessions['bar']
    assert bar in manifest._consumed
    assert bar not in manifest._queue
    assert len(manifest._consumed) == 2
    assert len(manifest._queue) == 0

    # Continuing past the end raises StopIteration.
    with pytest.raises(StopIteration):
        manifest.__next__()


def test_len():
    sessions = create_mock_sessions()
    manifest = Manifest(sessions, mock.sentinel.CONFIG)
    assert len(manifest) == 2
    for session in manifest:
        assert len(manifest) == 2


def test_filter_by_name():
    sessions = create_mock_sessions()
    manifest = Manifest(sessions, mock.sentinel.CONFIG)
    manifest.filter_by_name(('foo',))
    assert 'foo' in manifest
    assert 'bar' not in manifest


def test_filter_by_name_not_found():
    sessions = create_mock_sessions()
    manifest = Manifest(sessions, mock.sentinel.CONFIG)
    with pytest.raises(KeyError):
        manifest.filter_by_name(('baz',))


def test_filter_by_keyword():
    sessions = create_mock_sessions()
    manifest = Manifest(sessions, mock.sentinel.CONFIG)
    assert len(manifest) == 2
    manifest.filter_by_keywords('foo or bar')
    assert len(manifest) == 2
    manifest.filter_by_keywords('foo')
    assert len(manifest) == 1


def test_add_session_plain():
    manifest = Manifest({}, mock.sentinel.CONFIG)
    for session in manifest.make_session('my_session', lambda session: None):
        manifest.add_session(session)
    assert len(manifest) == 1


def test_add_session_parametrized():
    manifest = Manifest({}, mock.sentinel.CONFIG)

    # Define a session with parameters.
    @nox.parametrize('param', ('a', 'b', 'c'))
    def my_session(session, param):
        pass

    # Add the session to the manifest.
    for session in manifest.make_session('my_session', my_session):
        manifest.add_session(session)
    assert len(manifest) == 3


def test_add_session_parametrized_noop():
    manifest = Manifest({}, mock.sentinel.CONFIG)

    # Define a session without any parameters.
    @nox.parametrize('param', ())
    def my_session(session, param):
        pass

    # Add the session to the manifest.
    for session in manifest.make_session('my_session', my_session):
        manifest.add_session(session)
    assert len(manifest) == 1


def test_notify():
    manifest = Manifest({}, mock.sentinel.CONFIG)

    # Define a session.
    def my_session(session):
        pass
    def notified(session):
        pass

    # Add the sessions to the manifest.
    for session in manifest.make_session('my_session', my_session):
        manifest.add_session(session)
    for session in manifest.make_session('notified', notified):
        manifest.add_session(session)
    assert len(manifest) == 2

    # Filter so only the first session is included in the queue.
    manifest.filter_by_name(('my_session',))
    assert len(manifest) == 1

    # Notify the notified session.
    manifest.notify('notified')
    assert len(manifest) == 2


def test_notify_noop():
    manifest = Manifest({}, mock.sentinel.CONFIG)

    # Define a session and add it to the manifest.
    def my_session(session):
        pass
    for session in manifest.make_session('my_session', my_session):
        manifest.add_session(session)
    assert len(manifest) == 1

    # Establish idempotency; notifying a session already in the queue no-ops.
    manifest.notify('my_session')
    assert len(manifest) == 1


def test_notify_error():
    manifest = Manifest({}, mock.sentinel.CONFIG)
    with pytest.raises(ValueError):
        manifest.notify('does_not_exist')


def test_add_session_idempotent():
    manifest = Manifest({}, mock.sentinel.CONFIG)
    for session in manifest.make_session('my_session', lambda session: None):
        manifest.add_session(session)
        manifest.add_session(session)
    assert len(manifest) == 1


def test_null_session_function():
    session = mock.Mock(spec=('skip',))
    _null_session_func(session)
    assert session.skip.called
|
Python
| 0.000002
|
@@ -5376,24 +5376,25 @@
pass%0A
+%0A
def noti
|
3cc7e0cebc8a7a7410ce6b239e55db0cf55b1dc8
|
Fix broken tests in test_messages
|
tests/test_messages.py
|
tests/test_messages.py
|
from datetime import date
import unittest
from mock import patch
from six import u
from twilio.rest.resources import Messages

DEFAULT = {
    'From': None,
    'DateSent<': None,
    'DateSent>': None,
    'DateSent': None,
}


class MessageTest(unittest.TestCase):

    def setUp(self):
        self.resource = Messages("foo", ("sid", "token"))
        self.params = DEFAULT.copy()

    def test_list_on(self):
        with patch.object(self.resource, 'get_instances') as mock:
            self.resource.list(date_sent=date(2011, 1, 1))
            self.params['DateSent'] = "2011-01-01"
            mock.assert_called_with(self.params)

    def test_list_after(self):
        with patch.object(self.resource, 'get_instances') as mock:
            self.resource.list(after=date(2011, 1, 1))
            self.params['DateSent>'] = "2011-01-01"
            mock.assert_called_with(self.params)

    def test_list_before(self):
        with patch.object(self.resource, 'get_instances') as mock:
            self.resource.list(before=date(2011, 1, 1))
            self.params['DateSent<'] = "2011-01-01"
            mock.assert_called_with(self.params)

    def test_create(self):
        with patch.object(self.resource, 'create_instance') as mock:
            self.resource.create(
                from_='+14155551234',
                to='+14155556789',
                body=u('ahoy hoy'),
            )
            mock.assert_called_with(
                {
                    'from': '+14155551234',
                    'to': '+14155556789',
                    'body': u('ahoy hoy'),
                },
            )

    def test_delete(self):
        with patch.object(self.resource, 'delete_instance') as mock:
            self.resource.delete('MM123')
            mock.assert_called_with('MM123')

    def test_redact(self):
        with patch.object(self.resource, 'update_instance') as mock:
            self.resource.redact('MM123')
            mock.assert_called_with('MM123', {'Body': ''})
|
Python
| 0.000648
|
@@ -1476,17 +1476,17 @@
'
-f
+F
rom': '+
@@ -1972,24 +1972,28 @@
called_with(
+sid=
'MM123', %7B'B
@@ -1989,16 +1989,21 @@
MM123',
+body=
%7B'Body':
|
92af628970486f18ea5a01fe0293ac6a88607928
|
update mismatch test because of sensorsy bug fix.
|
tests/test_mismatch.py
|
tests/test_mismatch.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 10:08:25 2018
@author: cdeline
Using pytest to create unit tests for mismatch.py.
to run unit tests, run pytest from the command line in the bifacial_radiance directory
to run coverage tests, run py.test --cov-report term-missing --cov=bifacial_radiance
"""
import bifacial_radiance
import numpy as np
import pytest
import os
import pandas as pd
# try navigating to tests directory so tests run from here.
try:
    os.chdir('tests')
except:
    pass

TESTDIR = os.path.dirname(__file__) # this folder

TEST_ARRAY = np.array([[ 0, 23, 24, 47, 48, 71],
                       [ 1, 22, 25, 46, 49, 70],
                       [ 2, 21, 26, 45, 50, 69],
                       [ 3, 20, 27, 44, 51, 68],
                       [ 4, 19, 28, 43, 52, 67],
                       [ 5, 18, 29, 42, 53, 66],
                       [ 6, 17, 30, 41, 54, 65],
                       [ 7, 16, 31, 40, 55, 64],
                       [ 8, 15, 32, 39, 56, 63],
                       [ 9, 14, 33, 38, 57, 62],
                       [10, 13, 34, 37, 58, 61],
                       [11, 12, 35, 36, 59, 60]])


def test_setupforPVMismatch():
    out = bifacial_radiance.mismatch._setupforPVMismatch(
        portraitorlandscape='portrait',
        sensorsy=12,
        numcells=72)
    np.testing.assert_array_equal(out[0], TEST_ARRAY)
    assert out[1] == 6
    assert out[2] == 12


def test_MAD():
    assert bifacial_radiance.mismatch.mad_fn(TEST_ARRAY) == \
        pytest.approx(2433.333,abs = 0.001)
    temp = bifacial_radiance.mismatch.mad_fn(pd.DataFrame(TEST_ARRAY))
    ans = pd.Series([15706.061,4936.190,2928.249,2081.526,1614.642,1318.8295])
    pd.testing.assert_series_equal(temp,ans,check_less_precise=True)
    # assert temp == \
    #     pytest.approx(2433.333,abs = 0.001)


def test_analysisIrradianceandPowerMismatch():
    #analysisIrradianceandPowerMismatch(testfolder, writefiletitle,
    #                                   portraitorlandscape, bififactor,
    #                                   numcells=72, downsamplingmethod='byCenter'):
    #testfolder = r'C:\Users\cdeline\Documents\Python Scripts\Bifacial_Radiance\tests\results_mismatch'
    #writefiletitle = r'C:\Users\cdeline\Documents\Python Scripts\Bifacial_Radiance\tests\mismatch.txt'
    testfolder = os.path.join(TESTDIR,'results_mismatch')
    writefiletitle = os.path.join(TESTDIR,'mismatch.txt')
    bififactor = 1
    bifacial_radiance.mismatch.analysisIrradianceandPowerMismatch(testfolder, writefiletitle,
                                                                  'portrait', bififactor=1,
                                                                  numcells=72, downsamplingmethod='byCenter')
    df_all = pd.read_csv(writefiletitle)
    assert df_all.Mismatch_rel[0] == pytest.approx(0.410, abs = 0.001)
    assert df_all["MAD/G_Total"][0] == pytest.approx(2.135, abs = 0.001)
|
Python
| 0
|
@@ -2291,27 +2291,8 @@
t')%0A
- bififactor = 1%0A
@@ -2629,11 +2629,11 @@
x(0.
-410
+376
, ab
@@ -2700,13 +2700,13 @@
rox(
-2.135
+1.987
, ab
|
f6ecf6a45e2749261a20869aca5dfca6d7c03494
|
Correct method doc.
|
qiprofile_rest_client/helpers/database.py
|
qiprofile_rest_client/helpers/database.py
|
"""Mongo Engine interaction utilities."""

def get_or_create(klass, pk, **non_pk):
    """
    This function stands in for the Mongo Engine ``get_or_create``
    collection method which was deprecated in mongoengine v0.8.0
    and dropped in mongoengine v0.10.0, since MongoDB does not
    support transactions.

    If there is an object of the given Mongo Engine data model
    class which matches the primary key, then that object is
    returned. Otherwise, a new object is created with the content
    prescribed by both the primary and non-primary parameters.
    The create step is an upsert, i.e. a new object is created only
    if it does not yet exist. The upsert allows for the small
    possibility that an object is created after the fetch attempt
    but before the create attempt. In that situation, the existing
    object non-key content is modified and the modified object is
    returned.

    :Note: The idiom used in this function modifies the solution
      proposed in http://stackoverflow.com/questions/25846462/mongoengine-replacing-get-or-create-with-upsert-update-one/25863633#25863633.
      That StackOverflow work-around returns the following error:

          ValueError: update only works with $ operators

      The work-around to the StackOverflow work-around is to use
      call *update* rather than *modify*.

    :param klass: the Mongo Engine data model class
    :param pk: the primary key {attribute: value} dictionary
    :param non_pk: the non-key {attribute: value} dictionary
    :return: the existing or new object
    """
    try:
        return klass.objects.get(**pk)
    except klass.DoesNotExist:
        mod_opts = {'set__' + attr: val for attr, val in non_pk.iteritems()}
        return klass.objects(**pk).update_one(upsert=True, **mod_opts)
|
Python
| 0
|
@@ -65,18 +65,24 @@
ss,
-p
k
+ey=None
, **non_
pk):
@@ -77,18 +77,19 @@
, **non_
-p
k
+ey
):%0A %22
@@ -1314,29 +1314,57 @@
to
-use
+call
%0A
-call *update*
+the data model class *update_one* method
rat
@@ -1450,23 +1450,32 @@
ram
-p
k
+ey
: the
-primary
+secondary field
key
@@ -1485,32 +1485,40 @@
ttribute: value%7D
+%0A
dictionary%0A
@@ -1508,24 +1508,71 @@
dictionary
+, or None if no fields comprise a secondary key
%0A :param
@@ -1575,18 +1575,19 @@
ram non_
-p
k
+ey
: the no
@@ -1679,16 +1679,49 @@
try:%0A
+ # Search by primary key.%0A
@@ -1747,18 +1747,19 @@
s.get(**
-p
k
+ey
)%0A ex
@@ -1783,16 +1783,197 @@
tExist:%0A
+ # Create the new object as an upsert. Specify the MongoDB Engine%0A # set__*attribute* modification options for each non-primary%0A # key (attribute, value) pair.%0A
@@ -2025,18 +2025,19 @@
in non_
-p
k
+ey
.iterite
@@ -2077,10 +2077,11 @@
s(**
-p
k
+ey
).up
|
c2df896183f80fe3ca0eab259874bc4385d399e9
|
Clean up detrius in parallel test file
|
tests/test_parallel.py
|
tests/test_parallel.py
|
from __future__ import with_statement

from datetime import datetime
import copy
import getpass
import sys

import paramiko
from nose.tools import with_setup
from fudge import (Fake, clear_calls, clear_expectations, patch_object, verify,
                   with_patched_object, patched_context, with_fakes)

from fabric.context_managers import settings, hide, show
from fabric.network import (HostConnectionCache, join_host_strings, normalize,
                            denormalize)
from fabric.io import output_loop
import fabric.network # So I can call patch_object correctly. Sigh.
from fabric.state import env, output, _get_system_username
from fabric.operations import run, sudo
from fabric.decorators import parallel

from utils import *
from server import (server, PORT, RESPONSES, PASSWORDS, CLIENT_PRIVKEY, USER,
                    CLIENT_PRIVKEY_PASSPHRASE)


class TestParallel(FabricTest):
    @server()
    @parallel
    def test_parallel(self):
        """
        Want to do a simple call and respond
        """
        env.pool_size = 10
        cmd = "ls /simple"
        with hide('everything'):
            eq_(run(cmd), RESPONSES[cmd])
|
Python
| 0
|
@@ -41,670 +41,85 @@
rom
-datetime import datetime%0Aimport copy%0Aimport getpass%0Aimport sys%0A%0Aimport paramiko%0Afrom nose.tools import with_setup%0Afrom fudge import (Fake, clear_calls, clear_expectations, patch_object, verify,%0A with_patched_object, patched_context, with_fakes)%0A%0Afrom fabric.context_managers import settings, hide, show%0Afrom fabric.network import (HostConnectionCache, join_host_strings, normalize,%0A denormalize)%0Afrom fabric.io import output_loop%0Aimport fabric.network # So I can call patch_object correctly. Sigh.%0Afrom fabric.state import env, output, _get_system_username%0Afrom fabric.operations import run, sudo%0Afrom fabric.decorators import parallel%0A%0Afrom utils import *
+fabric.api import run, parallel, env, hide%0A%0Afrom utils import FabricTest, eq_
%0Afro
@@ -138,22 +138,15 @@
ort
-(
server,
- PORT,
RES
@@ -155,73 +155,9 @@
NSES
-, PASSWORDS, CLIENT_PRIVKEY, USER,%0A CLIENT_PRIVKEY_PASSPHRASE)
+%0A
%0A%0Acl
|
8f86eacf1b85a0c497f9e8586a59cc19e6a0484f
|
Stop passing a recorder argument unecessarily in tests
|
tests/test_pipeline.py
|
tests/test_pipeline.py
|
from __future__ import print_function
import pytest
from plumbium.processresult import record, pipeline, call

class DummyRecorder(object):
    def write(self, results):
        self.results = results


@pytest.fixture
def simple_pipeline():
    @record('an_output')
    def recorded_function():
        call(['echo', 'test output'])
        return 'test_result'

    def a_pipeline():
        recorded_function()

    return a_pipeline


@pytest.fixture
def failing_pipeline():
    @record('an_output')
    def recorded_function():
        raise IOError

    def a_pipeline():
        recorded_function()

    return a_pipeline


def test_result(simple_pipeline, tmpdir):
    with tmpdir.as_cwd():
        pipeline.run('test', simple_pipeline, str(tmpdir))
        print(pipeline.results)
        assert pipeline.results[0]['an_output'] == 'test_result'


def test_stdout_captured(simple_pipeline, tmpdir):
    with tmpdir.as_cwd():
        recorder = DummyRecorder()
        pipeline.run('test', simple_pipeline, str(tmpdir), recorder=recorder)
        proc = pipeline.results[0].as_dict()
        assert proc['printed_output'] == 'test output\n'


def test_exception_captured(failing_pipeline, tmpdir):
    with tmpdir.as_cwd():
        recorder = DummyRecorder()
        pipeline.run('test', failing_pipeline, str(tmpdir), recorder=recorder)
        proc = pipeline.results[0].as_dict()
        assert 'IOError' in proc['exception']


def test_save_filename(simple_pipeline, tmpdir):
    with tmpdir.as_cwd():
        pipeline.run(
            'test',
            simple_pipeline,
            str(tmpdir),
            metadata={'test': 1},
            filename='result_file_{metadata[test]:03d}'
        )
    assert 'result_file_001.tar.gz' in [f.basename for f in tmpdir.listdir()]
|
Python
| 0.000002
|
@@ -932,43 +932,8 @@
():%0A
- recorder = DummyRecorder()%0A
@@ -985,35 +985,16 @@
(tmpdir)
-, recorder=recorder
)%0A
@@ -1176,43 +1176,8 @@
():%0A
- recorder = DummyRecorder()%0A
@@ -1234,27 +1234,8 @@
dir)
-, recorder=recorder
)%0A
|
7b75f508bf651bdeb57bdc4d263ced26434054c8
|
add pct test
|
tests/test_pvmodule.py
|
tests/test_pvmodule.py
|
"""
Tests for pvmodules.
"""

from nose.tools import ok_
from pvmismatch.pvmismatch_lib.pvmodule import PVmodule, TCT96


def test_calc_mod():
    pvmod = PVmodule()
    ok_(isinstance(pvmod, PVmodule))
    return pvmod


def test_calc_TCT_mod():
    pvmod = PVmodule(cell_pos=TCT96)
    ok_(isinstance(pvmod, PVmodule))
    return pvmod


if __name__ == "__main__":
    test_calc_mod()
    test_calc_TCT_mod()
|
Python
| 0.000014
|
@@ -112,16 +112,23 @@
e, TCT96
+, PCT96
%0A%0A%0Adef t
@@ -340,16 +340,134 @@
pvmod%0A%0A%0A
+def test_calc_PCT_mod():%0A pvmod = PVmodule(cell_pos=PCT96)%0A ok_(isinstance(pvmod, PVmodule))%0A return pvmod%0A%0A%0A
if __nam
|
20d1ab60c718869d86deed5410d5aef428042195
|
remove unused json import
|
tests/test_redirect.py
|
tests/test_redirect.py
|
import pytest
import json
from urllib.parse import quote
from sanic.response import text, redirect

@pytest.fixture
def redirect_app(app):
    @app.route('/redirect_init')
    async def redirect_init(request):
        return redirect("/redirect_target")

    @app.route('/redirect_init_with_301')
    async def redirect_init_with_301(request):
        return redirect("/redirect_target", status=301)

    @app.route('/redirect_target')
    async def redirect_target(request):
        return text('OK')

    @app.route('/1')
    def handler(request):
        return redirect('/2')

    @app.route('/2')
    def handler(request):
        return redirect('/3')

    @app.route('/3')
    def handler(request):
        return text('OK')

    @app.route('/redirect_with_header_injection')
    async def redirect_with_header_injection(request):
        return redirect("/unsafe\ntest-header: test-value\n\ntest-body")

    return app


def test_redirect_default_302(redirect_app):
    """
    We expect a 302 default status code and the headers to be set.
    """
    request, response = redirect_app.test_client.get(
        '/redirect_init',
        allow_redirects=False)

    assert response.status == 302
    assert response.headers["Location"] == "/redirect_target"
    assert response.headers["Content-Type"] == 'text/html; charset=utf-8'


def test_redirect_headers_none(redirect_app):
    request, response = redirect_app.test_client.get(
        uri="/redirect_init",
        headers=None,
        allow_redirects=False)

    assert response.status == 302
    assert response.headers["Location"] == "/redirect_target"


def test_redirect_with_301(redirect_app):
    """
    Test redirection with a different status code.
    """
    request, response = redirect_app.test_client.get(
        "/redirect_init_with_301",
        allow_redirects=False)

    assert response.status == 301
    assert response.headers["Location"] == "/redirect_target"


def test_get_then_redirect_follow_redirect(redirect_app):
    """
    With `allow_redirects` we expect a 200.
    """
    request, response = redirect_app.test_client.get(
        "/redirect_init",
        allow_redirects=True)
    assert response.status == 200
    assert response.text == 'OK'


def test_chained_redirect(redirect_app):
    """Test test_client is working for redirection"""
    request, response = redirect_app.test_client.get('/1')
    assert request.url.endswith('/1')
    assert response.status == 200
    assert response.text == 'OK'
    try:
        assert response.url.endswith('/3')
    except AttributeError:
        assert response.url.path.endswith('/3')


def test_redirect_with_header_injection(redirect_app):
    """
    Test redirection to a URL with header and body injections.
    """
    request, response = redirect_app.test_client.get(
        "/redirect_with_header_injection",
        allow_redirects=False)

    assert response.status == 302
    assert "test-header" not in response.headers
    assert not response.text.startswith('test-body')


@pytest.mark.parametrize("test_str", ["sanic-test", "sanictest", "sanic test"])
async def test_redirect_with_params(app, test_client, test_str):
    @app.route("/api/v1/test/<test>/")
    async def init_handler(request, test):
        assert test == test_str
        return redirect("/api/v2/test/{}/".format(quote(test)))

    @app.route("/api/v2/test/<test>/")
    async def target_handler(request, test):
        assert test == test_str
        return text("OK")

    test_cli = await test_client(app)
    response = await test_cli.get("/api/v1/test/{}/".format(quote(test_str)))
    assert response.status == 200
    txt = await response.text()
    assert txt == "OK"
|
Python
| 0.000002
|
@@ -11,20 +11,8 @@
est%0A
-import json%0A
from
|
9744226621e27d4bd5d19a52b75b718e86bfef87
|
Add extra filter for equipment
|
lims/equipment/views.py
|
lims/equipment/views.py
|
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.exceptions import PermissionDenied
import django_filters
from lims.permissions.permissions import IsInAdminGroupOrRO
from .models import Equipment, EquipmentReservation
from .serializers import EquipmentSerializer, EquipmentReservationSerializer

class EquipmentViewSet(viewsets.ModelViewSet):
    queryset = Equipment.objects.all()
    serializer_class = EquipmentSerializer
    filter_fields = ('can_reserve',)
    search_fields = ('name',)
    permission_classes = (IsInAdminGroupOrRO,)


class EquipmentReservationFilter(django_filters.FilterSet):
    class Meta:
        model = EquipmentReservation
        fields = {
            'id': ['exact'],
            'start': ['exact', 'gte'],
            'end': ['exact', 'lte'],
            'equipment_reserved': ['exact'],
        }


class EquipmentReservationViewSet(viewsets.ModelViewSet):
    queryset = EquipmentReservation.objects.all()
    serializer_class = EquipmentReservationSerializer
    filter_class = EquipmentReservationFilter

    def perform_create(self, serializer):
        if self.request.user.groups.filter(name='staff').exists():
            serializer.validated_data['is_confirmed'] = True
            serializer.validated_data['confirmed_by'] = self.request.user
        serializer.save(reserved_by=self.request.user)

    def perform_update(self, serializer):
        if (serializer.instance.reserved_by == self.request.user or
                self.request.user.groups.filter(name='staff').exists()):
            serializer.save()
        else:
            raise PermissionDenied()

    def destroy(self, request, pk=None):
        if (request.user == self.get_object().reserved_by or
                request.user.groups.filter(name='staff').exists()):
            return super(EquipmentReservationViewSet, self).destroy(request, self.get_object().id)
        else:
            return Response({'message': 'You must have permission to delete'}, status=403)
|
Python
| 0
|
@@ -510,16 +510,26 @@
eserve',
+ 'status',
)%0A se
|
e312e2c61d6ddba147be73e636b26b14aaf49f60
|
use django.conf.settings instead of plain settings
|
lingcod/kmlapp/tests.py
|
lingcod/kmlapp/tests.py
|
"""
Unit tests for the KML App
"""
import settings
from django.test import TestCase
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.auth.models import *
from lingcod.common import utils
from lingcod.mpa.models import MpaDesignation
Mpa = utils.get_mpa_class()
MpaArray = utils.get_array_class()
user = User.objects.get(username="dummy")

class KMLAppTest(TestCase):
    def setUp(self):
        g1 = GEOSGeometry('SRID=4326;POLYGON ((-120.42 34.37, -119.64 34.32, -119.63 34.12, -120.44 34.15, -120.42 34.37))')
        g2 = GEOSGeometry('SRID=4326;POLYGON ((-121.42 34.37, -120.64 34.32, -120.63 34.12, -121.44 34.15, -121.42 34.37))')
        g3 = GEOSGeometry('SRID=4326;POLYGON ((-122.42 34.37, -121.64 34.32, -121.63 34.12, -122.44 34.15, -122.42 34.37))')
        g1.transform(settings.GEOMETRY_DB_SRID)
        g2.transform(settings.GEOMETRY_DB_SRID)
        g3.transform(settings.GEOMETRY_DB_SRID)
        smr = MpaDesignation.objects.create(name="Reserve", acronym="R")
        smr.save()
        mpa1 = Mpa.objects.create( name='Test_MPA_1', user=user, geometry_final=g1)
        mpa2 = Mpa.objects.create( name='Test_MPA_2', designation=smr, user=user, geometry_final=g2)
        mpa3 = Mpa.objects.create( name='Test_MPA_3', designation=smr, user=user, geometry_final=g3)
        mpa1.save()
        mpa2.save()
        mpa3.save()
        array1 = MpaArray.objects.create( name='Test_Array_1', user=user)
        array1.save()
        array1.add_mpa(mpa1)
        array2 = MpaArray.objects.create( name='Test_Array_2', user=user)
        array2.save()
        array2.add_mpa(mpa2)

    def test_dummy_kml_view(self):
        """
        Tests that dummy user can retrieve a KML file
        """
        response = self.client.get('/kml/dummy/mpa.kml', {})
        self.assertEquals(response.status_code, 200)

    def test_valid_kml(self):
        """
        Tests that dummy kml is valid (requires feedvalidator)
        """
        import feedvalidator
        from feedvalidator import compatibility
        response = self.client.get('/kml/dummy/mpa.kml', {})
        events = feedvalidator.validateString(response.content, firstOccurrenceOnly=1)['loggedEvents']
        # Three levels of compatibility
        # "A" is most basic level
        # "AA" mimics online validator
        # "AAA" is experimental; these rules WILL change or disappear in future versions
        filterFunc = getattr(compatibility, "AA")
        # there are a few bugs in feedvalidator, doesn't recognize valid ExtendedData element so we ignore
        events = [x for x in filterFunc(events)
                  if not (isinstance(x,feedvalidator.logging.UndefinedElement) and x.params['element']==u'ExtendedData')]
        from feedvalidator.formatter.text_plain import Formatter
        output = Formatter(events)
        if output:
            print "\n".join(output)
            raise Exception("Invalid KML")
        else:
            print "No KML errors or warnings"
        self.assertEquals(response.status_code, 200)
|
Python
| 0
|
@@ -28,16 +28,33 @@
App%0A%22%22%22%0A
+from django.conf
import s
|
368d46ba4bec2da22abfba306badf39a3a552e88
|
Remove now-unused imports
|
tests/test_snippets.py
|
tests/test_snippets.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import re
from xml.dom.minidom import parseString
from xml.parsers.expat import ExpatError
import pytest
import requests
from bs4 import BeautifulSoup
REQUESTS_TIMEOUT = 20
URL_TEMPLATE = '{}/{}/Firefox/default/default/default/en-US/{}/default/default/default/'
_user_agent_firefox = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.1) Gecko/20100101 Firefox/10.0.1'

def _get_redirect(url, user_agent=_user_agent_firefox, locale='en-US'):
    headers = {'user-agent': user_agent,
               'accept-language': locale}
    return requests.get(url, headers=headers, timeout=REQUESTS_TIMEOUT)


def _parse_response(content):
    return BeautifulSoup(content, 'html.parser')


@pytest.mark.parametrize(('version'), ['3', '5'], ids=['legacy', 'activitystream'])
@pytest.mark.parametrize(('channel'), ['aurora', 'beta', 'release'])
def test_response_codes(base_url, version, channel):
    url = URL_TEMPLATE.format(base_url, version, channel)
    r = _get_redirect(url)
    assert r.status_code in (requests.codes.ok, requests.codes.no_content)


@pytest.mark.parametrize(('channel'), ['aurora', 'beta', 'release'])
def test_that_snippets_are_well_formed_xml(base_url, channel):
    url = URL_TEMPLATE.format(base_url, '3', channel)
    r = _get_redirect(url)
    try:
        print(r.content)
        parseString('<div>{}</div>'.format(r.content))
    except ExpatError as e:
        raise AssertionError('Snippets at {0} do not contain well formed '
                             'xml: {1}'.format(url, e))
|
Python
| 0
|
@@ -197,30 +197,8 @@
/.%0A%0A
-import json%0Aimport re%0A
from
|
142ef9b907868f53c696bd4426a7f08b7ef57528
|
Change metatdata test
|
tests/test_tifffile.py
|
tests/test_tifffile.py
|
""" Test tifffile plugin functionality.
"""
import os
import numpy as np
from pytest import raises
from imageio.testing import run_tests_if_main, get_test_dir, need_internet
from imageio.core import get_remote_file
import imageio
test_dir = get_test_dir()

def test_tifffile_format():
    # Test selection
    for name in ['tiff', '.tif']:
        format = imageio.formats[name]
        assert format.name == 'TIFF'


def test_tifffile_reading_writing():
    """ Test reading and saving tiff """
    need_internet() # We keep a test image in the imageio-binary repo

    im2 = np.ones((10, 10, 3), np.uint8) * 2
    filename1 = os.path.join(test_dir, 'test_tiff.tiff')

    # One image
    imageio.imsave(filename1, im2)
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 1

    # Multiple images
    imageio.mimsave(filename1, [im2, im2, im2])
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 3, ims[0].shape

    # remote multipage rgb file
    filename2 = get_remote_file('images/multipage_rgb.tif')
    img = imageio.mimread(filename2)
    assert len(img) == 2
    assert img[0].shape == (3, 10, 10)

    # Mixed
    W = imageio.save(filename1)
    W.set_meta_data({'planarconfig': 'planar'})
    assert W.format.name == 'TIFF'
    W.append_data(im2)
    W.append_data(im2)
    W.close()
    #
    R = imageio.read(filename1)
    assert R.format.name == 'TIFF'
    ims = list(R) # == [im for im in R]
    assert (ims[0] == im2).all()

    meta = R.get_meta_data()
    print(meta)
    assert meta['is_rgb']

    # Fail
    raises(IndexError, R.get_data, -1)
    raises(IndexError, R.get_data, 3)

    # Ensure imwrite write works round trip
    filename3 = os.path.join(test_dir, 'test_tiff2.tiff')
    R = imageio.imread(filename1)
    imageio.imwrite(filename3, R)
    R2 = imageio.imread(filename3)
    assert (R == R2).all()


run_tests_if_main()
|
Python
| 0
|
@@ -1634,45 +1634,48 @@
-print(meta)%0A assert meta%5B'is_rgb'%5D
+assert meta%5B'orientation'%5D == 'top_left'
%0A
|
8d09e745f24e663cb81ff5be6bc7b643c6c5bd76
|
call it pennyblack 0.3.0
|
pennyblack/__init__.py
|
pennyblack/__init__.py
|
VERSION = (0, 3, 0, 'pre')
__version__ = '.'.join(map(str, VERSION))
# Do not use Django settings at module level as recommended
try:
    from django.utils.functional import LazyObject
except ImportError:
    pass
else:
    class LazySettings(LazyObject):
        def _setup(self):
            from pennyblack import default_settings
            self._wrapped = Settings(default_settings)

    class Settings(object):
        def __init__(self, settings_module):
            for setting in dir(settings_module):
                if setting == setting.upper():
                    setattr(self, setting, getattr(settings_module, setting))

    settings = LazySettings()


def send_newsletter(newsletter_name, *args, **kwargs):
    """
    Gets a newsletter by its name and tries to send it to receiver
    """
    from pennyblack.models import Newsletter
    newsletter = Newsletter.objects.get_workflow_newsletter_by_name(newsletter_name)
    if newsletter:
        newsletter.send(*args, **kwargs)
|
Python
| 0.999594
|
@@ -16,14 +16,8 @@
, 0,
- 'pre'
)%0A__
|
57d5622d205854eafd8babf8dfa1ad45bf05ebcb
|
Update ipc_lista1.15.py
|
lista1/ipc_lista1.15.py
|
lista1/ipc_lista1.15.py
|
#ipc_lista1.15
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
##Faça um Programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês. Calcule e mostre o total do seu salário no referido mês, sabendo-se que são descontados 11% para o Imposto de Renda, 8% para o INSS e 5% para o sindicato, faça um programa que nos dê:
#salário bruto.
#quanto pagou ao INSS.
#quanto pagou ao sindicato.
#o salário líquido.
#calcule os descontos e o salário líquido, conforme a tabela abaixo:
#+ Salário Bruto : R$
#- IR (11%) : R$
#- INSS (8%) : R$
#- Sindicato ( 5%) : R$
#= Salário Liquido : R$
#Obs.: Salário Bruto - Descontos = Salário Líquido.
qHora = input("Quanto você ganha por hora: ")
hT = input("Quantas horas você trabalhou: ")
SalBruto = qHora
ir = (11/100.0 * salBruto)
inss = (8/100.0m* SalBruto)
sindicato = (5/100.0 * SalBruto)
vT = ir + sindicato
SalLiq = SalBruto - vT
print "------------------------"
print "Seu salário bruto e: ",SalBruto
print '------------------------"
print "Valor dos impostos"
print "-------------------------"
print "IR: ",ir
print "INSS: ",inss
print"--------------------------"
print"Se salario liquido e: ",SalLiq
|
Python
| 0
|
@@ -874,17 +874,17 @@
(8/100.0
-m
+
* SalBru
|
b37432e914b6c6e45803a928f35fbaa8964780aa
|
test by uuid
|
tests/unit/test_log.py
|
tests/unit/test_log.py
|
import pytest
from raft import log

def mle(index, term, committed=False, msgid='', msg={}):
    return dict(index=index, term=term, committed=committed,
                msgid=msgid, msg=msg)


def test_le():
    # a's term is greater than b's
    a = {1: mle(1, 2),
         2: mle(2, 2),
         3: mle(3, 4)}
    b = {1: mle(1, 2),
         2: mle(2, 2),
         3: mle(3, 3)}
    ra = log.RaftLog(a)
    rb = log.RaftLog(b)
    assert ra > rb

    # terms are equal
    a = {1: mle(1, 2),
         2: mle(2, 2),
         3: mle(3, 4)}
    b = {1: mle(1, 2),
         2: mle(2, 2),
         3: mle(3, 4)}
    ra = log.RaftLog(a)
    rb = log.RaftLog(b)
    assert a <= b
    assert b <= a

    # terms equal but more commits in b
    a = {1: mle(1, 2),
         2: mle(2, 2),
         3: mle(3, 4)}
    b = {1: mle(1, 2),
         2: mle(2, 2),
         3: mle(3, 4),
         4: mle(4, 4)}
    ra = log.RaftLog(a)
    rb = log.RaftLog(b)
    assert rb > ra


def test_dump():
    rl = log.RaftLog(None)
    dump = {0: {'term': 0, 'msgid': '', 'committed': True,
                'acked': [], 'msg': {}, 'index': 0}}
    assert rl.dump() == dump


def test_get_max_index_term():
    rl = log.RaftLog(None)
    le = log.logentry(2, 'abcd', {})
    rl.add(le)
    assert rl.get_max_index_term() == (1, 2)
    le = log.logentry(6, 'abcdefg', {})
    rl.add(le)
    assert rl.get_max_index_term() == (2, 6)
|
Python
| 0.000001
|
@@ -1396,8 +1396,188 @@
(2, 6)%0A
+%0Adef test_has_uuid():%0A rl = log.RaftLog(None)%0A le = log.logentry(2, 'abcd', %7B%7D)%0A rl.add(le)%0A assert rl.has_uuid('abcd') == True%0A assert rl.has_uuid('dcba') == False%0A
|
62d5a4446c4c0a919557dd5f2e95d21c5a8259a8
|
Test the optimized set
|
tests/unit/test_set.py
|
tests/unit/test_set.py
|
from pypred import PredicateSet, Predicate

class TestPredicateSet(object):
    def test_two(self):
        p1 = Predicate("name is 'Jack'")
        p2 = Predicate("name is 'Jill'")
        s = PredicateSet([p1, p2])
        match = s.evaluate({'name': 'Jill'})
        assert match == [p2]

    def test_dup(self):
        p1 = Predicate("name is 'Jill'")
        s = PredicateSet([p1, p1])
        match = s.evaluate({'name': 'Jill'})
        assert match == [p1]
|
Python
| 0.000043
|
@@ -1,8 +1,22 @@
+import pytest%0A
from pyp
@@ -25,16 +25,39 @@
d import
+ OptimizedPredicateSet,
Predica
@@ -459,32 +459,32 @@
name': 'Jill'%7D)%0A
-
assert m
@@ -497,8 +497,1244 @@
= %5Bp1%5D%0A%0A
+class TestOptPredicateSet(object):%0A def test_two(self):%0A p1 = Predicate(%22name is 'Jack'%22)%0A p2 = Predicate(%22name is 'Jill'%22)%0A s = OptimizedPredicateSet(%5Bp1, p2%5D)%0A match = s.evaluate(%7B'name': 'Jill'%7D)%0A assert match == %5Bp2%5D%0A%0A def test_dup(self):%0A p1 = Predicate(%22name is 'Jill'%22)%0A s = OptimizedPredicateSet(%5Bp1, p1%5D)%0A match = s.evaluate(%7B'name': 'Jill'%7D)%0A assert match == %5Bp1%5D%0A%0A def test_invalidate(self):%0A %22AST is invalidated when set changes%22%0A p1 = Predicate(%22name is 'Jack'%22)%0A p2 = Predicate(%22name is 'Jill'%22)%0A s = OptimizedPredicateSet(%5Bp1, p2%5D)%0A match = s.evaluate(%7B'name': 'Jill'%7D)%0A assert match == %5Bp2%5D%0A%0A p3 = Predicate(%22name is 'Joe'%22)%0A s.add(p3)%0A assert s.ast == None%0A match = s.evaluate(%7B'name': 'Joe'%7D)%0A assert match == %5Bp3%5D%0A%0A def test_finalize(self):%0A p1 = Predicate(%22name is 'Jack'%22)%0A p2 = Predicate(%22name is 'Jill'%22)%0A s = OptimizedPredicateSet(%5Bp1, p2%5D)%0A s.finalize()%0A match = s.evaluate(%7B'name': 'Jill'%7D)%0A assert match == %5Bp2%5D%0A%0A p3 = Predicate(%22name is 'Joe'%22)%0A with pytest.raises(Exception):%0A s.add(p3)%0A%0A
|
eb4a0ea5542e894e7d9519b5b03212122d26d220
|
Version Bump
|
littlepython/version.py
|
littlepython/version.py
|
version = '0.4.3'
|
Python
| 0.000001
|
@@ -12,7 +12,7 @@
0.4.
-3
+4
'%0A
|
cae0764f2cbb8d00de1832079e55b8e4d45f55f2
|
Fix for short OTU name when there is a species but no genus or higher
|
phylotoast/otu_calc.py
|
phylotoast/otu_calc.py
|
from __future__ import division
import ast
from collections import defaultdict
from phylotoast import biom_calc as bc

def otu_name(tax):
    """
    Determine a simple Genus-species identifier for an OTU, if possible.
    If OTU is not identified to the species level, name it as
    Unclassified (familly/genus/etc...).

    :type tax: list
    :param tax: QIIME-style taxonomy identifiers, e.g.
                ["k__Bacteria", u"p__Firmicutes", u"c__Bacilli", ...

    :rtype: str
    :return: Returns genus-species identifier based on identified taxonomical
             level.
    """
    extract_name = lambda lvl: "_".join(lvl.split("_")[2:])
    spname = "spp."
    for lvl in tax[::-1]:
        if len(lvl) <= 3:
            continue
        if lvl.startswith("s"):
            spname = extract_name(lvl)
        elif lvl.startswith("g"):
            return "{}_{}".format(extract_name(lvl), spname)
        else:
            return "Unclassified_{}".format(extract_name(lvl))


def load_core_file(core_fp):
    """
    For core OTU data file, returns Genus-species identifier for each data
    entry.

    :type core_fp: str
    :param core_fp: A file containing core OTU data.
    :rtype: str
    :return: Returns genus-species identifier based on identified taxonomical
             level.
    """
    with open(core_fp, "rU") as in_f:
        return {otu_name(ast.literal_eval(line.split("\t")[1]))
                for line in in_f.readlines()[1:]}


def assign_otu_membership(biomfile):
    """
    Determines the OTUIDs present in each sample.

    :type biomfile: biom.table.Table
    :param biomfile: BIOM table object from the biom-format library.
    :rtype: dict
    :return: Returns a dictionary keyed on Sample ID with sets containing
             the IDs of OTUIDs found in each sample.
    """
    samples = defaultdict(set)
    _ = biomfile.pa()
    for sid in biomfile.ids():
        for otuid in biomfile.ids("observation"):
            if biomfile.get_value_by_ids(otuid, sid) == 1:
                samples[sid].add(otuid)
    return samples
|
Python
| 0.00004
|
@@ -918,16 +918,101 @@
else:%0A
+ if spname != %22spp.%22:%0A return spname%0A else:%0A
|
7c4d3fffe62190b8c27317ed83bd5e7110b103ec
|
Update parser.py
|
MusicXMLParser/parser.py
|
MusicXMLParser/parser.py
|
'''
Takes a musicXML file, and creates a file that can be played by my MusicPlayer arduino library.
Written by Eivind Lie Andreassen, 2016
	Licensed under Creative Commons Attribution-ShareAlike 4.0 International. http://creativecommons.org/licenses/by-sa/4.0/
'''

import xml.dom.minidom
import valueHelper

xmlPath = input("Enter path to MusicXML file: ")
savePath = input("Enter save path of converted file: ")

domTree = xml.dom.minidom.parse(xmlPath)
collection = domTree.documentElement
if(collection.hasAttribute("example")):
    print(collection.getAttribute("sample"))

notesXML = collection.getElementsByTagName("note")

notes = []
noteLengths = []
for note in notesXML:
    if (len(note.getElementsByTagName("rest"))>0):
        noteValue = '0'
    else:
        noteValue = note.getElementsByTagName("step")[0].childNodes[0].data + note.getElementsByTagName("octave")[0].childNodes[0].data
        if len(note.getElementsByTagName("alter")) > 0:
            index = valueHelper.noteValues.index(noteValue) + int(note.getElementsByTagName("alter")[0].childNodes[0].data)
            if(index < 0):
                index = 0
            elif(index >= len(valueHelper.noteValues)):
                index = len(valueHelper.noteValues) - 1
            noteValue = valueHelper.noteValues[index]

    if(len(note.getElementsByTagName("type")) == 0):
        continue
    noteLength = valueHelper.lengthValues[note.getElementsByTagName("type")[0].childNodes[0].data]
    if(len(note.getElementsByTagName("dot")) > 1):
        noteLength *= 1.75
    elif(len(note.getElementsByTagName("dot")) > 0):
        noteLength *= 1.5

    notes.append(noteValue)
    noteLengths.append(noteLength)

output = "NoteNumber: " + str(len(notes)) + "\n\n"
output += "Notes:\n{"
for i in range(len(notes)):
    output += notes[i]
    if(i < len(notes) - 1):
        output += ", "
    if(i != 0 and i % 10 == 0):
        output += "\n"
output += "};\n\n"

output += "NoteLengths:\n{"
for i in range(len(notes)):
    output += str(noteLengths[i])
    if(i < len(notes) - 1):
        output += ", "
    if(i != 0 and i % 10 == 0):
        output += "\n"
output += "};"

with open(savePath, "w") as file:
    file.write(output)
|
Python
| 0.000001
|
@@ -144,9 +144,12 @@
016%0A
-%09
+
Lice
@@ -163,113 +163,24 @@
der
-Creative Commons Attribution-ShareAlike 4.0 International. http://creativecommons.org/licenses/by-sa/4.0/
+the MIT license.
%0A'''
@@ -2148,8 +2148,9 @@
(output)
+%0A
|
83b83cb3491bd4ccf39e2c6ade72f8f526ea27fe
|
Increase toolbox reporting
|
ArcToolbox/Scripts/ExportFolder2PDF.py
|
ArcToolbox/Scripts/ExportFolder2PDF.py
|
#Export a folder of maps to PDFs at their Map Document set sizes
#Written using ArcGIS 10 and Python 2.6.5
#by: Guest
# https://gis.stackexchange.com/questions/7147/how-to-batch-export-mxd-to-pdf-files
import arcpy, os
#Read input parameter from user.
path = arcpy.GetParameterAsText(0)
#Write MXD names in folder to txt log file.
writeLog=open(path+"\FileListLog.txt","w")
for fileName in os.listdir(path):
    fullPath = os.path.join(path, fileName)
    if os.path.isfile(fullPath):
        basename, extension = os.path.splitext(fullPath)
        if extension == ".mxd":
            writeLog.write(fullPath+"\n")
            mxd = arcpy.mapping.MapDocument(fullPath)
            print fileName + "\n"
del mxd
print "Done"
writeLog.close()

# Set all the parameters as variables here:
data_frame = 'PAGE_LAYOUT'
df_export_width = 1920
df_export_height = 1200
resolution = "300"
image_quality = "BETTER"
colorspace = "RGB"
compress_vectors = "True"
image_compression = "ADAPTIVE"
picture_symbol = 'VECTORIZE_BITMAP'
convert_markers = "False"
embed_fonts = "True"
layers_attributes = "LAYERS_ONLY"
georef_info = "False"
jpeg_compression_quality = 85

exportPath =arcpy.GetParameterAsText(1)
MXDread=open(path+"\FileListLog.txt","r")
for line in MXDread:
    #Strip newline from line.
    line=line.rstrip('\n')
    if os.path.isfile(line):
        basename, extension = os.path.splitext(line)
        newName=basename.split('\\')[-1]
        if extension.lower() == ".mxd":
            print "Basename:" +newName
            mxd = arcpy.mapping.MapDocument(line)
            newPDF=exportPath+"\\"+newName+".pdf"
            print newPDF
            arcpy.mapping.ExportToPDF(mxd,newPDF, data_frame, df_export_width, df_export_height, resolution, image_quality, colorspace, compress_vectors, image_compression, picture_symbol, convert_markers, embed_fonts, layers_attributes, georef_info, jpeg_compression_quality)
            print line + "Export Done"
MXDread.close()
item=path+"\FileListLog.txt"
os.remove(item)
del mxd
|
Python
| 0
|
@@ -678,21 +678,44 @@
-print
+arcpy.AddMessage('Found: ' +
fileNam
@@ -719,15 +719,9 @@
Name
- + %22%5Cn%22
+)
%0Adel
@@ -729,20 +729,32 @@
mxd%0A
-print
+arcpy.AddMessage(
%22Done%22
+)
%0Awri
@@ -1511,21 +1511,35 @@
-print
+# arcpy.AddMessage(
%22Basena
@@ -1551,16 +1551,18 @@
+newName
+ )
%0A
@@ -1670,20 +1670,48 @@
-print
+arcpy.AddMessage( 'Writing: ' +
newPDF
+ )
%0A
@@ -2012,34 +2012,46 @@
-print line + %22Export Done%22
+arcpy.AddMessage( 'Finished: ' + line)
%0AMXD
@@ -2108,15 +2108,35 @@
e(item)%0A
-
del mxd
+%0Aarcpy.GetMessages()
|
f5a3d65d56a1746fac3bd42d38537cca359a968c
|
improve range-only command detection
|
ex_command_parser.py
|
ex_command_parser.py
|
"""a simple 'parser' for :ex commands
"""
from collections import namedtuple
import re
# holds info about an ex command
EX_CMD = namedtuple('ex_command', 'name command forced range args')
EX_RANGE_REGEXP = re.compile(r'^(:?([.$%]|(:?/.*?/|\?.*?\?){1,2}|\d+)([-+]\d+)?)(([,;])(:?([.$]|(:?/.*?/|\?.*?\?){1,2}|\d+)([-+]\d+)?))?')
EX_ONLY_RANGE_REGEXP = re.compile(r'(?:([%$.]|\d+|/.*?(?<!\\)/|\?.*?\?)([-+]\d+)*(?:([,;])([%$.]|\d+|/.*?(?<!\\)/|\?.*?\?)([-+]\d+)*)?)|(^[/?].*$)')

EX_COMMANDS = {
    ('write', 'w'): {'command': 'ex_write_file', 'args': ['file_name']},
    ('wall', 'wa'): {'command': 'ex_write_all', 'args': []},
    ('pwd', 'pw'): {'command': 'ex_print_working_dir', 'args': []},
    ('buffers', 'buffers'): {'command': 'ex_prompt_select_open_file', 'args': []},
    ('ls', 'ls'): {'command': 'ex_prompt_select_open_file', 'args': []},
    ('map', 'map'): {'command': 'ex_map', 'args': []},
    ('abbreviate', 'ab'): {'command': 'ex_abbreviate', 'args': []},
    ('read', 'r'): {'command': 'ex_read_shell_out', 'args': ['shell_cmd']},
}


def find_command(cmd_name):
    names = [x for x in EX_COMMANDS.keys() if x[0].startswith(cmd_name)]
    # unknown command name
    if not names: return None
    # check for matches in known aliases and full names
    full_match = [(x, y) for (x, y) in names if cmd_name in (x, y)]
    if full_match:
        return full_match[0]
    else:
        # partial match, but not a known alias
        return names[0]


def is_only_range(cmd_line):
    return EX_ONLY_RANGE_REGEXP.search(cmd_line) and \
           EX_RANGE_REGEXP.search(cmd_line).span()[1] == len(cmd_line)


def get_cmd_line_range(cmd_line):
    try:
        start, end = EX_RANGE_REGEXP.search(cmd_line).span()
    except AttributeError:
        return None
    return cmd_line[start:end]


def parse_command(cmd):
    # strip :
    cmd_name = cmd[1:]

    # first the odd commands
    if is_only_range(cmd_name):
        return EX_CMD(name=':',
                      command='ex_goto',
                      forced=False,
                      range=cmd_name,
                      args=''
                      )

    if cmd_name.startswith('!'):
        cmd_name = '!'
        args = cmd[2:]
        return EX_CMD(name=cmd_name,
                      command=None,
                      forced=False,
                      range=None,
                      args=args
                      )

    range = get_cmd_line_range(cmd_name)
    if range: cmd_name = cmd_name[len(range):]

    cmd_name, _, args = cmd_name.partition(' ')
    args = re.sub(r' {2,}', ' ', args)
    args = args.split(' ')

    bang =False
    if cmd_name.endswith('!'):
        cmd_name = cmd_name[:-1]
        bang = True

    cmd_data = find_command(cmd_name)
    if not cmd_data: return None
    cmd_data = EX_COMMANDS[cmd_data]

    cmd_args = {}
    if cmd_data['args'] and args:
        cmd_args = dict(zip(cmd_data['args'], args))

    return EX_CMD(name=cmd_name,
                  command=cmd_data['command'],
                  forced=bang,
                  range=range,
                  args=cmd_args
                  )
|
Python
| 0.000001
|
@@ -1490,24 +1490,37 @@
(cmd_line):%0A
+ try:%0A
return E
@@ -1578,16 +1578,20 @@
+
EX_RANGE
@@ -1642,16 +1642,96 @@
d_line)%0A
+ except AttributeError:%0A return EX_ONLY_RANGE_REGEXP.search(cmd_line)%0A
%0A%0Adef ge
|
b614436766e8ee3316936c5718262b35cfae3869
|
Add slug field on save
|
memex_explorer/base/models.py
|
memex_explorer/base/models.py
|
from django.db import models

class Project(models.Model):
    name = models.CharField(max_length=64)
    slug = models.SlugField(max_length=64, unique=True)
    description = models.TextField()
    icon = models.CharField(max_length=64)

    def __str__(self):
        return self.name


class DataModel(models.Model):
    name = models.CharField(max_length=64)
    project = models.ForeignKey(Project)

    def __str__(self):
        return self.name


class Crawl(models.Model):
    name = models.CharField(max_length=64)
    slug = models.CharField(max_length=64)
    description = models.TextField()
    crawler = models.CharField(max_length=64)
    status = models.CharField(max_length=64)
    config = models.CharField(max_length=64)
    seeds_list = models.CharField(max_length=64)
    pages_crawled = models.BigIntegerField()
    harvest_rate = models.FloatField()
    project = models.ForeignKey(Project)
    data_model = models.ForeignKey(DataModel)

    def __str__(self):
        return self.name


class DataSource(models.Model):
    name = models.CharField(max_length=64)
    data_uri = models.CharField(max_length=200)
    description = models.TextField()
    project = models.ForeignKey(Project)
    crawl = models.ForeignKey(Crawl)

    def __str__(self):
        return self.name
|
Python
| 0.000001
|
@@ -23,16 +23,55 @@
models%0A%0A
+from django.utils.text import slugify%0A%0A
%0Aclass P
@@ -313,32 +313,341 @@
turn self.name%0A%0A
+ def save(self, *args, **kwargs):%0A if not self.id:%0A # Newly created object, so save to get self.id%0A super(Project, self).save(*args, **kwargs)%0A self.slug = '%25i-%25s' %25 (%0A self.id, slugify(self.name)%0A )%0A super(Project, self).save(*args, **kwargs)%0A%0A
%0Aclass DataModel
|
047db1c64cd5b7ef070f73e1d580e36236ac9613
|
Print warning when using deprecated 'python3' module
|
mesonbuild/modules/python3.py
|
mesonbuild/modules/python3.py
|
# Copyright 2016-2017 The Meson development team

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sysconfig
from .. import mesonlib, dependencies

from . import ExtensionModule

from mesonbuild.modules import ModuleReturnValue

from ..interpreterbase import noKwargs, permittedKwargs
from ..build import known_shmod_kwargs


class Python3Module(ExtensionModule):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.snippets.add('extension_module')

    @permittedKwargs(known_shmod_kwargs)
    def extension_module(self, interpreter, state, args, kwargs):
        if 'name_prefix' in kwargs:
            raise mesonlib.MesonException('Name_prefix is set automatically, specifying it is forbidden.')
        if 'name_suffix' in kwargs:
            raise mesonlib.MesonException('Name_suffix is set automatically, specifying it is forbidden.')
        host_system = state.host_machine.system
        if host_system == 'darwin':
            # Default suffix is 'dylib' but Python does not use it for extensions.
            suffix = 'so'
        elif host_system == 'windows':
            # On Windows the extension is pyd for some unexplainable reason.
            suffix = 'pyd'
        else:
            suffix = []
        kwargs['name_prefix'] = ''
        kwargs['name_suffix'] = suffix
        return interpreter.func_shared_module(None, args, kwargs)

    @noKwargs
    def find_python(self, state, args, kwargs):
        py3 = dependencies.ExternalProgram('python3', mesonlib.python_command, silent=True)
        return ModuleReturnValue(py3, [py3])

    @noKwargs
    def language_version(self, state, args, kwargs):
        return ModuleReturnValue(sysconfig.get_python_version(), [])

    @noKwargs
    def sysconfig_path(self, state, args, kwargs):
        if len(args) != 1:
            raise mesonlib.MesonException('sysconfig_path() requires passing the name of path to get.')
        path_name = args[0]
        valid_names = sysconfig.get_path_names()
        if path_name not in valid_names:
            raise mesonlib.MesonException('{} is not a valid path name {}.'.format(path_name, valid_names))

        # Get a relative path without a prefix, e.g. lib/python3.6/site-packages
        path = sysconfig.get_path(path_name, vars={'base': '', 'platbase': '', 'installed_base': ''})[1:]
        return ModuleReturnValue(path, [])


def initialize(*args, **kwargs):
    return Python3Module(*args, **kwargs)
|
Python
| 0.000007
|
@@ -776,16 +776,35 @@
edKwargs
+, FeatureDeprecated
%0Afrom ..
@@ -875,16 +875,67 @@
odule):%0A
+ @FeatureDeprecated('python3 module', '0.48.0')%0A
def
|
b7ca5d4a78bd988863aa0c292144bb5968eeb9c8
|
Add --allow_other_files to review command
|
mesonwrap/tools/reviewtool.py
|
mesonwrap/tools/reviewtool.py
|
# Copyright 2015 The Meson development team

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import sys, os, re
import urllib.request, json, hashlib
import tempfile
import git
import shutil

from mesonwrap import upstream
from mesonwrap.tools import environment


def print_status(msg, check):
    '''
    Prints msg with success indicator based on check parameter.
    Returns: check
    '''
    OK_CHR = '\u2611'
    FAIL_CHR = '\u2612'
    status = OK_CHR if check else FAIL_CHR
    print('{msg}: {status}'.format(msg=msg, status=status))
    return check


class Reviewer:
    def __init__(self, project, pull_id):
        self._github = environment.Github()
        self._org = self._github.get_organization('mesonbuild')
        self._project = self._org.get_repo(project)
        self._pull = self._project.get_pull(pull_id)

    def review(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            return self.review_int(tmpdir)

    def review_int(self, tmpdir):
        head_dir = os.path.join(tmpdir, 'head')
        project = self._pull.base.repo.name
        branch = self._pull.base.ref
        head_repo = git.Repo.clone_from(self._pull.head.repo.clone_url, head_dir,
                                        branch=self._pull.head.ref)
        if not self.check_basics(head_repo, project, branch): return False
        if not self.check_files(head_dir): return False
        upwrap = upstream.UpstreamWrap.from_file(os.path.join(head_dir, 'upstream.wrap'))
        if not self.check_wrapformat(upwrap): return False
        if not self.check_download(tmpdir, upwrap): return False
        if not self.check_extract(tmpdir, upwrap): return False
        return True

    @staticmethod
    def check_has_no_path_separators(name, value):
        return print_status(name + ' has no path separators',
                            '/' not in value and '\\' not in value)

    def check_wrapformat(self, upwrap):
        if not print_status('upstream.wrap has directory', upwrap.has_directory): return False
        if not self.check_has_no_path_separators('upstream.wrap directory',
                                                 upwrap.directory): return False
        if not print_status('upstream.wrap has source_url', upwrap.has_source_url): return False
        if not print_status('upstream.wrap has source_filename', upwrap.has_source_filename): return False
        if not self.check_has_no_path_separators('upstream.wrap source_filename',
                                                 upwrap.source_filename): return False
        if not print_status('upstream.wrap has source_hash', upwrap.has_source_hash): return False
        return True

    def check_files(self, head_dir):
        found = False
        permitted_files = ['upstream.wrap', 'meson.build', 'readme.txt',
                           'meson_options.txt', '.gitignore', 'LICENSE.build']
        for root, dirs, files in os.walk(head_dir):
            if '.git' in dirs:
                dirs.remove('.git')
            for fname in files:
                if fname not in permitted_files:
                    if not found:
                        print('Non-buildsystem files found:')
                        found = True
                    abs_name = os.path.join(root, fname)
                    rel_name = abs_name[len(head_dir)+1:]
                    print(' ', rel_name)
        if not print_status('Repo contains only buildsystem files', not found):
            return False
        return True

    @staticmethod
    def isfile(head_dir, filename):
        return os.path.isfile(os.path.join(head_dir, filename))

    def check_basics(self, head_repo, project, branch):
        print('Inspecting project %s, branch %s.' % (project, branch))
        head_dir = head_repo.working_dir
        if not print_status('Repo name valid', re.fullmatch('[a-z0-9._]+', project)): return False
        if not print_status('Branch name valid', re.fullmatch('[a-z0-9._]+', branch)): return False
        if not print_status('Target branch is not master', branch != 'master'): return False
        if not print_status('Has readme.txt', self.isfile(head_dir, 'readme.txt')): return False
        if not print_status('Has LICENSE.build', self.isfile(head_dir, 'LICENSE.build')): return False
        if not print_status('Has upstream.wrap', self.isfile(head_dir, 'upstream.wrap')): return False
        if not print_status('Has toplevel meson.build', self.isfile(head_dir, 'meson.build')): return False
        return True

    @staticmethod
    def _fetch(url):
        data = None
        exc = None
        try:
            with urllib.request.urlopen(url) as u:
                data = u.read()
        except Exception as e:
            exc = e
        return (data, exc)

    def check_download(self, tmpdir, upwrap):
        source_data, download_exc = self._fetch(upwrap.source_url)
        if not print_status('Download url works', download_exc is None):
            print(' error:', str(e))
            return False
        with open(os.path.join(tmpdir, upwrap.source_filename), 'wb') as f:
            f.write(source_data)
        h = hashlib.sha256()
        h.update(source_data)
        calculated_hash = h.hexdigest()
        if not print_status('Hash matches', calculated_hash == upwrap.source_hash):
            print(' expected:', upwrap.source_hash)
            print(' got:', calculated_hash)
            return False
        return True

    def check_extract(self, tmpdir, upwrap):
        # TODO lead_directory_missing
        srcdir = os.path.join(tmpdir, 'src')
        os.mkdir(srcdir)
        shutil.unpack_archive(os.path.join(tmpdir, upwrap.source_filename), srcdir)
        srcdir = os.path.join(srcdir, upwrap.directory)
        if not print_status('upstream.wrap directory {!r} exists'.format(upwrap.directory),
                            os.path.exists(srcdir)): return False
        shutil.copytree(os.path.join(tmpdir, 'head'), srcdir,
                        ignore=shutil.ignore_patterns('.git', 'readme.txt', 'upstream.wrap'))
        return True


def main(args):
    parser = argparse.ArgumentParser()
    parser.add_argument('name')
    parser.add_argument('pull_request', type=int)
    args = parser.parse_args(args)
    r = Reviewer(args.name, args.pull_request)
    if not r.review():
        sys.exit(1)
|
Python
| 0
|
@@ -1335,16 +1335,51 @@
pull_id)
+%0A self.strict_fileset = True
%0A%0A de
@@ -3992,24 +3992,64 @@
not found):%0A
+ if self.strict_fileset:%0A
@@ -6799,16 +6799,84 @@
pe=int)%0A
+ parser.add_argument('--allow_other_files', action='store_true')%0A
args
@@ -6949,16 +6949,66 @@
equest)%0A
+ r.strict_fileset = not args.allow_other_files%0A
if n
|
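The hunks above are stored with URL-escaped newlines (%0A decodes to a line break, %25 to a literal percent sign). Decoded, this change makes the buildsystem-file check opt-out: the reviewer stays strict unless a new CLI flag is passed. A minimal sketch of that pattern follows; the class body is a stub, and only the flag and attribute names mirror the decoded hunks:

# Sketch of the opt-in escape hatch the diff above introduces; the
# Reviewer body here is illustrative, not the real class.
import argparse

class Reviewer(object):
    def __init__(self):
        self.strict_fileset = True  # strict by default

def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--allow_other_files', action='store_true')
    args = parser.parse_args(argv)
    r = Reviewer()
    r.strict_fileset = not args.allow_other_files
    return r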
db931b67874d7dbba2aebf64e90f29b39555a529
|
Add lizard-security again (I pruned too much)
|
ddsc_worker/settings.py
|
ddsc_worker/settings.py
|
# Base Django settings, suitable for production.
# Imported (and partly overridden) by developmentsettings.py which also
# imports localsettings.py (which isn't stored in svn). Buildout takes care
# of using the correct one.
# So: "DEBUG = TRUE" goes into developmentsettings.py and per-developer
# database ports go into localsettings.py. May your heart turn purple if you
# ever put personal settings into this file or into developmentsettings.py!
import os
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
# Enable 'old' /media directories in addition to /static.
'staticfiles.finders.LegacyAppDirectoriesFinder',
# Enable support for django-compressor.
'compressor.finders.CompressorFinder',
)
# Set matplotlib defaults.
# Uncomment this when using lizard-map.
# import matplotlib
# # Force matplotlib to not use any Xwindows backend.
# matplotlib.use('Agg')
# import lizard_map.matplotlib_settings
# SETTINGS_DIR allows media paths and so to be relative to this settings file
# instead of hardcoded to c:\only\on\my\computer.
SETTINGS_DIR = os.path.dirname(os.path.realpath(__file__))
# BUILDOUT_DIR is for access to the "surrounding" buildout, for instance for
# BUILDOUT_DIR/var/static files to give django-staticfiles a proper place
# to place all collected static files.
BUILDOUT_DIR = os.path.abspath(os.path.join(SETTINGS_DIR, '..'))
# Production, so DEBUG is False. developmentsettings.py sets it to True.
DEBUG = False
# Show template debug information for faulty templates. Only used when DEBUG
# is set to True.
TEMPLATE_DEBUG = True
# ADMINS get internal error mails, MANAGERS get 404 mails.
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# TODO: Switch this to the real production database.
# ^^^ 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
# In case of geodatabase, prepend with:
# django.contrib.gis.db.backends.(postgis)
DATABASES = {
'default': {
'NAME': '', # via localproductionsettings!
'ENGINE': '', # via localproductionsettings!
'USER': '', # via localproductionsettings!
'PASSWORD': '', # via localproductionsettings!
'HOST': '', # via localproductionsettings!
'PORT': '', # via localproductionsettings!
}
}
# Almost always set to 1. Django allows multiple sites in one database.
SITE_ID = 1
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name although not all
# choices may be available on all operating systems. If running in a Windows
# environment this must be set to the same as your system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'nl-NL'
# For at-runtime language switching. Note: they're shown in reverse order in
# the interface!
LANGUAGES = (
# ('en', 'English'),
('nl', 'Nederlands'),
)
# If you set this to False, Django will make some optimizations so as not to
# load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds user-uploaded media.
MEDIA_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'media')
# Absolute path to the directory where django-staticfiles'
# "bin/django build_static" places all collected static files from all
# applications' /media directory.
STATIC_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'static')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
MEDIA_URL = '/media/'
# URL for the per-application /media static files collected by
# django-staticfiles. Use it in templates like
# "{{ MEDIA_URL }}mypackage/my.css".
STATIC_URL = '/static_media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '' # via localproductionsettings!
ROOT_URLCONF = 'ddsc_worker.urls'
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
CACHES = {
'default': {
'KEY_PREFIX': BUILDOUT_DIR,
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
MIDDLEWARE_CLASSES = (
# Gzip needs to be at the top.
'django.middleware.gzip.GZipMiddleware',
# Below is the default list, don't modify it.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Lizard security.
'tls.TLSRequestMiddleware',
'lizard_security.middleware.SecurityMiddleware',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'lizard_security.backends.DDSCPermissionBackend',
)
INSTALLED_APPS = (
# Default apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#
'django.contrib.admin',
#
# 'lizard_security',
# 'lizard_ui', # after lizard-security
'ddsc_worker',
'ddsc_core', # after lizard-security
'south',
'compressor',
'staticfiles',
'raven.contrib.django',
'django_extensions',
#
'django.contrib.gis',
'django.contrib.markup',
)
# TODO: Put your real url here to configure Sentry.
SENTRY_DSN = 'http://some:thing@sentry.lizardsystem.nl/1'
# TODO: add gauges ID here. Generate one separately for the staging, too.
UI_GAUGES_SITE_ID = '' # Staging has a separate one.
try:
# For local production overrides (DB passwords, for instance):
from ddsc_worker.localproductionsettings import * # NOQA
except ImportError:
pass
|
Python
| 0
|
@@ -5219,17 +5219,17 @@
,%0A #%0A
-#
+
'liza
|
18c83c53263b3b70225fc657f064bd6d13e73881
|
use iterator in csv dump
|
cla_backend/apps/cla_butler/qs_to_file.py
|
cla_backend/apps/cla_butler/qs_to_file.py
|
# -*- coding: utf-8 -*-
import csv
from datetime import datetime
import json
import os
import re
import time
from django.db import IntegrityError
from django.db.models import ForeignKey
from django.core import serializers
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.dateparse import parse_datetime
from jsonfield import JSONField
from cla_common.money_interval.fields import MoneyIntervalField
from cla_common.money_interval.models import MoneyInterval
RE_DATE = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')
RE_DATETIME = re.compile(r'(\d{4})-(\d\d?)-(\d\d?) (\d\d?):(\d\d?):(\d\d?)\.(\d{6})\+(\d\d?):(\d\d?)$')
WRITE_MODE = 'wb'
APPEND_MODE = 'a'
class QuerysetToFile(object):
def __init__(self, path):
if not os.path.exists(path):
os.makedirs(path)
self.path = path
def get_file_path(self, model, ext='csv'):
return os.path.join(self.path, '%s.%s' % (model.__name__, ext))
def get_name(self, field):
field_name = field.name
if isinstance(field, ForeignKey):
field_name = '%s_id' % field_name
return field_name
def get_value(self, instance, field):
val = getattr(instance, self.get_name(field))
if isinstance(val, MoneyInterval):
val = json.dumps(val.as_dict())
if hasattr(val, 'pk'):
val = val.pk
if val is None:
val = ''
try:
return unicode(val).encode('utf-8')
except UnicodeDecodeError:
return val
def dump_to_csv(self, qs):
"""dump queryset to .csv"""
file_path = self.get_file_path(qs.model)
if os.path.isfile(file_path):
write_mode = APPEND_MODE
else:
write_mode = WRITE_MODE
field_names = [self.get_name(f) for f in qs.model._meta.fields]
with open(file_path, write_mode) as csvfile:
writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
if write_mode == WRITE_MODE:
writer.writerow(field_names)
for instance in qs:
writer.writerow(
[self.get_value(instance, f) for f in
qs.model._meta.fields])
csvfile.close()
def dump_to_yml(self, qs):
"""dump queryset to .yaml"""
file_path = self.get_file_path(qs.model, 'yaml')
if os.path.isfile(file_path):
write_mode = APPEND_MODE
else:
write_mode = WRITE_MODE
with open(file_path, write_mode) as yamlfile:
yamlfile.write(serializers.serialize('yaml', qs))
yamlfile.close()
def dump_values(self, qs):
"""dump queryset to .json"""
file_path = self.get_file_path(qs.model, 'json')
if os.path.isfile(file_path):
write_mode = APPEND_MODE
else:
write_mode = WRITE_MODE
with open(file_path, write_mode) as jsonfile:
jsonfile.write(json.dumps(list(qs.values()), cls=DjangoJSONEncoder))
jsonfile.close()
def dump(self, qs):
print 'starting dump of %s' % qs.model.__name__
start = time.time()
self.dump_to_csv(qs)
print 'Time to dump %s: %s' % (qs.model.__name__, time.time() - start)
def set_value(self, val, field):
if val == '' and field.empty_strings_allowed and not field.null:
val = ''
elif val in field.empty_values:
val = None
elif isinstance(field, MoneyIntervalField):
val = MoneyInterval.from_dict(json.loads(val))
elif isinstance(field, JSONField):
val = json.dumps(val)
elif RE_DATE.match(val):
val = datetime.strptime(val, '%Y-%m-%d').date()
elif RE_DATETIME.match(val):
val = parse_datetime(val)
return val
def load(self, model):
"""Load .csv file to model"""
file_path = self.get_file_path(model)
with open(file_path, 'rb') as csvfile:
reader = csv.DictReader(csvfile, quoting=csv.QUOTE_ALL)
failed_objects = []
for row in reader:
obj = model()
for f in model._meta.fields:
n = self.get_name(f)
try:
val = self.set_value(row[n], f)
except Exception as sv:
print row[n]
print f.name
print sv
raise
try:
setattr(obj, n, val)
except Exception as set_e:
print set_e
print val
print val.__class__.__name__
print f.name
print f.__class__.__name__
raise
try:
obj.save()
except IntegrityError:
print 'Try to save failed object at end'
failed_objects.append(obj)
except Exception as e:
print e
raise
[o.save() for o in failed_objects]
|
Python
| 0.000001
|
@@ -2078,16 +2078,27 @@
ce in qs
+.iterator()
:%0A
|
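The one-line change above swaps `for instance in qs:` for `for instance in qs.iterator():`. Plain iteration fills the queryset's result cache, holding every row in memory for the duration of the dump; `.iterator()` streams rows from the database cursor instead. A hedged sketch of the pattern (the model name is hypothetical, not from the file):

# Streaming a large Django queryset; Record is a hypothetical model.
def iter_rows(qs):
    # .iterator() reads results in chunks and skips the queryset's
    # result cache, so memory use stays flat on large tables.
    for instance in qs.iterator():
        yield instance.pk

# usage: for pk in iter_rows(Record.objects.all()): ...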
1018d6bde32a8d18a2315dafd084826443209ba1
|
Update clock.py
|
examples/clock.py
|
examples/clock.py
|
#!/usr/bin/python
import time
import datetime
from Adafruit_LED_Backpack import SevenSegment
# ===========================================================================
# Clock Example
# ===========================================================================
segment = SevenSegment.SevenSegment(address=0x70)
# Initialize the display. Must be called once before using the display.
segment.begin()
print "Press CTRL+Z to exit"
# Continually update the time on a 4 char, 7-segment display
while(True):
now = datetime.datetime.now()
hour = now.hour
minute = now.minute
second = now.second
if hour >= 22 or hour < 7:
segment.set_brightness(0)
else:
segment.set_brightness(5)
hour = hour % 12
if hour == 0:
hour = 12
A = int(hour / 10)
if A == 0:
A = ' '
segment.clear()
# Set hours
segment.set_digit(0, A) # Tens
segment.set_digit(1, hour % 10) # Ones
# Set minutes
segment.set_digit(2, int(minute / 10)) # Tens
segment.set_digit(3, minute % 10) # Ones
# Toggle colon
segment.set_colon(second % 2) # Toggle colon at 1Hz
# Write the display buffer to the hardware. This must be called to
# update the actual display LEDs.
segment.write_display()
# Wait a quarter second (less than 1 second to prevent colon blinking getting out of sync)
time.sleep(0.25)
|
Python
| 0.000002
|
@@ -628,16 +628,34 @@
hour %3C 7
+ and minutes == 26
:%0A se
|
37da8a56f127a871c4133f0ba58921779e9b487c
|
Update __init__.py
|
deepdish/io/__init__.py
|
deepdish/io/__init__.py
|
from __future__ import division, print_function, absolute_import
from .mnist import load_mnist
from .norb import load_small_norb
from .casia import load_casia
from .cifar import load_cifar_10
try:
import tables
_pytables_ok = True
except ImportError:
_pytables_ok = False
del tables
if _pytables_ok:
from .hdf5io import load, save
else:
def _f(*args, **kwargs):
raise ImportError("You need PyTables for this function")
load = save = _f
__all__ = ['load_mnist', 'load_small_norb', 'load_casia', 'load_cifar_10']
|
Python
| 0
|
@@ -233,16 +233,31 @@
= True%0A
+ del tables%0A
except I
@@ -296,19 +296,8 @@
alse
-%0Adel tables
%0A%0Aif
|
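Decoded, the hunks move `del tables` inside the `try:` block. In the original, a failed import leaves the name `tables` unbound, so the module-level `del tables` raises NameError instead of letting the fallback run. The corrected guard pattern, as a standalone sketch:

# Guarded optional import: only delete the name if the import bound it.
try:
    import tables  # optional dependency (PyTables)
    _pytables_ok = True
    del tables
except ImportError:
    _pytables_ok = False

if not _pytables_ok:
    def _f(*args, **kwargs):
        raise ImportError("You need PyTables for this function")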
08d6784b452d729d7cecc3865212b0046d43301d
|
Allow limit to be passed to tasks API
|
deferred_manager/api.py
|
deferred_manager/api.py
|
import datetime
import json
import webapp2
from operator import itemgetter
from google.appengine.ext import db
from google.appengine.api.logservice import logservice
from .models import TaskState, QueueState
def _serializer(obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
if hasattr(obj, '__dict__'):
return obj.__dict__
raise ValueError(obj)
def serialize_model(obj):
return json.dumps(db.to_dict(obj), default=_serializer)
def dump(obj):
return json.dumps(obj, default=_serializer)
class QueueListHandler(webapp2.RequestHandler):
def get(self):
ctx = {
"queues": map(db.to_dict, QueueState.all())
}
self.response.content_type = "application/json"
self.response.write(dump(ctx))
class QueueHandler(webapp2.RequestHandler):
def get(self, queue_name):
queue_state = QueueState.get_by_key_name(queue_name)
if not queue_state:
self.response.set_status(404)
return
tasks = TaskState.all().ancestor(queue_state).order("-deferred_at").fetch(limit=1000)
ctx = db.to_dict(queue_state)
stats = queue_state.get_queue_statistics()
ctx['stats'] = {k: getattr(stats, k) for k in ("tasks", "executed_last_minute", "in_flight", "enforced_rate",)}
if stats.oldest_eta_usec:
ctx['stats']['oldest_eta'] = datetime.datetime.utcfromtimestamp(stats.oldest_eta_usec / 1e6)
ctx['tasks'] = map(db.to_dict, tasks)
self.response.content_type = "application/json"
self.response.write(dump(ctx))
def delete(self, queue_name):
# DELETE == purge in this case
queue_state = QueueState.get_by_key_name(queue_name)
if not queue_state:
self.response.set_status(404)
return
queue_state.get_queue_statistics().queue.purge()
rpcs = []
for task in TaskState.all().ancestor(queue_state).filter('is_complete', False).filter('is_running', False).run():
task.is_complete = task.is_permanently_failed = True
task.was_purged = True
rpcs.append(db.put_async(task))
for rpc in rpcs:
rpc.get_result()
self.response.content_type = "application/json"
self.response.write(dump({
"message": "Purging " + queue_name
}))
class TaskInfoHandler(webapp2.RequestHandler):
def get(self, queue_name, task_name):
queue_state = QueueState.get_by_key_name(queue_name)
task_state = TaskState.get_by_key_name(task_name, parent=queue_state)
if not (queue_state and task_state):
self.response.set_status(404)
return
ctx = {
'task': db.to_dict(task_state),
}
if task_state.request_log_ids:
ctx['logs'] = sorted(get_logs(task_state.request_log_ids, logservice.LOG_LEVEL_INFO), key=itemgetter('start_time'), reverse=True)
self.response.content_type = "application/json"
self.response.write(dump(ctx))
class LogHandler(webapp2.RequestHandler):
def get(self, log_id):
log_level = int(self.request.GET.get('level', logservice.LOG_LEVEL_INFO))
ctx = {
'log': next(get_logs([log_id], log_level), None)
}
self.response.content_type = "application/json"
self.response.write(dump(ctx))
def get_logs(log_ids, log_level):
for request_log in logservice.fetch(minimum_log_level=log_level,
include_incomplete=True,
include_app_logs=True,
request_ids=log_ids):
d = {name: getattr(request_log, name)
for name, val in request_log.__class__.__dict__.iteritems()
if isinstance(val, property) and not name.startswith('_')
}
d['start_time'] = datetime.datetime.fromtimestamp(request_log.start_time)
if request_log.end_time:
d['end_time'] = datetime.datetime.fromtimestamp(request_log.end_time)
d['duration'] = (d['end_time'] - d['start_time']).total_seconds()
d['app_logs'] = [{
'time': datetime.datetime.fromtimestamp(app_log.time),
'level': app_log.level,
'message': app_log.message
}
for app_log in d['app_logs']
if app_log.level >= log_level
]
yield d
|
Python
| 0
|
@@ -1107,13 +1107,49 @@
mit=
+int(self.request.GET.get('limit',
1000)
+))
%0A%0A
@@ -2400,20 +2400,16 @@
eue_name
-
%0A
|
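The first hunk above, decoded, turns the hardcoded `fetch(limit=1000)` into `fetch(limit=int(self.request.GET.get('limit', 1000)))`. One caveat: `int()` raises ValueError if a client sends a non-numeric value. A slightly more defensive variant (the helper is mine, not part of the file):

# Hypothetical helper: parse an optional integer query parameter,
# falling back to the default on garbage input.
def get_int_param(request, name, default):
    try:
        return int(request.GET.get(name, default))
    except (TypeError, ValueError):
        return default

# usage inside QueueHandler.get:
#   limit = get_int_param(self.request, 'limit', 1000)
#   tasks = TaskState.all().ancestor(queue_state) \
#                    .order("-deferred_at").fetch(limit=limit)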
0d914a4843e5959c108077e8c5275a1ddd05f617
|
Upgrade version number
|
djaloha/__init__.py
|
djaloha/__init__.py
|
# -*- coding: utf-8 -*-
VERSION = (0, 1)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
# if VERSION[2]:
# version = '%s.%s' % (version, VERSION[2])
# if VERSION[3] != "final":
# version = '%s%s%s' % (version, VERSION[3], VERSION[4])
return version
__version__ = get_version()
|
Python
| 0.000001
|
@@ -32,17 +32,17 @@
N = (0,
-1
+2
)%0A%0Adef g
|
0f5433458be9add6a879e8e490017663714d7664
|
fix cron job FailedRunsNotificationCronJob to import get_class routine from new place
|
django_cron/cron.py
|
django_cron/cron.py
|
from django.conf import settings
from django_cron import CronJobBase, Schedule
from django_cron.models import CronJobLog
from django_cron.management.commands.runcrons import get_class
from django_common.helper import send_mail
class FailedRunsNotificationCronJob(CronJobBase):
"""
Send email if cron failed to run X times in a row
"""
RUN_EVERY_MINS = 30
schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
code = 'django_cron.FailedRunsNotificationCronJob'
def do(self):
CRONS_TO_CHECK = map(lambda x: get_class(x), settings.CRON_CLASSES)
EMAILS = [admin[1] for admin in settings.ADMINS]
try:
FAILED_RUNS_CRONJOB_EMAIL_PREFIX = settings.FAILED_RUNS_CRONJOB_EMAIL_PREFIX
except AttributeError:
FAILED_RUNS_CRONJOB_EMAIL_PREFIX = ''
for cron in CRONS_TO_CHECK:
try:
min_failures = cron.MIN_NUM_FAILURES
except AttributeError:
min_failures = 10
failures = 0
jobs = CronJobLog.objects.filter(code=cron.code).order_by('-end_time')[:min_failures]
message = ''
for job in jobs:
if not job.is_success:
failures += 1
message += 'Job ran at %s : \n\n %s \n\n' % (job.start_time, job.message)
if failures == min_failures:
send_mail(
'%s%s failed %s times in a row!' % (FAILED_RUNS_CRONJOB_EMAIL_PREFIX, cron.code, \
min_failures), message,
settings.DEFAULT_FROM_EMAIL, EMAILS
)
|
Python
| 0
|
@@ -71,16 +71,27 @@
Schedule
+, get_class
%0Afrom dj
@@ -128,71 +128,8 @@
bLog
-%0Afrom django_cron.management.commands.runcrons import get_class
%0A%0Afr
|
57184440872c8c29906c84a919624e7878f7d75c
|
fix compat
|
skitai/backbone/https_server.py
|
skitai/backbone/https_server.py
|
#!/usr/bin/env python
from . import http_server
from ..counter import counter
import socket, time
from rs4 import asyncore
import ssl
from skitai import lifetime
import os, sys, errno
import skitai
from errno import EWOULDBLOCK
from aquests.protocols.http2 import H2_PROTOCOLS
from ..handlers import vhost_handler
class https_channel (http_server.http_channel):
ac_out_buffer_size = 65536
ac_in_buffer_size = 65536
def send(self, data):
try:
result = self.socket.send(data)
except ssl.SSLError as why:
if why.errno == ssl.SSL_ERROR_WANT_WRITE:
return 0
elif why.errno == ssl.SSL_ERROR_ZERO_RETURN:
self.handle_close ()
return 0
else:
raise
if result <= 0:
return 0
else:
self.server.bytes_out.increment(result)
return result
def recv(self, buffer_size = 65535):
try:
result = self.socket.recv(buffer_size)
if result is None:
return b''
elif result == b'':
self.handle_close()
return b''
else:
self.server.bytes_in.increment(len(result))
return result
except MemoryError:
lifetime.shutdown (1, 1.0)
except ssl.SSLError as why:
if why.errno == ssl.SSL_ERROR_WANT_READ:
try:
raise BlockingIOError
except NameError:
raise socket.error (EWOULDBLOCK)
# closed connection
elif why.errno in (ssl.SSL_ERROR_ZERO_RETURN, ssl.SSL_ERROR_EOF):
self.handle_close ()
return b''
else:
raise
class https_server (http_server.http_server):
CERTINFO = None
def __init__ (self, ip, port, ctx, quic = None, server_logger = None, request_logger = None):
super ().__init__ (ip, port, server_logger, request_logger)
self.ctx = ctx
self.socket = self.ctx.wrap_socket (self.socket, server_side = True)
if quic:
from . import http3_server
ctx = http3_server.init_context (*self.CERTINFO)
self.altsvc = http3_server.http3_server (ip, quic, ctx, server_logger, request_logger)
def install_handler (self, handler, back = 1):
super ().install_handler (handler, back)
if self.altsvc and isinstance (handler, vhost_handler.Handler):
self.altsvc.install_handler (handler)
def serve (self, sub_server = None):
self.altsvc and self.altsvc._serve ()
super ().serve (sub_server)
def handle_accept (self):
self.total_clients.inc()
try:
conn, addr = self.accept()
except socket.error:
#self.log_info ('server accept() threw an exception', 'warning')
return
except TypeError:
if os.name == "nt":
self.log_info ('server accept() threw EWOULDBLOCK', 'warning')
return
except:
self.trace()
https_channel (self, conn, addr)
def init_context (certfile, keyfile, pass_phrase):
https_server.CERTINFO = (certfile, keyfile, pass_phrase)
try:
protocol = ssl.PROTOCOL_TLS
except AttributeError:
protocol = ssl.PROTOCOL_SSLv23
ctx = ssl.SSLContext (protocol)
try:
ctx.set_alpn_protocols (H2_PROTOCOLS)
except AttributeError:
ctx.set_npn_protocols (H2_PROTOCOLS)
ctx.load_cert_chain (certfile, keyfile, pass_phrase)
ctx.check_hostname = False
return ctx
|
Python
| 0.000001
|
@@ -2157,16 +2157,197 @@
f quic:%0A
+ if sys.version_info.major == 3 and sys.version_info.minor %3C 6:%0A self.log ('unsupoorted Python version for QUIC, DISABLED', 'error')%0A else:%0A
@@ -2377,16 +2377,20 @@
_server%0A
+
@@ -2442,16 +2442,20 @@
RTINFO)%0A
+
|
7f3b2b0ab21e4dadffb55da912684eb84ce6da3d
|
Check if remote git is already on commit
|
gitric/api.py
|
gitric/api.py
|
from __future__ import with_statement
from fabric.state import env
from fabric.api import local, run, abort, task
from fabric.context_managers import settings
@task
def allow_dirty():
'''allow pushing even when the working copy is dirty'''
env.gitric_allow_dirty = True
@task
def force_push():
'''allow pushing even when history will be lost'''
env.gitric_force_push = True
def git_seed(repo_path, commit=None, ignore_untracked_files=False):
'''seed a remote git repository'''
commit = _get_commit(commit)
force = ('gitric_force_push' in env) and '-f' or ''
dirty_working_copy = _is_dirty(commit, ignore_untracked_files)
if dirty_working_copy and 'gitric_allow_dirty' not in env:
abort(
'Working copy is dirty. This check can be overridden by\n'
'importing gitric.api.allow_dirty and adding allow_dirty to your '
'call.')
# initialize the remote repository (idempotent)
run('git init %s' % repo_path)
# silence git complaints about pushes coming in on the current branch
# the pushes only seed the immutable object store and do not modify the
# working copy
run('GIT_DIR=%s/.git git config receive.denyCurrentBranch ignore' %
repo_path)
# a target doesn't need to keep track of which branch it is on so we always
# push to its "master"
with settings(warn_only=True):
push = local(
'git push git+ssh://%s@%s:%s%s %s:refs/heads/master %s' % (
env.user, env.host, env.port, repo_path, commit, force))
if push.failed:
abort(
'%s is a non-fast-forward\n'
'push. The seed will abort so you don\'t lose information. '
'If you are doing this\nintentionally import '
'gitric.api.force_push and add it to your call.' % commit)
def git_reset(repo_path, commit=None):
'''checkout a sha1 on a remote git repo'''
commit = _get_commit(commit)
run('cd %s && git reset --hard %s' % (repo_path, commit))
def _get_commit(commit):
if commit is None:
# if no commit is specified we will push HEAD
commit = local('git rev-parse HEAD', capture=True)
return commit
def _is_dirty(commit, ignore_untracked_files):
untracked_files = '--untracked-files=no' if ignore_untracked_files else ''
return local('git status %s --porcelain' % untracked_files, capture=True) != ''
|
Python
| 0
|
@@ -106,16 +106,26 @@
rt, task
+, cd, puts
%0Afrom fa
@@ -995,24 +995,234 @@
repo_path)%0A
+%0A # finis execution if remote git it's already on commit.%0A with cd(repo_path):%0A if run('git rev-parse HEAD') == commit:%0A puts('Remote already on commit %25s' %25 commit)%0A return%0A%0A
# silenc
|
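Decoded, the hunks import `cd` and `puts` from `fabric.api` and short-circuit `git_seed` when the remote HEAD already equals the commit being pushed. Outside Fabric, the same check can be expressed with subprocess; this sketch runs locally under that assumption rather than over SSH:

# Hedged sketch of the "already on commit" check, using subprocess
# locally instead of Fabric's run()/cd() on the remote host.
import subprocess

def is_on_commit(repo_path, commit):
    head = subprocess.check_output(
        ['git', 'rev-parse', 'HEAD'], cwd=repo_path)
    return head.strip().decode() == commit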
47e7fcc3b837b459a2800e09ee87c2a6f87cdfba
|
Update SController.py
|
skype_controller/SController.py
|
skype_controller/SController.py
|
"""Import somes important packages"""
import Skype4Py
import config as gbconfig
import json
from common import get_project_path
# Get Skype class instance
SKYPE_OBJ = Skype4Py.Skype()
# Establish the connection from the Skype object to the Skype client.
SKYPE_OBJ.Attach()
# Get all contacts from the object. This function might not be used in this case
"""Function to get file contains list of skype's contact"""
returndata = {}
try:
root_path = get_project_path()
# print root_path
file_path = "%s/%s" % (root_path, gbconfig.FILE_CONTACT)
filename = open(file_path, 'r')
returndata = json.loads(filename.read())
filename.close()
except Exception as ex:
print 'What the fuck? I could not load your file: %s - %s' % (gbconfig.FILE_CONTACT, ex)
return returndata
def main_function():
"""Runable function"""
get_file()
for contact, message in get_file().iteritems():
SKYPE_OBJ.SendMessage(contact, message)
print "Message has been sent"
if __name__ == "__main__":
main_function()
|
Python
| 0.000001
|
@@ -349,16 +349,31 @@
s case%0A%0A
+def get_file():
%0A%0A %22%22
|
8cca0499a6cecdb54a3f51987976c3aa1b9abf70
|
Fix spelling
|
fetch_papers.py
|
fetch_papers.py
|
"""
Queries arxiv API and downloads papers (the query is a parameter).
The script is intended to enrich an existing database pickle (by default db.p),
so this file will be loaded first, and then new results will be added to it.
"""
import urllib
import time
import feedparser
import os
import cPickle as pickle
import argparse
import random
import utils
def encode_feedparser_dict(d):
"""
helper function to get rid of feedparser bs with a deep copy.
I hate when libs wrap simple things in their own classes.
"""
if isinstance(d, feedparser.FeedParserDict) or isinstance(d, dict):
j = {}
for k in d.keys():
j[k] = encode_feedparser_dict(d[k])
return j
elif isinstance(d, list):
l = []
for k in d:
l.append(encode_feedparser_dict(k))
return l
else:
return d
def parse_arxiv_url(url):
"""
examples is http://arxiv.org/abs/1512.08756v2
we want to extract the raw id and the version
"""
ix = url.rfind('/')
idversion = url[ix+1:] # extract just the id (and the version)
parts = idversion.split('v')
assert len(parts) == 2, 'error parsing url ' + url
return parts[0], int(parts[1])
if __name__ == "__main__":
# parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--db_path', dest='db_path', type=str, default='db.p', help='database pickle filename that we enrich')
parser.add_argument('--search_query', dest='search_query', type=str,
default='cat:cs.CV+OR+cat:cs.LG+OR+cat:cs.CL+OR+cat:cs.NE+OR+cat:stat.ML',
help='query used for arxiv API. See http://arxiv.org/help/api/user-manual#detailed_examples')
parser.add_argument('--start_index', dest='start_index', type=int, default=0, help='0 = most recent API result')
parser.add_argument('--max_index', dest='max_index', type=int, default=10000, help='upper bound on paper index we will fetch')
parser.add_argument('--results_per_iteration', dest='results_per_iteration', type=int, default=100, help='passed to arxiv API')
parser.add_argument('--wait_time', dest='wait_time', type=float, default=5.0, help='lets be gentle to arxiv API (in number of seconds)')
parser.add_argument('--break_on_no_added', dest='break_on_no_added', type=int, default=1, help='break out early if all returned query papers are already in db? 1=yes, 0=no')
args = parser.parse_args()
# misc hardcoded variables
base_url = 'http://export.arxiv.org/api/query?' # base api query url
print 'Searching arXiv for %s' % (args.search_query, )
# lets load the existing database to memory
try:
db = pickle.load(open(args.db_path, 'rb'))
except Exception, e:
print 'error loading existing database:'
print e
print 'starting from an empty database'
db = {}
# -----------------------------------------------------------------------------
# main loop where we fetch the new results
print 'database has %d entries at start' % (len(db), )
num_added_total = 0
for i in range(args.start_index, args.max_index, args.results_per_iteration):
print "Results %i - %i" % (i,i+args.results_per_iteration)
query = 'search_query=%s&sortBy=lastUpdatedDate&start=%i&max_results=%i' % (args.search_query,
i, args.results_per_iteration)
response = urllib.urlopen(base_url+query).read()
parse = feedparser.parse(response)
num_added = 0
num_skipped = 0
for e in parse.entries:
j = encode_feedparser_dict(e)
# extract just the raw arxiv id and version for this paper
rawid, version = parse_arxiv_url(j['id'])
j['_rawid'] = rawid
j['_version'] = version
# add to our database if we didn't have it before, or if this is a new version
if not rawid in db or j['_version'] > db[rawid]['_version']:
db[rawid] = j
print 'updated %s added %s' % (j['updated'], j['title'])
num_added += 1
else:
num_skipped += 1
# print some information
print 'Added %d papers, already had %d.' % (num_added, num_skipped)
if len(parse.entries) == 0:
print 'Received no results from arxiv. Rate limiting? Exitting. Restart later maybe.'
print response
break
if num_added == 0 and args.break_on_no_added == 1:
print 'No new papers were added. Assuming no new papers exist. Exitting.'
break
print 'Sleeping for %i seconds' % (args.wait_time , )
time.sleep(args.wait_time + random.uniform(0, 3))
# save the database before we quit
print 'saving database with %d papers to %s' % (len(db), args.db_path)
utils.safe_pickle_dump(db, args.db_path)
|
Python
| 0.999999
|
@@ -4164,25 +4164,24 @@
miting? Exit
-t
ing. Restart
@@ -4357,17 +4357,16 @@
st. Exit
-t
ing.'%0A
|
f21e732eada64a18e08524052ec66ce8705d9e9b
|
make imagemagick env var default to 'convert' instead of None
|
glc/config.py
|
glc/config.py
|
"""
glc.config
==========
At the moment this only houses the environmental variable
for the ImageMagick binary. If you don't want to set that,
or can't for some reason, you can replace ``None`` with the
path where the ``convert`` application that comes with it
lives in.
(c) 2016 LeoV
https://github.com/leovoel/
"""
import os
IMAGEMAGICK_BINARY = os.getenv("IMAGEMAGICK_BINARY", None)
|
Python
| 0.000113
|
@@ -202,20 +202,25 @@
place %60%60
-None
+%22convert%22
%60%60 with
@@ -297,16 +297,57 @@
lives in
+, if it doesn't happen to be in your PATH
.%0A%0A (
@@ -466,10 +466,15 @@
Y%22,
-None
+%22convert%22
)%0A
|
88099c8e7386a3594a8528bf338c35eff80c6b1c
|
Fix typo
|
fabric/tunnels.py
|
fabric/tunnels.py
|
import errno
import select
import socket
import time
from threading import Event, Thread
# TODO: inherit from invoke.util.ExceptionHandlingThread
class Listener(Thread):
def __init__(self,
local_host, local_port,
remote_host, remote_port,
transport, finished
):
super(Listener, self).__init__()
self.local_address = (local_host, local_port)
self.remote_address = (remote_host, remote_port)
self.transport = transport
self.finished = finished
def run(self):
# Track each tunnel that gets opened during our lifetime
tunnels = []
# Set up OS-level listener socket on forwarded port
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# TODO: why do we want REUSEADDR exactly? and is it portable?
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# NOTE: choosing to deal with nonblocking semantics and a fast loop,
# versus an older approach which blocks & expects outer scope to cause
# a socket exception by close()ing the socket.
sock.setblocking(0)
sock.bind(self.local_address)
sock.listen(1)
while not self.finished.is_set():
# Main loop-wait: accept connections on the local listener
# NOTE: EAGAIN means "you're nonblocking and nobody happened to
# connect at this point in time"
try:
tun_sock, local_addr = sock.accept()
# Set TCP_NODELAY to match OpenSSH's forwarding socket behavior
tun_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except socket.error as e:
if e.errno is errno.EAGAIN:
# TODO: make configurable
time.sleep(0.01)
continue
raise
# Set up direct-tcpip channel on server end
# TODO: refactor w/ what's used for gateways
channel = self.transport.open_channel(
'direct-tcpip',
self.remote_address,
local_addr,
)
# Set up 'worker' thread for this specific connection to our
# tunnel, plus its dedicated signal event (which will appear as a
# public attr, no need to track both independently).
finished = Event()
tunnel = Tunnel(channel=channel, sock=tun_sock, finished=finished)
tunnel.start()
tunnels.append(tunnel)
# Propagate shutdown signal to all tunnels & wait for closure
# TODO: would be nice to have some output or at least logging here,
# especially for "sets up a handful of tunnels" use cases like
# forwarding nontrivial HTTP traffic.
for tunnel in tunnels:
tunnel.finished.set()
# TODO: handle in-thread errors
tunnel.join()
# All we have left to close is our own sock.
# TODO: handle errors?
sock.close()
# TODO: inherit from the 'safe' error-handling thread class in Invoke?
class Tunnel(Thread):
"""
Thread that forwards data between an SSH channel and a local socket.
"""
def __init__(self, channel, sock, finished):
self.channel = channel
self.sock = sock
self.finished = finished
self.socket_chunk_size = 1024
self.channel_chunk_size = 1024
super(Tunnel, self).__init__()
def run(self):
try:
empty_sock, empty_chan = None, None
while not self.finished.is_set():
r, w, x = select.select([self.sock, self.channel], [], [], 1)
if self.sock in r:
empty_sock = self.read_and_write(
self.sock, self.channel, self.socket_chunk_size
)
if self.channel in r:
empty_chan = self.read_and_write(
self.channel, self.socket, self.channel_chunk_size
)
if empty_sock or empty_chan:
break
finally:
self.channel.close()
self.sock.close()
def read_and_write(self, reader, writer, chunk_size):
"""
Read ``chunk_size`` from ``reader``, writing result to ``writer``.
Returns ``None`` if successful, or ``True`` if the read was empty.
"""
data = reader.recv(chunk_size)
if len(data) == 0:
return True
writer.sendall(data)
|
Python
| 0.999999
|
@@ -3993,18 +3993,16 @@
elf.sock
-et
, self.c
|
3904b17a5a5a46d91f26dd384ade75c7f270541a
|
fix a bug
|
lib/core/manager.py
|
lib/core/manager.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import binascii
import json
import copy
from os import walk
from lib.core import log
from lib.core.data import kb
from lib.core.data import conf
from lib.core.common import paths
from lib.core.pluginbase import PluginBase
from lib.controller.controller import start
from lib.core.option import setMultipleTarget
from lib.core.option import initializeKb
from thirdparty import requests
def ListPlugins():
"""
Show the list of plugins
:return: list(dict) expName,appName, appVersion, description
"""
list_plugin_info = []
plugin_info = {}
zsp = PluginBase(package='zsplugins')
plugin_zsp = zsp.make_plugin_source(searchpath=[paths.ZEROSCAN_PLUGINS_PATH])
expNames = plugin_zsp.list_plugins()
for expName in expNames:
plugin_tmp = InfoPlugin(expName)
plugin_info["expName"] = expName
plugin_info["appName"] = plugin_tmp["appName"]
plugin_info["appVersion"] = plugin_tmp["appVersion"]
plugin_info["description"] = plugin_tmp["description"]
pi_tmp = copy.deepcopy(plugin_info)  # Python's optimization makes deepcopy necessary here
list_plugin_info.append(pi_tmp)
return list_plugin_info
def SearchPlugin(keyword):
"""
Search the plugins
:param keyword: string, plugin info
:return: list, list of plugins
"""
return_list = []
plugin_info = {}
zsp = PluginBase(package='zsplugins')
plugin_zsp = zsp.make_plugin_source(searchpath=[paths.ZEROSCAN_PLUGINS_PATH])
expNames = plugin_zsp.list_plugins()
for expName in expNames:
if keyword in expName:
plugin_tmp = InfoPlugin(expName)
plugin_info["expName"] = expName
plugin_info["appName"] = plugin_tmp["appName"]
plugin_info["appVersion"] = plugin_tmp["appVersion"]
plugin_info["description"] = plugin_tmp["description"]
pi_tmp = copy.deepcopy(plugin_info)  # Python's optimization makes deepcopy necessary here
return_list.append(pi_tmp)
return return_list
def InfoPlugin(plugin):
"""
Show a plugin's info
:param plugin: string, plugin name
:return: dict, all of the plugin's info
"""
zsp = PluginBase(package='zsplugins')
plugin_zsp = zsp.make_plugin_source(searchpath=[paths.ZEROSCAN_PLUGINS_PATH])
zspi = plugin_zsp.load_plugin('%s'%plugin)
zspi_tmp = zspi.expInfo()
return zspi_tmp
def ShowOptions():
"""
Show the plugin's settings
kb.CurrentPlugin
:return: the plugin's options
"""
zspi_to_re = []
zspi_dict_tmp = {}
zsp = PluginBase(package='zsplugins')
plugin_zsp = zsp.make_plugin_source(searchpath=[paths.ZEROSCAN_PLUGINS_PATH])
zspi = plugin_zsp.load_plugin('%s'%(kb.CurrentPlugin))
zspi_tmp = zspi.expInfo()
for list_tmp in zspi_tmp["options"]:
if list_tmp["Name"] == "URL":
if conf.url:
zspi_dict_tmp["Name"] = "URL"
zspi_dict_tmp["Current Setting"] = conf.url
zspi_dict_tmp["Required"] = True
zspi_dict_tmp["Description"] = "URL or URL file"
elif conf.urlFile:
zspi_dict_tmp["Name"] = "URL"
zspi_dict_tmp["Current Setting"] = conf.urlFile
zspi_dict_tmp["Required"] = True
zspi_dict_tmp["Description"] = "URL or URL file"
else:
zspi_dict_tmp["Name"] = "URL"
zspi_dict_tmp["Current Setting"] = ""
zspi_dict_tmp["Required"] = True
zspi_dict_tmp["Description"] = "URL or URL file"
if list_tmp["Name"] == "Thread":
zspi_dict_tmp["Name"] = "Thread"
zspi_dict_tmp["Current Setting"] = conf.threads
zspi_dict_tmp["Required"] = False
zspi_dict_tmp["Description"] = "Threads"
if list_tmp["Name"] == "Cookie":
zspi_dict_tmp["Name"] = "Cookie"
zspi_dict_tmp["Current Setting"] = conf.cookie
zspi_dict_tmp["Required"] = False
zspi_dict_tmp["Description"] = "Cookie"
if list_tmp["Name"] == "Report":
zspi_dict_tmp["Name"] = "Report"
zspi_dict_tmp["Current Setting"] = conf.report
zspi_dict_tmp["Required"] = False
zspi_dict_tmp["Description"] = "do you need a html report?"
_=copy.deepcopy(zspi_dict_tmp)
zspi_to_re.append(_)
return zspi_to_re
def SetOption(option, value):
"""
Set a plugin option
:param option: string, name of the option
:param value: string, value to set
:return:
"""
#TODO
# If the targets are in a file, the file must be placed under the targets directory
if option.upper() == "URL":
if "targets" in option:
conf.urlFile = str(value)
return "%s => %s" % (option, value)
else:
# this one still needs to be checked
conf.url = str(value)
return "%s => %s" % (option, value)
elif option == "Thread":
conf.threads = value
return "%s => %s" % (option, value)
elif option == "Cookie":
conf.cookie = str(value)
return "%s => %s" % (option, value)
elif option == "Report":
conf.report = value
return "%s => %s" % (option, value)
else:
return "Invalid option: %s" % option
def ClearConf():
"""
Clear the configuration variables
:return:
"""
conf.urlFile = ""
conf.url = ""
conf.threads = 1
conf.cookie = ""
conf.report = False
def ExecPlugin():
"""
Execute the plugin
:return:
"""
setMultipleTarget()
start()
|
Python
| 0.000016
|
@@ -4533,22 +4533,26 @@
ets%22 in
-option
+str(value)
:%0A
|
699022ad6252ed73ed9e56ea050201020b2295af
|
add trailing slash to url for consistency
|
fixcity/urls.py
|
fixcity/urls.py
|
from django.conf.urls.defaults import *
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
(r'^$', 'fixcity.bmabr.views.index'),
(r'^about/$', 'fixcity.bmabr.views.about'),
(r'^faq/$', 'fixcity.bmabr.views.faq'),
(r'^contact/$', 'fixcity.bmabr.views.contact'),
(r'^verification-kit/$', 'fixcity.bmabr.views.verification_kit'),
# Account URL overrides.
# Note these go first because django just iterates over these patterns and uses
# the FIRST match.
# XXX I think the auth application provides some generic passwd reset views
# we could use? see http://www.stonemind.net/blog/2007/04/13/django-registration-for-newbies/
(r'^accounts/activate/(?P<activation_key>\w+)/$', 'fixcity.bmabr.views.activate'),
# Accounts URLs - anything for django-registration that we didn't override.
(r'^accounts/', include('registration.urls')),
(r'^profile/$', 'fixcity.bmabr.views.profile'),
(r'^geocode/$', 'fixcity.bmabr.views.geocode'),
(r'^reverse/$', 'fixcity.bmabr.views.reverse_geocode'),
(r'verify/$','fixcity.bmabr.views.verify'),
(r'verify/communityboard/(?P<cb_id>\d+)/$', 'fixcity.bmabr.views.verify_by_communityboard'),
(r'submit/all/$','fixcity.bmabr.views.submit_all'),
(r'submit/$','fixcity.bmabr.views.submit'),
(r'built/$','fixcity.bmabr.views.built'),
(r'^rack/(?P<rack_id>\d+)/$', 'fixcity.bmabr.views.rack'),
(r'^rack/(?P<rack_id>\d+)/edit/$', 'fixcity.bmabr.views.rack_edit'),
(r'^rack/(?P<rack_id>\d+)/support/$', 'fixcity.bmabr.views.support'),
# KML URLs
(r'rack/all.kml$', 'fixcity.bmabr.views.rack_all_kml'),
(r'rack/requested.kml$', 'fixcity.bmabr.views.rack_requested_kml'),
(r'rack/pendding.kml$', 'fixcity.bmabr.views.rack_pendding_kml'),
(r'rack/built.kml$', 'fixcity.bmabr.views.rack_pendding_kml'),
(r'rack/(?P<rack_id>\d+).kml', 'fixcity.bmabr.views.rack_by_id_kml'),
(r'communityboards.kml','fixcity.bmabr.views.community_board_kml'),
(r'communityboard/(?P<cb_id>\d+).kml','fixcity.bmabr.views.community_board_kml_by_id'),
# different views for adding information, rack, comments, photos.
(r'^rack/new/$', 'fixcity.bmabr.views.newrack_form'), # view for rack request form.
(r'^rack/(?P<rack_id>\d+)/photos/', 'fixcity.bmabr.views.updatephoto'),
(r'^rack/$', 'fixcity.bmabr.views.rack_index'),
(r'^comment/add/$', 'fixcity.bmabr.views.add_comment'),
# different ways of viewing information
(r'^neighborhoods/$', 'fixcity.bmabr.views.neighborhoods'),
(r'^communityboard/$', 'fixcity.bmabr.views.communityboard'),
# Static media for dev work. For deployment, these should be served
# by a front-end server eg. apache!
# see http://docs.djangoproject.com/en/dev/howto/static-files/
(r'^site_media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_DOC_ROOT, 'show_indexes': True}),
(r'^uploads/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
(r'^cb1racks$', 'fixcity.bmabr.views.cb1racks'),
(r'^admin/(.*)', admin.site.root),
)
handler500 = 'fixcity.bmabr.views.server_error'
|
Python
| 0
|
@@ -3448,16 +3448,17 @@
cb1racks
+/
$', 'fix
|
d338dc15c57e3aea12de78354da908b1457c5055
|
Clean up command more (#30)
|
earwigbot/commands/lag.py
|
earwigbot/commands/lag.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2012 Ben Kurtovic <ben.kurtovic@verizon.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from earwigbot import exceptions
from earwigbot.commands import Command
class Lag(Command):
"""Return the replag for a specific database on the Toolserver."""
name = "lag"
commands = ["lag", "replag", "maxlag"]
def process(self, data):
if data.kwargs and "project" in data.kwargs and "lang" in data.kwargs:
project, lang = data.kwargs["project"], data.kwargs["lang"]
site = self.get_site(data, project, lang)
if not site:
return
elif data.args:
if len(data.args) > 1:
name = " ".join(data.args)
self.reply(data, "unknown site: \x0302{0}\x0F.".format(name))
return
name = data.args[0]
if "." in name:
lang, project = name.split(".")[:2]
elif ":" in name:
project, lang = name.split(":")[:2]
else:
try:
site = self.bot.wiki.get_site(name)
except exceptions.SiteNotFoundError:
msg = "unknown site: \x0302{0}\x0F.".format(name)
self.reply(data, msg)
return
site = self.get_site(data, project, lang)
if not site:
return
else:
site = self.bot.wiki.get_site()
msg = "\x0302{0}\x0F: Toolserver replag is {1} seconds; database maxlag is {2} seconds"
msg = msg.format(site.name, site.get_replag(), site.get_maxlag())
self.reply(data, msg)
def get_site(self, data, project, lang):
try:
site = self.bot.wiki.get_site(project=project, lang=lang)
except exceptions.SiteNotFoundError:
try:
site = self.bot.wiki.add_site(project=project, lang=lang)
except exceptions.APIError:
msg = "site \x0302{0}:{1}\x0F not found."
self.reply(data, msg.format(project, lang))
return
return site
|
Python
| 0.000001
|
@@ -1398,16 +1398,318 @@
data):%0A
+ site = self.get_site(data)%0A if not site:%0A return%0A%0A msg = %22%5Cx0302%7B0%7D%5Cx0F: Toolserver replag is %7B1%7D seconds; database maxlag is %7B2%7D seconds.%22%0A msg = msg.format(site.name, site.get_replag(), site.get_maxlag())%0A self.reply(data, msg)%0A%0A def get_site(self):%0A
@@ -1855,38 +1855,38 @@
g%22%5D%0A
-site =
+return
self.get_site(d
@@ -1875,32 +1875,51 @@
rn self.get_site
+_from_proj_and_lang
(data, project,
@@ -1916,36 +1916,33 @@
project, lang)%0A
-
+%0A
if not s
@@ -1936,34 +1936,35 @@
if not
-site:%0A
+data.args:%0A
@@ -1973,37 +1973,35 @@
turn
-%0A elif data.args:%0A
+ self.bot.wiki.get_site()%0A%0A
@@ -2035,28 +2035,24 @@
-
name = %22 %22.j
@@ -2066,20 +2066,16 @@
a.args)%0A
-
@@ -2140,20 +2140,16 @@
(name))%0A
-
@@ -2163,28 +2163,24 @@
urn%0A
-
name = data.
@@ -2195,20 +2195,16 @@
-
-
if %22.%22 i
@@ -2203,36 +2203,32 @@
if %22.%22 in name:%0A
-
lang
@@ -2267,20 +2267,16 @@
-
-
elif %22:%22
@@ -2277,36 +2277,32 @@
if %22:%22 in name:%0A
-
proj
@@ -2337,28 +2337,24 @@
:2%5D%0A
-
else:%0A
@@ -2351,36 +2351,32 @@
se:%0A
-
-
try:%0A
@@ -2376,34 +2376,30 @@
- site =
+return
self.bot.wi
@@ -2420,36 +2420,32 @@
me)%0A
-
-
except exception
@@ -2457,36 +2457,32 @@
eNotFoundError:%0A
-
@@ -2539,36 +2539,32 @@
-
-
self.reply(data,
@@ -2577,36 +2577,32 @@
-
return%0A
@@ -2596,34 +2596,30 @@
urn%0A
- site =
+return
self.get_si
@@ -2616,24 +2616,43 @@
elf.get_site
+_from_proj_and_lang
(data, proje
@@ -2665,332 +2665,44 @@
ng)%0A
- if not site:%0A return%0A else:%0A site = self.bot.wiki.get_site()%0A%0A msg = %22%5Cx0302%7B0%7D%5Cx0F: Toolserver replag is %7B1%7D seconds; database maxlag is %7B2%7D seconds%22%0A msg = msg.format(site.name, site.get_replag(), site.get_maxlag())%0A self.reply(data, msg)%0A%0A def get_site
+%0A def get_site_from_proj_and_lang
(sel
|
09a313a2cd74c391c12761306cb8ae641e9f0d28
|
fix logs app prompt
|
ebcli/controllers/logs.py
|
ebcli/controllers/logs.py
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse
from ..core.abstractcontroller import AbstractBaseController
from ..resources.strings import strings, flag_text
from ..operations import logsops
from ..objects.exceptions import InvalidOptionsError, NotFoundError
class LogsController(AbstractBaseController):
class Meta:
label = 'logs'
description = strings['logs.info']
usage = AbstractBaseController.Meta.usage.replace('{cmd}', label)
arguments = AbstractBaseController.Meta.arguments + [
(['-a', '--all'], dict(
action='store_true', help=flag_text['logs.all'])),
(['-z', '--zip'], dict(
action='store_true', help=flag_text['logs.zip'])),
(['-i', '--instance'], dict(help=flag_text['logs.instance'])),
(['--stream'], dict(action='store_true',
help=flag_text['logs.stream'])),
]
epilog = strings['logs.epilog']
def do_command(self):
env_name = self.get_env_name()
if self.app.pargs.stream:
try:
return logsops.stream_logs(env_name)
except NotFoundError:
raise NotFoundError(strings['cloudwatch-stream.notsetup'])
all = self.app.pargs.all
instance = self.app.pargs.instance
zip = self.app.pargs.zip
if all and instance:
raise InvalidOptionsError(strings['logs.allandinstance'])
if zip:
info_type = 'bundle'
do_zip = True
elif all:
info_type = 'bundle'
do_zip = False
else:
info_type = 'tail'
do_zip = False
logsops.logs(env_name, info_type, do_zip=do_zip,
instance_id=instance)
|
Python
| 0.000001
|
@@ -1528,16 +1528,55 @@
(self):%0A
+ app_name = self.get_app_name()%0A
|
d3cd1778f4ccb1651feb2186ecfdd0c81f86088c
|
Improve Instruction parsing
|
instruction.py
|
instruction.py
|
class Instruction(object):
def __init__(self, line):
instr = line.split(' ')
self.name = instr[0]
self.ops = []
if len(instr) > 4:
raise Exception('too many operands: {}'.format(line))
# iterate through operands, perform some loose checks, and append
# to self.ops
for i, each in enumerate(instr[1:]):
if each.endswith(','):
each = each[:-1]
if each.startswith('$'):
self.ops.append(each[1:])
else:
self.ops.append(each)
|
Python
| 0.000072
|
@@ -339,11 +339,8 @@
for
- i,
eac
@@ -348,18 +348,8 @@
in
-enumerate(
inst
@@ -353,17 +353,16 @@
nstr%5B1:%5D
-)
:%0A
@@ -439,125 +439,62 @@
-if each.startswith('$'):%0A self.ops.append(each%5B1:%5D)%0A else:%0A self.ops.append(
+self.ops.append(each%5B1:%5D if each.startswith('$') else
each
|
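The refactor above drops the unused `enumerate` index and collapses the four-line if/else append into a single conditional expression. The two forms are behaviourally identical, as this small check demonstrates:

# Equivalence check for the refactor above.
def strip_dollar(each):
    if each.startswith('$'):        # branching form (before)
        before = each[1:]
    else:
        before = each
    after = each[1:] if each.startswith('$') else each  # after
    assert before == after
    return after

assert strip_dollar('$t0') == 't0'
assert strip_dollar('42') == '42'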
23c95dcba178b3876bf07bec4b0c4f5c06895181
|
add padding to file names
|
copyfiles.py
|
copyfiles.py
|
import hashlib
import os
import random
import shutil
import string
BLOCKSIZE = 65536
class FileCopier(object):
def __init__(self, dest_dir, copy):
self._dest_dir = dest_dir
self._copy = copy
def copy_file(self, in_path, date, subject):
out_dir = self._get_directory_name(date, subject)
out_file = self._get_file_name(in_path, date)
self._create_dir(out_dir)
out_path = os.path.join(out_dir, out_file)
if os.path.exists(out_path):
if self._hash_file(in_path) == self._hash_file(out_path):
print 'File already exists - skipping'
return
else:
basename, extension = os.path.splitext(out_path)
rand_str = ''.join(random.choice(string.lowercase) for i in range(10))
out_file = basename + '-' + rand_str + extension
out_path = os.path.join(out_dir, out_file)
self._copy_file(in_path, out_path)
def _get_directory_name(self, date, subject):
month_dir = '%(y)s-%(m)s' % {'y': date.year, 'm': date.month}
subject_dir = ('%(y)s_%(m)s_%(d)s-%(subj)s' %
{'y': date.year, 'm': date.month, 'd': date.day,
'subj': subject})
return os.path.join(self._dest_dir, month_dir, subject_dir)
def _get_file_name(self, in_path, date):
file_basename = ('%(y)s_%(m)s_%(d)s-%(hr)s_%(min)s_%(sec)s' %
{'y': date.year, 'm': date.month, 'd': date.day,
'hr': date.hour, 'min': date.minute,
'sec': date.second})
extension = os.path.splitext(in_path)[1]
return file_basename + extension
def _create_dir(self, path):
if not os.path.exists(path):
os.makedirs(path)
def _hash_file(self, path):
hasher = hashlib.md5()
with open(path, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest()
def _copy_file(self, in_path, out_path):
print 'Copying file from', in_path, 'to', out_path
if self._copy:
shutil.copy2(in_path, out_path)
else:
os.link(in_path, out_path)
|
Python
| 0.000001
|
@@ -1055,15 +1055,19 @@
%25(y)
-s
+04d
-%25(m)
-s
+02d
' %25
@@ -1128,29 +1128,35 @@
= ('%25(y)
-s
+04d
_%25(m)
-s
+02d
_%25(d)
-s
+02d
-%25(subj)
@@ -1422,21 +1422,27 @@
%25(y)
-s
+04d
_%25(m)
-s
+02d
_%25(d)
-s
+02d
-%25(h
|
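The change replaces bare `%s` placeholders with width-specified `%04d`/`%02d`, so March 7th renders as `2016_03_07` rather than `2016_3_7`, and directory and file names sort chronologically as plain strings. A quick demonstration:

# Zero-padded date components sort correctly as strings.
import datetime

d = datetime.datetime(2016, 3, 7)
name = '%(y)04d_%(m)02d_%(d)02d' % {'y': d.year, 'm': d.month, 'd': d.day}
assert name == '2016_03_07'
assert '2016_03_07' < '2016_11_01'   # lexicographic == chronological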
daa29e745256b164b3375e502444ec247aa0d892
|
implement get_default_args for the future repr development
|
logwrap/func_helpers.py
|
logwrap/func_helpers.py
|
# Copyright 2016 Mirantis, Inc.
# Copyright 2016 Alexey Stepanov aka penguinolog
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""func_helpers module
This is no reason to import this submodule directly, all required methods is
available from the main module.
"""
from __future__ import absolute_import
import collections
import inspect
import sys
# pylint: disable=no-member
def get_arg_names(func):
"""get argument names for function
:param func: Function to extract arguments from
:type func: callable
:return: list of function argument names
:rtype: list
>>> def tst_1():
... pass
>>> get_arg_names(tst_1)
[]
>>> def tst_2(arg):
... pass
>>> get_arg_names(tst_2)
['arg']
"""
# noinspection PyUnresolvedReferences
if sys.version_info[0:2] < (3, 0):
# pylint: disable=deprecated-method
# noinspection PyDeprecation
spec = inspect.getargspec(func=func)
# pylint: enable=deprecated-method
args = spec.args[:]
if spec.varargs:
args.append(spec.varargs)
if spec.keywords:
args.append(spec.keywords)
return args
return list(inspect.signature(obj=func).parameters.keys())
def get_call_args(func, *positional, **named):
"""get real function call arguments without calling function
:param func: Function to bind arguments
:type func: callable
:type positional: iterable
:type named: dict
:rtype: collections.OrderedDict
>>> def tst(arg, darg=2, *args, **kwargs):
... pass
>>> get_call_args(tst, *(1, ))
OrderedDict([('arg', 1), ('darg', 2), ('args', ()), ('kwargs', {})])
"""
# noinspection PyUnresolvedReferences
if sys.version_info[0:2] < (3, 5): # apply_defaults is py35 feature
# pylint: disable=deprecated-method
orig_args = inspect.getcallargs(func, *positional, **named)
# pylint: enable=deprecated-method
# Construct OrderedDict as Py3
arguments = collections.OrderedDict(
[(key, orig_args[key]) for key in get_arg_names(func)]
)
return arguments
sig = inspect.signature(func).bind(*positional, **named)
sig.apply_defaults() # after bind we doesn't have defaults
return sig.arguments
# pylint: enable=no-member
__all__ = ['get_arg_names', 'get_call_args']
|
Python
| 0
|
@@ -1355,17 +1355,17 @@
%5D %3C (3,
-0
+3
):%0A
@@ -2828,16 +2828,1449 @@
guments%0A
+%0A%0Adef get_default_args(func):%0A %22%22%22Get function defaults from it's signature%0A%0A :param func: target function%0A :type func: function%0A :rtype: collections.OrderedDict%0A%0A %3E%3E%3E def tst0():pass%0A%0A %3E%3E%3E get_default_args(tst0)%0A OrderedDict()%0A%0A %3E%3E%3E def tst1(a): pass%0A%0A %3E%3E%3E get_default_args(tst1)%0A OrderedDict()%0A%0A %3E%3E%3E def tst2(a, b): pass%0A%0A %3E%3E%3E get_default_args(tst2)%0A OrderedDict()%0A%0A %3E%3E%3E def tst3(a=0): pass%0A%0A %3E%3E%3E get_default_args(tst3)%0A OrderedDict(%5B('a', 0)%5D)%0A%0A %3E%3E%3E def tst4(a, b=1): pass%0A%0A %3E%3E%3E get_default_args(tst4)%0A OrderedDict(%5B('b', 1)%5D)%0A%0A %3E%3E%3E def tst5(a=0, b=1): pass%0A%0A %3E%3E%3E get_default_args(tst5)%0A OrderedDict(%5B('a', 0), ('b', 1)%5D)%0A %22%22%22%0A if sys.version_info%5B0:2%5D %3C (3, 0):%0A # pylint: disable=deprecated-method%0A # noinspection PyDeprecation%0A spec = inspect.getargspec(func)%0A # pylint: enable=deprecated-method%0A if not spec.defaults:%0A return collections.OrderedDict()%0A collector = %5B%5D%0A for val in range(1, len(spec.defaults)+1):%0A collector.append((spec.args%5B-val%5D, spec.defaults%5B-val%5D))%0A return collections.OrderedDict(reversed(collector))%0A sig = inspect.signature(func)%0A result = collections.OrderedDict(%0A %5B%0A (arg.name, arg.default)%0A for arg in sig.parameters.values()%0A if arg.default != inspect.Parameter.empty%0A %5D%0A )%0A return result%0A
# pylint
|
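On Python 3, the `inspect.signature` branch of the new `get_default_args` boils down to filtering parameters whose default is not `Parameter.empty`. A minimal standalone sketch of just that branch (not the Python 2 `getargspec` path):

# Python 3 sketch of default extraction via inspect.signature.
import collections
import inspect

def defaults_of(func):
    return collections.OrderedDict(
        (p.name, p.default)
        for p in inspect.signature(func).parameters.values()
        if p.default is not inspect.Parameter.empty
    )

def tst(a, b=1, c='x'):
    pass

assert defaults_of(tst) == {'b': 1, 'c': 'x'}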
d223ee2988be1eb439ebde1146c28ffc83576a29
|
Return empty list on exception
|
fetch_mentions.py
|
fetch_mentions.py
|
import time
import requests
import json
import tweepy
from tweepy.error import TweepError
from raven import Client
from settings import consumer_key, consumer_secret, key, secret, count, pubKey, payload_type, SENTRY_DSN,\
overlap_count, waiting_period, speed_layer_endpoint_url, password, timeout, ssl_verification
client = Client(SENTRY_DSN)
class GetMentions(object):
latest_tweet_id = 0
since_id = 1
api = None
data = {"payloadType": payload_type, "pubKey": pubKey}
overlap_count = overlap_count
headers = {'content-type': 'application/json'}
def authenticate(self):
"""
Authenticate Twitter credentials
"""
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(key, secret)
self.api = tweepy.API(auth)
def verify_credentials(self):
"""
Return after verifying Twitter credentials
"""
try:
verified = self.api.verify_credentials()
except TweepError, errors:
client.captureException()
for msg in errors.message:
# If Rate limit exceeded, will retry after 15 minutes
if msg['code'] == 88:
print "Sleeping for 15 minutes, Rate limit hit"
time.sleep(15 * 60)
return True
return False
else:
return verified
def get_mentions(self):
"""
Fetch mentions from twitter
"""
try:
print "Fetching mentions"
mentions = self.api.mentions_timeline(count=count, since_id=self.since_id)
except TweepError, errors:
client.captureException()
for msg in errors.message:
# If Rate limit exceeded, will retry after 15 minutes
if msg['code'] == 88:
print "Sleeping for 15 minutes, Rate limit hit"
time.sleep(15 * 60)
break
return None
else:
return mentions
def process_mentions(self):
"""
Send the twitter mentions to rest end point server
"""
mentions = self.get_mentions()
for mention in mentions:
self.data.update({"transactionId": mention.id, "transactionSent": mention.created_at.isoformat(),
"transactionData": mention._json})
print "--------- Sending to Rest End point ---------"
try:
resp = requests.post(speed_layer_endpoint_url, auth=(pubKey, password), headers=self.headers, timeout=timeout,
data=json.dumps(self.data), verify=ssl_verification)
except Exception as e:
print 'Failed to post to HTTP endpoint due to "%s"' % e.message
if mention.id > self.latest_tweet_id:
# get highest tweet id so that it could be used as since_id later
self.latest_tweet_id = mention.id
print "Latest mention ID so far: {0}".format(self.latest_tweet_id)
print "Mentions Tweet ID: {0}".format(mention.id)
print "Mentions Tweet: {0}".format(mention.text)
if self.overlap_count:
# Force duplicate tweets
try:
self.since_id = mentions[-1 * (overlap_count + 1)].id
except IndexError:
print "Mentions overlap index out of range"
client.captureException()
self.since_id = self.latest_tweet_id
else:
self.since_id = self.latest_tweet_id
if __name__ == "__main__":
obj = GetMentions()
obj.authenticate()
while obj.verify_credentials():
obj.process_mentions()
print "Waiting for %s seconds" % waiting_period
time.sleep(waiting_period)
else:
print "Verification Failed"
|
Python
| 0.999717
|
@@ -1995,16 +1995,42 @@
break%0A
+ print msg%0A
@@ -2040,20 +2040,18 @@
return
-None
+%5B%5D
%0A
|
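The fix above returns an empty list instead of None on TweepError, so the unconditional for-loop in process_mentions keeps working. A minimal illustration of that contract (no tweepy involved, names hypothetical):

def fetch_items(fail=False):
    # On failure, [] keeps callers iterable-safe; returning None would
    # make `for item in fetch_items():` raise TypeError.
    if fail:
        return []
    return ['a', 'b']


for item in fetch_items(fail=True):
    print(item)  # body simply never runs on failure; no crash either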
e3dc9028199794498e05e43ed2f40f565e64ffde
|
Make Vina clean after running
|
docking/autodock_vina.py
|
docking/autodock_vina.py
|
from tempfile import mkdtemp
from shutil import rmtree
from os.path import exists
import subprocess
import numpy as np
import re
import pybel
class autodock_vina:
def __init__(self, protein, size=(10,10,10), center=(0,0,0), auto_ligand=None, exhaustivness=8, num_modes=9, energy_range=3, seed=None, prefix_dir='/tmp', ncpu=1, executable=None, autocleanup=True):
self.dir = mkdtemp(dir = prefix_dir, prefix='autodock_vina_')
# define binding site
self.size = size
self.center = center
# center automatically on the ligand
if auto_ligand:
self.center = tuple(np.array([atom.coords for atom in auto_ligand], dtype=np.float16).mean(axis=0))
# autodetect Vina executable
if not executable:
self.executable = subprocess.check_output(['which', 'vina']).split('\n')[0]
else:
self.executable = executable
# detect version
self.version = subprocess.check_output([self.executable, '--version']).split(' ')[2]
self.autocleanup = autocleanup
# write protein to file
self.protein_file = self.dir + '/protein.pdbqt'
protein.write('pdbqt', self.protein_file, opt={'r':None,})
#pregenerate common Vina parameters
self.params = []
self.params = self.params + ['--center_x', str(self.center[0]), '--center_y', str(self.center[1]), '--center_z', str(self.center[2])]
self.params = self.params + ['--size_x', str(self.size[0]), '--size_y', str(self.size[1]), '--size_z', str(self.size[2])]
self.params = self.params + ['--cpu', str(ncpu)]
self.params = self.params + ['--exhaustiveness', str(exhaustivness)]
if not seed is None:
self.params = self.params + ['--seed', str(seed)]
self.params = self.params + ['--num_modes', str(num_modes)]
self.params = self.params + ['--energy_range', str(energy_range)]
def score(self, ligands):
output_array = []
n = 1
ligand_dir = mkdtemp(dir = self.dir, prefix='ligands_')
for ligand in ligands:
# write ligand to file
ligand_file = ligand_dir + '/' + str(n) + '.pdbqt'
ligand.write('pdbqt', ligand_file, overwrite=True)
output_array.append(parse_vina_scoring_output(subprocess.check_output([self.executable, '--score_only', '--receptor', self.protein_file, '--ligand', ligand_file] + self.params)))
n +=1
return output_array
def dock(self, ligands):
output_array = []
n = 1
ligand_dir = mkdtemp(dir = self.dir, prefix='ligands_')
for ligand in ligands:
# write ligand to file
ligand_file = ligand_dir + '/' + str(n) + '.pdbqt'
ligand_outfile = ligand_dir + '/' + str(n) + '_out.pdbqt'
ligand.write('pdbqt', ligand_file, overwrite=True)
vina = parse_vina_docking_output(subprocess.check_output([self.executable, '--receptor', self.protein_file, '--ligand', ligand_file, '--out', ligand_outfile] + self.params))
output_array.append(zip([lig for lig in pybel.readfile('pdbqt', ligand_outfile)], vina))
n +=1
return output_array
# !!! FIX: this does not delete directory for some reason
def __enter__(self):
return self
def __exit__(self):
if exists(self.dir) and self.autocleanup:
rmtree(self.dir)
dasdsad
def parse_vina_scoring_output(output):
out = {}
r = re.compile('^(Affinity:|\s{4})')
for line in output.split('\n')[13:]: # skip some output
if r.match(line):
m = line.replace(' ','').split(':')
if m[0] == 'Affinity':
m[1] = m[1].replace('(kcal/mol)','')
out[m[0].lower()] = float(m[1])
return out
def parse_vina_docking_output(output):
out = []
r = re.compile('^\s+\d\s+')
for line in output.split('\n')[13:]: # skip some output
if r.match(line):
s = line.split()
out.append({'affinity': s[1], 'rmsd_lb': s[2], 'rmsd_ub': s[3]})
return out
|
Python
| 0.000002
|
@@ -383,58 +383,19 @@
r =
-mkdtemp(dir = prefix_dir, prefix='autodock_vina_')
+prefix_dir,
%0A
@@ -1036,23 +1036,14 @@
-%0A # writ
+# shar
e pr
@@ -1047,28 +1047,29 @@
protein to
-file
+class
%0A sel
@@ -1081,110 +1081,27 @@
tein
-_file = self.dir + '/protein.pdbqt'%0A protein.write('pdbqt', self.protein_file, opt=%7B'r':None,%7D)
+ = protein%0A
%0A
@@ -1841,32 +1841,257 @@
self, ligands):%0A
+ tmp_dir = mkdtemp(dir = self.dir, prefix='autodock_vina_')%0A # write protein to file%0A protein_file = tmp_dir + '/protein.pdbqt'%0A self.protein.write('pdbqt', protein_file, opt=%7B'r':None,%7D)%0A %0A
output_a
@@ -2141,37 +2141,36 @@
= mkdtemp(dir =
-self.
+tmp_
dir, prefix='lig
@@ -2491,37 +2491,32 @@
, '--receptor',
-self.
protein_file, '-
@@ -2565,32 +2565,56 @@
n +=1%0A
+ rmtree(tmp_dir)%0A
return o
@@ -2659,32 +2659,257 @@
self, ligands):%0A
+ tmp_dir = mkdtemp(dir = self.dir, prefix='autodock_vina_')%0A # write protein to file%0A protein_file = tmp_dir + '/protein.pdbqt'%0A self.protein.write('pdbqt', protein_file, opt=%7B'r':None,%7D)%0A %0A
output_a
@@ -2959,37 +2959,36 @@
= mkdtemp(dir =
-self.
+tmp_
dir, prefix='lig
@@ -3358,21 +3358,16 @@
eptor',
-self.
protein_
@@ -3570,271 +3570,50 @@
r
-eturn output_array%0A %0A %0A # !!! FIX: this does not delete directory for some reason%0A def __enter__(self):%0A return self%0A %0A def __exit__(self):%0A if exists(self.dir) and self.autocleanup:%0A rmtree(self.dir)%0A dasdsad
+mtree(tmp_dir)%0A return output_array
%0A
|
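The refactor above moves protein-file writing into score()/dock(), each using a fresh temporary directory that is removed with rmtree once the run finishes. The same cleanup pattern in isolation, sketched with only the standard library:

import shutil
import tempfile

tmp_dir = tempfile.mkdtemp(prefix='autodock_vina_')
try:
    # ... write receptor/ligand files and invoke the external tool here ...
    pass
finally:
    # Remove the scratch directory even if the tool invocation raises.
    shutil.rmtree(tmp_dir, ignore_errors=True)

On Python 3, tempfile.TemporaryDirectory offers the same guarantee as a context manager.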
8d0a41391fae5c66c296d5dfacc0ac6f82a6b355
|
fix gridsearch path
|
gridsearch.py
|
gridsearch.py
|
import time
import itertools as it
from gensim.models import word2vec
from goethe.corpora import Corpus
model_config = {
'size': [200, 300, 400, 500, 600],
'window': [5, 10, 20],
'sg': [0, 1] # Skip-gram or CBOW
}
sample_size = 10000000
epochs = 10
def train_model(config):
size, window, sg = config
sentences = Corpus('../corpora/eval/eval.tokens.txt', limit=sample_size)
model = word2vec.Word2Vec(sentences=sentences, size=size, window=window,
iter=epochs, workers=4)
name = 'n{}_size{}_epochs{}_sg{}_window{}'.format(sample_size, size, epochs, sg, window)
return name, model
def minutes(t0):
t1 = time.time()
return int((t1-t0)/60)
if __name__ == '__main__':
parameters = it.product(model_config['size'], model_config['window'],
model_config['sg'])
t0 = time.time()
for p in parameters:
name, model = train_model(p)
model.save('models/' + name + '.model')
print('{}\', saved model: {}'.format(minutes(t0), name))
|
Python
| 0.000001
|
@@ -356,24 +356,8 @@
eval
-/eval.tokens.txt
', l
|
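The grid search in this record is a plain itertools.product over the config lists; a minimal sketch of that pattern (values shortened for brevity):

import itertools as it

grid = {'size': [200, 300], 'window': [5, 10], 'sg': [0, 1]}
for size, window, sg in it.product(grid['size'], grid['window'], grid['sg']):
    # One (size, window, sg) combination per iteration: 2 * 2 * 2 = 8 runs.
    print(size, window, sg)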
b57e84fc157b7bbe564a7bd8799c06bec5b507eb
|
Remove unused property and non-applicable TODO.
|
st2reactor/st2reactor/container/manager.py
|
st2reactor/st2reactor/container/manager.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import signal
import eventlet
from st2common import log as logging
from st2reactor.container.process_container import ProcessSensorContainer
from st2common.services.sensor_watcher import SensorWatcher
from st2common.models.system.common import ResourceReference
LOG = logging.getLogger(__name__)
class SensorContainerManager(object):
# TODO: Load balancing for sensors.
def __init__(self, max_containers=10):
self._max_containers = max_containers
self._sensor_container = None
self._sensors_watcher = SensorWatcher(create_handler=self._handle_create_sensor,
update_handler=self._handle_update_sensor,
delete_handler=self._handle_delete_sensor,
queue_suffix='sensor_container')
self._container_thread = None
def run_sensors(self, sensors):
"""
:param sensors: A list of DB models of sensors to run.
:type sensors: ``list``
"""
if sensors:
LOG.info('Setting up container to run %d sensors.', len(sensors))
sensors_to_run = []
for sensor in sensors:
# TODO: Directly pass DB object to the ProcessContainer
sensors_to_run.append(self._to_sensor_object(sensor))
LOG.info('(PID:%s) SensorContainer started.', os.getpid())
self._setup_sigterm_handler()
self._spin_container_and_wait(sensors_to_run)
def _spin_container_and_wait(self, sensors):
try:
self._sensor_container = ProcessSensorContainer(sensors=sensors)
self._container_thread = eventlet.spawn(self._sensor_container.run)
LOG.debug('Starting sensor CUD watcher...')
self._sensors_watcher.start()
exit_code = self._container_thread.wait()
LOG.error('Process container quit with exit_code %d.', exit_code)
LOG.error('(PID:%s) SensorContainer stopped.', os.getpid())
except (KeyboardInterrupt, SystemExit):
self._sensor_container.shutdown()
self._sensors_watcher.stop()
LOG.info('(PID:%s) SensorContainer stopped. Reason - %s', os.getpid(),
sys.exc_info()[0].__name__)
self._container_thread = eventlet.kill(self._container_thread)
return 0
def _setup_sigterm_handler(self):
def sigterm_handler(signum=None, frame=None):
# This will cause SystemExit to be thrown and we call sensor_container.shutdown()
# there which cleans things up.
sys.exit(0)
# Register a SIGTERM signal handler which calls sys.exit which causes SystemExit to
# be thrown. We catch SystemExit and handle cleanup there.
signal.signal(signal.SIGTERM, sigterm_handler)
def _to_sensor_object(self, sensor_db):
file_path = sensor_db.artifact_uri.replace('file://', '')
class_name = sensor_db.entry_point.split('.')[-1]
sensor_obj = {
'pack': sensor_db.pack,
'file_path': file_path,
'class_name': class_name,
'trigger_types': sensor_db.trigger_types,
'poll_interval': sensor_db.poll_interval,
'ref': self._get_sensor_ref(sensor_db)
}
return sensor_obj
#################################################
# Event handler methods for the sensor CUD events
#################################################
def _handle_create_sensor(self, sensor):
LOG.info('Adding sensor %s.', self._get_sensor_ref(sensor))
self._sensor_container.add_sensor(sensor=self._to_sensor_object(sensor))
def _handle_update_sensor(self, sensor):
sensor_ref = self._get_sensor_ref(sensor)
LOG.info('Sensor %s updated. Reloading sensor.', sensor_ref)
sensor_obj = self._to_sensor_object(sensor)
try:
self._sensor_container.remove_sensor(sensor=sensor_obj)
except:
LOG.exception('Failed to reload sensor %s', sensor_ref)
else:
self._sensor_container.add_sensor(sensor=sensor_obj)
LOG.info('Sensor %s reloaded.', sensor_ref)
def _handle_delete_sensor(self, sensor):
LOG.info('Unloading sensor %s.', self._get_sensor_ref(sensor))
self._sensor_container.remove_sensor(sensor=self._to_sensor_object(sensor))
def _get_sensor_ref(self, sensor):
return ResourceReference.to_string_reference(pack=sensor.pack, name=sensor.name)
|
Python
| 0
|
@@ -1137,136 +1137,32 @@
t):%0A
- # TODO: Load balancing for sensors.%0A def __init__(self, max_containers=10):%0A self._max_containers = max_containers
+%0A def __init__(self):
%0A
|
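The container in this record converts SIGTERM into SystemExit so the normal cleanup path runs. That trick is independent of st2 and can be sketched on its own:

import signal
import sys


def sigterm_handler(signum=None, frame=None):
    # sys.exit raises SystemExit, which try/except blocks can catch
    # to perform shutdown work before the process dies.
    sys.exit(0)


signal.signal(signal.SIGTERM, sigterm_handler)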
f682300d4a8ab7e13ad0e26d2b37fdf24cdbdce9
|
Bump development version
|
filer/__init__.py
|
filer/__init__.py
|
# -*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '1.2.6.rc1' # pragma: nocover
default_app_config = 'filer.apps.FilerConfig'
|
Python
| 0
|
@@ -90,17 +90,17 @@
1.2.6.rc
-1
+2
' # pra
|
05c8dffcbfc08bbfd98d0f6a506af245719b3ac8
|
FIX dependency
|
stock_picking_ean128_report/__openerp__.py
|
stock_picking_ean128_report/__openerp__.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Stock Picking EAN128 Report',
'version': '1.0',
'category': 'Warehouse Management',
'sequence': 14,
'summary': '',
'description': """
Stock Picking EAN128 Report
===========================
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'stock_ean128',
],
'data': [
'wizard/stock_print_remit_view.xml',
'report/stock_report.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0.000001
|
@@ -1314,16 +1314,40 @@
an128',%0A
+ 'report_aeroo',%0A
%5D,%0A
|
9608fff230665f5120e6ea98d4ae0efc91a345ef
|
Revert "Add teuthology git version query/logging"
|
teuthology/__init__.py
|
teuthology/__init__.py
|
from gevent import monkey
monkey.patch_all(
dns=False,
# Don't patch subprocess to avoid http://tracker.ceph.com/issues/14990
subprocess=False,
)
import sys
# Don't write pyc files
sys.dont_write_bytecode = True
from .orchestra import monkey
monkey.patch_all()
import logging
import os
import subprocess
__version__ = '1.0.0'
# do our best, but if it fails, continue with above
try:
__version__ += '-' + subprocess.check_output(
'git rev-parse --short HEAD'.split(),
cwd=os.path.dirname(os.path.realpath(__file__))
).strip()
except Exception as e:
# before logging; should be unusual
print >>sys.stderr, 'Can\'t get version from git rev-parse', e
# If we are running inside a virtualenv, ensure we have its 'bin' directory in
# our PATH. This doesn't happen automatically if scripts are called without
# first activating the virtualenv.
exec_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
if os.path.split(exec_dir)[-1] == 'bin' and exec_dir not in os.environ['PATH']:
os.environ['PATH'] = ':'.join((exec_dir, os.environ['PATH']))
# We don't need to see log entries for each connection opened
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
logging.WARN)
# if requests doesn't bundle it, shut it up anyway
logging.getLogger('urllib3.connectionpool').setLevel(
logging.WARN)
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s')
log = logging.getLogger(__name__)
log.info('teuthology version: %s', __version__)
def setup_log_file(log_path):
root_logger = logging.getLogger()
handlers = root_logger.handlers
for handler in handlers:
if isinstance(handler, logging.FileHandler) and \
handler.stream.name == log_path:
log.debug("Already logging to %s; not adding new handler",
log_path)
return
formatter = logging.Formatter(
fmt=u'%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s',
datefmt='%Y-%m-%dT%H:%M:%S')
handler = logging.FileHandler(filename=log_path)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
root_logger.log.info('teuthology version: %s', __version__)
|
Python
| 0
|
@@ -298,403 +298,32 @@
os%0A
-import subprocess%0A%0A__version__ = '1.0.0'%0A%0A# do our best, but if it fails, continue with above%0A%0Atry:%0A __version__ += '-' + subprocess.check_output(%0A 'git rev-parse --short HEAD'.split(),%0A cwd=os.path.dirname(os.path.realpath(__file__))%0A ).strip()%0Aexcept Exception as e:%0A # before logging; should be unusual%0A print %3E%3Esys.stderr, 'Can%5C't get version from git rev-parse', e
+%0A%0A__version__ = '0.1.0'%0A
%0A%0A#
@@ -1147,57 +1147,8 @@
_)%0A%0A
-log.info('teuthology version: %25s', __version__)%0A%0A
%0Adef
@@ -1782,68 +1782,4 @@
er)%0A
- root_logger.log.info('teuthology version: %25s', __version__)%0A
|
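The reverted code appended the current git short hash to __version__ at import time. The underlying technique, sketched independently of teuthology (it is fragile in installed packages without a .git directory, which is presumably part of why it was reverted):

import os
import subprocess


def git_suffix(path):
    # Best effort: '-<short-hash>' if git metadata is available, else ''.
    try:
        out = subprocess.check_output(
            ['git', 'rev-parse', '--short', 'HEAD'], cwd=path
        )
        return '-' + out.decode().strip()
    except Exception:
        return ''


__version__ = '1.0.0' + git_suffix(os.path.dirname(os.path.abspath(__file__)))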
0e4a61b97ee7fd2cdfea73ce13bfc2ebfc25c08b
|
Fix setting loglevel on python 2.6
|
flexget/logger.py
|
flexget/logger.py
|
from __future__ import absolute_import, division, unicode_literals
import logging
import logging.handlers
import string
import sys
import threading
import warnings
# A level more detailed than DEBUG
TRACE = 5
# A level more detailed than INFO
VERBOSE = 15
class FlexGetLogger(logging.Logger):
"""Custom logger that adds task and execution info to log records."""
local = threading.local()
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
extra = {'task': getattr(FlexGetLogger.local, 'task', '')}
return logging.Logger.makeRecord(self, name, level, fn, lno, msg, args, exc_info, func, extra)
def trace(self, msg, *args, **kwargs):
"""Log at TRACE level (more detailed than DEBUG)."""
self.log(TRACE, msg, *args, **kwargs)
def verbose(self, msg, *args, **kwargs):
"""Log at VERBOSE level (displayed when FlexGet is run interactively.)"""
self.log(VERBOSE, msg, *args, **kwargs)
class FlexGetFormatter(logging.Formatter):
"""Custom formatter that can handle both regular log records and those created by FlexGetLogger"""
plain_fmt = '%(asctime)-15s %(levelname)-8s %(name)-29s %(message)s'
flexget_fmt = '%(asctime)-15s %(levelname)-8s %(name)-13s %(task)-15s %(message)s'
def __init__(self):
logging.Formatter.__init__(self, self.plain_fmt, '%Y-%m-%d %H:%M')
def format(self, record):
if hasattr(record, 'task'):
self._fmt = self.flexget_fmt
else:
self._fmt = self.plain_fmt
record.message = record.getMessage()
if string.find(self._fmt, "%(asctime)") >= 0:
record.asctime = self.formatTime(record, self.datefmt)
s = self._fmt % record.__dict__
# Replace newlines in log messages with \n
s = s.replace('\n', '\\n')
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s += "\n"
s += record.exc_text
return s
def set_execution(execution):
FlexGetLogger.local.execution = execution
def set_task(task):
FlexGetLogger.local.task = task
_logging_configured = False
_buff_handler = None
_logging_started = False
def initialize(unit_test=False):
"""Prepare logging.
"""
global _logging_configured, _logging_started, _buff_handler
if _logging_configured:
return
warnings.simplefilter('once')
logging.addLevelName(TRACE, 'TRACE')
logging.addLevelName(VERBOSE, 'VERBOSE')
_logging_configured = True
# with unit test we want a bit simpler setup
if unit_test:
logging.basicConfig()
_logging_started = True
return
# Store any log messages in a buffer until the `start` function is run
logger = logging.getLogger()
_buff_handler = logging.handlers.BufferingHandler(1000 * 1000)
logger.addHandler(_buff_handler)
logger.setLevel(logging.NOTSET)
def start(filename=None, level=logging.INFO, to_console=True, to_file=True):
"""After initialization, start file logging.
"""
global _logging_started
assert _logging_configured
if _logging_started:
return
# root logger
logger = logging.getLogger()
logger.setLevel(level)
formatter = FlexGetFormatter()
if to_file:
file_handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1000 * 1024, backupCount=9)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# without --cron we log to console
if to_console:
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
# flush what we have stored from the plugin initialization
logger.removeHandler(_buff_handler)
if _buff_handler:
for record in _buff_handler.buffer:
if logger.isEnabledFor(record.levelno):
logger.handle(record)
_buff_handler.flush()
_logging_started = True
# Set our custom logger class as default
logging.setLoggerClass(FlexGetLogger)
|
Python
| 0.000007
|
@@ -3446,24 +3446,212 @@
getLogger()%0A
+ if not isinstance(level, int):%0A # Python logging api is horrible. This is getting the level number, which is required on python 2.6.%0A level = logging.getLevelName(level)%0A
logger.s
|
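The patch above relies on logging.getLevelName being bidirectional: given a number it returns the name, and given a registered name it returns the number, which Python 2.6's setLevel needed. A sketch of that round trip:

import logging

level = 'INFO'
if not isinstance(level, int):
    # For registered names, getLevelName maps 'INFO' -> 20.
    level = logging.getLevelName(level)

assert level == logging.INFO
logging.getLogger().setLevel(level)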
26123b15e28975c331b4c29e86bf69f2bee3a2c2
|
Add option to get_or_create_inspection to filter inspections with content.
|
thezombies/tasks/urls.py
|
thezombies/tasks/urls.py
|
from __future__ import absolute_import
from django.db import transaction
from django.conf import settings
from celery import shared_task
import requests
from requests.exceptions import (MissingSchema)
from cachecontrol import CacheControl
from .utils import ResultDict, logger
from thezombies.models import URLInspection
REQUEST_TIMEOUT = getattr(settings, 'REQUEST_TIMEOUT', 60)
session = CacheControl(requests.Session(), cache_etags=False)
@shared_task
def check_and_correct_url(url, method='GET'):
"""Check a url for issues, record exceptions, and attempt to correct the url.
:param url: URL to check and correct
:param method: http method to use, as a string. Default is 'GET'
"""
returnval = ResultDict({'initial_url': url})
req = requests.Request(method.upper(), url)
try:
preq = req.prepare()
except MissingSchema as e:
returnval.add_error(e)
new_url = 'http://{}'.format(req.url)
req.url = new_url
try:
preq = req.prepare()
returnval['corrected_url'] = preq.url
except Exception as e:
returnval.add_error(e)
except Exception as e:
returnval.add_error(e)
return returnval
@shared_task
def request_url(url, method='GET'):
"""Task to request a url, a GET request by default. Tracks and returns errors.
Will not raise an Exception, but may return None for response
:param url: URL to request
:param method: http method to use, as a string. Default is 'GET'
"""
resp = None
checker_result = check_and_correct_url(url)
valid_url = checker_result.get('corrected_url', url)
returnval = ResultDict(checker_result)
try:
resp = session.request(method.upper(), valid_url, allow_redirects=True, timeout=REQUEST_TIMEOUT)
except requests.exceptions.Timeout as e:
returnval.add_error(e)
returnval['timeout'] = True
except Exception as e:
returnval.add_error(e)
# a non-None requests.Response will evaluate to False if it carries an HTTPError value
if resp is not None:
try:
resp.raise_for_status()
except Exception as e:
returnval.add_error(e)
returnval['response'] = resp
return returnval
@shared_task
def get_or_create_inspection(url):
"""Task to get the lastest URLInspection or create a new one if none exists.
:param url: The url to retrieve.
"""
latest_dates = URLInspection.objects.datetimes('created_at', 'minute')
recent_inspections = None
if latest_dates:
latest_date = latest_dates.latest()
recent_inspections = URLInspection.objects.filter(requested_url=url, created_at__day=latest_date.day, parent_id__isnull=True)
inspection = None
if recent_inspections and recent_inspections.count() > 0:
inspection = recent_inspections.latest()
else:
logger.info('No stored inspection, fetch url')
fetch_val = request_url(url)
response = fetch_val.get('response', None)
with transaction.atomic():
if response is not None:
inspection = URLInspection.objects.create_from_response(response)
inspection.save()
else:
timeout = fetch_val.get('timeout', False)
inspection = URLInspection.objects.create(requested_url=url, timeout=timeout)
inspection.save()
return ResultDict({'inspection_id': getattr(inspection, 'id', None), 'url': url})
|
Python
| 0
|
@@ -2300,16 +2300,36 @@
tion(url
+, with_content=False
):%0A %22
@@ -2700,64 +2700,274 @@
url,
- created_at__day=latest_date.day, parent_id__isnull=True
+%0A created_at__day=latest_date.day,%0A parent_id__isnull=True,%0A content__isnull=(not with_content)
)%0A%0A
|
574ad672167d06841fb37501050848d49c31d7ae
|
fix absolute import
|
vispy/scene/visuals/line/line.py
|
vispy/scene/visuals/line/line.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Line visual implementing Agg- and GL-based drawing modes.
TODO:
* Agg support is very minimal; needs attention.
* Optimization--avoid creating new buffers, avoid triggering program
recompile.
"""
from __future__ import division
import numpy as np
from vispy import gloo
from ....color import Color
from ...shaders import ModularProgram, Function
from ..visual import Visual
from .line_agg import LineAgg
try:
import OpenGL.GL
HAVE_PYOPENGL = True
except ImportError:
HAVE_PYOPENGL = False
vec2to4 = Function("""
vec4 vec2to4(vec2 input) {
return vec4(input, 0, 1);
}
""")
vec3to4 = Function("""
vec4 vec3to4(vec3 input) {
return vec4(input, 1);
}
""")
class Line(Visual):
VERTEX_SHADER = """
varying vec4 v_color;
void main(void)
{
gl_Position = $transform($position);
v_color = $color;
}
"""
FRAGMENT_SHADER = """
varying vec4 v_color;
void main()
{
gl_FragColor = v_color;
}
"""
def __init__(self, pos=None, color=(0.5, 0.5, 0.5, 1), width=1,
connect='strip', mode='agg', antialias=True, **kwds):
Visual.__init__(self, **kwds)
# todo: move this to set_data and allow mode switch after init
if mode not in ('agg', 'gl'):
raise ValueError('mode argument must be "agg" or "gl".')
self._mode = mode
self._vbo = None
self._color = None
self._pos_expr = None
self._connect = None
self._width = None
self._antialias = antialias
# Reference to a LineAgg visual that will do our drawing if mode=='agg'
# todo: this is a bit of a hack to get agg and gl_lines available via
# the same class. It can probably be cleaned up.
self._agg_line = None
self._program = ModularProgram(self.VERTEX_SHADER,
self.FRAGMENT_SHADER)
self.set_data(pos, color, width, connect)
@property
def antialias(self):
return self._antialias
@antialias.setter
def antialias(self, aa):
self._antialias = aa
self.update()
def set_data(self, pos=None, color=None, width=None, connect=None, mode=None):
""" Set the data used to draw this visual.
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
width:
The width of the line in px. Line widths > 1px are only
guaranteed to work when using 'agg' mode.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* numpy arrays specify the exact set of segment pairs to
connect.
mode : str
* "agg" uses anti-grain geometry to draw nicely antialiased lines
with proper joins and endcaps.
* "gl" uses OpenGL's built-in line rendering. This is much faster,
but produces much lower-quality results and is not guaranteed to
obey the requested line width or join/endcap styles.
"""
if mode is not None:
raise NotImplementedError("Line mode can only be set during "
"initialization (for now).")
if self._mode == 'agg':
# use a separate method for updating agg lines
self._agg_set_data(pos, color, width, connect)
return
# for non-agg lines:
if width is not None:
self._width = width
if pos is not None:
vbo = gloo.VertexBuffer(np.asarray(pos, dtype=np.float32))
if pos.shape[-1] == 2:
self._pos_expr = vec2to4(vbo)
elif pos.shape[-1] == 3:
self._pos_expr = vec3to4(vbo)
else:
raise TypeError("pos array should have 2 or 3 elements in last"
" axis.")
self._vbo = vbo
if color is not None:
if isinstance(color, np.ndarray) and color.ndim > 1:
self._color = gloo.VertexBuffer(color.astype(np.float32))
else:
self._color = Color(color).rgba
if connect is not None:
if isinstance(connect, np.ndarray):
self._connect = gloo.IndexBuffer(connect.astype(np.uint32))
else:
self._connect = connect
self.update()
def _agg_set_data(self, pos=None, color=None, width=None, connect=None):
if connect is not None:
if connect != 'strip':
raise NotImplementedError("Only 'strip' connection mode "
"allowed for agg-mode lines.")
self._connect = connect
if color is not None:
if isinstance(color, np.ndarray) and color.ndim > 1:
raise NotImplementedError("Color arrays not implemented for "
"agg mode lines.")
self._color = Color(color).rgba
if width is not None:
self._width = width
style = {
'color': self._color,
'width': self._width,
'antialias': self._antialias,
}
if pos is not None:
self._agg_line = LineAgg(paths=[pos], style=[style])
else:
self._agg_line = None
self.update()
def draw(self, event):
if self._mode == 'agg':
self._agg_draw(event)
return
if self._pos_expr is None:
return
xform = event.render_transform.shader_map()
self._program.vert['transform'] = xform
self._program.vert['position'] = self._pos_expr
self._program.vert['color'] = self._color
gloo.set_state('translucent')
if HAVE_PYOPENGL:
OpenGL.GL.glLineWidth(self._width)
if self._antialias:
OpenGL.GL.glEnable(OpenGL.GL.GL_LINE_SMOOTH)
else:
OpenGL.GL.glDisable(OpenGL.GL.GL_LINE_SMOOTH)
if self._connect == 'strip':
self._program.draw('line_strip')
elif self._connect == 'segments':
self._program.draw('lines')
elif isinstance(self._connect, gloo.IndexBuffer):
self._program.draw('lines', self._connect)
else:
raise ValueError("Invalid line connect mode: %r" % self._connect)
def _agg_draw(self, event):
if self._agg_line is None:
return
self._agg_line.transform = self.transform
self._agg_line.draw(event)
|
Python
| 0.000015
|
@@ -411,13 +411,12 @@
rom
-vispy
+....
imp
|
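The fix swaps an absolute import for a relative one; each leading dot climbs one package level from vispy/scene/visuals/line/line.py, so four dots resolve to the vispy package itself. Annotated excerpt (not runnable outside the package):

# from within vispy/scene/visuals/line/line.py:
#   .    -> vispy.scene.visuals.line
#   ..   -> vispy.scene.visuals
#   ...  -> vispy.scene
#   .... -> vispy
from ....color import Color  # same class as vispy.color.Color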
0e4c8fb4965eadf8cf45ff0f6d3406df17015f46
|
remove print
|
timeside/server/tasks.py
|
timeside/server/tasks.py
|
from __future__ import absolute_import
import time
import gc
from celery import shared_task
from celery.result import AsyncResult
from celery.result import GroupResult
from .models import Item, Selection, Preset, Experience, Task
from .models import _DONE
from celery.task import chord
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task
def task_run(task_id):
print(task_id)
task = Task.objects.get(uuid=task_id)
results = []
if task.selection:
for item in task.selection.get_all_items():
results.append(experience_run.delay(str(task.experience.uuid), str(item.uuid)))
results_id = [res.id for res in results]
elif task.item:
results.append(experience_run.delay(str(task.experience.uuid), str(task.item.uuid)))
results_id = [res.id for res in results]
task_monitor.delay(task_id, results_id)
@shared_task
def experience_run(exp_id, item_id):
item = Item.objects.get(uuid=item_id)
experience = Experience.objects.get(uuid=exp_id)
item.run(experience)
gc.collect()
@shared_task
def task_monitor(task_id, results_id):
results = [AsyncResult(id) for id in results_id]
while not all([res.ready() for res in results]):
time.sleep(1)
task = Task.objects.get(uuid=task_id)
task.status_setter(_DONE)
|
Python
| 0.000793
|
@@ -405,27 +405,8 @@
d):%0A
- print(task_id)%0A
|
02afb9ef300e4d171b89f7160845cebbe512a6ed
|
removes __all__ as binary
|
feets/extractors/__init__.py
|
feets/extractors/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Juan Cabral
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# FUTURE
# =============================================================================
from __future__ import unicode_literals, print_function
# =============================================================================
# DOCS
# =============================================================================
__doc__ = """Features extractors classes and register utilities"""
__all__ = [
b"DATAS",
b"register_extractor",
b"registered_extractors",
b"is_registered",
b"available_features",
b"extractor_of",
b"sort_by_dependencies",
b"ExtractorBadDefinedError",
b"Extractor"]
# =============================================================================
# IMPORTS
# =============================================================================
import inspect
import six
from .core import Extractor, ExtractorBadDefinedError, DATAS # noqa
# =============================================================================
# REGISTER UTILITY
# =============================================================================
_extractors = {}
def register_extractor(cls):
if not inspect.isclass(cls) or not issubclass(cls, Extractor):
msg = "'cls' must be a subclass of Extractor. Found: {}"
raise TypeError(msg.format(cls))
for d in cls.get_dependencies():
if d not in _extractors.keys():
msg = "Dependency '{}' from extractor {}".format(d, cls)
raise ExtractorBadDefinedError(msg)
_extractors.update((f, cls) for f in cls.get_features())
return cls
def registered_extractors():
return dict(_extractors)
def is_registered(obj):
if isinstance(obj, six.string_types):
features = [obj]
elif not inspect.isclass(obj) or not issubclass(obj, Extractor):
msg = "'cls' must be a subclass of Extractor. Found: {}"
raise TypeError(msg.format(obj))
else:
features = obj.get_features()
return {f: (f in _extractors) for f in features}
def available_features():
return _extractors.keys()
def extractor_of(feature):
return _extractors[feature]
def sort_by_dependencies(exts, retry=None):
"""Calculate the Feature Extractor Resolution Order.
"""
sorted_ext, features_from_sorted = [], set()
pending = [(e, 0) for e in exts]
retry = len(_extractors) * 100 if retry is None else retry
while pending:
ext, cnt = pending.pop(0)
if not isinstance(ext, Extractor) and not issubclass(ext, Extractor):
msg = "Only Extractor instances are allowed. Found {}."
raise TypeError(msg.format(type(ext)))
deps = ext.get_dependencies()
if deps.difference(features_from_sorted):
if cnt + 1 > retry:
msg = "Maximun retry ({}) to sort achieved from extractor {}."
raise RuntimeError(msg.format(retry, type(ext)))
pending.append((ext, cnt + 1))
else:
sorted_ext.append(ext)
features_from_sorted.update(ext.get_features())
return tuple(sorted_ext)
# =============================================================================
# REGISTERS
# =============================================================================
from .ext_amp import * # noqa
from .ext_amplitude import * # noqa
from .ext_anderson_darling import * # noqa
from .ext_autocor_length import * # noqa
from .ext_beyond1_std import * # noqa
from .ext_car import * # noqa
from .ext_color import * # noqa
from .ext_con import * # noqa
from .ext_eta_color import * # noqa
from .ext_eta_e import * # noqa
from .ext_flux_percentile_ratio import * # noqa
from .ext_fourier_components import * # noqa
from .ext_gskew import * # noqa
from .ext_linear_trend import * # noqa
from .ext_lomb_scargle import * # noqa
from .ext_max_slope import * # noqa
from .ext_mean import * # noqa
from .ext_mean_variance import * # noqa
from .ext_median_abs_dev import * # noqa
from .ext_median_brp import * # noqa
from .ext_pair_slope_trend import * # noqa
from .ext_percent_amplitude import * # noqa
from .ext_percent_difference_flux_percentile import * # noqa
from .ext_q31 import * # noqa
from .ext_q31 import * # noqa
from .ext_rcs import * # noqa
#~ from .ext_signature import * # noqa
from .ext_skew import * # noqa
from .ext_slotted_a_length import * # noqa
from .ext_small_kurtosis import * # noqa
from .ext_std import * # noqa
from .ext_stetson import * # noqa
from .ext_structure_functions import * # noqa
for cls in sort_by_dependencies(Extractor.__subclasses__()):
register_extractor(cls)
del cls
|
Python
| 0.999999
|
@@ -1349,26 +1349,8 @@
port
- unicode_literals,
pri
@@ -1615,17 +1615,16 @@
= %5B%0A
-b
%22DATAS%22,
@@ -1624,25 +1624,24 @@
DATAS%22,%0A
-b
%22register_ex
@@ -1654,17 +1654,16 @@
r%22,%0A
-b
%22registe
@@ -1683,17 +1683,16 @@
s%22,%0A
-b
%22is_regi
@@ -1704,17 +1704,16 @@
d%22,%0A
-b
%22availab
@@ -1730,17 +1730,16 @@
s%22,%0A
-b
%22extract
@@ -1750,17 +1750,16 @@
f%22,%0A
-b
%22sort_by
@@ -1774,25 +1774,24 @@
ncies%22,%0A
-b
%22ExtractorBa
@@ -1810,17 +1810,16 @@
r%22,%0A
-b
%22Extract
|
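The diff above drops the b'' prefixes because, on Python 3, star-imports require every __all__ entry to be str. A minimal reproduction (module name hypothetical):

# somemod.py (hypothetical)
__all__ = [b"helper"]  # bytes entry


def helper():
    pass

# In another module, `from somemod import *` then fails with roughly:
# TypeError: Item in somemod.__all__ must be str, not bytes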
0138eacf0d518b86e819a70000b7b527434a6b35
|
Change the arguments passed to celery to handle JSON serialization.
|
libretto/signals.py
|
libretto/signals.py
|
# coding: utf-8
from __future__ import unicode_literals
from celery_haystack.signals import CelerySignalProcessor
from django.contrib.admin.models import LogEntry
from reversion.models import Version, Revision
from .tasks import auto_invalidate
class CeleryAutoInvalidator(CelerySignalProcessor):
def enqueue(self, action, instance, sender, **kwargs):
if sender in (LogEntry, Revision, Version):
return
auto_invalidate.delay(action, instance)
|
Python
| 0
|
@@ -157,16 +157,67 @@
ogEntry%0A
+from django.contrib.sessions.models import Session%0A
from rev
@@ -434,16 +434,25 @@
ogEntry,
+ Session,
Revisio
@@ -530,10 +530,33 @@
instance
+.__class__, instance.pk
)%0A
|
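The change above stops passing a model instance to the task, since celery's JSON serializer cannot encode arbitrary Python objects; the worker receives enough information to re-fetch the row instead. A common shape of this pattern, sketched with a JSON-friendly "app.Model" label (task body hypothetical, assumes Django and celery are installed):

from celery import shared_task
from django.apps import apps


@shared_task
def auto_invalidate(action, model_label, pk):
    # Resolve the class from its label, then re-fetch inside the worker.
    model = apps.get_model(model_label)
    instance = model.objects.get(pk=pk)
    ...

Note the commit itself passes instance.__class__ and instance.pk; the label form above is one conventional way to keep every argument JSON-serializable.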
a3816e9dbd70c41b12307e776a87d3c5056030dd
|
fix settings
|
explorer/utils.py
|
explorer/utils.py
|
import functools
import csv
import json
import re
import string
from time import time
from explorer import app_settings
from django.db import connections, connection, models, transaction, DatabaseError
from django.http import HttpResponse
from six.moves import cStringIO
import sqlparse
EXPLORER_PARAM_TOKEN = "$$"
# SQL Specific Things
def passes_blacklist(sql):
clean = functools.reduce(lambda sql, term: sql.upper().replace(term, ""), app_settings.EXPLORER_SQL_WHITELIST, sql)
return not any(write_word in clean.upper() for write_word in app_settings.EXPLORER_SQL_BLACKLIST)
def get_connection():
return connections[app_settings.EXPLORER_CONNECTION_NAME] if app_settings.EXPLORER_CONNECTION_NAME else connection
def schema_info():
"""
Construct schema information via introspection of the django models in the database.
:return: Schema information of the following form, sorted by db_table_name.
[
("package.name -> ModelClass", "db_table_name",
[
("db_column_name", "DjangoFieldType"),
(...),
]
)
]
"""
ret = []
apps = [a for a in models.get_apps() if a.__package__ not in app_settings.EXPLORER_SCHEMA_EXCLUDE_APPS]
for app in apps:
for model in models.get_models(app):
friendly_model = "%s -> %s" % (app.__package__, model._meta.object_name)
ret.append((
friendly_model,
model._meta.db_table,
[_format_field(f) for f in model._meta.fields]
))
# Do the same thing for many_to_many fields. These don't show up in the field list of the model
# because they are stored as separate "through" relations and have their own tables
ret += [(
friendly_model,
m2m.rel.through._meta.db_table,
[_format_field(f) for f in m2m.rel.through._meta.fields]
) for m2m in model._meta.many_to_many]
return sorted(ret, key=lambda t: t[1])
def _format_field(field):
return (field.get_attname_column()[1], field.get_internal_type())
def param(name):
return "%s%s%s" % (EXPLORER_PARAM_TOKEN, name, EXPLORER_PARAM_TOKEN)
def swap_params(sql, params):
p = params.items() if params else {}
for k, v in p:
sql = sql.replace(param(k), str(v))
return sql
def extract_params(text):
regex = re.compile("\$\$([a-zA-Z0-9_|-]+)\$\$")
params = re.findall(regex, text)
return dict(zip(params, ['' for i in range(len(params))]))
def write_csv(headers, data):
csv_data = cStringIO()
writer = csv.writer(csv_data, delimeter=settings.CSV_DELIMETER)
writer.writerow(headers)
for row in data:
writer.writerow(row)
return csv_data.getvalue()
def get_filename_for_title(title):
# build list of valid chars, build filename from title and replace spaces
valid_chars = '-_.() %s%s' % (string.ascii_letters, string.digits)
filename = ''.join(c for c in title if c in valid_chars)
filename = filename.replace(' ', '_')
return filename
def build_stream_response(query):
data = csv_report(query)
response = HttpResponse(data, content_type='text')
return response
def build_download_response(query):
data = csv_report(query)
response = HttpResponse(data, content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s.csv"' % (
get_filename_for_title(query.title)
)
response['Content-Length'] = len(data)
return response
def csv_report(query):
try:
res = query.execute()
return write_csv(res.headers, res.data)
except DatabaseError as e:
return str(e)
# Helpers
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth.views import login
from django.contrib.auth import REDIRECT_FIELD_NAME
def safe_admin_login_prompt(request):
defaults = {
'template_name': 'admin/login.html',
'authentication_form': AdminAuthenticationForm,
'extra_context': {
'title': 'Log in',
'app_path': request.get_full_path(),
REDIRECT_FIELD_NAME: request.get_full_path(),
},
}
return login(request, **defaults)
def shared_dict_update(target, source):
for k_d1 in target:
if k_d1 in source:
target[k_d1] = source[k_d1]
return target
def safe_cast(val, to_type, default=None):
try:
return to_type(val)
except ValueError:
return default
def safe_json(val):
try:
return json.loads(val)
except ValueError:
return None
def get_int_from_request(request, name, default):
val = request.GET.get(name, default)
return safe_cast(val, int, default) if val else None
def get_json_from_request(request, name):
val = request.GET.get(name, None)
return safe_json(val) if val else None
def url_get_rows(request):
return get_int_from_request(request, 'rows', app_settings.EXPLORER_DEFAULT_ROWS)
def url_get_query_id(request):
return get_int_from_request(request, 'query_id', None)
def url_get_log_id(request):
return get_int_from_request(request, 'querylog_id', None)
def url_get_params(request):
return get_json_from_request(request, 'params')
def user_can_see_query(request, kwargs):
if not request.user.is_anonymous() and 'query_id' in kwargs:
allowed_queries = app_settings.EXPLORER_GET_USER_QUERY_VIEWS().get(request.user.id, [])
return int(kwargs['query_id']) in allowed_queries
return False
def fmt_sql(sql):
return sqlparse.format(sql, reindent=True, keyword_case='upper')
|
Python
| 0.000001
|
@@ -2765,16 +2765,20 @@
limeter=
+app_
settings
|
e8b5ca4f27f7b74cec5349b8a03a28239afefba9
|
format all item fields, not just those used by 'by' option
|
flinck/brain.py
|
flinck/brain.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""IMDB movie searcher.
"""
from __future__ import print_function
import logging
import os
import re
import sys
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
import omdb
import requests
from .config import config, FIELDS
# Regex to extract title and year. Should work as long as they are at the
# beginning and in that order.
FNAME_SPLIT_RE = '|'.join([r'\W%s(?:\W|$)' % x
for x in ('dvdrip', r'vost\w+', '1080p',
'720p', 'multi',
r'[\(\[]\D+[\)\]]', # parentheses
'bluray', 'x264', 'ac3', # format
r'b[dr]rip', 'xvid', 'divx', 'fansub',
r'S\d+(E\d+)?', # seasons
'avi', 'mkv')])
CACHED_RESULTS = {}
logger = logging.getLogger(__name__)
def scrub(text, chars, new):
"""Replace chars.
"""
for char in chars:
if char in text:
text = text.replace(char, new)
return text.strip()
def google_search_by(title, year):
engine_id = '009217259823014548361:0gf2jfpzpbm'
url = (u'https://www.googleapis.com/customsearch/v1?key='
'%s&cx=%s&q=%s+%s' % (config['google_api_key'], engine_id,
quote(title), year))
r = requests.get(url)
if r.status_code == 200:
json = r.json()
if 'items' in json:
return r.json()['items'][0]['link'].strip('/').split('/')[-1]
def format_field(item, field):
"""Tweak the string representation of the item field
"""
if item.get(field, None) == 'N/A':
item[field] = 'Unknown'
else:
try:
if field in ('country', 'genre'):
item[field] = item[field].split(',')[0]
elif field == 'director':
item[field] = item[field].replace(', ', ' and ')
elif field == 'runtime':
item[field] = re.findall(r'\d+', item['runtime']
)[0].zfill(3) + ' min'
elif field == 'decade':
item['decade'] = item['year'].strip(u'–')[:-1] + '0s'
elif field == 'rating':
item['rating'] = item.pop('imdb_rating')
except Exception:
item[field] = 'Unknown'
def format_item(item, fields):
"""Strip item from needless keys, format others values adequately
"""
for key in list(item):
if key not in FIELDS:
item.pop(key, None)
for field in fields:
format_field(item, field)
return item
def to_unicode(text):
try:
return unicode(text, sys.getfilesystemencoding(), errors="ignore")
except NameError:
pass # Python3, no conversion needed
return text
def search_by(title, year, fields, imdb_id=None):
"""Search movie infos using its title and year
"""
if (title, year) in CACHED_RESULTS:
item = CACHED_RESULTS[(title, year)]
logger.debug('Get from cache: %s' % item)
else:
query = {'fullplot': False, 'tomatoes': False}
if imdb_id:
query['imdbid'] = imdb_id
else:
query['title'] = title
query['year'] = year
logger.debug('Query: %s' % query)
item = omdb.get(**query)
if item:
item['title'] = to_unicode(title) # force original title
item = format_item(item, fields)
CACHED_RESULTS[(title, year)] = item
elif not imdb_id and config['google_api_key']:
imdb_id = google_search_by(title, year)
if imdb_id:
item = search_by(title, year, fields, imdb_id)
return item
def search_filename(fname, fields):
"""Extract movie title/date from filename and return dict with movies infos
"""
path_tokens = os.path.normpath(fname).split(os.sep)
candidate = path_tokens[-1]
res = re.split(FNAME_SPLIT_RE, candidate,
flags=re.I | re.U)[0].strip()
res = scrub(res, '[({])}', ' ')
res = ' '.join([x for x in re.split(r'[\s\._]', res, flags=re.U) if x])
years = re.findall(r'((?:19|20)\d\d)', res)
if years:
toks = re.split(r'(%s)' % years[-1], res)
else:
toks = [res]
title = toks[0].strip()
year = toks[1] if len(toks) > 1 else None
item = search_by(title, year, fields)
if item:
item['filename'] = fname
return item
|
Python
| 0.000001
|
@@ -1598,33 +1598,25 @@
format_
-field(item, field
+item(item
):%0A %22
@@ -1671,24 +1671,77 @@
eld%0A %22%22%22%0A
+ for field in set(list(item) + list(FIELDS)):%0A
if item.
@@ -1767,32 +1767,36 @@
'N/A':%0A
+
item%5Bfield%5D = 'U
@@ -1803,30 +1803,16 @@
nknown'%0A
- else:%0A
try:
@@ -1820,22 +1820,19 @@
- if
+for
field i
@@ -1859,36 +1859,32 @@
'):%0A
-
item%5Bfield%5D = it
@@ -1919,166 +1919,89 @@
-
-elif field == 'director':%0A item%5Bfield%5D = item%5Bfield%5D.replace(', ', ' and ')%0A elif field == 'runtime':%0A item%5Bfield
+item%5B'director'%5D = item%5B'director'%5D.replace(', ', ' and ')%0A item%5B'runtime'
%5D =
@@ -2072,20 +2072,16 @@
-
)%5B0%5D.zfi
@@ -2099,52 +2099,8 @@
in'%0A
- elif field == 'decade':%0A
@@ -2161,52 +2161,8 @@
0s'%0A
- elif field == 'rating':%0A
@@ -2210,20 +2210,16 @@
g')%0A
-
-
except E
@@ -2224,28 +2224,24 @@
Exception:%0A
-
item
@@ -2264,267 +2264,8 @@
wn'%0A
-%0A%0Adef format_item(item, fields):%0A %22%22%22Strip item from needless keys, format others values adequately%0A %22%22%22%0A for key in list(item):%0A if key not in FIELDS:%0A item.pop(key, None)%0A for field in fields:%0A format_field(item, field)%0A
@@ -3116,24 +3116,16 @@
tem(item
-, fields
)%0A
|
e7d44dd3c62c26af4bcf926b4c15f18ee01c3fe0
|
Update to the docs
|
tool/biobambam_filter.py
|
tool/biobambam_filter.py
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import shlex
import subprocess
import sys
try:
if hasattr(sys, '_run_from_cmdl') is True:
raise ImportError
from pycompss.api.parameter import FILE_IN, FILE_OUT
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
except ImportError:
print("[Warning] Cannot import \"pycompss\" API packages.")
print(" Using mock decorators.")
from dummy_pycompss import FILE_IN, FILE_OUT
from dummy_pycompss import task
from dummy_pycompss import compss_wait_on
from basic_modules.metadata import Metadata
from basic_modules.tool import Tool
from utils import logger
# ------------------------------------------------------------------------------
class biobambam(Tool):
"""
Tool to sort and filter bam files
"""
def __init__(self, configuration=None):
"""
Init function
"""
print("BioBamBam2 Filter")
Tool.__init__(self)
@task(returns=bool, bam_file_in=FILE_IN, bam_file_out=FILE_OUT,
isModifier=False)
def biobambam_filter_alignments(self, bam_file_in, bam_file_out):
"""
Sorts and filters the bam file.
It is important that all duplicate alignments have been removed. This
can be run as an intermediate step, but should always be run as a check
to ensure that the files are sorted and duplicates have been removed.
Parameters
----------
bam_file_in : str
Location of the input bam file
bam_file_out : str
Location of the output bam file
tmp_dir : str
Tmp location for intermediate files during the sorting
Returns
-------
bam_file_out : str
Location of the output bam file
"""
td_list = bam_file_in.split("/")
print("BIOBAMBAM: bam_file_in:", bam_file_in)
tmp_dir = "/".join(td_list[0:-1])
command_line = 'bamsormadup --tmpfile=' + tmp_dir
args = shlex.split(command_line)
bam_tmp_out = tmp_dir + '/' + td_list[-1] + '.filtered.tmp.bam'
try:
with open(bam_file_in, "r") as f_in:
with open(bam_tmp_out, "w") as f_out:
process = subprocess.Popen(args, stdin=f_in, stdout=f_out)
process.wait()
except IOError:
return False
try:
with open(bam_file_out, "wb") as f_out:
with open(bam_tmp_out, "rb") as f_in:
f_out.write(f_in.read())
except IOError:
return False
return True
def run(self, input_files, input_metadata, output_files):
"""
The main function to run the BioBamBam filter to remove duplicates and
spurious reads from the FASTQ files before analysis.
Parameters
----------
input_files : dict
List of input bam file locations where 0 is the bam data file
metadata : dict
output_files : dict
Returns
-------
output_files : list
Filtered bam file.
output_metadata : list
List of matching metadata dict objects
"""
logger.info("BIOBAMBAM FILTER: Ready to run")
results = self.biobambam_filter_alignments(input_files['input'], output_files['output'])
results = compss_wait_on(results)
if results is False:
logger.fatal("BIOBAMBAM: run failed")
return {}, {}
logger.info("BIOBAMBAM FILTER: completed")
output_metadata = {
"bam": Metadata(
data_type="data_chip_seq",
file_type="BAM",
file_path=output_files["output"],
sources=[input_metadata["input"].file_path],
taxon_id=input_metadata["input"].taxon_id,
meta_data={
"assembly": input_metadata["input"].meta_data["assembly"],
"tool": "biobambam_filter"
}
)
}
return (
{"bam": output_files['output']},
output_metadata
)
# ------------------------------------------------------------------------------
|
Python
| 0
|
@@ -3785,19 +3785,19 @@
files :
-lis
+dic
t%0A
@@ -3846,19 +3846,19 @@
adata :
-lis
+dic
t%0A
|
eb102bb8550d59b34373f1806633a6079f7064a8
|
Make sure that all requests for static files are correctly hidden from output
|
devserver/utils/http.py
|
devserver/utils/http.py
|
from django.conf import settings
from django.core.servers.basehttp import WSGIRequestHandler
from django.db import connection
from devserver.utils.time import ms_from_timedelta
from datetime import datetime
class SlimWSGIRequestHandler(WSGIRequestHandler):
"""
Hides all requests that originate from ```MEDIA_URL`` as well as any
request originating with a prefix included in ``DEVSERVER_IGNORED_PREFIXES``.
"""
def handle(self, *args, **kwargs):
self._start_request = datetime.now()
return WSGIRequestHandler.handle(self, *args, **kwargs)
def log_message(self, format, *args):
duration = datetime.now() - self._start_request
env = self.get_environ()
if settings.MEDIA_URL.startswith('http:'):
if ('http://%s%s' % (env['HTTP_HOST'], self.path)).startswith(settings.MEDIA_URL):
return
# if self.path.startswith(settings.MEDIA_URL):
# return
for path in getattr(settings, 'DEVSERVER_IGNORED_PREFIXES', []):
if self.path.startswith(path):
return
format += " (time: %.2fs; sql: %dms (%dq))"
args = list(args) + [
ms_from_timedelta(duration) / 1000,
sum(float(c.get('time', 0)) for c in connection.queries) * 1000,
len(connection.queries),
]
return WSGIRequestHandler.log_message(self, format, *args)
|
Python
| 0.000001
|
@@ -304,17 +304,41 @@
te from
-%60
+either %60%60STATIC_URL%60%60 or
%60%60MEDIA_
@@ -343,16 +343,21 @@
A_URL%60%60
+%0A
as well
@@ -362,20 +362,16 @@
l as any
-%0A
request
@@ -409,16 +409,21 @@
uded in
+%0A
%60%60DEVSER
@@ -764,101 +764,76 @@
-if settings.MEDIA_URL.startswith('http:'):%0A if ('http://%25s%25s' %25 (env%5B'HTTP_HOST'%5D,
+for url in (settings.STATIC_URL, settings.MEDIA_URL):%0A if
sel
@@ -838,18 +838,16 @@
elf.path
-))
.startsw
@@ -850,34 +850,19 @@
rtswith(
-settings.MEDIA_URL
+url
):%0A
@@ -891,21 +891,96 @@
-%0A # if
+ elif url.startswith('http:'):%0A if ('http://%25s%25s' %25 (env%5B'HTTP_HOST'%5D,
sel
@@ -977,32 +977,34 @@
OST'%5D, self.path
+))
.startswith(sett
@@ -1003,37 +1003,25 @@
ith(
-settings.MEDIA_URL):%0A
+url):%0A
#
@@ -1020,9 +1020,12 @@
-#
+
@@ -1024,32 +1024,33 @@
return%0A
+%0A
for path
|
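The patch above checks each request path against both STATIC_URL and MEDIA_URL, rebuilding the absolute URL when the prefix itself is absolute. The core check, sketched without Django:

def is_asset_request(path, host, prefixes):
    # True when the path, or the absolute URL it forms with the host,
    # starts with any configured prefix.
    for url in prefixes:
        if path.startswith(url):
            return True
        if url.startswith('http:') and ('http://%s%s' % (host, path)).startswith(url):
            return True
    return False


print(is_asset_request('/static/app.css', 'example.com', ['/static/', '/media/']))  # True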
866e5fee6d39da9eb1a4893f12b1fe7aafbdbefd
|
update a comment about None gradient
|
examples/train_mlp.py
|
examples/train_mlp.py
|
##############
#
# This example demonstrates the basics of using `equinox.jitf` and `equinox.gradf`.
#
# Here we'll use them to facilitate training a simple MLP: to automatically take gradients and jit with respect to
# all the jnp.arrays constituting the parameters. (But not with respect to anything else, like the choice of activation
# function -- as that isn't something we can differentiate/JIT anyway!)
#
#############
import functools as ft
import jax
import jax.numpy as jnp
import jax.random as jrandom
import optax
import equinox as eqx
# Toy data
def get_data(dataset_size, *, key):
x = jrandom.normal(key, (dataset_size, 1))
y = 5 * x - 2
return x, y
# Simple dataloader
def dataloader(arrays, key, batch_size):
dataset_size = arrays[0].shape[0]
assert all(array.shape[0] == dataset_size for array in arrays)
indices = jnp.arange(dataset_size)
while True:
perm = jrandom.permutation(key, indices)
(key,) = jrandom.split(key, 1)
start = 0
end = batch_size
while end < dataset_size:
batch_perm = perm[start:end]
yield tuple(array[batch_perm] for array in arrays)
start = end
end = start + batch_size
def main(
dataset_size=10000,
batch_size=256,
learning_rate=3e-3,
steps=1000,
width_size=8,
depth=1,
seed=5678,
):
data_key, loader_key, model_key = jrandom.split(jrandom.PRNGKey(seed), 3)
data = get_data(dataset_size, key=data_key)
data = dataloader(data, batch_size=batch_size, key=loader_key)
# We happen to be using an Equinox model here, but that *is not important*.
# `equinox.jitf` and `equinox.gradf` will work just fine on any PyTree you like.
# (Here, `model` is actually a PyTree -- have a look at the `build_model.py` example for more on that.)
model = eqx.nn.MLP(
in_size=1, out_size=1, width_size=width_size, depth=depth, key=model_key
)
# `jitf` and `value_and_grad_f` are thin wrappers around the usual `jax` functions; they just flatten the
# input PyTrees and filter them according to their filter functions. In this case we're asking to only
# JIT/optimise with respect to arrays of floating point numbers, i.e. the parameters of our model. (So that
# for example we will statically JIT-compile with respect to any boolean flags.)
@ft.partial(eqx.jitf, filter_fn=eqx.is_inexact_array)
@ft.partial(eqx.value_and_grad_f, filter_fn=eqx.is_inexact_array)
def loss(model, x, y):
pred_y = jax.vmap(model)(x)
return jnp.mean((y - pred_y) ** 2)
optim = optax.sgd(learning_rate)
opt_state = optim.init(model)
for step, (x, y) in zip(range(steps), data):
value, grads = loss(model, x, y)
updates, opt_state = optim.update(grads, opt_state)
# Essentially equivalent to optax.apply_updates, it just doesn't try to update anything with a zero gradient.
# Anything we filtered out above will have a zero gradient. But in general some of the things we filtered out
# might not even be jnp.arrays, and might not even have a notion of addition. We don't want to try adding zero
# to arbitrary Python objects.
model = eqx.apply_updates(model, updates)
print(step, value)
return value # Final loss
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -2923,297 +2923,23 @@
h a
-zero gradient.%0A # Anything we filtered out above will have a zero gradient. But in general some of the things we filtered out%0A # might not even be jnp.arrays, and might not even have a notion of addition. We don't want to try adding zero%0A # to arbitrary Python objects
+%60None%60 gradient
.%0A
|
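# A generic sketch of the "skip None gradients" update the reworded comment
# describes: leaves whose gradient is None (because they were filtered out of
# differentiation) are returned unchanged instead of having an update added.
# This mirrors the spirit of eqx.apply_updates but is not its actual
# implementation.
import jax.tree_util as jtu

def apply_updates(params, updates):
    def _apply(u, p):
        return p if u is None else p + u
    # Flatten `updates` first, treating None as a leaf; otherwise JAX regards
    # None as an empty subtree and the two structures would not match.
    return jtu.tree_map(_apply, updates, params, is_leaf=lambda x: x is None)

params = {"w": 1.0, "flag": True}
updates = {"w": -0.1, "flag": None}  # no gradient for the non-array leaf
print(apply_updates(params, updates))  # flag unchanged, w stepped to 0.9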
c053d6d46e6c1102b712649c8d91841d57b32ca7
|
add todo
|
buncuts/utils.py
|
buncuts/utils.py
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import codecs
import re
default_delimeter = "。!?▲"
default_quote_dict = {"「": "」", "『": "』"}
def split_into_sentences(text=sys.stdin, sentence_delim=default_delimeter,
quote_dict=default_quote_dict,
output=sys.stdout, append=False,
is_dir=False, limit=float('inf'),
echo=False):
# TODO: implement the echo and limit option.
# TODO: implement encoding option.
count = 0
# create a list from chars in a string
sentence_delim = list(sentence_delim)
if append:
mode = 'a'
else:
mode = 'w'
if text is not sys.stdin:
text_file = codecs.open(text, 'r', 'sjis')
else:
text_file = text
if output is not sys.stdout:
if is_dir:
path = os.path.join(output, os.path.basename(text))
output_file = codecs.open(path, mode, 'sjis')
else:
output_file = codecs.open(output, mode, 'sjis')
else:
output_file = output
for line in text_file:
# [todo] - skip empty line.
count += 1
# strip half/full width spaces
# strip() somehow doesn't work very well.
# use re instead.
line = re.sub(r"^[ \n]+|[ ]+$", "", line)
line_splitted, count_added = chunk_splitter(line,
sentence_delim, quote_dict)
output_file.write(line_splitted)
count += count_added
# close files
if text is not sys.stdin:
text_file.close()
if output is not sys.stdout:
output_file.close()
def chunk_splitter(chunk,
sentence_delim=default_delimeter,
quote_dict=default_quote_dict):
"""Chunk splitter.
Reads in a chunk, returns the split string and a count as a tuple:
(result, count)
"""
result = ""
count = 0
length = len(chunk)
outside_quote = True
quote_chars = quote_dict.keys()
current_quote = ""
current_close_quote = ""
for i in xrange(length):
char = chunk[i]
result = ''.join((result, char)) # append char to the result
# TODO: Should use a FILO to avoid multiple embedded quotations.
if outside_quote:
if char in quote_chars:
outside_quote = False
current_quote = char
current_close_quote = quote_dict[current_quote]
elif char in sentence_delim:
count += 1
# add a newline after a sentence delimiter.
if i < length - 1 and chunk[i+1] != '\n':
result = ''.join((result, '\n'))
elif i == length - 1:
result = ''.join((result, '\n'))
elif char == current_close_quote:
outside_quote = True
return result, count
|
Python
| 0
|
@@ -648,16 +648,57 @@
option.
+%0A # TODO: implement chunk_size option.
%0A%0A co
|
cf80d48226d9b8c80d3528f5870f85af1a335472
|
Fix a couple of minor bugs in the certbot_wrapper
|
tools/certbot_wrapper.py
|
tools/certbot_wrapper.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tools/certbot_wrapper.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import argparse
import logging
import os
import socket
import subprocess
import sys
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import king_phisher.color as color
import king_phisher.utilities as utilities
import advancedhttpserver
import smoke_zephyr.utilities
import yaml
LETS_ENCRYPT_LIVE_PATH = '/etc/letsencrypt/live'
PARSER_EPILOG = """
This tool facilitates issuing certificates with the certbot utility while the
King Phisher server is running.
"""
PARSER_EPILOG = PARSER_EPILOG.replace('\n', ' ')
PARSER_EPILOG = PARSER_EPILOG.strip()
def main():
parser = argparse.ArgumentParser(description='King Phisher Certbot Wrapper Utility', conflict_handler='resolve')
utilities.argp_add_args(parser)
parser.add_argument('--certbot', dest='certbot_bin', help='the path to the certbot binary to use')
parser.add_argument('server_config', type=argparse.FileType('r'), help='the server configuration file')
parser.add_argument('hostnames', nargs='+', help='the host names to request certificates for')
parser.epilog = PARSER_EPILOG
arguments = parser.parse_args()
server_config = yaml.load(arguments.server_config)
web_root = server_config['server']['web_root']
if os.getuid():
color.print_error('this tool must be run as root')
return os.EX_NOPERM
certbot_bin = arguments.certbot_bin or smoke_zephyr.utilities.which('certbot')
if certbot_bin is None:
color.print_error('could not identify the path to the certbot binary, make sure that it is')
color.print_error('installed and see: https://certbot.eff.org/ for more details')
return os.EX_NOTFOUND
if not os.access(certbot_bin, os.R_OK | os.X_OK):
color.print_error('found insufficient permissions on the certbot binary')
return os.EX_NOPERM
logger = logging.getLogger('KingPhisher.Tool.CLI.CertbotWrapper')
logger.info('using certbot binary at: ' + certbot_bin)
logger.debug('getting server binding information')
if server_config['server'].get('addresses'):
address = server_config['server']['addresses'][0]
else:
address = server_config['server']['address']
address['ssl'] = bool(server_config['server'].get('ssl_cert'))
logger.debug("checking that the king phisher server is running on: {host}:{port} (ssl={ssl})".format(**address))
try:
rpc = advancedhttpserver.RPCClient((address['host'], address['port']), use_ssl=address['ssl'])
version = rpc('version')
except (advancedhttpserver.RPCError, socket.error):
logger.error('received an rpc error while checking the version', exc_info=True)
color.print_error('failed to verify that the king phisher server is running')
return os.EX_UNAVAILABLE
logger.info('connected to server version: ' + version['version'])
vhost_directories = server_config['server']['vhost_directories']
if len(arguments.hostnames) > 1 and not vhost_directories:
color.print_error('vhost_directories must be true to specify multiple hostnames')
return os.EX_CONFIG
for hostname in arguments.hostnames:
if vhost_directories:
directory = os.path.join(web_root, hostname)
else:
directory = web_root
if os.path.split(os.path.abspath(directory))[-1] != hostname:
color.print_error('when the vhost_directories option is not set, the web_root option')
color.print_error('must be: ' + os.path.join(web_root, hostname))
return os.EX_CONFIG
if not os.path.exists(directory):
os.mkdir(directory, mode=0o775)
logger.info('created directory for host at: ' + directory)
certbot_args = (certbot_bin, 'certonly', '--webroot', '-w', directory, '-d', hostname)
logger.info('running certbot command: ' + ' '.join(certbot_args))
proc_h = subprocess.Popen(certbot_args, shell=False)
status = proc_h.wait()
if status != os.EX_OK:
color.print_error('certbot exited with exit status: ' + int(status))
break
color.print_good('certbot exited with successful status code')
if not os.path.isdir(os.path.join(LETS_ENCRYPT_LIVE_PATH, hostname)):
logger.warning('failed to find the new hostname in: ' + LETS_ENCRYPT_LIVE_PATH)
continue
color.print_status('copy the following lines into the server configuration file under')
color.print_status('the \'ssl_hosts:\' section to use the certificates with king phisher')
print(" - host: {0}".format(hostname))
print(" ssl_cert: /etc/letsencrypt/live/{0}/fullchain.pem".format(hostname))
print(" ssl_key: /etc/letsencrypt/live/{0}/privkey.pem".format(hostname))
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0.000001
|
@@ -3180,16 +3180,19 @@
.EX_
-NOTFOUND
+UNAVAILABLE
%0A%09if
@@ -5366,19 +5366,19 @@
us: ' +
-int
+str
(status)
|
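# The second hunk fixes a classic Python slip: concatenating a string with a
# non-string raises TypeError, so the exit status has to go through str(),
# not int(). A two-line illustration:
status = 1
# 'exit status: ' + int(status)  # TypeError: can only concatenate str to str
print('certbot exited with exit status: ' + str(status))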
649d4b7fb22c92fa116fb574c1e4a07578ca6faa
|
Create methods for getting revisions.
|
forum/models.py
|
forum/models.py
|
from django.db import models
import django.contrib.auth.models as auth
class User(auth.User):
"""Model for representing users.
It has a few fields that aren't in the standard authentication user
table, and are needed for the forum to work, like footers.
"""
display_name = models.CharField(max_length=30, null=True)
footer = models.TextField(null=True)
def __str__(self):
"""Show display name or user name."""
return self.display_name or self.username
class Thread(models.Model):
"""Model for representing threads."""
title = models.CharField(max_length=100)
views = models.PositiveIntegerField(default=0)
sticky = models.BooleanField(default=False)
closed = models.BooleanField(default=False)
def __str__(self):
"""Show thread title."""
return self.title
class Post(models.Model):
"""Model for representing posts.
Actual posts are stored in PostRevision; this model only stores the
thread number. The first created revision records the author
of the post and the date of its creation. The last revision
contains the actual post text.
"""
thread = models.ForeignKey(Thread)
class PostRevision(models.Model):
"""Model for representing post revisions.
The first revision for a given post contains its author and date to
show to the user. The last revision shows the date it was created
on.
"""
post = models.ForeignKey(Post)
author = models.ForeignKey(User)
date_created = models.DateTimeField(auto_now=True)
text = models.TextField()
class Meta:
ordering = ['date_created']
|
Python
| 0
|
@@ -1168,16 +1168,166 @@
hread)%0A%0A
+ def first_revision(self):%0A return self.postrevision_set.first()%0A%0A def last_revision(self):%0A return self.postrevision_set.last()%0A%0A
class Po
|
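# The inserted helpers lean on Django's QuerySet.first()/.last(), which follow
# the queryset ordering (here the Meta ordering on PostRevision, date_created),
# so first_revision() is the oldest revision and last_revision() the newest.
# Hypothetical usage (needs a configured Django project, so shown as comments):
# post = Post.objects.get(pk=1)
# original_author = post.first_revision().author  # who created the post
# current_text = post.last_revision().text        # latest text of the post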
84840387f3c0bda539673c64da5ae30ea5787626
|
Replace deprecated functions nn 2 rnn (#7338)
|
research/deep_speech/deep_speech_model.py
|
research/deep_speech/deep_speech_model.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Network structure for DeepSpeech2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Supported rnn cells.
SUPPORTED_RNNS = {
"lstm": tf.nn.rnn_cell.BasicLSTMCell,
"rnn": tf.nn.rnn_cell.RNNCell,
"gru": tf.nn.rnn_cell.GRUCell,
}
# Parameters for batch normalization.
_BATCH_NORM_EPSILON = 1e-5
_BATCH_NORM_DECAY = 0.997
# Filters of convolution layer
_CONV_FILTERS = 32
def batch_norm(inputs, training):
"""Batch normalization layer.
Note that the momentum to use will affect validation accuracy over time.
Batch norm has different behaviors during training/evaluation. With a large
momentum, the model takes longer to get a near-accurate estimation of the
moving mean/variance over the entire training dataset, which means we need
more iterations to see good evaluation results. If the training data is evenly
distributed over the feature space, we can also try setting a smaller momentum
(such as 0.1) to get good evaluation result sooner.
Args:
inputs: input data for batch norm layer.
training: a boolean to indicate if it is in training stage.
Returns:
tensor output from batch norm layer.
"""
return tf.layers.batch_normalization(
inputs=inputs, momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON,
fused=True, training=training)
def _conv_bn_layer(inputs, padding, filters, kernel_size, strides, layer_id,
training):
"""Defines 2D convolutional + batch normalization layer.
Args:
inputs: input data for convolution layer.
padding: padding to be applied before convolution layer.
filters: an integer, number of output filters in the convolution.
kernel_size: a tuple specifying the height and width of the 2D convolution
window.
strides: a tuple specifying the stride length of the convolution.
layer_id: an integer specifying the layer index.
training: a boolean to indicate which stage we are in (training/eval).
Returns:
tensor output from the current layer.
"""
# Perform symmetric padding on the feature dimension of time_step
# This step is required to avoid issues when RNN output sequence is shorter
# than the label length.
inputs = tf.pad(
inputs,
[[0, 0], [padding[0], padding[0]], [padding[1], padding[1]], [0, 0]])
inputs = tf.layers.conv2d(
inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
padding="valid", use_bias=False, activation=tf.nn.relu6,
name="cnn_{}".format(layer_id))
return batch_norm(inputs, training)
def _rnn_layer(inputs, rnn_cell, rnn_hidden_size, layer_id, is_batch_norm,
is_bidirectional, training):
"""Defines a batch normalization + rnn layer.
Args:
inputs: input tensors for the current layer.
rnn_cell: RNN cell instance to use.
rnn_hidden_size: an integer for the dimensionality of the rnn output space.
layer_id: an integer for the index of current layer.
is_batch_norm: a boolean specifying whether to perform batch normalization
on input states.
is_bidirectional: a boolean specifying whether the rnn layer is
bi-directional.
training: a boolean to indicate which stage we are in (training/eval).
Returns:
tensor output for the current layer.
"""
if is_batch_norm:
inputs = batch_norm(inputs, training)
# Construct forward/backward RNN cells.
fw_cell = rnn_cell(num_units=rnn_hidden_size,
name="rnn_fw_{}".format(layer_id))
bw_cell = rnn_cell(num_units=rnn_hidden_size,
name="rnn_bw_{}".format(layer_id))
if is_bidirectional:
outputs, _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw=fw_cell, cell_bw=bw_cell, inputs=inputs, dtype=tf.float32,
swap_memory=True)
rnn_outputs = tf.concat(outputs, -1)
else:
rnn_outputs = tf.nn.dynamic_rnn(
fw_cell, inputs, dtype=tf.float32, swap_memory=True)
return rnn_outputs
class DeepSpeech2(object):
"""Define DeepSpeech2 model."""
def __init__(self, num_rnn_layers, rnn_type, is_bidirectional,
rnn_hidden_size, num_classes, use_bias):
"""Initialize DeepSpeech2 model.
Args:
num_rnn_layers: an integer, the number of rnn layers. By default, it's 5.
rnn_type: a string, one of the supported rnn cells: gru, rnn and lstm.
is_bidirectional: a boolean to indicate if the rnn layer is bidirectional.
rnn_hidden_size: an integer for the number of hidden states in each unit.
num_classes: an integer, the number of output classes/labels.
use_bias: a boolean specifying whether to use bias in the last fc layer.
"""
self.num_rnn_layers = num_rnn_layers
self.rnn_type = rnn_type
self.is_bidirectional = is_bidirectional
self.rnn_hidden_size = rnn_hidden_size
self.num_classes = num_classes
self.use_bias = use_bias
def __call__(self, inputs, training):
# Two cnn layers.
inputs = _conv_bn_layer(
inputs, padding=(20, 5), filters=_CONV_FILTERS, kernel_size=(41, 11),
strides=(2, 2), layer_id=1, training=training)
inputs = _conv_bn_layer(
inputs, padding=(10, 5), filters=_CONV_FILTERS, kernel_size=(21, 11),
strides=(2, 1), layer_id=2, training=training)
# output of conv_layer2 with the shape of
# [batch_size (N), times (T), features (F), channels (C)].
# Convert the conv output to rnn input.
batch_size = tf.shape(inputs)[0]
feat_size = inputs.get_shape().as_list()[2]
inputs = tf.reshape(
inputs,
[batch_size, -1, feat_size * _CONV_FILTERS])
# RNN layers.
rnn_cell = SUPPORTED_RNNS[self.rnn_type]
for layer_counter in xrange(self.num_rnn_layers):
# No batch normalization on the first layer.
is_batch_norm = (layer_counter != 0)
inputs = _rnn_layer(
inputs, rnn_cell, self.rnn_hidden_size, layer_counter + 1,
is_batch_norm, self.is_bidirectional, training)
# FC layer with batch norm.
inputs = batch_norm(inputs, training)
logits = tf.layers.dense(inputs, self.num_classes, use_bias=self.use_bias)
return logits
|
Python
| 0.000003
|
@@ -987,27 +987,27 @@
tm%22: tf.
-nn.rnn_cell
+contrib.rnn
.BasicLS
@@ -1028,27 +1028,27 @@
nn%22: tf.
-nn.rnn_cell
+contrib.rnn
.RNNCell
@@ -1067,19 +1067,19 @@
tf.
-nn.rnn_cell
+contrib.rnn
.GRU
|
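# The hunks above only re-point the cell lookup table from tf.nn.rnn_cell to
# tf.contrib.rnn; in TF 1.x both modules exposed the same cell classes, and
# the commit message treats the tf.nn.rnn_cell spellings as deprecated.
# Keeping the constructors behind a single dict, as this file does, is what
# makes the rename a three-line change. A pared-down illustration, assuming a
# TF 1.x environment:
import tensorflow as tf  # TF 1.x only; tf.contrib was removed in TF 2.x

SUPPORTED_RNNS = {
    "lstm": tf.contrib.rnn.BasicLSTMCell,
    "gru": tf.contrib.rnn.GRUCell,
}
cell = SUPPORTED_RNNS["gru"](num_units=64)  # resolved via the table, not hard-coded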
65eb9bcd58d78fd80fabd03a26be73335f1a1122
|
Update ci nose config to run with four processes
|
goldstone/settings/ci.py
|
goldstone/settings/ci.py
|
"""Settings for accessing a distributed docker instance."""
# Copyright 2015 Solinea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .docker import * # pylint: disable=W0614,W0401
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('GS_DEBUG', True))
TEMPLATE_DEBUG = bool(os.environ.get('GS_TEMPLATE_DEBUG', True))
STATIC_ROOT = os.path.join(os.getcwd(), 'static')
INSTALLED_APPS += (
'django_nose',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = (
'--verbosity=2',
'--detailed-errors',
'--with-xunit',
'--xunit-file=/reports/nosetests.xml',
'--with-coverage',
'--cover-package=goldstone',
)
|
Python
| 0
|
@@ -1189,14 +1189,35 @@
goldstone',%0A
+ '--processes=4',%0A
)%0A
|
ba19e9ae4a8bb36317ac4d8b7f64ca549a4c5d8e
|
Use Django built-in get_valid_filename (#466)
|
explorer/exporters.py
|
explorer/exporters.py
|
import codecs
import csv
import json
import string
import uuid
from datetime import datetime
from io import StringIO, BytesIO
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.module_loading import import_string
from django.utils.text import slugify
from explorer import app_settings
def get_exporter_class(format):
class_str = dict(getattr(app_settings, 'EXPLORER_DATA_EXPORTERS'))[format]
return import_string(class_str)
class BaseExporter:
name = ''
content_type = ''
file_extension = ''
def __init__(self, query):
self.query = query
def get_output(self, **kwargs):
value = self.get_file_output(**kwargs).getvalue()
return value
def get_file_output(self, **kwargs):
res = self.query.execute_query_only()
return self._get_output(res, **kwargs)
def _get_output(self, res, **kwargs):
"""
:param res: QueryResult
:param kwargs: Optional. Any exporter-specific arguments.
:return: File-like object
"""
raise NotImplementedError
def get_filename(self):
# build list of valid chars, build filename from title & replace spaces
valid_chars = f'-_.() {string.ascii_letters}{string.digits}'
filename = ''.join(c for c in self.query.title if c in valid_chars)
filename = filename.replace(' ', '_')
return f'{filename}{self.file_extension}'
class CSVExporter(BaseExporter):
name = 'CSV'
content_type = 'text/csv'
file_extension = '.csv'
def _get_output(self, res, **kwargs):
delim = kwargs.get('delim') or app_settings.CSV_DELIMETER
delim = '\t' if delim == 'tab' else str(delim)
delim = app_settings.CSV_DELIMETER if len(delim) > 1 else delim
csv_data = StringIO()
csv_data.write(codecs.BOM_UTF8.decode('utf-8'))
writer = csv.writer(csv_data, delimiter=delim)
writer.writerow(res.headers)
for row in res.data:
writer.writerow([s for s in row])
return csv_data
class JSONExporter(BaseExporter):
name = 'JSON'
content_type = 'application/json'
file_extension = '.json'
def _get_output(self, res, **kwargs):
data = []
for row in res.data:
data.append(
dict(zip(
[str(h) if h is not None else '' for h in res.headers],
row
))
)
json_data = json.dumps(data, cls=DjangoJSONEncoder)
return StringIO(json_data)
class ExcelExporter(BaseExporter):
name = 'Excel'
content_type = 'application/vnd.ms-excel'
file_extension = '.xlsx'
def _get_output(self, res, **kwargs):
import xlsxwriter
output = BytesIO()
wb = xlsxwriter.Workbook(output, {'in_memory': True})
ws = wb.add_worksheet(name=self._format_title())
# Write headers
row = 0
col = 0
header_style = wb.add_format({'bold': True})
for header in res.header_strings:
ws.write(row, col, header, header_style)
col += 1
# Write data
row = 1
col = 0
for data_row in res.data:
for data in data_row:
# xlsxwriter can't handle timezone-aware datetimes or
# UUIDs, so we help out here and just cast it to a
# string
if isinstance(data, datetime) or isinstance(data, uuid.UUID):
data = str(data)
# JSON and Array fields
if isinstance(data, dict) or isinstance(data, list):
data = json.dumps(data)
ws.write(row, col, data)
col += 1
row += 1
col = 0
wb.close()
return output
def _format_title(self):
# XLSX writer wont allow sheet names > 31 characters or that
# contain invalid characters
# https://github.com/jmcnamara/XlsxWriter/blob/master/xlsxwriter/
# test/workbook/test_check_sheetname.py
title = slugify(self.query.title)
return title[:31]
|
Python
| 0
|
@@ -262,16 +262,36 @@
t import
+ get_valid_filename,
slugify
@@ -1142,299 +1142,60 @@
-# build list of valid chars, build filename from title & replace spaces%0A valid_chars = f'-_.() %7Bstring.ascii_letters%7D%7Bstring.digits%7D'%0A filename = ''.join(c for c in self.query.title if c in valid_chars)%0A filename = filename.replace(' ', '_')%0A return f'%7Bfilename%7D%7B
+return get_valid_filename(self.query.title or '') +
self
@@ -1213,10 +1213,8 @@
sion
-%7D'
%0A%0A%0Ac
|
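# django.utils.text.get_valid_filename does essentially what the deleted
# hand-rolled loop did: strip surrounding whitespace, replace inner spaces
# with underscores, and drop anything that is not an alphanumeric, dash,
# underscore, or dot. A standalone re-implementation for illustration (recent
# Django versions additionally raise on suspicious results such as an empty
# name):
import re

def get_valid_filename(name):
    s = str(name).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', s)

print(get_valid_filename("john's portrait in 2004.jpg"))  # johns_portrait_in_2004.jpg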
738fb4e15c0f34d7ebdf99852770cf94c96fbd21
|
Fix sub issue (assignment is overridden by last channel) :: refs #272
|
src/magistral/client/sub/GroupConsumer.py
|
src/magistral/client/sub/GroupConsumer.py
|
'''
Created on 11 Aug 2016
@author: rizarse
'''
import logging
import threading
from os.path import expanduser
from kafka.consumer.group import KafkaConsumer
from magistral.client.Configs import Configs
from magistral.client.MagistralException import MagistralException
from kafka.structs import TopicPartition
from magistral.Message import Message
class GroupConsumer(threading.Thread):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def __init__(self, threadId, name, sKey, bootstrapServers, groupId, permissions, cipher = None, uid = None):
threading.Thread.__init__(self)
self.threadId = threadId
self.name = name
self.group = groupId;
self.subKey = sKey;
self.cipher = None if cipher is None else cipher;
configs = Configs.consumerConfigs();
configs["bootstrap_servers"] = bootstrapServers;
configs["group_id"] = groupId;
configs['enable_auto_commit'] = False;
self.__isAlive = True
home = expanduser("~")
if uid == None:
self.__consumer = KafkaConsumer(
bootstrap_servers = bootstrapServers,
check_crcs = False,
exclude_internal_topics = True,
session_timeout_ms = 20000,
fetch_min_bytes = 128,
fetch_max_wait_ms = 256,
enable_auto_commit = False,
max_in_flight_requests_per_connection = 10,
group_id = groupId);
else:
self.__consumer = KafkaConsumer(
bootstrap_servers = bootstrapServers,
check_crcs = False,
exclude_internal_topics = True,
session_timeout_ms = 20000,
fetch_min_bytes = 128,
fetch_max_wait_ms = 256,
enable_auto_commit = False,
max_in_flight_requests_per_connection = 10,
security_protocol = 'SSL',
ssl_check_hostname = False,
ssl_keyfile = home + '/magistral/' + uid + '/key.pem',
ssl_cafile = home + '/magistral/' + uid + '/ca.pem',
ssl_certfile = home + '/magistral/' + uid + '/certificate.pem',
group_id = groupId);
self.permissions = permissions;
self.map = {}
self.__offsets = {}
def recordsTotally(self, data):
size = 0;
for val in data.values():
if len(val) > 0: size = size + len(val);
return size;
def consumerRecord2Message(self, record):
payload = record[6]
if self.cipher is not None:
try:
payload = self.cipher.decrypt(payload)
except:
pass
msg = Message(record[0], record[1], payload, record[2], record[3])
return msg
def run(self):
threadLock.acquire()
while self.__isAlive:
try:
data = self.__consumer.poll(512);
for values in data.values():
for value in values:
msg = self.consumerRecord2Message(value);
listener = self.map[msg.topic()][msg.channel()];
if listener is not None: listener(msg);
self.__consumer.commit_async();
except:
pass
threadLock.release()
# ////////////////////////////////////////////////////////////////////////////////////
def subscribe(self, topic, channel = -1, listener = None, callback = None):
assert channel is not None and isinstance(channel, int), "Channel expected as int argument"
if (channel < -1): channel = -1;
etopic = self.subKey + "." + topic;
self.logger.debug("Subscribe -> %s : %s | key = %s", topic, channel, self.subKey);
if (self.permissions == None or len(self.permissions) == 0):
raise MagistralException("User has no permissions for topic [" + topic + "].");
self.fch = [];
for meta in self.permissions:
if (meta.topic() != topic): continue;
if channel == -1:
self.fch = meta.channels();
elif channel in meta.channels():
self.fch = [ channel ];
if (len(self.fch) == 0):
npgex = "No permissions for topic [" + topic + "] granted";
self.logger.error(npgex);
raise MagistralException(npgex);
if (self.map == None or etopic not in self.map):
self.map[etopic] = {}
# // Assign Topic-partition pairs to listen
tpas = [];
for ch in self.fch:
tpas.append(TopicPartition(etopic, ch));
if (listener is not None): self.map[etopic][ch] = listener;
self.__consumer.assign(tpas);
if callback is not None:
callback(self.__consumer.assignment());
return self.__consumer.assignment();
def unsubscribe(self, topic):
self.__consumer.assign([]);
self.map.pop(topic, None);
def close(self):
self.__isAlive = False
self.__consumer.pause()
self.__consumer.close()
logging.getLogger('kafka.conn').setLevel(logging.FATAL)
logging.getLogger('kafka.cluster').setLevel(logging.FATAL)
logging.getLogger('kafka.consumer.group').setLevel(logging.INFO)
logging.getLogger('kafka.consumer.fetcher').setLevel(logging.INFO)
logging.getLogger('kafka.coordinator.consumer').setLevel(logging.INFO)
logging.getLogger('kafka.producer.record_accumulator').setLevel(logging.INFO)
threadLock = threading.Lock()
|
Python
| 0
|
@@ -5424,25 +5424,154 @@
listener
-;
+%0A %0A ca = self.__consumer.assignment()%0A if (ca is not None):%0A for tp in ca: tpas.append(tp)
%0A
|
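# The fix above re-reads consumer.assignment() and folds the existing
# TopicPartition pairs back in before calling assign(), because assign()
# replaces the whole assignment: without the merge, subscribing to a second
# channel silently dropped the first. A kafka-python flavoured sketch
# (TopicPartition is a hashable namedtuple, so a set de-duplicates the merge):
from kafka.structs import TopicPartition

def merged_assignment(current, topic, channels):
    new = {TopicPartition(topic, ch) for ch in channels}
    return new | set(current or ())  # keep whatever was already assigned

print(merged_assignment({TopicPartition('sub.key.a', 0)}, 'sub.key.b', [0, 1]))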
39c16a3552ef882441b26a4c6defc57d9ea42010
|
return JSON rather than request.response objects
|
mondo.py
|
mondo.py
|
import requests
class MondoClient():
url = 'https://production-api.gmon.io/'
def __init__(self, url = None):
if url != None:
self.url = url
def token(self, client_id, client_secret, username, password):
"""
Acquiring an access token
"""
payload = {'grant_type': 'password', 'client_id': client_id, 'client_secret': client_secret, 'username': username, 'password': password }
r = requests.post(self.url + '/oauth2/token', payload)
return r
def refresh_token(self, client_id, client_secret, refresh_token):
"""
Refreshing a previously acquired token
"""
payload = {'grant_type': 'refresh_token', 'client_id': client_id, 'client_secret': client_secret, 'refresh_token': refresh_token }
r = requests.post(self.url + '/oauth2/token', payload)
return r
def transaction(self, id, access_token, merchant = True):
"""
Getting details about a transaction
"""
headers = {'Authorization': 'Bearer ' + access_token}
params = {}
if merchant:
params['expand[]'] = 'merchant'
r = requests.get(self.url + '/transactions/' + id, params=params, headers=headers)
return r
def transactions(self, access_token, account_id, limit = 100, since = None, before = None):
"""
List transactions
"""
headers = {'Authorization': 'Bearer ' + access_token}
params = {'limit': limit, "account_id": account_id}
if since != None:
params['since'] = since
if before != None:
params['before'] = before
r = requests.get(self.url + '/transactions', params=params, headers=headers)
return r
def authenticate(self, access_token, client_id, user_id):
"""
authenticate user
"""
headers = {'Authorization': 'Bearer ' + str(access_token)}
r = requests.get(self.url + '/ping/whoami', headers=headers)
return r
def accounts(self, access_token):
"""
detailed information about customer's accounts
"""
headers = {'Authorization': 'Bearer ' + access_token}
r = requests.get(self.url + '/accounts', headers=headers)
return r
def create_feed_item(self, access_token, account_id, title, image_url, background_color = '#FCF1EE', body_color = '#FCF1EE', title_color = '#333', body = ''):
"""
publish a new feed entry
"""
headers = {'Authorization': 'Bearer ' + access_token}
payload = {
"account_id": account_id,
"type": "basic",
"params[title]": title,
"params[image_url]": image_url,
"params[background_color]": background_color,
"params[body_color]": body_color,
"params[title_color]": title_color,
"params[body]": body
}
r = requests.post(self.url + '/feed', data=payload, headers=headers)
return r
def register_webhook(self, access_token, account_id, url):
"""
registering a webhook
"""
headers = {'Authorization': 'Bearer ' + access_token}
payload = {"account_id": account_id, "url": url}
r = requests.post(self.url + '/feed', data=payload, headers=headers)
return r
|
Python
| 0.000019
|
@@ -507,33 +507,40 @@
return r
+.json()
%0A
-
%0A def refresh
@@ -876,32 +876,39 @@
return r
+.json()
%0A%0A def transa
@@ -1272,32 +1272,39 @@
return r
+.json()
%0A%0A def transa
@@ -1781,32 +1781,39 @@
return r
+.json()
%0A%0A def authen
@@ -2056,32 +2056,39 @@
return r
+.json()
%0A%0A def accoun
@@ -2312,32 +2312,32 @@
aders=headers)%0A%0A
-
return r
@@ -2328,32 +2328,39 @@
return r
+.json()
%0A%0A def create
|
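# The change above swaps requests.Response objects for parsed JSON in every
# return. A common way to avoid repeating .json() in each method is a single
# request helper; a hypothetical sketch, not part of MondoClient:
import requests

def get_json(url, **kwargs):
    r = requests.get(url, **kwargs)
    r.raise_for_status()  # surface HTTP errors instead of parsing error pages
    return r.json()       # callers receive plain dicts/lists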
0fe56f804a7ed4958f33a534e30e3b6c79526ea6
|
Update SimplyPremiumCom.py
|
module/plugins/accounts/SimplyPremiumCom.py
|
module/plugins/accounts/SimplyPremiumCom.py
|
# -*- coding: utf-8 -*-
from module.common.json_layer import json_loads
from module.plugins.Account import Account
class SimplyPremiumCom(Account):
__name__ = "SimplyPremiumCom"
__type__ = "account"
__version__ = "0.03"
__description__ = """Simply-Premium.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("EvolutionClip", "evolutionclip@live.de")]
def loadAccountInfo(self, user, req):
validuntil = -1
trafficleft = None
json_data = req.load('http://www.simply-premium.com/api/user.php?format=json')
self.logDebug("JSON data: " + json_data)
json_data = json_loads(json_data)
if 'vip' in json_data['result'] and json_data['result']['vip'] == 0:
return {"premium": False}
if 'timeend' in json_data['result'] and json_data['result']['timeend']:
validuntil = float(json_data['result']['timeend'])
if 'traffic' in json_data['result'] and json_data['result']['traffic']:
trafficleft = float(json_data['result']['traffic']) / 1024 #@TODO: Remove `/ 1024` in 0.4.10
return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
def login(self, user, data, req):
req.cj.setCookie("simply-premium.com", "lang", "EN")
if data['password'] == '' or data['password'] == '0':
post_data = {"key": user}
else:
post_data = {"login_name": user, "login_pass": data['password']}
html = req.load("http://www.simply-premium.com/login.php", post=post_data, decode=True)
if 'logout' not in html:
self.wrongPassword()
|
Python
| 0
|
@@ -231,17 +231,17 @@
_ = %220.0
-3
+4
%22%0A%0A _
@@ -991,32 +991,39 @@
data%5B'result'%5D%5B'
+remain_
traffic'%5D:%0A
@@ -1070,16 +1070,23 @@
sult'%5D%5B'
+remain_
traffic'
|
9d9b77d3785377320c6e9e835b38779035657e82
|
Fix error for users in local groups
|
indico/modules/search/controllers.py
|
indico/modules/search/controllers.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import math
from flask import jsonify, session
from marshmallow import INCLUDE, fields
from marshmallow_enum import EnumField
from sqlalchemy.orm import undefer
from indico.core.db import db
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.modules.categories import Category
from indico.modules.categories.controllers.base import RHDisplayCategoryBase
from indico.modules.events import Event
from indico.modules.groups import GroupProxy
from indico.modules.search.base import IndicoSearchProvider, SearchOptions, SearchTarget, get_search_provider
from indico.modules.search.result_schemas import CategoryResultSchema, EventResultSchema, ResultSchema
from indico.modules.search.schemas import DetailedCategorySchema, EventSchema
from indico.modules.search.views import WPCategorySearch, WPSearch
from indico.util.caching import memoize_redis
from indico.web.args import use_kwargs
from indico.web.rh import RH
@memoize_redis(3600)
def get_groups(user):
access = [user.identifier] + [x.identifier for x in user.local_groups]
if user.can_get_all_multipass_groups:
access += [GroupProxy(x.name, x.provider.name, x).identifier
for x in user.iter_all_multipass_groups()]
return access
class RHSearchDisplay(RH):
def _process(self):
return WPSearch.render_template('search.html')
class RHCategorySearchDisplay(RHDisplayCategoryBase):
def _process(self):
return WPCategorySearch.render_template('category_search.html', self.category)
class RHAPISearch(RH):
"""API for searching across all records with the current search provider.
Besides pagination, filters or placeholders may be passed as query parameters.
Since `type` may be a list, the results from the search provider are not mixed with
the InternalSearch.
"""
@use_kwargs({
'page': fields.Int(missing=1),
'q': fields.String(required=True),
'type': fields.List(EnumField(SearchTarget), missing=None)
}, location='query', unknown=INCLUDE)
def _process(self, page, q, type, **params):
search_provider = get_search_provider()
if type == [SearchTarget.category]:
search_provider = InternalSearch
access = get_groups(session.user) if session.user else []
result = search_provider().search(q, access, page, type, **params)
return ResultSchema().dump(result)
class RHAPISearchOptions(RH):
def _process(self):
search_provider = get_search_provider()()
placeholders = search_provider.get_placeholders()
sort_options = search_provider.get_sort_options()
return jsonify(SearchOptions(placeholders, sort_options).dump())
class InternalSearch(IndicoSearchProvider):
def search(self, query, access, page=1, object_types=(), **params):
if object_types == [SearchTarget.category]:
total, results = InternalSearch.search_categories(page, query, params.get('category_id'))
elif object_types == [SearchTarget.event]:
total, results = InternalSearch.search_events(page, query, params.get('category_id'))
else:
total, results = 0, []
return {
'total': total,
'pages': math.ceil(total / self.RESULTS_PER_PAGE),
'results': results,
}
@staticmethod
def search_categories(page, q, category_id):
query = Category.query if not category_id else Category.get(category_id).deep_children_query
results = (query
.filter(Category.title_matches(q),
~Category.is_deleted)
.options(undefer('chain'))
.order_by(db.func.lower(Category.title))
.paginate(page, IndicoSearchProvider.RESULTS_PER_PAGE))
# XXX should we only show categories the user can access?
# this would be nicer but then we can't easily paginate...
res = DetailedCategorySchema(many=True).dump(results.items)
return results.total, CategoryResultSchema(many=True).load(res)
@staticmethod
def search_events(page, q, category_id):
filters = [
Event.title_matches(q),
Event.effective_protection_mode == ProtectionMode.public,
~Event.is_deleted
]
if category_id is not None:
filters.append(Event.category_chain_overlaps(category_id))
results = (Event.query
.filter(*filters)
.order_by(db.func.lower(Event.title))
.paginate(page, IndicoSearchProvider.RESULTS_PER_PAGE))
res = EventSchema(many=True).dump(results.items)
return results.total, EventResultSchema(many=True).load(res)
|
Python
| 0
|
@@ -1224,13 +1224,50 @@
ier%5D
+%0A access
+
+=
%5B
-x
+GroupProxy(x.id, _group=x)
.ide
|
a58de69cd0b93f1967a9b56812b1972a3ab9e5d1
|
Update main.py
|
WikiQA_CNN+Feat/main.py
|
WikiQA_CNN+Feat/main.py
|
import numpy as np
from dl_text import *
import model
import wiki_utils as wk
from dl_text.metrics import eval_metric
glove_fname = 'D:/workspace/Trec_QA-master/data/Glove/glove.6B.50d.txt'
################### DEFINING MODEL ###################
lrmodel = model.cnn
model_name = lrmodel.func_name
################### DEFINING HYPERPARAMETERS ###################
dimx = 60
dimy = 60
dimft = 44
batch_size = 50
vocab_size = 8000
embedding_dim = 50
nb_filter = 120
filter_length = (50,4)
depth = 1
nb_epoch = 3
ques, ans, label_train, train_len, test_len, wordVec_model, res_fname, pred_fname, feat_train, feat_test = wk.load_wiki(model_name, glove_fname)
data_l , data_r, embedding_matrix = dl.process_data(ques, ans,
wordVec_model,dimx=dimx,
dimy=dimy,vocab_size=vocab_size,
embedding_dim=embedding_dim)
X_train_l,X_test_l,X_dev_l,X_train_r,X_test_r,X_dev_r = wk.prepare_train_test(data_l,data_r,
train_len,test_len)
if model_name == 'cnn_ft':
lrmodel = lrmodel(embedding_matrix, dimx=dimx, dimy=dimy, dimft=dimft, nb_filter = 120,
embedding_dim = 50, filter_length = (50,4), vocab_size = 8000, depth = 1)
print '\n',model_name,'model built \n'
lrmodel.fit([X_train_l, X_train_r,feat_train],label_train,batch_size=batch_size,nb_epoch=nb_epoch,verbose=2)
map_val, mrr_val = eval_metric(lrmodel, X_test_l, X_test_r, res_fname, pred_fname, feat_test=feat_test)
else:
lrmodel = lrmodel(embedding_matrix, dimx=dimx, dimy=dimy, nb_filter = 120,
embedding_dim = 50, filter_length = (50,4), vocab_size = 8000, depth = 1)
print '\n', model_name,'model built \n'
lrmodel.fit([X_train_l, X_train_r],label_train,batch_size=batch_size,nb_epoch=nb_epoch,verbose=2)
map_val, mrr_val = eval_metric(lrmodel, X_test_l, X_test_r, res_fname, pred_fname)
print 'MAP : ',map_val,' MRR : ',mrr_val
|
Python
| 0.000001
|
@@ -1,12 +1,85 @@
+%22%22%22%0A** deeplean-ai.com **%0A** dl-lab **%0Acreated by :: GauravBh1010tt%0A%22%22%22%0A%0A
import numpy
|
6bb9eb81540720c67b709f3f2c971343e7e94f21
|
change --cookbook-path to --cookbooks
|
fastfood/shell.py
|
fastfood/shell.py
|
# -*- coding: utf-8 -*-
"""fastfood - cookbook wizardry"""
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import os
import sys
import threading
from fastfood import manifest
_local = threading.local()
LOG = logging.getLogger(__name__)
NAMESPACE = 'fastfood'
def _fastfood_gen(args):
print(args)
def _fastfood_new(args):
cookbook_name = args.cookbook_name
templatepack = args.template_pack
cookbooks = args.cookbook_path
return manifest.create_new_cookbook(
cookbook_name, templatepack, cookbooks)
def _fastfood_build(args):
print(args)
def _split_key_val(option):
key_val = option.split(':', 1)
assert len(key_val) == 2, "Bad option %s" % option
return key_val
def getenv(option_name, default=None):
env = "%s_%s" % (NAMESPACE.upper(), option_name.upper())
return os.environ.get(env, default)
def main():
"""fastfood command line interface."""
import argparse
import traceback
class HelpfulParser(argparse.ArgumentParser):
def error(self, message, print_help=False):
if 'too few arguments' in message:
sys.argv.insert(0, os.path.basename(sys.argv.pop(0)))
message = ("%s. Try getting help with `%s -h`"
% (message, " ".join(sys.argv)))
if print_help:
self.print_help()
sys.stderr.write('\nerror: %s\n' % message)
sys.exit(2)
parser = HelpfulParser(
description=__doc__.splitlines()[0],
epilog="\n".join(__doc__.splitlines()[1:]),
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
verbose = parser.add_mutually_exclusive_group()
verbose.add_argument('-v', dest='loglevel', action='store_const',
const=logging.INFO,
help="Set log-level to INFO.")
verbose.add_argument('-vv', dest='loglevel', action='store_const',
const=logging.DEBUG,
help="Set log-level to DEBUG.")
parser.set_defaults(loglevel=logging.WARNING)
parser.add_argument('--template-pack', help='template pack location', metavar='template_pack',
default=getenv('template_pack', os.path.join(os.getenv('HOME'), '.fastfood')))
parser.add_argument('--cookbook-path', help='cookbooks directory', metavar='cookbook_path',
default=getenv('cookbook_path', os.path.join(os.getenv('HOME'), 'cookbooks')))
subparsers = parser.add_subparsers(
dest='_subparsers', title='fastfood commands',
description='operations...',
help='...')
#
# `fastfood gen`
#
gen_parser = subparsers.add_parser(
'gen', help='Create a new recipe for an existing cookbook.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
gen_parser.add_argument('stencil_set',
help="Stencil set to use.")
gen_parser.add_argument('options', nargs='*', type=_split_key_val,
metavar='option',
help="Stencil options.")
gen_parser.add_argument('--force', '-f', action='store_true', default=False,
help="Overwrite existing files.")
gen_parser.set_defaults(func=_fastfood_gen)
#
# `fastfood new`
#
new_parser = subparsers.add_parser(
'new', help='Create a cookbook.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
new_parser.add_argument('cookbook_name',
help="Name of the new cookbook.")
new_parser.set_defaults(func=_fastfood_new)
#
# `fastfood build`
#
build_parser = subparsers.add_parser(
'build', help='Create or update a cookbook using a config',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
build_parser.add_argument('config_file',
help="JSON config file")
build_parser.set_defaults(func=_fastfood_build)
setattr(_local, 'argparser', parser)
args = parser.parse_args()
if getattr(args, 'options', None):
args.options = {k:v for k,v in args.options}
try:
result = args.func(args)
except Exception as err:
traceback.print_exc()
# todo: traceback in -v or -vv mode?
sys.stderr.write("%s\n" % repr(err))
sys.stderr.flush()
sys.exit(1)
except KeyboardInterrupt:
sys.exit("\nStahp")
else:
import ipdb;ipdb.set_trace()
# result
if __name__ == '__main__':
main()
|
Python
| 0.000009
|
@@ -488,21 +488,17 @@
cookbook
-_path
+s
%0A ret
@@ -2375,13 +2375,9 @@
book
--path
+s
', h
@@ -2406,33 +2406,8 @@
ry',
- metavar='cookbook_path',
%0A
@@ -2455,13 +2455,9 @@
book
-_path
+s
', o
|
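# argparse derives the attribute name from the long option, so renaming
# '--cookbook-path' to '--cookbooks' turns args.cookbook_path into
# args.cookbooks, which is why _fastfood_new had to change in the same commit.
# A self-contained illustration:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--cookbooks', help='cookbooks directory',
                    default='~/cookbooks')
args = parser.parse_args([])
print(args.cookbooks)  # '~/cookbooks'; args.cookbook_path no longer exists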
187e8237f9ba56dc517b2ad6e58be3e8031fa9df
|
Update __init__.py
|
examples/__init__.py
|
examples/__init__.py
|
# coding: utf-8
# In[1]:
import sys
sys.path.append("./examples")#add the examples as a module
|
Python
| 0.000072
|
@@ -13,19 +13,8 @@
f-8%0A
-%0A# In%5B1%5D:%0A%0A
impo
@@ -55,36 +55,5 @@
es%22)
-#add the examples as a module
%0A
|
5060294de04033eb99bbaf91353794fd5e484217
|
Modify only to attempt decryption on strings
|
figgypy/config.py
|
figgypy/config.py
|
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import logging
import os
import seria
import yaml
logger = logging.getLogger('figgypy')
if len(logger.handlers) == 0:
logger.addHandler(logging.NullHandler())
gpg_loaded = False
try:
import gnupg
gpg_loaded = True
except ImportError:
logging.info('could not load gnupg, will be unable to unpack secrets')
pass
class FiggyPyError(Exception):
pass
class Config(object):
"""Configuration object
Object can be created with a filename only, relative path, or absolute path.
If only name or relative path is provided, look in this order:
1. current directory
2. `~/.config/<file_name>`
3. `/etc/<file_name>`
It is a good idea to include your __package__ in the file name.
For example, `cfg = Config(os.path.join(__package__, 'config.yaml'))`.
This way it will look for your_package/config.yaml,
~/.config/your_package/config.yaml, and /etc/your_package/config.yaml.
"""
_dirs = [
os.curdir,
os.path.join(os.path.expanduser("~"), '.config'),
"/etc/"
]
def __init__(self, f):
self._f = self._get_file(f)
self._cfg = self._get_cfg(self._f)
def _get_cfg(self, f):
"""Get configuration from config file"""
try:
with open(f, 'r') as _fo:
try:
_seria_in = seria.load(_fo)
_y = _seria_in.dump('yaml')
except Exception as e:
raise
except IOError:
raise FiggyPyError("could not open configuration file")
_cfg = yaml.load(_y)
self._post_load_process(_cfg)
for k, v in _cfg.items():
setattr(self, k, v)
def _decrypt_and_update(self, obj):
"""Decrypt and update configuration.
Do this only from _post_load_process so that we can verify gpg
is ready. If we did them in the same function we would end up
calling the gpg checks several times, potentially, since we are
calling this recursively.
"""
if isinstance(obj, list):
res_v = []
for item in obj:
res_v.append(self._decrypt_and_update(item))
return res_v
elif isinstance(obj, dict):
for k, v in obj.items():
obj[k] = self._decrypt_and_update(v)
else:
if 'BEGIN PGP' in obj:
try:
decrypted = self.gpg.decrypt(obj)
if decrypted.ok:
obj = decrypted.data.decode('utf-8')
else:
logger.error("gpg error unpacking secrets %s" % decrypted.stderr)
except Exception as e:
logger.error("error unpacking secrets %s" % e)
return obj
def _post_load_process(self, cfg):
if gpg_loaded:
gpgbinary='gpg'
gnupghome=None
try:
if 'FIGGY_GPG_BINARY' in os.environ:
gpgbinary = os.environ['FIGGY_GPG_BINARY']
if 'FIGGY_GPG_HOME' in os.environ:
gnupghome = os.environ['FIGGY_GPG_HOME']
self.gpg = gnupg.GPG(gpgbinary=gpgbinary, gnupghome=gnupghome)
return self._decrypt_and_update(cfg)
except OSError as e:
if len(e.args) == 2:
if (e.args[1] == 'The system cannot find the file specified'
or 'No such file or directory' in e.args[1]):
# frobnicate
if not 'FIGGY_GPG_BINARY' in os.environ:
logger.error(
"cannot find gpg executable, path=%s, try setting GPG_BINARY env variable" % gpgbinary)
else:
logger.error("cannot find gpg executable, path=%s" % gpgbinary)
else:
logger.error("cannot setup gpg, %s" % e)
return cfg
def _get_file(self, f):
"""Get a config file if possible"""
if os.path.isabs(f):
return f
else:
for d in Config._dirs:
_f = os.path.join(d, f)
if os.path.isfile(_f):
return _f
raise FiggyPyError("could not find configuration file {} in dirs {}"
.format(f, Config._dirs))
|
Python
| 0.000001
|
@@ -2427,32 +2427,53 @@
)%0A else:%0A
+ try:%0A
if '
@@ -2499,37 +2499,45 @@
+
try:%0A
+
@@ -2594,16 +2594,20 @@
+
+
if decry
@@ -2615,16 +2615,20 @@
ted.ok:%0A
+
@@ -2692,38 +2692,46 @@
+
+
else:%0A
+
@@ -2812,32 +2812,36 @@
+
except Exception
@@ -2863,32 +2863,36 @@
+
+
logger.error(%22er
@@ -2914,32 +2914,139 @@
ecrets %25s%22 %25 e)%0A
+ except TypeError as e:%0A logger.info('Pass on decryption. Only decrypt strings')%0A
return o
|
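# The patch catches the TypeError raised when 'BEGIN PGP' in obj is evaluated
# against a non-string leaf (an int, None, ...). An equivalent and arguably
# clearer guard is an explicit type check before the containment test; a
# sketch of the same branch, with a stand-in decrypt callable:
def maybe_decrypt(obj, decrypt):
    if isinstance(obj, str) and 'BEGIN PGP' in obj:
        return decrypt(obj)  # only strings can hold an armored PGP block
    return obj               # everything else passes through untouched

print(maybe_decrypt(42, str.upper))             # 42, no TypeError
print(maybe_decrypt('plain value', str.upper))  # unchanged string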
9ee0ad7dfad15e3d933b4d1c3fab508d99480748
|
Fix example.
|
examples/faithful.py
|
examples/faithful.py
|
import numpy as np
import matplotlib.pyplot as plt
from gmm.algorithm import GMM
# Read in dataset from file
with open('faithful.txt', 'rt') as f:
data = []
for row in f:
cols = row.strip('\r\n').split(' ')
data.append(np.fromiter(map(lambda x: float(x), cols), np.float))
data = np.array(data)
# Initialize GMM algorithm
means = np.array([np.array([4.0, 80], np.float), np.array([2.0, 55], np.float)])
covariances = np.array([np.identity(3), np.identity(2)])
mixing_probs = np.array([1/2, 1/2], np.float)
gmm_model = GMM(means, covariances, mixing_probs)
# Fit GMM to the data
gmm_model.fit(data)
# Cluster data
labelled = gmm_model.cluster(data)
# Plot clustered data with the location of Gaussian mixtures
plt.figure()
# Plot contours of Gaussian mixtures
for mean, cov in zip(gmm_model.means, gmm_model.covariances):
# Create grid
mean_x = mean[0]
std_x = np.sqrt(cov[0][0])
mean_y = mean[1]
std_y = np.sqrt(cov[1][1])
x = np.linspace(mean_x - 3*std_x, mean_x + 3*std_x, 100)
y = np.linspace(mean_y - 3*std_y, mean_y + 3*std_y, 100)
X, Y = np.meshgrid(x, y)
# Tabulate pdf values
Z = np.empty(X.shape, np.float)
for i in np.arange(X.shape[0]):
for j in np.arange(X.shape[1]):
v = np.array([X[i][j], Y[i][j]])
Z[i][j] = gmm_model.multivariate_normal_pdf(v, mean, cov)
# Plot contours
plt.contour(X, Y, Z)
# Plot features assigned to each Gaussian mixture
markers = ['o', '+']
colors = ['r', 'b']
for d, l in zip(data, labelled):
plt.scatter(d[0], d[1], color=colors[l], marker=markers[l])
plt.savefig('scatter_plot.pdf')
|
Python
| 0.000004
|
@@ -464,17 +464,17 @@
dentity(
-3
+2
), np.id
|
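# The one-character fix matters because the faithful data is 2-D: each
# Gaussian mixture component over (eruption time, waiting time) needs a 2x2
# covariance, and pairing a 3x3 with a 2x2 would break the pdf evaluation on
# a shape mismatch. The corrected initialisation:
import numpy as np

covariances = np.array([np.identity(2), np.identity(2)])  # one 2x2 per mixture
print(covariances.shape)  # (2, 2, 2)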
5437cdfe48c32cd100bb7fbbb2590fc7e11ab78b
|
read the docs theme modification
|
docs/readthedocs/conf.py
|
docs/readthedocs/conf.py
|
# -*- coding: utf-8 -*-
#
# pvtest documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 04 11:15:19 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pvtest'
copyright = u'2017, zied abaoub'
author = u'zied abaoub'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u''
# The full version, including alpha/beta/rc tags.
release = u''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pvtestdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pvtest.tex', u'pvtest Documentation',
u'zied abaoub', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pvtest', u'pvtest Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pvtest', u'pvtest Documentation',
author, 'pvtest', 'One line description of project.',
'Miscellaneous'),
]
|
Python
| 0
|
@@ -720,16 +720,42 @@
h('.'))%0A
+import sphinx_rtd_theme%0A%0A%0A
from rec
@@ -2812,25 +2812,32 @@
heme = '
-alabaster
+sphinx_rtd_theme
'%0A%0A# The
|
18d4aabe9b818b62800526470007220c3bcc49db
|
Don't fail if the last line of allfilters.dat is blank
|
fsps/filters.py
|
fsps/filters.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tools for working with the FSPS filter set.
This module uses filter information shipped with FSPS itself in
``$SPS_HOME/data``.
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["find_filter", "FILTERS", "get_filter", "list_filters"]
import os
import numpy as np
from pkg_resources import resource_stream, resource_exists
# Cache for $SPS_HOME/data/magsun.dat parsed by numpy
MSUN_TABLE = None
# Cache for $SPS_HOME/data/filter_lambda_eff.dat parsed by numpy
LAMBDA_EFF_TABLE = None
# Cache for bandpass transmission listings: a dictionary keyed by bandpass
# name with values of wavelength, transmission tuples.
TRANS_CACHE = None
class Filter(object):
def __init__(self, index, name, fullname):
self.index = index - 1
self.name = name.lower()
self.fullname = fullname
def __str__(self):
return "<Filter({0})>".format(self.name)
def __repr__(self):
return "<Filter({0})>".format(self.name)
@property
def msun_ab(self):
"""Solar absolute magnitude in Filter, AB zeropoint."""
# if self._msun_ab is None:
if MSUN_TABLE is None:
self._load_msun_table()
if self.index < MSUN_TABLE.shape[0]:
assert MSUN_TABLE[self.index, 0] == self.index + 1
return float(MSUN_TABLE[self.index, 1])
else:
return np.nan
@property
def msun_vega(self):
"""Solar absolute magnitude in Filter, VEGAMAG zeropoint."""
if MSUN_TABLE is None:
self._load_msun_table()
if self.index < MSUN_TABLE.shape[0]:
assert MSUN_TABLE[self.index, 0] == self.index + 1
return float(MSUN_TABLE[self.index, 2])
else:
return np.nan
@property
def lambda_eff(self):
"""Effective wavelength of Filter, in Angstroms."""
if LAMBDA_EFF_TABLE is None:
self._load_lambda_eff_table()
if self.index < LAMBDA_EFF_TABLE.shape[0]:
assert LAMBDA_EFF_TABLE[self.index, 0] == self.index + 1
return float(LAMBDA_EFF_TABLE[self.index, 1])
else:
return np.nan
@property
def transmission(self):
"""Returns filter transmission: a tuple of wavelength (Angstroms) and
an un-normalized transmission arrays.
"""
if TRANS_CACHE is None:
# Load the cache for all filters.
self._load_transmission_cache()
try:
return TRANS_CACHE[self.name]
except KeyError as e:
e.args += ("Could not find transmission data "
"for {0}".format(self.name))
raise
def _load_msun_table(self):
global MSUN_TABLE
MSUN_TABLE = np.loadtxt(
os.path.expandvars("$SPS_HOME/data/magsun.dat"))
def _load_lambda_eff_table(self):
global LAMBDA_EFF_TABLE
LAMBDA_EFF_TABLE = np.loadtxt(
os.path.expandvars("$SPS_HOME/data/filter_lambda_eff.dat"))
def _load_transmission_cache(self):
"""Parse the allfilters.dat file into the TRANS_CACHE."""
global TRANS_CACHE
path = os.path.expandvars("$SPS_HOME/data/allfilters.dat")
names = list_filters()
TRANS_CACHE = {}
filter_index = -1
lambdas, trans = [], []
with open(path) as f:
for line in f:
line = line.strip()
if line.startswith("#"):
# Close out filter
if filter_index > -1:
TRANS_CACHE[names[filter_index]] = (
np.array(lambdas), np.array(trans))
# Start new filter
filter_index += 1
lambdas, trans = [], []
else:
l, t = line.split()
lambdas.append(float(l))
trans.append(float(t))
def _load_filter_dict():
"""
Load the filter list, creating a dictionary of :class:`Filter` instances.
"""
# Load filter table from FSPS
filter_list_path = os.path.expandvars(
os.path.join("$SPS_HOME", "data", "FILTER_LIST"))
filters = {}
with open(filter_list_path) as f:
for line in f:
columns = line.strip().split()
fsps_id, key = columns[:2]
comment = ' '.join(columns[2:])
filters[key.lower()] = Filter(int(fsps_id), key, comment)
return filters
FILTERS = _load_filter_dict()
def find_filter(band):
"""
Find the FSPS name for a filter.
Usage:
::
>>> import fsps
>>> fsps.find_filter("F555W")
['wfpc2_f555w', 'wfc_acs_f555w']
:param band:
Something like the name of the band.
"""
b = band.lower()
possible = []
for k in FILTERS.keys():
if b in k:
possible.append(k)
return possible
def get_filter(name):
"""Returns the :class:`fsps.filters.Filter` instance associated with the
filter name.
:param name:
Name of the filter, as found with :func:`find_filter`.
"""
try:
return FILTERS[name.lower()]
except KeyError as e:
e.args += ("Filter {0} does not exist. "
"Try using fsps.find_filter('{0}').".format(name),)
raise
def list_filters():
"""Returns a list of all FSPS filter names.
Filters are sorted by their FSPS index.
"""
lst = [(name, f.index) for name, f in FILTERS.items()]
lst.sort(key=lambda x: x[1])
return [l[0] for l in lst]
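# Illustrative usage sketch (assumes $SPS_HOME points at an FSPS data
# checkout; the filter name is an example):
#     from fsps.filters import get_filter, list_filters
#     print(list_filters()[:3])
#     lam, trans = get_filter('v').transmission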
|
Python
| 0.999566
|
@@ -3887,32 +3887,61 @@
else:%0A
+ try:%0A
@@ -3956,32 +3956,36 @@
= line.split()%0A
+
@@ -4033,16 +4033,20 @@
+
trans.ap
@@ -4060,16 +4060,84 @@
oat(t))%0A
+ except(ValueError):%0A pass
%0A%0Adef _l
|
0e75bfd460444b0d967751414832112b8f35a56f
|
change global method_supported variable to Crypto class variable
|
fukei/crypto.py
|
fukei/crypto.py
|
#!/usr/bin/env python
import sys
import hashlib
import string
import struct
import logging
from utils import lazy_property
logger = logging.getLogger('crypto')
def random_string(length):
import M2Crypto.Rand
return M2Crypto.Rand.rand_bytes(length)
def get_table(key):
m = hashlib.md5()
m.update(key)
s = m.digest()
(a, b) = struct.unpack('<QQ', s)
table = [c for c in string.maketrans('', '')]
for i in xrange(1, 1024):
table.sort(lambda x, y: int(a % (ord(x) + i) - a % (ord(y) + i)))
encrypt_table = ''.join(table)
decrypt_table = string.maketrans(encrypt_table, string.maketrans('', ''))
return encrypt_table, decrypt_table
def EVP_BytesToKey(password, key_len, iv_len):
# equivalent to OpenSSL's EVP_BytesToKey() with count 1
# so that we make the same key and iv as nodejs version
# TODO: cache the results
m = []
i = 0
while len(''.join(m)) < (key_len + iv_len):
md5 = hashlib.md5()
data = password
if i > 0:
data = m[i - 1] + password
md5.update(data)
m.append(md5.digest())
i += 1
ms = ''.join(m)
key = ms[:key_len]
iv = ms[key_len:key_len + iv_len]
return (key, iv)
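# Illustrative: derive a 32-byte key and 16-byte IV, e.g. for 'aes-256-cfb'
# (the lengths come from the method_supported table below):
#     key, iv = EVP_BytesToKey('secret', 32, 16)
#     assert len(key) == 32 and len(iv) == 16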
method_supported = {
'aes-128-cfb': (16, 16),
'aes-192-cfb': (24, 16),
'aes-256-cfb': (32, 16),
'bf-cfb': (16, 8),
'camellia-128-cfb': (16, 16),
'camellia-192-cfb': (24, 16),
'camellia-256-cfb': (32, 16),
'cast5-cfb': (16, 8),
'des-cfb': (8, 8),
'idea-cfb': (16, 8),
'rc2-cfb': (16, 8),
'rc4': (16, 0),
'seed-cfb': (16, 16),
}
class Crypto(object):
method = None
key = None
encrypt_table = decrypt_table = None
@classmethod
def init_table(cls, key, method):
cls.method = None if method == 'table' else method.lower()
cls.key = key
if cls.method:
try:
__import__('M2Crypto')
except ImportError:
logger.error(
'M2Crypto is required to use encryption other than default method')
sys.exit(1)
if method:
cls.encrypt_table, cls.decrypt_table = get_table(key)
else:
try:
# make an Encryptor to test if the settings are OK
Crypto()
except Exception as e:
logger.error(e)
sys.exit(1)
def __init__(self):
self.iv = None
self.iv_sent = False
self.cipher_iv = ''
self.decipher = None
self.cipher = None
self.set_cipher()
@lazy_property
def cipher_len(self):
return method_supported.get(self.method, None)
@property
def iv_len(self):
return len(self.cipher_iv)
def set_cipher(self):
if self.method:
self.cipher = self.get_cipher(iv=random_string(32), op=1)
def get_cipher(self, iv=None, op=0):
import M2Crypto.EVP
password = self.key.encode('utf-8')
method = self.method
m = self.cipher_len
if m:
key, iv_ = EVP_BytesToKey(password, m[0], m[1])
if iv is None:
iv = iv_[:m[1]]
if op == 1:
# this iv is for cipher, not decipher
self.cipher_iv = iv[:m[1]]
return M2Crypto.EVP.Cipher(method.replace('-', '_'),
key, iv, op, key_as_bytes=0, d='md5', salt=None, i=1, padding=1)
logger.error('method %s not supported' % method)
sys.exit(1)
def encrypt(self, buf):
if len(buf) == 0:
return buf
if self.method is None:
return string.translate(buf, self.encrypt_table)
else:
if self.iv_sent:
return self.cipher.update(buf)
else:
self.iv_sent = True
return self.cipher_iv + self.cipher.update(buf)
def decrypt(self, buf):
if len(buf) == 0:
return buf
if self.method is None:
return string.translate(buf, self.decrypt_table)
else:
if self.decipher is None:
decipher_iv_len = self.cipher_len[1]
decipher_iv = buf[:decipher_iv_len]
self.decipher = self.get_cipher(iv=decipher_iv)
buf = buf[decipher_iv_len:]
if len(buf) == 0:
return buf
return self.decipher.update(buf)
def setup_table(key, method='table'):
Crypto.init_table(key, method)
def new_crypto():
return Crypto()
|
Python
| 0.000036
|
@@ -1230,32 +1230,61 @@
, iv)%0A%0A%0A
-method_supported
+%0A%0Aclass Crypto(object):%0A%0A METHOD_SUPPORTED
= %7B%0A
@@ -1640,33 +1640,14 @@
6),%0A
-%7D%0A%0A%0Aclass Crypto(object):
+ %7D%0A
%0A
@@ -2673,24 +2673,29 @@
urn
-method_supported
+self.METHOD_SUPPORTED
.get
|
dc03a20265c2fc611c7b2027e76d01a495ef2e7e
|
fix typo
|
examples/ssd/eval.py
|
examples/ssd/eval.py
|
from __future__ import division
import argparse
import sys
import time
import chainer
from chainer import iterators
from chainercv.datasets import VOCDetectionDataset
from chainercv.datasets import voc_detection_label_names
from chainercv.evaluations import eval_detection_voc
from chainercv.links import SSD300
from chainercv.links import SSD512
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model', choices=('ssd300', 'ssd512'), default='ssd300')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--batchsize', type=int, default=32)
args = parser.parse_args()
if args.model == 'ssd300':
model = SSD300(pretrained_model='voc0712')
elif args.model == 'ssd512':
model = SSD512(pretrained_model='voc0712')
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
model.use_preset('evaluate')
dataset = VOCDetectionDataset(
year='2007', split='test', use_difficult=True, return_difficult=True)
iterator = iterators.SerialIterator(
dataset, args.batchsize, repeat=False, shuffle=False)
start_time = time.time()
pred_bboxes = list()
pred_labels = list()
pred_scores = list()
gt_bboxes = list()
gt_labels = list()
gt_difficults = list()
while True:
try:
batch = next(iterator)
except StopIteration:
break
imgs, bboxes, labels, difficults = zip(*batch)
gt_bboxes.extend(bboxes)
gt_labels.extend(labels)
gt_difficults.extend(difficults)
bboxes, labels, scores = model.predict(imgs)
pred_bboxes.extend(bboxes)
pred_labels.extend(labels)
pred_scores.extend(scores)
fps = len(gt_bboxes) / (time.time() - start_time)
sys.stdout.write(
'\r{:d} of {:d} images, {:.2f} FPS'.format(
len(gt_bboxes), len(dataset), fps))
sys.stdout.flush()
eval_ = eval_detection_voc(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults,
use_07_metric=True)
print()
print('mAP: {:f}'.format(eval_['map']))
for l, name in enumerate(voc_detection_label_names):
if l in eval_:
print('{:s}: {;f}'.format(name, eval_[l]))
else:
print('{:s}: -'.format(name))
if __name__ == '__main__':
main()
|
Python
| 0.999991
|
@@ -2301,9 +2301,9 @@
%7D: %7B
-;
+:
f%7D'.
|
a27f561ff24b41f215bdb3e33cdcdfcc4c43bf93
|
fix imports; now crashes with TypeError
|
examples/testknn2.py
|
examples/testknn2.py
|
# -*- coding: utf-8 -*-
"""
KNN for Wind Power Prediction (SAES, global timeout)
====================================================
"""
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# Third Party
#from sklearn.neighbors import KNeighborsRegressor
from windml.datasets.nrel import NREL
from windml.mapping.power_mapping import PowerMapping
import math
from wdknn import KNN
# First Party
from metaopt.core.param.util import param
from metaopt.core.returns.util.decorator import maximize
from metaopt.core.returns.util.decorator import minimize
feature_window, horizon = 1, 3
train_step, test_step = 50, 50  # use only every n-th data point
park_id=NREL.park_id['lancaster']
windpark = NREL().get_windpark_nearest(park_id, 3, 2004, 2005)
target = windpark.get_target()
mapping = PowerMapping()
X = mapping.get_features_park(windpark, feature_window, horizon)
y = mapping.get_labels_turbine(target, feature_window, horizon)
train_to, test_to = int(math.floor(len(X) * 0.5)), len(X)
X_train=X[:train_to:train_step]
y_train=y[:train_to:train_step]
X_test=X[train_to:test_to:test_step]
y_test=y[train_to:test_to:test_step]
@minimize("Score")
#@param.float("C", interval=[1, 1000], step=1.0)
#@param.float("C_exp", interval=[0, 5], step=1)
#@param.float("gamma", interval=[0.0001, 1.0], step=0.00001)
#@param.float("gamma_exp", interval=[-5, 0], step=1)
@param.float("a",interval=[5,100.0],step=50)
@param.float("b",interval=[5,100.0],step=50)
@param.float("c",interval=[5,100.0],step=50)
@param.float("d",interval=[5,100.0],step=50)
def f(a,b,c,d):
clf = KNN(n_neighbors=5, weights=[a,b,c,d])
clf.fit(X_train, y_train)
return clf.score(X_test, y_test)
def main():
from metaopt.core.main import optimize
from metaopt.core.main import custom_optimize
from metaopt.optimizer.saes import SAESOptimizer
from metaopt.optimizer.gridsearch import GridSearchOptimizer
from metaopt.invoker.dualthread import DualThreadInvoker
from metaopt.invoker.pluggable import PluggableInvoker
from metaopt.plugins.print import PrintPlugin
from metaopt.plugins.visualize import VisualizeLandscapePlugin
from metaopt.plugins.visualize import VisualizeBestFitnessPlugin
timeout = 20
optimizer = SAESOptimizer(mu=5, lamb=5)
#optimizer = GridSearchOptimizer()
visualize_landscape_plugin = VisualizeLandscapePlugin()
visualize_best_fitness_plugin = VisualizeBestFitnessPlugin()
plugins = [
PrintPlugin(),
visualize_landscape_plugin,
visualize_best_fitness_plugin
]
#invoker = PluggableInvoker(invoker=DualThreadInvoker(),plugins=plugins)
#optimum = custom_optimize(invoker=invoker,f=f, timeout=timeout, optimizer=optimizer)
optimum = optimize(f=f, timeout=timeout, optimizer=optimizer,
plugins=plugins)
print("The optimal parameters are %s." % str(optimum))
# visualize_landscape_plugin.show_surface_plot()
# visualize_landscape_plugin.show_image_plot()
#
visualize_best_fitness_plugin.show_fitness_invocations_plot()
# visualize_best_fitness_plugin.show_fitness_time_plot()
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -420,23 +420,8 @@
KNN
-%0A%0A# First Party
%0Afro
@@ -439,137 +439,139 @@
ore.
-param.util import param%0Afrom metaopt.core.returns.util.decorator import maximize%0Afrom metaopt.core.returns.util.decorator
+returnspec.util.decorator import minimize%0Afrom metaopt.core.paramspec.util import param%0Afrom metaopt.core.optimize.optimize
import
mini
@@ -570,18 +570,33 @@
ort
-min
+opt
imize%0A%0A
+# First Party%0A%0A
feat
@@ -621,17 +621,16 @@
n = 1, 3
-
%0Atrain_s
@@ -1635,17 +1635,16 @@
,b,c,d%5D)
-
%0A clf
@@ -1721,101 +1721,8 @@
():%0A
- from metaopt.core.main import optimize%0A from metaopt.core.main import custom_optimize%0A
@@ -1770,16 +1770,17 @@
timizer%0A
+%0A
from
@@ -1792,72 +1792,17 @@
opt.
-optimizer.gridsearch import GridSearchOptimizer%0A%0A from metaop
+concurren
t.in
@@ -1860,16 +1860,27 @@
metaopt.
+concurrent.
invoker.
@@ -1941,23 +1941,29 @@
ugin
-s
.print
+.status
import
Prin
@@ -1958,16 +1958,22 @@
import
+Status
PrintPlu
@@ -1991,33 +1991,32 @@
m metaopt.plugin
-s
.visualize impor
@@ -2000,32 +2000,46 @@
.plugin.visualiz
+ation.landscap
e import Visuali
@@ -2079,17 +2079,16 @@
t.plugin
-s
.visuali
@@ -2088,17 +2088,34 @@
visualiz
-e
+ation.best_fitness
import
@@ -2158,17 +2158,16 @@
out = 20
-
%0A opt
@@ -2393,16 +2393,22 @@
+Status
PrintPlu
|
528b10713d98e2603fad62c3fb252464c08896f0
|
make '/mc_tracks' an absolute path to avoid confusion
|
examples/tonphdf5.py
|
examples/tonphdf5.py
|
#!/usr/bin/env python
"""
Converts hits in a Jpp-ROOT file to HDF5.
"""
from km3pipe.pumps.aanet import AanetPump
from km3pipe import Pipeline, Module
import sys
import pandas as pd
import h5py
if len(sys.argv) < 3:
sys.exit('Usage: {0} FILENAME.root OUTPUTFILENAME.h5'.format(sys.argv[0]))
FILEPATH = sys.argv[1]
OUTPUTFILEPATH = sys.argv[2]
class HDF5Sink(Module):
def __init__(self, **context):
super(self.__class__, self).__init__(**context)
self.filename = self.get('filename') or 'dump.h5'
self.hits = {}
self.mc_tracks = {}
self.index = 0
print("Processing {0}...".format(self.filename))
def process(self, blob):
try:
self._add_hits(blob['Hits'])
except KeyError:
print("No hits found. Skipping...")
try:
self._add_mc_tracks(blob['MCTracks'])
except KeyError:
print("No MC tracks found. Skipping...")
self.index += 1
return blob
def _add_hits(self, hits):
for hit in hits:
self.hits.setdefault('event_id', []).append(self.index)
self.hits.setdefault('id', []).append(hit.id)
self.hits.setdefault('pmt_id', []).append(hit.pmt_id)
self.hits.setdefault('time', []).append(hit.t)
self.hits.setdefault('tot', []).append(ord(hit.tot))
self.hits.setdefault('triggered', []).append(bool(hit.trig))
self.hits.setdefault('dom_id', []).append(hit.dom_id)
self.hits.setdefault('channel_id', []).append(ord(hit.channel_id))
def _add_mc_tracks(self, mc_tracks):
for mc_track in mc_tracks:
self.mc_tracks.setdefault('event_id', []).append(self.index)
self.mc_tracks.setdefault('id', []).append(mc_track.id)
self.mc_tracks.setdefault('x', []).append(mc_track.pos.x)
self.mc_tracks.setdefault('y', []).append(mc_track.pos.y)
self.mc_tracks.setdefault('z', []).append(mc_track.pos.z)
self.mc_tracks.setdefault('dx', []).append(mc_track.dir.x)
self.mc_tracks.setdefault('dy', []).append(mc_track.dir.y)
self.mc_tracks.setdefault('dz', []).append(mc_track.dir.z)
self.mc_tracks.setdefault('time', []).append(mc_track.t)
self.mc_tracks.setdefault('energy', []).append(mc_track.E)
self.mc_tracks.setdefault('type', []).append(mc_track.type)
def finish(self):
h5 = h5py.File(self.filename, 'w')
if self.hits:
df = pd.DataFrame(self.hits)
rec = df.to_records(index=False)
h5.create_dataset('/hits', data=rec)
print("Finished writing hits in {0}".format(self.filename))
if self.mc_tracks:
df = pd.DataFrame(self.mc_tracks)
rec = df.to_records(index=False)
h5.create_dataset('mc_tracks', data=rec)
print("Finished writing MC tracks in {0}".format(self.filename))
h5.close()
pipe = Pipeline()
pipe.attach(AanetPump, filename=FILEPATH)
pipe.attach(HDF5Sink, filename=OUTPUTFILEPATH)
pipe.drain()
|
Python
| 0.000004
|
@@ -2882,16 +2882,17 @@
ataset('
+/
mc_track
|
2c09c700b524a7272436feff19c4128ca3211725
|
Update word2vec.py
|
examples/word2vec.py
|
examples/word2vec.py
|
import copy
import gensim
import logging
import pyndri
import sys
logging.basicConfig(level=logging.INFO)
if len(sys.argv) <= 1:
logging.error('Usage: python {0} <path-to-indri-index>'.format(sys.argv[0]))
sys.exit(0)
logging.info('Initializing word2vec.')
word2vec_init = gensim.models.Word2Vec(
size=300, # Embedding size
window=5, # One-sided window size
sg=True, # Skip-gram.
min_count=5, # Minimum word frequency.
sample=1e-3, # Sub-sample treshold.
hs=False, # Hierarchical softmax.
negative=10, # Number of negative examples.
iter=1, # Number of iterations.
workers=8, # Number of workers.
)
with pyndri.open(sys.argv[1]) as index:
logging.info('Loading vocabulary.')
dictionary = pyndri.extract_dictionary(index)
sentences = pyndri.compat.IndriSentences(index, dictionary)
logging.info('Constructing word2vec vocabulary.')
# Build vocab.
word2vec_init.build_vocab(sentences, trim_rule=None)
models = [word2vec_init]
for epoch in range(1, 5 + 1):
logging.info('Epoch %d', epoch)
model = copy.deepcopy(models[-1])
model.train(sentences)
models.append(model)
logging.info('Trained models: %s', models)
|
Python
| 0.000014
|
@@ -476,16 +476,17 @@
sample t
+h
reshold.
|
2ece6032b2344e3cf6304a757714d0ecf5015324
|
split drive controller to allow other PWM interfaces, e.g. Raspy Juice
|
fishpi/vehicle/test_drive.py
|
fishpi/vehicle/test_drive.py
|
#!/usr/bin/python
#
# FishPi - An autonomous drop in the ocean
#
# Simple test of PWM motor and servo drive
#
import raspberrypi
from time import sleep
from drive_controller import DriveController
if __name__ == "__main__":
print "testing drive controller..."
drive = DriveController(debug=True, i2c_bus=raspberrypi.i2c_bus())
print "run full ahead for 5 sec..."
drive.set_throttle(1.0)
sleep(5)
print "run 50% ahead for 5 sec..."
drive.set_throttle(0.5)
sleep(5)
print "run 0% for 5 sec..."
drive.set_throttle(0.0)
sleep(5)
print "run 50% reverse for 5 sec"
drive.set_throttle(-0.5)
sleep(5)
print "run full reverse for 5 sec"
drive.set_throttle(-1.0)
sleep(5)
print "and back to neutral..."
drive.set_throttle(0.0)
sleep(5)
print "check out of bounds errors"
try:
drive.set_throttle(15.0)
except ValueError:
print "caught 15"
try:
drive.set_throttle(-10.0)
except ValueError:
print "caught -10"
# test steering
print "steer hard to port for 5 sec"
drive.set_heading(-0.785398)
sleep(5)
print "steer to port for 5 sec"
drive.set_heading(-0.3927)
sleep(5)
print "and back to neutral..."
drive.set_heading(0.0)
sleep(5)
print "steer to starboard for 5 sec"
drive.set_heading(0.3927)
sleep(5)
print "steer hard to starboard for 5 sec"
drive.set_heading(0.785398)
sleep(5)
print "and back to neutral..."
drive.set_heading(0.0)
sleep(5)
|
Python
| 0
|
@@ -177,16 +177,24 @@
import
+Adafruit
DriveCon
@@ -281,16 +281,24 @@
drive =
+Adafruit
DriveCon
|
3566c7689f59715f2d58886f52d3dd0c00a0ce4e
|
change manage.py
|
maili-develop/manage.py
|
maili-develop/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "maili.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
Python
| 0.000002
|
@@ -64,16 +64,130 @@
ain__%22:%0A
+ if 'test' in sys.argv:%0A os.environ.setdefault(%22DJANGO_SETTINGS_MODULE%22, %22test.settings%22)%0A else:%0A
os.e
|
49f05720e0ca80b800b72936a06ea8e3d5bc5bdb
|
use DerivO3CPU model instead of MinorCPU model
|
configs/fault_injector/injector_system.py
|
configs/fault_injector/injector_system.py
|
# import the m5 (gem5) library created when gem5 is built
import m5
# import all of the SimObjects
from m5.objects import *
from FaultParser import *
import argparse
#parse and save the arguments
parser = argparse.ArgumentParser(description='Gem5')
parser.add_argument('-fe', '--fault-enabled', dest='faultEnabled',
action='store_true',
help='It is true if the BPU is faulted')
parser.set_defaults(faultEnabled=False)
parser.add_argument('-b', '--benchmark', type=str, dest='benchmark',
help='Benchmark set on which to run the simulation')
parser.add_argument('-l', '--label', type=str, dest='label',
help='Fault name')
parser.add_argument('-sb', '--stuck-bit', type=int, dest='stuckBit',
help='Stuck bit to 1 or 0')
parser.add_argument('-f', '--field', type=int, dest='field',
help='Field where to inject the fault')
parser.add_argument('-e', '--entry', type=int, dest='entry',
help='Entry where to inject the fault')
parser.add_argument('-bp', '--bit-position', type=int, dest='bitPosition',
help='Bit position of the field where to inject the fault')
parser.add_argument('-tb', '--tick-begin', type=int, dest='tickBegin',
help='Inject fault at this tick')
parser.add_argument('-te', '--tick-end', type=int, dest='tickEnd',
help='Remove fault at this tick')
args = parser.parse_args()
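# Illustrative invocation (paths and fault values are made up; see the
# argparse options above for the meaning of each flag):
#   gem5.opt configs/fault_injector/injector_system.py \
#       -b "tests/test-progs/hello/bin/x86/linux/hello" \
#       -fe -l btb -sb 1 -f 0 -e 12 -bp 3 -tb 0 -te -1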
# create the system we are going to simulate
system = System()
# Set the clock fequency of the system (and all of its children)
system.clk_domain = SrcClockDomain()
system.clk_domain.clock = '1GHz'
system.clk_domain.voltage_domain = VoltageDomain()
# Set up the system
system.mem_mode = 'timing' # Use timing accesses
system.mem_ranges = [AddrRange('512MB')] # Create an address range
# Create a simple CPU
system.cpu = MinorCPU()
# Create a memory bus, a coherent crossbar, in this case
system.membus = SystemXBar()
# Hook the CPU ports up to the membus
system.cpu.icache_port = system.membus.slave
system.cpu.dcache_port = system.membus.slave
# create the interrupt controller for the CPU and connect to the membus
system.cpu.createInterruptController()
# Create a DDR3 memory controller and connect it to the membus
system.mem_ctrl = DDR3_1600_x64()
system.mem_ctrl.range = system.mem_ranges[0]
system.mem_ctrl.port = system.membus.master
# Connect the system up to the membus
system.system_port = system.membus.slave
# Create a process for a simple "Hello World" application
process = LiveProcess()
# Set the command
process.cmd = args.benchmark.split()
# Set the cpu to use the process as its workload and create thread contexts
system.cpu.workload = process
system.cpu.createThreads()
# set up the root SimObject and start the simulation
root = Root(full_system = False, system = system)
system.cpu.branchPred = GShareBP();
#run all the simulation
if args.faultEnabled:
system.cpu.branchPred.faultEnabled = True
system.cpu.branchPred.faultLabel = args.label
system.cpu.branchPred.faultStuckBit = args.stuckBit
system.cpu.branchPred.faultField = args.field
system.cpu.branchPred.faultEntry = args.entry
system.cpu.branchPred.faultBitPosition = args.bitPosition
system.cpu.branchPred.faultPermanent = \
(args.tickBegin == 0 and args.tickEnd == -1)
system.cpu.branchPred.faultTickBegin = args.tickBegin
system.cpu.branchPred.faultTickEnd = args.tickEnd
else:
system.cpu.branchPred.faultEnabled = False
m5.instantiate()
print "Beginning simulation!"
exit_event = m5.simulate()
print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())
|
Python
| 0
|
@@ -117,16 +117,46 @@
mport *%0A
+from m5.util import addToPath%0A
from Fau
@@ -190,16 +190,162 @@
gparse%0A%0A
+import os%0A%0A# Add configs/common to execution path in order to import Caches classes%0AaddToPath(os.path.join('..', 'common'))%0Afrom Caches import *%0A%0A
#parse a
@@ -2109,13 +2109,15 @@
u =
-Minor
+DerivO3
CPU(
@@ -2126,219 +2126,226 @@
%0A# C
-reate a memory bus, a coherent crossbar, in this case%0Asystem.membus = SystemXBar()%0A%0A# Hook the CPU ports up to the membus%0Asystem.cpu.icache_port = system.membus.slave%0Asystem.cpu.dcache_port = system.membus.slave
+aches%0Aicache = L1_ICache(size=%224MB%22)%0Adcache = L1_DCache(size=%224MB%22)%0A%0Asystem.cpu.addPrivateSplitL1Caches(icache, dcache, None, None)%0A%0A# Create a memory bus, a coherent crossbar, in this case%0Asystem.membus = SystemXBar()
%0A%0A#
@@ -2721,16 +2721,59 @@
.slave%0A%0A
+system.cpu.connectAllPorts(system.membus)%0A%0A
# Create
|
cf79339061669bace0c97ca3e3452b27b77ad8da
|
Fix region not being passed to JAAS
|
conjureup/controllers/bootstrap/common.py
|
conjureup/controllers/bootstrap/common.py
|
from pathlib import Path
from conjureup import events, juju
from conjureup.app_config import app
from conjureup.models.step import StepModel
from conjureup.telemetry import track_event
class BaseBootstrapController:
msg_cb = NotImplementedError()
def is_existing_controller(self):
controllers = juju.get_controllers()['controllers']
return app.provider.controller in controllers
async def run(self):
await app.provider.configure_tools()
if app.is_jaas or self.is_existing_controller():
await self.do_add_model()
else:
await self.do_bootstrap()
async def do_add_model(self):
self.emit('Creating Juju model.')
await juju.add_model(app.provider.model,
app.provider.controller,
app.provider.cloud,
app.provider.credential)
self.emit('Juju model created.')
events.Bootstrapped.set()
async def do_bootstrap(self):
await self.pre_bootstrap()
self.emit('Bootstrapping Juju controller.')
track_event("Juju Bootstrap", "Started", "")
cloud_with_region = app.provider.cloud
if app.provider.region:
cloud_with_region = '/'.join([app.provider.cloud,
app.provider.region])
success = await juju.bootstrap(app.provider.controller,
cloud_with_region,
app.provider.model,
credential=app.provider.credential)
if not success:
log_file = '{}-bootstrap.err'.format(app.provider.controller)
log_file = Path(app.config['spell-dir']) / log_file
err_log = log_file.read_text('utf8').splitlines()
app.log.error("Error bootstrapping controller: "
"{}".format(err_log))
app.sentry.context.merge({'extra': {'err_log': err_log[-400:]}})
raise Exception('Unable to bootstrap (cloud type: {})'.format(
app.provider.cloud_type))
self.emit('Bootstrap complete.')
track_event("Juju Bootstrap", "Done", "")
await juju.login() # login to the newly created (default) model
step = StepModel({},
filename='00_post-bootstrap',
name='post-bootstrap')
await step.run(self.msg_cb, 'Juju Post-Bootstrap')
events.Bootstrapped.set()
async def pre_bootstrap(self):
""" runs pre bootstrap script if exists
"""
step = StepModel({},
filename='00_pre-bootstrap',
name='pre-bootstrap')
await step.run(self.msg_cb)
def emit(self, msg):
app.log.info(msg)
self.msg_cb(msg)
|
Python
| 0
|
@@ -692,24 +692,229 @@
ju model.')%0A
+ cloud_with_region = app.provider.cloud%0A if app.provider.region:%0A cloud_with_region = '/'.join(%5Bapp.provider.cloud,%0A app.provider.region%5D)%0A
awai
@@ -942,32 +942,32 @@
provider.model,%0A
-
@@ -1033,34 +1033,33 @@
-app.provider.cloud
+cloud_with_region
,%0A
|
ed267933edf8b6e2e2b63d11ece7457943fe9646
|
Add documentation for powerline.lib.shell.run_cmd
|
powerline/lib/shell.py
|
powerline/lib/shell.py
|
# vim:fileencoding=utf-8:noet
from __future__ import absolute_import, unicode_literals, division, print_function
from subprocess import Popen, PIPE
from locale import getlocale, getdefaultlocale, LC_MESSAGES
def _get_shell_encoding():
return getlocale(LC_MESSAGES)[1] or getdefaultlocale()[1] or 'utf-8'
def run_cmd(pl, cmd, stdin=None):
try:
p = Popen(cmd, stdout=PIPE, stdin=PIPE)
except OSError as e:
pl.exception('Could not execute command ({0}): {1}', e, cmd)
return None
else:
stdout, err = p.communicate(stdin)
stdout = stdout.decode(_get_shell_encoding())
return stdout.strip()
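# Illustrative call (the command is an example; returns None and logs via the
# given PowerlineLogger if the command cannot be executed):
#     out = run_cmd(pl, ['uname', '-s'])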
def asrun(pl, ascript):
'''Run the given AppleScript and return the standard output and error.'''
return run_cmd(pl, ['osascript', '-'], ascript)
def readlines(cmd, cwd):
'''Run command and read its output, line by line
:param list cmd:
Command which will be run.
:param str cwd:
Working directory of the command which will be run.
'''
p = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE, cwd=cwd)
encoding = _get_shell_encoding()
p.stderr.close()
with p.stdout:
for line in p.stdout:
yield line[:-1].decode(encoding)
|
Python
| 0.000001
|
@@ -338,16 +338,313 @@
=None):%0A
+%09'''Run command and return its stdout, stripped%0A%0A%09If running command fails returns None and logs failure to %60%60pl%60%60 argument.%0A%0A%09:param PowerlineLogger pl:%0A%09%09Logger used to log failures.%0A%09:param list cmd:%0A%09%09Command which will be run.%0A%09:param str stdin:%0A%09%09String passed to command. May be None.%0A%09'''%0A
%09try:%0A%09%09
|
be8fd3f10dbfd8e2099a046340a8e51758e60bd5
|
Add signout view so we can sign out (by entering the URL manually)
|
makerbase/views/auth.py
|
makerbase/views/auth.py
|
import json
from urllib import urlencode
from urlparse import parse_qs, urlsplit, urlunsplit
from flask import redirect, request, url_for
from flaskext.login import LoginManager, login_user
import requests
from makerbase import app
from makerbase.models import User
login_manager = LoginManager()
login_manager.setup_app(app, add_context_processor=True)
login_manager.user_loader(User.get)
@app.route('/signin/github')
def signin_github():
urlparts = urlsplit(request.base_url)
params = {
'client_id': app.config['GITHUB_CLIENT_ID'],
'redirect_url': urlunsplit((urlparts.scheme, urlparts.netloc, url_for('complete_github'), None, None)),
'scope': '',
}
redirect_url = 'https://github.com/login/oauth/authorize?%s' % urlencode(params)
return redirect(redirect_url)
@app.route('/complete/github')
def complete_github():
try:
code = request.args.get('code')
except KeyError:
raise # TODO
params = {
'client_id': app.config['GITHUB_CLIENT_ID'],
'client_secret': app.config['GITHUB_SECRET'],
'code': code,
}
token_resp = requests.post('https://github.com/login/oauth/access_token', data=params)
token_params = parse_qs(token_resp.content)
access_token = token_params['access_token']
user_resp = requests.get('https://api.github.com/user', data={'access_token': access_token})
github_user = json.loads(user_resp.content)
userid = u"github:%s" % github_user['login']
user = User.get(userid)
if user is None:
user = User(userid)
user.name = github_user['name']
user.avatar_url = github_user['avatar_url']
user.profile_url = github_user['html_url']
user.save()
login_user(user)
return redirect(url_for('home'))
|
Python
| 0
|
@@ -183,16 +183,29 @@
gin_user
+, logout_user
%0Aimport
@@ -818,24 +818,119 @@
rect_url)%0A%0A%0A
+@app.route('/signout')%0Adef signout():%0A logout_user()%0A return redirect(url_for('home'))%0A%0A%0A
@app.route('
|
df27f7ad62eebf29b42bfa9b2bce7d73739c4a8e
|
Fix minor template modernization bug
|
enhydris/conf/settings.py
|
enhydris/conf/settings.py
|
# Enhydris settings for {{ project_name }} project.
#
# Generated by 'enhydris-admin newinstance' using Enhydris {{ enhydris_version }}
# and Django {{ django_version }}.
#
# For more information on this file, see
# http://enhydris.readthedocs.org/en/{{ enhydris_docs_version }}/general/install.html#settings-reference
from enhydris.settings.base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# SECURITY_WARNING: Keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# List the domains through which this instance will be accessible. Enhydris
# will refuse to serve other domains.
ALLOWED_HOSTS = ['example.com']
# Administrators who will be notified whenever attention is needed.
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Whenever the system emails users, this is the address from which the emails
# will appear to have been sent.
DEFAULT_FROM_EMAIL = 'noreply@example.com'
# Whenever the system emails administrators, this is the address from which the
# emails will appear to have been sent.
SERVER_EMAIL = DEFAULT_FROM_EMAIL
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'enhydris_db',
'USER': 'enhydris_user',
'PASSWORD': 'topsecret',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Default system timezone, such as 'Europe/Athens'.
TIME_ZONE = 'UTC'
# You should probably leave this as it is. See the installation instructions if
# in doubt.
SITE_ID = 1
# Where static files will be stored. By "static" files we mean css, javascript,
# and images of the skin. These files will be copied there at installation
# time when executing "python manage.py collectstatic".
STATIC_ROOT = '/var/cache/enhydris/static/'
# Where media files will be stored. "media" files are static files uploaded
# by users, e.g. images and videos of stations. The web server must be
# configured to map the URL /enhydris-media/ to that directory. See the
# installation instructions for more information.
MEDIA_ROOT = '/var/lib/enhydris/media/'
# The web server must be configured to map the URLs below to the directories
# specified by STATIC_ROOT and MEDIA_ROOT above. Enhydris will use the settings
# below to create appropriate links.
STATIC_URL = '/enhydris-static/'
MEDIA_URL = '/enhydris-media/'
# Mail server settings; used whenever the system needs to email users or
# admins.
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST = 'smtp.example.com'
EMAIL_HOST_USER = 'emailuser'
EMAIL_HOST_PASSWORD = 'topsecret'
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# The above are only the settings that are absolutely essential. Check the
# installation instructions for more settings that you can set.
|
Python
| 0
|
@@ -433,31 +433,8 @@
True
-%0ATEMPLATE_DEBUG = DEBUG
%0A%0A#
|
4464919c5114193179490c151844fd771bfd880b
|
fix setup without flask installed
|
flask_ecstatic.py
|
flask_ecstatic.py
|
"""Serves static files with optional directory index.
Files in static folder are automatically served on static URL by Flask.
See http://flask.pocoo.org/docs/0.10/api/#application-object.
It's recommended to specify the static folder and URL path directly on the Flask application object,
unless you need additional static folders, or have multiple route handlers for the URL path,
e.g. when serving static files on root URL ('') for any path unmatched with previous routes.
"""
__all__ = 'add'.split()
__version__ = '0.1.1'
import os
from flask import send_from_directory
def add(app, url = None, path = None, endpoint=None, index='index.html'):
"""Adds static files endpoint with optional directory index."""
url = url or app.static_url_path or ''
path = os.path.abspath(path or app.static_folder or '.')
endpoint = endpoint or 'static_' + os.path.basename(path)
if path == app.static_folder:
if url != app.static_url_path:
raise ValueError('Files in `{}` path are automatically served on `{}` URL by Flask.'
' Use different path for serving them at `{}` URL'.format(path, app.static_url_path, url))
else:
@app.route(url + '/<path:filename>', endpoint = endpoint)
def static_files(filename):
return send_from_directory(path, filename)
if index:
@app.route(url + '/', endpoint = endpoint + '_index')
def static_index():
return send_from_directory(path, index)
if url:
@app.route(url, endpoint = endpoint + '_index_bare')
def static_index_bare():
return send_from_directory(path, index)
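# A minimal usage sketch (URL and path are illustrative):
#     from flask import Flask
#     import flask_ecstatic
#
#     app = Flask(__name__)
#     flask_ecstatic.add(app, url='/files', path='public')
#     app.run()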
|
Python
| 0.000001
|
@@ -527,46 +527,8 @@
t os
-%0Afrom flask import send_from_directory
%0A%0Ade
@@ -596,16 +596,42 @@
ex.html'
+, send_from_directory=None
):%0A %22
@@ -693,16 +693,96 @@
dex.%22%22%22%0A
+%0A if not send_from_directory:%0A from flask import send_from_directory%0A%0A
url
|
bbb0325ab44f57cbc728f3eae16eaec887e87517
|
Add track_opens parameter to Message class
|
flask_pystmark.py
|
flask_pystmark.py
|
from flask import current_app
from pystmark import (send, send_batch, get_delivery_stats, get_bounces,
get_bounce, get_bounce_dump, get_bounce_tags,
activate_bounce, Message as _Message)
from __about__ import __version__, __title__, __description__
__all__ = ['__version__', '__title__', '__description__', 'Pystmark',
'Message']
class Pystmark(object):
''' A wrapper around the Simple API of pystmark.
Refer to http://pystmark.readthedocs.org/en/latest/api.html#simple-api for
more details.
:param app: Flask app to initialize with. Defaults to `None`
'''
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
''' Initialize Pystmark with a Flask app '''
app.pystmark = self
def send(self, message, **request_args):
'''Send a message.
:param message: Message to send.
:type message: `dict` or :class:`Message`
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`pystmark.SendResponse`
'''
return self._pystmark_call(send, message, **request_args)
def send_batch(self, messages, **request_args):
'''Send a batch of messages.
:param messages: Messages to send.
:type message: A list of `dict` or :class:`Message`
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`pystmark.BatchSendResponse`
'''
return self._pystmark_call(send_batch, messages, **request_args)
def get_delivery_stats(self, **request_args):
'''Get delivery stats for your Postmark account.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`pystmark.DeliveryStatsResponse`
'''
return self._pystmark_call(get_delivery_stats, **request_args)
def get_bounces(self, **request_args):
'''Get a paginated list of bounces.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`pystmark.BouncesResponse`
'''
return self._pystmark_call(get_bounces, **request_args)
def get_bounce_tags(self, **request_args):
'''Get a list of tags for bounces associated with your Postmark server.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`pystmark.BounceTagsResponse`
'''
return self._pystmark_call(get_bounce_tags, **request_args)
def get_bounce(self, bounce_id, **request_args):
'''Get a single bounce.
:param bounce_id: The bounce's id. Get the id with :func:`get_bounces`.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`pystmark.BounceResponse`
'''
return self._pystmark_call(get_bounce, bounce_id, **request_args)
def get_bounce_dump(self, bounce_id, **request_args):
'''Get the raw email dump for a single bounce.
:param bounce_id: The bounce's id. Get the id with :func:`get_bounces`.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`pystmark.BounceDumpResponse`
'''
return self._pystmark_call(get_bounce_dump, bounce_id, **request_args)
def activate_bounce(self, bounce_id, **request_args):
'''Activate a deactivated bounce.
:param bounce_id: The bounce's id. Get the id with :func:`get_bounces`.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`pystmark.BounceActivateResponse`
'''
return self._pystmark_call(activate_bounce, bounce_id, **request_args)
def _pystmark_call(self, method, *args, **kwargs):
''' Wraps a call to the pystmark Simple API, adding configured
settings
'''
kwargs = self._apply_config(**kwargs)
return method(*args, **kwargs)
@staticmethod
def _apply_config(**kwargs):
'''Adds the current_app's pystmark configuration to a dict. If a
configuration value has been specified in \*\*kwargs, it will not
be overridden by the app's configuration.
:param kwargs: Keyword arguments to be passed to the pystmark Simple
API
'''
kwargs = dict(**kwargs)
kwargs.setdefault('api_key', current_app.config['PYSTMARK_API_KEY'])
kwargs.setdefault('secure', current_app.config.get('PYSTMARK_HTTPS',
True))
kwargs.setdefault('test', current_app.config.get('PYSTMARK_TEST_API',
False))
return kwargs
class Message(_Message):
''' A container for message(s) to send to the Postmark API.
You can populate this message with defaults for initializing an
:class:`Interface` from the pystmark library. The message will be combined
with the final message and verified before transmission.
Refer to http://pystmark.readthedocs.org/en/latest/api.html#message-object
for more details.
:param sender: Email address of the sender. Defaults to
PYSTMARK_DEFAULT_SENDER if defined.
:param to: Destination email address.
:param cc: A list of cc'd email addresses.
:param bcc: A list of bcc'd email address.
:param subject: The message subject.
:param tag: Tag your emails with this.
:param html: HTML body content.
:param text: Text body content.
:param reply_to: Email address to reply to. Defaults to
PYSTMARK_DEFAULT_REPLY_TO, if defined.
:param headers: Additional headers to include with the email. If you do
not have the headers formatted for the Postmark API, use
:meth:`Message.add_header`. Defaults to PYSTMARK_DEFAULT_HEADERS, if
defined.
:type headers: A list of `dict`, each with the keys 'Name' and
'Value'.
:param attachments: Attachments to include with the email. If you do not
have the attachments formatted for the Postmark API, use
:meth:`Message.attach_file` or :meth:`Message.attach_binary`.
:type attachments: A list of `dict`, each with the keys 'Name',
'Content' and 'ContentType'.
:param verify: Verify the message when initialized.
Defaults to PYSTMARK_VERIFY_MESSAGES if provided, otherwise `False`.
'''
def __init__(self, sender=None, to=None, cc=None, bcc=None, subject=None,
tag=None, html=None, text=None, reply_to=None, headers=None,
attachments=None, verify=None):
if sender is None:
sender = current_app.config.get('PYSTMARK_DEFAULT_SENDER')
if reply_to is None:
reply_to = current_app.config.get('PYSTMARK_DEFAULT_REPLY_TO')
if headers is None:
headers = current_app.config.get('PYSTMARK_DEFAULT_HEADERS')
if verify is None:
verify = current_app.config.get('PYSTMARK_VERIFY_MESSAGES', False)
super(Message, self).__init__(sender=sender, to=to, cc=cc, bcc=bcc,
subject=subject, tag=tag, html=html,
text=text, reply_to=reply_to,
headers=headers, attachments=attachments,
verify=verify)
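# A minimal usage sketch (config values are placeholders; the calls read
# current_app.config, so run them inside an app context):
#     app = Flask(__name__)
#     app.config['PYSTMARK_API_KEY'] = 'your-api-key'
#     pystmark = Pystmark(app)
#     msg = Message(to='user@example.com', subject='Hi', text='Hello')
#     response = pystmark.send(msg)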
|
Python
| 0
|
@@ -6846,24 +6846,42 @@
verify=None
+, track_opens=None
):%0A i
@@ -7630,10 +7630,35 @@
y=verify
+, track_opens=track_opens
)%0A
|
b0d531b744f17d1204c2eb3a036b35f12d70fc1f
|
Update corpus save function definition
|
src/project/corpus.py
|
src/project/corpus.py
|
import sys
import copy
from os import listdir
from os.path import isdir, isfile, join
from gensim import models
from gensim.interfaces import TransformationABC
from gensim.corpora import Dictionary, MmCorpus, TextCorpus
class Corpus(object):
"""Wrapper class around Corpus streaming"""
def __init__(self, dir=None):
if dir:
docs = [join(dir, doc) for doc in listdir(dir) if isfile(join(dir, doc))]
""" Construct dictionary without having all texts in memory, based off the example in the Gensim docs"""
dictionary = Dictionary(open(doc).read().lower().split() for doc in docs)
once_words = [id for id, freq in dictionary.dfs.iteritems() if freq == 1]
dictionary.filter_tokens(once_words) # Exclude if appears once
dictionary.compactify() # Remove gaps in ids left by removing words
self.dictionary = dictionary
self.docs = PaperCorpus(docs)
else:
self.dictionary = Dictionary([])
self.docs = PaperCorpus([])
self.transformation = IdentityTransformation()
return
def __iter__(self):
# Apply transformation to corpus if it exists
docs = self.transformation[self.docs]
if type(self.docs) is PaperCorpus:
# Need to convert to a vector representation if still in plain text
for doc in self.docs.get_texts():
yield self.dictionary.doc2bow(doc)
else:
for doc in docs:
yield doc
def save(self, file):
# TODO: Investigate saving to another file format, more memory efficient?
corpus = [vector for vector in self]
MmCorpus.serialize(file, corpus)
Dictionary.save(self.dictionary, file+".dict")
def load(self, dictionary, corpus):
if isfile(dictionary) and isfile(corpus):
self.dictionary = Dictionary.load(dictionary)
self.docs = MmCorpus(corpus)
return True
return False
def __len__(self):
return len(self.docs)
def transform_corpus(self, transformation):
"""
Function to transform one corpus representation into another. Applying transformations can be costly
as they are done on the fly. Save to disk first if access will be frequent.
transformation: Transformation to be applied to the corpus
returns: Corpus object with transformation applied
"""
docs = self.transformation[self.docs]
transformed_model = transformation(docs)
new_corpus = Corpus()
new_corpus.dictionary = copy.copy(self.dictionary)
new_corpus.docs = copy.copy(docs)
new_corpus.transformation = transformed_model
return new_corpus
class PaperCorpus(TextCorpus):
# Wrap plain text document streaming - allows us to apply transformations to it
def get_texts(self):
for doc in self.input:
handle = open(doc, "r")
yield handle.read().lower().split()
class IdentityTransformation(TransformationABC):
# Identity transformation which returns the input corpus
def __getitem__(self, vec):
return vec
def corpus_equal(corpus1, corpus2):
if len(corpus1) == len(corpus2):
for doc1, doc2 in zip(corpus1, corpus2):
if doc1 != doc2:
return False
return True
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]) and isfile(sys.argv[2]):
load_corpus = Corpus()
corpus = Corpus(sys.argv[1])
# TODO: Write proper tests
# Tests if applying a transformation to a non-saved corpus results in a new representation
tfid_corpus = corpus.transform_corpus(models.TfidfModel)
print "Test 1"
# TODO: Fails possibly caused by the corpus not being in a vector representation? Investigate!
if corpus_equal(corpus, tfid_corpus):
print "tfid corpus is equal to corpus that hasn't been saved"
else:
print "tfid corpus is not equal to corpus that hasn't been saved"
corpus.save(sys.argv[2])
load_corpus.load(sys.argv[2]+".dict", sys.argv[2])
tfid_corpus_load = load_corpus.transform_corpus(models.TfidfModel)
# Tests if applying a transformation to a saved corpus results in a new representation
print "Test 2"
if corpus_equal(load_corpus, tfid_corpus_load):
print "tfid corpus is equal to corpus that has been saved"
else:
print "tfid corpus is not equal to corpus that has been saved"
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
Python
| 0
|
@@ -1570,16 +1570,28 @@
ve(self,
+ dictionary,
file):%0A
@@ -1729,94 +1729,92 @@
-MmCorpus.serialize(file, corpus)%0A Dictionary.save(self.dictionary, file+%22.dict%22
+Dictionary.save(self.dictionary, dictionary)%0A MmCorpus.serialize(file, corpus
)%0A%0A
@@ -3440,16 +3440,17 @@
n True%0A%0A
+%0A
def main
@@ -3524,16 +3524,40 @@
argv%5B2%5D)
+ and isfile(sys.argv%5B3%5D)
:%0A
@@ -4180,32 +4180,45 @@
save(sys.argv%5B2%5D
+, sys.argv%5B3%5D
)%0A load_c
@@ -4243,16 +4243,8 @@
v%5B2%5D
-+%22.dict%22
, sy
@@ -4250,17 +4250,17 @@
ys.argv%5B
-2
+3
%5D)%0A
|
b83e371f37477b5eaf552ac78383e3f0ac94bc21
|
Change in presseurop RSS feed
|
modules/presseurop/backend.py
|
modules/presseurop/backend.py
|
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
"backend for http://www.presseurop.eu"
from weboob.capabilities.messages import ICapMessages, Thread
from weboob.tools.capabilities.messages.GenericBackend import GenericNewspaperBackend
from weboob.tools.backend import BackendConfig
from weboob.tools.value import Value
from .browser import NewspaperPresseuropBrowser
from .tools import rssid, url2id
from weboob.tools.newsfeed import Newsfeed
class NewspaperPresseuropBackend(GenericNewspaperBackend, ICapMessages):
MAINTAINER = 'Florent Fourcot'
EMAIL = 'weboob@flo.fourcot.fr'
VERSION = '0.d'
LICENSE = 'AGPLv3+'
STORAGE = {'seen': {}}
NAME = 'presseurop'
DESCRIPTION = u'Presseurop website'
BROWSER = NewspaperPresseuropBrowser
RSSID = staticmethod(rssid)
URL2ID = staticmethod(url2id)
RSSSIZE = 50
CONFIG = BackendConfig(Value('lang', label='Lang of articles',
choices={'fr': 'fr', 'de': 'de', 'en': 'en', 'cs': 'cs', 'es': 'es', 'it': 'it', 'nl': 'nl', 'pl': 'pl', 'pt': 'pt', 'ro': 'ro'}, default='fr'))
def __init__(self, *args, **kwargs):
GenericNewspaperBackend.__init__(self, *args, **kwargs)
self.RSS_FEED = 'http://www.presseurop.eu/%s/rss.xml' % (self.config['lang'].get())
def iter_threads(self):
for article in Newsfeed(self.RSS_FEED, self.RSSID).iter_entries():
thread = Thread(article.link)
thread.title = article.title
thread.date = article.datetime
yield(thread)
|
Python
| 0
|
@@ -1526,9 +1526,10 @@
E =
-5
+14
0%0A
|
a3ae8a7ece6bc75437b9848cf43335690250c128
|
exit http server gracefully
|
presstatic/__main__.py
|
presstatic/__main__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import argparse
import SimpleHTTPServer
import SocketServer
from clint.textui import colored, puts, indent
from presstatic import help
from presstatic.builders import SiteBuilder
from presstatic.storage import s3
def http_server_on_dir(host, port, dir):
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
os.chdir(dir)
httpd = SocketServer.TCPServer((host, int(port)), Handler)
with indent(4, quote='>>'):
puts(colored.green("Serving {path}".format(path=dir)))
puts(colored.yellow("@ {host}:{port} ".format(host=host, port=port)))
httpd.serve_forever()
def main():
cli_parser = argparse.ArgumentParser(prog='presstatic')
cli_parser.add_argument('-output',
help="relative directory for the generated files.",
default='public')
cli_parser.add_argument('-http',
metavar='HOST:PORT',
help="creates an HTTP Server with <directory> as root dir.")
cli_parser.add_argument('-s3',
help="deploy on the specified S3 bucket.",
metavar='bucket')
cli_parser.add_argument('directory',
help='directory containing the static website.')
cli_args = cli_parser.parse_args()
site_builder = SiteBuilder(cli_args.directory, output=cli_args.output)
site_builder.build()
if cli_args.http:
host, port = cli_args.http.split(':')
root_dir = os.path.join(cli_args.directory, cli_args.output)
http_server_on_dir(host, port, root_dir)
elif cli_args.s3:
s3.S3Storage(cli_args.s3).store(site_builder.output_path)
puts(help.s3_setup(bucket=cli_args.s3))
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -274,39 +274,32 @@
%0Adef http_server
-_on_dir
(host, port, dir
@@ -608,24 +608,38 @@
ort=port)))%0A
+%0A try:%0A
httpd.se
@@ -652,16 +652,84 @@
rever()%0A
+ except KeyboardInterrupt:%0A pass%0A httpd.server_close()%0A
%0A%0Adef ma
@@ -1706,15 +1706,8 @@
rver
-_on_dir
(hos
@@ -1721,24 +1721,25 @@
, root_dir)%0A
+%0A
elif cli
|
c272b597a7ec2a1a5596daecce144001ae8a7d99
|
fix simulate device
|
fluxghost/websocket/touch.py
|
fluxghost/websocket/touch.py
|
from uuid import UUID
import logging
import json
from fluxclient.upnp.task import UpnpTask
from .base import WebSocketBase
logger = logging.getLogger("WS.DISCOVER")
class WebsocketTouch(WebSocketBase):
def __init__(self, *args):
WebSocketBase.__init__(self, *args)
def on_text_message(self, message):
try:
payload = json.loads(message)
uuid = UUID(hex=payload["uuid"])
password = payload.get("password")
self.touch_device(uuid, password)
except Exception:
logger.exception("Touch error")
self.close()
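# Example of an incoming text message this handler expects (illustrative;
# the all-zero uuid exercises the simulated-device path in touch_device):
#     {"uuid": "00000000000000000000000000000000", "password": null}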
def _run_auth(self, task, password=None):
ttl = 3
while True:
try:
if password:
return task.auth_with_password(password)
else:
return task.auth_without_password()
except RuntimeError as e:
if e.args[0] == "TIMEOUT" and ttl > 0:
logger.warn("Remote no response, retry")
ttl -= 1
else:
raise
def touch_device(self, uuid, password=None):
try:
if uuid.hex == "0" * 32:
self.send_text(json.dumps({
"serial": "SIMULATE00",
"name": "Simulate Device",
"has_response": True,
"reachable": True,
"auth": True
}))
task = UpnpTask(uuid, lookup_timeout=30.0)
resp = self._run_auth(task, password)
self.send_text(json.dumps({
"uuid": uuid.hex,
"serial": task.serial,
"name": task.name,
"has_response": resp is not None,
"reachable": True,
"auth": resp and resp.get("status") == "ok"
}))
except RuntimeError as err:
logger.debug("Error: %s" % err)
self.send_text(json.dumps({
"uuid": uuid.hex,
"has_response": False,
"reachable": False,
"auth": False
}))
|
Python
| 0.000002
|
@@ -1256,32 +1256,36 @@
+
%22serial%22: %22SIMUL
@@ -1292,16 +1292,20 @@
ATE00%22,%0A
+
@@ -1339,16 +1339,20 @@
evice%22,%0A
+
@@ -1389,32 +1389,36 @@
+
%22reachable%22: Tru
@@ -1428,32 +1428,36 @@
+
%22auth%22: True%0A
@@ -1453,16 +1453,20 @@
%22: True%0A
+
@@ -1464,32 +1464,55 @@
%7D))
+%0A return
%0A%0A ta
|
4bc9217f15ee394332ab54efdb96ded056825c2b
|
Add todo to handle shuffling of tracks.
|
mopidy_pandora/doubleclick.py
|
mopidy_pandora/doubleclick.py
|
import logging
import time
from mopidy.internal import encoding
from pandora.errors import PandoraException
from mopidy_pandora.library import PandoraUri
logger = logging.getLogger(__name__)
class DoubleClickHandler(object):
def __init__(self, config, client):
self.on_pause_resume_click = config["on_pause_resume_click"]
self.on_pause_next_click = config["on_pause_next_click"]
self.on_pause_previous_click = config["on_pause_previous_click"]
self.double_click_interval = config['double_click_interval']
self.client = client
self._click_time = 0
def set_click_time(self, click_time=None):
if click_time is None:
self._click_time = time.time()
else:
self._click_time = click_time
def get_click_time(self):
return self._click_time
def is_double_click(self):
double_clicked = self._click_time > 0 and time.time() - self._click_time < float(self.double_click_interval)
if double_clicked is False:
self._click_time = 0
return double_clicked
def on_change_track(self, active_track_uri, new_track_uri):
from mopidy_pandora.uri import PandoraUri
if not self.is_double_click():
return False
if active_track_uri is not None:
new_track_index = int(PandoraUri.parse(new_track_uri).index)
active_track_index = int(PandoraUri.parse(active_track_uri).index)
if new_track_index > active_track_index or new_track_index == 0 and active_track_index == 2:
return self.process_click(self.on_pause_next_click, active_track_uri)
elif new_track_index < active_track_index or new_track_index == active_track_index:
return self.process_click(self.on_pause_previous_click, active_track_uri)
return False
def on_resume_click(self, track_uri, time_position):
if not self.is_double_click() or time_position == 0:
return False
return self.process_click(self.on_pause_resume_click, track_uri)
def process_click(self, method, track_uri):
self.set_click_time(0)
uri = PandoraUri.parse(track_uri)
logger.info("Triggering event '%s' for song: %s", method, uri.name)
func = getattr(self, method)
try:
func(uri.token)
except PandoraException as e:
logger.error('Error calling event: %s', encoding.locale_decode(e))
return False
return True
def thumbs_up(self, track_token):
return self.client.add_feedback(track_token, True)
def thumbs_down(self, track_token):
return self.client.add_feedback(track_token, False)
def sleep(self, track_token):
return self.client.sleep_song(track_token)
def add_artist_bookmark(self, track_token):
return self.client.add_artist_bookmark(track_token)
def add_song_bookmark(self, track_token):
return self.client.add_song_bookmark(track_token)
|
Python
| 0
|
@@ -1467,16 +1467,233 @@
index)%0A%0A
+ # TODO: the order of the tracks will no longer be sequential if the user has 'shuffled' the tracklist%0A # Need to find a better approach for determining whether 'next' or 'previous' was clicked.%0A
|
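The double-click detection above is a bare timestamp comparison: the first click arms `_click_time`, and a second event arriving within `double_click_interval` seconds counts as a double click. A self-contained sketch of the same idea (the ClickTimer name and API are illustrative, not part of mopidy_pandora):
import time
class ClickTimer(object):
    """Illustrative double-click window, loosely mirroring DoubleClickHandler."""
    def __init__(self, interval=0.5):
        self.interval = float(interval)
        self._click_time = 0
    def click(self):
        # A second click inside the window is a double click; the state
        # resets either way so a third click starts a fresh window.
        now = time.time()
        double = self._click_time > 0 and now - self._click_time < self.interval
        self._click_time = 0 if double else now
        return double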
d4f083d0cc1096152d7d5cd13c63e82f1a58a298
|
handle testrunner exits as PR failures
|
spawner.py
|
spawner.py
|
#!/usr/bin/env python3
# Parse the YAML file, start the testrunners in parallel,
# and wait for them.
import os
import sys
import traceback
import subprocess
import utils.parser as parser
import utils.ghupdate as ghupdate
def main():
"Main entry point."
try:
n = parse_suites()
except SyntaxError as e:
# print the error to give feedback, but exit nicely
traceback.print_exc()
msg = e.msg
if e.__cause__ is not None:
msg += ": " + e.__cause__.msg
update_gh('error', msg)
else:
spawn_testrunners(n)
count_failures(n)
def parse_suites():
yml_file = os.path.join('checkouts',
os.environ['github_repo'],
'.redhat-ci.yml')
# this should have been checked already
assert os.path.isfile(yml_file)
for idx, suite in enumerate(parser.load_suites(yml_file)):
suite_dir = 'state/suite-%d/parsed' % idx
parser.flush_suite(suite, suite_dir)
# return the number of testsuites
return idx + 1
def spawn_testrunners(n):
testrunner = os.path.join(sys.path[0], "testrunner")
runners = []
for i in range(n):
p = subprocess.Popen([testrunner, str(i)])
runners.append(p)
# We don't implement any fail fast here, so just do a
# naive wait to collect them all.
failed = False
for runner in runners:
if runner.wait() != 0:
failed = True
# NB: When we say 'failed' here, we're talking about
# infrastructure failure. Bad PR code should never cause
# rc != 0.
if failed:
raise Exception("at least one runner failed")
def count_failures(n):
# It's helpful to have an easy global way to figure out
# if any of the suites failed, e.g. for integration in
# Jenkins. Let's write a 'failures' file counting the
# number of failed suites.
failed = 0
for i in range(n):
with open("state/suite-%d/rc" % i) as f:
if int(f.read().strip()) != 0:
failed += 1
with open("state/failures", "w") as f:
f.write("%d" % failed)
def update_gh(state, description):
args = {'repo': os.environ['github_repo'],
'commit': os.environ['github_commit'],
'token': os.environ['github_token'],
'state': state,
'context': 'Red Hat CI',
'description': description}
ghupdate.send(**args)
if os.path.isfile('state/is_merge_sha'):
with open('state/sha') as f:
args['commit'] = f.read()
ghupdate.send(**args)
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0
|
@@ -1942,32 +1942,336 @@
i in range(n):%0A
+ # If the rc file doesn't exist but the runner exited%0A # nicely, then it means there was a semantic error%0A # in the YAML (e.g. bad Docker image, bad ostree%0A # revision, etc...).%0A if not os.path.isfile(%22state/suite-%25d/rc%22 %25 i):%0A failed += 1%0A else:%0A
with ope
@@ -2315,16 +2315,20 @@
+
if int(f
@@ -2342,32 +2342,36 @@
.strip()) != 0:%0A
+
|
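The diff above makes count_failures treat a missing rc file as a failed suite, so a runner that exited before writing state (a semantic error in the YAML, per the added comment) still counts against the PR. The same accounting in standalone form (a sketch, assuming the state/suite-N/rc layout used above):
import os
def count_suite_failures(n, state_dir="state"):
    failed = 0
    for i in range(n):
        rc_path = os.path.join(state_dir, "suite-%d" % i, "rc")
        if not os.path.isfile(rc_path):
            # Runner exited without writing an rc file: count as failed.
            failed += 1
            continue
        with open(rc_path) as f:
            if int(f.read().strip()) != 0:
                failed += 1
    with open(os.path.join(state_dir, "failures"), "w") as f:
        f.write("%d" % failed)
    return failed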
aaa7aad003f58e8f3ea809c5e7a3be860f68b8ea
|
Clean up `cache` module
|
gapipy/cache.py
|
gapipy/cache.py
|
import collections
from time import time
try:
import cPickle as pickle
except ImportError:
import pickle
def update(d, u):
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def _items(mappingorseq):
"""Wrapper for efficient iteraiton over mappings represeted by dicts or
sequeunces::
>>> for k, v in _items([i, i*i] for i in xrange(5)):
... assert k*k == v
>>> for k, v in _items({i: i*i} for i in xrange(5)):
... assert k*k == v
"""
return mappingorseq.iteritems() if hasattr(mappingorseq, 'iteritems') \
else mappingorseq
class BaseCache(object):
"""Base class for the cache system. All the cache systems will implemtent
this API or a superset of it.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`set`.
"""
def __init__(self, default_timeout=300, **kwargs):
self.default_timeout = default_timeout
def get(self, key):
return None
def set(self, key, value):
pass
def delete(self, key):
pass
def clear(self):
pass
def count(self):
raise NotImplementedError
def is_cached(self, key):
return False
class NullCache(BaseCache):
"""
A cache that doesn't cache.
"""
class SimpleCache(BaseCache):
"""Simply memory cache for single process environments. This class exists
mainly for a development server and is not 100% thread safe.
:param threshold: the maximum number of items the cache stores before
it starts evicting keys.
"""
def __init__(self, threshold=500, default_timeout=300, **kwargs):
BaseCache.__init__(self, default_timeout)
self._cache = {}
self._threshold = threshold
def _prune(self):
if len(self._cache) > self._threshold:
now = time()
# Prune expired keys, or every few keys.
for idx, (key, (expires, _)) in enumerate(self._cache.items()):
if expires <= now or idx % 3 == 0:
self._cache.pop(key, None)
def get(self, key):
expires, value = self._cache.get(key, (0, None))
if expires > time():
return pickle.loads(value)
def set(self, key, data_dict, timeout=None):
if timeout is None:
timeout = self.default_timeout
self._prune()
self._cache[key] = (time() + timeout, pickle.dumps(data_dict,
pickle.HIGHEST_PROTOCOL))
def delete(self, key):
return self._cache.pop(key, None)
def clear(self):
self._cache.clear()
def count(self):
return len(self._cache)
def is_cached(self, key):
return key in self._cache
class RedisCache(BaseCache):
"""Uses the Redis key-value store as a cache backend.
"""
_connection_pool_cache = {}
def __init__(self, host='localhost', port=6379, password=None,
db=0, default_timeout=300, key_prefix=None, **kwargs):
BaseCache.__init__(self, default_timeout)
self.key_prefix = key_prefix or ''
self._client = self._get_client(host, port, password, db)
@classmethod
def _get_client(cls, host, port, password, db):
"""
Retrieves a connection pool from a class-local cache (or creates it if
necessary), returns a Redis client instance that uses that pool.
"""
try:
import redis
except ImportError:
raise RuntimeError('no redis module found')
credentials = (host, port, password, db)
        # dict.get evaluates its default eagerly, so passing the pool
        # constructor there would build a fresh pool on every call;
        # create one lazily only on a cache miss instead.
        pool = cls._connection_pool_cache.get(credentials)
        if pool is None:
            pool = redis.ConnectionPool(
                host=host, port=port, password=password, db=db)
            cls._connection_pool_cache[credentials] = pool
return redis.Redis(connection_pool=pool)
def load_object(self, value):
"""The reversal of `dump_object`. This might be called with None.
"""
if value is None:
return None
return pickle.loads(value)
def dump_object(self, value):
return pickle.dumps(value)
def get(self, key):
return self.load_object(self._client.get(self.key_prefix + key))
def set(self, key, data_dict, timeout=None):
if timeout is None:
timeout = self.default_timeout
data = self.dump_object(data_dict)
return self._client.setex(self.key_prefix + key, data, timeout)
def delete(self, key):
return self._client.delete(self.key_prefix + key)
def clear(self):
cache_keys = self._client.keys('{}*'.format(self.key_prefix))
map(self._client.delete, cache_keys)
def info(self):
return self._client.info()
def is_cached(self, key):
return self._client.exists(self.key_prefix + key)
|
Python
| 0.000009
|
@@ -1,23 +1,4 @@
-import collections%0A
from
@@ -95,637 +95,8 @@
e%0A%0A%0A
-def update(d, u):%0A for k, v in u.iteritems():%0A if isinstance(v, collections.Mapping):%0A r = update(d.get(k, %7B%7D), v)%0A d%5Bk%5D = r%0A else:%0A d%5Bk%5D = u%5Bk%5D%0A return d%0A%0A%0Adef _items(mappingorseq):%0A %22%22%22Wrapper for efficient iteraiton over mappings represeted by dicts or%0A sequeunces::%0A%0A %3E%3E%3E for k, v in _items(%5Bi, i*i%5D for i in xrange(5)):%0A ... assert k*k == v%0A%0A %3E%3E%3E for k, v in _items(%7Bi: i*i%7D for i in xrange(5)):%0A ... assert k*k == v%0A %22%22%22%0A return mappingorseq.iteritems() if hasattr(mappingorseq, 'iteritems') %5C%0A else mappingorseq%0A%0A%0A
clas
@@ -189,17 +189,16 @@
l implem
-t
ent%0A
@@ -1201,33 +1201,48 @@
s):%0A
-BaseCache
+super(SimpleCache, self)
.__init__(se
@@ -1231,38 +1231,32 @@
self).__init__(
-self,
default_timeout)
@@ -1246,32 +1246,42 @@
(default_timeout
+, **kwargs
)%0A self._
@@ -1300,25 +1300,24 @@
self.
-_
threshold =
@@ -1384,17 +1384,16 @@
%3E self.
-_
threshol
@@ -2391,25 +2391,21 @@
backend.
-%0A
%22%22%22%0A
+%0A
_con
@@ -2551,20 +2551,18 @@
_prefix=
-None
+''
, **kwar
@@ -2574,25 +2574,39 @@
-BaseCache
+super(RedisCache, self)
.__init_
@@ -2599,38 +2599,32 @@
self).__init__(
-self,
default_timeout)
@@ -2622,16 +2622,26 @@
_timeout
+, **kwargs
)%0A
@@ -2674,14 +2674,8 @@
efix
- or ''
%0A
@@ -3519,22 +3519,15 @@
ith
+%60
None
+%60
.
-%0A
%22%22%22%0A
|
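RedisCache's class-level _connection_pool_cache exists so that every instance built with the same (host, port, password, db) tuple shares one connection pool. A standalone sketch of that memoization pattern (illustrative, and assumes the redis package is installed):
_pools = {}
def get_pool(host="localhost", port=6379, db=0):
    """Build each ConnectionPool once per credential tuple, then reuse it."""
    import redis
    key = (host, port, db)
    pool = _pools.get(key)
    if pool is None:
        # Construct lazily: building the pool inside dict.get's default
        # argument would create a new one on every call.
        pool = redis.ConnectionPool(host=host, port=port, db=db)
        _pools[key] = pool
    return pool
Calling get_pool twice with the same arguments returns the identical pool object, which is what keeps Redis connections from multiplying across cache instances.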